1 /* hci_core.c - HCI core Bluetooth handling */
2
3 /*
4 * Copyright (c) 2017-2021 Nordic Semiconductor ASA
5 * Copyright (c) 2015-2016 Intel Corporation
6 *
7 * SPDX-License-Identifier: Apache-2.0
8 */
9
10 #include <zephyr/kernel.h>
11 #include <string.h>
12 #include <stdio.h>
13 #include <errno.h>
14 #include <zephyr/sys/atomic.h>
15 #include <zephyr/sys/check.h>
16 #include <zephyr/sys/util.h>
17 #include <zephyr/sys/slist.h>
18 #include <zephyr/sys/byteorder.h>
19 #include <zephyr/debug/stack.h>
20 #include <zephyr/sys/__assert.h>
21 #include <soc.h>
22
23 #include <zephyr/settings/settings.h>
24
25 #include <zephyr/bluetooth/bluetooth.h>
26 #include <zephyr/bluetooth/conn.h>
27 #include <zephyr/bluetooth/l2cap.h>
28 #include <zephyr/bluetooth/hci.h>
29 #include <zephyr/bluetooth/hci_vs.h>
30 #include <zephyr/drivers/bluetooth/hci_driver.h>
31
32 #include "common/bt_str.h"
33 #include "common/assert.h"
34
35 #include "common/rpa.h"
36 #include "keys.h"
37 #include "monitor.h"
38 #include "hci_core.h"
39 #include "hci_ecc.h"
40 #include "ecc.h"
41 #include "id.h"
42 #include "adv.h"
43 #include "scan.h"
44
45 #include "addr_internal.h"
46 #include "conn_internal.h"
47 #include "iso_internal.h"
48 #include "l2cap_internal.h"
49 #include "gatt_internal.h"
50 #include "smp.h"
51 #include "crypto.h"
52 #include "settings.h"
53
54 #if defined(CONFIG_BT_BREDR)
55 #include "br.h"
56 #endif
57
58 #if defined(CONFIG_BT_DF)
59 #include "direction_internal.h"
60 #endif /* CONFIG_BT_DF */
61
62 #define LOG_LEVEL CONFIG_BT_HCI_CORE_LOG_LEVEL
63 #include <zephyr/logging/log.h>
64 LOG_MODULE_REGISTER(bt_hci_core);
65
66 #define HCI_CMD_TIMEOUT K_SECONDS(10)
67
68 /* Stacks for the threads */
69 #if !defined(CONFIG_BT_RECV_BLOCKING)
70 static void rx_work_handler(struct k_work *work);
71 static K_WORK_DEFINE(rx_work, rx_work_handler);
72 #if defined(CONFIG_BT_RECV_WORKQ_BT)
73 static struct k_work_q bt_workq;
74 static K_KERNEL_STACK_DEFINE(rx_thread_stack, CONFIG_BT_RX_STACK_SIZE);
75 #endif /* CONFIG_BT_RECV_WORKQ_BT */
76 #endif /* !CONFIG_BT_RECV_BLOCKING */
77 static struct k_thread tx_thread_data;
78 static K_KERNEL_STACK_DEFINE(tx_thread_stack, CONFIG_BT_HCI_TX_STACK_SIZE);
79
80 static void init_work(struct k_work *work);
81
82 struct bt_dev bt_dev = {
83 .init = Z_WORK_INITIALIZER(init_work),
84 #if defined(CONFIG_BT_PRIVACY)
85 .rpa_timeout = CONFIG_BT_RPA_TIMEOUT,
86 #endif
87 #if defined(CONFIG_BT_DEVICE_APPEARANCE_DYNAMIC)
88 .appearance = CONFIG_BT_DEVICE_APPEARANCE,
89 #endif
90 };
91
92 static bt_ready_cb_t ready_cb;
93
94 #if defined(CONFIG_BT_HCI_VS_EVT_USER)
95 static bt_hci_vnd_evt_cb_t *hci_vnd_evt_cb;
96 #endif /* CONFIG_BT_HCI_VS_EVT_USER */
97
98 struct cmd_data {
99 /** HCI status of the command completion */
100 uint8_t status;
101
102 /** The command OpCode that the buffer contains */
103 uint16_t opcode;
104
105 /** The state to update when command completes with success. */
106 struct bt_hci_cmd_state_set *state;
107
108 /** Used by bt_hci_cmd_send_sync. */
109 struct k_sem *sync;
110 };
111
112 static struct cmd_data cmd_data[CONFIG_BT_BUF_CMD_TX_COUNT];
113
114 #if defined(CONFIG_BT_CONN)
115 struct acl_data {
116 uint16_t acl_handle;
117 };
118
119 static struct acl_data acl_data[CONFIG_BT_BUF_ACL_RX_COUNT];
120 #endif
121
122 #define cmd(buf) (&cmd_data[net_buf_id(buf)])
123 #define acl(buf) (&acl_data[net_buf_id(buf)])
124
/* Attach a command-completion state update to an HCI command buffer.
 *
 * Records in the buffer's cmd metadata which bit of @p target should be
 * set/cleared (per @p val) when the command completes with success (see
 * struct cmd_data::state).
 */
void bt_hci_cmd_state_set_init(struct net_buf *buf,
			       struct bt_hci_cmd_state_set *state,
			       atomic_t *target, int bit, bool val)
{
	*state = (struct bt_hci_cmd_state_set){
		.target = target,
		.bit = bit,
		.val = val,
	};

	cmd(buf)->state = state;
}
134
135 /* HCI command buffers. Derive the needed size from both Command and Event
136 * buffer length since the buffer is also used for the response event i.e
137 * command complete or command status.
138 */
139 #define CMD_BUF_SIZE MAX(BT_BUF_EVT_RX_SIZE, BT_BUF_CMD_TX_SIZE)
140 NET_BUF_POOL_FIXED_DEFINE(hci_cmd_pool, CONFIG_BT_BUF_CMD_TX_COUNT,
141 CMD_BUF_SIZE, 8, NULL);
142
143 struct event_handler {
144 uint8_t event;
145 uint8_t min_len;
146 void (*handler)(struct net_buf *buf);
147 };
148
149 #define EVENT_HANDLER(_evt, _handler, _min_len) \
150 { \
151 .event = _evt, \
152 .handler = _handler, \
153 .min_len = _min_len, \
154 }
155
/* Dispatch @p event to the first matching entry in @p handlers.
 *
 * Returns 0 when a handler ran, -EINVAL when the event payload is shorter
 * than the handler's minimum length, or -EOPNOTSUPP when no entry matches.
 */
static int handle_event_common(uint8_t event, struct net_buf *buf,
			       const struct event_handler *handlers, size_t num_handlers)
{
	for (size_t i = 0U; i < num_handlers; i++) {
		const struct event_handler *h = &handlers[i];

		if (h->event != event) {
			continue;
		}

		if (buf->len < h->min_len) {
			LOG_ERR("Too small (%u bytes) event 0x%02x", buf->len, event);
			return -EINVAL;
		}

		h->handler(buf);
		return 0;
	}

	return -EOPNOTSUPP;
}
179
/* Dispatch a standard HCI event, warning about unknown event codes. */
static void handle_event(uint8_t event, struct net_buf *buf, const struct event_handler *handlers,
			 size_t num_handlers)
{
	int err = handle_event_common(event, buf, handlers, num_handlers);

	if (err == -EOPNOTSUPP) {
		LOG_WRN("Unhandled event 0x%02x len %u: %s", event, buf->len,
			bt_hex(buf->data, buf->len));
	}

	/* Other errors (e.g. truncated events) are logged by handle_event_common */
}
193
/* Dispatch a vendor-specific HCI event, warning about unknown ones. */
static void handle_vs_event(uint8_t event, struct net_buf *buf,
			    const struct event_handler *handlers, size_t num_handlers)
{
	int err = handle_event_common(event, buf, handlers, num_handlers);

	if (err == -EOPNOTSUPP) {
		LOG_WRN("Unhandled vendor-specific event: %s", bt_hex(buf->data, buf->len));
	}

	/* Other errors (e.g. truncated events) are logged by handle_event_common */
}
206
207 #if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
/* Destroy callback for ACL RX buffers when controller-to-host flow control
 * is enabled.
 *
 * Returns the buffer to its pool and then, if the controller supports the
 * Host_Number_Of_Completed_Packets command (supported-commands octet 10
 * bit 5), reports one completed packet for the buffer's connection handle.
 */
void bt_hci_host_num_completed_packets(struct net_buf *buf)
{

	struct bt_hci_cp_host_num_completed_packets *cp;
	/* Capture the handle before the buffer is returned to the pool below */
	uint16_t handle = acl(buf)->acl_handle;
	struct bt_hci_handle_count *hc;
	struct bt_conn *conn;

	net_buf_destroy(buf);

	/* Do nothing if controller to host flow control is not supported */
	if (!BT_CMD_TEST(bt_dev.supported_commands, 10, 5)) {
		return;
	}

	conn = bt_conn_lookup_handle(handle, BT_CONN_TYPE_ALL);
	if (!conn) {
		LOG_WRN("Unable to look up conn with ACL handle %u", handle);
		return;
	}

	/* Only report credits for handles the controller still considers live */
	if (conn->state != BT_CONN_CONNECTED &&
	    conn->state != BT_CONN_DISCONNECTING) {
		LOG_WRN("Not reporting packet for non-connected conn");
		bt_conn_unref(conn);
		return;
	}

	bt_conn_unref(conn);

	LOG_DBG("Reporting completed packet for handle %u", handle);

	/* Note: "buf" is reused here for the outgoing command buffer */
	buf = bt_hci_cmd_create(BT_HCI_OP_HOST_NUM_COMPLETED_PACKETS,
				sizeof(*cp) + sizeof(*hc));
	if (!buf) {
		LOG_ERR("Unable to allocate new HCI command");
		return;
	}

	cp = net_buf_add(buf, sizeof(*cp));
	cp->num_handles = sys_cpu_to_le16(1);

	hc = net_buf_add(buf, sizeof(*hc));
	hc->handle = sys_cpu_to_le16(handle);
	hc->count = sys_cpu_to_le16(1);

	bt_hci_cmd_send(BT_HCI_OP_HOST_NUM_COMPLETED_PACKETS, buf);
}
256 #endif /* defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL) */
257
/* Allocate and initialize an HCI command buffer.
 *
 * The buffer gets the HCI command header (opcode + parameter length) added
 * and its cmd metadata reset; the caller appends the parameters. Allocation
 * waits forever on the command pool, so the return value is asserted
 * non-NULL.
 */
struct net_buf *bt_hci_cmd_create(uint16_t opcode, uint8_t param_len)
{
	struct net_buf *buf;
	struct bt_hci_cmd_hdr *hdr;

	LOG_DBG("opcode 0x%04x param_len %u", opcode, param_len);

	buf = net_buf_alloc(&hci_cmd_pool, K_FOREVER);
	__ASSERT_NO_MSG(buf);

	LOG_DBG("buf %p", buf);

	net_buf_reserve(buf, BT_BUF_RESERVE);
	bt_buf_set_type(buf, BT_BUF_CMD);

	/* Reset per-buffer command metadata */
	cmd(buf)->opcode = opcode;
	cmd(buf)->sync = NULL;
	cmd(buf)->state = NULL;

	hdr = net_buf_add(buf, sizeof(*hdr));
	hdr->opcode = sys_cpu_to_le16(opcode);
	hdr->param_len = param_len;

	return buf;
}
284
/* Queue an HCI command for asynchronous transmission.
 *
 * Allocates an empty parameter-less command when @p buf is NULL. Returns 0
 * on success or a negative errno on allocation/driver failure.
 */
int bt_hci_cmd_send(uint16_t opcode, struct net_buf *buf)
{
	int err;

	if (!buf) {
		buf = bt_hci_cmd_create(opcode, 0);
		if (!buf) {
			return -ENOBUFS;
		}
	}

	LOG_DBG("opcode 0x%04x len %u", opcode, buf->len);

	/* Host Number of Completed Packets can ignore the ncmd value
	 * and does not generate any cmd complete/status events.
	 */
	if (opcode != BT_HCI_OP_HOST_NUM_COMPLETED_PACKETS) {
		net_buf_put(&bt_dev.cmd_tx_queue, buf);
		return 0;
	}

	err = bt_send(buf);
	if (err) {
		LOG_ERR("Unable to send to driver (err %d)", err);
		net_buf_unref(buf);
	}

	return err;
}
315
/* Send an HCI command and block until its completion event arrives.
 *
 * Allocates a parameter-less command when @p buf is NULL. On success the
 * response buffer is handed back via @p rsp (caller must unref it) or
 * released when @p rsp is NULL. Returns 0 on success, -ENOBUFS on
 * allocation failure, or a negative errno mapped from the HCI status.
 */
int bt_hci_cmd_send_sync(uint16_t opcode, struct net_buf *buf,
			 struct net_buf **rsp)
{
	struct k_sem sync_sem;
	uint8_t status;
	int err;

	if (!buf) {
		buf = bt_hci_cmd_create(opcode, 0);
		if (!buf) {
			return -ENOBUFS;
		}
	}

	LOG_DBG("buf %p opcode 0x%04x len %u", buf, opcode, buf->len);

	/* The semaphore is given when the command's completion is processed */
	k_sem_init(&sync_sem, 0, 1);
	cmd(buf)->sync = &sync_sem;

	/* Take an extra reference: one is consumed by the TX path, the other
	 * keeps the buffer (and its cmd metadata) alive so we can read the
	 * status after completion.
	 */
	net_buf_put(&bt_dev.cmd_tx_queue, net_buf_ref(buf));

	err = k_sem_take(&sync_sem, HCI_CMD_TIMEOUT);
	BT_ASSERT_MSG(err == 0, "command opcode 0x%04x timeout with err %d", opcode, err);

	status = cmd(buf)->status;
	if (status) {
		LOG_WRN("opcode 0x%04x status 0x%02x", opcode, status);
		net_buf_unref(buf);

		/* Map well-known HCI status codes to distinct errno values */
		switch (status) {
		case BT_HCI_ERR_CONN_LIMIT_EXCEEDED:
			return -ECONNREFUSED;
		case BT_HCI_ERR_INSUFFICIENT_RESOURCES:
			return -ENOMEM;
		default:
			return -EIO;
		}
	}

	LOG_DBG("rsp %p opcode 0x%04x len %u", buf, opcode, buf->len);

	if (rsp) {
		*rsp = buf;
	} else {
		net_buf_unref(buf);
	}

	return 0;
}
365
bt_hci_le_rand(void * buffer,size_t len)366 int bt_hci_le_rand(void *buffer, size_t len)
367 {
368 struct bt_hci_rp_le_rand *rp;
369 struct net_buf *rsp;
370 size_t count;
371 int err;
372
373 /* Check first that HCI_LE_Rand is supported */
374 if (!BT_CMD_TEST(bt_dev.supported_commands, 27, 7)) {
375 return -ENOTSUP;
376 }
377
378 while (len > 0) {
379 /* Number of bytes to fill on this iteration */
380 count = MIN(len, sizeof(rp->rand));
381 /* Request the next 8 bytes over HCI */
382 err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_RAND, NULL, &rsp);
383 if (err) {
384 return err;
385 }
386 /* Copy random data into buffer */
387 rp = (void *)rsp->data;
388 memcpy(buffer, rp->rand, count);
389
390 net_buf_unref(rsp);
391 buffer = (uint8_t *)buffer + count;
392 len -= count;
393 }
394
395 return 0;
396 }
397
/* Query the controller's maximum Data Length Extension TX parameters.
 *
 * On success writes the maximum TX octets and TX time to the out
 * parameters and returns 0; otherwise returns a negative errno.
 */
static int hci_le_read_max_data_len(uint16_t *tx_octets, uint16_t *tx_time)
{
	struct net_buf *rsp;
	int err;

	err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_READ_MAX_DATA_LEN, NULL, &rsp);
	if (err != 0) {
		LOG_ERR("Failed to read DLE max data len");
		return err;
	}

	const struct bt_hci_rp_le_read_max_data_len *rp = (void *)rsp->data;

	*tx_octets = sys_le16_to_cpu(rp->max_tx_octets);
	*tx_time = sys_le16_to_cpu(rp->max_tx_time);
	net_buf_unref(rsp);

	return 0;
}
417
/* Translate an HCI PHY value to the corresponding BT_GAP_LE_PHY_* value.
 *
 * Returns 0 for unknown HCI PHY values.
 */
uint8_t bt_get_phy(uint8_t hci_phy)
{
	if (hci_phy == BT_HCI_LE_PHY_1M) {
		return BT_GAP_LE_PHY_1M;
	}

	if (hci_phy == BT_HCI_LE_PHY_2M) {
		return BT_GAP_LE_PHY_2M;
	}

	if (hci_phy == BT_HCI_LE_PHY_CODED) {
		return BT_GAP_LE_PHY_CODED;
	}

	return 0;
}
431
432 #if defined(CONFIG_BT_CONN_TX)
/* Handle the HCI Number Of Completed Packets event.
 *
 * For each (handle, count) pair, releases "count" TX credits for the
 * connection: either by decrementing the pending-without-callback counter
 * or by moving a pending TX context to the tx_complete list so its
 * callback runs from the tx_complete_work handler.
 */
static void hci_num_completed_packets(struct net_buf *buf)
{
	struct bt_hci_evt_num_completed_packets *evt = (void *)buf->data;
	int i;

	/* Reject events whose claimed handle count exceeds the payload */
	if (sizeof(*evt) + sizeof(evt->h[0]) * evt->num_handles > buf->len) {
		LOG_ERR("evt num_handles (=%u) too large (%u > %u)",
			evt->num_handles,
			sizeof(*evt) + sizeof(evt->h[0]) * evt->num_handles,
			buf->len);
		return;
	}

	LOG_DBG("num_handles %u", evt->num_handles);

	for (i = 0; i < evt->num_handles; i++) {
		uint16_t handle, count;
		struct bt_conn *conn;

		handle = sys_le16_to_cpu(evt->h[i].handle);
		count = sys_le16_to_cpu(evt->h[i].count);

		LOG_DBG("handle %u count %u", handle, count);

		conn = bt_conn_lookup_handle(handle, BT_CONN_TYPE_ALL);
		if (!conn) {
			LOG_ERR("No connection for handle %u", handle);
			continue;
		}

		while (count--) {
			struct bt_conn_tx *tx;
			sys_snode_t *node;
			unsigned int key;

			/* irq_lock protects pending_no_cb and the tx lists */
			key = irq_lock();

			/* Packets sent without a completion callback only
			 * need the controller buffer credit returned.
			 */
			if (conn->pending_no_cb) {
				conn->pending_no_cb--;
				irq_unlock(key);
				k_sem_give(bt_conn_get_pkts(conn));
				continue;
			}

			node = sys_slist_get(&conn->tx_pending);
			irq_unlock(key);

			if (!node) {
				LOG_ERR("packets count mismatch");
				break;
			}

			tx = CONTAINER_OF(node, struct bt_conn_tx, node);

			/* Transfer the context's no-callback run to the conn
			 * and queue the context for callback completion.
			 */
			key = irq_lock();
			conn->pending_no_cb = tx->pending_no_cb;
			tx->pending_no_cb = 0U;
			sys_slist_append(&conn->tx_complete, &tx->node);
			irq_unlock(key);

			k_work_submit(&conn->tx_complete_work);
			k_sem_give(bt_conn_get_pkts(conn));
		}

		bt_conn_unref(conn);
	}
}
500 #endif /* CONFIG_BT_CONN_TX */
501
502 #if defined(CONFIG_BT_CONN)
hci_acl(struct net_buf * buf)503 static void hci_acl(struct net_buf *buf)
504 {
505 struct bt_hci_acl_hdr *hdr;
506 uint16_t handle, len;
507 struct bt_conn *conn;
508 uint8_t flags;
509
510 LOG_DBG("buf %p", buf);
511 if (buf->len < sizeof(*hdr)) {
512 LOG_ERR("Invalid HCI ACL packet size (%u)", buf->len);
513 net_buf_unref(buf);
514 return;
515 }
516
517 hdr = net_buf_pull_mem(buf, sizeof(*hdr));
518 len = sys_le16_to_cpu(hdr->len);
519 handle = sys_le16_to_cpu(hdr->handle);
520 flags = bt_acl_flags(handle);
521
522 acl(buf)->acl_handle = bt_acl_handle(handle);
523
524 LOG_DBG("handle %u len %u flags %u", acl(buf)->acl_handle, len, flags);
525
526 if (buf->len != len) {
527 LOG_ERR("ACL data length mismatch (%u != %u)", buf->len, len);
528 net_buf_unref(buf);
529 return;
530 }
531
532 conn = bt_conn_lookup_handle(acl(buf)->acl_handle, BT_CONN_TYPE_ALL);
533 if (!conn) {
534 LOG_ERR("Unable to find conn for handle %u", acl(buf)->acl_handle);
535 net_buf_unref(buf);
536 return;
537 }
538
539 bt_conn_recv(conn, buf, flags);
540 bt_conn_unref(conn);
541 }
542
hci_data_buf_overflow(struct net_buf * buf)543 static void hci_data_buf_overflow(struct net_buf *buf)
544 {
545 struct bt_hci_evt_data_buf_overflow *evt = (void *)buf->data;
546
547 LOG_WRN("Data buffer overflow (link type 0x%02x)", evt->link_type);
548 }
549
550 #if defined(CONFIG_BT_CENTRAL)
set_phy_conn_param(const struct bt_conn * conn,struct bt_hci_ext_conn_phy * phy)551 static void set_phy_conn_param(const struct bt_conn *conn,
552 struct bt_hci_ext_conn_phy *phy)
553 {
554 phy->conn_interval_min = sys_cpu_to_le16(conn->le.interval_min);
555 phy->conn_interval_max = sys_cpu_to_le16(conn->le.interval_max);
556 phy->conn_latency = sys_cpu_to_le16(conn->le.latency);
557 phy->supervision_timeout = sys_cpu_to_le16(conn->le.timeout);
558
559 phy->min_ce_len = 0;
560 phy->max_ce_len = 0;
561 }
562
/* Initiate a connection using the LE Extended Create Connection command.
 *
 * Builds one per-PHY parameter entry for each PHY selected in
 * bt_dev.create_param.options (1M unless disabled, plus Coded if enabled)
 * and marks BT_DEV_INITIATING on successful command completion.
 * Returns 0 on success or a negative errno.
 */
int bt_le_create_conn_ext(const struct bt_conn *conn)
{
	struct bt_hci_cp_le_ext_create_conn *cp;
	struct bt_hci_ext_conn_phy *phy;
	struct bt_hci_cmd_state_set state;
	bool use_filter = false;
	struct net_buf *buf;
	uint8_t own_addr_type;
	uint8_t num_phys;
	int err;

	if (IS_ENABLED(CONFIG_BT_FILTER_ACCEPT_LIST)) {
		use_filter = atomic_test_bit(conn->flags, BT_CONN_AUTO_CONNECT);
	}

	err = bt_id_set_create_conn_own_addr(use_filter, &own_addr_type);
	if (err) {
		return err;
	}

	/* One per-PHY entry per initiating PHY: 1M (unless opted out) and
	 * Coded (if opted in).
	 */
	num_phys = (!(bt_dev.create_param.options &
		      BT_CONN_LE_OPT_NO_1M) ? 1 : 0) +
		   ((bt_dev.create_param.options &
		     BT_CONN_LE_OPT_CODED) ? 1 : 0);

	buf = bt_hci_cmd_create(BT_HCI_OP_LE_EXT_CREATE_CONN, sizeof(*cp) +
				num_phys * sizeof(*phy));
	if (!buf) {
		return -ENOBUFS;
	}

	cp = net_buf_add(buf, sizeof(*cp));
	(void)memset(cp, 0, sizeof(*cp));

	if (use_filter) {
		/* User Initiated procedure use fast scan parameters. */
		bt_addr_le_copy(&cp->peer_addr, BT_ADDR_LE_ANY);
		cp->filter_policy = BT_HCI_LE_CREATE_CONN_FP_FILTER;
	} else {
		const bt_addr_le_t *peer_addr = &conn->le.dst;

#if defined(CONFIG_BT_SMP)
		if (bt_dev.le.rl_entries > bt_dev.le.rl_size) {
			/* Host resolving is used, use the RPA directly. */
			peer_addr = &conn->le.resp_addr;
		}
#endif
		bt_addr_le_copy(&cp->peer_addr, peer_addr);
		cp->filter_policy = BT_HCI_LE_CREATE_CONN_FP_NO_FILTER;
	}

	cp->own_addr_type = own_addr_type;
	cp->phys = 0;

	if (!(bt_dev.create_param.options & BT_CONN_LE_OPT_NO_1M)) {
		cp->phys |= BT_HCI_LE_EXT_SCAN_PHY_1M;
		phy = net_buf_add(buf, sizeof(*phy));
		phy->scan_interval = sys_cpu_to_le16(
			bt_dev.create_param.interval);
		phy->scan_window = sys_cpu_to_le16(
			bt_dev.create_param.window);
		set_phy_conn_param(conn, phy);
	}

	if (bt_dev.create_param.options & BT_CONN_LE_OPT_CODED) {
		cp->phys |= BT_HCI_LE_EXT_SCAN_PHY_CODED;
		phy = net_buf_add(buf, sizeof(*phy));
		phy->scan_interval = sys_cpu_to_le16(
			bt_dev.create_param.interval_coded);
		phy->scan_window = sys_cpu_to_le16(
			bt_dev.create_param.window_coded);
		set_phy_conn_param(conn, phy);
	}

	/* Set BT_DEV_INITIATING when the command succeeds */
	bt_hci_cmd_state_set_init(buf, &state, bt_dev.flags,
				  BT_DEV_INITIATING, true);

	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_EXT_CREATE_CONN, buf, NULL);
}
642
/* Initiate a connection to a synced device (PAwR) using the LE Extended
 * Create Connection command, version 2.
 *
 * The initiating PHY is derived from the secondary PHY of the given
 * extended advertising set. Returns 0 on success or a negative errno.
 */
int bt_le_create_conn_synced(const struct bt_conn *conn, const struct bt_le_ext_adv *adv,
			     uint8_t subevent)
{
	struct bt_hci_cp_le_ext_create_conn_v2 *cp;
	struct bt_hci_ext_conn_phy *phy;
	struct bt_hci_cmd_state_set state;
	struct net_buf *buf;
	uint8_t own_addr_type;
	int err;

	err = bt_id_set_create_conn_own_addr(false, &own_addr_type);
	if (err) {
		return err;
	}

	/* There shall only be one Initiating_PHYs */
	buf = bt_hci_cmd_create(BT_HCI_OP_LE_EXT_CREATE_CONN_V2, sizeof(*cp) + sizeof(*phy));
	if (!buf) {
		return -ENOBUFS;
	}

	cp = net_buf_add(buf, sizeof(*cp));
	(void)memset(cp, 0, sizeof(*cp));

	cp->subevent = subevent;
	cp->adv_handle = adv->handle;
	bt_addr_le_copy(&cp->peer_addr, &conn->le.dst);
	cp->filter_policy = BT_HCI_LE_CREATE_CONN_FP_NO_FILTER;
	cp->own_addr_type = own_addr_type;

	/* The Initiating_PHY is the secondary phy of the corresponding ext adv set */
	if (adv->options & BT_LE_ADV_OPT_CODED) {
		cp->phys = BT_HCI_LE_EXT_SCAN_PHY_CODED;
	} else if (adv->options & BT_LE_ADV_OPT_NO_2M) {
		cp->phys = BT_HCI_LE_EXT_SCAN_PHY_1M;
	} else {
		cp->phys = BT_HCI_LE_EXT_SCAN_PHY_2M;
	}

	phy = net_buf_add(buf, sizeof(*phy));
	(void)memset(phy, 0, sizeof(*phy));
	set_phy_conn_param(conn, phy);

	/* Set BT_DEV_INITIATING when the command succeeds */
	bt_hci_cmd_state_set_init(buf, &state, bt_dev.flags, BT_DEV_INITIATING, true);

	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_EXT_CREATE_CONN_V2, buf, NULL);
}
690
/* Initiate a connection using the legacy LE Create Connection command.
 *
 * Marks BT_DEV_INITIATING on successful command completion.
 * Returns 0 on success or a negative errno.
 */
static int bt_le_create_conn_legacy(const struct bt_conn *conn)
{
	struct bt_hci_cp_le_create_conn *cp;
	struct bt_hci_cmd_state_set state;
	bool use_filter = false;
	struct net_buf *buf;
	uint8_t own_addr_type;
	int err;

	if (IS_ENABLED(CONFIG_BT_FILTER_ACCEPT_LIST)) {
		use_filter = atomic_test_bit(conn->flags, BT_CONN_AUTO_CONNECT);
	}

	err = bt_id_set_create_conn_own_addr(use_filter, &own_addr_type);
	if (err) {
		return err;
	}

	buf = bt_hci_cmd_create(BT_HCI_OP_LE_CREATE_CONN, sizeof(*cp));
	if (!buf) {
		return -ENOBUFS;
	}

	cp = net_buf_add(buf, sizeof(*cp));
	memset(cp, 0, sizeof(*cp));
	cp->own_addr_type = own_addr_type;

	if (use_filter) {
		/* User Initiated procedure use fast scan parameters. */
		bt_addr_le_copy(&cp->peer_addr, BT_ADDR_LE_ANY);
		cp->filter_policy = BT_HCI_LE_CREATE_CONN_FP_FILTER;
	} else {
		const bt_addr_le_t *peer_addr = &conn->le.dst;

#if defined(CONFIG_BT_SMP)
		if (bt_dev.le.rl_entries > bt_dev.le.rl_size) {
			/* Host resolving is used, use the RPA directly. */
			peer_addr = &conn->le.resp_addr;
		}
#endif
		bt_addr_le_copy(&cp->peer_addr, peer_addr);
		cp->filter_policy = BT_HCI_LE_CREATE_CONN_FP_NO_FILTER;
	}

	cp->scan_interval = sys_cpu_to_le16(bt_dev.create_param.interval);
	cp->scan_window = sys_cpu_to_le16(bt_dev.create_param.window);

	cp->conn_interval_min = sys_cpu_to_le16(conn->le.interval_min);
	cp->conn_interval_max = sys_cpu_to_le16(conn->le.interval_max);
	cp->conn_latency = sys_cpu_to_le16(conn->le.latency);
	cp->supervision_timeout = sys_cpu_to_le16(conn->le.timeout);

	/* Set BT_DEV_INITIATING when the command succeeds */
	bt_hci_cmd_state_set_init(buf, &state, bt_dev.flags,
				  BT_DEV_INITIATING, true);

	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_CREATE_CONN, buf, NULL);
}
748
bt_le_create_conn(const struct bt_conn * conn)749 int bt_le_create_conn(const struct bt_conn *conn)
750 {
751 if (IS_ENABLED(CONFIG_BT_EXT_ADV) &&
752 BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features)) {
753 return bt_le_create_conn_ext(conn);
754 }
755
756 return bt_le_create_conn_legacy(conn);
757 }
758
bt_le_create_conn_cancel(void)759 int bt_le_create_conn_cancel(void)
760 {
761 struct net_buf *buf;
762 struct bt_hci_cmd_state_set state;
763
764 buf = bt_hci_cmd_create(BT_HCI_OP_LE_CREATE_CONN_CANCEL, 0);
765
766 bt_hci_cmd_state_set_init(buf, &state, bt_dev.flags,
767 BT_DEV_INITIATING, false);
768
769 return bt_hci_cmd_send_sync(BT_HCI_OP_LE_CREATE_CONN_CANCEL, buf, NULL);
770 }
771 #endif /* CONFIG_BT_CENTRAL */
772
/* Send the HCI Disconnect command for @p handle with the given reason.
 *
 * Returns 0 on success or a negative errno.
 */
int bt_hci_disconnect(uint16_t handle, uint8_t reason)
{
	struct bt_hci_cp_disconnect *disconn;
	struct net_buf *buf;

	buf = bt_hci_cmd_create(BT_HCI_OP_DISCONNECT, sizeof(*disconn));
	if (buf == NULL) {
		return -ENOBUFS;
	}

	disconn = net_buf_add(buf, sizeof(*disconn));
	disconn->handle = sys_cpu_to_le16(handle);
	disconn->reason = reason;

	return bt_hci_cmd_send_sync(BT_HCI_OP_DISCONNECT, buf, NULL);
}
789
790 static uint16_t disconnected_handles[CONFIG_BT_MAX_CONN];
disconnected_handles_reset(void)791 static void disconnected_handles_reset(void)
792 {
793 (void)memset(disconnected_handles, 0, sizeof(disconnected_handles));
794 }
795
/* Record @p handle as disconnected-before-connected in the first free slot.
 *
 * The stored value has the non-handle bits set so that connection handle 0
 * can be represented as a valid non-zero entry.
 */
static void conn_handle_disconnected(uint16_t handle)
{
	for (int i = 0; i < ARRAY_SIZE(disconnected_handles); i++) {
		if (!disconnected_handles[i]) {
			/* Use invalid connection handle bits so that connection
			 * handle 0 can be used as a valid non-zero handle.
			 */
			disconnected_handles[i] = ~BT_ACL_HANDLE_MASK | handle;

			/* Stop after claiming one slot; without this return
			 * the handle would be written into every free slot,
			 * exhausting the table and leaving stale duplicates
			 * that conn_handle_is_disconnected() clears only one
			 * at a time.
			 */
			return;
		}
	}
}
807
conn_handle_is_disconnected(uint16_t handle)808 static bool conn_handle_is_disconnected(uint16_t handle)
809 {
810 handle |= ~BT_ACL_HANDLE_MASK;
811
812 for (int i = 0; i < ARRAY_SIZE(disconnected_handles); i++) {
813 if (disconnected_handles[i] == handle) {
814 disconnected_handles[i] = 0;
815 return true;
816 }
817 }
818
819 return false;
820 }
821
hci_disconn_complete_prio(struct net_buf * buf)822 static void hci_disconn_complete_prio(struct net_buf *buf)
823 {
824 struct bt_hci_evt_disconn_complete *evt = (void *)buf->data;
825 uint16_t handle = sys_le16_to_cpu(evt->handle);
826 struct bt_conn *conn;
827
828 LOG_DBG("status 0x%02x handle %u reason 0x%02x", evt->status, handle, evt->reason);
829
830 if (evt->status) {
831 return;
832 }
833
834 conn = bt_conn_lookup_handle(handle, BT_CONN_TYPE_ALL);
835 if (!conn) {
836 /* Priority disconnect complete event received before normal
837 * connection complete event.
838 */
839 conn_handle_disconnected(handle);
840 return;
841 }
842
843 bt_conn_set_state(conn, BT_CONN_DISCONNECT_COMPLETE);
844 bt_conn_unref(conn);
845 }
846
/* Normal handler for the HCI Disconnection Complete event.
 *
 * Records the disconnect reason, transitions the connection to
 * DISCONNECTED, performs transport-specific cleanup, and (without a
 * filter accept list) resumes scanning for auto-connect peers.
 */
static void hci_disconn_complete(struct net_buf *buf)
{
	struct bt_hci_evt_disconn_complete *evt = (void *)buf->data;
	uint16_t handle = sys_le16_to_cpu(evt->handle);
	struct bt_conn *conn;

	LOG_DBG("status 0x%02x handle %u reason 0x%02x", evt->status, handle, evt->reason);

	if (evt->status) {
		return;
	}

	conn = bt_conn_lookup_handle(handle, BT_CONN_TYPE_ALL);
	if (!conn) {
		LOG_ERR("Unable to look up conn with handle %u", handle);
		return;
	}

	conn->err = evt->reason;

	bt_conn_set_state(conn, BT_CONN_DISCONNECTED);

	/* Non-LE (BR/EDR, SCO) connections need extra cleanup */
	if (conn->type != BT_CONN_TYPE_LE) {
#if defined(CONFIG_BT_BREDR)
		if (conn->type == BT_CONN_TYPE_SCO) {
			bt_sco_cleanup(conn);
			return;
		}
		/*
		 * If only for one connection session bond was set, clear keys
		 * database row for this connection.
		 */
		if (conn->type == BT_CONN_TYPE_BR &&
		    atomic_test_and_clear_bit(conn->flags, BT_CONN_BR_NOBOND)) {
			bt_keys_link_key_clear(conn->br.link_key);
		}
#endif
		bt_conn_unref(conn);
		return;
	}

#if defined(CONFIG_BT_CENTRAL) && !defined(CONFIG_BT_FILTER_ACCEPT_LIST)
	/* Re-arm scanning so the auto-connect peer can reconnect */
	if (atomic_test_bit(conn->flags, BT_CONN_AUTO_CONNECT)) {
		bt_conn_set_state(conn, BT_CONN_CONNECTING_SCAN);
		bt_le_scan_update(false);
	}
#endif /* defined(CONFIG_BT_CENTRAL) && !defined(CONFIG_BT_FILTER_ACCEPT_LIST) */

	bt_conn_unref(conn);
}
897
hci_le_read_remote_features(struct bt_conn * conn)898 static int hci_le_read_remote_features(struct bt_conn *conn)
899 {
900 struct bt_hci_cp_le_read_remote_features *cp;
901 struct net_buf *buf;
902
903 buf = bt_hci_cmd_create(BT_HCI_OP_LE_READ_REMOTE_FEATURES,
904 sizeof(*cp));
905 if (!buf) {
906 return -ENOBUFS;
907 }
908
909 cp = net_buf_add(buf, sizeof(*cp));
910 cp->handle = sys_cpu_to_le16(conn->handle);
911 return bt_hci_cmd_send_sync(BT_HCI_OP_LE_READ_REMOTE_FEATURES, buf, NULL);
912 }
913
hci_read_remote_version(struct bt_conn * conn)914 static int hci_read_remote_version(struct bt_conn *conn)
915 {
916 struct bt_hci_cp_read_remote_version_info *cp;
917 struct net_buf *buf;
918
919 if (conn->state != BT_CONN_CONNECTED) {
920 return -ENOTCONN;
921 }
922
923 /* Remote version cannot change. */
924 if (atomic_test_bit(conn->flags, BT_CONN_AUTO_VERSION_INFO)) {
925 return 0;
926 }
927
928 buf = bt_hci_cmd_create(BT_HCI_OP_READ_REMOTE_VERSION_INFO,
929 sizeof(*cp));
930 if (!buf) {
931 return -ENOBUFS;
932 }
933
934 cp = net_buf_add(buf, sizeof(*cp));
935 cp->handle = sys_cpu_to_le16(conn->handle);
936
937 return bt_hci_cmd_send_sync(BT_HCI_OP_READ_REMOTE_VERSION_INFO, buf,
938 NULL);
939 }
940
/* The LE Data Length Change event is optional, so callers of this function
 * may ignore errors, in which case the stack continues to use the default
 * data length values.
 */
/* Issue the LE Set Data Length command for @p conn.
 *
 * Returns 0 on success or a negative errno.
 */
int bt_le_set_data_len(struct bt_conn *conn, uint16_t tx_octets, uint16_t tx_time)
{
	struct net_buf *buf;
	struct bt_hci_cp_le_set_data_len *cp;

	buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_DATA_LEN, sizeof(*cp));
	if (buf == NULL) {
		return -ENOBUFS;
	}

	cp = net_buf_add(buf, sizeof(*cp));
	cp->handle = sys_cpu_to_le16(conn->handle);
	cp->tx_octets = sys_cpu_to_le16(tx_octets);
	cp->tx_time = sys_cpu_to_le16(tx_time);

	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_DATA_LEN, buf, NULL);
}
961
962 #if defined(CONFIG_BT_USER_PHY_UPDATE)
hci_le_read_phy(struct bt_conn * conn)963 static int hci_le_read_phy(struct bt_conn *conn)
964 {
965 struct bt_hci_cp_le_read_phy *cp;
966 struct bt_hci_rp_le_read_phy *rp;
967 struct net_buf *buf, *rsp;
968 int err;
969
970 buf = bt_hci_cmd_create(BT_HCI_OP_LE_READ_PHY, sizeof(*cp));
971 if (!buf) {
972 return -ENOBUFS;
973 }
974
975 cp = net_buf_add(buf, sizeof(*cp));
976 cp->handle = sys_cpu_to_le16(conn->handle);
977
978 err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_READ_PHY, buf, &rsp);
979 if (err) {
980 return err;
981 }
982
983 rp = (void *)rsp->data;
984 conn->le.phy.tx_phy = bt_get_phy(rp->tx_phy);
985 conn->le.phy.rx_phy = bt_get_phy(rp->rx_phy);
986 net_buf_unref(rsp);
987
988 return 0;
989 }
990 #endif /* defined(CONFIG_BT_USER_PHY_UPDATE) */
991
/* Issue the LE Set PHY command for @p conn with the given preferences.
 *
 * Returns 0 on success or a negative errno.
 */
int bt_le_set_phy(struct bt_conn *conn, uint8_t all_phys,
		  uint8_t pref_tx_phy, uint8_t pref_rx_phy, uint8_t phy_opts)
{
	struct net_buf *buf;
	struct bt_hci_cp_le_set_phy *cp;

	buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_PHY, sizeof(*cp));
	if (buf == NULL) {
		return -ENOBUFS;
	}

	cp = net_buf_add(buf, sizeof(*cp));
	cp->handle = sys_cpu_to_le16(conn->handle);
	cp->all_phys = all_phys;
	cp->tx_phys = pref_tx_phy;
	cp->rx_phys = pref_rx_phy;
	cp->phy_opts = phy_opts;

	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_PHY, buf, NULL);
}
1012
/* Find the connection object pending establishment for the given role and
 * peer address.
 *
 * Central: CONNECTING for the peer, else CONNECTING_AUTO (accept list).
 * Peripheral: CONNECTING_DIR_ADV for the peer, else CONNECTING_ADV.
 * Returns a referenced connection object or NULL.
 */
static struct bt_conn *find_pending_connect(uint8_t role, bt_addr_le_t *peer_addr)
{
	struct bt_conn *conn = NULL;

	if (IS_ENABLED(CONFIG_BT_CENTRAL) && role == BT_HCI_ROLE_CENTRAL) {
		conn = bt_conn_lookup_state_le(BT_ID_DEFAULT, peer_addr,
					       BT_CONN_CONNECTING);
		if (!conn && IS_ENABLED(CONFIG_BT_FILTER_ACCEPT_LIST)) {
			conn = bt_conn_lookup_state_le(BT_ID_DEFAULT,
						       BT_ADDR_LE_NONE,
						       BT_CONN_CONNECTING_AUTO);
		}
	} else if (IS_ENABLED(CONFIG_BT_PERIPHERAL) && role == BT_HCI_ROLE_PERIPHERAL) {
		conn = bt_conn_lookup_state_le(bt_dev.adv_conn_id, peer_addr,
					       BT_CONN_CONNECTING_DIR_ADV);
		if (!conn) {
			conn = bt_conn_lookup_state_le(bt_dev.adv_conn_id,
						       BT_ADDR_LE_NONE,
						       BT_CONN_CONNECTING_ADV);
		}
	}

	return conn;
}
1047
1048 /* We don't want the application to get a PHY update callback upon connection
1049 * establishment on 2M PHY. Therefore we must prevent issuing LE Set PHY
1050 * in this scenario.
1051 */
/* Decide whether the automatic PHY update should be skipped for a newly
 * established connection (true when the link already runs 2M/2M, so no
 * user-visible PHY update callback is triggered).
 */
static bool skip_auto_phy_update_on_conn_establishment(struct bt_conn *conn)
{
#if defined(CONFIG_BT_USER_PHY_UPDATE)
	if (IS_ENABLED(CONFIG_BT_AUTO_PHY_UPDATE) &&
	    IS_ENABLED(CONFIG_BT_EXT_ADV) &&
	    BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features)) {
		/* Already on 2M in both directions: nothing to update */
		if (conn->le.phy.tx_phy == BT_HCI_LE_PHY_2M &&
		    conn->le.phy.rx_phy == BT_HCI_LE_PHY_2M) {
			return true;
		}
	}
#else
	ARG_UNUSED(conn);
#endif /* defined(CONFIG_BT_USER_PHY_UPDATE) */

	return false;
}
1069
/* Kick off the automatic post-connection procedures: remote feature
 * exchange, remote version read, PHY update and data length update,
 * depending on configuration and controller/peer feature support.
 * Failures are logged but not propagated.
 */
static void conn_auto_initiate(struct bt_conn *conn)
{
	int err;

	if (conn->state != BT_CONN_CONNECTED) {
		/* It is possible that connection was disconnected directly from
		 * connected callback so we must check state before doing
		 * connection parameters update.
		 */
		return;
	}

	/* Feature exchange: always as central; as peripheral only when the
	 * controller supports peripheral-initiated feature exchange.
	 */
	if (!atomic_test_bit(conn->flags, BT_CONN_AUTO_FEATURE_EXCH) &&
	    ((conn->role == BT_HCI_ROLE_CENTRAL) ||
	     BT_FEAT_LE_PER_INIT_FEAT_XCHG(bt_dev.le.features))) {
		err = hci_le_read_remote_features(conn);
		if (err) {
			LOG_ERR("Failed read remote features (%d)", err);
		}
	}

	if (IS_ENABLED(CONFIG_BT_REMOTE_VERSION) &&
	    !atomic_test_bit(conn->flags, BT_CONN_AUTO_VERSION_INFO)) {
		err = hci_read_remote_version(conn);
		if (err) {
			LOG_ERR("Failed read remote version (%d)", err);
		}
	}

	/* Prefer 2M PHY when supported, unless the link is already on 2M */
	if (IS_ENABLED(CONFIG_BT_AUTO_PHY_UPDATE) &&
	    BT_FEAT_LE_PHY_2M(bt_dev.le.features) &&
	    !skip_auto_phy_update_on_conn_establishment(conn)) {
		err = bt_le_set_phy(conn, 0U, BT_HCI_LE_PHY_PREFER_2M,
				    BT_HCI_LE_PHY_PREFER_2M,
				    BT_HCI_LE_PHY_CODED_ANY);
		if (err) {
			LOG_ERR("Failed LE Set PHY (%d)", err);
		}
	}

	if (IS_ENABLED(CONFIG_BT_AUTO_DATA_LEN_UPDATE) &&
	    BT_FEAT_LE_DLE(bt_dev.le.features)) {
		if (IS_BT_QUIRK_NO_AUTO_DLE(&bt_dev)) {
			uint16_t tx_octets, tx_time;

			err = hci_le_read_max_data_len(&tx_octets, &tx_time);
			if (!err) {
				err = bt_le_set_data_len(conn,
							 tx_octets, tx_time);
				if (err) {
					LOG_ERR("Failed to set data len (%d)", err);
				}
			}
		} else {
			/* No need to auto-initiate DLE procedure.
			 * It is done by the controller.
			 */
		}
	}
}
1130
/* Handle a connection complete event reporting failure of the outgoing
 * (central) connection attempt, e.g. after LE Create Connection Cancel.
 *
 * @param err HCI error code stored in conn->err and reported to the
 *            application through the disconnected state change.
 */
static void le_conn_complete_cancel(uint8_t err)
{
	struct bt_conn *conn;

	/* Handle create connection cancel.
	 *
	 * There is no need to check ID address as only one
	 * connection in central role can be in pending state.
	 */
	conn = find_pending_connect(BT_HCI_ROLE_CENTRAL, NULL);
	if (!conn) {
		LOG_ERR("No pending central connection");
		return;
	}

	conn->err = err;

	/* Handle cancellation of outgoing connection attempt. */
	if (!IS_ENABLED(CONFIG_BT_FILTER_ACCEPT_LIST)) {
		/* We notify before checking autoconnect flag
		 * as application may choose to change it from
		 * callback.
		 */
		bt_conn_set_state(conn, BT_CONN_DISCONNECTED);
		/* Check if device is marked for autoconnect. */
		if (atomic_test_bit(conn->flags, BT_CONN_AUTO_CONNECT)) {
			/* Restart passive scanner for device */
			bt_conn_set_state(conn, BT_CONN_CONNECTING_SCAN);
		}
	} else {
		if (atomic_test_bit(conn->flags, BT_CONN_AUTO_CONNECT)) {
			/* Restart FAL initiator after RPA timeout. */
			bt_le_create_conn(conn);
		} else {
			/* Create connection canceled by timeout */
			bt_conn_set_state(conn, BT_CONN_DISCONNECTED);
		}
	}

	/* Drop the reference taken by find_pending_connect(). */
	bt_conn_unref(conn);
}
1172
/* Handle a connection complete event with status BT_HCI_ERR_ADV_TIMEOUT:
 * high duty cycle directed advertising ended without a connection. Only
 * relevant when the extended advertising HCI commands are NOT in use;
 * with them the timeout is reported via the advertising set terminated
 * event instead.
 */
static void le_conn_complete_adv_timeout(void)
{
	if (!(IS_ENABLED(CONFIG_BT_EXT_ADV) &&
	      BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features))) {
		struct bt_le_ext_adv *adv = bt_le_adv_lookup_legacy();
		struct bt_conn *conn;

		/* Handle advertising timeout after high duty cycle directed
		 * advertising.
		 */

		atomic_clear_bit(adv->flags, BT_ADV_ENABLED);

		if (IS_ENABLED(CONFIG_BT_EXT_ADV) &&
		    !BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features)) {
			/* No advertising set terminated event, must be a
			 * legacy advertiser set.
			 */
			bt_le_adv_delete_legacy();
		}

		/* There is no need to check ID address as only one
		 * connection in peripheral role can be in pending state.
		 */
		conn = find_pending_connect(BT_HCI_ROLE_PERIPHERAL, NULL);
		if (!conn) {
			LOG_ERR("No pending peripheral connection");
			return;
		}

		conn->err = BT_HCI_ERR_ADV_TIMEOUT;
		bt_conn_set_state(conn, BT_CONN_DISCONNECTED);

		/* Drop the reference taken by find_pending_connect(). */
		bt_conn_unref(conn);
	}
}
1209
/* Entry point for the LE Enhanced Connection Complete event.
 *
 * With multiple advertising sets a successful peripheral connection
 * complete is cached here and processed together with the matching
 * advertising set terminated event; in every other case the event is
 * handled immediately.
 */
static void enh_conn_complete(struct bt_hci_evt_le_enh_conn_complete *evt)
{
#if defined(CONFIG_BT_CONN) && (CONFIG_BT_EXT_ADV_MAX_ADV_SET > 1)
	if (IS_ENABLED(CONFIG_BT_PERIPHERAL) &&
	    evt->role == BT_HCI_ROLE_PERIPHERAL &&
	    evt->status == BT_HCI_ERR_SUCCESS &&
	    (IS_ENABLED(CONFIG_BT_EXT_ADV) &&
	     BT_FEAT_LE_EXT_ADV(bt_dev.le.features))) {

		/* Cache the connection complete event. Process it later.
		 * See bt_dev.cached_conn_complete.
		 */
		for (int i = 0; i < ARRAY_SIZE(bt_dev.cached_conn_complete); i++) {
			if (!bt_dev.cached_conn_complete[i].valid) {
				(void)memcpy(&bt_dev.cached_conn_complete[i].evt,
					evt,
					sizeof(struct bt_hci_evt_le_enh_conn_complete));
				bt_dev.cached_conn_complete[i].valid = true;
				return;
			}
		}

		/* The cache is sized for the maximum number of advertising
		 * sets, so running out of entries indicates a logic error.
		 */
		__ASSERT(false, "No more cache entries available."
				"This should not happen by design");

		return;
	}
#endif
	bt_hci_le_enh_conn_complete(evt);
}
1240
/* Derive the identity address and the on-air connection address for a
 * peer from the enhanced connection complete event.
 */
static void translate_addrs(bt_addr_le_t *peer_addr, bt_addr_le_t *id_addr,
			    const struct bt_hci_evt_le_enh_conn_complete *evt, uint8_t id)
{
	if (!bt_addr_le_is_resolved(&evt->peer_addr)) {
		/* Unresolved: look up the identity mapping ourselves and use
		 * the event address verbatim as the connection address.
		 */
		bt_addr_le_copy(id_addr, bt_lookup_id_addr(id, &evt->peer_addr));
		bt_addr_le_copy(peer_addr, &evt->peer_addr);
		return;
	}

	/* Controller already resolved the address: the event carries the
	 * identity, while the actual on-air address is the peer's RPA.
	 */
	bt_addr_le_copy_resolved(id_addr, &evt->peer_addr);
	peer_addr->type = BT_ADDR_LE_RANDOM;
	bt_addr_copy(&peer_addr->a, &evt->peer_rpa);
}
1254
update_conn(struct bt_conn * conn,const bt_addr_le_t * id_addr,const struct bt_hci_evt_le_enh_conn_complete * evt)1255 static void update_conn(struct bt_conn *conn, const bt_addr_le_t *id_addr,
1256 const struct bt_hci_evt_le_enh_conn_complete *evt)
1257 {
1258 conn->handle = sys_le16_to_cpu(evt->handle);
1259 bt_addr_le_copy(&conn->le.dst, id_addr);
1260 conn->le.interval = sys_le16_to_cpu(evt->interval);
1261 conn->le.latency = sys_le16_to_cpu(evt->latency);
1262 conn->le.timeout = sys_le16_to_cpu(evt->supv_timeout);
1263 conn->role = evt->role;
1264 conn->err = 0U;
1265
1266 #if defined(CONFIG_BT_USER_DATA_LEN_UPDATE)
1267 conn->le.data_len.tx_max_len = BT_GAP_DATA_LEN_DEFAULT;
1268 conn->le.data_len.tx_max_time = BT_GAP_DATA_TIME_DEFAULT;
1269 conn->le.data_len.rx_max_len = BT_GAP_DATA_LEN_DEFAULT;
1270 conn->le.data_len.rx_max_time = BT_GAP_DATA_TIME_DEFAULT;
1271 #endif
1272 }
1273
bt_hci_le_enh_conn_complete(struct bt_hci_evt_le_enh_conn_complete * evt)1274 void bt_hci_le_enh_conn_complete(struct bt_hci_evt_le_enh_conn_complete *evt)
1275 {
1276 uint16_t handle = sys_le16_to_cpu(evt->handle);
1277 bool is_disconnected = conn_handle_is_disconnected(handle);
1278 bt_addr_le_t peer_addr, id_addr;
1279 struct bt_conn *conn;
1280 uint8_t id;
1281
1282 LOG_DBG("status 0x%02x handle %u role %u peer %s peer RPA %s", evt->status, handle,
1283 evt->role, bt_addr_le_str(&evt->peer_addr), bt_addr_str(&evt->peer_rpa));
1284 LOG_DBG("local RPA %s", bt_addr_str(&evt->local_rpa));
1285
1286 #if defined(CONFIG_BT_SMP)
1287 bt_id_pending_keys_update();
1288 #endif
1289
1290 if (evt->status) {
1291 if (IS_ENABLED(CONFIG_BT_PERIPHERAL) &&
1292 evt->status == BT_HCI_ERR_ADV_TIMEOUT) {
1293 le_conn_complete_adv_timeout();
1294 return;
1295 }
1296
1297 if (IS_ENABLED(CONFIG_BT_CENTRAL) &&
1298 evt->status == BT_HCI_ERR_UNKNOWN_CONN_ID) {
1299 le_conn_complete_cancel(evt->status);
1300 bt_le_scan_update(false);
1301 return;
1302 }
1303
1304 if (IS_ENABLED(CONFIG_BT_CENTRAL) && IS_ENABLED(CONFIG_BT_PER_ADV_RSP) &&
1305 evt->status == BT_HCI_ERR_CONN_FAIL_TO_ESTAB) {
1306 le_conn_complete_cancel(evt->status);
1307
1308 atomic_clear_bit(bt_dev.flags, BT_DEV_INITIATING);
1309
1310 return;
1311 }
1312
1313 LOG_WRN("Unexpected status 0x%02x", evt->status);
1314
1315 return;
1316 }
1317
1318 id = evt->role == BT_HCI_ROLE_PERIPHERAL ? bt_dev.adv_conn_id : BT_ID_DEFAULT;
1319 translate_addrs(&peer_addr, &id_addr, evt, id);
1320
1321 conn = find_pending_connect(evt->role, &id_addr);
1322
1323 if (IS_ENABLED(CONFIG_BT_PERIPHERAL) &&
1324 evt->role == BT_HCI_ROLE_PERIPHERAL &&
1325 !(IS_ENABLED(CONFIG_BT_EXT_ADV) &&
1326 BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features))) {
1327 struct bt_le_ext_adv *adv = bt_le_adv_lookup_legacy();
1328 /* Clear advertising even if we are not able to add connection
1329 * object to keep host in sync with controller state.
1330 */
1331 atomic_clear_bit(adv->flags, BT_ADV_ENABLED);
1332 (void)bt_le_lim_adv_cancel_timeout(adv);
1333 }
1334
1335 if (IS_ENABLED(CONFIG_BT_CENTRAL) &&
1336 evt->role == BT_HCI_ROLE_CENTRAL) {
1337 /* Clear initiating even if we are not able to add connection
1338 * object to keep the host in sync with controller state.
1339 */
1340 atomic_clear_bit(bt_dev.flags, BT_DEV_INITIATING);
1341 }
1342
1343 if (!conn) {
1344 LOG_ERR("No pending conn for peer %s", bt_addr_le_str(&evt->peer_addr));
1345 bt_hci_disconnect(handle, BT_HCI_ERR_REMOTE_USER_TERM_CONN);
1346 return;
1347 }
1348
1349 update_conn(conn, &id_addr, evt);
1350
1351 #if defined(CONFIG_BT_USER_PHY_UPDATE)
1352 conn->le.phy.tx_phy = BT_GAP_LE_PHY_1M;
1353 conn->le.phy.rx_phy = BT_GAP_LE_PHY_1M;
1354 #endif
1355 /*
1356 * Use connection address (instead of identity address) as initiator
1357 * or responder address. Only peripheral needs to be updated. For central all
1358 * was set during outgoing connection creation.
1359 */
1360 if (IS_ENABLED(CONFIG_BT_PERIPHERAL) &&
1361 conn->role == BT_HCI_ROLE_PERIPHERAL) {
1362 bt_addr_le_copy(&conn->le.init_addr, &peer_addr);
1363
1364 if (!(IS_ENABLED(CONFIG_BT_EXT_ADV) &&
1365 BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features))) {
1366 struct bt_le_ext_adv *adv = bt_le_adv_lookup_legacy();
1367
1368 if (IS_ENABLED(CONFIG_BT_PRIVACY) &&
1369 !atomic_test_bit(adv->flags, BT_ADV_USE_IDENTITY)) {
1370 conn->le.resp_addr.type = BT_ADDR_LE_RANDOM;
1371 if (!bt_addr_eq(&evt->local_rpa, BT_ADDR_ANY)) {
1372 bt_addr_copy(&conn->le.resp_addr.a,
1373 &evt->local_rpa);
1374 } else {
1375 bt_addr_copy(&conn->le.resp_addr.a,
1376 &bt_dev.random_addr.a);
1377 }
1378 } else {
1379 bt_addr_le_copy(&conn->le.resp_addr,
1380 &bt_dev.id_addr[conn->id]);
1381 }
1382 } else {
1383 /* Copy the local RPA and handle this in advertising set
1384 * terminated event.
1385 */
1386 bt_addr_copy(&conn->le.resp_addr.a, &evt->local_rpa);
1387 }
1388
1389 /* if the controller supports, lets advertise for another
1390 * peripheral connection.
1391 * check for connectable advertising state is sufficient as
1392 * this is how this le connection complete for peripheral occurred.
1393 */
1394 if (BT_LE_STATES_PER_CONN_ADV(bt_dev.le.states)) {
1395 bt_le_adv_resume();
1396 }
1397
1398 if (IS_ENABLED(CONFIG_BT_EXT_ADV) &&
1399 !BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features)) {
1400 struct bt_le_ext_adv *adv = bt_le_adv_lookup_legacy();
1401 /* No advertising set terminated event, must be a
1402 * legacy advertiser set.
1403 */
1404 if (!atomic_test_bit(adv->flags, BT_ADV_PERSIST)) {
1405 bt_le_adv_delete_legacy();
1406 }
1407 }
1408 }
1409
1410 if (IS_ENABLED(CONFIG_BT_CENTRAL) &&
1411 conn->role == BT_HCI_ROLE_CENTRAL) {
1412 bt_addr_le_copy(&conn->le.resp_addr, &peer_addr);
1413
1414 if (IS_ENABLED(CONFIG_BT_PRIVACY)) {
1415 conn->le.init_addr.type = BT_ADDR_LE_RANDOM;
1416 if (!bt_addr_eq(&evt->local_rpa, BT_ADDR_ANY)) {
1417 bt_addr_copy(&conn->le.init_addr.a,
1418 &evt->local_rpa);
1419 } else {
1420 bt_addr_copy(&conn->le.init_addr.a,
1421 &bt_dev.random_addr.a);
1422 }
1423 } else {
1424 bt_addr_le_copy(&conn->le.init_addr,
1425 &bt_dev.id_addr[conn->id]);
1426 }
1427 }
1428
1429 #if defined(CONFIG_BT_USER_PHY_UPDATE)
1430 if (IS_ENABLED(CONFIG_BT_EXT_ADV) &&
1431 BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features)) {
1432 int err;
1433
1434 err = hci_le_read_phy(conn);
1435 if (err) {
1436 LOG_WRN("Failed to read PHY (%d)", err);
1437 }
1438 }
1439 #endif /* defined(CONFIG_BT_USER_PHY_UPDATE) */
1440
1441 bt_conn_set_state(conn, BT_CONN_CONNECTED);
1442
1443 if (is_disconnected) {
1444 /* Mark the connection as already disconnected before calling
1445 * the connected callback, so that the application cannot
1446 * start sending packets
1447 */
1448 bt_conn_set_state(conn, BT_CONN_DISCONNECT_COMPLETE);
1449 }
1450
1451 bt_conn_connected(conn);
1452
1453 /* Start auto-initiated procedures */
1454 conn_auto_initiate(conn);
1455
1456 bt_conn_unref(conn);
1457
1458 if (IS_ENABLED(CONFIG_BT_CENTRAL) &&
1459 conn->role == BT_HCI_ROLE_CENTRAL) {
1460 bt_le_scan_update(false);
1461 }
1462 }
1463
1464 #if defined(CONFIG_BT_PER_ADV_SYNC_RSP)
bt_hci_le_enh_conn_complete_sync(struct bt_hci_evt_le_enh_conn_complete_v2 * evt,struct bt_le_per_adv_sync * sync)1465 void bt_hci_le_enh_conn_complete_sync(struct bt_hci_evt_le_enh_conn_complete_v2 *evt,
1466 struct bt_le_per_adv_sync *sync)
1467 {
1468 uint16_t handle = sys_le16_to_cpu(evt->handle);
1469 bool is_disconnected = conn_handle_is_disconnected(handle);
1470 bt_addr_le_t peer_addr, id_addr;
1471 struct bt_conn *conn;
1472
1473 if (!sync->num_subevents) {
1474 LOG_ERR("Unexpected connection complete event");
1475
1476 return;
1477 }
1478
1479 conn = bt_conn_add_le(BT_ID_DEFAULT, BT_ADDR_LE_ANY);
1480 if (!conn) {
1481 LOG_ERR("Unable to allocate connection");
1482 /* Tell the controller to disconnect to keep it in sync with
1483 * the host state and avoid a "rogue" connection.
1484 */
1485 bt_hci_disconnect(handle, BT_HCI_ERR_REMOTE_USER_TERM_CONN);
1486
1487 return;
1488 }
1489
1490 LOG_DBG("status 0x%02x handle %u role %u peer %s peer RPA %s", evt->status, handle,
1491 evt->role, bt_addr_le_str(&evt->peer_addr), bt_addr_str(&evt->peer_rpa));
1492 LOG_DBG("local RPA %s", bt_addr_str(&evt->local_rpa));
1493
1494 if (evt->role != BT_HCI_ROLE_PERIPHERAL) {
1495 LOG_ERR("PAwR sync always becomes peripheral");
1496
1497 return;
1498 }
1499
1500 #if defined(CONFIG_BT_SMP)
1501 bt_id_pending_keys_update();
1502 #endif
1503
1504 if (evt->status) {
1505 LOG_ERR("Unexpected status 0x%02x", evt->status);
1506
1507 return;
1508 }
1509
1510 translate_addrs(&peer_addr, &id_addr, (const struct bt_hci_evt_le_enh_conn_complete *)evt,
1511 BT_ID_DEFAULT);
1512 update_conn(conn, &id_addr, (const struct bt_hci_evt_le_enh_conn_complete *)evt);
1513
1514 #if defined(CONFIG_BT_USER_PHY_UPDATE)
1515 /* The connection is always initated on the same phy as the PAwR advertiser */
1516 conn->le.phy.tx_phy = sync->phy;
1517 conn->le.phy.rx_phy = sync->phy;
1518 #endif
1519
1520 bt_addr_le_copy(&conn->le.init_addr, &peer_addr);
1521
1522 if (IS_ENABLED(CONFIG_BT_PRIVACY)) {
1523 conn->le.resp_addr.type = BT_ADDR_LE_RANDOM;
1524 bt_addr_copy(&conn->le.resp_addr.a, &evt->local_rpa);
1525 } else {
1526 bt_addr_le_copy(&conn->le.resp_addr, &bt_dev.id_addr[conn->id]);
1527 }
1528
1529 bt_conn_set_state(conn, BT_CONN_CONNECTED);
1530
1531 if (is_disconnected) {
1532 /* Mark the connection as already disconnected before calling
1533 * the connected callback, so that the application cannot
1534 * start sending packets
1535 */
1536 bt_conn_set_state(conn, BT_CONN_DISCONNECT_COMPLETE);
1537 }
1538
1539 bt_conn_connected(conn);
1540
1541 /* Since we don't give the application a reference to manage
1542 * for peripheral connections, we need to release this reference here.
1543 */
1544 bt_conn_unref(conn);
1545
1546 /* Start auto-initiated procedures */
1547 conn_auto_initiate(conn);
1548 }
1549 #endif /* CONFIG_BT_PER_ADV_SYNC_RSP */
1550
le_enh_conn_complete(struct net_buf * buf)1551 static void le_enh_conn_complete(struct net_buf *buf)
1552 {
1553 enh_conn_complete((void *)buf->data);
1554 }
1555
1556 #if defined(CONFIG_BT_PER_ADV_RSP) || defined(CONFIG_BT_PER_ADV_SYNC_RSP)
/* Dispatch the v2 enhanced connection complete event based on which PAwR
 * handle is valid:
 * - neither handle valid: a regular connection, treated like v1;
 * - adv handle only: connection via a PAwR advertiser, also treated like v1;
 * - sync handle only: connection via a PAwR synced device, which has no
 *   advertising set terminated event and needs dedicated handling;
 * - both valid: not a legal combination, logged as an error.
 */
static void le_enh_conn_complete_v2(struct net_buf *buf)
{
	struct bt_hci_evt_le_enh_conn_complete_v2 *evt =
		(struct bt_hci_evt_le_enh_conn_complete_v2 *)buf->data;

	if (evt->adv_handle == BT_HCI_ADV_HANDLE_INVALID &&
	    evt->sync_handle == BT_HCI_SYNC_HANDLE_INVALID) {
		/* The connection was not created via PAwR, handle the event like v1 */
		enh_conn_complete((struct bt_hci_evt_le_enh_conn_complete *)evt);
	}
#if defined(CONFIG_BT_PER_ADV_RSP)
	else if (evt->adv_handle != BT_HCI_ADV_HANDLE_INVALID &&
		 evt->sync_handle == BT_HCI_SYNC_HANDLE_INVALID) {
		/* The connection was created via PAwR advertiser, it can be handled like v1 */
		enh_conn_complete((struct bt_hci_evt_le_enh_conn_complete *)evt);
	}
#endif /* CONFIG_BT_PER_ADV_RSP */
#if defined(CONFIG_BT_PER_ADV_SYNC_RSP)
	else if (evt->adv_handle == BT_HCI_ADV_HANDLE_INVALID &&
		 evt->sync_handle != BT_HCI_SYNC_HANDLE_INVALID) {
		/* Created via PAwR sync, no adv set terminated event, needs separate handling */
		struct bt_le_per_adv_sync *sync;

		sync = bt_hci_get_per_adv_sync(evt->sync_handle);
		if (!sync) {
			LOG_ERR("Unknown sync handle %d", evt->sync_handle);

			return;
		}

		bt_hci_le_enh_conn_complete_sync(evt, sync);
	}
#endif /* CONFIG_BT_PER_ADV_SYNC_RSP */
	else {
		LOG_ERR("Invalid connection complete event");
	}
}
1594 #endif /* CONFIG_BT_PER_ADV_RSP || CONFIG_BT_PER_ADV_SYNC_RSP */
1595
le_legacy_conn_complete(struct net_buf * buf)1596 static void le_legacy_conn_complete(struct net_buf *buf)
1597 {
1598 struct bt_hci_evt_le_conn_complete *evt = (void *)buf->data;
1599 struct bt_hci_evt_le_enh_conn_complete enh;
1600
1601 LOG_DBG("status 0x%02x role %u %s", evt->status, evt->role,
1602 bt_addr_le_str(&evt->peer_addr));
1603
1604 enh.status = evt->status;
1605 enh.handle = evt->handle;
1606 enh.role = evt->role;
1607 enh.interval = evt->interval;
1608 enh.latency = evt->latency;
1609 enh.supv_timeout = evt->supv_timeout;
1610 enh.clock_accuracy = evt->clock_accuracy;
1611
1612 bt_addr_le_copy(&enh.peer_addr, &evt->peer_addr);
1613
1614 if (IS_ENABLED(CONFIG_BT_PRIVACY)) {
1615 bt_addr_copy(&enh.local_rpa, &bt_dev.random_addr.a);
1616 } else {
1617 bt_addr_copy(&enh.local_rpa, BT_ADDR_ANY);
1618 }
1619
1620 bt_addr_copy(&enh.peer_rpa, BT_ADDR_ANY);
1621
1622 enh_conn_complete(&enh);
1623 }
1624
le_remote_feat_complete(struct net_buf * buf)1625 static void le_remote_feat_complete(struct net_buf *buf)
1626 {
1627 struct bt_hci_evt_le_remote_feat_complete *evt = (void *)buf->data;
1628 uint16_t handle = sys_le16_to_cpu(evt->handle);
1629 struct bt_conn *conn;
1630
1631 conn = bt_conn_lookup_handle(handle, BT_CONN_TYPE_LE);
1632 if (!conn) {
1633 LOG_ERR("Unable to lookup conn for handle %u", handle);
1634 return;
1635 }
1636
1637 if (!evt->status) {
1638 memcpy(conn->le.features, evt->features,
1639 sizeof(conn->le.features));
1640 }
1641
1642 atomic_set_bit(conn->flags, BT_CONN_AUTO_FEATURE_EXCH);
1643
1644 if (IS_ENABLED(CONFIG_BT_REMOTE_INFO) &&
1645 !IS_ENABLED(CONFIG_BT_REMOTE_VERSION)) {
1646 notify_remote_info(conn);
1647 }
1648
1649 bt_conn_unref(conn);
1650 }
1651
1652 #if defined(CONFIG_BT_DATA_LEN_UPDATE)
/* Handle the LE Data Length Change event. With user data length updates
 * enabled the new TX/RX payload limits are cached on the connection and
 * the application is notified; otherwise the event is consumed silently.
 */
static void le_data_len_change(struct net_buf *buf)
{
	struct bt_hci_evt_le_data_len_change *evt = (void *)buf->data;
	uint16_t handle = sys_le16_to_cpu(evt->handle);
	struct bt_conn *conn;

	conn = bt_conn_lookup_handle(handle, BT_CONN_TYPE_LE);
	if (!conn) {
		LOG_ERR("Unable to lookup conn for handle %u", handle);
		return;
	}

#if defined(CONFIG_BT_USER_DATA_LEN_UPDATE)
	uint16_t max_tx_octets = sys_le16_to_cpu(evt->max_tx_octets);
	uint16_t max_rx_octets = sys_le16_to_cpu(evt->max_rx_octets);
	uint16_t max_tx_time = sys_le16_to_cpu(evt->max_tx_time);
	uint16_t max_rx_time = sys_le16_to_cpu(evt->max_rx_time);

	LOG_DBG("max. tx: %u (%uus), max. rx: %u (%uus)", max_tx_octets, max_tx_time, max_rx_octets,
		max_rx_time);

	conn->le.data_len.tx_max_len = max_tx_octets;
	conn->le.data_len.tx_max_time = max_tx_time;
	conn->le.data_len.rx_max_len = max_rx_octets;
	conn->le.data_len.rx_max_time = max_rx_time;
	notify_le_data_len_updated(conn);
#endif

	bt_conn_unref(conn);
}
1683 #endif /* CONFIG_BT_DATA_LEN_UPDATE */
1684
1685 #if defined(CONFIG_BT_PHY_UPDATE)
/* Handle the LE PHY Update Complete event. With user PHY updates enabled
 * the new TX/RX PHYs are cached on the connection and the application is
 * notified; otherwise the event is consumed silently.
 */
static void le_phy_update_complete(struct net_buf *buf)
{
	struct bt_hci_evt_le_phy_update_complete *evt = (void *)buf->data;
	uint16_t handle = sys_le16_to_cpu(evt->handle);
	struct bt_conn *conn;

	conn = bt_conn_lookup_handle(handle, BT_CONN_TYPE_LE);
	if (!conn) {
		LOG_ERR("Unable to lookup conn for handle %u", handle);
		return;
	}

	LOG_DBG("PHY updated: status: 0x%02x, tx: %u, rx: %u", evt->status, evt->tx_phy,
		evt->rx_phy);

#if defined(CONFIG_BT_USER_PHY_UPDATE)
	/* Convert from HCI PHY encoding to the GAP bitfield encoding. */
	conn->le.phy.tx_phy = bt_get_phy(evt->tx_phy);
	conn->le.phy.rx_phy = bt_get_phy(evt->rx_phy);
	notify_le_phy_updated(conn);
#endif

	bt_conn_unref(conn);
}
1709 #endif /* CONFIG_BT_PHY_UPDATE */
1710
bt_le_conn_params_valid(const struct bt_le_conn_param * param)1711 bool bt_le_conn_params_valid(const struct bt_le_conn_param *param)
1712 {
1713 if (IS_ENABLED(CONFIG_BT_CONN_PARAM_ANY)) {
1714 return true;
1715 }
1716
1717 /* All limits according to BT Core spec 5.0 [Vol 2, Part E, 7.8.12] */
1718
1719 if (param->interval_min > param->interval_max ||
1720 param->interval_min < 6 || param->interval_max > 3200) {
1721 return false;
1722 }
1723
1724 if (param->latency > 499) {
1725 return false;
1726 }
1727
1728 if (param->timeout < 10 || param->timeout > 3200 ||
1729 ((param->timeout * 4U) <=
1730 ((1U + param->latency) * param->interval_max))) {
1731 return false;
1732 }
1733
1734 return true;
1735 }
1736
le_conn_param_neg_reply(uint16_t handle,uint8_t reason)1737 static void le_conn_param_neg_reply(uint16_t handle, uint8_t reason)
1738 {
1739 struct bt_hci_cp_le_conn_param_req_neg_reply *cp;
1740 struct net_buf *buf;
1741
1742 buf = bt_hci_cmd_create(BT_HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY,
1743 sizeof(*cp));
1744 if (!buf) {
1745 LOG_ERR("Unable to allocate buffer");
1746 return;
1747 }
1748
1749 cp = net_buf_add(buf, sizeof(*cp));
1750 cp->handle = sys_cpu_to_le16(handle);
1751 cp->reason = sys_cpu_to_le16(reason);
1752
1753 bt_hci_cmd_send(BT_HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, buf);
1754 }
1755
le_conn_param_req_reply(uint16_t handle,const struct bt_le_conn_param * param)1756 static int le_conn_param_req_reply(uint16_t handle,
1757 const struct bt_le_conn_param *param)
1758 {
1759 struct bt_hci_cp_le_conn_param_req_reply *cp;
1760 struct net_buf *buf;
1761
1762 buf = bt_hci_cmd_create(BT_HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(*cp));
1763 if (!buf) {
1764 return -ENOBUFS;
1765 }
1766
1767 cp = net_buf_add(buf, sizeof(*cp));
1768 (void)memset(cp, 0, sizeof(*cp));
1769
1770 cp->handle = sys_cpu_to_le16(handle);
1771 cp->interval_min = sys_cpu_to_le16(param->interval_min);
1772 cp->interval_max = sys_cpu_to_le16(param->interval_max);
1773 cp->latency = sys_cpu_to_le16(param->latency);
1774 cp->timeout = sys_cpu_to_le16(param->timeout);
1775
1776 return bt_hci_cmd_send(BT_HCI_OP_LE_CONN_PARAM_REQ_REPLY, buf);
1777 }
1778
le_conn_param_req(struct net_buf * buf)1779 static void le_conn_param_req(struct net_buf *buf)
1780 {
1781 struct bt_hci_evt_le_conn_param_req *evt = (void *)buf->data;
1782 struct bt_le_conn_param param;
1783 struct bt_conn *conn;
1784 uint16_t handle;
1785
1786 handle = sys_le16_to_cpu(evt->handle);
1787 param.interval_min = sys_le16_to_cpu(evt->interval_min);
1788 param.interval_max = sys_le16_to_cpu(evt->interval_max);
1789 param.latency = sys_le16_to_cpu(evt->latency);
1790 param.timeout = sys_le16_to_cpu(evt->timeout);
1791
1792 conn = bt_conn_lookup_handle(handle, BT_CONN_TYPE_LE);
1793 if (!conn) {
1794 LOG_ERR("Unable to lookup conn for handle %u", handle);
1795 le_conn_param_neg_reply(handle, BT_HCI_ERR_UNKNOWN_CONN_ID);
1796 return;
1797 }
1798
1799 if (!le_param_req(conn, ¶m)) {
1800 le_conn_param_neg_reply(handle, BT_HCI_ERR_INVALID_LL_PARAM);
1801 } else {
1802 le_conn_param_req_reply(handle, ¶m);
1803 }
1804
1805 bt_conn_unref(conn);
1806 }
1807
/* Handle the LE Connection Update Complete event: store the new
 * parameters on success, fall back to L2CAP CPUP when the peer lacks the
 * link-layer procedure, and (with auto-update enabled) retry rejected
 * parameter sets a limited number of times.
 */
static void le_conn_update_complete(struct net_buf *buf)
{
	struct bt_hci_evt_le_conn_update_complete *evt = (void *)buf->data;
	struct bt_conn *conn;
	uint16_t handle;

	handle = sys_le16_to_cpu(evt->handle);

	LOG_DBG("status 0x%02x, handle %u", evt->status, handle);

	conn = bt_conn_lookup_handle(handle, BT_CONN_TYPE_LE);
	if (!conn) {
		LOG_ERR("Unable to lookup conn for handle %u", handle);
		return;
	}

	/* Try the L2CAP fallback at most once per connection: the
	 * test-and-set flag guards against repeating it.
	 */
	if (evt->status == BT_HCI_ERR_UNSUPP_REMOTE_FEATURE &&
	    conn->role == BT_HCI_ROLE_PERIPHERAL &&
	    !atomic_test_and_set_bit(conn->flags,
				     BT_CONN_PERIPHERAL_PARAM_L2CAP)) {
		/* CPR not supported, let's try L2CAP CPUP instead */
		struct bt_le_conn_param param;

		param.interval_min = conn->le.interval_min;
		param.interval_max = conn->le.interval_max;
		param.latency = conn->le.pending_latency;
		param.timeout = conn->le.pending_timeout;

		bt_l2cap_update_conn_param(conn, &param);
	} else {
		if (!evt->status) {
			conn->le.interval = sys_le16_to_cpu(evt->interval);
			conn->le.latency = sys_le16_to_cpu(evt->latency);
			conn->le.timeout = sys_le16_to_cpu(evt->supv_timeout);

/* NOTE: this conditional block deliberately spans across the else-if
 * branches below; without auto-update support only the success branch
 * remains.
 */
#if defined(CONFIG_BT_GAP_AUTO_UPDATE_CONN_PARAMS)
			atomic_clear_bit(conn->flags,
					 BT_CONN_PERIPHERAL_PARAM_AUTO_UPDATE);
		} else if (atomic_test_bit(conn->flags,
					   BT_CONN_PERIPHERAL_PARAM_AUTO_UPDATE) &&
			   evt->status == BT_HCI_ERR_UNSUPP_LL_PARAM_VAL &&
			   conn->le.conn_param_retry_countdown) {
			/* Parameters were rejected; schedule another attempt
			 * while the retry budget lasts.
			 */
			conn->le.conn_param_retry_countdown--;
			k_work_schedule(&conn->deferred_work,
					K_MSEC(CONFIG_BT_CONN_PARAM_RETRY_TIMEOUT));
		} else {
			atomic_clear_bit(conn->flags,
					 BT_CONN_PERIPHERAL_PARAM_AUTO_UPDATE);
#endif /* CONFIG_BT_GAP_AUTO_UPDATE_CONN_PARAMS */

		}

		notify_le_param_updated(conn);
	}

	bt_conn_unref(conn);
}
1865
1866 #if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
set_flow_control(void)1867 static int set_flow_control(void)
1868 {
1869 struct bt_hci_cp_host_buffer_size *hbs;
1870 struct net_buf *buf;
1871 int err;
1872
1873 /* Check if host flow control is actually supported */
1874 if (!BT_CMD_TEST(bt_dev.supported_commands, 10, 5)) {
1875 LOG_WRN("Controller to host flow control not supported");
1876 return 0;
1877 }
1878
1879 buf = bt_hci_cmd_create(BT_HCI_OP_HOST_BUFFER_SIZE,
1880 sizeof(*hbs));
1881 if (!buf) {
1882 return -ENOBUFS;
1883 }
1884
1885 hbs = net_buf_add(buf, sizeof(*hbs));
1886 (void)memset(hbs, 0, sizeof(*hbs));
1887 hbs->acl_mtu = sys_cpu_to_le16(CONFIG_BT_BUF_ACL_RX_SIZE);
1888 hbs->acl_pkts = sys_cpu_to_le16(CONFIG_BT_BUF_ACL_RX_COUNT);
1889
1890 err = bt_hci_cmd_send_sync(BT_HCI_OP_HOST_BUFFER_SIZE, buf, NULL);
1891 if (err) {
1892 return err;
1893 }
1894
1895 buf = bt_hci_cmd_create(BT_HCI_OP_SET_CTL_TO_HOST_FLOW, 1);
1896 if (!buf) {
1897 return -ENOBUFS;
1898 }
1899
1900 net_buf_add_u8(buf, BT_HCI_CTL_TO_HOST_FLOW_ENABLE);
1901 return bt_hci_cmd_send_sync(BT_HCI_OP_SET_CTL_TO_HOST_FLOW, buf, NULL);
1902 }
1903 #endif /* CONFIG_BT_HCI_ACL_FLOW_CONTROL */
1904
/* Remove all pairing state for the given identity/peer pair: disconnect
 * any live connection, clear SMP keys and (for public addresses) BR/EDR
 * link keys, wipe persisted GATT data, and notify bond-deleted listeners.
 */
static void unpair(uint8_t id, const bt_addr_le_t *addr)
{
	struct bt_keys *keys = NULL;
	struct bt_conn *conn = bt_conn_lookup_addr_le(id, addr);

	if (conn) {
		/* Clear the conn->le.keys pointer since we'll invalidate it,
		 * and don't want any subsequent code (like disconnected
		 * callbacks) accessing it.
		 */
		if (conn->type == BT_CONN_TYPE_LE) {
			keys = conn->le.keys;
			conn->le.keys = NULL;
		}

		bt_conn_disconnect(conn, BT_HCI_ERR_REMOTE_USER_TERM_CONN);
		bt_conn_unref(conn);
	}

	if (IS_ENABLED(CONFIG_BT_BREDR)) {
		/* LE Public may indicate BR/EDR as well */
		if (addr->type == BT_ADDR_LE_PUBLIC) {
			bt_keys_link_key_clear_addr(&addr->a);
		}
	}

	if (IS_ENABLED(CONFIG_BT_SMP)) {
		/* If no keys were detached from a live connection above,
		 * look them up by address.
		 */
		if (!keys) {
			keys = bt_keys_find_addr(id, addr);
		}

		if (keys) {
			bt_keys_clear(keys);
		}
	}

	bt_gatt_clear(id, addr);

#if defined(CONFIG_BT_SMP) || defined(CONFIG_BT_BREDR)
	struct bt_conn_auth_info_cb *listener, *next;

	/* Safe iteration: a listener may unregister itself from within the
	 * bond_deleted callback.
	 */
	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&bt_auth_info_cbs, listener,
					  next, node) {
		if (listener->bond_deleted) {
			listener->bond_deleted(id, addr);
		}
	}
#endif /* defined(CONFIG_BT_SMP) || defined(CONFIG_BT_BREDR) */
}
1954
unpair_remote(const struct bt_bond_info * info,void * data)1955 static void unpair_remote(const struct bt_bond_info *info, void *data)
1956 {
1957 uint8_t *id = (uint8_t *) data;
1958
1959 unpair(*id, &info->addr);
1960 }
1961
bt_unpair(uint8_t id,const bt_addr_le_t * addr)1962 int bt_unpair(uint8_t id, const bt_addr_le_t *addr)
1963 {
1964 if (id >= CONFIG_BT_ID_MAX) {
1965 return -EINVAL;
1966 }
1967
1968 if (IS_ENABLED(CONFIG_BT_SMP)) {
1969 if (!addr || bt_addr_le_eq(addr, BT_ADDR_LE_ANY)) {
1970 bt_foreach_bond(id, unpair_remote, &id);
1971 } else {
1972 unpair(id, addr);
1973 }
1974 } else {
1975 CHECKIF(addr == NULL) {
1976 LOG_DBG("addr is NULL");
1977 return -EINVAL;
1978 }
1979
1980 unpair(id, addr);
1981 }
1982
1983 return 0;
1984 }
1985
1986 #endif /* CONFIG_BT_CONN */
1987
1988 #if defined(CONFIG_BT_SMP) || defined(CONFIG_BT_BREDR)
bt_security_err_get(uint8_t hci_err)1989 enum bt_security_err bt_security_err_get(uint8_t hci_err)
1990 {
1991 switch (hci_err) {
1992 case BT_HCI_ERR_SUCCESS:
1993 return BT_SECURITY_ERR_SUCCESS;
1994 case BT_HCI_ERR_AUTH_FAIL:
1995 return BT_SECURITY_ERR_AUTH_FAIL;
1996 case BT_HCI_ERR_PIN_OR_KEY_MISSING:
1997 return BT_SECURITY_ERR_PIN_OR_KEY_MISSING;
1998 case BT_HCI_ERR_PAIRING_NOT_SUPPORTED:
1999 return BT_SECURITY_ERR_PAIR_NOT_SUPPORTED;
2000 case BT_HCI_ERR_PAIRING_NOT_ALLOWED:
2001 return BT_SECURITY_ERR_PAIR_NOT_ALLOWED;
2002 case BT_HCI_ERR_INVALID_PARAM:
2003 return BT_SECURITY_ERR_INVALID_PARAM;
2004 default:
2005 return BT_SECURITY_ERR_UNSPECIFIED;
2006 }
2007 }
2008 #endif /* defined(CONFIG_BT_SMP) || defined(CONFIG_BT_BREDR) */
2009
2010 #if defined(CONFIG_BT_SMP)
update_sec_level(struct bt_conn * conn)2011 static bool update_sec_level(struct bt_conn *conn)
2012 {
2013 if (conn->le.keys && (conn->le.keys->flags & BT_KEYS_AUTHENTICATED)) {
2014 if (conn->le.keys->flags & BT_KEYS_SC &&
2015 conn->le.keys->enc_size == BT_SMP_MAX_ENC_KEY_SIZE) {
2016 conn->sec_level = BT_SECURITY_L4;
2017 } else {
2018 conn->sec_level = BT_SECURITY_L3;
2019 }
2020 } else {
2021 conn->sec_level = BT_SECURITY_L2;
2022 }
2023
2024 return !(conn->required_sec_level > conn->sec_level);
2025 }
2026 #endif /* CONFIG_BT_SMP */
2027
2028 #if defined(CONFIG_BT_SMP) || defined(CONFIG_BT_BREDR)
/* Handle the HCI Encryption Change event: update the connection's
 * encryption state and security level for the LE and/or BR/EDR
 * transport, notify the application, and drop the link if the required
 * security level could not be reached.
 */
static void hci_encrypt_change(struct net_buf *buf)
{
	struct bt_hci_evt_encrypt_change *evt = (void *)buf->data;
	uint16_t handle = sys_le16_to_cpu(evt->handle);
	uint8_t status = evt->status;
	struct bt_conn *conn;

	LOG_DBG("status 0x%02x handle %u encrypt 0x%02x", evt->status, handle, evt->encrypt);

	conn = bt_conn_lookup_handle(handle, BT_CONN_TYPE_ALL);
	if (!conn) {
		LOG_ERR("Unable to look up conn with handle %u", handle);
		return;
	}

	if (status) {
		bt_conn_security_changed(conn, status,
					 bt_security_err_get(status));
		bt_conn_unref(conn);
		return;
	}

	conn->encrypt = evt->encrypt;

#if defined(CONFIG_BT_SMP)
	if (conn->type == BT_CONN_TYPE_LE) {
		/*
		 * we update keys properties only on successful encryption to
		 * avoid losing valid keys if encryption was not successful.
		 *
		 * Update keys with last pairing info for proper sec level
		 * update. This is done only for LE transport, for BR/EDR keys
		 * are updated on HCI 'Link Key Notification Event'
		 */
		if (conn->encrypt) {
			bt_smp_update_keys(conn);
		}

		/* Reuse `status` to report an authentication failure below
		 * when the achieved level is under the required one.
		 */
		if (!update_sec_level(conn)) {
			status = BT_HCI_ERR_AUTH_FAIL;
		}
	}
#endif /* CONFIG_BT_SMP */
#if defined(CONFIG_BT_BREDR)
	if (conn->type == BT_CONN_TYPE_BR) {
		if (!bt_br_update_sec_level(conn)) {
			bt_conn_unref(conn);
			return;
		}

		if (IS_ENABLED(CONFIG_BT_SMP)) {
			/*
			 * Start SMP over BR/EDR if we are pairing and are
			 * central on the link
			 */
			if (atomic_test_bit(conn->flags, BT_CONN_BR_PAIRING) &&
			    conn->role == BT_CONN_ROLE_CENTRAL) {
				bt_smp_br_send_pairing_req(conn);
			}
		}
	}
#endif /* CONFIG_BT_BREDR */

	bt_conn_security_changed(conn, status, bt_security_err_get(status));

	if (status) {
		LOG_ERR("Failed to set required security level");
		bt_conn_disconnect(conn, status);
	}

	bt_conn_unref(conn);
}
2101
/* Handle the HCI Encryption Key Refresh Complete event.
 *
 * Similar to hci_encrypt_change(), but the encryption state itself is
 * unchanged: only the keys/security level are re-evaluated and the
 * application is notified of the result.
 */
static void hci_encrypt_key_refresh_complete(struct net_buf *buf)
{
	struct bt_hci_evt_encrypt_key_refresh_complete *evt = (void *)buf->data;
	uint8_t status = evt->status;
	struct bt_conn *conn;
	uint16_t handle;

	handle = sys_le16_to_cpu(evt->handle);

	LOG_DBG("status 0x%02x handle %u", evt->status, handle);

	conn = bt_conn_lookup_handle(handle, BT_CONN_TYPE_ALL);
	if (!conn) {
		LOG_ERR("Unable to look up conn with handle %u", handle);
		return;
	}

	if (status) {
		/* Refresh failed: report the error and bail out without
		 * re-evaluating keys or security level.
		 */
		bt_conn_security_changed(conn, status,
					 bt_security_err_get(status));
		bt_conn_unref(conn);
		return;
	}

	/*
	 * Update keys with last pairing info for proper sec level update.
	 * This is done only for LE transport. For BR/EDR transport keys are
	 * updated on HCI 'Link Key Notification Event', therefore update here
	 * only security level based on available keys and encryption state.
	 */
#if defined(CONFIG_BT_SMP)
	if (conn->type == BT_CONN_TYPE_LE) {
		bt_smp_update_keys(conn);

		/* Achieved level below the required one counts as an
		 * authentication failure.
		 */
		if (!update_sec_level(conn)) {
			status = BT_HCI_ERR_AUTH_FAIL;
		}
	}
#endif /* CONFIG_BT_SMP */
#if defined(CONFIG_BT_BREDR)
	if (conn->type == BT_CONN_TYPE_BR) {
		if (!bt_br_update_sec_level(conn)) {
			bt_conn_unref(conn);
			return;
		}
	}
#endif /* CONFIG_BT_BREDR */

	bt_conn_security_changed(conn, status, bt_security_err_get(status));
	if (status) {
		LOG_ERR("Failed to set required security level");
		bt_conn_disconnect(conn, status);
	}

	bt_conn_unref(conn);
}
2158 #endif /* CONFIG_BT_SMP || CONFIG_BT_BREDR */
2159
2160 #if defined(CONFIG_BT_REMOTE_VERSION)
/* Handle the HCI Read Remote Version Information Complete event.
 *
 * Caches the remote version/manufacturer/subversion on the connection
 * (only on success), marks the auto-initiated version exchange as done
 * and, if remote info notifications are enabled, notifies listeners.
 */
static void bt_hci_evt_read_remote_version_complete(struct net_buf *buf)
{
	struct bt_hci_evt_remote_version_info *evt;
	struct bt_conn *conn;
	uint16_t handle;

	evt = net_buf_pull_mem(buf, sizeof(*evt));
	handle = sys_le16_to_cpu(evt->handle);
	conn = bt_conn_lookup_handle(handle, BT_CONN_TYPE_ALL);
	if (!conn) {
		LOG_ERR("No connection for handle %u", handle);
		return;
	}

	if (!evt->status) {
		conn->rv.version = evt->version;
		conn->rv.manufacturer = sys_le16_to_cpu(evt->manufacturer);
		conn->rv.subversion = sys_le16_to_cpu(evt->subversion);
	}

	/* Marked done even on failure so waiters are not stuck */
	atomic_set_bit(conn->flags, BT_CONN_AUTO_VERSION_INFO);

	if (IS_ENABLED(CONFIG_BT_REMOTE_INFO)) {
		/* Remote features is already present */
		notify_remote_info(conn);
	}

	bt_conn_unref(conn);
}
2190 #endif /* CONFIG_BT_REMOTE_VERSION */
2191
hci_hardware_error(struct net_buf * buf)2192 static void hci_hardware_error(struct net_buf *buf)
2193 {
2194 struct bt_hci_evt_hardware_error *evt;
2195
2196 evt = net_buf_pull_mem(buf, sizeof(*evt));
2197
2198 LOG_ERR("Hardware error, hardware code: %d", evt->hardware_code);
2199 }
2200
2201 #if defined(CONFIG_BT_SMP)
/* Reject an LE LTK request for @p handle by sending the
 * HCI LE Long Term Key Request Negative Reply command.
 */
static void le_ltk_neg_reply(uint16_t handle)
{
	struct net_buf *buf;
	struct bt_hci_cp_le_ltk_req_neg_reply *cp;

	buf = bt_hci_cmd_create(BT_HCI_OP_LE_LTK_REQ_NEG_REPLY, sizeof(*cp));
	if (buf == NULL) {
		LOG_ERR("Out of command buffers");
		return;
	}

	cp = net_buf_add(buf, sizeof(*cp));
	cp->handle = sys_cpu_to_le16(handle);

	bt_hci_cmd_send(BT_HCI_OP_LE_LTK_REQ_NEG_REPLY, buf);
}
2219
/* Answer an LE LTK request for @p handle with the given long term key
 * via the HCI LE Long Term Key Request Reply command.
 */
static void le_ltk_reply(uint16_t handle, uint8_t *ltk)
{
	struct net_buf *buf;
	struct bt_hci_cp_le_ltk_req_reply *cp;

	buf = bt_hci_cmd_create(BT_HCI_OP_LE_LTK_REQ_REPLY, sizeof(*cp));
	if (buf == NULL) {
		LOG_ERR("Out of command buffers");
		return;
	}

	cp = net_buf_add(buf, sizeof(*cp));
	cp->handle = sys_cpu_to_le16(handle);
	(void)memcpy(cp->ltk, ltk, sizeof(cp->ltk));

	bt_hci_cmd_send(BT_HCI_OP_LE_LTK_REQ_REPLY, buf);
}
2238
le_ltk_request(struct net_buf * buf)2239 static void le_ltk_request(struct net_buf *buf)
2240 {
2241 struct bt_hci_evt_le_ltk_request *evt = (void *)buf->data;
2242 struct bt_conn *conn;
2243 uint16_t handle;
2244 uint8_t ltk[16];
2245
2246 handle = sys_le16_to_cpu(evt->handle);
2247
2248 LOG_DBG("handle %u", handle);
2249
2250 conn = bt_conn_lookup_handle(handle, BT_CONN_TYPE_LE);
2251 if (!conn) {
2252 LOG_ERR("Unable to lookup conn for handle %u", handle);
2253 return;
2254 }
2255
2256 if (bt_smp_request_ltk(conn, evt->rand, evt->ediv, ltk)) {
2257 le_ltk_reply(handle, ltk);
2258 } else {
2259 le_ltk_neg_reply(handle);
2260 }
2261
2262 bt_conn_unref(conn);
2263 }
2264 #endif /* CONFIG_BT_SMP */
2265
hci_reset_complete(struct net_buf * buf)2266 static void hci_reset_complete(struct net_buf *buf)
2267 {
2268 uint8_t status = buf->data[0];
2269 atomic_t flags;
2270
2271 LOG_DBG("status 0x%02x", status);
2272
2273 if (status) {
2274 return;
2275 }
2276
2277 if (IS_ENABLED(CONFIG_BT_OBSERVER)) {
2278 bt_scan_reset();
2279 }
2280
2281 #if defined(CONFIG_BT_BREDR)
2282 bt_br_discovery_reset();
2283 #endif /* CONFIG_BT_BREDR */
2284
2285 flags = (atomic_get(bt_dev.flags) & BT_DEV_PERSISTENT_FLAGS);
2286 atomic_set(bt_dev.flags, flags);
2287 }
2288
/* Finalize a pending HCI command when its Command Complete/Status
 * event arrives.
 *
 * @param opcode OpCode reported in the event.
 * @param status 1-byte status taken from the event parameters.
 * @param buf    Buffer carrying the event; it is expected to originate
 *               from the HCI command pool and to match the pending
 *               command's opcode, otherwise handling is skipped.
 *
 * Releases bt_dev.sent_cmd, applies any deferred flag update attached
 * to the command, and wakes up a bt_hci_cmd_send_sync() waiter.
 */
static void hci_cmd_done(uint16_t opcode, uint8_t status, struct net_buf *buf)
{
	LOG_DBG("opcode 0x%04x status 0x%02x buf %p", opcode, status, buf);

	/* Sanity check: the event must come back in a command-pool buffer */
	if (net_buf_pool_get(buf->pool_id) != &hci_cmd_pool) {
		LOG_WRN("opcode 0x%04x pool id %u pool %p != &hci_cmd_pool %p", opcode,
			buf->pool_id, net_buf_pool_get(buf->pool_id), &hci_cmd_pool);
		return;
	}

	if (cmd(buf)->opcode != opcode) {
		LOG_WRN("OpCode 0x%04x completed instead of expected 0x%04x", opcode,
			cmd(buf)->opcode);
		return;
	}

	/* Drop the reference kept by send_cmd() for the in-flight command */
	if (bt_dev.sent_cmd) {
		net_buf_unref(bt_dev.sent_cmd);
		bt_dev.sent_cmd = NULL;
	}

	/* Apply the deferred atomic flag update only on success */
	if (cmd(buf)->state && !status) {
		struct bt_hci_cmd_state_set *update = cmd(buf)->state;

		atomic_set_bit_to(update->target, update->bit, update->val);
	}

	/* If the command was synchronous wake up bt_hci_cmd_send_sync() */
	if (cmd(buf)->sync) {
		cmd(buf)->status = status;
		k_sem_give(cmd(buf)->sync);
	}
}
2322
hci_cmd_complete(struct net_buf * buf)2323 static void hci_cmd_complete(struct net_buf *buf)
2324 {
2325 struct bt_hci_evt_cmd_complete *evt;
2326 uint8_t status, ncmd;
2327 uint16_t opcode;
2328
2329 evt = net_buf_pull_mem(buf, sizeof(*evt));
2330 ncmd = evt->ncmd;
2331 opcode = sys_le16_to_cpu(evt->opcode);
2332
2333 LOG_DBG("opcode 0x%04x", opcode);
2334
2335 /* All command return parameters have a 1-byte status in the
2336 * beginning, so we can safely make this generalization.
2337 */
2338 status = buf->data[0];
2339
2340 hci_cmd_done(opcode, status, buf);
2341
2342 /* Allow next command to be sent */
2343 if (ncmd) {
2344 k_sem_give(&bt_dev.ncmd_sem);
2345 }
2346 }
2347
hci_cmd_status(struct net_buf * buf)2348 static void hci_cmd_status(struct net_buf *buf)
2349 {
2350 struct bt_hci_evt_cmd_status *evt;
2351 uint16_t opcode;
2352 uint8_t ncmd;
2353
2354 evt = net_buf_pull_mem(buf, sizeof(*evt));
2355 opcode = sys_le16_to_cpu(evt->opcode);
2356 ncmd = evt->ncmd;
2357
2358 LOG_DBG("opcode 0x%04x", opcode);
2359
2360 hci_cmd_done(opcode, evt->status, buf);
2361
2362 /* Allow next command to be sent */
2363 if (ncmd) {
2364 k_sem_give(&bt_dev.ncmd_sem);
2365 }
2366 }
2367
/* Retrieve the HCI connection handle of an established connection.
 *
 * @param conn        Connection object.
 * @param conn_handle Output location for the HCI handle.
 *
 * @return 0 on success, -ENOTCONN if the connection is not in the
 *         connected state.
 */
int bt_hci_get_conn_handle(const struct bt_conn *conn, uint16_t *conn_handle)
{
	if (conn->state != BT_CONN_CONNECTED) {
		return -ENOTCONN;
	}

	*conn_handle = conn->handle;
	return 0;
}
2377
2378 #if defined(CONFIG_BT_EXT_ADV)
/* Retrieve the HCI advertising handle of a created advertising set.
 *
 * @param adv        Extended advertising set.
 * @param adv_handle Output location for the advertising handle.
 *
 * @return 0 on success, -EINVAL if the set has not been created.
 */
int bt_hci_get_adv_handle(const struct bt_le_ext_adv *adv, uint8_t *adv_handle)
{
	if (!atomic_test_bit(adv->flags, BT_ADV_CREATED)) {
		return -EINVAL;
	}

	*adv_handle = adv->handle;
	return 0;
}
2388 #endif /* CONFIG_BT_EXT_ADV */
2389
2390 #if defined(CONFIG_BT_HCI_VS_EVT_USER)
/* Register (or replace) the user callback given first chance at
 * vendor-specific HCI events; pass NULL to unregister.
 *
 * @return 0 always.
 */
int bt_hci_register_vnd_evt_cb(bt_hci_vnd_evt_cb_t cb)
{
	hci_vnd_evt_cb = cb;
	return 0;
}
2396 #endif /* CONFIG_BT_HCI_VS_EVT_USER */
2397
/* Dispatch table for vendor-specific HCI subevents handled by the host
 * (currently only direction-finding IQ sample reports).
 */
static const struct event_handler vs_events[] = {
#if defined(CONFIG_BT_DF_VS_CL_IQ_REPORT_16_BITS_IQ_SAMPLES)
	EVENT_HANDLER(BT_HCI_EVT_VS_LE_CONNECTIONLESS_IQ_REPORT,
		      bt_hci_le_vs_df_connectionless_iq_report,
		      sizeof(struct bt_hci_evt_vs_le_connectionless_iq_report)),
#endif /* CONFIG_BT_DF_VS_CL_IQ_REPORT_16_BITS_IQ_SAMPLES */
#if defined(CONFIG_BT_DF_VS_CONN_IQ_REPORT_16_BITS_IQ_SAMPLES)
	EVENT_HANDLER(BT_HCI_EVT_VS_LE_CONNECTION_IQ_REPORT, bt_hci_le_vs_df_connection_iq_report,
		      sizeof(struct bt_hci_evt_vs_le_connection_iq_report)),
#endif /* CONFIG_BT_DF_VS_CONN_IQ_REPORT_16_BITS_IQ_SAMPLES */
};
2409
/* Handle an HCI Vendor-specific event.
 *
 * A user-registered callback (CONFIG_BT_HCI_VS_EVT_USER) gets first
 * chance at the raw event; if it does not claim it, the event is
 * dispatched through the vs_events table.
 */
static void hci_vendor_event(struct net_buf *buf)
{
	bool handled = false;

#if defined(CONFIG_BT_HCI_VS_EVT_USER)
	if (hci_vnd_evt_cb) {
		struct net_buf_simple_state state;

		/* Save/restore so the user callback cannot corrupt the
		 * buffer view used by the table dispatch below.
		 */
		net_buf_simple_save(&buf->b, &state);

		handled = hci_vnd_evt_cb(&buf->b);

		net_buf_simple_restore(&buf->b, &state);
	}
#endif /* CONFIG_BT_HCI_VS_EVT_USER */

	if (IS_ENABLED(CONFIG_BT_HCI_VS_EVT) && !handled) {
		struct bt_hci_evt_vs *evt;

		evt = net_buf_pull_mem(buf, sizeof(*evt));

		LOG_DBG("subevent 0x%02x", evt->subevent);

		handle_vs_event(evt->subevent, buf, vs_events, ARRAY_SIZE(vs_events));
	}
}
2436
/* Dispatch table for LE Meta event subevents. Each entry pairs a
 * subevent code with its handler and the minimum parameter length
 * expected by that handler.
 */
static const struct event_handler meta_events[] = {
#if defined(CONFIG_BT_OBSERVER)
	EVENT_HANDLER(BT_HCI_EVT_LE_ADVERTISING_REPORT, bt_hci_le_adv_report,
		      sizeof(struct bt_hci_evt_le_advertising_report)),
#endif /* CONFIG_BT_OBSERVER */
#if defined(CONFIG_BT_CONN)
	EVENT_HANDLER(BT_HCI_EVT_LE_CONN_COMPLETE, le_legacy_conn_complete,
		      sizeof(struct bt_hci_evt_le_conn_complete)),
	EVENT_HANDLER(BT_HCI_EVT_LE_ENH_CONN_COMPLETE, le_enh_conn_complete,
		      sizeof(struct bt_hci_evt_le_enh_conn_complete)),
	EVENT_HANDLER(BT_HCI_EVT_LE_CONN_UPDATE_COMPLETE,
		      le_conn_update_complete,
		      sizeof(struct bt_hci_evt_le_conn_update_complete)),
	EVENT_HANDLER(BT_HCI_EVT_LE_REMOTE_FEAT_COMPLETE,
		      le_remote_feat_complete,
		      sizeof(struct bt_hci_evt_le_remote_feat_complete)),
	EVENT_HANDLER(BT_HCI_EVT_LE_CONN_PARAM_REQ, le_conn_param_req,
		      sizeof(struct bt_hci_evt_le_conn_param_req)),
#if defined(CONFIG_BT_DATA_LEN_UPDATE)
	EVENT_HANDLER(BT_HCI_EVT_LE_DATA_LEN_CHANGE, le_data_len_change,
		      sizeof(struct bt_hci_evt_le_data_len_change)),
#endif /* CONFIG_BT_DATA_LEN_UPDATE */
#if defined(CONFIG_BT_PHY_UPDATE)
	EVENT_HANDLER(BT_HCI_EVT_LE_PHY_UPDATE_COMPLETE,
		      le_phy_update_complete,
		      sizeof(struct bt_hci_evt_le_phy_update_complete)),
#endif /* CONFIG_BT_PHY_UPDATE */
#endif /* CONFIG_BT_CONN */
#if defined(CONFIG_BT_SMP)
	EVENT_HANDLER(BT_HCI_EVT_LE_LTK_REQUEST, le_ltk_request,
		      sizeof(struct bt_hci_evt_le_ltk_request)),
#endif /* CONFIG_BT_SMP */
#if defined(CONFIG_BT_ECC)
	EVENT_HANDLER(BT_HCI_EVT_LE_P256_PUBLIC_KEY_COMPLETE,
		      bt_hci_evt_le_pkey_complete,
		      sizeof(struct bt_hci_evt_le_p256_public_key_complete)),
	EVENT_HANDLER(BT_HCI_EVT_LE_GENERATE_DHKEY_COMPLETE,
		      bt_hci_evt_le_dhkey_complete,
		      sizeof(struct bt_hci_evt_le_generate_dhkey_complete)),
#endif /* CONFIG_BT_ECC */
#if defined(CONFIG_BT_EXT_ADV)
#if defined(CONFIG_BT_BROADCASTER)
	EVENT_HANDLER(BT_HCI_EVT_LE_ADV_SET_TERMINATED, bt_hci_le_adv_set_terminated,
		      sizeof(struct bt_hci_evt_le_adv_set_terminated)),
	EVENT_HANDLER(BT_HCI_EVT_LE_SCAN_REQ_RECEIVED, bt_hci_le_scan_req_received,
		      sizeof(struct bt_hci_evt_le_scan_req_received)),
#endif
#if defined(CONFIG_BT_OBSERVER)
	EVENT_HANDLER(BT_HCI_EVT_LE_SCAN_TIMEOUT, bt_hci_le_scan_timeout,
		      0),
	EVENT_HANDLER(BT_HCI_EVT_LE_EXT_ADVERTISING_REPORT, bt_hci_le_adv_ext_report,
		      sizeof(struct bt_hci_evt_le_ext_advertising_report)),
#endif /* defined(CONFIG_BT_OBSERVER) */
#if defined(CONFIG_BT_PER_ADV_SYNC)
	EVENT_HANDLER(BT_HCI_EVT_LE_PER_ADV_SYNC_ESTABLISHED,
		      bt_hci_le_per_adv_sync_established,
		      sizeof(struct bt_hci_evt_le_per_adv_sync_established)),
	EVENT_HANDLER(BT_HCI_EVT_LE_PER_ADVERTISING_REPORT, bt_hci_le_per_adv_report,
		      sizeof(struct bt_hci_evt_le_per_advertising_report)),
	EVENT_HANDLER(BT_HCI_EVT_LE_PER_ADV_SYNC_LOST, bt_hci_le_per_adv_sync_lost,
		      sizeof(struct bt_hci_evt_le_per_adv_sync_lost)),
#if defined(CONFIG_BT_CONN)
	EVENT_HANDLER(BT_HCI_EVT_LE_PAST_RECEIVED, bt_hci_le_past_received,
		      sizeof(struct bt_hci_evt_le_past_received)),
#endif /* CONFIG_BT_CONN */
#endif /* defined(CONFIG_BT_PER_ADV_SYNC) */
#endif /* defined(CONFIG_BT_EXT_ADV) */
#if defined(CONFIG_BT_ISO_UNICAST)
	EVENT_HANDLER(BT_HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_established,
		      sizeof(struct bt_hci_evt_le_cis_established)),
#if defined(CONFIG_BT_ISO_PERIPHERAL)
	EVENT_HANDLER(BT_HCI_EVT_LE_CIS_REQ, hci_le_cis_req,
		      sizeof(struct bt_hci_evt_le_cis_req)),
#endif /* (CONFIG_BT_ISO_PERIPHERAL) */
#endif /* (CONFIG_BT_ISO_UNICAST) */
#if defined(CONFIG_BT_ISO_BROADCASTER)
	EVENT_HANDLER(BT_HCI_EVT_LE_BIG_COMPLETE,
		      hci_le_big_complete,
		      sizeof(struct bt_hci_evt_le_big_complete)),
	EVENT_HANDLER(BT_HCI_EVT_LE_BIG_TERMINATE,
		      hci_le_big_terminate,
		      sizeof(struct bt_hci_evt_le_big_terminate)),
#endif /* CONFIG_BT_ISO_BROADCASTER */
#if defined(CONFIG_BT_ISO_SYNC_RECEIVER)
	EVENT_HANDLER(BT_HCI_EVT_LE_BIG_SYNC_ESTABLISHED,
		      hci_le_big_sync_established,
		      sizeof(struct bt_hci_evt_le_big_sync_established)),
	EVENT_HANDLER(BT_HCI_EVT_LE_BIG_SYNC_LOST,
		      hci_le_big_sync_lost,
		      sizeof(struct bt_hci_evt_le_big_sync_lost)),
	EVENT_HANDLER(BT_HCI_EVT_LE_BIGINFO_ADV_REPORT,
		      bt_hci_le_biginfo_adv_report,
		      sizeof(struct bt_hci_evt_le_biginfo_adv_report)),
#endif /* CONFIG_BT_ISO_SYNC_RECEIVER */
#if defined(CONFIG_BT_DF_CONNECTIONLESS_CTE_RX)
	EVENT_HANDLER(BT_HCI_EVT_LE_CONNECTIONLESS_IQ_REPORT, bt_hci_le_df_connectionless_iq_report,
		      sizeof(struct bt_hci_evt_le_connectionless_iq_report)),
#endif /* CONFIG_BT_DF_CONNECTIONLESS_CTE_RX */
#if defined(CONFIG_BT_DF_CONNECTION_CTE_RX)
	EVENT_HANDLER(BT_HCI_EVT_LE_CONNECTION_IQ_REPORT, bt_hci_le_df_connection_iq_report,
		      sizeof(struct bt_hci_evt_le_connection_iq_report)),
#endif /* CONFIG_BT_DF_CONNECTION_CTE_RX */
#if defined(CONFIG_BT_DF_CONNECTION_CTE_REQ)
	EVENT_HANDLER(BT_HCI_EVT_LE_CTE_REQUEST_FAILED, bt_hci_le_df_cte_req_failed,
		      sizeof(struct bt_hci_evt_le_cte_req_failed)),
#endif /* CONFIG_BT_DF_CONNECTION_CTE_REQ */
#if defined(CONFIG_BT_PER_ADV_SYNC_RSP)
	EVENT_HANDLER(BT_HCI_EVT_LE_PER_ADVERTISING_REPORT_V2, bt_hci_le_per_adv_report_v2,
		      sizeof(struct bt_hci_evt_le_per_advertising_report_v2)),
	EVENT_HANDLER(BT_HCI_EVT_LE_PAST_RECEIVED_V2, bt_hci_le_past_received_v2,
		      sizeof(struct bt_hci_evt_le_past_received_v2)),
	EVENT_HANDLER(BT_HCI_EVT_LE_PER_ADV_SYNC_ESTABLISHED_V2,
		      bt_hci_le_per_adv_sync_established_v2,
		      sizeof(struct bt_hci_evt_le_per_adv_sync_established_v2)),
#endif /* CONFIG_BT_PER_ADV_SYNC_RSP */
#if defined(CONFIG_BT_PER_ADV_RSP)
	EVENT_HANDLER(BT_HCI_EVT_LE_PER_ADV_SUBEVENT_DATA_REQUEST,
		      bt_hci_le_per_adv_subevent_data_request,
		      sizeof(struct bt_hci_evt_le_per_adv_subevent_data_request)),
	EVENT_HANDLER(BT_HCI_EVT_LE_PER_ADV_RESPONSE_REPORT, bt_hci_le_per_adv_response_report,
		      sizeof(struct bt_hci_evt_le_per_adv_response_report)),
#endif /* CONFIG_BT_PER_ADV_RSP */
#if defined(CONFIG_BT_CONN)
#if defined(CONFIG_BT_PER_ADV_RSP) || defined(CONFIG_BT_PER_ADV_SYNC_RSP)
	EVENT_HANDLER(BT_HCI_EVT_LE_ENH_CONN_COMPLETE_V2, le_enh_conn_complete_v2,
		      sizeof(struct bt_hci_evt_le_enh_conn_complete_v2)),
#endif /* CONFIG_BT_PER_ADV_RSP || CONFIG_BT_PER_ADV_SYNC_RSP */
#endif /* CONFIG_BT_CONN */

};
2567
hci_le_meta_event(struct net_buf * buf)2568 static void hci_le_meta_event(struct net_buf *buf)
2569 {
2570 struct bt_hci_evt_le_meta_event *evt;
2571
2572 evt = net_buf_pull_mem(buf, sizeof(*evt));
2573
2574 LOG_DBG("subevent 0x%02x", evt->subevent);
2575
2576 handle_event(evt->subevent, buf, meta_events, ARRAY_SIZE(meta_events));
2577 }
2578
/* Dispatch table for regular (non-meta) HCI events. Each entry pairs an
 * event code with its handler and the minimum parameter length expected
 * by that handler.
 */
static const struct event_handler normal_events[] = {
	EVENT_HANDLER(BT_HCI_EVT_VENDOR, hci_vendor_event,
		      sizeof(struct bt_hci_evt_vs)),
	EVENT_HANDLER(BT_HCI_EVT_LE_META_EVENT, hci_le_meta_event,
		      sizeof(struct bt_hci_evt_le_meta_event)),
#if defined(CONFIG_BT_BREDR)
	EVENT_HANDLER(BT_HCI_EVT_CONN_REQUEST, bt_hci_conn_req,
		      sizeof(struct bt_hci_evt_conn_request)),
	EVENT_HANDLER(BT_HCI_EVT_CONN_COMPLETE, bt_hci_conn_complete,
		      sizeof(struct bt_hci_evt_conn_complete)),
	EVENT_HANDLER(BT_HCI_EVT_PIN_CODE_REQ, bt_hci_pin_code_req,
		      sizeof(struct bt_hci_evt_pin_code_req)),
	EVENT_HANDLER(BT_HCI_EVT_LINK_KEY_NOTIFY, bt_hci_link_key_notify,
		      sizeof(struct bt_hci_evt_link_key_notify)),
	EVENT_HANDLER(BT_HCI_EVT_LINK_KEY_REQ, bt_hci_link_key_req,
		      sizeof(struct bt_hci_evt_link_key_req)),
	EVENT_HANDLER(BT_HCI_EVT_IO_CAPA_RESP, bt_hci_io_capa_resp,
		      sizeof(struct bt_hci_evt_io_capa_resp)),
	EVENT_HANDLER(BT_HCI_EVT_IO_CAPA_REQ, bt_hci_io_capa_req,
		      sizeof(struct bt_hci_evt_io_capa_req)),
	EVENT_HANDLER(BT_HCI_EVT_SSP_COMPLETE, bt_hci_ssp_complete,
		      sizeof(struct bt_hci_evt_ssp_complete)),
	EVENT_HANDLER(BT_HCI_EVT_USER_CONFIRM_REQ, bt_hci_user_confirm_req,
		      sizeof(struct bt_hci_evt_user_confirm_req)),
	EVENT_HANDLER(BT_HCI_EVT_USER_PASSKEY_NOTIFY,
		      bt_hci_user_passkey_notify,
		      sizeof(struct bt_hci_evt_user_passkey_notify)),
	EVENT_HANDLER(BT_HCI_EVT_USER_PASSKEY_REQ, bt_hci_user_passkey_req,
		      sizeof(struct bt_hci_evt_user_passkey_req)),
	EVENT_HANDLER(BT_HCI_EVT_INQUIRY_COMPLETE, bt_hci_inquiry_complete,
		      sizeof(struct bt_hci_evt_inquiry_complete)),
	EVENT_HANDLER(BT_HCI_EVT_INQUIRY_RESULT_WITH_RSSI,
		      bt_hci_inquiry_result_with_rssi,
		      sizeof(struct bt_hci_evt_inquiry_result_with_rssi)),
	EVENT_HANDLER(BT_HCI_EVT_EXTENDED_INQUIRY_RESULT,
		      bt_hci_extended_inquiry_result,
		      sizeof(struct bt_hci_evt_extended_inquiry_result)),
	EVENT_HANDLER(BT_HCI_EVT_REMOTE_NAME_REQ_COMPLETE,
		      bt_hci_remote_name_request_complete,
		      sizeof(struct bt_hci_evt_remote_name_req_complete)),
	EVENT_HANDLER(BT_HCI_EVT_AUTH_COMPLETE, bt_hci_auth_complete,
		      sizeof(struct bt_hci_evt_auth_complete)),
	EVENT_HANDLER(BT_HCI_EVT_REMOTE_FEATURES,
		      bt_hci_read_remote_features_complete,
		      sizeof(struct bt_hci_evt_remote_features)),
	EVENT_HANDLER(BT_HCI_EVT_REMOTE_EXT_FEATURES,
		      bt_hci_read_remote_ext_features_complete,
		      sizeof(struct bt_hci_evt_remote_ext_features)),
	EVENT_HANDLER(BT_HCI_EVT_ROLE_CHANGE, bt_hci_role_change,
		      sizeof(struct bt_hci_evt_role_change)),
	EVENT_HANDLER(BT_HCI_EVT_SYNC_CONN_COMPLETE, bt_hci_synchronous_conn_complete,
		      sizeof(struct bt_hci_evt_sync_conn_complete)),
#endif /* CONFIG_BT_BREDR */
#if defined(CONFIG_BT_CONN)
	EVENT_HANDLER(BT_HCI_EVT_DISCONN_COMPLETE, hci_disconn_complete,
		      sizeof(struct bt_hci_evt_disconn_complete)),
#endif /* CONFIG_BT_CONN */
#if defined(CONFIG_BT_SMP) || defined(CONFIG_BT_BREDR)
	EVENT_HANDLER(BT_HCI_EVT_ENCRYPT_CHANGE, hci_encrypt_change,
		      sizeof(struct bt_hci_evt_encrypt_change)),
	EVENT_HANDLER(BT_HCI_EVT_ENCRYPT_KEY_REFRESH_COMPLETE,
		      hci_encrypt_key_refresh_complete,
		      sizeof(struct bt_hci_evt_encrypt_key_refresh_complete)),
#endif /* CONFIG_BT_SMP || CONFIG_BT_BREDR */
#if defined(CONFIG_BT_REMOTE_VERSION)
	EVENT_HANDLER(BT_HCI_EVT_REMOTE_VERSION_INFO,
		      bt_hci_evt_read_remote_version_complete,
		      sizeof(struct bt_hci_evt_remote_version_info)),
#endif /* CONFIG_BT_REMOTE_VERSION */
	EVENT_HANDLER(BT_HCI_EVT_HARDWARE_ERROR, hci_hardware_error,
		      sizeof(struct bt_hci_evt_hardware_error)),
};
2651
/* Dispatch one complete HCI event buffer.
 *
 * Validates and pulls the event header, dispatches on the event code
 * through normal_events and releases the buffer in all cases.
 */
static void hci_event(struct net_buf *buf)
{
	struct bt_hci_evt_hdr *hdr;

	if (buf->len < sizeof(*hdr)) {
		LOG_ERR("Invalid HCI event size (%u)", buf->len);
		net_buf_unref(buf);
		return;
	}

	hdr = net_buf_pull_mem(buf, sizeof(*hdr));
	LOG_DBG("event 0x%02x", hdr->evt);
	/* Only events flagged for this receive path are expected here */
	BT_ASSERT(bt_hci_evt_get_flags(hdr->evt) & BT_HCI_EVT_FLAG_RECV);

	handle_event(hdr->evt, buf, normal_events, ARRAY_SIZE(normal_events));

	net_buf_unref(buf);
}
2670
/* Dequeue the next HCI command and hand it to the driver.
 *
 * Blocks on ncmd_sem until the controller has credit for another
 * command (HCI command flow control). Keeps a reference to the command
 * in bt_dev.sent_cmd so hci_cmd_done() can pair it with the matching
 * Command Complete/Status event. On a driver send failure the command
 * is failed locally and the credit is returned.
 */
static void send_cmd(void)
{
	struct net_buf *buf;
	int err;

	/* Get next command */
	LOG_DBG("calling net_buf_get");
	buf = net_buf_get(&bt_dev.cmd_tx_queue, K_NO_WAIT);
	BT_ASSERT(buf);

	/* Wait until ncmd > 0 */
	LOG_DBG("calling sem_take_wait");
	k_sem_take(&bt_dev.ncmd_sem, K_FOREVER);

	/* Clear out any existing sent command */
	if (bt_dev.sent_cmd) {
		LOG_ERR("Uncleared pending sent_cmd");
		net_buf_unref(bt_dev.sent_cmd);
		bt_dev.sent_cmd = NULL;
	}

	/* Extra reference kept until the command's completion event */
	bt_dev.sent_cmd = net_buf_ref(buf);

	LOG_DBG("Sending command 0x%04x (buf %p) to driver", cmd(buf)->opcode, buf);

	err = bt_send(buf);
	if (err) {
		LOG_ERR("Unable to send to driver (err %d)", err);
		k_sem_give(&bt_dev.ncmd_sem);
		/* Fail the command locally so any synchronous waiter is
		 * woken up instead of timing out.
		 */
		hci_cmd_done(cmd(buf)->opcode, BT_HCI_ERR_UNSPECIFIED, buf);
		net_buf_unref(buf);
	}
}
2704
/* Process the k_poll events the TX thread woke up on.
 *
 * @param ev    Array of poll events (first entry is the command FIFO).
 * @param count Number of entries in @p ev.
 *
 * FIFO-data events either trigger an HCI command send (BT_EVENT_CMD_TX)
 * or per-connection TX processing (BT_EVENT_CONN_TX_QUEUE).
 */
static void process_events(struct k_poll_event *ev, int count)
{
	LOG_DBG("count %d", count);

	for (; count; ev++, count--) {
		LOG_DBG("ev->state %u", ev->state);

		switch (ev->state) {
		case K_POLL_STATE_SIGNALED:
			break;
		case K_POLL_STATE_SEM_AVAILABLE:
			/* After this fn is exec'd, `bt_conn_prepare_events()`
			 * will be called once again, and this time buffers will
			 * be available, so the FIFO will be added to the poll
			 * list instead of the ctlr buffers semaphore.
			 */
			break;
		case K_POLL_STATE_FIFO_DATA_AVAILABLE:
			if (ev->tag == BT_EVENT_CMD_TX) {
				send_cmd();
			} else if (IS_ENABLED(CONFIG_BT_CONN) ||
				   IS_ENABLED(CONFIG_BT_ISO)) {
				struct bt_conn *conn;

				if (ev->tag == BT_EVENT_CONN_TX_QUEUE) {
					/* Recover the connection owning the
					 * ready TX FIFO.
					 */
					conn = CONTAINER_OF(ev->fifo,
							    struct bt_conn,
							    tx_queue);
					bt_conn_process_tx(conn);
				}
			}
			break;
		case K_POLL_STATE_NOT_READY:
			break;
		default:
			LOG_WRN("Unexpected k_poll event state %u", ev->state);
			break;
		}
	}
}
2745
/* Upper bound on the number of k_poll events the TX thread can wait on,
 * sized from the enabled connection/ISO features.
 */
#if defined(CONFIG_BT_CONN)
#if defined(CONFIG_BT_ISO)
/* command FIFO + conn_change signal + MAX_CONN + ISO_MAX_CHAN */
#define EV_COUNT (2 + CONFIG_BT_MAX_CONN + CONFIG_BT_ISO_MAX_CHAN)
#else
/* command FIFO + conn_change signal + MAX_CONN */
#define EV_COUNT (2 + CONFIG_BT_MAX_CONN)
#endif /* CONFIG_BT_ISO */
#else
#if defined(CONFIG_BT_ISO)
/* command FIFO + conn_change signal + ISO_MAX_CHAN */
#define EV_COUNT (2 + CONFIG_BT_ISO_MAX_CHAN)
#else
/* command FIFO */
#define EV_COUNT 1
#endif /* CONFIG_BT_ISO */
#endif /* CONFIG_BT_CONN */
2763
/* HCI TX thread entry point: multiplexes outgoing HCI commands and
 * per-connection data.
 *
 * Polls the command TX FIFO (always slot 0) plus whatever
 * per-connection/ISO events bt_conn_prepare_events() registers, then
 * processes every ready event. The thread parameters are unused.
 */
static void hci_tx_thread(void *p1, void *p2, void *p3)
{
	static struct k_poll_event events[EV_COUNT] = {
		K_POLL_EVENT_STATIC_INITIALIZER(K_POLL_TYPE_FIFO_DATA_AVAILABLE,
						K_POLL_MODE_NOTIFY_ONLY,
						&bt_dev.cmd_tx_queue,
						BT_EVENT_CMD_TX),
	};

	LOG_DBG("Started");

	while (1) {
		int ev_count, err;

		/* Re-arm the command FIFO event for this iteration */
		events[0].state = K_POLL_STATE_NOT_READY;
		ev_count = 1;

		/* This adds the FIFO per-connection */
		if (IS_ENABLED(CONFIG_BT_CONN) || IS_ENABLED(CONFIG_BT_ISO)) {
			ev_count += bt_conn_prepare_events(&events[1]);
		}

		LOG_DBG("Calling k_poll with %d events", ev_count);

		err = k_poll(events, ev_count, K_FOREVER);
		BT_ASSERT(err == 0);

		process_events(events, ev_count);

		/* Make sure we don't hog the CPU if there's all the time
		 * some ready events.
		 */
		k_yield();
	}
}
2799
2800
read_local_ver_complete(struct net_buf * buf)2801 static void read_local_ver_complete(struct net_buf *buf)
2802 {
2803 struct bt_hci_rp_read_local_version_info *rp = (void *)buf->data;
2804
2805 LOG_DBG("status 0x%02x", rp->status);
2806
2807 bt_dev.hci_version = rp->hci_version;
2808 bt_dev.hci_revision = sys_le16_to_cpu(rp->hci_revision);
2809 bt_dev.lmp_version = rp->lmp_version;
2810 bt_dev.lmp_subversion = sys_le16_to_cpu(rp->lmp_subversion);
2811 bt_dev.manufacturer = sys_le16_to_cpu(rp->manufacturer);
2812 }
2813
read_le_features_complete(struct net_buf * buf)2814 static void read_le_features_complete(struct net_buf *buf)
2815 {
2816 struct bt_hci_rp_le_read_local_features *rp = (void *)buf->data;
2817
2818 LOG_DBG("status 0x%02x", rp->status);
2819
2820 memcpy(bt_dev.le.features, rp->features, sizeof(bt_dev.le.features));
2821 }
2822
2823 #if defined(CONFIG_BT_CONN)
2824 #if !defined(CONFIG_BT_BREDR)
read_buffer_size_complete(struct net_buf * buf)2825 static void read_buffer_size_complete(struct net_buf *buf)
2826 {
2827 struct bt_hci_rp_read_buffer_size *rp = (void *)buf->data;
2828 uint16_t pkts;
2829
2830 LOG_DBG("status 0x%02x", rp->status);
2831
2832 /* If LE-side has buffers we can ignore the BR/EDR values */
2833 if (bt_dev.le.acl_mtu) {
2834 return;
2835 }
2836
2837 bt_dev.le.acl_mtu = sys_le16_to_cpu(rp->acl_max_len);
2838 pkts = sys_le16_to_cpu(rp->acl_max_num);
2839
2840 LOG_DBG("ACL BR/EDR buffers: pkts %u mtu %u", pkts, bt_dev.le.acl_mtu);
2841
2842 k_sem_init(&bt_dev.le.acl_pkts, pkts, pkts);
2843 }
2844 #endif /* !defined(CONFIG_BT_BREDR) */
2845 #endif /* CONFIG_BT_CONN */
2846
le_read_buffer_size_complete(struct net_buf * buf)2847 static void le_read_buffer_size_complete(struct net_buf *buf)
2848 {
2849 struct bt_hci_rp_le_read_buffer_size *rp = (void *)buf->data;
2850
2851 LOG_DBG("status 0x%02x", rp->status);
2852
2853 #if defined(CONFIG_BT_CONN)
2854 uint16_t acl_mtu = sys_le16_to_cpu(rp->le_max_len);
2855
2856 if (!acl_mtu || !rp->le_max_num) {
2857 return;
2858 }
2859
2860 bt_dev.le.acl_mtu = acl_mtu;
2861
2862 LOG_DBG("ACL LE buffers: pkts %u mtu %u", rp->le_max_num, bt_dev.le.acl_mtu);
2863
2864 k_sem_init(&bt_dev.le.acl_pkts, rp->le_max_num, rp->le_max_num);
2865 #endif /* CONFIG_BT_CONN */
2866 }
2867
/* Configure ACL and ISO buffer accounting from the LE Read Buffer Size
 * v2 response. ACL values are applied only when non-zero; missing ISO
 * buffer values are reported as an error.
 */
static void read_buffer_size_v2_complete(struct net_buf *buf)
{
#if defined(CONFIG_BT_ISO)
	struct bt_hci_rp_le_read_buffer_size_v2 *rp = (void *)buf->data;

	LOG_DBG("status %u", rp->status);

#if defined(CONFIG_BT_CONN)
	uint16_t acl_mtu = sys_le16_to_cpu(rp->acl_max_len);

	if (acl_mtu && rp->acl_max_num) {
		bt_dev.le.acl_mtu = acl_mtu;
		LOG_DBG("ACL LE buffers: pkts %u mtu %u", rp->acl_max_num, bt_dev.le.acl_mtu);

		k_sem_init(&bt_dev.le.acl_pkts, rp->acl_max_num, rp->acl_max_num);
	}
#endif /* CONFIG_BT_CONN */

	uint16_t iso_mtu = sys_le16_to_cpu(rp->iso_max_len);

	if (!iso_mtu || !rp->iso_max_num) {
		LOG_ERR("ISO buffer size not set");
		return;
	}

	bt_dev.le.iso_mtu = iso_mtu;

	LOG_DBG("ISO buffers: pkts %u mtu %u", rp->iso_max_num, bt_dev.le.iso_mtu);

	k_sem_init(&bt_dev.le.iso_pkts, rp->iso_max_num, rp->iso_max_num);
	bt_dev.le.iso_limit = rp->iso_max_num;
#endif /* CONFIG_BT_ISO */
}
2901
/* Send the HCI LE Set Host Feature command synchronously.
 *
 * @param bit_number Feature bit position to modify.
 * @param bit_value  New value (0 or 1) for the feature bit.
 *
 * @return 0 on success, -ENOBUFS when no command buffer is available,
 *         or the error from bt_hci_cmd_send_sync().
 */
static int le_set_host_feature(uint8_t bit_number, uint8_t bit_value)
{
	struct net_buf *buf;
	struct bt_hci_cp_le_set_host_feature *cp;

	buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_HOST_FEATURE, sizeof(*cp));
	if (buf == NULL) {
		return -ENOBUFS;
	}

	cp = net_buf_add(buf, sizeof(*cp));
	cp->bit_number = bit_number;
	cp->bit_value = bit_value;

	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_HOST_FEATURE, buf, NULL);
}
2918
read_supported_commands_complete(struct net_buf * buf)2919 static void read_supported_commands_complete(struct net_buf *buf)
2920 {
2921 struct bt_hci_rp_read_supported_commands *rp = (void *)buf->data;
2922
2923 LOG_DBG("status 0x%02x", rp->status);
2924
2925 memcpy(bt_dev.supported_commands, rp->commands,
2926 sizeof(bt_dev.supported_commands));
2927
2928 /* Report additional HCI commands used for ECDH as
2929 * supported if TinyCrypt ECC is used for emulation.
2930 */
2931 if (IS_ENABLED(CONFIG_BT_TINYCRYPT_ECC)) {
2932 bt_hci_ecc_supported_commands(bt_dev.supported_commands);
2933 }
2934 }
2935
read_local_features_complete(struct net_buf * buf)2936 static void read_local_features_complete(struct net_buf *buf)
2937 {
2938 struct bt_hci_rp_read_local_features *rp = (void *)buf->data;
2939
2940 LOG_DBG("status 0x%02x", rp->status);
2941
2942 memcpy(bt_dev.features[0], rp->features, sizeof(bt_dev.features[0]));
2943 }
2944
le_read_supp_states_complete(struct net_buf * buf)2945 static void le_read_supp_states_complete(struct net_buf *buf)
2946 {
2947 struct bt_hci_rp_le_read_supp_states *rp = (void *)buf->data;
2948
2949 LOG_DBG("status 0x%02x", rp->status);
2950
2951 bt_dev.le.states = sys_get_le64(rp->le_states);
2952 }
2953
2954 #if defined(CONFIG_BT_SMP)
le_read_resolving_list_size_complete(struct net_buf * buf)2955 static void le_read_resolving_list_size_complete(struct net_buf *buf)
2956 {
2957 struct bt_hci_rp_le_read_rl_size *rp = (void *)buf->data;
2958
2959 LOG_DBG("Resolving List size %u", rp->rl_size);
2960
2961 bt_dev.le.rl_size = rp->rl_size;
2962 }
2963 #endif /* defined(CONFIG_BT_SMP) */
2964
/* Run the transport-agnostic part of HCI initialization.
 *
 * Optionally resets the controller (unless the driver sets
 * BT_QUIRK_NO_RESET), then reads local features, version information
 * and supported commands, initializes the PRNG when host crypto is
 * enabled, and configures ACL flow control when built in.
 *
 * @return 0 on success, or a negative error from the first failing step.
 */
static int common_init(void)
{
	struct net_buf *rsp;
	int err;

	if (!(bt_dev.drv->quirks & BT_QUIRK_NO_RESET)) {
		/* Send HCI_RESET */
		err = bt_hci_cmd_send_sync(BT_HCI_OP_RESET, NULL, &rsp);
		if (err) {
			return err;
		}
		hci_reset_complete(rsp);
		net_buf_unref(rsp);
	}

	/* Read Local Supported Features */
	err = bt_hci_cmd_send_sync(BT_HCI_OP_READ_LOCAL_FEATURES, NULL, &rsp);
	if (err) {
		return err;
	}
	read_local_features_complete(rsp);
	net_buf_unref(rsp);

	/* Read Local Version Information */
	err = bt_hci_cmd_send_sync(BT_HCI_OP_READ_LOCAL_VERSION_INFO, NULL,
				   &rsp);
	if (err) {
		return err;
	}
	read_local_ver_complete(rsp);
	net_buf_unref(rsp);

	/* Read Local Supported Commands */
	err = bt_hci_cmd_send_sync(BT_HCI_OP_READ_SUPPORTED_COMMANDS, NULL,
				   &rsp);
	if (err) {
		return err;
	}
	read_supported_commands_complete(rsp);
	net_buf_unref(rsp);

	if (IS_ENABLED(CONFIG_BT_HOST_CRYPTO_PRNG)) {
		/* Initialize the PRNG so that it is safe to use it later
		 * on in the initialization process.
		 */
		err = prng_init();
		if (err) {
			return err;
		}
	}

#if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
	err = set_flow_control();
	if (err) {
		return err;
	}
#endif /* CONFIG_BT_HCI_ACL_FLOW_CONTROL */

	return 0;
}
3025
/* Build and send the LE Set Event Mask command, enabling only those LE
 * meta events that the enabled Kconfig options and the controller's
 * reported feature/command sets can actually produce.
 *
 * Must run after the LE features and supported commands have been read
 * (le_init/common_init), since the mask depends on both.
 * Returns 0 on success or a negative errno.
 */
static int le_set_event_mask(void)
{
	struct bt_hci_cp_le_set_event_mask *cp_mask;
	struct net_buf *buf;
	uint64_t mask = 0U;

	/* Set LE event mask */
	buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_EVENT_MASK, sizeof(*cp_mask));
	if (!buf) {
		return -ENOBUFS;
	}

	cp_mask = net_buf_add(buf, sizeof(*cp_mask));

	/* Legacy advertising reports are always enabled. */
	mask |= BT_EVT_MASK_LE_ADVERTISING_REPORT;

	if (IS_ENABLED(CONFIG_BT_EXT_ADV) &&
	    BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features)) {
		mask |= BT_EVT_MASK_LE_ADV_SET_TERMINATED;
		mask |= BT_EVT_MASK_LE_SCAN_REQ_RECEIVED;
		mask |= BT_EVT_MASK_LE_EXT_ADVERTISING_REPORT;
		mask |= BT_EVT_MASK_LE_SCAN_TIMEOUT;
		if (IS_ENABLED(CONFIG_BT_PER_ADV_SYNC)) {
			mask |= BT_EVT_MASK_LE_PER_ADV_SYNC_ESTABLISHED;
			mask |= BT_EVT_MASK_LE_PER_ADVERTISING_REPORT;
			mask |= BT_EVT_MASK_LE_PER_ADV_SYNC_LOST;
			mask |= BT_EVT_MASK_LE_PAST_RECEIVED;
		}
	}

	if (IS_ENABLED(CONFIG_BT_CONN)) {
		if ((IS_ENABLED(CONFIG_BT_SMP) &&
		     BT_FEAT_LE_PRIVACY(bt_dev.le.features)) ||
		    (IS_ENABLED(CONFIG_BT_EXT_ADV) &&
		     BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features))) {
			/* C24:
			 * Mandatory if the LE Controller supports Connection
			 * State and either LE Feature (LL Privacy) or
			 * LE Feature (Extended Advertising) is supported, ...
			 */
			mask |= BT_EVT_MASK_LE_ENH_CONN_COMPLETE;
		} else {
			mask |= BT_EVT_MASK_LE_CONN_COMPLETE;
		}

		mask |= BT_EVT_MASK_LE_CONN_UPDATE_COMPLETE;
		mask |= BT_EVT_MASK_LE_REMOTE_FEAT_COMPLETE;

		if (BT_FEAT_LE_CONN_PARAM_REQ_PROC(bt_dev.le.features)) {
			mask |= BT_EVT_MASK_LE_CONN_PARAM_REQ;
		}

		if (IS_ENABLED(CONFIG_BT_DATA_LEN_UPDATE) &&
		    BT_FEAT_LE_DLE(bt_dev.le.features)) {
			mask |= BT_EVT_MASK_LE_DATA_LEN_CHANGE;
		}

		if (IS_ENABLED(CONFIG_BT_PHY_UPDATE) &&
		    (BT_FEAT_LE_PHY_2M(bt_dev.le.features) ||
		     BT_FEAT_LE_PHY_CODED(bt_dev.le.features))) {
			mask |= BT_EVT_MASK_LE_PHY_UPDATE_COMPLETE;
		}
	}

	if (IS_ENABLED(CONFIG_BT_SMP) &&
	    BT_FEAT_LE_ENCR(bt_dev.le.features)) {
		mask |= BT_EVT_MASK_LE_LTK_REQUEST;
	}

	/*
	 * If "LE Read Local P-256 Public Key" and "LE Generate DH Key" are
	 * supported we need to enable events generated by those commands.
	 */
	if (IS_ENABLED(CONFIG_BT_ECC) &&
	    (BT_CMD_TEST(bt_dev.supported_commands, 34, 1)) &&
	    (BT_CMD_TEST(bt_dev.supported_commands, 34, 2))) {
		mask |= BT_EVT_MASK_LE_P256_PUBLIC_KEY_COMPLETE;
		mask |= BT_EVT_MASK_LE_GENERATE_DHKEY_COMPLETE;
	}

	/*
	 * Enable CIS events only if ISO connections are enabled and controller
	 * support them.
	 */
	if (IS_ENABLED(CONFIG_BT_ISO) &&
	    BT_FEAT_LE_CIS(bt_dev.le.features)) {
		mask |= BT_EVT_MASK_LE_CIS_ESTABLISHED;
		/* CIS Request events only arrive in the peripheral role. */
		if (BT_FEAT_LE_CIS_PERIPHERAL(bt_dev.le.features)) {
			mask |= BT_EVT_MASK_LE_CIS_REQ;
		}
	}

	/* Enable BIS events for broadcaster and/or receiver */
	if (IS_ENABLED(CONFIG_BT_ISO) && BT_FEAT_LE_BIS(bt_dev.le.features)) {
		if (IS_ENABLED(CONFIG_BT_ISO_BROADCASTER) &&
		    BT_FEAT_LE_ISO_BROADCASTER(bt_dev.le.features)) {
			mask |= BT_EVT_MASK_LE_BIG_COMPLETE;
			mask |= BT_EVT_MASK_LE_BIG_TERMINATED;
		}
		if (IS_ENABLED(CONFIG_BT_ISO_SYNC_RECEIVER) &&
		    BT_FEAT_LE_SYNC_RECEIVER(bt_dev.le.features)) {
			mask |= BT_EVT_MASK_LE_BIG_SYNC_ESTABLISHED;
			mask |= BT_EVT_MASK_LE_BIG_SYNC_LOST;
			mask |= BT_EVT_MASK_LE_BIGINFO_ADV_REPORT;
		}
	}

	/* Enable IQ samples report events receiver */
	if (IS_ENABLED(CONFIG_BT_DF_CONNECTIONLESS_CTE_RX)) {
		mask |= BT_EVT_MASK_LE_CONNECTIONLESS_IQ_REPORT;
	}

	if (IS_ENABLED(CONFIG_BT_DF_CONNECTION_CTE_RX)) {
		mask |= BT_EVT_MASK_LE_CONNECTION_IQ_REPORT;
		mask |= BT_EVT_MASK_LE_CTE_REQUEST_FAILED;
	}

	if (IS_ENABLED(CONFIG_BT_PER_ADV_RSP)) {
		mask |= BT_EVT_MASK_LE_PER_ADV_SUBEVENT_DATA_REQ;
		mask |= BT_EVT_MASK_LE_PER_ADV_RESPONSE_REPORT;
	}

	if (IS_ENABLED(CONFIG_BT_PER_ADV_SYNC_RSP)) {
		mask |= BT_EVT_MASK_LE_PER_ADVERTISING_REPORT_V2;
		mask |= BT_EVT_MASK_LE_PER_ADV_SYNC_ESTABLISHED_V2;
		mask |= BT_EVT_MASK_LE_PAST_RECEIVED_V2;
	}

	if (IS_ENABLED(CONFIG_BT_CONN) &&
	    (IS_ENABLED(CONFIG_BT_PER_ADV_RSP) || IS_ENABLED(CONFIG_BT_PER_ADV_SYNC_RSP))) {
		mask |= BT_EVT_MASK_LE_ENH_CONN_COMPLETE_V2;
	}

	sys_put_le64(mask, cp_mask->events);
	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_EVENT_MASK, buf, NULL);
}
3162
le_init_iso(void)3163 static int le_init_iso(void)
3164 {
3165 int err;
3166 struct net_buf *rsp;
3167
3168 if (IS_ENABLED(CONFIG_BT_ISO_UNICAST)) {
3169 /* Set Connected Isochronous Streams - Host support */
3170 err = le_set_host_feature(BT_LE_FEAT_BIT_ISO_CHANNELS, 1);
3171 if (err) {
3172 return err;
3173 }
3174 }
3175
3176 /* Octet 41, bit 5 is read buffer size V2 */
3177 if (BT_CMD_TEST(bt_dev.supported_commands, 41, 5)) {
3178 /* Read ISO Buffer Size V2 */
3179 err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_READ_BUFFER_SIZE_V2,
3180 NULL, &rsp);
3181 if (err) {
3182 return err;
3183 }
3184
3185 read_buffer_size_v2_complete(rsp);
3186
3187 net_buf_unref(rsp);
3188 } else if (IS_ENABLED(CONFIG_BT_CONN)) {
3189 LOG_WRN("Read Buffer Size V2 command is not supported."
3190 "No ISO buffers will be available");
3191
3192 /* Read LE Buffer Size */
3193 err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_READ_BUFFER_SIZE,
3194 NULL, &rsp);
3195 if (err) {
3196 return err;
3197 }
3198
3199 le_read_buffer_size_complete(rsp);
3200
3201 net_buf_unref(rsp);
3202 }
3203
3204 return 0;
3205 }
3206
/* LE-specific controller initialization: verifies LE support, reads the
 * LE feature set, sets up buffers (ISO-aware), enables LE on dual-mode
 * controllers, reads supported states, configures default data length,
 * privacy (RPA timeout, resolving list) and direction finding, then
 * installs the LE event mask.
 *
 * Returns 0 on success, -ENODEV for non-LE controllers, or a negative
 * errno from any HCI exchange.
 */
static int le_init(void)
{
	struct bt_hci_cp_write_le_host_supp *cp_le;
	struct net_buf *buf, *rsp;
	int err;

	/* For now we only support LE capable controllers */
	if (!BT_FEAT_LE(bt_dev.features)) {
		LOG_ERR("Non-LE capable controller detected!");
		return -ENODEV;
	}

	/* Read Low Energy Supported Features */
	err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_READ_LOCAL_FEATURES, NULL,
				   &rsp);
	if (err) {
		return err;
	}

	read_le_features_complete(rsp);
	net_buf_unref(rsp);

	if (IS_ENABLED(CONFIG_BT_ISO) &&
	    BT_FEAT_LE_ISO(bt_dev.le.features)) {
		/* ISO path also takes care of reading buffer sizes. */
		err = le_init_iso();
		if (err) {
			return err;
		}
	} else if (IS_ENABLED(CONFIG_BT_CONN)) {
		/* Read LE Buffer Size */
		err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_READ_BUFFER_SIZE,
					   NULL, &rsp);
		if (err) {
			return err;
		}

		le_read_buffer_size_complete(rsp);

		net_buf_unref(rsp);
	}

	if (BT_FEAT_BREDR(bt_dev.features)) {
		buf = bt_hci_cmd_create(BT_HCI_OP_LE_WRITE_LE_HOST_SUPP,
					sizeof(*cp_le));
		if (!buf) {
			return -ENOBUFS;
		}

		cp_le = net_buf_add(buf, sizeof(*cp_le));

		/* Explicitly enable LE for dual-mode controllers */
		cp_le->le = 0x01;
		cp_le->simul = 0x00;
		err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_WRITE_LE_HOST_SUPP, buf,
					   NULL);
		if (err) {
			return err;
		}
	}

	/* Read LE Supported States */
	if (BT_CMD_LE_STATES(bt_dev.supported_commands)) {
		err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_READ_SUPP_STATES, NULL,
					   &rsp);
		if (err) {
			return err;
		}

		le_read_supp_states_complete(rsp);
		net_buf_unref(rsp);
	}

	/* Write the preferred default TX data length if the controller
	 * supports the Data Length Extension and auto-update is enabled.
	 */
	if (IS_ENABLED(CONFIG_BT_CONN) &&
	    IS_ENABLED(CONFIG_BT_DATA_LEN_UPDATE) &&
	    IS_ENABLED(CONFIG_BT_AUTO_DATA_LEN_UPDATE) &&
	    BT_FEAT_LE_DLE(bt_dev.le.features)) {
		struct bt_hci_cp_le_write_default_data_len *cp;
		uint16_t tx_octets, tx_time;

		err = hci_le_read_max_data_len(&tx_octets, &tx_time);
		if (err) {
			return err;
		}

		buf = bt_hci_cmd_create(BT_HCI_OP_LE_WRITE_DEFAULT_DATA_LEN,
					sizeof(*cp));
		if (!buf) {
			return -ENOBUFS;
		}

		cp = net_buf_add(buf, sizeof(*cp));
		cp->max_tx_octets = sys_cpu_to_le16(tx_octets);
		cp->max_tx_time = sys_cpu_to_le16(tx_time);

		err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_WRITE_DEFAULT_DATA_LEN,
					   buf, NULL);
		if (err) {
			return err;
		}
	}

#if defined(CONFIG_BT_SMP)
	if (BT_FEAT_LE_PRIVACY(bt_dev.le.features)) {
#if defined(CONFIG_BT_PRIVACY)
		struct bt_hci_cp_le_set_rpa_timeout *cp;

		buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_RPA_TIMEOUT,
					sizeof(*cp));
		if (!buf) {
			return -ENOBUFS;
		}

		cp = net_buf_add(buf, sizeof(*cp));
		cp->rpa_timeout = sys_cpu_to_le16(bt_dev.rpa_timeout);
		err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_RPA_TIMEOUT, buf,
					   NULL);
		if (err) {
			return err;
		}
#endif /* defined(CONFIG_BT_PRIVACY) */

		err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_READ_RL_SIZE, NULL,
					   &rsp);
		if (err) {
			return err;
		}
		le_read_resolving_list_size_complete(rsp);
		net_buf_unref(rsp);
	}
#endif

#if defined(CONFIG_BT_DF)
	if (BT_FEAT_LE_CONNECTIONLESS_CTE_TX(bt_dev.le.features) ||
	    BT_FEAT_LE_CONNECTIONLESS_CTE_RX(bt_dev.le.features) ||
	    BT_FEAT_LE_RX_CTE(bt_dev.le.features)) {
		err = le_df_init();
		if (err) {
			return err;
		}
	}
#endif /* CONFIG_BT_DF */

	return le_set_event_mask();
}
3351
#if !defined(CONFIG_BT_BREDR)
/* Fallback used when the host is built without BR/EDR support but the
 * controller is dual-mode: if the LE Read Buffer Size reported zero ACL
 * buffers, fetch the shared BR/EDR buffer parameters instead so ACL data
 * can still flow.
 *
 * Returns 0 on success or a negative errno.
 */
static int bt_br_init(void)
{
#if defined(CONFIG_BT_CONN)
	struct net_buf *rsp;
	int err;

	/* LE buffers already available; nothing to do. */
	if (bt_dev.le.acl_mtu) {
		return 0;
	}

	/* Use BR/EDR buffer size if LE reports zero buffers */
	err = bt_hci_cmd_send_sync(BT_HCI_OP_READ_BUFFER_SIZE, NULL, &rsp);
	if (err) {
		return err;
	}

	read_buffer_size_complete(rsp);
	net_buf_unref(rsp);
#endif /* CONFIG_BT_CONN */

	return 0;
}
#endif /* !defined(CONFIG_BT_BREDR) */
3376
/* Build and send the classic (page-0) Set Event Mask command, enabling
 * BR/EDR events when built with BR/EDR support plus the common error,
 * connection and encryption events; LE meta events are always enabled
 * and further filtered by le_set_event_mask().
 *
 * Returns 0 on success or a negative errno.
 */
static int set_event_mask(void)
{
	struct bt_hci_cp_set_event_mask *ev;
	struct net_buf *buf;
	uint64_t mask = 0U;

	buf = bt_hci_cmd_create(BT_HCI_OP_SET_EVENT_MASK, sizeof(*ev));
	if (!buf) {
		return -ENOBUFS;
	}

	ev = net_buf_add(buf, sizeof(*ev));

	if (IS_ENABLED(CONFIG_BT_BREDR)) {
		/* Since we require LE support, we can count on a
		 * Bluetooth 4.0 feature set
		 */
		mask |= BT_EVT_MASK_INQUIRY_COMPLETE;
		mask |= BT_EVT_MASK_CONN_COMPLETE;
		mask |= BT_EVT_MASK_CONN_REQUEST;
		mask |= BT_EVT_MASK_AUTH_COMPLETE;
		mask |= BT_EVT_MASK_REMOTE_NAME_REQ_COMPLETE;
		mask |= BT_EVT_MASK_REMOTE_FEATURES;
		mask |= BT_EVT_MASK_ROLE_CHANGE;
		mask |= BT_EVT_MASK_PIN_CODE_REQ;
		mask |= BT_EVT_MASK_LINK_KEY_REQ;
		mask |= BT_EVT_MASK_LINK_KEY_NOTIFY;
		mask |= BT_EVT_MASK_INQUIRY_RESULT_WITH_RSSI;
		mask |= BT_EVT_MASK_REMOTE_EXT_FEATURES;
		mask |= BT_EVT_MASK_SYNC_CONN_COMPLETE;
		mask |= BT_EVT_MASK_EXTENDED_INQUIRY_RESULT;
		mask |= BT_EVT_MASK_IO_CAPA_REQ;
		mask |= BT_EVT_MASK_IO_CAPA_RESP;
		mask |= BT_EVT_MASK_USER_CONFIRM_REQ;
		mask |= BT_EVT_MASK_USER_PASSKEY_REQ;
		mask |= BT_EVT_MASK_SSP_COMPLETE;
		mask |= BT_EVT_MASK_USER_PASSKEY_NOTIFY;
	}

	mask |= BT_EVT_MASK_HARDWARE_ERROR;
	mask |= BT_EVT_MASK_DATA_BUFFER_OVERFLOW;
	mask |= BT_EVT_MASK_LE_META_EVENT;

	if (IS_ENABLED(CONFIG_BT_CONN)) {
		mask |= BT_EVT_MASK_DISCONN_COMPLETE;
		mask |= BT_EVT_MASK_REMOTE_VERSION_INFO;
	}

	if (IS_ENABLED(CONFIG_BT_SMP) &&
	    BT_FEAT_LE_ENCR(bt_dev.le.features)) {
		mask |= BT_EVT_MASK_ENCRYPT_CHANGE;
		mask |= BT_EVT_MASK_ENCRYPT_KEY_REFRESH_COMPLETE;
	}

	sys_put_le64(mask, ev->events);
	return bt_hci_cmd_send_sync(BT_HCI_OP_SET_EVENT_MASK, buf, NULL);
}
3434
/* Map a Bluetooth Core Specification version number (HCI/LMP version
 * field) to its printable name, or "unknown" for values past 5.4.
 */
static const char *ver_str(uint8_t ver)
{
	static const char * const known[] = {
		"1.0b", "1.1", "1.2", "2.0", "2.1", "3.0", "4.0", "4.1", "4.2",
		"5.0", "5.1", "5.2", "5.3", "5.4"
	};

	return (ver < (sizeof(known) / sizeof(known[0]))) ? known[ver]
							  : "unknown";
}
3448
/* Log the local device's identity address(es), IRK(s) (when sniffer-info
 * logging is enabled) and HCI/LMP version details. Intended for
 * diagnostics at the end of initialization.
 */
static void bt_dev_show_info(void)
{
	int i;

	LOG_INF("Identity%s: %s", bt_dev.id_count > 1 ? "[0]" : "",
		bt_addr_le_str(&bt_dev.id_addr[0]));

	if (IS_ENABLED(CONFIG_BT_LOG_SNIFFER_INFO)) {
#if defined(CONFIG_BT_PRIVACY)
		uint8_t irk[16];

		/* IRK is stored little-endian; log it byte-swapped for
		 * direct use in sniffer tools.
		 */
		sys_memcpy_swap(irk, bt_dev.irk[0], 16);
		LOG_INF("IRK%s: 0x%s", bt_dev.id_count > 1 ? "[0]" : "", bt_hex(irk, 16));
#endif
	}

	/* Remaining identities (index 0 was logged above). */
	for (i = 1; i < bt_dev.id_count; i++) {
		LOG_INF("Identity[%d]: %s", i, bt_addr_le_str(&bt_dev.id_addr[i]));

		if (IS_ENABLED(CONFIG_BT_LOG_SNIFFER_INFO)) {
#if defined(CONFIG_BT_PRIVACY)
			uint8_t irk[16];

			sys_memcpy_swap(irk, bt_dev.irk[i], 16);
			LOG_INF("IRK[%d]: 0x%s", i, bt_hex(irk, 16));
#endif
		}
	}

	if (IS_ENABLED(CONFIG_BT_SMP) &&
	    IS_ENABLED(CONFIG_BT_LOG_SNIFFER_INFO)) {
		bt_keys_foreach_type(BT_KEYS_ALL, bt_keys_show_sniffer_info, NULL);
	}

	LOG_INF("HCI: version %s (0x%02x) revision 0x%04x, manufacturer 0x%04x",
		ver_str(bt_dev.hci_version), bt_dev.hci_version, bt_dev.hci_revision,
		bt_dev.manufacturer);
	LOG_INF("LMP: version %s (0x%02x) subver 0x%04x", ver_str(bt_dev.lmp_version),
		bt_dev.lmp_version, bt_dev.lmp_subversion);
}
3489
3490 #if defined(CONFIG_BT_HCI_VS_EXT)
/* Map a vendor-specific hardware platform identifier to a printable
 * vendor name, or "unknown" for unrecognized values.
 */
static const char *vs_hw_platform(uint16_t platform)
{
	static const char * const plat_str[] = {
		"reserved", "Intel Corporation", "Nordic Semiconductor",
		"NXP Semiconductors" };

	return (platform < (sizeof(plat_str) / sizeof(plat_str[0])))
		       ? plat_str[platform]
		       : "unknown";
}
3503
vs_hw_variant(uint16_t platform,uint16_t variant)3504 static const char *vs_hw_variant(uint16_t platform, uint16_t variant)
3505 {
3506 static const char * const nordic_str[] = {
3507 "reserved", "nRF51x", "nRF52x", "nRF53x"
3508 };
3509
3510 if (platform != BT_HCI_VS_HW_PLAT_NORDIC) {
3511 return "unknown";
3512 }
3513
3514 if (variant < ARRAY_SIZE(nordic_str)) {
3515 return nordic_str[variant];
3516 }
3517
3518 return "unknown";
3519 }
3520
/* Map a vendor-specific firmware variant identifier to a printable
 * description, or "unknown" for unrecognized values.
 */
static const char *vs_fw_variant(uint8_t variant)
{
	static const char * const var_str[] = {
		"Standard Bluetooth controller",
		"Vendor specific controller",
		"Firmware loader",
		"Rescue image",
	};

	return (variant < (sizeof(var_str) / sizeof(var_str[0])))
		       ? var_str[variant]
		       : "unknown";
}
3536
/* Probe for Zephyr vendor-specific HCI extensions and cache the vendor
 * command and feature bitmasks in bt_dev. Failures are logged and the
 * function returns quietly: vendor extensions are strictly optional.
 */
static void hci_vs_init(void)
{
	union {
		struct bt_hci_rp_vs_read_version_info *info;
		struct bt_hci_rp_vs_read_supported_commands *cmds;
		struct bt_hci_rp_vs_read_supported_features *feat;
	} rp;
	struct net_buf *rsp;
	int err;

	/* If heuristics is enabled, try to guess HCI VS support by looking
	 * at the HCI version and identity address. We haven't set any addresses
	 * at this point. So we need to read the public address.
	 */
	if (IS_ENABLED(CONFIG_BT_HCI_VS_EXT_DETECT)) {
		bt_addr_le_t addr;

		if ((bt_dev.hci_version < BT_HCI_VERSION_5_0) ||
		    bt_id_read_public_addr(&addr)) {
			LOG_WRN("Controller doesn't seem to support "
				"Zephyr vendor HCI");
			return;
		}
	}

	err = bt_hci_cmd_send_sync(BT_HCI_OP_VS_READ_VERSION_INFO, NULL, &rsp);
	if (err) {
		LOG_WRN("Vendor HCI extensions not available");
		return;
	}

	/* With detection enabled, treat a wrong-sized response as "not a
	 * Zephyr controller" rather than trusting its contents.
	 */
	if (IS_ENABLED(CONFIG_BT_HCI_VS_EXT_DETECT) &&
	    rsp->len != sizeof(struct bt_hci_rp_vs_read_version_info)) {
		LOG_WRN("Invalid Vendor HCI extensions");
		net_buf_unref(rsp);
		return;
	}

	rp.info = (void *)rsp->data;
	LOG_INF("HW Platform: %s (0x%04x)", vs_hw_platform(sys_le16_to_cpu(rp.info->hw_platform)),
		sys_le16_to_cpu(rp.info->hw_platform));
	LOG_INF("HW Variant: %s (0x%04x)",
		vs_hw_variant(sys_le16_to_cpu(rp.info->hw_platform),
			      sys_le16_to_cpu(rp.info->hw_variant)),
		sys_le16_to_cpu(rp.info->hw_variant));
	LOG_INF("Firmware: %s (0x%02x) Version %u.%u Build %u", vs_fw_variant(rp.info->fw_variant),
		rp.info->fw_variant, rp.info->fw_version, sys_le16_to_cpu(rp.info->fw_revision),
		sys_le32_to_cpu(rp.info->fw_build));

	net_buf_unref(rsp);

	err = bt_hci_cmd_send_sync(BT_HCI_OP_VS_READ_SUPPORTED_COMMANDS,
				   NULL, &rsp);
	if (err) {
		LOG_WRN("Failed to read supported vendor commands");
		return;
	}

	if (IS_ENABLED(CONFIG_BT_HCI_VS_EXT_DETECT) &&
	    rsp->len != sizeof(struct bt_hci_rp_vs_read_supported_commands)) {
		LOG_WRN("Invalid Vendor HCI extensions");
		net_buf_unref(rsp);
		return;
	}

	rp.cmds = (void *)rsp->data;
	memcpy(bt_dev.vs_commands, rp.cmds->commands, BT_DEV_VS_CMDS_MAX);
	net_buf_unref(rsp);

	/* Only query vendor features if the command bitmask says the
	 * controller implements the Read Supported Features VS command.
	 */
	if (BT_VS_CMD_SUP_FEAT(bt_dev.vs_commands)) {
		err = bt_hci_cmd_send_sync(BT_HCI_OP_VS_READ_SUPPORTED_FEATURES,
					   NULL, &rsp);
		if (err) {
			LOG_WRN("Failed to read supported vendor features");
			return;
		}

		if (IS_ENABLED(CONFIG_BT_HCI_VS_EXT_DETECT) &&
		    rsp->len !=
		    sizeof(struct bt_hci_rp_vs_read_supported_features)) {
			LOG_WRN("Invalid Vendor HCI extensions");
			net_buf_unref(rsp);
			return;
		}

		rp.feat = (void *)rsp->data;
		memcpy(bt_dev.vs_features, rp.feat->features,
		       BT_DEV_VS_FEAT_MAX);
		net_buf_unref(rsp);
	}
}
3628 #endif /* CONFIG_BT_HCI_VS_EXT */
3629
/* Top-level HCI initialization sequence: optional driver setup hook,
 * common (transport-agnostic) init, LE init, BR/EDR init for dual-mode
 * controllers, classic event mask, vendor extensions and identity setup.
 *
 * Returns 0 on success or a negative errno from the first failing step.
 */
static int hci_init(void)
{
	int err;
#if defined(CONFIG_BT_HCI_SETUP)
	if (bt_dev.drv->setup) {
		err = bt_dev.drv->setup();
		if (err) {
			return err;
		}
	}
#endif /* defined(CONFIG_BT_HCI_SETUP) */

	err = common_init();
	if (err) {
		return err;
	}

	err = le_init();
	if (err) {
		return err;
	}

	if (BT_FEAT_BREDR(bt_dev.features)) {
		err = bt_br_init();
		if (err) {
			return err;
		}
	} else if (IS_ENABLED(CONFIG_BT_BREDR)) {
		/* Host built with BR/EDR but controller is LE-only. */
		LOG_ERR("Non-BR/EDR controller detected");
		return -EIO;
	}
#if defined(CONFIG_BT_CONN)
	/* NOTE(review): reached when neither controller nor host supports
	 * BR/EDR; acl_mtu should have been set by LE buffer-size init, so a
	 * zero value means no ACL buffers were reported at all.
	 */
	else if (!bt_dev.le.acl_mtu) {
		LOG_ERR("ACL BR/EDR buffers not initialized");
		return -EIO;
	}
#endif

	err = set_event_mask();
	if (err) {
		return err;
	}

#if defined(CONFIG_BT_HCI_VS_EXT)
	hci_vs_init();
#endif
	err = bt_id_init();
	if (err) {
		return err;
	}

	return 0;
}
3683
bt_send(struct net_buf * buf)3684 int bt_send(struct net_buf *buf)
3685 {
3686 LOG_DBG("buf %p len %u type %u", buf, buf->len, bt_buf_get_type(buf));
3687
3688 bt_monitor_send(bt_monitor_opcode(buf), buf->data, buf->len);
3689
3690 if (IS_ENABLED(CONFIG_BT_TINYCRYPT_ECC)) {
3691 return bt_hci_ecc_send(buf);
3692 }
3693
3694 return bt_dev.drv->send(buf);
3695 }
3696
/* Events that must be handled in priority (driver/ISR-adjacent) context
 * rather than deferred to the RX work queue: command flow control,
 * connection teardown and TX credit accounting. Dispatched by
 * hci_event_prio() below.
 */
static const struct event_handler prio_events[] = {
	EVENT_HANDLER(BT_HCI_EVT_CMD_COMPLETE, hci_cmd_complete,
		      sizeof(struct bt_hci_evt_cmd_complete)),
	EVENT_HANDLER(BT_HCI_EVT_CMD_STATUS, hci_cmd_status,
		      sizeof(struct bt_hci_evt_cmd_status)),
#if defined(CONFIG_BT_CONN)
	EVENT_HANDLER(BT_HCI_EVT_DATA_BUF_OVERFLOW,
		      hci_data_buf_overflow,
		      sizeof(struct bt_hci_evt_data_buf_overflow)),
	EVENT_HANDLER(BT_HCI_EVT_DISCONN_COMPLETE, hci_disconn_complete_prio,
		      sizeof(struct bt_hci_evt_disconn_complete)),
#endif /* CONFIG_BT_CONN */
#if defined(CONFIG_BT_CONN_TX)
	EVENT_HANDLER(BT_HCI_EVT_NUM_COMPLETED_PACKETS,
		      hci_num_completed_packets,
		      sizeof(struct bt_hci_evt_num_completed_packets)),
#endif /* CONFIG_BT_CONN_TX */
};
3715
/* Dispatch a priority HCI event to the prio_events table.
 *
 * Consumes the buffer unless the event also carries the RECV flag, in
 * which case the parse state is restored so the normal (work-queue)
 * event path can process the same buffer again.
 */
void hci_event_prio(struct net_buf *buf)
{
	struct net_buf_simple_state state;
	struct bt_hci_evt_hdr *hdr;
	uint8_t evt_flags;

	/* Save so the header pull below can be undone for re-processing. */
	net_buf_simple_save(&buf->b, &state);

	if (buf->len < sizeof(*hdr)) {
		LOG_ERR("Invalid HCI event size (%u)", buf->len);
		net_buf_unref(buf);
		return;
	}

	hdr = net_buf_pull_mem(buf, sizeof(*hdr));
	evt_flags = bt_hci_evt_get_flags(hdr->evt);
	/* Callers must only route events flagged for priority handling. */
	BT_ASSERT(evt_flags & BT_HCI_EVT_FLAG_RECV_PRIO);

	handle_event(hdr->evt, buf, prio_events, ARRAY_SIZE(prio_events));

	if (evt_flags & BT_HCI_EVT_FLAG_RECV) {
		net_buf_simple_restore(&buf->b, &state);
	} else {
		net_buf_unref(buf);
	}
}
3742
#if !defined(CONFIG_BT_RECV_BLOCKING)
/* Enqueue an incoming buffer on the RX queue and (re)submit the RX work
 * item to the configured work queue (system or dedicated BT queue).
 * Submission failures are logged only; the buffer stays queued.
 */
static void rx_queue_put(struct net_buf *buf)
{
	net_buf_slist_put(&bt_dev.rx_queue, buf);

#if defined(CONFIG_BT_RECV_WORKQ_SYS)
	const int err = k_work_submit(&rx_work);
#elif defined(CONFIG_BT_RECV_WORKQ_BT)
	const int err = k_work_submit_to_queue(&bt_workq, &rx_work);
#endif /* CONFIG_BT_RECV_WORKQ_SYS */
	if (err < 0) {
		LOG_ERR("Could not submit rx_work: %d", err);
	}
}
#endif /* !CONFIG_BT_RECV_BLOCKING */
3758
/* Driver entry point for received HCI packets.
 *
 * In blocking mode the packet is processed inline; otherwise it is
 * queued for the RX work handler, except that priority events are
 * dispatched immediately (and may additionally be queued when flagged
 * for normal processing too). Takes ownership of the buffer.
 *
 * Returns 0 on success, -EINVAL for an unknown buffer type.
 */
int bt_recv(struct net_buf *buf)
{
	bt_monitor_send(bt_monitor_opcode(buf), buf->data, buf->len);

	LOG_DBG("buf %p len %u", buf, buf->len);

	switch (bt_buf_get_type(buf)) {
#if defined(CONFIG_BT_CONN)
	case BT_BUF_ACL_IN:
#if defined(CONFIG_BT_RECV_BLOCKING)
		hci_acl(buf);
#else
		rx_queue_put(buf);
#endif
		return 0;
#endif /* BT_CONN */
	case BT_BUF_EVT:
	{
#if defined(CONFIG_BT_RECV_BLOCKING)
		hci_event(buf);
#else
		struct bt_hci_evt_hdr *hdr = (void *)buf->data;
		uint8_t evt_flags = bt_hci_evt_get_flags(hdr->evt);

		/* Priority events bypass the queue; hci_event_prio()
		 * keeps the buffer alive when RECV is also set.
		 */
		if (evt_flags & BT_HCI_EVT_FLAG_RECV_PRIO) {
			hci_event_prio(buf);
		}

		if (evt_flags & BT_HCI_EVT_FLAG_RECV) {
			rx_queue_put(buf);
		}
#endif
		return 0;

	}
#if defined(CONFIG_BT_ISO)
	case BT_BUF_ISO_IN:
#if defined(CONFIG_BT_RECV_BLOCKING)
		hci_iso(buf);
#else
		rx_queue_put(buf);
#endif
		return 0;
#endif /* CONFIG_BT_ISO */
	default:
		LOG_ERR("Invalid buf type %u", bt_buf_get_type(buf));
		net_buf_unref(buf);
		return -EINVAL;
	}
}
3809
/* Driver entry point for packets that must be handled in priority
 * context. Only HCI events are valid here; the buffer is consumed by
 * hci_event_prio(). Always returns 0.
 */
int bt_recv_prio(struct net_buf *buf)
{
	bt_monitor_send(bt_monitor_opcode(buf), buf->data, buf->len);

	BT_ASSERT(bt_buf_get_type(buf) == BT_BUF_EVT);

	hci_event_prio(buf);

	return 0;
}
3820
bt_hci_driver_register(const struct bt_hci_driver * drv)3821 int bt_hci_driver_register(const struct bt_hci_driver *drv)
3822 {
3823 if (bt_dev.drv) {
3824 return -EALREADY;
3825 }
3826
3827 if (!drv->open || !drv->send) {
3828 return -EINVAL;
3829 }
3830
3831 bt_dev.drv = drv;
3832
3833 LOG_DBG("Registered %s", drv->name ? drv->name : "");
3834
3835 bt_monitor_new_index(BT_MONITOR_TYPE_PRIMARY, drv->bus,
3836 BT_ADDR_ANY, drv->name ? drv->name : "bt0");
3837
3838 return 0;
3839 }
3840
/* Mark the stack as ready, kick off scanning if the observer role is
 * enabled, and log device information. Called at the end of bt_init()
 * (or after settings are loaded when CONFIG_BT_SETTINGS is used).
 */
void bt_finalize_init(void)
{
	atomic_set_bit(bt_dev.flags, BT_DEV_READY);

	if (IS_ENABLED(CONFIG_BT_OBSERVER)) {
		bt_le_scan_update(false);
	}

	bt_dev_show_info();
}
3851
/* Host-side initialization run after the HCI driver is open: HCI
 * bring-up, connection and ISO subsystem init, then finalization.
 *
 * With CONFIG_BT_SETTINGS and no identity yet, finalization is deferred
 * until the application calls settings_load(). Returns 0 on success or
 * a negative errno.
 */
static int bt_init(void)
{
	int err;

	err = hci_init();
	if (err) {
		return err;
	}

	if (IS_ENABLED(CONFIG_BT_CONN)) {
		err = bt_conn_init();
		if (err) {
			return err;
		}
	}

	if (IS_ENABLED(CONFIG_BT_ISO)) {
		err = bt_conn_iso_init();
		if (err) {
			return err;
		}
	}

	if (IS_ENABLED(CONFIG_BT_SETTINGS)) {
		if (!bt_dev.id_count) {
			LOG_INF("No ID address. App must call settings_load()");
			return 0;
		}

		/* Identity came from hci_init(), not from settings. */
		atomic_set_bit(bt_dev.flags, BT_DEV_PRESET_ID);
	}

	bt_finalize_init();
	return 0;
}
3887
/* Work handler for asynchronous bt_enable(): runs bt_init() and reports
 * the result through the application's ready callback, if one was given.
 */
static void init_work(struct k_work *work)
{
	int err;

	err = bt_init();
	if (ready_cb) {
		ready_cb(err);
	}
}
3897
#if !defined(CONFIG_BT_RECV_BLOCKING)
/* RX work handler: takes one buffer from the RX queue, dispatches it by
 * type (ACL/ISO/event), then resubmits itself if more buffers remain.
 * Processing a single buffer per invocation keeps the work queue fair
 * for other users.
 */
static void rx_work_handler(struct k_work *work)
{
	int err;

	struct net_buf *buf;

	LOG_DBG("Getting net_buf from queue");
	buf = net_buf_slist_get(&bt_dev.rx_queue);
	if (!buf) {
		return;
	}

	LOG_DBG("buf %p type %u len %u", buf, bt_buf_get_type(buf), buf->len);

	switch (bt_buf_get_type(buf)) {
#if defined(CONFIG_BT_CONN)
	case BT_BUF_ACL_IN:
		hci_acl(buf);
		break;
#endif /* CONFIG_BT_CONN */
#if defined(CONFIG_BT_ISO)
	case BT_BUF_ISO_IN:
		hci_iso(buf);
		break;
#endif /* CONFIG_BT_ISO */
	case BT_BUF_EVT:
		hci_event(buf);
		break;
	default:
		LOG_ERR("Unknown buf type %u", bt_buf_get_type(buf));
		net_buf_unref(buf);
		break;
	}

	/* Schedule the work handler to be executed again if there are
	 * additional items in the queue. This allows for other users of the
	 * work queue to get a chance at running, which wouldn't be possible if
	 * we used a while() loop with a k_yield() statement.
	 */
	if (!sys_slist_is_empty(&bt_dev.rx_queue)) {

#if defined(CONFIG_BT_RECV_WORKQ_SYS)
		err = k_work_submit(&rx_work);
#elif defined(CONFIG_BT_RECV_WORKQ_BT)
		err = k_work_submit_to_queue(&bt_workq, &rx_work);
#endif
		if (err < 0) {
			LOG_ERR("Could not submit rx_work: %d", err);
		}
	}
}
#endif /* !CONFIG_BT_RECV_BLOCKING */
3951
#if defined(CONFIG_BT_TESTING)
/* Test-only accessor: expose the TX thread's ID so test code can
 * inspect or manipulate the thread.
 */
k_tid_t bt_testing_tx_tid_get(void)
{
	return &tx_thread_data;
}
#endif
3958
/* Enable the Bluetooth stack: set up settings/device name, command
 * queue and semaphore, TX thread (and RX work queue when configured),
 * then open the HCI driver.
 *
 * With a NULL callback, initialization completes synchronously via
 * bt_init(); otherwise init runs from the system work queue and the
 * callback is invoked with the result.
 *
 * Returns 0 on success, -ENODEV without a registered driver, -EALREADY
 * if already enabled, or a negative errno from settings/driver setup.
 */
int bt_enable(bt_ready_cb_t cb)
{
	int err;

	if (!bt_dev.drv) {
		LOG_ERR("No HCI driver registered");
		return -ENODEV;
	}

	atomic_clear_bit(bt_dev.flags, BT_DEV_DISABLE);

	if (atomic_test_and_set_bit(bt_dev.flags, BT_DEV_ENABLE)) {
		return -EALREADY;
	}

	if (IS_ENABLED(CONFIG_BT_SETTINGS)) {
		err = bt_settings_init();
		if (err) {
			return err;
		}
	} else if (IS_ENABLED(CONFIG_BT_DEVICE_NAME_DYNAMIC)) {
		err = bt_set_name(CONFIG_BT_DEVICE_NAME);
		if (err) {
			LOG_WRN("Failed to set device name (%d)", err);
		}
	}

	ready_cb = cb;

	/* Give cmd_sem allowing to send first HCI_Reset cmd, the only
	 * exception is if the controller requests to wait for an
	 * initial Command Complete for NOP.
	 */
	if (!IS_ENABLED(CONFIG_BT_WAIT_NOP)) {
		k_sem_init(&bt_dev.ncmd_sem, 1, 1);
	} else {
		k_sem_init(&bt_dev.ncmd_sem, 0, 1);
	}
	k_fifo_init(&bt_dev.cmd_tx_queue);
	/* TX thread */
	k_thread_create(&tx_thread_data, tx_thread_stack,
			K_KERNEL_STACK_SIZEOF(tx_thread_stack),
			hci_tx_thread, NULL, NULL, NULL,
			K_PRIO_COOP(CONFIG_BT_HCI_TX_PRIO),
			0, K_NO_WAIT);
	k_thread_name_set(&tx_thread_data, "BT TX");

#if defined(CONFIG_BT_RECV_WORKQ_BT)
	/* RX thread */
	k_work_queue_init(&bt_workq);
	k_work_queue_start(&bt_workq, rx_thread_stack,
			   CONFIG_BT_RX_STACK_SIZE,
			   K_PRIO_COOP(CONFIG_BT_RX_PRIO), NULL);
	k_thread_name_set(&bt_workq.thread, "BT RX");
#endif

	err = bt_dev.drv->open();
	if (err) {
		LOG_ERR("HCI driver open failed (%d)", err);
		return err;
	}

	bt_monitor_send(BT_MONITOR_OPEN_INDEX, NULL, 0);

	/* No callback: finish initialization synchronously. */
	if (!cb) {
		return bt_init();
	}

	k_work_submit(&bt_dev.init);
	return 0;
}
4030
/* Disable the Bluetooth stack: close the HCI driver, reset cached
 * controller state, tear down advertising/privacy/periodic-sync state,
 * clean up connections and abort the TX (and BT RX) threads.
 *
 * Returns 0 on success, -ENODEV without a driver, -ENOTSUP if the
 * driver has no close(), -EALREADY if a disable is in progress, or the
 * driver's close() error (in which case the stack stays enabled).
 */
int bt_disable(void)
{
	int err;

	if (!bt_dev.drv) {
		LOG_ERR("No HCI driver registered");
		return -ENODEV;
	}

	if (!bt_dev.drv->close) {
		return -ENOTSUP;
	}

	if (atomic_test_and_set_bit(bt_dev.flags, BT_DEV_DISABLE)) {
		return -EALREADY;
	}

	/* Clear BT_DEV_READY before disabling HCI link */
	atomic_clear_bit(bt_dev.flags, BT_DEV_READY);

	err = bt_dev.drv->close();
	if (err) {
		LOG_ERR("HCI driver close failed (%d)", err);

		/* Re-enable BT_DEV_READY to avoid inconsistent stack state */
		atomic_set_bit(bt_dev.flags, BT_DEV_READY);

		return err;
	}

	/* Some functions rely on checking this bitfield */
	memset(bt_dev.supported_commands, 0x00, sizeof(bt_dev.supported_commands));

	/* If random address was set up - clear it */
	bt_addr_le_copy(&bt_dev.random_addr, BT_ADDR_LE_ANY);

#if defined(CONFIG_BT_BROADCASTER)
	bt_adv_reset_adv_pool();
#endif /* CONFIG_BT_BROADCASTER */

#if defined(CONFIG_BT_PRIVACY)
	k_work_cancel_delayable(&bt_dev.rpa_update);
#endif /* CONFIG_BT_PRIVACY */

#if defined(CONFIG_BT_PER_ADV_SYNC)
	bt_periodic_sync_disable();
#endif /* CONFIG_BT_PER_ADV_SYNC */

#if defined(CONFIG_BT_CONN)
	if (IS_ENABLED(CONFIG_BT_SMP)) {
		bt_pub_key_hci_disrupted();
	}
	bt_conn_cleanup_all();
	disconnected_handles_reset();
#endif /* CONFIG_BT_CONN */

	/* Abort TX thread */
	k_thread_abort(&tx_thread_data);

#if defined(CONFIG_BT_RECV_WORKQ_BT)
	/* Abort RX thread */
	k_thread_abort(&bt_workq.thread);
#endif

	bt_monitor_send(BT_MONITOR_CLOSE_INDEX, NULL, 0);

	/* Clear BT_DEV_ENABLE here to prevent early bt_enable() calls, before disable is
	 * completed.
	 */
	atomic_clear_bit(bt_dev.flags, BT_DEV_ENABLE);

	return 0;
}
4104
/* Return true once the stack has completed initialization (the
 * BT_DEV_READY flag set by bt_finalize_init()).
 */
bool bt_is_ready(void)
{
	return atomic_test_bit(bt_dev.flags, BT_DEV_READY);
}
4109
/* Length of the Kconfig-provided default device name, excluding the NUL
 * terminator. Validate at build time that it fits the dynamic name
 * buffer, or the GAP limit of 248 bytes when the name is static.
 */
#define DEVICE_NAME_LEN (sizeof(CONFIG_BT_DEVICE_NAME) - 1)
#if defined(CONFIG_BT_DEVICE_NAME_DYNAMIC)
BUILD_ASSERT(DEVICE_NAME_LEN < CONFIG_BT_DEVICE_NAME_MAX);
#else
BUILD_ASSERT(DEVICE_NAME_LEN < 248);
#endif
4116
/* Set the Bluetooth device name (GAP). Only available with
 * CONFIG_BT_DEVICE_NAME_DYNAMIC; otherwise the name is fixed at build
 * time and -ENOMEM is returned. When settings support is enabled the
 * new name is persisted (failure to persist is logged, not fatal).
 *
 * Returns 0 on success or -ENOMEM if the name is too long (or dynamic
 * names are disabled).
 */
int bt_set_name(const char *name)
{
#if defined(CONFIG_BT_DEVICE_NAME_DYNAMIC)
	size_t len = strlen(name);
	int err;

	if (len > CONFIG_BT_DEVICE_NAME_MAX) {
		return -ENOMEM;
	}

	if (strcmp(bt_dev.name, name) == 0) {
		/* Name unchanged; nothing to do. */
		return 0;
	}

	/* len + 1 also copies the terminating NUL. */
	memcpy(bt_dev.name, name, len + 1);

	if (IS_ENABLED(CONFIG_BT_SETTINGS)) {
		err = bt_settings_store_name(bt_dev.name, len);
		if (err) {
			LOG_WRN("Unable to store name");
		}
	}

	return 0;
#else
	return -ENOMEM;
#endif
}
4146
/* Return the current Bluetooth device name.
 *
 * With CONFIG_BT_DEVICE_NAME_DYNAMIC this is the runtime-settable buffer
 * (see bt_set_name()); otherwise the compile-time configured string.
 */
const char *bt_get_name(void)
{
#if defined(CONFIG_BT_DEVICE_NAME_DYNAMIC)
	return bt_dev.name;
#else
	return CONFIG_BT_DEVICE_NAME;
#endif
}
4155
/* Return the current GAP Appearance value.
 *
 * With CONFIG_BT_DEVICE_APPEARANCE_DYNAMIC this is the runtime-settable
 * value (see bt_set_appearance()); otherwise the Kconfig constant.
 */
uint16_t bt_get_appearance(void)
{
#if defined(CONFIG_BT_DEVICE_APPEARANCE_DYNAMIC)
	return bt_dev.appearance;
#else
	return CONFIG_BT_DEVICE_APPEARANCE;
#endif
}
4164
#if defined(CONFIG_BT_DEVICE_APPEARANCE_DYNAMIC)
/* Set the runtime GAP Appearance value.
 *
 * When settings are enabled the value is persisted first; on a storage
 * failure the in-RAM value is left untouched and the error is returned.
 */
int bt_set_appearance(uint16_t appearance)
{
	if (bt_dev.appearance == appearance) {
		/* Unchanged - nothing to store or update. */
		return 0;
	}

	if (IS_ENABLED(CONFIG_BT_SETTINGS)) {
		int err = bt_settings_store_appearance(&appearance, sizeof(appearance));

		if (err) {
			LOG_ERR("Unable to save setting 'bt/appearance' (err %d).", err);
			return err;
		}
	}

	bt_dev.appearance = appearance;

	return 0;
}
#endif
4183
bt_addr_le_is_bonded(uint8_t id,const bt_addr_le_t * addr)4184 bool bt_addr_le_is_bonded(uint8_t id, const bt_addr_le_t *addr)
4185 {
4186 if (IS_ENABLED(CONFIG_BT_SMP)) {
4187 struct bt_keys *keys = bt_keys_find_addr(id, addr);
4188
4189 /* if there are any keys stored then device is bonded */
4190 return keys && keys->keys;
4191 } else {
4192 return false;
4193 }
4194 }
4195
4196 #if defined(CONFIG_BT_FILTER_ACCEPT_LIST)
bt_le_filter_accept_list_add(const bt_addr_le_t * addr)4197 int bt_le_filter_accept_list_add(const bt_addr_le_t *addr)
4198 {
4199 struct bt_hci_cp_le_add_dev_to_fal *cp;
4200 struct net_buf *buf;
4201 int err;
4202
4203 if (!atomic_test_bit(bt_dev.flags, BT_DEV_READY)) {
4204 return -EAGAIN;
4205 }
4206
4207 buf = bt_hci_cmd_create(BT_HCI_OP_LE_ADD_DEV_TO_FAL, sizeof(*cp));
4208 if (!buf) {
4209 return -ENOBUFS;
4210 }
4211
4212 cp = net_buf_add(buf, sizeof(*cp));
4213 bt_addr_le_copy(&cp->addr, addr);
4214
4215 err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_ADD_DEV_TO_FAL, buf, NULL);
4216 if (err) {
4217 LOG_ERR("Failed to add device to filter accept list");
4218
4219 return err;
4220 }
4221
4222 return 0;
4223 }
4224
bt_le_filter_accept_list_remove(const bt_addr_le_t * addr)4225 int bt_le_filter_accept_list_remove(const bt_addr_le_t *addr)
4226 {
4227 struct bt_hci_cp_le_rem_dev_from_fal *cp;
4228 struct net_buf *buf;
4229 int err;
4230
4231 if (!atomic_test_bit(bt_dev.flags, BT_DEV_READY)) {
4232 return -EAGAIN;
4233 }
4234
4235 buf = bt_hci_cmd_create(BT_HCI_OP_LE_REM_DEV_FROM_FAL, sizeof(*cp));
4236 if (!buf) {
4237 return -ENOBUFS;
4238 }
4239
4240 cp = net_buf_add(buf, sizeof(*cp));
4241 bt_addr_le_copy(&cp->addr, addr);
4242
4243 err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_REM_DEV_FROM_FAL, buf, NULL);
4244 if (err) {
4245 LOG_ERR("Failed to remove device from filter accept list");
4246 return err;
4247 }
4248
4249 return 0;
4250 }
4251
bt_le_filter_accept_list_clear(void)4252 int bt_le_filter_accept_list_clear(void)
4253 {
4254 int err;
4255
4256 if (!atomic_test_bit(bt_dev.flags, BT_DEV_READY)) {
4257 return -EAGAIN;
4258 }
4259
4260 err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_CLEAR_FAL, NULL, NULL);
4261 if (err) {
4262 LOG_ERR("Failed to clear filter accept list");
4263 return err;
4264 }
4265
4266 return 0;
4267 }
4268 #endif /* defined(CONFIG_BT_FILTER_ACCEPT_LIST) */
4269
bt_le_set_chan_map(uint8_t chan_map[5])4270 int bt_le_set_chan_map(uint8_t chan_map[5])
4271 {
4272 struct bt_hci_cp_le_set_host_chan_classif *cp;
4273 struct net_buf *buf;
4274
4275 if (!IS_ENABLED(CONFIG_BT_CENTRAL)) {
4276 return -ENOTSUP;
4277 }
4278
4279 if (!BT_CMD_TEST(bt_dev.supported_commands, 27, 3)) {
4280 LOG_WRN("Set Host Channel Classification command is "
4281 "not supported");
4282 return -ENOTSUP;
4283 }
4284
4285 buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_HOST_CHAN_CLASSIF,
4286 sizeof(*cp));
4287 if (!buf) {
4288 return -ENOBUFS;
4289 }
4290
4291 cp = net_buf_add(buf, sizeof(*cp));
4292
4293 memcpy(&cp->ch_map[0], &chan_map[0], 4);
4294 cp->ch_map[4] = chan_map[4] & BIT_MASK(5);
4295
4296 return bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_HOST_CHAN_CLASSIF,
4297 buf, NULL);
4298 }
4299
#if defined(CONFIG_BT_RPA_TIMEOUT_DYNAMIC)
/* Update the RPA rotation timeout (seconds).
 *
 * Rejects values outside 1..3600. The new value takes effect once the
 * BT_DEV_RPA_TIMEOUT_CHANGED flag is consumed by the RPA machinery.
 */
int bt_le_set_rpa_timeout(uint16_t new_rpa_timeout)
{
	if (new_rpa_timeout < 1 || new_rpa_timeout > 3600) {
		return -EINVAL;
	}

	if (new_rpa_timeout != bt_dev.rpa_timeout) {
		bt_dev.rpa_timeout = new_rpa_timeout;
		atomic_set_bit(bt_dev.flags, BT_DEV_RPA_TIMEOUT_CHANGED);
	}

	return 0;
}
#endif
4317
bt_configure_data_path(uint8_t dir,uint8_t id,uint8_t vs_config_len,const uint8_t * vs_config)4318 int bt_configure_data_path(uint8_t dir, uint8_t id, uint8_t vs_config_len,
4319 const uint8_t *vs_config)
4320 {
4321 struct bt_hci_rp_configure_data_path *rp;
4322 struct bt_hci_cp_configure_data_path *cp;
4323 struct net_buf *rsp;
4324 struct net_buf *buf;
4325 int err;
4326
4327 buf = bt_hci_cmd_create(BT_HCI_OP_CONFIGURE_DATA_PATH, sizeof(*cp) +
4328 vs_config_len);
4329 if (!buf) {
4330 return -ENOBUFS;
4331 }
4332
4333 cp = net_buf_add(buf, sizeof(*cp));
4334 cp->data_path_dir = dir;
4335 cp->data_path_id = id;
4336 cp->vs_config_len = vs_config_len;
4337 if (vs_config_len) {
4338 (void)memcpy(cp->vs_config, vs_config, vs_config_len);
4339 }
4340
4341 err = bt_hci_cmd_send_sync(BT_HCI_OP_CONFIGURE_DATA_PATH, buf, &rsp);
4342 if (err) {
4343 return err;
4344 }
4345
4346 rp = (void *)rsp->data;
4347 if (rp->status) {
4348 err = -EIO;
4349 }
4350 net_buf_unref(rsp);
4351
4352 return err;
4353 }
4354