/*
 * Copyright (c) 2017-2021 Nordic Semiconductor ASA
 * Copyright (c) 2015-2016 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#include <zephyr/autoconf.h>
#include <zephyr/bluetooth/bluetooth.h>
#include <zephyr/bluetooth/conn.h>
#include <zephyr/bluetooth/gap.h>
#include <zephyr/bluetooth/hci_types.h>
#include <zephyr/bluetooth/iso.h>
#include <zephyr/bluetooth/buf.h>
#include <zephyr/bluetooth/direction.h>
#include <zephyr/bluetooth/addr.h>
#include <zephyr/bluetooth/hci.h>
#include <zephyr/bluetooth/hci_vs.h>
#include <zephyr/kernel.h>
#include <zephyr/net_buf.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/sys/check.h>
#include <zephyr/sys/slist.h>
#include <zephyr/sys/util.h>
#include <zephyr/sys/util_macro.h>
#include <zephyr/toolchain.h>

#include <sys/types.h>

#include "addr_internal.h"
#include "common/bt_str.h"
#include "conn_internal.h"
#include "direction_internal.h"
#include "hci_core.h"
#include "id.h"
#include "scan.h"

#define LOG_LEVEL CONFIG_BT_HCI_CORE_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(bt_scan);

struct scanner_state {
	ATOMIC_DEFINE(scan_flags, BT_LE_SCAN_USER_NUM_FLAGS);
	struct bt_le_scan_param explicit_scan_param;
	struct bt_le_scan_param used_scan_param;
	struct k_mutex scan_update_mutex;
	struct k_mutex scan_explicit_params_mutex;
};

enum scan_action {
	SCAN_ACTION_NONE,
	SCAN_ACTION_START,
	SCAN_ACTION_STOP,
	SCAN_ACTION_UPDATE,
};

static bt_le_scan_cb_t *scan_dev_found_cb;
static sys_slist_t scan_cbs = SYS_SLIST_STATIC_INIT(&scan_cbs);

static struct scanner_state scan_state;

#if defined(CONFIG_BT_EXT_ADV)
/* A buffer used to reassemble advertisement data from the controller. */
NET_BUF_SIMPLE_DEFINE(ext_scan_buf, CONFIG_BT_EXT_SCAN_BUF_SIZE);

struct fragmented_advertiser {
	bt_addr_le_t addr;
	uint8_t sid;
	enum {
		FRAG_ADV_INACTIVE,
		FRAG_ADV_REASSEMBLING,
		FRAG_ADV_DISCARDING,
	} state;
};

static struct fragmented_advertiser reassembling_advertiser;

static bool fragmented_advertisers_equal(const struct fragmented_advertiser *a,
					 const bt_addr_le_t *addr, uint8_t sid)
{
	/* Two advertisers are equal if they are the same adv set from the same device */
	return a->sid == sid && bt_addr_le_eq(&a->addr, addr);
}

/* Sets the address and sid of the advertiser to be reassembled. */
static void init_reassembling_advertiser(const bt_addr_le_t *addr, uint8_t sid)
{
	bt_addr_le_copy(&reassembling_advertiser.addr, addr);
	reassembling_advertiser.sid = sid;
	reassembling_advertiser.state = FRAG_ADV_REASSEMBLING;
}

static void reset_reassembling_advertiser(void)
{
	net_buf_simple_reset(&ext_scan_buf);
	reassembling_advertiser.state = FRAG_ADV_INACTIVE;
}

#if defined(CONFIG_BT_PER_ADV_SYNC)
static struct bt_le_per_adv_sync *get_pending_per_adv_sync(void);
static struct bt_le_per_adv_sync per_adv_sync_pool[CONFIG_BT_PER_ADV_SYNC_MAX];
static sys_slist_t pa_sync_cbs = SYS_SLIST_STATIC_INIT(&pa_sync_cbs);
#endif /* defined(CONFIG_BT_PER_ADV_SYNC) */
#endif /* defined(CONFIG_BT_EXT_ADV) */

void bt_scan_softreset(void)
{
	scan_dev_found_cb = NULL;
#if defined(CONFIG_BT_EXT_ADV)
	reset_reassembling_advertiser();
#endif
}

void bt_scan_reset(void)
{
	memset(&scan_state, 0x0, sizeof(scan_state));
	k_mutex_init(&scan_state.scan_update_mutex);
	k_mutex_init(&scan_state.scan_explicit_params_mutex);
	bt_scan_softreset();
}

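/* Send the HCI LE Set Extended Scan Enable command and mirror the requested
 * state in the BT_DEV_SCANNING flag. A duration of zero means the scan runs
 * until it is explicitly disabled.
 */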
static int cmd_le_set_ext_scan_enable(bool enable, bool filter_duplicates, uint16_t duration)
{
	struct bt_hci_cp_le_set_ext_scan_enable *cp;
	struct bt_hci_cmd_state_set state;
	struct net_buf *buf;
	int err;

	buf = bt_hci_cmd_alloc(K_FOREVER);
	if (!buf) {
		return -ENOBUFS;
	}

	cp = net_buf_add(buf, sizeof(*cp));

	cp->filter_dup = filter_duplicates;
	cp->enable = enable;
	cp->duration = sys_cpu_to_le16(duration);
	cp->period = 0;

	bt_hci_cmd_state_set_init(buf, &state, bt_dev.flags, BT_DEV_SCANNING,
				  enable == BT_HCI_LE_SCAN_ENABLE);

	err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_EXT_SCAN_ENABLE, buf, NULL);
	if (err) {
		return err;
	}

	return 0;
}

static int cmd_le_set_scan_enable_legacy(bool enable, bool filter_duplicates)
{
	struct bt_hci_cp_le_set_scan_enable *cp;
	struct bt_hci_cmd_state_set state;
	struct net_buf *buf;
	int err;

	buf = bt_hci_cmd_alloc(K_FOREVER);
	if (!buf) {
		return -ENOBUFS;
	}

	cp = net_buf_add(buf, sizeof(*cp));

	cp->filter_dup = filter_duplicates;
	cp->enable = enable;

	bt_hci_cmd_state_set_init(buf, &state, bt_dev.flags, BT_DEV_SCANNING,
				  enable == BT_HCI_LE_SCAN_ENABLE);

	err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_SCAN_ENABLE, buf, NULL);
	if (err) {
		return err;
	}

	return 0;
}

static int cmd_le_set_scan_enable(bool enable, bool filter_duplicates)
{
	if (IS_ENABLED(CONFIG_BT_EXT_ADV) && BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features)) {
		return cmd_le_set_ext_scan_enable(enable, filter_duplicates, 0);
	}

	return cmd_le_set_scan_enable_legacy(enable, filter_duplicates);
}

int bt_le_scan_set_enable(uint8_t enable)
{
	return cmd_le_set_scan_enable(enable, scan_state.used_scan_param.options &
					      BT_LE_SCAN_OPT_FILTER_DUPLICATE);
}

static int start_le_scan_ext(struct bt_le_scan_param *scan_param)
{
	struct bt_hci_ext_scan_phy param_1m;
	struct bt_hci_ext_scan_phy param_coded;

	struct bt_hci_ext_scan_phy *phy_1m = NULL;
	struct bt_hci_ext_scan_phy *phy_coded = NULL;

	if (!(scan_param->options & BT_LE_SCAN_OPT_NO_1M)) {
		param_1m.type = scan_param->type;
		param_1m.interval = sys_cpu_to_le16(scan_param->interval);
		param_1m.window = sys_cpu_to_le16(scan_param->window);

		phy_1m = &param_1m;
	}

	if (scan_param->options & BT_LE_SCAN_OPT_CODED) {
		uint16_t interval = scan_param->interval_coded ? scan_param->interval_coded
							       : scan_param->interval;
		uint16_t window =
			scan_param->window_coded ? scan_param->window_coded : scan_param->window;

		param_coded.type = scan_param->type;
		param_coded.interval = sys_cpu_to_le16(interval);
		param_coded.window = sys_cpu_to_le16(window);
		phy_coded = &param_coded;
	}

	struct bt_hci_cp_le_set_ext_scan_param *set_param;
	struct net_buf *buf;
	uint8_t own_addr_type;
	bool active_scan;
	int err;

	active_scan = (phy_1m && phy_1m->type == BT_HCI_LE_SCAN_ACTIVE) ||
		      (phy_coded && phy_coded->type == BT_HCI_LE_SCAN_ACTIVE);

	if (scan_param->timeout > 0) {
		atomic_set_bit(bt_dev.flags, BT_DEV_SCAN_LIMITED);

		/* Allow bt_le_oob_get_local to be called directly before
		 * starting a scan limited by timeout.
		 */
		if (IS_ENABLED(CONFIG_BT_PRIVACY) && !bt_id_rpa_is_new()) {
			atomic_clear_bit(bt_dev.flags, BT_DEV_RPA_VALID);
		}
	}

	err = bt_id_set_scan_own_addr(active_scan, &own_addr_type);
	if (err) {
		return err;
	}

	buf = bt_hci_cmd_alloc(K_FOREVER);
	if (!buf) {
		return -ENOBUFS;
	}

	set_param = net_buf_add(buf, sizeof(*set_param));
	set_param->own_addr_type = own_addr_type;
	set_param->phys = 0;
	set_param->filter_policy = scan_param->options & BT_LE_SCAN_OPT_FILTER_ACCEPT_LIST
					   ? BT_HCI_LE_SCAN_FP_BASIC_FILTER
					   : BT_HCI_LE_SCAN_FP_BASIC_NO_FILTER;

	if (phy_1m) {
		set_param->phys |= BT_HCI_LE_EXT_SCAN_PHY_1M;
		net_buf_add_mem(buf, phy_1m, sizeof(*phy_1m));
	}

	if (phy_coded) {
		set_param->phys |= BT_HCI_LE_EXT_SCAN_PHY_CODED;
		net_buf_add_mem(buf, phy_coded, sizeof(*phy_coded));
	}

	err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_EXT_SCAN_PARAM, buf, NULL);
	if (err) {
		return err;
	}

	err = cmd_le_set_ext_scan_enable(BT_HCI_LE_SCAN_ENABLE,
					 scan_param->options & BT_LE_SCAN_OPT_FILTER_DUPLICATE,
					 scan_param->timeout);
	if (err) {
		return err;
	}

	return 0;
}

static int start_le_scan_legacy(struct bt_le_scan_param *param)
{
	struct bt_hci_cp_le_set_scan_param set_param;
	struct net_buf *buf;
	int err;
	bool active_scan;

	(void)memset(&set_param, 0, sizeof(set_param));

	set_param.scan_type = param->type;

	/* For the remaining parameters, apply the default values from
	 * Core Spec 4.2, Vol 2, Part E, 7.8.10.
	 */
	set_param.interval = sys_cpu_to_le16(param->interval);
	set_param.window = sys_cpu_to_le16(param->window);

	if (IS_ENABLED(CONFIG_BT_FILTER_ACCEPT_LIST) &&
	    param->options & BT_LE_SCAN_OPT_FILTER_ACCEPT_LIST) {
		set_param.filter_policy = BT_HCI_LE_SCAN_FP_BASIC_FILTER;
	} else {
		set_param.filter_policy = BT_HCI_LE_SCAN_FP_BASIC_NO_FILTER;
	}

	active_scan = param->type == BT_HCI_LE_SCAN_ACTIVE;
	err = bt_id_set_scan_own_addr(active_scan, &set_param.addr_type);
	if (err) {
		return err;
	}

	buf = bt_hci_cmd_alloc(K_FOREVER);
	if (!buf) {
		return -ENOBUFS;
	}

	net_buf_add_mem(buf, &set_param, sizeof(set_param));

	err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_SCAN_PARAM, buf, NULL);
	if (err) {
		return err;
	}

	err = cmd_le_set_scan_enable(BT_HCI_LE_SCAN_ENABLE,
				     param->options & BT_LE_SCAN_OPT_FILTER_DUPLICATE);
	if (err) {
		return err;
	}

	return 0;
}

bool bt_le_scan_active_scanner_running(void)
{
	return atomic_test_bit(bt_dev.flags, BT_DEV_SCANNING) &&
	       scan_state.used_scan_param.type == BT_LE_SCAN_TYPE_ACTIVE;
}

static void select_scan_params(struct bt_le_scan_param *scan_param)
{
	/* From high priority to low priority: select parameters */
	/* 1. Priority: explicitly chosen parameters */
	if (atomic_test_bit(scan_state.scan_flags, BT_LE_SCAN_USER_EXPLICIT_SCAN)) {
		memcpy(scan_param, &scan_state.explicit_scan_param, sizeof(*scan_param));
	}
	/* Below this, the scanner module chooses the parameters. */
	/* 2. Priority: reuse parameters from initiator */
	else if (atomic_test_bit(bt_dev.flags, BT_DEV_INITIATING)) {
		*scan_param = (struct bt_le_scan_param){
			.type = BT_LE_SCAN_TYPE_PASSIVE,
			.options = BT_LE_SCAN_OPT_FILTER_DUPLICATE,
			.interval = bt_dev.create_param.interval,
			.window = bt_dev.create_param.window,
			.timeout = 0,
			.interval_coded = bt_dev.create_param.interval_coded,
			.window_coded = bt_dev.create_param.window_coded,
		};
	}
	/* 3. Priority: choose custom parameters */
	else {
		*scan_param = (struct bt_le_scan_param){
			.type = BT_LE_SCAN_TYPE_PASSIVE,
			.options = BT_LE_SCAN_OPT_FILTER_DUPLICATE,
			.interval = CONFIG_BT_BACKGROUND_SCAN_INTERVAL,
			.window = CONFIG_BT_BACKGROUND_SCAN_WINDOW,
			.timeout = 0,
			.interval_coded = 0,
			.window_coded = 0,
		};

		if (BT_FEAT_LE_PHY_CODED(bt_dev.le.features)) {
			scan_param->options |= BT_LE_SCAN_OPT_CODED;
		}

		if (atomic_test_bit(scan_state.scan_flags, BT_LE_SCAN_USER_PER_SYNC) ||
		    atomic_test_bit(scan_state.scan_flags, BT_LE_SCAN_USER_CONN)) {
			scan_param->window = BT_GAP_SCAN_FAST_WINDOW;
			scan_param->interval = BT_GAP_SCAN_FAST_INTERVAL;
		}
	}
}

static int start_scan(struct bt_le_scan_param *scan_param)
{
	if (IS_ENABLED(CONFIG_BT_EXT_ADV) && BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features)) {
		return start_le_scan_ext(scan_param);
	}

	return start_le_scan_legacy(scan_param);
}

static bool is_already_using_same_params(struct bt_le_scan_param *scan_param)
{
	return !memcmp(scan_param, &scan_state.used_scan_param, sizeof(*scan_param));
}

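/* Decide whether the scanner needs to be started, stopped, updated or left
 * as-is, based on the registered scan users and the currently used parameters.
 */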
static enum scan_action get_scan_action(struct bt_le_scan_param *scan_param)
{
	bool is_scanning = atomic_test_bit(bt_dev.flags, BT_DEV_SCANNING);

	/* Check if there is reason to have the scanner running */
	if (atomic_get(scan_state.scan_flags) != 0) {
		if (is_scanning) {
			if (is_already_using_same_params(scan_param)) {
				/* Already scanning with the desired parameters */
				return SCAN_ACTION_NONE;
			} else {
				return SCAN_ACTION_UPDATE;
			}
		} else {
			return SCAN_ACTION_START;
		}
	} else {
		/* Scanner should not run */
		if (is_scanning) {
			return SCAN_ACTION_STOP;
		} else {
			return SCAN_ACTION_NONE;
		}
	}
}

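/* Re-evaluate the scanner state under the update mutex: select the parameters
 * to use, determine the required action and apply it to the controller.
 */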
static int scan_update(void)
{
	int err;

	struct bt_le_scan_param scan_param;

	/* Prevent partial updates of the scanner state. */
	err = k_mutex_lock(&scan_state.scan_update_mutex, K_NO_WAIT);

	if (err) {
		return err;
	}

	select_scan_params(&scan_param);

	enum scan_action action = get_scan_action(&scan_param);

	/* start/stop/update if required and allowed */
	switch (action) {
	case SCAN_ACTION_NONE:
		break;
	case SCAN_ACTION_STOP:
		err = cmd_le_set_scan_enable(BT_HCI_LE_SCAN_DISABLE,
					     BT_HCI_LE_SCAN_FILTER_DUP_DISABLE);
		if (err) {
			LOG_DBG("Could not stop scanner: %d", err);
			break;
		}
		memset(&scan_state.used_scan_param, 0x0, sizeof(scan_state.used_scan_param));
		break;
	case SCAN_ACTION_UPDATE:
		err = cmd_le_set_scan_enable(BT_HCI_LE_SCAN_DISABLE,
					     BT_HCI_LE_SCAN_FILTER_DUP_DISABLE);
		if (err) {
			LOG_DBG("Could not stop scanner to update: %d", err);
			break;
		}
		__fallthrough;
	case SCAN_ACTION_START:
		err = start_scan(&scan_param);
		if (err) {
			LOG_DBG("Could not start scanner: %d", err);
			break;
		}
		memcpy(&scan_state.used_scan_param, &scan_param, sizeof(scan_param));
		break;
	}

	k_mutex_unlock(&scan_state.scan_update_mutex);

	return err;
}

static int scan_check_if_state_allowed(enum bt_le_scan_user flag)
{
	/* check if state is already set */
	if (atomic_test_bit(scan_state.scan_flags, flag)) {
		return -EALREADY;
	}

	if (flag == BT_LE_SCAN_USER_EXPLICIT_SCAN && !BT_LE_STATES_SCAN_INIT(bt_dev.le.states) &&
	    atomic_test_bit(bt_dev.flags, BT_DEV_INITIATING)) {
		return -EPERM;
	}

	return 0;
}

int bt_le_scan_user_add(enum bt_le_scan_user flag)
{
	int err;

	if (flag == BT_LE_SCAN_USER_NONE) {
		/* Only check if the scanner parameters should be updated / the scanner should be
		 * started. This is mainly triggered once connections are established.
		 */
		return scan_update();
	}

	err = scan_check_if_state_allowed(flag);
	if (err) {
		return err;
	}

	atomic_set_bit(scan_state.scan_flags, flag);

	err = scan_update();
	if (err) {
		atomic_clear_bit(scan_state.scan_flags, flag);
	}

	return err;
}

int bt_le_scan_user_remove(enum bt_le_scan_user flag)
{
	if (flag == BT_LE_SCAN_USER_NONE) {
		/* Only check if the scanner parameters should be updated / the scanner should be
		 * started. This is mainly triggered once connections are established.
		 */
	} else {
		atomic_clear_bit(scan_state.scan_flags, flag);
	}

	return scan_update();
}

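/* If a connectable advertisement from a peer we are waiting to connect to is
 * seen, release the scanner's connection user and create the connection.
 */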
static void check_pending_conn(const bt_addr_le_t *id_addr, const bt_addr_le_t *addr,
			       uint8_t adv_props)
{
	struct bt_conn *conn;
	int err;

	/* No connections are allowed during explicit scanning
	 * when the controller does not support concurrent scanning and initiating.
	 */
	if (!BT_LE_STATES_SCAN_INIT(bt_dev.le.states) &&
	    atomic_test_bit(scan_state.scan_flags, BT_LE_SCAN_USER_EXPLICIT_SCAN)) {
		return;
	}

	/* Return if event is not connectable */
	if (!(adv_props & BT_HCI_LE_ADV_EVT_TYPE_CONN)) {
		return;
	}

	conn = bt_conn_lookup_state_le(BT_ID_DEFAULT, id_addr, BT_CONN_SCAN_BEFORE_INITIATING);
	if (!conn) {
		return;
	}

	/* Stop the scanner if there is no other reason to have it running.
	 * Ignore possible failures here, since the user is guaranteed to be removed
	 * and the scanner state is updated once the initiator starts / stops.
	 */
	err = bt_le_scan_user_remove(BT_LE_SCAN_USER_CONN);
	if (err) {
		LOG_DBG("Error while removing conn user from scanner (%d)", err);
	}

	bt_addr_le_copy(&conn->le.resp_addr, addr);
	if (bt_le_create_conn(conn)) {
		goto failed;
	}

	bt_conn_set_state(conn, BT_CONN_INITIATING);
	bt_conn_unref(conn);
	return;

failed:
	conn->err = BT_HCI_ERR_UNSPECIFIED;
	bt_conn_set_state(conn, BT_CONN_DISCONNECTED);
	bt_conn_unref(conn);
	/* Just a best-effort check if the scanner should be started. */
	err = bt_le_scan_user_remove(BT_LE_SCAN_USER_NONE);

	if (err) {
		LOG_WRN("Error while updating the scanner (%d)", err);
	}
}

/* Convert Legacy adv report evt_type field to adv props */
static uint8_t get_adv_props_legacy(uint8_t evt_type)
{
	switch (evt_type) {
	case BT_GAP_ADV_TYPE_ADV_IND:
		return BT_GAP_ADV_PROP_CONNECTABLE | BT_GAP_ADV_PROP_SCANNABLE;

	case BT_GAP_ADV_TYPE_ADV_DIRECT_IND:
		return BT_GAP_ADV_PROP_CONNECTABLE | BT_GAP_ADV_PROP_DIRECTED;

	case BT_GAP_ADV_TYPE_ADV_SCAN_IND:
		return BT_GAP_ADV_PROP_SCANNABLE;

	case BT_GAP_ADV_TYPE_ADV_NONCONN_IND:
		return 0;

	/* In a legacy advertising report we don't know whether the scan
	 * response comes from a connectable advertiser, so don't
	 * set the connectable property bit.
	 */
	case BT_GAP_ADV_TYPE_SCAN_RSP:
		return BT_GAP_ADV_PROP_SCAN_RESPONSE | BT_GAP_ADV_PROP_SCANNABLE;

	default:
		return 0;
	}
}

static void le_adv_recv(bt_addr_le_t *addr, struct bt_le_scan_recv_info *info,
			struct net_buf_simple *buf, uint16_t len)
{
	struct bt_le_scan_cb *listener, *next;
	struct net_buf_simple_state state;
	bt_addr_le_t id_addr;
	bool explicit_scan = atomic_test_bit(scan_state.scan_flags, BT_LE_SCAN_USER_EXPLICIT_SCAN);
	bool conn_scan = atomic_test_bit(scan_state.scan_flags, BT_LE_SCAN_USER_CONN);

	LOG_DBG("%s event %u, len %u, rssi %d dBm", bt_addr_le_str(addr), info->adv_type, len,
		info->rssi);

	if (!IS_ENABLED(CONFIG_BT_PRIVACY) && !IS_ENABLED(CONFIG_BT_SCAN_WITH_IDENTITY) &&
	    explicit_scan && (info->adv_props & BT_HCI_LE_ADV_PROP_DIRECT)) {
		LOG_DBG("Dropped direct adv report");
		return;
	}

	if (bt_addr_le_is_resolved(addr)) {
		bt_addr_le_copy_resolved(&id_addr, addr);
	} else if (addr->type == BT_HCI_PEER_ADDR_ANONYMOUS) {
		bt_addr_le_copy(&id_addr, BT_ADDR_LE_ANY);
	} else {
		bt_addr_le_copy(&id_addr, bt_lookup_id_addr(BT_ID_DEFAULT, addr));
	}

	/* For connection-purpose scanning,
	 * skip app callbacks but allow pending-conn check logic.
	 */
	if (IS_ENABLED(CONFIG_BT_CENTRAL) && !explicit_scan && conn_scan) {
		goto check_pending_conn;
	}

	if (scan_dev_found_cb) {
		net_buf_simple_save(buf, &state);

		buf->len = len;
		scan_dev_found_cb(&id_addr, info->rssi, info->adv_type, buf);

		net_buf_simple_restore(buf, &state);
	}

	info->addr = &id_addr;

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&scan_cbs, listener, next, node) {
		if (listener->recv) {
			net_buf_simple_save(buf, &state);

			buf->len = len;
			listener->recv(info, buf);

			net_buf_simple_restore(buf, &state);
		}
	}

	/* Clear pointer to this stack frame before returning to calling function */
	info->addr = NULL;

check_pending_conn:
	if (IS_ENABLED(CONFIG_BT_CENTRAL)) {
		check_pending_conn(&id_addr, addr, info->adv_props);
	}
}

#if defined(CONFIG_BT_EXT_ADV)
void bt_hci_le_scan_timeout(struct net_buf *buf)
{
	struct bt_le_scan_cb *listener, *next;

	int err = bt_le_scan_user_remove(BT_LE_SCAN_USER_EXPLICIT_SCAN);

	if (err) {
		k_yield();
		err = bt_le_scan_user_remove(BT_LE_SCAN_USER_EXPLICIT_SCAN);
	}

	if (err) {
		LOG_WRN("Could not stop the explicit scanner (%d)", err);
	}

	atomic_clear_bit(bt_dev.flags, BT_DEV_SCAN_LIMITED);
	atomic_clear_bit(bt_dev.flags, BT_DEV_RPA_VALID);

#if defined(CONFIG_BT_SMP)
	bt_id_pending_keys_update();
#endif

	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&scan_cbs, listener, next, node) {
		if (listener->timeout) {
			listener->timeout();
		}
	}
}

/* Convert Extended adv report evt_type field into adv type */
static uint8_t get_adv_type(uint8_t evt_type)
{
	switch (evt_type) {
	case (BT_HCI_LE_ADV_EVT_TYPE_CONN | BT_HCI_LE_ADV_EVT_TYPE_SCAN |
	      BT_HCI_LE_ADV_EVT_TYPE_LEGACY):
		return BT_GAP_ADV_TYPE_ADV_IND;

	case (BT_HCI_LE_ADV_EVT_TYPE_CONN | BT_HCI_LE_ADV_EVT_TYPE_DIRECT |
	      BT_HCI_LE_ADV_EVT_TYPE_LEGACY):
		return BT_GAP_ADV_TYPE_ADV_DIRECT_IND;

	case (BT_HCI_LE_ADV_EVT_TYPE_SCAN | BT_HCI_LE_ADV_EVT_TYPE_LEGACY):
		return BT_GAP_ADV_TYPE_ADV_SCAN_IND;

	case BT_HCI_LE_ADV_EVT_TYPE_LEGACY:
		return BT_GAP_ADV_TYPE_ADV_NONCONN_IND;

	case (BT_HCI_LE_ADV_EVT_TYPE_SCAN_RSP | BT_HCI_LE_ADV_EVT_TYPE_CONN |
	      BT_HCI_LE_ADV_EVT_TYPE_SCAN | BT_HCI_LE_ADV_EVT_TYPE_LEGACY):
	case (BT_HCI_LE_ADV_EVT_TYPE_SCAN_RSP | BT_HCI_LE_ADV_EVT_TYPE_SCAN |
	      BT_HCI_LE_ADV_EVT_TYPE_LEGACY):
		/* Scan response from connectable or non-connectable advertiser.
		 */
		return BT_GAP_ADV_TYPE_SCAN_RSP;

	default:
		return BT_GAP_ADV_TYPE_EXT_ADV;
	}
}

/* Convert Extended adv report PHY to GAP PHY */
static uint8_t get_ext_adv_coding_sel_phy(uint8_t hci_phy)
{
	/* Converts from Extended adv report PHY to BT_GAP_LE_PHY_*
	 * When Advertising Coding Selection (Host Support) is enabled
	 * the controller will return the advertising coding scheme which
	 * can be S=2 or S=8 data coding.
	 */
	switch (hci_phy) {
	case BT_HCI_LE_ADV_EVT_PHY_1M:
		return BT_GAP_LE_PHY_1M;
	case BT_HCI_LE_ADV_EVT_PHY_2M:
		return BT_GAP_LE_PHY_2M;
	case BT_HCI_LE_ADV_EVT_PHY_CODED_S8:
		return BT_GAP_LE_PHY_CODED_S8;
	case BT_HCI_LE_ADV_EVT_PHY_CODED_S2:
		return BT_GAP_LE_PHY_CODED_S2;
	default:
		return 0;
	}
}

/* Convert extended adv report evt_type field to adv props */
static uint16_t get_adv_props_extended(uint16_t evt_type)
{
	/* Converts from BT_HCI_LE_ADV_EVT_TYPE_* to BT_GAP_ADV_PROP_*
	 * The first 4 bits are the same (conn, scan, direct, scan_rsp).
	 * Bit 4 must be flipped as the meaning of 1 is opposite (legacy -> extended)
	 * The rest of the bits are zeroed out.
	 */
	return (evt_type ^ BT_HCI_LE_ADV_EVT_TYPE_LEGACY) & BIT_MASK(5);
}

static void create_ext_adv_info(struct bt_hci_evt_le_ext_advertising_info const *const evt,
				struct bt_le_scan_recv_info *const scan_info)
{
	if (IS_ENABLED(CONFIG_BT_EXT_ADV_CODING_SELECTION) &&
	    BT_FEAT_LE_ADV_CODING_SEL(bt_dev.le.features)) {
		scan_info->primary_phy = get_ext_adv_coding_sel_phy(evt->prim_phy);
		scan_info->secondary_phy = get_ext_adv_coding_sel_phy(evt->sec_phy);
	} else {
		scan_info->primary_phy = bt_get_phy(evt->prim_phy);
		scan_info->secondary_phy = bt_get_phy(evt->sec_phy);
	}

	scan_info->tx_power = evt->tx_power;
	scan_info->rssi = evt->rssi;
	scan_info->sid = evt->sid;
	scan_info->interval = sys_le16_to_cpu(evt->interval);
	scan_info->adv_type = get_adv_type(sys_le16_to_cpu(evt->evt_type));
	scan_info->adv_props = get_adv_props_extended(sys_le16_to_cpu(evt->evt_type));
}

void bt_hci_le_adv_ext_report(struct net_buf *buf)
{
	uint8_t num_reports = net_buf_pull_u8(buf);
	bool explicit_scan = atomic_test_bit(scan_state.scan_flags, BT_LE_SCAN_USER_EXPLICIT_SCAN);
	bool conn_scan = atomic_test_bit(scan_state.scan_flags, BT_LE_SCAN_USER_CONN);

	LOG_DBG("Adv number of reports %u", num_reports);

	while (num_reports--) {
		struct bt_hci_evt_le_ext_advertising_info *evt;
		struct bt_le_scan_recv_info scan_info;
		uint16_t data_status;
		uint16_t evt_type;
		bool is_report_complete;
		bool more_to_come;
		bool is_new_advertiser;

		if (!explicit_scan) {
			/* The application has not requested explicit scan, so it is not expecting
			 * advertising reports. Discard, and reset the reassembler if not inactive.
			 * This is done in the loop as this flag can change between each iteration,
			 * and it is not uncommon that scanning is disabled in the callback called
			 * from le_adv_recv.
			 *
			 * However, if scanning is running for connection purposes,
			 * the report shall still be processed to allow pending connections.
			 */
			if (reassembling_advertiser.state != FRAG_ADV_INACTIVE) {
				reset_reassembling_advertiser();
			}

			if (!conn_scan) {
				break;
			}
		}

		if (buf->len < sizeof(*evt)) {
			LOG_ERR("Unexpected end of buffer");
			break;
		}

		evt = net_buf_pull_mem(buf, sizeof(*evt));
		evt_type = sys_le16_to_cpu(evt->evt_type);
		data_status = BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS(evt_type);
		is_report_complete = data_status == BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_COMPLETE;
		more_to_come = data_status == BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_PARTIAL;

		if (evt->length > buf->len) {
			LOG_WRN("Adv report corrupted (wants %u out of %u)", evt->length, buf->len);

			net_buf_reset(buf);

			if (evt_type & BT_HCI_LE_ADV_EVT_TYPE_LEGACY) {
				return;
			}

			/* Start discarding irrespective of the `more_to_come` flag. We
			 * assume we may have lost a partial adv report in the truncated
			 * data.
			 */
			reassembling_advertiser.state = FRAG_ADV_DISCARDING;

			return;
		}

		if (evt_type & BT_HCI_LE_ADV_EVT_TYPE_LEGACY) {
			/* Legacy advertising reports are complete.
			 * Create event immediately.
			 */
			create_ext_adv_info(evt, &scan_info);
			le_adv_recv(&evt->addr, &scan_info, &buf->b, evt->length);
			goto cont;
		}

		is_new_advertiser = reassembling_advertiser.state == FRAG_ADV_INACTIVE ||
				    !fragmented_advertisers_equal(&reassembling_advertiser,
								  &evt->addr, evt->sid);

		if (is_new_advertiser && is_report_complete) {
			/* Only advertising report from this advertiser.
			 * Create event immediately.
			 */
			create_ext_adv_info(evt, &scan_info);
			le_adv_recv(&evt->addr, &scan_info, &buf->b, evt->length);
			goto cont;
		}

		if (is_new_advertiser && reassembling_advertiser.state == FRAG_ADV_REASSEMBLING) {
			LOG_WRN("Received an incomplete advertising report while reassembling "
				"advertising reports from a different advertiser. The advertising "
				"report is discarded and future scan results may be incomplete. "
				"Interleaving of fragmented advertising reports from different "
				"advertisers is not yet supported.");
			goto cont;
		}

		if (data_status == BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_INCOMPLETE) {
			/* Got HCI_LE_Extended_Advertising_Report: Incomplete, data truncated, no
			 * more to come. This means the Controller is aborting the reassembly. We
			 * discard the partially received report, and the application is not
			 * notified.
			 *
			 * See the Controller's documentation for possible reasons for aborting.
			 * Hint: CONFIG_BT_CTLR_SCAN_DATA_LEN_MAX.
			 */
			LOG_DBG("Discarding incomplete advertisement.");
			reset_reassembling_advertiser();
			goto cont;
		}

		if (is_new_advertiser) {
			/* We are not reassembling reports from an advertiser and
			 * this is the first report from the new advertiser.
			 * Initialize the new advertiser.
			 */
			__ASSERT_NO_MSG(reassembling_advertiser.state == FRAG_ADV_INACTIVE);
			init_reassembling_advertiser(&evt->addr, evt->sid);
		}

		if (evt->length + ext_scan_buf.len > ext_scan_buf.size) {
			/* The report does not fit in the reassembly buffer.
			 * Discard this and future reports from the advertiser.
			 */
			reassembling_advertiser.state = FRAG_ADV_DISCARDING;
		}

		if (reassembling_advertiser.state == FRAG_ADV_DISCARDING) {
			if (!more_to_come) {
				/* We no longer need to keep track of this advertiser as
				 * all the expected data has been received.
				 */
				reset_reassembling_advertiser();
			}
			goto cont;
		}

		net_buf_simple_add_mem(&ext_scan_buf, buf->data, evt->length);
		if (more_to_come) {
			/* The controller will send additional reports to be reassembled */
			continue;
		}

		/* No more data coming from the controller.
		 * Create event.
		 */
		__ASSERT_NO_MSG(is_report_complete);
		create_ext_adv_info(evt, &scan_info);
		le_adv_recv(&evt->addr, &scan_info, &ext_scan_buf, ext_scan_buf.len);

		/* We no longer need to keep track of this advertiser. */
		reset_reassembling_advertiser();

cont:
		net_buf_pull(buf, evt->length);
	}
}

#if defined(CONFIG_BT_PER_ADV_SYNC)
static void per_adv_sync_delete(struct bt_le_per_adv_sync *per_adv_sync)
{
	atomic_clear(per_adv_sync->flags);
}

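/* Allocate a periodic advertising sync object from the static pool and
 * initialize its reassembly buffer, if one is configured.
 */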
static struct bt_le_per_adv_sync *per_adv_sync_new(void)
{
	struct bt_le_per_adv_sync *per_adv_sync = NULL;

	for (int i = 0; i < ARRAY_SIZE(per_adv_sync_pool); i++) {
		if (!atomic_test_bit(per_adv_sync_pool[i].flags, BT_PER_ADV_SYNC_CREATED)) {
			per_adv_sync = &per_adv_sync_pool[i];
			break;
		}
	}

	if (!per_adv_sync) {
		return NULL;
	}

	(void)memset(per_adv_sync, 0, sizeof(*per_adv_sync));
	atomic_set_bit(per_adv_sync->flags, BT_PER_ADV_SYNC_CREATED);

#if CONFIG_BT_PER_ADV_SYNC_BUF_SIZE > 0
	net_buf_simple_init_with_data(&per_adv_sync->reassembly, per_adv_sync->reassembly_data,
				      CONFIG_BT_PER_ADV_SYNC_BUF_SIZE);
	net_buf_simple_reset(&per_adv_sync->reassembly);
#endif /* CONFIG_BT_PER_ADV_SYNC_BUF_SIZE > 0 */

	return per_adv_sync;
}

static struct bt_le_per_adv_sync *get_pending_per_adv_sync(void)
{
	for (int i = 0; i < ARRAY_SIZE(per_adv_sync_pool); i++) {
		if (atomic_test_bit(per_adv_sync_pool[i].flags, BT_PER_ADV_SYNC_SYNCING)) {
			return &per_adv_sync_pool[i];
		}
	}

	return NULL;
}

void bt_periodic_sync_disable(void)
{
	for (size_t i = 0; i < ARRAY_SIZE(per_adv_sync_pool); i++) {
		per_adv_sync_delete(&per_adv_sync_pool[i]);
	}
}

struct bt_le_per_adv_sync *bt_hci_per_adv_sync_lookup_handle(uint16_t handle)
{
	for (int i = 0; i < ARRAY_SIZE(per_adv_sync_pool); i++) {
		if (per_adv_sync_pool[i].handle == handle &&
		    atomic_test_bit(per_adv_sync_pool[i].flags, BT_PER_ADV_SYNC_SYNCED)) {
			return &per_adv_sync_pool[i];
		}
	}

	return NULL;
}

void bt_hci_le_per_adv_report_recv(struct bt_le_per_adv_sync *per_adv_sync,
				   struct net_buf_simple *buf,
				   const struct bt_le_per_adv_sync_recv_info *info)
{
	struct net_buf_simple_state state;
	struct bt_le_per_adv_sync_cb *listener;

	SYS_SLIST_FOR_EACH_CONTAINER(&pa_sync_cbs, listener, node) {
		if (listener->recv) {
			net_buf_simple_save(buf, &state);
			listener->recv(per_adv_sync, info, buf);
			net_buf_simple_restore(buf, &state);
		}
	}
}

#if defined(CONFIG_BT_PER_ADV_SYNC_RSP) && (CONFIG_BT_PER_ADV_SYNC_BUF_SIZE > 0)
static void bt_hci_le_per_adv_report_recv_failure(struct bt_le_per_adv_sync *per_adv_sync,
						  const struct bt_le_per_adv_sync_recv_info *info)
{
	struct bt_le_per_adv_sync_cb *listener;

	SYS_SLIST_FOR_EACH_CONTAINER(&pa_sync_cbs, listener, node) {
		if (listener->recv) {
			listener->recv(per_adv_sync, info, NULL);
		}
	}
}
#endif /* defined(CONFIG_BT_PER_ADV_SYNC_RSP) && (CONFIG_BT_PER_ADV_SYNC_BUF_SIZE > 0) */

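/* Common handler for the v1 and v2 periodic advertising report events: look up
 * the sync object, reassemble fragmented data if needed and forward complete
 * reports to the registered callbacks.
 */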
static void bt_hci_le_per_adv_report_common(struct net_buf *buf)
{
#if defined(CONFIG_BT_PER_ADV_SYNC_RSP)
	struct bt_hci_evt_le_per_advertising_report_v2 *evt;
#else
	struct bt_hci_evt_le_per_advertising_report *evt;
#endif /* defined(CONFIG_BT_PER_ADV_SYNC_RSP) */

	struct bt_le_per_adv_sync *per_adv_sync;
	struct bt_le_per_adv_sync_recv_info info;

	if (buf->len < sizeof(*evt)) {
		LOG_ERR("Unexpected end of buffer");
		return;
	}

	evt = net_buf_pull_mem(buf, sizeof(*evt));

	per_adv_sync = bt_hci_per_adv_sync_lookup_handle(sys_le16_to_cpu(evt->handle));

	if (!per_adv_sync) {
		LOG_ERR("Unknown handle 0x%04X for periodic advertising report",
			sys_le16_to_cpu(evt->handle));
		return;
	}

	if (atomic_test_bit(per_adv_sync->flags, BT_PER_ADV_SYNC_RECV_DISABLED)) {
		LOG_ERR("Received PA adv report when receive disabled");
		return;
	}

	info.tx_power = evt->tx_power;
	info.rssi = evt->rssi;
	info.cte_type = bt_get_df_cte_type(evt->cte_type);
	info.addr = &per_adv_sync->addr;
	info.sid = per_adv_sync->sid;

#if defined(CONFIG_BT_PER_ADV_SYNC_RSP)
	info.periodic_event_counter = sys_le16_to_cpu(evt->periodic_event_counter);
	info.subevent = evt->subevent;
#endif /* CONFIG_BT_PER_ADV_SYNC_RSP */

	if (!per_adv_sync->report_truncated) {
#if CONFIG_BT_PER_ADV_SYNC_BUF_SIZE > 0
		if (net_buf_simple_tailroom(&per_adv_sync->reassembly) < evt->length) {
			/* The buffer is too small for the entire report. Drop it */
			LOG_WRN("Buffer is too small to reassemble the report. "
				"Use CONFIG_BT_PER_ADV_SYNC_BUF_SIZE to change "
				"the buffer size.");

			per_adv_sync->report_truncated = true;
			net_buf_simple_reset(&per_adv_sync->reassembly);
			return;
		}

		if (evt->data_status == BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_COMPLETE) {
			if (per_adv_sync->reassembly.len == 0) {
				/* We have not received any partial data before.
				 * This buffer can be forwarded without an extra copy.
				 */
				bt_hci_le_per_adv_report_recv(per_adv_sync, &buf->b, &info);
			} else {
				net_buf_simple_add_mem(&per_adv_sync->reassembly, buf->data,
						       evt->length);
				bt_hci_le_per_adv_report_recv(per_adv_sync,
							      &per_adv_sync->reassembly, &info);
				net_buf_simple_reset(&per_adv_sync->reassembly);
			}
		} else if (evt->data_status == BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_INCOMPLETE) {
			LOG_DBG("Received incomplete advertising data. "
				"Advertising report dropped.");

			net_buf_simple_reset(&per_adv_sync->reassembly);

		} else if (evt->data_status == BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_PARTIAL) {
			net_buf_simple_add_mem(&per_adv_sync->reassembly, buf->data, evt->length);
#if defined(CONFIG_BT_PER_ADV_SYNC_RSP)
		} else if (evt->data_status == BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_RX_FAILED &&
			   per_adv_sync->num_subevents) {
			bt_hci_le_per_adv_report_recv_failure(per_adv_sync, &info);
#endif /* CONFIG_BT_PER_ADV_SYNC_RSP */
		} else {
			__ASSERT(false, "Invalid data status 0x%02X", evt->data_status);
		}
#else /* CONFIG_BT_PER_ADV_SYNC_BUF_SIZE > 0 */
		if (evt->data_status == BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_COMPLETE) {
			bt_hci_le_per_adv_report_recv(per_adv_sync, &buf->b, &info);
		} else {
			per_adv_sync->report_truncated = true;
		}
#endif /* CONFIG_BT_PER_ADV_SYNC_BUF_SIZE > 0 */
	} else if (evt->data_status == BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_COMPLETE) {
		per_adv_sync->report_truncated = false;
	}
}

void bt_hci_le_per_adv_report(struct net_buf *buf)
{
	if (IS_ENABLED(CONFIG_BT_PER_ADV_SYNC_RSP)) {
		LOG_ERR("The controller shall raise the latest unmasked version of the event");

		return;
	}

	bt_hci_le_per_adv_report_common(buf);
}

static int per_adv_sync_terminate(uint16_t handle)
{
	struct bt_hci_cp_le_per_adv_terminate_sync *cp;
	struct net_buf *buf;

	buf = bt_hci_cmd_alloc(K_FOREVER);
	if (!buf) {
		return -ENOBUFS;
	}

	cp = net_buf_add(buf, sizeof(*cp));
	(void)memset(cp, 0, sizeof(*cp));

	cp->handle = sys_cpu_to_le16(handle);

	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_PER_ADV_TERMINATE_SYNC, buf, NULL);
}

static void per_adv_sync_terminated(struct bt_le_per_adv_sync *per_adv_sync, uint8_t reason)
{
	/* Terminate the PA sync and notify app */
	const struct bt_le_per_adv_sync_term_info term_info = {
		.addr = &per_adv_sync->addr,
		.sid = per_adv_sync->sid,
		.reason = reason,
	};
	struct bt_le_per_adv_sync_cb *listener;

	/* Deleting before callback, so the caller will be able
	 * to restart sync in the callback.
	 */
	per_adv_sync_delete(per_adv_sync);

	SYS_SLIST_FOR_EACH_CONTAINER(&pa_sync_cbs, listener, node) {
		if (listener->term) {
			listener->term(per_adv_sync, &term_info);
		}
	}
}

static void bt_hci_le_per_adv_sync_established_common(struct net_buf *buf)
{
#if defined(CONFIG_BT_PER_ADV_SYNC_RSP)
	struct bt_hci_evt_le_per_adv_sync_established_v2 *evt =
		(struct bt_hci_evt_le_per_adv_sync_established_v2 *)buf->data;
#else
	struct bt_hci_evt_le_per_adv_sync_established *evt =
		(struct bt_hci_evt_le_per_adv_sync_established *)buf->data;
#endif /* defined(CONFIG_BT_PER_ADV_SYNC_RSP) */

	struct bt_le_per_adv_sync_synced_info sync_info;
	struct bt_le_per_adv_sync *pending_per_adv_sync;
	struct bt_le_per_adv_sync_cb *listener;
	bt_addr_le_t id_addr;
	bool unexpected_evt;
	int err;

	pending_per_adv_sync = get_pending_per_adv_sync();

	if (pending_per_adv_sync) {
		atomic_clear_bit(pending_per_adv_sync->flags, BT_PER_ADV_SYNC_SYNCING);
		err = bt_le_scan_user_remove(BT_LE_SCAN_USER_PER_SYNC);

		if (err) {
			LOG_ERR("Could not update scan (%d)", err);
		}
	}

	if (evt->status == BT_HCI_ERR_OP_CANCELLED_BY_HOST) {
		/* Cancelled locally, don't call CB */
		if (pending_per_adv_sync) {
			per_adv_sync_delete(pending_per_adv_sync);
		} else {
			LOG_ERR("Unexpected per adv sync cancelled event");
		}

		return;
	}

	if (bt_addr_le_is_resolved(&evt->adv_addr)) {
		bt_addr_le_copy_resolved(&id_addr, &evt->adv_addr);
	} else {
		bt_addr_le_copy(&id_addr, bt_lookup_id_addr(BT_ID_DEFAULT, &evt->adv_addr));
	}

	if (!pending_per_adv_sync ||
	    (!atomic_test_bit(pending_per_adv_sync->flags, BT_PER_ADV_SYNC_SYNCING_USE_LIST) &&
	     ((pending_per_adv_sync->sid != evt->sid) ||
	      !bt_addr_le_eq(&pending_per_adv_sync->addr, &id_addr)))) {
		LOG_ERR("Unexpected per adv sync established event");
		/* Request terminate of pending periodic advertising in controller */
		per_adv_sync_terminate(sys_le16_to_cpu(evt->handle));

		unexpected_evt = true;
	} else {
		unexpected_evt = false;
	}

	if (unexpected_evt || evt->status != BT_HCI_ERR_SUCCESS) {
		if (pending_per_adv_sync) {
			const uint8_t reason =
				unexpected_evt ? BT_HCI_ERR_UNSPECIFIED : evt->status;

			if (atomic_test_bit(pending_per_adv_sync->flags,
					    BT_PER_ADV_SYNC_SYNCING_USE_LIST)) {
				/* Update the addr and sid for the callback.
				 * Already set if not using the sync list.
				 */
				bt_addr_le_copy(&pending_per_adv_sync->addr, &id_addr);
				pending_per_adv_sync->sid = evt->sid;
			}

			per_adv_sync_terminated(pending_per_adv_sync, reason);
		}
		return;
	}

	pending_per_adv_sync->report_truncated = false;

	atomic_set_bit(pending_per_adv_sync->flags, BT_PER_ADV_SYNC_SYNCED);

	pending_per_adv_sync->handle = sys_le16_to_cpu(evt->handle);
	pending_per_adv_sync->interval = sys_le16_to_cpu(evt->interval);
	pending_per_adv_sync->clock_accuracy = sys_le16_to_cpu(evt->clock_accuracy);
	pending_per_adv_sync->phy = bt_get_phy(evt->phy);

	memset(&sync_info, 0, sizeof(sync_info));
	sync_info.interval = pending_per_adv_sync->interval;
	sync_info.phy = pending_per_adv_sync->phy;

	if (atomic_test_bit(pending_per_adv_sync->flags, BT_PER_ADV_SYNC_SYNCING_USE_LIST)) {
		/* Now we know which address and SID we synchronized to. */
		pending_per_adv_sync->sid = evt->sid;

		if (bt_addr_le_is_resolved(&pending_per_adv_sync->addr)) {
			bt_addr_le_copy_resolved(&pending_per_adv_sync->addr, &id_addr);
		} else {
			bt_addr_le_copy(&pending_per_adv_sync->addr, &id_addr);
		}
	}

	sync_info.addr = &pending_per_adv_sync->addr;
	sync_info.sid = pending_per_adv_sync->sid;
#if defined(CONFIG_BT_PER_ADV_SYNC_RSP)
	sync_info.num_subevents = evt->num_subevents;
	sync_info.subevent_interval = evt->subevent_interval;
	sync_info.response_slot_delay = evt->response_slot_delay;
	sync_info.response_slot_spacing = evt->response_slot_spacing;

	pending_per_adv_sync->num_subevents = evt->num_subevents;
	pending_per_adv_sync->subevent_interval = evt->subevent_interval;
	pending_per_adv_sync->response_slot_delay = evt->response_slot_delay;
	pending_per_adv_sync->response_slot_spacing = evt->response_slot_spacing;
#endif /* CONFIG_BT_PER_ADV_SYNC_RSP */

	sync_info.recv_enabled =
		!atomic_test_bit(pending_per_adv_sync->flags, BT_PER_ADV_SYNC_RECV_DISABLED);

	SYS_SLIST_FOR_EACH_CONTAINER(&pa_sync_cbs, listener, node) {
		if (listener->synced) {
			listener->synced(pending_per_adv_sync, &sync_info);
		}
	}
}

void bt_hci_le_per_adv_sync_established(struct net_buf *buf)
{
	if (IS_ENABLED(CONFIG_BT_PER_ADV_SYNC_RSP)) {
		LOG_ERR("The controller shall raise the latest unmasked version of the event");

		return;
	}

	bt_hci_le_per_adv_sync_established_common(buf);
}

#if defined(CONFIG_BT_PER_ADV_SYNC_RSP)
int bt_le_per_adv_sync_subevent(struct bt_le_per_adv_sync *per_adv_sync,
				struct bt_le_per_adv_sync_subevent_params *params)
{
	struct bt_hci_cp_le_set_pawr_sync_subevent *cp;
	struct net_buf *buf;

	if (params->num_subevents > BT_HCI_PAWR_SUBEVENT_MAX) {
		return -EINVAL;
	}

	buf = bt_hci_cmd_alloc(K_FOREVER);
	if (!buf) {
		return -ENOBUFS;
	}

	cp = net_buf_add(buf, sizeof(*cp));
	(void)memset(cp, 0, sizeof(*cp));
	cp->sync_handle = sys_cpu_to_le16(per_adv_sync->handle);
	cp->periodic_adv_properties = sys_cpu_to_le16(params->properties);
	cp->num_subevents = params->num_subevents;
	net_buf_add_mem(buf, params->subevents, cp->num_subevents);

	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_PER_ADV_SYNC_SUBEVENT, buf, NULL);
}

int bt_le_per_adv_set_response_data(struct bt_le_per_adv_sync *per_adv_sync,
				    const struct bt_le_per_adv_response_params *param,
				    const struct net_buf_simple *data)
{
	struct bt_hci_cp_le_set_pawr_response_data *cp;
	struct net_buf *buf;

	if (per_adv_sync->num_subevents == 0) {
		return -EINVAL;
	}

	if (param->request_subevent >= per_adv_sync->num_subevents) {
		return -EINVAL;
	}

	if (param->response_subevent >= per_adv_sync->num_subevents) {
		return -EINVAL;
	}

	if (data->len > 247) {
		return -EINVAL;
	}

	buf = bt_hci_cmd_alloc(K_FOREVER);
	if (!buf) {
		return -ENOBUFS;
	}

	cp = net_buf_add(buf, sizeof(*cp));
	(void)memset(cp, 0, sizeof(*cp));
	cp->sync_handle = sys_cpu_to_le16(per_adv_sync->handle);
	cp->request_event = sys_cpu_to_le16(param->request_event);
	cp->request_subevent = param->request_subevent;
	cp->response_subevent = param->response_subevent;
	cp->response_slot = param->response_slot;
	cp->response_data_length = data->len;

	net_buf_add_mem(buf, data->data, cp->response_data_length);

	return bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_PER_ADV_RESPONSE_DATA, buf, NULL);
}
#endif /* CONFIG_BT_PER_ADV_SYNC_RSP */

void bt_hci_le_per_adv_sync_lost(struct net_buf *buf)
{
	struct bt_hci_evt_le_per_adv_sync_lost *evt =
		(struct bt_hci_evt_le_per_adv_sync_lost *)buf->data;
	struct bt_le_per_adv_sync *per_adv_sync;

	per_adv_sync = bt_hci_per_adv_sync_lookup_handle(sys_le16_to_cpu(evt->handle));

	if (!per_adv_sync) {
		LOG_ERR("Unknown handle 0x%04X for periodic adv sync lost",
			sys_le16_to_cpu(evt->handle));
		return;
	}

	/* There is no status in the per. adv. sync lost event */
	per_adv_sync_terminated(per_adv_sync, BT_HCI_ERR_UNSPECIFIED);
}

#if defined(CONFIG_BT_PER_ADV_SYNC_TRANSFER_RECEIVER)
static uint8_t conn_past_modes[CONFIG_BT_MAX_CONN];
static uint8_t default_past_mode;

static void past_disconnected_cb(struct bt_conn *conn, uint8_t reason)
{
	/* The core spec does not explicitly state that the mode of a connection handle is
	 * cleared on disconnect, but let's assume it is.
	 */
	conn_past_modes[bt_conn_index(conn)] = BT_HCI_LE_PAST_MODE_NO_SYNC;
}

BT_CONN_CB_DEFINE(past_conn_callbacks) = {
	.disconnected = past_disconnected_cb,
};

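/* Common handler for the v1 and v2 PAST received events: allocate a sync
 * object for the transferred periodic sync and notify the registered callbacks.
 */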
bt_hci_le_past_received_common(struct net_buf * buf)1436 static void bt_hci_le_past_received_common(struct net_buf *buf)
1437 {
1438 #if defined(CONFIG_BT_PER_ADV_SYNC_RSP)
1439 struct bt_hci_evt_le_past_received_v2 *evt =
1440 (struct bt_hci_evt_le_past_received_v2 *)buf->data;
1441 #else
1442 struct bt_hci_evt_le_past_received *evt = (struct bt_hci_evt_le_past_received *)buf->data;
1443 #endif /* defined(CONFIG_BT_PER_ADV_SYNC_RSP) */
1444
1445 struct bt_le_per_adv_sync_synced_info sync_info;
1446 struct bt_le_per_adv_sync_cb *listener;
1447 struct bt_le_per_adv_sync *per_adv_sync;
1448 bt_addr_le_t id_addr;
1449
1450 if (evt->status) {
1451 /* No sync created, don't notify app */
1452 LOG_DBG("PAST receive failed with status 0x%02X %s", evt->status,
1453 bt_hci_err_to_str(evt->status));
1454 return;
1455 }
1456
1457 sync_info.conn = bt_conn_lookup_handle(sys_le16_to_cpu(evt->conn_handle), BT_CONN_TYPE_LE);
1458
1459 if (!sync_info.conn) {
1460 LOG_ERR("Could not lookup connection handle from PAST");
1461 per_adv_sync_terminate(sys_le16_to_cpu(evt->sync_handle));
1462 return;
1463 }
1464
1465 per_adv_sync = per_adv_sync_new();
1466 if (!per_adv_sync) {
1467 LOG_WRN("Could not allocate new PA sync from PAST");
1468 per_adv_sync_terminate(sys_le16_to_cpu(evt->sync_handle));
1469 bt_conn_unref(sync_info.conn);
1470 return;
1471 }
1472
1473 atomic_set_bit(per_adv_sync->flags, BT_PER_ADV_SYNC_SYNCED);
1474
1475 if (bt_addr_le_is_resolved(&evt->addr)) {
1476 bt_addr_le_copy_resolved(&id_addr, &evt->addr);
1477 } else {
1478 bt_addr_le_copy(&id_addr, bt_lookup_id_addr(BT_ID_DEFAULT, &evt->addr));
1479 }
1480
1481 per_adv_sync->handle = sys_le16_to_cpu(evt->sync_handle);
1482 per_adv_sync->interval = sys_le16_to_cpu(evt->interval);
1483 per_adv_sync->clock_accuracy = sys_le16_to_cpu(evt->clock_accuracy);
1484 per_adv_sync->phy = bt_get_phy(evt->phy);
1485 bt_addr_le_copy(&per_adv_sync->addr, &id_addr);
1486 per_adv_sync->sid = evt->adv_sid;
1487
1488 #if defined(CONFIG_BT_PER_ADV_SYNC_RSP)
1489 per_adv_sync->num_subevents = evt->num_subevents;
1490 per_adv_sync->subevent_interval = evt->subevent_interval;
1491 per_adv_sync->response_slot_delay = evt->response_slot_delay;
1492 per_adv_sync->response_slot_spacing = evt->response_slot_spacing;
1493 #endif /* defined(CONFIG_BT_PER_ADV_SYNC_RSP) */
1494
1495 sync_info.interval = per_adv_sync->interval;
1496 sync_info.phy = per_adv_sync->phy;
1497 sync_info.addr = &per_adv_sync->addr;
1498 sync_info.sid = per_adv_sync->sid;
1499 sync_info.service_data = sys_le16_to_cpu(evt->service_data);
1500
1501 const uint8_t mode = conn_past_modes[bt_conn_index(sync_info.conn)];
1502
1503 if (mode == BT_HCI_LE_PAST_MODE_NO_SYNC) {
1504 /* Use the default parameter mode as the conn specific mode is not set */
1505 sync_info.recv_enabled =
1506 default_past_mode == BT_HCI_LE_PAST_MODE_SYNC ||
1507 default_past_mode == BT_HCI_LE_PAST_MODE_SYNC_FILTER_DUPLICATES;
1508 } else {
1509 sync_info.recv_enabled = mode == BT_HCI_LE_PAST_MODE_SYNC ||
1510 mode == BT_HCI_LE_PAST_MODE_SYNC_FILTER_DUPLICATES;
1511 }
1512
1513 #if defined(CONFIG_BT_PER_ADV_SYNC_RSP)
1514 sync_info.num_subevents = per_adv_sync->num_subevents;
1515 sync_info.subevent_interval = per_adv_sync->subevent_interval;
1516 sync_info.response_slot_delay = per_adv_sync->response_slot_delay;
1517 sync_info.response_slot_spacing = per_adv_sync->response_slot_spacing;
1518 #endif /* defined(CONFIG_BT_PER_ADV_SYNC_RSP) */
1519
1520 SYS_SLIST_FOR_EACH_CONTAINER(&pa_sync_cbs, listener, node) {
1521 if (listener->synced) {
1522 listener->synced(per_adv_sync, &sync_info);
1523 }
1524 }
1525
1526 bt_conn_unref(sync_info.conn);
1527 }
1528
bt_hci_le_past_received(struct net_buf * buf)1529 void bt_hci_le_past_received(struct net_buf *buf)
1530 {
1531 if (IS_ENABLED(CONFIG_BT_PER_ADV_SYNC_RSP)) {
1532 LOG_ERR("The controller shall raise the latest unmasked version of the event");
1533
1534 return;
1535 }
1536
1537 bt_hci_le_past_received_common(buf);
1538 }
1539
1540 #if defined(CONFIG_BT_PER_ADV_SYNC_RSP)
bt_hci_le_past_received_v2(struct net_buf * buf)1541 void bt_hci_le_past_received_v2(struct net_buf *buf)
1542 {
1543 bt_hci_le_past_received_common(buf);
1544 }
1545 #endif /* CONFIG_BT_PER_ADV_SYNC_RSP */
1546 #endif /* CONFIG_BT_PER_ADV_SYNC_TRANSFER_RECEIVER */
1547
1548 #if defined(CONFIG_BT_PER_ADV_SYNC_RSP)
bt_hci_le_per_adv_sync_established_v2(struct net_buf * buf)1549 void bt_hci_le_per_adv_sync_established_v2(struct net_buf *buf)
1550 {
1551 bt_hci_le_per_adv_sync_established_common(buf);
1552 }
1553
bt_hci_le_per_adv_report_v2(struct net_buf * buf)1554 void bt_hci_le_per_adv_report_v2(struct net_buf *buf)
1555 {
1556 bt_hci_le_per_adv_report_common(buf);
1557 }
1558 #endif /* CONFIG_BT_PER_ADV_SYNC_RSP */
1559
1560 #if defined(CONFIG_BT_ISO_BROADCAST)
bt_hci_le_biginfo_adv_report(struct net_buf * buf)1561 void bt_hci_le_biginfo_adv_report(struct net_buf *buf)
1562 {
1563 struct bt_hci_evt_le_biginfo_adv_report *evt;
1564 struct bt_le_per_adv_sync *per_adv_sync;
1565 struct bt_le_per_adv_sync_cb *listener;
1566 struct bt_iso_biginfo biginfo;
1567
1568 evt = net_buf_pull_mem(buf, sizeof(*evt));
1569
1570 per_adv_sync = bt_hci_per_adv_sync_lookup_handle(sys_le16_to_cpu(evt->sync_handle));
1571
1572 if (!per_adv_sync) {
1573 LOG_ERR("Unknown handle 0x%04X for periodic advertising report",
1574 sys_le16_to_cpu(evt->sync_handle));
1575 return;
1576 }
1577
1578 biginfo.addr = &per_adv_sync->addr;
1579 biginfo.sid = per_adv_sync->sid;
1580 biginfo.num_bis = evt->num_bis;
1581 biginfo.sub_evt_count = evt->nse;
1582 biginfo.iso_interval = sys_le16_to_cpu(evt->iso_interval);
1583 biginfo.burst_number = evt->bn;
1584 biginfo.offset = evt->pto;
1585 biginfo.rep_count = evt->irc;
1586 biginfo.max_pdu = sys_le16_to_cpu(evt->max_pdu);
1587 biginfo.sdu_interval = sys_get_le24(evt->sdu_interval);
1588 biginfo.max_sdu = sys_le16_to_cpu(evt->max_sdu);
1589 biginfo.phy = bt_get_phy(evt->phy);
1590 biginfo.framing = evt->framing;
1591 biginfo.encryption = evt->encryption ? true : false;
1592
1593 SYS_SLIST_FOR_EACH_CONTAINER(&pa_sync_cbs, listener, node) {
1594 if (listener->biginfo) {
1595 listener->biginfo(per_adv_sync, &biginfo);
1596 }
1597 }
1598 }
1599 #endif /* CONFIG_BT_ISO_BROADCAST */
1600 #if defined(CONFIG_BT_DF_CONNECTIONLESS_CTE_RX)
bt_hci_le_df_connectionless_iq_report_common(uint8_t event,struct net_buf * buf)1601 static void bt_hci_le_df_connectionless_iq_report_common(uint8_t event, struct net_buf *buf)
1602 {
1603 int err;
1604
1605 struct bt_df_per_adv_sync_iq_samples_report cte_report;
1606 struct bt_le_per_adv_sync *per_adv_sync;
1607 struct bt_le_per_adv_sync_cb *listener;
1608
1609 if (event == BT_HCI_EVT_LE_CONNECTIONLESS_IQ_REPORT) {
1610 err = hci_df_prepare_connectionless_iq_report(buf, &cte_report, &per_adv_sync);
1611 if (err) {
1612 LOG_ERR("Prepare CTE conn IQ report failed %d", err);
1613 return;
1614 }
1615 } else if (IS_ENABLED(CONFIG_BT_DF_VS_CL_IQ_REPORT_16_BITS_IQ_SAMPLES) &&
1616 event == BT_HCI_EVT_VS_LE_CONNECTIONLESS_IQ_REPORT) {
1617 err = hci_df_vs_prepare_connectionless_iq_report(buf, &cte_report, &per_adv_sync);
1618 if (err) {
1619 LOG_ERR("Prepare CTE conn IQ report failed %d", err);
1620 return;
1621 }
1622 } else {
1623 LOG_ERR("Unhandled VS connectionless IQ report");
1624 return;
1625 }
1626
1627 SYS_SLIST_FOR_EACH_CONTAINER(&pa_sync_cbs, listener, node) {
1628 if (listener->cte_report_cb) {
1629 listener->cte_report_cb(per_adv_sync, &cte_report);
1630 }
1631 }
1632 }
1633
bt_hci_le_df_connectionless_iq_report(struct net_buf * buf)1634 void bt_hci_le_df_connectionless_iq_report(struct net_buf *buf)
1635 {
1636 bt_hci_le_df_connectionless_iq_report_common(BT_HCI_EVT_LE_CONNECTIONLESS_IQ_REPORT, buf);
1637 }
1638
1639 #if defined(CONFIG_BT_DF_VS_CL_IQ_REPORT_16_BITS_IQ_SAMPLES)
bt_hci_le_vs_df_connectionless_iq_report(struct net_buf * buf)1640 void bt_hci_le_vs_df_connectionless_iq_report(struct net_buf *buf)
1641 {
1642 bt_hci_le_df_connectionless_iq_report_common(BT_HCI_EVT_VS_LE_CONNECTIONLESS_IQ_REPORT,
1643 buf);
1644 }
1645 #endif /* CONFIG_BT_DF_VS_CL_IQ_REPORT_16_BITS_IQ_SAMPLES */
1646 #endif /* CONFIG_BT_DF_CONNECTIONLESS_CTE_RX */
1647 #endif /* defined(CONFIG_BT_PER_ADV_SYNC) */
1648 #endif /* defined(CONFIG_BT_EXT_ADV) */
1649
bt_hci_le_adv_report(struct net_buf * buf)1650 void bt_hci_le_adv_report(struct net_buf *buf)
1651 {
1652 uint8_t num_reports = net_buf_pull_u8(buf);
1653 struct bt_hci_evt_le_advertising_info *evt;
1654 bool explicit_scan = atomic_test_bit(scan_state.scan_flags, BT_LE_SCAN_USER_EXPLICIT_SCAN);
1655 bool conn_scan = atomic_test_bit(scan_state.scan_flags, BT_LE_SCAN_USER_CONN);
1656
1657 LOG_DBG("Adv number of reports %u", num_reports);
1658
1659 while (num_reports--) {
1660 struct bt_le_scan_recv_info adv_info;
1661
1662 if (!explicit_scan && !conn_scan) {
1663 /* The application has not requested explicit scan, so it is not expecting
1664 * advertising reports. Discard.
1665 * This is checked inside the loop because the flag can change between iterations,
1666 * and it is not uncommon for scanning to be disabled in the callback called
1667 * from le_adv_recv.
1668 *
1669 * However, if scanning is running for connection purposes,
1670 * the report shall still be processed to allow pending connections.
1671 */
1672
1673 break;
1674 }
1675
1676 if (buf->len < sizeof(*evt)) {
1677 LOG_ERR("Unexpected end of buffer");
1678 break;
1679 }
1680
1681 evt = net_buf_pull_mem(buf, sizeof(*evt));
1682
1683 if (buf->len < evt->length + sizeof(adv_info.rssi)) {
1684 LOG_ERR("Unexpected end of buffer");
1685 break;
1686 }
1687
1688 adv_info.primary_phy = BT_GAP_LE_PHY_1M;
1689 adv_info.secondary_phy = 0;
1690 adv_info.tx_power = BT_GAP_TX_POWER_INVALID;
1691 adv_info.rssi = evt->data[evt->length];
1692 adv_info.sid = BT_GAP_SID_INVALID;
1693 adv_info.interval = 0U;
1694
1695 adv_info.adv_type = evt->evt_type;
1696 adv_info.adv_props = get_adv_props_legacy(evt->evt_type);
1697
1698 le_adv_recv(&evt->addr, &adv_info, &buf->b, evt->length);
1699
1700 net_buf_pull(buf, evt->length + sizeof(adv_info.rssi));
1701 }
1702 }
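
/* Wire layout assumed by the parsing loop above (derived from the code, shown
 * here only as an aid): the event carries num_reports entries, each one a
 * struct bt_hci_evt_le_advertising_info header followed by `length` bytes of
 * advertising data and one trailing RSSI byte:
 *
 *     [num_reports]
 *     [evt_type][addr_type][addr(6)][length][data(length)][rssi]   x num_reports
 *
 * This is why the loop pulls sizeof(*evt), reads evt->data[evt->length] as the
 * RSSI and then advances by evt->length + sizeof(adv_info.rssi).
 */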
1703
1704 static bool valid_le_scan_param(const struct bt_le_scan_param *param)
1705 {
1706 if (IS_ENABLED(CONFIG_BT_PRIVACY) && param->type == BT_LE_SCAN_TYPE_ACTIVE &&
1707 param->timeout != 0) {
1708 /* This is marked as not supported as a stopgap until the (scan,
1709 * adv, init) roles are reworked into proper state machines.
1710 *
1711 * Having proper state machines is necessary to be able to
1712 * suspend all roles that use the (resolvable) private address,
1713 * update the RPA and resume them again with the right
1714 * parameters.
1715 *
1716 * Otherwise we would lower the privacy of the device, as either the RPA
1717 * update will fail or the scanner will not use the newly
1718 * generated RPA.
1719 */
1720 return false;
1721 }
1722
1723 if (param->type != BT_LE_SCAN_TYPE_PASSIVE && param->type != BT_LE_SCAN_TYPE_ACTIVE) {
1724 return false;
1725 }
1726
1727 if (param->options & ~(BT_LE_SCAN_OPT_FILTER_DUPLICATE | BT_LE_SCAN_OPT_FILTER_ACCEPT_LIST |
1728 BT_LE_SCAN_OPT_CODED | BT_LE_SCAN_OPT_NO_1M)) {
1729 return false;
1730 }
1731
1732 if (param->interval < 0x0004 || param->interval > 0x4000) {
1733 return false;
1734 }
1735
1736 if (param->window < 0x0004 || param->window > 0x4000) {
1737 return false;
1738 }
1739
1740 if (param->window > param->interval) {
1741 return false;
1742 }
1743
1744 return true;
1745 }
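
/* Example of a parameter set that satisfies the checks above (illustrative
 * only; any interval/window in the 0x0004..0x4000 range with window <= interval
 * is accepted):
 *
 *     struct bt_le_scan_param param = {
 *             .type     = BT_LE_SCAN_TYPE_PASSIVE,
 *             .options  = BT_LE_SCAN_OPT_FILTER_DUPLICATE,
 *             .interval = BT_GAP_SCAN_FAST_INTERVAL,
 *             .window   = BT_GAP_SCAN_FAST_WINDOW,
 *     };
 */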
1746
1747 int bt_le_scan_start(const struct bt_le_scan_param *param, bt_le_scan_cb_t cb)
1748 {
1749 int err;
1750
1751 if (!atomic_test_bit(bt_dev.flags, BT_DEV_READY)) {
1752 return -EAGAIN;
1753 }
1754
1755 /* Check that the parameters have valid values */
1756 if (!valid_le_scan_param(param)) {
1757 return -EINVAL;
1758 }
1759
1760 if (param->type && !bt_id_scan_random_addr_check()) {
1761 return -EINVAL;
1762 }
1763
1764 /* Prevent multiple threads from trying to enable explicit scanning at the same time.
1765 * That could lead to unwanted overwriting of scan_state.explicit_scan_param.
1766 */
1767 err = k_mutex_lock(&scan_state.scan_explicit_params_mutex, K_NO_WAIT);
1768
1769 if (err) {
1770 return err;
1771 }
1772
1773 err = scan_check_if_state_allowed(BT_LE_SCAN_USER_EXPLICIT_SCAN);
1774
1775 if (err) {
1776 k_mutex_unlock(&scan_state.scan_explicit_params_mutex);
1777 return err;
1778 }
1779
1780 /* store the parameters that were used to start the scanner */
1781 memcpy(&scan_state.explicit_scan_param, param, sizeof(scan_state.explicit_scan_param));
1782
1783 scan_dev_found_cb = cb;
1784 err = bt_le_scan_user_add(BT_LE_SCAN_USER_EXPLICIT_SCAN);
1785 k_mutex_unlock(&scan_state.scan_explicit_params_mutex);
1786
1787 return err;
1788 }
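
/* Usage sketch (application side, illustrative only):
 *
 *     static void device_found(const bt_addr_le_t *addr, int8_t rssi,
 *                              uint8_t adv_type, struct net_buf_simple *ad)
 *     {
 *             // parse `ad`, e.g. with bt_data_parse()
 *     }
 *
 *     err = bt_le_scan_start(BT_LE_SCAN_PASSIVE, device_found);
 *
 * The callback may also be NULL when only the listeners registered with
 * bt_le_scan_cb_register() are used.
 */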
1789
1790 int bt_le_scan_stop(void)
1791 {
1792 bt_scan_softreset();
1793 scan_dev_found_cb = NULL;
1794
1795 if (IS_ENABLED(CONFIG_BT_EXT_ADV) &&
1796 atomic_test_and_clear_bit(bt_dev.flags, BT_DEV_SCAN_LIMITED)) {
1797 atomic_clear_bit(bt_dev.flags, BT_DEV_RPA_VALID);
1798
1799 #if defined(CONFIG_BT_SMP)
1800 bt_id_pending_keys_update();
1801 #endif
1802 }
1803
1804 return bt_le_scan_user_remove(BT_LE_SCAN_USER_EXPLICIT_SCAN);
1805 }
1806
1807 int bt_le_scan_cb_register(struct bt_le_scan_cb *cb)
1808 {
1809 if (sys_slist_find(&scan_cbs, &cb->node, NULL)) {
1810 return -EEXIST;
1811 }
1812
1813 sys_slist_append(&scan_cbs, &cb->node);
1814
1815 return 0;
1816 }
1817
1818 void bt_le_scan_cb_unregister(struct bt_le_scan_cb *cb)
1819 {
1820 sys_slist_find_and_remove(&scan_cbs, &cb->node);
1821 }
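
/* Registration sketch for the listener list consumed by le_adv_recv
 * (illustrative only):
 *
 *     static void scan_recv(const struct bt_le_scan_recv_info *info,
 *                           struct net_buf_simple *buf)
 *     {
 *             // info->addr, info->rssi, info->adv_props, ...
 *     }
 *
 *     static struct bt_le_scan_cb scan_callbacks = {
 *             .recv = scan_recv,
 *     };
 *
 *     bt_le_scan_cb_register(&scan_callbacks);
 */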
1822
1823 #if defined(CONFIG_BT_PER_ADV_SYNC)
1824 uint8_t bt_le_per_adv_sync_get_index(struct bt_le_per_adv_sync *per_adv_sync)
1825 {
1826 __ASSERT(IS_ARRAY_ELEMENT(per_adv_sync_pool, per_adv_sync), "Invalid per_adv_sync pointer");
1827
1828 return (uint8_t)ARRAY_INDEX(per_adv_sync_pool, per_adv_sync);
1829 }
1830
1831 struct bt_le_per_adv_sync *bt_le_per_adv_sync_lookup_index(uint8_t index)
1832 {
1833 if (index >= ARRAY_SIZE(per_adv_sync_pool)) {
1834 return NULL;
1835 }
1836
1837 return &per_adv_sync_pool[index];
1838 }
1839
1840 int bt_le_per_adv_sync_get_info(struct bt_le_per_adv_sync *per_adv_sync,
1841 struct bt_le_per_adv_sync_info *info)
1842 {
1843 CHECKIF(per_adv_sync == NULL || info == NULL) {
1844 return -EINVAL;
1845 }
1846
1847 bt_addr_le_copy(&info->addr, &per_adv_sync->addr);
1848 info->sid = per_adv_sync->sid;
1849 info->phy = per_adv_sync->phy;
1850 info->interval = per_adv_sync->interval;
1851
1852 return 0;
1853 }
1854
1855 struct bt_le_per_adv_sync *bt_le_per_adv_sync_lookup_addr(const bt_addr_le_t *adv_addr, uint8_t sid)
1856 {
1857 for (int i = 0; i < ARRAY_SIZE(per_adv_sync_pool); i++) {
1858 struct bt_le_per_adv_sync *sync = &per_adv_sync_pool[i];
1859
1860 if (!atomic_test_bit(per_adv_sync_pool[i].flags, BT_PER_ADV_SYNC_CREATED)) {
1861 continue;
1862 }
1863
1864 if (bt_addr_le_eq(&sync->addr, adv_addr) && sync->sid == sid) {
1865 return sync;
1866 }
1867 }
1868
1869 return NULL;
1870 }
1871
1872 int bt_le_per_adv_sync_create(const struct bt_le_per_adv_sync_param *param,
1873 struct bt_le_per_adv_sync **out_sync)
1874 {
1875 struct bt_hci_cp_le_per_adv_create_sync *cp;
1876 struct net_buf *buf;
1877 struct bt_le_per_adv_sync *per_adv_sync;
1878 int err;
1879
1880 if (!BT_FEAT_LE_EXT_PER_ADV(bt_dev.le.features)) {
1881 return -ENOTSUP;
1882 }
1883
1884 if (get_pending_per_adv_sync()) {
1885 return -EBUSY;
1886 }
1887
1888 if (param->sid > BT_GAP_SID_MAX || param->skip > BT_GAP_PER_ADV_MAX_SKIP ||
1889 param->timeout > BT_GAP_PER_ADV_MAX_TIMEOUT ||
1890 param->timeout < BT_GAP_PER_ADV_MIN_TIMEOUT) {
1891 return -EINVAL;
1892 }
1893
1894 if ((param->options & BT_LE_PER_ADV_SYNC_OPT_FILTER_DUPLICATE) != 0 &&
1895 BT_FEAT_LE_PER_ADV_ADI_SUPP(bt_dev.le.features) == 0) {
1896 return -ENOTSUP;
1897 }
1898
1899 per_adv_sync = per_adv_sync_new();
1900 if (!per_adv_sync) {
1901 return -ENOMEM;
1902 }
1903
1904 buf = bt_hci_cmd_alloc(K_FOREVER);
1905 if (!buf) {
1906 per_adv_sync_delete(per_adv_sync);
1907 return -ENOBUFS;
1908 }
1909
1910 cp = net_buf_add(buf, sizeof(*cp));
1911 (void)memset(cp, 0, sizeof(*cp));
1912
1913 if (param->options & BT_LE_PER_ADV_SYNC_OPT_USE_PER_ADV_LIST) {
1914 atomic_set_bit(per_adv_sync->flags, BT_PER_ADV_SYNC_SYNCING_USE_LIST);
1915
1916 cp->options |= BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_USE_LIST;
1917 } else {
1918 /* If BT_LE_PER_ADV_SYNC_OPT_USE_PER_ADV_LIST is set, then the
1919 * address and SID are ignored by the controller, so we only
1920 * copy/assign them when the periodic advertising list
1921 * is not used.
1922 */
1923 bt_addr_le_copy(&cp->addr, &param->addr);
1924 cp->sid = param->sid;
1925 }
1926
1927 if (param->options & BT_LE_PER_ADV_SYNC_OPT_REPORTING_INITIALLY_DISABLED) {
1928 cp->options |= BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_REPORTS_DISABLED;
1929
1930 atomic_set_bit(per_adv_sync->flags, BT_PER_ADV_SYNC_RECV_DISABLED);
1931 }
1932
1933 if (param->options & BT_LE_PER_ADV_SYNC_OPT_FILTER_DUPLICATE) {
1934 cp->options |= BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_FILTER_DUPLICATE;
1935 }
1936
1937 if (param->options & BT_LE_PER_ADV_SYNC_OPT_DONT_SYNC_AOA) {
1938 cp->cte_type |= BT_HCI_LE_PER_ADV_CREATE_SYNC_CTE_TYPE_NO_AOA;
1939 }
1940
1941 if (param->options & BT_LE_PER_ADV_SYNC_OPT_DONT_SYNC_AOD_1US) {
1942 cp->cte_type |= BT_HCI_LE_PER_ADV_CREATE_SYNC_CTE_TYPE_NO_AOD_1US;
1943 }
1944
1945 if (param->options & BT_LE_PER_ADV_SYNC_OPT_DONT_SYNC_AOD_2US) {
1946 cp->cte_type |= BT_HCI_LE_PER_ADV_CREATE_SYNC_CTE_TYPE_NO_AOD_2US;
1947 }
1948
1949 if (param->options & BT_LE_PER_ADV_SYNC_OPT_SYNC_ONLY_CONST_TONE_EXT) {
1950 cp->cte_type |= BT_HCI_LE_PER_ADV_CREATE_SYNC_CTE_TYPE_ONLY_CTE;
1951 }
1952
1953 cp->skip = sys_cpu_to_le16(param->skip);
1954 cp->sync_timeout = sys_cpu_to_le16(param->timeout);
1955
1956 err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_PER_ADV_CREATE_SYNC, buf, NULL);
1957 if (err) {
1958 per_adv_sync_delete(per_adv_sync);
1959 return err;
1960 }
1961
1962 atomic_set_bit(per_adv_sync->flags, BT_PER_ADV_SYNC_SYNCING);
1963
1964 /* Syncing requires that scan is enabled. If the caller doesn't enable
1965 * scan first, we enable it here, and disable it once the sync has been
1966 * established. We don't need to use any callbacks since we rely on
1967 * the advertiser address in the sync params.
1968 */
1969 err = bt_le_scan_user_add(BT_LE_SCAN_USER_PER_SYNC);
1970 if (err) {
1971 int per_sync_remove_err = bt_le_scan_user_remove(BT_LE_SCAN_USER_PER_SYNC);
1972
1973 if (per_sync_remove_err) {
1974 LOG_WRN("Error while updating the scanner (%d)", per_sync_remove_err);
1975 }
1976
1977 bt_le_per_adv_sync_delete(per_adv_sync);
1978 return err;
1979 }
1980
1981 *out_sync = per_adv_sync;
1982 bt_addr_le_copy(&per_adv_sync->addr, &param->addr);
1983 per_adv_sync->sid = param->sid;
1984
1985 return 0;
1986 }
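
/* Creation sketch (illustrative only): the address/SID pair stored at the end
 * of the function is what bt_le_per_adv_sync_lookup_addr() and the
 * sync-established event use to match controller reports back to this object.
 *
 *     struct bt_le_per_adv_sync_param param = {
 *             .sid     = info->sid,
 *             .skip    = 0,
 *             .timeout = 0x0aa0, // units of 10 ms
 *             .options = 0,
 *     };
 *     struct bt_le_per_adv_sync *sync;
 *
 *     bt_addr_le_copy(&param.addr, info->addr);
 *     err = bt_le_per_adv_sync_create(&param, &sync);
 */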
1987
1988 static int bt_le_per_adv_sync_create_cancel(struct bt_le_per_adv_sync *per_adv_sync)
1989 {
1990 struct net_buf *buf;
1991 int err;
1992
1993 if (get_pending_per_adv_sync() != per_adv_sync) {
1994 return -EINVAL;
1995 }
1996
1997 err = bt_le_scan_user_remove(BT_LE_SCAN_USER_PER_SYNC);
1998
1999 if (err) {
2000 return err;
2001 }
2002
2003 buf = bt_hci_cmd_alloc(K_FOREVER);
2004 if (!buf) {
2005 return -ENOBUFS;
2006 }
2007
2008 err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_PER_ADV_CREATE_SYNC_CANCEL, buf, NULL);
2009 if (err) {
2010 return err;
2011 }
2012
2013 return 0;
2014 }
2015
2016 static int bt_le_per_adv_sync_terminate(struct bt_le_per_adv_sync *per_adv_sync)
2017 {
2018 int err;
2019
2020 if (!atomic_test_bit(per_adv_sync->flags, BT_PER_ADV_SYNC_SYNCED)) {
2021 return -EINVAL;
2022 }
2023
2024 err = per_adv_sync_terminate(per_adv_sync->handle);
2025
2026 if (err) {
2027 return err;
2028 }
2029
2030 return 0;
2031 }
2032
2033 int bt_le_per_adv_sync_delete(struct bt_le_per_adv_sync *per_adv_sync)
2034 {
2035 int err = 0;
2036
2037 if (!BT_FEAT_LE_EXT_PER_ADV(bt_dev.le.features)) {
2038 return -ENOTSUP;
2039 }
2040
2041 if (atomic_test_bit(per_adv_sync->flags, BT_PER_ADV_SYNC_SYNCED)) {
2042 err = bt_le_per_adv_sync_terminate(per_adv_sync);
2043
2044 if (!err) {
2045 per_adv_sync_terminated(per_adv_sync, BT_HCI_ERR_LOCALHOST_TERM_CONN);
2046 }
2047 } else if (get_pending_per_adv_sync() == per_adv_sync) {
2048 err = bt_le_per_adv_sync_create_cancel(per_adv_sync);
2049 /* Deletion of the per_adv_sync will be done in the event
2050 * handler when cancelling.
2051 */
2052 }
2053
2054 return err;
2055 }
2056
2057 int bt_le_per_adv_sync_cb_register(struct bt_le_per_adv_sync_cb *cb)
2058 {
2059 if (sys_slist_find(&pa_sync_cbs, &cb->node, NULL)) {
2060 return -EEXIST;
2061 }
2062
2063 sys_slist_append(&pa_sync_cbs, &cb->node);
2064
2065 return 0;
2066 }
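
/* Listener sketch (illustrative only): every callback member is optional, and
 * the biginfo/cte_report_cb members are only invoked when the corresponding
 * features (ISO broadcast, connectionless CTE RX) are enabled.
 *
 *     static void pa_synced(struct bt_le_per_adv_sync *sync,
 *                           struct bt_le_per_adv_sync_synced_info *info) { ... }
 *     static void pa_recv(struct bt_le_per_adv_sync *sync,
 *                         const struct bt_le_per_adv_sync_recv_info *info,
 *                         struct net_buf_simple *buf) { ... }
 *
 *     static struct bt_le_per_adv_sync_cb pa_cb = {
 *             .synced = pa_synced,
 *             .recv   = pa_recv,
 *     };
 *
 *     bt_le_per_adv_sync_cb_register(&pa_cb);
 */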
2067
2068 static int bt_le_set_per_adv_recv_enable(struct bt_le_per_adv_sync *per_adv_sync, bool enable)
2069 {
2070 struct bt_hci_cp_le_set_per_adv_recv_enable *cp;
2071 struct bt_le_per_adv_sync_cb *listener;
2072 struct bt_le_per_adv_sync_state_info info;
2073 struct net_buf *buf;
2074 struct bt_hci_cmd_state_set state;
2075 int err;
2076
2077 if (!atomic_test_bit(bt_dev.flags, BT_DEV_READY)) {
2078 return -EAGAIN;
2079 }
2080
2081 if (!BT_FEAT_LE_EXT_PER_ADV(bt_dev.le.features)) {
2082 return -ENOTSUP;
2083 }
2084
2085 if (!atomic_test_bit(per_adv_sync->flags, BT_PER_ADV_SYNC_SYNCED)) {
2086 return -EINVAL;
2087 }
2088
2089 if ((enable && !atomic_test_bit(per_adv_sync->flags, BT_PER_ADV_SYNC_RECV_DISABLED)) ||
2090 (!enable && atomic_test_bit(per_adv_sync->flags, BT_PER_ADV_SYNC_RECV_DISABLED))) {
2091 return -EALREADY;
2092 }
2093
2094 buf = bt_hci_cmd_alloc(K_FOREVER);
2095 if (!buf) {
2096 return -ENOBUFS;
2097 }
2098
2099 cp = net_buf_add(buf, sizeof(*cp));
2100 (void)memset(cp, 0, sizeof(*cp));
2101
2102 cp->handle = sys_cpu_to_le16(per_adv_sync->handle);
2103 cp->enable = enable ? 1 : 0;
2104
2105 bt_hci_cmd_state_set_init(buf, &state, per_adv_sync->flags, BT_PER_ADV_SYNC_RECV_DISABLED,
2106 !enable);
2107
2108 err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_PER_ADV_RECV_ENABLE, buf, NULL);
2109
2110 if (err) {
2111 return err;
2112 }
2113
2114 info.recv_enabled = !atomic_test_bit(per_adv_sync->flags, BT_PER_ADV_SYNC_RECV_DISABLED);
2115
2116 SYS_SLIST_FOR_EACH_CONTAINER(&pa_sync_cbs, listener, node) {
2117 if (listener->state_changed) {
2118 listener->state_changed(per_adv_sync, &info);
2119 }
2120 }
2121
2122 return 0;
2123 }
2124
2125 int bt_le_per_adv_sync_recv_enable(struct bt_le_per_adv_sync *per_adv_sync)
2126 {
2127 return bt_le_set_per_adv_recv_enable(per_adv_sync, true);
2128 }
2129
2130 int bt_le_per_adv_sync_recv_disable(struct bt_le_per_adv_sync *per_adv_sync)
2131 {
2132 return bt_le_set_per_adv_recv_enable(per_adv_sync, false);
2133 }
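
/* Reporting can be created in a disabled state and turned on later, e.g.
 * (illustrative only):
 *
 *     param.options = BT_LE_PER_ADV_SYNC_OPT_REPORTING_INITIALLY_DISABLED;
 *     bt_le_per_adv_sync_create(&param, &sync);
 *     // ... once the .synced callback fires ...
 *     bt_le_per_adv_sync_recv_enable(sync);
 *
 * Both helpers return -EALREADY when the sync is already in the requested
 * reporting state.
 */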
2134
2135 #if defined(CONFIG_BT_PER_ADV_SYNC_TRANSFER_SENDER)
2136 int bt_le_per_adv_sync_transfer(const struct bt_le_per_adv_sync *per_adv_sync,
2137 const struct bt_conn *conn, uint16_t service_data)
2138 {
2139 struct bt_hci_cp_le_per_adv_sync_transfer *cp;
2140 struct net_buf *buf;
2141
2142 if (!BT_FEAT_LE_EXT_PER_ADV(bt_dev.le.features)) {
2143 return -ENOTSUP;
2144 } else if (!BT_FEAT_LE_PAST_SEND(bt_dev.le.features)) {
2145 return -ENOTSUP;
2146 }
2147
2148 buf = bt_hci_cmd_alloc(K_FOREVER);
2149 if (!buf) {
2150 return -ENOBUFS;
2151 }
2152
2153 cp = net_buf_add(buf, sizeof(*cp));
2154 (void)memset(cp, 0, sizeof(*cp));
2155
2156 cp->conn_handle = sys_cpu_to_le16(conn->handle);
2157 cp->sync_handle = sys_cpu_to_le16(per_adv_sync->handle);
2158 cp->service_data = sys_cpu_to_le16(service_data);
2159
2160 return bt_hci_cmd_send_sync(BT_HCI_OP_LE_PER_ADV_SYNC_TRANSFER, buf, NULL);
2161 }
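
/* Sender-side sketch (illustrative only): forward an established sync to a
 * connected peer, tagging it with application-defined service data.
 *
 *     err = bt_le_per_adv_sync_transfer(sync, conn, 0x0001);
 *
 * The peer only acts on the resulting PAST event if it has subscribed, see
 * bt_le_per_adv_sync_transfer_subscribe() below.
 */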
2162 #endif /* CONFIG_BT_PER_ADV_SYNC_TRANSFER_SENDER */
2163
2164 #if defined(CONFIG_BT_PER_ADV_SYNC_TRANSFER_RECEIVER)
2165 static bool valid_past_param(const struct bt_le_per_adv_sync_transfer_param *param)
2166 {
2167 if (param->skip > 0x01f3 || param->timeout < 0x000A || param->timeout > 0x4000) {
2168 return false;
2169 }
2170 if ((param->options & BT_LE_PER_ADV_SYNC_TRANSFER_OPT_REPORTING_INITIALLY_DISABLED) &&
2171 (param->options & BT_LE_PER_ADV_SYNC_TRANSFER_OPT_FILTER_DUPLICATES)) {
2172 return false;
2173 }
2174
2175 return true;
2176 }
2177
2178 static int past_param_set(const struct bt_conn *conn, uint8_t mode, uint16_t skip, uint16_t timeout,
2179 uint8_t cte_type)
2180 {
2181 struct bt_hci_cp_le_past_param *cp;
2182 struct net_buf *buf;
2183 int err;
2184
2185 buf = bt_hci_cmd_alloc(K_FOREVER);
2186 if (!buf) {
2187 return -ENOBUFS;
2188 }
2189
2190 cp = net_buf_add(buf, sizeof(*cp));
2191 (void)memset(cp, 0, sizeof(*cp));
2192
2193 cp->conn_handle = sys_cpu_to_le16(conn->handle);
2194 cp->mode = mode;
2195 cp->skip = sys_cpu_to_le16(skip);
2196 cp->timeout = sys_cpu_to_le16(timeout);
2197 cp->cte_type = cte_type;
2198 err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_PAST_PARAM, buf, NULL);
2199
2200 return err;
2201 }
2202
2203 static int default_past_param_set(uint8_t mode, uint16_t skip, uint16_t timeout, uint8_t cte_type)
2204 {
2205 struct bt_hci_cp_le_default_past_param *cp;
2206 struct net_buf *buf;
2207 int err;
2208
2209 buf = bt_hci_cmd_alloc(K_FOREVER);
2210 if (!buf) {
2211 return -ENOBUFS;
2212 }
2213
2214 cp = net_buf_add(buf, sizeof(*cp));
2215 (void)memset(cp, 0, sizeof(*cp));
2216
2217 cp->mode = mode;
2218 cp->skip = sys_cpu_to_le16(skip);
2219 cp->timeout = sys_cpu_to_le16(timeout);
2220 cp->cte_type = cte_type;
2221 err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_DEFAULT_PAST_PARAM, buf, NULL);
2222
2223 return err;
2224 }
2225
2226 int bt_le_per_adv_sync_transfer_subscribe(const struct bt_conn *conn,
2227 const struct bt_le_per_adv_sync_transfer_param *param)
2228 {
2229 uint8_t cte_type = 0;
2230 uint8_t mode = BT_HCI_LE_PAST_MODE_SYNC;
2231 int err;
2232
2233 if (!BT_FEAT_LE_EXT_PER_ADV(bt_dev.le.features)) {
2234 return -ENOTSUP;
2235 } else if (!BT_FEAT_LE_PAST_RECV(bt_dev.le.features)) {
2236 return -ENOTSUP;
2237 }
2238
2239 if (!valid_past_param(param)) {
2240 return -EINVAL;
2241 }
2242
2243 if (param->options & BT_LE_PER_ADV_SYNC_TRANSFER_OPT_SYNC_NO_AOA) {
2244 cte_type |= BT_HCI_LE_PAST_CTE_TYPE_NO_AOA;
2245 }
2246
2247 if (param->options & BT_LE_PER_ADV_SYNC_TRANSFER_OPT_SYNC_NO_AOD_1US) {
2248 cte_type |= BT_HCI_LE_PAST_CTE_TYPE_NO_AOD_1US;
2249 }
2250
2251 if (param->options & BT_LE_PER_ADV_SYNC_TRANSFER_OPT_SYNC_NO_AOD_2US) {
2252 cte_type |= BT_HCI_LE_PAST_CTE_TYPE_NO_AOD_2US;
2253 }
2254
2255 if (param->options & BT_LE_PER_ADV_SYNC_TRANSFER_OPT_SYNC_ONLY_CTE) {
2256 cte_type |= BT_HCI_LE_PAST_CTE_TYPE_ONLY_CTE;
2257 }
2258
2259 if (param->options & BT_LE_PER_ADV_SYNC_TRANSFER_OPT_REPORTING_INITIALLY_DISABLED) {
2260 mode = BT_HCI_LE_PAST_MODE_NO_REPORTS;
2261 } else if (param->options & BT_LE_PER_ADV_SYNC_TRANSFER_OPT_FILTER_DUPLICATES) {
2262 mode = BT_HCI_LE_PAST_MODE_SYNC_FILTER_DUPLICATES;
2263 }
2264
2265 if (conn) {
2266 const uint8_t conn_idx = bt_conn_index(conn);
2267 const uint8_t old_mode = conn_past_modes[conn_idx];
2268
2269 conn_past_modes[conn_idx] = mode;
2270
2271 err = past_param_set(conn, mode, param->skip, param->timeout, cte_type);
2272 if (err != 0) {
2273 /* Restore old mode */
2274 conn_past_modes[conn_idx] = old_mode;
2275 }
2276 } else {
2277 const uint8_t old_mode = default_past_mode;
2278
2279 default_past_mode = mode;
2280
2281 err = default_past_param_set(mode, param->skip, param->timeout, cte_type);
2282 if (err != 0) {
2283 /* Restore old mode */
2284 default_past_mode = old_mode;
2285 }
2286 }
2287
2288 return err;
2289 }
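
/* Receiver-side sketch (illustrative only): accept PAST from a specific
 * connection, or pass conn == NULL to set the default parameters instead, as
 * handled above.
 *
 *     struct bt_le_per_adv_sync_transfer_param param = {
 *             .skip    = 0,
 *             .timeout = 0x0aa0, // units of 10 ms
 *             .options = 0,
 *     };
 *
 *     err = bt_le_per_adv_sync_transfer_subscribe(conn, &param);
 */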
2290
2291 int bt_le_per_adv_sync_transfer_unsubscribe(const struct bt_conn *conn)
2292 {
2293 int err;
2294
2295 if (!BT_FEAT_LE_EXT_PER_ADV(bt_dev.le.features)) {
2296 return -ENOTSUP;
2297 } else if (!BT_FEAT_LE_PAST_RECV(bt_dev.le.features)) {
2298 return -ENOTSUP;
2299 }
2300
2301 if (conn) {
2302 const uint8_t conn_idx = bt_conn_index(conn);
2303 const uint8_t old_mode = conn_past_modes[conn_idx];
2304
2305 conn_past_modes[conn_idx] = BT_HCI_LE_PAST_MODE_NO_SYNC;
2306
2307 err = past_param_set(conn, BT_HCI_LE_PAST_MODE_NO_SYNC, 0, 0x0a, 0);
2308 if (err != 0) {
2309 /* Restore old mode */
2310 conn_past_modes[conn_idx] = old_mode;
2311 }
2312 } else {
2313 const uint8_t old_mode = default_past_mode;
2314
2315 default_past_mode = BT_HCI_LE_PAST_MODE_NO_SYNC;
2316 err = default_past_param_set(BT_HCI_LE_PAST_MODE_NO_SYNC, 0, 0x0a, 0);
2317 if (err != 0) {
2318 /* Restore old mode */
2319 default_past_mode = old_mode;
2320 }
2321 }
2322
2323 return err;
2324 }
2325 #endif /* CONFIG_BT_PER_ADV_SYNC_TRANSFER_RECEIVER */
2326
2327 int bt_le_per_adv_list_add(const bt_addr_le_t *addr, uint8_t sid)
2328 {
2329 struct bt_hci_cp_le_add_dev_to_per_adv_list *cp;
2330 struct net_buf *buf;
2331 int err;
2332
2333 if (!atomic_test_bit(bt_dev.flags, BT_DEV_READY)) {
2334 return -EAGAIN;
2335 }
2336
2337 buf = bt_hci_cmd_alloc(K_FOREVER);
2338 if (!buf) {
2339 return -ENOBUFS;
2340 }
2341
2342 cp = net_buf_add(buf, sizeof(*cp));
2343 bt_addr_le_copy(&cp->addr, addr);
2344 cp->sid = sid;
2345
2346 err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_ADD_DEV_TO_PER_ADV_LIST, buf, NULL);
2347 if (err) {
2348 LOG_ERR("Failed to add device to periodic advertiser list");
2349 }
2350
2351 return err;
2352 }
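
/* Filter-list sketch (illustrative only): devices added here are matched when
 * a sync is created with BT_LE_PER_ADV_SYNC_OPT_USE_PER_ADV_LIST, in which
 * case the address and SID in the sync parameters are ignored by the
 * controller (see bt_le_per_adv_sync_create()).
 *
 *     bt_le_per_adv_list_add(&peer_addr, sid);
 *     param.options = BT_LE_PER_ADV_SYNC_OPT_USE_PER_ADV_LIST;
 *     err = bt_le_per_adv_sync_create(&param, &sync);
 */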
2353
2354 int bt_le_per_adv_list_remove(const bt_addr_le_t *addr, uint8_t sid)
2355 {
2356 struct bt_hci_cp_le_rem_dev_from_per_adv_list *cp;
2357 struct net_buf *buf;
2358 int err;
2359
2360 if (!atomic_test_bit(bt_dev.flags, BT_DEV_READY)) {
2361 return -EAGAIN;
2362 }
2363
2364 buf = bt_hci_cmd_alloc(K_FOREVER);
2365 if (!buf) {
2366 return -ENOBUFS;
2367 }
2368
2369 cp = net_buf_add(buf, sizeof(*cp));
2370 bt_addr_le_copy(&cp->addr, addr);
2371 cp->sid = sid;
2372
2373 err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_REM_DEV_FROM_PER_ADV_LIST, buf, NULL);
2374 if (err) {
2375 LOG_ERR("Failed to remove device from periodic advertiser list");
2376 return err;
2377 }
2378
2379 return 0;
2380 }
2381
2382 int bt_le_per_adv_list_clear(void)
2383 {
2384 int err;
2385
2386 if (!atomic_test_bit(bt_dev.flags, BT_DEV_READY)) {
2387 return -EAGAIN;
2388 }
2389
2390 err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_CLEAR_PER_ADV_LIST, NULL, NULL);
2391 if (err) {
2392 LOG_ERR("Failed to clear periodic advertiser list");
2393 return err;
2394 }
2395
2396 return 0;
2397 }
2398 #endif /* defined(CONFIG_BT_PER_ADV_SYNC) */
2399
2400 bool bt_le_explicit_scanner_running(void)
2401 {
2402 return atomic_test_bit(scan_state.scan_flags, BT_LE_SCAN_USER_EXPLICIT_SCAN);
2403 }
2404
2405 bool bt_le_explicit_scanner_uses_same_params(const struct bt_conn_le_create_param *create_param)
2406 {
2407 if (scan_state.explicit_scan_param.window != create_param->window ||
2408 scan_state.explicit_scan_param.interval != create_param->interval) {
2409 return false;
2410 }
2411
2412 if (scan_state.explicit_scan_param.options & BT_LE_SCAN_OPT_CODED) {
2413 if (scan_state.explicit_scan_param.window_coded != create_param->window_coded ||
2414 scan_state.explicit_scan_param.interval_coded != create_param->interval_coded) {
2415 return false;
2416 }
2417 }
2418
2419 return true;
2420 }
2421