1 /*
2 * Copyright (c) 2017-2021 Nordic Semiconductor ASA
3 * Copyright (c) 2015-2016 Intel Corporation
4 *
5 * SPDX-License-Identifier: Apache-2.0
6 */
7 #include <sys/types.h>
8
9 #include <zephyr/sys/atomic.h>
10 #include <zephyr/sys/byteorder.h>
11 #include <zephyr/sys/check.h>
12
13 #include <zephyr/bluetooth/bluetooth.h>
14 #include <zephyr/bluetooth/iso.h>
15 #include <zephyr/bluetooth/buf.h>
16 #include <zephyr/bluetooth/direction.h>
17 #include <zephyr/bluetooth/addr.h>
18 #include <zephyr/bluetooth/hci.h>
19 #include <zephyr/bluetooth/hci_vs.h>
20
21 #include "addr_internal.h"
22 #include "hci_core.h"
23 #include "conn_internal.h"
24 #include "direction_internal.h"
25 #include "id.h"
26
27 #include "common/bt_str.h"
28
29 #define LOG_LEVEL CONFIG_BT_HCI_CORE_LOG_LEVEL
30 #include <zephyr/logging/log.h>
31 LOG_MODULE_REGISTER(bt_scan);
32
33 static bt_le_scan_cb_t *scan_dev_found_cb;
34 static sys_slist_t scan_cbs = SYS_SLIST_STATIC_INIT(&scan_cbs);
35
36 #if defined(CONFIG_BT_EXT_ADV)
37 /* A buffer used to reassemble advertisement data from the controller. */
38 NET_BUF_SIMPLE_DEFINE(ext_scan_buf, CONFIG_BT_EXT_SCAN_BUF_SIZE);
39
40 struct fragmented_advertiser {
41 bt_addr_le_t addr;
42 uint8_t sid;
43 enum {
44 FRAG_ADV_INACTIVE,
45 FRAG_ADV_REASSEMBLING,
46 FRAG_ADV_DISCARDING,
47 } state;
48 };
49
50 static struct fragmented_advertiser reassembling_advertiser;
51
static bool fragmented_advertisers_equal(const struct fragmented_advertiser *a,
					 const bt_addr_le_t *addr, uint8_t sid)
54 {
55 /* Two advertisers are equal if they are the same adv set from the same device */
56 return a->sid == sid && bt_addr_le_eq(&a->addr, addr);
57 }
58
59 /* Sets the address and sid of the advertiser to be reassembled. */
static void init_reassembling_advertiser(const bt_addr_le_t *addr, uint8_t sid)
61 {
62 bt_addr_le_copy(&reassembling_advertiser.addr, addr);
63 reassembling_advertiser.sid = sid;
64 reassembling_advertiser.state = FRAG_ADV_REASSEMBLING;
65 }
66
static void reset_reassembling_advertiser(void)
68 {
69 net_buf_simple_reset(&ext_scan_buf);
70 reassembling_advertiser.state = FRAG_ADV_INACTIVE;
71 }
72
73 #if defined(CONFIG_BT_PER_ADV_SYNC)
74 static struct bt_le_per_adv_sync *get_pending_per_adv_sync(void);
75 static struct bt_le_per_adv_sync per_adv_sync_pool[CONFIG_BT_PER_ADV_SYNC_MAX];
76 static sys_slist_t pa_sync_cbs = SYS_SLIST_STATIC_INIT(&pa_sync_cbs);
77 #endif /* defined(CONFIG_BT_PER_ADV_SYNC) */
78 #endif /* defined(CONFIG_BT_EXT_ADV) */
79
void bt_scan_reset(void)
81 {
82 scan_dev_found_cb = NULL;
83 #if defined(CONFIG_BT_EXT_ADV)
84 reset_reassembling_advertiser();
85 #endif
86 }
87
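/* Send the LE Set Extended Scan Enable command to the controller and mirror
 * the requested state in the BT_DEV_SCANNING flag. Duplicate filtering is
 * only applied when enabling, and the scan period is always set to zero so
 * the controller does not periodically restart the scan on its own.
 */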
static int set_le_ext_scan_enable(uint8_t enable, uint16_t duration)
89 {
90 struct bt_hci_cp_le_set_ext_scan_enable *cp;
91 struct bt_hci_cmd_state_set state;
92 struct net_buf *buf;
93 int err;
94
95 buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(*cp));
96 if (!buf) {
97 return -ENOBUFS;
98 }
99
100 cp = net_buf_add(buf, sizeof(*cp));
101
102 if (enable == BT_HCI_LE_SCAN_ENABLE) {
103 cp->filter_dup = atomic_test_bit(bt_dev.flags,
104 BT_DEV_SCAN_FILTER_DUP);
105 } else {
106 cp->filter_dup = BT_HCI_LE_SCAN_FILTER_DUP_DISABLE;
107 }
108
109 cp->enable = enable;
110 cp->duration = sys_cpu_to_le16(duration);
111 cp->period = 0;
112
113 bt_hci_cmd_state_set_init(buf, &state, bt_dev.flags, BT_DEV_SCANNING,
114 enable == BT_HCI_LE_SCAN_ENABLE);
115
116 err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_EXT_SCAN_ENABLE, buf, NULL);
117 if (err) {
118 return err;
119 }
120
121 return 0;
122 }
123
static int bt_le_scan_set_enable_legacy(uint8_t enable)
125 {
126 struct bt_hci_cp_le_set_scan_enable *cp;
127 struct bt_hci_cmd_state_set state;
128 struct net_buf *buf;
129 int err;
130
131 buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_SCAN_ENABLE, sizeof(*cp));
132 if (!buf) {
133 return -ENOBUFS;
134 }
135
136 cp = net_buf_add(buf, sizeof(*cp));
137
138 if (enable == BT_HCI_LE_SCAN_ENABLE) {
139 cp->filter_dup = atomic_test_bit(bt_dev.flags,
140 BT_DEV_SCAN_FILTER_DUP);
141 } else {
142 cp->filter_dup = BT_HCI_LE_SCAN_FILTER_DUP_DISABLE;
143 }
144
145 cp->enable = enable;
146
147 bt_hci_cmd_state_set_init(buf, &state, bt_dev.flags, BT_DEV_SCANNING,
148 enable == BT_HCI_LE_SCAN_ENABLE);
149
150 err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_SCAN_ENABLE, buf, NULL);
151 if (err) {
152 return err;
153 }
154
155 return 0;
156 }
157
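/* Enable or disable scanning, using the extended scan enable command when the
 * controller supports extended advertising and the legacy command otherwise.
 */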
int bt_le_scan_set_enable(uint8_t enable)
159 {
160 if (IS_ENABLED(CONFIG_BT_EXT_ADV) &&
161 BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features)) {
162 return set_le_ext_scan_enable(enable, 0);
163 }
164
165 return bt_le_scan_set_enable_legacy(enable);
166 }
167
static int start_le_scan_ext(struct bt_hci_ext_scan_phy *phy_1m,
			     struct bt_hci_ext_scan_phy *phy_coded,
			     uint16_t duration)
171 {
172 struct bt_hci_cp_le_set_ext_scan_param *set_param;
173 struct net_buf *buf;
174 uint8_t own_addr_type;
175 bool active_scan;
176 int err;
177
178 active_scan = (phy_1m && phy_1m->type == BT_HCI_LE_SCAN_ACTIVE) ||
179 (phy_coded && phy_coded->type == BT_HCI_LE_SCAN_ACTIVE);
180
181 if (duration > 0) {
182 atomic_set_bit(bt_dev.flags, BT_DEV_SCAN_LIMITED);
183
184 /* Allow bt_le_oob_get_local to be called directly before
185 * starting a scan limited by timeout.
186 */
187 if (IS_ENABLED(CONFIG_BT_PRIVACY) && !bt_id_rpa_is_new()) {
188 atomic_clear_bit(bt_dev.flags, BT_DEV_RPA_VALID);
189 }
190 }
191
192 err = bt_id_set_scan_own_addr(active_scan, &own_addr_type);
193 if (err) {
194 return err;
195 }
196
197 buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_EXT_SCAN_PARAM,
198 sizeof(*set_param) +
199 (phy_1m ? sizeof(*phy_1m) : 0) +
200 (phy_coded ? sizeof(*phy_coded) : 0));
201 if (!buf) {
202 return -ENOBUFS;
203 }
204
205 set_param = net_buf_add(buf, sizeof(*set_param));
206 set_param->own_addr_type = own_addr_type;
207 set_param->phys = 0;
208
209 if (IS_ENABLED(CONFIG_BT_FILTER_ACCEPT_LIST) &&
210 atomic_test_bit(bt_dev.flags, BT_DEV_SCAN_FILTERED)) {
211 set_param->filter_policy = BT_HCI_LE_SCAN_FP_BASIC_FILTER;
212 } else {
213 set_param->filter_policy = BT_HCI_LE_SCAN_FP_BASIC_NO_FILTER;
214 }
215
216 if (phy_1m) {
217 set_param->phys |= BT_HCI_LE_EXT_SCAN_PHY_1M;
218 net_buf_add_mem(buf, phy_1m, sizeof(*phy_1m));
219 }
220
221 if (phy_coded) {
222 set_param->phys |= BT_HCI_LE_EXT_SCAN_PHY_CODED;
223 net_buf_add_mem(buf, phy_coded, sizeof(*phy_coded));
224 }
225
226 err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_EXT_SCAN_PARAM, buf, NULL);
227 if (err) {
228 return err;
229 }
230
231 err = set_le_ext_scan_enable(BT_HCI_LE_SCAN_ENABLE, duration);
232 if (err) {
233 return err;
234 }
235
236 atomic_set_bit_to(bt_dev.flags, BT_DEV_ACTIVE_SCAN, active_scan);
237
238 return 0;
239 }
240
static int start_le_scan_legacy(uint8_t scan_type, uint16_t interval, uint16_t window)
242 {
243 struct bt_hci_cp_le_set_scan_param set_param;
244 struct net_buf *buf;
245 int err;
246 bool active_scan;
247
248 (void)memset(&set_param, 0, sizeof(set_param));
249
250 set_param.scan_type = scan_type;
251
	/* For the remaining parameters, apply the default values defined in
	 * the Core Specification v4.2, Vol 2, Part E, Section 7.8.10.
	 */
255 set_param.interval = sys_cpu_to_le16(interval);
256 set_param.window = sys_cpu_to_le16(window);
257
258 if (IS_ENABLED(CONFIG_BT_FILTER_ACCEPT_LIST) &&
259 atomic_test_bit(bt_dev.flags, BT_DEV_SCAN_FILTERED)) {
260 set_param.filter_policy = BT_HCI_LE_SCAN_FP_BASIC_FILTER;
261 } else {
262 set_param.filter_policy = BT_HCI_LE_SCAN_FP_BASIC_NO_FILTER;
263 }
264
265 active_scan = scan_type == BT_HCI_LE_SCAN_ACTIVE;
266 err = bt_id_set_scan_own_addr(active_scan, &set_param.addr_type);
267 if (err) {
268 return err;
269 }
270
271 buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_SCAN_PARAM, sizeof(set_param));
272 if (!buf) {
273 return -ENOBUFS;
274 }
275
276 net_buf_add_mem(buf, &set_param, sizeof(set_param));
277
278 err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_SCAN_PARAM, buf, NULL);
279 if (err) {
280 return err;
281 }
282
283 err = bt_le_scan_set_enable(BT_HCI_LE_SCAN_ENABLE);
284 if (err) {
285 return err;
286 }
287
288 atomic_set_bit_to(bt_dev.flags, BT_DEV_ACTIVE_SCAN, active_scan);
289
290 return 0;
291 }
292
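/* Start passive scanning on behalf of the host, e.g. to establish pending
 * connections or periodic advertising syncs. Fast or background scan
 * parameters are used depending on the fast_scan argument, and the Coded PHY
 * is included when the controller supports it.
 */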
static int start_host_initiated_scan(bool fast_scan)
294 {
295 uint16_t interval, window;
296
297 if (fast_scan) {
298 interval = BT_GAP_SCAN_FAST_INTERVAL;
299 window = BT_GAP_SCAN_FAST_WINDOW;
300 } else {
301 interval = CONFIG_BT_BACKGROUND_SCAN_INTERVAL;
302 window = CONFIG_BT_BACKGROUND_SCAN_WINDOW;
303 }
304
305 if (IS_ENABLED(CONFIG_BT_EXT_ADV) &&
306 BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features)) {
307 struct bt_hci_ext_scan_phy scan_phy_params;
308
309 scan_phy_params.type = BT_HCI_LE_SCAN_PASSIVE;
310 scan_phy_params.interval = sys_cpu_to_le16(interval);
311 scan_phy_params.window = sys_cpu_to_le16(window);
312
		/* Scan on 1M + Coded if the controller supports it */
314 if (BT_FEAT_LE_PHY_CODED(bt_dev.le.features)) {
315 return start_le_scan_ext(&scan_phy_params, &scan_phy_params, 0);
316 } else {
317 return start_le_scan_ext(&scan_phy_params, NULL, 0);
318 }
319
320 }
321
322 return start_le_scan_legacy(BT_HCI_LE_SCAN_PASSIVE, interval, window);
323 }
324
int bt_le_scan_update(bool fast_scan)
326 {
327 if (atomic_test_bit(bt_dev.flags, BT_DEV_EXPLICIT_SCAN)) {
328 /* The application has already explicitly started scanning.
329 * We should keep the scanner running to avoid changing scan parameters.
330 */
331 return 0;
332 }
333
334 if (atomic_test_bit(bt_dev.flags, BT_DEV_SCANNING)) {
335 int err;
336
337 err = bt_le_scan_set_enable(BT_HCI_LE_SCAN_DISABLE);
338 if (err) {
339 return err;
340 }
341 }
342
343 if (IS_ENABLED(CONFIG_BT_CENTRAL)) {
344 struct bt_conn *conn;
345
346 if (!BT_LE_STATES_SCAN_INIT(bt_dev.le.states)) {
			/* Don't restart the scan if we have a pending connection. */
348 conn = bt_conn_lookup_state_le(BT_ID_DEFAULT, NULL,
349 BT_CONN_INITIATING);
350 if (conn) {
351 bt_conn_unref(conn);
352 return 0;
353 }
354 }
355
356 conn = bt_conn_lookup_state_le(BT_ID_DEFAULT, NULL,
357 BT_CONN_SCAN_BEFORE_INITIATING);
358 if (conn) {
359 atomic_set_bit(bt_dev.flags, BT_DEV_SCAN_FILTER_DUP);
360
361 bt_conn_unref(conn);
362
363 /* Start/Restart the scanner */
364 return start_host_initiated_scan(fast_scan);
365 }
366 }
367
368 #if defined(CONFIG_BT_PER_ADV_SYNC)
369 if (get_pending_per_adv_sync()) {
370 /* Start/Restart the scanner. */
371 return start_host_initiated_scan(fast_scan);
372 }
373 #endif
374
375 return 0;
376 }
377
378 #if defined(CONFIG_BT_CENTRAL)
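/* If a connectable advertising report matches a connection object in the
 * BT_CONN_SCAN_BEFORE_INITIATING state, stop scanning when the controller
 * cannot scan and initiate concurrently, and then initiate the connection.
 */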
static void check_pending_conn(const bt_addr_le_t *id_addr,
			       const bt_addr_le_t *addr, uint8_t adv_props)
381 {
382 struct bt_conn *conn;
383
384 /* No connections are allowed during explicit scanning
385 * when the controller does not support concurrent scanning and initiating.
386 */
387 if (!BT_LE_STATES_SCAN_INIT(bt_dev.le.states) &&
388 atomic_test_bit(bt_dev.flags, BT_DEV_EXPLICIT_SCAN)) {
389 return;
390 }
391
392 /* Return if event is not connectable */
393 if (!(adv_props & BT_HCI_LE_ADV_EVT_TYPE_CONN)) {
394 return;
395 }
396
397 conn = bt_conn_lookup_state_le(BT_ID_DEFAULT, id_addr,
398 BT_CONN_SCAN_BEFORE_INITIATING);
399 if (!conn) {
400 return;
401 }
402
403 if (!BT_LE_STATES_SCAN_INIT(bt_dev.le.states)) {
404 if (atomic_test_bit(bt_dev.flags, BT_DEV_SCANNING) &&
405 bt_le_scan_set_enable(BT_HCI_LE_SCAN_DISABLE)) {
406 goto failed;
407 }
408 }
409
410 bt_addr_le_copy(&conn->le.resp_addr, addr);
411 if (bt_le_create_conn(conn)) {
412 goto failed;
413 }
414
415 bt_conn_set_state(conn, BT_CONN_INITIATING);
416 bt_conn_unref(conn);
417 return;
418
419 failed:
420 conn->err = BT_HCI_ERR_UNSPECIFIED;
421 bt_conn_set_state(conn, BT_CONN_DISCONNECTED);
422 bt_conn_unref(conn);
423 bt_le_scan_update(false);
424 }
425 #endif /* CONFIG_BT_CENTRAL */
426
427 /* Convert Legacy adv report evt_type field to adv props */
static uint8_t get_adv_props_legacy(uint8_t evt_type)
429 {
430 switch (evt_type) {
431 case BT_GAP_ADV_TYPE_ADV_IND:
432 return BT_GAP_ADV_PROP_CONNECTABLE |
433 BT_GAP_ADV_PROP_SCANNABLE;
434
435 case BT_GAP_ADV_TYPE_ADV_DIRECT_IND:
436 return BT_GAP_ADV_PROP_CONNECTABLE |
437 BT_GAP_ADV_PROP_DIRECTED;
438
439 case BT_GAP_ADV_TYPE_ADV_SCAN_IND:
440 return BT_GAP_ADV_PROP_SCANNABLE;
441
442 case BT_GAP_ADV_TYPE_ADV_NONCONN_IND:
443 return 0;
444
	/* In a legacy advertising report we don't know whether the scan
	 * response comes from a connectable advertiser, so the connectable
	 * property bit is not set.
	 */
449 case BT_GAP_ADV_TYPE_SCAN_RSP:
450 return BT_GAP_ADV_PROP_SCAN_RESPONSE |
451 BT_GAP_ADV_PROP_SCANNABLE;
452
453 default:
454 return 0;
455 }
456 }
457
static void le_adv_recv(bt_addr_le_t *addr, struct bt_le_scan_recv_info *info,
			struct net_buf_simple *buf, uint16_t len)
460 {
461 struct bt_le_scan_cb *listener, *next;
462 struct net_buf_simple_state state;
463 bt_addr_le_t id_addr;
464
465 LOG_DBG("%s event %u, len %u, rssi %d dBm", bt_addr_le_str(addr), info->adv_type, len,
466 info->rssi);
467
468 if (!IS_ENABLED(CONFIG_BT_PRIVACY) &&
469 !IS_ENABLED(CONFIG_BT_SCAN_WITH_IDENTITY) &&
470 atomic_test_bit(bt_dev.flags, BT_DEV_EXPLICIT_SCAN) &&
471 (info->adv_props & BT_HCI_LE_ADV_PROP_DIRECT)) {
472 LOG_DBG("Dropped direct adv report");
473 return;
474 }
475
476 if (bt_addr_le_is_resolved(addr)) {
477 bt_addr_le_copy_resolved(&id_addr, addr);
478 } else if (addr->type == BT_HCI_PEER_ADDR_ANONYMOUS) {
479 bt_addr_le_copy(&id_addr, BT_ADDR_LE_ANY);
480 } else {
481 bt_addr_le_copy(&id_addr,
482 bt_lookup_id_addr(BT_ID_DEFAULT, addr));
483 }
484
485 if (scan_dev_found_cb) {
486 net_buf_simple_save(buf, &state);
487
488 buf->len = len;
489 scan_dev_found_cb(&id_addr, info->rssi, info->adv_type, buf);
490
491 net_buf_simple_restore(buf, &state);
492 }
493
494 info->addr = &id_addr;
495
496 SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&scan_cbs, listener, next, node) {
497 if (listener->recv) {
498 net_buf_simple_save(buf, &state);
499
500 buf->len = len;
501 listener->recv(info, buf);
502
503 net_buf_simple_restore(buf, &state);
504 }
505 }
506
507 /* Clear pointer to this stack frame before returning to calling function */
508 info->addr = NULL;
509
510 #if defined(CONFIG_BT_CENTRAL)
511 check_pending_conn(&id_addr, addr, info->adv_props);
512 #endif /* CONFIG_BT_CENTRAL */
513 }
514
515 #if defined(CONFIG_BT_EXT_ADV)
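/* Handle the HCI LE Scan Timeout event: clear the scanning-related flags and
 * notify registered scan listeners that the time-limited scan has ended.
 */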
void bt_hci_le_scan_timeout(struct net_buf *buf)
517 {
518 struct bt_le_scan_cb *listener, *next;
519
520 atomic_clear_bit(bt_dev.flags, BT_DEV_SCANNING);
521 atomic_clear_bit(bt_dev.flags, BT_DEV_EXPLICIT_SCAN);
522
523 atomic_clear_bit(bt_dev.flags, BT_DEV_SCAN_LIMITED);
524 atomic_clear_bit(bt_dev.flags, BT_DEV_RPA_VALID);
525
526 #if defined(CONFIG_BT_SMP)
527 bt_id_pending_keys_update();
528 #endif
529
530 SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&scan_cbs, listener, next, node) {
531 if (listener->timeout) {
532 listener->timeout();
533 }
534 }
535 }
536
537 /* Convert Extended adv report evt_type field into adv type */
static uint8_t get_adv_type(uint8_t evt_type)
539 {
540 switch (evt_type) {
541 case (BT_HCI_LE_ADV_EVT_TYPE_CONN |
542 BT_HCI_LE_ADV_EVT_TYPE_SCAN |
543 BT_HCI_LE_ADV_EVT_TYPE_LEGACY):
544 return BT_GAP_ADV_TYPE_ADV_IND;
545
546 case (BT_HCI_LE_ADV_EVT_TYPE_CONN |
547 BT_HCI_LE_ADV_EVT_TYPE_DIRECT |
548 BT_HCI_LE_ADV_EVT_TYPE_LEGACY):
549 return BT_GAP_ADV_TYPE_ADV_DIRECT_IND;
550
551 case (BT_HCI_LE_ADV_EVT_TYPE_SCAN |
552 BT_HCI_LE_ADV_EVT_TYPE_LEGACY):
553 return BT_GAP_ADV_TYPE_ADV_SCAN_IND;
554
555 case BT_HCI_LE_ADV_EVT_TYPE_LEGACY:
556 return BT_GAP_ADV_TYPE_ADV_NONCONN_IND;
557
558 case (BT_HCI_LE_ADV_EVT_TYPE_SCAN_RSP |
559 BT_HCI_LE_ADV_EVT_TYPE_CONN |
560 BT_HCI_LE_ADV_EVT_TYPE_SCAN |
561 BT_HCI_LE_ADV_EVT_TYPE_LEGACY):
562 case (BT_HCI_LE_ADV_EVT_TYPE_SCAN_RSP |
563 BT_HCI_LE_ADV_EVT_TYPE_SCAN |
564 BT_HCI_LE_ADV_EVT_TYPE_LEGACY):
565 /* Scan response from connectable or non-connectable advertiser.
566 */
567 return BT_GAP_ADV_TYPE_SCAN_RSP;
568
569 default:
570 return BT_GAP_ADV_TYPE_EXT_ADV;
571 }
572 }
573
574 /* Convert extended adv report evt_type field to adv props */
static uint16_t get_adv_props_extended(uint16_t evt_type)
576 {
577 /* Converts from BT_HCI_LE_ADV_EVT_TYPE_* to BT_GAP_ADV_PROP_*
578 * The first 4 bits are the same (conn, scan, direct, scan_rsp).
579 * Bit 4 must be flipped as the meaning of 1 is opposite (legacy -> extended)
580 * The rest of the bits are zeroed out.
581 */
582 return (evt_type ^ BT_HCI_LE_ADV_EVT_TYPE_LEGACY) & BIT_MASK(5);
583 }
584
static void create_ext_adv_info(struct bt_hci_evt_le_ext_advertising_info const *const evt,
				struct bt_le_scan_recv_info *const scan_info)
587 {
588 scan_info->primary_phy = bt_get_phy(evt->prim_phy);
589 scan_info->secondary_phy = bt_get_phy(evt->sec_phy);
590 scan_info->tx_power = evt->tx_power;
591 scan_info->rssi = evt->rssi;
592 scan_info->sid = evt->sid;
593 scan_info->interval = sys_le16_to_cpu(evt->interval);
594 scan_info->adv_type = get_adv_type(sys_le16_to_cpu(evt->evt_type));
595 scan_info->adv_props = get_adv_props_extended(sys_le16_to_cpu(evt->evt_type));
596 }
597
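/* Handle the HCI LE Extended Advertising Report event. Legacy and complete
 * reports are delivered to the application immediately, while fragmented
 * reports from a single advertiser are reassembled in ext_scan_buf before
 * delivery. Reports that cannot be reassembled are discarded.
 */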
void bt_hci_le_adv_ext_report(struct net_buf *buf)
599 {
600 uint8_t num_reports = net_buf_pull_u8(buf);
601
602 LOG_DBG("Adv number of reports %u", num_reports);
603
604 while (num_reports--) {
605 struct bt_hci_evt_le_ext_advertising_info *evt;
606 struct bt_le_scan_recv_info scan_info;
607 uint16_t data_status;
608 uint16_t evt_type;
609 bool is_report_complete;
610 bool more_to_come;
611 bool is_new_advertiser;
612
613 if (!atomic_test_bit(bt_dev.flags, BT_DEV_EXPLICIT_SCAN)) {
			/* The application has not requested explicit scan, so it is not
			 * expecting advertising reports. Discard them, and reset the
			 * reassembler if it is not inactive. This is done in the loop as
			 * the flag can change between iterations, and it is not uncommon
			 * for scanning to be disabled in the callback called from
			 * le_adv_recv().
			 */
620
621 if (reassembling_advertiser.state != FRAG_ADV_INACTIVE) {
622 reset_reassembling_advertiser();
623 }
624
625 break;
626 }
627
628 if (buf->len < sizeof(*evt)) {
629 LOG_ERR("Unexpected end of buffer");
630 break;
631 }
632
633 evt = net_buf_pull_mem(buf, sizeof(*evt));
634 evt_type = sys_le16_to_cpu(evt->evt_type);
635 data_status = BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS(evt_type);
636 is_report_complete = data_status == BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_COMPLETE;
637 more_to_come = data_status == BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_PARTIAL;
638
639 if (evt->length > buf->len) {
640 LOG_WRN("Adv report corrupted (wants %u out of %u)", evt->length, buf->len);
641
642 net_buf_reset(buf);
643
644 if (evt_type & BT_HCI_LE_ADV_EVT_TYPE_LEGACY) {
645 return;
646 }
647
648 /* Start discarding irrespective of the `more_to_come` flag. We
649 * assume we may have lost a partial adv report in the truncated
650 * data.
651 */
652 reassembling_advertiser.state = FRAG_ADV_DISCARDING;
653
654 return;
655 }
656
657 if (evt_type & BT_HCI_LE_ADV_EVT_TYPE_LEGACY) {
658 /* Legacy advertising reports are complete.
659 * Create event immediately.
660 */
661 create_ext_adv_info(evt, &scan_info);
662 le_adv_recv(&evt->addr, &scan_info, &buf->b, evt->length);
663 goto cont;
664 }
665
666 is_new_advertiser = reassembling_advertiser.state == FRAG_ADV_INACTIVE ||
667 !fragmented_advertisers_equal(&reassembling_advertiser,
668 &evt->addr, evt->sid);
669
670 if (is_new_advertiser && is_report_complete) {
671 /* Only advertising report from this advertiser.
672 * Create event immediately.
673 */
674 create_ext_adv_info(evt, &scan_info);
675 le_adv_recv(&evt->addr, &scan_info, &buf->b, evt->length);
676 goto cont;
677 }
678
679 if (is_new_advertiser && reassembling_advertiser.state == FRAG_ADV_REASSEMBLING) {
680 LOG_WRN("Received an incomplete advertising report while reassembling "
681 "advertising reports from a different advertiser. The advertising "
682 "report is discarded and future scan results may be incomplete. "
683 "Interleaving of fragmented advertising reports from different "
684 "advertisers is not yet supported.");
685 goto cont;
686 }
687
688 if (data_status == BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_INCOMPLETE) {
689 /* Got HCI_LE_Extended_Advertising_Report: Incomplete, data truncated, no
690 * more to come. This means the Controller is aborting the reassembly. We
691 * discard the partially received report, and the application is not
692 * notified.
693 *
694 * See the Controller's documentation for possible reasons for aborting.
695 * Hint: CONFIG_BT_CTLR_SCAN_DATA_LEN_MAX.
696 */
697 LOG_DBG("Discarding incomplete advertisement.");
698 reset_reassembling_advertiser();
699 goto cont;
700 }
701
702 if (is_new_advertiser) {
703 /* We are not reassembling reports from an advertiser and
704 * this is the first report from the new advertiser.
705 * Initialize the new advertiser.
706 */
707 __ASSERT_NO_MSG(reassembling_advertiser.state == FRAG_ADV_INACTIVE);
708 init_reassembling_advertiser(&evt->addr, evt->sid);
709 }
710
711 if (evt->length + ext_scan_buf.len > ext_scan_buf.size) {
			/* The report does not fit in the reassembly buffer.
			 * Discard this and future reports from the advertiser.
			 */
715 reassembling_advertiser.state = FRAG_ADV_DISCARDING;
716 }
717
718 if (reassembling_advertiser.state == FRAG_ADV_DISCARDING) {
719 if (!more_to_come) {
				/* We no longer need to keep track of this advertiser as
				 * all the expected data has been received.
				 */
723 reset_reassembling_advertiser();
724 }
725 goto cont;
726 }
727
728 net_buf_simple_add_mem(&ext_scan_buf, buf->data, evt->length);
729 if (more_to_come) {
730 /* The controller will send additional reports to be reassembled */
731 continue;
732 }
733
734 /* No more data coming from the controller.
735 * Create event.
736 */
737 __ASSERT_NO_MSG(is_report_complete);
738 create_ext_adv_info(evt, &scan_info);
739 le_adv_recv(&evt->addr, &scan_info, &ext_scan_buf, ext_scan_buf.len);
740
		/* We no longer need to keep track of this advertiser. */
742 reset_reassembling_advertiser();
743
744 cont:
745 net_buf_pull(buf, evt->length);
746 }
747 }
748
749 #if defined(CONFIG_BT_PER_ADV_SYNC)
static void per_adv_sync_delete(struct bt_le_per_adv_sync *per_adv_sync)
751 {
752 atomic_clear(per_adv_sync->flags);
753 }
754
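/* Allocate a periodic advertising sync object from the static pool, or return
 * NULL if all objects are already in use.
 */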
static struct bt_le_per_adv_sync *per_adv_sync_new(void)
756 {
757 struct bt_le_per_adv_sync *per_adv_sync = NULL;
758
759 for (int i = 0; i < ARRAY_SIZE(per_adv_sync_pool); i++) {
760 if (!atomic_test_bit(per_adv_sync_pool[i].flags,
761 BT_PER_ADV_SYNC_CREATED)) {
762 per_adv_sync = &per_adv_sync_pool[i];
763 break;
764 }
765 }
766
767 if (!per_adv_sync) {
768 return NULL;
769 }
770
771 (void)memset(per_adv_sync, 0, sizeof(*per_adv_sync));
772 atomic_set_bit(per_adv_sync->flags, BT_PER_ADV_SYNC_CREATED);
773
774 #if CONFIG_BT_PER_ADV_SYNC_BUF_SIZE > 0
775 net_buf_simple_init_with_data(&per_adv_sync->reassembly,
776 per_adv_sync->reassembly_data,
777 CONFIG_BT_PER_ADV_SYNC_BUF_SIZE);
778 net_buf_simple_reset(&per_adv_sync->reassembly);
779 #endif /* CONFIG_BT_PER_ADV_SYNC_BUF_SIZE > 0 */
780
781 return per_adv_sync;
782 }
783
static struct bt_le_per_adv_sync *get_pending_per_adv_sync(void)
785 {
786 for (int i = 0; i < ARRAY_SIZE(per_adv_sync_pool); i++) {
787 if (atomic_test_bit(per_adv_sync_pool[i].flags,
788 BT_PER_ADV_SYNC_SYNCING)) {
789 return &per_adv_sync_pool[i];
790 }
791 }
792
793 return NULL;
794 }
795
void bt_periodic_sync_disable(void)
797 {
798 for (size_t i = 0; i < ARRAY_SIZE(per_adv_sync_pool); i++) {
799 per_adv_sync_delete(&per_adv_sync_pool[i]);
800 }
801 }
802
struct bt_le_per_adv_sync *bt_hci_per_adv_sync_lookup_handle(uint16_t handle)
804 {
805 for (int i = 0; i < ARRAY_SIZE(per_adv_sync_pool); i++) {
806 if (per_adv_sync_pool[i].handle == handle &&
807 atomic_test_bit(per_adv_sync_pool[i].flags,
808 BT_PER_ADV_SYNC_SYNCED)) {
809 return &per_adv_sync_pool[i];
810 }
811 }
812
813 return NULL;
814 }
815
void bt_hci_le_per_adv_report_recv(struct bt_le_per_adv_sync *per_adv_sync,
				   struct net_buf_simple *buf,
				   const struct bt_le_per_adv_sync_recv_info *info)
819 {
820 struct net_buf_simple_state state;
821 struct bt_le_per_adv_sync_cb *listener;
822
823 SYS_SLIST_FOR_EACH_CONTAINER(&pa_sync_cbs, listener, node) {
824 if (listener->recv) {
825 net_buf_simple_save(buf, &state);
826 listener->recv(per_adv_sync, info, buf);
827 net_buf_simple_restore(buf, &state);
828 }
829 }
830 }
831
832 #if defined(CONFIG_BT_PER_ADV_SYNC_RSP) && (CONFIG_BT_PER_ADV_SYNC_BUF_SIZE > 0)
static void bt_hci_le_per_adv_report_recv_failure(struct bt_le_per_adv_sync *per_adv_sync,
						  const struct bt_le_per_adv_sync_recv_info *info)
835 {
836 struct bt_le_per_adv_sync_cb *listener;
837
838 SYS_SLIST_FOR_EACH_CONTAINER(&pa_sync_cbs, listener, node) {
839 if (listener->recv) {
840 listener->recv(per_adv_sync, info, NULL);
841 }
842 }
843 }
844 #endif /* defined(CONFIG_BT_PER_ADV_SYNC_RSP) && (CONFIG_BT_PER_ADV_SYNC_BUF_SIZE > 0) */
845
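/* Common handler for the v1 and v2 LE Periodic Advertising Report events.
 * Complete reports are forwarded directly to the listeners, while partial
 * reports are accumulated in the per-sync reassembly buffer until the data
 * status indicates that the report is complete.
 */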
static void bt_hci_le_per_adv_report_common(struct net_buf *buf)
847 {
848 #if defined(CONFIG_BT_PER_ADV_SYNC_RSP)
849 struct bt_hci_evt_le_per_advertising_report_v2 *evt;
850 #else
851 struct bt_hci_evt_le_per_advertising_report *evt;
852 #endif /* defined(CONFIG_BT_PER_ADV_SYNC_RSP) */
853
854 struct bt_le_per_adv_sync *per_adv_sync;
855 struct bt_le_per_adv_sync_recv_info info;
856
857 if (buf->len < sizeof(*evt)) {
858 LOG_ERR("Unexpected end of buffer");
859 return;
860 }
861
862 evt = net_buf_pull_mem(buf, sizeof(*evt));
863
864 per_adv_sync = bt_hci_per_adv_sync_lookup_handle(sys_le16_to_cpu(evt->handle));
865
866 if (!per_adv_sync) {
867 LOG_ERR("Unknown handle 0x%04X for periodic advertising report",
868 sys_le16_to_cpu(evt->handle));
869 return;
870 }
871
872 if (atomic_test_bit(per_adv_sync->flags,
873 BT_PER_ADV_SYNC_RECV_DISABLED)) {
874 LOG_ERR("Received PA adv report when receive disabled");
875 return;
876 }
877
878 info.tx_power = evt->tx_power;
879 info.rssi = evt->rssi;
880 info.cte_type = bt_get_df_cte_type(evt->cte_type);
881 info.addr = &per_adv_sync->addr;
882 info.sid = per_adv_sync->sid;
883
884 #if defined(CONFIG_BT_PER_ADV_SYNC_RSP)
885 info.periodic_event_counter = sys_le16_to_cpu(evt->periodic_event_counter);
886 info.subevent = evt->subevent;
887 #endif /* CONFIG_BT_PER_ADV_SYNC_RSP */
888
889 if (!per_adv_sync->report_truncated) {
890 #if CONFIG_BT_PER_ADV_SYNC_BUF_SIZE > 0
891 if (net_buf_simple_tailroom(&per_adv_sync->reassembly) < evt->length) {
			/* The buffer is too small for the entire report. Drop it. */
893 LOG_WRN("Buffer is too small to reassemble the report. "
894 "Use CONFIG_BT_PER_ADV_SYNC_BUF_SIZE to change "
895 "the buffer size.");
896
897 per_adv_sync->report_truncated = true;
898 net_buf_simple_reset(&per_adv_sync->reassembly);
899 return;
900 }
901
902 if (evt->data_status == BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_COMPLETE) {
903 if (per_adv_sync->reassembly.len == 0) {
904 /* We have not received any partial data before.
905 * This buffer can be forwarded without an extra copy.
906 */
907 bt_hci_le_per_adv_report_recv(per_adv_sync, &buf->b, &info);
908 } else {
909 net_buf_simple_add_mem(&per_adv_sync->reassembly,
910 buf->data, evt->length);
911 bt_hci_le_per_adv_report_recv(per_adv_sync,
912 &per_adv_sync->reassembly, &info);
913 net_buf_simple_reset(&per_adv_sync->reassembly);
914 }
915 } else if (evt->data_status == BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_INCOMPLETE) {
916 LOG_DBG("Received incomplete advertising data. "
917 "Advertising report dropped.");
918
919 net_buf_simple_reset(&per_adv_sync->reassembly);
920
921 } else if (evt->data_status == BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_PARTIAL) {
922 net_buf_simple_add_mem(&per_adv_sync->reassembly, buf->data, evt->length);
923 #if defined(CONFIG_BT_PER_ADV_SYNC_RSP)
924 } else if (evt->data_status == BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_RX_FAILED &&
925 per_adv_sync->num_subevents) {
926 bt_hci_le_per_adv_report_recv_failure(per_adv_sync, &info);
927 #endif /* CONFIG_BT_PER_ADV_SYNC_RSP */
928 } else {
929 __ASSERT(false, "Invalid data status 0x%02X", evt->data_status);
930 }
931 #else /* CONFIG_BT_PER_ADV_SYNC_BUF_SIZE > 0 */
932 if (evt->data_status == BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_COMPLETE) {
933 bt_hci_le_per_adv_report_recv(per_adv_sync, &buf->b, &info);
934 } else {
935 per_adv_sync->report_truncated = true;
936 }
937 #endif /* CONFIG_BT_PER_ADV_SYNC_BUF_SIZE > 0 */
938 } else if (evt->data_status == BT_HCI_LE_ADV_EVT_TYPE_DATA_STATUS_COMPLETE) {
939 per_adv_sync->report_truncated = false;
940 }
941 }
942
void bt_hci_le_per_adv_report(struct net_buf *buf)
944 {
945 if (IS_ENABLED(CONFIG_BT_PER_ADV_SYNC_RSP)) {
946 LOG_ERR("The controller shall raise the latest unmasked version of the event");
947
948 return;
949 }
950
951 bt_hci_le_per_adv_report_common(buf);
952 }
953
static int per_adv_sync_terminate(uint16_t handle)
955 {
956 struct bt_hci_cp_le_per_adv_terminate_sync *cp;
957 struct net_buf *buf;
958
959 buf = bt_hci_cmd_create(BT_HCI_OP_LE_PER_ADV_TERMINATE_SYNC,
960 sizeof(*cp));
961 if (!buf) {
962 return -ENOBUFS;
963 }
964
965 cp = net_buf_add(buf, sizeof(*cp));
966 (void)memset(cp, 0, sizeof(*cp));
967
968 cp->handle = sys_cpu_to_le16(handle);
969
970 return bt_hci_cmd_send_sync(BT_HCI_OP_LE_PER_ADV_TERMINATE_SYNC, buf,
971 NULL);
972 }
973
static void per_adv_sync_terminated(struct bt_le_per_adv_sync *per_adv_sync,
				    uint8_t reason)
976 {
977 /* Terminate the PA sync and notify app */
978 const struct bt_le_per_adv_sync_term_info term_info = {
979 .addr = &per_adv_sync->addr,
980 .sid = per_adv_sync->sid,
981 .reason = reason,
982 };
983 struct bt_le_per_adv_sync_cb *listener;
984
	/* Delete the sync object before the callback, so the caller is able
	 * to restart the sync from within the callback.
	 */
988 per_adv_sync_delete(per_adv_sync);
989
990 SYS_SLIST_FOR_EACH_CONTAINER(&pa_sync_cbs, listener, node) {
991 if (listener->term) {
992 listener->term(per_adv_sync, &term_info);
993 }
994 }
995 }
996
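/* Common handler for the v1 and v2 LE Periodic Advertising Sync Established
 * events. The event is validated against the pending sync object; unexpected
 * syncs are terminated in the controller, and registered listeners are
 * notified of either the established sync or the failure.
 */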
static void bt_hci_le_per_adv_sync_established_common(struct net_buf *buf)
998 {
999 #if defined(CONFIG_BT_PER_ADV_SYNC_RSP)
1000 struct bt_hci_evt_le_per_adv_sync_established_v2 *evt =
1001 (struct bt_hci_evt_le_per_adv_sync_established_v2 *)buf->data;
1002 #else
1003 struct bt_hci_evt_le_per_adv_sync_established *evt =
1004 (struct bt_hci_evt_le_per_adv_sync_established *)buf->data;
1005 #endif /* defined(CONFIG_BT_PER_ADV_SYNC_RSP) */
1006
1007 struct bt_le_per_adv_sync_synced_info sync_info;
1008 struct bt_le_per_adv_sync *pending_per_adv_sync;
1009 struct bt_le_per_adv_sync_cb *listener;
1010 bt_addr_le_t id_addr;
1011 bool unexpected_evt;
1012 int err;
1013
1014 pending_per_adv_sync = get_pending_per_adv_sync();
1015
1016 if (pending_per_adv_sync) {
1017 atomic_clear_bit(pending_per_adv_sync->flags,
1018 BT_PER_ADV_SYNC_SYNCING);
1019 err = bt_le_scan_update(false);
1020
1021 if (err) {
1022 LOG_ERR("Could not update scan (%d)", err);
1023 }
1024 }
1025
1026 if (evt->status == BT_HCI_ERR_OP_CANCELLED_BY_HOST) {
1027 /* Cancelled locally, don't call CB */
1028 if (pending_per_adv_sync) {
1029 per_adv_sync_delete(pending_per_adv_sync);
1030 } else {
1031 LOG_ERR("Unexpected per adv sync cancelled event");
1032 }
1033
1034 return;
1035 }
1036
1037 if (bt_addr_le_is_resolved(&evt->adv_addr)) {
1038 bt_addr_le_copy_resolved(&id_addr, &evt->adv_addr);
1039 } else {
1040 bt_addr_le_copy(&id_addr,
1041 bt_lookup_id_addr(BT_ID_DEFAULT,
1042 &evt->adv_addr));
1043 }
1044
1045 if (!pending_per_adv_sync ||
1046 (!atomic_test_bit(pending_per_adv_sync->flags,
1047 BT_PER_ADV_SYNC_SYNCING_USE_LIST) &&
1048 ((pending_per_adv_sync->sid != evt->sid) ||
1049 !bt_addr_le_eq(&pending_per_adv_sync->addr, &id_addr)))) {
1050 LOG_ERR("Unexpected per adv sync established event");
1051 /* Request terminate of pending periodic advertising in controller */
1052 per_adv_sync_terminate(sys_le16_to_cpu(evt->handle));
1053
1054 unexpected_evt = true;
1055 } else {
1056 unexpected_evt = false;
1057 }
1058
1059 if (unexpected_evt || evt->status != BT_HCI_ERR_SUCCESS) {
1060 if (pending_per_adv_sync) {
1061 const uint8_t reason = unexpected_evt ? BT_HCI_ERR_UNSPECIFIED
1062 : evt->status;
1063
1064 if (atomic_test_bit(pending_per_adv_sync->flags,
1065 BT_PER_ADV_SYNC_SYNCING_USE_LIST)) {
				/* Update the addr and sid for the callback.
				 * They are already set when the sync list is not used.
				 */
1069 bt_addr_le_copy(&pending_per_adv_sync->addr,
1070 &id_addr);
1071 pending_per_adv_sync->sid = evt->sid;
1072 }
1073
1074 per_adv_sync_terminated(pending_per_adv_sync, reason);
1075 }
1076 return;
1077 }
1078
1079 pending_per_adv_sync->report_truncated = false;
1080
1081 atomic_set_bit(pending_per_adv_sync->flags, BT_PER_ADV_SYNC_SYNCED);
1082
1083 pending_per_adv_sync->handle = sys_le16_to_cpu(evt->handle);
1084 pending_per_adv_sync->interval = sys_le16_to_cpu(evt->interval);
1085 pending_per_adv_sync->clock_accuracy =
1086 sys_le16_to_cpu(evt->clock_accuracy);
1087 pending_per_adv_sync->phy = bt_get_phy(evt->phy);
1088
1089 memset(&sync_info, 0, sizeof(sync_info));
1090 sync_info.interval = pending_per_adv_sync->interval;
1091 sync_info.phy = pending_per_adv_sync->phy;
1092
1093 if (atomic_test_bit(pending_per_adv_sync->flags,
1094 BT_PER_ADV_SYNC_SYNCING_USE_LIST)) {
1095 /* Now we know which address and SID we synchronized to. */
1096 pending_per_adv_sync->sid = evt->sid;
1097
1098 if (bt_addr_le_is_resolved(&pending_per_adv_sync->addr)) {
1099 bt_addr_le_copy_resolved(&pending_per_adv_sync->addr,
1100 &id_addr);
1101 } else {
1102 bt_addr_le_copy(&pending_per_adv_sync->addr, &id_addr);
1103 }
1104 }
1105
1106 sync_info.addr = &pending_per_adv_sync->addr;
1107 sync_info.sid = pending_per_adv_sync->sid;
1108 #if defined(CONFIG_BT_PER_ADV_SYNC_RSP)
1109 sync_info.num_subevents = evt->num_subevents;
1110 sync_info.subevent_interval = evt->subevent_interval;
1111 sync_info.response_slot_delay = evt->response_slot_delay;
1112 sync_info.response_slot_spacing = evt->response_slot_spacing;
1113
1114 pending_per_adv_sync->num_subevents = evt->num_subevents;
1115 pending_per_adv_sync->subevent_interval = evt->subevent_interval;
1116 pending_per_adv_sync->response_slot_delay = evt->response_slot_delay;
1117 pending_per_adv_sync->response_slot_spacing = evt->response_slot_spacing;
1118 #endif /* CONFIG_BT_PER_ADV_SYNC_RSP */
1119
1120 sync_info.recv_enabled =
1121 !atomic_test_bit(pending_per_adv_sync->flags,
1122 BT_PER_ADV_SYNC_RECV_DISABLED);
1123
1124 SYS_SLIST_FOR_EACH_CONTAINER(&pa_sync_cbs, listener, node) {
1125 if (listener->synced) {
1126 listener->synced(pending_per_adv_sync, &sync_info);
1127 }
1128 }
1129 }
1130
void bt_hci_le_per_adv_sync_established(struct net_buf *buf)
1132 {
1133 if (IS_ENABLED(CONFIG_BT_PER_ADV_SYNC_RSP)) {
1134 LOG_ERR("The controller shall raise the latest unmasked version of the event");
1135
1136 return;
1137 }
1138
1139 bt_hci_le_per_adv_sync_established_common(buf);
1140 }
1141
1142 #if defined(CONFIG_BT_PER_ADV_SYNC_RSP)
int bt_le_per_adv_sync_subevent(struct bt_le_per_adv_sync *per_adv_sync,
				struct bt_le_per_adv_sync_subevent_params *params)
1145 {
1146 struct bt_hci_cp_le_set_pawr_sync_subevent *cp;
1147 struct net_buf *buf;
1148
1149 if (params->num_subevents > BT_HCI_PAWR_SUBEVENT_MAX) {
1150 return -EINVAL;
1151 }
1152
1153 buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_PER_ADV_SYNC_SUBEVENT,
1154 sizeof(*cp) + params->num_subevents);
1155
1156 if (!buf) {
1157 return -ENOBUFS;
1158 }
1159
1160 cp = net_buf_add(buf, sizeof(*cp));
1161 (void)memset(cp, 0, sizeof(*cp));
1162 cp->sync_handle = sys_cpu_to_le16(per_adv_sync->handle);
1163 cp->periodic_adv_properties = sys_cpu_to_le16(params->properties);
1164 cp->num_subevents = params->num_subevents;
1165 net_buf_add_mem(buf, params->subevents, cp->num_subevents);
1166
1167 return bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_PER_ADV_SYNC_SUBEVENT, buf, NULL);
1168 }
1169
int bt_le_per_adv_set_response_data(struct bt_le_per_adv_sync *per_adv_sync,
				    const struct bt_le_per_adv_response_params *param,
				    const struct net_buf_simple *data)
1173 {
1174 struct bt_hci_cp_le_set_pawr_response_data *cp;
1175 struct net_buf *buf;
1176
1177 if (per_adv_sync->num_subevents == 0) {
1178 return -EINVAL;
1179 }
1180
1181 if (param->request_subevent >= per_adv_sync->num_subevents) {
1182 return -EINVAL;
1183 }
1184
1185 if (param->response_subevent >= per_adv_sync->num_subevents) {
1186 return -EINVAL;
1187 }
1188
1189 if (data->len > 247) {
1190 return -EINVAL;
1191 }
1192
1193 buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_PER_ADV_RESPONSE_DATA,
1194 sizeof(*cp) + data->len);
1195
1196 if (!buf) {
1197 return -ENOBUFS;
1198 }
1199
1200 cp = net_buf_add(buf, sizeof(*cp));
1201 (void)memset(cp, 0, sizeof(*cp));
1202 cp->sync_handle = sys_cpu_to_le16(per_adv_sync->handle);
1203 cp->request_event = sys_cpu_to_le16(param->request_event);
1204 cp->request_subevent = param->request_subevent;
1205 cp->response_subevent = param->response_subevent;
1206 cp->response_slot = param->response_slot;
1207 cp->response_data_length = data->len;
1208
1209 net_buf_add_mem(buf, data->data, cp->response_data_length);
1210
1211 return bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_PER_ADV_RESPONSE_DATA, buf, NULL);
1212 }
1213 #endif /* CONFIG_BT_PER_ADV_SYNC_RSP */
1214
void bt_hci_le_per_adv_sync_lost(struct net_buf *buf)
1216 {
1217 struct bt_hci_evt_le_per_adv_sync_lost *evt =
1218 (struct bt_hci_evt_le_per_adv_sync_lost *)buf->data;
1219 struct bt_le_per_adv_sync *per_adv_sync;
1220
1221 per_adv_sync = bt_hci_per_adv_sync_lookup_handle(sys_le16_to_cpu(evt->handle));
1222
1223 if (!per_adv_sync) {
1224 LOG_ERR("Unknown handle 0x%04Xfor periodic adv sync lost",
1225 sys_le16_to_cpu(evt->handle));
1226 return;
1227 }
1228
1229 /* There is no status in the per. adv. sync lost event */
1230 per_adv_sync_terminated(per_adv_sync, BT_HCI_ERR_UNSPECIFIED);
1231 }
1232
1233 #if defined(CONFIG_BT_PER_ADV_SYNC_TRANSFER_RECEIVER)
1234 static uint8_t conn_past_modes[CONFIG_BT_MAX_CONN];
1235 static uint8_t default_past_mode;
1236
static void past_disconnected_cb(struct bt_conn *conn, uint8_t reason)
1238 {
	/* The Core Specification does not explicitly state that the mode of a
	 * connection handle is cleared on disconnect, but let's assume it is.
	 */
1242 conn_past_modes[bt_conn_index(conn)] = BT_HCI_LE_PAST_MODE_NO_SYNC;
1243 }
1244
1245 BT_CONN_CB_DEFINE(past_conn_callbacks) = {
1246 .disconnected = past_disconnected_cb,
1247 };
1248
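/* Common handler for the v1 and v2 LE PAST Received events: allocate a sync
 * object for the transferred periodic advertising train and notify the
 * registered listeners that it is synced.
 */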
static void bt_hci_le_past_received_common(struct net_buf *buf)
1250 {
1251 #if defined(CONFIG_BT_PER_ADV_SYNC_RSP)
1252 struct bt_hci_evt_le_past_received_v2 *evt =
1253 (struct bt_hci_evt_le_past_received_v2 *)buf->data;
1254 #else
1255 struct bt_hci_evt_le_past_received *evt =
1256 (struct bt_hci_evt_le_past_received *)buf->data;
1257 #endif /* defined(CONFIG_BT_PER_ADV_SYNC_RSP) */
1258
1259 struct bt_le_per_adv_sync_synced_info sync_info;
1260 struct bt_le_per_adv_sync_cb *listener;
1261 struct bt_le_per_adv_sync *per_adv_sync;
1262 bt_addr_le_t id_addr;
1263
1264 if (evt->status) {
1265 /* No sync created, don't notify app */
1266 LOG_DBG("PAST receive failed with status 0x%02X %s",
1267 evt->status, bt_hci_err_to_str(evt->status));
1268 return;
1269 }
1270
1271 sync_info.conn = bt_conn_lookup_handle(
1272 sys_le16_to_cpu(evt->conn_handle),
1273 BT_CONN_TYPE_LE);
1274
1275 if (!sync_info.conn) {
1276 LOG_ERR("Could not lookup connection handle from PAST");
1277 per_adv_sync_terminate(sys_le16_to_cpu(evt->sync_handle));
1278 return;
1279 }
1280
1281 per_adv_sync = per_adv_sync_new();
1282 if (!per_adv_sync) {
1283 LOG_WRN("Could not allocate new PA sync from PAST");
1284 per_adv_sync_terminate(sys_le16_to_cpu(evt->sync_handle));
1285 bt_conn_unref(sync_info.conn);
1286 return;
1287 }
1288
1289 atomic_set_bit(per_adv_sync->flags, BT_PER_ADV_SYNC_SYNCED);
1290
1291 if (bt_addr_le_is_resolved(&evt->addr)) {
1292 bt_addr_le_copy_resolved(&id_addr, &evt->addr);
1293 } else {
1294 bt_addr_le_copy(&id_addr,
1295 bt_lookup_id_addr(BT_ID_DEFAULT, &evt->addr));
1296 }
1297
1298 per_adv_sync->handle = sys_le16_to_cpu(evt->sync_handle);
1299 per_adv_sync->interval = sys_le16_to_cpu(evt->interval);
1300 per_adv_sync->clock_accuracy = sys_le16_to_cpu(evt->clock_accuracy);
1301 per_adv_sync->phy = bt_get_phy(evt->phy);
1302 bt_addr_le_copy(&per_adv_sync->addr, &id_addr);
1303 per_adv_sync->sid = evt->adv_sid;
1304
1305 #if defined(CONFIG_BT_PER_ADV_SYNC_RSP)
1306 per_adv_sync->num_subevents = evt->num_subevents;
1307 per_adv_sync->subevent_interval = evt->subevent_interval;
1308 per_adv_sync->response_slot_delay = evt->response_slot_delay;
1309 per_adv_sync->response_slot_spacing = evt->response_slot_spacing;
1310 #endif /* defined(CONFIG_BT_PER_ADV_SYNC_RSP) */
1311
1312 sync_info.interval = per_adv_sync->interval;
1313 sync_info.phy = per_adv_sync->phy;
1314 sync_info.addr = &per_adv_sync->addr;
1315 sync_info.sid = per_adv_sync->sid;
1316 sync_info.service_data = sys_le16_to_cpu(evt->service_data);
1317
1318 const uint8_t mode = conn_past_modes[bt_conn_index(sync_info.conn)];
1319
1320 if (mode == BT_HCI_LE_PAST_MODE_NO_SYNC) {
1321 /* Use the default parameter mode as the conn specific mode is not set */
1322 sync_info.recv_enabled =
1323 default_past_mode == BT_HCI_LE_PAST_MODE_SYNC ||
1324 default_past_mode == BT_HCI_LE_PAST_MODE_SYNC_FILTER_DUPLICATES;
1325 } else {
1326 sync_info.recv_enabled = mode == BT_HCI_LE_PAST_MODE_SYNC ||
1327 mode == BT_HCI_LE_PAST_MODE_SYNC_FILTER_DUPLICATES;
1328 }
1329
1330 #if defined(CONFIG_BT_PER_ADV_SYNC_RSP)
1331 sync_info.num_subevents = per_adv_sync->num_subevents;
1332 sync_info.subevent_interval = per_adv_sync->subevent_interval;
1333 sync_info.response_slot_delay = per_adv_sync->response_slot_delay;
1334 sync_info.response_slot_spacing = per_adv_sync->response_slot_spacing;
1335 #endif /* defined(CONFIG_BT_PER_ADV_SYNC_RSP) */
1336
1337 SYS_SLIST_FOR_EACH_CONTAINER(&pa_sync_cbs, listener, node) {
1338 if (listener->synced) {
1339 listener->synced(per_adv_sync, &sync_info);
1340 }
1341 }
1342
1343 bt_conn_unref(sync_info.conn);
1344 }
1345
void bt_hci_le_past_received(struct net_buf *buf)
1347 {
1348 if (IS_ENABLED(CONFIG_BT_PER_ADV_SYNC_RSP)) {
1349 LOG_ERR("The controller shall raise the latest unmasked version of the event");
1350
1351 return;
1352 }
1353
1354 bt_hci_le_past_received_common(buf);
1355 }
1356
1357 #if defined(CONFIG_BT_PER_ADV_SYNC_RSP)
void bt_hci_le_past_received_v2(struct net_buf *buf)
1359 {
1360 bt_hci_le_past_received_common(buf);
1361 }
1362 #endif /* CONFIG_BT_PER_ADV_SYNC_RSP */
1363 #endif /* CONFIG_BT_PER_ADV_SYNC_TRANSFER_RECEIVER */
1364
1365 #if defined(CONFIG_BT_PER_ADV_SYNC_RSP)
void bt_hci_le_per_adv_sync_established_v2(struct net_buf *buf)
1367 {
1368 bt_hci_le_per_adv_sync_established_common(buf);
1369 }
1370
void bt_hci_le_per_adv_report_v2(struct net_buf *buf)
1372 {
1373 bt_hci_le_per_adv_report_common(buf);
1374 }
1375 #endif /* CONFIG_BT_PER_ADV_SYNC_RSP */
1376
1377 #if defined(CONFIG_BT_ISO_BROADCAST)
void bt_hci_le_biginfo_adv_report(struct net_buf *buf)
1379 {
1380 struct bt_hci_evt_le_biginfo_adv_report *evt;
1381 struct bt_le_per_adv_sync *per_adv_sync;
1382 struct bt_le_per_adv_sync_cb *listener;
1383 struct bt_iso_biginfo biginfo;
1384
1385 evt = net_buf_pull_mem(buf, sizeof(*evt));
1386
1387 per_adv_sync = bt_hci_per_adv_sync_lookup_handle(sys_le16_to_cpu(evt->sync_handle));
1388
1389 if (!per_adv_sync) {
1390 LOG_ERR("Unknown handle 0x%04X for periodic advertising report",
1391 sys_le16_to_cpu(evt->sync_handle));
1392 return;
1393 }
1394
1395 biginfo.addr = &per_adv_sync->addr;
1396 biginfo.sid = per_adv_sync->sid;
1397 biginfo.num_bis = evt->num_bis;
1398 biginfo.sub_evt_count = evt->nse;
1399 biginfo.iso_interval = sys_le16_to_cpu(evt->iso_interval);
1400 biginfo.burst_number = evt->bn;
1401 biginfo.offset = evt->pto;
1402 biginfo.rep_count = evt->irc;
1403 biginfo.max_pdu = sys_le16_to_cpu(evt->max_pdu);
1404 biginfo.sdu_interval = sys_get_le24(evt->sdu_interval);
1405 biginfo.max_sdu = sys_le16_to_cpu(evt->max_sdu);
1406 biginfo.phy = bt_get_phy(evt->phy);
1407 biginfo.framing = evt->framing;
1408 biginfo.encryption = evt->encryption ? true : false;
1409
1410 SYS_SLIST_FOR_EACH_CONTAINER(&pa_sync_cbs, listener, node) {
1411 if (listener->biginfo) {
1412 listener->biginfo(per_adv_sync, &biginfo);
1413 }
1414 }
1415 }
1416 #endif /* CONFIG_BT_ISO_BROADCAST */
1417 #if defined(CONFIG_BT_DF_CONNECTIONLESS_CTE_RX)
static void bt_hci_le_df_connectionless_iq_report_common(uint8_t event, struct net_buf *buf)
1419 {
1420 int err;
1421
1422 struct bt_df_per_adv_sync_iq_samples_report cte_report;
1423 struct bt_le_per_adv_sync *per_adv_sync;
1424 struct bt_le_per_adv_sync_cb *listener;
1425
1426 if (event == BT_HCI_EVT_LE_CONNECTIONLESS_IQ_REPORT) {
1427 err = hci_df_prepare_connectionless_iq_report(buf, &cte_report, &per_adv_sync);
1428 if (err) {
1429 LOG_ERR("Prepare CTE conn IQ report failed %d", err);
1430 return;
1431 }
1432 } else if (IS_ENABLED(CONFIG_BT_DF_VS_CL_IQ_REPORT_16_BITS_IQ_SAMPLES) &&
1433 event == BT_HCI_EVT_VS_LE_CONNECTIONLESS_IQ_REPORT) {
1434 err = hci_df_vs_prepare_connectionless_iq_report(buf, &cte_report, &per_adv_sync);
1435 if (err) {
1436 LOG_ERR("Prepare CTE conn IQ report failed %d", err);
1437 return;
1438 }
1439 } else {
1440 LOG_ERR("Unhandled VS connectionless IQ report");
1441 return;
1442 }
1443
1444 SYS_SLIST_FOR_EACH_CONTAINER(&pa_sync_cbs, listener, node) {
1445 if (listener->cte_report_cb) {
1446 listener->cte_report_cb(per_adv_sync, &cte_report);
1447 }
1448 }
1449 }
1450
void bt_hci_le_df_connectionless_iq_report(struct net_buf *buf)
1452 {
1453 bt_hci_le_df_connectionless_iq_report_common(BT_HCI_EVT_LE_CONNECTIONLESS_IQ_REPORT, buf);
1454 }
1455
1456 #if defined(CONFIG_BT_DF_VS_CL_IQ_REPORT_16_BITS_IQ_SAMPLES)
void bt_hci_le_vs_df_connectionless_iq_report(struct net_buf *buf)
1458 {
1459 bt_hci_le_df_connectionless_iq_report_common(BT_HCI_EVT_VS_LE_CONNECTIONLESS_IQ_REPORT,
1460 buf);
1461 }
1462 #endif /* CONFIG_BT_DF_VS_CL_IQ_REPORT_16_BITS_IQ_SAMPLES */
1463 #endif /* CONFIG_BT_DF_CONNECTIONLESS_CTE_RX */
1464 #endif /* defined(CONFIG_BT_PER_ADV_SYNC) */
1465 #endif /* defined(CONFIG_BT_EXT_ADV) */
1466
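/* Handle the legacy HCI LE Advertising Report event and forward each report
 * to le_adv_recv() while explicit scanning is enabled.
 */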
void bt_hci_le_adv_report(struct net_buf *buf)
1468 {
1469 uint8_t num_reports = net_buf_pull_u8(buf);
1470 struct bt_hci_evt_le_advertising_info *evt;
1471
1472 LOG_DBG("Adv number of reports %u", num_reports);
1473
1474 while (num_reports--) {
1475 struct bt_le_scan_recv_info adv_info;
1476
1477 if (!atomic_test_bit(bt_dev.flags, BT_DEV_EXPLICIT_SCAN)) {
			/* The application has not requested explicit scan, so it is not
			 * expecting advertising reports. Discard them. This is done in the
			 * loop as the flag can change between iterations, and it is not
			 * uncommon for scanning to be disabled in the callback called from
			 * le_adv_recv().
			 */
1484
1485 break;
1486 }
1487
1488 if (buf->len < sizeof(*evt)) {
1489 LOG_ERR("Unexpected end of buffer");
1490 break;
1491 }
1492
1493 evt = net_buf_pull_mem(buf, sizeof(*evt));
1494
1495 if (buf->len < evt->length + sizeof(adv_info.rssi)) {
1496 LOG_ERR("Unexpected end of buffer");
1497 break;
1498 }
1499
1500 adv_info.primary_phy = BT_GAP_LE_PHY_1M;
1501 adv_info.secondary_phy = 0;
1502 adv_info.tx_power = BT_GAP_TX_POWER_INVALID;
1503 adv_info.rssi = evt->data[evt->length];
1504 adv_info.sid = BT_GAP_SID_INVALID;
1505 adv_info.interval = 0U;
1506
1507 adv_info.adv_type = evt->evt_type;
1508 adv_info.adv_props = get_adv_props_legacy(evt->evt_type);
1509
1510 le_adv_recv(&evt->addr, &adv_info, &buf->b, evt->length);
1511
1512 net_buf_pull(buf, evt->length + sizeof(adv_info.rssi));
1513 }
1514 }
1515
static bool valid_le_scan_param(const struct bt_le_scan_param *param)
1517 {
1518 if (IS_ENABLED(CONFIG_BT_PRIVACY) &&
1519 param->type == BT_LE_SCAN_TYPE_ACTIVE &&
1520 param->timeout != 0) {
1521 /* This is marked as not supported as a stopgap until the (scan,
1522 * adv, init) roles are reworked into proper state machines.
1523 *
1524 * Having proper state machines is necessary to be able to
1525 * suspend all roles that use the (resolvable) private address,
1526 * update the RPA and resume them again with the right
1527 * parameters.
1528 *
1529 * Else we lower the privacy of the device as either the RPA
1530 * update will fail or the scanner will not use the newly
1531 * generated RPA.
1532 */
1533 return false;
1534 }
1535
1536 if (param->type != BT_LE_SCAN_TYPE_PASSIVE &&
1537 param->type != BT_LE_SCAN_TYPE_ACTIVE) {
1538 return false;
1539 }
1540
1541 if (param->options & ~(BT_LE_SCAN_OPT_FILTER_DUPLICATE |
1542 BT_LE_SCAN_OPT_FILTER_ACCEPT_LIST |
1543 BT_LE_SCAN_OPT_CODED |
1544 BT_LE_SCAN_OPT_NO_1M)) {
1545 return false;
1546 }
1547
1548 if (param->interval < 0x0004 || param->interval > 0x4000) {
1549 return false;
1550 }
1551
1552 if (param->window < 0x0004 || param->window > 0x4000) {
1553 return false;
1554 }
1555
1556 if (param->window > param->interval) {
1557 return false;
1558 }
1559
1560 return true;
1561 }
1562
int bt_le_scan_start(const struct bt_le_scan_param *param, bt_le_scan_cb_t cb)
1564 {
1565 int err;
1566
1567 if (!atomic_test_bit(bt_dev.flags, BT_DEV_READY)) {
1568 return -EAGAIN;
1569 }
1570
1571 /* Check that the parameters have valid values */
1572 if (!valid_le_scan_param(param)) {
1573 return -EINVAL;
1574 }
1575
1576 if (param->type && !bt_id_scan_random_addr_check()) {
1577 return -EINVAL;
1578 }
1579
	/* Return if explicit scanning is already enabled */
1581 if (atomic_test_and_set_bit(bt_dev.flags, BT_DEV_EXPLICIT_SCAN)) {
1582 return -EALREADY;
1583 }
1584
1585 if (atomic_test_bit(bt_dev.flags, BT_DEV_SCANNING)) {
1586 err = bt_le_scan_set_enable(BT_HCI_LE_SCAN_DISABLE);
1587 if (err) {
1588 atomic_clear_bit(bt_dev.flags, BT_DEV_EXPLICIT_SCAN);
1589 return err;
1590 }
1591 }
1592
1593 atomic_set_bit_to(bt_dev.flags, BT_DEV_SCAN_FILTER_DUP,
1594 param->options & BT_LE_SCAN_OPT_FILTER_DUPLICATE);
1595
1596 #if defined(CONFIG_BT_FILTER_ACCEPT_LIST)
1597 atomic_set_bit_to(bt_dev.flags, BT_DEV_SCAN_FILTERED,
1598 param->options & BT_LE_SCAN_OPT_FILTER_ACCEPT_LIST);
1599 #endif /* defined(CONFIG_BT_FILTER_ACCEPT_LIST) */
1600
1601 if (IS_ENABLED(CONFIG_BT_EXT_ADV) &&
1602 BT_DEV_FEAT_LE_EXT_ADV(bt_dev.le.features)) {
1603 if (IS_ENABLED(CONFIG_BT_SCAN_AND_INITIATE_IN_PARALLEL) && param->timeout) {
1604 atomic_clear_bit(bt_dev.flags, BT_DEV_EXPLICIT_SCAN);
1605 return -ENOTSUP;
1606 }
1607
1608 struct bt_hci_ext_scan_phy param_1m;
1609 struct bt_hci_ext_scan_phy param_coded;
1610
1611 struct bt_hci_ext_scan_phy *phy_1m = NULL;
1612 struct bt_hci_ext_scan_phy *phy_coded = NULL;
1613
1614 if (!(param->options & BT_LE_SCAN_OPT_NO_1M)) {
1615 param_1m.type = param->type;
1616 param_1m.interval = sys_cpu_to_le16(param->interval);
1617 param_1m.window = sys_cpu_to_le16(param->window);
1618
			phy_1m = &param_1m;
1620 }
1621
1622 if (param->options & BT_LE_SCAN_OPT_CODED) {
1623 uint16_t interval = param->interval_coded ?
1624 param->interval_coded :
1625 param->interval;
1626 uint16_t window = param->window_coded ?
1627 param->window_coded :
1628 param->window;
1629
1630 param_coded.type = param->type;
1631 param_coded.interval = sys_cpu_to_le16(interval);
1632 param_coded.window = sys_cpu_to_le16(window);
			phy_coded = &param_coded;
1634 }
1635
1636 err = start_le_scan_ext(phy_1m, phy_coded, param->timeout);
1637 } else {
1638 if (param->timeout) {
1639 atomic_clear_bit(bt_dev.flags, BT_DEV_EXPLICIT_SCAN);
1640 return -ENOTSUP;
1641 }
1642
1643 err = start_le_scan_legacy(param->type, param->interval,
1644 param->window);
1645 }
1646
1647 if (err) {
1648 atomic_clear_bit(bt_dev.flags, BT_DEV_EXPLICIT_SCAN);
1649 return err;
1650 }
1651
1652 scan_dev_found_cb = cb;
1653
1654 return 0;
1655 }
1656
int bt_le_scan_stop(void)
1658 {
	/* Return if explicit scanning is already disabled */
1660 if (!atomic_test_and_clear_bit(bt_dev.flags, BT_DEV_EXPLICIT_SCAN)) {
1661 return -EALREADY;
1662 }
1663
1664 bt_scan_reset();
1665
1666 if (IS_ENABLED(CONFIG_BT_EXT_ADV) &&
1667 atomic_test_and_clear_bit(bt_dev.flags, BT_DEV_SCAN_LIMITED)) {
1668 atomic_clear_bit(bt_dev.flags, BT_DEV_RPA_VALID);
1669
1670 #if defined(CONFIG_BT_SMP)
1671 bt_id_pending_keys_update();
1672 #endif
1673 }
1674
1675 return bt_le_scan_update(false);
1676 }
1677
int bt_le_scan_cb_register(struct bt_le_scan_cb *cb)
1679 {
1680 if (sys_slist_find(&scan_cbs, &cb->node, NULL)) {
1681 return -EEXIST;
1682 }
1683
1684 sys_slist_append(&scan_cbs, &cb->node);
1685
1686 return 0;
1687 }
1688
void bt_le_scan_cb_unregister(struct bt_le_scan_cb *cb)
1690 {
1691 sys_slist_find_and_remove(&scan_cbs, &cb->node);
1692 }
1693
1694 #if defined(CONFIG_BT_PER_ADV_SYNC)
uint8_t bt_le_per_adv_sync_get_index(struct bt_le_per_adv_sync *per_adv_sync)
1696 {
1697 ptrdiff_t index = per_adv_sync - per_adv_sync_pool;
1698
1699 __ASSERT(index >= 0 && ARRAY_SIZE(per_adv_sync_pool) > index,
1700 "Invalid per_adv_sync pointer");
1701 return (uint8_t)index;
1702 }
1703
struct bt_le_per_adv_sync *bt_le_per_adv_sync_lookup_index(uint8_t index)
1705 {
1706 if (index >= ARRAY_SIZE(per_adv_sync_pool)) {
1707 return NULL;
1708 }
1709
1710 return &per_adv_sync_pool[index];
1711 }
1712
int bt_le_per_adv_sync_get_info(struct bt_le_per_adv_sync *per_adv_sync,
				struct bt_le_per_adv_sync_info *info)
1715 {
1716 CHECKIF(per_adv_sync == NULL || info == NULL) {
1717 return -EINVAL;
1718 }
1719
1720 bt_addr_le_copy(&info->addr, &per_adv_sync->addr);
1721 info->sid = per_adv_sync->sid;
1722 info->phy = per_adv_sync->phy;
1723 info->interval = per_adv_sync->interval;
1724
1725 return 0;
1726 }
1727
1728 struct bt_le_per_adv_sync *bt_le_per_adv_sync_lookup_addr(const bt_addr_le_t *adv_addr,
1729 uint8_t sid)
1730 {
1731 for (int i = 0; i < ARRAY_SIZE(per_adv_sync_pool); i++) {
1732 struct bt_le_per_adv_sync *sync = &per_adv_sync_pool[i];
1733
1734 if (!atomic_test_bit(per_adv_sync_pool[i].flags,
1735 BT_PER_ADV_SYNC_CREATED)) {
1736 continue;
1737 }
1738
1739 if (bt_addr_le_eq(&sync->addr, adv_addr) && sync->sid == sid) {
1740 return sync;
1741 }
1742 }
1743
1744 return NULL;
1745 }
1746
1747 int bt_le_per_adv_sync_create(const struct bt_le_per_adv_sync_param *param,
1748 struct bt_le_per_adv_sync **out_sync)
1749 {
1750 struct bt_hci_cp_le_per_adv_create_sync *cp;
1751 struct net_buf *buf;
1752 struct bt_le_per_adv_sync *per_adv_sync;
1753 int err;
1754
1755 if (!BT_FEAT_LE_EXT_PER_ADV(bt_dev.le.features)) {
1756 return -ENOTSUP;
1757 }
1758
1759 if (get_pending_per_adv_sync()) {
1760 return -EBUSY;
1761 }
1762
1763 if (param->sid > BT_GAP_SID_MAX ||
1764 param->skip > BT_GAP_PER_ADV_MAX_SKIP ||
1765 param->timeout > BT_GAP_PER_ADV_MAX_TIMEOUT ||
1766 param->timeout < BT_GAP_PER_ADV_MIN_TIMEOUT) {
1767 return -EINVAL;
1768 }
1769
1770 per_adv_sync = per_adv_sync_new();
1771 if (!per_adv_sync) {
1772 return -ENOMEM;
1773 }
1774
1775 buf = bt_hci_cmd_create(BT_HCI_OP_LE_PER_ADV_CREATE_SYNC, sizeof(*cp));
1776 if (!buf) {
1777 per_adv_sync_delete(per_adv_sync);
1778 return -ENOBUFS;
1779 }
1780
1781 cp = net_buf_add(buf, sizeof(*cp));
1782 (void)memset(cp, 0, sizeof(*cp));
1783
1784 if (param->options & BT_LE_PER_ADV_SYNC_OPT_USE_PER_ADV_LIST) {
1785 atomic_set_bit(per_adv_sync->flags,
1786 BT_PER_ADV_SYNC_SYNCING_USE_LIST);
1787
1788 cp->options |= BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_USE_LIST;
1789 } else {
1790 /* If BT_LE_PER_ADV_SYNC_OPT_USE_PER_ADV_LIST is set, the
1791 * address and SID are ignored by the controller, so we only
1792 * copy them when the periodic advertising list is not
1793 * used.
1794 */
1795 bt_addr_le_copy(&cp->addr, &param->addr);
1796 cp->sid = param->sid;
1797 }
1798
1799 if (param->options &
1800 BT_LE_PER_ADV_SYNC_OPT_REPORTING_INITIALLY_DISABLED) {
1801 cp->options |=
1802 BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_REPORTS_DISABLED;
1803
1804 atomic_set_bit(per_adv_sync->flags,
1805 BT_PER_ADV_SYNC_RECV_DISABLED);
1806 }
1807
1808 if (param->options & BT_LE_PER_ADV_SYNC_OPT_FILTER_DUPLICATE) {
1809 cp->options |=
1810 BT_HCI_LE_PER_ADV_CREATE_SYNC_FP_FILTER_DUPLICATE;
1811 }
1812
1813 if (param->options & BT_LE_PER_ADV_SYNC_OPT_DONT_SYNC_AOA) {
1814 cp->cte_type |= BT_HCI_LE_PER_ADV_CREATE_SYNC_CTE_TYPE_NO_AOA;
1815 }
1816
1817 if (param->options & BT_LE_PER_ADV_SYNC_OPT_DONT_SYNC_AOD_1US) {
1818 cp->cte_type |=
1819 BT_HCI_LE_PER_ADV_CREATE_SYNC_CTE_TYPE_NO_AOD_1US;
1820 }
1821
1822 if (param->options & BT_LE_PER_ADV_SYNC_OPT_DONT_SYNC_AOD_2US) {
1823 cp->cte_type |=
1824 BT_HCI_LE_PER_ADV_CREATE_SYNC_CTE_TYPE_NO_AOD_2US;
1825 }
1826
1827 if (param->options & BT_LE_PER_ADV_SYNC_OPT_SYNC_ONLY_CONST_TONE_EXT) {
1828 cp->cte_type |= BT_HCI_LE_PER_ADV_CREATE_SYNC_CTE_TYPE_ONLY_CTE;
1829 }
1830
1831 cp->skip = sys_cpu_to_le16(param->skip);
1832 cp->sync_timeout = sys_cpu_to_le16(param->timeout);
1833
1834 err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_PER_ADV_CREATE_SYNC, buf, NULL);
1835 if (err) {
1836 per_adv_sync_delete(per_adv_sync);
1837 return err;
1838 }
1839
1840 atomic_set_bit(per_adv_sync->flags, BT_PER_ADV_SYNC_SYNCING);
1841
1842 /* Syncing requires that scan is enabled. If the caller doesn't enable
1843 * scan first, we enable it here, and disable it once the sync has been
1844 * established. We don't need to use any callbacks since we rely on
1845 * the advertiser address in the sync params.
1846 */
1847 if (!atomic_test_bit(bt_dev.flags, BT_DEV_SCANNING)) {
1848 err = bt_le_scan_update(true);
1849
1850 if (err) {
1851 bt_le_per_adv_sync_delete(per_adv_sync);
1852 return err;
1853 }
1854 }
1855
1856 *out_sync = per_adv_sync;
1857 bt_addr_le_copy(&per_adv_sync->addr, &param->addr);
1858 per_adv_sync->sid = param->sid;
1859
1860 return 0;
1861 }
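
/* Illustrative sketch (assumed application code): creating a sync object for
 * an advertiser found through scanning. The 'info' pointer is assumed to
 * come from a scan recv callback; the timeout is an arbitrary example in
 * units of 10 ms.
 *
 *	struct bt_le_per_adv_sync *sync;
 *	struct bt_le_per_adv_sync_param sync_param = {
 *		.options = BT_LE_PER_ADV_SYNC_OPT_NONE,
 *		.sid     = info->sid,
 *		.skip    = 0,
 *		.timeout = 100,
 *	};
 *
 *	bt_addr_le_copy(&sync_param.addr, info->addr);
 *	int err = bt_le_per_adv_sync_create(&sync_param, &sync);
 */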
1862
1863 static int bt_le_per_adv_sync_create_cancel(
1864 struct bt_le_per_adv_sync *per_adv_sync)
1865 {
1866 struct net_buf *buf;
1867 int err;
1868
1869 if (get_pending_per_adv_sync() != per_adv_sync) {
1870 return -EINVAL;
1871 }
1872
1873 buf = bt_hci_cmd_create(BT_HCI_OP_LE_PER_ADV_CREATE_SYNC_CANCEL, 0);
1874 if (!buf) {
1875 return -ENOBUFS;
1876 }
1877
1878 err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_PER_ADV_CREATE_SYNC_CANCEL, buf,
1879 NULL);
1880 if (err) {
1881 return err;
1882 }
1883
1884 return 0;
1885 }
1886
1887 static int bt_le_per_adv_sync_terminate(struct bt_le_per_adv_sync *per_adv_sync)
1888 {
1889 int err;
1890
1891 if (!atomic_test_bit(per_adv_sync->flags, BT_PER_ADV_SYNC_SYNCED)) {
1892 return -EINVAL;
1893 }
1894
1895 err = per_adv_sync_terminate(per_adv_sync->handle);
1896
1897 if (err) {
1898 return err;
1899 }
1900
1901 return 0;
1902 }
1903
1904 int bt_le_per_adv_sync_delete(struct bt_le_per_adv_sync *per_adv_sync)
1905 {
1906 int err = 0;
1907
1908 if (!BT_FEAT_LE_EXT_PER_ADV(bt_dev.le.features)) {
1909 return -ENOTSUP;
1910 }
1911
1912 if (atomic_test_bit(per_adv_sync->flags, BT_PER_ADV_SYNC_SYNCED)) {
1913 err = bt_le_per_adv_sync_terminate(per_adv_sync);
1914
1915 if (!err) {
1916 per_adv_sync_terminated(per_adv_sync,
1917 BT_HCI_ERR_LOCALHOST_TERM_CONN);
1918 }
1919 } else if (get_pending_per_adv_sync() == per_adv_sync) {
1920 err = bt_le_per_adv_sync_create_cancel(per_adv_sync);
1921 /* Deletion of the per_adv_sync will be done in the event
1922 * handler when the cancel operation completes.
1923 */
1924 }
1925
1926 return err;
1927 }
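
/* Illustrative sketch (assumed application code): one call covers both
 * states handled above, terminating an established sync or cancelling one
 * that is still pending.
 *
 *	int err = bt_le_per_adv_sync_delete(sync);
 */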
1928
1929 int bt_le_per_adv_sync_cb_register(struct bt_le_per_adv_sync_cb *cb)
1930 {
1931 if (sys_slist_find(&pa_sync_cbs, &cb->node, NULL)) {
1932 return -EEXIST;
1933 }
1934
1935 sys_slist_append(&pa_sync_cbs, &cb->node);
1936
1937 return 0;
1938 }
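
/* Illustrative sketch (assumed application code): registering periodic
 * advertising sync callbacks. The callback signatures follow the public
 * struct bt_le_per_adv_sync_cb definition.
 *
 *	static void pa_synced(struct bt_le_per_adv_sync *sync,
 *			      struct bt_le_per_adv_sync_synced_info *info)
 *	{
 *		printk("PA sync established (interval 0x%04x)\n", info->interval);
 *	}
 *
 *	static void pa_term(struct bt_le_per_adv_sync *sync,
 *			    const struct bt_le_per_adv_sync_term_info *info)
 *	{
 *		printk("PA sync terminated (reason 0x%02x)\n", info->reason);
 *	}
 *
 *	static struct bt_le_per_adv_sync_cb pa_cb = {
 *		.synced = pa_synced,
 *		.term   = pa_term,
 *	};
 *
 *	bt_le_per_adv_sync_cb_register(&pa_cb);
 */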
1939
1940 static int bt_le_set_per_adv_recv_enable(
1941 struct bt_le_per_adv_sync *per_adv_sync, bool enable)
1942 {
1943 struct bt_hci_cp_le_set_per_adv_recv_enable *cp;
1944 struct bt_le_per_adv_sync_cb *listener;
1945 struct bt_le_per_adv_sync_state_info info;
1946 struct net_buf *buf;
1947 struct bt_hci_cmd_state_set state;
1948 int err;
1949
1950 if (!atomic_test_bit(bt_dev.flags, BT_DEV_READY)) {
1951 return -EAGAIN;
1952 }
1953
1954 if (!BT_FEAT_LE_EXT_PER_ADV(bt_dev.le.features)) {
1955 return -ENOTSUP;
1956 }
1957
1958 if (!atomic_test_bit(per_adv_sync->flags, BT_PER_ADV_SYNC_SYNCED)) {
1959 return -EINVAL;
1960 }
1961
1962 if ((enable && !atomic_test_bit(per_adv_sync->flags,
1963 BT_PER_ADV_SYNC_RECV_DISABLED)) ||
1964 (!enable && atomic_test_bit(per_adv_sync->flags,
1965 BT_PER_ADV_SYNC_RECV_DISABLED))) {
1966 return -EALREADY;
1967 }
1968
1969 buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_PER_ADV_RECV_ENABLE,
1970 sizeof(*cp));
1971 if (!buf) {
1972 return -ENOBUFS;
1973 }
1974
1975 cp = net_buf_add(buf, sizeof(*cp));
1976 (void)memset(cp, 0, sizeof(*cp));
1977
1978 cp->handle = sys_cpu_to_le16(per_adv_sync->handle);
1979 cp->enable = enable ? 1 : 0;
1980
1981 bt_hci_cmd_state_set_init(buf, &state, per_adv_sync->flags,
1982 BT_PER_ADV_SYNC_RECV_DISABLED, !enable);
1983
1984 err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_PER_ADV_RECV_ENABLE,
1985 buf, NULL);
1986
1987 if (err) {
1988 return err;
1989 }
1990
1991 info.recv_enabled = !atomic_test_bit(per_adv_sync->flags,
1992 BT_PER_ADV_SYNC_RECV_DISABLED);
1993
1994 SYS_SLIST_FOR_EACH_CONTAINER(&pa_sync_cbs, listener, node) {
1995 if (listener->state_changed) {
1996 listener->state_changed(per_adv_sync, &info);
1997 }
1998 }
1999
2000 return 0;
2001 }
2002
2003 int bt_le_per_adv_sync_recv_enable(struct bt_le_per_adv_sync *per_adv_sync)
2004 {
2005 return bt_le_set_per_adv_recv_enable(per_adv_sync, true);
2006 }
2007
2008 int bt_le_per_adv_sync_recv_disable(struct bt_le_per_adv_sync *per_adv_sync)
2009 {
2010 return bt_le_set_per_adv_recv_enable(per_adv_sync, false);
2011 }
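
/* Illustrative sketch: when a sync was created with
 * BT_LE_PER_ADV_SYNC_OPT_REPORTING_INITIALLY_DISABLED, reports can be turned
 * on later, for example from the synced callback (assumed application code):
 *
 *	int err = bt_le_per_adv_sync_recv_enable(sync);
 *
 * bt_le_per_adv_sync_recv_disable() reverses this without losing the sync.
 */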
2012
2013 #if defined(CONFIG_BT_PER_ADV_SYNC_TRANSFER_SENDER)
2014 int bt_le_per_adv_sync_transfer(const struct bt_le_per_adv_sync *per_adv_sync,
2015 const struct bt_conn *conn,
2016 uint16_t service_data)
2017 {
2018 struct bt_hci_cp_le_per_adv_sync_transfer *cp;
2019 struct net_buf *buf;
2020
2021
2022 if (!BT_FEAT_LE_EXT_PER_ADV(bt_dev.le.features)) {
2023 return -ENOTSUP;
2024 } else if (!BT_FEAT_LE_PAST_SEND(bt_dev.le.features)) {
2025 return -ENOTSUP;
2026 }
2027
2028 buf = bt_hci_cmd_create(BT_HCI_OP_LE_PER_ADV_SYNC_TRANSFER,
2029 sizeof(*cp));
2030 if (!buf) {
2031 return -ENOBUFS;
2032 }
2033
2034 cp = net_buf_add(buf, sizeof(*cp));
2035 (void)memset(cp, 0, sizeof(*cp));
2036
2037 cp->conn_handle = sys_cpu_to_le16(conn->handle);
2038 cp->sync_handle = sys_cpu_to_le16(per_adv_sync->handle);
2039 cp->service_data = sys_cpu_to_le16(service_data);
2040
2041 return bt_hci_cmd_send_sync(BT_HCI_OP_LE_PER_ADV_SYNC_TRANSFER, buf,
2042 NULL);
2043 }
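
/* Illustrative sketch (assumed application code): forwarding an established
 * sync to a connected peer over PAST. 'sync' and 'conn' are assumed valid;
 * the service data value is application defined.
 *
 *	int err = bt_le_per_adv_sync_transfer(sync, conn, 0x0001);
 *
 * The peer only makes use of the transfer if it has subscribed, e.g. via
 * bt_le_per_adv_sync_transfer_subscribe() on its side.
 */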
2044 #endif /* CONFIG_BT_PER_ADV_SYNC_TRANSFER_SENDER */
2045
2046 #if defined(CONFIG_BT_PER_ADV_SYNC_TRANSFER_RECEIVER)
2047 static bool valid_past_param(
2048 const struct bt_le_per_adv_sync_transfer_param *param)
2049 {
2050 if (param->skip > 0x01f3 ||
2051 param->timeout < 0x000A ||
2052 param->timeout > 0x4000) {
2053 return false;
2054 }
2055 if ((param->options & BT_LE_PER_ADV_SYNC_TRANSFER_OPT_REPORTING_INITIALLY_DISABLED) &&
2056 (param->options & BT_LE_PER_ADV_SYNC_TRANSFER_OPT_FILTER_DUPLICATES)) {
2057 return false;
2058 }
2059
2060 return true;
2061 }
2062
2063 static int past_param_set(const struct bt_conn *conn, uint8_t mode,
2064 uint16_t skip, uint16_t timeout, uint8_t cte_type)
2065 {
2066 struct bt_hci_cp_le_past_param *cp;
2067 struct net_buf *buf;
2068
2069 buf = bt_hci_cmd_create(BT_HCI_OP_LE_PAST_PARAM, sizeof(*cp));
2070 if (!buf) {
2071 return -ENOBUFS;
2072 }
2073
2074 cp = net_buf_add(buf, sizeof(*cp));
2075 (void)memset(cp, 0, sizeof(*cp));
2076
2077 cp->conn_handle = sys_cpu_to_le16(conn->handle);
2078 cp->mode = mode;
2079 cp->skip = sys_cpu_to_le16(skip);
2080 cp->timeout = sys_cpu_to_le16(timeout);
2081 cp->cte_type = cte_type;
2082
2083 return bt_hci_cmd_send_sync(BT_HCI_OP_LE_PAST_PARAM, buf, NULL);
2084 }
2085
2086 static int default_past_param_set(uint8_t mode, uint16_t skip, uint16_t timeout,
2087 uint8_t cte_type)
2088 {
2089 struct bt_hci_cp_le_default_past_param *cp;
2090 struct net_buf *buf;
2091
2092 buf = bt_hci_cmd_create(BT_HCI_OP_LE_DEFAULT_PAST_PARAM, sizeof(*cp));
2093 if (!buf) {
2094 return -ENOBUFS;
2095 }
2096
2097 cp = net_buf_add(buf, sizeof(*cp));
2098 (void)memset(cp, 0, sizeof(*cp));
2099
2100 cp->mode = mode;
2101 cp->skip = sys_cpu_to_le16(skip);
2102 cp->timeout = sys_cpu_to_le16(timeout);
2103 cp->cte_type = cte_type;
2104
2105 return bt_hci_cmd_send_sync(BT_HCI_OP_LE_DEFAULT_PAST_PARAM, buf, NULL);
2106 }
2107
2108 int bt_le_per_adv_sync_transfer_subscribe(
2109 const struct bt_conn *conn,
2110 const struct bt_le_per_adv_sync_transfer_param *param)
2111 {
2112 uint8_t cte_type = 0;
2113 uint8_t mode = BT_HCI_LE_PAST_MODE_SYNC;
2114 int err;
2115
2116 if (!BT_FEAT_LE_EXT_PER_ADV(bt_dev.le.features)) {
2117 return -ENOTSUP;
2118 } else if (!BT_FEAT_LE_PAST_RECV(bt_dev.le.features)) {
2119 return -ENOTSUP;
2120 }
2121
2122 if (!valid_past_param(param)) {
2123 return -EINVAL;
2124 }
2125
2126 if (param->options & BT_LE_PER_ADV_SYNC_TRANSFER_OPT_SYNC_NO_AOA) {
2127 cte_type |= BT_HCI_LE_PAST_CTE_TYPE_NO_AOA;
2128 }
2129
2130 if (param->options & BT_LE_PER_ADV_SYNC_TRANSFER_OPT_SYNC_NO_AOD_1US) {
2131 cte_type |= BT_HCI_LE_PAST_CTE_TYPE_NO_AOD_1US;
2132 }
2133
2134 if (param->options & BT_LE_PER_ADV_SYNC_TRANSFER_OPT_SYNC_NO_AOD_2US) {
2135 cte_type |= BT_HCI_LE_PAST_CTE_TYPE_NO_AOD_2US;
2136 }
2137
2138 if (param->options & BT_LE_PER_ADV_SYNC_TRANSFER_OPT_SYNC_ONLY_CTE) {
2139 cte_type |= BT_HCI_LE_PAST_CTE_TYPE_ONLY_CTE;
2140 }
2141
2142 if (param->options & BT_LE_PER_ADV_SYNC_TRANSFER_OPT_REPORTING_INITIALLY_DISABLED) {
2143 mode = BT_HCI_LE_PAST_MODE_NO_REPORTS;
2144 } else if (param->options & BT_LE_PER_ADV_SYNC_TRANSFER_OPT_FILTER_DUPLICATES) {
2145 mode = BT_HCI_LE_PAST_MODE_SYNC_FILTER_DUPLICATES;
2146 }
2147
2148 if (conn) {
2149 const uint8_t conn_idx = bt_conn_index(conn);
2150 const uint8_t old_mode = conn_past_modes[conn_idx];
2151
2152 conn_past_modes[conn_idx] = mode;
2153
2154 err = past_param_set(conn, mode, param->skip, param->timeout, cte_type);
2155 if (err != 0) {
2156 /* Restore old mode */
2157 conn_past_modes[conn_idx] = old_mode;
2158 }
2159 } else {
2160 const uint8_t old_mode = default_past_mode;
2161
2162 default_past_mode = mode;
2163
2164 err = default_past_param_set(mode, param->skip, param->timeout, cte_type);
2165 if (err != 0) {
2166 /* Restore old mode */
2167 default_past_mode = old_mode;
2168 }
2169 }
2170
2171 return err;
2172 }
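
/* Illustrative sketch (assumed application code): subscribing to PAST on a
 * single connection. Passing NULL instead of 'conn' sets the default
 * parameters applied to future connections; the values here are examples
 * only (timeout in units of 10 ms).
 *
 *	struct bt_le_per_adv_sync_transfer_param past_param = {
 *		.skip    = 0,
 *		.timeout = 100,
 *		.options = BT_LE_PER_ADV_SYNC_TRANSFER_OPT_NONE,
 *	};
 *
 *	int err = bt_le_per_adv_sync_transfer_subscribe(conn, &past_param);
 */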
2173
2174 int bt_le_per_adv_sync_transfer_unsubscribe(const struct bt_conn *conn)
2175 {
2176 int err;
2177
2178 if (!BT_FEAT_LE_EXT_PER_ADV(bt_dev.le.features)) {
2179 return -ENOTSUP;
2180 } else if (!BT_FEAT_LE_PAST_RECV(bt_dev.le.features)) {
2181 return -ENOTSUP;
2182 }
2183
2184 if (conn) {
2185 const uint8_t conn_idx = bt_conn_index(conn);
2186 const uint8_t old_mode = conn_past_modes[conn_idx];
2187
2188 conn_past_modes[conn_idx] = BT_HCI_LE_PAST_MODE_NO_SYNC;
2189
2190 err = past_param_set(conn, BT_HCI_LE_PAST_MODE_NO_SYNC, 0, 0x0a, 0);
2191 if (err != 0) {
2192 /* Restore old mode */
2193 conn_past_modes[conn_idx] = old_mode;
2194 }
2195 } else {
2196 const uint8_t old_mode = default_past_mode;
2197
2198 default_past_mode = BT_HCI_LE_PAST_MODE_NO_SYNC;
2199 err = default_past_param_set(BT_HCI_LE_PAST_MODE_NO_SYNC, 0, 0x0a, 0);
2200 if (err != 0) {
2201 /* Restore old mode */
2202 default_past_mode = old_mode;
2203 }
2204 }
2205
2206 return err;
2207 }
2208 #endif /* CONFIG_BT_PER_ADV_SYNC_TRANSFER_RECEIVER */
2209
2210 int bt_le_per_adv_list_add(const bt_addr_le_t *addr, uint8_t sid)
2211 {
2212 struct bt_hci_cp_le_add_dev_to_per_adv_list *cp;
2213 struct net_buf *buf;
2214 int err;
2215
2216 if (!atomic_test_bit(bt_dev.flags, BT_DEV_READY)) {
2217 return -EAGAIN;
2218 }
2219
2220 buf = bt_hci_cmd_create(BT_HCI_OP_LE_ADD_DEV_TO_PER_ADV_LIST,
2221 sizeof(*cp));
2222 if (!buf) {
2223 return -ENOBUFS;
2224 }
2225
2226 cp = net_buf_add(buf, sizeof(*cp));
2227 bt_addr_le_copy(&cp->addr, addr);
2228 cp->sid = sid;
2229
2230 err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_ADD_DEV_TO_PER_ADV_LIST, buf,
2231 NULL);
2232 if (err) {
2233 LOG_ERR("Failed to add device to periodic advertiser list");
2234
2235 return err;
2236 }
2237
2238 return 0;
2239 }
2240
2241 int bt_le_per_adv_list_remove(const bt_addr_le_t *addr, uint8_t sid)
2242 {
2243 struct bt_hci_cp_le_rem_dev_from_per_adv_list *cp;
2244 struct net_buf *buf;
2245 int err;
2246
2247 if (!atomic_test_bit(bt_dev.flags, BT_DEV_READY)) {
2248 return -EAGAIN;
2249 }
2250
2251 buf = bt_hci_cmd_create(BT_HCI_OP_LE_REM_DEV_FROM_PER_ADV_LIST,
2252 sizeof(*cp));
2253 if (!buf) {
2254 return -ENOBUFS;
2255 }
2256
2257 cp = net_buf_add(buf, sizeof(*cp));
2258 bt_addr_le_copy(&cp->addr, addr);
2259 cp->sid = sid;
2260
2261 err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_REM_DEV_FROM_PER_ADV_LIST, buf,
2262 NULL);
2263 if (err) {
2264 LOG_ERR("Failed to remove device from periodic advertiser list");
2265 return err;
2266 }
2267
2268 return 0;
2269 }
2270
2271 int bt_le_per_adv_list_clear(void)
2272 {
2273 int err;
2274
2275 if (!atomic_test_bit(bt_dev.flags, BT_DEV_READY)) {
2276 return -EAGAIN;
2277 }
2278
2279 err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_CLEAR_PER_ADV_LIST, NULL, NULL);
2280 if (err) {
2281 LOG_ERR("Failed to clear periodic advertiser list");
2282 return err;
2283 }
2284
2285 return 0;
2286 }
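
/* Illustrative sketch (assumed application code): combining the periodic
 * advertiser list with bt_le_per_adv_sync_create(). The peer address and SID
 * are assumed to be known from earlier scanning.
 *
 *	int err = bt_le_per_adv_list_add(&peer_addr, peer_sid);
 *
 *	struct bt_le_per_adv_sync_param sync_param = {
 *		.options = BT_LE_PER_ADV_SYNC_OPT_USE_PER_ADV_LIST,
 *		.skip    = 0,
 *		.timeout = 100,
 *	};
 *	struct bt_le_per_adv_sync *sync;
 *
 *	err = bt_le_per_adv_sync_create(&sync_param, &sync);
 *
 * With the list option set, the addr and sid fields of the parameters are
 * ignored, as noted in bt_le_per_adv_sync_create() above.
 */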
2287 #endif /* defined(CONFIG_BT_PER_ADV_SYNC) */
2288