1 /*
2 * Copyright (c) 2016-2019 Nordic Semiconductor ASA
3 * Copyright (c) 2016 Vinayak Kariappa Chettimada
4 *
5 * SPDX-License-Identifier: Apache-2.0
6 */
7
8 #include <zephyr/kernel.h>
9 #include <soc.h>
10 #include <zephyr/bluetooth/hci_types.h>
11
12 #include "hal/cpu.h"
13 #include "hal/ccm.h"
14 #include "hal/radio.h"
15 #include "hal/ticker.h"
16
17 #include "util/util.h"
18 #include "util/mem.h"
19 #include "util/memq.h"
20 #include "util/mayfly.h"
21 #include "util/dbuf.h"
22
23 #include "ticker/ticker.h"
24
25 #include "pdu_df.h"
26 #include "lll/pdu_vendor.h"
27 #include "pdu.h"
28
29 #include "lll.h"
30 #include "lll/lll_vendor.h"
31 #include "lll/lll_adv_types.h"
32 #include "lll_adv.h"
33 #include "lll/lll_adv_pdu.h"
34 #include "lll_scan.h"
35 #include "lll/lll_df_types.h"
36 #include "lll_conn.h"
37 #include "lll_filter.h"
38
39 #include "ll_sw/ull_tx_queue.h"
40
41 #include "ull_adv_types.h"
42 #include "ull_filter.h"
43
44 #include "ull_conn_types.h"
45 #include "ull_internal.h"
46 #include "ull_adv_internal.h"
47 #include "ull_scan_types.h"
48 #include "ull_scan_internal.h"
49 #include "ull_sched_internal.h"
50
51 #include "ll.h"
52
53 #include "hal/debug.h"
54
55 static int init_reset(void);
56 static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
57 uint32_t remainder, uint16_t lazy, uint8_t force,
58 void *param);
59 static uint8_t disable(uint8_t handle);
60
61 #if defined(CONFIG_BT_CTLR_ADV_EXT)
62 #define IS_PHY_ENABLED(scan_ctx, scan_phy) ((scan_ctx)->lll.phy & (scan_phy))
63
64 static uint8_t is_scan_update(uint8_t handle, uint16_t duration,
65 uint16_t period, struct ll_scan_set **scan,
66 struct node_rx_pdu **node_rx_scan_term);
67 static uint8_t duration_period_setup(struct ll_scan_set *scan,
68 uint16_t duration, uint16_t period,
69 struct node_rx_pdu **node_rx_scan_term);
70 static uint8_t duration_period_update(struct ll_scan_set *scan,
71 uint8_t is_update);
72 static void ticker_stop_ext_op_cb(uint32_t status, void *param);
73 static void ext_disable(void *param);
74 static void ext_disabled_cb(void *param);
75 #endif /* CONFIG_BT_CTLR_ADV_EXT */
76
77 static struct ll_scan_set ll_scan[BT_CTLR_SCAN_SET];
78
79 #if defined(CONFIG_BT_TICKER_EXT)
80 static struct ticker_ext ll_scan_ticker_ext[BT_CTLR_SCAN_SET];
81 #endif /* CONFIG_BT_TICKER_EXT */
82
/* HCI LE Set (Extended) Scan Parameters backend.
 *
 * Stores type, interval, window, own address type and filter policy into the
 * 1M scan instance, or the Coded PHY instance when the extended type bits
 * select Coded PHY.
 *
 * @param type          Scan type; bit 0 passive/active, with ADV_EXT the
 *                      remaining bits (type >> 1) select the scanning PHY.
 * @param interval      Scan interval; with ADV_EXT a zero value marks the
 *                      instance as "do not scan" (PHY cleared).
 * @param window        Scan window, same units as interval.
 * @param own_addr_type Own address type to scan with.
 * @param filter_policy Scanning filter policy.
 *
 * @return 0 on success, BT_HCI_ERR_CMD_DISALLOWED if the targeted scan
 *         instance is currently enabled or Coded PHY is not supported.
 */
uint8_t ll_scan_params_set(uint8_t type, uint16_t interval, uint16_t window,
			   uint8_t own_addr_type, uint8_t filter_policy)
{
	struct ll_scan_set *scan;
	struct lll_scan *lll;

	/* Parameters may only be changed while scanning is disabled */
	scan = ull_scan_is_disabled_get(SCAN_HANDLE_1M);
	if (!scan) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	uint8_t phy;

	/* Upper bits of type carry the PHY selection */
	phy = type >> 1;
	if (phy & BT_HCI_LE_EXT_SCAN_PHY_CODED) {
		struct ll_scan_set *scan_coded;

		if (!IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}

		scan_coded = ull_scan_is_disabled_get(SCAN_HANDLE_PHY_CODED);
		if (!scan_coded) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}

		/* Store the parameters in the Coded PHY instance instead */
		scan = scan_coded;
	}

	lll = &scan->lll;

	/* NOTE: Pass invalid interval value to not start scanning using this
	 * scan instance.
	 */
	if (!interval) {
		/* Set PHY to 0 to not start scanning on this instance */
		lll->phy = 0U;

		return 0;
	}

	/* If phy assigned is PHY_1M or PHY_CODED, then scanning on that
	 * PHY is enabled.
	 */
	lll->phy = phy;

#else /* !CONFIG_BT_CTLR_ADV_EXT */
	lll = &scan->lll;
#endif /* !CONFIG_BT_CTLR_ADV_EXT */

	scan->own_addr_type = own_addr_type;

	/* Store remaining parameters into the LLL context; the returned
	 * window-in-ticks is cached for use at scan enable time.
	 */
	scan->ticks_window = ull_scan_params_set(lll, type, interval, window,
						 filter_policy);

	return 0;
}
141
#if defined(CONFIG_BT_CTLR_ADV_EXT)
/* HCI LE Set (Extended) Scan Enable backend.
 *
 * Enables or disables scanning on the 1M instance and, when Coded PHY
 * scanning was configured, on the Coded PHY instance too. With ADV_EXT the
 * duration/period values are converted into per-instance event counts, and a
 * re-enable of an already running timed scan is treated as an update of its
 * duration/period (no new start).
 *
 * @param enable   Non-zero to enable scanning, zero to disable.
 * @param duration Extended scan duration (ADV_EXT build only).
 * @param period   Extended scan period (ADV_EXT build only).
 *
 * @return 0 on success, otherwise a BT_HCI_ERR_* error code.
 */
uint8_t ll_scan_enable(uint8_t enable, uint16_t duration, uint16_t period)
{
	struct node_rx_pdu *node_rx_scan_term = NULL;
	uint8_t is_update_coded = 0U;
	uint8_t is_update_1m = 0U;
#else /* !CONFIG_BT_CTLR_ADV_EXT */
uint8_t ll_scan_enable(uint8_t enable)
{
#endif /* !CONFIG_BT_CTLR_ADV_EXT */
	struct ll_scan_set *scan_coded = NULL;
	uint8_t own_addr_type = 0U;
	uint8_t is_coded_phy = 0U;
	struct ll_scan_set *scan;
	uint8_t err;

	if (!enable) {
		/* Disable 1M first; a successful Coded PHY disable clears a
		 * 1M disable error so that stopping either instance counts
		 * as success.
		 */
		err = disable(SCAN_HANDLE_1M);

		if (IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT) &&
		    IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
			uint8_t err_coded;

			err_coded = disable(SCAN_HANDLE_PHY_CODED);
			if (!err_coded) {
				err = 0U;
			}
		}

		return err;
	}

	scan = ull_scan_is_disabled_get(SCAN_HANDLE_1M);
	if (!scan) {
#if defined(CONFIG_BT_CTLR_ADV_EXT)
		/* Instance already running: allowed only as a
		 * duration/period update of a timed scan.
		 */
		is_update_1m = is_scan_update(SCAN_HANDLE_1M, duration, period,
					      &scan, &node_rx_scan_term);
		if (!is_update_1m)
#endif /* CONFIG_BT_CTLR_ADV_EXT */
		{
			return BT_HCI_ERR_CMD_DISALLOWED;
		}
	}

#if defined(CONFIG_BT_CTLR_ADV_EXT) && defined(CONFIG_BT_CTLR_PHY_CODED)
	scan_coded = ull_scan_is_disabled_get(SCAN_HANDLE_PHY_CODED);
	if (!scan_coded) {
		is_update_coded = is_scan_update(SCAN_HANDLE_PHY_CODED,
						 duration, period, &scan_coded,
						 &node_rx_scan_term);
		if (!is_update_coded) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}
	}

	own_addr_type = scan_coded->own_addr_type;
	is_coded_phy = (scan_coded->lll.phy &
			BT_HCI_LE_EXT_SCAN_PHY_CODED);
#endif /* CONFIG_BT_CTLR_ADV_EXT && CONFIG_BT_CTLR_PHY_CODED */

	/* A random own address (odd address type) requires a non-zero, i.e.
	 * previously set, random address.
	 */
	if ((is_coded_phy && (own_addr_type & 0x1)) ||
	    (!is_coded_phy && (scan->own_addr_type & 0x1))) {
		if (!mem_nz(ll_addr_get(BT_ADDR_LE_RANDOM), BDADDR_SIZE)) {
			return BT_HCI_ERR_INVALID_PARAM;
		}
	}

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	/* Convert duration/period to per-instance event counts and allocate
	 * the scan-terminate event node when a plain duration is used.
	 */
#if defined(CONFIG_BT_CTLR_PHY_CODED)
	if (!is_coded_phy || IS_PHY_ENABLED(scan, PHY_1M))
#endif /* CONFIG_BT_CTLR_PHY_CODED */
	{
		err = duration_period_setup(scan, duration, period,
					    &node_rx_scan_term);
		if (err) {
			return err;
		}
	}

	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED) &&
	    is_coded_phy) {
		err = duration_period_setup(scan_coded, duration, period,
					    &node_rx_scan_term);
		if (err) {
			return err;
		}
	}
#endif /* CONFIG_BT_CTLR_ADV_EXT */

#if defined(CONFIG_BT_CTLR_PRIVACY)
	struct lll_scan *lll;

	if (IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT) && is_coded_phy) {
		lll = &scan_coded->lll;

		/* TODO: Privacy support in Advertising Extensions */
	} else {
		lll = &scan->lll;
		own_addr_type = scan->own_addr_type;
	}

	ull_filter_scan_update(lll->filter_policy);

	lll->rl_idx = FILTER_IDX_NONE;
	lll->rpa_gen = 0;

	/* Active scanning with a resolvable own address type requires RPA
	 * generation.
	 */
	if ((lll->type & 0x1) &&
	    (own_addr_type == BT_ADDR_LE_PUBLIC_ID ||
	     own_addr_type == BT_ADDR_LE_RANDOM_ID)) {
		/* Generate RPAs if required */
		ull_filter_rpa_update(false);
		lll->rpa_gen = 1;
	}
#endif /* CONFIG_BT_CTLR_PRIVACY */

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	/* Apply the duration/period to the running ticker (update case) or
	 * arm the duration expiry for a fresh start; an update returns here
	 * without (re)starting the instance below.
	 */
#if defined(CONFIG_BT_CTLR_PHY_CODED)
	if (!is_coded_phy || IS_PHY_ENABLED(scan, PHY_1M))
#endif /* CONFIG_BT_CTLR_PHY_CODED */
	{
		err = duration_period_update(scan, is_update_1m);
		if (err) {
			return err;
		} else if (is_update_1m) {
			return 0;
		}
	}

	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED) &&
	    is_coded_phy) {
		err = duration_period_update(scan_coded, is_update_coded);
		if (err) {
			return err;
		} else if (is_update_coded) {
			return 0;
		}
	}

#if defined(CONFIG_BT_CTLR_PHY_CODED)
	if (!is_coded_phy || IS_PHY_ENABLED(scan, PHY_1M))
#endif /* CONFIG_BT_CTLR_PHY_CODED */
#endif /* CONFIG_BT_CTLR_ADV_EXT */
	{
		err = ull_scan_enable(scan);
		if (err) {
			return err;
		}
	}

	if (IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT) &&
	    IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED) &&
	    is_coded_phy) {
		err = ull_scan_enable(scan_coded);
		if (err) {
			return err;
		}
	}

	return 0;
}
302
303 int ull_scan_init(void)
304 {
305 int err;
306
307 if (IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT)) {
308 err = ull_scan_aux_init();
309 if (err) {
310 return err;
311 }
312 }
313
314 err = init_reset();
315 if (err) {
316 return err;
317 }
318
319 return 0;
320 }
321
322 int ull_scan_reset(void)
323 {
324 uint8_t handle;
325 int err;
326
327 for (handle = 0U; handle < BT_CTLR_SCAN_SET; handle++) {
328 (void)disable(handle);
329
330 #if defined(CONFIG_BT_CTLR_ADV_EXT)
331 /* Initialize PHY value to 0 to not start scanning on the scan
332 * instance if an explicit ll_scan_params_set() has not been
333 * invoked from HCI to enable scanning on that PHY.
334 */
335 ll_scan[handle].lll.phy = 0U;
336 #endif /* CONFIG_BT_CTLR_ADV_EXT */
337 }
338
339 if (IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT)) {
340 err = ull_scan_aux_reset();
341 if (err) {
342 return err;
343 }
344 }
345
346 err = init_reset();
347 if (err) {
348 return err;
349 }
350
351 return 0;
352 }
353
354 uint32_t ull_scan_params_set(struct lll_scan *lll, uint8_t type,
355 uint16_t interval, uint16_t window,
356 uint8_t filter_policy)
357 {
358 /* type value:
359 * 0000b - legacy 1M passive
360 * 0001b - legacy 1M active
361 * 0010b - Ext. 1M passive
362 * 0011b - Ext. 1M active
363 * 0100b - invalid
364 * 0101b - invalid
365 * 0110b - invalid
366 * 0111b - invalid
367 * 1000b - Ext. Coded passive
368 * 1001b - Ext. Coded active
369 */
370 lll->type = type;
371 lll->filter_policy = filter_policy;
372 lll->interval = interval;
373 lll->ticks_window = HAL_TICKER_US_TO_TICKS((uint64_t)window *
374 SCAN_INT_UNIT_US);
375
376 return lll->ticks_window;
377 }
378
/* Start scanning on one scan instance.
 *
 * Initializes the LLL scan context, computes the ticker slot reservation
 * from the configured interval/window, detects the "continuous scanning"
 * case where 1M and Coded PHY windows back-to-back fill a shared interval,
 * and starts the periodic scan ticker.
 *
 * @param scan Scan instance (1M or Coded PHY) to enable.
 *
 * @return 0 on success, BT_HCI_ERR_CMD_DISALLOWED if the ticker could not
 *         be started.
 */
uint8_t ull_scan_enable(struct ll_scan_set *scan)
{
	uint32_t ticks_slot_overhead;
	uint32_t volatile ret_cb;
	uint32_t ticks_interval;
	uint32_t ticks_anchor;
	uint32_t ticks_offset;
	struct lll_scan *lll;
	uint8_t handle;
	uint32_t ret;

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	/* Initialize extend scan stop request */
	scan->is_stop = 0U;
#endif /* CONFIG_BT_CTLR_ADV_EXT */

	/* Initialize LLL scan context */
	lll = &scan->lll;
	lll->init_addr_type = scan->own_addr_type;
	(void)ll_addr_read(lll->init_addr_type, lll->init_addr);
	lll->chan = 0U;
	lll->is_stop = 0U;

	ull_hdr_init(&scan->ull);
	lll_hdr_init(lll, scan);

	ticks_interval = HAL_TICKER_US_TO_TICKS((uint64_t)lll->interval *
						SCAN_INT_UNIT_US);

	/* TODO: active_to_start feature port */
	scan->ull.ticks_active_to_start = 0U;
	scan->ull.ticks_prepare_to_start =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US);
	scan->ull.ticks_preempt_to_start =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_PREEMPT_MIN_US);

	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
		ticks_slot_overhead = MAX(scan->ull.ticks_active_to_start,
					  scan->ull.ticks_prepare_to_start);
	} else {
		ticks_slot_overhead = 0U;
	}

	handle = ull_scan_handle_get(scan);

	/* Reserve a slot of window + start overhead when it fits inside the
	 * interval, else reserve (close to) the whole interval and clear the
	 * window so no window-stop handling is needed.
	 */
	lll->ticks_window = scan->ticks_window;
	if ((lll->ticks_window +
	     HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US)) <
	    (ticks_interval - ticks_slot_overhead)) {
		scan->ull.ticks_slot =
			(lll->ticks_window +
			 HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US));

#if defined(CONFIG_BT_TICKER_EXT)
		ll_scan_ticker_ext[handle].ticks_slot_window =
			scan->ull.ticks_slot + ticks_slot_overhead;
#endif /* CONFIG_BT_TICKER_EXT */

	} else {
		if (IS_ENABLED(CONFIG_BT_CTLR_SCAN_UNRESERVED)) {
			scan->ull.ticks_slot = 0U;
		} else {
			scan->ull.ticks_slot = ticks_interval -
					       ticks_slot_overhead;
		}

		lll->ticks_window = 0U;

#if defined(CONFIG_BT_TICKER_EXT)
		ll_scan_ticker_ext[handle].ticks_slot_window = ticks_interval;
#endif /* CONFIG_BT_TICKER_EXT */
	}

	/* Per-handle slot/offset adjustment when both 1M and Coded PHY
	 * scanning are in use; `if (false)` anchors the else-if chain so the
	 * plain-else fallback compiles in all configurations.
	 */
	if (false) {

#if defined(CONFIG_BT_CTLR_ADV_EXT) && defined(CONFIG_BT_CTLR_PHY_CODED)
	} else if (handle == SCAN_HANDLE_1M) {
		const struct ll_scan_set *scan_coded;

		scan_coded = ull_scan_set_get(SCAN_HANDLE_PHY_CODED);
		if (IS_PHY_ENABLED(scan_coded, PHY_CODED) &&
		    (lll->ticks_window != 0U)) {
			const struct lll_scan *lll_coded;
			uint32_t ticks_interval_coded;
			uint32_t ticks_window_sum_min;
			uint32_t ticks_window_sum_max;

			lll_coded = &scan_coded->lll;
			ticks_interval_coded = HAL_TICKER_US_TO_TICKS(
				(uint64_t)lll_coded->interval *
				SCAN_INT_UNIT_US);
			ticks_window_sum_min = lll->ticks_window +
					       lll_coded->ticks_window;
			ticks_window_sum_max = ticks_window_sum_min +
				HAL_TICKER_US_TO_TICKS(EVENT_TICKER_RES_MARGIN_US << 1);
			/* Check if 1M and Coded PHY scanning use same interval
			 * and the sum of the scan window duration equals their
			 * interval then use continuous scanning and avoid time
			 * reservation from overlapping.
			 */
			if ((ticks_interval == ticks_interval_coded) &&
			    IN_RANGE(ticks_interval, ticks_window_sum_min,
				     ticks_window_sum_max)) {
				if (IS_ENABLED(CONFIG_BT_CTLR_SCAN_UNRESERVED)) {
					scan->ull.ticks_slot = 0U;
				} else {
					scan->ull.ticks_slot =
						lll->ticks_window -
						ticks_slot_overhead -
						HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US) -
						HAL_TICKER_US_TO_TICKS(EVENT_TICKER_RES_MARGIN_US);
				}

				/* Continuous scanning, no scan window stop
				 * ticker to be started but we will zero the
				 * ticks_window value when coded PHY scan is
				 * enabled (the next following else clause).
				 * Due to this the first scan window will have
				 * the stop ticker started but consecutive
				 * scan window will not have the stop ticker
				 * started once coded PHY scan window has been
				 * enabled.
				 */
			}
		}

		/* 1M scan window starts without any offset */
		ticks_offset = 0U;

	} else if (handle == SCAN_HANDLE_PHY_CODED) {
		struct ll_scan_set *scan_1m;

		scan_1m = ull_scan_set_get(SCAN_HANDLE_1M);
		if (IS_PHY_ENABLED(scan_1m, PHY_1M) &&
		    (lll->ticks_window != 0U)) {
			uint32_t ticks_window_sum_min;
			uint32_t ticks_window_sum_max;
			uint32_t ticks_interval_1m;
			struct lll_scan *lll_1m;

			lll_1m = &scan_1m->lll;
			ticks_interval_1m = HAL_TICKER_US_TO_TICKS(
				(uint64_t)lll_1m->interval *
				SCAN_INT_UNIT_US);
			ticks_window_sum_min = lll->ticks_window +
					       lll_1m->ticks_window;
			ticks_window_sum_max = ticks_window_sum_min +
				HAL_TICKER_US_TO_TICKS(EVENT_TICKER_RES_MARGIN_US << 1);
			/* Check if 1M and Coded PHY scanning use same interval
			 * and the sum of the scan window duration equals their
			 * interval then use continuous scanning and avoid time
			 * reservation from overlapping.
			 */
			if ((ticks_interval == ticks_interval_1m) &&
			    IN_RANGE(ticks_interval, ticks_window_sum_min,
				     ticks_window_sum_max)) {
				if (IS_ENABLED(CONFIG_BT_CTLR_SCAN_UNRESERVED)) {
					scan->ull.ticks_slot = 0U;
				} else {
					scan->ull.ticks_slot =
						lll->ticks_window -
						ticks_slot_overhead -
						HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US) -
						HAL_TICKER_US_TO_TICKS(EVENT_TICKER_RES_MARGIN_US);
				}
				/* Offset the coded PHY scan window, place
				 * after 1M scan window.
				 * Have some margin for jitter due to ticker
				 * resolution.
				 */
				ticks_offset = lll_1m->ticks_window;
				ticks_offset += HAL_TICKER_US_TO_TICKS(
					EVENT_TICKER_RES_MARGIN_US << 1);

				/* Continuous scanning, no scan window stop
				 * ticker started for both 1M and coded PHY.
				 */
				lll->ticks_window = 0U;
				lll_1m->ticks_window = 0U;

			} else {
				ticks_offset = 0U;
			}
		} else {
			ticks_offset = 0U;
		}
#endif /* CONFIG_BT_CTLR_ADV_EXT && CONFIG_BT_CTLR_PHY_CODED */

	} else {
		ticks_offset = 0U;
	}

	ticks_anchor = ticker_ticks_now_get();
	ticks_anchor += HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);

#if defined(CONFIG_BT_CENTRAL) && defined(CONFIG_BT_CTLR_SCHED_ADVANCED)
	if (!lll->conn) {
		uint32_t ticks_ref = 0U;
		uint32_t offset_us = 0U;
		int err;

		err = ull_sched_after_cen_slot_get(TICKER_USER_ID_THREAD,
						   (scan->ull.ticks_slot +
						    ticks_slot_overhead),
						   &ticks_ref, &offset_us);

		/* Use the ticks_ref as scanner's anchor if a free time space
		 * after any central role is available (indicated by a non-zero
		 * offset_us value).
		 */
		if (!err) {
			ticks_anchor = ticks_ref +
				       HAL_TICKER_US_TO_TICKS(offset_us);
		}
	}
#endif /* CONFIG_BT_CENTRAL && CONFIG_BT_CTLR_SCHED_ADVANCED */

	ret_cb = TICKER_STATUS_BUSY;

	/* Start the periodic scan ticker; the extended variant additionally
	 * passes the per-handle ticker_ext context.
	 */
#if defined(CONFIG_BT_TICKER_EXT)
	ret = ticker_start_ext(
#else
	ret = ticker_start(
#endif /* CONFIG_BT_TICKER_EXT */
			   TICKER_INSTANCE_ID_CTLR,
			   TICKER_USER_ID_THREAD, TICKER_ID_SCAN_BASE + handle,
			   (ticks_anchor + ticks_offset), 0, ticks_interval,
			   HAL_TICKER_REMAINDER((uint64_t)lll->interval *
						SCAN_INT_UNIT_US),
			   TICKER_NULL_LAZY,
			   (scan->ull.ticks_slot + ticks_slot_overhead),
			   ticker_cb, scan,
			   ull_ticker_status_give, (void *)&ret_cb
#if defined(CONFIG_BT_TICKER_EXT)
			   ,
			   &ll_scan_ticker_ext[handle]
#endif /* CONFIG_BT_TICKER_EXT */
			   );
	ret = ull_ticker_status_take(ret, &ret_cb);
	if (ret != TICKER_STATUS_SUCCESS) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	scan->is_enabled = 1U;

#if defined(CONFIG_BT_CTLR_PRIVACY)
#if defined(CONFIG_BT_BROADCASTER)
	if (!ull_adv_is_enabled_get(0))
#endif
	{
		/* Notify the filter module of scan state (bit 1 set) */
		ull_filter_adv_scan_state_cb(BIT(1));
	}
#endif

	return 0;
}
635
/* Stop the scan ticker for a scan instance and, in extended builds, stop
 * all auxiliary scan contexts that belong to it.
 *
 * @param handle Scan handle (index into the scan set).
 * @param scan   The scan instance matching handle.
 *
 * @return 0 on success, BT_HCI_ERR_CMD_DISALLOWED if the ticker or an aux
 *         context could not be stopped.
 */
uint8_t ull_scan_disable(uint8_t handle, struct ll_scan_set *scan)
{
	int err;

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	/* Request Extended Scan stop */
	scan->is_stop = 1U;
	/* Ensure the stop request is visible before stopping the ticker */
	cpu_dmb();
#endif /* CONFIG_BT_CTLR_ADV_EXT */

	err = ull_ticker_stop_with_mark(TICKER_ID_SCAN_BASE + handle,
					scan, &scan->lll);
	LL_ASSERT(err == 0 || err == -EALREADY);
	if (err) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	/* Find and stop associated auxiliary scan contexts */
	for (uint8_t aux_handle = 0; aux_handle < CONFIG_BT_CTLR_SCAN_AUX_SET;
	     aux_handle++) {
		struct lll_scan_aux *aux_scan_lll;
		struct ll_scan_set *aux_scan;
		struct ll_scan_aux_set *aux;

		aux = ull_scan_aux_set_get(aux_handle);
		aux_scan_lll = aux->parent;
		if (!aux_scan_lll) {
			/* Aux context not in use */
			continue;
		}

		aux_scan = HDR_LLL2ULL(aux_scan_lll);
		if (aux_scan == scan) {
			void *parent;

			err = ull_scan_aux_stop(aux);
			if (err && (err != -EALREADY)) {
				return BT_HCI_ERR_CMD_DISALLOWED;
			}

			/* Use a local variable to assert on auxiliary context's
			 * release.
			 * Under race condition a released aux context can be
			 * allocated for reception of chain PDU of a periodic
			 * sync role.
			 */
			parent = aux->parent;
			LL_ASSERT(!parent || (parent != aux_scan_lll));
		}
	}
#endif /* CONFIG_BT_CTLR_ADV_EXT */

	return 0;
}
690
691 #if defined(CONFIG_BT_CTLR_ADV_EXT)
/* Handle scan event done: when a timed (duration-limited, non-periodic)
 * scan has expired, generate the extended scan terminate event and stop the
 * scan ticker.
 *
 * @param done Event-done node whose param points at the scan's ULL header.
 */
void ull_scan_done(struct node_rx_event_done *done)
{
	struct node_rx_hdr *rx_hdr;
	struct ll_scan_set *scan;
	struct lll_scan *lll;
	uint8_t handle;
	uint32_t ret;

	/* Get reference to ULL context */
	scan = CONTAINER_OF(done->param, struct ll_scan_set, ull);
	lll = &scan->lll;

	/* Nothing to do for periodic scans, scans without a duration, or
	 * when the duration has not yet expired.
	 */
	if (likely(scan->duration_lazy || !lll->duration_reload ||
		   lll->duration_expire)) {
		return;
	}

	/* Prevent duplicate terminate event generation */
	lll->duration_reload = 0U;

	handle = ull_scan_handle_get(scan);
	LL_ASSERT(handle < BT_CTLR_SCAN_SET);

#if defined(CONFIG_BT_CTLR_PHY_CODED)
	/* Prevent duplicate terminate event if ull_scan_done get called by
	 * the other scan instance.
	 */
	struct ll_scan_set *scan_other;

	if (handle == SCAN_HANDLE_1M) {
		scan_other = ull_scan_set_get(SCAN_HANDLE_PHY_CODED);
	} else {
		scan_other = ull_scan_set_get(SCAN_HANDLE_1M);
	}
	scan_other->lll.duration_reload = 0U;
#endif /* CONFIG_BT_CTLR_PHY_CODED */

	/* Populate the pre-allocated terminate event node */
	rx_hdr = (void *)scan->node_rx_scan_term;
	rx_hdr->type = NODE_RX_TYPE_EXT_SCAN_TERMINATE;
	rx_hdr->handle = handle;

	/* Stop the scan ticker; the op callback continues the teardown */
	ret = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
			  (TICKER_ID_SCAN_BASE + handle), ticker_stop_ext_op_cb,
			  scan);

	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
		  (ret == TICKER_STATUS_BUSY));
}
740
741 void ull_scan_term_dequeue(uint8_t handle)
742 {
743 struct ll_scan_set *scan;
744
745 scan = ull_scan_set_get(handle);
746 LL_ASSERT(scan);
747
748 scan->is_enabled = 0U;
749
750 #if defined(CONFIG_BT_CTLR_PHY_CODED)
751 if (handle == SCAN_HANDLE_1M) {
752 struct ll_scan_set *scan_coded;
753
754 scan_coded = ull_scan_set_get(SCAN_HANDLE_PHY_CODED);
755 if (IS_PHY_ENABLED(scan_coded, PHY_CODED)) {
756 uint8_t err;
757
758 err = disable(SCAN_HANDLE_PHY_CODED);
759 LL_ASSERT(!err);
760 }
761 } else {
762 struct ll_scan_set *scan_1m;
763
764 scan_1m = ull_scan_set_get(SCAN_HANDLE_1M);
765 if (IS_PHY_ENABLED(scan_1m, PHY_1M)) {
766 uint8_t err;
767
768 err = disable(SCAN_HANDLE_1M);
769 LL_ASSERT(!err);
770 }
771 }
772 #endif /* CONFIG_BT_CTLR_PHY_CODED */
773 }
774 #endif /* CONFIG_BT_CTLR_ADV_EXT */
775
776 struct ll_scan_set *ull_scan_set_get(uint8_t handle)
777 {
778 if (handle >= BT_CTLR_SCAN_SET) {
779 return NULL;
780 }
781
782 return &ll_scan[handle];
783 }
784
785 uint8_t ull_scan_handle_get(struct ll_scan_set *scan)
786 {
787 return ((uint8_t *)scan - (uint8_t *)ll_scan) / sizeof(*scan);
788 }
789
790 uint8_t ull_scan_lll_handle_get(struct lll_scan *lll)
791 {
792 return ull_scan_handle_get((void *)lll->hdr.parent);
793 }
794
795 struct ll_scan_set *ull_scan_is_valid_get(struct ll_scan_set *scan)
796 {
797 if (((uint8_t *)scan < (uint8_t *)ll_scan) ||
798 ((uint8_t *)scan > ((uint8_t *)ll_scan +
799 (sizeof(struct ll_scan_set) *
800 (BT_CTLR_SCAN_SET - 1))))) {
801 return NULL;
802 }
803
804 return scan;
805 }
806
807 struct lll_scan *ull_scan_lll_is_valid_get(struct lll_scan *lll)
808 {
809 struct ll_scan_set *scan;
810
811 scan = HDR_LLL2ULL(lll);
812 scan = ull_scan_is_valid_get(scan);
813 if (scan) {
814 return &scan->lll;
815 }
816
817 return NULL;
818 }
819
820 struct ll_scan_set *ull_scan_is_enabled_get(uint8_t handle)
821 {
822 struct ll_scan_set *scan;
823
824 scan = ull_scan_set_get(handle);
825 if (!scan || !scan->is_enabled) {
826 return NULL;
827 }
828
829 return scan;
830 }
831
832 struct ll_scan_set *ull_scan_is_disabled_get(uint8_t handle)
833 {
834 struct ll_scan_set *scan;
835
836 scan = ull_scan_set_get(handle);
837 if (!scan || scan->is_enabled) {
838 return NULL;
839 }
840
841 return scan;
842 }
843
/* Return a bitmask describing the state of a scan instance.
 *
 * The enabled flag is shifted by the scan type; initiator and periodic-sync
 * usage are ORed in when those roles are built in. A disabled instance
 * still reports ULL_SCAN_IS_SYNC when a periodic sync is bound to it.
 *
 * @param handle Scan handle.
 *
 * @return 0 when fully idle, else the composed state bitmask.
 */
uint32_t ull_scan_is_enabled(uint8_t handle)
{
	struct ll_scan_set *scan;

	scan = ull_scan_is_enabled_get(handle);
	if (!scan) {
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
		/* Not scanning, but may still be in use by a periodic sync */
		scan = ull_scan_set_get(handle);

		return scan->periodic.sync ? ULL_SCAN_IS_SYNC : 0U;
#else
		return 0U;
#endif
	}

	return (((uint32_t)scan->is_enabled << scan->lll.type) |
#if defined(CONFIG_BT_CENTRAL)
		(scan->lll.conn ? ULL_SCAN_IS_INITIATOR : 0U) |
#endif
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
		(scan->periodic.sync ? ULL_SCAN_IS_SYNC : 0U) |
#endif
		0U);
}
868
869 uint32_t ull_scan_filter_pol_get(uint8_t handle)
870 {
871 struct ll_scan_set *scan;
872
873 scan = ull_scan_is_enabled_get(handle);
874 if (!scan) {
875 return 0;
876 }
877
878 return scan->lll.filter_policy;
879 }
880
/* Common initialization shared by ull_scan_init() and ull_scan_reset(). */
static int init_reset(void)
{
#if defined(CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL) && \
	!defined(CONFIG_BT_CTLR_ADV_EXT)
	/* Default TX power level for the single (1M) scan instance */
	ll_scan[0].lll.tx_pwr_lvl = RADIO_TXP_DEFAULT;
#endif /* CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL && !CONFIG_BT_CTLR_ADV_EXT */

	return 0;
}
890
/* Ticker expiry callback for the scan ticker.
 *
 * Enqueues the LLL scan prepare via mayfly and, in extended builds,
 * maintains the duration/period bookkeeping: decrements the duration by the
 * number of elapsed events, switches the ticker lazy count to the period
 * remainder when the duration expires, and re-arms the duration after a
 * lazy (period) gap.
 *
 * NOTE: mfy/p/link are static, so the single in-flight prepare at a time
 * assumption of the mayfly pattern applies here.
 */
static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
		      uint32_t remainder, uint16_t lazy, uint8_t force,
		      void *param)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, lll_scan_prepare};
	static struct lll_prepare_param p;
	struct ll_scan_set *scan;
	struct lll_scan *lll;
	uint32_t ret;
	uint8_t ref;

	DEBUG_RADIO_PREPARE_O(1);

	scan = param;
	lll = &scan->lll;

	/* Increment prepare reference count */
	ref = ull_ref_inc(&scan->ull);
	LL_ASSERT(ref);

	/* Append timing parameters */
	p.ticks_at_expire = ticks_at_expire;
	p.remainder = remainder;
	p.lazy = lazy;
	p.param = lll;
	p.force = force;
	mfy.param = &p;

	/* Kick LLL prepare */
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL,
			     0, &mfy);
	LL_ASSERT(!ret);

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	if (lll->duration_expire) {
		uint16_t elapsed;

		/* lazy counts skipped events; this expiry is one more */
		elapsed = lazy + 1;
		if (lll->duration_expire > elapsed) {
			lll->duration_expire -= elapsed;
		} else {
			if (scan->duration_lazy) {
				uint8_t handle;
				uint16_t duration_lazy;

				/* Remaining lazy gap until the next period,
				 * credited with the leftover duration.
				 */
				duration_lazy = lll->duration_expire +
						scan->duration_lazy - elapsed;

				handle = ull_scan_handle_get(scan);
				LL_ASSERT(handle < BT_CTLR_SCAN_SET);

				/* Idle the ticker until the next period */
				ret = ticker_update(TICKER_INSTANCE_ID_CTLR,
						    TICKER_USER_ID_ULL_HIGH,
						    (TICKER_ID_SCAN_BASE +
						     handle), 0, 0, 0, 0,
						    duration_lazy, 0,
						    NULL, NULL);
				LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
					  (ret == TICKER_STATUS_BUSY));
			}

			lll->duration_expire = 0U;
		}
	} else if (lll->duration_reload && lazy) {
		uint8_t handle;

		handle = ull_scan_handle_get(scan);
		LL_ASSERT(handle < BT_CTLR_SCAN_SET);

		/* Period gap over: re-arm the duration and resume running
		 * the ticker every event (lazy = 1 forced).
		 */
		lll->duration_expire = lll->duration_reload;
		ret = ticker_update(TICKER_INSTANCE_ID_CTLR,
				    TICKER_USER_ID_ULL_HIGH,
				    (TICKER_ID_SCAN_BASE + handle),
				    0, 0, 0, 0, 1, 1, NULL, NULL);
		LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
			  (ret == TICKER_STATUS_BUSY));
	}
#endif /* CONFIG_BT_CTLR_ADV_EXT */

	DEBUG_RADIO_PREPARE_O(1);
}
973
974 #if defined(CONFIG_BT_CTLR_ADV_EXT)
975 static uint8_t is_scan_update(uint8_t handle, uint16_t duration,
976 uint16_t period, struct ll_scan_set **scan,
977 struct node_rx_pdu **node_rx_scan_term)
978 {
979 *scan = ull_scan_set_get(handle);
980 *node_rx_scan_term = (void *)(*scan)->node_rx_scan_term;
981 return duration && period && (*scan)->lll.duration_reload &&
982 (*scan)->duration_lazy;
983 }
984
/* Convert HCI duration/period into per-instance event counts.
 *
 * With a period, duration_lazy holds the event gap between timed bursts and
 * no terminate event node is needed. With duration only, a node_rx plus
 * link is allocated (once, shared across instances via *node_rx_scan_term)
 * for the extended scan terminate event.
 *
 * @param scan              Scan instance to configure.
 * @param duration          Scan duration from HCI (0 = unlimited).
 * @param period            Scan period from HCI (0 = no periodic restart).
 * @param node_rx_scan_term In/out: shared terminate event node; reused when
 *                          already allocated by the other instance.
 *
 * @return 0 on success, BT_HCI_ERR_INVALID_PARAM when duration does not fit
 *         inside the period, BT_HCI_ERR_MEM_CAPACITY_EXCEEDED on allocation
 *         failure.
 */
static uint8_t duration_period_setup(struct ll_scan_set *scan,
				     uint16_t duration, uint16_t period,
				     struct node_rx_pdu **node_rx_scan_term)
{
	struct lll_scan *lll;

	lll = &scan->lll;
	if (duration) {
		/* Duration expressed in scan events */
		lll->duration_reload =
			ULL_SCAN_DURATION_TO_EVENTS(duration,
						    scan->lll.interval);
		if (period) {
			/* Duration must fit inside the period */
			if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK) &&
			    (duration >= ULL_SCAN_PERIOD_TO_DURATION(period))) {
				return BT_HCI_ERR_INVALID_PARAM;
			}

			/* Lazy count = events in the period minus the events
			 * spent actively scanning.
			 */
			scan->duration_lazy =
				ULL_SCAN_PERIOD_TO_EVENTS(period,
							  scan->lll.interval);
			scan->duration_lazy -= lll->duration_reload;
			scan->node_rx_scan_term = NULL;
		} else {
			struct node_rx_pdu *node_rx;
			void *link_scan_term;

			scan->duration_lazy = 0U;

			/* Reuse the node already allocated for the other
			 * scan instance, if any.
			 */
			if (*node_rx_scan_term) {
				scan->node_rx_scan_term =
					(void *)*node_rx_scan_term;

				return 0;
			}

			/* The alloc here used for ext scan termination event */
			link_scan_term = ll_rx_link_alloc();
			if (!link_scan_term) {
				return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
			}

			node_rx = ll_rx_alloc();
			if (!node_rx) {
				/* Roll back the link allocation */
				ll_rx_link_release(link_scan_term);

				return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
			}

			node_rx->hdr.link = (void *)link_scan_term;
			scan->node_rx_scan_term = (void *)node_rx;
			*node_rx_scan_term = node_rx;
		}
	} else {
		/* Unlimited scanning: clear all timed-scan state */
		lll->duration_reload = 0U;
		scan->duration_lazy = 0U;
		scan->node_rx_scan_term = NULL;
	}

	return 0;
}
1045
/* Apply the duration/period to the scan instance.
 *
 * For an update of a running scan, clear the in-flight duration expiry and
 * poke the ticker (lazy forced to 1) so the new values take effect; for a
 * fresh enable just arm duration_expire from duration_reload.
 *
 * @param scan      Scan instance.
 * @param is_update Non-zero when the instance is already running.
 *
 * @return 0 on success, BT_HCI_ERR_CMD_DISALLOWED if the ticker update
 *         failed.
 */
static uint8_t duration_period_update(struct ll_scan_set *scan,
				      uint8_t is_update)
{
	if (is_update) {
		uint32_t volatile ret_cb;
		uint32_t ret;

		scan->lll.duration_expire = 0U;

		ret_cb = TICKER_STATUS_BUSY;
		ret = ticker_update(TICKER_INSTANCE_ID_CTLR,
				    TICKER_USER_ID_THREAD,
				    (TICKER_ID_SCAN_BASE +
				     ull_scan_handle_get(scan)),
				    0, 0, 0, 0, 1, 1,
				    ull_ticker_status_give, (void *)&ret_cb);
		ret = ull_ticker_status_take(ret, &ret_cb);
		if (ret != TICKER_STATUS_SUCCESS) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}

		return 0;
	} else {
		scan->lll.duration_expire = scan->lll.duration_reload;
	}

	return 0;
}
1074
/* Ticker-stop operation callback used by ull_scan_done(): defers the rest
 * of the extended scan teardown to ULL_HIGH context via mayfly.
 *
 * @param status Ticker operation status.
 * @param param  The ll_scan_set being stopped.
 */
static void ticker_stop_ext_op_cb(uint32_t status, void *param)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, ext_disable};
	uint32_t ret;

	/* Ignore if race between thread and ULL */
	if (status != TICKER_STATUS_SUCCESS) {
		/* TODO: detect race */

		return;
	}

	/* Check if any pending LLL events that need to be aborted */
	mfy.param = param;
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_LOW,
			     TICKER_USER_ID_ULL_HIGH, 0, &mfy);
	LL_ASSERT(!ret);
}
1094
/* Continue extended scan teardown in ULL_HIGH context.
 *
 * If an LLL event is still in flight (non-zero prepare refcount), request
 * an LLL disable and arrange for ext_disabled_cb() to run once the refcount
 * drops to zero; otherwise invoke it immediately.
 *
 * @param param The ll_scan_set being disabled.
 */
static void ext_disable(void *param)
{
	struct ll_scan_set *scan;
	struct ull_hdr *hdr;

	/* Check ref count to determine if any pending LLL events in pipeline */
	scan = param;
	hdr = &scan->ull;
	if (ull_ref_get(hdr)) {
		static memq_link_t link;
		static struct mayfly mfy = {0, 0, &link, NULL, lll_disable};
		uint32_t ret;

		mfy.param = &scan->lll;

		/* Setup disabled callback to be called when ref count
		 * returns to zero.
		 */
		LL_ASSERT(!hdr->disabled_cb);
		hdr->disabled_param = mfy.param;
		hdr->disabled_cb = ext_disabled_cb;

		/* Trigger LLL disable */
		ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
				     TICKER_USER_ID_LLL, 0, &mfy);
		LL_ASSERT(!ret);
	} else {
		/* No pending LLL events */
		ext_disabled_cb(&scan->lll);
	}
}
1126
/* Final step of extended scan teardown: enqueue the scan terminate event
 * (already populated by ull_scan_done()) towards the host.
 *
 * @param param The scan's LLL context.
 */
static void ext_disabled_cb(void *param)
{
	struct node_rx_hdr *rx_hdr;
	struct ll_scan_set *scan;
	struct lll_scan *lll;

	/* Under race condition, if a connection has been established then
	 * node_rx is already utilized to send terminate event on connection
	 */
	lll = (void *)param;
	scan = HDR_LLL2ULL(lll);
	rx_hdr = (void *)scan->node_rx_scan_term;
	if (!rx_hdr) {
		return;
	}

	/* NOTE: parameters are already populated on disable,
	 * just enqueue here
	 */
	ll_rx_put_sched(rx_hdr->link, rx_hdr);
}
1148 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1149
/* Disable an enabled scan instance.
 *
 * Refuses while the instance is initiating a connection. On success clears
 * the enabled flag, releases any pending scan-terminate event node and
 * notifies the privacy filter module.
 *
 * @param handle Scan handle to disable.
 *
 * @return 0 on success, BT_HCI_ERR_CMD_DISALLOWED if not enabled, busy
 *         initiating, or the ticker stop failed.
 */
static uint8_t disable(uint8_t handle)
{
	struct ll_scan_set *scan;
	uint8_t ret;

	scan = ull_scan_is_enabled_get(handle);
	if (!scan) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

#if defined(CONFIG_BT_CENTRAL)
	/* An initiating scan instance must not be stopped this way */
	if (scan->lll.conn) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}
#endif

	ret = ull_scan_disable(handle, scan);
	if (ret) {
		return ret;
	}

	scan->is_enabled = 0U;

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	/* Release the unused extended scan terminate event node */
	if (scan->node_rx_scan_term) {
		struct node_rx_pdu *node_rx_scan_term =
			(void *)scan->node_rx_scan_term;

		scan->node_rx_scan_term = NULL;

		ll_rx_link_release(node_rx_scan_term->hdr.link);
		ll_rx_release(node_rx_scan_term);
	}
#endif /* CONFIG_BT_CTLR_ADV_EXT */

#if defined(CONFIG_BT_CTLR_PRIVACY)
#if defined(CONFIG_BT_BROADCASTER)
	if (!ull_adv_is_enabled_get(0))
#endif
	{
		/* Neither advertising nor scanning active any more */
		ull_filter_adv_scan_state_cb(0);
	}
#endif

	return 0;
}
1196