1 /*
2 * Copyright (c) 2016-2019 Nordic Semiconductor ASA
3 * Copyright (c) 2016 Vinayak Kariappa Chettimada
4 *
5 * SPDX-License-Identifier: Apache-2.0
6 */
7
8 #include <zephyr/kernel.h>
9 #include <soc.h>
10 #include <zephyr/bluetooth/hci_types.h>
11
12 #include "hal/cpu.h"
13 #include "hal/ccm.h"
14 #include "hal/radio.h"
15 #include "hal/ticker.h"
16
17 #include "util/util.h"
18 #include "util/mem.h"
19 #include "util/memq.h"
20 #include "util/mayfly.h"
21 #include "util/dbuf.h"
22
23 #include "ticker/ticker.h"
24
25 #include "pdu_df.h"
26 #include "lll/pdu_vendor.h"
27 #include "pdu.h"
28
29 #include "lll.h"
30 #include "lll/lll_vendor.h"
31 #include "lll/lll_adv_types.h"
32 #include "lll_adv.h"
33 #include "lll/lll_adv_pdu.h"
34 #include "lll_scan.h"
35 #include "lll/lll_df_types.h"
36 #include "lll_conn.h"
37 #include "lll_filter.h"
38
39 #include "ll_sw/ull_tx_queue.h"
40
41 #include "ull_adv_types.h"
42 #include "ull_filter.h"
43
44 #include "ull_conn_types.h"
45 #include "ull_internal.h"
46 #include "ull_adv_internal.h"
47 #include "ull_scan_types.h"
48 #include "ull_scan_internal.h"
49 #include "ull_sched_internal.h"
50
51 #include "ll.h"
52
53 #include "hal/debug.h"
54
55 static int init_reset(void);
56 static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
57 uint32_t remainder, uint16_t lazy, uint8_t force,
58 void *param);
59 static uint8_t disable(uint8_t handle);
60
61 #if defined(CONFIG_BT_CTLR_ADV_EXT)
62 #define IS_PHY_ENABLED(scan_ctx, scan_phy) ((scan_ctx)->lll.phy & (scan_phy))
63
64 static uint8_t is_scan_update(uint8_t handle, uint16_t duration,
65 uint16_t period, struct ll_scan_set **scan,
66 struct node_rx_pdu **node_rx_scan_term);
67 static uint8_t duration_period_setup(struct ll_scan_set *scan,
68 uint16_t duration, uint16_t period,
69 struct node_rx_pdu **node_rx_scan_term);
70 static uint8_t duration_period_update(struct ll_scan_set *scan,
71 uint8_t is_update);
72 static void ticker_stop_ext_op_cb(uint32_t status, void *param);
73 static void ext_disable(void *param);
74 static void ext_disabled_cb(void *param);
75 #endif /* CONFIG_BT_CTLR_ADV_EXT */
76
77 static struct ll_scan_set ll_scan[BT_CTLR_SCAN_SET];
78
79 #if defined(CONFIG_BT_TICKER_EXT)
80 static struct ticker_ext ll_scan_ticker_ext[BT_CTLR_SCAN_SET];
81 #endif /* CONFIG_BT_TICKER_EXT */
82
/* Handle HCI LE Set (Extended) Scan Parameters.
 *
 * @param type          Scan type; with extended scanning the upper bits
 *                      (type >> 1) carry the scanning PHY bitfield.
 * @param interval      Scan interval, in 0.625 ms units; 0 on a PHY means
 *                      "do not scan on this instance" (extended only).
 * @param window        Scan window, in 0.625 ms units.
 * @param own_addr_type Own address type to use while scanning.
 * @param filter_policy Scanning filter policy.
 *
 * @return 0 on success, BT_HCI_ERR_CMD_DISALLOWED if the targeted scan
 *         instance is currently enabled.
 */
uint8_t ll_scan_params_set(uint8_t type, uint16_t interval, uint16_t window,
			   uint8_t own_addr_type, uint8_t filter_policy)
{
	struct ll_scan_set *scan;
	struct lll_scan *lll;

	/* Parameters may only change while the 1M instance is disabled */
	scan = ull_scan_is_disabled_get(SCAN_HANDLE_1M);
	if (!scan) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	uint8_t phy;

	/* Upper bits of type select the PHY for extended scanning */
	phy = type >> 1;
	if (phy & BT_HCI_LE_EXT_SCAN_PHY_CODED) {
		struct ll_scan_set *scan_coded;

		if (!IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}

		/* Coded PHY parameters target the dedicated coded instance,
		 * which must also be disabled.
		 */
		scan_coded = ull_scan_is_disabled_get(SCAN_HANDLE_PHY_CODED);
		if (!scan_coded) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}

		scan = scan_coded;
	}

	lll = &scan->lll;

	/* NOTE: Pass invalid interval value to not start scanning using this
	 * scan instance.
	 */
	if (!interval) {
		/* Set PHY to 0 to not start scanning on this instance */
		lll->phy = 0U;

		return 0;
	}

	/* If phy assigned is PHY_1M or PHY_CODED, then scanning on that
	 * PHY is enabled.
	 */
	lll->phy = phy;

#else /* !CONFIG_BT_CTLR_ADV_EXT */
	lll = &scan->lll;
#endif /* !CONFIG_BT_CTLR_ADV_EXT */

	scan->own_addr_type = own_addr_type;

	/* Store common LLL parameters; cache the window in ticks for use at
	 * enable time.
	 */
	scan->ticks_window = ull_scan_params_set(lll, type, interval, window,
						 filter_policy);

	return 0;
}
141
#if defined(CONFIG_BT_CTLR_ADV_EXT)
/* Handle HCI LE Set Extended Scan Enable.
 *
 * @param enable   0 to disable scanning, non-zero to enable.
 * @param duration Scan duration (10 ms units), 0 for continuous.
 * @param period   Scan period (1.28 s units), 0 for a single duration.
 */
uint8_t ll_scan_enable(uint8_t enable, uint16_t duration, uint16_t period)
{
	struct node_rx_pdu *node_rx_scan_term = NULL;
	uint8_t is_update_coded = 0U;
	uint8_t is_update_1m = 0U;
#else /* !CONFIG_BT_CTLR_ADV_EXT */
/* Handle HCI LE Set Scan Enable (legacy). */
uint8_t ll_scan_enable(uint8_t enable)
{
#endif /* !CONFIG_BT_CTLR_ADV_EXT */
	struct ll_scan_set *scan_coded = NULL;
	uint8_t own_addr_type = 0U;
	uint8_t is_coded_phy = 0U;
	struct ll_scan_set *scan;
	uint8_t err;

	if (!enable) {
		/* Disable both instances; overall success if either one was
		 * actually stopped.
		 */
		err = disable(SCAN_HANDLE_1M);

		if (IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT) &&
		    IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
			uint8_t err_coded;

			err_coded = disable(SCAN_HANDLE_PHY_CODED);
			if (!err_coded) {
				err = 0U;
			}
		}

		return err;
	}

	scan = ull_scan_is_disabled_get(SCAN_HANDLE_1M);
	if (!scan) {
		/* Already enabled: only allowed as a duration/period update */
#if defined(CONFIG_BT_CTLR_ADV_EXT)
		is_update_1m = is_scan_update(SCAN_HANDLE_1M, duration, period,
					      &scan, &node_rx_scan_term);
		if (!is_update_1m)
#endif /* CONFIG_BT_CTLR_ADV_EXT */
		{
			return BT_HCI_ERR_CMD_DISALLOWED;
		}
	}

#if defined(CONFIG_BT_CTLR_ADV_EXT) && defined(CONFIG_BT_CTLR_PHY_CODED)
	scan_coded = ull_scan_is_disabled_get(SCAN_HANDLE_PHY_CODED);
	if (!scan_coded) {
		is_update_coded = is_scan_update(SCAN_HANDLE_PHY_CODED,
						 duration, period, &scan_coded,
						 &node_rx_scan_term);
		if (!is_update_coded) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}
	}

	own_addr_type = scan_coded->own_addr_type;
	is_coded_phy = (scan_coded->lll.phy &
			BT_HCI_LE_EXT_SCAN_PHY_CODED);
#endif /* CONFIG_BT_CTLR_ADV_EXT && CONFIG_BT_CTLR_PHY_CODED */

	/* A random-based own address type requires the random address to
	 * have been set (must be non-zero).
	 */
	if ((is_coded_phy && (own_addr_type & 0x1)) ||
	    (!is_coded_phy && (scan->own_addr_type & 0x1))) {
		if (!mem_nz(ll_addr_get(BT_ADDR_LE_RANDOM), BDADDR_SIZE)) {
			return BT_HCI_ERR_INVALID_PARAM;
		}
	}

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	/* Setup duration/period bookkeeping (and the terminate event node)
	 * on each instance that will scan.
	 */
#if defined(CONFIG_BT_CTLR_PHY_CODED)
	if (!is_coded_phy || IS_PHY_ENABLED(scan, PHY_1M))
#endif /* CONFIG_BT_CTLR_PHY_CODED */
	{
		err = duration_period_setup(scan, duration, period,
					    &node_rx_scan_term);
		if (err) {
			return err;
		}
	}

	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED) &&
	    is_coded_phy) {
		err = duration_period_setup(scan_coded, duration, period,
					    &node_rx_scan_term);
		if (err) {
			return err;
		}
	}
#endif /* CONFIG_BT_CTLR_ADV_EXT */

#if (defined(CONFIG_BT_CTLR_ADV_EXT) && defined(CONFIG_BT_CTLR_JIT_SCHEDULING)) || \
	defined(CONFIG_BT_CTLR_PRIVACY)
	struct lll_scan *lll;

	if (IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT) && is_coded_phy) {
		lll = &scan_coded->lll;

		/* TODO: Privacy support in Advertising Extensions */
	} else {
		lll = &scan->lll;
		own_addr_type = scan->own_addr_type;
	}

#if defined(CONFIG_BT_CTLR_PRIVACY)
	ull_filter_scan_update(lll->filter_policy);

	lll->rl_idx = FILTER_IDX_NONE;
	lll->rpa_gen = 0;

	/* Active scanning (type bit 0) with an RPA-capable own address type
	 * needs RPA generation.
	 */
	if ((lll->type & 0x1) && (own_addr_type == BT_HCI_OWN_ADDR_RPA_OR_PUBLIC ||
				  own_addr_type == BT_HCI_OWN_ADDR_RPA_OR_RANDOM)) {
		/* Generate RPAs if required */
		ull_filter_rpa_update(false);
		lll->rpa_gen = 1;
	}
#endif /* CONFIG_BT_CTLR_PRIVACY */

#if defined(CONFIG_BT_CTLR_ADV_EXT) && defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
	lll->scan_aux_score = 0;
#endif /* CONFIG_BT_CTLR_ADV_EXT && CONFIG_BT_CTLR_JIT_SCHEDULING */
#endif /* (CONFIG_BT_CTLR_ADV_EXT && CONFIG_BT_CTLR_JIT_SCHEDULING) || CONFIG_BT_CTLR_PRIVACY */

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	/* Apply duration/period: on update this pushes a ticker refresh and
	 * returns early without re-starting scanning.
	 */
#if defined(CONFIG_BT_CTLR_PHY_CODED)
	if (!is_coded_phy || IS_PHY_ENABLED(scan, PHY_1M))
#endif /* CONFIG_BT_CTLR_PHY_CODED */
	{
		err = duration_period_update(scan, is_update_1m);
		if (err) {
			return err;
		} else if (is_update_1m) {
			return 0;
		}
	}

	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED) &&
	    is_coded_phy) {
		err = duration_period_update(scan_coded, is_update_coded);
		if (err) {
			return err;
		} else if (is_update_coded) {
			return 0;
		}
	}

#if defined(CONFIG_BT_CTLR_PHY_CODED)
	if (!is_coded_phy || IS_PHY_ENABLED(scan, PHY_1M))
#endif /* CONFIG_BT_CTLR_PHY_CODED */
#endif /* CONFIG_BT_CTLR_ADV_EXT */
	{
		/* Start 1M PHY scanning */
		err = ull_scan_enable(scan);
		if (err) {
			return err;
		}
	}

	if (IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT) &&
	    IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED) &&
	    is_coded_phy) {
		/* Start Coded PHY scanning */
		err = ull_scan_enable(scan_coded);
		if (err) {
			return err;
		}
	}

	return 0;
}
308
309 int ull_scan_init(void)
310 {
311 int err;
312
313 if (IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT)) {
314 err = ull_scan_aux_init();
315 if (err) {
316 return err;
317 }
318 }
319
320 err = init_reset();
321 if (err) {
322 return err;
323 }
324
325 return 0;
326 }
327
328 int ull_scan_reset(void)
329 {
330 uint8_t handle;
331 int err;
332
333 for (handle = 0U; handle < BT_CTLR_SCAN_SET; handle++) {
334 (void)disable(handle);
335
336 #if defined(CONFIG_BT_CTLR_ADV_EXT)
337 /* Initialize PHY value to 0 to not start scanning on the scan
338 * instance if an explicit ll_scan_params_set() has not been
339 * invoked from HCI to enable scanning on that PHY.
340 */
341 ll_scan[handle].lll.phy = 0U;
342 #endif /* CONFIG_BT_CTLR_ADV_EXT */
343 }
344
345 if (IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT)) {
346 err = ull_scan_aux_reset();
347 if (err) {
348 return err;
349 }
350 }
351
352 err = init_reset();
353 if (err) {
354 return err;
355 }
356
357 return 0;
358 }
359
360 uint32_t ull_scan_params_set(struct lll_scan *lll, uint8_t type,
361 uint16_t interval, uint16_t window,
362 uint8_t filter_policy)
363 {
364 /* type value:
365 * 0000b - legacy 1M passive
366 * 0001b - legacy 1M active
367 * 0010b - Ext. 1M passive
368 * 0011b - Ext. 1M active
369 * 0100b - invalid
370 * 0101b - invalid
371 * 0110b - invalid
372 * 0111b - invalid
373 * 1000b - Ext. Coded passive
374 * 1001b - Ext. Coded active
375 */
376 lll->type = type;
377 lll->filter_policy = filter_policy;
378 lll->interval = interval;
379 lll->ticks_window = HAL_TICKER_US_TO_TICKS((uint64_t)window *
380 SCAN_INT_UNIT_US);
381
382 return lll->ticks_window;
383 }
384
/* Enable scanning on one scan instance: initialize the LLL context, compute
 * time reservation (ticks_slot) and an anchor/offset, then start the
 * scan event ticker.
 *
 * @param scan Scan instance to enable (1M or Coded PHY set).
 *
 * @return 0 on success, BT_HCI_ERR_CMD_DISALLOWED if the ticker could not
 *         be started.
 */
uint8_t ull_scan_enable(struct ll_scan_set *scan)
{
	uint32_t ticks_slot_overhead;
	uint32_t volatile ret_cb;
	uint32_t ticks_interval;
	uint32_t ticks_anchor;
	uint32_t ticks_offset;
	struct lll_scan *lll;
	uint8_t handle;
	uint32_t ret;

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	/* Initialize extend scan stop request */
	scan->is_stop = 0U;
#endif /* CONFIG_BT_CTLR_ADV_EXT */

	/* Initialize LLL scan context */
	lll = &scan->lll;
	lll->init_addr_type = scan->own_addr_type;
	(void)ll_addr_read(lll->init_addr_type, lll->init_addr);
	lll->chan = 0U;
	lll->is_stop = 0U;

	ull_hdr_init(&scan->ull);
	lll_hdr_init(lll, scan);

	ticks_interval = HAL_TICKER_US_TO_TICKS((uint64_t)lll->interval *
						SCAN_INT_UNIT_US);

	/* TODO: active_to_start feature port */
	scan->ull.ticks_active_to_start = 0U;
	scan->ull.ticks_prepare_to_start =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US);
	scan->ull.ticks_preempt_to_start =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_PREEMPT_MIN_US);

	/* Only low-latency builds account for start overhead in the slot */
	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
		ticks_slot_overhead = MAX(scan->ull.ticks_active_to_start,
					  scan->ull.ticks_prepare_to_start);
	} else {
		ticks_slot_overhead = 0U;
	}

	handle = ull_scan_handle_get(scan);

	/* Reserve the scan window as the slot when it fits inside the
	 * interval, otherwise scan continuously (window == interval).
	 */
	lll->ticks_window = scan->ticks_window;
	if ((lll->ticks_window +
	     HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US)) <
	    (ticks_interval - ticks_slot_overhead)) {
		scan->ull.ticks_slot =
			(lll->ticks_window +
			 HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US));

#if defined(CONFIG_BT_TICKER_EXT)
		ll_scan_ticker_ext[handle].ticks_slot_window =
			scan->ull.ticks_slot + ticks_slot_overhead;
#endif /* CONFIG_BT_TICKER_EXT */

	} else {
		if (IS_ENABLED(CONFIG_BT_CTLR_SCAN_UNRESERVED)) {
			scan->ull.ticks_slot = 0U;
		} else {
			scan->ull.ticks_slot = ticks_interval -
					       ticks_slot_overhead;
		}

		/* Window == interval: no scan-window stop ticker needed */
		lll->ticks_window = 0U;

#if defined(CONFIG_BT_TICKER_EXT)
		ll_scan_ticker_ext[handle].ticks_slot_window = ticks_interval;
#endif /* CONFIG_BT_TICKER_EXT */
	}

	if (false) {

#if defined(CONFIG_BT_CTLR_ADV_EXT) && defined(CONFIG_BT_CTLR_PHY_CODED)
	} else if (handle == SCAN_HANDLE_1M) {
		const struct ll_scan_set *scan_coded;

		scan_coded = ull_scan_set_get(SCAN_HANDLE_PHY_CODED);
		if (IS_PHY_ENABLED(scan_coded, PHY_CODED) &&
		    (lll->ticks_window != 0U)) {
			const struct lll_scan *lll_coded;
			uint32_t ticks_interval_coded;
			uint32_t ticks_window_sum_min;
			uint32_t ticks_window_sum_max;

			lll_coded = &scan_coded->lll;
			ticks_interval_coded = HAL_TICKER_US_TO_TICKS(
				(uint64_t)lll_coded->interval *
				SCAN_INT_UNIT_US);
			ticks_window_sum_min = lll->ticks_window +
					       lll_coded->ticks_window;
			ticks_window_sum_max = ticks_window_sum_min +
				HAL_TICKER_US_TO_TICKS(EVENT_TICKER_RES_MARGIN_US << 1);
			/* Check if 1M and Coded PHY scanning use same interval
			 * and the sum of the scan window duration equals their
			 * interval then use continuous scanning and avoid time
			 * reservation from overlapping.
			 */
			if ((ticks_interval == ticks_interval_coded) &&
			    IN_RANGE(ticks_interval, ticks_window_sum_min,
				     ticks_window_sum_max)) {
				if (IS_ENABLED(CONFIG_BT_CTLR_SCAN_UNRESERVED)) {
					scan->ull.ticks_slot = 0U;
				} else {
					scan->ull.ticks_slot =
						lll->ticks_window -
						ticks_slot_overhead -
						HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US) -
						HAL_TICKER_US_TO_TICKS(EVENT_TICKER_RES_MARGIN_US);
				}

				/* Continuous scanning, no scan window stop
				 * ticker to be started but we will zero the
				 * ticks_window value when coded PHY scan is
				 * enabled (the next following else clause).
				 * Due to this the first scan window will have
				 * the stop ticker started but consecutive
				 * scan window will not have the stop ticker
				 * started once coded PHY scan window has been
				 * enabled.
				 */
			}

#if defined(CONFIG_BT_TICKER_EXT)
			ll_scan_ticker_ext[handle].ticks_slot_window = 0U;
#endif /* CONFIG_BT_TICKER_EXT */
		}

		/* 1M scan window starts without any offset */
		ticks_offset = 0U;

	} else if (handle == SCAN_HANDLE_PHY_CODED) {
		struct ll_scan_set *scan_1m;

		scan_1m = ull_scan_set_get(SCAN_HANDLE_1M);
		if (IS_PHY_ENABLED(scan_1m, PHY_1M) &&
		    (lll->ticks_window != 0U)) {
			uint32_t ticks_window_sum_min;
			uint32_t ticks_window_sum_max;
			uint32_t ticks_interval_1m;
			struct lll_scan *lll_1m;

			lll_1m = &scan_1m->lll;
			ticks_interval_1m = HAL_TICKER_US_TO_TICKS(
				(uint64_t)lll_1m->interval *
				SCAN_INT_UNIT_US);
			ticks_window_sum_min = lll->ticks_window +
					       lll_1m->ticks_window;
			ticks_window_sum_max = ticks_window_sum_min +
				HAL_TICKER_US_TO_TICKS(EVENT_TICKER_RES_MARGIN_US << 1);
			/* Check if 1M and Coded PHY scanning use same interval
			 * and the sum of the scan window duration equals their
			 * interval then use continuous scanning and avoid time
			 * reservation from overlapping.
			 */
			if ((ticks_interval == ticks_interval_1m) &&
			    IN_RANGE(ticks_interval, ticks_window_sum_min,
				     ticks_window_sum_max)) {
				if (IS_ENABLED(CONFIG_BT_CTLR_SCAN_UNRESERVED)) {
					scan->ull.ticks_slot = 0U;
				} else {
					scan->ull.ticks_slot =
						lll->ticks_window -
						ticks_slot_overhead -
						HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US) -
						HAL_TICKER_US_TO_TICKS(EVENT_TICKER_RES_MARGIN_US);
				}
				/* Offset the coded PHY scan window, place
				 * after 1M scan window.
				 * Have some margin for jitter due to ticker
				 * resolution.
				 */
				ticks_offset = lll_1m->ticks_window;
				ticks_offset += HAL_TICKER_US_TO_TICKS(
					EVENT_TICKER_RES_MARGIN_US << 1);

				/* Continuous scanning, no scan window stop
				 * ticker started for both 1M and coded PHY.
				 */
				lll->ticks_window = 0U;
				lll_1m->ticks_window = 0U;

			} else {
				ticks_offset = 0U;
			}

#if defined(CONFIG_BT_TICKER_EXT)
			ll_scan_ticker_ext[handle].ticks_slot_window = 0U;
#endif /* CONFIG_BT_TICKER_EXT */
		} else {
			ticks_offset = 0U;
		}
#endif /* CONFIG_BT_CTLR_ADV_EXT && CONFIG_BT_CTLR_PHY_CODED */

	} else {
		ticks_offset = 0U;
	}

	ticks_anchor = ticker_ticks_now_get();
	ticks_anchor += HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);

#if defined(CONFIG_BT_CENTRAL) && defined(CONFIG_BT_CTLR_SCHED_ADVANCED)
	/* When not initiating, try to place the scanner after any central
	 * role's reserved slot to reduce collisions.
	 */
	if (!lll->conn) {
		uint32_t ticks_ref = 0U;
		uint32_t offset_us = 0U;
		int err;

		err = ull_sched_after_cen_slot_get(TICKER_USER_ID_THREAD,
						   (scan->ull.ticks_slot +
						    ticks_slot_overhead),
						   &ticks_ref, &offset_us);

		/* Use the ticks_ref as scanner's anchor if a free time space
		 * after any central role is available (indicated by a non-zero
		 * offset_us value).
		 */
		if (!err) {
			ticks_anchor = ticks_ref +
				       HAL_TICKER_US_TO_TICKS(offset_us);
		}
	}
#endif /* CONFIG_BT_CENTRAL && CONFIG_BT_CTLR_SCHED_ADVANCED */

	ret_cb = TICKER_STATUS_BUSY;

#if defined(CONFIG_BT_TICKER_EXT)
	ret = ticker_start_ext(
#else
	ret = ticker_start(
#endif /* CONFIG_BT_TICKER_EXT */
			   TICKER_INSTANCE_ID_CTLR,
			   TICKER_USER_ID_THREAD, TICKER_ID_SCAN_BASE + handle,
			   (ticks_anchor + ticks_offset), 0, ticks_interval,
			   HAL_TICKER_REMAINDER((uint64_t)lll->interval *
						SCAN_INT_UNIT_US),
			   TICKER_NULL_LAZY,
			   (scan->ull.ticks_slot + ticks_slot_overhead),
			   ticker_cb, scan,
			   ull_ticker_status_give, (void *)&ret_cb
#if defined(CONFIG_BT_TICKER_EXT)
			   ,
			   &ll_scan_ticker_ext[handle]
#endif /* CONFIG_BT_TICKER_EXT */
			   );
	ret = ull_ticker_status_take(ret, &ret_cb);
	if (ret != TICKER_STATUS_SUCCESS) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	scan->is_enabled = 1U;

#if defined(CONFIG_BT_CTLR_PRIVACY)
#if defined(CONFIG_BT_BROADCASTER)
	if (!ull_adv_is_enabled_get(0))
#endif
	{
		/* Notify filter module that scanning (bit 1) is now active */
		ull_filter_adv_scan_state_cb(BIT(1));
	}
#endif

	return 0;
}
649
/* Disable scanning on a scan instance: stop the scan ticker and tear down
 * any auxiliary (extended) scan contexts associated with it.
 *
 * @param handle Scan handle (SCAN_HANDLE_1M or SCAN_HANDLE_PHY_CODED).
 * @param scan   Corresponding scan instance.
 *
 * @return 0 on success, BT_HCI_ERR_CMD_DISALLOWED on stop failure.
 */
uint8_t ull_scan_disable(uint8_t handle, struct ll_scan_set *scan)
{
	int err;

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	/* Request Extended Scan stop */
	scan->is_stop = 1U;
	/* Ensure the stop flag is visible before stopping the ticker */
	cpu_dmb();
#endif /* CONFIG_BT_CTLR_ADV_EXT */

	err = ull_ticker_stop_with_mark(TICKER_ID_SCAN_BASE + handle,
					scan, &scan->lll);
	LL_ASSERT_INFO2(err == 0 || err == -EALREADY, handle, err);
	if (err) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

#if defined(CONFIG_BT_CTLR_ADV_EXT)
#if defined(CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS)
	/* Stop associated auxiliary scan contexts */
	err = ull_scan_aux_stop(&scan->lll);
	if (err && (err != -EALREADY)) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}
#else /* !CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS */
	/* Find and stop associated auxiliary scan contexts */
	for (uint8_t aux_handle = 0; aux_handle < CONFIG_BT_CTLR_SCAN_AUX_SET;
	     aux_handle++) {
		struct lll_scan_aux *aux_scan_lll;
		struct ll_scan_set *aux_scan;
		struct ll_scan_aux_set *aux;

		aux = ull_scan_aux_set_get(aux_handle);
		aux_scan_lll = aux->parent;
		if (!aux_scan_lll) {
			continue;
		}

		/* Only stop aux contexts whose parent is this scan set */
		aux_scan = HDR_LLL2ULL(aux_scan_lll);
		if (aux_scan == scan) {
			void *parent;

			err = ull_scan_aux_stop(aux);
			if (err && (err != -EALREADY)) {
				return BT_HCI_ERR_CMD_DISALLOWED;
			}

			/* Use a local variable to assert on auxiliary context's
			 * release.
			 * Under race condition a released aux context can be
			 * allocated for reception of chain PDU of a periodic
			 * sync role.
			 */
			parent = aux->parent;
			LL_ASSERT(!parent || (parent != aux_scan_lll));
		}
	}
#endif /* !CONFIG_BT_CTLR_SCAN_AUX_USE_CHAINS */
#endif /* CONFIG_BT_CTLR_ADV_EXT */

	return 0;
}
712
713 #if defined(CONFIG_BT_CTLR_ADV_EXT)
/* Scan event done handler: when the scan duration (without a period) has
 * elapsed, generate the extended scan terminate event and stop the scan
 * ticker.
 *
 * @param done Event-done node carrying the ULL context reference.
 */
void ull_scan_done(struct node_rx_event_done *done)
{
	struct node_rx_pdu *rx;
	struct ll_scan_set *scan;
	struct lll_scan *lll;
	uint8_t handle;
	uint32_t ret;

	/* Get reference to ULL context */
	scan = CONTAINER_OF(done->param, struct ll_scan_set, ull);
	lll = &scan->lll;

	/* Nothing to do when periodic scanning (duration_lazy), no duration
	 * configured, or duration not yet expired.
	 */
	if (likely(scan->duration_lazy || !lll->duration_reload ||
		   lll->duration_expire)) {
		return;
	}

	/* Prevent duplicate terminate event generation */
	lll->duration_reload = 0U;

	handle = ull_scan_handle_get(scan);
	LL_ASSERT(handle < BT_CTLR_SCAN_SET);

#if defined(CONFIG_BT_CTLR_PHY_CODED)
	/* Prevent duplicate terminate event if ull_scan_done get called by
	 * the other scan instance.
	 */
	struct ll_scan_set *scan_other;

	if (handle == SCAN_HANDLE_1M) {
		scan_other = ull_scan_set_get(SCAN_HANDLE_PHY_CODED);
	} else {
		scan_other = ull_scan_set_get(SCAN_HANDLE_1M);
	}
	scan_other->lll.duration_reload = 0U;
#endif /* CONFIG_BT_CTLR_PHY_CODED */

	/* Populate the pre-allocated terminate event node */
	rx = (void *)scan->node_rx_scan_term;
	rx->hdr.type = NODE_RX_TYPE_EXT_SCAN_TERMINATE;
	rx->hdr.handle = handle;

	/* Stop the scan ticker; the op callback enqueues the terminate event
	 * once any pending LLL event has been aborted.
	 */
	ret = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
			  (TICKER_ID_SCAN_BASE + handle), ticker_stop_ext_op_cb,
			  scan);

	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
		  (ret == TICKER_STATUS_BUSY));
}
762
/* Finalize scan termination for a handle: mark the instance disabled and,
 * with Coded PHY support, also disable the other PHY's instance if it is
 * still scanning.
 *
 * @param handle Scan handle whose terminate event was dequeued.
 */
void ull_scan_term_dequeue(uint8_t handle)
{
	struct ll_scan_set *scan;

	scan = ull_scan_set_get(handle);
	LL_ASSERT(scan);

	scan->is_enabled = 0U;

#if defined(CONFIG_BT_CTLR_PHY_CODED)
	if (handle == SCAN_HANDLE_1M) {
		struct ll_scan_set *scan_coded;

		scan_coded = ull_scan_set_get(SCAN_HANDLE_PHY_CODED);
		if (IS_PHY_ENABLED(scan_coded, PHY_CODED)) {
			uint8_t err;

			err = disable(SCAN_HANDLE_PHY_CODED);
			LL_ASSERT(!err);
		}
	} else {
		struct ll_scan_set *scan_1m;

		scan_1m = ull_scan_set_get(SCAN_HANDLE_1M);
		if (IS_PHY_ENABLED(scan_1m, PHY_1M)) {
			uint8_t err;

			err = disable(SCAN_HANDLE_1M);
			LL_ASSERT(!err);
		}
	}
#endif /* CONFIG_BT_CTLR_PHY_CODED */
}
796 #endif /* CONFIG_BT_CTLR_ADV_EXT */
797
798 struct ll_scan_set *ull_scan_set_get(uint8_t handle)
799 {
800 if (handle >= BT_CTLR_SCAN_SET) {
801 return NULL;
802 }
803
804 return &ll_scan[handle];
805 }
806
807 uint8_t ull_scan_handle_get(struct ll_scan_set *scan)
808 {
809 return ((uint8_t *)scan - (uint8_t *)ll_scan) / sizeof(*scan);
810 }
811
812 uint8_t ull_scan_lll_handle_get(struct lll_scan *lll)
813 {
814 return ull_scan_handle_get((void *)lll->hdr.parent);
815 }
816
817 struct ll_scan_set *ull_scan_is_valid_get(struct ll_scan_set *scan)
818 {
819 if (((uint8_t *)scan < (uint8_t *)ll_scan) ||
820 ((uint8_t *)scan > ((uint8_t *)ll_scan +
821 (sizeof(struct ll_scan_set) *
822 (BT_CTLR_SCAN_SET - 1))))) {
823 return NULL;
824 }
825
826 return scan;
827 }
828
829 struct lll_scan *ull_scan_lll_is_valid_get(struct lll_scan *lll)
830 {
831 struct ll_scan_set *scan;
832
833 scan = HDR_LLL2ULL(lll);
834 scan = ull_scan_is_valid_get(scan);
835 if (scan) {
836 return &scan->lll;
837 }
838
839 return NULL;
840 }
841
842 struct ll_scan_set *ull_scan_is_enabled_get(uint8_t handle)
843 {
844 struct ll_scan_set *scan;
845
846 scan = ull_scan_set_get(handle);
847 if (!scan || !scan->is_enabled) {
848 return NULL;
849 }
850
851 return scan;
852 }
853
854 struct ll_scan_set *ull_scan_is_disabled_get(uint8_t handle)
855 {
856 struct ll_scan_set *scan;
857
858 scan = ull_scan_set_get(handle);
859 if (!scan || scan->is_enabled) {
860 return NULL;
861 }
862
863 return scan;
864 }
865
/* Report whether and how a scan handle is in use.
 *
 * @return A bitfield composed of the enabled flag shifted by scan type,
 *         ULL_SCAN_IS_INITIATOR when a connection is being initiated, and
 *         ULL_SCAN_IS_SYNC when periodic sync is pending; 0 when idle.
 */
uint32_t ull_scan_is_enabled(uint8_t handle)
{
	struct ll_scan_set *scan;

	scan = ull_scan_is_enabled_get(handle);
	if (!scan) {
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
		/* Not enabled for scanning, but may be in use creating a
		 * periodic sync.
		 */
		scan = ull_scan_set_get(handle);

		return scan->periodic.sync ? ULL_SCAN_IS_SYNC : 0U;
#else
		return 0U;
#endif
	}

	return (((uint32_t)scan->is_enabled << scan->lll.type) |
#if defined(CONFIG_BT_CENTRAL)
		(scan->lll.conn ? ULL_SCAN_IS_INITIATOR : 0U) |
#endif
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
		(scan->periodic.sync ? ULL_SCAN_IS_SYNC : 0U) |
#endif
		0U);
}
890
891 uint32_t ull_scan_filter_pol_get(uint8_t handle)
892 {
893 struct ll_scan_set *scan;
894
895 scan = ull_scan_is_enabled_get(handle);
896 if (!scan) {
897 return 0;
898 }
899
900 return scan->lll.filter_policy;
901 }
902
/* Common init/reset: restore default TX power on the legacy scan instance
 * when dynamic TX power control is built without extended advertising.
 *
 * @return Always 0.
 */
static int init_reset(void)
{
#if defined(CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL) && \
	!defined(CONFIG_BT_CTLR_ADV_EXT)
	ll_scan[0].lll.tx_pwr_lvl = RADIO_TXP_DEFAULT;
#endif /* CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL && !CONFIG_BT_CTLR_ADV_EXT */

	return 0;
}
912
/* Scan ticker expiry callback (ULL_HIGH context): enqueue the LLL scan
 * prepare and maintain the extended scan duration/lazy bookkeeping.
 *
 * @param ticks_at_expire Absolute tick of this expiry.
 * @param ticks_drift     Accumulated drift.
 * @param remainder       Sub-tick remainder for the radio start.
 * @param lazy            Number of skipped expirations.
 * @param force           Forced-expiry flag.
 * @param param           The ll_scan_set instance.
 */
static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
		      uint32_t remainder, uint16_t lazy, uint8_t force,
		      void *param)
{
	/* Static mayfly/link/param: only one scan prepare outstanding per
	 * expiry in this context.
	 */
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, lll_scan_prepare};
	static struct lll_prepare_param p;
	struct ll_scan_set *scan;
	struct lll_scan *lll;
	uint32_t ret;
	uint8_t ref;

	DEBUG_RADIO_PREPARE_O(1);

	scan = param;
	lll = &scan->lll;

	/* Increment prepare reference count */
	ref = ull_ref_inc(&scan->ull);
	LL_ASSERT(ref);

	/* Append timing parameters */
	p.ticks_at_expire = ticks_at_expire;
	p.remainder = remainder;
	p.lazy = lazy;
	p.param = lll;
	p.force = force;
	mfy.param = &p;

	/* Kick LLL prepare */
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL,
			     0, &mfy);
	LL_ASSERT(!ret);

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	if (lll->duration_expire) {
		uint16_t elapsed;

		/* Each expiry accounts for 1 + skipped (lazy) events */
		elapsed = lazy + 1;
		if (lll->duration_expire > elapsed) {
			lll->duration_expire -= elapsed;
		} else {
			if (scan->duration_lazy) {
				uint8_t handle;
				uint16_t duration_lazy;

				/* Duration elapsed within a period: make the
				 * ticker skip events until the period's next
				 * duration window.
				 */
				duration_lazy = lll->duration_expire +
						scan->duration_lazy - elapsed;

				handle = ull_scan_handle_get(scan);
				LL_ASSERT(handle < BT_CTLR_SCAN_SET);

				ret = ticker_update(TICKER_INSTANCE_ID_CTLR,
						    TICKER_USER_ID_ULL_HIGH,
						    (TICKER_ID_SCAN_BASE +
						     handle), 0, 0, 0, 0,
						    duration_lazy, 0,
						    NULL, NULL);
				LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
					  (ret == TICKER_STATUS_BUSY));
			}

			lll->duration_expire = 0U;
		}
	} else if (lll->duration_reload && lazy) {
		uint8_t handle;

		handle = ull_scan_handle_get(scan);
		LL_ASSERT(handle < BT_CTLR_SCAN_SET);

		/* Lazy-skip ended: re-arm the duration countdown and force
		 * the ticker back to every-event expiry.
		 */
		lll->duration_expire = lll->duration_reload;
		ret = ticker_update(TICKER_INSTANCE_ID_CTLR,
				    TICKER_USER_ID_ULL_HIGH,
				    (TICKER_ID_SCAN_BASE + handle),
				    0, 0, 0, 0, 1, 1, NULL, NULL);
		LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
			  (ret == TICKER_STATUS_BUSY));
	}
#endif /* CONFIG_BT_CTLR_ADV_EXT */

	DEBUG_RADIO_PREPARE_O(1);
}
995
996 #if defined(CONFIG_BT_CTLR_ADV_EXT)
997 static uint8_t is_scan_update(uint8_t handle, uint16_t duration,
998 uint16_t period, struct ll_scan_set **scan,
999 struct node_rx_pdu **node_rx_scan_term)
1000 {
1001 *scan = ull_scan_set_get(handle);
1002 *node_rx_scan_term = (*scan)->node_rx_scan_term;
1003 return duration && period && (*scan)->lll.duration_reload &&
1004 (*scan)->duration_lazy;
1005 }
1006
/* Configure scan duration/period bookkeeping on a scan instance.
 *
 * Converts duration and period to event counts; with a duration but no
 * period, pre-allocates the extended scan terminate event node (shared via
 * *node_rx_scan_term across both PHY instances).
 *
 * @return 0 on success, BT_HCI_ERR_INVALID_PARAM when duration does not fit
 *         inside period, BT_HCI_ERR_MEM_CAPACITY_EXCEEDED on allocation
 *         failure.
 */
static uint8_t duration_period_setup(struct ll_scan_set *scan,
				     uint16_t duration, uint16_t period,
				     struct node_rx_pdu **node_rx_scan_term)
{
	struct lll_scan *lll;

	lll = &scan->lll;
	if (duration) {
		lll->duration_reload =
			ULL_SCAN_DURATION_TO_EVENTS(duration,
						    scan->lll.interval);
		if (period) {
			if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK) &&
			    (duration >= ULL_SCAN_PERIOD_TO_DURATION(period))) {
				return BT_HCI_ERR_INVALID_PARAM;
			}

			/* Lazy count = events in a period minus the events
			 * spent scanning (the duration).
			 */
			scan->duration_lazy =
				ULL_SCAN_PERIOD_TO_EVENTS(period,
							  scan->lll.interval);
			scan->duration_lazy -= lll->duration_reload;
			scan->node_rx_scan_term = NULL;
		} else {
			struct node_rx_pdu *node_rx;
			void *link_scan_term;

			scan->duration_lazy = 0U;

			/* Terminate node may already have been allocated by
			 * the other PHY instance; reuse it.
			 */
			if (*node_rx_scan_term) {
				scan->node_rx_scan_term = *node_rx_scan_term;

				return 0;
			}

			/* The alloc here used for ext scan termination event */
			link_scan_term = ll_rx_link_alloc();
			if (!link_scan_term) {
				return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
			}

			node_rx = ll_rx_alloc();
			if (!node_rx) {
				/* Release the link on partial allocation */
				ll_rx_link_release(link_scan_term);

				return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
			}

			node_rx->hdr.link = (void *)link_scan_term;
			scan->node_rx_scan_term = node_rx;
			*node_rx_scan_term = node_rx;
		}
	} else {
		/* Continuous scanning: no duration bookkeeping */
		lll->duration_reload = 0U;
		scan->duration_lazy = 0U;
		scan->node_rx_scan_term = NULL;
	}

	return 0;
}
1066
1067 static uint8_t duration_period_update(struct ll_scan_set *scan,
1068 uint8_t is_update)
1069 {
1070 if (is_update) {
1071 uint32_t volatile ret_cb;
1072 uint32_t ret;
1073
1074 scan->lll.duration_expire = 0U;
1075
1076 ret_cb = TICKER_STATUS_BUSY;
1077 ret = ticker_update(TICKER_INSTANCE_ID_CTLR,
1078 TICKER_USER_ID_THREAD,
1079 (TICKER_ID_SCAN_BASE +
1080 ull_scan_handle_get(scan)),
1081 0, 0, 0, 0, 1, 1,
1082 ull_ticker_status_give, (void *)&ret_cb);
1083 ret = ull_ticker_status_take(ret, &ret_cb);
1084 if (ret != TICKER_STATUS_SUCCESS) {
1085 return BT_HCI_ERR_CMD_DISALLOWED;
1086 }
1087
1088 return 0;
1089 } else {
1090 scan->lll.duration_expire = scan->lll.duration_reload;
1091 }
1092
1093 return 0;
1094 }
1095
/* Ticker-stop operation callback for extended scan termination: on success,
 * defer abort of any pending LLL events to ULL_HIGH context via mayfly.
 *
 * @param status Ticker stop operation status.
 * @param param  The ll_scan_set instance being terminated.
 */
static void ticker_stop_ext_op_cb(uint32_t status, void *param)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, ext_disable};
	uint32_t ret;

	/* Ignore if race between thread and ULL */
	if (status != TICKER_STATUS_SUCCESS) {
		/* TODO: detect race */

		return;
	}

	/* Check if any pending LLL events that need to be aborted */
	mfy.param = param;
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_LOW,
			     TICKER_USER_ID_ULL_HIGH, 0, &mfy);
	LL_ASSERT(!ret);
}
1115
/* Disable path for extended scan termination (ULL_HIGH context): if an LLL
 * event is still in the pipeline, trigger lll_disable and defer completion
 * to the disabled callback; otherwise complete immediately.
 *
 * @param param The ll_scan_set instance.
 */
static void ext_disable(void *param)
{
	struct ll_scan_set *scan;
	struct ull_hdr *hdr;

	/* Check ref count to determine if any pending LLL events in pipeline */
	scan = param;
	hdr = &scan->ull;
	if (ull_ref_get(hdr)) {
		static memq_link_t link;
		static struct mayfly mfy = {0, 0, &link, NULL, lll_disable};
		uint32_t ret;

		mfy.param = &scan->lll;

		/* Setup disabled callback to be called when ref count
		 * returns to zero.
		 */
		LL_ASSERT(!hdr->disabled_cb);
		hdr->disabled_param = mfy.param;
		hdr->disabled_cb = ext_disabled_cb;

		/* Trigger LLL disable */
		ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
				     TICKER_USER_ID_LLL, 0, &mfy);
		LL_ASSERT(!ret);
	} else {
		/* No pending LLL events */
		ext_disabled_cb(&scan->lll);
	}
}
1147
1148 static void ext_disabled_cb(void *param)
1149 {
1150 struct node_rx_pdu *rx;
1151 struct ll_scan_set *scan;
1152 struct lll_scan *lll;
1153
1154 /* Under race condition, if a connection has been established then
1155 * node_rx is already utilized to send terminate event on connection
1156 */
1157 lll = (void *)param;
1158 scan = HDR_LLL2ULL(lll);
1159 rx = scan->node_rx_scan_term;
1160 if (!rx) {
1161 return;
1162 }
1163
1164 /* NOTE: parameters are already populated on disable,
1165 * just enqueue here
1166 */
1167 ll_rx_put_sched(rx->hdr.link, rx);
1168 }
1169 #endif /* CONFIG_BT_CTLR_ADV_EXT */
1170
/* Disable one scan handle: stop the instance, release any pre-allocated
 * terminate event node, and notify the filter module.
 *
 * @param handle Scan handle to disable.
 *
 * @return 0 on success, BT_HCI_ERR_CMD_DISALLOWED when not enabled or when
 *         initiating a connection, else the error from ull_scan_disable().
 */
static uint8_t disable(uint8_t handle)
{
	struct ll_scan_set *scan;
	uint8_t ret;

	scan = ull_scan_is_enabled_get(handle);
	if (!scan) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

#if defined(CONFIG_BT_CENTRAL)
	/* Scanning that is part of connection initiation cannot be disabled
	 * via this path.
	 */
	if (scan->lll.conn) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}
#endif

	ret = ull_scan_disable(handle, scan);
	if (ret) {
		return ret;
	}

	scan->is_enabled = 0U;

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	/* Release the unconsumed terminate event node and its link */
	if (scan->node_rx_scan_term) {
		struct node_rx_pdu *node_rx_scan_term = scan->node_rx_scan_term;

		scan->node_rx_scan_term = NULL;

		ll_rx_link_release(node_rx_scan_term->hdr.link);
		ll_rx_release(node_rx_scan_term);
	}
#endif /* CONFIG_BT_CTLR_ADV_EXT */

#if defined(CONFIG_BT_CTLR_PRIVACY)
#if defined(CONFIG_BT_BROADCASTER)
	if (!ull_adv_is_enabled_get(0))
#endif
	{
		/* No advertising and no scanning: clear filter scan state */
		ull_filter_adv_scan_state_cb(0);
	}
#endif

	return 0;
}
1216