/*
 * Copyright (c) 2016-2019 Nordic Semiconductor ASA
 * Copyright (c) 2016 Vinayak Kariappa Chettimada
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <soc.h>
#include <zephyr/bluetooth/hci_types.h>

#include "hal/cpu.h"
#include "hal/ccm.h"
#include "hal/radio.h"
#include "hal/ticker.h"

#include "util/util.h"
#include "util/mem.h"
#include "util/memq.h"
#include "util/mayfly.h"
#include "util/dbuf.h"

#include "ticker/ticker.h"

#include "pdu_df.h"
#include "lll/pdu_vendor.h"
#include "pdu.h"

#include "lll.h"
#include "lll/lll_vendor.h"
#include "lll/lll_adv_types.h"
#include "lll_adv.h"
#include "lll/lll_adv_pdu.h"
#include "lll_scan.h"
#include "lll/lll_df_types.h"
#include "lll_conn.h"
#include "lll_filter.h"

#include "ll_sw/ull_tx_queue.h"

#include "ull_adv_types.h"
#include "ull_filter.h"

#include "ull_conn_types.h"
#include "ull_internal.h"
#include "ull_adv_internal.h"
#include "ull_scan_types.h"
#include "ull_scan_internal.h"
#include "ull_sched_internal.h"

#include "ll.h"

#include "hal/debug.h"

static int init_reset(void);
static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
		      uint32_t remainder, uint16_t lazy, uint8_t force,
		      void *param);
static uint8_t disable(uint8_t handle);

#if defined(CONFIG_BT_CTLR_ADV_EXT)
#define IS_PHY_ENABLED(scan_ctx, scan_phy) ((scan_ctx)->lll.phy & (scan_phy))

static uint8_t is_scan_update(uint8_t handle, uint16_t duration,
			      uint16_t period, struct ll_scan_set **scan,
			      struct node_rx_pdu **node_rx_scan_term);
static uint8_t duration_period_setup(struct ll_scan_set *scan,
				     uint16_t duration, uint16_t period,
				     struct node_rx_pdu **node_rx_scan_term);
static uint8_t duration_period_update(struct ll_scan_set *scan,
				      uint8_t is_update);
static void ticker_stop_ext_op_cb(uint32_t status, void *param);
static void ext_disable(void *param);
static void ext_disabled_cb(void *param);
#endif /* CONFIG_BT_CTLR_ADV_EXT */

static struct ll_scan_set ll_scan[BT_CTLR_SCAN_SET];

#if defined(CONFIG_BT_TICKER_EXT)
static struct ticker_ext ll_scan_ticker_ext[BT_CTLR_SCAN_SET];
#endif /* CONFIG_BT_TICKER_EXT */

uint8_t ll_scan_params_set(uint8_t type, uint16_t interval, uint16_t window,
			   uint8_t own_addr_type, uint8_t filter_policy)
{
	struct ll_scan_set *scan;
	struct lll_scan *lll;

	scan = ull_scan_is_disabled_get(SCAN_HANDLE_1M);
	if (!scan) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	uint8_t phy;

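	/* For extended scan parameters, bit 0 of the type field carries the
	 * active/passive scan flag and the remaining bits identify the
	 * scanning PHY (see the type encoding documented at
	 * ull_scan_params_set() below).
	 */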
	phy  = type >> 1;
	if (phy & BT_HCI_LE_EXT_SCAN_PHY_CODED) {
		struct ll_scan_set *scan_coded;

		if (!IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}

		scan_coded = ull_scan_is_disabled_get(SCAN_HANDLE_PHY_CODED);
		if (!scan_coded) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}

		scan = scan_coded;
	}

	lll = &scan->lll;

	/* NOTE: A zero (invalid) interval value indicates that scanning is
	 *       not to be started using this scan instance.
	 */
	if (!interval) {
		/* Set PHY to 0 so that scanning is not started on this
		 * instance.
		 */
		lll->phy = 0U;

		return 0;
	}

	/* If the PHY assigned is PHY_1M or PHY_CODED, then scanning on that
	 * PHY is enabled.
	 */
	lll->phy = phy;

#else /* !CONFIG_BT_CTLR_ADV_EXT */
	lll = &scan->lll;
#endif /* !CONFIG_BT_CTLR_ADV_EXT */

	scan->own_addr_type = own_addr_type;

	scan->ticks_window = ull_scan_params_set(lll, type, interval, window,
						 filter_policy);

	return 0;
}

#if defined(CONFIG_BT_CTLR_ADV_EXT)
uint8_t ll_scan_enable(uint8_t enable, uint16_t duration, uint16_t period)
{
	struct node_rx_pdu *node_rx_scan_term = NULL;
	uint8_t is_update_coded = 0U;
	uint8_t is_update_1m = 0U;
#else /* !CONFIG_BT_CTLR_ADV_EXT */
uint8_t ll_scan_enable(uint8_t enable)
{
#endif /* !CONFIG_BT_CTLR_ADV_EXT */
	struct ll_scan_set *scan_coded = NULL;
	uint8_t own_addr_type = 0U;
	uint8_t is_coded_phy = 0U;
	struct ll_scan_set *scan;
	uint8_t err;

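	/* A disable request stops 1M scanning and, when extended scanning on
	 * the Coded PHY is supported, the Coded PHY scan instance as well.
	 */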
	if (!enable) {
		err = disable(SCAN_HANDLE_1M);

		if (IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT) &&
		    IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED)) {
			uint8_t err_coded;

			err_coded = disable(SCAN_HANDLE_PHY_CODED);
			if (!err_coded) {
				err = 0U;
			}
		}

		return err;
	}

	scan = ull_scan_is_disabled_get(SCAN_HANDLE_1M);
	if (!scan) {
#if defined(CONFIG_BT_CTLR_ADV_EXT)
		is_update_1m = is_scan_update(SCAN_HANDLE_1M, duration, period,
					      &scan, &node_rx_scan_term);
		if (!is_update_1m)
#endif /* CONFIG_BT_CTLR_ADV_EXT */
		{
			return BT_HCI_ERR_CMD_DISALLOWED;
		}
	}

#if defined(CONFIG_BT_CTLR_ADV_EXT) && defined(CONFIG_BT_CTLR_PHY_CODED)
	scan_coded = ull_scan_is_disabled_get(SCAN_HANDLE_PHY_CODED);
	if (!scan_coded) {
		is_update_coded = is_scan_update(SCAN_HANDLE_PHY_CODED,
						 duration, period, &scan_coded,
						 &node_rx_scan_term);
		if (!is_update_coded) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}
	}

	own_addr_type = scan_coded->own_addr_type;
	is_coded_phy = (scan_coded->lll.phy &
			BT_HCI_LE_EXT_SCAN_PHY_CODED);
#endif /* CONFIG_BT_CTLR_ADV_EXT && CONFIG_BT_CTLR_PHY_CODED */

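	/* If the own address type in use is a random address type (bit 0
	 * set), a non-zero random address must have been configured before
	 * scanning can be enabled.
	 */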
	if ((is_coded_phy && (own_addr_type & 0x1)) ||
	    (!is_coded_phy && (scan->own_addr_type & 0x1))) {
		if (!mem_nz(ll_addr_get(BT_ADDR_LE_RANDOM), BDADDR_SIZE)) {
			return BT_HCI_ERR_INVALID_PARAM;
		}
	}

#if defined(CONFIG_BT_CTLR_ADV_EXT)
#if defined(CONFIG_BT_CTLR_PHY_CODED)
	if (!is_coded_phy || IS_PHY_ENABLED(scan, PHY_1M))
#endif /* CONFIG_BT_CTLR_PHY_CODED */
	{
		err = duration_period_setup(scan, duration, period,
					    &node_rx_scan_term);
		if (err) {
			return err;
		}
	}

	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED) &&
	    is_coded_phy) {
		err = duration_period_setup(scan_coded, duration, period,
					    &node_rx_scan_term);
		if (err) {
			return err;
		}
	}
#endif /* CONFIG_BT_CTLR_ADV_EXT */

#if defined(CONFIG_BT_CTLR_PRIVACY)
	struct lll_scan *lll;

	if (IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT) && is_coded_phy) {
		lll = &scan_coded->lll;

		/* TODO: Privacy support in Advertising Extensions */
	} else {
		lll = &scan->lll;
		own_addr_type = scan->own_addr_type;
	}

	ull_filter_scan_update(lll->filter_policy);

	lll->rl_idx = FILTER_IDX_NONE;
	lll->rpa_gen = 0;

	if ((lll->type & 0x1) &&
	    (own_addr_type == BT_ADDR_LE_PUBLIC_ID ||
	     own_addr_type == BT_ADDR_LE_RANDOM_ID)) {
		/* Generate RPAs if required */
		ull_filter_rpa_update(false);
		lll->rpa_gen = 1;
	}
#endif /* CONFIG_BT_CTLR_PRIVACY */

#if defined(CONFIG_BT_CTLR_ADV_EXT)
#if defined(CONFIG_BT_CTLR_PHY_CODED)
	if (!is_coded_phy || IS_PHY_ENABLED(scan, PHY_1M))
#endif /* CONFIG_BT_CTLR_PHY_CODED */
	{
		err = duration_period_update(scan, is_update_1m);
		if (err) {
			return err;
		} else if (is_update_1m) {
			return 0;
		}
	}

	if (IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED) &&
	    is_coded_phy) {
		err = duration_period_update(scan_coded, is_update_coded);
		if (err) {
			return err;
		} else if (is_update_coded) {
			return 0;
		}
	}

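	/* Start scanning on the enabled instance(s): the 1M instance first,
	 * then the Coded PHY instance when both PHYs are enabled.
	 */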
#if defined(CONFIG_BT_CTLR_PHY_CODED)
	if (!is_coded_phy || IS_PHY_ENABLED(scan, PHY_1M))
#endif /* CONFIG_BT_CTLR_PHY_CODED */
#endif /* CONFIG_BT_CTLR_ADV_EXT */
	{
		err = ull_scan_enable(scan);
		if (err) {
			return err;
		}
	}

	if (IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT) &&
	    IS_ENABLED(CONFIG_BT_CTLR_PHY_CODED) &&
	    is_coded_phy) {
		err = ull_scan_enable(scan_coded);
		if (err) {
			return err;
		}
	}

	return 0;
}

int ull_scan_init(void)
{
	int err;

	if (IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT)) {
		err = ull_scan_aux_init();
		if (err) {
			return err;
		}
	}

	err = init_reset();
	if (err) {
		return err;
	}

	return 0;
}

int ull_scan_reset(void)
{
	uint8_t handle;
	int err;

	for (handle = 0U; handle < BT_CTLR_SCAN_SET; handle++) {
		(void)disable(handle);

#if defined(CONFIG_BT_CTLR_ADV_EXT)
		/* Reset the PHY value to 0 so that scanning is not started on
		 * this scan instance unless ll_scan_params_set() has been
		 * explicitly invoked from HCI to enable scanning on that PHY.
		 */
		ll_scan[handle].lll.phy = 0U;
#endif /* CONFIG_BT_CTLR_ADV_EXT */
	}

	if (IS_ENABLED(CONFIG_BT_CTLR_ADV_EXT)) {
		err = ull_scan_aux_reset();
		if (err) {
			return err;
		}
	}

	err = init_reset();
	if (err) {
		return err;
	}

	return 0;
}

uint32_t ull_scan_params_set(struct lll_scan *lll, uint8_t type,
			     uint16_t interval, uint16_t window,
			     uint8_t filter_policy)
{
	/* type value:
	 * 0000b - legacy 1M passive
	 * 0001b - legacy 1M active
	 * 0010b - Ext. 1M passive
	 * 0011b - Ext. 1M active
	 * 0100b - invalid
	 * 0101b - invalid
	 * 0110b - invalid
	 * 0111b - invalid
	 * 1000b - Ext. Coded passive
	 * 1001b - Ext. Coded active
	 */
	lll->type = type;
	lll->filter_policy = filter_policy;
	lll->interval = interval;
	lll->ticks_window = HAL_TICKER_US_TO_TICKS((uint64_t)window *
						   SCAN_INT_UNIT_US);

	return lll->ticks_window;
}

uint8_t ull_scan_enable(struct ll_scan_set *scan)
{
	uint32_t ticks_slot_overhead;
	uint32_t volatile ret_cb;
	uint32_t ticks_interval;
	uint32_t ticks_anchor;
	uint32_t ticks_offset;
	struct lll_scan *lll;
	uint8_t handle;
	uint32_t ret;

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	/* Initialize the extended scan stop request */
	scan->is_stop = 0U;
#endif /* CONFIG_BT_CTLR_ADV_EXT */

	/* Initialize LLL scan context */
	lll = &scan->lll;
	lll->init_addr_type = scan->own_addr_type;
	(void)ll_addr_read(lll->init_addr_type, lll->init_addr);
	lll->chan = 0U;
	lll->is_stop = 0U;

	ull_hdr_init(&scan->ull);
	lll_hdr_init(lll, scan);

	ticks_interval = HAL_TICKER_US_TO_TICKS((uint64_t)lll->interval *
						SCAN_INT_UNIT_US);

	/* TODO: active_to_start feature port */
	scan->ull.ticks_active_to_start = 0U;
	scan->ull.ticks_prepare_to_start =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_XTAL_US);
	scan->ull.ticks_preempt_to_start =
		HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_PREEMPT_MIN_US);

	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT)) {
		ticks_slot_overhead = MAX(scan->ull.ticks_active_to_start,
					  scan->ull.ticks_prepare_to_start);
	} else {
		ticks_slot_overhead = 0U;
	}

	handle = ull_scan_handle_get(scan);

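	/* Reserve the scan window as the time slot when it fits within the
	 * scan interval; otherwise scan continuously over the whole interval
	 * (a ticks_window of zero means no scan window stop ticker is used).
	 */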
	lll->ticks_window = scan->ticks_window;
	if ((lll->ticks_window +
	     HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US)) <
	    (ticks_interval - ticks_slot_overhead)) {
		scan->ull.ticks_slot =
			(lll->ticks_window +
			 HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US));

#if defined(CONFIG_BT_TICKER_EXT)
		ll_scan_ticker_ext[handle].ticks_slot_window =
			scan->ull.ticks_slot + ticks_slot_overhead;
#endif /* CONFIG_BT_TICKER_EXT */

	} else {
		if (IS_ENABLED(CONFIG_BT_CTLR_SCAN_UNRESERVED)) {
			scan->ull.ticks_slot = 0U;
		} else {
			scan->ull.ticks_slot = ticks_interval -
					       ticks_slot_overhead;
		}

		lll->ticks_window = 0U;

#if defined(CONFIG_BT_TICKER_EXT)
		ll_scan_ticker_ext[handle].ticks_slot_window = ticks_interval;
#endif /* CONFIG_BT_TICKER_EXT */
	}

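	/* Determine the ticker start offset for this scan instance. When both
	 * the 1M and Coded PHY scan instances use the same interval and their
	 * scan windows sum up to that interval, the two windows are scheduled
	 * back-to-back and scanning becomes continuous.
	 */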
	if (false) {

#if defined(CONFIG_BT_CTLR_ADV_EXT) && defined(CONFIG_BT_CTLR_PHY_CODED)
	} else if (handle == SCAN_HANDLE_1M) {
		const struct ll_scan_set *scan_coded;

		scan_coded = ull_scan_set_get(SCAN_HANDLE_PHY_CODED);
		if (IS_PHY_ENABLED(scan_coded, PHY_CODED) &&
		    (lll->ticks_window != 0U)) {
			const struct lll_scan *lll_coded;
			uint32_t ticks_interval_coded;
			uint32_t ticks_window_sum_min;
			uint32_t ticks_window_sum_max;

			lll_coded = &scan_coded->lll;
			ticks_interval_coded = HAL_TICKER_US_TO_TICKS(
						(uint64_t)lll_coded->interval *
						SCAN_INT_UNIT_US);
			ticks_window_sum_min = lll->ticks_window +
					       lll_coded->ticks_window;
			ticks_window_sum_max = ticks_window_sum_min +
				HAL_TICKER_US_TO_TICKS(EVENT_TICKER_RES_MARGIN_US << 1);
			/* If 1M and Coded PHY scanning use the same interval
			 * and the sum of their scan window durations equals
			 * that interval, then use continuous scanning and
			 * avoid overlapping time reservations.
			 */
			if ((ticks_interval == ticks_interval_coded) &&
			    IN_RANGE(ticks_interval, ticks_window_sum_min,
				     ticks_window_sum_max)) {
				if (IS_ENABLED(CONFIG_BT_CTLR_SCAN_UNRESERVED)) {
					scan->ull.ticks_slot = 0U;
				} else {
					scan->ull.ticks_slot =
						lll->ticks_window -
						ticks_slot_overhead -
						HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US) -
						HAL_TICKER_US_TO_TICKS(EVENT_TICKER_RES_MARGIN_US);
				}

				/* Continuous scanning: no scan window stop
				 * ticker is to be started here, but the
				 * ticks_window value is zeroed when Coded PHY
				 * scanning is enabled (the following else-if
				 * clause). Hence the first scan window has the
				 * stop ticker started, but consecutive scan
				 * windows do not once the Coded PHY scan
				 * window has been enabled.
				 */
			}

#if defined(CONFIG_BT_TICKER_EXT)
			ll_scan_ticker_ext[handle].ticks_slot_window = 0U;
#endif /* CONFIG_BT_TICKER_EXT */
		}

		/* 1M scan window starts without any offset */
		ticks_offset = 0U;

	} else if (handle == SCAN_HANDLE_PHY_CODED) {
		struct ll_scan_set *scan_1m;

		scan_1m = ull_scan_set_get(SCAN_HANDLE_1M);
		if (IS_PHY_ENABLED(scan_1m, PHY_1M) &&
		    (lll->ticks_window != 0U)) {
			uint32_t ticks_window_sum_min;
			uint32_t ticks_window_sum_max;
			uint32_t ticks_interval_1m;
			struct lll_scan *lll_1m;

			lll_1m = &scan_1m->lll;
			ticks_interval_1m = HAL_TICKER_US_TO_TICKS(
						(uint64_t)lll_1m->interval *
						SCAN_INT_UNIT_US);
			ticks_window_sum_min = lll->ticks_window +
					       lll_1m->ticks_window;
			ticks_window_sum_max = ticks_window_sum_min +
				HAL_TICKER_US_TO_TICKS(EVENT_TICKER_RES_MARGIN_US << 1);
			/* If 1M and Coded PHY scanning use the same interval
			 * and the sum of their scan window durations equals
			 * that interval, then use continuous scanning and
			 * avoid overlapping time reservations.
			 */
			if ((ticks_interval == ticks_interval_1m) &&
			    IN_RANGE(ticks_interval, ticks_window_sum_min,
				     ticks_window_sum_max)) {
				if (IS_ENABLED(CONFIG_BT_CTLR_SCAN_UNRESERVED)) {
					scan->ull.ticks_slot = 0U;
				} else {
					scan->ull.ticks_slot =
						lll->ticks_window -
						ticks_slot_overhead -
						HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US) -
						HAL_TICKER_US_TO_TICKS(EVENT_TICKER_RES_MARGIN_US);
				}
				/* Offset the Coded PHY scan window to place it
				 * after the 1M scan window, with some margin
				 * for jitter due to the ticker resolution.
				 */
				ticks_offset = lll_1m->ticks_window;
				ticks_offset += HAL_TICKER_US_TO_TICKS(
					EVENT_TICKER_RES_MARGIN_US << 1);

				/* Continuous scanning: no scan window stop
				 * ticker is started for either 1M or Coded
				 * PHY.
				 */
				lll->ticks_window = 0U;
				lll_1m->ticks_window = 0U;

			} else {
				ticks_offset = 0U;
			}

#if defined(CONFIG_BT_TICKER_EXT)
			ll_scan_ticker_ext[handle].ticks_slot_window = 0U;
#endif /* CONFIG_BT_TICKER_EXT */
		} else {
			ticks_offset = 0U;
		}
#endif /* CONFIG_BT_CTLR_ADV_EXT && CONFIG_BT_CTLR_PHY_CODED */

	} else {
		ticks_offset = 0U;
	}

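	/* By default anchor the first scan window at the current tick count
	 * plus the ticker start overhead.
	 */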
	ticks_anchor = ticker_ticks_now_get();
	ticks_anchor += HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US);

#if defined(CONFIG_BT_CENTRAL) && defined(CONFIG_BT_CTLR_SCHED_ADVANCED)
	if (!lll->conn) {
		uint32_t ticks_ref = 0U;
		uint32_t offset_us = 0U;
		int err;

		err = ull_sched_after_cen_slot_get(TICKER_USER_ID_THREAD,
						   (scan->ull.ticks_slot +
						    ticks_slot_overhead),
						   &ticks_ref, &offset_us);

		/* Use ticks_ref as the scanner's anchor if a free time space
		 * after any central role is available (indicated by a
		 * non-zero offset_us value).
		 */
		if (!err) {
			ticks_anchor = ticks_ref +
				       HAL_TICKER_US_TO_TICKS(offset_us);
		}
	}
#endif /* CONFIG_BT_CENTRAL && CONFIG_BT_CTLR_SCHED_ADVANCED */

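	/* Start the periodic scan window ticker; each expiry of the ticker
	 * kicks an LLL scan prepare via ticker_cb().
	 */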
	ret_cb = TICKER_STATUS_BUSY;

#if defined(CONFIG_BT_TICKER_EXT)
	ret = ticker_start_ext(
#else
	ret = ticker_start(
#endif /* CONFIG_BT_TICKER_EXT */
			   TICKER_INSTANCE_ID_CTLR,
			   TICKER_USER_ID_THREAD, TICKER_ID_SCAN_BASE + handle,
			   (ticks_anchor + ticks_offset), 0, ticks_interval,
			   HAL_TICKER_REMAINDER((uint64_t)lll->interval *
						SCAN_INT_UNIT_US),
			   TICKER_NULL_LAZY,
			   (scan->ull.ticks_slot + ticks_slot_overhead),
			   ticker_cb, scan,
			   ull_ticker_status_give, (void *)&ret_cb
#if defined(CONFIG_BT_TICKER_EXT)
			   ,
			   &ll_scan_ticker_ext[handle]
#endif /* CONFIG_BT_TICKER_EXT */
			   );
	ret = ull_ticker_status_take(ret, &ret_cb);
	if (ret != TICKER_STATUS_SUCCESS) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

	scan->is_enabled = 1U;

#if defined(CONFIG_BT_CTLR_PRIVACY)
#if defined(CONFIG_BT_BROADCASTER)
	if (!ull_adv_is_enabled_get(0))
#endif
	{
		ull_filter_adv_scan_state_cb(BIT(1));
	}
#endif

	return 0;
}

uint8_t ull_scan_disable(uint8_t handle, struct ll_scan_set *scan)
{
	int err;

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	/* Request Extended Scan stop */
	scan->is_stop = 1U;
	cpu_dmb();
#endif /* CONFIG_BT_CTLR_ADV_EXT */

	err = ull_ticker_stop_with_mark(TICKER_ID_SCAN_BASE + handle,
					scan, &scan->lll);
	LL_ASSERT_INFO2(err == 0 || err == -EALREADY, handle, err);
	if (err) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	/* Find and stop associated auxiliary scan contexts */
	for (uint8_t aux_handle = 0; aux_handle < CONFIG_BT_CTLR_SCAN_AUX_SET;
	     aux_handle++) {
		struct lll_scan_aux *aux_scan_lll;
		struct ll_scan_set *aux_scan;
		struct ll_scan_aux_set *aux;

		aux = ull_scan_aux_set_get(aux_handle);
		aux_scan_lll = aux->parent;
		if (!aux_scan_lll) {
			continue;
		}

		aux_scan = HDR_LLL2ULL(aux_scan_lll);
		if (aux_scan == scan) {
			void *parent;

			err = ull_scan_aux_stop(aux);
			if (err && (err != -EALREADY)) {
				return BT_HCI_ERR_CMD_DISALLOWED;
			}

			/* Use a local variable to assert on the auxiliary
			 * context's release.
			 * Under a race condition, a released aux context can
			 * be re-allocated for reception of a chain PDU of a
			 * periodic sync role.
			 */
			parent = aux->parent;
			LL_ASSERT(!parent || (parent != aux_scan_lll));
		}
	}
#endif /* CONFIG_BT_CTLR_ADV_EXT */

	return 0;
}

#if defined(CONFIG_BT_CTLR_ADV_EXT)
void ull_scan_done(struct node_rx_event_done *done)
{
	struct node_rx_pdu *rx;
	struct ll_scan_set *scan;
	struct lll_scan *lll;
	uint8_t handle;
	uint32_t ret;

	/* Get reference to ULL context */
	scan = CONTAINER_OF(done->param, struct ll_scan_set, ull);
	lll = &scan->lll;

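	/* A scan terminate event is only generated when a scan duration
	 * without a period is in use and that duration has expired.
	 */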
	if (likely(scan->duration_lazy || !lll->duration_reload ||
		   lll->duration_expire)) {
		return;
	}

	/* Prevent duplicate terminate event generation */
	lll->duration_reload = 0U;

	handle = ull_scan_handle_get(scan);
	LL_ASSERT(handle < BT_CTLR_SCAN_SET);

#if defined(CONFIG_BT_CTLR_PHY_CODED)
	/* Prevent a duplicate terminate event if ull_scan_done gets called by
	 * the other scan instance.
	 */
	struct ll_scan_set *scan_other;

	if (handle == SCAN_HANDLE_1M) {
		scan_other = ull_scan_set_get(SCAN_HANDLE_PHY_CODED);
	} else {
		scan_other = ull_scan_set_get(SCAN_HANDLE_1M);
	}
	scan_other->lll.duration_reload = 0U;
#endif /* CONFIG_BT_CTLR_PHY_CODED */

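	/* Generate the extended scan terminate event and stop this instance's
	 * scan window ticker.
	 */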
	rx = (void *)scan->node_rx_scan_term;
	rx->hdr.type = NODE_RX_TYPE_EXT_SCAN_TERMINATE;
	rx->hdr.handle = handle;

	ret = ticker_stop(TICKER_INSTANCE_ID_CTLR, TICKER_USER_ID_ULL_HIGH,
			  (TICKER_ID_SCAN_BASE + handle), ticker_stop_ext_op_cb,
			  scan);

	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
		  (ret == TICKER_STATUS_BUSY));
}

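/* Called after the scan terminate event has been dequeued towards the host;
 * marks the scan instance disabled and, when the other PHY's scan instance is
 * still enabled, disables that instance too.
 */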
void ull_scan_term_dequeue(uint8_t handle)
{
	struct ll_scan_set *scan;

	scan = ull_scan_set_get(handle);
	LL_ASSERT(scan);

	scan->is_enabled = 0U;

#if defined(CONFIG_BT_CTLR_PHY_CODED)
	if (handle == SCAN_HANDLE_1M) {
		struct ll_scan_set *scan_coded;

		scan_coded = ull_scan_set_get(SCAN_HANDLE_PHY_CODED);
		if (IS_PHY_ENABLED(scan_coded, PHY_CODED)) {
			uint8_t err;

			err = disable(SCAN_HANDLE_PHY_CODED);
			LL_ASSERT(!err);
		}
	} else {
		struct ll_scan_set *scan_1m;

		scan_1m = ull_scan_set_get(SCAN_HANDLE_1M);
		if (IS_PHY_ENABLED(scan_1m, PHY_1M)) {
			uint8_t err;

			err = disable(SCAN_HANDLE_1M);
			LL_ASSERT(!err);
		}
	}
#endif /* CONFIG_BT_CTLR_PHY_CODED */
}
#endif /* CONFIG_BT_CTLR_ADV_EXT */

struct ll_scan_set *ull_scan_set_get(uint8_t handle)
{
	if (handle >= BT_CTLR_SCAN_SET) {
		return NULL;
	}

	return &ll_scan[handle];
}

uint8_t ull_scan_handle_get(struct ll_scan_set *scan)
{
	return ((uint8_t *)scan - (uint8_t *)ll_scan) / sizeof(*scan);
}

uint8_t ull_scan_lll_handle_get(struct lll_scan *lll)
{
	return ull_scan_handle_get((void *)lll->hdr.parent);
}

struct ll_scan_set *ull_scan_is_valid_get(struct ll_scan_set *scan)
{
	if (((uint8_t *)scan < (uint8_t *)ll_scan) ||
	    ((uint8_t *)scan > ((uint8_t *)ll_scan +
				(sizeof(struct ll_scan_set) *
				 (BT_CTLR_SCAN_SET - 1))))) {
		return NULL;
	}

	return scan;
}

struct lll_scan *ull_scan_lll_is_valid_get(struct lll_scan *lll)
{
	struct ll_scan_set *scan;

	scan = HDR_LLL2ULL(lll);
	scan = ull_scan_is_valid_get(scan);
	if (scan) {
		return &scan->lll;
	}

	return NULL;
}

struct ll_scan_set *ull_scan_is_enabled_get(uint8_t handle)
{
	struct ll_scan_set *scan;

	scan = ull_scan_set_get(handle);
	if (!scan || !scan->is_enabled) {
		return NULL;
	}

	return scan;
}

struct ll_scan_set *ull_scan_is_disabled_get(uint8_t handle)
{
	struct ll_scan_set *scan;

	scan = ull_scan_set_get(handle);
	if (!scan || scan->is_enabled) {
		return NULL;
	}

	return scan;
}

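/* Returns a bit mask built from the enable state shifted by the scan type,
 * with additional flags indicating initiator use and periodic sync (periodic
 * advertising scan) use.
 */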
uint32_t ull_scan_is_enabled(uint8_t handle)
{
	struct ll_scan_set *scan;

	scan = ull_scan_is_enabled_get(handle);
	if (!scan) {
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
		scan = ull_scan_set_get(handle);

		return scan->periodic.sync ? ULL_SCAN_IS_SYNC : 0U;
#else
		return 0U;
#endif
	}

	return (((uint32_t)scan->is_enabled << scan->lll.type) |
#if defined(CONFIG_BT_CENTRAL)
		(scan->lll.conn ? ULL_SCAN_IS_INITIATOR : 0U) |
#endif
#if defined(CONFIG_BT_CTLR_SYNC_PERIODIC)
		(scan->periodic.sync ? ULL_SCAN_IS_SYNC : 0U) |
#endif
		0U);
}

uint32_t ull_scan_filter_pol_get(uint8_t handle)
{
	struct ll_scan_set *scan;

	scan = ull_scan_is_enabled_get(handle);
	if (!scan) {
		return 0;
	}

	return scan->lll.filter_policy;
}


static int init_reset(void)
{
#if defined(CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL) && \
	!defined(CONFIG_BT_CTLR_ADV_EXT)
	ll_scan[0].lll.tx_pwr_lvl = RADIO_TXP_DEFAULT;
#endif /* CONFIG_BT_CTLR_TX_PWR_DYNAMIC_CONTROL && !CONFIG_BT_CTLR_ADV_EXT */

	return 0;
}

static void ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
		      uint32_t remainder, uint16_t lazy, uint8_t force,
		      void *param)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, lll_scan_prepare};
	static struct lll_prepare_param p;
	struct ll_scan_set *scan;
	struct lll_scan *lll;
	uint32_t ret;
	uint8_t ref;

	DEBUG_RADIO_PREPARE_O(1);

	scan = param;
	lll = &scan->lll;

	/* Increment prepare reference count */
	ref = ull_ref_inc(&scan->ull);
	LL_ASSERT(ref);

	/* Append timing parameters */
	p.ticks_at_expire = ticks_at_expire;
	p.remainder = remainder;
	p.lazy = lazy;
	p.param = lll;
	p.force = force;
	mfy.param = &p;

	/* Kick LLL prepare */
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL,
			     0, &mfy);
	LL_ASSERT(!ret);

#if defined(CONFIG_BT_CTLR_ADV_EXT)
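	/* Scan duration and period bookkeeping: count down the remaining
	 * duration by the number of elapsed scan events; when a scan period
	 * is in use, the ticker lazy count is updated so that scan events are
	 * skipped until the next period starts, where the duration is then
	 * re-armed.
	 */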
	if (lll->duration_expire) {
		uint16_t elapsed;

		elapsed = lazy + 1;
		if (lll->duration_expire > elapsed) {
			lll->duration_expire -= elapsed;
		} else {
			if (scan->duration_lazy) {
				uint8_t handle;
				uint16_t duration_lazy;

				duration_lazy = lll->duration_expire +
						scan->duration_lazy - elapsed;

				handle = ull_scan_handle_get(scan);
				LL_ASSERT(handle < BT_CTLR_SCAN_SET);

				ret = ticker_update(TICKER_INSTANCE_ID_CTLR,
						    TICKER_USER_ID_ULL_HIGH,
						    (TICKER_ID_SCAN_BASE +
						     handle), 0, 0, 0, 0,
						    duration_lazy, 0,
						    NULL, NULL);
				LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
					  (ret == TICKER_STATUS_BUSY));
			}

			lll->duration_expire = 0U;
		}
	} else if (lll->duration_reload && lazy) {
		uint8_t handle;

		handle = ull_scan_handle_get(scan);
		LL_ASSERT(handle < BT_CTLR_SCAN_SET);

		lll->duration_expire = lll->duration_reload;
		ret = ticker_update(TICKER_INSTANCE_ID_CTLR,
				    TICKER_USER_ID_ULL_HIGH,
				    (TICKER_ID_SCAN_BASE + handle),
				    0, 0, 0, 0, 1, 1, NULL, NULL);
		LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
			  (ret == TICKER_STATUS_BUSY));
	}
#endif /* CONFIG_BT_CTLR_ADV_EXT */

	DEBUG_RADIO_PREPARE_O(1);
}

#if defined(CONFIG_BT_CTLR_ADV_EXT)
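/* Check whether an enable request for an already enabled scan set is a
 * duration/period update; an update is only accepted when both a duration and
 * a period are requested and the scan set is already running with a duration
 * and period.
 */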
static uint8_t is_scan_update(uint8_t handle, uint16_t duration,
			      uint16_t period, struct ll_scan_set **scan,
			      struct node_rx_pdu **node_rx_scan_term)
{
	*scan = ull_scan_set_get(handle);
	*node_rx_scan_term = (*scan)->node_rx_scan_term;
	return duration && period && (*scan)->lll.duration_reload &&
	       (*scan)->duration_lazy;
}

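/* Convert the HCI scan duration and period parameters into event counts for
 * the LLL scan context and, for duration-only scanning, pre-allocate the rx
 * node used to generate the scan terminate event.
 */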
static uint8_t duration_period_setup(struct ll_scan_set *scan,
				     uint16_t duration, uint16_t period,
				     struct node_rx_pdu **node_rx_scan_term)
{
	struct lll_scan *lll;

	lll = &scan->lll;
	if (duration) {
		lll->duration_reload =
			ULL_SCAN_DURATION_TO_EVENTS(duration,
						    scan->lll.interval);
		if (period) {
			if (IS_ENABLED(CONFIG_BT_CTLR_PARAM_CHECK) &&
			    (duration >= ULL_SCAN_PERIOD_TO_DURATION(period))) {
				return BT_HCI_ERR_INVALID_PARAM;
			}

			scan->duration_lazy =
				ULL_SCAN_PERIOD_TO_EVENTS(period,
							  scan->lll.interval);
			scan->duration_lazy -= lll->duration_reload;
			scan->node_rx_scan_term = NULL;
		} else {
			struct node_rx_pdu *node_rx;
			void *link_scan_term;

			scan->duration_lazy = 0U;

			if (*node_rx_scan_term) {
				scan->node_rx_scan_term = *node_rx_scan_term;

				return 0;
			}

			/* The allocation here is used for the extended scan
			 * termination event.
			 */
			link_scan_term = ll_rx_link_alloc();
			if (!link_scan_term) {
				return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
			}

			node_rx = ll_rx_alloc();
			if (!node_rx) {
				ll_rx_link_release(link_scan_term);

				return BT_HCI_ERR_MEM_CAPACITY_EXCEEDED;
			}

			node_rx->hdr.link = (void *)link_scan_term;
			scan->node_rx_scan_term = node_rx;
			*node_rx_scan_term = node_rx;
		}
	} else {
		lll->duration_reload = 0U;
		scan->duration_lazy = 0U;
		scan->node_rx_scan_term = NULL;
	}

	return 0;
}

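/* Arm or update the scan duration: on an update of an already enabled scan,
 * clear any running expiry and kick the ticker so that the new duration and
 * period take effect; otherwise arm the expiry for the newly enabled scan.
 */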
static uint8_t duration_period_update(struct ll_scan_set *scan,
				      uint8_t is_update)
{
	if (is_update) {
		uint32_t volatile ret_cb;
		uint32_t ret;

		scan->lll.duration_expire = 0U;

		ret_cb = TICKER_STATUS_BUSY;
		ret = ticker_update(TICKER_INSTANCE_ID_CTLR,
				    TICKER_USER_ID_THREAD,
				    (TICKER_ID_SCAN_BASE +
				     ull_scan_handle_get(scan)),
				    0, 0, 0, 0, 1, 1,
				    ull_ticker_status_give, (void *)&ret_cb);
		ret = ull_ticker_status_take(ret, &ret_cb);
		if (ret != TICKER_STATUS_SUCCESS) {
			return BT_HCI_ERR_CMD_DISALLOWED;
		}

		return 0;
	} else {
		scan->lll.duration_expire = scan->lll.duration_reload;
	}

	return 0;
}

static void ticker_stop_ext_op_cb(uint32_t status, void *param)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, ext_disable};
	uint32_t ret;

	/* Ignore if there is a race between the thread and ULL contexts */
	if (status != TICKER_STATUS_SUCCESS) {
		/* TODO: detect race */

		return;
	}

	/* Check if there are any pending LLL events that need to be aborted */
	mfy.param = param;
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_LOW,
			     TICKER_USER_ID_ULL_HIGH, 0, &mfy);
	LL_ASSERT(!ret);
}

static void ext_disable(void *param)
{
	struct ll_scan_set *scan;
	struct ull_hdr *hdr;

	/* Check the ref count to determine if there are any pending LLL
	 * events in the pipeline.
	 */
	scan = param;
	hdr = &scan->ull;
	if (ull_ref_get(hdr)) {
		static memq_link_t link;
		static struct mayfly mfy = {0, 0, &link, NULL, lll_disable};
		uint32_t ret;

		mfy.param = &scan->lll;

		/* Set up the disabled callback to be called when the ref
		 * count returns to zero.
		 */
		LL_ASSERT(!hdr->disabled_cb);
		hdr->disabled_param = mfy.param;
		hdr->disabled_cb = ext_disabled_cb;

		/* Trigger LLL disable */
		ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH,
				     TICKER_USER_ID_LLL, 0, &mfy);
		LL_ASSERT(!ret);
	} else {
		/* No pending LLL events */
		ext_disabled_cb(&scan->lll);
	}
}

static void ext_disabled_cb(void *param)
{
	struct node_rx_pdu *rx;
	struct ll_scan_set *scan;
	struct lll_scan *lll;

	/* Under a race condition, if a connection has been established then
	 * node_rx is already utilized to send the terminate event for the
	 * connection.
	 */
	lll = (void *)param;
	scan = HDR_LLL2ULL(lll);
	rx = scan->node_rx_scan_term;
	if (!rx) {
		return;
	}

	/* NOTE: parameters are already populated on disable,
	 * just enqueue here
	 */
	ll_rx_put_sched(rx->hdr.link, rx);
}
#endif /* CONFIG_BT_CTLR_ADV_EXT */

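/* Common helper to disable a scan instance by handle: rejects the request if
 * the instance is being used to initiate a connection, stops the scan ticker
 * and any associated auxiliary contexts, releases a pending scan terminate
 * node, and updates the Controller's privacy filter state.
 */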
static uint8_t disable(uint8_t handle)
{
	struct ll_scan_set *scan;
	uint8_t ret;

	scan = ull_scan_is_enabled_get(handle);
	if (!scan) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}

#if defined(CONFIG_BT_CENTRAL)
	if (scan->lll.conn) {
		return BT_HCI_ERR_CMD_DISALLOWED;
	}
#endif

	ret = ull_scan_disable(handle, scan);
	if (ret) {
		return ret;
	}

	scan->is_enabled = 0U;

#if defined(CONFIG_BT_CTLR_ADV_EXT)
	if (scan->node_rx_scan_term) {
		struct node_rx_pdu *node_rx_scan_term = scan->node_rx_scan_term;

		scan->node_rx_scan_term = NULL;

		ll_rx_link_release(node_rx_scan_term->hdr.link);
		ll_rx_release(node_rx_scan_term);
	}
#endif /* CONFIG_BT_CTLR_ADV_EXT */

#if defined(CONFIG_BT_CTLR_PRIVACY)
#if defined(CONFIG_BT_BROADCASTER)
	if (!ull_adv_is_enabled_get(0))
#endif
	{
		ull_filter_adv_scan_state_cb(0);
	}
#endif

	return 0;
}