/*
 * Copyright (c) 2018-2020 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <stdint.h>
#include <stdbool.h>
#include <errno.h>

#include <zephyr/toolchain.h>

#include <soc.h>
#include <zephyr/device.h>

#include <zephyr/drivers/entropy.h>
#include <zephyr/irq.h>

#include "hal/swi.h"
#include "hal/ccm.h"
#include "hal/cntr.h"
#include "hal/radio.h"
#include "hal/ticker.h"

#include "util/mem.h"
#include "util/memq.h"
#include "util/mayfly.h"

#include "ticker/ticker.h"

#include "lll.h"
#include "lll_vendor.h"
#include "lll_clock.h"
#include "lll_internal.h"
#include "lll_prof_internal.h"

#include "hal/debug.h"

#if defined(CONFIG_BT_CTLR_ZLI)
#define IRQ_CONNECT_FLAGS IRQ_ZERO_LATENCY
#else
#define IRQ_CONNECT_FLAGS 0
#endif

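/* State of the currently active LLL event and, with
 * CONFIG_BT_CTLR_LOW_LAT_ULL_DONE, the LLL and ULL done event counts used
 * to keep the two execution contexts in sync.
 */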
static struct {
	struct {
		void              *param;
		lll_is_abort_cb_t is_abort_cb;
		lll_abort_cb_t    abort_cb;
	} curr;

#if defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
	struct {
		uint8_t volatile lll_count;
		uint8_t          ull_count;
	} done;
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */
} event;

/* Entropy device */
#if defined(CONFIG_ENTROPY_HAS_DRIVER)
/* FIXME: This could probably use a chosen entropy device instead of relying
 * on the nodelabel being the same as for the old nRF RNG.
 */
static const struct device *const dev_entropy = DEVICE_DT_GET(DT_NODELABEL(rng));
#endif /* CONFIG_ENTROPY_HAS_DRIVER */

static int init_reset(void);
#if defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
static inline void done_inc(void);
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */
static inline bool is_done_sync(void);
static inline struct lll_event *prepare_dequeue_iter_ready_get(uint8_t *idx);
static inline struct lll_event *resume_enqueue(lll_prepare_cb_t resume_cb);
static void isr_race(void *param);

#if !defined(CONFIG_BT_CTLR_LOW_LAT)
static uint32_t preempt_ticker_start(struct lll_event *first,
				     struct lll_event *prev,
				     struct lll_event *next);
static uint32_t preempt_ticker_stop(void);
static void preempt_ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
			      uint32_t remainder, uint16_t lazy, uint8_t force,
			      void *param);
static void preempt(void *param);
#else /* CONFIG_BT_CTLR_LOW_LAT */
#if (CONFIG_BT_CTLR_LLL_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
static void mfy_ticker_job_idle_get(void *param);
static void ticker_op_job_disable(uint32_t status, void *op_context);
#endif
#endif /* CONFIG_BT_CTLR_LOW_LAT */

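/* Radio peripheral ISR; either connected dynamically or declared as a
 * Zephyr direct ISR, depending on the Kconfig options below.
 */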
#if defined(CONFIG_BT_CTLR_DYNAMIC_INTERRUPTS) && \
	defined(CONFIG_DYNAMIC_DIRECT_INTERRUPTS)
static void radio_nrf5_isr(const void *arg)
#else /* !CONFIG_BT_CTLR_DYNAMIC_INTERRUPTS */
ISR_DIRECT_DECLARE(radio_nrf5_isr)
#endif /* !CONFIG_BT_CTLR_DYNAMIC_INTERRUPTS */
{
	DEBUG_RADIO_ISR(1);

	lll_prof_enter_radio();

	isr_radio();

	ISR_DIRECT_PM();

	lll_prof_exit_radio();

	DEBUG_RADIO_ISR(0);

#if !defined(CONFIG_BT_CTLR_DYNAMIC_INTERRUPTS) || \
	!defined(CONFIG_DYNAMIC_DIRECT_INTERRUPTS)
	return 1;
#endif /* !CONFIG_DYNAMIC_DIRECT_INTERRUPTS */
}

#if defined(CONFIG_BT_CTLR_RADIO_TIMER_ISR)
#if defined(CONFIG_BT_CTLR_DYNAMIC_INTERRUPTS) && \
	defined(CONFIG_DYNAMIC_DIRECT_INTERRUPTS)
static void timer_nrf5_isr(const void *arg)
#else /* !CONFIG_BT_CTLR_DYNAMIC_INTERRUPTS */
ISR_DIRECT_DECLARE(timer_nrf5_isr)
#endif /* !CONFIG_BT_CTLR_DYNAMIC_INTERRUPTS */
{
	DEBUG_RADIO_ISR(1);

	lll_prof_enter_radio();

	isr_radio_tmr();

	ISR_DIRECT_PM();

	lll_prof_exit_radio();

	DEBUG_RADIO_ISR(0);

#if !defined(CONFIG_BT_CTLR_DYNAMIC_INTERRUPTS) || \
	!defined(CONFIG_DYNAMIC_DIRECT_INTERRUPTS)
	return 1;
#endif /* !CONFIG_DYNAMIC_DIRECT_INTERRUPTS */
}
#endif /* CONFIG_BT_CTLR_RADIO_TIMER_ISR */

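/* RTC ISR: triggers the ticker worker on COMPARE0 and runs the ULL high,
 * and conditionally the ULL low, mayfly queues.
 */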
static void rtc0_nrf5_isr(const void *arg)
{
	DEBUG_TICKER_ISR(1);

	lll_prof_enter_ull_high();

	/* On COMPARE0, run ticker worker instance 0 */
#if defined(CONFIG_BT_CTLR_NRF_GRTC)
	if (NRF_GRTC->EVENTS_COMPARE[HAL_CNTR_GRTC_CC_IDX_TICKER]) {
		nrf_grtc_event_clear(NRF_GRTC, HAL_CNTR_GRTC_EVENT_COMPARE_TICKER);
#else /* !CONFIG_BT_CTLR_NRF_GRTC */
	if (NRF_RTC->EVENTS_COMPARE[0]) {
		nrf_rtc_event_clear(NRF_RTC, NRF_RTC_EVENT_COMPARE_0);
#endif /* !CONFIG_BT_CTLR_NRF_GRTC */

		ticker_trigger(0);
	}

	mayfly_run(TICKER_USER_ID_ULL_HIGH);

	lll_prof_exit_ull_high();

#if !defined(CONFIG_BT_CTLR_LOW_LAT) && \
	(CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
	lll_prof_enter_ull_low();

	mayfly_run(TICKER_USER_ID_ULL_LOW);

	lll_prof_exit_ull_low();
#endif

	DEBUG_TICKER_ISR(0);
}

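/* Software interrupt ISR running the LLL mayfly queue. */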
static void swi_lll_nrf5_isr(const void *arg)
{
	DEBUG_RADIO_ISR(1);

	lll_prof_enter_lll();

	mayfly_run(TICKER_USER_ID_LLL);

	lll_prof_exit_lll();

	DEBUG_RADIO_ISR(0);
}

#if defined(CONFIG_BT_CTLR_LOW_LAT) || \
	(CONFIG_BT_CTLR_ULL_HIGH_PRIO != CONFIG_BT_CTLR_ULL_LOW_PRIO)
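/* Software interrupt ISR running the ULL low priority mayfly queue when it
 * does not share the ULL high execution context.
 */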
static void swi_ull_low_nrf5_isr(const void *arg)
{
	DEBUG_TICKER_JOB(1);

	lll_prof_enter_ull_low();

	mayfly_run(TICKER_USER_ID_ULL_LOW);

	lll_prof_exit_ull_low();

	DEBUG_TICKER_JOB(0);
}
#endif

int lll_init(void)
{
	int err;

#if defined(CONFIG_ENTROPY_HAS_DRIVER)
	/* Get reference to entropy device */
	if (!device_is_ready(dev_entropy)) {
		return -ENODEV;
	}
#endif /* CONFIG_ENTROPY_HAS_DRIVER */

	/* Initialize LLL internals */
	event.curr.abort_cb = NULL;

	/* Initialize clocks */
	err = lll_clock_init();
	if (err < 0) {
		return err;
	}

	err = init_reset();
	if (err) {
		return err;
	}

	if (IS_ENABLED(HAL_RADIO_GPIO_HAVE_PA_PIN) ||
	    IS_ENABLED(HAL_RADIO_GPIO_HAVE_LNA_PIN)) {
		err = radio_gpio_pa_lna_init();
		if (err) {
			return err;
		}
	}

	/* Initialize SW IRQ structure */
	hal_swi_init();

	/* Connect ISRs */
#if defined(CONFIG_BT_CTLR_DYNAMIC_INTERRUPTS)
#if defined(CONFIG_DYNAMIC_DIRECT_INTERRUPTS)
	ARM_IRQ_DIRECT_DYNAMIC_CONNECT(HAL_RADIO_IRQn, CONFIG_BT_CTLR_LLL_PRIO,
				       IRQ_CONNECT_FLAGS, no_reschedule);
	irq_connect_dynamic(HAL_RADIO_IRQn, CONFIG_BT_CTLR_LLL_PRIO,
			    radio_nrf5_isr, NULL, IRQ_CONNECT_FLAGS);
#if defined(CONFIG_BT_CTLR_RADIO_TIMER_ISR)
	ARM_IRQ_DIRECT_DYNAMIC_CONNECT(TIMER0_IRQn, CONFIG_BT_CTLR_LLL_PRIO,
				       IRQ_CONNECT_FLAGS, no_reschedule);
	irq_connect_dynamic(TIMER0_IRQn, CONFIG_BT_CTLR_LLL_PRIO,
			    timer_nrf5_isr, NULL, IRQ_CONNECT_FLAGS);
#endif /* CONFIG_BT_CTLR_RADIO_TIMER_ISR */
#else /* !CONFIG_DYNAMIC_DIRECT_INTERRUPTS */
	IRQ_DIRECT_CONNECT(HAL_RADIO_IRQn, CONFIG_BT_CTLR_LLL_PRIO,
			   radio_nrf5_isr, IRQ_CONNECT_FLAGS);
#if defined(CONFIG_BT_CTLR_RADIO_TIMER_ISR)
	IRQ_DIRECT_CONNECT(TIMER0_IRQn, CONFIG_BT_CTLR_LLL_PRIO,
			   timer_nrf5_isr, IRQ_CONNECT_FLAGS);
#endif /* CONFIG_BT_CTLR_RADIO_TIMER_ISR */
#endif /* !CONFIG_DYNAMIC_DIRECT_INTERRUPTS */
	irq_connect_dynamic(HAL_RTC_IRQn, CONFIG_BT_CTLR_ULL_HIGH_PRIO,
			    rtc0_nrf5_isr, NULL, 0U);
	irq_connect_dynamic(HAL_SWI_RADIO_IRQ, CONFIG_BT_CTLR_LLL_PRIO,
			    swi_lll_nrf5_isr, NULL, IRQ_CONNECT_FLAGS);
#if defined(CONFIG_BT_CTLR_LOW_LAT) || \
	(CONFIG_BT_CTLR_ULL_HIGH_PRIO != CONFIG_BT_CTLR_ULL_LOW_PRIO)
	irq_connect_dynamic(HAL_SWI_JOB_IRQ, CONFIG_BT_CTLR_ULL_LOW_PRIO,
			    swi_ull_low_nrf5_isr, NULL, 0U);
#endif

#else /* !CONFIG_BT_CTLR_DYNAMIC_INTERRUPTS */
	IRQ_DIRECT_CONNECT(HAL_RADIO_IRQn, CONFIG_BT_CTLR_LLL_PRIO,
			   radio_nrf5_isr, IRQ_CONNECT_FLAGS);
#if defined(CONFIG_BT_CTLR_RADIO_TIMER_ISR)
	IRQ_DIRECT_CONNECT(TIMER0_IRQn, CONFIG_BT_CTLR_LLL_PRIO,
			   timer_nrf5_isr, IRQ_CONNECT_FLAGS);
#endif /* CONFIG_BT_CTLR_RADIO_TIMER_ISR */
	IRQ_CONNECT(HAL_RTC_IRQn, CONFIG_BT_CTLR_ULL_HIGH_PRIO,
		    rtc0_nrf5_isr, NULL, 0);
#if defined(CONFIG_BT_CTLR_ZLI)
	IRQ_DIRECT_CONNECT(HAL_SWI_RADIO_IRQ, CONFIG_BT_CTLR_LLL_PRIO,
			   swi_lll_nrf5_isr, IRQ_CONNECT_FLAGS);
#else
	IRQ_CONNECT(HAL_SWI_RADIO_IRQ, CONFIG_BT_CTLR_LLL_PRIO,
		    swi_lll_nrf5_isr, NULL, IRQ_CONNECT_FLAGS);
#endif
#if defined(CONFIG_BT_CTLR_LOW_LAT) || \
	(CONFIG_BT_CTLR_ULL_HIGH_PRIO != CONFIG_BT_CTLR_ULL_LOW_PRIO)
	IRQ_CONNECT(HAL_SWI_JOB_IRQ, CONFIG_BT_CTLR_ULL_LOW_PRIO,
		    swi_ull_low_nrf5_isr, NULL, 0);
#endif
#endif /* !CONFIG_BT_CTLR_DYNAMIC_INTERRUPTS */

	/* Enable IRQs */
	irq_enable(HAL_RADIO_IRQn);
	irq_enable(HAL_RTC_IRQn);
	irq_enable(HAL_SWI_RADIO_IRQ);
	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT) ||
		(CONFIG_BT_CTLR_ULL_HIGH_PRIO != CONFIG_BT_CTLR_ULL_LOW_PRIO)) {
		irq_enable(HAL_SWI_JOB_IRQ);
	}

	radio_setup();

	return 0;
}

int lll_deinit(void)
{
	int err;

	/* Release clocks */
	err = lll_clock_deinit();
	if (err < 0) {
		return err;
	}

	if (IS_ENABLED(HAL_RADIO_GPIO_HAVE_PA_PIN) ||
	    IS_ENABLED(HAL_RADIO_GPIO_HAVE_LNA_PIN)) {
		radio_gpio_pa_lna_deinit();
	}

	/* Disable IRQs */
	irq_disable(HAL_RADIO_IRQn);
	irq_disable(HAL_RTC_IRQn);
	irq_disable(HAL_SWI_RADIO_IRQ);
	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT) ||
		(CONFIG_BT_CTLR_ULL_HIGH_PRIO != CONFIG_BT_CTLR_ULL_LOW_PRIO)) {
		irq_disable(HAL_SWI_JOB_IRQ);
	}

	/* Disconnect dynamic ISRs used */
#if defined(CONFIG_BT_CTLR_DYNAMIC_INTERRUPTS)
#if defined(CONFIG_SHARED_INTERRUPTS)
#if defined(CONFIG_DYNAMIC_DIRECT_INTERRUPTS)
	irq_disconnect_dynamic(HAL_RADIO_IRQn, CONFIG_BT_CTLR_LLL_PRIO,
			       radio_nrf5_isr, NULL, IRQ_CONNECT_FLAGS);
#if defined(CONFIG_BT_CTLR_RADIO_TIMER_ISR)
	irq_disconnect_dynamic(TIMER0_IRQn, CONFIG_BT_CTLR_LLL_PRIO,
			       timer_nrf5_isr, NULL, IRQ_CONNECT_FLAGS);
#endif /* CONFIG_BT_CTLR_RADIO_TIMER_ISR */
#endif /* CONFIG_DYNAMIC_DIRECT_INTERRUPTS */
	irq_disconnect_dynamic(HAL_RTC_IRQn, CONFIG_BT_CTLR_ULL_HIGH_PRIO,
			       rtc0_nrf5_isr, NULL, 0U);
	irq_disconnect_dynamic(HAL_SWI_RADIO_IRQ, CONFIG_BT_CTLR_LLL_PRIO,
			       swi_lll_nrf5_isr, NULL, IRQ_CONNECT_FLAGS);
#if defined(CONFIG_BT_CTLR_LOW_LAT) || \
	(CONFIG_BT_CTLR_ULL_HIGH_PRIO != CONFIG_BT_CTLR_ULL_LOW_PRIO)
	irq_disconnect_dynamic(HAL_SWI_JOB_IRQ, CONFIG_BT_CTLR_ULL_LOW_PRIO,
			       swi_ull_low_nrf5_isr, NULL, 0U);
#endif
#else /* !CONFIG_SHARED_INTERRUPTS */
#if defined(CONFIG_DYNAMIC_DIRECT_INTERRUPTS)
	irq_connect_dynamic(HAL_RADIO_IRQn, CONFIG_BT_CTLR_LLL_PRIO, NULL, NULL,
			    IRQ_CONNECT_FLAGS);
#endif /* CONFIG_DYNAMIC_DIRECT_INTERRUPTS */
	irq_connect_dynamic(HAL_RTC_IRQn, CONFIG_BT_CTLR_ULL_HIGH_PRIO, NULL, NULL,
			    0U);
	irq_connect_dynamic(HAL_SWI_RADIO_IRQ, CONFIG_BT_CTLR_LLL_PRIO, NULL,
			    NULL, IRQ_CONNECT_FLAGS);
#if defined(CONFIG_BT_CTLR_LOW_LAT) || \
	(CONFIG_BT_CTLR_ULL_HIGH_PRIO != CONFIG_BT_CTLR_ULL_LOW_PRIO)
	irq_connect_dynamic(HAL_SWI_JOB_IRQ, CONFIG_BT_CTLR_ULL_LOW_PRIO, NULL,
			    NULL, 0U);
#endif
#endif /* !CONFIG_SHARED_INTERRUPTS */
#endif /* CONFIG_BT_CTLR_DYNAMIC_INTERRUPTS */

	return 0;
}

int lll_csrand_get(void *buf, size_t len)
{
#if defined(CONFIG_ENTROPY_HAS_DRIVER)
	return entropy_get_entropy(dev_entropy, buf, len);
#else /* !CONFIG_ENTROPY_HAS_DRIVER */
	/* FIXME: No suitable entropy device available yet.
	 *        The Controller requires random numbers.
	 *        Hence, return the uninitialized buf contents, for now.
	 */
	return 0;
#endif /* !CONFIG_ENTROPY_HAS_DRIVER */
}

int lll_csrand_isr_get(void *buf, size_t len)
{
#if defined(CONFIG_ENTROPY_HAS_DRIVER)
	return entropy_get_entropy_isr(dev_entropy, buf, len, 0);
#else /* !CONFIG_ENTROPY_HAS_DRIVER */
	/* FIXME: No suitable entropy device available yet.
	 *        The Controller requires random numbers.
	 *        Hence, return the uninitialized buf contents, for now.
	 */
	return 0;
#endif /* !CONFIG_ENTROPY_HAS_DRIVER */
}

int lll_rand_get(void *buf, size_t len)
{
#if defined(CONFIG_ENTROPY_HAS_DRIVER)
	return entropy_get_entropy(dev_entropy, buf, len);
#else /* !CONFIG_ENTROPY_HAS_DRIVER */
	/* FIXME: No suitable entropy device available yet.
	 *        The Controller requires random numbers.
	 *        Hence, return the uninitialized buf contents, for now.
	 */
	return 0;
#endif /* !CONFIG_ENTROPY_HAS_DRIVER */
}

int lll_rand_isr_get(void *buf, size_t len)
{
#if defined(CONFIG_ENTROPY_HAS_DRIVER)
	return entropy_get_entropy_isr(dev_entropy, buf, len, 0);
#else /* !CONFIG_ENTROPY_HAS_DRIVER */
	/* FIXME: No suitable entropy device available yet.
	 *        The Controller requires random numbers.
	 *        Hence, return the uninitialized buf contents, for now.
	 */
	return 0;
#endif /* !CONFIG_ENTROPY_HAS_DRIVER */
}

int lll_reset(void)
{
	int err;

	err = init_reset();
	if (err) {
		return err;
	}

	return 0;
}

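/* Abort the current event and/or pending prepares. A NULL param aborts
 * everything; otherwise only the event and prepares whose parameter
 * matches param are aborted.
 */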
void lll_disable(void *param)
{
	/* LLL disable of current event, done is generated */
	if (!param || (param == event.curr.param)) {
		if (event.curr.abort_cb && event.curr.param) {
			event.curr.abort_cb(NULL, event.curr.param);
		} else {
			LL_ASSERT(!param);
		}
	}
	{
		struct lll_event *next;
		uint8_t idx;

		idx = UINT8_MAX;
		next = ull_prepare_dequeue_iter(&idx);
		while (next) {
			if (!next->is_aborted &&
			    (!param || (param == next->prepare_param.param))) {
				next->is_aborted = 1;
				next->abort_cb(&next->prepare_param,
					       next->prepare_param.param);

#if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
				/* NOTE: abort_cb called lll_done which modifies
				 *       the prepare pipeline, hence re-iterate
				 *       through the prepare pipeline.
				 */
				idx = UINT8_MAX;
#endif /* !CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */
			}

			next = ull_prepare_dequeue_iter(&idx);
		}
	}
}

int lll_prepare_done(void *param)
{
#if defined(CONFIG_BT_CTLR_LOW_LAT) && \
	    (CONFIG_BT_CTLR_LLL_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, mfy_ticker_job_idle_get};
	uint32_t ret;

	ret = mayfly_enqueue(TICKER_USER_ID_LLL, TICKER_USER_ID_ULL_LOW,
			     1, &mfy);
	if (ret) {
		return -EFAULT;
	}

	return 0;
#else
	return 0;
#endif /* CONFIG_BT_CTLR_LOW_LAT */
}

int lll_done(void *param)
{
	struct lll_event *next;
	struct ull_hdr *ull;
	void *evdone;

	/* Assert if param supplied without a pending prepare to cancel. */
	next = ull_prepare_dequeue_get();
	LL_ASSERT(!param || next);

	/* check if current LLL event is done */
	if (!param) {
		/* Reset current event instance */
		LL_ASSERT(event.curr.abort_cb);
		event.curr.abort_cb = NULL;

		param = event.curr.param;
		event.curr.param = NULL;

#if defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
		done_inc();
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */

		if (param) {
			ull = HDR_LLL2ULL(param);
		} else {
			ull = NULL;
		}

		if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT) &&
		    (CONFIG_BT_CTLR_LLL_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)) {
			mayfly_enable(TICKER_USER_ID_LLL,
				      TICKER_USER_ID_ULL_LOW,
				      1);
		}

		DEBUG_RADIO_CLOSE(0);
	} else {
		ull = HDR_LLL2ULL(param);
	}

#if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
	ull_prepare_dequeue(TICKER_USER_ID_LLL);
#endif /* !CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */

#if defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
	struct event_done_extra *extra;
	uint8_t result;

	/* TODO: Pass from calling function */
	result = DONE_COMPLETED;

	lll_done_score(param, result);

	extra = ull_event_done_extra_get();
	LL_ASSERT(extra);

	/* Set result in done extra data - type was set by the role */
	extra->result = result;
#endif /* CONFIG_BT_CTLR_JIT_SCHEDULING */

	/* Let ULL know about LLL event done */
	evdone = ull_event_done(ull);
	LL_ASSERT(evdone);

	return 0;
}

#if defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
void lll_done_ull_inc(void)
{
	LL_ASSERT(event.done.ull_count != event.done.lll_count);
	event.done.ull_count++;
}
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */

bool lll_is_done(void *param, bool *is_resume)
{
	/* NOTE: When preempted, the current radio event may enqueue itself as
	 *       a resume event into the prepare pipeline, in which case
	 *       event.curr.param will have been set to NULL.
	 */
	*is_resume = (param != event.curr.param);

	return !event.curr.abort_cb;
}

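/* Default is_abort_cb: the current event can always be aborted and does not
 * request a resume.
 */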
int lll_is_abort_cb(void *next, void *curr, lll_prepare_cb_t *resume_cb)
{
	return -ECANCELED;
}

void lll_abort_cb(struct lll_prepare_param *prepare_param, void *param)
{
	int err;

	/* NOTE: This is not a prepare being cancelled */
	if (!prepare_param) {
		/* Perform event abort here.
		 * After event has been cleanly aborted, clean up resources
		 * and dispatch event done.
		 */
		radio_isr_set(lll_isr_done, param);
		radio_disable();
		return;
	}

	/* NOTE: Else clean the top half preparations of the aborted event
	 * currently in preparation pipeline.
	 */
	err = lll_hfclock_off();
	LL_ASSERT(err >= 0);

	lll_done(param);
}

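/* Ticks needed in advance of the event anchor to prepare and start the
 * event. When XON_BITMASK is set in ticks_prepare_to_start, the HF clock
 * is retained from a previous event, so only the shorter preempt-to-start
 * offset needs to be accounted for.
 */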
uint32_t lll_event_offset_get(struct ull_hdr *ull)
{
	if (0) {
#if defined(CONFIG_BT_CTLR_XTAL_ADVANCED)
	} else if (ull->ticks_prepare_to_start & XON_BITMASK) {
		return MAX(ull->ticks_active_to_start,
			   ull->ticks_preempt_to_start);
#endif /* CONFIG_BT_CTLR_XTAL_ADVANCED */
	} else {
		return MAX(ull->ticks_active_to_start,
			   ull->ticks_prepare_to_start);
	}
}

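/* Check that the event is starting within the allowed overhead: returns 0
 * when on time, else the number of ticks by which ticks_at_event has
 * already been overshot.
 */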
uint32_t lll_preempt_calc(struct ull_hdr *ull, uint8_t ticker_id,
		       uint32_t ticks_at_event)
{
	uint32_t ticks_now;
	uint32_t diff;

	ticks_now = ticker_ticks_now_get();
	diff = ticker_ticks_diff_get(ticks_now, ticks_at_event);
	if (diff & BIT(HAL_TICKER_CNTR_MSBIT)) {
		return 0;
	}

	diff += HAL_TICKER_CNTR_CMP_OFFSET_MIN;
	if (diff > HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US)) {
		/* TODO: for Low Latency Feature with Advanced XTAL feature.
		 * 1. Release retained HF clock.
		 * 2. Advance the radio event to accommodate normal prepare
		 *    duration.
		 * 3. Increase the preempt to start ticks for future events.
		 */
		return diff;
	}

	return 0U;
}

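/* Map a Bluetooth LE channel index to its RF channel (frequency offset
 * from 2400 MHz) and set the whitening initialization value: advertising
 * channels 37, 38 and 39 are at 2402, 2426 and 2480 MHz; data channels
 * 0..10 at 2404..2424 MHz and 11..36 at 2428..2478 MHz.
 */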
void lll_chan_set(uint32_t chan)
{
	switch (chan) {
	case 37:
		radio_freq_chan_set(2);
		break;

	case 38:
		radio_freq_chan_set(26);
		break;

	case 39:
		radio_freq_chan_set(80);
		break;

	default:
		if (chan < 11) {
			radio_freq_chan_set(4 + (chan * 2U));
		} else if (chan < 40) {
			radio_freq_chan_set(28 + ((chan - 11) * 2U));
		} else {
			LL_ASSERT(0);
		}
		break;
	}

	radio_whiten_iv_set(chan);
}

uint32_t lll_radio_is_idle(void)
{
	return radio_is_idle();
}

uint32_t lll_radio_tx_ready_delay_get(uint8_t phy, uint8_t flags)
{
	return radio_tx_ready_delay_get(phy, flags);
}

uint32_t lll_radio_rx_ready_delay_get(uint8_t phy, uint8_t flags)
{
	return radio_rx_ready_delay_get(phy, flags);
}

int8_t lll_radio_tx_pwr_min_get(void)
{
	return radio_tx_power_min_get();
}

int8_t lll_radio_tx_pwr_max_get(void)
{
	return radio_tx_power_max_get();
}

int8_t lll_radio_tx_pwr_floor(int8_t tx_pwr_lvl)
{
	return radio_tx_power_floor(tx_pwr_lvl);
}

void lll_isr_tx_status_reset(void)
{
	radio_status_reset();
	radio_tmr_status_reset();

	if (IS_ENABLED(HAL_RADIO_GPIO_HAVE_PA_PIN) ||
	    IS_ENABLED(HAL_RADIO_GPIO_HAVE_LNA_PIN)) {
		radio_gpio_pa_lna_disable();
	}
}

void lll_isr_rx_status_reset(void)
{
	radio_status_reset();
	radio_tmr_status_reset();
	radio_rssi_status_reset();

	if (IS_ENABLED(HAL_RADIO_GPIO_HAVE_PA_PIN) ||
	    IS_ENABLED(HAL_RADIO_GPIO_HAVE_LNA_PIN)) {
		radio_gpio_pa_lna_disable();
	}
}

void lll_isr_tx_sub_status_reset(void)
{
	radio_status_reset();
	radio_tmr_tx_status_reset();

	if (IS_ENABLED(HAL_RADIO_GPIO_HAVE_PA_PIN) ||
	    IS_ENABLED(HAL_RADIO_GPIO_HAVE_LNA_PIN)) {
		radio_gpio_pa_lna_disable();
	}
}

void lll_isr_rx_sub_status_reset(void)
{
	radio_status_reset();
	radio_tmr_rx_status_reset();

	if (IS_ENABLED(HAL_RADIO_GPIO_HAVE_PA_PIN) ||
	    IS_ENABLED(HAL_RADIO_GPIO_HAVE_LNA_PIN)) {
		radio_gpio_pa_lna_disable();
	}
}

void lll_isr_status_reset(void)
{
	radio_status_reset();
	radio_tmr_status_reset();
	radio_filter_status_reset();
	if (IS_ENABLED(CONFIG_BT_CTLR_PRIVACY)) {
		radio_ar_status_reset();
	}
	radio_rssi_status_reset();

	if (IS_ENABLED(HAL_RADIO_GPIO_HAVE_PA_PIN) ||
	    IS_ENABLED(HAL_RADIO_GPIO_HAVE_LNA_PIN)) {
		radio_gpio_pa_lna_disable();
	}
}

inline void lll_isr_abort(void *param)
{
	lll_isr_status_reset();
	lll_isr_cleanup(param);
}

void lll_isr_done(void *param)
{
	lll_isr_abort(param);
}

void lll_isr_cleanup(void *param)
{
	int err;

	radio_isr_set(isr_race, param);
	if (!radio_is_idle()) {
		radio_disable();
	}

	radio_tmr_stop();
	radio_stop();

	err = lll_hfclock_off();
	LL_ASSERT(err >= 0);

	lll_done(NULL);
}

void lll_isr_early_abort(void *param)
{
	int err;

	radio_isr_set(isr_race, param);
	if (!radio_is_idle()) {
		radio_disable();
	}

	err = lll_hfclock_off();
	LL_ASSERT(err >= 0);

	lll_done(NULL);
}

int lll_prepare_resolve(lll_is_abort_cb_t is_abort_cb, lll_abort_cb_t abort_cb,
			lll_prepare_cb_t prepare_cb,
			struct lll_prepare_param *prepare_param,
			uint8_t is_resume, uint8_t is_dequeue)
{
	struct lll_event *ready_short = NULL;
	struct lll_event *ready;
	struct lll_event *next;
	uint8_t idx;
	int err;

	/* Find the ready prepare in the pipeline */
	idx = UINT8_MAX;
	ready = prepare_dequeue_iter_ready_get(&idx);

	/* Find any short prepare */
	if (ready) {
		uint32_t ticks_at_preempt_min = prepare_param->ticks_at_expire;
		uint32_t ticks_at_preempt_next;
		uint8_t idx_backup = idx;
		uint32_t diff;

		ticks_at_preempt_next = ready->prepare_param.ticks_at_expire;
		diff = ticker_ticks_diff_get(ticks_at_preempt_min,
					     ticks_at_preempt_next);
		if (is_resume || ((diff & BIT(HAL_TICKER_CNTR_MSBIT)) == 0U)) {
			ticks_at_preempt_min = ticks_at_preempt_next;
			if (&ready->prepare_param != prepare_param) {
				ready_short = ready;
			}
		} else {
			ready = NULL;
			idx_backup = UINT8_MAX;
		}

		do {
			struct lll_event *ready_next;

			ready_next = prepare_dequeue_iter_ready_get(&idx);
			if (!ready_next) {
				break;
			}

			ticks_at_preempt_next = ready_next->prepare_param.ticks_at_expire;
			diff = ticker_ticks_diff_get(ticks_at_preempt_next,
						     ticks_at_preempt_min);
			if ((diff & BIT(HAL_TICKER_CNTR_MSBIT)) == 0U) {
				continue;
			}

			ready_short = ready_next;
			ticks_at_preempt_min = ticks_at_preempt_next;
		} while (true);

		idx = idx_backup;
	}

	/* Current event active or another prepare is ready in the pipeline */
	if ((!is_dequeue && !is_done_sync()) ||
	    event.curr.abort_cb || ready_short ||
	    (ready && is_resume)) {
#if defined(CONFIG_BT_CTLR_LOW_LAT)
		lll_prepare_cb_t resume_cb;
#endif /* CONFIG_BT_CTLR_LOW_LAT */

		if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT) && event.curr.param) {
			/* early abort */
			event.curr.abort_cb(NULL, event.curr.param);
		}

		/* Store the next prepare for deferred call */
		next = ull_prepare_enqueue(is_abort_cb, abort_cb, prepare_param,
					   prepare_cb, is_resume);
		LL_ASSERT(next);

#if !defined(CONFIG_BT_CTLR_LOW_LAT)
		if (is_resume) {
			return -EINPROGRESS;
		}

		/* Find any short prepare */
		if (ready_short) {
			ready = ready_short;
		}

		/* Always start the preempt timeout for the first prepare in
		 * the pipeline.
		 */
		struct lll_event *first = ready ? ready : next;
		uint32_t ret;

		/* Start the preempt timeout */
		ret = preempt_ticker_start(first, ready, next);
		LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
			  (ret == TICKER_STATUS_BUSY));

#else /* CONFIG_BT_CTLR_LOW_LAT */
		next = NULL;
		while (ready) {
			if (!ready->is_aborted) {
				if (event.curr.param == ready->prepare_param.param) {
					ready->is_aborted = 1;
					ready->abort_cb(&ready->prepare_param,
							ready->prepare_param.param);
				} else {
					next = ready;
				}
			}

			ready = ull_prepare_dequeue_iter(&idx);
		}

		if (next) {
			/* check if resume requested by curr */
			err = event.curr.is_abort_cb(NULL, event.curr.param,
						     &resume_cb);
			LL_ASSERT(err);

			if (err == -EAGAIN) {
				next = resume_enqueue(resume_cb);
				LL_ASSERT(next);
			} else {
				LL_ASSERT(err == -ECANCELED);
			}
		}
#endif /* CONFIG_BT_CTLR_LOW_LAT */

		return -EINPROGRESS;
	}

	LL_ASSERT(!ready || &ready->prepare_param == prepare_param);

	event.curr.param = prepare_param->param;
	event.curr.is_abort_cb = is_abort_cb;
	event.curr.abort_cb = abort_cb;

	err = prepare_cb(prepare_param);

	if (!IS_ENABLED(CONFIG_BT_CTLR_ASSERT_OVERHEAD_START) &&
	    (err == -ECANCELED)) {
		err = 0;
	}

#if !defined(CONFIG_BT_CTLR_LOW_LAT)
	uint32_t ret;

	/* NOTE: The preempt timeout started earlier for the current event,
	 *       whose prepare has now been invoked, is not explicitly
	 *       stopped here. If there is a next prepare event in the
	 *       pipeline, any previously started preempt timeout will be
	 *       stopped before the new preempt timeout is started. Refer to
	 *       the implementation in preempt_ticker_start().
	 */

	/* Find the next prepare needing a preempt timeout to be set up */
	next = prepare_dequeue_iter_ready_get(&idx);
	if (!next) {
		return err;
	}

	/* Start the preempt timeout */
	ret = preempt_ticker_start(next, NULL, next);
	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
		  (ret == TICKER_STATUS_BUSY));
#endif /* !CONFIG_BT_CTLR_LOW_LAT */

	return err;
}

static int init_reset(void)
{
	return 0;
}

#if defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
static inline void done_inc(void)
{
	event.done.lll_count++;
	LL_ASSERT(event.done.lll_count != event.done.ull_count);
}
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */

static inline bool is_done_sync(void)
{
#if defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
	return event.done.lll_count == event.done.ull_count;
#else /* !CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */
	return true;
#endif /* !CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */
}

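/* Iterate the prepare pipeline from *idx, skipping aborted and resume
 * entries; returns the next ready prepare, or NULL.
 */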
static inline struct lll_event *prepare_dequeue_iter_ready_get(uint8_t *idx)
{
	struct lll_event *ready;

	do {
		ready = ull_prepare_dequeue_iter(idx);
	} while (ready && (ready->is_aborted || ready->is_resume));

	return ready;
}

static inline struct lll_event *resume_enqueue(lll_prepare_cb_t resume_cb)
{
	struct lll_prepare_param prepare_param = {0};

	/* Enqueue into prepare pipeline as resume radio event, and remove
	 * parameter assignment from currently active radio event so that
	 * done event is not generated.
	 */
	prepare_param.param = event.curr.param;
	event.curr.param = NULL;

	return ull_prepare_enqueue(event.curr.is_abort_cb, event.curr.abort_cb,
				   &prepare_param, resume_cb, 1);
}

static void isr_race(void *param)
{
	/* NOTE: lll_disable could have a race with ... */
	radio_status_reset();
}

#if !defined(CONFIG_BT_CTLR_LOW_LAT)
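/* Request/acknowledge counters forming a lock-less handshake with the
 * ticker for the preempt timeout: a start or stop operation is pending
 * while the respective req and ack counts differ, and preempt_req differs
 * from preempt_ack while the timeout is scheduled but not yet expired.
 */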
static uint8_t volatile preempt_start_req;
static uint8_t preempt_start_ack;
static uint8_t volatile preempt_stop_req;
static uint8_t preempt_stop_ack;
static uint8_t preempt_req;
static uint8_t volatile preempt_ack;

static void ticker_stop_op_cb(uint32_t status, void *param)
{
	ARG_UNUSED(param);

	LL_ASSERT(preempt_stop_req != preempt_stop_ack);
	preempt_stop_ack = preempt_stop_req;

	/* We do not fail when status is not success, because in scenarios
	 * where ticker_start is followed by ticker_stop and then another
	 * ticker_start, the call to ticker_stop can fail, and this is
	 * acceptable. Also, preempt_req and preempt_ack would not have been
	 * updated, as the ticker_start was not processed before ticker_stop.
	 * Hence, it is safe to reset preempt_req and preempt_ack here.
	 */
	if (status == TICKER_STATUS_SUCCESS) {
		LL_ASSERT(preempt_req != preempt_ack);
	}

	preempt_req = preempt_ack;
}

static void ticker_start_op_cb(uint32_t status, void *param)
{
	ARG_UNUSED(param);
	LL_ASSERT(status == TICKER_STATUS_SUCCESS);

	/* Increase preempt requested count before acknowledging that the
	 * ticker start operation for the preempt timeout has been handled.
	 */
	LL_ASSERT(preempt_req == preempt_ack);
	preempt_req++;

	/* Increase preempt start ack count, to acknowledge that the ticker
	 * start operation has been handled.
	 */
	LL_ASSERT(preempt_start_req != preempt_start_ack);
	preempt_start_ack = preempt_start_req;
}

static uint32_t preempt_ticker_start(struct lll_event *first,
				     struct lll_event *prev,
				     struct lll_event *next)
{
	const struct lll_prepare_param *p;
	static uint32_t ticks_at_preempt;
	uint32_t ticks_at_preempt_new;
	uint32_t preempt_anchor;
	struct ull_hdr *ull;
	uint32_t preempt_to;
	uint32_t ret;

	/* Do not request to start the preempt timeout if already requested.
	 *
	 * Check if there is a pending preempt timeout start request, or if
	 * the preempt timeout ticker has already been scheduled.
	 */
	if ((preempt_start_req != preempt_start_ack) ||
	    (preempt_req != preempt_ack)) {
		uint32_t diff;

		/* Calc the preempt timeout */
		p = &next->prepare_param;
		ull = HDR_LLL2ULL(p->param);
		preempt_anchor = p->ticks_at_expire;
		preempt_to = MAX(ull->ticks_active_to_start,
				 ull->ticks_prepare_to_start) -
			     ull->ticks_preempt_to_start;

		ticks_at_preempt_new = preempt_anchor + preempt_to;
		ticks_at_preempt_new &= HAL_TICKER_CNTR_MASK;

		/* Check for short preempt timeouts */
		diff = ticker_ticks_diff_get(ticks_at_preempt_new,
					     ticks_at_preempt);
		if ((diff & BIT(HAL_TICKER_CNTR_MSBIT)) == 0U) {
			return TICKER_STATUS_SUCCESS;
		}

		/* Stop any scheduled preempt ticker */
		ret = preempt_ticker_stop();
		LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
			  (ret == TICKER_STATUS_BUSY));

		/* Schedule short preempt timeout */
		first = next;
	} else {
		/* Calc the preempt timeout */
		p = &first->prepare_param;
		ull = HDR_LLL2ULL(p->param);
		preempt_anchor = p->ticks_at_expire;
		preempt_to = MAX(ull->ticks_active_to_start,
				 ull->ticks_prepare_to_start) -
			     ull->ticks_preempt_to_start;

		ticks_at_preempt_new = preempt_anchor + preempt_to;
		ticks_at_preempt_new &= HAL_TICKER_CNTR_MASK;
	}

	preempt_start_req++;

	ticks_at_preempt = ticks_at_preempt_new;

	/* Set up the preempt timeout */
	ret = ticker_start(TICKER_INSTANCE_ID_CTLR,
			   TICKER_USER_ID_LLL,
			   TICKER_ID_LLL_PREEMPT,
			   preempt_anchor,
			   preempt_to,
			   TICKER_NULL_PERIOD,
			   TICKER_NULL_REMAINDER,
			   TICKER_NULL_LAZY,
			   TICKER_NULL_SLOT,
			   preempt_ticker_cb, first->prepare_param.param,
			   ticker_start_op_cb, NULL);

	return ret;
}

static uint32_t preempt_ticker_stop(void)
{
	uint32_t ret;

	/* Do not request to stop the preempt timeout if a stop is already
	 * requested or the timeout has expired.
	 */
	if ((preempt_stop_req != preempt_stop_ack) ||
	    (preempt_req == preempt_ack)) {
		return TICKER_STATUS_SUCCESS;
	}

	preempt_stop_req++;

	ret = ticker_stop(TICKER_INSTANCE_ID_CTLR,
			  TICKER_USER_ID_LLL,
			  TICKER_ID_LLL_PREEMPT,
			  ticker_stop_op_cb, NULL);
	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
		  (ret == TICKER_STATUS_BUSY));

	return ret;
}

static void preempt_ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
			      uint32_t remainder, uint16_t lazy, uint8_t force,
			      void *param)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, preempt};
	uint32_t ret;

	LL_ASSERT(preempt_ack != preempt_req);
	preempt_ack = preempt_req;

	mfy.param = param;
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL,
			     0, &mfy);
	LL_ASSERT(!ret);
}

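/* Mayfly run in the LLL context when the preempt timeout expires; decides
 * whether the ready prepare in the pipeline preempts the currently active
 * event.
 */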
static void preempt(void *param)
{
	lll_prepare_cb_t resume_cb;
	struct lll_event *ready;
	uint8_t idx;
	int err;

	/* No event to abort */
	if (!event.curr.abort_cb || !event.curr.param) {
		return;
	}

preempt_find_preemptor:
	/* Find a prepare that is ready and not a resume */
	idx = UINT8_MAX;
	ready = prepare_dequeue_iter_ready_get(&idx);
	if (!ready) {
		/* No ready prepare */
		return;
	}

	/* Preemptor not in pipeline */
	if (ready->prepare_param.param != param) {
		uint32_t ticks_at_preempt_min = ready->prepare_param.ticks_at_expire;
		struct lll_event *ready_short = NULL;
		struct lll_event *ready_next = NULL;
		struct lll_event *preemptor;

		/* Find if the short prepare request is in the pipeline */
		do {
			uint32_t ticks_at_preempt_next;
			uint32_t diff;

			preemptor = prepare_dequeue_iter_ready_get(&idx);
			if (!preemptor) {
				break;
			}

			if (!ready_next) {
				ready_next = preemptor;
			}

			if (preemptor->prepare_param.param == param) {
				break;
			}

			ticks_at_preempt_next = preemptor->prepare_param.ticks_at_expire;
			diff = ticker_ticks_diff_get(ticks_at_preempt_next,
						     ticks_at_preempt_min);
			if ((diff & BIT(HAL_TICKER_CNTR_MSBIT)) == 0U) {
				continue;
			}

			ready_short = preemptor;
			ticks_at_preempt_min = ticks_at_preempt_next;
		} while (true);

		/* "The" short prepare we were looking for is not in the
		 * pipeline.
		 */
		if (!preemptor) {
			uint32_t ret;

			/* Find any short prepare */
			if (ready_short) {
				ready = ready_short;
			}

			/* Start the preempt timeout for the (short) ready
			 * event.
			 */
			ret = preempt_ticker_start(ready, NULL, ready);
			LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
				  (ret == TICKER_STATUS_BUSY));

			return;
		}

		/* FIXME: The prepare pipeline is not an ordered list
		 *        implementation, and for a short prepare being
		 *        enqueued, ideally the pipeline has to be implemented
		 *        as an ordered list. Until then, a workaround to
		 *        abort a prepare present before the short prepare
		 *        being enqueued is implemented below.
		 *        A proper solution will be to re-design the pipeline
		 *        as an ordered list, instead of the current FIFO.
		 */

		/* Abort the prepare that is present before the short prepare */
		ready->is_aborted = 1;
		ready->abort_cb(&ready->prepare_param, ready->prepare_param.param);

		/* Abort all events in pipeline before the short prepare */
		if (preemptor != ready_next) {
			goto preempt_find_preemptor;
		}

		/* As the prepare queue has been refreshed due to the call of
		 * abort_cb, which invokes lll_done, find the latest prepare.
		 */
		idx = UINT8_MAX;
		ready = prepare_dequeue_iter_ready_get(&idx);
		if (!ready) {
			/* No ready prepare */
			return;
		}

		LL_ASSERT(ready->prepare_param.param == param);
	}

	/* Check if the current event wants to continue */
	err = event.curr.is_abort_cb(ready->prepare_param.param, event.curr.param, &resume_cb);
	if (!err || (err == -EBUSY)) {
		/* -EBUSY is returned when the current and next events have
		 * the same state/role; do not abort in that case.
		 */
		if (err != -EBUSY) {
			/* Let the preemptor LLL know about the cancelled
			 * prepare.
			 */
			ready->is_aborted = 1;
			ready->abort_cb(&ready->prepare_param, ready->prepare_param.param);
		}

		return;
	}

	/* Abort the current event */
	event.curr.abort_cb(NULL, event.curr.param);

	/* Check if resume requested */
	if (err == -EAGAIN) {
		uint8_t is_resume_abort = 0U;
		struct lll_event *iter;
		uint8_t iter_idx;

preempt_abort_resume:
		/* Abort any duplicate non-resume prepares so that they get
		 * dequeued.
		 */
		iter_idx = UINT8_MAX;
		iter = ull_prepare_dequeue_iter(&iter_idx);
		while (iter) {
			if (!iter->is_aborted &&
			    (is_resume_abort || !iter->is_resume) &&
			    event.curr.param == iter->prepare_param.param) {
				iter->is_aborted = 1;
				iter->abort_cb(&iter->prepare_param,
					       iter->prepare_param.param);

#if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
				/* NOTE: abort_cb called lll_done which modifies
				 *       the prepare pipeline, hence re-iterate
				 *       through the prepare pipeline.
				 */
				iter_idx = UINT8_MAX;
#endif /* !CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */
			}

			iter = ull_prepare_dequeue_iter(&iter_idx);
		}

		if (!is_resume_abort) {
			is_resume_abort = 1U;

			goto preempt_abort_resume;
		}

		/* Enqueue as resume event */
		iter = resume_enqueue(resume_cb);
		LL_ASSERT(iter);
	} else {
		LL_ASSERT(err == -ECANCELED);
	}
}
#else /* CONFIG_BT_CTLR_LOW_LAT */

#if (CONFIG_BT_CTLR_LLL_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
static void mfy_ticker_job_idle_get(void *param)
{
	uint32_t ret;

	/* Ticker Job Silence */
	ret = ticker_job_idle_get(TICKER_INSTANCE_ID_CTLR,
				  TICKER_USER_ID_ULL_LOW,
				  ticker_op_job_disable, NULL);
	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
		  (ret == TICKER_STATUS_BUSY));
}

static void ticker_op_job_disable(uint32_t status, void *op_context)
{
	ARG_UNUSED(status);
	ARG_UNUSED(op_context);

	/* FIXME: */
	if (1 /* _radio.state != STATE_NONE */) {
		mayfly_enable(TICKER_USER_ID_ULL_LOW,
			      TICKER_USER_ID_ULL_LOW, 0);
	}
}
#endif

#endif /* CONFIG_BT_CTLR_LOW_LAT */