/*
 * Copyright (c) 2018-2020 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <stdint.h>
#include <stdbool.h>
#include <errno.h>

#include <hal/nrf_rtc.h>

#include <zephyr/toolchain.h>

#include <soc.h>
#include <zephyr/device.h>

#include <zephyr/drivers/entropy.h>
#include <zephyr/irq.h>

#include "hal/swi.h"
#include "hal/ccm.h"
#include "hal/radio.h"
#include "hal/ticker.h"

#include "util/mem.h"
#include "util/memq.h"
#include "util/mayfly.h"

#include "ticker/ticker.h"

#include "lll.h"
#include "lll_vendor.h"
#include "lll_clock.h"
#include "lll_internal.h"
#include "lll_prof_internal.h"

#include "hal/debug.h"

#if defined(CONFIG_BT_CTLR_ZLI)
#define IRQ_CONNECT_FLAGS IRQ_ZERO_LATENCY
#else
#define IRQ_CONNECT_FLAGS 0
#endif

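/* Current LLL event state shared between the LLL and ULL contexts: the abort
 * callbacks and parameter of the event in flight, plus (when
 * CONFIG_BT_CTLR_LOW_LAT_ULL_DONE is enabled) counters used to keep LLL done
 * generation and ULL done processing in sync.
 */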
static struct {
	struct {
		void              *param;
		lll_is_abort_cb_t is_abort_cb;
		lll_abort_cb_t    abort_cb;
	} curr;

#if defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
	struct {
		uint8_t volatile lll_count;
		uint8_t          ull_count;
	} done;
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */
} event;

/* Entropy device */
#if defined(CONFIG_ENTROPY_NRF5_RNG)
static const struct device *const dev_entropy = DEVICE_DT_GET(DT_NODELABEL(rng));
#endif /* CONFIG_ENTROPY_NRF5_RNG */

static int init_reset(void);
#if defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
static inline void done_inc(void);
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */
static inline bool is_done_sync(void);
static inline struct lll_event *prepare_dequeue_iter_ready_get(uint8_t *idx);
static inline struct lll_event *resume_enqueue(lll_prepare_cb_t resume_cb);
static void isr_race(void *param);

#if !defined(CONFIG_BT_CTLR_LOW_LAT)
static uint32_t preempt_ticker_start(struct lll_event *first,
				     struct lll_event *prev,
				     struct lll_event *next);
static uint32_t preempt_ticker_stop(void);
static void preempt_ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
			      uint32_t remainder, uint16_t lazy, uint8_t force,
			      void *param);
static void preempt(void *param);
#else /* CONFIG_BT_CTLR_LOW_LAT */
#if (CONFIG_BT_CTLR_LLL_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
static void mfy_ticker_job_idle_get(void *param);
static void ticker_op_job_disable(uint32_t status, void *op_context);
#endif
#endif /* CONFIG_BT_CTLR_LOW_LAT */

#if defined(CONFIG_BT_CTLR_DYNAMIC_INTERRUPTS) && \
	defined(CONFIG_DYNAMIC_DIRECT_INTERRUPTS)
static void radio_nrf5_isr(const void *arg)
#else /* !CONFIG_BT_CTLR_DYNAMIC_INTERRUPTS */
ISR_DIRECT_DECLARE(radio_nrf5_isr)
#endif /* !CONFIG_BT_CTLR_DYNAMIC_INTERRUPTS */
{
	DEBUG_RADIO_ISR(1);

	lll_prof_enter_radio();

	isr_radio();

	ISR_DIRECT_PM();

	lll_prof_exit_radio();

	DEBUG_RADIO_ISR(0);

#if !defined(CONFIG_BT_CTLR_DYNAMIC_INTERRUPTS) || \
	!defined(CONFIG_DYNAMIC_DIRECT_INTERRUPTS)
	return 1;
#endif /* !CONFIG_DYNAMIC_DIRECT_INTERRUPTS */
}

static void rtc0_nrf5_isr(const void *arg)
{
	DEBUG_TICKER_ISR(1);

	lll_prof_enter_ull_high();

	/* On compare0 run ticker worker instance0 */
	if (NRF_RTC->EVENTS_COMPARE[0]) {
		nrf_rtc_event_clear(NRF_RTC, NRF_RTC_EVENT_COMPARE_0);

		ticker_trigger(0);
	}

	mayfly_run(TICKER_USER_ID_ULL_HIGH);

	lll_prof_exit_ull_high();

#if !defined(CONFIG_BT_CTLR_LOW_LAT) && \
	(CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
	lll_prof_enter_ull_low();

	mayfly_run(TICKER_USER_ID_ULL_LOW);

	lll_prof_exit_ull_low();
#endif

	DEBUG_TICKER_ISR(0);
}

static void swi_lll_nrf5_isr(const void *arg)
{
	DEBUG_RADIO_ISR(1);

	lll_prof_enter_lll();

	mayfly_run(TICKER_USER_ID_LLL);

	lll_prof_exit_lll();

	DEBUG_RADIO_ISR(0);
}

#if defined(CONFIG_BT_CTLR_LOW_LAT) || \
	(CONFIG_BT_CTLR_ULL_HIGH_PRIO != CONFIG_BT_CTLR_ULL_LOW_PRIO)
static void swi_ull_low_nrf5_isr(const void *arg)
{
	DEBUG_TICKER_JOB(1);

	lll_prof_enter_ull_low();

	mayfly_run(TICKER_USER_ID_ULL_LOW);

	lll_prof_exit_ull_low();

	DEBUG_TICKER_JOB(0);
}
#endif

int lll_init(void)
{
	int err;

#if defined(CONFIG_ENTROPY_NRF5_RNG)
	/* Get reference to entropy device */
	if (!device_is_ready(dev_entropy)) {
		return -ENODEV;
	}
#endif /* CONFIG_ENTROPY_NRF5_RNG */

	/* Initialise LLL internals */
	event.curr.abort_cb = NULL;

	/* Initialize Clocks */
	err = lll_clock_init();
	if (err < 0) {
		return err;
	}

	err = init_reset();
	if (err) {
		return err;
	}

	if (IS_ENABLED(HAL_RADIO_GPIO_HAVE_PA_PIN) ||
	    IS_ENABLED(HAL_RADIO_GPIO_HAVE_LNA_PIN)) {
		err = radio_gpio_pa_lna_init();
		if (err) {
			return err;
		}
	}

	/* Initialize SW IRQ structure */
	hal_swi_init();

	/* Connect ISRs */
#if defined(CONFIG_BT_CTLR_DYNAMIC_INTERRUPTS)
#if defined(CONFIG_DYNAMIC_DIRECT_INTERRUPTS)
	ARM_IRQ_DIRECT_DYNAMIC_CONNECT(HAL_RADIO_IRQn, CONFIG_BT_CTLR_LLL_PRIO,
				       IRQ_CONNECT_FLAGS, no_reschedule);
	irq_connect_dynamic(HAL_RADIO_IRQn, CONFIG_BT_CTLR_LLL_PRIO,
			    radio_nrf5_isr, NULL, IRQ_CONNECT_FLAGS);
#else /* !CONFIG_DYNAMIC_DIRECT_INTERRUPTS */
	IRQ_DIRECT_CONNECT(HAL_RADIO_IRQn, CONFIG_BT_CTLR_LLL_PRIO,
			   radio_nrf5_isr, IRQ_CONNECT_FLAGS);
#endif /* !CONFIG_DYNAMIC_DIRECT_INTERRUPTS */
	irq_connect_dynamic(HAL_RTC_IRQn, CONFIG_BT_CTLR_ULL_HIGH_PRIO,
			    rtc0_nrf5_isr, NULL, 0U);
	irq_connect_dynamic(HAL_SWI_RADIO_IRQ, CONFIG_BT_CTLR_LLL_PRIO,
			    swi_lll_nrf5_isr, NULL, IRQ_CONNECT_FLAGS);
#if defined(CONFIG_BT_CTLR_LOW_LAT) || \
	(CONFIG_BT_CTLR_ULL_HIGH_PRIO != CONFIG_BT_CTLR_ULL_LOW_PRIO)
	irq_connect_dynamic(HAL_SWI_JOB_IRQ, CONFIG_BT_CTLR_ULL_LOW_PRIO,
			    swi_ull_low_nrf5_isr, NULL, 0U);
#endif

#else /* !CONFIG_BT_CTLR_DYNAMIC_INTERRUPTS */
	IRQ_DIRECT_CONNECT(HAL_RADIO_IRQn, CONFIG_BT_CTLR_LLL_PRIO,
			   radio_nrf5_isr, IRQ_CONNECT_FLAGS);
	IRQ_CONNECT(HAL_RTC_IRQn, CONFIG_BT_CTLR_ULL_HIGH_PRIO,
		    rtc0_nrf5_isr, NULL, 0);
#if defined(CONFIG_BT_CTLR_ZLI)
	IRQ_DIRECT_CONNECT(HAL_SWI_RADIO_IRQ, CONFIG_BT_CTLR_LLL_PRIO,
			   swi_lll_nrf5_isr, IRQ_CONNECT_FLAGS);
#else
	IRQ_CONNECT(HAL_SWI_RADIO_IRQ, CONFIG_BT_CTLR_LLL_PRIO,
		    swi_lll_nrf5_isr, NULL, IRQ_CONNECT_FLAGS);
#endif
#if defined(CONFIG_BT_CTLR_LOW_LAT) || \
	(CONFIG_BT_CTLR_ULL_HIGH_PRIO != CONFIG_BT_CTLR_ULL_LOW_PRIO)
	IRQ_CONNECT(HAL_SWI_JOB_IRQ, CONFIG_BT_CTLR_ULL_LOW_PRIO,
		    swi_ull_low_nrf5_isr, NULL, 0);
#endif
#endif /* !CONFIG_BT_CTLR_DYNAMIC_INTERRUPTS */

	/* Enable IRQs */
	irq_enable(HAL_RADIO_IRQn);
	irq_enable(HAL_RTC_IRQn);
	irq_enable(HAL_SWI_RADIO_IRQ);
	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT) ||
	    (CONFIG_BT_CTLR_ULL_HIGH_PRIO != CONFIG_BT_CTLR_ULL_LOW_PRIO)) {
		irq_enable(HAL_SWI_JOB_IRQ);
	}

	radio_setup();

	return 0;
}

int lll_deinit(void)
{
	int err;

	/* Release clocks */
	err = lll_clock_deinit();
	if (err < 0) {
		return err;
	}

	if (IS_ENABLED(HAL_RADIO_GPIO_HAVE_PA_PIN) ||
	    IS_ENABLED(HAL_RADIO_GPIO_HAVE_LNA_PIN)) {
		radio_gpio_pa_lna_deinit();
	}

	/* Disable IRQs */
	irq_disable(HAL_RADIO_IRQn);
	irq_disable(HAL_RTC_IRQn);
	irq_disable(HAL_SWI_RADIO_IRQ);
	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT) ||
	    (CONFIG_BT_CTLR_ULL_HIGH_PRIO != CONFIG_BT_CTLR_ULL_LOW_PRIO)) {
		irq_disable(HAL_SWI_JOB_IRQ);
	}

	/* Disconnect dynamic ISRs used */
#if defined(CONFIG_BT_CTLR_DYNAMIC_INTERRUPTS)
#if defined(CONFIG_SHARED_INTERRUPTS)
#if defined(CONFIG_DYNAMIC_DIRECT_INTERRUPTS)
	irq_disconnect_dynamic(HAL_RADIO_IRQn, CONFIG_BT_CTLR_LLL_PRIO,
			       radio_nrf5_isr, NULL, IRQ_CONNECT_FLAGS);
#endif /* CONFIG_DYNAMIC_DIRECT_INTERRUPTS */
	irq_disconnect_dynamic(HAL_RTC_IRQn, CONFIG_BT_CTLR_ULL_HIGH_PRIO,
			       rtc0_nrf5_isr, NULL, 0U);
	irq_disconnect_dynamic(HAL_SWI_RADIO_IRQ, CONFIG_BT_CTLR_LLL_PRIO,
			       swi_lll_nrf5_isr, NULL, IRQ_CONNECT_FLAGS);
#if defined(CONFIG_BT_CTLR_LOW_LAT) || \
	(CONFIG_BT_CTLR_ULL_HIGH_PRIO != CONFIG_BT_CTLR_ULL_LOW_PRIO)
	irq_disconnect_dynamic(HAL_SWI_JOB_IRQ, CONFIG_BT_CTLR_ULL_LOW_PRIO,
			       swi_ull_low_nrf5_isr, NULL, 0U);
#endif
#else /* !CONFIG_SHARED_INTERRUPTS */
#if defined(CONFIG_DYNAMIC_DIRECT_INTERRUPTS)
	irq_connect_dynamic(HAL_RADIO_IRQn, CONFIG_BT_CTLR_LLL_PRIO, NULL, NULL,
			    IRQ_CONNECT_FLAGS);
#endif /* CONFIG_DYNAMIC_DIRECT_INTERRUPTS */
	irq_connect_dynamic(HAL_RTC_IRQn, CONFIG_BT_CTLR_ULL_HIGH_PRIO, NULL, NULL,
			    0U);
	irq_connect_dynamic(HAL_SWI_RADIO_IRQ, CONFIG_BT_CTLR_LLL_PRIO, NULL,
			    NULL, IRQ_CONNECT_FLAGS);
#if defined(CONFIG_BT_CTLR_LOW_LAT) || \
	(CONFIG_BT_CTLR_ULL_HIGH_PRIO != CONFIG_BT_CTLR_ULL_LOW_PRIO)
	irq_connect_dynamic(HAL_SWI_JOB_IRQ, CONFIG_BT_CTLR_ULL_LOW_PRIO, NULL,
			    NULL, 0U);
#endif
#endif /* !CONFIG_SHARED_INTERRUPTS */
#endif /* CONFIG_BT_CTLR_DYNAMIC_INTERRUPTS */

	return 0;
}

int lll_csrand_get(void *buf, size_t len)
{
#if defined(CONFIG_ENTROPY_NRF5_RNG)
	return entropy_get_entropy(dev_entropy, buf, len);
#else
	/* FIXME: No suitable entropy device available yet.
	 *        The Controller requires random numbers.
	 *        Hence, return uninitialized buf contents, for now.
	 */
	return 0;
#endif
}

int lll_csrand_isr_get(void *buf, size_t len)
{
#if defined(CONFIG_ENTROPY_NRF5_RNG)
	return entropy_get_entropy_isr(dev_entropy, buf, len, 0);
#else
	/* FIXME: No suitable entropy device available yet.
	 *        The Controller requires random numbers.
	 *        Hence, return uninitialized buf contents, for now.
	 */
	return 0;
#endif
}

int lll_rand_get(void *buf, size_t len)
{
#if defined(CONFIG_ENTROPY_NRF5_RNG)
	return entropy_get_entropy(dev_entropy, buf, len);
#else
	/* FIXME: No suitable entropy device available yet.
	 *        The Controller requires random numbers.
	 *        Hence, return uninitialized buf contents, for now.
	 */
	return 0;
#endif
}

int lll_rand_isr_get(void *buf, size_t len)
{
#if defined(CONFIG_ENTROPY_NRF5_RNG)
	return entropy_get_entropy_isr(dev_entropy, buf, len, 0);
#else
	/* FIXME: No suitable entropy device available yet.
	 *        The Controller requires random numbers.
	 *        Hence, return uninitialized buf contents, for now.
	 */
	return 0;
#endif
}

int lll_reset(void)
{
	int err;

	err = init_reset();
	if (err) {
		return err;
	}

	return 0;
}

void lll_disable(void *param)
{
	/* LLL disable of current event, done is generated */
	if (!param || (param == event.curr.param)) {
		if (event.curr.abort_cb && event.curr.param) {
			event.curr.abort_cb(NULL, event.curr.param);
		} else {
			LL_ASSERT(!param);
		}
	}
	{
		struct lll_event *next;
		uint8_t idx;

		idx = UINT8_MAX;
		next = ull_prepare_dequeue_iter(&idx);
		while (next) {
			if (!next->is_aborted &&
			    (!param || (param == next->prepare_param.param))) {
				next->is_aborted = 1;
				next->abort_cb(&next->prepare_param,
					       next->prepare_param.param);

#if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
				/* NOTE: abort_cb called lll_done which modifies
				 *       the prepare pipeline hence re-iterate
				 *       through the prepare pipeline.
				 */
				idx = UINT8_MAX;
#endif /* !CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */
			}

			next = ull_prepare_dequeue_iter(&idx);
		}
	}
}

int lll_prepare_done(void *param)
{
#if defined(CONFIG_BT_CTLR_LOW_LAT) && \
	    (CONFIG_BT_CTLR_LLL_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, mfy_ticker_job_idle_get};
	uint32_t ret;

	ret = mayfly_enqueue(TICKER_USER_ID_LLL, TICKER_USER_ID_ULL_LOW,
			     1, &mfy);
	if (ret) {
		return -EFAULT;
	}

	return 0;
#else
	return 0;
#endif /* CONFIG_BT_CTLR_LOW_LAT */
}

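/* Mark an LLL event as done and hand it over to ULL.
 *
 * With a NULL param the currently active event is closed: the abort callback
 * and parameter are cleared, and (with CONFIG_BT_CTLR_LOW_LAT_ULL_DONE) the
 * LLL done count is incremented. With a non-NULL param a pending prepare is
 * being cancelled instead. In both cases the corresponding ULL header is
 * resolved and ull_event_done() is invoked.
 */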
int lll_done(void *param)
{
	struct lll_event *next;
	struct ull_hdr *ull;
	void *evdone;

	/* Assert if param supplied without a pending prepare to cancel. */
	next = ull_prepare_dequeue_get();
	LL_ASSERT(!param || next);

	/* check if current LLL event is done */
	if (!param) {
		/* Reset current event instance */
		LL_ASSERT(event.curr.abort_cb);
		event.curr.abort_cb = NULL;

		param = event.curr.param;
		event.curr.param = NULL;

#if defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
		done_inc();
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */

		if (param) {
			ull = HDR_LLL2ULL(param);
		} else {
			ull = NULL;
		}

		if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT) &&
		    (CONFIG_BT_CTLR_LLL_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)) {
			mayfly_enable(TICKER_USER_ID_LLL,
				      TICKER_USER_ID_ULL_LOW,
				      1);
		}

		DEBUG_RADIO_CLOSE(0);
	} else {
		ull = HDR_LLL2ULL(param);
	}

#if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
	ull_prepare_dequeue(TICKER_USER_ID_LLL);
#endif /* !CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */

#if defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
	struct event_done_extra *extra;
	uint8_t result;

	/* TODO: Pass from calling function */
	result = DONE_COMPLETED;

	lll_done_score(param, result);

	extra = ull_event_done_extra_get();
	LL_ASSERT(extra);

	/* Set result in done extra data - type was set by the role */
	extra->result = result;
#endif /* CONFIG_BT_CTLR_JIT_SCHEDULING */

	/* Let ULL know about LLL event done */
	evdone = ull_event_done(ull);
	LL_ASSERT(evdone);

	return 0;
}

#if defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
void lll_done_ull_inc(void)
{
	LL_ASSERT(event.done.ull_count != event.done.lll_count);
	event.done.ull_count++;
}
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */

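/* Return true when the current LLL event has completed, i.e. no abort
 * callback remains registered. *is_resume is set when param no longer matches
 * the active event parameter, for example when the event re-enqueued itself
 * as a resume in the prepare pipeline (see the NOTE below).
 */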
bool lll_is_done(void *param, bool *is_resume)
{
	/* NOTE: A current radio event, when preempted, could put itself as a
	 *       resume event into the prepare pipeline, in which case
	 *       event.curr.param would be set to NULL.
	 */
	*is_resume = (param != event.curr.param);

	return !event.curr.abort_cb;
}

int lll_is_abort_cb(void *next, void *curr, lll_prepare_cb_t *resume_cb)
{
	return -ECANCELED;
}

void lll_abort_cb(struct lll_prepare_param *prepare_param, void *param)
{
	int err;

	/* NOTE: This is not a prepare being cancelled */
	if (!prepare_param) {
		/* Perform event abort here.
		 * After event has been cleanly aborted, clean up resources
		 * and dispatch event done.
		 */
		radio_isr_set(lll_isr_done, param);
		radio_disable();
		return;
	}

	/* NOTE: Else clean the top half preparations of the aborted event
	 * currently in preparation pipeline.
	 */
	err = lll_hfclock_off();
	LL_ASSERT(err >= 0);

	lll_done(param);
}

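/* Return the offset, in ticker ticks, between the ticker expiry and the
 * actual radio event start: the larger of the active-to-start and
 * prepare-to-start times, or, when the XON_BITMASK flag is set (Advanced XTAL
 * feature), the larger of the active-to-start and preempt-to-start times.
 */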
uint32_t lll_event_offset_get(struct ull_hdr *ull)
{
	if (0) {
#if defined(CONFIG_BT_CTLR_XTAL_ADVANCED)
	} else if (ull->ticks_prepare_to_start & XON_BITMASK) {
		return MAX(ull->ticks_active_to_start,
			   ull->ticks_preempt_to_start);
#endif /* CONFIG_BT_CTLR_XTAL_ADVANCED */
	} else {
		return MAX(ull->ticks_active_to_start,
			   ull->ticks_prepare_to_start);
	}
}

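/* Check how late the prepare is being run relative to the ticker expiry of
 * this event. Returns 0 while still within the allowed start overhead
 * (EVENT_OVERHEAD_START_US), otherwise the measured delay in ticker ticks.
 */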
uint32_t lll_preempt_calc(struct ull_hdr *ull, uint8_t ticker_id,
			  uint32_t ticks_at_event)
{
	uint32_t ticks_now;
	uint32_t diff;

	ticks_now = ticker_ticks_now_get();
	diff = ticker_ticks_diff_get(ticks_now, ticks_at_event);
	if (diff & BIT(HAL_TICKER_CNTR_MSBIT)) {
		return 0;
	}

	diff += HAL_TICKER_CNTR_CMP_OFFSET_MIN;
	if (diff > HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US)) {
		/* TODO: for Low Latency Feature with Advanced XTAL feature.
		 * 1. Release retained HF clock.
		 * 2. Advance the radio event to accommodate normal prepare
		 *    duration.
		 * 3. Increase the preempt to start ticks for future events.
		 */
		return diff;
	}

	return 0U;
}

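/* Set the radio frequency and whitening IV for a BLE channel index (0..39).
 * Advertising channels 37, 38 and 39 map to 2402, 2426 and 2480 MHz; data
 * channels 0..36 fill the frequencies in between in 2 MHz steps.
 */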
void lll_chan_set(uint32_t chan)
{
	switch (chan) {
	case 37:
		radio_freq_chan_set(2);
		break;

	case 38:
		radio_freq_chan_set(26);
		break;

	case 39:
		radio_freq_chan_set(80);
		break;

	default:
		if (chan < 11) {
			radio_freq_chan_set(4 + (chan * 2U));
		} else if (chan < 40) {
			radio_freq_chan_set(28 + ((chan - 11) * 2U));
		} else {
			LL_ASSERT(0);
		}
		break;
	}

	radio_whiten_iv_set(chan);
}


uint32_t lll_radio_is_idle(void)
{
	return radio_is_idle();
}

uint32_t lll_radio_tx_ready_delay_get(uint8_t phy, uint8_t flags)
{
	return radio_tx_ready_delay_get(phy, flags);
}

uint32_t lll_radio_rx_ready_delay_get(uint8_t phy, uint8_t flags)
{
	return radio_rx_ready_delay_get(phy, flags);
}

int8_t lll_radio_tx_pwr_min_get(void)
{
	return radio_tx_power_min_get();
}

int8_t lll_radio_tx_pwr_max_get(void)
{
	return radio_tx_power_max_get();
}

int8_t lll_radio_tx_pwr_floor(int8_t tx_pwr_lvl)
{
	return radio_tx_power_floor(tx_pwr_lvl);
}

void lll_isr_tx_status_reset(void)
{
	radio_status_reset();
	radio_tmr_status_reset();

	if (IS_ENABLED(HAL_RADIO_GPIO_HAVE_PA_PIN) ||
	    IS_ENABLED(HAL_RADIO_GPIO_HAVE_LNA_PIN)) {
		radio_gpio_pa_lna_disable();
	}
}

void lll_isr_rx_status_reset(void)
{
	radio_status_reset();
	radio_tmr_status_reset();
	radio_rssi_status_reset();

	if (IS_ENABLED(HAL_RADIO_GPIO_HAVE_PA_PIN) ||
	    IS_ENABLED(HAL_RADIO_GPIO_HAVE_LNA_PIN)) {
		radio_gpio_pa_lna_disable();
	}
}

void lll_isr_tx_sub_status_reset(void)
{
	radio_status_reset();
	radio_tmr_tx_status_reset();

	if (IS_ENABLED(HAL_RADIO_GPIO_HAVE_PA_PIN) ||
	    IS_ENABLED(HAL_RADIO_GPIO_HAVE_LNA_PIN)) {
		radio_gpio_pa_lna_disable();
	}
}

void lll_isr_rx_sub_status_reset(void)
{
	radio_status_reset();
	radio_tmr_rx_status_reset();

	if (IS_ENABLED(HAL_RADIO_GPIO_HAVE_PA_PIN) ||
	    IS_ENABLED(HAL_RADIO_GPIO_HAVE_LNA_PIN)) {
		radio_gpio_pa_lna_disable();
	}
}

void lll_isr_status_reset(void)
{
	radio_status_reset();
	radio_tmr_status_reset();
	radio_filter_status_reset();
	if (IS_ENABLED(CONFIG_BT_CTLR_PRIVACY)) {
		radio_ar_status_reset();
	}
	radio_rssi_status_reset();

	if (IS_ENABLED(HAL_RADIO_GPIO_HAVE_PA_PIN) ||
	    IS_ENABLED(HAL_RADIO_GPIO_HAVE_LNA_PIN)) {
		radio_gpio_pa_lna_disable();
	}
}

inline void lll_isr_abort(void *param)
{
	lll_isr_status_reset();
	lll_isr_cleanup(param);
}

void lll_isr_done(void *param)
{
	lll_isr_abort(param);
}

void lll_isr_cleanup(void *param)
{
	int err;

	radio_isr_set(isr_race, param);
	if (!radio_is_idle()) {
		radio_disable();
	}

	radio_tmr_stop();
	radio_stop();

	err = lll_hfclock_off();
	LL_ASSERT(err >= 0);

	lll_done(NULL);
}

void lll_isr_early_abort(void *param)
{
	int err;

	radio_isr_set(isr_race, param);
	if (!radio_is_idle()) {
		radio_disable();
	}

	err = lll_hfclock_off();
	LL_ASSERT(err >= 0);

	lll_done(NULL);
}

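/* Resolve a prepare request against the current state of the LLL.
 *
 * If the radio is idle and nothing else in the prepare pipeline takes
 * precedence, the prepare callback is invoked directly and the event becomes
 * the current one. Otherwise the request is enqueued into the prepare
 * pipeline for deferred execution and, unless CONFIG_BT_CTLR_LOW_LAT is used,
 * a preempt timeout is started for the earliest pending prepare; in that case
 * -EINPROGRESS is returned.
 */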
int lll_prepare_resolve(lll_is_abort_cb_t is_abort_cb, lll_abort_cb_t abort_cb,
			lll_prepare_cb_t prepare_cb,
			struct lll_prepare_param *prepare_param,
			uint8_t is_resume, uint8_t is_dequeue)
{
	struct lll_event *ready_short = NULL;
	struct lll_event *ready;
	struct lll_event *next;
	uint8_t idx;
	int err;

	/* Find the ready prepare in the pipeline */
	idx = UINT8_MAX;
	ready = prepare_dequeue_iter_ready_get(&idx);

	/* Find any short prepare */
	if (ready) {
		uint32_t ticks_at_preempt_min = ready->prepare_param.ticks_at_expire;

		do {
			uint32_t ticks_at_preempt_next;
			struct lll_event *ready_next;
			uint32_t diff;

			ready_next = prepare_dequeue_iter_ready_get(&idx);
			if (!ready_next) {
				break;
			}

			ticks_at_preempt_next = ready_next->prepare_param.ticks_at_expire;
			diff = ticker_ticks_diff_get(ticks_at_preempt_next,
						     ticks_at_preempt_min);
			if ((diff & BIT(HAL_TICKER_CNTR_MSBIT)) == 0U) {
				continue;
			}

			ready_short = ready_next;
			ticks_at_preempt_min = ticks_at_preempt_next;
		} while (true);
	}

	/* Current event active or another prepare is ready in the pipeline */
	if ((!is_dequeue && !is_done_sync()) ||
	    event.curr.abort_cb || ready_short ||
	    (ready && is_resume)) {
#if defined(CONFIG_BT_CTLR_LOW_LAT)
		lll_prepare_cb_t resume_cb;
#endif /* CONFIG_BT_CTLR_LOW_LAT */

		if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT) && event.curr.param) {
			/* early abort */
			event.curr.abort_cb(NULL, event.curr.param);
		}

		/* Store the next prepare for deferred call */
		next = ull_prepare_enqueue(is_abort_cb, abort_cb, prepare_param,
					   prepare_cb, is_resume);
		LL_ASSERT(next);

#if !defined(CONFIG_BT_CTLR_LOW_LAT)
		if (is_resume) {
			return -EINPROGRESS;
		}

		/* Find any short prepare */
		if (ready_short) {
			ready = ready_short;
		}

		/* Always start preempt timeout for first prepare in pipeline */
		struct lll_event *first = ready ? ready : next;
		uint32_t ret;

		/* Start the preempt timeout */
		ret = preempt_ticker_start(first, ready, next);
		LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
			  (ret == TICKER_STATUS_BUSY));

#else /* CONFIG_BT_CTLR_LOW_LAT */
		next = NULL;
		while (ready) {
			if (!ready->is_aborted) {
				if (event.curr.param == ready->prepare_param.param) {
					ready->is_aborted = 1;
					ready->abort_cb(&ready->prepare_param,
							ready->prepare_param.param);
				} else {
					next = ready;
				}
			}

			ready = ull_prepare_dequeue_iter(&idx);
		}

		if (next) {
			/* check if resume requested by curr */
			err = event.curr.is_abort_cb(NULL, event.curr.param,
						     &resume_cb);
			LL_ASSERT(err);

			if (err == -EAGAIN) {
				next = resume_enqueue(resume_cb);
				LL_ASSERT(next);
			} else {
				LL_ASSERT(err == -ECANCELED);
			}
		}
#endif /* CONFIG_BT_CTLR_LOW_LAT */

		return -EINPROGRESS;
	}

	LL_ASSERT(!ready || &ready->prepare_param == prepare_param);

	event.curr.param = prepare_param->param;
	event.curr.is_abort_cb = is_abort_cb;
	event.curr.abort_cb = abort_cb;

	err = prepare_cb(prepare_param);

	if (!IS_ENABLED(CONFIG_BT_CTLR_ASSERT_OVERHEAD_START) &&
	    (err == -ECANCELED)) {
		err = 0;
	}

#if !defined(CONFIG_BT_CTLR_LOW_LAT)
	uint32_t ret;

	/* NOTE: The preempt timeout started previously for the current event,
	 *       whose prepare is now being invoked, is not explicitly stopped
	 *       here. If there is a next prepare event in the pipeline, any
	 *       running preempt timeout will be stopped before the new one is
	 *       started. Refer to the implementation in
	 *       preempt_ticker_start().
	 */

	/* Find next prepare needing preempt timeout to be setup */
	next = prepare_dequeue_iter_ready_get(&idx);
	if (!next) {
		return err;
	}

	/* Start the preempt timeout */
	ret = preempt_ticker_start(next, NULL, next);
	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
		  (ret == TICKER_STATUS_BUSY));
#endif /* !CONFIG_BT_CTLR_LOW_LAT */

	return err;
}

static int init_reset(void)
{
	return 0;
}

#if defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
static inline void done_inc(void)
{
	event.done.lll_count++;
	LL_ASSERT(event.done.lll_count != event.done.ull_count);
}
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */

static inline bool is_done_sync(void)
{
#if defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
	return event.done.lll_count == event.done.ull_count;
#else /* !CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */
	return true;
#endif /* !CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */
}

static inline struct lll_event *prepare_dequeue_iter_ready_get(uint8_t *idx)
{
	struct lll_event *ready;

	do {
		ready = ull_prepare_dequeue_iter(idx);
	} while (ready && (ready->is_aborted || ready->is_resume));

	return ready;
}

static inline struct lll_event *resume_enqueue(lll_prepare_cb_t resume_cb)
{
	struct lll_prepare_param prepare_param = {0};

	/* Enqueue into prepare pipeline as resume radio event, and remove
	 * parameter assignment from currently active radio event so that
	 * done event is not generated.
	 */
	prepare_param.param = event.curr.param;
	event.curr.param = NULL;

	return ull_prepare_enqueue(event.curr.is_abort_cb, event.curr.abort_cb,
				   &prepare_param, resume_cb, 1);
}

static void isr_race(void *param)
{
	/* NOTE: lll_disable could have a race with ... */
	radio_status_reset();
}

#if !defined(CONFIG_BT_CTLR_LOW_LAT)
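/* Request/acknowledge counter pairs tracking, across the LLL and ULL ticker
 * contexts, whether a preempt timeout start or stop operation is pending
 * (start/stop req != ack) and whether a preempt timeout is currently
 * scheduled (preempt_req != preempt_ack).
 */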
static uint8_t volatile preempt_start_req;
static uint8_t preempt_start_ack;
static uint8_t volatile preempt_stop_req;
static uint8_t preempt_stop_ack;
static uint8_t preempt_req;
static uint8_t volatile preempt_ack;

static void ticker_stop_op_cb(uint32_t status, void *param)
{
	ARG_UNUSED(param);

	LL_ASSERT(preempt_stop_req != preempt_stop_ack);
	preempt_stop_ack = preempt_stop_req;

	/* We do not fail on status not being success because under scenarios
	 * where there is ticker_start then ticker_stop and then ticker_start,
	 * the call to ticker_stop will fail and this is acceptable.
	 * Also, preempt_req and preempt_ack would not be updated as the
	 * ticker_start was not processed before ticker_stop. Hence, it is
	 * safe to reset preempt_req and preempt_ack here.
	 */
	if (status == TICKER_STATUS_SUCCESS) {
		LL_ASSERT(preempt_req != preempt_ack);
	}

	preempt_req = preempt_ack;
}

static void ticker_start_op_cb(uint32_t status, void *param)
{
	ARG_UNUSED(param);
	LL_ASSERT(status == TICKER_STATUS_SUCCESS);

	/* Increase preempt requested count before acknowledging that the
	 * ticker start operation for the preempt timeout has been handled.
	 */
	LL_ASSERT(preempt_req == preempt_ack);
	preempt_req++;

	/* Increase preempt start ack count, to acknowledge that the ticker
	 * start operation has been handled.
	 */
	LL_ASSERT(preempt_start_req != preempt_start_ack);
	preempt_start_ack = preempt_start_req;
}

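/* Start (or restart) the single-shot preempt timeout ticker for the prepare
 * in the pipeline that expires first.
 *
 * If a preempt timeout is already requested or scheduled, it is only
 * restarted when the new prepare ('next') would expire earlier than the one
 * the running timeout was armed for; otherwise the existing timeout is kept.
 * On expiry the timeout invokes preempt_ticker_cb().
 */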
static uint32_t preempt_ticker_start(struct lll_event *first,
				     struct lll_event *prev,
				     struct lll_event *next)
{
	const struct lll_prepare_param *p;
	static uint32_t ticks_at_preempt;
	uint32_t ticks_at_preempt_new;
	uint32_t preempt_anchor;
	struct ull_hdr *ull;
	uint32_t preempt_to;
	uint32_t ret;

	/* Do not request to start preempt timeout if already requested.
	 *
	 * Check if there is a pending preempt timeout start request or if
	 * the preempt timeout ticker has already been scheduled.
	 */
	if ((preempt_start_req != preempt_start_ack) ||
	    (preempt_req != preempt_ack)) {
		uint32_t diff;

		/* Calc the preempt timeout */
		p = &next->prepare_param;
		ull = HDR_LLL2ULL(p->param);
		preempt_anchor = p->ticks_at_expire;
		preempt_to = MAX(ull->ticks_active_to_start,
				 ull->ticks_prepare_to_start) -
			     ull->ticks_preempt_to_start;

		ticks_at_preempt_new = preempt_anchor + preempt_to;
		ticks_at_preempt_new &= HAL_TICKER_CNTR_MASK;

		/* Check for short preempt timeouts */
		diff = ticker_ticks_diff_get(ticks_at_preempt_new,
					     ticks_at_preempt);
		if ((diff & BIT(HAL_TICKER_CNTR_MSBIT)) == 0U) {
			return TICKER_STATUS_SUCCESS;
		}

		/* Stop any scheduled preempt ticker */
		ret = preempt_ticker_stop();
		LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
			  (ret == TICKER_STATUS_BUSY));

#if defined(CONFIG_BT_CTLR_EARLY_ABORT_PREVIOUS_PREPARE)
		/* FIXME: The prepare pipeline is not an ordered list
		 *        implementation, and for a short prepare being
		 *        enqueued, ideally the pipeline has to be implemented
		 *        as an ordered list. Until then, a workaround to abort
		 *        a prepare present before the short prepare being
		 *        enqueued is implemented below.
		 *        A proper solution will be to re-design the pipeline
		 *        as an ordered list, instead of the current FIFO.
		 */
		/* Preempt timeout already started but no role/state in the
		 * head of the prepare pipeline.
		 */
		if (prev && !prev->is_aborted) {
			/* Set early as we get called again through the call to
			 * abort_cb().
			 */
			ticks_at_preempt = ticks_at_preempt_new;

			/* Abort previous prepare that set the preempt timeout */
			prev->is_aborted = 1U;
			prev->abort_cb(&prev->prepare_param,
				       prev->prepare_param.param);
		}
#endif /* CONFIG_BT_CTLR_EARLY_ABORT_PREVIOUS_PREPARE */

		/* Schedule short preempt timeout */
		first = next;
	} else {
		/* Calc the preempt timeout */
		p = &first->prepare_param;
		ull = HDR_LLL2ULL(p->param);
		preempt_anchor = p->ticks_at_expire;
		preempt_to = MAX(ull->ticks_active_to_start,
				 ull->ticks_prepare_to_start) -
			     ull->ticks_preempt_to_start;

		ticks_at_preempt_new = preempt_anchor + preempt_to;
		ticks_at_preempt_new &= HAL_TICKER_CNTR_MASK;
	}

	preempt_start_req++;

	ticks_at_preempt = ticks_at_preempt_new;

	/* Set up the preempt timeout */
	ret = ticker_start(TICKER_INSTANCE_ID_CTLR,
			   TICKER_USER_ID_LLL,
			   TICKER_ID_LLL_PREEMPT,
			   preempt_anchor,
			   preempt_to,
			   TICKER_NULL_PERIOD,
			   TICKER_NULL_REMAINDER,
			   TICKER_NULL_LAZY,
			   TICKER_NULL_SLOT,
			   preempt_ticker_cb, first->prepare_param.param,
			   ticker_start_op_cb, NULL);

	return ret;
}

static uint32_t preempt_ticker_stop(void)
{
	uint32_t ret;

	/* Do not request to stop the preempt timeout if already requested or
	 * if it has expired.
	 */
	if ((preempt_stop_req != preempt_stop_ack) ||
	    (preempt_req == preempt_ack)) {
		return TICKER_STATUS_SUCCESS;
	}

	preempt_stop_req++;

	ret = ticker_stop(TICKER_INSTANCE_ID_CTLR,
			  TICKER_USER_ID_LLL,
			  TICKER_ID_LLL_PREEMPT,
			  ticker_stop_op_cb, NULL);
	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
		  (ret == TICKER_STATUS_BUSY));

	return ret;
}

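/* Preempt timeout expiry callback: runs in the ULL_HIGH ticker context and
 * defers the actual preemption to the LLL context by enqueueing the preempt()
 * mayfly with the expiring prepare's parameter.
 */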
static void preempt_ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
			      uint32_t remainder, uint16_t lazy, uint8_t force,
			      void *param)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, preempt};
	uint32_t ret;

	LL_ASSERT(preempt_ack != preempt_req);
	preempt_ack = preempt_req;

	mfy.param = param;
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL,
			     0, &mfy);
	LL_ASSERT(!ret);
}

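/* Preempt the currently active LLL event in favour of a ready prepare in the
 * pipeline; runs in the LLL (mayfly) context.
 *
 * The current event's is_abort_cb decides the outcome: continue unchanged
 * (the ready prepare is aborted), abort and resume later (-EAGAIN, a resume
 * event is enqueued), or abort outright (-ECANCELED).
 */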
static void preempt(void *param)
{
	lll_prepare_cb_t resume_cb;
	struct lll_event *ready;
	uint8_t idx;
	int err;

	/* No event to abort */
	if (!event.curr.abort_cb || !event.curr.param) {
		return;
	}

	/* Find a prepare that is ready and not a resume */
	idx = UINT8_MAX;
	ready = prepare_dequeue_iter_ready_get(&idx);
	if (!ready) {
		/* No ready prepare */
		return;
	}

	/* Preemptor not in pipeline */
	if (ready->prepare_param.param != param) {
		struct lll_event *ready_next = NULL;
		struct lll_event *preemptor;
		uint32_t ret;

		/* Find if a short prepare request is in the pipeline */
		do {
			preemptor = ull_prepare_dequeue_iter(&idx);
			if (!ready_next && preemptor && !preemptor->is_aborted &&
			    !preemptor->is_resume) {
				ready_next = preemptor;
			}
		} while (preemptor && (preemptor->is_aborted || preemptor->is_resume ||
			 (preemptor->prepare_param.param != param)));

		/* No short prepare request in pipeline */
		if (!preemptor) {
			/* Start the preempt timeout for ready event */
			ret = preempt_ticker_start(ready, NULL, ready);
			LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
				  (ret == TICKER_STATUS_BUSY));

			return;
		}

		/* FIXME: Abort all events in pipeline before the short
		 *        prepare event. For now, let's assert when many
		 *        enqueued prepares need aborting.
		 */
		LL_ASSERT(preemptor == ready_next);

		/* Abort the prepare that is present before the short prepare */
		ready->is_aborted = 1;
		ready->abort_cb(&ready->prepare_param, ready->prepare_param.param);

		/* As the prepare queue has been refreshed due to the call of
		 * abort_cb which invokes lll_done, find the latest prepare.
		 */
		idx = UINT8_MAX;
		ready = prepare_dequeue_iter_ready_get(&idx);
		if (!ready) {
			/* No ready prepare */
			return;
		}

		LL_ASSERT(ready->prepare_param.param == param);
	}

	/* Check if the current event wants to continue */
	err = event.curr.is_abort_cb(ready->prepare_param.param, event.curr.param, &resume_cb);
	if (!err) {
		/* Let the preemptor LLL know about the cancelled prepare */
		ready->is_aborted = 1;
		ready->abort_cb(&ready->prepare_param, ready->prepare_param.param);

		return;
	}

	/* Abort the current event */
	event.curr.abort_cb(NULL, event.curr.param);

	/* Check if resume requested */
	if (err == -EAGAIN) {
		struct lll_event *iter;
		uint8_t iter_idx;

		/* Abort any duplicates so that they get dequeued */
		iter_idx = UINT8_MAX;
		iter = ull_prepare_dequeue_iter(&iter_idx);
		while (iter) {
			if (!iter->is_aborted &&
			    event.curr.param == iter->prepare_param.param) {
				iter->is_aborted = 1;
				iter->abort_cb(&iter->prepare_param,
					       iter->prepare_param.param);

#if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
				/* NOTE: abort_cb called lll_done which modifies
				 *       the prepare pipeline hence re-iterate
				 *       through the prepare pipeline.
				 */
				iter_idx = UINT8_MAX;
#endif /* !CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */
			}

			iter = ull_prepare_dequeue_iter(&iter_idx);
		}

		/* Enqueue as resume event */
		iter = resume_enqueue(resume_cb);
		LL_ASSERT(iter);
	} else {
		LL_ASSERT(err == -ECANCELED);
	}
}
#else /* CONFIG_BT_CTLR_LOW_LAT */

#if (CONFIG_BT_CTLR_LLL_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
static void mfy_ticker_job_idle_get(void *param)
{
	uint32_t ret;

	/* Ticker Job Silence */
	ret = ticker_job_idle_get(TICKER_INSTANCE_ID_CTLR,
				  TICKER_USER_ID_ULL_LOW,
				  ticker_op_job_disable, NULL);
	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
		  (ret == TICKER_STATUS_BUSY));
}

static void ticker_op_job_disable(uint32_t status, void *op_context)
{
	ARG_UNUSED(status);
	ARG_UNUSED(op_context);

	/* FIXME: */
	if (1 /* _radio.state != STATE_NONE */) {
		mayfly_enable(TICKER_USER_ID_ULL_LOW,
			      TICKER_USER_ID_ULL_LOW, 0);
	}
}
#endif

#endif /* CONFIG_BT_CTLR_LOW_LAT */