/*
 * Copyright (c) 2018-2020 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <stdint.h>
#include <stdbool.h>
#include <errno.h>

#include <hal/nrf_rtc.h>

#include <zephyr/toolchain.h>

#include <soc.h>
#include <zephyr/device.h>

#include <zephyr/drivers/entropy.h>
#include <zephyr/irq.h>

#include "hal/swi.h"
#include "hal/ccm.h"
#include "hal/radio.h"
#include "hal/ticker.h"

#include "util/mem.h"
#include "util/memq.h"
#include "util/mayfly.h"

#include "ticker/ticker.h"

#include "lll.h"
#include "lll_vendor.h"
#include "lll_clock.h"
#include "lll_internal.h"
#include "lll_prof_internal.h"

#include "hal/debug.h"

#if defined(CONFIG_BT_CTLR_ZLI)
#define IRQ_CONNECT_FLAGS IRQ_ZERO_LATENCY
#else
#define IRQ_CONNECT_FLAGS 0
#endif

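/* Singleton LLL state: the callbacks and parameter of the currently active
 * radio event and, when CONFIG_BT_CTLR_LOW_LAT_ULL_DONE is enabled, counters
 * used to keep LLL-generated and ULL-processed done events in sync.
 */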
static struct {
	struct {
		void              *param;
		lll_is_abort_cb_t is_abort_cb;
		lll_abort_cb_t    abort_cb;
	} curr;

#if defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
	struct {
		uint8_t volatile lll_count;
		uint8_t          ull_count;
	} done;
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */
} event;

/* Entropy device */
static const struct device *const dev_entropy = DEVICE_DT_GET(DT_NODELABEL(rng));

static int init_reset(void);
#if defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
static inline void done_inc(void);
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */
static inline bool is_done_sync(void);
static inline struct lll_event *prepare_dequeue_iter_ready_get(uint8_t *idx);
static inline struct lll_event *resume_enqueue(lll_prepare_cb_t resume_cb);
static void isr_race(void *param);

#if !defined(CONFIG_BT_CTLR_LOW_LAT)
static uint32_t preempt_ticker_start(struct lll_event *first,
				     struct lll_event *prev,
				     struct lll_event *next);
static uint32_t preempt_ticker_stop(void);
static void preempt_ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
			      uint32_t remainder, uint16_t lazy, uint8_t force,
			      void *param);
static void preempt(void *param);
#else /* CONFIG_BT_CTLR_LOW_LAT */
#if (CONFIG_BT_CTLR_LLL_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
static void mfy_ticker_job_idle_get(void *param);
static void ticker_op_job_disable(uint32_t status, void *op_context);
#endif
#endif /* CONFIG_BT_CTLR_LOW_LAT */

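/* Radio ISR, connected as a direct (and, with CONFIG_BT_CTLR_ZLI, zero
 * latency) interrupt; it dispatches via isr_radio() to the handler most
 * recently installed with radio_isr_set().
 */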
#if defined(CONFIG_BT_CTLR_DYNAMIC_INTERRUPTS) && \
	defined(CONFIG_DYNAMIC_DIRECT_INTERRUPTS)
static void radio_nrf5_isr(const void *arg)
#else /* !CONFIG_BT_CTLR_DYNAMIC_INTERRUPTS */
ISR_DIRECT_DECLARE(radio_nrf5_isr)
#endif /* !CONFIG_BT_CTLR_DYNAMIC_INTERRUPTS */
{
	DEBUG_RADIO_ISR(1);

	lll_prof_enter_radio();

	isr_radio();

	ISR_DIRECT_PM();

	lll_prof_exit_radio();

	DEBUG_RADIO_ISR(0);

#if !defined(CONFIG_BT_CTLR_DYNAMIC_INTERRUPTS) || \
	!defined(CONFIG_DYNAMIC_DIRECT_INTERRUPTS)
	return 1;
#endif /* !CONFIG_BT_CTLR_DYNAMIC_INTERRUPTS ||
	* !CONFIG_DYNAMIC_DIRECT_INTERRUPTS
	*/
}

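/* RTC0 ISR at ULL_HIGH priority: COMPARE0 triggers ticker worker instance 0,
 * after which pending ULL_HIGH (and, when sharing the same priority level,
 * ULL_LOW) mayflies are executed.
 */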
static void rtc0_nrf5_isr(const void *arg)
{
	DEBUG_TICKER_ISR(1);

	lll_prof_enter_ull_high();

	/* On compare0 run ticker worker instance0 */
	if (NRF_RTC0->EVENTS_COMPARE[0]) {
		nrf_rtc_event_clear(NRF_RTC0, NRF_RTC_EVENT_COMPARE_0);

		ticker_trigger(0);
	}

	mayfly_run(TICKER_USER_ID_ULL_HIGH);

	lll_prof_exit_ull_high();

#if !defined(CONFIG_BT_CTLR_LOW_LAT) && \
	(CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
	lll_prof_enter_ull_low();

	mayfly_run(TICKER_USER_ID_ULL_LOW);

	lll_prof_exit_ull_low();
#endif

	DEBUG_TICKER_ISR(0);
}

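/* Software-triggered IRQ at LLL priority; runs mayflies queued for the LLL
 * context, e.g. deferred prepare and preempt work.
 */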
static void swi_lll_nrf5_isr(const void *arg)
{
	DEBUG_RADIO_ISR(1);

	lll_prof_enter_lll();

	mayfly_run(TICKER_USER_ID_LLL);

	lll_prof_exit_lll();

	DEBUG_RADIO_ISR(0);
}

#if defined(CONFIG_BT_CTLR_LOW_LAT) || \
	(CONFIG_BT_CTLR_ULL_HIGH_PRIO != CONFIG_BT_CTLR_ULL_LOW_PRIO)
static void swi_ull_low_nrf5_isr(const void *arg)
{
	DEBUG_TICKER_JOB(1);

	lll_prof_enter_ull_low();

	mayfly_run(TICKER_USER_ID_ULL_LOW);

	lll_prof_exit_ull_low();

	DEBUG_TICKER_JOB(0);
}
#endif

int lll_init(void)
{
	int err;

	/* Check that the entropy device is ready */
	if (!device_is_ready(dev_entropy)) {
		return -ENODEV;
	}

	/* Initialise LLL internals */
	event.curr.abort_cb = NULL;

	/* Initialize Clocks */
	err = lll_clock_init();
	if (err < 0) {
		return err;
	}

	err = init_reset();
	if (err) {
		return err;
	}

	/* Initialize SW IRQ structure */
	hal_swi_init();

	/* Connect ISRs */
#if defined(CONFIG_BT_CTLR_DYNAMIC_INTERRUPTS)
#if defined(CONFIG_DYNAMIC_DIRECT_INTERRUPTS)
	ARM_IRQ_DIRECT_DYNAMIC_CONNECT(RADIO_IRQn, CONFIG_BT_CTLR_LLL_PRIO,
				       IRQ_CONNECT_FLAGS, no_reschedule);
	irq_connect_dynamic(RADIO_IRQn, CONFIG_BT_CTLR_LLL_PRIO,
			    radio_nrf5_isr, NULL, IRQ_CONNECT_FLAGS);
#else /* !CONFIG_DYNAMIC_DIRECT_INTERRUPTS */
	IRQ_DIRECT_CONNECT(RADIO_IRQn, CONFIG_BT_CTLR_LLL_PRIO,
			   radio_nrf5_isr, IRQ_CONNECT_FLAGS);
#endif /* !CONFIG_DYNAMIC_DIRECT_INTERRUPTS */
	irq_connect_dynamic(RTC0_IRQn, CONFIG_BT_CTLR_ULL_HIGH_PRIO,
			    rtc0_nrf5_isr, NULL, 0U);
	irq_connect_dynamic(HAL_SWI_RADIO_IRQ, CONFIG_BT_CTLR_LLL_PRIO,
			    swi_lll_nrf5_isr, NULL, IRQ_CONNECT_FLAGS);
#if defined(CONFIG_BT_CTLR_LOW_LAT) || \
	(CONFIG_BT_CTLR_ULL_HIGH_PRIO != CONFIG_BT_CTLR_ULL_LOW_PRIO)
	irq_connect_dynamic(HAL_SWI_JOB_IRQ, CONFIG_BT_CTLR_ULL_LOW_PRIO,
			    swi_ull_low_nrf5_isr, NULL, 0U);
#endif

#else /* !CONFIG_BT_CTLR_DYNAMIC_INTERRUPTS */
	IRQ_DIRECT_CONNECT(RADIO_IRQn, CONFIG_BT_CTLR_LLL_PRIO,
			   radio_nrf5_isr, IRQ_CONNECT_FLAGS);
	IRQ_CONNECT(RTC0_IRQn, CONFIG_BT_CTLR_ULL_HIGH_PRIO,
		    rtc0_nrf5_isr, NULL, 0);
#if defined(CONFIG_BT_CTLR_ZLI)
	IRQ_DIRECT_CONNECT(HAL_SWI_RADIO_IRQ, CONFIG_BT_CTLR_LLL_PRIO,
			   swi_lll_nrf5_isr, IRQ_CONNECT_FLAGS);
#else
	IRQ_CONNECT(HAL_SWI_RADIO_IRQ, CONFIG_BT_CTLR_LLL_PRIO,
		    swi_lll_nrf5_isr, NULL, IRQ_CONNECT_FLAGS);
#endif
#if defined(CONFIG_BT_CTLR_LOW_LAT) || \
	(CONFIG_BT_CTLR_ULL_HIGH_PRIO != CONFIG_BT_CTLR_ULL_LOW_PRIO)
	IRQ_CONNECT(HAL_SWI_JOB_IRQ, CONFIG_BT_CTLR_ULL_LOW_PRIO,
		    swi_ull_low_nrf5_isr, NULL, 0);
#endif
#endif /* !CONFIG_BT_CTLR_DYNAMIC_INTERRUPTS */

	/* Enable IRQs */
	irq_enable(RADIO_IRQn);
	irq_enable(RTC0_IRQn);
	irq_enable(HAL_SWI_RADIO_IRQ);
	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT) ||
	    (CONFIG_BT_CTLR_ULL_HIGH_PRIO != CONFIG_BT_CTLR_ULL_LOW_PRIO)) {
		irq_enable(HAL_SWI_JOB_IRQ);
	}

	radio_setup();

	return 0;
}

int lll_deinit(void)
{
	int err;

	/* Release clocks */
	err = lll_clock_deinit();
	if (err < 0) {
		return err;
	}

	/* Disable IRQs */
	irq_disable(RADIO_IRQn);
	irq_disable(RTC0_IRQn);
	irq_disable(HAL_SWI_RADIO_IRQ);
	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT) ||
	    (CONFIG_BT_CTLR_ULL_HIGH_PRIO != CONFIG_BT_CTLR_ULL_LOW_PRIO)) {
		irq_disable(HAL_SWI_JOB_IRQ);
	}

	/* Disconnect dynamic ISRs used */
#if defined(CONFIG_BT_CTLR_DYNAMIC_INTERRUPTS)
#if defined(CONFIG_SHARED_INTERRUPTS)
#if defined(CONFIG_DYNAMIC_DIRECT_INTERRUPTS)
	irq_disconnect_dynamic(RADIO_IRQn, CONFIG_BT_CTLR_LLL_PRIO,
			       radio_nrf5_isr, NULL, IRQ_CONNECT_FLAGS);
#endif /* CONFIG_DYNAMIC_DIRECT_INTERRUPTS */
	irq_disconnect_dynamic(RTC0_IRQn, CONFIG_BT_CTLR_ULL_HIGH_PRIO,
			       rtc0_nrf5_isr, NULL, 0U);
	irq_disconnect_dynamic(HAL_SWI_RADIO_IRQ, CONFIG_BT_CTLR_LLL_PRIO,
			       swi_lll_nrf5_isr, NULL, IRQ_CONNECT_FLAGS);
#if defined(CONFIG_BT_CTLR_LOW_LAT) || \
	(CONFIG_BT_CTLR_ULL_HIGH_PRIO != CONFIG_BT_CTLR_ULL_LOW_PRIO)
	irq_disconnect_dynamic(HAL_SWI_JOB_IRQ, CONFIG_BT_CTLR_ULL_LOW_PRIO,
			       swi_ull_low_nrf5_isr, NULL, 0U);
#endif
#else /* !CONFIG_SHARED_INTERRUPTS */
#if defined(CONFIG_DYNAMIC_DIRECT_INTERRUPTS)
	irq_connect_dynamic(RADIO_IRQn, CONFIG_BT_CTLR_LLL_PRIO, NULL, NULL,
			    IRQ_CONNECT_FLAGS);
#endif /* CONFIG_DYNAMIC_DIRECT_INTERRUPTS */
	irq_connect_dynamic(RTC0_IRQn, CONFIG_BT_CTLR_ULL_HIGH_PRIO, NULL, NULL,
			    0U);
	irq_connect_dynamic(HAL_SWI_RADIO_IRQ, CONFIG_BT_CTLR_LLL_PRIO, NULL,
			    NULL, IRQ_CONNECT_FLAGS);
#if defined(CONFIG_BT_CTLR_LOW_LAT) || \
	(CONFIG_BT_CTLR_ULL_HIGH_PRIO != CONFIG_BT_CTLR_ULL_LOW_PRIO)
	irq_connect_dynamic(HAL_SWI_JOB_IRQ, CONFIG_BT_CTLR_ULL_LOW_PRIO, NULL,
			    NULL, 0U);
#endif
#endif /* !CONFIG_SHARED_INTERRUPTS */
#endif /* CONFIG_BT_CTLR_DYNAMIC_INTERRUPTS */

	return 0;
}

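/* Random number helpers: on nRF5 both the cryptographically secure (csrand)
 * and the non-secure (rand) variants are backed by the same RNG entropy
 * driver; the *_isr_get() variants use the non-blocking, ISR-safe API.
 */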
int lll_csrand_get(void *buf, size_t len)
{
	return entropy_get_entropy(dev_entropy, buf, len);
}

int lll_csrand_isr_get(void *buf, size_t len)
{
	return entropy_get_entropy_isr(dev_entropy, buf, len, 0);
}

int lll_rand_get(void *buf, size_t len)
{
	return entropy_get_entropy(dev_entropy, buf, len);
}

int lll_rand_isr_get(void *buf, size_t len)
{
	return entropy_get_entropy_isr(dev_entropy, buf, len, 0);
}

int lll_reset(void)
{
	int err;

	err = init_reset();
	if (err) {
		return err;
	}

	return 0;
}

void lll_disable(void *param)
{
	/* LLL disable of the current event; a done event is generated on
	 * abort.
	 */
	if (!param || (param == event.curr.param)) {
		if (event.curr.abort_cb && event.curr.param) {
			event.curr.abort_cb(NULL, event.curr.param);
		} else {
			LL_ASSERT(!param);
		}
	}
	{
		struct lll_event *next;
		uint8_t idx;

		idx = UINT8_MAX;
		next = ull_prepare_dequeue_iter(&idx);
		while (next) {
			if (!next->is_aborted &&
			    (!param || (param == next->prepare_param.param))) {
				next->is_aborted = 1;
				next->abort_cb(&next->prepare_param,
					       next->prepare_param.param);

#if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
				/* NOTE: abort_cb called lll_done which modifies
				 *       the prepare pipeline hence re-iterate
				 *       through the prepare pipeline.
				 */
				idx = UINT8_MAX;
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */
			}

			next = ull_prepare_dequeue_iter(&idx);
		}
	}
}

int lll_prepare_done(void *param)
{
#if defined(CONFIG_BT_CTLR_LOW_LAT) && \
	(CONFIG_BT_CTLR_LLL_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, mfy_ticker_job_idle_get};
	uint32_t ret;

	ret = mayfly_enqueue(TICKER_USER_ID_LLL, TICKER_USER_ID_ULL_LOW,
			     1, &mfy);
	if (ret) {
		return -EFAULT;
	}

	return 0;
#else
	return 0;
#endif /* CONFIG_BT_CTLR_LOW_LAT */
}

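/* Mark an LLL event as done: with param == NULL the currently active event is
 * closed (its abort_cb and param are reset), otherwise param identifies a
 * cancelled prepare. In both cases ULL is notified through ull_event_done().
 */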
int lll_done(void *param)
{
	struct lll_event *next;
	struct ull_hdr *ull;
	void *evdone;

	/* Assert if param supplied without a pending prepare to cancel. */
	next = ull_prepare_dequeue_get();
	LL_ASSERT(!param || next);

	/* Check if the current LLL event is done */
	if (!param) {
		/* Reset current event instance */
		LL_ASSERT(event.curr.abort_cb);
		event.curr.abort_cb = NULL;

		param = event.curr.param;
		event.curr.param = NULL;

#if defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
		done_inc();
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */

		if (param) {
			ull = HDR_LLL2ULL(param);
		} else {
			ull = NULL;
		}

		if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT) &&
		    (CONFIG_BT_CTLR_LLL_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)) {
			mayfly_enable(TICKER_USER_ID_LLL,
				      TICKER_USER_ID_ULL_LOW,
				      1);
		}

		DEBUG_RADIO_CLOSE(0);
	} else {
		ull = HDR_LLL2ULL(param);
	}

#if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
	ull_prepare_dequeue(TICKER_USER_ID_LLL);
#endif /* !CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */

#if defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
	struct event_done_extra *extra;
	uint8_t result;

	/* TODO: Pass from calling function */
	result = DONE_COMPLETED;

	lll_done_score(param, result);

	extra = ull_event_done_extra_get();
	LL_ASSERT(extra);

	/* Set result in done extra data - type was set by the role */
	extra->result = result;
#endif /* CONFIG_BT_CTLR_JIT_SCHEDULING */

	/* Let ULL know about LLL event done */
	evdone = ull_event_done(ull);
	LL_ASSERT(evdone);

	return 0;
}

#if defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
void lll_done_ull_inc(void)
{
	LL_ASSERT(event.done.ull_count != event.done.lll_count);
	event.done.ull_count++;
}
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */

bool lll_is_done(void *param, bool *is_resume)
{
	/* NOTE: A preempted radio event may enqueue itself as a resume event
	 *       into the prepare pipeline, in which case event.curr.param
	 *       will have been set to NULL.
	 */
	*is_resume = (param != event.curr.param);

	return !event.curr.abort_cb;
}

int lll_is_abort_cb(void *next, void *curr, lll_prepare_cb_t *resume_cb)
{
	return -ECANCELED;
}

void lll_abort_cb(struct lll_prepare_param *prepare_param, void *param)
{
	int err;

	/* NOTE: This is not a prepare being cancelled */
	if (!prepare_param) {
		/* Perform event abort here.
		 * After event has been cleanly aborted, clean up resources
		 * and dispatch event done.
		 */
		radio_isr_set(lll_isr_done, param);
		radio_disable();
		return;
	}

	/* NOTE: Else clean the top half preparations of the aborted event
	 * currently in preparation pipeline.
	 */
	err = lll_hfclock_off();
	LL_ASSERT(err >= 0);

	lll_done(param);
}

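/* Offset in ticks needed ahead of the event start: normally the larger of
 * the active-to-start and prepare-to-start delays; with the advanced XTAL
 * feature, XON_BITMASK set in ticks_prepare_to_start indicates the HF clock
 * is being retained, in which case the shorter preempt-to-start delay is
 * used instead.
 */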
uint32_t lll_event_offset_get(struct ull_hdr *ull)
{
	if (0) {
#if defined(CONFIG_BT_CTLR_XTAL_ADVANCED)
	} else if (ull->ticks_prepare_to_start & XON_BITMASK) {
		return MAX(ull->ticks_active_to_start,
			   ull->ticks_preempt_to_start);
#endif /* CONFIG_BT_CTLR_XTAL_ADVANCED */
	} else {
		return MAX(ull->ticks_active_to_start,
			   ull->ticks_prepare_to_start);
	}
}

uint32_t lll_preempt_calc(struct ull_hdr *ull, uint8_t ticker_id,
			  uint32_t ticks_at_event)
{
	uint32_t ticks_now;
	uint32_t diff;

	ticks_now = ticker_ticks_now_get();
	diff = ticker_ticks_diff_get(ticks_now, ticks_at_event);
	if (diff & BIT(HAL_TICKER_CNTR_MSBIT)) {
		return 0;
	}

	diff += HAL_TICKER_CNTR_CMP_OFFSET_MIN;
	if (diff > HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US)) {
		/* TODO: for Low Latency Feature with Advanced XTAL feature.
		 * 1. Release retained HF clock.
		 * 2. Advance the radio event to accommodate normal prepare
		 *    duration.
		 * 3. Increase the preempt to start ticks for future events.
		 */
		return diff;
	}

	return 0U;
}

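/* Map a BLE channel index to radio frequency and whitening IV: advertising
 * channels 37, 38 and 39 sit at 2402, 2426 and 2480 MHz, while data channels
 * 0..36 occupy the remaining 2 MHz-spaced frequencies; radio_freq_chan_set()
 * takes the offset in MHz from 2400 MHz.
 */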
void lll_chan_set(uint32_t chan)
{
	switch (chan) {
	case 37:
		radio_freq_chan_set(2);
		break;

	case 38:
		radio_freq_chan_set(26);
		break;

	case 39:
		radio_freq_chan_set(80);
		break;

	default:
		if (chan < 11) {
			radio_freq_chan_set(4 + (chan * 2U));
		} else if (chan < 40) {
			radio_freq_chan_set(28 + ((chan - 11) * 2U));
		} else {
			LL_ASSERT(0);
		}
		break;
	}

	radio_whiten_iv_set(chan);
}


uint32_t lll_radio_is_idle(void)
{
	return radio_is_idle();
}

uint32_t lll_radio_tx_ready_delay_get(uint8_t phy, uint8_t flags)
{
	return radio_tx_ready_delay_get(phy, flags);
}

uint32_t lll_radio_rx_ready_delay_get(uint8_t phy, uint8_t flags)
{
	return radio_rx_ready_delay_get(phy, flags);
}

int8_t lll_radio_tx_pwr_min_get(void)
{
	return radio_tx_power_min_get();
}

int8_t lll_radio_tx_pwr_max_get(void)
{
	return radio_tx_power_max_get();
}

int8_t lll_radio_tx_pwr_floor(int8_t tx_pwr_lvl)
{
	return radio_tx_power_floor(tx_pwr_lvl);
}

void lll_isr_tx_status_reset(void)
{
	radio_status_reset();
	radio_tmr_status_reset();

	if (IS_ENABLED(HAL_RADIO_GPIO_HAVE_PA_PIN) ||
	    IS_ENABLED(HAL_RADIO_GPIO_HAVE_LNA_PIN)) {
		radio_gpio_pa_lna_disable();
	}
}

void lll_isr_rx_status_reset(void)
{
	radio_status_reset();
	radio_tmr_status_reset();
	radio_rssi_status_reset();

	if (IS_ENABLED(HAL_RADIO_GPIO_HAVE_PA_PIN) ||
	    IS_ENABLED(HAL_RADIO_GPIO_HAVE_LNA_PIN)) {
		radio_gpio_pa_lna_disable();
	}
}

void lll_isr_tx_sub_status_reset(void)
{
	radio_status_reset();
	radio_tmr_tx_status_reset();

	if (IS_ENABLED(HAL_RADIO_GPIO_HAVE_PA_PIN) ||
	    IS_ENABLED(HAL_RADIO_GPIO_HAVE_LNA_PIN)) {
		radio_gpio_pa_lna_disable();
	}
}

void lll_isr_rx_sub_status_reset(void)
{
	radio_status_reset();
	radio_tmr_rx_status_reset();

	if (IS_ENABLED(HAL_RADIO_GPIO_HAVE_PA_PIN) ||
	    IS_ENABLED(HAL_RADIO_GPIO_HAVE_LNA_PIN)) {
		radio_gpio_pa_lna_disable();
	}
}

void lll_isr_status_reset(void)
{
	radio_status_reset();
	radio_tmr_status_reset();
	radio_filter_status_reset();
	if (IS_ENABLED(CONFIG_BT_CTLR_PRIVACY)) {
		radio_ar_status_reset();
	}
	radio_rssi_status_reset();

	if (IS_ENABLED(HAL_RADIO_GPIO_HAVE_PA_PIN) ||
	    IS_ENABLED(HAL_RADIO_GPIO_HAVE_LNA_PIN)) {
		radio_gpio_pa_lna_disable();
	}
}

inline void lll_isr_abort(void *param)
{
	lll_isr_status_reset();
	lll_isr_cleanup(param);
}

void lll_isr_done(void *param)
{
	lll_isr_abort(param);
}

void lll_isr_cleanup(void *param)
{
	int err;

	radio_isr_set(isr_race, param);
	if (!radio_is_idle()) {
		radio_disable();
	}

	radio_tmr_stop();
	radio_stop();

	err = lll_hfclock_off();
	LL_ASSERT(err >= 0);

	lll_done(NULL);
}

void lll_isr_early_abort(void *param)
{
	int err;

	radio_isr_set(isr_race, param);
	if (!radio_is_idle()) {
		radio_disable();
	}

	err = lll_hfclock_off();
	LL_ASSERT(err >= 0);

	lll_done(NULL);
}

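/* Common prepare resolution: if the radio is idle and no other prepare is
 * ready, run prepare_cb() immediately and make this the current event;
 * otherwise enqueue the prepare into the pipeline for deferred execution and
 * (without CONFIG_BT_CTLR_LOW_LAT) arm the preempt timeout that will later
 * decide whether the current event must be aborted.
 */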
int lll_prepare_resolve(lll_is_abort_cb_t is_abort_cb, lll_abort_cb_t abort_cb,
			lll_prepare_cb_t prepare_cb,
			struct lll_prepare_param *prepare_param,
			uint8_t is_resume, uint8_t is_dequeue)
{
	struct lll_event *ready;
	struct lll_event *next;
	uint8_t idx;
	int err;

	/* Find the ready prepare in the pipeline */
	idx = UINT8_MAX;
	ready = prepare_dequeue_iter_ready_get(&idx);

	/* Current event active or another prepare is ready in the pipeline */
	if ((!is_dequeue && !is_done_sync()) ||
	    event.curr.abort_cb ||
	    (ready && is_resume)) {
#if defined(CONFIG_BT_CTLR_LOW_LAT)
		lll_prepare_cb_t resume_cb;
#endif /* CONFIG_BT_CTLR_LOW_LAT */

		if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT) && event.curr.param) {
			/* early abort */
			event.curr.abort_cb(NULL, event.curr.param);
		}

		/* Store the next prepare for deferred call */
		next = ull_prepare_enqueue(is_abort_cb, abort_cb, prepare_param,
					   prepare_cb, is_resume);
		LL_ASSERT(next);

#if !defined(CONFIG_BT_CTLR_LOW_LAT)
		if (is_resume) {
			return -EINPROGRESS;
		}

		/* Always start preempt timeout for first prepare in pipeline */
		struct lll_event *first = ready ? ready : next;
		uint32_t ret;

		/* Start the preempt timeout */
		ret = preempt_ticker_start(first, ready, next);
		LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
			  (ret == TICKER_STATUS_BUSY));

#else /* CONFIG_BT_CTLR_LOW_LAT */
		next = NULL;
		while (ready) {
			if (!ready->is_aborted) {
				if (event.curr.param == ready->prepare_param.param) {
					ready->is_aborted = 1;
					ready->abort_cb(&ready->prepare_param,
							ready->prepare_param.param);
				} else {
					next = ready;
				}
			}

			ready = ull_prepare_dequeue_iter(&idx);
		}

		if (next) {
			/* check if resume requested by curr */
			err = event.curr.is_abort_cb(NULL, event.curr.param,
						     &resume_cb);
			LL_ASSERT(err);

			if (err == -EAGAIN) {
				next = resume_enqueue(resume_cb);
				LL_ASSERT(next);
			} else {
				LL_ASSERT(err == -ECANCELED);
			}
		}
#endif /* CONFIG_BT_CTLR_LOW_LAT */

		return -EINPROGRESS;
	}

	LL_ASSERT(!ready || &ready->prepare_param == prepare_param);

	event.curr.param = prepare_param->param;
	event.curr.is_abort_cb = is_abort_cb;
	event.curr.abort_cb = abort_cb;

	err = prepare_cb(prepare_param);

	if (!IS_ENABLED(CONFIG_BT_CTLR_ASSERT_OVERHEAD_START) &&
	    (err == -ECANCELED)) {
		err = 0;
	}

#if !defined(CONFIG_BT_CTLR_LOW_LAT)
	uint32_t ret;

	/* NOTE: The preempt timeout started earlier for the current event,
	 *       whose prepare has now been invoked, is not explicitly stopped
	 *       here. If there is a next prepare event in the pipeline, any
	 *       previously started preempt timeout will be stopped before the
	 *       new one is started. Refer to the implementation in
	 *       preempt_ticker_start().
	 */

	/* Find next prepare needing preempt timeout to be setup */
	next = prepare_dequeue_iter_ready_get(&idx);
	if (!next) {
		return err;
	}

	/* Start the preempt timeout */
	ret = preempt_ticker_start(next, NULL, next);
	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
		  (ret == TICKER_STATUS_BUSY));
#endif /* !CONFIG_BT_CTLR_LOW_LAT */

	return err;
}

static int init_reset(void)
{
	return 0;
}

#if defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
static inline void done_inc(void)
{
	event.done.lll_count++;
	LL_ASSERT(event.done.lll_count != event.done.ull_count);
}
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */

static inline bool is_done_sync(void)
{
#if defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
	return event.done.lll_count == event.done.ull_count;
#else /* !CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */
	return true;
#endif /* !CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */
}

static inline struct lll_event *prepare_dequeue_iter_ready_get(uint8_t *idx)
{
	struct lll_event *ready;

	do {
		ready = ull_prepare_dequeue_iter(idx);
	} while (ready && (ready->is_aborted || ready->is_resume));

	return ready;
}

static inline struct lll_event *resume_enqueue(lll_prepare_cb_t resume_cb)
{
	struct lll_prepare_param prepare_param = {0};

	/* Enqueue into prepare pipeline as resume radio event, and remove
	 * parameter assignment from currently active radio event so that
	 * done event is not generated.
	 */
	prepare_param.param = event.curr.param;
	event.curr.param = NULL;

	return ull_prepare_enqueue(event.curr.is_abort_cb, event.curr.abort_cb,
				   &prepare_param, resume_cb, 1);
}

static void isr_race(void *param)
{
	/* NOTE: lll_disable could have a race with ... */
	radio_status_reset();
}

#if !defined(CONFIG_BT_CTLR_LOW_LAT)
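/* Preempt timeout bookkeeping: each *_req counter is incremented when an
 * operation is requested and the matching *_ack is advanced from the ticker
 * operation callback, so req != ack marks an operation still in flight.
 */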
static uint8_t volatile preempt_start_req;
static uint8_t preempt_start_ack;
static uint8_t volatile preempt_stop_req;
static uint8_t preempt_stop_ack;
static uint8_t preempt_req;
static uint8_t volatile preempt_ack;

static void ticker_stop_op_cb(uint32_t status, void *param)
{
	ARG_UNUSED(param);
	ARG_UNUSED(status);

	LL_ASSERT(preempt_stop_req != preempt_stop_ack);
	preempt_stop_ack = preempt_stop_req;

	preempt_req = preempt_ack;
}

static void ticker_start_op_cb(uint32_t status, void *param)
{
	ARG_UNUSED(param);
	LL_ASSERT(status == TICKER_STATUS_SUCCESS);

	/* Increase preempt requested count before acknowledging that the
	 * ticker start operation for the preempt timeout has been handled.
	 */
	LL_ASSERT(preempt_req == preempt_ack);
	preempt_req++;

	/* Increase preempt start ack count, to acknowledge that the ticker
	 * start operation has been handled.
	 */
	LL_ASSERT(preempt_start_req != preempt_start_ack);
	preempt_start_ack = preempt_start_req;
}

static uint32_t preempt_ticker_start(struct lll_event *first,
				     struct lll_event *prev,
				     struct lll_event *next)
{
	const struct lll_prepare_param *p;
	static uint32_t ticks_at_preempt;
	uint32_t ticks_at_preempt_new;
	uint32_t preempt_anchor;
	struct ull_hdr *ull;
	uint32_t preempt_to;
	uint32_t ret;

	/* Do not request to start the preempt timeout if already requested.
	 *
	 * Check if there is a pending preempt timeout start request, or if
	 * the preempt timeout ticker has already been scheduled.
	 */
	if ((preempt_start_req != preempt_start_ack) ||
	    (preempt_req != preempt_ack)) {
		uint32_t diff;

		/* A preempt timeout has already been started, but there is no
		 * role/state in the head of the prepare pipeline.
		 */
		if (!prev || prev->is_aborted) {
			return TICKER_STATUS_SUCCESS;
		}

		/* Calc the preempt timeout */
		p = &next->prepare_param;
		ull = HDR_LLL2ULL(p->param);
		preempt_anchor = p->ticks_at_expire;
		preempt_to = MAX(ull->ticks_active_to_start,
				 ull->ticks_prepare_to_start) -
			     ull->ticks_preempt_to_start;

		ticks_at_preempt_new = preempt_anchor + preempt_to;
		ticks_at_preempt_new &= HAL_TICKER_CNTR_MASK;

		/* Check for short preempt timeouts */
		diff = ticker_ticks_diff_get(ticks_at_preempt_new,
					     ticks_at_preempt);
		if ((diff & BIT(HAL_TICKER_CNTR_MSBIT)) == 0U) {
			return TICKER_STATUS_SUCCESS;
		}

		/* Stop any scheduled preempt ticker */
		ret = preempt_ticker_stop();
		LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
			  (ret == TICKER_STATUS_BUSY));

#if defined(CONFIG_BT_CTLR_EARLY_ABORT_PREVIOUS_PREPARE)
		/* FIXME: The prepare pipeline is not an ordered list
		 *        implementation, and for a short prepare being
		 *        enqueued the pipeline would ideally be an ordered
		 *        list. Until then, the workaround below aborts a
		 *        prepare present before the short prepare being
		 *        enqueued. A proper solution would be to re-design
		 *        the pipeline as an ordered list instead of the
		 *        current FIFO.
		 */
		/* Set early as we get called again through the call to
		 * abort_cb().
		 */
		ticks_at_preempt = ticks_at_preempt_new;

		/* Abort previous prepare that set the preempt timeout */
		prev->is_aborted = 1U;
		prev->abort_cb(&prev->prepare_param, prev->prepare_param.param);
#endif /* CONFIG_BT_CTLR_EARLY_ABORT_PREVIOUS_PREPARE */

		/* Schedule short preempt timeout */
		first = next;
	} else {
		/* Calc the preempt timeout */
		p = &first->prepare_param;
		ull = HDR_LLL2ULL(p->param);
		preempt_anchor = p->ticks_at_expire;
		preempt_to = MAX(ull->ticks_active_to_start,
				 ull->ticks_prepare_to_start) -
			     ull->ticks_preempt_to_start;

		ticks_at_preempt_new = preempt_anchor + preempt_to;
		ticks_at_preempt_new &= HAL_TICKER_CNTR_MASK;
	}

	preempt_start_req++;

	ticks_at_preempt = ticks_at_preempt_new;

	/* Set up the preempt timeout */
	ret = ticker_start(TICKER_INSTANCE_ID_CTLR,
			   TICKER_USER_ID_LLL,
			   TICKER_ID_LLL_PREEMPT,
			   preempt_anchor,
			   preempt_to,
			   TICKER_NULL_PERIOD,
			   TICKER_NULL_REMAINDER,
			   TICKER_NULL_LAZY,
			   TICKER_NULL_SLOT,
			   preempt_ticker_cb, first->prepare_param.param,
			   ticker_start_op_cb, NULL);

	return ret;
}

static uint32_t preempt_ticker_stop(void)
{
	uint32_t ret;

	/* Do not request to stop the preempt timeout if a stop is already
	 * requested or the timeout has expired.
	 */
	if ((preempt_stop_req != preempt_stop_ack) ||
	    (preempt_req == preempt_ack)) {
		return TICKER_STATUS_SUCCESS;
	}

	preempt_stop_req++;

	ret = ticker_stop(TICKER_INSTANCE_ID_CTLR,
			  TICKER_USER_ID_LLL,
			  TICKER_ID_LLL_PREEMPT,
			  ticker_stop_op_cb, NULL);
	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
		  (ret == TICKER_STATUS_BUSY));

	return ret;
}

static void preempt_ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
			      uint32_t remainder, uint16_t lazy, uint8_t force,
			      void *param)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, preempt};
	uint32_t ret;

	LL_ASSERT(preempt_ack != preempt_req);
	preempt_ack = preempt_req;

	mfy.param = param;
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL,
			     0, &mfy);
	LL_ASSERT(!ret);
}

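/* Preempt timeout expiry: if a ready prepare is found in the pipeline, ask
 * the current event (via its is_abort_cb) whether it wants to yield; abort
 * it when required and, on -EAGAIN, re-enqueue it as a resume event.
 */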
static void preempt(void *param)
{
	lll_prepare_cb_t resume_cb;
	struct lll_event *ready;
	uint8_t idx;
	int err;

	/* No event to abort */
	if (!event.curr.abort_cb || !event.curr.param) {
		return;
	}

	/* Find a prepare that is ready and not a resume */
	idx = UINT8_MAX;
	ready = prepare_dequeue_iter_ready_get(&idx);
	if (!ready) {
		/* No ready prepare */
		return;
	}

	/* Preemptor not in pipeline */
	if (ready->prepare_param.param != param) {
		struct lll_event *ready_next = NULL;
		struct lll_event *preemptor;
		uint32_t ret;

		/* Find if a short prepare request is in the pipeline */
		do {
			preemptor = ull_prepare_dequeue_iter(&idx);
			if (!ready_next && preemptor && !preemptor->is_aborted &&
			    !preemptor->is_resume) {
				ready_next = preemptor;
			}
		} while (preemptor && (preemptor->is_aborted || preemptor->is_resume ||
			 (preemptor->prepare_param.param != param)));

		/* No short prepare request in pipeline */
		if (!preemptor) {
			/* Start the preempt timeout for the ready event */
			ret = preempt_ticker_start(ready, NULL, ready);
			LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
				  (ret == TICKER_STATUS_BUSY));

			return;
		}

		/* FIXME: Abort all events in the pipeline before the short
		 *        prepare event. For now, assert when more than one
		 *        enqueued prepare needs aborting.
		 */
		LL_ASSERT(preemptor == ready_next);

		/* Abort the prepare that is present before the short prepare */
		ready->is_aborted = 1;
		ready->abort_cb(&ready->prepare_param, ready->prepare_param.param);

		/* abort_cb() invokes lll_done(), which refreshes the prepare
		 * pipeline; hence find the latest ready prepare again.
		 */
		idx = UINT8_MAX;
		ready = prepare_dequeue_iter_ready_get(&idx);
		if (!ready) {
			/* No ready prepare */
			return;
		}
	}

	/* Check if the current event wants to continue */
	err = event.curr.is_abort_cb(ready->prepare_param.param, event.curr.param, &resume_cb);
	if (!err) {
		/* Let the preemptor LLL know about the cancelled prepare */
		ready->is_aborted = 1;
		ready->abort_cb(&ready->prepare_param, ready->prepare_param.param);

		return;
	}

	/* Abort the current event */
	event.curr.abort_cb(NULL, event.curr.param);

	/* Check if resume requested */
	if (err == -EAGAIN) {
		struct lll_event *iter;
		uint8_t iter_idx;

		/* Abort any duplicates so that they get dequeued */
		iter_idx = UINT8_MAX;
		iter = ull_prepare_dequeue_iter(&iter_idx);
		while (iter) {
			if (!iter->is_aborted &&
			    event.curr.param == iter->prepare_param.param) {
				iter->is_aborted = 1;
				iter->abort_cb(&iter->prepare_param,
					       iter->prepare_param.param);

#if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
				/* NOTE: abort_cb called lll_done which modifies
				 *       the prepare pipeline hence re-iterate
				 *       through the prepare pipeline.
				 */
				iter_idx = UINT8_MAX;
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */
			}

			iter = ull_prepare_dequeue_iter(&iter_idx);
		}

		/* Enqueue as resume event */
		iter = resume_enqueue(resume_cb);
		LL_ASSERT(iter);
	} else {
		LL_ASSERT(err == -ECANCELED);
	}
}
#else /* CONFIG_BT_CTLR_LOW_LAT */

#if (CONFIG_BT_CTLR_LLL_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
static void mfy_ticker_job_idle_get(void *param)
{
	uint32_t ret;

	/* Ticker Job Silence */
	ret = ticker_job_idle_get(TICKER_INSTANCE_ID_CTLR,
				  TICKER_USER_ID_ULL_LOW,
				  ticker_op_job_disable, NULL);
	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
		  (ret == TICKER_STATUS_BUSY));
}

static void ticker_op_job_disable(uint32_t status, void *op_context)
{
	ARG_UNUSED(status);
	ARG_UNUSED(op_context);

	/* FIXME: */
	if (1 /* _radio.state != STATE_NONE */) {
		mayfly_enable(TICKER_USER_ID_ULL_LOW,
			      TICKER_USER_ID_ULL_LOW, 0);
	}
}
#endif

#endif /* CONFIG_BT_CTLR_LOW_LAT */