/*
 * Copyright (c) 2018-2019 Nordic Semiconductor ASA
 * Copyright 2019 NXP
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <stdint.h>
#include <stdbool.h>
#include <errno.h>

#include <zephyr/toolchain.h>

#include <soc.h>
#include <zephyr/device.h>

#include <zephyr/drivers/entropy.h>
#include <zephyr/irq.h>

#include "hal/swi.h"
#include "hal/ccm.h"
#include "hal/radio.h"
#include "hal/ticker.h"

#include "util/mem.h"
#include "util/memq.h"
#include "util/mayfly.h"

#include "ticker/ticker.h"

#include "lll.h"
#include "lll_vendor.h"
#include "lll_internal.h"

#include "hal/debug.h"

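/* Current LLL event state: the parameter and abort callbacks of the event in
 * progress, plus (with CONFIG_BT_CTLR_LOW_LAT_ULL_DONE) counts of done events
 * produced by LLL and consumed by ULL.
 */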
static struct {
	struct {
		void *param;
		lll_is_abort_cb_t is_abort_cb;
		lll_abort_cb_t abort_cb;
	} curr;

#if defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
	struct {
		uint8_t volatile lll_count;
		uint8_t ull_count;
	} done;
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */
} event;

/* Entropy device */
static const struct device *const dev_entropy = DEVICE_DT_GET(DT_CHOSEN(zephyr_entropy));

static int init_reset(void);
#if defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
static inline void done_inc(void);
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */
static struct lll_event *resume_enqueue(lll_prepare_cb_t resume_cb);

#if !defined(CONFIG_BT_CTLR_LOW_LAT)
static void ticker_stop_op_cb(uint32_t status, void *param);
static void ticker_start_op_cb(uint32_t status, void *param);
static void ticker_start_next_op_cb(uint32_t status, void *param);
static uint32_t preempt_ticker_start(struct lll_event *event,
				     ticker_op_func op_cb);
static void preempt_ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
			      uint32_t remainder, uint16_t lazy, uint8_t force,
			      void *param);
static void preempt(void *param);
#else /* CONFIG_BT_CTLR_LOW_LAT */
#if (CONFIG_BT_CTLR_LLL_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
static void mfy_ticker_job_idle_get(void *param);
static void ticker_op_job_disable(uint32_t status, void *op_context);
#endif
#endif /* CONFIG_BT_CTLR_LOW_LAT */

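/* RTC/LPTMR interrupt: on a compare match, trigger ticker worker instance 0,
 * then run the ULL_HIGH mayfly queue (and the ULL_LOW queue too when both
 * execute at the same priority).
 */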
static void rtc0_rv32m1_isr(const void *arg)
{
	DEBUG_TICKER_ISR(1);

	/* On compare0 run ticker worker instance0 */
	if (LPTMR1->CSR & LPTMR_CSR_TCF(1)) {
		LPTMR1->CSR |= LPTMR_CSR_TCF(1);

		ticker_trigger(0);
	}

	mayfly_run(TICKER_USER_ID_ULL_HIGH);

#if !defined(CONFIG_BT_CTLR_LOW_LAT) && \
	(CONFIG_BT_CTLR_ULL_HIGH_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
	mayfly_run(TICKER_USER_ID_ULL_LOW);
#endif

	DEBUG_TICKER_ISR(0);
}

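/* Software-triggered interrupt that executes the LLL-context mayfly queue. */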
static void swi_lll_rv32m1_isr(const void *arg)
{
	DEBUG_RADIO_ISR(1);

	mayfly_run(TICKER_USER_ID_LLL);

	DEBUG_RADIO_ISR(0);
}

#if defined(CONFIG_BT_CTLR_LOW_LAT) || \
	(CONFIG_BT_CTLR_ULL_HIGH_PRIO != CONFIG_BT_CTLR_ULL_LOW_PRIO)
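/* Software-triggered interrupt that executes the ULL_LOW (ticker job) mayfly
 * queue when low latency is enabled or ULL_LOW runs at its own priority.
 */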
static void swi_ull_low_rv32m1_isr(const void *arg)
{
	DEBUG_TICKER_JOB(1);

	mayfly_run(TICKER_USER_ID_ULL_LOW);

	DEBUG_TICKER_JOB(0);
}
#endif

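/* Initialise LLL: verify the entropy device, reset internal state, connect
 * and enable the radio, RTC and software interrupts, then set up the radio.
 */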
int lll_init(void)
{
	int err;

	/* Check if entropy device is ready */
	if (!device_is_ready(dev_entropy)) {
		return -ENODEV;
	}

	/* Initialise LLL internals */
	event.curr.abort_cb = NULL;

	err = init_reset();
	if (err) {
		return err;
	}

	/* Initialize SW IRQ structure */
	hal_swi_init();

	/* Connect ISRs */
	IRQ_CONNECT(LL_RADIO_IRQn, CONFIG_BT_CTLR_LLL_PRIO, isr_radio, NULL, 0);
	IRQ_CONNECT(LL_RTC0_IRQn, CONFIG_BT_CTLR_ULL_HIGH_PRIO,
		    rtc0_rv32m1_isr, NULL, 0);
	IRQ_CONNECT(HAL_SWI_RADIO_IRQ, CONFIG_BT_CTLR_LLL_PRIO,
		    swi_lll_rv32m1_isr, NULL, 0);
#if defined(CONFIG_BT_CTLR_LOW_LAT) || \
	(CONFIG_BT_CTLR_ULL_HIGH_PRIO != CONFIG_BT_CTLR_ULL_LOW_PRIO)
	IRQ_CONNECT(HAL_SWI_JOB_IRQ, CONFIG_BT_CTLR_ULL_LOW_PRIO,
		    swi_ull_low_rv32m1_isr, NULL, 0);
#endif

	/* Enable IRQs */
	irq_enable(LL_RADIO_IRQn);
	irq_enable(LL_RTC0_IRQn);
	irq_enable(HAL_SWI_RADIO_IRQ);
	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT) ||
	    (CONFIG_BT_CTLR_ULL_HIGH_PRIO != CONFIG_BT_CTLR_ULL_LOW_PRIO)) {
		irq_enable(HAL_SWI_JOB_IRQ);
	}

	/* Call it after IRQ enable to be able to measure ISR latency */
	radio_setup();

	return 0;
}

int lll_deinit(void)
{
	/* Disable IRQs */
	irq_disable(LL_RADIO_IRQn);
	irq_disable(LL_RTC0_IRQn);
	irq_disable(HAL_SWI_RADIO_IRQ);
	if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT) ||
	    (CONFIG_BT_CTLR_ULL_HIGH_PRIO != CONFIG_BT_CTLR_ULL_LOW_PRIO)) {
		irq_disable(HAL_SWI_JOB_IRQ);
	}

	return 0;
}

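/* Cryptographically strong random numbers come from the entropy driver; the
 * thread and ISR variants use the blocking and ISR-safe entropy APIs
 * respectively. The plain lll_rand_get()/lll_rand_isr_get() helpers are
 * no-op stubs on this platform.
 */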
int lll_csrand_get(void *buf, size_t len)
{
	return entropy_get_entropy(dev_entropy, buf, len);
}

int lll_csrand_isr_get(void *buf, size_t len)
{
	return entropy_get_entropy_isr(dev_entropy, buf, len, 0);
}

int lll_rand_get(void *buf, size_t len)
{
	return 0;
}

int lll_rand_isr_get(void *buf, size_t len)
{
	return 0;
}

int lll_reset(void)
{
	int err;

	err = init_reset();
	if (err) {
		return err;
	}

	return 0;
}

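/* Abort the current LLL event if param is NULL or matches it, then walk the
 * prepare pipeline and abort any pending (non-aborted) prepares that match,
 * so that their done events get generated.
 */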
void lll_disable(void *param)
{
	/* LLL disable of current event, done is generated */
	if (!param || (param == event.curr.param)) {
		if (event.curr.abort_cb && event.curr.param) {
			event.curr.abort_cb(NULL, event.curr.param);
		} else {
			LL_ASSERT(!param);
		}
	}
	{
		struct lll_event *next;
		uint8_t idx;

		idx = UINT8_MAX;
		next = ull_prepare_dequeue_iter(&idx);
		while (next) {
			if (!next->is_aborted &&
			    (!param || (param == next->prepare_param.param))) {
				next->is_aborted = 1;
				next->abort_cb(&next->prepare_param,
					       next->prepare_param.param);

#if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
				/* NOTE: abort_cb called lll_done which modifies
				 *       the prepare pipeline hence re-iterate
				 *       through the prepare pipeline.
				 */
				idx = UINT8_MAX;
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */
			}

			next = ull_prepare_dequeue_iter(&idx);
		}
	}
}

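/* With CONFIG_BT_CTLR_LOW_LAT and LLL sharing a priority with ULL_LOW, queue
 * a mayfly that requests ticker job silence once the prepare has run;
 * otherwise this is a no-op.
 */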
int lll_prepare_done(void *param)
{
#if defined(CONFIG_BT_CTLR_LOW_LAT) && \
	(CONFIG_BT_CTLR_LLL_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, mfy_ticker_job_idle_get};
	uint32_t ret;

	ret = mayfly_enqueue(TICKER_USER_ID_LLL, TICKER_USER_ID_ULL_LOW,
			     1, &mfy);
	if (ret) {
		return -EFAULT;
	}

	return 0;
#else
	return 0;
#endif /* CONFIG_BT_CTLR_LOW_LAT */
}

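/* Mark an LLL event as done. With param == NULL the currently active event is
 * closed and its state reset; with a non-NULL param a cancelled prepare is
 * being flushed. The prepare pipeline is dequeued (when ULL-side done
 * handling is not enabled) and ULL is notified through ull_event_done().
 */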
int lll_done(void *param)
{
	struct lll_event *next;
	struct ull_hdr *ull;
	void *evdone;

	/* Assert if param supplied without a pending prepare to cancel. */
	next = ull_prepare_dequeue_get();
	LL_ASSERT(!param || next);

	/* check if current LLL event is done */
	ull = NULL;
	if (!param) {
		/* Reset current event instance */
		LL_ASSERT(event.curr.abort_cb);
		event.curr.abort_cb = NULL;

		param = event.curr.param;
		event.curr.param = NULL;

#if defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
		done_inc();
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */

		if (param) {
			ull = HDR_LLL2ULL(param);
		}

		if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT) &&
		    (CONFIG_BT_CTLR_LLL_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)) {
			mayfly_enable(TICKER_USER_ID_LLL,
				      TICKER_USER_ID_ULL_LOW,
				      1);
		}

		DEBUG_RADIO_CLOSE(0);
	} else {
		ull = HDR_LLL2ULL(param);
	}

#if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
	ull_prepare_dequeue(TICKER_USER_ID_LLL);
#endif /* !CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */

#if defined(CONFIG_BT_CTLR_JIT_SCHEDULING)
	lll_done_score(param, 0, 0); /* TODO */
#endif /* CONFIG_BT_CTLR_JIT_SCHEDULING */

	/* Let ULL know about LLL event done */
	evdone = ull_event_done(ull);
	LL_ASSERT(evdone);

	return 0;
}

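/* Done-event bookkeeping when CONFIG_BT_CTLR_LOW_LAT_ULL_DONE is enabled:
 * done_inc() advances the LLL-side count when an event completes, ULL calls
 * lll_done_ull_inc() once it has consumed the corresponding done event, and
 * is_done_sync() reports whether ULL has caught up with LLL.
 */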
#if defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
void lll_done_ull_inc(void)
{
	LL_ASSERT(event.done.ull_count != event.done.lll_count);
	event.done.ull_count++;
}
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */

bool lll_is_done(void *param)
{
	/* FIXME: use param to check */
	return !event.curr.abort_cb;
}

int lll_is_abort_cb(void *next, void *curr, lll_prepare_cb_t *resume_cb)
{
	return -ECANCELED;
}

int lll_clk_on(void)
{
	int err;

	/* turn on radio clock in non-blocking mode. */
	err = radio_wake();
	if (!err) {
		DEBUG_RADIO_XTAL(1);
	}

	return err;
}

int lll_clk_on_wait(void)
{
	int err;

	/* turn on radio clock in blocking mode. */
	err = radio_wake();

	while (radio_is_off()) {
		k_cpu_idle();
	}

	DEBUG_RADIO_XTAL(1);

	return err;
}

int lll_clk_off(void)
{
	int err;

	/* turn off radio clock in non-blocking mode. */
	err = radio_sleep();
	if (!err) {
		DEBUG_RADIO_XTAL(0);
	}

	return err;
}

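/* Return the number of ticker ticks before the event anchor at which
 * preparation has to start: when the XON bit is set (HF clock retained,
 * CONFIG_BT_CTLR_XTAL_ADVANCED), the larger of the active and preempt
 * start-up times; otherwise the larger of the active and prepare start-up
 * times.
 */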
uint32_t lll_event_offset_get(struct ull_hdr *ull)
{
	if (0) {
#if defined(CONFIG_BT_CTLR_XTAL_ADVANCED)
	} else if (ull->ticks_prepare_to_start & XON_BITMASK) {
		return MAX(ull->ticks_active_to_start,
			   ull->ticks_preempt_to_start);
#endif /* CONFIG_BT_CTLR_XTAL_ADVANCED */
	} else {
		return MAX(ull->ticks_active_to_start,
			   ull->ticks_prepare_to_start);
	}
}

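/* Return 1 if the ticker has already advanced past the event's scheduled
 * start by more than the allowed start overhead (the event is running late),
 * 0 otherwise.
 */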
uint32_t lll_preempt_calc(struct ull_hdr *ull, uint8_t ticker_id,
			  uint32_t ticks_at_event)
{
	uint32_t ticks_now;
	uint32_t diff;

	ticks_now = ticker_ticks_now_get();
	diff = ticks_now - ticks_at_event;
	if (diff & BIT(HAL_TICKER_CNTR_MSBIT)) {
		return 0;
	}

	diff += HAL_TICKER_CNTR_CMP_OFFSET_MIN;
	if (diff > HAL_TICKER_US_TO_TICKS(EVENT_OVERHEAD_START_US)) {
		/* TODO: for Low Latency Feature with Advanced XTAL feature.
		 * 1. Release retained HF clock.
		 * 2. Advance the radio event to accommodate normal prepare
		 *    duration.
		 * 3. Increase the preempt to start ticks for future events.
		 */
		return 1;
	}

	return 0;
}

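/* Map a Bluetooth LE channel index to the value passed to
 * radio_freq_chan_set(); the values match the channel centre frequencies as
 * MHz offsets from 2400 MHz: advertising channels 37/38/39 at 2402/2426/2480
 * MHz, data channels 0-10 at 2404-2424 MHz and 11-36 at 2428-2478 MHz. The
 * channel index also seeds the whitening IV.
 */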
void lll_chan_set(uint32_t chan)
{
	switch (chan) {
	case 37:
		radio_freq_chan_set(2);
		break;

	case 38:
		radio_freq_chan_set(26);
		break;

	case 39:
		radio_freq_chan_set(80);
		break;

	default:
		if (chan < 11) {
			radio_freq_chan_set(4 + (chan * 2U));
		} else if (chan < 40) {
			radio_freq_chan_set(28 + ((chan - 11) * 2U));
		} else {
			LL_ASSERT(0);
		}
		break;
	}

	radio_whiten_iv_set(chan);
}

uint32_t lll_radio_is_idle(void)
{
	return radio_is_idle();
}

uint32_t lll_radio_tx_ready_delay_get(uint8_t phy, uint8_t flags)
{
	return radio_tx_ready_delay_get(phy, flags);
}

uint32_t lll_radio_rx_ready_delay_get(uint8_t phy, uint8_t flags)
{
	return radio_rx_ready_delay_get(phy, flags);
}

void lll_isr_status_reset(void)
{
	radio_status_reset();
	radio_tmr_status_reset();
	radio_filter_status_reset();
	if (IS_ENABLED(CONFIG_BT_CTLR_PRIVACY)) {
		radio_ar_status_reset();
	}
	radio_rssi_status_reset();
}

static int init_reset(void)
{
	return 0;
}

#if defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
static inline void done_inc(void)
{
	event.done.lll_count++;
	LL_ASSERT(event.done.lll_count != event.done.ull_count);
}
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */

static inline bool is_done_sync(void)
{
#if defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
	return event.done.lll_count == event.done.ull_count;
#else /* !CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */
	return true;
#endif /* !CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */
}

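/* Resolve a prepare request. If no event is active and all done events have
 * been consumed, run prepare_cb in place; otherwise enqueue the request into
 * the prepare pipeline for deferred execution and, without
 * CONFIG_BT_CTLR_LOW_LAT, arm the preempt ticker so the pending event can
 * interrupt the current one at its scheduled time.
 */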
int lll_prepare_resolve(lll_is_abort_cb_t is_abort_cb, lll_abort_cb_t abort_cb,
			lll_prepare_cb_t prepare_cb,
			struct lll_prepare_param *prepare_param,
			uint8_t is_resume, uint8_t is_dequeue)
{
	struct lll_event *p;
	uint8_t idx;
	int err;

	/* Find the ready prepare in the pipeline */
	idx = UINT8_MAX;
	p = ull_prepare_dequeue_iter(&idx);
	while (p && (p->is_aborted || p->is_resume)) {
		p = ull_prepare_dequeue_iter(&idx);
	}

	/* Current event active or another prepare is ready in the pipeline */
	if ((!is_dequeue && !is_done_sync()) ||
	    event.curr.abort_cb ||
	    (p && is_resume)) {
#if defined(CONFIG_BT_CTLR_LOW_LAT)
		lll_prepare_cb_t resume_cb;
#endif /* CONFIG_BT_CTLR_LOW_LAT */
		struct lll_event *next;

		if (IS_ENABLED(CONFIG_BT_CTLR_LOW_LAT) && event.curr.param) {
			/* early abort */
			event.curr.abort_cb(NULL, event.curr.param);
		}

		/* Store the next prepare for deferred call */
		next = ull_prepare_enqueue(is_abort_cb, abort_cb, prepare_param,
					   prepare_cb, is_resume);
		LL_ASSERT(next);

#if !defined(CONFIG_BT_CTLR_LOW_LAT)
		if (is_resume) {
			return -EINPROGRESS;
		}

		/* Start the preempt timeout */
		uint32_t ret;

		ret = preempt_ticker_start(next, ticker_start_op_cb);
		LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
			  (ret == TICKER_STATUS_BUSY));

#else /* CONFIG_BT_CTLR_LOW_LAT */
		next = NULL;
		while (p) {
			if (!p->is_aborted) {
				if (event.curr.param ==
				    p->prepare_param.param) {
					p->is_aborted = 1;
					p->abort_cb(&p->prepare_param,
						    p->prepare_param.param);
				} else {
					next = p;
				}
			}

			p = ull_prepare_dequeue_iter(&idx);
		}

		if (next) {
			/* check if resume requested by curr */
			err = event.curr.is_abort_cb(NULL, event.curr.param,
						     &resume_cb);
			LL_ASSERT(err);

			if (err == -EAGAIN) {
				next = resume_enqueue(resume_cb);
				LL_ASSERT(next);
			} else {
				LL_ASSERT(err == -ECANCELED);
			}
		}
#endif /* CONFIG_BT_CTLR_LOW_LAT */

		return -EINPROGRESS;
	}

	LL_ASSERT(!p || &p->prepare_param == prepare_param);

	event.curr.param = prepare_param->param;
	event.curr.is_abort_cb = is_abort_cb;
	event.curr.abort_cb = abort_cb;

	err = prepare_cb(prepare_param);

#if !defined(CONFIG_BT_CTLR_LOW_LAT)
	uint32_t ret;

	/* Stop any scheduled preempt ticker */
	ret = ticker_stop(TICKER_INSTANCE_ID_CTLR,
			  TICKER_USER_ID_LLL,
			  TICKER_ID_LLL_PREEMPT,
			  ticker_stop_op_cb, NULL);
	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
		  (ret == TICKER_STATUS_FAILURE) ||
		  (ret == TICKER_STATUS_BUSY));

	/* Find next prepare needing preempt timeout to be setup */
	do {
		p = ull_prepare_dequeue_iter(&idx);
		if (!p) {
			return err;
		}
	} while (p->is_aborted || p->is_resume);

	/* Start the preempt timeout */
	ret = preempt_ticker_start(p, ticker_start_next_op_cb);
	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
		  (ret == TICKER_STATUS_BUSY));
#endif /* !CONFIG_BT_CTLR_LOW_LAT */

	return err;
}

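/* Re-enqueue the current event as a resume entry in the prepare pipeline,
 * reusing its prepare parameter and abort callbacks.
 */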
static struct lll_event *resume_enqueue(lll_prepare_cb_t resume_cb)
{
	struct lll_prepare_param prepare_param = {0};

	prepare_param.param = event.curr.param;
	event.curr.param = NULL;

	return ull_prepare_enqueue(event.curr.is_abort_cb, event.curr.abort_cb,
				   &prepare_param, resume_cb, 1);
}

#if !defined(CONFIG_BT_CTLR_LOW_LAT)
static void ticker_stop_op_cb(uint32_t status, void *param)
{
	/* NOTE: this callback is present only for addition of debug messages
	 * when needed, else can be dispensed with.
	 */
	ARG_UNUSED(param);

	LL_ASSERT((status == TICKER_STATUS_SUCCESS) ||
		  (status == TICKER_STATUS_FAILURE));
}

static void ticker_start_op_cb(uint32_t status, void *param)
{
	/* NOTE: this callback is present only for addition of debug messages
	 * when needed, else can be dispensed with.
	 */
	ARG_UNUSED(param);

	LL_ASSERT((status == TICKER_STATUS_SUCCESS) ||
		  (status == TICKER_STATUS_FAILURE));
}

static void ticker_start_next_op_cb(uint32_t status, void *param)
{
	ARG_UNUSED(param);

	LL_ASSERT(status == TICKER_STATUS_SUCCESS);
}

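/* Arm the one-shot TICKER_ID_LLL_PREEMPT ticker, anchored at the prepare's
 * expiry ticks with a timeout of the event's start offset less its
 * preempt-to-start ticks; expiry runs preempt_ticker_cb().
 */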
static uint32_t preempt_ticker_start(struct lll_event *evt,
				     ticker_op_func op_cb)
{
	struct lll_prepare_param *p;
	uint32_t preempt_anchor;
	struct ull_hdr *ull;
	uint32_t preempt_to;
	uint32_t ret;

	/* Calc the preempt timeout */
	p = &evt->prepare_param;
	ull = HDR_LLL2ULL(p->param);
	preempt_anchor = p->ticks_at_expire;
	preempt_to = MAX(ull->ticks_active_to_start,
			 ull->ticks_prepare_to_start) -
		     ull->ticks_preempt_to_start;

	/* Setup preempt timeout */
	ret = ticker_start(TICKER_INSTANCE_ID_CTLR,
			   TICKER_USER_ID_LLL,
			   TICKER_ID_LLL_PREEMPT,
			   preempt_anchor,
			   preempt_to,
			   TICKER_NULL_PERIOD,
			   TICKER_NULL_REMAINDER,
			   TICKER_NULL_LAZY,
			   TICKER_NULL_SLOT,
			   preempt_ticker_cb, evt,
			   op_cb, evt);

	return ret;
}

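/* Preempt ticker expiry callback, executed in ULL_HIGH context: defer the
 * actual preemption to LLL context by enqueuing the preempt() mayfly.
 */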
static void preempt_ticker_cb(uint32_t ticks_at_expire, uint32_t ticks_drift,
			      uint32_t remainder, uint16_t lazy, uint8_t force,
			      void *param)
{
	static memq_link_t link;
	static struct mayfly mfy = {0, 0, &link, NULL, preempt};
	uint32_t ret;

	mfy.param = param;
	ret = mayfly_enqueue(TICKER_USER_ID_ULL_HIGH, TICKER_USER_ID_LLL,
			     0, &mfy);
	LL_ASSERT(!ret);
}

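/* Preempt the current event in LLL context. Find the next ready prepare in
 * the pipeline; if it is not the one the ticker fired for, re-arm the preempt
 * ticker for it. Otherwise ask the current event (via is_abort_cb) whether it
 * can be interrupted: if not, cancel the incoming prepare; if yes, abort the
 * current event and, when requested, enqueue a resume entry for it.
 */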
static void preempt(void *param)
{
	lll_prepare_cb_t resume_cb;
	struct lll_event *next;
	uint8_t idx;
	int err;

	/* No event to abort */
	if (!event.curr.abort_cb || !event.curr.param) {
		return;
	}

	/* Check if any prepare in pipeline */
	idx = UINT8_MAX;
	next = ull_prepare_dequeue_iter(&idx);
	if (!next) {
		return;
	}

	/* Find a prepare that is ready and not a resume */
	while (next && (next->is_aborted || next->is_resume)) {
		next = ull_prepare_dequeue_iter(&idx);
	}

	/* No ready prepare */
	if (!next) {
		return;
	}

	/* Preemptor not in pipeline */
	if (next != param) {
		uint32_t ret;

		/* Start the preempt timeout */
		ret = preempt_ticker_start(next, ticker_start_next_op_cb);
		LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
			  (ret == TICKER_STATUS_BUSY));

		return;
	}

	/* Check if current event wants to continue */
	err = event.curr.is_abort_cb(next->prepare_param.param,
				     event.curr.param,
				     &resume_cb);
	if (!err) {
		/* Let preemptor LLL know about the cancelled prepare */
		next->is_aborted = 1;
		next->abort_cb(&next->prepare_param, next->prepare_param.param);

		return;
	}

	/* Abort the current event */
	event.curr.abort_cb(NULL, event.curr.param);

	/* Check if resume requested */
	if (err == -EAGAIN) {
		struct lll_event *iter;
		uint8_t iter_idx;

		/* Abort any duplicates so that they get dequeued */
		iter_idx = UINT8_MAX;
		iter = ull_prepare_dequeue_iter(&iter_idx);
		while (iter) {
			if (!iter->is_aborted &&
			    event.curr.param == iter->prepare_param.param) {
				iter->is_aborted = 1;
				iter->abort_cb(&iter->prepare_param,
					       iter->prepare_param.param);

#if !defined(CONFIG_BT_CTLR_LOW_LAT_ULL_DONE)
				/* NOTE: abort_cb called lll_done which modifies
				 *       the prepare pipeline hence re-iterate
				 *       through the prepare pipeline.
				 */
				iter_idx = UINT8_MAX;
#endif /* CONFIG_BT_CTLR_LOW_LAT_ULL_DONE */
			}

			iter = ull_prepare_dequeue_iter(&iter_idx);
		}

		/* Enqueue as resume event */
		iter = resume_enqueue(resume_cb);
		LL_ASSERT(iter);
	} else {
		LL_ASSERT(err == -ECANCELED);
	}
}
#else /* CONFIG_BT_CTLR_LOW_LAT */

#if (CONFIG_BT_CTLR_LLL_PRIO == CONFIG_BT_CTLR_ULL_LOW_PRIO)
static void mfy_ticker_job_idle_get(void *param)
{
	uint32_t ret;

	/* Ticker Job Silence */
	ret = ticker_job_idle_get(TICKER_INSTANCE_ID_CTLR,
				  TICKER_USER_ID_ULL_LOW,
				  ticker_op_job_disable, NULL);
	LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
		  (ret == TICKER_STATUS_BUSY));
}

static void ticker_op_job_disable(uint32_t status, void *op_context)
{
	ARG_UNUSED(status);
	ARG_UNUSED(op_context);

	/* FIXME: */
	if (1 /* _radio.state != STATE_NONE */) {
		mayfly_enable(TICKER_USER_ID_ULL_LOW,
			      TICKER_USER_ID_ULL_LOW, 0);
	}
}
#endif

#endif /* CONFIG_BT_CTLR_LOW_LAT */