1 /*
2 * Copyright (c) 2016-2018 Nordic Semiconductor ASA
3 * Copyright (c) 2016 Vinayak Kariappa Chettimada
4 *
5 * SPDX-License-Identifier: Apache-2.0
6 */
7
8 #include <stdbool.h>
9 #include <zephyr/types.h>
10 #include <soc.h>
11
12 #include "hal/cntr.h"
13 #include "hal/ticker.h"
14 #include "hal/cpu.h"
15
16 #include "ticker.h"
17
18 #include "hal/debug.h"
19
20 /*****************************************************************************
21 * Defines
22 ****************************************************************************/
23 #define DOUBLE_BUFFER_SIZE 2
24
25 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
26 #if !defined(CONFIG_BT_CTLR_ADV_AUX_SET)
27 #define BT_CTLR_ADV_AUX_SET 0
28 #else
29 #define BT_CTLR_ADV_AUX_SET CONFIG_BT_CTLR_ADV_AUX_SET
30 #endif
31 #if !defined(CONFIG_BT_CTLR_ADV_SYNC_SET)
32 #define BT_CTLR_ADV_SYNC_SET 0
33 #else
34 #define BT_CTLR_ADV_SYNC_SET CONFIG_BT_CTLR_ADV_SYNC_SET
35 #endif
36 #if defined(CONFIG_BT_CTLR_ADV_ISO)
37 #define TICKER_EXPIRE_INFO_MAX (BT_CTLR_ADV_AUX_SET + BT_CTLR_ADV_SYNC_SET*2)
38 #else
39 #define TICKER_EXPIRE_INFO_MAX (BT_CTLR_ADV_AUX_SET + BT_CTLR_ADV_SYNC_SET)
40 #endif /* !CONFIG_BT_CTLR_ADV_ISO */
41 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
42
43 /*****************************************************************************
44 * Types
45 ****************************************************************************/
46
/* Ticker node: one instance per ticker timer. Nodes are kept in a singly
 * linked list ordered by expiration time, where each node's ticks_to_expire
 * is stored relative to the preceding node (see ticker_enqueue and
 * ticker_dequeue).
 */
struct ticker_node {
	uint8_t  next;			 /* Next ticker node */

	uint8_t  req;			 /* Request counter */
	uint8_t  ack;			 /* Acknowledge counter. Imbalance
					  * between req and ack indicates
					  * ongoing operation
					  */
	uint8_t  force:1;		 /* If non-zero, node timeout should
					  * be forced at next expiration
					  */
#if defined(CONFIG_BT_TICKER_PREFER_START_BEFORE_STOP)
	uint8_t  start_pending:1;	 /* If non-zero, start is pending for
					  * bottom half of ticker_job.
					  */
#endif /* CONFIG_BT_TICKER_PREFER_START_BEFORE_STOP */
	uint32_t ticks_periodic;	 /* If non-zero, interval
					  * between expirations
					  */
	uint32_t ticks_to_expire;	 /* Ticks until expiration, relative
					  * to the previous node in the list
					  */
	ticker_timeout_func timeout_func; /* User timeout function */
	void  *context;			 /* Context delivered to timeout
					  * function
					  */
	uint32_t ticks_to_expire_minus;	 /* Negative drift correction */
#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
	uint32_t ticks_slot;		 /* Air-time reservation for node */
#endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */
	uint16_t lazy_periodic;		 /* Number of timeouts to allow
					  * skipping
					  */
	uint16_t lazy_current;		 /* Current number of timeouts
					  * skipped = peripheral latency
					  */
	union {
		uint32_t remainder_periodic; /* Sub-microsecond tick remainder
					      * for each period
					      */
		ticker_op_func fp_op_func;   /* Operation completion callback */
	};

	union {
		uint32_t remainder_current;  /* Current sub-microsecond tick
					      * remainder
					      */
		void  *op_context;	     /* Context passed in completion
					      * callback
					      */
	};

#if defined(CONFIG_BT_TICKER_EXT)
	struct ticker_ext *ext_data;	 /* Ticker extension data */
#endif /* CONFIG_BT_TICKER_EXT */
#if !defined(CONFIG_BT_TICKER_LOW_LAT) && \
	!defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
	uint8_t  must_expire;		 /* Node must expire, even if it
					  * collides with other nodes
					  */
#if defined(CONFIG_BT_TICKER_PRIORITY_SET)
	int8_t  priority;		 /* Ticker node priority. 0 is
					  * default. Lower value is higher
					  * priority
					  */
#endif /* CONFIG_BT_TICKER_PRIORITY_SET */
#endif /* !CONFIG_BT_TICKER_LOW_LAT &&
	* !CONFIG_BT_TICKER_SLOT_AGNOSTIC
	*/
};
115
/* Internal bookkeeping for expire info on a monitored ticker; entries live
 * in ticker_instance.expire_infos (CONFIG_BT_TICKER_EXT_EXPIRE_INFO only).
 */
struct ticker_expire_info_internal {
	uint32_t ticks_to_expire;	/* Ticks until the monitored ticker
					 * expires
					 */
	uint32_t remainder;		/* Sub-microsecond tick remainder */
	uint16_t lazy;			/* Lazy count of the monitored ticker */
	uint8_t ticker_id;		/* Id of the monitored ticker node */
	uint8_t outdated:1;		/* NOTE(review): presumably set when
					 * the info needs refresh by
					 * ticker_job - confirm against users
					 */
	uint8_t found:1;		/* NOTE(review): looks like "entry is
					 * valid/in use" - confirm
					 */
	uint8_t last:1;			/* NOTE(review): looks like "last used
					 * entry" marker - confirm
					 */
};
125
126 /* Operations to be performed in ticker_job.
127 * Possible values for field "op" in struct ticker_user_op
128 */
129 #define TICKER_USER_OP_TYPE_NONE 0
130 #define TICKER_USER_OP_TYPE_IDLE_GET 1
131 #define TICKER_USER_OP_TYPE_SLOT_GET 2
132 #define TICKER_USER_OP_TYPE_PRIORITY_SET 3
133 #define TICKER_USER_OP_TYPE_START 4
134 #define TICKER_USER_OP_TYPE_UPDATE 5
135 #define TICKER_USER_OP_TYPE_YIELD_ABS 6
136 #define TICKER_USER_OP_TYPE_STOP 7
137 #define TICKER_USER_OP_TYPE_STOP_ABS 8
138
139 /* Slot window re-schedule states */
140 #define TICKER_RESCHEDULE_STATE_NONE 0
141 #define TICKER_RESCHEDULE_STATE_PENDING 1
142 #define TICKER_RESCHEDULE_STATE_DONE 2
143
144 #if defined(CONFIG_BT_TICKER_EXT) && !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
145 #define TICKER_HAS_SLOT_WINDOW(_ticker) \
146 ((_ticker)->ext_data && ((_ticker)->ext_data->ticks_slot_window != 0U))
147 #define TICKER_RESCHEDULE_PENDING(_ticker) \
148 (_ticker->ext_data && (_ticker->ext_data->reschedule_state == \
149 TICKER_RESCHEDULE_STATE_PENDING))
150 #else
151 #define TICKER_HAS_SLOT_WINDOW(_ticker) 0
152 #define TICKER_RESCHEDULE_PENDING(_ticker) 0
153 #endif
154
155 /* User operation data structure for start opcode. Used for passing start
156 * requests to ticker_job
157 */
struct ticker_user_op_start {
	uint32_t ticks_at_start;	/* Anchor ticks (absolute) */
	uint32_t ticks_first;		/* Initial timeout ticks */
	uint32_t ticks_periodic;	/* Ticker period ticks */
	uint32_t remainder_periodic;	/* Sub-microsecond tick remainder of
					 * the period
					 */
	uint16_t lazy;			/* Periodic latency in number of
					 * periods
					 */
#if defined(CONFIG_BT_TICKER_REMAINDER)
	uint32_t remainder_first;	/* Sub-microsecond tick remainder of
					 * the initial timeout (pairs with
					 * ticks_first)
					 */
#endif /* CONFIG_BT_TICKER_REMAINDER */

#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
	uint32_t ticks_slot;		/* Air-time reservation ticks */
#endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */

	ticker_timeout_func fp_timeout_func; /* Timeout callback function */
	void  *context;			/* Context passed in timeout callback */

#if defined(CONFIG_BT_TICKER_EXT)
	struct ticker_ext *ext_data;	/* Ticker extension data instance */
#endif /* CONFIG_BT_TICKER_EXT */
};
181
182 /* User operation data structure for update opcode. Used for passing update
183 * requests to ticker_job
184 */
struct ticker_user_op_update {
	uint32_t ticks_drift_plus;	/* Requested positive drift in ticks */
	uint32_t ticks_drift_minus;	/* Requested negative drift in ticks */
#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
	uint32_t ticks_slot_plus;	/* Number of ticks to add to slot
					 * reservation (air-time)
					 */
	uint32_t ticks_slot_minus;	/* Number of ticks to subtract from
					 * slot reservation (air-time)
					 */
#endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */
	uint16_t lazy;			/* Peripheral latency:
					 *  0: Do nothing
					 *  1: latency = 0
					 * >1: latency = lazy - 1
					 */
	uint8_t  force;			/* Force update */
#if defined(CONFIG_BT_TICKER_EXT) && !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC) &&\
	!defined(CONFIG_BT_TICKER_LOW_LAT)
	uint8_t  must_expire;		/* Node must expire, even if it
					 * collides with other nodes:
					 *  0x00: Do nothing
					 *  0x01: Disable must_expire
					 *  0x02: Enable must_expire
					 */
#endif
#if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
	uint8_t  expire_info_id;	/* NOTE(review): presumably the id of
					 * the ticker whose expire info should
					 * be (un)registered - confirm against
					 * ticker_job users
					 */
#endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
};
215
216 /* User operation data structure for yield/stop opcode. Used for passing yield/
217 * stop requests with absolute tick to ticker_job
218 */
struct ticker_user_op_yield {
	uint32_t ticks_at_yield;	/* Anchor ticks (absolute). Used by
					 * the YIELD_ABS and STOP_ABS
					 * operations
					 */
};
222
223 /* User operation data structure for slot_get opcode. Used for passing request
224 * to get next ticker with slot ticks via ticker_job
225 */
struct ticker_user_op_slot_get {
	uint8_t  *ticker_id;		/* [in/out] Ticker node id; TICKER_NULL
					 * to start iteration from the
					 * instance head
					 */
	uint32_t *ticks_current;	/* [in/out] Absolute tick count at
					 * last ticker_job
					 */
	uint32_t *ticks_to_expire;	/* [in/out] Accumulated ticks to
					 * expiration
					 */
#if defined(CONFIG_BT_TICKER_REMAINDER_GET)
	uint32_t *remainder;		/* [out] Sub-microsecond tick
					 * remainder of found node
					 */
#endif /* CONFIG_BT_TICKER_REMAINDER_GET */
#if defined(CONFIG_BT_TICKER_LAZY_GET)
	uint16_t *lazy;			/* [out] Lazy count of found node */
#endif /* CONFIG_BT_TICKER_LAZY_GET */
#if defined(CONFIG_BT_TICKER_NEXT_SLOT_GET_MATCH)
	ticker_op_match_func fp_match_op_func; /* Node match callback, or NULL
						* if unused
						*/
	void *match_op_context;		/* Context passed to match callback */
#endif /* CONFIG_BT_TICKER_NEXT_SLOT_GET_MATCH */
};
241
242 /* User operation data structure for priority_set opcode. Used for passing
243 * request to set ticker node priority via ticker_job
244 */
struct ticker_user_op_priority_set {
	int8_t priority;		/* Node priority. Defaults to 0.
					 * Lower value is higher priority
					 */
};
248
249 /* User operation top level data structure. Used for passing requests to
250 * ticker_job
251 */
struct ticker_user_op {
	uint8_t op;			/* User operation
					 * (TICKER_USER_OP_TYPE_*)
					 */
	uint8_t id;			/* Ticker node id */
	uint8_t status;			/* Operation result */
	union {
		struct ticker_user_op_start        start;
		struct ticker_user_op_update       update;
		struct ticker_user_op_yield        yield;
		struct ticker_user_op_slot_get     slot_get;
		struct ticker_user_op_priority_set priority_set;
	} params;			/* User operation parameters;
					 * active member selected by "op"
					 */
	ticker_op_func fp_op_func;	/* Operation completion callback */
	void  *op_context;		/* Context passed in completion
					 * callback
					 */
};
266
267 /* User data structure for operations
268 */
/* Per-user queue of pending operations, processed by ticker_job */
struct ticker_user {
	uint8_t count_user_op;		/* Number of user operation slots */
	uint8_t first;			/* Slot index of first user operation */
	uint8_t middle;			/* Slot index of last managed user op.
					 * Updated by ticker_job_list_manage
					 * for use in ticker_job_list_insert
					 */
	uint8_t last;			/* Slot index of last user operation */
	struct ticker_user_op *user_op;	/* Pointer to user operation array */
};
279
280 /* Ticker instance
281 */
/* Ticker instance: ticker nodes, per-user operation queues, and the state
 * shared between ticker_worker (expiry handling) and ticker_job (list
 * management)
 */
struct ticker_instance {
	struct ticker_node *nodes;	/* Pointer to ticker nodes */
	struct ticker_user *users;	/* Pointer to user nodes */
	uint8_t  count_node;		/* Number of ticker nodes */
	uint8_t  count_user;		/* Number of user nodes */
	uint8_t  ticks_elapsed_first;	/* Index from which elapsed ticks count
					 * is pulled
					 */
	uint8_t  ticks_elapsed_last;	/* Index to which elapsed ticks count
					 * is pushed
					 */
	uint32_t ticks_elapsed[DOUBLE_BUFFER_SIZE]; /* Buffer for elapsed
						     * ticks
						     */
	uint32_t ticks_current;		/* Absolute ticks elapsed at last
					 * ticker_job
					 */
	uint8_t  ticker_id_head;	/* Index of first ticker node (next to
					 * expire)
					 */
	uint8_t  job_guard;		/* Flag preventing ticker_worker from
					 * running if ticker_job is active
					 */
	uint8_t  worker_trigger;	/* Flag preventing ticker_job from
					 * starting if ticker_worker was
					 * requested, and to trigger
					 * ticker_worker at end of job, if
					 * requested
					 */

#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
	uint8_t  ticker_id_slot_previous; /* Id of previous slot reserving
					   * ticker node
					   */
	uint32_t ticks_slot_previous;	/* Number of ticks previously reserved
					 * by a ticker node (active air-time)
					 */
#endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */

#if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
	struct ticker_expire_info_internal expire_infos[TICKER_EXPIRE_INFO_MAX];
	bool expire_infos_outdated;	/* NOTE(review): presumably set when
					 * at least one expire_infos entry
					 * needs refresh - confirm
					 */
#endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */

	ticker_caller_id_get_cb_t caller_id_get_cb; /* Function for retrieving
						     * the caller id from user
						     * id
						     */
	ticker_sched_cb_t sched_cb;		    /* Function for scheduling
						     * ticker_worker and
						     * ticker_job
						     */
	ticker_trigger_set_cb_t trigger_set_cb;	    /* Function for setting
						     * the trigger (compare
						     * value)
						     */
};
339
340 BUILD_ASSERT(sizeof(struct ticker_node) == TICKER_NODE_T_SIZE);
341 BUILD_ASSERT(sizeof(struct ticker_user) == TICKER_USER_T_SIZE);
342 BUILD_ASSERT(sizeof(struct ticker_user_op) == TICKER_USER_OP_T_SIZE);
343
344 /*****************************************************************************
345 * Global instances
346 ****************************************************************************/
347 #define TICKER_INSTANCE_MAX 1
348 static struct ticker_instance _instance[TICKER_INSTANCE_MAX];
349
350 /*****************************************************************************
351 * Static Functions
352 ****************************************************************************/
353
354 static inline uint8_t ticker_add_to_remainder(uint32_t *remainder, uint32_t to_add);
355
356 /**
357 * @brief Update elapsed index
358 *
359 * @param ticks_elapsed_index Pointer to current index
360 *
361 * @internal
362 */
ticker_next_elapsed(uint8_t * ticks_elapsed_index)363 static inline void ticker_next_elapsed(uint8_t *ticks_elapsed_index)
364 {
365 uint8_t idx = *ticks_elapsed_index + 1;
366
367 if (idx == DOUBLE_BUFFER_SIZE) {
368 idx = 0U;
369 }
370 *ticks_elapsed_index = idx;
371 }
372
373 #if defined(CONFIG_BT_TICKER_LOW_LAT)
374 /**
375 * @brief Get ticker expiring in a specific slot
376 *
377 * @details Searches for a ticker which expires in a specific slot starting
378 * at 'ticks_slot'.
379 *
380 * @param node Pointer to ticker node array
381 * @param ticker_id_head Id of initial ticker node
382 * @param ticks_slot Ticks indicating slot to get
383 *
384 * @return Id of ticker expiring within slot or TICKER_NULL
385 * @internal
386 */
ticker_by_slot_get(struct ticker_node * node,uint8_t ticker_id_head,uint32_t ticks_slot)387 static uint8_t ticker_by_slot_get(struct ticker_node *node, uint8_t ticker_id_head,
388 uint32_t ticks_slot)
389 {
390 while (ticker_id_head != TICKER_NULL) {
391 struct ticker_node *ticker;
392 uint32_t ticks_to_expire;
393
394 ticker = &node[ticker_id_head];
395 ticks_to_expire = ticker->ticks_to_expire;
396
397 if (ticks_slot <= ticks_to_expire) {
398 /* Next ticker expiration is outside the checked slot */
399 return TICKER_NULL;
400 }
401
402 if (ticker->ticks_slot) {
403 /* This ticker node has slot defined and expires within
404 * checked slot
405 */
406 break;
407 }
408
409 ticks_slot -= ticks_to_expire;
410 ticker_id_head = ticker->next;
411 }
412
413 return ticker_id_head;
414 }
415 #endif /* CONFIG_BT_TICKER_LOW_LAT */
416
417 #if defined(CONFIG_BT_TICKER_NEXT_SLOT_GET)
418 /**
419 * @brief Get next ticker with slot ticks or match
420 *
421 * @details Iterates ticker nodes from ticker_id_head. If no head id is provided
422 * (TICKER_NULL), iteration starts from the first node.
423 * Operation details:
424 *
425 * NORMAL MODE (!CONFIG_BT_TICKER_SLOT_AGNOSTIC)
426 * - Gets the next ticker which has slot ticks specified and return the ticker
427 * id and accumulated ticks until expiration.
428 * - If a matching function is provided, this function is called and node iteration
429 * continues until match function returns true.
430 *
431 * SLOT AGNOSTIC MODE (CONFIG_BT_TICKER_SLOT_AGNOSTIC)
432 * - Gets the next ticker node.
433 * - If a matching function is provided, this function is called and node iteration
434 * continues until match function returns true.
435 *
436 * @param instance Pointer to ticker instance
437 * @param ticker_id_head Pointer to id of first ticker node [in/out]
438 * @param ticks_current Pointer to current ticks count [in/out]
439 * @param ticks_to_expire Pointer to ticks to expire [in/out]
440 * @param fp_match_op_func Pointer to match function or NULL if unused
441 * @param match_op_context Pointer to operation context passed to match
442 * function or NULL if unused
443 * @param lazy Pointer to lazy variable to receive lazy_current
444 * of found ticker node
445 * @internal
446 */
static void ticker_by_next_slot_get(struct ticker_instance *instance,
				    uint8_t *ticker_id_head,
				    uint32_t *ticks_current,
				    uint32_t *ticks_to_expire,
				    ticker_op_match_func fp_match_op_func,
				    void *match_op_context, uint32_t *remainder,
				    uint16_t *lazy)
{
	struct ticker_node *ticker;
	struct ticker_node *node;
	uint32_t _ticks_to_expire;
	uint8_t _ticker_id_head;

	node = instance->nodes;

	/* Restart iteration from the instance head when no valid starting
	 * node was provided, or when the caller's tick base is stale
	 * (instance->ticks_current moved since the previous call)
	 */
	_ticker_id_head = *ticker_id_head;
	_ticks_to_expire = *ticks_to_expire;
	if ((_ticker_id_head == TICKER_NULL) ||
	    (*ticks_current != instance->ticks_current)) {
		/* Initialize with instance head */
		_ticker_id_head = instance->ticker_id_head;
		*ticks_current = instance->ticks_current;
		_ticks_to_expire = 0U;
	} else {
		/* Get ticker id for next node */
		ticker = &node[_ticker_id_head];
		_ticker_id_head = ticker->next;
	}

	/* Find first ticker node with match or slot ticks */
	while (_ticker_id_head != TICKER_NULL) {
		ticker = &node[_ticker_id_head];

#if defined(CONFIG_BT_TICKER_NEXT_SLOT_GET_MATCH)
		if (fp_match_op_func) {
			uint32_t ticks_slot = 0;

#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
			ticks_slot += ticker->ticks_slot;
#endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */

			/* Match node id */
			if (fp_match_op_func(_ticker_id_head, ticks_slot,
					     _ticks_to_expire +
					     ticker->ticks_to_expire,
					     match_op_context)) {
				/* Match found */
				break;
			}
		} else
#else /* !CONFIG_BT_TICKER_NEXT_SLOT_GET_MATCH */
		ARG_UNUSED(fp_match_op_func);
		ARG_UNUSED(match_op_context);
#endif /* !CONFIG_BT_TICKER_NEXT_SLOT_GET_MATCH */

		/* NOTE: both preprocessor branches below open a block that is
		 * closed by the single closing brace after the #endif; with
		 * matching compiled in, the "if" also binds to the dangling
		 * "else" above
		 */
#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
		if (ticker->ticks_slot) {
			/* Matching not used and node has slot ticks */
			break;
#else
		{
			/* Matching not used and slot agnostic */
			break;
#endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */
		}

		/* Accumulate expire ticks */
		_ticks_to_expire += ticker->ticks_to_expire;
		_ticker_id_head = ticker->next;
	}

	if (_ticker_id_head != TICKER_NULL) {
		/* Add ticks for found ticker */
		_ticks_to_expire += ticker->ticks_to_expire;

#if defined(CONFIG_BT_TICKER_REMAINDER_GET)
		if (remainder) {
			*remainder = ticker->remainder_current;
		}
#else /* !CONFIG_BT_TICKER_REMAINDER_GET */
		ARG_UNUSED(remainder);
#endif /* !CONFIG_BT_TICKER_REMAINDER_GET */

#if defined(CONFIG_BT_TICKER_LAZY_GET)
		if (lazy) {
			*lazy = ticker->lazy_current;
		}
#else /* !CONFIG_BT_TICKER_LAZY_GET */
		ARG_UNUSED(lazy);
#endif /* !CONFIG_BT_TICKER_LAZY_GET */
	}

	*ticker_id_head = _ticker_id_head;
	*ticks_to_expire = _ticks_to_expire;
}
542 #endif /* CONFIG_BT_TICKER_NEXT_SLOT_GET */
543
544 #if !defined(CONFIG_BT_TICKER_LOW_LAT)
545 /**
546 * @brief Enqueue ticker node
547 *
548 * @details Finds insertion point for new ticker node and inserts the
549 * node in the linked node list.
550 *
551 * @param instance Pointer to ticker instance
552 * @param id Ticker node id to enqueue
553 *
554 * @return Id of enqueued ticker node
555 * @internal
556 */
557 static uint8_t ticker_enqueue(struct ticker_instance *instance, uint8_t id)
558 {
559 struct ticker_node *ticker_current;
560 struct ticker_node *ticker_new;
561 uint32_t ticks_to_expire_current;
562 struct ticker_node *node;
563 uint32_t ticks_to_expire;
564 uint8_t previous;
565 uint8_t current;
566
567 node = &instance->nodes[0];
568 ticker_new = &node[id];
569 ticks_to_expire = ticker_new->ticks_to_expire;
570 current = instance->ticker_id_head;
571
572 /* Find insertion point for new ticker node and adjust ticks_to_expire
573 * relative to insertion point
574 */
575 previous = TICKER_NULL;
576
577 while ((current != TICKER_NULL) && (ticks_to_expire >=
578 (ticks_to_expire_current =
579 (ticker_current = &node[current])->ticks_to_expire))) {
580
581 ticks_to_expire -= ticks_to_expire_current;
582
583 /* Check for timeout in same tick - prioritize according to
584 * latency
585 */
586 if (ticks_to_expire == 0 && (ticker_new->lazy_current >
587 ticker_current->lazy_current)) {
588 ticks_to_expire = ticks_to_expire_current;
589 break;
590 }
591
592 previous = current;
593 current = ticker_current->next;
594 }
595
596 /* Link in new ticker node and adjust ticks_to_expire to relative value
597 */
598 ticker_new->ticks_to_expire = ticks_to_expire;
599 ticker_new->next = current;
600
601 if (previous == TICKER_NULL) {
602 instance->ticker_id_head = id;
603 } else {
604 node[previous].next = id;
605 }
606
607 if (current != TICKER_NULL) {
608 node[current].ticks_to_expire -= ticks_to_expire;
609 }
610
611 return id;
612 }
613 #else /* CONFIG_BT_TICKER_LOW_LAT */
614
615 /**
616 * @brief Enqueue ticker node
617 *
618 * @details Finds insertion point for new ticker node and inserts the
619 * node in the linked node list. However, if the new ticker node collides
620 * with an existing node or the expiration is inside the previous slot,
621 * the node is not inserted.
622 *
623 * @param instance Pointer to ticker instance
624 * @param id Ticker node id to enqueue
625 *
626 * @return Id of enqueued ticker node, or id of previous- or colliding
627 * ticker node if new node was not enqueued
628 * @internal
629 */
630 static uint8_t ticker_enqueue(struct ticker_instance *instance, uint8_t id)
631 {
632 struct ticker_node *ticker_current;
633 struct ticker_node *ticker_new;
634 uint32_t ticks_to_expire_current;
635 uint8_t ticker_id_slot_previous;
636 uint32_t ticks_slot_previous;
637 struct ticker_node *node;
638 uint32_t ticks_to_expire;
639 uint8_t previous;
640 uint8_t current;
641 uint8_t collide;
642
643 node = &instance->nodes[0];
644 ticker_new = &node[id];
645 ticks_to_expire = ticker_new->ticks_to_expire;
646
647 collide = ticker_id_slot_previous = TICKER_NULL;
648 current = instance->ticker_id_head;
649 previous = current;
650 ticks_slot_previous = instance->ticks_slot_previous;
651
652 /* Find insertion point for new ticker node and adjust ticks_to_expire
653 * relative to insertion point
654 */
655 while ((current != TICKER_NULL) &&
656 (ticks_to_expire >
657 (ticks_to_expire_current =
658 (ticker_current = &node[current])->ticks_to_expire))) {
659 ticks_to_expire -= ticks_to_expire_current;
660
661 if (ticker_current->ticks_slot != 0U) {
662 ticks_slot_previous = ticker_current->ticks_slot;
663 ticker_id_slot_previous = current;
664 } else {
665 if (ticks_slot_previous > ticks_to_expire_current) {
666 ticks_slot_previous -= ticks_to_expire_current;
667 } else {
668 ticks_slot_previous = 0U;
669 }
670 }
671 previous = current;
672 current = ticker_current->next;
673 }
674
675 /* Check for collision for new ticker node at insertion point */
676 collide = ticker_by_slot_get(&node[0], current,
677 ticks_to_expire + ticker_new->ticks_slot);
678
679 if ((ticker_new->ticks_slot == 0U) ||
680 ((ticks_slot_previous <= ticks_to_expire) &&
681 (collide == TICKER_NULL))) {
682 /* New ticker node has no slot ticks or there is no collision -
683 * link it in and adjust ticks_to_expire to relative value
684 */
685 ticker_new->ticks_to_expire = ticks_to_expire;
686 ticker_new->next = current;
687
688 if (previous == current) {
689 instance->ticker_id_head = id;
690 } else {
691 node[previous].next = id;
692 }
693
694 if (current != TICKER_NULL) {
695 node[current].ticks_to_expire -= ticks_to_expire;
696 }
697 } else {
698 /* Collision - no ticker node insertion, set id to that of
699 * colliding node
700 */
701 if (ticks_slot_previous > ticks_to_expire) {
702 id = ticker_id_slot_previous;
703 } else {
704 id = collide;
705 }
706 }
707
708 return id;
709 }
710 #endif /* CONFIG_BT_TICKER_LOW_LAT */
711
712 /**
713 * @brief Dequeue ticker node
714 *
715 * @details Finds extraction point for ticker node to be dequeued, unlinks
716 * the node and adjusts the links and ticks_to_expire. Returns the ticks
717 * until expiration for dequeued ticker node.
718 *
719 * @param instance Pointer to ticker instance
720 * @param id Ticker node id to dequeue
721 *
722 * @return Total ticks until expiration for dequeued ticker node, or 0 if
723 * node was not found
724 * @internal
725 */
726 static uint32_t ticker_dequeue(struct ticker_instance *instance, uint8_t id)
727 {
728 struct ticker_node *ticker_current;
729 struct ticker_node *node;
730 uint8_t previous;
731 uint32_t timeout;
732 uint8_t current;
733 uint32_t total;
734
735 /* Find the ticker's position in ticker node list while accumulating
736 * ticks_to_expire
737 */
738 node = &instance->nodes[0];
739 previous = instance->ticker_id_head;
740 current = previous;
741 total = 0U;
742 ticker_current = 0;
743 while (current != TICKER_NULL) {
744 ticker_current = &node[current];
745
746 if (current == id) {
747 break;
748 }
749
750 total += ticker_current->ticks_to_expire;
751 previous = current;
752 current = ticker_current->next;
753 }
754
755 if (current == TICKER_NULL) {
756 /* Ticker not in active list */
757 return 0;
758 }
759
760 if (previous == current) {
761 /* Ticker is the first in the list */
762 instance->ticker_id_head = ticker_current->next;
763 }
764
765 /* Remaining timeout between next timeout */
766 timeout = ticker_current->ticks_to_expire;
767
768 /* Link previous ticker with next of this ticker
769 * i.e. removing the ticker from list
770 */
771 node[previous].next = ticker_current->next;
772
773 /* If this is not the last ticker, increment the
774 * next ticker by this ticker timeout
775 */
776 if (ticker_current->next != TICKER_NULL) {
777 node[ticker_current->next].ticks_to_expire += timeout;
778 }
779
780 return (total + timeout);
781 }
782
783 #if !defined(CONFIG_BT_TICKER_LOW_LAT) && \
784 !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
785 /**
786 * @brief Resolve ticker node collision
787 *
788 * @details Evaluates the provided ticker node against other queued nodes
789 * and returns non-zero if the ticker node collides and should be skipped.
790 * The following rules are checked:
791 * 1) If the periodic latency is not yet exhausted, node is skipped
792 * 2) If the node has highest possible priority, node is never skipped
793 * 2) If the node will starve next node due to slot reservation
794 * overlap, node is skipped if:
795 * a) Next node has higher priority than current node
796 * b) Next node has more accumulated latency than the current node
797 * c) Next node is 'older' than current node and has same priority
798 * d) Next node has force flag set, and the current does not
799 *
800 * @param nodes Pointer to ticker node array
801 * @param ticker Pointer to ticker to resolve
802 *
803 * @return 0 if no collision was detected. 1 if ticker node collides
804 * with other ticker node of higher composite priority
805 * @internal
806 */
807 static uint8_t ticker_resolve_collision(struct ticker_node *nodes,
808 struct ticker_node *ticker)
809 {
810 #if defined(CONFIG_BT_TICKER_PRIORITY_SET)
811 if ((ticker->priority != TICKER_PRIORITY_CRITICAL) &&
812 (ticker->next != TICKER_NULL)) {
813
814 #else /* !CONFIG_BT_TICKER_PRIORITY_SET */
815 if (ticker->next != TICKER_NULL) {
816
817 #endif /* !CONFIG_BT_TICKER_PRIORITY_SET */
818
819 uint16_t lazy_current = ticker->lazy_current;
820 uint32_t ticker_ticks_slot;
821
822 if (TICKER_HAS_SLOT_WINDOW(ticker) && !ticker->ticks_slot) {
823 ticker_ticks_slot = HAL_TICKER_RESCHEDULE_MARGIN;
824 } else {
825 ticker_ticks_slot = ticker->ticks_slot;
826 }
827
828 /* Check if this ticker node will starve next node which has
829 * latency or higher priority
830 */
831 if (lazy_current >= ticker->lazy_periodic) {
832 lazy_current -= ticker->lazy_periodic;
833 }
834 uint8_t id_head = ticker->next;
835 uint32_t acc_ticks_to_expire = 0U;
836
837 /* Age is time since last expiry */
838 uint32_t current_age = ticker->ticks_periodic +
839 (lazy_current * ticker->ticks_periodic);
840
841 while (id_head != TICKER_NULL) {
842 struct ticker_node *ticker_next = &nodes[id_head];
843 uint32_t ticker_next_ticks_slot;
844
845 /* Accumulate ticks_to_expire for each node */
846 acc_ticks_to_expire += ticker_next->ticks_to_expire;
847 if (acc_ticks_to_expire > ticker_ticks_slot) {
848 break;
849 }
850
851 if (TICKER_HAS_SLOT_WINDOW(ticker_next) &&
852 (ticker_next->ticks_slot == 0U)) {
853 ticker_next_ticks_slot =
854 HAL_TICKER_RESCHEDULE_MARGIN;
855 } else {
856 ticker_next_ticks_slot =
857 ticker_next->ticks_slot;
858 }
859
860 /* We only care about nodes with slot reservation */
861 if (ticker_next_ticks_slot == 0U) {
862 id_head = ticker_next->next;
863 continue;
864 }
865
866 uint16_t lazy_next = ticker_next->lazy_current;
867 uint8_t lazy_next_periodic_skip =
868 ticker_next->lazy_periodic > lazy_next;
869
870 if (!lazy_next_periodic_skip) {
871 lazy_next -= ticker_next->lazy_periodic;
872 }
873
874 /* Age is time since last expiry */
875 uint32_t next_age = (ticker_next->ticks_periodic == 0U ?
876 0U :
877 (ticker_next->ticks_periodic -
878 ticker_next->ticks_to_expire)) +
879 (lazy_next *
880 ticker_next->ticks_periodic);
881
882 /* Was the current node scheduled earlier? */
883 uint8_t current_is_older =
884 (ticker->ticks_periodic == 0U) ||
885 (current_age > next_age);
886 /* Was next node scheduled earlier (legacy priority)? */
887 uint8_t next_is_older =
888 (ticker->ticks_periodic != 0U) &&
889 (next_age > current_age);
890
891 /* Is the current and next node equal in force? */
892 uint8_t equal_force =
893 (ticker->force == ticker_next->force);
894 /* Is force requested for next node (e.g. update) -
895 * more so than for current node?
896 */
897 uint8_t next_force =
898 (ticker_next->force > ticker->force);
899
900 #if defined(CONFIG_BT_TICKER_PRIORITY_SET)
901 /* Does next node have critical priority and should
902 * always be scheduled?
903 */
904 uint8_t next_is_critical =
905 (ticker_next->priority ==
906 TICKER_PRIORITY_CRITICAL);
907
908 /* Is the current and next node equal in priority? */
909 uint8_t equal_priority =
910 (ticker->priority == ticker_next->priority);
911
912 #else /* !CONFIG_BT_TICKER_PRIORITY_SET */
913 uint8_t next_is_critical = 0U;
914 uint8_t equal_priority = 1U;
915 uint8_t next_has_priority = 0U;
916
917 #endif /* !CONFIG_BT_TICKER_PRIORITY_SET */
918
919 #if defined(CONFIG_BT_TICKER_EXT)
920 #if defined(CONFIG_BT_TICKER_EXT_SLOT_WINDOW_YIELD)
921 #if defined(CONFIG_BT_TICKER_PRIORITY_SET)
922 /* Does next node have higher priority? */
923 uint8_t next_has_priority =
924 (!TICKER_HAS_SLOT_WINDOW(ticker_next) &&
925 ((lazy_next - ticker_next->priority) >
926 (lazy_current - ticker->priority));
927 #endif /* CONFIG_BT_TICKER_PRIORITY_SET */
928
929 /* Colliding next ticker does not use ticks_slot_window
930 * or it does not fit after the current ticker within
931 * the ticks_slot_window.
932 */
933 uint8_t next_not_ticks_slot_window =
934 (!TICKER_HAS_SLOT_WINDOW(ticker_next) ||
935 ((acc_ticks_to_expire +
936 ticker_next->ext_data->ticks_slot_window -
937 ticker_next->ticks_slot) <
938 ticker->ticks_slot));
939
940 /* Can the current ticker with ticks_slot_window be
941 * scheduled after the colliding ticker?
942 */
943 uint8_t curr_has_ticks_slot_window =
944 (TICKER_HAS_SLOT_WINDOW(ticker) &&
945 ((acc_ticks_to_expire +
946 ticker_next->ticks_slot) <
947 (ticker->ext_data->ticks_slot_window -
948 ticker->ticks_slot)));
949
950 #else /* !CONFIG_BT_TICKER_EXT_SLOT_WINDOW_YIELD */
951 #if defined(CONFIG_BT_TICKER_PRIORITY_SET)
952 /* Does next node have higher priority? */
953 uint8_t next_has_priority =
954 (lazy_next - ticker_next->priority) >
955 (lazy_current - ticker->priority);
956
957 #endif /* CONFIG_BT_TICKER_PRIORITY_SET */
958 uint8_t next_not_ticks_slot_window = 1U;
959
960 /* Can the current ticker with ticks_slot_window be
961 * scheduled after the colliding ticker?
962 * NOTE: Tickers with ticks_slot_window and no
963 * ticks_slot (unreserved) be always scheduled
964 * after the colliding ticker.
965 */
966 uint8_t curr_has_ticks_slot_window =
967 (TICKER_HAS_SLOT_WINDOW(ticker) &&
968 !ticker->ticks_slot &&
969 ((acc_ticks_to_expire +
970 ticker_next->ticks_slot) <
971 (ticker->ext_data->ticks_slot_window)));
972
973 #endif /* !CONFIG_BT_TICKER_EXT_SLOT_WINDOW_YIELD */
974 #else /* !CONFIG_BT_TICKER_EXT */
975 #if defined(CONFIG_BT_TICKER_PRIORITY_SET)
976 /* Does next node have higher priority? */
977 uint8_t next_has_priority =
978 (lazy_next - ticker_next->priority) >
979 (lazy_current - ticker->priority);
980
981 #endif /* CONFIG_BT_TICKER_PRIORITY_SET */
982 uint8_t next_not_ticks_slot_window = 1U;
983 uint8_t curr_has_ticks_slot_window = 0U;
984
985 #endif /* !CONFIG_BT_TICKER_EXT */
986
987 /* Check if next node is within this reservation slot
988 * and wins conflict resolution
989 */
990 if ((curr_has_ticks_slot_window &&
991 next_not_ticks_slot_window) ||
992 (!lazy_next_periodic_skip &&
993 (next_is_critical ||
994 next_force ||
995 (next_has_priority && !current_is_older) ||
996 (equal_priority && equal_force && next_is_older &&
997 next_not_ticks_slot_window)))) {
998 /* This node must be skipped - check window */
999 return 1U;
1000 }
1001 id_head = ticker_next->next;
1002 }
1003 }
1004
1005 return 0U;
1006 }
1007 #endif /* !CONFIG_BT_TICKER_LOW_LAT &&
1008 * !CONFIG_BT_TICKER_SLOT_AGNOSTIC
1009 */
1010
1011 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
1012 /**
1013 * @brief Get expiration delta from one ticker id to another ticker id
1014 *
1015 * @details Helper function to get expiration info between two tickers
1016 *
1017 * @param instance Ticker instance
1018 * @param to_ticker_id Target ticker id
1019 * @param from_ticker_id Ticker id to compare with
1020 * @param expire_info Pointer to ticker_expire_info that will
1021 * get populated with the result
1022 *
1023 * @internal
1024 */
1025 static void ticker_get_expire_info(struct ticker_instance *instance, uint8_t to_ticker_id,
1026 uint8_t from_ticker_id,
1027 struct ticker_expire_info_internal *expire_info)
1028 {
1029 struct ticker_node *current_node;
1030 uint32_t acc_ticks_to_expire = 0;
1031 uint8_t current_ticker_id;
1032 uint32_t from_ticks = 0;
1033 bool from_found = false;
1034 uint32_t to_ticks = 0;
1035 bool to_found = false;
1036
1037 current_ticker_id = instance->ticker_id_head;
1038 current_node = &instance->nodes[instance->ticker_id_head];
1039 while (current_ticker_id != TICKER_NULL && (!to_found || !from_found)) {
1040 /* Accumulate expire ticks */
1041 acc_ticks_to_expire += current_node->ticks_to_expire;
1042
1043 if (current_ticker_id == from_ticker_id) {
1044 from_ticks = acc_ticks_to_expire;
1045 from_found = true;
1046 } else if (current_ticker_id == to_ticker_id) {
1047 to_ticks = acc_ticks_to_expire;
1048 to_found = true;
1049 }
1050
1051 current_ticker_id = current_node->next;
1052 current_node = &instance->nodes[current_ticker_id];
1053 }
1054
1055 if (to_found && from_found) {
1056 struct ticker_node *to_ticker = &instance->nodes[to_ticker_id];
1057 uint32_t to_remainder = to_ticker->remainder_current;
1058
1059 if (from_ticks > to_ticks) {
1060 /* from ticker is scheduled after the to ticker - use period
1061 * to give an result
1062 */
1063 if (to_ticker->ticks_periodic == 0) {
1064 /* single shot ticker */
1065 expire_info->found = 0;
1066 return;
1067 }
1068 while (to_ticks < from_ticks) {
1069 to_ticks += to_ticker->ticks_periodic;
1070 to_ticks += ticker_add_to_remainder(&to_remainder,
1071 to_ticker->remainder_periodic);
1072 }
1073 }
1074
1075 expire_info->ticks_to_expire = to_ticks - from_ticks;
1076 expire_info->remainder = to_remainder;
1077 expire_info->lazy = to_ticker->lazy_current;
1078 expire_info->found = 1;
1079 } else {
1080 expire_info->found = 0;
1081 }
1082 }
1083
1084 /**
1085 * @brief Allocate an expire info for the given ticker ID
1086 *
1087 * @param instance Ticker instance
1088 * @param ticker_id Ticker ID to allocate for
1089 *
1090 * @return Returns TICKER_STATUS_SUCCESS if the allocation succeeded,
1091 * TICKER_STATUS_FAILURE otherwise
1092 *
1093 * @internal
1094 */
1095 static uint32_t ticker_alloc_expire_info(struct ticker_instance *instance, uint8_t ticker_id)
1096 {
1097 uint32_t status = TICKER_STATUS_FAILURE;
1098 uint8_t is_last = 0;
1099
1100 for (int i = 0; i < TICKER_EXPIRE_INFO_MAX; i++) {
1101 if (instance->expire_infos[i].ticker_id == TICKER_NULL) {
1102 struct ticker_node *ticker = &instance->nodes[ticker_id];
1103
1104 instance->expire_infos[i].ticker_id = ticker_id;
1105 instance->expire_infos[i].outdated = true;
1106 instance->expire_infos[i].last = is_last;
1107 ticker->ext_data->other_expire_info = &instance->expire_infos[i];
1108 instance->expire_infos_outdated = true;
1109 status = TICKER_STATUS_SUCCESS;
1110 break;
1111 } else if (instance->expire_infos[i].last && i < TICKER_EXPIRE_INFO_MAX - 1) {
1112 instance->expire_infos[i].last = 0;
1113 is_last = 1;
1114 }
1115 }
1116
1117 return status;
1118 }
1119
1120 /**
1121 * @brief Free a previously allocated expire info for the given ticker ID
1122 *
1123 * @param instance Ticker instance
1124 * @param ticker_id Ticker ID to free up the allocation for
1125 *
1126 * @internal
1127 */
1128 static void ticker_free_expire_info(struct ticker_instance *instance, uint8_t ticker_id)
1129 {
1130 uint8_t is_last = 0;
1131 uint8_t index;
1132
1133 for (index = 0; index < TICKER_EXPIRE_INFO_MAX; index++) {
1134 if (instance->expire_infos[index].ticker_id == ticker_id) {
1135 instance->expire_infos[index].ticker_id = TICKER_NULL;
1136 is_last = instance->expire_infos[index].last;
1137 instance->expire_infos[index].last = 0;
1138 break;
1139 }
1140 }
1141
1142 if (is_last) {
1143 /* Find new last used element and mark it */
1144 for (; index >= 0; index--) {
1145 if (instance->expire_infos[index].ticker_id != TICKER_NULL || index == 0) {
1146 instance->expire_infos[index].last = 1;
1147 break;
1148 }
1149 }
1150 }
1151 }
1152
1153 /**
1154 * @brief Mark all expire infos involving a ticker ID as outdated
1155 *
1156 * @details If a ticker moves this function should be called to mark all expiration
1157 * infos (if any) that involve that ticker as outdated and in need of re-calculation.
1158 * If any expiration infos involving the ticker_id is found, the ticker instances
1159 * expire_infos_outdated flag is also set.
1160 *
1161 * @param instance Ticker instance
1162 * @param ticker_id ID of ticker that has moved
1163 *
1164 * @internal
1165 */
1166 static void ticker_mark_expire_info_outdated(struct ticker_instance *instance, uint8_t ticker_id)
1167 {
1168 for (int i = 0; i < TICKER_EXPIRE_INFO_MAX; i++) {
1169 if (instance->expire_infos[i].ticker_id != TICKER_NULL) {
1170 uint8_t current_id = instance->expire_infos[i].ticker_id;
1171 struct ticker_node *ticker = &instance->nodes[current_id];
1172
1173 if (current_id == ticker_id ||
1174 ticker->ext_data->expire_info_id == ticker_id) {
1175 instance->expire_infos[i].outdated = true;
1176 instance->expire_infos_outdated = true;
1177 }
1178 }
1179 if (instance->expire_infos[i].last) {
1180 break;
1181 }
1182 }
1183 }
1184
1185 /**
1186 * @brief Run through all expire infos and update them if needed
1187 *
1188 * @details Runs through all expire_infos and runs ticker_get_expire_info()
1189 * for any that are marked as outdated. Clears the expire_infos_outdated
1190 * flag when done
1191 *
1192 * @param param Pointer to ticker instance
1193 *
1194 * @internal
1195 */
1196 static void ticker_job_update_expire_infos(struct ticker_instance *instance)
1197 {
1198 for (int i = 0; i < TICKER_EXPIRE_INFO_MAX; i++) {
1199 struct ticker_expire_info_internal *info = &instance->expire_infos[i];
1200
1201 if (info->ticker_id != TICKER_NULL && info->outdated) {
1202 struct ticker_node *ticker = &instance->nodes[info->ticker_id];
1203
1204 ticker_get_expire_info(instance, ticker->ext_data->expire_info_id,
1205 info->ticker_id, info);
1206 info->outdated = false;
1207 }
1208
1209 if (info->last) {
1210 break;
1211 }
1212 }
1213
1214 instance->expire_infos_outdated = false;
1215 }
1216
1217 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
1218
1219 /**
1220 * @brief Ticker worker
1221 *
1222 * @details Runs as upper half of ticker operation, triggered by a compare
1223 * match from the underlying counter HAL, via the ticker_trigger function.
1224 * Traverses ticker nodes to find tickers expired since last job
1225 * execution. Expired (requested) ticker nodes have their timeout callback
1226 * functions called. Finally, a ticker job is enqueued. Invoked from the
1227 * ticker worker mayfly context (TICKER_MAYFLY_CALL_ID_WORKER)
1228 *
1229 * @param param Pointer to ticker instance
1230 *
1231 */
void ticker_worker(void *param)
{
	struct ticker_instance *instance = param;
	struct ticker_node *node;
	uint32_t ticks_elapsed;
	uint32_t ticks_expired;
	uint8_t ticker_id_head;

	/* Defer worker if job running; the job re-triggers the worker when
	 * it completes (worker_trigger stays set as the pending flag).
	 */
	instance->worker_trigger = 1U;
	if (instance->job_guard) {
		return;
	}

	/* If no tickers queued (active), do nothing */
	if (instance->ticker_id_head == TICKER_NULL) {
		instance->worker_trigger = 0U;
		return;
	}

	/* Get ticks elapsed since last job execution */
	ticks_elapsed = ticker_ticks_diff_get(cntr_cnt_get(),
					      instance->ticks_current);

	/* Initialize actual elapsed ticks being consumed */
	ticks_expired = 0U;

	/* Auto variable containing the head of tickers expiring */
	ticker_id_head = instance->ticker_id_head;

#if !defined(CONFIG_BT_TICKER_LOW_LAT) && \
	!defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
	/* Check if the previous ticker node which had air-time, is still
	 * active and has this time slot reserved
	 */
	uint8_t slot_reserved = 0;

	if (instance->ticks_slot_previous > ticks_elapsed) {
		/* This node intersects reserved slot */
		slot_reserved = 1;
	}
#endif /* !CONFIG_BT_TICKER_LOW_LAT &&
	* !CONFIG_BT_TICKER_SLOT_AGNOSTIC
	*/

	/* Expire all tickers within ticks_elapsed and collect ticks_expired */
	node = &instance->nodes[0];

	while (ticker_id_head != TICKER_NULL) {
		struct ticker_node *ticker;
		uint32_t ticks_to_expire;
		uint8_t must_expire_skip;
		uint32_t ticks_drift;

		ticker = &node[ticker_id_head];

		/* Stop if ticker did not expire */
		ticks_to_expire = ticker->ticks_to_expire;
		if (ticks_elapsed < ticks_to_expire) {
			break;
		}

		/* Decrement ticks_elapsed and collect expired ticks */
		ticks_elapsed -= ticks_to_expire;
		ticks_expired += ticks_to_expire;

		/* Move to next ticker node */
		ticker_id_head = ticker->next;
		must_expire_skip = 0U;

		/* Skip if not scheduled to execute: (req - ack) == 1 means
		 * started and not in the middle of a stop/update operation.
		 */
		if (((ticker->req - ticker->ack) & 0xff) != 1U) {
			continue;
		}

#if !defined(CONFIG_BT_TICKER_LOW_LAT) && \
	!defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
		uint32_t ticker_ticks_slot;

		/* Unreserved (ticks_slot == 0) tickers with a slot window
		 * still get a minimal margin for collision checks.
		 */
		if (TICKER_HAS_SLOT_WINDOW(ticker) &&
		    (ticker->ticks_slot == 0U)) {
			ticker_ticks_slot = HAL_TICKER_RESCHEDULE_MARGIN;
		} else {
			ticker_ticks_slot = ticker->ticks_slot;
		}

		/* Check if node has slot reservation and resolve any collision
		 * with other ticker nodes
		 */
		if ((ticker_ticks_slot != 0U) &&
		    (slot_reserved ||
		     (instance->ticks_slot_previous > ticks_expired) ||
		     ticker_resolve_collision(node, ticker))) {
#if defined(CONFIG_BT_TICKER_EXT)
			struct ticker_ext *ext_data = ticker->ext_data;

			if (ext_data &&
			    ext_data->ticks_slot_window != 0U &&
			    ext_data->reschedule_state ==
			    TICKER_RESCHEDULE_STATE_NONE &&
			    (ticker->lazy_periodic <= ticker->lazy_current)) {
				/* Mark node for re-scheduling in ticker_job */
				ext_data->reschedule_state =
					TICKER_RESCHEDULE_STATE_PENDING;
			} else if (ext_data) {
				/* Mark node as not re-scheduling */
				ext_data->reschedule_state =
					TICKER_RESCHEDULE_STATE_NONE;
			}
#endif /* CONFIG_BT_TICKER_EXT */
			/* Increment lazy_current to indicate skipped event. In case
			 * of re-scheduled node, the lazy count will be decremented in
			 * ticker_job_reschedule_in_window when completed.
			 */
			ticker->lazy_current++;

			if ((ticker->must_expire == 0U) ||
			    (ticker->lazy_periodic >= ticker->lazy_current) ||
			    TICKER_RESCHEDULE_PENDING(ticker)) {
				/* Not a must-expire node or this is periodic
				 * latency or pending re-schedule. Skip this
				 * ticker node. Mark it as elapsed.
				 */
				ticker->ack--;
				continue;
			}

			/* Continue but perform shallow expiry */
			must_expire_skip = 1U;
		}

#if defined(CONFIG_BT_TICKER_EXT)
		if (ticker->ext_data) {
			/* Consume any accumulated drift so it is reported to
			 * the timeout exactly once.
			 */
			ticks_drift = ticker->ext_data->ticks_drift;
			ticker->ext_data->ticks_drift = 0U;
			/* Mark node as not re-scheduling */
			ticker->ext_data->reschedule_state =
				TICKER_RESCHEDULE_STATE_NONE;
		} else {
			ticks_drift = 0U;
		}

#else /* !CONFIG_BT_TICKER_EXT */
		ticks_drift = 0U;
#endif /* !CONFIG_BT_TICKER_EXT */

#else /* CONFIG_BT_TICKER_LOW_LAT ||
       * CONFIG_BT_TICKER_SLOT_AGNOSTIC
       */
		ticks_drift = 0U;
#endif /* CONFIG_BT_TICKER_LOW_LAT ||
	* CONFIG_BT_TICKER_SLOT_AGNOSTIC
	*/

		/* Scheduled timeout is acknowledged to be complete */
		ticker->ack--;

		if (ticker->timeout_func) {
			uint32_t ticks_at_expire;

			/* Anchor time of this expiry, corrected for any
			 * pending negative drift, wrapped to counter width.
			 */
			ticks_at_expire = (instance->ticks_current +
					   ticks_expired -
					   ticker->ticks_to_expire_minus) &
					   HAL_TICKER_CNTR_MASK;

#if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
			if (ticker->ext_data &&
			    ticker->ext_data->ext_timeout_func) {
				struct ticker_expire_info_internal *expire_info;
				struct ticker_ext_context ext_context;
				ticker_timeout_func timeout_func;

				timeout_func = ticker->ext_data->ext_timeout_func;
				expire_info = ticker->ext_data->other_expire_info;
				if (ticker->ext_data->expire_info_id != TICKER_NULL) {
					LL_ASSERT(expire_info && !expire_info->outdated);
				}

				ext_context.context = ticker->context;
				if (expire_info && expire_info->found) {
					ext_context.other_expire_info = (void *)expire_info;
				} else {
					ext_context.other_expire_info = NULL;
				}

				DEBUG_TICKER_TASK(1);

				/* Invoke the timeout callback */
				timeout_func(ticks_at_expire,
					     ticks_drift,
					     ticker->remainder_current,
					     must_expire_skip ?
					     TICKER_LAZY_MUST_EXPIRE :
					     ticker->lazy_current,
					     ticker->force,
					     &ext_context);
				/* NOTE(review): unlike the plain-callback
				 * path below, this branch does not call
				 * DEBUG_TICKER_TASK(0) after the callback -
				 * confirm whether that is intentional.
				 */
			} else
#endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
			{
				DEBUG_TICKER_TASK(1);

				/* Invoke the timeout callback */
				ticker->timeout_func(ticks_at_expire,
					     ticks_drift,
					     ticker->remainder_current,
					     must_expire_skip ?
					     TICKER_LAZY_MUST_EXPIRE :
					     ticker->lazy_current,
					     ticker->force,
					     ticker->context);
				DEBUG_TICKER_TASK(0);
			}

			if (!IS_ENABLED(CONFIG_BT_TICKER_LOW_LAT) &&
			    (must_expire_skip == 0U)) {
				/* Reset latency to periodic offset */
				ticker->lazy_current = 0U;
				ticker->force = 0U;

#if !defined(CONFIG_BT_TICKER_LOW_LAT) && \
	!defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
				if (ticker_ticks_slot != 0U) {
					/* Any further nodes will be skipped */
					slot_reserved = 1U;
				}
#endif /* !CONFIG_BT_TICKER_LOW_LAT &&
	* !CONFIG_BT_TICKER_SLOT_AGNOSTIC
	*/

			}
		}
	}

	/* Queue the elapsed ticks */
	if (instance->ticks_elapsed_first == instance->ticks_elapsed_last) {
		ticker_next_elapsed(&instance->ticks_elapsed_last);
	}
	instance->ticks_elapsed[instance->ticks_elapsed_last] = ticks_expired;

	instance->worker_trigger = 0U;

	/* Enqueue the ticker job with chain=1 (do not inline) */
	instance->sched_cb(TICKER_CALL_ID_WORKER, TICKER_CALL_ID_JOB, 1,
			   instance);
}
1477
1478 /**
1479 * @brief Prepare ticker node expiration
1480 *
1481 * @details Calculates the number of ticks until next expiration, taking
1482 * into consideration any negative drift correction.
1483 *
1484 * @param ticker Pointer to ticker node
1485 * @param ticks_current Current number of ticks (elapsed)
1486 * @param ticks_at_start Number of ticks at start (anchor)
1487 *
1488 * @internal
1489 */
1490 static void ticks_to_expire_prep(struct ticker_node *ticker,
1491 uint32_t ticks_current, uint32_t ticks_at_start)
1492 {
1493 uint32_t ticks_to_expire = ticker->ticks_to_expire;
1494 uint32_t ticks_to_expire_minus = ticker->ticks_to_expire_minus;
1495
1496 /* Calculate ticks to expire for this new node */
1497 if (!((ticks_at_start - ticks_current) & BIT(HAL_TICKER_CNTR_MSBIT))) {
1498 /* Most significant bit is 0 so ticks_at_start lies ahead of
1499 * ticks_current: ticks_at_start >= ticks_current
1500 */
1501 ticks_to_expire += ticker_ticks_diff_get(ticks_at_start,
1502 ticks_current);
1503 } else {
1504 /* ticks_current > ticks_at_start
1505 */
1506 uint32_t delta_current_start;
1507
1508 delta_current_start = ticker_ticks_diff_get(ticks_current,
1509 ticks_at_start);
1510 if (ticks_to_expire > delta_current_start) {
1511 /* There's still time until expiration - subtract
1512 * elapsed time
1513 */
1514 ticks_to_expire -= delta_current_start;
1515 } else {
1516 /* Ticker node should have expired (we're late).
1517 * Add 'lateness' to negative drift correction
1518 * (ticks_to_expire_minus) and set ticks_to_expire
1519 * to 0
1520 */
1521 ticks_to_expire_minus +=
1522 (delta_current_start - ticks_to_expire);
1523 ticks_to_expire = 0U;
1524 }
1525 }
1526
1527 /* Handle negative drift correction */
1528 if (ticks_to_expire > ticks_to_expire_minus) {
1529 ticks_to_expire -= ticks_to_expire_minus;
1530 ticks_to_expire_minus = 0U;
1531 } else {
1532 ticks_to_expire_minus -= ticks_to_expire;
1533 ticks_to_expire = 0U;
1534 }
1535
1536 /* Update ticker */
1537 ticker->ticks_to_expire = ticks_to_expire;
1538 ticker->ticks_to_expire_minus = ticks_to_expire_minus;
1539 }
1540
1541 /**
1542 * @brief Add to remainder
1543 *
1544 * @details Calculates whether the remainder should increments expiration time
1545 * for above-microsecond precision counter HW. The remainder enables improved
1546 * ticker precision, but is disabled for sub-microsecond precision
1547 * configurations.
1548 * Note: This is the same functionality as ticker_remainder_inc(), except this
1549 * function allows doing the calculation without modifying any tickers
1550 *
1551 * @param remainder Pointer to remainder to add to
1552 * @param to_add Remainder value to add
1553 *
1554 * @return Returns 1 to indicate ticks increment is due, otherwise 0
1555 * @internal
1556 */
static inline uint8_t ticker_add_to_remainder(uint32_t *remainder, uint32_t to_add)
{
#ifdef HAL_TICKER_REMAINDER_RANGE
	/* Accumulate, then detect whether a whole tick has been gathered */
	uint32_t acc = *remainder + to_add;
	uint8_t carry = 0;

	if ((acc < BIT(31)) &&
	    (acc > (HAL_TICKER_REMAINDER_RANGE >> 1))) {
		/* More than half a tick accumulated: carry one tick out and
		 * keep the (possibly negative, wrapped) residue.
		 */
		acc -= HAL_TICKER_REMAINDER_RANGE;
		carry = 1;
	}

	*remainder = acc;

	return carry;
#else
	/* Sub-microsecond counter HW: remainder handling disabled */
	return 0;
#endif
}
1571
1572 /**
1573 * @brief Increment remainder
1574 *
1575 * @details Calculates whether the remainder should increments expiration time
1576 * for above-microsecond precision counter HW. The remainder enables improved
1577 * ticker precision, but is disabled for sub-microsecond precision
1578 * configurations.
1579 *
1580 * @param ticker Pointer to ticker node
1581 *
1582 * @return Returns 1 to indicate increment is due, otherwise 0
1583 * @internal
1584 */
1585 static uint8_t ticker_remainder_inc(struct ticker_node *ticker)
1586 {
1587 return ticker_add_to_remainder(&ticker->remainder_current, ticker->remainder_periodic);
1588 }
1589
1590 /**
1591 * @brief Decrement remainder
1592 *
1593 * @details Calculates whether the remainder should decrements expiration time
1594 * for above-microsecond precision counter HW. The remainder enables improved
1595 * ticker precision, but is disabled for sub-microsecond precision
1596 * configurations.
1597 *
1598 * @param ticker Pointer to ticker node
1599 *
1600 * @return Returns 1 to indicate decrement is due, otherwise 0
1601 * @internal
1602 */
static uint8_t ticker_remainder_dec(struct ticker_node *ticker)
{
#ifdef HAL_TICKER_REMAINDER_RANGE
	uint32_t rem = ticker->remainder_current;
	uint8_t borrow = 0;

	/* Borrow one whole tick when the remainder would underflow */
	if ((rem >= BIT(31)) ||
	    (rem <= (HAL_TICKER_REMAINDER_RANGE >> 1))) {
		borrow = 1;
		rem += HAL_TICKER_REMAINDER_RANGE;
	}

	ticker->remainder_current = rem - ticker->remainder_periodic;

	return borrow;
#else
	/* Sub-microsecond counter HW: remainder handling disabled */
	return 0;
#endif
}
1619
1620 /**
1621 * @brief Invoke user operation callback
1622 *
1623 * @param user_op Pointer to user operation struct
1624 * @param status User operation status to pass to callback
1625 *
1626 * @internal
1627 */
1628 static void ticker_job_op_cb(struct ticker_user_op *user_op, uint8_t status)
1629 {
1630 user_op->op = TICKER_USER_OP_TYPE_NONE;
1631 user_op->status = status;
1632 if (user_op->fp_op_func) {
1633 user_op->fp_op_func(user_op->status, user_op->op_context);
1634 }
1635 }
1636
1637 /**
1638 * @brief Update and insert ticker node
1639 *
1640 * @details Update ticker node with parameters passed in user operation.
1641 * After update, the ticker is inserted in front as new head.
1642 *
1643 * @param ticker Pointer to ticker node
1644 * @param user_op Pointer to user operation
1645 * @param ticks_current Current ticker instance ticks
1646 * @param ticks_elapsed Expired ticks at time of call
1647 * @param insert_head Pointer to current head (id). Contains id
1648 * from user operation upon exit
1649 * @internal
1650 */
1651 static inline uint32_t ticker_job_node_update(struct ticker_instance *instance,
1652 struct ticker_node *ticker,
1653 struct ticker_user_op *user_op,
1654 uint32_t ticks_now,
1655 uint32_t ticks_current,
1656 uint32_t ticks_elapsed,
1657 uint8_t *insert_head)
1658 {
1659 uint32_t ticks_to_expire = ticker->ticks_to_expire;
1660
1661 ticks_elapsed += ticker_ticks_diff_get(ticks_now, ticks_current);
1662 if (ticks_to_expire > ticks_elapsed) {
1663 ticks_to_expire -= ticks_elapsed;
1664 } else {
1665 ticker->ticks_to_expire_minus += ticks_elapsed -
1666 ticks_to_expire;
1667 ticks_to_expire = 0U;
1668 }
1669
1670 /* Update ticks_to_expire from latency (lazy) input */
1671 if ((ticker->ticks_periodic != 0U) &&
1672 (user_op->params.update.lazy != 0U)) {
1673 user_op->params.update.lazy--;
1674 while ((ticks_to_expire > ticker->ticks_periodic) &&
1675 (ticker->lazy_current > user_op->params.update.lazy)) {
1676 ticks_to_expire -= ticker->ticks_periodic +
1677 ticker_remainder_dec(ticker);
1678 ticker->lazy_current--;
1679 }
1680
1681 while (ticker->lazy_current < user_op->params.update.lazy) {
1682 ticks_to_expire += ticker->ticks_periodic +
1683 ticker_remainder_inc(ticker);
1684 ticker->lazy_current++;
1685 }
1686 ticker->lazy_periodic = user_op->params.update.lazy;
1687 }
1688
1689 /* Update ticks_to_expire from drift input */
1690 ticker->ticks_to_expire = ticks_to_expire +
1691 user_op->params.update.ticks_drift_plus;
1692 ticker->ticks_to_expire_minus +=
1693 user_op->params.update.ticks_drift_minus;
1694
1695 #if defined(CONFIG_BT_TICKER_EXT) && !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
1696 /* TODO: An improvement on this could be to only consider the drift
1697 * (ADV => randomization) if re-sceduling fails. We would still store
1698 * the drift ticks here, but not actually update the node. That would
1699 * allow the ticker to use the full window for re-scheduling.
1700 */
1701 struct ticker_ext *ext_data = ticker->ext_data;
1702
1703 if (ext_data && ext_data->ticks_slot_window != 0U) {
1704 ext_data->ticks_drift =
1705 user_op->params.update.ticks_drift_plus -
1706 user_op->params.update.ticks_drift_minus;
1707 }
1708 #endif /* CONFIG_BT_TICKER_EXT && !CONFIG_BT_TICKER_SLOT_AGNOSTIC */
1709
1710 ticks_to_expire_prep(ticker, ticks_current, ticks_now);
1711
1712 #if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
1713 /* Update ticks_slot parameter from plus/minus input */
1714 ticker->ticks_slot += user_op->params.update.ticks_slot_plus;
1715 if (ticker->ticks_slot > user_op->params.update.ticks_slot_minus) {
1716 ticker->ticks_slot -= user_op->params.update.ticks_slot_minus;
1717 } else {
1718 ticker->ticks_slot = 0U;
1719 }
1720 #endif /* CONFIG_BT_TICKER_SLOT_AGNOSTIC */
1721
1722 /* Update force parameter */
1723 if (user_op->params.update.force != 0U) {
1724 ticker->force = user_op->params.update.force;
1725 }
1726
1727 #if defined(CONFIG_BT_TICKER_EXT) && !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC) &&\
1728 !defined(CONFIG_BT_TICKER_LOW_LAT)
1729 /* Update must_expire parameter */
1730 if (user_op->params.update.must_expire) {
1731 /* 1: disable, 2: enable */
1732 ticker->must_expire = (user_op->params.update.must_expire - 1);
1733 }
1734 #endif /* CONFIG_BT_TICKER_EXT */
1735
1736 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
1737 if (ticker->ext_data && user_op->params.update.expire_info_id != user_op->id) {
1738 if (user_op->params.update.expire_info_id != TICKER_NULL &&
1739 !ticker->ext_data->other_expire_info) {
1740 uint32_t status;
1741
1742 status = ticker_alloc_expire_info(instance, user_op->id);
1743 if (status) {
1744 return status;
1745 }
1746 } else if (user_op->params.update.expire_info_id == TICKER_NULL &&
1747 ticker->ext_data->other_expire_info) {
1748 ticker_free_expire_info(instance, user_op->id);
1749 ticker->ext_data->other_expire_info = NULL;
1750 }
1751
1752 ticker->ext_data->expire_info_id = user_op->params.update.expire_info_id;
1753 if (ticker->ext_data->expire_info_id != TICKER_NULL) {
1754 ticker_mark_expire_info_outdated(instance, user_op->id);
1755 }
1756 }
1757 #else /* !CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
1758 ARG_UNUSED(instance);
1759 #endif /* !CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
1760
1761 ticker->next = *insert_head;
1762 *insert_head = user_op->id;
1763
1764 return TICKER_STATUS_SUCCESS;
1765 }
1766
1767 /**
1768 * @brief Manage user update operation
1769 *
1770 * @details Called by ticker_job to execute an update request, or set node
1771 * as done if request is not update. Invokes user operation callback before
1772 * exit.
1773 *
1774 * @param instance Pointer to ticker instance
1775 * @param ticker Pointer to ticker node
1776 * @param user_op Pointer to user operation
1777 * @param ticks_elapsed Expired ticks at time of call
1778 * @param insert_head Pointer to current head (id). For update operation,
1779 * contains operation id upon exit
1780 * @internal
1781 */
static inline void ticker_job_node_manage(struct ticker_instance *instance,
					  struct ticker_node *ticker,
					  struct ticker_user_op *user_op,
					  uint32_t ticks_now,
					  uint32_t ticks_elapsed,
					  uint8_t *insert_head)
{
	/* Handle update of ticker by re-inserting it back. */
	if (IS_ENABLED(CONFIG_BT_TICKER_UPDATE) &&
	    (user_op->op == TICKER_USER_OP_TYPE_UPDATE)) {
		/* Remove ticker node from list */
		ticker->ticks_to_expire = ticker_dequeue(instance, user_op->id);

		/* Update node and insert back */
		ticker_job_node_update(instance, ticker, user_op, ticks_now,
				       instance->ticks_current, ticks_elapsed,
				       insert_head);

#if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
		ticker_mark_expire_info_outdated(instance, user_op->id);
#endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */

		/* Set schedule status of node
		 * as updating.
		 */
		ticker->req++;
	} else {
		/* If stop/stop_abs requested, then dequeue node */
		if (user_op->op != TICKER_USER_OP_TYPE_YIELD_ABS) {
			/* Remove ticker node from list */
			ticker->ticks_to_expire = ticker_dequeue(instance,
								 user_op->id);

#if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
			/* Release any expire info owned by the stopped node */
			if (ticker->ext_data && ticker->ext_data->expire_info_id != TICKER_NULL) {
				ticker_free_expire_info(instance, user_op->id);
				ticker->ext_data->other_expire_info = NULL;
			}

			ticker_mark_expire_info_outdated(instance, user_op->id);
#endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */

			/* Reset schedule status of node: req == ack marks the
			 * node idle/stopped.
			 */
			ticker->req = ticker->ack;
		}

#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
		/* If yield_abs/stop/stop_abs then adjust ticks_slot_previous */
		if (instance->ticker_id_slot_previous == user_op->id) {
			uint32_t ticks_current;
			uint32_t ticks_at_yield;
			uint32_t ticks_used;

			if (user_op->op != TICKER_USER_OP_TYPE_YIELD_ABS) {
				instance->ticker_id_slot_previous = TICKER_NULL;
			}

			/* _ABS operations carry an explicit yield timestamp;
			 * otherwise the current time is the yield point.
			 */
			if ((user_op->op == TICKER_USER_OP_TYPE_YIELD_ABS) ||
			    (user_op->op == TICKER_USER_OP_TYPE_STOP_ABS)) {
				ticks_at_yield =
					user_op->params.yield.ticks_at_yield;
			} else {
				ticks_at_yield = ticks_now;
			}

			/* Compute how much of the reserved slot was actually
			 * used up to the yield point (MSB of the difference
			 * tells which side of the anchor the yield lies on).
			 */
			ticks_current = instance->ticks_current;
			if (!((ticks_at_yield - ticks_current) &
			      BIT(HAL_TICKER_CNTR_MSBIT))) {
				ticks_used = ticks_elapsed +
					ticker_ticks_diff_get(ticks_at_yield,
							      ticks_current);
			} else {
				ticks_used =
					ticker_ticks_diff_get(ticks_current,
							      ticks_at_yield);
				if (ticks_elapsed > ticks_used) {
					ticks_used = ticks_elapsed -
						     ticks_used;
				} else {
					ticks_used = 0;
				}
			}

			/* Shrink the remaining reservation to the used part */
			if (instance->ticks_slot_previous > ticks_used) {
				instance->ticks_slot_previous = ticks_used;
			}
		}
#endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */

	}

	/* op success, @todo update may fail during
	 * actual insert! need to design that yet.
	 */
	ticker_job_op_cb(user_op, TICKER_STATUS_SUCCESS);
}
1878
1879 /**
1880 * @brief Manage user operations list
1881 *
1882 * @details Called by ticker_job to execute requested user operations. A
1883 * number of operation may be queued since last ticker_job. Only update and
1884 * stop operations are handled. Start is handled implicitly by inserting
1885 * the ticker node in ticker_job_list_insert.
1886 *
1887 * @param instance Pointer to ticker instance
1888 * @param ticks_elapsed Expired ticks at time of call
1889 * @param insert_head Pointer to current head (id). For update operation,
1890 * contains operation id upon exit
1891 * @return Returns 1 if operations is pending, 0 if all operations are done.
1892 * @internal
1893 */
static inline uint8_t ticker_job_list_manage(struct ticker_instance *instance,
					     uint32_t ticks_now,
					     uint32_t ticks_elapsed,
					     uint8_t *insert_head)
{
	uint8_t pending;
	struct ticker_node *node;
	struct ticker_user *users;
	uint8_t count_user;

	pending = 0U;
	node = &instance->nodes[0];
	users = &instance->users[0];
	count_user = instance->count_user;
	/* Traverse users - highest id first */
	while (count_user--) {
		struct ticker_user *user;
		struct ticker_user_op *user_ops;

		user = &users[count_user];
		user_ops = &user->user_op[0];
		/* Traverse user operation queue - middle to last (with wrap).
		 * This operation updates user->middle to be the past the last
		 * processed user operation. This is used later by
		 * ticker_job_list_insert, for handling user->first to middle.
		 */
		while (user->middle != user->last) {
			struct ticker_user_op *user_op;
			struct ticker_node *ticker;
			uint8_t state;
			uint8_t prev;
			uint8_t middle;

			user_op = &user_ops[user->middle];

			/* Increment index and handle wrapping; keep the
			 * previous index in case the op must be deferred.
			 */
			prev = user->middle;
			middle = user->middle + 1;
			if (middle == user->count_user_op) {
				middle = 0U;
			}
			user->middle = middle;

			ticker = &node[user_op->id];

			/* if op is start, then skip update and stop ops */
			if (user_op->op < TICKER_USER_OP_TYPE_UPDATE) {
#if defined(CONFIG_BT_TICKER_PREFER_START_BEFORE_STOP)
				if (user_op->op == TICKER_USER_OP_TYPE_START) {
					/* Set start pending to validate a
					 * successive, inline stop operation.
					 */
					ticker->start_pending = 1U;
				}
#endif /* CONFIG_BT_TICKER_PREFER_START_BEFORE_STOP */

				continue;
			}

			/* determine the ticker state: (req - ack) == 0 means
			 * not started, 1 means started/active.
			 */
			state = (ticker->req - ticker->ack) & 0xff;

			/* if not started or update not required,
			 * set status and continue.
			 * (An update with all-zero parameters is a no-op and
			 * is rejected here as well.)
			 */
			if ((user_op->op > TICKER_USER_OP_TYPE_STOP_ABS) ||
			    ((state == 0U) &&
#if defined(CONFIG_BT_TICKER_PREFER_START_BEFORE_STOP)
			     !ticker->start_pending &&
#endif /* CONFIG_BT_TICKER_PREFER_START_BEFORE_STOP */
			     (user_op->op != TICKER_USER_OP_TYPE_YIELD_ABS)) ||
			    ((user_op->op == TICKER_USER_OP_TYPE_UPDATE) &&
			     (user_op->params.update.ticks_drift_plus == 0U) &&
			     (user_op->params.update.ticks_drift_minus == 0U) &&
#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
			     (user_op->params.update.ticks_slot_plus == 0U) &&
			     (user_op->params.update.ticks_slot_minus == 0U) &&
#endif /* CONFIG_BT_TICKER_SLOT_AGNOSTIC */
#if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
			     (!ticker->ext_data ||
			      user_op->params.update.expire_info_id == user_op->id) &&
#endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
			     (user_op->params.update.lazy == 0U) &&
			     (user_op->params.update.force == 0U))) {
				ticker_job_op_cb(user_op,
						 TICKER_STATUS_FAILURE);
				continue;
			}

			/* Delete or yield node, if not expired */
			if ((state == 1U) ||
			    (user_op->op == TICKER_USER_OP_TYPE_YIELD_ABS)) {
				ticker_job_node_manage(instance, ticker,
						       user_op, ticks_now,
						       ticks_elapsed,
						       insert_head);
			} else {
				/* Update on expired node requested, deferring
				 * update until bottom half finishes.
				 */
				/* sched job to run after worker bottom half.
				 */
				instance->sched_cb(TICKER_CALL_ID_JOB,
						   TICKER_CALL_ID_JOB, 1,
						   instance);

				/* Update the index upto which management is
				 * complete.
				 */
				user->middle = prev;

				pending = 1U;
				break;
			}
		}
	}

	return pending;
}
2013
2014 /**
2015 * @brief Handle ticker node expirations
2016 *
2017 * @details Called by ticker_job to schedule next expirations. Expired ticker
2018 * nodes are removed from the active list, and re-inserted if periodic.
2019 *
 * @param instance       Pointer to ticker instance
 * @param ticks_now      Absolute ticks at time of call, used together with
 *                       ticks_previous to derive the accumulated latency
 * @param ticks_previous Absolute ticks at ticker_job start
 * @param ticks_elapsed  Expired ticks at time of call
 * @param insert_head    Pointer to current head (id). Updated if nodes are
 *                       re-inserted
2025 * @internal
2026 */
static inline void ticker_job_worker_bh(struct ticker_instance *instance,
					uint32_t ticks_now,
					uint32_t ticks_previous,
					uint32_t ticks_elapsed,
					uint8_t *insert_head)
{
	struct ticker_node *node;
	uint32_t ticks_expired;
	uint32_t ticks_latency;

	/* Total latency accumulated since ticker_job started */
	ticks_latency = ticker_ticks_diff_get(ticks_now, ticks_previous);

	node = &instance->nodes[0];
	ticks_expired = 0U;
	while (instance->ticker_id_head != TICKER_NULL) {
		uint8_t skip_collision = 0U;
		struct ticker_node *ticker;
		uint32_t ticks_to_expire;
		uint8_t id_expired;
		uint8_t state;

		/* auto variable for current ticker node */
		id_expired = instance->ticker_id_head;
		ticker = &node[id_expired];

		/* Do nothing if ticker did not expire */
		ticks_to_expire = ticker->ticks_to_expire;
		if (ticks_elapsed < ticks_to_expire) {
			ticker->ticks_to_expire -= ticks_elapsed;
			break;
		}

		/* decrement ticks_elapsed and collect expired ticks */
		ticks_elapsed -= ticks_to_expire;
		ticks_latency -= ticks_to_expire;
		ticks_expired += ticks_to_expire;

		/* req/ack imbalance reflects the node's scheduling state */
		state = (ticker->req - ticker->ack) & 0xff;

#if !defined(CONFIG_BT_TICKER_LOW_LAT)
		/* Node with lazy count did not expire with callback, but
		 * was either a collision or re-scheduled. This node should
		 * not define the active slot reservation (slot_previous).
		 */
		skip_collision = (ticker->lazy_current != 0U);
#endif /* !CONFIG_BT_TICKER_LOW_LAT */

#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
		/* decrement ticks_slot_previous */
		if (instance->ticks_slot_previous > ticks_to_expire) {
			instance->ticks_slot_previous -= ticks_to_expire;
		} else {
			instance->ticker_id_slot_previous = TICKER_NULL;
			instance->ticks_slot_previous = 0U;
		}

		uint32_t ticker_ticks_slot;

		/* Unreserved nodes with a slot window use the reschedule
		 * margin as their effective slot duration
		 */
		if (TICKER_HAS_SLOT_WINDOW(ticker) && !ticker->ticks_slot) {
			ticker_ticks_slot = HAL_TICKER_RESCHEDULE_MARGIN;
		} else {
			ticker_ticks_slot = ticker->ticks_slot;
		}

		/* If a reschedule is set pending, we will need to keep
		 * the slot_previous information
		 */
		if (ticker_ticks_slot && (state == 2U) && !skip_collision &&
		    !TICKER_RESCHEDULE_PENDING(ticker)) {
			instance->ticker_id_slot_previous = id_expired;
			instance->ticks_slot_previous = ticker_ticks_slot;
		}
#endif /* CONFIG_BT_TICKER_SLOT_AGNOSTIC */

		/* ticker expired, set ticks_to_expire zero */
		ticker->ticks_to_expire = 0U;

#if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
		ticker_mark_expire_info_outdated(instance, instance->ticker_id_head);
#endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */

		/* remove the expired ticker from head */
		instance->ticker_id_head = ticker->next;

		/* Ticker will be restarted if periodic or to be re-scheduled */
		if ((ticker->ticks_periodic != 0U) ||
		    TICKER_RESCHEDULE_PENDING(ticker)) {
#if !defined(CONFIG_BT_TICKER_LOW_LAT)
			if (TICKER_RESCHEDULE_PENDING(ticker)) {
				/* Set the re-scheduled node to now. Will be
				 * collision resolved after all nodes are
				 * restarted
				 */
				ticker->ticks_to_expire = ticks_elapsed;

				/* Reset ticker state, so that it is put
				 * back in requested state later down
				 * in the code.
				 */
				ticker->req = ticker->ack;
			} else {
				uint16_t lazy_periodic;
				uint32_t count;
				uint16_t lazy;

				/* If not skipped, apply lazy_periodic */
				if (!ticker->lazy_current) {
					lazy_periodic = ticker->lazy_periodic;
				} else {
					lazy_periodic = 0U;

					/* Reset ticker state, so that it is
					 * put back in requested state later
					 * down in the code.
					 */
					ticker->req = ticker->ack;
				}

				/* Reload ticks_to_expire with at least one
				 * period.
				 */
				ticks_to_expire = 0U;
				count = 1 + lazy_periodic;
				while (count--) {
					ticks_to_expire +=
						ticker->ticks_periodic;
					ticks_to_expire +=
						ticker_remainder_inc(ticker);
				}

				/* Skip intervals that have elapsed w.r.t.
				 * current ticks.
				 */
				lazy = 0U;

				/* must_expire nodes never skip intervals */
				if (0) {
#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
				} else if (!ticker->must_expire) {
#else
				} else {
#endif /* CONFIG_BT_TICKER_SLOT_AGNOSTIC */
					while (ticks_to_expire <
					       ticks_latency) {
						ticks_to_expire +=
							ticker->ticks_periodic;
						ticks_to_expire +=
							ticker_remainder_inc(ticker);
						lazy++;
					}
				}

				/* Use the calculated ticks to expire and
				 * laziness.
				 */
				ticker->ticks_to_expire = ticks_to_expire;
				ticker->lazy_current += (lazy_periodic + lazy);
			}

			ticks_to_expire_prep(ticker, instance->ticks_current,
					     ((ticks_previous + ticks_expired) &
					      HAL_TICKER_CNTR_MASK));
#else /* CONFIG_BT_TICKER_LOW_LAT */
			uint32_t count;
			uint16_t lazy;

			/* Prepare for next interval */
			ticks_to_expire = 0U;
			count = 1 + ticker->lazy_periodic;
			while (count--) {
				ticks_to_expire += ticker->ticks_periodic;
				ticks_to_expire += ticker_remainder_inc(ticker);
			}

			/* Skip intervals that have elapsed w.r.t. current
			 * ticks.
			 */
			lazy = 0U;

			/* Schedule to a tick in the future */
			while (ticks_to_expire < ticks_latency) {
				ticks_to_expire += ticker->ticks_periodic;
				ticks_to_expire += ticker_remainder_inc(ticker);
				lazy++;
			}

			/* Use the calculated ticks to expire and laziness. */
			ticker->ticks_to_expire = ticks_to_expire;
			ticker->lazy_current = ticker->lazy_periodic + lazy;

			ticks_to_expire_prep(ticker, instance->ticks_current,
					     ((ticks_previous + ticks_expired) &
					      HAL_TICKER_CNTR_MASK));

			/* Reset force state of the node */
			ticker->force = 0U;
#endif /* CONFIG_BT_TICKER_LOW_LAT */

			/* Add to insert list */
			ticker->next = *insert_head;
			*insert_head = id_expired;

			/* set schedule status of node as restarting. */
			ticker->req++;
		} else {
#if !defined(CONFIG_BT_TICKER_LOW_LAT)
			/* A single-shot ticker in requested or skipped due to
			 * collision shall generate an operation function
			 * callback with failure status.
			 */
			if (state && ((state == 1U) || skip_collision) &&
			    ticker->fp_op_func) {
				ticker->fp_op_func(TICKER_STATUS_FAILURE,
						   ticker->op_context);
			}
#endif /* !CONFIG_BT_TICKER_LOW_LAT */

			/* reset schedule status of node */
			ticker->req = ticker->ack;
		}
	}
}
2248
/**
 * @brief Prepare ticker node start
 *
 * @details Called by ticker_job to prepare ticker node start operation.
 *
 * @param instance      Pointer to ticker instance
 * @param ticker        Pointer to ticker node
 * @param user_op       Pointer to user operation
 * @param ticks_current Absolute ticks at ticker_job start
 *
 * @return TICKER_STATUS_SUCCESS, or a failure status if expire info
 *         could not be allocated
 * @internal
 */
static inline uint32_t ticker_job_op_start(struct ticker_instance *instance,
					   struct ticker_node *ticker,
					   struct ticker_user_op *user_op,
					   uint32_t ticks_current)
{
	struct ticker_user_op_start *start = (void *)&user_op->params.start;

#if defined(CONFIG_BT_TICKER_LOW_LAT)
	/* Must expire is not supported in compatibility mode */
	LL_ASSERT(start->lazy < TICKER_LAZY_MUST_EXPIRE_KEEP);
#else
#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
	if (start->lazy != TICKER_LAZY_MUST_EXPIRE_KEEP) {
		/* Update the must_expire state */
		ticker->must_expire =
			(start->lazy == TICKER_LAZY_MUST_EXPIRE) ? 1U : 0U;
	}
#endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */
#endif /* CONFIG_BT_TICKER_LOW_LAT */

#if defined(CONFIG_BT_TICKER_EXT)
	ticker->ext_data = start->ext_data;

#if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
	if (ticker->ext_data) {
		ticker->ext_data->other_expire_info = NULL;
		if (ticker->ext_data->expire_info_id != TICKER_NULL) {
			uint32_t status;

			/* Allocation may fail - abort the start and
			 * propagate the failure status to the caller
			 */
			status = ticker_alloc_expire_info(instance, user_op->id);
			if (status) {
				return status;
			}
		}
	}

	ticker_mark_expire_info_outdated(instance, user_op->id);
#else /* !CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
	ARG_UNUSED(instance);
#endif /* !CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
#else /* !CONFIG_BT_TICKER_EXT */
	ARG_UNUSED(instance);
#endif /* !CONFIG_BT_TICKER_EXT */

	/* Copy start parameters into the ticker node */
	ticker->ticks_periodic = start->ticks_periodic;
	ticker->remainder_periodic = start->remainder_periodic;
	/* TICKER_LAZY_MUST_EXPIRE* values are markers, not lazy counts */
	ticker->lazy_periodic =
		(start->lazy < TICKER_LAZY_MUST_EXPIRE_KEEP) ? start->lazy :
		0U;
#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
	ticker->ticks_slot = start->ticks_slot;
#endif /* CONFIG_BT_TICKER_SLOT_AGNOSTIC */
	ticker->timeout_func = start->fp_timeout_func;
	ticker->context = start->context;
	ticker->ticks_to_expire = start->ticks_first;
	ticker->ticks_to_expire_minus = 0U;
	ticks_to_expire_prep(ticker, ticks_current, start->ticks_at_start);
#if defined(CONFIG_BT_TICKER_REMAINDER)
	ticker->remainder_current = start->remainder_first;
#else /* !CONFIG_BT_TICKER_REMAINDER */
	ticker->remainder_current = 0U;
#endif /* !CONFIG_BT_TICKER_REMAINDER */
	ticker->lazy_current = 0U;
	/* Non-zero force lets this node win collision resolution on
	 * insertion (compared against the colliding node's force)
	 */
	ticker->force = 1U;

	return TICKER_STATUS_SUCCESS;
}
2327
2328 #if !defined(CONFIG_BT_TICKER_LOW_LAT)
2329 /**
2330 * @brief Insert new ticker node
2331 *
2332 * @details Called by ticker_job to insert a new ticker node. If node collides
2333 * with existing ticker nodes, either the new node is postponed, or colliding
2334 * node is un-scheduled. Decision is based on latency and the force-state of
2335 * individual nodes.
2336 *
2337 * @param instance Pointer to ticker instance
2338 * @param id_insert Id of ticker to insert
2339 * @param ticker Pointer to ticker node to insert
2340 * @param insert_head Pointer to current head. Updated if colliding nodes
2341 * are un-scheduled
2342 * @internal
2343 */
2344 static inline uint8_t ticker_job_insert(struct ticker_instance *instance,
2345 uint8_t id_insert,
2346 struct ticker_node *ticker,
2347 uint8_t *insert_head)
2348 {
2349 ARG_UNUSED(insert_head);
2350
2351 /* Prepare to insert */
2352 ticker->next = TICKER_NULL;
2353
2354 /* Enqueue the ticker node */
2355 (void)ticker_enqueue(instance, id_insert);
2356
2357 /* Inserted/Scheduled */
2358 ticker->req = ticker->ack + 1;
2359
2360 return TICKER_STATUS_SUCCESS;
2361 }
2362
2363 #if defined(CONFIG_BT_TICKER_EXT) && !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
2364 /**
2365 * @brief Re-schedule ticker nodes within slot_window
2366 *
2367 * @details This function is responsible for re-scheduling ticker nodes
2368 * which have been marked for re-scheduling in ticker_worker. These nodes
2369 * have a non-zero ticks_slot_window configuration, which indicates a
2370 * valid range in which to re-schedule the node.
2371 * The function iterates over nodes, and handles one re-schedule at a
2372 * time. After a re-schedule, nodes are once again iterated until no more
2373 * nodes are marked for re-scheduling.
2374 *
 * @param instance Pointer to ticker instance
 * @param ticks_elapsed Number of ticks elapsed since last ticker job
 *
 * @return 1 if any ticker node was re-scheduled, 0 otherwise
 *
 * @internal
 */
static uint8_t ticker_job_reschedule_in_window(struct ticker_instance *instance,
					       uint32_t ticks_elapsed)
{
	struct ticker_node *nodes;
	uint8_t rescheduling;
	uint8_t rescheduled;

	nodes = &instance->nodes[0];

	/* Do until all pending re-schedules handled */
	rescheduling = 1U;
	rescheduled = 0U;
	while (rescheduling) {
		struct ticker_node *ticker_resched;
		uint32_t ticks_to_expire_offset;
		uint8_t ticker_id_resched_prev;
		struct ticker_ext *ext_data;
		uint32_t ticks_start_offset;
		uint32_t window_start_ticks;
		uint32_t ticks_slot_window;
		uint8_t ticker_id_resched;
		uint32_t ticks_to_expire;
		uint8_t ticker_id_prev;
		uint8_t ticker_id_next;
		uint32_t ticks_slot;

		rescheduling = 0U;

		/* Find first pending re-schedule */
		ticker_id_resched_prev = TICKER_NULL;
		ticker_id_resched = instance->ticker_id_head;
		while (ticker_id_resched != TICKER_NULL) {
			ticker_resched = &nodes[ticker_id_resched];
			if (TICKER_RESCHEDULE_PENDING(ticker_resched)) {
				/* Pending reschedule found */
				break;
			}

			ticker_id_resched_prev = ticker_id_resched;
			ticker_id_resched = ticker_resched->next;
		}
		if (ticker_id_resched == TICKER_NULL) {
			/* Done */
			break;
		}

		/* Check for intersection with already active node */
		window_start_ticks = 0U;
		if (instance->ticks_slot_previous > ticks_elapsed) {
			/* Active node intersects - window starts after end of
			 * active slot
			 */
			window_start_ticks = instance->ticks_slot_previous -
					     ticks_elapsed;
		}

		ticker_id_next = ticker_resched->next;

		/* If drift was applied to this node, this must be
		 * taken into consideration. Reduce the window with
		 * the amount of drift already applied.
		 *
		 * TODO: An improvement on this could be to only consider the
		 * drift (ADV => randomization) if re-scheduling fails. Then the
		 * ticker would have the best possible window to re-schedule in
		 * and not be restricted to ticks_slot_window - ticks_drift.
		 */
		ext_data = ticker_resched->ext_data;
		if (ext_data->ticks_drift < ext_data->ticks_slot_window) {
			ticks_slot_window = ext_data->ticks_slot_window -
					    ext_data->ticks_drift;
		} else {
			/* Window has been exhausted - we can't reschedule */
			ticker_id_next = TICKER_NULL;

			/* Assignment will be unused when TICKER_NULL */
			ticks_slot_window = 0U;
		}

		/* Use ticker's reserved time ticks_slot, else for unreserved
		 * tickers use the reschedule margin as ticks_slot.
		 */
		if (ticker_resched->ticks_slot) {
			ticks_slot = ticker_resched->ticks_slot;
		} else {
			LL_ASSERT(TICKER_HAS_SLOT_WINDOW(ticker_resched));

			ticks_slot = HAL_TICKER_RESCHEDULE_MARGIN;
		}

		/* Try to find available slot for re-scheduling */
		ticks_to_expire_offset = 0U;
		ticks_start_offset = 0U;
		ticks_to_expire = 0U;
		while ((ticker_id_next != TICKER_NULL) &&
		       ((ticks_start_offset + ticks_slot) <=
			ticks_slot_window)) {
			struct ticker_node *ticker_next;
			uint32_t window_end_ticks;

			ticker_next = &nodes[ticker_id_next];
			ticks_to_expire_offset += ticker_next->ticks_to_expire;

			/* Skip other pending re-schedule nodes and
			 * tickers with no reservation or not periodic
			 */
			if (TICKER_RESCHEDULE_PENDING(ticker_next) ||
			    !ticker_next->ticks_slot ||
			    !ticker_next->ticks_periodic) {
				ticker_id_next = ticker_next->next;

				continue;
			}

			/* Calculate end of window. Since window may be aligned
			 * with expiry of next node, we add a margin
			 */
			if (ticks_to_expire_offset >
			    HAL_TICKER_RESCHEDULE_MARGIN) {
				window_end_ticks =
					MIN(ticks_slot_window,
					    ticks_start_offset +
					    ticks_to_expire_offset -
					    HAL_TICKER_RESCHEDULE_MARGIN);
			} else {
				/* Next expiry is too close - try the next
				 * node
				 */
				window_end_ticks = 0;
			}

			/* Calculate new ticks_to_expire as end of window minus
			 * slot size.
			 */
			if (window_end_ticks > (ticks_start_offset +
						ticks_slot)) {
				if (!ticker_resched->ticks_slot) {
					/* Place at start of window */
					ticks_to_expire = window_start_ticks;
				} else {
					/* Place at end of window. This ensures
					 * that ticker with slot window and that
					 * uses ticks_slot does not take the
					 * interval of the colliding ticker.
					 */
					ticks_to_expire = window_end_ticks -
							  ticks_slot;
				}
			} else {
				/* No space in window - try the next node */
				ticks_to_expire = 0;
			}

			/* Decide if the re-scheduling ticker node fits in the
			 * slot found - break if it fits
			 */
			if ((ticks_to_expire != 0U) &&
			    (ticks_to_expire >= window_start_ticks) &&
			    (ticks_to_expire <= (window_end_ticks -
						 ticks_slot))) {
				/* Re-scheduled node fits before this node */
				break;
			}

			/* We didn't find a valid slot for re-scheduling - try
			 * the next node
			 */
			ticks_start_offset += ticks_to_expire_offset;
			window_start_ticks = ticks_start_offset +
					     ticker_next->ticks_slot;
			ticks_to_expire_offset = 0U;

			if (!ticker_resched->ticks_slot) {
				/* Try at the end of the next node */
				ticks_to_expire = window_start_ticks;
			} else {
				/* Try at the end of the slot window. This
				 * ensures that ticker with slot window and that
				 * uses ticks_slot does not take the interval of
				 * the colliding ticker.
				 */
				ticks_to_expire = ticks_slot_window -
						  ticks_slot;
			}

			ticker_id_next = ticker_next->next;
		}

		/* Record the newly applied drift and commit the new expiry */
		ext_data->ticks_drift += ticks_to_expire -
					 ticker_resched->ticks_to_expire;
		ticker_resched->ticks_to_expire = ticks_to_expire;

		/* Place the ticker node sorted by expiration time and adjust
		 * delta times
		 */
		ticker_id_next = ticker_resched->next;
		ticker_id_prev = TICKER_NULL;
		while (ticker_id_next != TICKER_NULL) {
			struct ticker_node *ticker_next;

			ticker_next = &nodes[ticker_id_next];
			if (ticker_resched->ticks_to_expire >
			    ticker_next->ticks_to_expire) {
				/* Node is after this - adjust delta */
				ticker_resched->ticks_to_expire -=
					ticker_next->ticks_to_expire;
			} else {
				/* Node is before this one */
				ticker_next->ticks_to_expire -=
					ticker_resched->ticks_to_expire;
				break;
			}
			ticker_id_prev = ticker_id_next;
			ticker_id_next = ticker_next->next;
		}

		/* If the node moved in the list, insert it */
		if (ticker_id_prev != TICKER_NULL) {
			/* Remove node from its current position in list */
			if (ticker_id_resched_prev != TICKER_NULL) {
				/* Node was not at the head of the list */
				nodes[ticker_id_resched_prev].next =
					ticker_resched->next;
			} else {
				/* Node was at the head, move head forward */
				instance->ticker_id_head = ticker_resched->next;
			}

			/* Link inserted node */
			ticker_resched->next = nodes[ticker_id_prev].next;
			nodes[ticker_id_prev].next = ticker_id_resched;
		}

		/* Remove latency added in ticker_worker */
		ticker_resched->lazy_current--;

		/* Prevent repeated re-scheduling */
		ext_data->reschedule_state =
			TICKER_RESCHEDULE_STATE_DONE;

#if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
		ticker_mark_expire_info_outdated(instance, ticker_id_resched);
#endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */

		/* Check for other pending re-schedules and set exit flag */
		rescheduling = 1U;
		rescheduled = 1U;
	}

	return rescheduled;
}
2631 #endif /* CONFIG_BT_TICKER_EXT && !CONFIG_BT_TICKER_SLOT_AGNOSTIC */
2632 #else /* CONFIG_BT_TICKER_LOW_LAT */
2633
2634 /**
2635 * @brief Insert new ticker node
2636 *
2637 * @details Called by ticker_job to insert a new ticker node. If node collides
2638 * with existing ticker nodes, either the new node is postponed, or colliding
2639 * node is un-scheduled. Decision is based on latency and the force-state of
2640 * individual nodes.
2641 *
2642 * @param instance Pointer to ticker instance
2643 * @param id_insert Id of ticker to insert
2644 * @param ticker Pointer to ticker node to insert
2645 * @param insert_head Pointer to current head. Updated if colliding nodes
2646 * are un-scheduled
2647 * @internal
2648 */
static inline uint8_t ticker_job_insert(struct ticker_instance *instance,
					uint8_t id_insert,
					struct ticker_node *ticker,
					uint8_t *insert_head)
{
	struct ticker_node *node = &instance->nodes[0];
	uint8_t id_collide;
	uint16_t skip;

	/* Prepare to insert */
	ticker->next = TICKER_NULL;

	/* No. of times ticker has skipped its interval */
	if (ticker->lazy_current > ticker->lazy_periodic) {
		skip = ticker->lazy_current -
		       ticker->lazy_periodic;
	} else {
		skip = 0U;
	}

	/* If insert collides, remove colliding or advance to next interval */
	while (id_insert !=
	       (id_collide = ticker_enqueue(instance, id_insert))) {
		/* Check for collision */
		if (id_collide != TICKER_NULL) {
			struct ticker_node *ticker_collide = &node[id_collide];
			uint16_t skip_collide;

			/* No. of times colliding ticker has skipped its
			 * interval.
			 */
			if (ticker_collide->lazy_current >
			    ticker_collide->lazy_periodic) {
				skip_collide = ticker_collide->lazy_current -
					       ticker_collide->lazy_periodic;
			} else {
				skip_collide = 0U;
			}

			/* Check if colliding node should be un-scheduled:
			 * periodic node that has skipped no more intervals
			 * than us and has a lower force state loses.
			 */
			if (ticker_collide->ticks_periodic &&
			    skip_collide <= skip &&
			    ticker_collide->force < ticker->force) {
				/* Dequeue and get the remainder of ticks
				 * to expire.
				 */
				ticker_collide->ticks_to_expire =
					ticker_dequeue(instance, id_collide);
				/* Unschedule node */
				ticker_collide->req = ticker_collide->ack;

				/* Enqueue for re-insertion */
				ticker_collide->next = *insert_head;
				*insert_head = id_collide;

				continue;
			}
		}

		/* occupied, try next interval */
		if (ticker->ticks_periodic != 0U) {
			ticker->ticks_to_expire += ticker->ticks_periodic +
						   ticker_remainder_inc(ticker);
			ticker->lazy_current++;

			/* No. of times ticker has skipped its interval */
			if (ticker->lazy_current > ticker->lazy_periodic) {
				skip = ticker->lazy_current -
				       ticker->lazy_periodic;
			} else {
				skip = 0U;
			}

			/* Remove any accumulated drift (possibly added due to
			 * ticker job execution latencies).
			 */
			if (ticker->ticks_to_expire >
			    ticker->ticks_to_expire_minus) {
				ticker->ticks_to_expire -=
					ticker->ticks_to_expire_minus;
				ticker->ticks_to_expire_minus = 0U;
			} else {
				ticker->ticks_to_expire_minus -=
					ticker->ticks_to_expire;
				ticker->ticks_to_expire = 0U;
			}
		} else {
			/* Single-shot ticker cannot be moved to a next
			 * interval - insertion fails.
			 */
			return TICKER_STATUS_FAILURE;
		}
	}

	/* Inserted/Scheduled */
	ticker->req = ticker->ack + 1;

	return TICKER_STATUS_SUCCESS;
}
2745 #endif /* CONFIG_BT_TICKER_LOW_LAT */
2746
2747 /**
2748 * @brief Insert and start ticker nodes for all users
2749 *
2750 * @details Called by ticker_job to prepare, insert and start ticker nodes
2751 * for all users. Specifying insert_head to other than TICKER_NULL causes
2752 * that ticker node to be inserted first.
2753 *
2754 * @param instance Pointer to ticker instance
2755 * @param insert_head Id of ticker node to insert, or TICKER_NULL if only
2756 * handle user operation inserts
2757 * @internal
2758 */
2759 static inline void ticker_job_list_insert(struct ticker_instance *instance,
2760 uint8_t insert_head)
2761 {
2762 struct ticker_node *node;
2763 struct ticker_user *users;
2764 uint8_t count_user;
2765
2766 node = &instance->nodes[0];
2767 users = &instance->users[0];
2768 count_user = instance->count_user;
2769
2770 /* Iterate through all user ids */
2771 while (count_user--) {
2772 struct ticker_user_op *user_ops;
2773 struct ticker_user *user;
2774 uint8_t user_ops_first;
2775
2776 user = &users[count_user];
2777 user_ops = (void *)&user->user_op[0];
2778 user_ops_first = user->first;
2779 /* Traverse user operation queue - first to middle (wrap) */
2780 while ((insert_head != TICKER_NULL) ||
2781 (user_ops_first != user->middle)) {
2782 struct ticker_user_op *user_op;
2783 struct ticker_node *ticker;
2784 uint8_t id_insert;
2785 uint8_t status = TICKER_STATUS_SUCCESS;
2786
2787 if (insert_head != TICKER_NULL) {
2788 /* Prepare insert of ticker node specified by
2789 * insert_head
2790 */
2791 id_insert = insert_head;
2792 ticker = &node[id_insert];
2793 insert_head = ticker->next;
2794
2795 user_op = NULL;
2796 } else {
2797 /* Prepare insert of any ticker nodes requested
2798 * via user operation TICKER_USER_OP_TYPE_START
2799 */
2800 uint8_t first;
2801
2802 user_op = &user_ops[user_ops_first];
2803 first = user_ops_first + 1;
2804 if (first == user->count_user_op) {
2805 first = 0U;
2806 }
2807 user_ops_first = first;
2808
2809 id_insert = user_op->id;
2810 ticker = &node[id_insert];
2811 if (user_op->op != TICKER_USER_OP_TYPE_START) {
2812 /* User operation is not start - skip
2813 * to next operation
2814 */
2815 continue;
2816 }
2817
2818 #if defined(CONFIG_BT_TICKER_PREFER_START_BEFORE_STOP)
2819 ticker->start_pending = 0U;
2820 #endif /* CONFIG_BT_TICKER_PREFER_START_BEFORE_STOP */
2821
2822 if (((ticker->req -
2823 ticker->ack) & 0xff) != 0U) {
2824 ticker_job_op_cb(user_op,
2825 TICKER_STATUS_FAILURE);
2826 continue;
2827 }
2828
2829 /* Prepare ticker for start */
2830 status = ticker_job_op_start(instance, ticker, user_op,
2831 instance->ticks_current);
2832 }
2833
2834 if (!status) {
2835 /* Insert ticker node */
2836 status = ticker_job_insert(instance, id_insert, ticker,
2837 &insert_head);
2838 }
2839
2840 if (user_op) {
2841 ticker_job_op_cb(user_op, status);
2842
2843 if (!IS_ENABLED(CONFIG_BT_TICKER_LOW_LAT) &&
2844 (ticker->ticks_periodic == 0U) &&
2845 user_op) {
2846 ticker->fp_op_func =
2847 user_op->fp_op_func;
2848 ticker->op_context =
2849 user_op->op_context;
2850 }
2851 }
2852 }
2853
2854 #if !defined(CONFIG_BT_TICKER_JOB_IDLE_GET) && \
2855 !defined(CONFIG_BT_TICKER_NEXT_SLOT_GET) && \
2856 !defined(CONFIG_BT_TICKER_PRIORITY_SET)
2857 user->first = user_ops_first;
2858 #endif /* !CONFIG_BT_TICKER_JOB_IDLE_GET &&
2859 * !CONFIG_BT_TICKER_NEXT_SLOT_GET &&
2860 * !CONFIG_BT_TICKER_PRIORITY_SET
2861 */
2862
2863 }
2864 }
2865
2866 #if defined(CONFIG_BT_TICKER_JOB_IDLE_GET) || \
2867 defined(CONFIG_BT_TICKER_NEXT_SLOT_GET) || \
2868 defined(CONFIG_BT_TICKER_PRIORITY_SET)
2869 /**
2870 * @brief Perform inquiry for specific user operation
2871 *
2872 * @param instance Pointer to ticker instance
2873 * @param uop Pointer to user operation
2874 *
2875 * @internal
2876 */
static inline void ticker_job_op_inquire(struct ticker_instance *instance,
					 struct ticker_user_op *uop)
{
	ticker_op_func fp_op_func;

	fp_op_func = NULL;
	switch (uop->op) {
#if defined(CONFIG_BT_TICKER_NEXT_SLOT_GET)
	case TICKER_USER_OP_TYPE_SLOT_GET:
		ticker_by_next_slot_get(instance,
					uop->params.slot_get.ticker_id,
					uop->params.slot_get.ticks_current,
					uop->params.slot_get.ticks_to_expire,
#if defined(CONFIG_BT_TICKER_NEXT_SLOT_GET_MATCH)
					uop->params.slot_get.fp_match_op_func,
					uop->params.slot_get.match_op_context,
#else
					NULL, NULL,
#endif
#if defined(CONFIG_BT_TICKER_REMAINDER_GET)
					uop->params.slot_get.remainder,
#else /* !CONFIG_BT_TICKER_REMAINDER_GET */
					NULL,
#endif /* !CONFIG_BT_TICKER_REMAINDER_GET */
#if defined(CONFIG_BT_TICKER_LAZY_GET)
					uop->params.slot_get.lazy);
#else /* !CONFIG_BT_TICKER_LAZY_GET */
					NULL);
#endif /* !CONFIG_BT_TICKER_LAZY_GET */
		/* Intentional fallthrough: SLOT_GET completes through the
		 * IDLE_GET status/callback path below.
		 */
		__fallthrough;
#endif /* CONFIG_BT_TICKER_NEXT_SLOT_GET */

#if defined(CONFIG_BT_TICKER_JOB_IDLE_GET) || \
	defined(CONFIG_BT_TICKER_NEXT_SLOT_GET)
	case TICKER_USER_OP_TYPE_IDLE_GET:
		uop->status = TICKER_STATUS_SUCCESS;
		fp_op_func = uop->fp_op_func;
		break;
#endif /* CONFIG_BT_TICKER_JOB_IDLE_GET ||
	* CONFIG_BT_TICKER_NEXT_SLOT_GET
	*/

#if !defined(CONFIG_BT_TICKER_LOW_LAT) && \
	!defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC) && \
	defined(CONFIG_BT_TICKER_PRIORITY_SET)
	case TICKER_USER_OP_TYPE_PRIORITY_SET:
		/* Reject out-of-range ticker ids */
		if (uop->id < instance->count_node) {
			struct ticker_node *node = instance->nodes;

			node[uop->id].priority =
				uop->params.priority_set.priority;
			uop->status = TICKER_STATUS_SUCCESS;
		} else {
			uop->status = TICKER_STATUS_FAILURE;
		}
		fp_op_func = uop->fp_op_func;
		break;
#endif /* !CONFIG_BT_TICKER_LOW_LAT &&
	* !CONFIG_BT_TICKER_SLOT_AGNOSTIC
	* CONFIG_BT_TICKER_PRIORITY_SET
	*/

	default:
		/* do nothing for other ops */
		break;
	}

	/* Notify the requesting user, if a callback was selected above */
	if (fp_op_func) {
		fp_op_func(uop->status, uop->op_context);
	}
}
2948
2949 /**
2950 * @brief Check for pending inquiries for all users
2951 *
2952 * @details Run through all user operation lists, checking for pending
2953 * inquiries. Currently only two types of inquiries are supported:
2954 * TICKER_USER_OP_TYPE_SLOT_GET and TICKER_USER_OP_TYPE_IDLE_GET. The
2955 * function also supports user operation TICKER_USER_OP_TYPE_PRIORITY_SET.
2956 * This operation modifies the user->first index, indicating user operations
2957 * are complete.
2958 *
2959 * @param instance Pointer to ticker instance
2960 *
2961 * @internal
2962 */
2963 static inline void ticker_job_list_inquire(struct ticker_instance *instance)
2964 {
2965 struct ticker_user *users;
2966 uint8_t count_user;
2967
2968 users = &instance->users[0];
2969 count_user = instance->count_user;
2970 /* Traverse user operation queue - first to last (with wrap) */
2971 while (count_user--) {
2972 struct ticker_user_op *user_op;
2973 struct ticker_user *user;
2974
2975 user = &users[count_user];
2976 user_op = &user->user_op[0];
2977 while (user->first != user->last) {
2978 uint8_t first;
2979
2980 ticker_job_op_inquire(instance, &user_op[user->first]);
2981
2982 first = user->first + 1;
2983 if (first == user->count_user_op) {
2984 first = 0U;
2985 }
2986 user->first = first;
2987 }
2988 }
2989 }
2990 #endif /* CONFIG_BT_TICKER_JOB_IDLE_GET ||
2991 * CONFIG_BT_TICKER_NEXT_SLOT_GET ||
2992 * CONFIG_BT_TICKER_PRIORITY_SET
2993 */
2994
2995 /**
2996 * @brief Update counter compare value (trigger)
2997 *
2998 * @details Updates trigger to the match next expiring ticker node. The
2999 * function takes into consideration that it may be preempted in the process,
3000 * and makes sure - by iteration - that compare value is set in the future
3001 * (with a margin).
3002 *
3003 * @param instance Pointer to ticker instance
 * @param ticker_id_old_head Previous ticker_id_head
 *
 * @return 1 if no compare value was set and the caller should trigger the
 * worker directly (next expiry is due or already past), otherwise 0
 *
 * @internal
 */
static inline uint8_t
ticker_job_compare_update(struct ticker_instance *instance,
			  uint8_t ticker_id_old_head)
{
	struct ticker_node *ticker;
	uint32_t ticks_to_expire;
	uint32_t ctr_curr;
	uint32_t ctr_prev;
	uint32_t cc;
	uint32_t i;

	/* No active tickers: stop the counter and re-anchor the current
	 * tick reference so a later start resumes from a fresh count.
	 */
	if (instance->ticker_id_head == TICKER_NULL) {
		if (cntr_stop() == 0) {
#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
			instance->ticks_slot_previous = 0U;
#endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */

			instance->ticks_current = cntr_cnt_get();
		}

		return 0U;
	}

	/* Check if this is the first update. If so, start the counter */
	if (ticker_id_old_head == TICKER_NULL) {
		uint32_t ticks_current;

		/* Sample before start so the reference is not ahead of the
		 * running counter.
		 */
		ticks_current = cntr_cnt_get();

		if (cntr_start() == 0) {
			instance->ticks_current = ticks_current;
		}
	}

	ticker = &instance->nodes[instance->ticker_id_head];
	ticks_to_expire = ticker->ticks_to_expire;

	/* If ticks_to_expire is zero, then immediately trigger the worker.
	 */
	if (!ticks_to_expire) {
		return 1U;
	}

	/* Iterate few times, if required, to ensure that compare is
	 * correctly set to a future value. This is required in case
	 * the operation is pre-empted and current h/w counter runs
	 * ahead of compare value to be set.
	 */
	i = 10U;
	ctr_curr = cntr_cnt_get();
	do {
		uint32_t ticks_elapsed;
		uint32_t ticks_diff;

		/* Bounded retries: failing here means the counter keeps
		 * outrunning the compare-set latency.
		 */
		LL_ASSERT(i);
		i--;

		cc = instance->ticks_current;
		ticks_diff = ticker_ticks_diff_get(ctr_curr, cc);
		/* Expiry is already due (or past): tell the caller to
		 * trigger the worker directly instead of setting compare.
		 */
		if (ticks_diff >= ticks_to_expire) {
			return 1U;
		}

		/* Candidate compare: the node's expiry, but no closer to
		 * "now" than the minimum offset plus set latency.
		 */
		ticks_elapsed = ticks_diff + HAL_TICKER_CNTR_CMP_OFFSET_MIN +
				HAL_TICKER_CNTR_SET_LATENCY;
		cc += MAX(ticks_elapsed, ticks_to_expire);
		cc &= HAL_TICKER_CNTR_MASK;
		instance->trigger_set_cb(cc);

		ctr_prev = ctr_curr;
		ctr_curr = cntr_cnt_get();
	} while ((ticker_ticks_diff_get(ctr_curr, ctr_prev) +
		  HAL_TICKER_CNTR_CMP_OFFSET_MIN) >
		  ticker_ticks_diff_get(cc, ctr_prev));
	/* Loop condition: retry if the counter advanced to within the
	 * minimum offset of the compare value just set, in which case the
	 * compare may not fire.
	 */

	return 0U;
}
3085
3086 /**
3087 * @brief Ticker job
3088 *
3089 * @details Runs the bottom half of the ticker, after ticker nodes have elapsed
3090 * or user operations requested. The ticker_job is responsible for removing and
3091 * re-inserting ticker nodes, based on next elapsing and periodicity of the
3092 * nodes. The ticker_job is also responsible for processing user operations,
3093 * i.e. requests for start, update, stop etc.
3094 * Invoked from the ticker job mayfly context (TICKER_MAYFLY_CALL_ID_JOB).
3095 *
3096 * @param param Pointer to ticker instance
3097 *
3098 * @internal
3099 */
void ticker_job(void *param)
{
	struct ticker_instance *instance = param;
	uint8_t flag_compare_update;
	uint8_t ticker_id_old_head;
	uint8_t compare_trigger;
	uint32_t ticks_previous;
	uint32_t ticks_elapsed;
	uint8_t flag_elapsed;
	uint8_t insert_head;
	uint32_t ticks_now;
	uint8_t pending;

	DEBUG_TICKER_JOB(1);

	/* Defer job, as worker is running */
	if (instance->worker_trigger) {
		DEBUG_TICKER_JOB(0);
		return;
	}

	/* Defer job, as job is already running */
	if (instance->job_guard) {
		instance->sched_cb(TICKER_CALL_ID_JOB, TICKER_CALL_ID_JOB, 1,
				   instance);
		return;
	}
	/* Serialize: mark job as running for the duration of this pass */
	instance->job_guard = 1U;

	/* Back up the previous known tick */
	ticks_previous = instance->ticks_current;

	/* Update current tick with the elapsed value from queue, and dequeue */
	if (instance->ticks_elapsed_first != instance->ticks_elapsed_last) {
		ticker_next_elapsed(&instance->ticks_elapsed_first);

		ticks_elapsed =
			instance->ticks_elapsed[instance->ticks_elapsed_first];

		instance->ticks_current += ticks_elapsed;
		instance->ticks_current &= HAL_TICKER_CNTR_MASK;

		flag_elapsed = 1U;
	} else {
		/* No elapsed value in queue */
		flag_elapsed = 0U;
		ticks_elapsed = 0U;
	}

	/* Initialise internal re-insert list */
	insert_head = TICKER_NULL;

	/* Initialise flag used to update next compare value */
	flag_compare_update = 0U;

	/* Remember the old head, so as to decide if new compare needs to be
	 * set.
	 */
	ticker_id_old_head = instance->ticker_id_head;

	/* Manage user operations (updates and deletions) in ticker list */
	ticks_now = cntr_cnt_get();
	pending = ticker_job_list_manage(instance, ticks_now, ticks_elapsed,
					 &insert_head);

	/* Detect change in head of the list */
	if (instance->ticker_id_head != ticker_id_old_head) {
		flag_compare_update = 1U;
	}

	/* Handle expired tickers */
	if (flag_elapsed) {
		ticker_job_worker_bh(instance, ticks_now, ticks_previous,
				     ticks_elapsed, &insert_head);

		/* Detect change in head of the list */
		if (instance->ticker_id_head != ticker_id_old_head) {
			flag_compare_update = 1U;
		}

		/* Handle insertions */
		ticker_job_list_insert(instance, insert_head);

#if defined(CONFIG_BT_TICKER_EXT) && !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC) &&\
	!defined(CONFIG_BT_TICKER_LOW_LAT)
		/* Re-schedule any pending nodes with slot_window */
		if (ticker_job_reschedule_in_window(instance, ticks_elapsed)) {
			flag_compare_update = 1U;
		}
#endif /* CONFIG_BT_TICKER_EXT */
	} else {
		/* Handle insertions */
		ticker_job_list_insert(instance, insert_head);
	}

	/* Detect change in head of the list */
	if (instance->ticker_id_head != ticker_id_old_head) {
		flag_compare_update = 1U;
	}

#if defined(CONFIG_BT_TICKER_JOB_IDLE_GET) || \
	defined(CONFIG_BT_TICKER_NEXT_SLOT_GET) || \
	defined(CONFIG_BT_TICKER_PRIORITY_SET)
	/* Process any list inquiries, but only when no operations remained
	 * pending above, so inquiries observe a settled list.
	 */
	if (!pending) {
		/* Handle inquiries */
		ticker_job_list_inquire(instance);
	}
#else  /* !CONFIG_BT_TICKER_JOB_IDLE_GET &&
	* !CONFIG_BT_TICKER_NEXT_SLOT_GET &&
	* !CONFIG_BT_TICKER_PRIORITY_SET
	*/
	ARG_UNUSED(pending);
#endif /* !CONFIG_BT_TICKER_JOB_IDLE_GET &&
	* !CONFIG_BT_TICKER_NEXT_SLOT_GET &&
	* !CONFIG_BT_TICKER_PRIORITY_SET
	*/

#if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
	/* Refresh cross-node expiry information if any update invalidated it */
	if (instance->expire_infos_outdated) {
		ticker_job_update_expire_infos(instance);
	}
#endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */

	/* update compare if head changed */
	if (flag_compare_update) {
		compare_trigger = ticker_job_compare_update(instance,
							    ticker_id_old_head);
	} else {
		compare_trigger = 0U;
	}

	/* Permit worker to run */
	instance->job_guard = 0U;

	/* trigger worker if deferred */
	/* NOTE(review): the barrier ensures the job_guard clear above is
	 * visible before worker_trigger is sampled — presumably pairing with
	 * ordering in ticker_worker; confirm against the worker code.
	 */
	cpu_dmb();
	if (instance->worker_trigger || compare_trigger) {
		instance->sched_cb(TICKER_CALL_ID_JOB, TICKER_CALL_ID_WORKER, 1,
				   instance);
	}

	DEBUG_TICKER_JOB(0);
}
3244
3245 /*****************************************************************************
3246 * Public Interface
3247 ****************************************************************************/
3248
3249 /**
3250 * @brief Initialize ticker instance
3251 *
3252 * @details Called by ticker instance client once to initialize the ticker.
3253 *
3254 * @param instance_index Index of ticker instance
3255 * @param count_node Number of ticker nodes in node array
3256 * @param node Pointer to ticker node array
3257 * @param count_user Number of users in user array
3258 * @param user Pointer to user array of size count_user
3259 * @param count_op Number of user operations in user_op array
3260 * @param user_op Pointer to user operations array of size count_op
3261 * @param caller_id_get_cb Pointer to function for retrieving caller_id from
3262 * user id
3263 * @param sched_cb Pointer to function for scheduling ticker_worker
3264 * and ticker_job
3265 * @param trigger_set_cb Pointer to function for setting the compare trigger
3266 * ticks value
3267 *
3268 * @return TICKER_STATUS_SUCCESS if initialization was successful, otherwise
3269 * TICKER_STATUS_FAILURE
3270 */
3271 uint8_t ticker_init(uint8_t instance_index, uint8_t count_node, void *node,
3272 uint8_t count_user, void *user, uint8_t count_op, void *user_op,
3273 ticker_caller_id_get_cb_t caller_id_get_cb,
3274 ticker_sched_cb_t sched_cb,
3275 ticker_trigger_set_cb_t trigger_set_cb)
3276 {
3277 struct ticker_instance *instance = &_instance[instance_index];
3278 struct ticker_user_op *user_op_ = (void *)user_op;
3279 struct ticker_user *users;
3280
3281 if (instance_index >= TICKER_INSTANCE_MAX) {
3282 return TICKER_STATUS_FAILURE;
3283 }
3284
3285 instance->count_node = count_node;
3286 instance->nodes = node;
3287
3288 #if !defined(CONFIG_BT_TICKER_LOW_LAT) && \
3289 !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC) && \
3290 defined(CONFIG_BT_TICKER_PRIORITY_SET)
3291 while (count_node--) {
3292 instance->nodes[count_node].priority = 0;
3293 }
3294 #endif /* !CONFIG_BT_TICKER_LOW_LAT &&
3295 * !CONFIG_BT_TICKER_SLOT_AGNOSTIC
3296 * CONFIG_BT_TICKER_PRIORITY_SET
3297 */
3298
3299 instance->count_user = count_user;
3300 instance->users = user;
3301
3302 /** @todo check if enough ticker_user_op supplied */
3303
3304 users = &instance->users[0];
3305 while (count_user--) {
3306 users[count_user].user_op = user_op_;
3307 user_op_ += users[count_user].count_user_op;
3308 count_op -= users[count_user].count_user_op;
3309 }
3310
3311 if (count_op) {
3312 return TICKER_STATUS_FAILURE;
3313 }
3314
3315 instance->caller_id_get_cb = caller_id_get_cb;
3316 instance->sched_cb = sched_cb;
3317 instance->trigger_set_cb = trigger_set_cb;
3318
3319 instance->ticker_id_head = TICKER_NULL;
3320 instance->ticks_current = cntr_cnt_get();
3321 instance->ticks_elapsed_first = 0U;
3322 instance->ticks_elapsed_last = 0U;
3323
3324 #if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
3325 instance->ticker_id_slot_previous = TICKER_NULL;
3326 instance->ticks_slot_previous = 0U;
3327 #endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */
3328
3329 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
3330 for (int i = 0; i < TICKER_EXPIRE_INFO_MAX; i++) {
3331 instance->expire_infos[i].ticker_id = TICKER_NULL;
3332 instance->expire_infos[i].last = 1;
3333 }
3334 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
3335
3336 return TICKER_STATUS_SUCCESS;
3337 }
3338
3339 /**
3340 * @brief Check if ticker instance is initialized
3341 *
3342 * @param instance_index Index of ticker instance
3343 *
3344 * @return true if ticker instance is initialized, false otherwise
3345 */
3346 bool ticker_is_initialized(uint8_t instance_index)
3347 {
3348 return !!(_instance[instance_index].count_node);
3349 }
3350
3351 /**
3352 * @brief Trigger the ticker worker
3353 *
3354 * @details Schedules the ticker_worker upper half by invoking the
3355 * corresponding mayfly.
3356 *
3357 * @param instance_index Index of ticker instance
3358 */
3359 void ticker_trigger(uint8_t instance_index)
3360 {
3361 struct ticker_instance *instance;
3362
3363 DEBUG_TICKER_ISR(1);
3364
3365 instance = &_instance[instance_index];
3366 if (instance->sched_cb) {
3367 instance->sched_cb(TICKER_CALL_ID_TRIGGER,
3368 TICKER_CALL_ID_WORKER, 1, instance);
3369 }
3370
3371 DEBUG_TICKER_ISR(0);
3372 }
3373
3374 /**
3375 * @brief Start a ticker node
3376 *
3377 * @details Creates a new user operation of type TICKER_USER_OP_TYPE_START and
3378 * schedules the ticker_job.
3379 *
3380 * @param instance_index Index of ticker instance
3381 * @param user_id Ticker user id. Used for indexing user operations
3382 * and mapping to mayfly caller id
3383 * @param ticker_id Id of ticker node
3384 * @param ticks_anchor Absolute tick count as anchor point for
3385 * ticks_first
3386 * @param ticks_first Initial number of ticks before first timeout
3387 * @param ticks_periodic Number of ticks for a periodic ticker node. If 0,
3388 * ticker node is treated as one-shot
3389 * @param remainder_periodic Periodic ticks fraction
3390 * @param lazy Number of periods to skip (latency). A value of 1
3391 * causes skipping every other timeout
3392 * @param ticks_slot Slot reservation ticks for node (air-time)
3393 * @param ticks_slot_window Window in which the slot reservation may be
3394 * re-scheduled to avoid collision. Set to 0 for
3395 * legacy behavior
3396 * @param fp_timeout_func Function pointer of function to call at timeout
3397 * @param context Context passed in timeout call
3398 * @param fp_op_func Function pointer of user operation completion
3399 * function
3400 * @param op_context Context passed in operation completion call
3401 *
3402 * @return TICKER_STATUS_BUSY if start was successful but not yet completed.
3403 * TICKER_STATUS_FAILURE is returned if there are no more user operations
3404 * available, and TICKER_STATUS_SUCCESS is returned if ticker_job gets to
3405 * run before exiting ticker_start
3406 */
#if defined(CONFIG_BT_TICKER_EXT)
/* NOTE: In this configuration the public start variants forward to the
 * static helper start_us(). start_us() appears below with its signature
 * only; its body is the shared brace block following the #endif, which
 * also serves as the body of ticker_start_us() in the non-EXT build.
 */
uint8_t ticker_start(uint8_t instance_index, uint8_t user_id, uint8_t ticker_id,
		     uint32_t ticks_anchor, uint32_t ticks_first, uint32_t ticks_periodic,
		     uint32_t remainder_periodic, uint16_t lazy, uint32_t ticks_slot,
		     ticker_timeout_func fp_timeout_func, void *context,
		     ticker_op_func fp_op_func, void *op_context)
{
	/* Forward to the extended start with no extension data */
	return ticker_start_ext(instance_index, user_id, ticker_id,
				ticks_anchor, ticks_first, ticks_periodic,
				remainder_periodic, lazy, ticks_slot,
				fp_timeout_func, context,
				fp_op_func, op_context,
				NULL);
}

static uint8_t start_us(uint8_t instance_index, uint8_t user_id,
			uint8_t ticker_id, uint32_t ticks_anchor,
			uint32_t ticks_first, uint32_t remainder_first,
			uint32_t ticks_periodic, uint32_t remainder_periodic,
			uint16_t lazy, uint32_t ticks_slot,
			ticker_timeout_func fp_timeout_func, void *context,
			ticker_op_func fp_op_func, void *op_context,
			struct ticker_ext *ext_data);

uint8_t ticker_start_us(uint8_t instance_index, uint8_t user_id,
			uint8_t ticker_id, uint32_t ticks_anchor,
			uint32_t ticks_first, uint32_t remainder_first,
			uint32_t ticks_periodic, uint32_t remainder_periodic,
			uint16_t lazy, uint32_t ticks_slot,
			ticker_timeout_func fp_timeout_func, void *context,
			ticker_op_func fp_op_func, void *op_context)
{
	/* Forward to the common helper with no extension data */
	return start_us(instance_index, user_id, ticker_id, ticks_anchor,
			ticks_first, remainder_first,
			ticks_periodic, remainder_periodic,
			lazy, ticks_slot,
			fp_timeout_func, context,
			fp_op_func, op_context,
			NULL);
}

uint8_t ticker_start_ext(uint8_t instance_index, uint8_t user_id, uint8_t ticker_id,
			 uint32_t ticks_anchor, uint32_t ticks_first,
			 uint32_t ticks_periodic, uint32_t remainder_periodic,
			 uint16_t lazy, uint32_t ticks_slot,
			 ticker_timeout_func fp_timeout_func, void *context,
			 ticker_op_func fp_op_func, void *op_context,
			 struct ticker_ext *ext_data)
{
	/* Forward with zero first-expiry remainder */
	return start_us(instance_index, user_id, ticker_id, ticks_anchor,
			ticks_first, 0U, ticks_periodic, remainder_periodic,
			lazy, ticks_slot,
			fp_timeout_func, context,
			fp_op_func, op_context,
			ext_data);
}

/* Signature only: the body is the shared brace block after the #endif */
static uint8_t start_us(uint8_t instance_index, uint8_t user_id,
			uint8_t ticker_id, uint32_t ticks_anchor,
			uint32_t ticks_first, uint32_t remainder_first,
			uint32_t ticks_periodic, uint32_t remainder_periodic,
			uint16_t lazy, uint32_t ticks_slot,
			ticker_timeout_func fp_timeout_func, void *context,
			ticker_op_func fp_op_func, void *op_context,
			struct ticker_ext *ext_data)

#else /* !CONFIG_BT_TICKER_EXT */
uint8_t ticker_start(uint8_t instance_index, uint8_t user_id, uint8_t ticker_id,
		     uint32_t ticks_anchor, uint32_t ticks_first, uint32_t ticks_periodic,
		     uint32_t remainder_periodic, uint16_t lazy, uint32_t ticks_slot,
		     ticker_timeout_func fp_timeout_func, void *context,
		     ticker_op_func fp_op_func, void *op_context)
{
	/* Forward with zero first-expiry remainder */
	return ticker_start_us(instance_index, user_id,
			       ticker_id, ticks_anchor,
			       ticks_first, 0U,
			       ticks_periodic, remainder_periodic,
			       lazy, ticks_slot,
			       fp_timeout_func, context,
			       fp_op_func, op_context);
}

/* Signature only: the body is the shared brace block after the #endif */
uint8_t ticker_start_us(uint8_t instance_index, uint8_t user_id,
			uint8_t ticker_id, uint32_t ticks_anchor,
			uint32_t ticks_first, uint32_t remainder_first,
			uint32_t ticks_periodic, uint32_t remainder_periodic,
			uint16_t lazy, uint32_t ticks_slot,
			ticker_timeout_func fp_timeout_func, void *context,
			ticker_op_func fp_op_func, void *op_context)
#endif /* !CONFIG_BT_TICKER_EXT */

/* Shared body: start_us() when CONFIG_BT_TICKER_EXT is enabled,
 * ticker_start_us() otherwise. Enqueues a TICKER_USER_OP_TYPE_START user
 * operation on the caller's circular queue and schedules the ticker job.
 */
{
	struct ticker_instance *instance = &_instance[instance_index];
	struct ticker_user_op *user_op;
	struct ticker_user *user;
	uint8_t last;

	user = &instance->users[user_id];

	/* Compute next write index with wrap; the queue is full when it
	 * would catch up with the read index 'first'.
	 */
	last = user->last + 1;
	if (last >= user->count_user_op) {
		last = 0U;
	}

	if (last == user->first) {
		return TICKER_STATUS_FAILURE;
	}

	user_op = &user->user_op[user->last];
	user_op->op = TICKER_USER_OP_TYPE_START;
	user_op->id = ticker_id;
	user_op->params.start.ticks_at_start = ticks_anchor;
	user_op->params.start.ticks_first = ticks_first;
#if defined(CONFIG_BT_TICKER_REMAINDER)
	user_op->params.start.remainder_first = remainder_first;
#else /* !CONFIG_BT_TICKER_REMAINDER */
	ARG_UNUSED(remainder_first);
#endif /* !CONFIG_BT_TICKER_REMAINDER */
	user_op->params.start.ticks_periodic = ticks_periodic;
	user_op->params.start.remainder_periodic = remainder_periodic;
#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
	user_op->params.start.ticks_slot = ticks_slot;
#endif
	user_op->params.start.lazy = lazy;
#if defined(CONFIG_BT_TICKER_EXT)
	user_op->params.start.ext_data = ext_data;
#endif
	user_op->params.start.fp_timeout_func = fp_timeout_func;
	user_op->params.start.context = context;
	user_op->status = TICKER_STATUS_BUSY;
	user_op->fp_op_func = fp_op_func;
	user_op->op_context = op_context;

	/* Make sure transaction is completed before committing */
	cpu_dmb();
	user->last = last;

	instance->sched_cb(instance->caller_id_get_cb(user_id),
			   TICKER_CALL_ID_JOB, 0, instance);

	/* BUSY unless the ticker job already ran and updated the status */
	return user_op->status;
}
3549
3550 #if defined(CONFIG_BT_TICKER_UPDATE)
3551 /**
3552 * @brief Update a ticker node
3553 *
3554 * @details Creates a new user operation of type TICKER_USER_OP_TYPE_UPDATE and
3555 * schedules the ticker_job.
3556 *
3557 * @param instance_index Index of ticker instance
3558 * @param user_id Ticker user id. Used for indexing user operations
3559 * and mapping to mayfly caller id
3560 * @param ticker_id Id of ticker node
3561 * @param ticks_drift_plus Number of ticks to add for drift compensation
3562 * @param ticks_drift_minus Number of ticks to subtract for drift compensation
3563 * @param ticks_slot_plus Number of ticks to add to slot reservation
3564 * @param ticks_slot_minus Number of ticks to add subtract from slot
3565 * reservation
3566 * @param lazy Number of periods to skip (latency). A value of 0
3567 * means no action. 1 means no latency (normal). A
3568 * value >1 means latency = lazy - 1
3569 * @param force Force update to take effect immediately. With
3570 * force = 0, update is scheduled to take effect as
3571 * soon as possible
3572 * @param fp_op_func Function pointer of user operation completion
3573 * function
3574 * @param op_context Context passed in operation completion call
3575 * @param must_expire Disable, enable or ignore the must-expire state.
3576 * A value of 0 means no change, 1 means disable and
3577 * 2 means enable.
3578 *
3579 * @return TICKER_STATUS_BUSY if update was successful but not yet completed.
3580 * TICKER_STATUS_FAILURE is returned if there are no more user operations
3581 * available, and TICKER_STATUS_SUCCESS is returned if ticker_job gets to run
3582 * before exiting ticker_update
3583 */
3584 uint8_t ticker_update(uint8_t instance_index, uint8_t user_id,
3585 uint8_t ticker_id, uint32_t ticks_drift_plus,
3586 uint32_t ticks_drift_minus, uint32_t ticks_slot_plus,
3587 uint32_t ticks_slot_minus, uint16_t lazy, uint8_t force,
3588 ticker_op_func fp_op_func, void *op_context)
3589 #if defined(CONFIG_BT_TICKER_EXT)
3590 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
3591 {
3592 return ticker_update_ext(instance_index, user_id, ticker_id,
3593 ticks_drift_plus, ticks_drift_minus,
3594 ticks_slot_plus, ticks_slot_minus, lazy,
3595 force, fp_op_func, op_context, 0U, ticker_id);
3596 }
3597
3598 uint8_t ticker_update_ext(uint8_t instance_index, uint8_t user_id,
3599 uint8_t ticker_id, uint32_t ticks_drift_plus,
3600 uint32_t ticks_drift_minus,
3601 uint32_t ticks_slot_plus, uint32_t ticks_slot_minus,
3602 uint16_t lazy, uint8_t force,
3603 ticker_op_func fp_op_func, void *op_context,
3604 uint8_t must_expire, uint8_t expire_info_id)
3605 #else /* !CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
3606 {
3607 return ticker_update_ext(instance_index, user_id, ticker_id,
3608 ticks_drift_plus, ticks_drift_minus,
3609 ticks_slot_plus, ticks_slot_minus, lazy,
3610 force, fp_op_func, op_context, 0U);
3611 }
3612
3613 uint8_t ticker_update_ext(uint8_t instance_index, uint8_t user_id,
3614 uint8_t ticker_id, uint32_t ticks_drift_plus,
3615 uint32_t ticks_drift_minus,
3616 uint32_t ticks_slot_plus, uint32_t ticks_slot_minus,
3617 uint16_t lazy, uint8_t force,
3618 ticker_op_func fp_op_func, void *op_context,
3619 uint8_t must_expire)
3620 #endif /* !CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
3621 #endif /* CONFIG_BT_TICKER_EXT */
3622 {
3623 struct ticker_instance *instance = &_instance[instance_index];
3624 struct ticker_user_op *user_op;
3625 struct ticker_user *user;
3626 uint8_t last;
3627
3628 user = &instance->users[user_id];
3629
3630 last = user->last + 1;
3631 if (last >= user->count_user_op) {
3632 last = 0U;
3633 }
3634
3635 if (last == user->first) {
3636 return TICKER_STATUS_FAILURE;
3637 }
3638
3639 user_op = &user->user_op[user->last];
3640 user_op->op = TICKER_USER_OP_TYPE_UPDATE;
3641 user_op->id = ticker_id;
3642 user_op->params.update.ticks_drift_plus = ticks_drift_plus;
3643 user_op->params.update.ticks_drift_minus = ticks_drift_minus;
3644 #if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
3645 user_op->params.update.ticks_slot_plus = ticks_slot_plus;
3646 user_op->params.update.ticks_slot_minus = ticks_slot_minus;
3647 #endif /* CONFIG_BT_TICKER_SLOT_AGNOSTIC */
3648 user_op->params.update.lazy = lazy;
3649 user_op->params.update.force = force;
3650 #if defined(CONFIG_BT_TICKER_EXT)
3651 #if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC) && !defined(CONFIG_BT_TICKER_LOW_LAT)
3652 user_op->params.update.must_expire = must_expire;
3653 #endif /* CONFIG_BT_TICKER_EXT && !CONFIG_BT_TICKER_SLOT_AGNOSTIC && !CONFIG_BT_TICKER_LOW_LAT */
3654 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
3655 user_op->params.update.expire_info_id = expire_info_id;
3656 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
3657 #endif /* CONFIG_BT_TICKER_EXT */
3658 user_op->status = TICKER_STATUS_BUSY;
3659 user_op->fp_op_func = fp_op_func;
3660 user_op->op_context = op_context;
3661
3662 /* Make sure transaction is completed before committing */
3663 cpu_dmb();
3664 user->last = last;
3665
3666 instance->sched_cb(instance->caller_id_get_cb(user_id),
3667 TICKER_CALL_ID_JOB, 0, instance);
3668
3669 return user_op->status;
3670 }
3671 #endif /* CONFIG_BT_TICKER_UPDATE */
3672
3673 /**
3674 * @brief Yield a ticker node with supplied absolute ticks reference
3675 *
3676 * @details Creates a new user operation of type TICKER_USER_OP_TYPE_YIELD_ABS
3677 * and schedules the ticker_job.
3678 *
3679 * @param instance_index Index of ticker instance
3680 * @param user_id Ticker user id. Used for indexing user operations
3681 * and mapping to mayfly caller id
3682 * @param ticks_at_yield Absolute tick count at ticker yield request
3683 * @param fp_op_func Function pointer of user operation completion
3684 * function
3685 * @param op_context Context passed in operation completion call
3686 *
 * @return TICKER_STATUS_BUSY if yield was successful but not yet completed.
 * TICKER_STATUS_FAILURE is returned if there are no more user operations
 * available, and TICKER_STATUS_SUCCESS is returned if ticker_job gets to run
 * before exiting ticker_yield_abs
3691 */
3692 uint8_t ticker_yield_abs(uint8_t instance_index, uint8_t user_id,
3693 uint8_t ticker_id, uint32_t ticks_at_yield,
3694 ticker_op_func fp_op_func, void *op_context)
3695 {
3696 struct ticker_instance *instance = &_instance[instance_index];
3697 struct ticker_user_op *user_op;
3698 struct ticker_user *user;
3699 uint8_t last;
3700
3701 user = &instance->users[user_id];
3702
3703 last = user->last + 1;
3704 if (last >= user->count_user_op) {
3705 last = 0U;
3706 }
3707
3708 if (last == user->first) {
3709 return TICKER_STATUS_FAILURE;
3710 }
3711
3712 user_op = &user->user_op[user->last];
3713 user_op->op = TICKER_USER_OP_TYPE_YIELD_ABS;
3714 user_op->id = ticker_id;
3715 user_op->params.yield.ticks_at_yield = ticks_at_yield;
3716 user_op->status = TICKER_STATUS_BUSY;
3717 user_op->fp_op_func = fp_op_func;
3718 user_op->op_context = op_context;
3719
3720 /* Make sure transaction is completed before committing */
3721 cpu_dmb();
3722 user->last = last;
3723
3724 instance->sched_cb(instance->caller_id_get_cb(user_id),
3725 TICKER_CALL_ID_JOB, 0, instance);
3726
3727 return user_op->status;
3728 }
3729
3730 /**
3731 * @brief Stop a ticker node
3732 *
3733 * @details Creates a new user operation of type TICKER_USER_OP_TYPE_STOP and
3734 * schedules the ticker_job.
3735 *
3736 * @param instance_index Index of ticker instance
3737 * @param user_id Ticker user id. Used for indexing user operations
3738 * and mapping to mayfly caller id
3739 * @param fp_op_func Function pointer of user operation completion
3740 * function
3741 * @param op_context Context passed in operation completion call
3742 *
3743 * @return TICKER_STATUS_BUSY if stop was successful but not yet completed.
3744 * TICKER_STATUS_FAILURE is returned if there are no more user operations
3745 * available, and TICKER_STATUS_SUCCESS is returned if ticker_job gets to run
3746 * before exiting ticker_stop
3747 */
3748 uint8_t ticker_stop(uint8_t instance_index, uint8_t user_id, uint8_t ticker_id,
3749 ticker_op_func fp_op_func, void *op_context)
3750 {
3751 struct ticker_instance *instance = &_instance[instance_index];
3752 struct ticker_user_op *user_op;
3753 struct ticker_user *user;
3754 uint8_t last;
3755
3756 user = &instance->users[user_id];
3757
3758 last = user->last + 1;
3759 if (last >= user->count_user_op) {
3760 last = 0U;
3761 }
3762
3763 if (last == user->first) {
3764 return TICKER_STATUS_FAILURE;
3765 }
3766
3767 user_op = &user->user_op[user->last];
3768 user_op->op = TICKER_USER_OP_TYPE_STOP;
3769 user_op->id = ticker_id;
3770 user_op->status = TICKER_STATUS_BUSY;
3771 user_op->fp_op_func = fp_op_func;
3772 user_op->op_context = op_context;
3773
3774 /* Make sure transaction is completed before committing */
3775 cpu_dmb();
3776 user->last = last;
3777
3778 instance->sched_cb(instance->caller_id_get_cb(user_id),
3779 TICKER_CALL_ID_JOB, 0, instance);
3780
3781 return user_op->status;
3782 }
3783
3784 /**
3785 * @brief Stop a ticker node with supplied absolute ticks reference
3786 *
3787 * @details Creates a new user operation of type TICKER_USER_OP_TYPE_STOP_ABS
3788 * and schedules the ticker_job.
3789 *
3790 * @param instance_index Index of ticker instance
3791 * @param user_id Ticker user id. Used for indexing user operations
3792 * and mapping to mayfly caller id
3793 * @param ticks_at_stop Absolute tick count at ticker stop request
3794 * @param fp_op_func Function pointer of user operation completion
3795 * function
3796 * @param op_context Context passed in operation completion call
3797 *
3798 * @return TICKER_STATUS_BUSY if stop was successful but not yet completed.
3799 * TICKER_STATUS_FAILURE is returned if there are no more user operations
3800 * available, and TICKER_STATUS_SUCCESS is returned if ticker_job gets to run
3801 * before exiting ticker_stop
3802 */
3803 uint8_t ticker_stop_abs(uint8_t instance_index, uint8_t user_id,
3804 uint8_t ticker_id, uint32_t ticks_at_stop,
3805 ticker_op_func fp_op_func, void *op_context)
3806 {
3807 struct ticker_instance *instance = &_instance[instance_index];
3808 struct ticker_user_op *user_op;
3809 struct ticker_user *user;
3810 uint8_t last;
3811
3812 user = &instance->users[user_id];
3813
3814 last = user->last + 1;
3815 if (last >= user->count_user_op) {
3816 last = 0U;
3817 }
3818
3819 if (last == user->first) {
3820 return TICKER_STATUS_FAILURE;
3821 }
3822
3823 user_op = &user->user_op[user->last];
3824 user_op->op = TICKER_USER_OP_TYPE_STOP_ABS;
3825 user_op->id = ticker_id;
3826 user_op->params.yield.ticks_at_yield = ticks_at_stop;
3827 user_op->status = TICKER_STATUS_BUSY;
3828 user_op->fp_op_func = fp_op_func;
3829 user_op->op_context = op_context;
3830
3831 /* Make sure transaction is completed before committing */
3832 cpu_dmb();
3833 user->last = last;
3834
3835 instance->sched_cb(instance->caller_id_get_cb(user_id),
3836 TICKER_CALL_ID_JOB, 0, instance);
3837
3838 return user_op->status;
3839 }
3840
#if defined(CONFIG_BT_TICKER_NEXT_SLOT_GET)
/**
 * @brief Get next ticker node slot
 *
 * @details Gets the next ticker which has slot ticks specified and
 * return the ticker id and accumulated ticks until expiration. If no
 * ticker nodes have slot ticks, the next ticker node is returned.
 * If no head id is provided (TICKER_NULL) the first node is returned.
 *
 * Note the preprocessor structure below: when any of the extended
 * options (lazy get, remainder get or slot match) is enabled, this
 * function is a thin forwarder to ticker_next_slot_get_ext() and the
 * shared function body that follows belongs to the _ext variant.
 * Otherwise the body belongs to this function directly.
 *
 * @param instance_index Index of ticker instance
 * @param user_id        Ticker user id. Used for indexing user operations
 *                       and mapping to mayfly caller id
 * @param ticker_id      Pointer to id of ticker node
 * @param ticks_current  Pointer to current ticks count
 * @param ticks_to_expire Pointer to ticks to expire
 * @param fp_op_func     Function pointer of user operation completion
 *                       function
 * @param op_context     Context passed in operation completion call
 *
 * @return TICKER_STATUS_BUSY if request was successful but not yet completed.
 * TICKER_STATUS_FAILURE is returned if there are no more user operations
 * available, and TICKER_STATUS_SUCCESS is returned if ticker_job gets to run
 * before exiting ticker_next_slot_get
 */
uint8_t ticker_next_slot_get(uint8_t instance_index, uint8_t user_id,
			     uint8_t *ticker_id, uint32_t *ticks_current,
			     uint32_t *ticks_to_expire,
			     ticker_op_func fp_op_func, void *op_context)
{
#if defined(CONFIG_BT_TICKER_LAZY_GET) || \
	defined(CONFIG_BT_TICKER_REMAINDER_GET) || \
	defined(CONFIG_BT_TICKER_NEXT_SLOT_GET_MATCH)
	/* Extended options enabled: forward with the extended parameters
	 * defaulted to NULL; the body below compiles as part of
	 * ticker_next_slot_get_ext() instead.
	 */
	return ticker_next_slot_get_ext(instance_index, user_id, ticker_id,
					ticks_current, ticks_to_expire, NULL,
					NULL, NULL, NULL, fp_op_func,
					op_context);
}

uint8_t ticker_next_slot_get_ext(uint8_t instance_index, uint8_t user_id,
				 uint8_t *ticker_id, uint32_t *ticks_current,
				 uint32_t *ticks_to_expire,
				 uint32_t *remainder, uint16_t *lazy,
				 ticker_op_match_func fp_match_op_func,
				 void *match_op_context,
				 ticker_op_func fp_op_func, void *op_context)
{
#endif /* CONFIG_BT_TICKER_LAZY_GET ||
	* CONFIG_BT_TICKER_REMAINDER_GET ||
	* CONFIG_BT_TICKER_NEXT_SLOT_GET_MATCH
	*/
	struct ticker_instance *instance = &_instance[instance_index];
	struct ticker_user_op *user_op;
	struct ticker_user *user;
	uint8_t last;

	user = &instance->users[user_id];

	/* Compute the next producer index of the user operation ring
	 * buffer, wrapping at the buffer size.
	 */
	last = user->last + 1;
	if (last >= user->count_user_op) {
		last = 0U;
	}

	/* Ring buffer full - no free user operation slot available */
	if (last == user->first) {
		return TICKER_STATUS_FAILURE;
	}

	/* Populate the user operation at the current producer slot */
	user_op = &user->user_op[user->last];
	user_op->op = TICKER_USER_OP_TYPE_SLOT_GET;
	user_op->id = TICKER_NULL;
	user_op->params.slot_get.ticker_id = ticker_id;
	user_op->params.slot_get.ticks_current = ticks_current;
	user_op->params.slot_get.ticks_to_expire = ticks_to_expire;
#if defined(CONFIG_BT_TICKER_REMAINDER_GET)
	user_op->params.slot_get.remainder = remainder;
#endif /* CONFIG_BT_TICKER_REMAINDER_GET */
#if defined(CONFIG_BT_TICKER_LAZY_GET)
	user_op->params.slot_get.lazy = lazy;
#endif /* CONFIG_BT_TICKER_LAZY_GET */
#if defined(CONFIG_BT_TICKER_NEXT_SLOT_GET_MATCH)
	user_op->params.slot_get.fp_match_op_func = fp_match_op_func;
	user_op->params.slot_get.match_op_context = match_op_context;
#endif /* CONFIG_BT_TICKER_NEXT_SLOT_GET_MATCH */
	user_op->status = TICKER_STATUS_BUSY;
	user_op->fp_op_func = fp_op_func;
	user_op->op_context = op_context;

	/* Make sure transaction is completed before committing */
	cpu_dmb();
	user->last = last;

	/* Schedule the ticker job to process the enqueued operation */
	instance->sched_cb(instance->caller_id_get_cb(user_id),
			   TICKER_CALL_ID_JOB, 0, instance);

	/* May already be TICKER_STATUS_SUCCESS if ticker_job ran before
	 * this read.
	 */
	return user_op->status;
}
#endif /* CONFIG_BT_TICKER_NEXT_SLOT_GET */
3937
#if defined(CONFIG_BT_TICKER_JOB_IDLE_GET)
/**
 * @brief Get a callback at the end of ticker job execution
 *
 * @details Enqueues a user operation whose sole effect is that the
 * completion callback is invoked at the end of the ticker_job
 * execution. The user operation is immutable.
 *
 * @param instance_index Index of ticker instance
 * @param user_id        Ticker user id. Used for indexing user operations
 *                       and mapping to mayfly caller id
 * @param fp_op_func     Function pointer of user operation completion
 *                       function
 * @param op_context     Context passed in operation completion call
 *
 * @return TICKER_STATUS_BUSY if request was successful but not yet completed.
 * TICKER_STATUS_FAILURE is returned if there are no more user operations
 * available, and TICKER_STATUS_SUCCESS is returned if ticker_job gets to run
 * before exiting ticker_job_idle_get
 */
uint8_t ticker_job_idle_get(uint8_t instance_index, uint8_t user_id,
			    ticker_op_func fp_op_func, void *op_context)
{
	struct ticker_instance *instance = &_instance[instance_index];
	struct ticker_user *user = &instance->users[user_id];
	struct ticker_user_op *user_op;
	uint8_t next_last;

	/* Advance the producer index, wrapping at the ring buffer size */
	next_last = user->last + 1;
	if (next_last >= user->count_user_op) {
		next_last = 0U;
	}

	/* Ring buffer full - no free user operation slot available */
	if (next_last == user->first) {
		return TICKER_STATUS_FAILURE;
	}

	/* Populate the operation at the current producer slot */
	user_op = &user->user_op[user->last];
	user_op->op = TICKER_USER_OP_TYPE_IDLE_GET;
	user_op->id = TICKER_NULL;
	user_op->status = TICKER_STATUS_BUSY;
	user_op->fp_op_func = fp_op_func;
	user_op->op_context = op_context;

	/* Make sure transaction is completed before committing */
	cpu_dmb();
	user->last = next_last;

	/* Schedule the ticker job to process the enqueued operation */
	instance->sched_cb(instance->caller_id_get_cb(user_id),
			   TICKER_CALL_ID_JOB, 0, instance);

	return user_op->status;
}
#endif /* CONFIG_BT_TICKER_JOB_IDLE_GET */
3993
#if !defined(CONFIG_BT_TICKER_LOW_LAT) && \
	!defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC) && \
	defined(CONFIG_BT_TICKER_PRIORITY_SET)
/**
 * @brief Set ticker node priority
 *
 * @param instance_index Index of ticker instance
 * @param user_id        Ticker user id. Used for indexing user operations
 *                       and mapping to mayfly caller id
 * @param ticker_id      Id of ticker node to set priority on
 * @param priority       Priority to set. Range [-128..127], default is 0.
 *                       Lower value equals higher priority. Setting
 *                       priority to -128 (TICKER_PRIORITY_CRITICAL) makes
 *                       the node win all collision challenges. Only one
 *                       node can have this priority assigned.
 * @param fp_op_func     Function pointer of user operation completion
 *                       function
 * @param op_context     Context passed in operation completion call
 *
 * @return TICKER_STATUS_BUSY if request was successful but not yet completed.
 * TICKER_STATUS_FAILURE is returned if there are no more user operations
 * available, and TICKER_STATUS_SUCCESS is returned if ticker_job gets to run
 * before exiting ticker_priority_set
 */
uint8_t ticker_priority_set(uint8_t instance_index, uint8_t user_id, uint8_t ticker_id,
			    int8_t priority, ticker_op_func fp_op_func,
			    void *op_context)
{
	struct ticker_instance *instance = &_instance[instance_index];
	struct ticker_user *user = &instance->users[user_id];
	struct ticker_user_op *user_op;
	uint8_t commit_idx;

	/* Advance the producer index, wrapping at the ring buffer size */
	commit_idx = user->last + 1;
	if (commit_idx >= user->count_user_op) {
		commit_idx = 0U;
	}

	/* Ring buffer full - no free user operation slot available */
	if (commit_idx == user->first) {
		return TICKER_STATUS_FAILURE;
	}

	/* Populate the operation at the current producer slot */
	user_op = &user->user_op[user->last];
	user_op->op = TICKER_USER_OP_TYPE_PRIORITY_SET;
	user_op->id = ticker_id;
	user_op->params.priority_set.priority = priority;
	user_op->status = TICKER_STATUS_BUSY;
	user_op->fp_op_func = fp_op_func;
	user_op->op_context = op_context;

	/* Make sure transaction is completed before committing */
	cpu_dmb();
	user->last = commit_idx;

	/* Schedule the ticker job to process the enqueued operation */
	instance->sched_cb(instance->caller_id_get_cb(user_id),
			   TICKER_CALL_ID_JOB, 0, instance);

	return user_op->status;
}
#endif /* !CONFIG_BT_TICKER_LOW_LAT &&
	* !CONFIG_BT_TICKER_SLOT_AGNOSTIC &&
	* CONFIG_BT_TICKER_PRIORITY_SET
	*/
4059
4060 /**
4061 * @brief Schedule ticker job
4062 *
4063 * @param instance_index Index of ticker instance
4064 * @param user_id Ticker user id. Maps to mayfly caller id
4065 */
4066 void ticker_job_sched(uint8_t instance_index, uint8_t user_id)
4067 {
4068 struct ticker_instance *instance = &_instance[instance_index];
4069
4070 instance->sched_cb(instance->caller_id_get_cb(user_id),
4071 TICKER_CALL_ID_JOB, 0, instance);
4072 }
4073
4074 /**
4075 * @brief Get current absolute tick count
4076 *
4077 * @return Absolute tick count
4078 */
4079 uint32_t ticker_ticks_now_get(void)
4080 {
4081 return cntr_cnt_get();
4082 }
4083
4084 /**
4085 * @brief Get difference between two tick counts
4086 *
4087 * @details Subtract two counts and truncate to correct HW dependent counter
4088 * bit width
4089 *
4090 * @param ticks_now Highest tick count (now)
4091 * @param ticks_old Tick count to subtract from ticks_now
4092 */
4093 uint32_t ticker_ticks_diff_get(uint32_t ticks_now, uint32_t ticks_old)
4094 {
4095 return ((ticks_now - ticks_old) & HAL_TICKER_CNTR_MASK);
4096 }
4097