/*
 * Copyright (c) 2016-2018 Nordic Semiconductor ASA
 * Copyright (c) 2016 Vinayak Kariappa Chettimada
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <stdbool.h>
#include <zephyr/types.h>
#include <soc.h>

#include "hal/cntr.h"
#include "hal/ticker.h"
#include "hal/cpu.h"

#include "ticker.h"

#include "hal/debug.h"

/*****************************************************************************
 * Defines
 ****************************************************************************/
#define DOUBLE_BUFFER_SIZE 2
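/* NOTE: the two elapsed-ticks buffer slots let ticker_worker push a new
 * elapsed count while ticker_job is still consuming the previous one (see
 * ticks_elapsed_first and ticks_elapsed_last in struct ticker_instance).
 */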

#if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
#if !defined(CONFIG_BT_CTLR_ADV_AUX_SET)
#define BT_CTLR_ADV_AUX_SET 0
#else
#define BT_CTLR_ADV_AUX_SET CONFIG_BT_CTLR_ADV_AUX_SET
#endif
#if !defined(CONFIG_BT_CTLR_ADV_SYNC_SET)
#define BT_CTLR_ADV_SYNC_SET 0
#else
#define BT_CTLR_ADV_SYNC_SET CONFIG_BT_CTLR_ADV_SYNC_SET
#endif
#if defined(CONFIG_BT_CTLR_ADV_ISO)
#define TICKER_EXPIRE_INFO_MAX (BT_CTLR_ADV_AUX_SET + BT_CTLR_ADV_SYNC_SET*2)
#else
#define TICKER_EXPIRE_INFO_MAX (BT_CTLR_ADV_AUX_SET + BT_CTLR_ADV_SYNC_SET)
#endif /* !CONFIG_BT_CTLR_ADV_ISO */
#endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
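/* Sizing note: TICKER_EXPIRE_INFO_MAX above reserves one expire info per
 * extended advertising aux set and per periodic advertising sync set; with
 * CONFIG_BT_CTLR_ADV_ISO each sync set presumably needs a second entry,
 * hence the factor of two.
 */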

/*****************************************************************************
 * Types
 ****************************************************************************/

struct ticker_node {
	uint8_t next;			    /* Next ticker node */

	uint8_t req;			    /* Request counter */
	uint8_t ack;			    /* Acknowledge counter. Imbalance
					     * between req and ack indicates
					     * ongoing operation
					     */
	uint8_t force:1;		    /* If non-zero, node timeout should
					     * be forced at next expiration
					     */
#if defined(CONFIG_BT_TICKER_PREFER_START_BEFORE_STOP)
	uint8_t start_pending:1;	    /* If non-zero, start is pending for
					     * bottom half of ticker_job.
					     */
#endif /* CONFIG_BT_TICKER_PREFER_START_BEFORE_STOP */
	uint32_t ticks_periodic;	    /* If non-zero, interval
					     * between expirations
					     */
	uint32_t ticks_to_expire;	    /* Ticks until expiration */
	ticker_timeout_func timeout_func;   /* User timeout function */
	void *context;			    /* Context delivered to timeout
					     * function
					     */
	uint32_t ticks_to_expire_minus;	    /* Negative drift correction */
#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
	uint32_t ticks_slot;		    /* Air-time reservation for node */
#endif /* CONFIG_BT_TICKER_SLOT_AGNOSTIC */
	uint16_t lazy_periodic;		    /* Number of timeouts to allow
					     * skipping
					     */
	uint16_t lazy_current;		    /* Current number of timeouts
					     * skipped = peripheral latency
					     */
	union {
#if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
		uint32_t remainder_periodic; /* Sub-microsecond tick remainder
					      * for each period
					      */
#endif /* CONFIG_BT_TICKER_REMAINDER_SUPPORT */

		ticker_op_func fp_op_func;  /* Operation completion callback */
	};

	union {
#if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
		uint32_t remainder_current; /* Current sub-microsecond tick
					     * remainder
					     */
#endif /* CONFIG_BT_TICKER_REMAINDER_SUPPORT */

		void *op_context;	    /* Context passed in completion
					     * callback
					     */
	};

#if defined(CONFIG_BT_TICKER_EXT)
	struct ticker_ext *ext_data;	    /* Ticker extension data */
#endif /* CONFIG_BT_TICKER_EXT */
#if !defined(CONFIG_BT_TICKER_LOW_LAT) && \
	!defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
	uint8_t must_expire;		    /* Node must expire, even if it
					     * collides with other nodes
					     */
#if defined(CONFIG_BT_TICKER_PRIORITY_SET)
	int8_t priority;		    /* Ticker node priority. 0 is
					     * default. Lower value is higher
					     * priority
					     */
#endif /* CONFIG_BT_TICKER_PRIORITY_SET */
#endif /* !CONFIG_BT_TICKER_LOW_LAT &&
	* !CONFIG_BT_TICKER_SLOT_AGNOSTIC
	*/
};
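
/* NOTE: the req/ack pair above forms a wrap-around handshake between
 * requests issued against a node and their acknowledgement. As used by
 * ticker_worker below, ((req - ack) & 0xff) == 1 means exactly one start
 * request is outstanding and the node is scheduled to execute; any other
 * imbalance indicates an operation still in progress.
 */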

struct ticker_expire_info_internal {
	uint32_t ticks_to_expire;
#if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
	uint32_t remainder;
#endif /* CONFIG_BT_TICKER_REMAINDER_SUPPORT */
	uint16_t lazy;
	uint8_t ticker_id;
	uint8_t outdated:1;
	uint8_t found:1;
	uint8_t last:1;
};

/* Operations to be performed in ticker_job.
 * Possible values for field "op" in struct ticker_user_op
 */
#define TICKER_USER_OP_TYPE_NONE         0
#define TICKER_USER_OP_TYPE_IDLE_GET     1
#define TICKER_USER_OP_TYPE_SLOT_GET     2
#define TICKER_USER_OP_TYPE_PRIORITY_SET 3
#define TICKER_USER_OP_TYPE_START        4
#define TICKER_USER_OP_TYPE_UPDATE       5
#define TICKER_USER_OP_TYPE_YIELD_ABS    6
#define TICKER_USER_OP_TYPE_STOP         7
#define TICKER_USER_OP_TYPE_STOP_ABS     8

/* Slot window re-schedule states */
#define TICKER_RESCHEDULE_STATE_NONE    0
#define TICKER_RESCHEDULE_STATE_PENDING 1
#define TICKER_RESCHEDULE_STATE_DONE    2

#if defined(CONFIG_BT_TICKER_EXT) && !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
#define TICKER_HAS_SLOT_WINDOW(_ticker) \
	((_ticker)->ext_data && ((_ticker)->ext_data->ticks_slot_window != 0U))
#define TICKER_RESCHEDULE_PENDING(_ticker) \
	((_ticker)->ext_data && ((_ticker)->ext_data->reschedule_state == \
				 TICKER_RESCHEDULE_STATE_PENDING))
#else
#define TICKER_HAS_SLOT_WINDOW(_ticker) 0
#define TICKER_RESCHEDULE_PENDING(_ticker) 0
#endif

/* User operation data structure for start opcode. Used for passing start
 * requests to ticker_job
 */
struct ticker_user_op_start {
	uint32_t ticks_at_start;	/* Anchor ticks (absolute) */
	uint32_t ticks_first;		/* Initial timeout ticks */
	uint32_t ticks_periodic;	/* Ticker period ticks */

#if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
	uint32_t remainder_periodic;	/* Sub-microsecond tick remainder */

#if defined(CONFIG_BT_TICKER_START_REMAINDER)
	uint32_t remainder_first;	/* Sub-microsecond tick remainder */
#endif /* CONFIG_BT_TICKER_START_REMAINDER */
#endif /* CONFIG_BT_TICKER_REMAINDER_SUPPORT */

	uint16_t lazy;			/* Periodic latency in number of
					 * periods
					 */

#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
	uint32_t ticks_slot;		/* Air-time reservation ticks */
#endif /* CONFIG_BT_TICKER_SLOT_AGNOSTIC */

	ticker_timeout_func fp_timeout_func; /* Timeout callback function */
	void *context;			/* Context passed in timeout callback */

#if defined(CONFIG_BT_TICKER_EXT)
	struct ticker_ext *ext_data;	/* Ticker extension data instance */
#endif /* CONFIG_BT_TICKER_EXT */
};

/* User operation data structure for update opcode. Used for passing update
 * requests to ticker_job
 */
struct ticker_user_op_update {
	uint32_t ticks_drift_plus;	/* Requested positive drift in ticks */
	uint32_t ticks_drift_minus;	/* Requested negative drift in ticks */
#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
	uint32_t ticks_slot_plus;	/* Number of ticks to add to slot
					 * reservation (air-time)
					 */
	uint32_t ticks_slot_minus;	/* Number of ticks to subtract from
					 * slot reservation (air-time)
					 */
#endif /* CONFIG_BT_TICKER_SLOT_AGNOSTIC */
	uint16_t lazy;			/* Peripheral latency:
					 *  0: Do nothing
					 *  1: latency = 0
					 * >1: latency = lazy - 1
					 */
	uint8_t force;			/* Force update */
#if defined(CONFIG_BT_TICKER_EXT) && !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC) &&\
	!defined(CONFIG_BT_TICKER_LOW_LAT)
	uint8_t must_expire;		/* Node must expire, even if it
					 * collides with other nodes:
					 * 0x00: Do nothing
					 * 0x01: Disable must_expire
					 * 0x02: Enable must_expire
					 */
#endif
#if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
	uint8_t expire_info_id;
#endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
};

/* User operation data structure for yield/stop opcode. Used for passing
 * yield/stop requests with absolute tick to ticker_job
 */
struct ticker_user_op_yield {
	uint32_t ticks_at_yield;	/* Anchor ticks (absolute) */
};

/* User operation data structure for slot_get opcode. Used for passing request
 * to get next ticker with slot ticks via ticker_job
 */
struct ticker_user_op_slot_get {
	uint8_t *ticker_id;
	uint32_t *ticks_current;
	uint32_t *ticks_to_expire;
#if defined(CONFIG_BT_TICKER_REMAINDER_GET)
	uint32_t *remainder;
#endif /* CONFIG_BT_TICKER_REMAINDER_GET */
#if defined(CONFIG_BT_TICKER_LAZY_GET)
	uint16_t *lazy;
#endif /* CONFIG_BT_TICKER_LAZY_GET */
#if defined(CONFIG_BT_TICKER_NEXT_SLOT_GET_MATCH)
	ticker_op_match_func fp_match_op_func;
	void *match_op_context;
#endif /* CONFIG_BT_TICKER_NEXT_SLOT_GET_MATCH */
};

/* User operation data structure for priority_set opcode. Used for passing
 * request to set ticker node priority via ticker_job
 */
struct ticker_user_op_priority_set {
	int8_t priority;		/* Node priority. Defaults to 0 */
};

/* User operation top level data structure. Used for passing requests to
 * ticker_job
 */
struct ticker_user_op {
	uint8_t op;			/* User operation */
	uint8_t id;			/* Ticker node id */
	uint8_t status;			/* Operation result */
	union {
		struct ticker_user_op_start start;
		struct ticker_user_op_update update;
		struct ticker_user_op_yield yield;
		struct ticker_user_op_slot_get slot_get;
		struct ticker_user_op_priority_set priority_set;
	} params;			/* User operation parameters */
	ticker_op_func fp_op_func;	/* Operation completion callback */
	void *op_context;		/* Context passed in completion callback */
};

/* User data structure for operations
 */
struct ticker_user {
	uint8_t count_user_op;		/* Number of user operation slots */
	uint8_t first;			/* Slot index of first user operation */
	uint8_t middle;			/* Slot index of last managed user op.
					 * Updated by ticker_job_list_manage
					 * for use in ticker_job_list_insert
					 */
	uint8_t last;			/* Slot index of last user operation */
	struct ticker_user_op *user_op;	/* Pointer to user operation array */
};

/* Ticker instance
 */
struct ticker_instance {
	struct ticker_node *nodes;	/* Pointer to ticker nodes */
	struct ticker_user *users;	/* Pointer to user nodes */
	uint8_t count_node;		/* Number of ticker nodes */
	uint8_t count_user;		/* Number of user nodes */
	uint8_t ticks_elapsed_first;	/* Index from which elapsed ticks count
					 * is pulled
					 */
	uint8_t ticks_elapsed_last;	/* Index to which elapsed ticks count
					 * is pushed
					 */
	uint32_t ticks_elapsed[DOUBLE_BUFFER_SIZE]; /* Buffer for elapsed
						     * ticks
						     */
	uint32_t ticks_current;		/* Absolute ticks elapsed at last
					 * ticker_job
					 */
	uint8_t ticker_id_head;		/* Index of first ticker node (next to
					 * expire)
					 */
	uint8_t job_guard;		/* Flag preventing ticker_worker from
					 * running if ticker_job is active
					 */
	uint8_t worker_trigger;		/* Flag preventing ticker_job from
					 * starting if ticker_worker was
					 * requested, and to trigger
					 * ticker_worker at end of job, if
					 * requested
					 */

#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
	uint8_t ticker_id_slot_previous; /* Id of previous slot reserving
					  * ticker node
					  */
	uint32_t ticks_slot_previous;	/* Number of ticks previously reserved
					 * by a ticker node (active air-time)
					 */
#endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */

#if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
	struct ticker_expire_info_internal expire_infos[TICKER_EXPIRE_INFO_MAX];
	bool expire_infos_outdated;
#endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */

	ticker_caller_id_get_cb_t caller_id_get_cb; /* Function for retrieving
						     * the caller id from user
						     * id
						     */
	ticker_sched_cb_t sched_cb;	/* Function for scheduling
					 * ticker_worker and
					 * ticker_job
					 */
	ticker_trigger_set_cb_t trigger_set_cb; /* Function for setting
						 * the trigger (compare
						 * value)
						 */
};

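/* These asserts tie the internal structure sizes to the opaque
 * TICKER_*_T_SIZE values exposed in ticker.h, which callers rely on when
 * statically allocating the node, user and user-op arrays.
 */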
BUILD_ASSERT(sizeof(struct ticker_node) == TICKER_NODE_T_SIZE);
BUILD_ASSERT(sizeof(struct ticker_user) == TICKER_USER_T_SIZE);
BUILD_ASSERT(sizeof(struct ticker_user_op) == TICKER_USER_OP_T_SIZE);

/*****************************************************************************
 * Global instances
 ****************************************************************************/
#define TICKER_INSTANCE_MAX 1
static struct ticker_instance _instance[TICKER_INSTANCE_MAX];

/*****************************************************************************
 * Static Functions
 ****************************************************************************/
#if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
static inline uint8_t ticker_add_to_remainder(uint32_t *remainder, uint32_t to_add);
#endif /* CONFIG_BT_TICKER_REMAINDER_SUPPORT */

/**
 * @brief Update elapsed index
 *
 * @param ticks_elapsed_index Pointer to current index
 *
 * @internal
 */
static inline void ticker_next_elapsed(uint8_t *ticks_elapsed_index)
{
	uint8_t idx = *ticks_elapsed_index + 1;

	if (idx == DOUBLE_BUFFER_SIZE) {
		idx = 0U;
	}
	*ticks_elapsed_index = idx;
}

#if defined(CONFIG_BT_TICKER_LOW_LAT)
/**
 * @brief Get ticker expiring in a specific slot
 *
 * @details Searches for a ticker which expires in a specific slot starting
 * at 'ticks_slot'.
 *
 * @param node           Pointer to ticker node array
 * @param ticker_id_head Id of initial ticker node
 * @param ticks_slot     Ticks indicating slot to get
 *
 * @return Id of ticker expiring within slot or TICKER_NULL
 * @internal
 */
static uint8_t ticker_by_slot_get(struct ticker_node *node,
				  uint8_t ticker_id_head, uint32_t ticks_slot)
{
	while (ticker_id_head != TICKER_NULL) {
		struct ticker_node *ticker;
		uint32_t ticks_to_expire;

		ticker = &node[ticker_id_head];
		ticks_to_expire = ticker->ticks_to_expire;

		if (ticks_slot <= ticks_to_expire) {
			/* Next ticker expiration is outside the checked slot */
			return TICKER_NULL;
		}

		if (ticker->ticks_slot) {
			/* This ticker node has slot defined and expires within
			 * checked slot
			 */
			break;
		}

		ticks_slot -= ticks_to_expire;
		ticker_id_head = ticker->next;
	}

	return ticker_id_head;
}
#endif /* CONFIG_BT_TICKER_LOW_LAT */

#if defined(CONFIG_BT_TICKER_NEXT_SLOT_GET)
/**
 * @brief Get next ticker with slot ticks or match
 *
 * @details Iterates ticker nodes from ticker_id_head. If no head id is
 * provided (TICKER_NULL), iteration starts from the first node.
 * Operation details:
 *
 * NORMAL MODE (!CONFIG_BT_TICKER_SLOT_AGNOSTIC)
 * - Gets the next ticker which has slot ticks specified and returns the
 *   ticker id and accumulated ticks until expiration.
 * - If a matching function is provided, it is called for each node and
 *   iteration continues until the match function returns true.
 *
 * SLOT AGNOSTIC MODE (CONFIG_BT_TICKER_SLOT_AGNOSTIC)
 * - Gets the next ticker node.
 * - If a matching function is provided, it is called for each node and
 *   iteration continues until the match function returns true.
 *
 * @param instance         Pointer to ticker instance
 * @param ticker_id_head   Pointer to id of first ticker node [in/out]
 * @param ticks_current    Pointer to current ticks count [in/out]
 * @param ticks_to_expire  Pointer to ticks to expire [in/out]
 * @param fp_match_op_func Pointer to match function or NULL if unused
 * @param match_op_context Pointer to operation context passed to match
 *                         function or NULL if unused
 * @param remainder        Pointer to variable to receive remainder_current
 *                         of found ticker node, or NULL if unused
 * @param lazy             Pointer to lazy variable to receive lazy_current
 *                         of found ticker node
 * @internal
 */
static void ticker_by_next_slot_get(struct ticker_instance *instance,
				    uint8_t *ticker_id_head,
				    uint32_t *ticks_current,
				    uint32_t *ticks_to_expire,
				    ticker_op_match_func fp_match_op_func,
				    void *match_op_context, uint32_t *remainder,
				    uint16_t *lazy)
{
	struct ticker_node *ticker;
	struct ticker_node *node;
	uint32_t _ticks_to_expire;
	uint8_t _ticker_id_head;

	node = instance->nodes;

	_ticker_id_head = *ticker_id_head;
	_ticks_to_expire = *ticks_to_expire;
	if ((_ticker_id_head == TICKER_NULL) ||
	    (*ticks_current != instance->ticks_current)) {
		/* Initialize with instance head */
		_ticker_id_head = instance->ticker_id_head;
		*ticks_current = instance->ticks_current;
		_ticks_to_expire = 0U;
	} else {
		/* Get ticker id for next node */
		ticker = &node[_ticker_id_head];
		_ticker_id_head = ticker->next;
	}

	/* Find first ticker node with match or slot ticks */
	while (_ticker_id_head != TICKER_NULL) {
		ticker = &node[_ticker_id_head];

#if defined(CONFIG_BT_TICKER_NEXT_SLOT_GET_MATCH)
		if (fp_match_op_func) {
			uint32_t ticks_slot = 0;

#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
			ticks_slot += ticker->ticks_slot;
#endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */

			/* Match node id */
			if (fp_match_op_func(_ticker_id_head, ticks_slot,
					     _ticks_to_expire +
					     ticker->ticks_to_expire,
					     match_op_context)) {
				/* Match found */
				break;
			}
		} else
#else /* !CONFIG_BT_TICKER_NEXT_SLOT_GET_MATCH */
		ARG_UNUSED(fp_match_op_func);
		ARG_UNUSED(match_op_context);
#endif /* !CONFIG_BT_TICKER_NEXT_SLOT_GET_MATCH */

#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
		if (ticker->ticks_slot) {
			/* Matching not used and node has slot ticks */
			break;
#else
		{
			/* Matching not used and slot agnostic */
			break;
#endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */
		}

		/* Accumulate expire ticks */
		_ticks_to_expire += ticker->ticks_to_expire;
		_ticker_id_head = ticker->next;
	}

	if (_ticker_id_head != TICKER_NULL) {
		/* Add ticks for found ticker */
		_ticks_to_expire += ticker->ticks_to_expire;

#if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
#if defined(CONFIG_BT_TICKER_REMAINDER_GET)
		if (remainder) {
			*remainder = ticker->remainder_current;
		}
#else /* !CONFIG_BT_TICKER_REMAINDER_GET */
		ARG_UNUSED(remainder);
#endif /* !CONFIG_BT_TICKER_REMAINDER_GET */
#else /* !CONFIG_BT_TICKER_REMAINDER_SUPPORT */
		ARG_UNUSED(remainder);
#endif /* !CONFIG_BT_TICKER_REMAINDER_SUPPORT */

#if defined(CONFIG_BT_TICKER_LAZY_GET)
		if (lazy) {
			*lazy = ticker->lazy_current;
		}
#else /* !CONFIG_BT_TICKER_LAZY_GET */
		ARG_UNUSED(lazy);
#endif /* !CONFIG_BT_TICKER_LAZY_GET */
	}

	*ticker_id_head = _ticker_id_head;
	*ticks_to_expire = _ticks_to_expire;
}
#endif /* CONFIG_BT_TICKER_NEXT_SLOT_GET */

#if !defined(CONFIG_BT_TICKER_LOW_LAT)
/**
 * @brief Enqueue ticker node
 *
 * @details Finds insertion point for new ticker node and inserts the
 * node in the linked node list.
 *
 * @param instance Pointer to ticker instance
 * @param id       Ticker node id to enqueue
 *
 * @return Id of enqueued ticker node
 * @internal
 */
static uint8_t ticker_enqueue(struct ticker_instance *instance, uint8_t id)
{
	struct ticker_node *ticker_current;
	struct ticker_node *ticker_new;
	uint32_t ticks_to_expire_current;
	struct ticker_node *node;
	uint32_t ticks_to_expire;
	uint8_t previous;
	uint8_t current;

	node = &instance->nodes[0];
	ticker_new = &node[id];
	ticks_to_expire = ticker_new->ticks_to_expire;
	current = instance->ticker_id_head;

	/* Find insertion point for new ticker node and adjust ticks_to_expire
	 * relative to insertion point
	 */
	previous = TICKER_NULL;

	while ((current != TICKER_NULL) && (ticks_to_expire >=
		(ticks_to_expire_current =
		 (ticker_current = &node[current])->ticks_to_expire))) {

		ticks_to_expire -= ticks_to_expire_current;

		/* Check for timeout in same tick - prioritize according to
		 * latency
		 */
		if (ticks_to_expire == 0 && (ticker_new->lazy_current >
					     ticker_current->lazy_current)) {
			ticks_to_expire = ticks_to_expire_current;
			break;
		}

		previous = current;
		current = ticker_current->next;
	}

	/* Link in new ticker node and adjust ticks_to_expire to relative value
	 */
	ticker_new->ticks_to_expire = ticks_to_expire;
	ticker_new->next = current;

	if (previous == TICKER_NULL) {
		instance->ticker_id_head = id;
	} else {
		node[previous].next = id;
	}

	if (current != TICKER_NULL) {
		node[current].ticks_to_expire -= ticks_to_expire;
	}

	return id;
}
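
/* Illustration of the relative (delta) list maintained above, with made-up
 * values: nodes expiring at absolute ticks 10, 25 and 40 are stored with
 * ticks_to_expire 10, 15 and 15. Inserting a node expiring at 30 walks
 * past the first two (30 - 10 - 15 = 5 remaining), links it with
 * ticks_to_expire = 5 and reduces the old third node from 15 to 10, so
 * every stored value stays relative to its predecessor.
 */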
#else /* CONFIG_BT_TICKER_LOW_LAT */

/**
 * @brief Enqueue ticker node
 *
 * @details Finds insertion point for new ticker node and inserts the
 * node in the linked node list. However, if the new ticker node collides
 * with an existing node or the expiration is inside the previous slot,
 * the node is not inserted.
 *
 * @param instance Pointer to ticker instance
 * @param id       Ticker node id to enqueue
 *
 * @return Id of enqueued ticker node, or id of previous- or colliding
 *         ticker node if new node was not enqueued
 * @internal
 */
static uint8_t ticker_enqueue(struct ticker_instance *instance, uint8_t id)
{
	struct ticker_node *ticker_current;
	struct ticker_node *ticker_new;
	uint32_t ticks_to_expire_current;
	uint8_t ticker_id_slot_previous;
	uint32_t ticks_slot_previous;
	struct ticker_node *node;
	uint32_t ticks_to_expire;
	uint8_t previous;
	uint8_t current;
	uint8_t collide;

	node = &instance->nodes[0];
	ticker_new = &node[id];
	ticks_to_expire = ticker_new->ticks_to_expire;

	collide = ticker_id_slot_previous = TICKER_NULL;
	current = instance->ticker_id_head;
	previous = current;
	ticks_slot_previous = instance->ticks_slot_previous;

	/* Find insertion point for new ticker node and adjust ticks_to_expire
	 * relative to insertion point
	 */
	while ((current != TICKER_NULL) &&
	       (ticks_to_expire >
		(ticks_to_expire_current =
		 (ticker_current = &node[current])->ticks_to_expire))) {
		ticks_to_expire -= ticks_to_expire_current;

		if (ticker_current->ticks_slot != 0U) {
			ticks_slot_previous = ticker_current->ticks_slot;
			ticker_id_slot_previous = current;
		} else {
			if (ticks_slot_previous > ticks_to_expire_current) {
				ticks_slot_previous -= ticks_to_expire_current;
			} else {
				ticks_slot_previous = 0U;
			}
		}
		previous = current;
		current = ticker_current->next;
	}

	/* Check for collision for new ticker node at insertion point */
	collide = ticker_by_slot_get(&node[0], current,
				     ticks_to_expire + ticker_new->ticks_slot);

	if ((ticker_new->ticks_slot == 0U) ||
	    ((ticks_slot_previous <= ticks_to_expire) &&
	     (collide == TICKER_NULL))) {
		/* New ticker node has no slot ticks or there is no collision -
		 * link it in and adjust ticks_to_expire to relative value
		 */
		ticker_new->ticks_to_expire = ticks_to_expire;
		ticker_new->next = current;

		if (previous == current) {
			instance->ticker_id_head = id;
		} else {
			node[previous].next = id;
		}

		if (current != TICKER_NULL) {
			node[current].ticks_to_expire -= ticks_to_expire;
		}
	} else {
		/* Collision - no ticker node insertion, set id to that of
		 * colliding node
		 */
		if (ticks_slot_previous > ticks_to_expire) {
			id = ticker_id_slot_previous;
		} else {
			id = collide;
		}
	}

	return id;
}
#endif /* CONFIG_BT_TICKER_LOW_LAT */

/**
 * @brief Dequeue ticker node
 *
 * @details Finds extraction point for ticker node to be dequeued, unlinks
 * the node and adjusts the links and ticks_to_expire. Returns the ticks
 * until expiration for the dequeued ticker node.
 *
 * @param instance Pointer to ticker instance
 * @param id       Ticker node id to dequeue
 *
 * @return Total ticks until expiration for dequeued ticker node, or 0 if
 *         node was not found
 * @internal
 */
static uint32_t ticker_dequeue(struct ticker_instance *instance, uint8_t id)
{
	struct ticker_node *ticker_current;
	struct ticker_node *node;
	uint8_t previous;
	uint32_t timeout;
	uint8_t current;
	uint32_t total;

	/* Find the ticker's position in ticker node list while accumulating
	 * ticks_to_expire
	 */
	node = &instance->nodes[0];
	previous = instance->ticker_id_head;
	current = previous;
	total = 0U;
	ticker_current = NULL;
	while (current != TICKER_NULL) {
		ticker_current = &node[current];

		if (current == id) {
			break;
		}

		total += ticker_current->ticks_to_expire;
		previous = current;
		current = ticker_current->next;
	}

	if (current == TICKER_NULL) {
		/* Ticker not in active list */
		return 0;
	}

	if (previous == current) {
		/* Ticker is the first in the list */
		instance->ticker_id_head = ticker_current->next;
	}

	/* Remaining ticks to expire for the ticker being removed */
	timeout = ticker_current->ticks_to_expire;

	/* Link previous ticker with next of this ticker
	 * i.e. removing the ticker from list
	 */
	node[previous].next = ticker_current->next;

	/* If this is not the last ticker, increment the
	 * next ticker by this ticker timeout
	 */
	if (ticker_current->next != TICKER_NULL) {
		node[ticker_current->next].ticks_to_expire += timeout;
	}

	return (total + timeout);
}

#if !defined(CONFIG_BT_TICKER_LOW_LAT) && \
	!defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
/**
 * @brief Resolve ticker node collision
 *
 * @details Evaluates the provided ticker node against other queued nodes
 * and returns non-zero if the ticker node collides and should be skipped.
 * The following rules are checked:
 *   1) If the periodic latency is not yet exhausted, node is skipped
 *   2) If the node has highest possible priority, node is never skipped
 *   3) If the node will starve next node due to slot reservation
 *      overlap, node is skipped if:
 *      a) Next node has higher priority than current node
 *      b) Next node has more accumulated latency than the current node
 *      c) Next node is 'older' than current node and has same priority
 *      d) Next node has force flag set, and the current does not
 *   4) If using ticks slot window,
 *      a) current node can be rescheduled later in the ticks slot window
 *   5) If using ticks slot window under yield (build time configuration),
 *      a) current node can be rescheduled later in the ticks slot window
 *         when next node cannot be rescheduled later in its ticks slot
 *         window
 *
 * @param nodes  Pointer to ticker node array
 * @param ticker Pointer to ticker to resolve
 *
 * @return 0 if no collision was detected. 1 if ticker node collides
 *         with other ticker node of higher composite priority
 * @internal
 */
static uint8_t ticker_resolve_collision(struct ticker_node *nodes,
					struct ticker_node *ticker)
{
#if defined(CONFIG_BT_TICKER_PRIORITY_SET)
	if ((ticker->priority != TICKER_PRIORITY_CRITICAL) &&
	    (ticker->next != TICKER_NULL)) {

#else /* !CONFIG_BT_TICKER_PRIORITY_SET */
	if (ticker->next != TICKER_NULL) {

#endif /* !CONFIG_BT_TICKER_PRIORITY_SET */

		uint16_t lazy_current = ticker->lazy_current;
		uint32_t ticker_ticks_slot;

		if (TICKER_HAS_SLOT_WINDOW(ticker) && !ticker->ticks_slot) {
			ticker_ticks_slot = HAL_TICKER_RESCHEDULE_MARGIN;
		} else {
			ticker_ticks_slot = ticker->ticks_slot;
		}

		/* Check if this ticker node will starve next node which has
		 * latency or higher priority
		 */
		if (lazy_current >= ticker->lazy_periodic) {
			lazy_current -= ticker->lazy_periodic;
		}
		uint8_t id_head = ticker->next;
		uint32_t acc_ticks_to_expire = 0U;

		/* Age is time since last expiry */
		uint32_t current_age = ticker->ticks_periodic +
				       (lazy_current * ticker->ticks_periodic);

		while (id_head != TICKER_NULL) {
			struct ticker_node *ticker_next = &nodes[id_head];
			uint32_t ticker_next_ticks_slot;

			/* Accumulate ticks_to_expire for each node */
			acc_ticks_to_expire += ticker_next->ticks_to_expire;
			if (acc_ticks_to_expire > ticker_ticks_slot) {
				break;
			}

			if (TICKER_HAS_SLOT_WINDOW(ticker_next) &&
			    (ticker_next->ticks_slot == 0U)) {
				ticker_next_ticks_slot =
					HAL_TICKER_RESCHEDULE_MARGIN;
			} else {
				ticker_next_ticks_slot =
					ticker_next->ticks_slot;
			}

			/* We only care about nodes with slot reservation */
			if (ticker_next_ticks_slot == 0U) {
				id_head = ticker_next->next;
				continue;
			}

			uint16_t lazy_next = ticker_next->lazy_current;
			uint8_t lazy_next_periodic_skip =
				ticker_next->lazy_periodic > lazy_next;

			if (!lazy_next_periodic_skip) {
				lazy_next -= ticker_next->lazy_periodic;
			}

			/* Age is time since last expiry */
			uint32_t next_age = (ticker_next->ticks_periodic == 0U ?
					     0U :
					     (ticker_next->ticks_periodic -
					      ticker_next->ticks_to_expire)) +
					    (lazy_next *
					     ticker_next->ticks_periodic);

			/* Was the current node scheduled earlier? */
			uint8_t current_is_older =
				(ticker->ticks_periodic == 0U) ||
				(current_age > next_age);
			/* Was next node scheduled earlier (legacy priority)? */
			uint8_t next_is_older =
				(ticker->ticks_periodic != 0U) &&
				(next_age > current_age);

			/* Is the current and next node equal in force? */
			uint8_t equal_force =
				(ticker->force == ticker_next->force);
			/* Is force requested for next node (e.g. update) -
			 * more so than for current node?
			 */
			uint8_t next_force =
				(ticker_next->force > ticker->force);

#if defined(CONFIG_BT_TICKER_PRIORITY_SET)
			/* Does next node have critical priority and should
			 * always be scheduled?
			 */
			uint8_t next_is_critical =
				(ticker_next->priority ==
				 TICKER_PRIORITY_CRITICAL);

			/* Is the current and next node equal in priority? */
			uint8_t equal_priority =
				(ticker->priority == ticker_next->priority);

#else /* !CONFIG_BT_TICKER_PRIORITY_SET */
			uint8_t next_is_critical = 0U;
			uint8_t equal_priority = 1U;
			uint8_t next_has_priority = 0U;

#endif /* !CONFIG_BT_TICKER_PRIORITY_SET */

#if defined(CONFIG_BT_TICKER_EXT)
#if defined(CONFIG_BT_TICKER_EXT_SLOT_WINDOW_YIELD)
#if defined(CONFIG_BT_TICKER_PRIORITY_SET)
			/* Does next node have higher priority? */
			uint8_t next_has_priority =
				(!TICKER_HAS_SLOT_WINDOW(ticker_next) &&
				 ((lazy_next - ticker_next->priority) >
				  (lazy_current - ticker->priority)));
#endif /* CONFIG_BT_TICKER_PRIORITY_SET */

			/* Colliding next ticker does not use ticks_slot_window
			 * or it does not fit after the current ticker within
			 * the ticks_slot_window.
			 */
			uint8_t next_not_ticks_slot_window =
				!TICKER_HAS_SLOT_WINDOW(ticker_next) ||
				(ticker_next->ext_data->is_drift_in_window &&
				 TICKER_HAS_SLOT_WINDOW(ticker)) ||
				((acc_ticks_to_expire +
				  ticker_next->ext_data->ticks_slot_window -
				  ticker_next->ticks_slot) <
				 ticker->ticks_slot);

			/* Can the current ticker with ticks_slot_window be
			 * scheduled after the colliding ticker?
			 */
			uint8_t curr_has_ticks_slot_window =
				TICKER_HAS_SLOT_WINDOW(ticker) &&
				((acc_ticks_to_expire +
				  ticker_next->ticks_slot) <=
				 (ticker->ext_data->ticks_slot_window -
				  ticker->ticks_slot));

#else /* !CONFIG_BT_TICKER_EXT_SLOT_WINDOW_YIELD */
#if defined(CONFIG_BT_TICKER_PRIORITY_SET)
			/* Does next node have higher priority? */
			uint8_t next_has_priority =
				(lazy_next - ticker_next->priority) >
				(lazy_current - ticker->priority);

#endif /* CONFIG_BT_TICKER_PRIORITY_SET */
			uint8_t next_not_ticks_slot_window = 1U;

			/* Can the current ticker with ticks_slot_window be
			 * scheduled after the colliding ticker?
			 * NOTE: Tickers with ticks_slot_window and no
			 *       ticks_slot (unreserved) can always be
			 *       scheduled after the colliding ticker.
			 */
			uint8_t curr_has_ticks_slot_window =
				(TICKER_HAS_SLOT_WINDOW(ticker) &&
				 !ticker->ticks_slot &&
				 ((acc_ticks_to_expire +
				   ticker_next->ticks_slot) <=
				  (ticker->ext_data->ticks_slot_window)));

#endif /* !CONFIG_BT_TICKER_EXT_SLOT_WINDOW_YIELD */
#else /* !CONFIG_BT_TICKER_EXT */
#if defined(CONFIG_BT_TICKER_PRIORITY_SET)
			/* Does next node have higher priority? */
			uint8_t next_has_priority =
				(lazy_next - ticker_next->priority) >
				(lazy_current - ticker->priority);

#endif /* CONFIG_BT_TICKER_PRIORITY_SET */
			uint8_t next_not_ticks_slot_window = 1U;
			uint8_t curr_has_ticks_slot_window = 0U;

#endif /* !CONFIG_BT_TICKER_EXT */

			/* Check if next node is within this reservation slot
			 * and wins conflict resolution
			 */
			if ((curr_has_ticks_slot_window &&
			     next_not_ticks_slot_window) ||
			    (!lazy_next_periodic_skip &&
			     (next_is_critical ||
			      next_force ||
			      (next_has_priority && !current_is_older) ||
			      (equal_priority && equal_force && next_is_older &&
			       next_not_ticks_slot_window)))) {
				/* This node must be skipped - check window */
				return 1U;
			}
			id_head = ticker_next->next;
		}
	}

	return 0U;
}
#endif /* !CONFIG_BT_TICKER_LOW_LAT &&
	* !CONFIG_BT_TICKER_SLOT_AGNOSTIC
	*/

#if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
/**
 * @brief Get expiration delta from one ticker id to another ticker id
 *
 * @details Helper function to get expiration info between two tickers
 *
 * @param instance       Ticker instance
 * @param to_ticker_id   Target ticker id
 * @param from_ticker_id Ticker id to compare with
 * @param expire_info    Pointer to ticker_expire_info that will
 *                       get populated with the result
 *
 * @internal
 */
static void ticker_get_expire_info(struct ticker_instance *instance,
				   uint8_t to_ticker_id, uint8_t from_ticker_id,
				   struct ticker_expire_info_internal *expire_info)
{
	struct ticker_node *current_node;
	uint32_t acc_ticks_to_expire = 0;
	uint8_t current_ticker_id;
	uint32_t from_ticks = 0;
	bool from_found = false;
	uint32_t to_ticks = 0;
	bool to_found = false;

	current_ticker_id = instance->ticker_id_head;
	current_node = &instance->nodes[instance->ticker_id_head];
	while (current_ticker_id != TICKER_NULL && (!to_found || !from_found)) {
		/* Accumulate expire ticks */
		acc_ticks_to_expire += current_node->ticks_to_expire;

		if (current_ticker_id == from_ticker_id) {
			from_ticks = acc_ticks_to_expire;
			from_found = true;
		} else if (current_ticker_id == to_ticker_id) {
			to_ticks = acc_ticks_to_expire;
			to_found = true;
		}

		current_ticker_id = current_node->next;
		current_node = &instance->nodes[current_ticker_id];
	}

	if (to_found && from_found) {
		struct ticker_node *to_ticker = &instance->nodes[to_ticker_id];

#if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
		uint32_t to_remainder = to_ticker->remainder_current;
#endif /* CONFIG_BT_TICKER_REMAINDER_SUPPORT */

		if (from_ticks > to_ticks) {
			/* from ticker is scheduled after the to ticker - use
			 * the period to compute a result
			 */
			if (to_ticker->ticks_periodic == 0) {
				/* single shot ticker */
				expire_info->found = 0;
				return;
			}
			while (to_ticks < from_ticks) {
				to_ticks += to_ticker->ticks_periodic;

#if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
				to_ticks += ticker_add_to_remainder(&to_remainder,
								    to_ticker->remainder_periodic);
#endif /* CONFIG_BT_TICKER_REMAINDER_SUPPORT */
			}
		}

		expire_info->ticks_to_expire = to_ticks - from_ticks;
#if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
		expire_info->remainder = to_remainder;
#endif /* CONFIG_BT_TICKER_REMAINDER_SUPPORT */
		expire_info->lazy = to_ticker->lazy_current;
		expire_info->found = 1;
	} else {
		expire_info->found = 0;
	}
}

/**
 * @brief Allocate an expire info for the given ticker ID
 *
 * @param instance  Ticker instance
 * @param ticker_id Ticker ID to allocate for
 *
 * @return Returns TICKER_STATUS_SUCCESS if the allocation succeeded,
 *         TICKER_STATUS_FAILURE otherwise
 *
 * @internal
 */
static uint32_t ticker_alloc_expire_info(struct ticker_instance *instance,
					 uint8_t ticker_id)
{
	uint32_t status = TICKER_STATUS_FAILURE;
	uint8_t is_last = 0;

	for (int i = 0; i < TICKER_EXPIRE_INFO_MAX; i++) {
		if (instance->expire_infos[i].ticker_id == TICKER_NULL) {
			struct ticker_node *ticker = &instance->nodes[ticker_id];

			instance->expire_infos[i].ticker_id = ticker_id;
			instance->expire_infos[i].outdated = true;
			instance->expire_infos[i].last = is_last;
			ticker->ext_data->other_expire_info = &instance->expire_infos[i];
			instance->expire_infos_outdated = true;
			status = TICKER_STATUS_SUCCESS;
			break;
		} else if (instance->expire_infos[i].last &&
			   (i < (TICKER_EXPIRE_INFO_MAX - 1))) {
			instance->expire_infos[i].last = 0;
			is_last = 1;
		}
	}

	return status;
}

/**
 * @brief Free a previously allocated expire info for the given ticker ID
 *
 * @param instance  Ticker instance
 * @param ticker_id Ticker ID to free up the allocation for
 *
 * @internal
 */
static void ticker_free_expire_info(struct ticker_instance *instance,
				    uint8_t ticker_id)
{
	uint8_t is_last = 0;
	uint8_t index;

	for (index = 0; index < TICKER_EXPIRE_INFO_MAX; index++) {
		if (instance->expire_infos[index].ticker_id == ticker_id) {
			instance->expire_infos[index].ticker_id = TICKER_NULL;
			is_last = instance->expire_infos[index].last;
			instance->expire_infos[index].last = 0;
			break;
		}
	}

	if (is_last) {
		/* Find new last used element and mark it */
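		/* NOTE: index is unsigned, so the index >= 0 condition below
		 * is always true; the loop terminates via the index == 0 arm
		 * of the break.
		 */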
		for (; index >= 0; index--) {
			if (instance->expire_infos[index].ticker_id != TICKER_NULL ||
			    index == 0) {
				instance->expire_infos[index].last = 1;
				break;
			}
		}
	}
}

/**
 * @brief Mark all expire infos involving a ticker ID as outdated
 *
 * @details If a ticker moves, this function should be called to mark all
 * expiration infos (if any) that involve that ticker as outdated and in
 * need of re-calculation. If any expiration infos involving the ticker_id
 * are found, the ticker instance's expire_infos_outdated flag is also set.
 *
 * @param instance  Ticker instance
 * @param ticker_id ID of ticker that has moved
 *
 * @internal
 */
static void ticker_mark_expire_info_outdated(struct ticker_instance *instance,
					     uint8_t ticker_id)
{
	for (int i = 0; i < TICKER_EXPIRE_INFO_MAX; i++) {
		if (instance->expire_infos[i].ticker_id != TICKER_NULL) {
			uint8_t current_id = instance->expire_infos[i].ticker_id;
			struct ticker_node *ticker = &instance->nodes[current_id];

			if (current_id == ticker_id ||
			    ticker->ext_data->expire_info_id == ticker_id) {
				instance->expire_infos[i].outdated = true;
				instance->expire_infos_outdated = true;
			}
		}
		if (instance->expire_infos[i].last) {
			break;
		}
	}
}

/**
 * @brief Run through all expire infos and update them if needed
 *
 * @details Runs through all expire_infos and runs ticker_get_expire_info()
 * for any that are marked as outdated. Clears the expire_infos_outdated
 * flag when done.
 *
 * @param instance Pointer to ticker instance
 *
 * @internal
 */
static void ticker_job_update_expire_infos(struct ticker_instance *instance)
{
	for (int i = 0; i < TICKER_EXPIRE_INFO_MAX; i++) {
		struct ticker_expire_info_internal *info = &instance->expire_infos[i];

		if (info->ticker_id != TICKER_NULL && info->outdated) {
			struct ticker_node *ticker = &instance->nodes[info->ticker_id];

			ticker_get_expire_info(instance, ticker->ext_data->expire_info_id,
					       info->ticker_id, info);
			info->outdated = false;
		}

		if (info->last) {
			break;
		}
	}

	instance->expire_infos_outdated = false;
}

#endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */

/**
 * @brief Ticker worker
 *
 * @details Runs as upper half of ticker operation, triggered by a compare
 * match from the underlying counter HAL, via the ticker_trigger function.
 * Traverses ticker nodes to find tickers expired since last job
 * execution. Expired (requested) ticker nodes have their timeout callback
 * functions called. Finally, a ticker job is enqueued. Invoked from the
 * ticker worker mayfly context (TICKER_MAYFLY_CALL_ID_WORKER)
 *
 * @param param Pointer to ticker instance
 *
 */
void ticker_worker(void *param)
{
	struct ticker_instance *instance = param;
	struct ticker_node *node;
	uint32_t ticks_elapsed;
	uint32_t ticks_expired;
	uint8_t ticker_id_head;
	uint32_t ticks_now;

	/* Defer worker if job running */
	instance->worker_trigger = 1U;
	if (instance->job_guard) {
		return;
	}

	/* If no tickers queued (active), do nothing */
	if (instance->ticker_id_head == TICKER_NULL) {
		instance->worker_trigger = 0U;
		return;
	}

	ticks_now = cntr_cnt_get();

	/* Get ticks elapsed since last job execution */
	ticks_elapsed = ticker_ticks_diff_get(ticks_now,
					      instance->ticks_current);

	/* Initialize actual elapsed ticks being consumed */
	ticks_expired = 0U;

	/* Auto variable containing the head of tickers expiring */
	ticker_id_head = instance->ticker_id_head;

#if !defined(CONFIG_BT_TICKER_LOW_LAT) && \
	!defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
	/* Check if the previous ticker node which had air-time, is still
	 * active and has this time slot reserved
	 */
	uint8_t slot_reserved = 0;

	if (instance->ticks_slot_previous > ticks_elapsed) {
		/* This node intersects reserved slot */
		slot_reserved = 1;
	}
#endif /* !CONFIG_BT_TICKER_LOW_LAT &&
	* !CONFIG_BT_TICKER_SLOT_AGNOSTIC
	*/

	/* Expire all tickers within ticks_elapsed and collect ticks_expired */
	node = &instance->nodes[0];

	while (ticker_id_head != TICKER_NULL) {
		struct ticker_node *ticker;
		uint32_t ticks_to_expire;
		uint8_t must_expire_skip;
		uint32_t ticks_drift;

		ticker = &node[ticker_id_head];

		/* Stop if ticker did not expire */
		ticks_to_expire = ticker->ticks_to_expire;
		if (ticks_elapsed < ticks_to_expire) {
			break;
		}

		/* Decrement ticks_elapsed and collect expired ticks */
		ticks_elapsed -= ticks_to_expire;
		ticks_expired += ticks_to_expire;

		/* Move to next ticker node */
		ticker_id_head = ticker->next;
		must_expire_skip = 0U;

		/* Skip if not scheduled to execute */
		if (((ticker->req - ticker->ack) & 0xff) != 1U) {
			continue;
		}

#if !defined(CONFIG_BT_TICKER_LOW_LAT) && \
	!defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
		uint32_t ticker_ticks_slot;

		if (TICKER_HAS_SLOT_WINDOW(ticker) &&
		    (ticker->ticks_slot == 0U)) {
			ticker_ticks_slot = HAL_TICKER_RESCHEDULE_MARGIN;
		} else {
			ticker_ticks_slot = ticker->ticks_slot;
		}

		/* Check if node has slot reservation and resolve any collision
		 * with other ticker nodes
		 */
		if ((ticker_ticks_slot != 0U) &&
		    (slot_reserved ||
		     (instance->ticks_slot_previous > ticks_expired) ||
		     ticker_resolve_collision(node, ticker))) {
#if defined(CONFIG_BT_TICKER_EXT)
			struct ticker_ext *ext_data = ticker->ext_data;

			if (ext_data &&
			    ext_data->ticks_slot_window != 0U &&
			    ext_data->reschedule_state ==
			    TICKER_RESCHEDULE_STATE_NONE &&
			    (ticker->lazy_periodic <= ticker->lazy_current)) {
				/* Mark node for re-scheduling in ticker_job */
				ext_data->reschedule_state =
					TICKER_RESCHEDULE_STATE_PENDING;
			} else if (ext_data) {
				/* Mark node as not re-scheduling */
				ext_data->reschedule_state =
					TICKER_RESCHEDULE_STATE_NONE;
			}
#endif /* CONFIG_BT_TICKER_EXT */
			/* Increment lazy_current to indicate skipped event. In
			 * case of re-scheduled node, the lazy count will be
			 * decremented in ticker_job_reschedule_in_window when
			 * completed.
			 */
			ticker->lazy_current++;
			ticker->force = 0U;

			if ((ticker->must_expire == 0U) ||
			    (ticker->lazy_periodic >= ticker->lazy_current) ||
			    TICKER_RESCHEDULE_PENDING(ticker)) {
				/* Not a must-expire node or this is periodic
				 * latency or pending re-schedule. Skip this
				 * ticker node. Mark it as elapsed.
				 */
				ticker->ack--;
				continue;
			}

			/* Continue but perform shallow expiry */
			must_expire_skip = 1U;
		}

#if defined(CONFIG_BT_TICKER_EXT)
		if (ticker->ext_data) {
			ticks_drift = ticker->ext_data->ticks_drift;
			ticker->ext_data->ticks_drift = 0U;
			/* Mark node as not re-scheduling */
			ticker->ext_data->reschedule_state =
				TICKER_RESCHEDULE_STATE_NONE;
		} else {
			ticks_drift = 0U;
		}

#else /* !CONFIG_BT_TICKER_EXT */
		ticks_drift = 0U;
#endif /* !CONFIG_BT_TICKER_EXT */

#else /* CONFIG_BT_TICKER_LOW_LAT ||
       * CONFIG_BT_TICKER_SLOT_AGNOSTIC
       */
		ticks_drift = 0U;
#endif /* CONFIG_BT_TICKER_LOW_LAT ||
	* CONFIG_BT_TICKER_SLOT_AGNOSTIC
	*/

		/* Scheduled timeout is acknowledged to be complete */
		ticker->ack--;

		if (ticker->timeout_func) {
			uint32_t remainder_current;
			uint32_t ticks_at_expire;

			ticks_at_expire = (instance->ticks_current +
					   ticks_expired -
					   ticker->ticks_to_expire_minus) &
					  HAL_TICKER_CNTR_MASK;

#if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
			remainder_current = ticker->remainder_current;
#else /* !CONFIG_BT_TICKER_REMAINDER_SUPPORT */
			remainder_current = 0U;
#endif /* !CONFIG_BT_TICKER_REMAINDER_SUPPORT */

#if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
			if (ticker->ext_data &&
			    ticker->ext_data->ext_timeout_func) {
				struct ticker_expire_info_internal *expire_info;
				struct ticker_ext_context ext_context;
				ticker_timeout_func timeout_func;

				timeout_func = ticker->ext_data->ext_timeout_func;
				expire_info = ticker->ext_data->other_expire_info;
				if (ticker->ext_data->expire_info_id != TICKER_NULL) {
					LL_ASSERT_DBG(expire_info && !expire_info->outdated);
				}

				ext_context.context = ticker->context;
				if (expire_info && expire_info->found) {
					ext_context.other_expire_info = (void *)expire_info;
				} else {
					ext_context.other_expire_info = NULL;
				}

				DEBUG_TICKER_TASK(1);

				/* Invoke the timeout callback */
				timeout_func(ticks_at_expire,
					     ticks_drift,
					     remainder_current,
					     must_expire_skip ?
					     TICKER_LAZY_MUST_EXPIRE :
					     ticker->lazy_current,
					     ticker->force,
					     &ext_context);
				DEBUG_TICKER_TASK(0);
			} else
#endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
			{
				DEBUG_TICKER_TASK(1);

				/* Invoke the timeout callback */
				ticker->timeout_func(ticks_at_expire,
						     ticks_drift,
						     remainder_current,
						     must_expire_skip ?
						     TICKER_LAZY_MUST_EXPIRE :
						     ticker->lazy_current,
						     ticker->force,
						     ticker->context);
				DEBUG_TICKER_TASK(0);
			}

			if (!IS_ENABLED(CONFIG_BT_TICKER_LOW_LAT) &&
			    (must_expire_skip == 0U)) {
				/* Reset latency to periodic offset */
				ticker->lazy_current = 0U;
				ticker->force = 0U;

#if !defined(CONFIG_BT_TICKER_LOW_LAT) && \
	!defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
				if (ticker_ticks_slot != 0U) {
					/* Any further nodes will be skipped */
					slot_reserved = 1U;
				}
#endif /* !CONFIG_BT_TICKER_LOW_LAT &&
	* !CONFIG_BT_TICKER_SLOT_AGNOSTIC
	*/

			}
		}
	}

	/* Queue the elapsed ticks */
	if (instance->ticks_elapsed_first == instance->ticks_elapsed_last) {
		ticker_next_elapsed(&instance->ticks_elapsed_last);
	}
	instance->ticks_elapsed[instance->ticks_elapsed_last] = ticks_expired;

	instance->worker_trigger = 0U;

	/* Enqueue the ticker job with chain=1 (do not inline) */
	instance->sched_cb(TICKER_CALL_ID_WORKER, TICKER_CALL_ID_JOB, 1,
			   instance);
}

/**
 * @brief Prepare ticker node expiration
 *
 * @details Calculates the number of ticks until next expiration, taking
 * into consideration any negative drift correction.
 *
 * @param ticker         Pointer to ticker node
 * @param ticks_current  Current number of ticks (elapsed)
 * @param ticks_at_start Number of ticks at start (anchor)
 *
 * @internal
 */
static void ticks_to_expire_prep(struct ticker_node *ticker,
				 uint32_t ticks_current, uint32_t ticks_at_start)
{
	uint32_t ticks_to_expire = ticker->ticks_to_expire;
	uint32_t ticks_to_expire_minus = ticker->ticks_to_expire_minus;

	/* Calculate ticks to expire for this new node */
	if (!((ticks_at_start - ticks_current) & BIT(HAL_TICKER_CNTR_MSBIT))) {
		/* Most significant bit is 0 so ticks_at_start lies ahead of
		 * ticks_current: ticks_at_start >= ticks_current
		 */
		ticks_to_expire += ticker_ticks_diff_get(ticks_at_start,
							 ticks_current);
	} else {
		/* ticks_current > ticks_at_start
		 */
		uint32_t delta_current_start;

		delta_current_start = ticker_ticks_diff_get(ticks_current,
							    ticks_at_start);
		if (ticks_to_expire > delta_current_start) {
			/* There's still time until expiration - subtract
			 * elapsed time
			 */
			ticks_to_expire -= delta_current_start;
		} else {
			/* Ticker node should have expired (we're late).
			 * Add 'lateness' to negative drift correction
			 * (ticks_to_expire_minus) and set ticks_to_expire
			 * to 0
			 */
			ticks_to_expire_minus +=
				(delta_current_start - ticks_to_expire);
			ticks_to_expire = 0U;
		}
	}

	/* Handle negative drift correction */
	if (ticks_to_expire > ticks_to_expire_minus) {
		ticks_to_expire -= ticks_to_expire_minus;
		ticks_to_expire_minus = 0U;
	} else {
		ticks_to_expire_minus -= ticks_to_expire;
		ticks_to_expire = 0U;
	}

	/* Update ticker */
	ticker->ticks_to_expire = ticks_to_expire;
	ticker->ticks_to_expire_minus = ticks_to_expire_minus;
}
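
/* Example of the wrap-around test above (illustrative values): on a 24-bit
 * counter, with ticks_current = 0xFFFFF0 and ticks_at_start = 0x10, the
 * difference (ticks_at_start - ticks_current) modulo the counter range is
 * 0x20 and has its most significant bit clear, so the anchor is treated as
 * lying 0x20 ticks ahead of ticks_current rather than far in the past.
 */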

#if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
/**
 * @brief Add to remainder
 *
 * @details Calculates whether the remainder should increment the expiration
 * time for above-microsecond precision counter HW. The remainder enables
 * improved ticker precision, but is disabled for sub-microsecond precision
 * configurations.
 * Note: This is the same functionality as ticker_remainder_inc(), except this
 * function allows doing the calculation without modifying any tickers.
 *
 * @param remainder Pointer to remainder to add to
 * @param to_add    Remainder value to add
 *
 * @return Returns 1 to indicate ticks increment is due, otherwise 0
 * @internal
 */
static inline uint8_t ticker_add_to_remainder(uint32_t *remainder, uint32_t to_add)
{
	*remainder += to_add;
	if ((*remainder < BIT(31)) &&
	    (*remainder > (HAL_TICKER_REMAINDER_RANGE >> 1))) {
		*remainder -= HAL_TICKER_REMAINDER_RANGE;

		return 1;
	}

	return 0;
}
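
/* Worked example (illustrative values only): if HAL_TICKER_REMAINDER_RANGE
 * represents one whole tick in sub-tick units, a period with a 0.25-tick
 * fractional part accumulates 0.25, 0.50, 0.75, ... of the range. On the
 * first accumulation exceeding half the range, the function returns 1 so
 * the caller schedules one extra tick, and the range is subtracted so the
 * remainder again tracks only the residual fraction.
 */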

/**
 * @brief Increment remainder
 *
 * @details Calculates whether the remainder should increment the expiration
 * time for above-microsecond precision counter HW. The remainder enables
 * improved ticker precision, but is disabled for sub-microsecond precision
 * configurations.
 *
 * @param ticker Pointer to ticker node
 *
 * @return Returns 1 to indicate increment is due, otherwise 0
 * @internal
 */
static uint8_t ticker_remainder_inc(struct ticker_node *ticker)
{
	return ticker_add_to_remainder(&ticker->remainder_current,
				       ticker->remainder_periodic);
}

/**
 * @brief Decrement remainder
 *
 * @details Calculates whether the remainder should decrement the expiration
 * time for above-microsecond precision counter HW. The remainder enables
 * improved ticker precision, but is disabled for sub-microsecond precision
 * configurations.
 *
 * @param ticker Pointer to ticker node
 *
 * @return Returns 1 to indicate decrement is due, otherwise 0
 * @internal
 */
static uint8_t ticker_remainder_dec(struct ticker_node *ticker)
{
	uint8_t decrement = 0U;

	if ((ticker->remainder_current >= BIT(31)) ||
	    (ticker->remainder_current <= (HAL_TICKER_REMAINDER_RANGE >> 1))) {
		decrement++;
		ticker->remainder_current += HAL_TICKER_REMAINDER_RANGE;
	}

	ticker->remainder_current -= ticker->remainder_periodic;

	return decrement;
}
#endif /* CONFIG_BT_TICKER_REMAINDER_SUPPORT */

/**
 * @brief Invoke user operation callback
 *
 * @param user_op Pointer to user operation struct
 * @param status  User operation status to pass to callback
 *
 * @internal
 */
static void ticker_job_op_cb(struct ticker_user_op *user_op, uint8_t status)
{
	user_op->op = TICKER_USER_OP_TYPE_NONE;
	user_op->status = status;
	if (user_op->fp_op_func) {
		user_op->fp_op_func(user_op->status, user_op->op_context);
	}
}

/**
 * @brief Update and insert ticker node
 *
 * @details Update ticker node with parameters passed in user operation.
 * After update, the ticker is inserted in front as new head.
 *
 * @param instance      Pointer to ticker instance
 * @param ticker        Pointer to ticker node
 * @param user_op       Pointer to user operation
 * @param ticks_now     Current tick count at time of call
 * @param ticks_current Current ticker instance ticks
 * @param ticks_elapsed Expired ticks at time of call
 * @param insert_head   Pointer to current head (id). Contains id
 *                      from user operation upon exit
 *
 * @return TICKER_STATUS_SUCCESS if the update succeeded, otherwise the
 *         failure status from the expire info allocation
 * @internal
 */
1693 static inline uint32_t ticker_job_node_update(struct ticker_instance *instance,
1694 struct ticker_node *ticker,
1695 struct ticker_user_op *user_op,
1696 uint32_t ticks_now,
1697 uint32_t ticks_current,
1698 uint32_t ticks_elapsed,
1699 uint8_t *insert_head)
1700 {
1701 uint32_t ticks_to_expire = ticker->ticks_to_expire;
1702
1703 ticks_elapsed += ticker_ticks_diff_get(ticks_now, ticks_current);
1704 if (ticks_to_expire > ticks_elapsed) {
1705 ticks_to_expire -= ticks_elapsed;
1706 } else {
1707 ticker->ticks_to_expire_minus += ticks_elapsed -
1708 ticks_to_expire;
1709 ticks_to_expire = 0U;
1710 }
1711
1712 /* Update ticks_to_expire from latency (lazy) input */
1713 if ((ticker->ticks_periodic != 0U) &&
1714 (user_op->params.update.lazy != 0U)) {
1715 user_op->params.update.lazy--;
1716 while ((ticks_to_expire > ticker->ticks_periodic) &&
1717 (ticker->lazy_current > user_op->params.update.lazy)) {
1718 ticks_to_expire -= ticker->ticks_periodic;
1719
1720 #if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
1721 ticks_to_expire -= ticker_remainder_dec(ticker);
1722 #endif /* CONFIG_BT_TICKER_REMAINDER_SUPPORT */
1723
1724 ticker->lazy_current--;
1725 }
1726
1727 while (ticker->lazy_current < user_op->params.update.lazy) {
1728 ticks_to_expire += ticker->ticks_periodic;
1729
1730 #if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
1731 ticks_to_expire += ticker_remainder_inc(ticker);
1732 #endif /* CONFIG_BT_TICKER_REMAINDER_SUPPORT */
1733
1734 ticker->lazy_current++;
1735 }
1736 ticker->lazy_periodic = user_op->params.update.lazy;
1737 }
1738
1739 /* Update ticks_to_expire from drift input */
1740 ticker->ticks_to_expire = ticks_to_expire +
1741 user_op->params.update.ticks_drift_plus;
1742 ticker->ticks_to_expire_minus +=
1743 user_op->params.update.ticks_drift_minus;
1744
1745 #if defined(CONFIG_BT_TICKER_EXT) && !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
1746 /* TODO: An improvement on this could be to only consider the drift
 * (ADV => randomization) if re-scheduling fails. We would still store
1748 * the drift ticks here, but not actually update the node. That would
1749 * allow the ticker to use the full window for re-scheduling.
1750 */
1751 struct ticker_ext *ext_data = ticker->ext_data;
1752
1753 if (ext_data && ext_data->ticks_slot_window != 0U) {
1754 ext_data->ticks_drift =
1755 user_op->params.update.ticks_drift_plus -
1756 user_op->params.update.ticks_drift_minus;
1757 }
1758 #endif /* CONFIG_BT_TICKER_EXT && !CONFIG_BT_TICKER_SLOT_AGNOSTIC */
1759
1760 ticks_to_expire_prep(ticker, ticks_current, ticks_now);
1761
1762 #if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
1763 /* Update ticks_slot parameter from plus/minus input */
1764 ticker->ticks_slot += user_op->params.update.ticks_slot_plus;
1765 if (ticker->ticks_slot > user_op->params.update.ticks_slot_minus) {
1766 ticker->ticks_slot -= user_op->params.update.ticks_slot_minus;
1767 } else {
1768 ticker->ticks_slot = 0U;
1769 }
#endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */
1771
1772 /* Update force parameter */
1773 if (user_op->params.update.force != 0U) {
1774 ticker->force = user_op->params.update.force;
1775 }
1776
1777 #if defined(CONFIG_BT_TICKER_EXT) && !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC) &&\
1778 !defined(CONFIG_BT_TICKER_LOW_LAT)
1779 /* Update must_expire parameter */
1780 if (user_op->params.update.must_expire) {
1781 /* 1: disable, 2: enable */
1782 ticker->must_expire = (user_op->params.update.must_expire - 1);
1783 }
#endif /* CONFIG_BT_TICKER_EXT && !CONFIG_BT_TICKER_SLOT_AGNOSTIC &&
	* !CONFIG_BT_TICKER_LOW_LAT
	*/
1785
1786 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
1787 if (ticker->ext_data && user_op->params.update.expire_info_id != user_op->id) {
1788 if (user_op->params.update.expire_info_id != TICKER_NULL &&
1789 !ticker->ext_data->other_expire_info) {
1790 uint32_t status;
1791
1792 status = ticker_alloc_expire_info(instance, user_op->id);
1793 if (status) {
1794 return status;
1795 }
1796 } else if (user_op->params.update.expire_info_id == TICKER_NULL &&
1797 ticker->ext_data->other_expire_info) {
1798 ticker_free_expire_info(instance, user_op->id);
1799 ticker->ext_data->other_expire_info = NULL;
1800 }
1801
1802 ticker->ext_data->expire_info_id = user_op->params.update.expire_info_id;
1803 if (ticker->ext_data->expire_info_id != TICKER_NULL) {
1804 ticker_mark_expire_info_outdated(instance, user_op->id);
1805 }
1806 }
1807 #else /* !CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
1808 ARG_UNUSED(instance);
1809 #endif /* !CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
1810
1811 ticker->next = *insert_head;
1812 *insert_head = user_op->id;
1813
1814 return TICKER_STATUS_SUCCESS;
1815 }
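
/* Illustrative note (editorial assumption based on the lazy loops above):
 * params.update.lazy encodes laziness with a +1 offset, i.e. a value of 0
 * requests no change and a value of n sets the node's lazy count to n - 1.
 * For example, a periodic node with ticks_periodic = 100 and
 * lazy_current = 3 that is updated with params.update.lazy = 2 (effective
 * lazy of 1) has two periods removed from ticks_to_expire - provided
 * ticks_to_expire stays larger than one period - and thus expires 200
 * ticks earlier.
 */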
1816
1817 /**
1818 * @brief Manage user update operation
1819 *
1820 * @details Called by ticker_job to execute an update request, or set node
1821 * as done if request is not update. Invokes user operation callback before
1822 * exit.
1823 *
 * @param instance Pointer to ticker instance
 * @param ticker Pointer to ticker node
 * @param user_op Pointer to user operation
 * @param ticks_now Current tick count at time of call
 * @param ticks_elapsed Expired ticks at time of call
1828 * @param insert_head Pointer to current head (id). For update operation,
1829 * contains operation id upon exit
1830 * @internal
1831 */
1832 static inline void ticker_job_node_manage(struct ticker_instance *instance,
1833 struct ticker_node *ticker,
1834 struct ticker_user_op *user_op,
1835 uint32_t ticks_now,
1836 uint32_t ticks_elapsed,
1837 uint8_t *insert_head)
1838 {
1839 /* Handle update of ticker by re-inserting it back. */
1840 if (IS_ENABLED(CONFIG_BT_TICKER_UPDATE) &&
1841 (user_op->op == TICKER_USER_OP_TYPE_UPDATE)) {
1842 /* Remove ticker node from list */
1843 ticker->ticks_to_expire = ticker_dequeue(instance, user_op->id);
1844
1845 /* Update node and insert back */
1846 ticker_job_node_update(instance, ticker, user_op, ticks_now,
1847 instance->ticks_current, ticks_elapsed,
1848 insert_head);
1849
1850 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
1851 ticker_mark_expire_info_outdated(instance, user_op->id);
1852 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
1853
1854 /* Set schedule status of node
1855 * as updating.
1856 */
1857 ticker->req++;
1858 } else {
1859 /* If stop/stop_abs requested, then dequeue node */
1860 if (user_op->op != TICKER_USER_OP_TYPE_YIELD_ABS) {
1861 /* Remove ticker node from list */
1862 ticker->ticks_to_expire = ticker_dequeue(instance,
1863 user_op->id);
1864
1865 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
1866 if (ticker->ext_data && ticker->ext_data->expire_info_id != TICKER_NULL) {
1867 ticker_free_expire_info(instance, user_op->id);
1868 ticker->ext_data->other_expire_info = NULL;
1869 }
1870
1871 ticker_mark_expire_info_outdated(instance, user_op->id);
1872 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
1873
1874 /* Reset schedule status of node */
1875 ticker->req = ticker->ack;
1876 }
1877
1878 #if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
1879 /* If yield_abs/stop/stop_abs then adjust ticks_slot_previous */
1880 if (instance->ticker_id_slot_previous == user_op->id) {
1881 uint32_t ticks_current;
1882 uint32_t ticks_at_yield;
1883 uint32_t ticks_used;
1884
1885 if (user_op->op != TICKER_USER_OP_TYPE_YIELD_ABS) {
1886 instance->ticker_id_slot_previous = TICKER_NULL;
1887 }
1888
1889 if ((user_op->op == TICKER_USER_OP_TYPE_YIELD_ABS) ||
1890 (user_op->op == TICKER_USER_OP_TYPE_STOP_ABS)) {
1891 ticks_at_yield =
1892 user_op->params.yield.ticks_at_yield;
1893 } else {
1894 ticks_at_yield = ticks_now;
1895 }
1896
1897 ticks_current = instance->ticks_current;
1898 if (!((ticks_at_yield - ticks_current) &
1899 BIT(HAL_TICKER_CNTR_MSBIT))) {
1900 ticks_used = ticks_elapsed +
1901 ticker_ticks_diff_get(ticks_at_yield,
1902 ticks_current);
1903 } else {
1904 ticks_used =
1905 ticker_ticks_diff_get(ticks_current,
1906 ticks_at_yield);
1907 if (ticks_elapsed > ticks_used) {
1908 ticks_used = ticks_elapsed -
1909 ticks_used;
1910 } else {
1911 ticks_used = 0;
1912 }
1913 }
1914
1915 if (instance->ticks_slot_previous > ticks_used) {
1916 instance->ticks_slot_previous = ticks_used;
1917 }
1918 }
1919 #endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */
1920
1921 }
1922
/* Op success. @todo The update may still fail during the
 * actual insert; handling for that case remains to be designed.
 */
1926 ticker_job_op_cb(user_op, TICKER_STATUS_SUCCESS);
1927 }
1928
1929 /**
1930 * @brief Manage user operations list
1931 *
 * @details Called by ticker_job to execute requested user operations. A
 * number of operations may have been queued since the last ticker_job.
 * Only update and stop operations are handled here; start is handled
 * implicitly by inserting the ticker node in ticker_job_list_insert.
 *
 * @param instance Pointer to ticker instance
 * @param ticks_now Current tick count at time of call
 * @param ticks_elapsed Expired ticks at time of call
 * @param insert_head Pointer to current head (id). For update operation,
 * contains operation id upon exit
 * @return 1 if an operation is still pending, 0 if all operations are done.
1942 * @internal
1943 */
1944 static inline uint8_t ticker_job_list_manage(struct ticker_instance *instance,
1945 uint32_t ticks_now,
1946 uint32_t ticks_elapsed,
1947 uint8_t *insert_head)
1948 {
1949 uint8_t pending;
1950 struct ticker_node *node;
1951 struct ticker_user *users;
1952 uint8_t count_user;
1953
1954 pending = 0U;
1955 node = &instance->nodes[0];
1956 users = &instance->users[0];
1957 count_user = instance->count_user;
1958 /* Traverse users - highest id first */
1959 while (count_user--) {
1960 struct ticker_user *user;
1961 struct ticker_user_op *user_ops;
1962
1963 user = &users[count_user];
1964 user_ops = &user->user_op[0];
/* Traverse user operation queue - middle to last (with wrap).
 * This loop updates user->middle to point past the last
 * processed user operation. This is used later by
 * ticker_job_list_insert, for handling user->first to middle.
 */
1970 while (user->middle != user->last) {
1971 struct ticker_user_op *user_op;
1972 struct ticker_node *ticker;
1973 uint8_t state;
1974 uint8_t prev;
1975 uint8_t middle;
1976
1977 user_op = &user_ops[user->middle];
1978
1979 /* Increment index and handle wrapping */
1980 prev = user->middle;
1981 middle = user->middle + 1;
1982 if (middle == user->count_user_op) {
1983 middle = 0U;
1984 }
1985 user->middle = middle;
1986
1987 ticker = &node[user_op->id];
1988
1989 /* if op is start, then skip update and stop ops */
1990 if (user_op->op < TICKER_USER_OP_TYPE_UPDATE) {
1991 #if defined(CONFIG_BT_TICKER_PREFER_START_BEFORE_STOP)
1992 if (user_op->op == TICKER_USER_OP_TYPE_START) {
1993 /* Set start pending to validate a
1994 * successive, inline stop operation.
1995 */
1996 ticker->start_pending = 1U;
1997 }
1998 #endif /* CONFIG_BT_TICKER_PREFER_START_BEFORE_STOP */
1999
2000 continue;
2001 }
2002
2003 /* determine the ticker state */
2004 state = (ticker->req - ticker->ack) & 0xff;
2005
2006 /* if not started or update not required,
2007 * set status and continue.
2008 */
2009 if ((user_op->op > TICKER_USER_OP_TYPE_STOP_ABS) ||
2010 ((state == 0U) &&
2011 #if defined(CONFIG_BT_TICKER_PREFER_START_BEFORE_STOP)
2012 !ticker->start_pending &&
2013 #endif /* CONFIG_BT_TICKER_PREFER_START_BEFORE_STOP */
2014 (user_op->op != TICKER_USER_OP_TYPE_YIELD_ABS)) ||
2015 ((user_op->op == TICKER_USER_OP_TYPE_UPDATE) &&
2016 (user_op->params.update.ticks_drift_plus == 0U) &&
2017 (user_op->params.update.ticks_drift_minus == 0U) &&
2018 #if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
2019 (user_op->params.update.ticks_slot_plus == 0U) &&
2020 (user_op->params.update.ticks_slot_minus == 0U) &&
#endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */
2022 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
2023 (!ticker->ext_data ||
2024 user_op->params.update.expire_info_id == user_op->id) &&
2025 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
2026 (user_op->params.update.lazy == 0U) &&
2027 (user_op->params.update.force == 0U))) {
2028 ticker_job_op_cb(user_op,
2029 TICKER_STATUS_FAILURE);
2030 continue;
2031 }
2032
2033 /* Delete or yield node, if not expired */
2034 if ((state == 1U) ||
2035 (user_op->op == TICKER_USER_OP_TYPE_YIELD_ABS)) {
2036 ticker_job_node_manage(instance, ticker,
2037 user_op, ticks_now,
2038 ticks_elapsed,
2039 insert_head);
2040 } else {
/* Update requested on an expired node; defer the
 * update until the worker bottom half finishes, by
 * re-scheduling the ticker job.
 */
2046 instance->sched_cb(TICKER_CALL_ID_JOB,
2047 TICKER_CALL_ID_JOB, 1,
2048 instance);
2049
/* Update the index up to which management is
 * complete.
 */
2053 user->middle = prev;
2054
2055 pending = 1U;
2056 break;
2057 }
2058 }
2059 }
2060
2061 return pending;
2062 }
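
/* Editorial note: each user operation queue is a ring buffer indexed by
 * user->first, user->middle and user->last. New operations are enqueued at
 * last; ticker_job_list_manage() above consumes the update/stop operations
 * in middle..last, while ticker_job_list_insert() later consumes the start
 * operations in first..middle.
 */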
2063
2064 /**
2065 * @brief Handle ticker node expirations
2066 *
2067 * @details Called by ticker_job to schedule next expirations. Expired ticker
2068 * nodes are removed from the active list, and re-inserted if periodic.
2069 *
 * @param instance Pointer to ticker instance
 * @param ticks_now Current tick count at time of call
 * @param ticks_previous Absolute ticks at ticker_job start
 * @param ticks_elapsed Expired ticks at time of call
 * @param insert_head Pointer to current head (id). Updated if nodes are
 * re-inserted
2075 * @internal
2076 */
2077 static inline void ticker_job_worker_bh(struct ticker_instance *instance,
2078 uint32_t ticks_now,
2079 uint32_t ticks_previous,
2080 uint32_t ticks_elapsed,
2081 uint8_t *insert_head)
2082 {
2083 struct ticker_node *node;
2084 uint32_t ticks_expired;
2085 uint32_t ticks_latency;
2086
2087 ticks_latency = ticker_ticks_diff_get(ticks_now, ticks_previous);
2088
2089 node = &instance->nodes[0];
2090 ticks_expired = 0U;
2091 while (instance->ticker_id_head != TICKER_NULL) {
2092 uint8_t skip_collision = 0U;
2093 struct ticker_node *ticker;
2094 uint32_t ticks_to_expire;
2095 uint8_t id_expired;
2096 uint8_t state;
2097
2098 /* auto variable for current ticker node */
2099 id_expired = instance->ticker_id_head;
2100 ticker = &node[id_expired];
2101
2102 /* Do nothing if ticker did not expire */
2103 ticks_to_expire = ticker->ticks_to_expire;
2104 if (ticks_elapsed < ticks_to_expire) {
2105 ticker->ticks_to_expire -= ticks_elapsed;
2106 break;
2107 }
2108
2109 /* decrement ticks_elapsed and collect expired ticks */
2110 ticks_elapsed -= ticks_to_expire;
2111 ticks_latency -= ticks_to_expire;
2112 ticks_expired += ticks_to_expire;
2113
2114 state = (ticker->req - ticker->ack) & 0xff;
2115
2116 #if !defined(CONFIG_BT_TICKER_LOW_LAT)
/* A node with a non-zero lazy count did not expire with a
 * timeout callback, but was either in collision or was
 * re-scheduled. Such a node should not define the active
 * slot reservation (slot_previous).
 */
2121 skip_collision = (ticker->lazy_current != 0U);
2122 #endif /* !CONFIG_BT_TICKER_LOW_LAT */
2123
2124 #if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
2125 /* decrement ticks_slot_previous */
2126 if (instance->ticks_slot_previous > ticks_to_expire) {
2127 instance->ticks_slot_previous -= ticks_to_expire;
2128 } else {
2129 instance->ticker_id_slot_previous = TICKER_NULL;
2130 instance->ticks_slot_previous = 0U;
2131 }
2132
2133 uint32_t ticker_ticks_slot;
2134
2135 if (TICKER_HAS_SLOT_WINDOW(ticker) && !ticker->ticks_slot) {
2136 ticker_ticks_slot = HAL_TICKER_RESCHEDULE_MARGIN;
2137 } else {
2138 ticker_ticks_slot = ticker->ticks_slot;
2139 }
2140
2141 /* If a reschedule is set pending, we will need to keep
2142 * the slot_previous information
2143 */
2144 if (ticker_ticks_slot && (state == 2U) && !skip_collision &&
2145 !TICKER_RESCHEDULE_PENDING(ticker)) {
2146 instance->ticker_id_slot_previous = id_expired;
2147 instance->ticks_slot_previous = ticker_ticks_slot;
2148 }
#endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */
2150
2151 /* ticker expired, set ticks_to_expire zero */
2152 ticker->ticks_to_expire = 0U;
2153
2154 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
2155 ticker_mark_expire_info_outdated(instance, instance->ticker_id_head);
2156 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
2157
2158 /* remove the expired ticker from head */
2159 instance->ticker_id_head = ticker->next;
2160
2161 /* Ticker will be restarted if periodic or to be re-scheduled */
2162 if ((ticker->ticks_periodic != 0U) ||
2163 TICKER_RESCHEDULE_PENDING(ticker)) {
2164 #if !defined(CONFIG_BT_TICKER_LOW_LAT)
2165 if (TICKER_RESCHEDULE_PENDING(ticker)) {
2166 /* Set the re-scheduled node to now. Will be
2167 * collision resolved after all nodes are
2168 * restarted
2169 */
2170 ticker->ticks_to_expire = ticks_elapsed;
2171
/* Reset ticker state, so that it is put
 * back in requested state later down
 * in the code.
 */
2176 ticker->req = ticker->ack;
2177 } else {
2178 uint16_t lazy_periodic;
2179 uint32_t count;
2180 uint16_t lazy;
2181
2182 /* If not skipped, apply lazy_periodic */
2183 if (!ticker->lazy_current) {
2184 lazy_periodic = ticker->lazy_periodic;
2185 } else {
2186 lazy_periodic = 0U;
2187
/* Reset ticker state, so that it is put
 * back in requested state later down
 * in the code.
 */
2192 ticker->req = ticker->ack;
2193 }
2194
2195 /* Reload ticks_to_expire with at least one
2196 * period.
2197 */
2198 ticks_to_expire = 0U;
2199 count = 1 + lazy_periodic;
2200 while (count--) {
2201 ticks_to_expire +=
2202 ticker->ticks_periodic;
2203
2204 #if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
2205 ticks_to_expire +=
2206 ticker_remainder_inc(ticker);
2207 #endif /* CONFIG_BT_TICKER_REMAINDER_SUPPORT */
2208 }
2209
2210 /* Skip intervals that have elapsed w.r.t.
2211 * current ticks.
2212 */
2213 lazy = 0U;
2214
2215 if (0) {
2216 #if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
2217 } else if (!ticker->must_expire) {
2218 #else
2219 } else {
2220 #endif /* CONFIG_BT_TICKER_SLOT_AGNOSTIC */
2221 while (ticks_to_expire <
2222 ticks_latency) {
2223 ticks_to_expire +=
2224 ticker->ticks_periodic;
2225
2226 #if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
2227 ticks_to_expire +=
2228 ticker_remainder_inc(ticker);
2229 #endif /* CONFIG_BT_TICKER_REMAINDER_SUPPORT */
2230
2231 lazy++;
2232 }
2233 }
2234
2235 /* Use the calculated ticks to expire and
2236 * laziness.
2237 */
2238 ticker->ticks_to_expire = ticks_to_expire;
2239 ticker->lazy_current += (lazy_periodic + lazy);
2240 }
2241
2242 ticks_to_expire_prep(ticker, instance->ticks_current,
2243 ((ticks_previous + ticks_expired) &
2244 HAL_TICKER_CNTR_MASK));
2245 #else /* CONFIG_BT_TICKER_LOW_LAT */
2246 uint32_t count;
2247 uint16_t lazy;
2248
2249 /* Prepare for next interval */
2250 ticks_to_expire = 0U;
2251 count = 1 + ticker->lazy_periodic;
2252 while (count--) {
2253 ticks_to_expire += ticker->ticks_periodic;
2254 #if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
2255 ticks_to_expire += ticker_remainder_inc(ticker);
2256 #endif /* CONFIG_BT_TICKER_REMAINDER_SUPPORT */
2257 }
2258
2259 /* Skip intervals that have elapsed w.r.t. current
2260 * ticks.
2261 */
2262 lazy = 0U;
2263
2264 /* Schedule to a tick in the future */
2265 while (ticks_to_expire < ticks_latency) {
2266 ticks_to_expire += ticker->ticks_periodic;
2267 #if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
2268 ticks_to_expire += ticker_remainder_inc(ticker);
2269 #endif /* CONFIG_BT_TICKER_REMAINDER_SUPPORT */
2270 lazy++;
2271 }
2272
2273 /* Use the calculated ticks to expire and laziness. */
2274 ticker->ticks_to_expire = ticks_to_expire;
2275 ticker->lazy_current = ticker->lazy_periodic + lazy;
2276
2277 ticks_to_expire_prep(ticker, instance->ticks_current,
2278 ((ticks_previous + ticks_expired) &
2279 HAL_TICKER_CNTR_MASK));
2280
2281 /* Reset force state of the node */
2282 ticker->force = 0U;
2283 #endif /* CONFIG_BT_TICKER_LOW_LAT */
2284
2285 /* Add to insert list */
2286 ticker->next = *insert_head;
2287 *insert_head = id_expired;
2288
2289 /* set schedule status of node as restarting. */
2290 ticker->req++;
2291 } else {
2292 #if !defined(CONFIG_BT_TICKER_LOW_LAT)
/* A single-shot ticker that is still in requested state,
 * or was skipped due to collision, shall generate an
 * operation callback with failure status.
 */
2297 if (state && ((state == 1U) || skip_collision) &&
2298 ticker->fp_op_func) {
2299 ticker->fp_op_func(TICKER_STATUS_FAILURE,
2300 ticker->op_context);
2301 }
2302 #endif /* !CONFIG_BT_TICKER_LOW_LAT */
2303
2304 /* reset schedule status of node */
2305 ticker->req = ticker->ack;
2306 }
2307 }
2308 }
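
/* Worked example (illustrative): a periodic node with ticks_periodic = 100,
 * lazy_periodic = 0, no remainder and must_expire off, expiring while
 * ticks_latency is still 250: the reload loop sets ticks_to_expire to 100,
 * and the skip loop then advances it to 200 and 300 while counting
 * lazy = 2, so the node is re-inserted 300 ticks after its last expiry and
 * lazy_current is increased by the two skipped intervals.
 */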
2309
2310 /**
2311 * @brief Prepare ticker node start
2312 *
2313 * @details Called by ticker_job to prepare ticker node start operation.
2314 *
 * @param instance Pointer to ticker instance
 * @param ticker Pointer to ticker node
 * @param user_op Pointer to user operation
 * @param ticks_current Current ticker instance ticks
 *
 * @return TICKER_STATUS_SUCCESS if successful, otherwise the failure
 * status from expire info allocation
 *
2319 * @internal
2320 */
2321 static inline uint32_t ticker_job_op_start(struct ticker_instance *instance,
2322 struct ticker_node *ticker,
2323 struct ticker_user_op *user_op,
2324 uint32_t ticks_current)
2325 {
2326 struct ticker_user_op_start *start = (void *)&user_op->params.start;
2327
2328 #if defined(CONFIG_BT_TICKER_LOW_LAT)
2329 /* Must expire is not supported in compatibility mode */
2330 LL_ASSERT_DBG(start->lazy < TICKER_LAZY_MUST_EXPIRE_KEEP);
2331 #else
2332 #if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
2333 if (start->lazy != TICKER_LAZY_MUST_EXPIRE_KEEP) {
2334 /* Update the must_expire state */
2335 ticker->must_expire =
2336 (start->lazy == TICKER_LAZY_MUST_EXPIRE) ? 1U : 0U;
2337 }
2338 #endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */
2339 #endif /* CONFIG_BT_TICKER_LOW_LAT */
2340
2341 #if defined(CONFIG_BT_TICKER_EXT)
2342 ticker->ext_data = start->ext_data;
2343
2344 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
2345 if (ticker->ext_data) {
2346 ticker->ext_data->other_expire_info = NULL;
2347 if (ticker->ext_data->expire_info_id != TICKER_NULL) {
2348 uint32_t status;
2349
2350 status = ticker_alloc_expire_info(instance, user_op->id);
2351 if (status) {
2352 return status;
2353 }
2354 }
2355 }
2356
2357 ticker_mark_expire_info_outdated(instance, user_op->id);
2358 #else /* !CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
2359 ARG_UNUSED(instance);
2360 #endif /* !CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
2361 #else /* !CONFIG_BT_TICKER_EXT */
2362 ARG_UNUSED(instance);
2363 #endif /* !CONFIG_BT_TICKER_EXT */
2364
2365 ticker->ticks_periodic = start->ticks_periodic;
2366
2367 #if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
2368 ticker->remainder_periodic = start->remainder_periodic;
2369
2370 #if defined(CONFIG_BT_TICKER_START_REMAINDER)
2371 ticker->remainder_current = start->remainder_first;
2372 #else /* !CONFIG_BT_TICKER_START_REMAINDER */
2373 ticker->remainder_current = 0U;
2374 #endif /* !CONFIG_BT_TICKER_START_REMAINDER */
2375 #endif /* CONFIG_BT_TICKER_REMAINDER_SUPPORT */
2376
2377 ticker->lazy_periodic =
2378 (start->lazy < TICKER_LAZY_MUST_EXPIRE_KEEP) ? start->lazy :
2379 0U;
2380 #if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
2381 ticker->ticks_slot = start->ticks_slot;
#endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */
2383
2384 ticker->timeout_func = start->fp_timeout_func;
2385 ticker->context = start->context;
2386 ticker->ticks_to_expire = start->ticks_first;
2387 ticker->ticks_to_expire_minus = 0U;
2388 ticks_to_expire_prep(ticker, ticks_current, start->ticks_at_start);
2389
2390 ticker->lazy_current = 0U;
2391 ticker->force = 1U;
2392
2393 return TICKER_STATUS_SUCCESS;
2394 }
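
/* Editorial note: a start with ticks_periodic = 0 creates a one-shot node
 * that expires once, ticks_first after ticks_at_start. The force flag is
 * initialized to 1 so that collision resolution favours the newly started
 * node over non-forced colliding nodes at its first expiry.
 */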
2395
2396 #if !defined(CONFIG_BT_TICKER_LOW_LAT)
2397 /**
2398 * @brief Insert new ticker node
2399 *
 * @details Called by ticker_job to insert a new ticker node. If the node
 * collides with existing ticker nodes, either the new node is postponed or
 * the colliding node is un-scheduled. The decision is based on latency and
 * the force-state of the individual nodes.
2404 *
2405 * @param instance Pointer to ticker instance
2406 * @param id_insert Id of ticker to insert
2407 * @param ticker Pointer to ticker node to insert
2408 * @param insert_head Pointer to current head. Updated if colliding nodes
2409 * are un-scheduled
2410 * @internal
2411 */
2412 static inline uint8_t ticker_job_insert(struct ticker_instance *instance,
2413 uint8_t id_insert,
2414 struct ticker_node *ticker,
2415 uint8_t *insert_head)
2416 {
2417 ARG_UNUSED(insert_head);
2418
2419 /* Prepare to insert */
2420 ticker->next = TICKER_NULL;
2421
2422 /* Enqueue the ticker node */
2423 (void)ticker_enqueue(instance, id_insert);
2424
2425 /* Inserted/Scheduled */
2426 ticker->req = ticker->ack + 1;
2427
2428 return TICKER_STATUS_SUCCESS;
2429 }
2430
2431 #if defined(CONFIG_BT_TICKER_EXT) && !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
2432 /**
2433 * @brief Re-schedule ticker nodes within slot_window
2434 *
2435 * @details This function is responsible for re-scheduling ticker nodes
2436 * which have been marked for re-scheduling in ticker_worker. These nodes
2437 * have a non-zero ticks_slot_window configuration, which indicates a
2438 * valid range in which to re-schedule the node.
2439 * The function iterates over nodes, and handles one re-schedule at a
2440 * time. After a re-schedule, nodes are once again iterated until no more
2441 * nodes are marked for re-scheduling.
2442 *
2443 * @param instance Pointer to ticker instance
 *
 * @return 1 if one or more nodes were re-scheduled, otherwise 0
 *
2446 * @internal
2447 */
2448 static uint8_t ticker_job_reschedule_in_window(struct ticker_instance *instance)
2449 {
2450 struct ticker_node *nodes;
2451 uint8_t rescheduling;
2452 uint8_t rescheduled;
2453
2454 nodes = &instance->nodes[0];
2455
2456 /* Do until all pending re-schedules handled */
2457 rescheduling = 1U;
2458 rescheduled = 0U;
2459 while (rescheduling) {
2460 struct ticker_node *ticker_resched;
2461 uint32_t ticks_to_expire_offset;
2462 uint8_t ticker_id_resched_prev;
2463 struct ticker_ext *ext_data;
2464 uint32_t ticks_start_offset;
2465 uint32_t window_start_ticks;
2466 uint32_t ticks_slot_window;
2467 uint8_t ticker_id_resched;
2468 uint32_t ticks_to_expire;
2469 uint8_t ticker_id_prev;
2470 uint8_t ticker_id_next;
2471 uint32_t ticks_slot;
2472
2473 rescheduling = 0U;
2474
2475 /* Find first pending re-schedule */
2476 ticker_id_resched_prev = TICKER_NULL;
2477 ticker_id_resched = instance->ticker_id_head;
2478 while (ticker_id_resched != TICKER_NULL) {
2479 ticker_resched = &nodes[ticker_id_resched];
2480 if (TICKER_RESCHEDULE_PENDING(ticker_resched)) {
2481 /* Pending reschedule found */
2482 break;
2483 }
2484
2485 ticker_id_resched_prev = ticker_id_resched;
2486 ticker_id_resched = ticker_resched->next;
2487 }
2488
2489 /* Exit if no tickers to be rescheduled */
2490 if (ticker_id_resched == TICKER_NULL) {
2491 break;
2492 }
2493
2494 /* Ensure that resched ticker is expired */
2495 LL_ASSERT_DBG(ticker_resched->ticks_to_expire == 0U);
2496
2497 /* Use ticker's reserved time ticks_slot, else for unreserved
2498 * tickers use the reschedule margin as ticks_slot.
2499 */
2500 if (ticker_resched->ticks_slot) {
2501 ticks_slot = ticker_resched->ticks_slot;
2502 } else {
2503 LL_ASSERT_DBG(TICKER_HAS_SLOT_WINDOW(ticker_resched));
2504
2505 ticks_slot = HAL_TICKER_RESCHEDULE_MARGIN;
2506 }
2507
2508 /* Window start after intersection with already active node */
2509 window_start_ticks = instance->ticks_slot_previous +
2510 HAL_TICKER_RESCHEDULE_MARGIN;
2511
2512 /* If drift was applied to this node, this must be
2513 * taken into consideration. Reduce the window with
2514 * the amount of drift already applied.
2515 *
2516 * TODO: An improvement on this could be to only consider the
 * drift (ADV => randomization) if re-scheduling fails. Then the
2518 * ticker would have the best possible window to re-schedule in
2519 * and not be restricted to ticks_slot_window - ticks_drift.
2520 */
2521 ext_data = ticker_resched->ext_data;
2522 if (IS_ENABLED(CONFIG_BT_TICKER_EXT_SLOT_WINDOW_YIELD) &&
2523 ticker_resched->ticks_slot &&
2524 !ext_data->ticks_drift &&
2525 !ext_data->is_drift_in_window) {
/* Use the slot window after the intersection, including the
 * required ticks_slot; we do not take over the interval of
 * the colliding ticker, provided every expiry increments the
 * interval by a random number of ticks.
 */
2531 ticks_slot_window = window_start_ticks + ticks_slot;
2532
2533 /* Window available, proceed to calculate further
2534 * drift
2535 */
2536 ticker_id_next = ticker_resched->next;
2537
2538 } else if (ext_data->ticks_drift < ext_data->ticks_slot_window) {
2539 /* Use reduced slot window */
2540 ticks_slot_window = ext_data->ticks_slot_window -
2541 ext_data->ticks_drift;
2542
2543 /* Window available, proceed to calculate further
2544 * drift
2545 */
2546 ticker_id_next = ticker_resched->next;
2547
2548 } else {
2549 /* Window has been exhausted - we can't reschedule */
2550 ticker_id_next = TICKER_NULL;
2551
2552 /* Assignment will be unused when TICKER_NULL */
2553 ticks_slot_window = 0U;
2554 }
2555
2556 /* Try to find available slot for re-scheduling */
2557 ticks_to_expire_offset = 0U;
2558 ticks_start_offset = 0U;
2559 ticks_to_expire = 0U;
2560 while ((ticker_id_next != TICKER_NULL) &&
2561 ((ticks_start_offset + ticks_slot) <=
2562 ticks_slot_window)) {
2563 struct ticker_node *ticker_next;
2564 uint32_t window_end_ticks;
2565
2566 ticker_next = &nodes[ticker_id_next];
2567 ticks_to_expire_offset += ticker_next->ticks_to_expire;
2568
2569 /* Calculate end of window. Since window may be aligned
2570 * with expiry of next node, we add a margin
2571 */
2572 if (ticks_to_expire_offset >
2573 HAL_TICKER_RESCHEDULE_MARGIN) {
2574 window_end_ticks =
2575 MIN(ticks_slot_window,
2576 ticks_start_offset +
2577 ticks_to_expire_offset -
2578 HAL_TICKER_RESCHEDULE_MARGIN);
2579
2580 } else if ((ticker_resched->ticks_slot == 0U) ||
2581 ext_data->is_drift_in_window) {
2582 /* Next expiry is too close - hop over after
2583 * next node
2584 */
2585 goto reschedule_in_window_hop_over;
2586
2587 } else {
2588 /* Next expiry is too close - try the next
2589 * node
2590 */
2591 window_end_ticks = 0U;
2592 }
2593
2594 /* Calculate new ticks_to_expire as end of window minus
2595 * slot size.
2596 */
2597 if (((window_start_ticks + ticks_slot) <=
2598 ticks_slot_window) &&
2599 (window_end_ticks >= (ticks_start_offset +
2600 ticks_slot))) {
2601 if (!ticker_resched->ticks_slot ||
2602 ext_data->is_drift_in_window) {
2603 /* Place at start of window */
2604 ticks_to_expire = window_start_ticks;
2605 } else {
2606 /* Place at end of window. This ensures
2607 * that ticker with slot window and that
2608 * uses ticks_slot does not take the
2609 * interval of the colliding ticker.
2610 */
2611 ticks_to_expire = window_end_ticks -
2612 ticks_slot;
2613 }
2614 } else {
2615 /* No space in window - try the next node */
2616 ticks_to_expire = 0U;
2617 }
2618
2619 /* Decide if the re-scheduling ticker node fits in the
2620 * slot found - break if it fits
2621 */
2622 if ((ticks_to_expire != 0U) &&
2623 (ticks_to_expire >= window_start_ticks) &&
2624 (ticks_to_expire <= (window_end_ticks -
2625 ticks_slot))) {
2626 /* Re-scheduled node fits before this node */
2627 break;
2628 } else {
2629 /* Not inside the window */
2630 ticks_to_expire = 0U;
2631 }
2632
2633 /* Skip other pending re-schedule nodes and
2634 * tickers with no reservation or not periodic
2635 */
2636 if (TICKER_RESCHEDULE_PENDING(ticker_next) ||
2637 !ticker_next->ticks_slot ||
2638 !ticker_next->ticks_periodic) {
2639 ticker_id_next = ticker_next->next;
2640
2641 continue;
2642 }
2643
2644 reschedule_in_window_hop_over:
/* We didn't find a valid slot for re-scheduling - try
 * the next node
 */
2648 ticks_start_offset += ticks_to_expire_offset;
2649 window_start_ticks = ticks_start_offset +
2650 ticker_next->ticks_slot +
2651 HAL_TICKER_RESCHEDULE_MARGIN;
2652 ticks_to_expire_offset = 0U;
2653
2654 if (!ticker_resched->ticks_slot ||
2655 ext_data->is_drift_in_window) {
2656 if (!ticker_resched->ticks_slot ||
2657 (window_start_ticks <= (ticks_slot_window -
2658 ticks_slot))) {
2659 /* Try at the end of the next node */
2660 ticks_to_expire = window_start_ticks;
2661 }
2662 } else {
2663 /* Try at the end of the slot window. This
2664 * ensures that ticker with slot window and that
2665 * uses ticks_slot does not take the interval of
2666 * the colliding ticker.
2667 */
2668 ticks_to_expire = ticks_slot_window -
2669 ticks_slot;
2670 }
2671
2672 ticker_id_next = ticker_next->next;
2673 }
2674
2675 ext_data->ticks_drift += ticks_to_expire;
2676
2677 /* Place the ticker node sorted by expiration time and adjust
2678 * delta times
2679 */
2680 ticker_id_next = ticker_resched->next;
2681 ticker_id_prev = TICKER_NULL;
2682 while (ticker_id_next != TICKER_NULL) {
2683 struct ticker_node *ticker_next;
2684
2685 ticker_next = &nodes[ticker_id_next];
2686 if (ticks_to_expire > ticker_next->ticks_to_expire) {
2687 /* Node is after this - adjust delta */
2688 ticks_to_expire -= ticker_next->ticks_to_expire;
2689 } else {
2690 /* Node is before this one */
2691 ticker_next->ticks_to_expire -= ticks_to_expire;
2692 break;
2693 }
2694 ticker_id_prev = ticker_id_next;
2695 ticker_id_next = ticker_next->next;
2696 }
2697
2698 ticker_resched->ticks_to_expire = ticks_to_expire;
2699
2700 /* If the node moved in the list, insert it */
2701 if (ticker_id_prev != TICKER_NULL) {
2702 /* Remove node from its current position in list */
2703 if (ticker_id_resched_prev != TICKER_NULL) {
2704 /* Node was not at the head of the list */
2705 nodes[ticker_id_resched_prev].next =
2706 ticker_resched->next;
2707 } else {
2708 /* Node was at the head, move head forward */
2709 instance->ticker_id_head = ticker_resched->next;
2710 }
2711
2712 /* Link inserted node */
2713 ticker_resched->next = nodes[ticker_id_prev].next;
2714 nodes[ticker_id_prev].next = ticker_id_resched;
2715 }
2716
2717 /* Remove latency added in ticker_worker */
2718 ticker_resched->lazy_current--;
2719
2720 /* Prevent repeated re-scheduling */
2721 ext_data->reschedule_state =
2722 TICKER_RESCHEDULE_STATE_DONE;
2723
2724 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
2725 ticker_mark_expire_info_outdated(instance, ticker_id_resched);
2726 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
2727
/* A node was re-scheduled; iterate again to handle any other
 * pending re-schedules
 */
2729 rescheduling = 1U;
2730 rescheduled = 1U;
2731 }
2732
2733 return rescheduled;
2734 }
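
/* Illustrative note (editorial assumption): for a node with
 * ticks_slot = 10, ticks_slot_window = 100 and accumulated
 * ticks_drift = 30, the search above runs over a reduced window of 70
 * ticks. A gap between neighbouring reservations qualifies only if it can
 * hold the full 10-tick slot clear of the next expiry by
 * HAL_TICKER_RESCHEDULE_MARGIN, and a reserving node is then placed at the
 * end of the gap so it does not inherit the colliding ticker's interval.
 */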
2735 #endif /* CONFIG_BT_TICKER_EXT && !CONFIG_BT_TICKER_SLOT_AGNOSTIC */
2736 #else /* CONFIG_BT_TICKER_LOW_LAT */
2737
2738 /**
2739 * @brief Insert new ticker node
2740 *
 * @details Called by ticker_job to insert a new ticker node. If the node
 * collides with existing ticker nodes, either the new node is postponed or
 * the colliding node is un-scheduled. The decision is based on latency and
 * the force-state of the individual nodes.
2745 *
2746 * @param instance Pointer to ticker instance
2747 * @param id_insert Id of ticker to insert
2748 * @param ticker Pointer to ticker node to insert
2749 * @param insert_head Pointer to current head. Updated if colliding nodes
2750 * are un-scheduled
2751 * @internal
2752 */
2753 static inline uint8_t ticker_job_insert(struct ticker_instance *instance,
2754 uint8_t id_insert,
2755 struct ticker_node *ticker,
2756 uint8_t *insert_head)
2757 {
2758 struct ticker_node *node = &instance->nodes[0];
2759 uint8_t id_collide;
2760 uint16_t skip;
2761
2762 /* Prepare to insert */
2763 ticker->next = TICKER_NULL;
2764
2765 /* No. of times ticker has skipped its interval */
2766 if (ticker->lazy_current > ticker->lazy_periodic) {
2767 skip = ticker->lazy_current -
2768 ticker->lazy_periodic;
2769 } else {
2770 skip = 0U;
2771 }
2772
2773 /* If insert collides, remove colliding or advance to next interval */
2774 while (id_insert !=
2775 (id_collide = ticker_enqueue(instance, id_insert))) {
2776 /* Check for collision */
2777 if (id_collide != TICKER_NULL) {
2778 struct ticker_node *ticker_collide = &node[id_collide];
2779 uint16_t skip_collide;
2780
2781 /* No. of times colliding ticker has skipped its
2782 * interval.
2783 */
2784 if (ticker_collide->lazy_current >
2785 ticker_collide->lazy_periodic) {
2786 skip_collide = ticker_collide->lazy_current -
2787 ticker_collide->lazy_periodic;
2788 } else {
2789 skip_collide = 0U;
2790 }
2791
2792 /* Check if colliding node should be un-scheduled */
2793 if (ticker_collide->ticks_periodic &&
2794 skip_collide <= skip &&
2795 ticker_collide->force < ticker->force) {
/* Dequeue and get the remainder of ticks
 * to expire.
 */
2799 ticker_collide->ticks_to_expire =
2800 ticker_dequeue(instance, id_collide);
2801 /* Unschedule node */
2802 ticker_collide->req = ticker_collide->ack;
2803
2804 /* Enqueue for re-insertion */
2805 ticker_collide->next = *insert_head;
2806 *insert_head = id_collide;
2807
2808 continue;
2809 }
2810 }
2811
2812 /* occupied, try next interval */
2813 if (ticker->ticks_periodic != 0U) {
2814 ticker->ticks_to_expire += ticker->ticks_periodic;
2815 #if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
2816 ticker->ticks_to_expire += ticker_remainder_inc(ticker);
2817 #endif /* CONFIG_BT_TICKER_REMAINDER_SUPPORT */
2818 ticker->lazy_current++;
2819
2820 /* No. of times ticker has skipped its interval */
2821 if (ticker->lazy_current > ticker->lazy_periodic) {
2822 skip = ticker->lazy_current -
2823 ticker->lazy_periodic;
2824 } else {
2825 skip = 0U;
2826 }
2827
2828 /* Remove any accumulated drift (possibly added due to
2829 * ticker job execution latencies).
2830 */
2831 if (ticker->ticks_to_expire >
2832 ticker->ticks_to_expire_minus) {
2833 ticker->ticks_to_expire -=
2834 ticker->ticks_to_expire_minus;
2835 ticker->ticks_to_expire_minus = 0U;
2836 } else {
2837 ticker->ticks_to_expire_minus -=
2838 ticker->ticks_to_expire;
2839 ticker->ticks_to_expire = 0U;
2840 }
2841 } else {
2842 return TICKER_STATUS_FAILURE;
2843 }
2844 }
2845
2846 /* Inserted/Scheduled */
2847 ticker->req = ticker->ack + 1;
2848
2849 return TICKER_STATUS_SUCCESS;
2850 }
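
/* Editorial note: in this low-latency variant a colliding node is
 * un-scheduled only if it is periodic, has skipped at least as many
 * intervals as the node being inserted (skip_collide <= skip) and has a
 * lower force value. Otherwise the node being inserted, if periodic, is
 * advanced by one interval and the enqueue is retried; a colliding
 * one-shot insert fails with TICKER_STATUS_FAILURE.
 */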
2851 #endif /* CONFIG_BT_TICKER_LOW_LAT */
2852
2853 /**
2854 * @brief Insert and start ticker nodes for all users
2855 *
2856 * @details Called by ticker_job to prepare, insert and start ticker nodes
2857 * for all users. Specifying insert_head to other than TICKER_NULL causes
2858 * that ticker node to be inserted first.
2859 *
2860 * @param instance Pointer to ticker instance
2861 * @param insert_head Id of ticker node to insert, or TICKER_NULL if only
2862 * handle user operation inserts
2863 * @internal
2864 */
2865 static inline void ticker_job_list_insert(struct ticker_instance *instance,
2866 uint8_t insert_head)
2867 {
2868 struct ticker_node *node;
2869 struct ticker_user *users;
2870 uint8_t count_user;
2871
2872 node = &instance->nodes[0];
2873 users = &instance->users[0];
2874 count_user = instance->count_user;
2875
2876 /* Iterate through all user ids */
2877 while (count_user--) {
2878 struct ticker_user_op *user_ops;
2879 struct ticker_user *user;
2880 uint8_t user_ops_first;
2881
2882 user = &users[count_user];
2883 user_ops = (void *)&user->user_op[0];
2884 user_ops_first = user->first;
2885 /* Traverse user operation queue - first to middle (wrap) */
2886 while ((insert_head != TICKER_NULL) ||
2887 (user_ops_first != user->middle)) {
2888 struct ticker_user_op *user_op;
2889 struct ticker_node *ticker;
2890 uint8_t id_insert;
2891 uint8_t status = TICKER_STATUS_SUCCESS;
2892
2893 if (insert_head != TICKER_NULL) {
2894 /* Prepare insert of ticker node specified by
2895 * insert_head
2896 */
2897 id_insert = insert_head;
2898 ticker = &node[id_insert];
2899 insert_head = ticker->next;
2900
2901 user_op = NULL;
2902 } else {
2903 /* Prepare insert of any ticker nodes requested
2904 * via user operation TICKER_USER_OP_TYPE_START
2905 */
2906 uint8_t first;
2907
2908 user_op = &user_ops[user_ops_first];
2909 first = user_ops_first + 1;
2910 if (first == user->count_user_op) {
2911 first = 0U;
2912 }
2913 user_ops_first = first;
2914
2915 id_insert = user_op->id;
2916 ticker = &node[id_insert];
2917 if (user_op->op != TICKER_USER_OP_TYPE_START) {
2918 /* User operation is not start - skip
2919 * to next operation
2920 */
2921 continue;
2922 }
2923
2924 #if defined(CONFIG_BT_TICKER_PREFER_START_BEFORE_STOP)
2925 ticker->start_pending = 0U;
2926 #endif /* CONFIG_BT_TICKER_PREFER_START_BEFORE_STOP */
2927
2928 if (((ticker->req -
2929 ticker->ack) & 0xff) != 0U) {
2930 ticker_job_op_cb(user_op,
2931 TICKER_STATUS_FAILURE);
2932 continue;
2933 }
2934
2935 /* Prepare ticker for start */
2936 status = ticker_job_op_start(instance, ticker, user_op,
2937 instance->ticks_current);
2938 }
2939
2940 if (!status) {
2941 /* Insert ticker node */
2942 status = ticker_job_insert(instance, id_insert, ticker,
2943 &insert_head);
2944 }
2945
2946 if (user_op) {
2947 ticker_job_op_cb(user_op, status);
2948
if (!IS_ENABLED(CONFIG_BT_TICKER_LOW_LAT) &&
    (ticker->ticks_periodic == 0U)) {
2952 ticker->fp_op_func =
2953 user_op->fp_op_func;
2954 ticker->op_context =
2955 user_op->op_context;
2956 }
2957 }
2958 }
2959
2960 #if !defined(CONFIG_BT_TICKER_JOB_IDLE_GET) && \
2961 !defined(CONFIG_BT_TICKER_NEXT_SLOT_GET) && \
2962 !defined(CONFIG_BT_TICKER_PRIORITY_SET)
2963 user->first = user_ops_first;
2964 #endif /* !CONFIG_BT_TICKER_JOB_IDLE_GET &&
2965 * !CONFIG_BT_TICKER_NEXT_SLOT_GET &&
2966 * !CONFIG_BT_TICKER_PRIORITY_SET
2967 */
2968
2969 }
2970 }
2971
2972 #if defined(CONFIG_BT_TICKER_JOB_IDLE_GET) || \
2973 defined(CONFIG_BT_TICKER_NEXT_SLOT_GET) || \
2974 defined(CONFIG_BT_TICKER_PRIORITY_SET)
2975 /**
2976 * @brief Perform inquiry for specific user operation
2977 *
2978 * @param instance Pointer to ticker instance
2979 * @param uop Pointer to user operation
2980 *
2981 * @internal
2982 */
2983 static inline void ticker_job_op_inquire(struct ticker_instance *instance,
2984 struct ticker_user_op *uop)
2985 {
2986 ticker_op_func fp_op_func;
2987
2988 fp_op_func = NULL;
2989 switch (uop->op) {
2990 #if defined(CONFIG_BT_TICKER_NEXT_SLOT_GET)
2991 case TICKER_USER_OP_TYPE_SLOT_GET:
2992 ticker_by_next_slot_get(instance,
2993 uop->params.slot_get.ticker_id,
2994 uop->params.slot_get.ticks_current,
2995 uop->params.slot_get.ticks_to_expire,
2996 #if defined(CONFIG_BT_TICKER_NEXT_SLOT_GET_MATCH)
2997 uop->params.slot_get.fp_match_op_func,
2998 uop->params.slot_get.match_op_context,
2999 #else
3000 NULL, NULL,
3001 #endif
3002 #if defined(CONFIG_BT_TICKER_REMAINDER_GET)
3003 uop->params.slot_get.remainder,
3004 #else /* !CONFIG_BT_TICKER_REMAINDER_GET */
3005 NULL,
3006 #endif /* !CONFIG_BT_TICKER_REMAINDER_GET */
3007 #if defined(CONFIG_BT_TICKER_LAZY_GET)
3008 uop->params.slot_get.lazy);
3009 #else /* !CONFIG_BT_TICKER_LAZY_GET */
3010 NULL);
3011 #endif /* !CONFIG_BT_TICKER_LAZY_GET */
3012 __fallthrough;
3013 #endif /* CONFIG_BT_TICKER_NEXT_SLOT_GET */
3014
3015 #if defined(CONFIG_BT_TICKER_JOB_IDLE_GET) || \
3016 defined(CONFIG_BT_TICKER_NEXT_SLOT_GET)
3017 case TICKER_USER_OP_TYPE_IDLE_GET:
3018 uop->status = TICKER_STATUS_SUCCESS;
3019 fp_op_func = uop->fp_op_func;
3020 break;
3021 #endif /* CONFIG_BT_TICKER_JOB_IDLE_GET ||
3022 * CONFIG_BT_TICKER_NEXT_SLOT_GET
3023 */
3024
3025 #if !defined(CONFIG_BT_TICKER_LOW_LAT) && \
3026 !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC) && \
3027 defined(CONFIG_BT_TICKER_PRIORITY_SET)
3028 case TICKER_USER_OP_TYPE_PRIORITY_SET:
3029 if (uop->id < instance->count_node) {
3030 struct ticker_node *node = instance->nodes;
3031
3032 node[uop->id].priority =
3033 uop->params.priority_set.priority;
3034 uop->status = TICKER_STATUS_SUCCESS;
3035 } else {
3036 uop->status = TICKER_STATUS_FAILURE;
3037 }
3038 fp_op_func = uop->fp_op_func;
3039 break;
3040 #endif /* !CONFIG_BT_TICKER_LOW_LAT &&
3041 * !CONFIG_BT_TICKER_SLOT_AGNOSTIC
3042 * CONFIG_BT_TICKER_PRIORITY_SET
3043 */
3044
3045 default:
3046 /* do nothing for other ops */
3047 break;
3048 }
3049
3050 if (fp_op_func) {
3051 fp_op_func(uop->status, uop->op_context);
3052 }
3053 }
3054
3055 /**
3056 * @brief Check for pending inquiries for all users
3057 *
 * @details Runs through all user operation lists, checking for pending
 * inquiries. Currently only two types of inquiries are supported:
 * TICKER_USER_OP_TYPE_SLOT_GET and TICKER_USER_OP_TYPE_IDLE_GET. The
 * function also supports user operation TICKER_USER_OP_TYPE_PRIORITY_SET.
 * Processing advances the user->first index, indicating that the user
 * operations are complete.
3064 *
3065 * @param instance Pointer to ticker instance
3066 *
3067 * @internal
3068 */
3069 static inline void ticker_job_list_inquire(struct ticker_instance *instance)
3070 {
3071 struct ticker_user *users;
3072 uint8_t count_user;
3073
3074 users = &instance->users[0];
3075 count_user = instance->count_user;
3076 /* Traverse user operation queue - first to last (with wrap) */
3077 while (count_user--) {
3078 struct ticker_user_op *user_op;
3079 struct ticker_user *user;
3080
3081 user = &users[count_user];
3082 user_op = &user->user_op[0];
3083 while (user->first != user->last) {
3084 uint8_t first;
3085
3086 ticker_job_op_inquire(instance, &user_op[user->first]);
3087
3088 first = user->first + 1;
3089 if (first == user->count_user_op) {
3090 first = 0U;
3091 }
3092 user->first = first;
3093 }
3094 }
3095 }
3096 #endif /* CONFIG_BT_TICKER_JOB_IDLE_GET ||
3097 * CONFIG_BT_TICKER_NEXT_SLOT_GET ||
3098 * CONFIG_BT_TICKER_PRIORITY_SET
3099 */
3100
3101 /**
3102 * @brief Update counter compare value (trigger)
3103 *
 * @details Updates the trigger to match the next expiring ticker node. The
 * function takes into consideration that it may be preempted in the process,
 * and makes sure - by iteration - that the compare value is set in the
 * future (with a margin).
 *
 * @param instance Pointer to ticker instance
 * @param ticker_id_old_head Previous ticker_id_head
 *
 * @return 1 if the worker should be scheduled immediately because the
 * next expiry is already due, otherwise 0
 *
3112 * @internal
3113 */
3114 static inline uint8_t
3115 ticker_job_compare_update(struct ticker_instance *instance,
3116 uint8_t ticker_id_old_head)
3117 {
3118 struct ticker_node *ticker;
3119 uint32_t ticks_to_expire;
3120 uint32_t ctr_curr;
3121 uint32_t ctr_prev;
3122 uint32_t cc;
3123 uint32_t i;
3124
3125 if (instance->ticker_id_head == TICKER_NULL) {
3126 if (cntr_stop() == 0) {
3127 #if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
3128 instance->ticks_slot_previous = 0U;
3129 #endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */
3130
3131 #if !defined(CONFIG_BT_TICKER_CNTR_FREE_RUNNING)
3132 /* Stopped counter value will be used as ticks_current
3133 * for calculation to start new tickers.
3134 */
3135 instance->ticks_current = cntr_cnt_get();
3136 #endif /* !CONFIG_BT_TICKER_CNTR_FREE_RUNNING */
3137 }
3138
3139 return 0U;
3140 }
3141
3142 /* Check if this is the first update. If so, start the counter */
3143 if (ticker_id_old_head == TICKER_NULL) {
3144 #if !defined(CONFIG_BT_TICKER_CNTR_FREE_RUNNING)
3145 uint32_t ticks_current;
3146
3147 ticks_current = cntr_cnt_get();
3148 #endif /* !CONFIG_BT_TICKER_CNTR_FREE_RUNNING */
3149
3150 if (cntr_start() == 0) {
3151 #if !defined(CONFIG_BT_TICKER_CNTR_FREE_RUNNING)
/* Stopped counter value will be used as ticks_current
 * for calculation to start new tickers.
 * FIXME: We do not need to synchronize here; instead,
 * replace this with a check that the counter value has
 * not changed since the synchronization done while the
 * counter was in the stopped state.
 */
3159 instance->ticks_current = ticks_current;
3160 #endif /* !CONFIG_BT_TICKER_CNTR_FREE_RUNNING */
3161 }
3162 }
3163
3164 ticker = &instance->nodes[instance->ticker_id_head];
3165 ticks_to_expire = ticker->ticks_to_expire;
3166
3167 /* If ticks_to_expire is zero, then immediately trigger the worker.
3168 */
3169 if (!ticks_to_expire) {
3170 return 1U;
3171 }
3172
3173 /* Iterate few times, if required, to ensure that compare is
3174 * correctly set to a future value. This is required in case
3175 * the operation is pre-empted and current h/w counter runs
3176 * ahead of compare value to be set.
3177 */
3178 i = 10U;
3179 ctr_curr = cntr_cnt_get();
3180 do {
3181 uint32_t ticks_elapsed;
3182 uint32_t ticks_diff;
3183
3184 LL_ASSERT_ERR(i);
3185 i--;
3186
3187 cc = instance->ticks_current;
3188 ticks_diff = ticker_ticks_diff_get(ctr_curr, cc);
3189 if (ticks_diff >= ticks_to_expire) {
3190 return 1U;
3191 }
3192
3193 ticks_elapsed = ticks_diff + HAL_TICKER_CNTR_CMP_OFFSET_MIN +
3194 HAL_TICKER_CNTR_SET_LATENCY;
3195 cc += MAX(ticks_elapsed, ticks_to_expire);
3196 cc &= HAL_TICKER_CNTR_MASK;
3197 instance->trigger_set_cb(cc);
3198
3199 ctr_prev = ctr_curr;
3200 ctr_curr = cntr_cnt_get();
3201 } while ((ticker_ticks_diff_get(ctr_curr, ctr_prev) +
3202 HAL_TICKER_CNTR_CMP_OFFSET_MIN) >
3203 ticker_ticks_diff_get(cc, ctr_prev));
3204
3205 return 0U;
3206 }
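
/* Editorial note: the do/while loop above guards against preemption
 * between reading the counter and writing the compare register. If the
 * counter has advanced to within HAL_TICKER_CNTR_CMP_OFFSET_MIN of the
 * compare value just set, the compare may already have been missed, so a
 * later compare value is computed and set on the next iteration, bounded
 * to 10 attempts by the assertion.
 */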
3207
3208 /**
3209 * @brief Ticker job
3210 *
3211 * @details Runs the bottom half of the ticker, after ticker nodes have elapsed
3212 * or user operations requested. The ticker_job is responsible for removing and
3213 * re-inserting ticker nodes, based on next elapsing and periodicity of the
3214 * nodes. The ticker_job is also responsible for processing user operations,
3215 * i.e. requests for start, update, stop etc.
3216 * Invoked from the ticker job mayfly context (TICKER_MAYFLY_CALL_ID_JOB).
3217 *
3218 * @param param Pointer to ticker instance
3219 *
3220 * @internal
3221 */
3222 void ticker_job(void *param)
3223 {
3224 struct ticker_instance *instance = param;
3225 uint8_t flag_compare_update;
3226 uint8_t ticker_id_old_head;
3227 uint8_t compare_trigger;
3228 uint32_t ticks_previous;
3229 uint32_t ticks_elapsed;
3230 uint8_t flag_elapsed;
3231 uint8_t insert_head;
3232 uint32_t ticks_now;
3233 uint8_t pending;
3234
3235 DEBUG_TICKER_JOB(1);
3236
3237 /* Defer job, as worker is running */
3238 if (instance->worker_trigger) {
3239 DEBUG_TICKER_JOB(0);
3240 return;
3241 }
3242
3243 /* Defer job, as job is already running */
3244 if (instance->job_guard) {
3245 instance->sched_cb(TICKER_CALL_ID_JOB, TICKER_CALL_ID_JOB, 1,
3246 instance);
3247 return;
3248 }
3249 instance->job_guard = 1U;
3250
3251 /* Back up the previous known tick */
3252 ticks_previous = instance->ticks_current;
3253
3254 /* Update current tick with the elapsed value from queue, and dequeue */
3255 if (instance->ticks_elapsed_first != instance->ticks_elapsed_last) {
3256 ticker_next_elapsed(&instance->ticks_elapsed_first);
3257
3258 ticks_elapsed =
3259 instance->ticks_elapsed[instance->ticks_elapsed_first];
3260
3261 instance->ticks_current += ticks_elapsed;
3262 instance->ticks_current &= HAL_TICKER_CNTR_MASK;
3263
3264 flag_elapsed = 1U;
3265 } else {
3266 /* No elapsed value in queue */
3267 flag_elapsed = 0U;
3268 ticks_elapsed = 0U;
3269 }
3270
3271 /* Initialise internal re-insert list */
3272 insert_head = TICKER_NULL;
3273
3274 /* Initialise flag used to update next compare value */
3275 flag_compare_update = 0U;
3276
3277 /* Remember the old head, so as to decide if new compare needs to be
3278 * set.
3279 */
3280 ticker_id_old_head = instance->ticker_id_head;
3281
3282 /* Get current ticks, used in managing updates and expired tickers */
3283 ticks_now = cntr_cnt_get();
3284
3285 #if defined(CONFIG_BT_TICKER_CNTR_FREE_RUNNING)
3286 if (ticker_id_old_head == TICKER_NULL) {
3287 /* No tickers active, synchronize to the free running counter so
3288 * that any new ticker started can have its ticks_to_expire
3289 * relative to current free running counter value.
3290 *
 * Both the current tick (new value) and the previous tick
 * (previously stored when all tickers stopped) are set to ticks_now.
3293 * All new tickers are started from this synchronized value as
3294 * the anchor/reference value.
3295 *
3296 * Note, this if clause is an overhead wherein the check is
3297 * performed for every ticker_job() iteration!
3298 */
3299 instance->ticks_current = ticks_now;
3300 ticks_previous = ticks_now;
3301 }
#endif /* CONFIG_BT_TICKER_CNTR_FREE_RUNNING */
3303
3304 /* Manage user operations (updates and deletions) in ticker list */
3305 pending = ticker_job_list_manage(instance, ticks_now, ticks_elapsed,
3306 &insert_head);
3307
3308 /* Detect change in head of the list */
3309 if (instance->ticker_id_head != ticker_id_old_head) {
3310 flag_compare_update = 1U;
3311 }
3312
3313 /* Handle expired tickers */
3314 if (flag_elapsed) {
3315 ticker_job_worker_bh(instance, ticks_now, ticks_previous,
3316 ticks_elapsed, &insert_head);
3317
3318 /* Detect change in head of the list */
3319 if (instance->ticker_id_head != ticker_id_old_head) {
3320 flag_compare_update = 1U;
3321 }
3322
3323 /* Handle insertions */
3324 ticker_job_list_insert(instance, insert_head);
3325
3326 #if defined(CONFIG_BT_TICKER_EXT) && !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC) &&\
3327 !defined(CONFIG_BT_TICKER_LOW_LAT)
3328 /* Re-schedule any pending nodes with slot_window */
3329 if (ticker_job_reschedule_in_window(instance)) {
3330 flag_compare_update = 1U;
3331 }
#endif /* CONFIG_BT_TICKER_EXT && !CONFIG_BT_TICKER_SLOT_AGNOSTIC &&
	* !CONFIG_BT_TICKER_LOW_LAT
	*/
3333 } else {
3334 /* Handle insertions */
3335 ticker_job_list_insert(instance, insert_head);
3336 }
3337
3338 /* Detect change in head of the list */
3339 if (instance->ticker_id_head != ticker_id_old_head) {
3340 flag_compare_update = 1U;
3341 }
3342
3343 #if defined(CONFIG_BT_TICKER_JOB_IDLE_GET) || \
3344 defined(CONFIG_BT_TICKER_NEXT_SLOT_GET) || \
3345 defined(CONFIG_BT_TICKER_PRIORITY_SET)
3346 /* Process any list inquiries */
3347 if (!pending) {
3348 /* Handle inquiries */
3349 ticker_job_list_inquire(instance);
3350 }
3351 #else /* !CONFIG_BT_TICKER_JOB_IDLE_GET &&
3352 * !CONFIG_BT_TICKER_NEXT_SLOT_GET &&
3353 * !CONFIG_BT_TICKER_PRIORITY_SET
3354 */
3355 ARG_UNUSED(pending);
3356 #endif /* !CONFIG_BT_TICKER_JOB_IDLE_GET &&
3357 * !CONFIG_BT_TICKER_NEXT_SLOT_GET &&
3358 * !CONFIG_BT_TICKER_PRIORITY_SET
3359 */
3360
3361 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
3362 if (instance->expire_infos_outdated) {
3363 ticker_job_update_expire_infos(instance);
3364 }
3365 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
3366
3367 /* update compare if head changed */
3368 if (flag_compare_update) {
3369 compare_trigger = ticker_job_compare_update(instance,
3370 ticker_id_old_head);
3371 } else {
3372 compare_trigger = 0U;
3373 }
3374
3375 /* Permit worker to run */
3376 instance->job_guard = 0U;
3377
3378 /* trigger worker if deferred */
3379 cpu_dmb();
3380 if (instance->worker_trigger || compare_trigger) {
3381 instance->sched_cb(TICKER_CALL_ID_JOB, TICKER_CALL_ID_WORKER, 1,
3382 instance);
3383 }
3384
3385 DEBUG_TICKER_JOB(0);
3386 }
3387
3388 /*****************************************************************************
3389 * Public Interface
3390 ****************************************************************************/
3391
3392 /**
3393 * @brief Initialize ticker instance
3394 *
3395 * @details Called by ticker instance client once to initialize the ticker.
3396 *
3397 * @param instance_index Index of ticker instance
3398 * @param count_node Number of ticker nodes in node array
3399 * @param node Pointer to ticker node array
3400 * @param count_user Number of users in user array
3401 * @param user Pointer to user array of size count_user
3402 * @param count_op Number of user operations in user_op array
3403 * @param user_op Pointer to user operations array of size count_op
3404 * @param caller_id_get_cb Pointer to function for retrieving caller_id from
3405 * user id
3406 * @param sched_cb Pointer to function for scheduling ticker_worker
3407 * and ticker_job
3408 * @param trigger_set_cb Pointer to function for setting the compare trigger
3409 * ticks value
3410 *
3411 * @return TICKER_STATUS_SUCCESS if initialization was successful, otherwise
3412 * TICKER_STATUS_FAILURE
3413 */
3414 uint8_t ticker_init(uint8_t instance_index, uint8_t count_node, void *node,
3415 uint8_t count_user, void *user, uint8_t count_op, void *user_op,
3416 ticker_caller_id_get_cb_t caller_id_get_cb,
3417 ticker_sched_cb_t sched_cb,
3418 ticker_trigger_set_cb_t trigger_set_cb)
3419 {
3420 struct ticker_instance *instance = &_instance[instance_index];
3421 struct ticker_user_op *user_op_ = (void *)user_op;
3422 struct ticker_user *users;
3423
3424 if (instance_index >= TICKER_INSTANCE_MAX) {
3425 return TICKER_STATUS_FAILURE;
3426 }
3427
3428 instance->count_node = count_node;
3429 instance->nodes = node;
3430
3431 #if !defined(CONFIG_BT_TICKER_LOW_LAT) && \
3432 !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC) && \
3433 defined(CONFIG_BT_TICKER_PRIORITY_SET)
3434 while (count_node--) {
3435 instance->nodes[count_node].priority = 0;
3436 }
3437 #endif /* !CONFIG_BT_TICKER_LOW_LAT &&
3438 * !CONFIG_BT_TICKER_SLOT_AGNOSTIC
3439 * CONFIG_BT_TICKER_PRIORITY_SET
3440 */
3441
3442 instance->count_user = count_user;
3443 instance->users = user;
3444
3445 /** @todo check if enough ticker_user_op supplied */
3446
3447 users = &instance->users[0];
3448 while (count_user--) {
3449 users[count_user].user_op = user_op_;
3450 user_op_ += users[count_user].count_user_op;
3451 count_op -= users[count_user].count_user_op;
3452 }
3453
3454 if (count_op) {
3455 return TICKER_STATUS_FAILURE;
3456 }
3457
3458 instance->caller_id_get_cb = caller_id_get_cb;
3459 instance->sched_cb = sched_cb;
3460 instance->trigger_set_cb = trigger_set_cb;
3461
3462 instance->ticker_id_head = TICKER_NULL;
3463 #if defined(CONFIG_BT_TICKER_CNTR_FREE_RUNNING)
3464 /* We will synchronize in ticker_job on first ticker start */
3465 instance->ticks_current = 0U;
3466 #else /* !CONFIG_BT_TICKER_CNTR_FREE_RUNNING */
3467 /* Synchronize to initialized (in stopped state) counter value */
3468 instance->ticks_current = cntr_cnt_get();
3469 #endif /* !CONFIG_BT_TICKER_CNTR_FREE_RUNNING */
3470 instance->ticks_elapsed_first = 0U;
3471 instance->ticks_elapsed_last = 0U;
3472
3473 #if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
3474 instance->ticker_id_slot_previous = TICKER_NULL;
3475 instance->ticks_slot_previous = 0U;
3476 #endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */
3477
3478 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
3479 for (int i = 0; i < TICKER_EXPIRE_INFO_MAX; i++) {
3480 instance->expire_infos[i].ticker_id = TICKER_NULL;
3481 instance->expire_infos[i].last = 1;
3482 }
3483 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
3484
3485 return TICKER_STATUS_SUCCESS;
3486 }
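
/* Illustrative usage sketch (hypothetical client code, not part of this
 * file): the client supplies statically allocated node, user and user
 * operation arrays, and the count_user_op fields of the users must sum to
 * the user operation count passed in:
 *
 *   static struct ticker_node nodes[4];
 *   static struct ticker_user users[1];
 *   static struct ticker_user_op user_ops[8];
 *
 *   users[0].count_user_op = ARRAY_SIZE(user_ops);
 *   err = ticker_init(0, ARRAY_SIZE(nodes), nodes,
 *                     ARRAY_SIZE(users), users,
 *                     ARRAY_SIZE(user_ops), user_ops,
 *                     caller_id_get_cb, sched_cb, trigger_set_cb);
 *
 * where caller_id_get_cb, sched_cb and trigger_set_cb are
 * platform-provided callbacks of the types named in the parameter list.
 */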

/**
 * @brief Deinitialize ticker instance
 *
 * @param instance_index Index of ticker instance
 *
 * @return 0 on success, -EINVAL on invalid instance index, and -EBUSY if the
 * instance still has started ticker nodes
 */
int ticker_deinit(uint8_t instance_index)
{
        struct ticker_instance *instance;

        if (instance_index >= TICKER_INSTANCE_MAX) {
                return -EINVAL;
        }

        instance = &_instance[instance_index];

        if (instance->ticker_id_head != TICKER_NULL) {
                return -EBUSY;
        }

        instance->count_node = 0U;

        return 0;
}

/**
 * @brief Check if ticker instance is initialized
 *
 * @param instance_index Index of ticker instance
 *
 * @return true if ticker instance is initialized, false otherwise
 */
bool ticker_is_initialized(uint8_t instance_index)
{
        return !!(_instance[instance_index].count_node);
}
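
/*
 * Illustrative teardown sketch: ticker_deinit() returns -EBUSY while any
 * node is still in the active list, so all tickers must be stopped (and
 * ticker_job allowed to run) before deinitialization can succeed.
 *
 * @code
 * if (ticker_is_initialized(0)) {
 *         int err = ticker_deinit(0);
 *
 *         LL_ASSERT(err == 0);
 * }
 * @endcode
 */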

/**
 * @brief Trigger the ticker worker
 *
 * @details Schedules the ticker_worker upper half by invoking the
 * corresponding mayfly.
 *
 * @param instance_index Index of ticker instance
 */
void ticker_trigger(uint8_t instance_index)
{
        struct ticker_instance *instance;

        DEBUG_TICKER_ISR(1);

        instance = &_instance[instance_index];
        if (instance->sched_cb) {
                instance->sched_cb(TICKER_CALL_ID_TRIGGER,
                                   TICKER_CALL_ID_WORKER, 1, instance);
        }

        DEBUG_TICKER_ISR(0);
}
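
/*
 * Illustrative sketch: ticker_trigger() is the entry point expected to be
 * called from the counter compare ISR; the ISR name below is hypothetical.
 *
 * @code
 * void rtc_compare_isr(void)
 * {
 *         ticker_trigger(0); // schedules ticker_worker via sched_cb
 * }
 * @endcode
 */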

/**
 * @brief Start a ticker node
 *
 * @details Creates a new user operation of type TICKER_USER_OP_TYPE_START and
 * schedules the ticker_job.
 *
 * @param instance_index     Index of ticker instance
 * @param user_id            Ticker user id. Used for indexing user operations
 *                           and mapping to mayfly caller id
 * @param ticker_id          Id of ticker node
 * @param ticks_anchor       Absolute tick count as anchor point for
 *                           ticks_first
 * @param ticks_first        Initial number of ticks before first timeout
 * @param ticks_periodic     Number of ticks for a periodic ticker node. If 0,
 *                           ticker node is treated as one-shot
 * @param remainder_periodic Periodic ticks fraction
 * @param lazy               Number of periods to skip (latency). A value of 1
 *                           causes skipping every other timeout
 * @param ticks_slot         Slot reservation ticks for node (air-time)
 * @param fp_timeout_func    Function pointer of function to call at timeout
 * @param context            Context passed in timeout call
 * @param fp_op_func         Function pointer of user operation completion
 *                           function
 * @param op_context         Context passed in operation completion call
 *
 * @return TICKER_STATUS_BUSY if start was successful but not yet completed.
 * TICKER_STATUS_FAILURE is returned if there are no more user operations
 * available, and TICKER_STATUS_SUCCESS is returned if ticker_job gets to
 * run before exiting ticker_start
 */
#if defined(CONFIG_BT_TICKER_EXT)
uint8_t ticker_start(uint8_t instance_index, uint8_t user_id, uint8_t ticker_id,
                     uint32_t ticks_anchor, uint32_t ticks_first, uint32_t ticks_periodic,
                     uint32_t remainder_periodic, uint16_t lazy, uint32_t ticks_slot,
                     ticker_timeout_func fp_timeout_func, void *context,
                     ticker_op_func fp_op_func, void *op_context)
{
        return ticker_start_ext(instance_index, user_id, ticker_id,
                                ticks_anchor, ticks_first, ticks_periodic,
                                remainder_periodic, lazy, ticks_slot,
                                fp_timeout_func, context,
                                fp_op_func, op_context,
                                NULL);
}

static uint8_t start_us(uint8_t instance_index, uint8_t user_id,
                        uint8_t ticker_id, uint32_t ticks_anchor,
                        uint32_t ticks_first, uint32_t remainder_first,
                        uint32_t ticks_periodic, uint32_t remainder_periodic,
                        uint16_t lazy, uint32_t ticks_slot,
                        ticker_timeout_func fp_timeout_func, void *context,
                        ticker_op_func fp_op_func, void *op_context,
                        struct ticker_ext *ext_data);

uint8_t ticker_start_us(uint8_t instance_index, uint8_t user_id,
                        uint8_t ticker_id, uint32_t ticks_anchor,
                        uint32_t ticks_first, uint32_t remainder_first,
                        uint32_t ticks_periodic, uint32_t remainder_periodic,
                        uint16_t lazy, uint32_t ticks_slot,
                        ticker_timeout_func fp_timeout_func, void *context,
                        ticker_op_func fp_op_func, void *op_context)
{
        return start_us(instance_index, user_id, ticker_id, ticks_anchor,
                        ticks_first, remainder_first,
                        ticks_periodic, remainder_periodic,
                        lazy, ticks_slot,
                        fp_timeout_func, context,
                        fp_op_func, op_context,
                        NULL);
}

uint8_t ticker_start_ext(uint8_t instance_index, uint8_t user_id, uint8_t ticker_id,
                         uint32_t ticks_anchor, uint32_t ticks_first,
                         uint32_t ticks_periodic, uint32_t remainder_periodic,
                         uint16_t lazy, uint32_t ticks_slot,
                         ticker_timeout_func fp_timeout_func, void *context,
                         ticker_op_func fp_op_func, void *op_context,
                         struct ticker_ext *ext_data)
{
        return start_us(instance_index, user_id, ticker_id, ticks_anchor,
                        ticks_first, 0U, ticks_periodic, remainder_periodic,
                        lazy, ticks_slot,
                        fp_timeout_func, context,
                        fp_op_func, op_context,
                        ext_data);
}

static uint8_t start_us(uint8_t instance_index, uint8_t user_id,
                        uint8_t ticker_id, uint32_t ticks_anchor,
                        uint32_t ticks_first, uint32_t remainder_first,
                        uint32_t ticks_periodic, uint32_t remainder_periodic,
                        uint16_t lazy, uint32_t ticks_slot,
                        ticker_timeout_func fp_timeout_func, void *context,
                        ticker_op_func fp_op_func, void *op_context,
                        struct ticker_ext *ext_data)

#else /* !CONFIG_BT_TICKER_EXT */
uint8_t ticker_start(uint8_t instance_index, uint8_t user_id, uint8_t ticker_id,
                     uint32_t ticks_anchor, uint32_t ticks_first, uint32_t ticks_periodic,
                     uint32_t remainder_periodic, uint16_t lazy, uint32_t ticks_slot,
                     ticker_timeout_func fp_timeout_func, void *context,
                     ticker_op_func fp_op_func, void *op_context)
{
        return ticker_start_us(instance_index, user_id,
                               ticker_id, ticks_anchor,
                               ticks_first, 0U,
                               ticks_periodic, remainder_periodic,
                               lazy, ticks_slot,
                               fp_timeout_func, context,
                               fp_op_func, op_context);
}

uint8_t ticker_start_us(uint8_t instance_index, uint8_t user_id,
                        uint8_t ticker_id, uint32_t ticks_anchor,
                        uint32_t ticks_first, uint32_t remainder_first,
                        uint32_t ticks_periodic, uint32_t remainder_periodic,
                        uint16_t lazy, uint32_t ticks_slot,
                        ticker_timeout_func fp_timeout_func, void *context,
                        ticker_op_func fp_op_func, void *op_context)
#endif /* !CONFIG_BT_TICKER_EXT */

{
        struct ticker_instance *instance = &_instance[instance_index];
        struct ticker_user_op *user_op;
        struct ticker_user *user;
        uint8_t last;

        user = &instance->users[user_id];

        last = user->last + 1;
        if (last >= user->count_user_op) {
                last = 0U;
        }

        if (last == user->first) {
                return TICKER_STATUS_FAILURE;
        }

        user_op = &user->user_op[user->last];
        user_op->op = TICKER_USER_OP_TYPE_START;
        user_op->id = ticker_id;
        user_op->params.start.ticks_at_start = ticks_anchor;
        user_op->params.start.ticks_first = ticks_first;
        user_op->params.start.ticks_periodic = ticks_periodic;

#if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
        user_op->params.start.remainder_periodic = remainder_periodic;

#if defined(CONFIG_BT_TICKER_START_REMAINDER)
        user_op->params.start.remainder_first = remainder_first;
#else /* !CONFIG_BT_TICKER_START_REMAINDER */
        ARG_UNUSED(remainder_first);
#endif /* !CONFIG_BT_TICKER_START_REMAINDER */
#endif /* CONFIG_BT_TICKER_REMAINDER_SUPPORT */

#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
        user_op->params.start.ticks_slot = ticks_slot;
#endif
        user_op->params.start.lazy = lazy;
#if defined(CONFIG_BT_TICKER_EXT)
        user_op->params.start.ext_data = ext_data;
#endif
        user_op->params.start.fp_timeout_func = fp_timeout_func;
        user_op->params.start.context = context;
        user_op->status = TICKER_STATUS_BUSY;
        user_op->fp_op_func = fp_op_func;
        user_op->op_context = op_context;

        /* Make sure transaction is completed before committing */
        cpu_dmb();
        user->last = last;

        instance->sched_cb(instance->caller_id_get_cb(user_id),
                           TICKER_CALL_ID_JOB, 0, instance);

        return user_op->status;
}
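
/*
 * Illustrative usage sketch for ticker_start(): a periodic node with a
 * 10 ms interval anchored at the current tick count. TICKER_ID_EXAMPLE,
 * TICKER_USER_ID_THREAD, timeout_cb and op_done are assumed to be defined
 * by the caller; HAL_TICKER_US_TO_TICKS() and HAL_TICKER_REMAINDER() are
 * the HAL conversion macros. A TICKER_STATUS_BUSY return is not an error;
 * it only means ticker_job has not run yet and the final status will be
 * delivered through op_done().
 *
 * @code
 * uint8_t ret;
 *
 * ret = ticker_start(0, TICKER_USER_ID_THREAD, TICKER_ID_EXAMPLE,
 *                    ticker_ticks_now_get(),
 *                    HAL_TICKER_US_TO_TICKS(10000),
 *                    HAL_TICKER_US_TO_TICKS(10000),
 *                    HAL_TICKER_REMAINDER(10000),
 *                    0, 0,
 *                    timeout_cb, NULL,
 *                    op_done, NULL);
 * LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
 *           (ret == TICKER_STATUS_BUSY));
 * @endcode
 */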

#if defined(CONFIG_BT_TICKER_UPDATE)
/**
 * @brief Update a ticker node
 *
 * @details Creates a new user operation of type TICKER_USER_OP_TYPE_UPDATE and
 * schedules the ticker_job.
 *
 * @param instance_index    Index of ticker instance
 * @param user_id           Ticker user id. Used for indexing user operations
 *                          and mapping to mayfly caller id
 * @param ticker_id         Id of ticker node
 * @param ticks_drift_plus  Number of ticks to add for drift compensation
 * @param ticks_drift_minus Number of ticks to subtract for drift compensation
 * @param ticks_slot_plus   Number of ticks to add to slot reservation
 * @param ticks_slot_minus  Number of ticks to subtract from slot
 *                          reservation
 * @param lazy              Number of periods to skip (latency). A value of 0
 *                          means no action. 1 means no latency (normal). A
 *                          value >1 means latency = lazy - 1
 * @param force             Force update to take effect immediately. With
 *                          force = 0, update is scheduled to take effect as
 *                          soon as possible
 * @param fp_op_func        Function pointer of user operation completion
 *                          function
 * @param op_context        Context passed in operation completion call
 * @param must_expire       Disable, enable or ignore the must-expire state.
 *                          A value of 0 means no change, 1 means disable and
 *                          2 means enable.
 *
 * @return TICKER_STATUS_BUSY if update was successful but not yet completed.
 * TICKER_STATUS_FAILURE is returned if there are no more user operations
 * available, and TICKER_STATUS_SUCCESS is returned if ticker_job gets to run
 * before exiting ticker_update
 */
uint8_t ticker_update(uint8_t instance_index, uint8_t user_id,
                      uint8_t ticker_id, uint32_t ticks_drift_plus,
                      uint32_t ticks_drift_minus, uint32_t ticks_slot_plus,
                      uint32_t ticks_slot_minus, uint16_t lazy, uint8_t force,
                      ticker_op_func fp_op_func, void *op_context)
#if defined(CONFIG_BT_TICKER_EXT)
#if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
{
        return ticker_update_ext(instance_index, user_id, ticker_id,
                                 ticks_drift_plus, ticks_drift_minus,
                                 ticks_slot_plus, ticks_slot_minus, lazy,
                                 force, fp_op_func, op_context, 0U, ticker_id);
}

uint8_t ticker_update_ext(uint8_t instance_index, uint8_t user_id,
                          uint8_t ticker_id, uint32_t ticks_drift_plus,
                          uint32_t ticks_drift_minus,
                          uint32_t ticks_slot_plus, uint32_t ticks_slot_minus,
                          uint16_t lazy, uint8_t force,
                          ticker_op_func fp_op_func, void *op_context,
                          uint8_t must_expire, uint8_t expire_info_id)
#else /* !CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
{
        return ticker_update_ext(instance_index, user_id, ticker_id,
                                 ticks_drift_plus, ticks_drift_minus,
                                 ticks_slot_plus, ticks_slot_minus, lazy,
                                 force, fp_op_func, op_context, 0U);
}

uint8_t ticker_update_ext(uint8_t instance_index, uint8_t user_id,
                          uint8_t ticker_id, uint32_t ticks_drift_plus,
                          uint32_t ticks_drift_minus,
                          uint32_t ticks_slot_plus, uint32_t ticks_slot_minus,
                          uint16_t lazy, uint8_t force,
                          ticker_op_func fp_op_func, void *op_context,
                          uint8_t must_expire)
#endif /* !CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
#endif /* CONFIG_BT_TICKER_EXT */
{
        struct ticker_instance *instance = &_instance[instance_index];
        struct ticker_user_op *user_op;
        struct ticker_user *user;
        uint8_t last;

        user = &instance->users[user_id];

        last = user->last + 1;
        if (last >= user->count_user_op) {
                last = 0U;
        }

        if (last == user->first) {
                return TICKER_STATUS_FAILURE;
        }

        user_op = &user->user_op[user->last];
        user_op->op = TICKER_USER_OP_TYPE_UPDATE;
        user_op->id = ticker_id;
        user_op->params.update.ticks_drift_plus = ticks_drift_plus;
        user_op->params.update.ticks_drift_minus = ticks_drift_minus;
#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
        user_op->params.update.ticks_slot_plus = ticks_slot_plus;
        user_op->params.update.ticks_slot_minus = ticks_slot_minus;
#endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */
        user_op->params.update.lazy = lazy;
        user_op->params.update.force = force;
#if defined(CONFIG_BT_TICKER_EXT)
#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC) && !defined(CONFIG_BT_TICKER_LOW_LAT)
        user_op->params.update.must_expire = must_expire;
#endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC && !CONFIG_BT_TICKER_LOW_LAT */
#if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
        user_op->params.update.expire_info_id = expire_info_id;
#endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
#endif /* CONFIG_BT_TICKER_EXT */
        user_op->status = TICKER_STATUS_BUSY;
        user_op->fp_op_func = fp_op_func;
        user_op->op_context = op_context;

        /* Make sure transaction is completed before committing */
        cpu_dmb();
        user->last = last;

        instance->sched_cb(instance->caller_id_get_cb(user_id),
                           TICKER_CALL_ID_JOB, 0, instance);

        return user_op->status;
}
#endif /* CONFIG_BT_TICKER_UPDATE */
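
/*
 * Illustrative sketch: pure drift compensation via ticker_update(). Only
 * ticks_drift_plus is non-zero; slot, lazy and force are left at their
 * "no change" values, and no completion callback is requested.
 * TICKER_ID_EXAMPLE and TICKER_USER_ID_THREAD are hypothetical names.
 *
 * @code
 * uint8_t ret;
 *
 * ret = ticker_update(0, TICKER_USER_ID_THREAD, TICKER_ID_EXAMPLE,
 *                     2, 0,  // drift: expire 2 ticks later
 *                     0, 0,  // slot reservation unchanged
 *                     0,     // lazy: 0 = no action
 *                     0,     // force: 0 = apply as soon as possible
 *                     NULL, NULL);
 * @endcode
 */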

/**
 * @brief Yield a ticker node with supplied absolute ticks reference
 *
 * @details Creates a new user operation of type TICKER_USER_OP_TYPE_YIELD_ABS
 * and schedules the ticker_job.
 *
 * @param instance_index Index of ticker instance
 * @param user_id        Ticker user id. Used for indexing user operations
 *                       and mapping to mayfly caller id
 * @param ticker_id      Id of ticker node
 * @param ticks_at_yield Absolute tick count at ticker yield request
 * @param fp_op_func     Function pointer of user operation completion
 *                       function
 * @param op_context     Context passed in operation completion call
 *
 * @return TICKER_STATUS_BUSY if the yield request was successful but not yet
 * completed. TICKER_STATUS_FAILURE is returned if there are no more user
 * operations available, and TICKER_STATUS_SUCCESS is returned if ticker_job
 * gets to run before exiting ticker_yield_abs
 */
uint8_t ticker_yield_abs(uint8_t instance_index, uint8_t user_id,
                         uint8_t ticker_id, uint32_t ticks_at_yield,
                         ticker_op_func fp_op_func, void *op_context)
{
        struct ticker_instance *instance = &_instance[instance_index];
        struct ticker_user_op *user_op;
        struct ticker_user *user;
        uint8_t last;

        user = &instance->users[user_id];

        last = user->last + 1;
        if (last >= user->count_user_op) {
                last = 0U;
        }

        if (last == user->first) {
                return TICKER_STATUS_FAILURE;
        }

        user_op = &user->user_op[user->last];
        user_op->op = TICKER_USER_OP_TYPE_YIELD_ABS;
        user_op->id = ticker_id;
        user_op->params.yield.ticks_at_yield = ticks_at_yield;
        user_op->status = TICKER_STATUS_BUSY;
        user_op->fp_op_func = fp_op_func;
        user_op->op_context = op_context;

        /* Make sure transaction is completed before committing */
        cpu_dmb();
        user->last = last;

        instance->sched_cb(instance->caller_id_get_cb(user_id),
                           TICKER_CALL_ID_JOB, 0, instance);

        return user_op->status;
}
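
/*
 * Illustrative sketch: yield a node as of the current tick count. The
 * ticker id and user id are hypothetical; passing NULL for the completion
 * callback means the caller does not wait for the operation to finish.
 *
 * @code
 * (void)ticker_yield_abs(0, TICKER_USER_ID_THREAD, TICKER_ID_EXAMPLE,
 *                        ticker_ticks_now_get(), NULL, NULL);
 * @endcode
 */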

/**
 * @brief Stop a ticker node
 *
 * @details Creates a new user operation of type TICKER_USER_OP_TYPE_STOP and
 * schedules the ticker_job.
 *
 * @param instance_index Index of ticker instance
 * @param user_id        Ticker user id. Used for indexing user operations
 *                       and mapping to mayfly caller id
 * @param ticker_id      Id of ticker node
 * @param fp_op_func     Function pointer of user operation completion
 *                       function
 * @param op_context     Context passed in operation completion call
 *
 * @return TICKER_STATUS_BUSY if stop was successful but not yet completed.
 * TICKER_STATUS_FAILURE is returned if there are no more user operations
 * available, and TICKER_STATUS_SUCCESS is returned if ticker_job gets to run
 * before exiting ticker_stop
 */
uint8_t ticker_stop(uint8_t instance_index, uint8_t user_id, uint8_t ticker_id,
                    ticker_op_func fp_op_func, void *op_context)
{
        struct ticker_instance *instance = &_instance[instance_index];
        struct ticker_user_op *user_op;
        struct ticker_user *user;
        uint8_t last;

        user = &instance->users[user_id];

        last = user->last + 1;
        if (last >= user->count_user_op) {
                last = 0U;
        }

        if (last == user->first) {
                return TICKER_STATUS_FAILURE;
        }

        user_op = &user->user_op[user->last];
        user_op->op = TICKER_USER_OP_TYPE_STOP;
        user_op->id = ticker_id;
        user_op->status = TICKER_STATUS_BUSY;
        user_op->fp_op_func = fp_op_func;
        user_op->op_context = op_context;

        /* Make sure transaction is completed before committing */
        cpu_dmb();
        user->last = last;

        instance->sched_cb(instance->caller_id_get_cb(user_id),
                           TICKER_CALL_ID_JOB, 0, instance);

        return user_op->status;
}
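
/*
 * Illustrative sketch: a blocking stop built on the completion callback,
 * assuming stop_done follows the ticker_op_func prototype and that
 * ticker_job preempts the calling context. When TICKER_STATUS_BUSY is
 * returned, the final status arrives only after ticker_job has executed,
 * so a thread-context caller may poll a flag written by the callback.
 * The volatile handshake below is one possible pattern, not the
 * controller's own synchronization primitive.
 *
 * @code
 * static volatile uint32_t stop_status;
 *
 * static void stop_done(uint32_t status, void *op_context)
 * {
 *         stop_status = status;
 * }
 *
 * uint8_t ret;
 *
 * stop_status = TICKER_STATUS_BUSY;
 * ret = ticker_stop(0, TICKER_USER_ID_THREAD, TICKER_ID_EXAMPLE,
 *                   stop_done, NULL);
 * if (ret == TICKER_STATUS_BUSY) {
 *         while (stop_status == TICKER_STATUS_BUSY) {
 *                 // spin until the higher-priority ticker_job runs
 *         }
 *         ret = stop_status;
 * }
 * LL_ASSERT(ret == TICKER_STATUS_SUCCESS);
 * @endcode
 */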

/**
 * @brief Stop a ticker node with supplied absolute ticks reference
 *
 * @details Creates a new user operation of type TICKER_USER_OP_TYPE_STOP_ABS
 * and schedules the ticker_job.
 *
 * @param instance_index Index of ticker instance
 * @param user_id        Ticker user id. Used for indexing user operations
 *                       and mapping to mayfly caller id
 * @param ticker_id      Id of ticker node
 * @param ticks_at_stop  Absolute tick count at ticker stop request
 * @param fp_op_func     Function pointer of user operation completion
 *                       function
 * @param op_context     Context passed in operation completion call
 *
 * @return TICKER_STATUS_BUSY if stop was successful but not yet completed.
 * TICKER_STATUS_FAILURE is returned if there are no more user operations
 * available, and TICKER_STATUS_SUCCESS is returned if ticker_job gets to run
 * before exiting ticker_stop_abs
 */
uint8_t ticker_stop_abs(uint8_t instance_index, uint8_t user_id,
                        uint8_t ticker_id, uint32_t ticks_at_stop,
                        ticker_op_func fp_op_func, void *op_context)
{
        struct ticker_instance *instance = &_instance[instance_index];
        struct ticker_user_op *user_op;
        struct ticker_user *user;
        uint8_t last;

        user = &instance->users[user_id];

        last = user->last + 1;
        if (last >= user->count_user_op) {
                last = 0U;
        }

        if (last == user->first) {
                return TICKER_STATUS_FAILURE;
        }

        user_op = &user->user_op[user->last];
        user_op->op = TICKER_USER_OP_TYPE_STOP_ABS;
        user_op->id = ticker_id;
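        /* The STOP_ABS op reuses the yield parameter union member:
         * ticks_at_stop is carried in params.yield.ticks_at_yield.
         */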
        user_op->params.yield.ticks_at_yield = ticks_at_stop;
        user_op->status = TICKER_STATUS_BUSY;
        user_op->fp_op_func = fp_op_func;
        user_op->op_context = op_context;

        /* Make sure transaction is completed before committing */
        cpu_dmb();
        user->last = last;

        instance->sched_cb(instance->caller_id_get_cb(user_id),
                           TICKER_CALL_ID_JOB, 0, instance);

        return user_op->status;
}

#if defined(CONFIG_BT_TICKER_NEXT_SLOT_GET)
/**
 * @brief Get next ticker node slot
 *
 * @details Gets the next ticker node that has slot ticks specified and
 * returns its ticker id and the accumulated ticks until expiration. If no
 * ticker node has slot ticks, the next ticker node is returned.
 * If no head id is provided (TICKER_NULL), the first node is returned.
 *
 * @param instance_index  Index of ticker instance
 * @param user_id         Ticker user id. Used for indexing user operations
 *                        and mapping to mayfly caller id
 * @param ticker_id       Pointer to id of ticker node
 * @param ticks_current   Pointer to current ticks count
 * @param ticks_to_expire Pointer to ticks to expire
 * @param fp_op_func      Function pointer of user operation completion
 *                        function
 * @param op_context      Context passed in operation completion call
 *
 * @return TICKER_STATUS_BUSY if request was successful but not yet completed.
 * TICKER_STATUS_FAILURE is returned if there are no more user operations
 * available, and TICKER_STATUS_SUCCESS is returned if ticker_job gets to run
 * before exiting ticker_next_slot_get
 */
uint8_t ticker_next_slot_get(uint8_t instance_index, uint8_t user_id,
                             uint8_t *ticker_id, uint32_t *ticks_current,
                             uint32_t *ticks_to_expire,
                             ticker_op_func fp_op_func, void *op_context)
{
#if defined(CONFIG_BT_TICKER_LAZY_GET) || \
        defined(CONFIG_BT_TICKER_REMAINDER_GET) || \
        defined(CONFIG_BT_TICKER_NEXT_SLOT_GET_MATCH)
        return ticker_next_slot_get_ext(instance_index, user_id, ticker_id,
                                        ticks_current, ticks_to_expire, NULL,
                                        NULL, NULL, NULL, fp_op_func,
                                        op_context);
}

uint8_t ticker_next_slot_get_ext(uint8_t instance_index, uint8_t user_id,
                                 uint8_t *ticker_id, uint32_t *ticks_current,
                                 uint32_t *ticks_to_expire,
                                 uint32_t *remainder, uint16_t *lazy,
                                 ticker_op_match_func fp_match_op_func,
                                 void *match_op_context,
                                 ticker_op_func fp_op_func, void *op_context)
{
#endif /* CONFIG_BT_TICKER_LAZY_GET ||
        * CONFIG_BT_TICKER_REMAINDER_GET ||
        * CONFIG_BT_TICKER_NEXT_SLOT_GET_MATCH
        */
        struct ticker_instance *instance = &_instance[instance_index];
        struct ticker_user_op *user_op;
        struct ticker_user *user;
        uint8_t last;

        user = &instance->users[user_id];

        last = user->last + 1;
        if (last >= user->count_user_op) {
                last = 0U;
        }

        if (last == user->first) {
                return TICKER_STATUS_FAILURE;
        }

        user_op = &user->user_op[user->last];
        user_op->op = TICKER_USER_OP_TYPE_SLOT_GET;
        user_op->id = TICKER_NULL;
        user_op->params.slot_get.ticker_id = ticker_id;
        user_op->params.slot_get.ticks_current = ticks_current;
        user_op->params.slot_get.ticks_to_expire = ticks_to_expire;
#if defined(CONFIG_BT_TICKER_REMAINDER_GET)
        user_op->params.slot_get.remainder = remainder;
#endif /* CONFIG_BT_TICKER_REMAINDER_GET */
#if defined(CONFIG_BT_TICKER_LAZY_GET)
        user_op->params.slot_get.lazy = lazy;
#endif /* CONFIG_BT_TICKER_LAZY_GET */
#if defined(CONFIG_BT_TICKER_NEXT_SLOT_GET_MATCH)
        user_op->params.slot_get.fp_match_op_func = fp_match_op_func;
        user_op->params.slot_get.match_op_context = match_op_context;
#endif /* CONFIG_BT_TICKER_NEXT_SLOT_GET_MATCH */
        user_op->status = TICKER_STATUS_BUSY;
        user_op->fp_op_func = fp_op_func;
        user_op->op_context = op_context;

        /* Make sure transaction is completed before committing */
        cpu_dmb();
        user->last = last;

        instance->sched_cb(instance->caller_id_get_cb(user_id),
                           TICKER_CALL_ID_JOB, 0, instance);

        return user_op->status;
}
#endif /* CONFIG_BT_TICKER_NEXT_SLOT_GET */
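
/*
 * Illustrative sketch: query the next expiring node. Seeding ticker_id
 * with TICKER_NULL starts the walk from the first node; the id written
 * back can be fed into a subsequent call to continue the iteration. The
 * results are only valid once the completion callback (the hypothetical
 * slot_done below) reports TICKER_STATUS_SUCCESS, because the lookup
 * itself executes in ticker_job.
 *
 * @code
 * uint8_t ticker_id = TICKER_NULL;
 * uint32_t ticks_current;
 * uint32_t ticks_to_expire;
 *
 * (void)ticker_next_slot_get(0, TICKER_USER_ID_THREAD, &ticker_id,
 *                            &ticks_current, &ticks_to_expire,
 *                            slot_done, NULL);
 * @endcode
 */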

#if defined(CONFIG_BT_TICKER_JOB_IDLE_GET)
/**
 * @brief Get a callback at the end of ticker job execution
 *
 * @details Operation completion callback is called at the end of the
 * ticker_job execution. The user operation is immutable.
 *
 * @param instance_index Index of ticker instance
 * @param user_id        Ticker user id. Used for indexing user operations
 *                       and mapping to mayfly caller id
 * @param fp_op_func     Function pointer of user operation completion
 *                       function
 * @param op_context     Context passed in operation completion call
 *
 * @return TICKER_STATUS_BUSY if request was successful but not yet completed.
 * TICKER_STATUS_FAILURE is returned if there are no more user operations
 * available, and TICKER_STATUS_SUCCESS is returned if ticker_job gets to run
 * before exiting ticker_job_idle_get
 */
uint8_t ticker_job_idle_get(uint8_t instance_index, uint8_t user_id,
                            ticker_op_func fp_op_func, void *op_context)
{
        struct ticker_instance *instance = &_instance[instance_index];
        struct ticker_user_op *user_op;
        struct ticker_user *user;
        uint8_t last;

        user = &instance->users[user_id];

        last = user->last + 1;
        if (last >= user->count_user_op) {
                last = 0U;
        }

        if (last == user->first) {
                return TICKER_STATUS_FAILURE;
        }

        user_op = &user->user_op[user->last];
        user_op->op = TICKER_USER_OP_TYPE_IDLE_GET;
        user_op->id = TICKER_NULL;
        user_op->status = TICKER_STATUS_BUSY;
        user_op->fp_op_func = fp_op_func;
        user_op->op_context = op_context;

        /* Make sure transaction is completed before committing */
        cpu_dmb();
        user->last = last;

        instance->sched_cb(instance->caller_id_get_cb(user_id),
                           TICKER_CALL_ID_JOB, 0, instance);

        return user_op->status;
}
#endif /* CONFIG_BT_TICKER_JOB_IDLE_GET */
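
/*
 * Illustrative sketch: request a callback when ticker_job has drained the
 * user operation queue, e.g. to learn when previously enqueued start and
 * stop operations have all been processed. idle_done is a hypothetical
 * ticker_op_func supplied by the caller.
 *
 * @code
 * (void)ticker_job_idle_get(0, TICKER_USER_ID_THREAD, idle_done, NULL);
 * @endcode
 */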

#if !defined(CONFIG_BT_TICKER_LOW_LAT) && \
        !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC) && \
        defined(CONFIG_BT_TICKER_PRIORITY_SET)
/**
 * @brief Set ticker node priority
 *
 * @param instance_index Index of ticker instance
 * @param user_id        Ticker user id. Used for indexing user operations
 *                       and mapping to mayfly caller id
 * @param ticker_id      Id of ticker node to set priority on
 * @param priority       Priority to set. Range [-128..127], default is 0.
 *                       Lower value equals higher priority. Setting
 *                       priority to -128 (TICKER_PRIORITY_CRITICAL) makes
 *                       the node win all collision challenges. Only one
 *                       node can have this priority assigned.
 * @param fp_op_func     Function pointer of user operation completion
 *                       function
 * @param op_context     Context passed in operation completion call
 *
 * @return TICKER_STATUS_BUSY if request was successful but not yet completed.
 * TICKER_STATUS_FAILURE is returned if there are no more user operations
 * available, and TICKER_STATUS_SUCCESS is returned if ticker_job gets to run
 * before exiting ticker_priority_set
 */
uint8_t ticker_priority_set(uint8_t instance_index, uint8_t user_id, uint8_t ticker_id,
                            int8_t priority, ticker_op_func fp_op_func,
                            void *op_context)
{
        struct ticker_instance *instance = &_instance[instance_index];
        struct ticker_user_op *user_op;
        struct ticker_user *user;
        uint8_t last;

        user = &instance->users[user_id];

        last = user->last + 1;
        if (last >= user->count_user_op) {
                last = 0U;
        }

        if (last == user->first) {
                return TICKER_STATUS_FAILURE;
        }

        user_op = &user->user_op[user->last];
        user_op->op = TICKER_USER_OP_TYPE_PRIORITY_SET;
        user_op->id = ticker_id;
        user_op->params.priority_set.priority = priority;
        user_op->status = TICKER_STATUS_BUSY;
        user_op->fp_op_func = fp_op_func;
        user_op->op_context = op_context;

        /* Make sure transaction is completed before committing */
        cpu_dmb();
        user->last = last;

        instance->sched_cb(instance->caller_id_get_cb(user_id),
                           TICKER_CALL_ID_JOB, 0, instance);

        return user_op->status;
}
#endif /* !CONFIG_BT_TICKER_LOW_LAT &&
        * !CONFIG_BT_TICKER_SLOT_AGNOSTIC &&
        * CONFIG_BT_TICKER_PRIORITY_SET
        */
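
/*
 * Illustrative sketch: promote a hypothetical node to critical priority so
 * that it wins every collision resolution. Only one node per instance may
 * hold TICKER_PRIORITY_CRITICAL.
 *
 * @code
 * (void)ticker_priority_set(0, TICKER_USER_ID_THREAD, TICKER_ID_EXAMPLE,
 *                           TICKER_PRIORITY_CRITICAL, NULL, NULL);
 * @endcode
 */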

/**
 * @brief Schedule ticker job
 *
 * @param instance_index Index of ticker instance
 * @param user_id        Ticker user id. Maps to mayfly caller id
 */
void ticker_job_sched(uint8_t instance_index, uint8_t user_id)
{
        struct ticker_instance *instance = &_instance[instance_index];

        instance->sched_cb(instance->caller_id_get_cb(user_id),
                           TICKER_CALL_ID_JOB, 0, instance);
}

/**
 * @brief Get current absolute tick count
 *
 * @return Absolute tick count
 */
uint32_t ticker_ticks_now_get(void)
{
        return cntr_cnt_get();
}

/**
 * @brief Get difference between two tick counts
 *
 * @details Subtracts the two counts and truncates the result to the HW
 * dependent counter bit width
 *
 * @param ticks_now Highest tick count (now)
 * @param ticks_old Tick count to subtract from ticks_now
 *
 * @return (ticks_now - ticks_old), masked to the counter bit width
 */
uint32_t ticker_ticks_diff_get(uint32_t ticks_now, uint32_t ticks_old)
{
        return ((ticks_now - ticks_old) & HAL_TICKER_CNTR_MASK);
}
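
/*
 * Illustrative sketch: elapsed-tick measurement that stays correct across
 * a counter wrap, as long as the real elapsed time is shorter than one
 * full period of the hardware counter.
 *
 * @code
 * uint32_t ticks_t0 = ticker_ticks_now_get();
 *
 * // ... later ...
 *
 * uint32_t elapsed = ticker_ticks_diff_get(ticker_ticks_now_get(),
 *                                          ticks_t0);
 * @endcode
 */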