1 /*
2 * Copyright (c) 2016-2018 Nordic Semiconductor ASA
3 * Copyright (c) 2016 Vinayak Kariappa Chettimada
4 *
5 * SPDX-License-Identifier: Apache-2.0
6 */
7
8 #include <stdbool.h>
9 #include <zephyr/types.h>
10 #include <soc.h>
11
12 #include "hal/cntr.h"
13 #include "hal/ticker.h"
14 #include "hal/cpu.h"
15
16 #include "ticker.h"
17
18 #include "hal/debug.h"
19
20 /*****************************************************************************
21 * Defines
22 ****************************************************************************/
23 #define DOUBLE_BUFFER_SIZE 2
24
25 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
26 #if !defined(CONFIG_BT_CTLR_ADV_AUX_SET)
27 #define BT_CTLR_ADV_AUX_SET 0
28 #else
29 #define BT_CTLR_ADV_AUX_SET CONFIG_BT_CTLR_ADV_AUX_SET
30 #endif
31 #if !defined(CONFIG_BT_CTLR_ADV_SYNC_SET)
32 #define BT_CTLR_ADV_SYNC_SET 0
33 #else
34 #define BT_CTLR_ADV_SYNC_SET CONFIG_BT_CTLR_ADV_SYNC_SET
35 #endif
36 #if defined(CONFIG_BT_CTLR_ADV_ISO)
37 #define TICKER_EXPIRE_INFO_MAX (BT_CTLR_ADV_AUX_SET + BT_CTLR_ADV_SYNC_SET*2)
38 #else
39 #define TICKER_EXPIRE_INFO_MAX (BT_CTLR_ADV_AUX_SET + BT_CTLR_ADV_SYNC_SET)
40 #endif /* !CONFIG_BT_CTLR_ADV_ISO */
41 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
42
43 /*****************************************************************************
44 * Types
45 ****************************************************************************/
46
47 struct ticker_node {
48 uint8_t next; /* Next ticker node */
49
50 uint8_t req; /* Request counter */
51 uint8_t ack; /* Acknowledge counter. Imbalance
52 * between req and ack indicates
53 * ongoing operation
54 */
55 uint8_t force:1; /* If non-zero, node timeout should
56 * be forced at next expiration
57 */
58 #if defined(CONFIG_BT_TICKER_PREFER_START_BEFORE_STOP)
59 uint8_t start_pending:1; /* If non-zero, start is pending for
60 * bottom half of ticker_job.
61 */
62 #endif /* CONFIG_BT_TICKER_PREFER_START_BEFORE_STOP */
63 uint32_t ticks_periodic; /* If non-zero, interval
64 * between expirations
65 */
66 uint32_t ticks_to_expire; /* Ticks until expiration */
67 ticker_timeout_func timeout_func; /* User timeout function */
68 void *context; /* Context delivered to timeout
69 * function
70 */
71 uint32_t ticks_to_expire_minus; /* Negative drift correction */
72 #if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
73 uint32_t ticks_slot; /* Air-time reservation for node */
74 #endif /* CONFIG_BT_TICKER_SLOT_AGNOSTIC */
75 uint16_t lazy_periodic; /* Number of timeouts to allow
76 * skipping
77 */
78 uint16_t lazy_current; /* Current number of timeouts
79 * skipped = peripheral latency
80 */
81 union {
82 uint32_t remainder_periodic;/* Sub-microsecond tick remainder
83 * for each period
84 */
85 ticker_op_func fp_op_func; /* Operation completion callback */
86 };
87
88 union {
89 uint32_t remainder_current; /* Current sub-microsecond tick
90 * remainder
91 */
92 void *op_context; /* Context passed in completion
93 * callback
94 */
95 };
96
97 #if defined(CONFIG_BT_TICKER_EXT)
98 struct ticker_ext *ext_data; /* Ticker extension data */
99 #endif /* CONFIG_BT_TICKER_EXT */
100 #if !defined(CONFIG_BT_TICKER_LOW_LAT) && \
101 !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
102 uint8_t must_expire; /* Node must expire, even if it
103 * collides with other nodes
104 */
105 #if defined(CONFIG_BT_TICKER_PRIORITY_SET)
106 int8_t priority; /* Ticker node priority. 0 is
107 * default. Lower value is higher
108 * priority
109 */
110 #endif /* CONFIG_BT_TICKER_PRIORITY_SET */
111 #endif /* !CONFIG_BT_TICKER_LOW_LAT &&
112 * !CONFIG_BT_TICKER_SLOT_AGNOSTIC
113 */
114 };
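
/* Illustrative sketch (not part of the build) of the req/ack counter
 * pairing above, assuming the 8-bit wrap-around arithmetic used by
 * ticker_worker and ticker_job:
 *
 *   req == ack                : node is idle (not queued)
 *   ((req - ack) & 0xff) == 1 : node is scheduled, awaiting expiry
 *   ((req - ack) & 0xff) == 2 : node has expired (worker did ack--)
 *                               and awaits handling by ticker_job
 */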
115
116 struct ticker_expire_info_internal {
117 uint32_t ticks_to_expire;
118 uint32_t remainder;
119 uint16_t lazy;
120 uint8_t ticker_id;
121 uint8_t outdated:1;
122 uint8_t found:1;
123 uint8_t last:1;
124 };
125
126 /* Operations to be performed in ticker_job.
127 * Possible values for field "op" in struct ticker_user_op
128 */
129 #define TICKER_USER_OP_TYPE_NONE 0
130 #define TICKER_USER_OP_TYPE_IDLE_GET 1
131 #define TICKER_USER_OP_TYPE_SLOT_GET 2
132 #define TICKER_USER_OP_TYPE_PRIORITY_SET 3
133 #define TICKER_USER_OP_TYPE_START 4
134 #define TICKER_USER_OP_TYPE_UPDATE 5
135 #define TICKER_USER_OP_TYPE_YIELD_ABS 6
136 #define TICKER_USER_OP_TYPE_STOP 7
137 #define TICKER_USER_OP_TYPE_STOP_ABS 8
138
139 /* Slot window re-schedule states */
140 #define TICKER_RESCHEDULE_STATE_NONE 0
141 #define TICKER_RESCHEDULE_STATE_PENDING 1
142 #define TICKER_RESCHEDULE_STATE_DONE 2
143
144 #if defined(CONFIG_BT_TICKER_EXT) && !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
145 #define TICKER_HAS_SLOT_WINDOW(_ticker) \
146 ((_ticker)->ext_data && ((_ticker)->ext_data->ticks_slot_window != 0U))
147 #define TICKER_RESCHEDULE_PENDING(_ticker) \
148 (_ticker->ext_data && (_ticker->ext_data->reschedule_state == \
149 TICKER_RESCHEDULE_STATE_PENDING))
150 #else
151 #define TICKER_HAS_SLOT_WINDOW(_ticker) 0
152 #define TICKER_RESCHEDULE_PENDING(_ticker) 0
153 #endif
154
155 /* User operation data structure for start opcode. Used for passing start
156 * requests to ticker_job
157 */
158 struct ticker_user_op_start {
159 uint32_t ticks_at_start; /* Anchor ticks (absolute) */
160 uint32_t ticks_first; /* Initial timeout ticks */
161 uint32_t ticks_periodic; /* Ticker period ticks */
162 uint32_t remainder_periodic; /* Sub-microsecond tick remainder */
163 uint16_t lazy; /* Periodic latency in number of
164 * periods
165 */
166 #if defined(CONFIG_BT_TICKER_REMAINDER)
167 uint32_t remainder_first; /* Sub-microsecond tick remainder */
168 #endif /* CONFIG_BT_TICKER_REMAINDER */
169
170 #if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
171 uint32_t ticks_slot; /* Air-time reservation ticks */
172 #endif /* CONFIG_BT_TICKER_SLOT_AGNOSTIC */
173
174 ticker_timeout_func fp_timeout_func; /* Timeout callback function */
175 void *context; /* Context passed in timeout callback */
176
177 #if defined(CONFIG_BT_TICKER_EXT)
178 struct ticker_ext *ext_data; /* Ticker extension data instance */
179 #endif /* CONFIG_BT_TICKER_EXT */
180 };
181
182 /* User operation data structure for update opcode. Used for passing update
183 * requests to ticker_job
184 */
185 struct ticker_user_op_update {
186 uint32_t ticks_drift_plus; /* Requested positive drift in ticks */
187 uint32_t ticks_drift_minus; /* Requested negative drift in ticks */
188 #if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
189 uint32_t ticks_slot_plus; /* Number of ticks to add to slot
190 * reservation (air-time)
191 */
192 uint32_t ticks_slot_minus; /* Number of ticks to subtract from
193 * slot reservation (air-time)
194 */
195 #endif /* CONFIG_BT_TICKER_SLOT_AGNOSTIC */
196 uint16_t lazy; /* Peripheral latency:
197 * 0: Do nothing
198 * 1: latency = 0
199 * >1: latency = lazy - 1
200 */
201 uint8_t force; /* Force update */
202 #if defined(CONFIG_BT_TICKER_EXT) && !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC) &&\
203 !defined(CONFIG_BT_TICKER_LOW_LAT)
204 uint8_t must_expire; /* Node must expire, even if it
205 * collides with other nodes:
206 * 0x00: Do nothing
207 * 0x01: Disable must_expire
208 * 0x02: Enable must_expire
209 */
210 #endif
211 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
212 uint8_t expire_info_id;
213 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
214 };
215
216 /* User operation data structure for yield/stop opcode. Used for passing yield/
217 * stop requests with absolute tick to ticker_job
218 */
219 struct ticker_user_op_yield {
220 uint32_t ticks_at_yield; /* Anchor ticks (absolute) */
221 };
222
223 /* User operation data structure for slot_get opcode. Used for passing request
224 * to get next ticker with slot ticks via ticker_job
225 */
226 struct ticker_user_op_slot_get {
227 uint8_t *ticker_id;
228 uint32_t *ticks_current;
229 uint32_t *ticks_to_expire;
230 #if defined(CONFIG_BT_TICKER_REMAINDER_GET)
231 uint32_t *remainder;
232 #endif /* CONFIG_BT_TICKER_REMAINDER_GET */
233 #if defined(CONFIG_BT_TICKER_LAZY_GET)
234 uint16_t *lazy;
235 #endif /* CONFIG_BT_TICKER_LAZY_GET */
236 #if defined(CONFIG_BT_TICKER_NEXT_SLOT_GET_MATCH)
237 ticker_op_match_func fp_match_op_func;
238 void *match_op_context;
239 #endif /* CONFIG_BT_TICKER_NEXT_SLOT_GET_MATCH */
240 };
241
242 /* User operation data structure for priority_set opcode. Used for passing
243 * request to set ticker node priority via ticker_job
244 */
245 struct ticker_user_op_priority_set {
246 int8_t priority; /* Node priority. Defaults to 0 */
247 };
248
249 /* User operation top level data structure. Used for passing requests to
250 * ticker_job
251 */
252 struct ticker_user_op {
253 uint8_t op; /* User operation */
254 uint8_t id; /* Ticker node id */
255 uint8_t status; /* Operation result */
256 union {
257 struct ticker_user_op_start start;
258 struct ticker_user_op_update update;
259 struct ticker_user_op_yield yield;
260 struct ticker_user_op_slot_get slot_get;
261 struct ticker_user_op_priority_set priority_set;
262 } params; /* User operation parameters */
263 ticker_op_func fp_op_func; /* Operation completion callback */
264 void *op_context; /* Context passed in completion callback */
265 };
266
267 /* User data structure for operations
268 */
269 struct ticker_user {
270 uint8_t count_user_op; /* Number of user operation slots */
271 uint8_t first; /* Slot index of first user operation */
272 uint8_t middle; /* Slot index of last managed user op.
273 * Updated by ticker_job_list_manage
274 * for use in ticker_job_list_insert
275 */
276 uint8_t last; /* Slot index of last user operation */
277 struct ticker_user_op *user_op; /* Pointer to user operation array */
278 };
279
280 /* Ticker instance
281 */
282 struct ticker_instance {
283 struct ticker_node *nodes; /* Pointer to ticker nodes */
284 struct ticker_user *users; /* Pointer to user nodes */
285 uint8_t count_node; /* Number of ticker nodes */
286 uint8_t count_user; /* Number of user nodes */
287 uint8_t ticks_elapsed_first; /* Index from which elapsed ticks count
288 * is pulled
289 */
290 uint8_t ticks_elapsed_last; /* Index to which elapsed ticks count
291 * is pushed
292 */
293 uint32_t ticks_elapsed[DOUBLE_BUFFER_SIZE]; /* Buffer for elapsed
294 * ticks
295 */
296 uint32_t ticks_current; /* Absolute ticks elapsed at last
297 * ticker_job
298 */
299 uint8_t ticker_id_head; /* Index of first ticker node (next to
300 * expire)
301 */
302 uint8_t job_guard; /* Flag preventing ticker_worker from
303 * running if ticker_job is active
304 */
305 uint8_t worker_trigger; /* Flag preventing ticker_job from
306 * starting if ticker_worker was
307 * requested, and to trigger
308 * ticker_worker at end of job, if
309 * requested
310 */
311
312 #if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
313 uint8_t ticker_id_slot_previous; /* Id of previous slot reserving
314 * ticker node
315 */
316 uint32_t ticks_slot_previous; /* Number of ticks previously reserved
317 * by a ticker node (active air-time)
318 */
319 #endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */
320
321 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
322 struct ticker_expire_info_internal expire_infos[TICKER_EXPIRE_INFO_MAX];
323 bool expire_infos_outdated;
324 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
325
326 ticker_caller_id_get_cb_t caller_id_get_cb; /* Function for retrieving
327 * the caller id from user
328 * id
329 */
330 ticker_sched_cb_t sched_cb; /* Function for scheduling
331 * ticker_worker and
332 * ticker_job
333 */
334 ticker_trigger_set_cb_t trigger_set_cb; /* Function for setting
335 * the trigger (compare
336 * value)
337 */
338 };
339
340 BUILD_ASSERT(sizeof(struct ticker_node) == TICKER_NODE_T_SIZE);
341 BUILD_ASSERT(sizeof(struct ticker_user) == TICKER_USER_T_SIZE);
342 BUILD_ASSERT(sizeof(struct ticker_user_op) == TICKER_USER_OP_T_SIZE);
343
344 /*****************************************************************************
345 * Global instances
346 ****************************************************************************/
347 #define TICKER_INSTANCE_MAX 1
348 static struct ticker_instance _instance[TICKER_INSTANCE_MAX];
349
350 /*****************************************************************************
351 * Static Functions
352 ****************************************************************************/
353
354 static inline uint8_t ticker_add_to_remainder(uint32_t *remainder, uint32_t to_add);
355
356 /**
357 * @brief Update elapsed index
358 *
359 * @param ticks_elapsed_index Pointer to current index
360 *
361 * @internal
362 */
363 static inline void ticker_next_elapsed(uint8_t *ticks_elapsed_index)
364 {
365 uint8_t idx = *ticks_elapsed_index + 1;
366
367 if (idx == DOUBLE_BUFFER_SIZE) {
368 idx = 0U;
369 }
370 *ticks_elapsed_index = idx;
371 }
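
/* Usage sketch (illustrative only): with DOUBLE_BUFFER_SIZE == 2 the
 * elapsed-ticks index simply toggles between the two buffer slots:
 *
 *   uint8_t idx = 0U;
 *
 *   ticker_next_elapsed(&idx);   // idx == 1
 *   ticker_next_elapsed(&idx);   // idx == 0, wrapped around
 */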
372
373 #if defined(CONFIG_BT_TICKER_LOW_LAT)
374 /**
375 * @brief Get ticker expiring in a specific slot
376 *
377 * @details Searches for a ticker which expires in a specific slot starting
378 * at 'ticks_slot'.
379 *
380 * @param node Pointer to ticker node array
381 * @param ticker_id_head Id of initial ticker node
382 * @param ticks_slot Ticks indicating slot to get
383 *
384 * @return Id of ticker expiring within slot or TICKER_NULL
385 * @internal
386 */
387 static uint8_t ticker_by_slot_get(struct ticker_node *node, uint8_t ticker_id_head,
388 uint32_t ticks_slot)
389 {
390 while (ticker_id_head != TICKER_NULL) {
391 struct ticker_node *ticker;
392 uint32_t ticks_to_expire;
393
394 ticker = &node[ticker_id_head];
395 ticks_to_expire = ticker->ticks_to_expire;
396
397 if (ticks_slot <= ticks_to_expire) {
398 /* Next ticker expiration is outside the checked slot */
399 return TICKER_NULL;
400 }
401
402 if (ticker->ticks_slot) {
403 /* This ticker node has slot defined and expires within
404 * checked slot
405 */
406 break;
407 }
408
409 ticks_slot -= ticks_to_expire;
410 ticker_id_head = ticker->next;
411 }
412
413 return ticker_id_head;
414 }
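
/* Worked example (illustrative): for a delta-encoded list
 * A(ticks_to_expire=10, ticks_slot=0) -> B(5, 4), calling with
 * ticks_slot = 12 first consumes A (12 > 10, no slot ticks, 12 - 10 = 2),
 * then returns TICKER_NULL at B since 2 <= 5. With ticks_slot = 16, B is
 * reached with 6 remaining (6 > 5) and, having slot ticks defined, its
 * id is returned.
 */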
415 #endif /* CONFIG_BT_TICKER_LOW_LAT */
416
417 #if defined(CONFIG_BT_TICKER_NEXT_SLOT_GET)
418 /**
419 * @brief Get next ticker with slot ticks or match
420 *
421 * @details Iterates ticker nodes from ticker_id_head. If no head id is provided
422 * (TICKER_NULL), iteration starts from the first node.
423 * Operation details:
424 *
425 * NORMAL MODE (!CONFIG_BT_TICKER_SLOT_AGNOSTIC)
426  * - Gets the next ticker which has slot ticks specified and returns the ticker
427  *   id and accumulated ticks until expiration.
428  * - If a matching function is provided, it is called and node iteration
429  *   continues until the match function returns true.
430 *
431 * SLOT AGNOSTIC MODE (CONFIG_BT_TICKER_SLOT_AGNOSTIC)
432 * - Gets the next ticker node.
433  * - If a matching function is provided, it is called and node iteration
434  *   continues until the match function returns true.
435 *
436 * @param instance Pointer to ticker instance
437 * @param ticker_id_head Pointer to id of first ticker node [in/out]
438 * @param ticks_current Pointer to current ticks count [in/out]
439 * @param ticks_to_expire Pointer to ticks to expire [in/out]
440 * @param fp_match_op_func Pointer to match function or NULL if unused
441 * @param match_op_context Pointer to operation context passed to match
442 * function or NULL if unused
443 * @param lazy Pointer to lazy variable to receive lazy_current
444 * of found ticker node
445 * @internal
446 */
447 static void ticker_by_next_slot_get(struct ticker_instance *instance,
448 uint8_t *ticker_id_head,
449 uint32_t *ticks_current,
450 uint32_t *ticks_to_expire,
451 ticker_op_match_func fp_match_op_func,
452 void *match_op_context, uint32_t *remainder,
453 uint16_t *lazy)
454 {
455 struct ticker_node *ticker;
456 struct ticker_node *node;
457 uint32_t _ticks_to_expire;
458 uint8_t _ticker_id_head;
459
460 node = instance->nodes;
461
462 _ticker_id_head = *ticker_id_head;
463 _ticks_to_expire = *ticks_to_expire;
464 if ((_ticker_id_head == TICKER_NULL) ||
465 (*ticks_current != instance->ticks_current)) {
466 /* Initialize with instance head */
467 _ticker_id_head = instance->ticker_id_head;
468 *ticks_current = instance->ticks_current;
469 _ticks_to_expire = 0U;
470 } else {
471 /* Get ticker id for next node */
472 ticker = &node[_ticker_id_head];
473 _ticker_id_head = ticker->next;
474 }
475
476 /* Find first ticker node with match or slot ticks */
477 while (_ticker_id_head != TICKER_NULL) {
478 ticker = &node[_ticker_id_head];
479
480 #if defined(CONFIG_BT_TICKER_NEXT_SLOT_GET_MATCH)
481 if (fp_match_op_func) {
482 uint32_t ticks_slot = 0;
483
484 #if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
485 ticks_slot += ticker->ticks_slot;
486 #endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */
487
488 /* Match node id */
489 if (fp_match_op_func(_ticker_id_head, ticks_slot,
490 _ticks_to_expire +
491 ticker->ticks_to_expire,
492 match_op_context)) {
493 /* Match found */
494 break;
495 }
496 } else
497 #endif /* CONFIG_BT_TICKER_NEXT_SLOT_GET_MATCH */
498 #if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
499 if (ticker->ticks_slot) {
500 /* Matching not used and node has slot ticks */
501 break;
502 #else
503 {
504 /* Matching not used and slot agnostic */
505 break;
506 #endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */
507 }
508
509 /* Accumulate expire ticks */
510 _ticks_to_expire += ticker->ticks_to_expire;
511 _ticker_id_head = ticker->next;
512 }
513
514 if (_ticker_id_head != TICKER_NULL) {
515 /* Add ticks for found ticker */
516 _ticks_to_expire += ticker->ticks_to_expire;
517
518 #if defined(CONFIG_BT_TICKER_REMAINDER_GET)
519 if (remainder) {
520 *remainder = ticker->remainder_current;
521 }
522 #endif /* CONFIG_BT_TICKER_REMAINDER_GET */
523
524 #if defined(CONFIG_BT_TICKER_LAZY_GET)
525 if (lazy) {
526 *lazy = ticker->lazy_current;
527 }
528 #endif /* CONFIG_BT_TICKER_LAZY_GET */
529 }
530
531 *ticker_id_head = _ticker_id_head;
532 *ticks_to_expire = _ticks_to_expire;
533 }
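
/* Usage sketch (hypothetical caller, illustrative only): iterate all
 * tickers that reserve slot ticks, starting from the instance head:
 *
 *   uint8_t id = TICKER_NULL;
 *   uint32_t ticks_cur = 0U;
 *   uint32_t ticks_exp = 0U;
 *
 *   do {
 *           ticker_by_next_slot_get(instance, &id, &ticks_cur,
 *                                   &ticks_exp, NULL, NULL, NULL, NULL);
 *   } while (id != TICKER_NULL);
 */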
534 #endif /* CONFIG_BT_TICKER_NEXT_SLOT_GET */
535
536 #if !defined(CONFIG_BT_TICKER_LOW_LAT)
537 /**
538 * @brief Enqueue ticker node
539 *
540 * @details Finds insertion point for new ticker node and inserts the
541 * node in the linked node list.
542 *
543 * @param instance Pointer to ticker instance
544 * @param id Ticker node id to enqueue
545 *
546 * @return Id of enqueued ticker node
547 * @internal
548 */
549 static uint8_t ticker_enqueue(struct ticker_instance *instance, uint8_t id)
550 {
551 struct ticker_node *ticker_current;
552 struct ticker_node *ticker_new;
553 uint32_t ticks_to_expire_current;
554 struct ticker_node *node;
555 uint32_t ticks_to_expire;
556 uint8_t previous;
557 uint8_t current;
558
559 node = &instance->nodes[0];
560 ticker_new = &node[id];
561 ticks_to_expire = ticker_new->ticks_to_expire;
562 current = instance->ticker_id_head;
563
564 /* Find insertion point for new ticker node and adjust ticks_to_expire
565 * relative to insertion point
566 */
567 previous = TICKER_NULL;
568
569 while ((current != TICKER_NULL) && (ticks_to_expire >=
570 (ticks_to_expire_current =
571 (ticker_current = &node[current])->ticks_to_expire))) {
572
573 ticks_to_expire -= ticks_to_expire_current;
574
575 /* Check for timeout in same tick - prioritize according to
576 * latency
577 */
578 if (ticks_to_expire == 0 && (ticker_new->lazy_current >
579 ticker_current->lazy_current)) {
580 ticks_to_expire = ticks_to_expire_current;
581 break;
582 }
583
584 previous = current;
585 current = ticker_current->next;
586 }
587
588 /* Link in new ticker node and adjust ticks_to_expire to relative value
589 */
590 ticker_new->ticks_to_expire = ticks_to_expire;
591 ticker_new->next = current;
592
593 if (previous == TICKER_NULL) {
594 instance->ticker_id_head = id;
595 } else {
596 node[previous].next = id;
597 }
598
599 if (current != TICKER_NULL) {
600 node[current].ticks_to_expire -= ticks_to_expire;
601 }
602
603 return id;
604 }
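
/* Worked example (illustrative): inserting a node with
 * ticks_to_expire = 17 into the delta-encoded list A(10) -> B(10):
 * A consumes 10 (17 - 10 = 7), B does not (7 < 10), so the new node is
 * linked as A(10) -> new(7) -> B(3). Absolute expiries at 10, 17 and 20
 * ticks are preserved.
 */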
605 #else /* !CONFIG_BT_TICKER_LOW_LAT */
606
607 /**
608 * @brief Enqueue ticker node
609 *
610 * @details Finds insertion point for new ticker node and inserts the
611 * node in the linked node list. However, if the new ticker node collides
612 * with an existing node or the expiration is inside the previous slot,
613 * the node is not inserted.
614 *
615 * @param instance Pointer to ticker instance
616 * @param id Ticker node id to enqueue
617 *
618  * @return Id of enqueued ticker node, or id of the previous or colliding
619  * ticker node if the new node was not enqueued
620 * @internal
621 */
622 static uint8_t ticker_enqueue(struct ticker_instance *instance, uint8_t id)
623 {
624 struct ticker_node *ticker_current;
625 struct ticker_node *ticker_new;
626 uint32_t ticks_to_expire_current;
627 uint8_t ticker_id_slot_previous;
628 uint32_t ticks_slot_previous;
629 struct ticker_node *node;
630 uint32_t ticks_to_expire;
631 uint8_t previous;
632 uint8_t current;
633 uint8_t collide;
634
635 node = &instance->nodes[0];
636 ticker_new = &node[id];
637 ticks_to_expire = ticker_new->ticks_to_expire;
638
639 collide = ticker_id_slot_previous = TICKER_NULL;
640 current = instance->ticker_id_head;
641 previous = current;
642 ticks_slot_previous = instance->ticks_slot_previous;
643
644 /* Find insertion point for new ticker node and adjust ticks_to_expire
645 * relative to insertion point
646 */
647 while ((current != TICKER_NULL) &&
648 (ticks_to_expire >
649 (ticks_to_expire_current =
650 (ticker_current = &node[current])->ticks_to_expire))) {
651 ticks_to_expire -= ticks_to_expire_current;
652
653 if (ticker_current->ticks_slot != 0U) {
654 ticks_slot_previous = ticker_current->ticks_slot;
655 ticker_id_slot_previous = current;
656 } else {
657 if (ticks_slot_previous > ticks_to_expire_current) {
658 ticks_slot_previous -= ticks_to_expire_current;
659 } else {
660 ticks_slot_previous = 0U;
661 }
662 }
663 previous = current;
664 current = ticker_current->next;
665 }
666
667 /* Check for collision for new ticker node at insertion point */
668 collide = ticker_by_slot_get(&node[0], current,
669 ticks_to_expire + ticker_new->ticks_slot);
670
671 if ((ticker_new->ticks_slot == 0U) ||
672 ((ticks_slot_previous <= ticks_to_expire) &&
673 (collide == TICKER_NULL))) {
674 /* New ticker node has no slot ticks or there is no collision -
675 * link it in and adjust ticks_to_expire to relative value
676 */
677 ticker_new->ticks_to_expire = ticks_to_expire;
678 ticker_new->next = current;
679
680 if (previous == current) {
681 instance->ticker_id_head = id;
682 } else {
683 node[previous].next = id;
684 }
685
686 if (current != TICKER_NULL) {
687 node[current].ticks_to_expire -= ticks_to_expire;
688 }
689 } else {
690 /* Collision - no ticker node insertion, set id to that of
691 * colliding node
692 */
693 if (ticks_slot_previous > ticks_to_expire) {
694 id = ticker_id_slot_previous;
695 } else {
696 id = collide;
697 }
698 }
699
700 return id;
701 }
702 #endif /* !CONFIG_BT_TICKER_LOW_LAT */
703
704 /**
705 * @brief Dequeue ticker node
706 *
707 * @details Finds extraction point for ticker node to be dequeued, unlinks
708 * the node and adjusts the links and ticks_to_expire. Returns the ticks
709 * until expiration for dequeued ticker node.
710 *
711 * @param instance Pointer to ticker instance
712 * @param id Ticker node id to dequeue
713 *
714 * @return Total ticks until expiration for dequeued ticker node, or 0 if
715 * node was not found
716 * @internal
717 */
718 static uint32_t ticker_dequeue(struct ticker_instance *instance, uint8_t id)
719 {
720 struct ticker_node *ticker_current;
721 struct ticker_node *node;
722 uint8_t previous;
723 uint32_t timeout;
724 uint8_t current;
725 uint32_t total;
726
727 /* Find the ticker's position in ticker node list while accumulating
728 * ticks_to_expire
729 */
730 node = &instance->nodes[0];
731 previous = instance->ticker_id_head;
732 current = previous;
733 total = 0U;
734 ticker_current = 0;
735 while (current != TICKER_NULL) {
736 ticker_current = &node[current];
737
738 if (current == id) {
739 break;
740 }
741
742 total += ticker_current->ticks_to_expire;
743 previous = current;
744 current = ticker_current->next;
745 }
746
747 if (current == TICKER_NULL) {
748 /* Ticker not in active list */
749 return 0;
750 }
751
752 if (previous == current) {
753 /* Ticker is the first in the list */
754 instance->ticker_id_head = ticker_current->next;
755 }
756
757 	/* Delta ticks_to_expire of the removed node, relative to previous node */
758 timeout = ticker_current->ticks_to_expire;
759
760 /* Link previous ticker with next of this ticker
761 * i.e. removing the ticker from list
762 */
763 node[previous].next = ticker_current->next;
764
765 /* If this is not the last ticker, increment the
766 * next ticker by this ticker timeout
767 */
768 if (ticker_current->next != TICKER_NULL) {
769 node[ticker_current->next].ticks_to_expire += timeout;
770 }
771
772 return (total + timeout);
773 }
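
/* Worked example (illustrative): dequeuing B from A(10) -> B(7) -> C(3)
 * accumulates total = 10 at A, finds B with timeout = 7, bumps C's
 * delta to 3 + 7 = 10 and returns 10 + 7 = 17. C's absolute expiry
 * (20 ticks) is unchanged.
 */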
774
775 #if !defined(CONFIG_BT_TICKER_LOW_LAT) && \
776 !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
777 /**
778 * @brief Resolve ticker node collision
779 *
780 * @details Evaluates the provided ticker node against other queued nodes
781 * and returns non-zero if the ticker node collides and should be skipped.
782 * The following rules are checked:
783 * 1) If the periodic latency is not yet exhausted, node is skipped
784 * 2) If the node has highest possible priority, node is never skipped
785  * 3) If the node will starve next node due to slot reservation
786 * overlap, node is skipped if:
787 * a) Next node has higher priority than current node
788 * b) Next node has more accumulated latency than the current node
789 * c) Next node is 'older' than current node and has same priority
790 * d) Next node has force flag set, and the current does not
791 *
792 * @param nodes Pointer to ticker node array
793 * @param ticker Pointer to ticker to resolve
794 *
795 * @return 0 if no collision was detected. 1 if ticker node collides
796 * with other ticker node of higher composite priority
797 * @internal
798 */
799 static uint8_t ticker_resolve_collision(struct ticker_node *nodes,
800 struct ticker_node *ticker)
801 {
802 #if defined(CONFIG_BT_TICKER_PRIORITY_SET)
803 if ((ticker->priority != TICKER_PRIORITY_CRITICAL) &&
804 (ticker->next != TICKER_NULL)) {
805
806 #else /* !CONFIG_BT_TICKER_PRIORITY_SET */
807 if (ticker->next != TICKER_NULL) {
808
809 #endif /* !CONFIG_BT_TICKER_PRIORITY_SET */
810
811 uint16_t lazy_current = ticker->lazy_current;
812 uint32_t ticker_ticks_slot;
813
814 if (TICKER_HAS_SLOT_WINDOW(ticker) && !ticker->ticks_slot) {
815 ticker_ticks_slot = HAL_TICKER_RESCHEDULE_MARGIN;
816 } else {
817 ticker_ticks_slot = ticker->ticks_slot;
818 }
819
820 /* Check if this ticker node will starve next node which has
821 * latency or higher priority
822 */
823 if (lazy_current >= ticker->lazy_periodic) {
824 lazy_current -= ticker->lazy_periodic;
825 }
826 uint8_t id_head = ticker->next;
827 uint32_t acc_ticks_to_expire = 0U;
828
829 /* Age is time since last expiry */
830 uint32_t current_age = ticker->ticks_periodic +
831 (lazy_current * ticker->ticks_periodic);
832
833 while (id_head != TICKER_NULL) {
834 struct ticker_node *ticker_next = &nodes[id_head];
835
836 /* Accumulate ticks_to_expire for each node */
837 acc_ticks_to_expire += ticker_next->ticks_to_expire;
838 if (acc_ticks_to_expire > ticker_ticks_slot) {
839 break;
840 }
841
842 /* We only care about nodes with slot reservation */
843 if (ticker_next->ticks_slot == 0U) {
844 id_head = ticker_next->next;
845 continue;
846 }
847
848 uint16_t lazy_next = ticker_next->lazy_current;
849 uint8_t lazy_next_periodic_skip =
850 ticker_next->lazy_periodic > lazy_next;
851
852 if (!lazy_next_periodic_skip) {
853 lazy_next -= ticker_next->lazy_periodic;
854 }
855
856 /* Age is time since last expiry */
857 uint32_t next_age = (ticker_next->ticks_periodic == 0U ?
858 0U :
859 (ticker_next->ticks_periodic -
860 ticker_next->ticks_to_expire)) +
861 (lazy_next *
862 ticker_next->ticks_periodic);
863
864 /* Was the current node scheduled earlier? */
865 uint8_t current_is_older =
866 (ticker->ticks_periodic == 0U) ||
867 (current_age > next_age);
868 /* Was next node scheduled earlier (legacy priority)? */
869 uint8_t next_is_older =
870 (ticker->ticks_periodic != 0U) &&
871 (next_age > current_age);
872
873 			/* Are the current and next nodes equal in force? */
874 uint8_t equal_force =
875 (ticker->force == ticker_next->force);
876 /* Is force requested for next node (e.g. update) -
877 * more so than for current node?
878 */
879 uint8_t next_force =
880 (ticker_next->force > ticker->force);
881
882 #if defined(CONFIG_BT_TICKER_PRIORITY_SET)
883 /* Does next node have critical priority and should
884 * always be scheduled?
885 */
886 uint8_t next_is_critical =
887 (ticker_next->priority ==
888 TICKER_PRIORITY_CRITICAL);
889
890 			/* Are the current and next nodes equal in priority? */
891 uint8_t equal_priority =
892 (ticker->priority == ticker_next->priority);
893
894 #else /* !CONFIG_BT_TICKER_PRIORITY_SET */
895 uint8_t next_is_critical = 0U;
896 uint8_t equal_priority = 1U;
897 uint8_t next_has_priority = 0U;
898
899 #endif /* !CONFIG_BT_TICKER_PRIORITY_SET */
900
901 #if defined(CONFIG_BT_TICKER_EXT)
902 #if defined(CONFIG_BT_TICKER_EXT_SLOT_WINDOW_YIELD)
903 #if defined(CONFIG_BT_TICKER_PRIORITY_SET)
904 /* Does next node have higher priority? */
905 uint8_t next_has_priority =
906 (!TICKER_HAS_SLOT_WINDOW(ticker_next) &&
907 ((lazy_next - ticker_next->priority) >
908 				(lazy_current - ticker->priority)));
909 #endif /* CONFIG_BT_TICKER_PRIORITY_SET */
910
911 /* Colliding next ticker does not use ticks_slot_window
912 * or it does not fit after the current ticker within
913 * the ticks_slot_window.
914 */
915 uint8_t next_not_ticks_slot_window =
916 (!TICKER_HAS_SLOT_WINDOW(ticker_next) ||
917 ((acc_ticks_to_expire +
918 ticker_next->ext_data->ticks_slot_window -
919 ticker_next->ticks_slot) <
920 ticker->ticks_slot));
921
922 /* Can the current ticker with ticks_slot_window be
923 * scheduled after the colliding ticker?
924 */
925 uint8_t curr_has_ticks_slot_window =
926 (TICKER_HAS_SLOT_WINDOW(ticker) &&
927 ((acc_ticks_to_expire +
928 ticker_next->ticks_slot) <
929 (ticker->ext_data->ticks_slot_window -
930 ticker->ticks_slot)));
931
932 #else /* !CONFIG_BT_TICKER_EXT_SLOT_WINDOW_YIELD */
933 #if defined(CONFIG_BT_TICKER_PRIORITY_SET)
934 /* Does next node have higher priority? */
935 uint8_t next_has_priority =
936 (lazy_next - ticker_next->priority) >
937 (lazy_current - ticker->priority);
938
939 #endif /* CONFIG_BT_TICKER_PRIORITY_SET */
940 uint8_t next_not_ticks_slot_window = 1U;
941
942 /* Can the current ticker with ticks_slot_window be
943 * scheduled after the colliding ticker?
944 * NOTE: Tickers with ticks_slot_window and no
945 			 * ticks_slot (unreserved) are always scheduled
946 * after the colliding ticker.
947 */
948 uint8_t curr_has_ticks_slot_window =
949 (TICKER_HAS_SLOT_WINDOW(ticker) &&
950 !ticker->ticks_slot &&
951 ((acc_ticks_to_expire +
952 ticker_next->ticks_slot) <
953 (ticker->ext_data->ticks_slot_window)));
954
955 #endif /* !CONFIG_BT_TICKER_EXT_SLOT_WINDOW_YIELD */
956 #else /* !CONFIG_BT_TICKER_EXT */
957 #if defined(CONFIG_BT_TICKER_PRIORITY_SET)
958 /* Does next node have higher priority? */
959 uint8_t next_has_priority =
960 (lazy_next - ticker_next->priority) >
961 (lazy_current - ticker->priority);
962
963 #endif /* CONFIG_BT_TICKER_PRIORITY_SET */
964 uint8_t next_not_ticks_slot_window = 1U;
965 uint8_t curr_has_ticks_slot_window = 0U;
966
967 #endif /* !CONFIG_BT_TICKER_EXT */
968
969 /* Check if next node is within this reservation slot
970 * and wins conflict resolution
971 */
972 if (curr_has_ticks_slot_window ||
973 (!lazy_next_periodic_skip &&
974 (next_is_critical ||
975 next_force ||
976 (next_has_priority && !current_is_older) ||
977 (equal_priority && equal_force && next_is_older &&
978 next_not_ticks_slot_window)))) {
979 /* This node must be skipped - check window */
980 return 1U;
981 }
982 id_head = ticker_next->next;
983 }
984 }
985
986 return 0U;
987 }
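
/* Numeric sketch of the age comparison above (illustrative): a current
 * node with ticks_periodic = 100 and adjusted lazy_current = 2 gives
 * current_age = 100 + (2 * 100) = 300; a next node with
 * ticks_periodic = 100, ticks_to_expire = 40 and lazy_next = 0 gives
 * next_age = (100 - 40) + 0 = 60. Here current_is_older holds, so the
 * next node cannot win on the equal-priority age rule alone.
 */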
988 #endif /* !CONFIG_BT_TICKER_LOW_LAT &&
989 * !CONFIG_BT_TICKER_SLOT_AGNOSTIC
990 */
991
992 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
993 /**
994 * @brief Get expiration delta from one ticker id to another ticker id
995 *
996 * @details Helper function to get expiration info between two tickers
997 *
998 * @param instance Ticker instance
999 * @param to_ticker_id Target ticker id
1000 * @param from_ticker_id Ticker id to compare with
1001 * @param expire_info Pointer to ticker_expire_info that will
1002 * get populated with the result
1003 *
1004 * @internal
1005 */
1006 static void ticker_get_expire_info(struct ticker_instance *instance, uint8_t to_ticker_id,
1007 uint8_t from_ticker_id,
1008 struct ticker_expire_info_internal *expire_info)
1009 {
1010 struct ticker_node *current_node;
1011 uint32_t acc_ticks_to_expire = 0;
1012 uint8_t current_ticker_id;
1013 uint32_t from_ticks = 0;
1014 bool from_found = false;
1015 uint32_t to_ticks = 0;
1016 bool to_found = false;
1017
1018 current_ticker_id = instance->ticker_id_head;
1019 current_node = &instance->nodes[instance->ticker_id_head];
1020 while (current_ticker_id != TICKER_NULL && (!to_found || !from_found)) {
1021 /* Accumulate expire ticks */
1022 acc_ticks_to_expire += current_node->ticks_to_expire;
1023
1024 if (current_ticker_id == from_ticker_id) {
1025 from_ticks = acc_ticks_to_expire;
1026 from_found = true;
1027 } else if (current_ticker_id == to_ticker_id) {
1028 to_ticks = acc_ticks_to_expire;
1029 to_found = true;
1030 }
1031
1032 current_ticker_id = current_node->next;
1033 current_node = &instance->nodes[current_ticker_id];
1034 }
1035
1036 if (to_found && from_found) {
1037 struct ticker_node *to_ticker = &instance->nodes[to_ticker_id];
1038 uint32_t to_remainder = to_ticker->remainder_current;
1039
1040 if (from_ticks > to_ticks) {
1041 			/* from ticker is scheduled after the to ticker - use the
1042 			 * period to compute a result
1043 */
1044 if (to_ticker->ticks_periodic == 0) {
1045 /* single shot ticker */
1046 expire_info->found = 0;
1047 return;
1048 }
1049 while (to_ticks < from_ticks) {
1050 to_ticks += to_ticker->ticks_periodic;
1051 to_ticks += ticker_add_to_remainder(&to_remainder,
1052 to_ticker->remainder_periodic);
1053 }
1054 }
1055
1056 expire_info->ticks_to_expire = to_ticks - from_ticks;
1057 expire_info->remainder = to_remainder;
1058 expire_info->lazy = to_ticker->lazy_current;
1059 expire_info->found = 1;
1060 } else {
1061 expire_info->found = 0;
1062 }
1063 }
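
/* Worked example (illustrative, remainders assumed zero): with
 * from_ticks = 150, to_ticks = 100 and the to ticker periodic at 80
 * ticks, to_ticks is advanced by whole periods until it passes
 * from_ticks (100 -> 180), yielding ticks_to_expire = 180 - 150 = 30.
 */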
1064
1065 /**
1066 * @brief Allocate an expire info for the given ticker ID
1067 *
1068 * @param instance Ticker instance
1069 * @param ticker_id Ticker ID to allocate for
1070 *
1071 * @return Returns TICKER_STATUS_SUCCESS if the allocation succeeded,
1072 * TICKER_STATUS_FAILURE otherwise
1073 *
1074 * @internal
1075 */
1076 static uint32_t ticker_alloc_expire_info(struct ticker_instance *instance, uint8_t ticker_id)
1077 {
1078 uint32_t status = TICKER_STATUS_FAILURE;
1079 uint8_t is_last = 0;
1080
1081 for (int i = 0; i < TICKER_EXPIRE_INFO_MAX; i++) {
1082 if (instance->expire_infos[i].ticker_id == TICKER_NULL) {
1083 struct ticker_node *ticker = &instance->nodes[ticker_id];
1084
1085 instance->expire_infos[i].ticker_id = ticker_id;
1086 instance->expire_infos[i].outdated = true;
1087 instance->expire_infos[i].last = is_last;
1088 ticker->ext_data->other_expire_info = &instance->expire_infos[i];
1089 instance->expire_infos_outdated = true;
1090 status = TICKER_STATUS_SUCCESS;
1091 break;
1092 } else if (instance->expire_infos[i].last && i < TICKER_EXPIRE_INFO_MAX - 1) {
1093 instance->expire_infos[i].last = 0;
1094 is_last = 1;
1095 }
1096 }
1097
1098 return status;
1099 }
1100
1101 /**
1102 * @brief Free a previously allocated expire info for the given ticker ID
1103 *
1104 * @param instance Ticker instance
1105 * @param ticker_id Ticker ID to free up the allocation for
1106 *
1107 * @internal
1108 */
1109 static void ticker_free_expire_info(struct ticker_instance *instance, uint8_t ticker_id)
1110 {
1111 uint8_t is_last = 0;
1112 uint8_t index;
1113
1114 for (index = 0; index < TICKER_EXPIRE_INFO_MAX; index++) {
1115 if (instance->expire_infos[index].ticker_id == ticker_id) {
1116 instance->expire_infos[index].ticker_id = TICKER_NULL;
1117 is_last = instance->expire_infos[index].last;
1118 instance->expire_infos[index].last = 0;
1119 break;
1120 }
1121 }
1122
1123 if (is_last) {
1124 /* Find new last used element and mark it */
1125 for (; index >= 0; index--) {
1126 if (instance->expire_infos[index].ticker_id != TICKER_NULL || index == 0) {
1127 instance->expire_infos[index].last = 1;
1128 break;
1129 }
1130 }
1131 }
1132 }
1133
1134 /**
1135 * @brief Mark all expire infos involving a ticker ID as outdated
1136 *
1137  * @details If a ticker moves, this function should be called to mark all
1138  * expiration infos (if any) that involve that ticker as outdated and in need
1139  * of re-calculation. If any expiration infos involving the ticker_id are
1140  * found, the ticker instance's expire_infos_outdated flag is also set.
1141 *
1142 * @param instance Ticker instance
1143 * @param ticker_id ID of ticker that has moved
1144 *
1145 * @internal
1146 */
1147 static void ticker_mark_expire_info_outdated(struct ticker_instance *instance, uint8_t ticker_id)
1148 {
1149 for (int i = 0; i < TICKER_EXPIRE_INFO_MAX; i++) {
1150 if (instance->expire_infos[i].ticker_id != TICKER_NULL) {
1151 uint8_t current_id = instance->expire_infos[i].ticker_id;
1152 struct ticker_node *ticker = &instance->nodes[current_id];
1153
1154 if (current_id == ticker_id ||
1155 ticker->ext_data->expire_info_id == ticker_id) {
1156 instance->expire_infos[i].outdated = true;
1157 instance->expire_infos_outdated = true;
1158 }
1159 }
1160 if (instance->expire_infos[i].last) {
1161 break;
1162 }
1163 }
1164 }
1165
1166 /**
1167 * @brief Run through all expire infos and update them if needed
1168 *
1169 * @details Runs through all expire_infos and runs ticker_get_expire_info()
1170 * for any that are marked as outdated. Clears the expire_infos_outdated
1171 * flag when done
1172 *
1173 * @param param Pointer to ticker instance
1174 *
1175 * @internal
1176 */
1177 static void ticker_job_update_expire_infos(struct ticker_instance *instance)
1178 {
1179 for (int i = 0; i < TICKER_EXPIRE_INFO_MAX; i++) {
1180 struct ticker_expire_info_internal *info = &instance->expire_infos[i];
1181
1182 if (info->ticker_id != TICKER_NULL && info->outdated) {
1183 struct ticker_node *ticker = &instance->nodes[info->ticker_id];
1184
1185 ticker_get_expire_info(instance, ticker->ext_data->expire_info_id,
1186 info->ticker_id, info);
1187 info->outdated = false;
1188 }
1189
1190 if (info->last) {
1191 break;
1192 }
1193 }
1194
1195 instance->expire_infos_outdated = false;
1196 }
1197
1198 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
1199
1200 /**
1201 * @brief Ticker worker
1202 *
1203 * @details Runs as upper half of ticker operation, triggered by a compare
1204 * match from the underlying counter HAL, via the ticker_trigger function.
1205 * Traverses ticker nodes to find tickers expired since last job
1206 * execution. Expired (requested) ticker nodes have their timeout callback
1207 * functions called. Finally, a ticker job is enqueued. Invoked from the
1208 * ticker worker mayfly context (TICKER_MAYFLY_CALL_ID_WORKER)
1209 *
1210 * @param param Pointer to ticker instance
1211 *
1212 */
1213 void ticker_worker(void *param)
1214 {
1215 struct ticker_instance *instance = param;
1216 struct ticker_node *node;
1217 uint32_t ticks_elapsed;
1218 uint32_t ticks_expired;
1219 uint8_t ticker_id_head;
1220
1221 /* Defer worker if job running */
1222 instance->worker_trigger = 1U;
1223 if (instance->job_guard) {
1224 return;
1225 }
1226
1227 /* If no tickers queued (active), do nothing */
1228 if (instance->ticker_id_head == TICKER_NULL) {
1229 instance->worker_trigger = 0U;
1230 return;
1231 }
1232
1233 /* Get ticks elapsed since last job execution */
1234 ticks_elapsed = ticker_ticks_diff_get(cntr_cnt_get(),
1235 instance->ticks_current);
1236
1237 /* Initialize actual elapsed ticks being consumed */
1238 ticks_expired = 0U;
1239
1240 /* Auto variable containing the head of tickers expiring */
1241 ticker_id_head = instance->ticker_id_head;
1242
1243 #if !defined(CONFIG_BT_TICKER_LOW_LAT) && \
1244 !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
1245 	/* Check if the previous ticker node, which had air-time, is still
1246 * active and has this time slot reserved
1247 */
1248 uint8_t slot_reserved = 0;
1249
1250 if (instance->ticks_slot_previous > ticks_elapsed) {
1251 /* This node intersects reserved slot */
1252 slot_reserved = 1;
1253 }
1254 #endif /* !CONFIG_BT_TICKER_LOW_LAT &&
1255 * !CONFIG_BT_TICKER_SLOT_AGNOSTIC
1256 */
1257
1258 /* Expire all tickers within ticks_elapsed and collect ticks_expired */
1259 node = &instance->nodes[0];
1260
1261 while (ticker_id_head != TICKER_NULL) {
1262 struct ticker_node *ticker;
1263 uint32_t ticks_to_expire;
1264 uint8_t must_expire_skip;
1265 uint32_t ticks_drift;
1266
1267 ticker = &node[ticker_id_head];
1268
1269 /* Stop if ticker did not expire */
1270 ticks_to_expire = ticker->ticks_to_expire;
1271 if (ticks_elapsed < ticks_to_expire) {
1272 break;
1273 }
1274
1275 /* Decrement ticks_elapsed and collect expired ticks */
1276 ticks_elapsed -= ticks_to_expire;
1277 ticks_expired += ticks_to_expire;
1278
1279 /* Move to next ticker node */
1280 ticker_id_head = ticker->next;
1281 must_expire_skip = 0U;
1282
1283 /* Skip if not scheduled to execute */
1284 if (((ticker->req - ticker->ack) & 0xff) != 1U) {
1285 continue;
1286 }
1287
1288 #if !defined(CONFIG_BT_TICKER_LOW_LAT) && \
1289 !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
1290 /* Check if node has slot reservation and resolve any collision
1291 * with other ticker nodes
1292 */
1293 if (((ticker->ticks_slot != 0U) ||
1294 TICKER_HAS_SLOT_WINDOW(ticker)) &&
1295 (slot_reserved ||
1296 (instance->ticks_slot_previous > ticks_expired) ||
1297 ticker_resolve_collision(node, ticker))) {
1298 #if defined(CONFIG_BT_TICKER_EXT)
1299 struct ticker_ext *ext_data = ticker->ext_data;
1300
1301 if (ext_data &&
1302 ext_data->ticks_slot_window != 0U &&
1303 ext_data->reschedule_state ==
1304 TICKER_RESCHEDULE_STATE_NONE &&
1305 (ticker->lazy_periodic <= ticker->lazy_current)) {
1306 /* Mark node for re-scheduling in ticker_job */
1307 ext_data->reschedule_state =
1308 TICKER_RESCHEDULE_STATE_PENDING;
1309 } else if (ext_data) {
1310 /* Mark node as not re-scheduling */
1311 ext_data->reschedule_state =
1312 TICKER_RESCHEDULE_STATE_NONE;
1313 }
1314 #endif /* CONFIG_BT_TICKER_EXT */
1315 /* Increment lazy_current to indicate skipped event. In case
1316 * of re-scheduled node, the lazy count will be decremented in
1317 * ticker_job_reschedule_in_window when completed.
1318 */
1319 ticker->lazy_current++;
1320
1321 if ((ticker->must_expire == 0U) ||
1322 (ticker->lazy_periodic >= ticker->lazy_current) ||
1323 TICKER_RESCHEDULE_PENDING(ticker)) {
1324 /* Not a must-expire node or this is periodic
1325 * latency or pending re-schedule. Skip this
1326 * ticker node. Mark it as elapsed.
1327 */
1328 ticker->ack--;
1329 continue;
1330 }
1331
1332 /* Continue but perform shallow expiry */
1333 must_expire_skip = 1U;
1334 }
1335
1336 #if defined(CONFIG_BT_TICKER_EXT)
1337 if (ticker->ext_data) {
1338 ticks_drift = ticker->ext_data->ticks_drift;
1339 ticker->ext_data->ticks_drift = 0U;
1340 /* Mark node as not re-scheduling */
1341 ticker->ext_data->reschedule_state =
1342 TICKER_RESCHEDULE_STATE_NONE;
1343 } else {
1344 ticks_drift = 0U;
1345 }
1346
1347 #else /* !CONFIG_BT_TICKER_EXT */
1348 ticks_drift = 0U;
1349 #endif /* !CONFIG_BT_TICKER_EXT */
1350
1351 #else /* CONFIG_BT_TICKER_LOW_LAT ||
1352 * CONFIG_BT_TICKER_SLOT_AGNOSTIC
1353 */
1354 ticks_drift = 0U;
1355 #endif /* CONFIG_BT_TICKER_LOW_LAT ||
1356 * CONFIG_BT_TICKER_SLOT_AGNOSTIC
1357 */
1358
1359 /* Scheduled timeout is acknowledged to be complete */
1360 ticker->ack--;
1361
1362 if (ticker->timeout_func) {
1363 uint32_t ticks_at_expire;
1364
1365 ticks_at_expire = (instance->ticks_current +
1366 ticks_expired -
1367 ticker->ticks_to_expire_minus) &
1368 HAL_TICKER_CNTR_MASK;
1369
1370 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
1371 if (ticker->ext_data &&
1372 ticker->ext_data->ext_timeout_func) {
1373 struct ticker_expire_info_internal *expire_info;
1374 struct ticker_ext_context ext_context;
1375 ticker_timeout_func timeout_func;
1376
1377 timeout_func = ticker->ext_data->ext_timeout_func;
1378 expire_info = ticker->ext_data->other_expire_info;
1379 if (ticker->ext_data->expire_info_id != TICKER_NULL) {
1380 LL_ASSERT(expire_info && !expire_info->outdated);
1381 }
1382
1383 ext_context.context = ticker->context;
1384 if (expire_info && expire_info->found) {
1385 ext_context.other_expire_info = (void *)expire_info;
1386 } else {
1387 ext_context.other_expire_info = NULL;
1388 }
1389
1390 DEBUG_TICKER_TASK(1);
1391
1392 /* Invoke the timeout callback */
1393 timeout_func(ticks_at_expire,
1394 ticks_drift,
1395 ticker->remainder_current,
1396 must_expire_skip ?
1397 TICKER_LAZY_MUST_EXPIRE :
1398 ticker->lazy_current,
1399 ticker->force,
1400 &ext_context);
1401 } else
1402 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
1403 {
1404 DEBUG_TICKER_TASK(1);
1405
1406 /* Invoke the timeout callback */
1407 ticker->timeout_func(ticks_at_expire,
1408 ticks_drift,
1409 ticker->remainder_current,
1410 must_expire_skip ?
1411 TICKER_LAZY_MUST_EXPIRE :
1412 ticker->lazy_current,
1413 ticker->force,
1414 ticker->context);
1415 DEBUG_TICKER_TASK(0);
1416 }
1417
1418 if (!IS_ENABLED(CONFIG_BT_TICKER_LOW_LAT) &&
1419 (must_expire_skip == 0U)) {
1420 /* Reset latency to periodic offset */
1421 ticker->lazy_current = 0U;
1422 ticker->force = 0U;
1423
1424 #if !defined(CONFIG_BT_TICKER_LOW_LAT) && \
1425 !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
1426 if (ticker->ticks_slot != 0U) {
1427 /* Any further nodes will be skipped */
1428 slot_reserved = 1U;
1429 }
1430 #endif /* !CONFIG_BT_TICKER_LOW_LAT &&
1431 * !CONFIG_BT_TICKER_SLOT_AGNOSTIC
1432 */
1433
1434 }
1435 }
1436 }
1437
1438 /* Queue the elapsed ticks */
1439 if (instance->ticks_elapsed_first == instance->ticks_elapsed_last) {
1440 ticker_next_elapsed(&instance->ticks_elapsed_last);
1441 }
1442 instance->ticks_elapsed[instance->ticks_elapsed_last] = ticks_expired;
1443
1444 instance->worker_trigger = 0U;
1445
1446 /* Enqueue the ticker job with chain=1 (do not inline) */
1447 instance->sched_cb(TICKER_CALL_ID_WORKER, TICKER_CALL_ID_JOB, 1,
1448 instance);
1449 }
1450
1451 /**
1452 * @brief Prepare ticker node expiration
1453 *
1454 * @details Calculates the number of ticks until next expiration, taking
1455 * into consideration any negative drift correction.
1456 *
1457 * @param ticker Pointer to ticker node
1458 * @param ticks_current Current number of ticks (elapsed)
1459 * @param ticks_at_start Number of ticks at start (anchor)
1460 *
1461 * @internal
1462 */
1463 static void ticks_to_expire_prep(struct ticker_node *ticker,
1464 uint32_t ticks_current, uint32_t ticks_at_start)
1465 {
1466 uint32_t ticks_to_expire = ticker->ticks_to_expire;
1467 uint32_t ticks_to_expire_minus = ticker->ticks_to_expire_minus;
1468
1469 /* Calculate ticks to expire for this new node */
1470 if (!((ticks_at_start - ticks_current) & BIT(HAL_TICKER_CNTR_MSBIT))) {
1471 /* Most significant bit is 0 so ticks_at_start lies ahead of
1472 * ticks_current: ticks_at_start >= ticks_current
1473 */
1474 ticks_to_expire += ticker_ticks_diff_get(ticks_at_start,
1475 ticks_current);
1476 } else {
1477 /* ticks_current > ticks_at_start
1478 */
1479 uint32_t delta_current_start;
1480
1481 delta_current_start = ticker_ticks_diff_get(ticks_current,
1482 ticks_at_start);
1483 if (ticks_to_expire > delta_current_start) {
1484 /* There's still time until expiration - subtract
1485 * elapsed time
1486 */
1487 ticks_to_expire -= delta_current_start;
1488 } else {
1489 /* Ticker node should have expired (we're late).
1490 * Add 'lateness' to negative drift correction
1491 * (ticks_to_expire_minus) and set ticks_to_expire
1492 * to 0
1493 */
1494 ticks_to_expire_minus +=
1495 (delta_current_start - ticks_to_expire);
1496 ticks_to_expire = 0U;
1497 }
1498 }
1499
1500 /* Handle negative drift correction */
1501 if (ticks_to_expire > ticks_to_expire_minus) {
1502 ticks_to_expire -= ticks_to_expire_minus;
1503 ticks_to_expire_minus = 0U;
1504 } else {
1505 ticks_to_expire_minus -= ticks_to_expire;
1506 ticks_to_expire = 0U;
1507 }
1508
1509 /* Update ticker */
1510 ticker->ticks_to_expire = ticks_to_expire;
1511 ticker->ticks_to_expire_minus = ticks_to_expire_minus;
1512 }
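
/* Numeric sketch (illustrative, assuming a 24-bit counter so that
 * HAL_TICKER_CNTR_MSBIT == 23): with ticks_current = 0xFFFF00 and
 * ticks_at_start = 0x000100, (ticks_at_start - ticks_current) masked to
 * the counter width is 0x000200 with the MSBit clear, so the anchor
 * lies 0x200 ticks ahead and that delta is added to ticks_to_expire.
 */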
1513
1514 /**
1515 * @brief Add to remainder
1516 *
1517  * @details Calculates whether the remainder should increment the expiration
1518  * time for above-microsecond precision counter HW. The remainder enables
1519  * improved ticker precision, but is disabled for sub-microsecond precision
1520  * configurations.
1521 * Note: This is the same functionality as ticker_remainder_inc(), except this
1522 * function allows doing the calculation without modifying any tickers
1523 *
1524 * @param remainder Pointer to remainder to add to
1525 * @param to_add Remainder value to add
1526 *
1527 * @return Returns 1 to indicate ticks increment is due, otherwise 0
1528 * @internal
1529 */
1530 static inline uint8_t ticker_add_to_remainder(uint32_t *remainder, uint32_t to_add)
1531 {
1532 #ifdef HAL_TICKER_REMAINDER_RANGE
1533 *remainder += to_add;
1534 if ((*remainder < BIT(31)) &&
1535 (*remainder > (HAL_TICKER_REMAINDER_RANGE >> 1))) {
1536 *remainder -= HAL_TICKER_REMAINDER_RANGE;
1537 return 1;
1538 }
1539 return 0;
1540 #else
1541 return 0;
1542 #endif
1543 }
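
/* Numeric sketch (illustrative): let R = HAL_TICKER_REMAINDER_RANGE,
 * the sub-tick value of one whole tick. Starting from a remainder of
 * 0.4 * R and adding 0.3 * R gives 0.7 * R, which exceeds R / 2, so the
 * function returns 1 (one extra tick due) and the remainder wraps to
 * 0.7 * R - R, stored as a large unsigned value that the BIT(31) test
 * treats as negative on subsequent calls.
 */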
1544
1545 /**
1546 * @brief Increment remainder
1547 *
1548  * @details Calculates whether the remainder should increment the expiration
1549  * time for above-microsecond precision counter HW. The remainder enables
1550  * improved ticker precision, but is disabled for sub-microsecond precision
1551  * configurations.
1552 *
1553 * @param ticker Pointer to ticker node
1554 *
1555 * @return Returns 1 to indicate increment is due, otherwise 0
1556 * @internal
1557 */
1558 static uint8_t ticker_remainder_inc(struct ticker_node *ticker)
1559 {
1560 return ticker_add_to_remainder(&ticker->remainder_current, ticker->remainder_periodic);
1561 }
1562
1563 /**
1564 * @brief Decrement remainder
1565 *
1566  * @details Calculates whether the remainder should decrement the expiration
1567  * time for above-microsecond precision counter HW. The remainder enables
1568  * improved ticker precision, but is disabled for sub-microsecond precision
1569  * configurations.
1570 *
1571 * @param ticker Pointer to ticker node
1572 *
1573 * @return Returns 1 to indicate decrement is due, otherwise 0
1574 * @internal
1575 */
1576 static uint8_t ticker_remainder_dec(struct ticker_node *ticker)
1577 {
1578 #ifdef HAL_TICKER_REMAINDER_RANGE
1579 uint8_t decrement = 0U;
1580
1581 if ((ticker->remainder_current >= BIT(31)) ||
1582 (ticker->remainder_current <= (HAL_TICKER_REMAINDER_RANGE >> 1))) {
1583 decrement++;
1584 ticker->remainder_current += HAL_TICKER_REMAINDER_RANGE;
1585 }
1586 ticker->remainder_current -= ticker->remainder_periodic;
1587 return decrement;
1588 #else
1589 return 0;
1590 #endif
1591 }
1592
1593 /**
1594 * @brief Invoke user operation callback
1595 *
1596 * @param user_op Pointer to user operation struct
1597 * @param status User operation status to pass to callback
1598 *
1599 * @internal
1600 */
1601 static void ticker_job_op_cb(struct ticker_user_op *user_op, uint8_t status)
1602 {
1603 user_op->op = TICKER_USER_OP_TYPE_NONE;
1604 user_op->status = status;
1605 if (user_op->fp_op_func) {
1606 user_op->fp_op_func(user_op->status, user_op->op_context);
1607 }
1608 }
1609
1610 /**
1611 * @brief Update and insert ticker node
1612 *
1613 * @details Update ticker node with parameters passed in user operation.
1614 * After update, the ticker is inserted in front as new head.
1615 *
1616 * @param ticker Pointer to ticker node
1617 * @param user_op Pointer to user operation
1618 * @param ticks_current Current ticker instance ticks
1619 * @param ticks_elapsed Expired ticks at time of call
1620 * @param insert_head Pointer to current head (id). Contains id
1621 * from user operation upon exit
1622 * @internal
1623 */
1624 static inline uint32_t ticker_job_node_update(struct ticker_instance *instance,
1625 struct ticker_node *ticker,
1626 struct ticker_user_op *user_op,
1627 uint32_t ticks_now,
1628 uint32_t ticks_current,
1629 uint32_t ticks_elapsed,
1630 uint8_t *insert_head)
1631 {
1632 uint32_t ticks_to_expire = ticker->ticks_to_expire;
1633
1634 ticks_elapsed += ticker_ticks_diff_get(ticks_now, ticks_current);
1635 if (ticks_to_expire > ticks_elapsed) {
1636 ticks_to_expire -= ticks_elapsed;
1637 } else {
1638 ticker->ticks_to_expire_minus += ticks_elapsed -
1639 ticks_to_expire;
1640 ticks_to_expire = 0U;
1641 }
1642
1643 /* Update ticks_to_expire from latency (lazy) input */
1644 if ((ticker->ticks_periodic != 0U) &&
1645 (user_op->params.update.lazy != 0U)) {
1646 user_op->params.update.lazy--;
1647 while ((ticks_to_expire > ticker->ticks_periodic) &&
1648 (ticker->lazy_current > user_op->params.update.lazy)) {
1649 ticks_to_expire -= ticker->ticks_periodic +
1650 ticker_remainder_dec(ticker);
1651 ticker->lazy_current--;
1652 }
1653
1654 while (ticker->lazy_current < user_op->params.update.lazy) {
1655 ticks_to_expire += ticker->ticks_periodic +
1656 ticker_remainder_inc(ticker);
1657 ticker->lazy_current++;
1658 }
1659 ticker->lazy_periodic = user_op->params.update.lazy;
1660 }
1661
1662 /* Update ticks_to_expire from drift input */
1663 ticker->ticks_to_expire = ticks_to_expire +
1664 user_op->params.update.ticks_drift_plus;
1665 ticker->ticks_to_expire_minus +=
1666 user_op->params.update.ticks_drift_minus;
1667
1668 #if defined(CONFIG_BT_TICKER_EXT) && !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
1669 /* TODO: An improvement on this could be to only consider the drift
1670 	 * (ADV => randomization) if re-scheduling fails. We would still store
1671 * the drift ticks here, but not actually update the node. That would
1672 * allow the ticker to use the full window for re-scheduling.
1673 */
1674 struct ticker_ext *ext_data = ticker->ext_data;
1675
1676 if (ext_data && ext_data->ticks_slot_window != 0U) {
1677 ext_data->ticks_drift =
1678 user_op->params.update.ticks_drift_plus -
1679 user_op->params.update.ticks_drift_minus;
1680 }
1681 #endif /* CONFIG_BT_TICKER_EXT && !CONFIG_BT_TICKER_SLOT_AGNOSTIC */
1682
1683 ticks_to_expire_prep(ticker, ticks_current, ticks_now);
1684
1685 #if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
1686 /* Update ticks_slot parameter from plus/minus input */
1687 ticker->ticks_slot += user_op->params.update.ticks_slot_plus;
1688 if (ticker->ticks_slot > user_op->params.update.ticks_slot_minus) {
1689 ticker->ticks_slot -= user_op->params.update.ticks_slot_minus;
1690 } else {
1691 ticker->ticks_slot = 0U;
1692 }
1693 #endif /* CONFIG_BT_TICKER_SLOT_AGNOSTIC */
1694
1695 /* Update force parameter */
1696 if (user_op->params.update.force != 0U) {
1697 ticker->force = user_op->params.update.force;
1698 }
1699
1700 #if defined(CONFIG_BT_TICKER_EXT) && !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC) &&\
1701 !defined(CONFIG_BT_TICKER_LOW_LAT)
1702 /* Update must_expire parameter */
1703 if (user_op->params.update.must_expire) {
1704 /* 1: disable, 2: enable */
1705 ticker->must_expire = (user_op->params.update.must_expire - 1);
1706 }
1707 #endif /* CONFIG_BT_TICKER_EXT */
1708
1709 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
1710 if (ticker->ext_data && user_op->params.update.expire_info_id != user_op->id) {
1711 if (user_op->params.update.expire_info_id != TICKER_NULL &&
1712 !ticker->ext_data->other_expire_info) {
1713 uint32_t status;
1714
1715 status = ticker_alloc_expire_info(instance, user_op->id);
1716 if (status) {
1717 return status;
1718 }
1719 } else if (user_op->params.update.expire_info_id == TICKER_NULL &&
1720 ticker->ext_data->other_expire_info) {
1721 ticker_free_expire_info(instance, user_op->id);
1722 ticker->ext_data->other_expire_info = NULL;
1723 }
1724
1725 ticker->ext_data->expire_info_id = user_op->params.update.expire_info_id;
1726 if (ticker->ext_data->expire_info_id != TICKER_NULL) {
1727 ticker_mark_expire_info_outdated(instance, user_op->id);
1728 }
1729 }
1730 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
1731
1732 ticker->next = *insert_head;
1733 *insert_head = user_op->id;
1734
1735 return TICKER_STATUS_SUCCESS;
1736 }
1737
1738 /**
1739 * @brief Manage user update operation
1740 *
1741 * @details Called by ticker_job to execute an update request, or set node
1742 * as done if request is not update. Invokes user operation callback before
1743 * exit.
1744 *
1745 * @param instance Pointer to ticker instance
1746 * @param ticker Pointer to ticker node
1747 * @param user_op Pointer to user operation
 * @param ticks_now Current tick count at the time of the call
 * @param ticks_elapsed Expired ticks at time of call
1749 * @param insert_head Pointer to current head (id). For update operation,
1750 * contains operation id upon exit
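 *
 * @note The scheduling state of a node is derived from the imbalance
 * between its request and acknowledge counters. A sketch of the
 * convention used throughout this file (the state names are illustrative):
 * @code
 * uint8_t state = (ticker->req - ticker->ack) & 0xff;
 *
 * // state == 0U: idle, i.e. not scheduled
 * // state == 1U: scheduled, awaiting expiry
 * // state == 2U: expired, awaiting bottom half processing
 * @endcode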
1751 * @internal
1752 */
1753 static inline void ticker_job_node_manage(struct ticker_instance *instance,
1754 struct ticker_node *ticker,
1755 struct ticker_user_op *user_op,
1756 uint32_t ticks_now,
1757 uint32_t ticks_elapsed,
1758 uint8_t *insert_head)
1759 {
1760 /* Handle update of ticker by re-inserting it back. */
1761 if (IS_ENABLED(CONFIG_BT_TICKER_UPDATE) &&
1762 (user_op->op == TICKER_USER_OP_TYPE_UPDATE)) {
1763 /* Remove ticker node from list */
1764 ticker->ticks_to_expire = ticker_dequeue(instance, user_op->id);
1765
1766 /* Update node and insert back */
1767 ticker_job_node_update(instance, ticker, user_op, ticks_now,
1768 instance->ticks_current, ticks_elapsed,
1769 insert_head);
1770
1771 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
1772 ticker_mark_expire_info_outdated(instance, user_op->id);
1773 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
1774
1775 /* Set schedule status of node
1776 * as updating.
1777 */
1778 ticker->req++;
1779 } else {
1780 /* If stop/stop_abs requested, then dequeue node */
1781 if (user_op->op != TICKER_USER_OP_TYPE_YIELD_ABS) {
1782 /* Remove ticker node from list */
1783 ticker->ticks_to_expire = ticker_dequeue(instance,
1784 user_op->id);
1785
1786 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
1787 if (ticker->ext_data && ticker->ext_data->expire_info_id != TICKER_NULL) {
1788 ticker_free_expire_info(instance, user_op->id);
1789 ticker->ext_data->other_expire_info = NULL;
1790 }
1791
1792 ticker_mark_expire_info_outdated(instance, user_op->id);
1793 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
1794
1795 /* Reset schedule status of node */
1796 ticker->req = ticker->ack;
1797 }
1798
1799 #if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
1800 /* If yield_abs/stop/stop_abs then adjust ticks_slot_previous */
1801 if (instance->ticker_id_slot_previous == user_op->id) {
1802 uint32_t ticks_current;
1803 uint32_t ticks_at_yield;
1804 uint32_t ticks_used;
1805
1806 if (user_op->op != TICKER_USER_OP_TYPE_YIELD_ABS) {
1807 instance->ticker_id_slot_previous = TICKER_NULL;
1808 }
1809
1810 if ((user_op->op == TICKER_USER_OP_TYPE_YIELD_ABS) ||
1811 (user_op->op == TICKER_USER_OP_TYPE_STOP_ABS)) {
1812 ticks_at_yield =
1813 user_op->params.yield.ticks_at_yield;
1814 } else {
1815 ticks_at_yield = ticks_now;
1816 }
1817
1818 ticks_current = instance->ticks_current;
1819 if (!((ticks_at_yield - ticks_current) &
1820 BIT(HAL_TICKER_CNTR_MSBIT))) {
1821 ticks_used = ticks_elapsed +
1822 ticker_ticks_diff_get(ticks_at_yield,
1823 ticks_current);
1824 } else {
1825 ticks_used =
1826 ticker_ticks_diff_get(ticks_current,
1827 ticks_at_yield);
1828 if (ticks_elapsed > ticks_used) {
1829 ticks_used = ticks_elapsed -
1830 ticks_used;
1831 } else {
1832 ticks_used = 0;
1833 }
1834 }
1835
1836 if (instance->ticks_slot_previous > ticks_used) {
1837 instance->ticks_slot_previous = ticks_used;
1838 }
1839 }
1840 #endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */
1841
1842 }
1843
	/* Op success. @todo The update may still fail during the actual
	 * insert! That case needs to be designed yet.
	 */
1847 ticker_job_op_cb(user_op, TICKER_STATUS_SUCCESS);
1848 }
1849
1850 /**
1851 * @brief Manage user operations list
1852 *
 * @details Called by ticker_job to execute requested user operations. A
 * number of operations may be queued since the last ticker_job. Only update
 * and stop operations are handled here; start is handled implicitly by
 * inserting the ticker node in ticker_job_list_insert.
1857 *
 * @param instance Pointer to ticker instance
 * @param ticks_now Current tick count at the time of the call
 * @param ticks_elapsed Expired ticks at time of call
 * @param insert_head Pointer to current head (id). For update operation,
 * contains operation id upon exit
 * @return 1 if an operation is still pending, 0 if all operations are done.
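 *
 * @note The user operation queue is circular; indices wrap at
 * user->count_user_op. A sketch of the increment used when consuming
 * operations (it mirrors the loop below):
 * @code
 * uint8_t middle = user->middle + 1;
 *
 * if (middle == user->count_user_op) {
 *         middle = 0U;
 * }
 * user->middle = middle;
 * @endcode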
1863 * @internal
1864 */
1865 static inline uint8_t ticker_job_list_manage(struct ticker_instance *instance,
1866 uint32_t ticks_now,
1867 uint32_t ticks_elapsed,
1868 uint8_t *insert_head)
1869 {
1870 uint8_t pending;
1871 struct ticker_node *node;
1872 struct ticker_user *users;
1873 uint8_t count_user;
1874
1875 pending = 0U;
1876 node = &instance->nodes[0];
1877 users = &instance->users[0];
1878 count_user = instance->count_user;
1879 /* Traverse users - highest id first */
1880 while (count_user--) {
1881 struct ticker_user *user;
1882 struct ticker_user_op *user_ops;
1883
1884 user = &users[count_user];
1885 user_ops = &user->user_op[0];
		/* Traverse user operation queue - middle to last (with wrap).
		 * This loop updates user->middle to be past the last
		 * processed user operation. This is used later by
		 * ticker_job_list_insert, for handling user->first to middle.
		 */
1891 while (user->middle != user->last) {
1892 struct ticker_user_op *user_op;
1893 struct ticker_node *ticker;
1894 uint8_t state;
1895 uint8_t prev;
1896 uint8_t middle;
1897
1898 user_op = &user_ops[user->middle];
1899
1900 /* Increment index and handle wrapping */
1901 prev = user->middle;
1902 middle = user->middle + 1;
1903 if (middle == user->count_user_op) {
1904 middle = 0U;
1905 }
1906 user->middle = middle;
1907
1908 ticker = &node[user_op->id];
1909
1910 /* if op is start, then skip update and stop ops */
1911 if (user_op->op < TICKER_USER_OP_TYPE_UPDATE) {
1912 #if defined(CONFIG_BT_TICKER_PREFER_START_BEFORE_STOP)
1913 if (user_op->op == TICKER_USER_OP_TYPE_START) {
1914 /* Set start pending to validate a
1915 * successive, inline stop operation.
1916 */
1917 ticker->start_pending = 1U;
1918 }
1919 #endif /* CONFIG_BT_TICKER_PREFER_START_BEFORE_STOP */
1920
1921 continue;
1922 }
1923
1924 /* determine the ticker state */
1925 state = (ticker->req - ticker->ack) & 0xff;
1926
1927 /* if not started or update not required,
1928 * set status and continue.
1929 */
1930 if ((user_op->op > TICKER_USER_OP_TYPE_STOP_ABS) ||
1931 ((state == 0U) &&
1932 #if defined(CONFIG_BT_TICKER_PREFER_START_BEFORE_STOP)
1933 !ticker->start_pending &&
1934 #endif /* CONFIG_BT_TICKER_PREFER_START_BEFORE_STOP */
1935 (user_op->op != TICKER_USER_OP_TYPE_YIELD_ABS)) ||
1936 ((user_op->op == TICKER_USER_OP_TYPE_UPDATE) &&
1937 (user_op->params.update.ticks_drift_plus == 0U) &&
1938 (user_op->params.update.ticks_drift_minus == 0U) &&
1939 #if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
1940 (user_op->params.update.ticks_slot_plus == 0U) &&
1941 (user_op->params.update.ticks_slot_minus == 0U) &&
1942 #endif /* CONFIG_BT_TICKER_SLOT_AGNOSTIC */
1943 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
1944 (!ticker->ext_data ||
1945 user_op->params.update.expire_info_id == user_op->id) &&
1946 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
1947 (user_op->params.update.lazy == 0U) &&
1948 (user_op->params.update.force == 0U))) {
1949 ticker_job_op_cb(user_op,
1950 TICKER_STATUS_FAILURE);
1951 continue;
1952 }
1953
1954 /* Delete or yield node, if not expired */
1955 if ((state == 1U) ||
1956 (user_op->op == TICKER_USER_OP_TYPE_YIELD_ABS)) {
1957 ticker_job_node_manage(instance, ticker,
1958 user_op, ticks_now,
1959 ticks_elapsed,
1960 insert_head);
1961 } else {
				/* Update requested on an expired node; defer
				 * the update until the worker's bottom half
				 * finishes, by scheduling the job to run
				 * after it.
				 */
1967 instance->sched_cb(TICKER_CALL_ID_JOB,
1968 TICKER_CALL_ID_JOB, 1,
1969 instance);
1970
				/* Update the index up to which management is
				 * complete.
				 */
1974 user->middle = prev;
1975
1976 pending = 1U;
1977 break;
1978 }
1979 }
1980 }
1981
1982 return pending;
1983 }
1984
1985 /**
1986 * @brief Handle ticker node expirations
1987 *
1988 * @details Called by ticker_job to schedule next expirations. Expired ticker
1989 * nodes are removed from the active list, and re-inserted if periodic.
1990 *
 * @param instance Pointer to ticker instance
 * @param ticks_now Current tick count at the time of the call
 * @param ticks_previous Absolute ticks at ticker_job start
 * @param ticks_elapsed Expired ticks at time of call
1994 * @param insert_head Pointer to current head (id). Updated if nodes are
1995 * re-inserted
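 *
 * @note For a periodic node, the next expiry is reloaded with one period
 * plus any lazy_periodic skipped periods, each including the accumulated
 * sub-microsecond remainder. A sketch of the reload performed below:
 * @code
 * uint32_t count = 1U + lazy_periodic;
 *
 * ticks_to_expire = 0U;
 * while (count--) {
 *         ticks_to_expire += ticker->ticks_periodic;
 *         ticks_to_expire += ticker_remainder_inc(ticker);
 * }
 * @endcode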
1996 * @internal
1997 */
1998 static inline void ticker_job_worker_bh(struct ticker_instance *instance,
1999 uint32_t ticks_now,
2000 uint32_t ticks_previous,
2001 uint32_t ticks_elapsed,
2002 uint8_t *insert_head)
2003 {
2004 struct ticker_node *node;
2005 uint32_t ticks_expired;
2006
2007 #if !defined(CONFIG_BT_TICKER_LOW_LAT)
2008 uint32_t ticks_latency;
2009
2010 ticks_latency = ticker_ticks_diff_get(ticks_now, ticks_previous);
2011 #endif /* !CONFIG_BT_TICKER_LOW_LAT */
2012
2013 node = &instance->nodes[0];
2014 ticks_expired = 0U;
2015 while (instance->ticker_id_head != TICKER_NULL) {
2016 uint8_t skip_collision = 0U;
2017 struct ticker_node *ticker;
2018 uint32_t ticks_to_expire;
2019 uint8_t id_expired;
2020 uint8_t state;
2021
2022 /* auto variable for current ticker node */
2023 id_expired = instance->ticker_id_head;
2024 ticker = &node[id_expired];
2025
2026 /* Do nothing if ticker did not expire */
2027 ticks_to_expire = ticker->ticks_to_expire;
2028 if (ticks_elapsed < ticks_to_expire) {
2029 ticker->ticks_to_expire -= ticks_elapsed;
2030 break;
2031 }
2032
2033 /* decrement ticks_elapsed and collect expired ticks */
2034 ticks_elapsed -= ticks_to_expire;
2035 ticks_expired += ticks_to_expire;
2036
2037 state = (ticker->req - ticker->ack) & 0xff;
2038
2039 #if !defined(CONFIG_BT_TICKER_LOW_LAT)
2040 ticks_latency -= ticks_to_expire;
2041
		/* A node with a non-zero lazy count did not expire with a
		 * timeout callback; it was either skipped due to collision
		 * or re-scheduled. Such a node should not define the active
		 * slot reservation (slot_previous).
		 */
2046 skip_collision = (ticker->lazy_current != 0U);
2047 #endif /* !CONFIG_BT_TICKER_LOW_LAT */
2048
2049 #if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
2050 /* decrement ticks_slot_previous */
2051 if (instance->ticks_slot_previous > ticks_to_expire) {
2052 instance->ticks_slot_previous -= ticks_to_expire;
2053 } else {
2054 instance->ticker_id_slot_previous = TICKER_NULL;
2055 instance->ticks_slot_previous = 0U;
2056 }
2057
	/* If a re-schedule is pending, keep the previous slot reservation
	 * information (do not overwrite slot_previous).
	 */
2061 if (ticker->ticks_slot && (state == 2U) && !skip_collision &&
2062 !TICKER_RESCHEDULE_PENDING(ticker)) {
2063 instance->ticker_id_slot_previous = id_expired;
2064 instance->ticks_slot_previous = ticker->ticks_slot;
2065 }
#endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */
2067
2068 /* ticker expired, set ticks_to_expire zero */
2069 ticker->ticks_to_expire = 0U;
2070
2071 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
2072 ticker_mark_expire_info_outdated(instance, instance->ticker_id_head);
2073 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
2074
2075 /* remove the expired ticker from head */
2076 instance->ticker_id_head = ticker->next;
2077
2078 /* Ticker will be restarted if periodic or to be re-scheduled */
2079 if ((ticker->ticks_periodic != 0U) ||
2080 TICKER_RESCHEDULE_PENDING(ticker)) {
2081 #if !defined(CONFIG_BT_TICKER_LOW_LAT)
2082 if (TICKER_RESCHEDULE_PENDING(ticker)) {
2083 /* Set the re-scheduled node to now. Will be
2084 * collision resolved after all nodes are
2085 * restarted
2086 */
2087 ticker->ticks_to_expire = ticks_elapsed;
2088
				/* Reset ticker state, so that it is put
				 * back in requested state later down
				 * in the code.
				 */
2093 ticker->req = ticker->ack;
2094 } else {
2095 uint16_t lazy_periodic;
2096 uint32_t count;
2097 uint16_t lazy;
2098
2099 /* If not skipped, apply lazy_periodic */
2100 if (!ticker->lazy_current) {
2101 lazy_periodic = ticker->lazy_periodic;
2102 } else {
2103 lazy_periodic = 0U;
2104
					/* Reset ticker state, so that it is
					 * put back in requested state later
					 * down in the code.
					 */
2109 ticker->req = ticker->ack;
2110 }
2111
2112 /* Reload ticks_to_expire with at least one
2113 * period.
2114 */
2115 ticks_to_expire = 0U;
2116 count = 1 + lazy_periodic;
2117 while (count--) {
2118 ticks_to_expire +=
2119 ticker->ticks_periodic;
2120 ticks_to_expire +=
2121 ticker_remainder_inc(ticker);
2122 }
2123
2124 /* Skip intervals that have elapsed w.r.t.
2125 * current ticks.
2126 */
2127 lazy = 0U;
2128
2129 if (0) {
2130 #if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
2131 } else if (!ticker->must_expire) {
2132 #else
2133 } else {
2134 #endif /* CONFIG_BT_TICKER_SLOT_AGNOSTIC */
2135 while (ticks_to_expire <
2136 ticks_latency) {
2137 ticks_to_expire +=
2138 ticker->ticks_periodic;
2139 ticks_to_expire +=
2140 ticker_remainder_inc(ticker);
2141 lazy++;
2142 }
2143 }
2144
2145 /* Use the calculated ticks to expire and
2146 * laziness.
2147 */
2148 ticker->ticks_to_expire = ticks_to_expire;
2149 ticker->lazy_current += (lazy_periodic + lazy);
2150 }
2151
2152 ticks_to_expire_prep(ticker, instance->ticks_current,
2153 (ticks_previous + ticks_expired));
2154 #else /* !CONFIG_BT_TICKER_LOW_LAT */
2155 uint32_t count;
2156
2157 /* Prepare for next interval */
2158 ticks_to_expire = 0U;
2159 count = 1 + ticker->lazy_periodic;
2160 while (count--) {
2161 ticks_to_expire += ticker->ticks_periodic;
2162 ticks_to_expire += ticker_remainder_inc(ticker);
2163 }
2164 ticker->ticks_to_expire = ticks_to_expire;
2165
2166 ticks_to_expire_prep(ticker, instance->ticks_current,
2167 (ticks_previous + ticks_expired));
2168
2169 /* Reset latency to periodic offset */
2170 ticker->lazy_current = ticker->lazy_periodic;
2171 ticker->force = 0U;
2172 #endif /* !CONFIG_BT_TICKER_LOW_LAT */
2173 /* Add to insert list */
2174 ticker->next = *insert_head;
2175 *insert_head = id_expired;
2176
2177 /* set schedule status of node as restarting. */
2178 ticker->req++;
2179 } else {
2180 #if !defined(CONFIG_BT_TICKER_LOW_LAT)
			/* A single-shot ticker that is still in requested
			 * state, or was skipped due to collision, shall
			 * generate an operation function callback with
			 * failure status.
			 */
2185 if (state && ((state == 1U) || skip_collision) &&
2186 ticker->fp_op_func) {
2187 ticker->fp_op_func(TICKER_STATUS_FAILURE,
2188 ticker->op_context);
2189 }
2190 #endif /* !CONFIG_BT_TICKER_LOW_LAT */
2191
2192 /* reset schedule status of node */
2193 ticker->req = ticker->ack;
2194 }
2195 }
2196 }
2197
2198 /**
2199 * @brief Prepare ticker node start
2200 *
2201 * @details Called by ticker_job to prepare ticker node start operation.
2202 *
 * @param instance Pointer to ticker instance
 * @param ticker Pointer to ticker node
 * @param user_op Pointer to user operation
 * @param ticks_current Absolute ticks at the time of the call
 *
 * @return TICKER_STATUS_SUCCESS if start preparation was successful,
 * otherwise the failure status from expire info allocation
2206 *
2207 * @internal
2208 */
2209 static inline uint32_t ticker_job_op_start(struct ticker_instance *instance,
2210 struct ticker_node *ticker,
2211 struct ticker_user_op *user_op,
2212 uint32_t ticks_current)
2213 {
2214 struct ticker_user_op_start *start = (void *)&user_op->params.start;
2215
2216 #if defined(CONFIG_BT_TICKER_LOW_LAT)
2217 /* Must expire is not supported in compatibility mode */
2218 LL_ASSERT(start->lazy < TICKER_LAZY_MUST_EXPIRE_KEEP);
2219 #else
2220 #if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
2221 if (start->lazy != TICKER_LAZY_MUST_EXPIRE_KEEP) {
2222 /* Update the must_expire state */
2223 ticker->must_expire =
2224 (start->lazy == TICKER_LAZY_MUST_EXPIRE) ? 1U : 0U;
2225 }
2226 #endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */
2227 #endif /* CONFIG_BT_TICKER_LOW_LAT */
2228
2229 #if defined(CONFIG_BT_TICKER_EXT)
2230 ticker->ext_data = start->ext_data;
2231
2232 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
2233 if (ticker->ext_data) {
2234 ticker->ext_data->other_expire_info = NULL;
2235 if (ticker->ext_data->expire_info_id != TICKER_NULL) {
2236 uint32_t status;
2237
2238 status = ticker_alloc_expire_info(instance, user_op->id);
2239 if (status) {
2240 return status;
2241 }
2242 }
2243 }
2244
2245 ticker_mark_expire_info_outdated(instance, user_op->id);
2246 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
2247 #endif /* CONFIG_BT_TICKER_EXT */
2248
2249 ticker->ticks_periodic = start->ticks_periodic;
2250 ticker->remainder_periodic = start->remainder_periodic;
2251 ticker->lazy_periodic =
2252 (start->lazy < TICKER_LAZY_MUST_EXPIRE_KEEP) ? start->lazy :
2253 0U;
2254 #if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
2255 ticker->ticks_slot = start->ticks_slot;
#endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */
2257 ticker->timeout_func = start->fp_timeout_func;
2258 ticker->context = start->context;
2259 ticker->ticks_to_expire = start->ticks_first;
2260 ticker->ticks_to_expire_minus = 0U;
2261 ticks_to_expire_prep(ticker, ticks_current, start->ticks_at_start);
2262 #if defined(CONFIG_BT_TICKER_REMAINDER)
2263 ticker->remainder_current = start->remainder_first;
2264 #else /* !CONFIG_BT_TICKER_REMAINDER */
2265 ticker->remainder_current = 0U;
2266 #endif /* !CONFIG_BT_TICKER_REMAINDER */
2267 ticker->lazy_current = 0U;
2268 ticker->force = 1U;
2269
2270 return TICKER_STATUS_SUCCESS;
2271 }
2272
2273 #if !defined(CONFIG_BT_TICKER_LOW_LAT)
2274 /**
2275 * @brief Insert new ticker node
2276 *
2277 * @details Called by ticker_job to insert a new ticker node. If node collides
2278 * with existing ticker nodes, either the new node is postponed, or colliding
2279 * node is un-scheduled. Decision is based on latency and the force-state of
2280 * individual nodes.
2281 *
2282 * @param instance Pointer to ticker instance
2283 * @param id_insert Id of ticker to insert
2284 * @param ticker Pointer to ticker node to insert
2285 * @param insert_head Pointer to current head. Updated if colliding nodes
2286 * are un-scheduled
2287 * @internal
2288 */
2289 static inline uint8_t ticker_job_insert(struct ticker_instance *instance,
2290 uint8_t id_insert,
2291 struct ticker_node *ticker,
2292 uint8_t *insert_head)
2293 {
2294 ARG_UNUSED(insert_head);
2295
2296 /* Prepare to insert */
2297 ticker->next = TICKER_NULL;
2298
2299 /* Enqueue the ticker node */
2300 (void)ticker_enqueue(instance, id_insert);
2301
2302 /* Inserted/Scheduled */
2303 ticker->req = ticker->ack + 1;
2304
2305 return TICKER_STATUS_SUCCESS;
2306 }
2307
2308 #if defined(CONFIG_BT_TICKER_EXT) && !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
2309 /**
2310 * @brief Re-schedule ticker nodes within slot_window
2311 *
2312 * @details This function is responsible for re-scheduling ticker nodes
2313 * which have been marked for re-scheduling in ticker_worker. These nodes
2314 * have a non-zero ticks_slot_window configuration, which indicates a
2315 * valid range in which to re-schedule the node.
2316 * The function iterates over nodes, and handles one re-schedule at a
2317 * time. After a re-schedule, nodes are once again iterated until no more
2318 * nodes are marked for re-scheduling.
2319 *
2320 * @param instance Pointer to ticker instance
2321 * @param ticks_elapsed Number of ticks elapsed since last ticker job
2322 *
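 * @note Window placement (a sketch mirroring the search below): the usable
 * end of the re-scheduling window before the next node's expiry keeps a
 * margin, and a node with a slot reservation is placed at the window end:
 * @code
 * window_end_ticks = MIN(ticks_slot_window,
 *                        ticks_start_offset + ticks_to_expire_offset -
 *                        HAL_TICKER_RESCHEDULE_MARGIN);
 * ticks_to_expire = window_end_ticks - ticks_slot;
 * @endcode
 *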
2323 * @internal
2324 */
2325 static uint8_t ticker_job_reschedule_in_window(struct ticker_instance *instance,
2326 uint32_t ticks_elapsed)
2327 {
2328 struct ticker_node *nodes;
2329 uint8_t rescheduling;
2330 uint8_t rescheduled;
2331
2332 nodes = &instance->nodes[0];
2333
2334 /* Do until all pending re-schedules handled */
2335 rescheduling = 1U;
2336 rescheduled = 0U;
2337 while (rescheduling) {
2338 struct ticker_node *ticker_resched;
2339 uint32_t ticks_to_expire_offset;
2340 uint8_t ticker_id_resched_prev;
2341 struct ticker_ext *ext_data;
2342 uint32_t ticks_start_offset;
2343 uint32_t window_start_ticks;
2344 uint32_t ticks_slot_window;
2345 uint8_t ticker_id_resched;
2346 uint32_t ticks_to_expire;
2347 uint8_t ticker_id_prev;
2348 uint8_t ticker_id_next;
2349 uint32_t ticks_slot;
2350
2351 rescheduling = 0U;
2352
2353 /* Find first pending re-schedule */
2354 ticker_id_resched_prev = TICKER_NULL;
2355 ticker_id_resched = instance->ticker_id_head;
2356 while (ticker_id_resched != TICKER_NULL) {
2357 ticker_resched = &nodes[ticker_id_resched];
2358 if (TICKER_RESCHEDULE_PENDING(ticker_resched)) {
2359 /* Pending reschedule found */
2360 break;
2361 }
2362
2363 ticker_id_resched_prev = ticker_id_resched;
2364 ticker_id_resched = ticker_resched->next;
2365 }
2366 if (ticker_id_resched == TICKER_NULL) {
2367 /* Done */
2368 break;
2369 }
2370
2371 /* Check for intersection with already active node */
2372 window_start_ticks = 0U;
2373 if (instance->ticks_slot_previous > ticks_elapsed) {
2374 /* Active node intersects - window starts after end of
2375 * active slot
2376 */
2377 window_start_ticks = instance->ticks_slot_previous -
2378 ticks_elapsed;
2379 }
2380
2381 ticker_id_next = ticker_resched->next;
2382
2383 /* If drift was applied to this node, this must be
2384 * taken into consideration. Reduce the window with
2385 * the amount of drift already applied.
2386 *
	 * TODO: An improvement on this could be to only consider the
	 * drift (ADV => randomization) if re-scheduling fails. Then the
	 * ticker would have the best possible window to re-schedule in
	 * and not be restricted to ticks_slot_window - ticks_drift.
2391 */
2392 ext_data = ticker_resched->ext_data;
2393 if (ext_data->ticks_drift < ext_data->ticks_slot_window) {
2394 ticks_slot_window = ext_data->ticks_slot_window -
2395 ext_data->ticks_drift;
2396 } else {
2397 /* Window has been exhausted - we can't reschedule */
2398 ticker_id_next = TICKER_NULL;
2399
2400 /* Assignment will be unused when TICKER_NULL */
2401 ticks_slot_window = 0U;
2402 }
2403
2404 /* Use ticker's reserved time ticks_slot, else for unreserved
2405 * tickers use the reschedule margin as ticks_slot.
2406 */
2407 if (ticker_resched->ticks_slot) {
2408 ticks_slot = ticker_resched->ticks_slot;
2409 } else {
2410 LL_ASSERT(TICKER_HAS_SLOT_WINDOW(ticker_resched));
2411
2412 ticks_slot = HAL_TICKER_RESCHEDULE_MARGIN;
2413 }
2414
2415 /* Try to find available slot for re-scheduling */
2416 ticks_to_expire_offset = 0U;
2417 ticks_start_offset = 0U;
2418 ticks_to_expire = 0U;
2419 while ((ticker_id_next != TICKER_NULL) &&
2420 ((ticks_start_offset + ticks_slot) <=
2421 ticks_slot_window)) {
2422 struct ticker_node *ticker_next;
2423 uint32_t window_end_ticks;
2424
2425 ticker_next = &nodes[ticker_id_next];
2426 ticks_to_expire_offset += ticker_next->ticks_to_expire;
2427
2428 /* Skip other pending re-schedule nodes and
2429 * tickers with no reservation or not periodic
2430 */
2431 if (TICKER_RESCHEDULE_PENDING(ticker_next) ||
2432 !ticker_next->ticks_slot ||
2433 !ticker_next->ticks_periodic) {
2434 ticker_id_next = ticker_next->next;
2435
2436 continue;
2437 }
2438
2439 /* Calculate end of window. Since window may be aligned
2440 * with expiry of next node, we add a margin
2441 */
2442 if (ticks_to_expire_offset >
2443 HAL_TICKER_RESCHEDULE_MARGIN) {
2444 window_end_ticks =
2445 MIN(ticks_slot_window,
2446 ticks_start_offset +
2447 ticks_to_expire_offset -
2448 HAL_TICKER_RESCHEDULE_MARGIN);
2449 } else {
2450 /* Next expiry is too close - try the next
2451 * node
2452 */
2453 window_end_ticks = 0;
2454 }
2455
2456 /* Calculate new ticks_to_expire as end of window minus
2457 * slot size.
2458 */
2459 if (window_end_ticks > (ticks_start_offset +
2460 ticks_slot)) {
2461 if (!ticker_resched->ticks_slot) {
2462 /* Place at start of window */
2463 ticks_to_expire = window_start_ticks;
2464 } else {
				/* Place at end of window. This ensures
				 * that a ticker with a slot window that
				 * uses ticks_slot does not take over the
				 * interval of the colliding ticker.
				 */
2470 ticks_to_expire = window_end_ticks -
2471 ticks_slot;
2472 }
2473 } else {
2474 /* No space in window - try the next node */
2475 ticks_to_expire = 0;
2476 }
2477
2478 /* Decide if the re-scheduling ticker node fits in the
2479 * slot found - break if it fits
2480 */
2481 if ((ticks_to_expire != 0U) &&
2482 (ticks_to_expire >= window_start_ticks) &&
2483 (ticks_to_expire <= (window_end_ticks -
2484 ticks_slot))) {
2485 /* Re-scheduled node fits before this node */
2486 break;
2487 }
2488
			/* We didn't find a valid slot for re-scheduling -
			 * try the next node
			 */
2492 ticks_start_offset += ticks_to_expire_offset;
2493 window_start_ticks = ticks_start_offset +
2494 ticker_next->ticks_slot;
2495 ticks_to_expire_offset = 0U;
2496
2497 if (!ticker_resched->ticks_slot) {
2498 /* Try at the end of the next node */
2499 ticks_to_expire = window_start_ticks;
2500 } else {
			/* Try at the end of the slot window. This ensures
			 * that a ticker with a slot window that uses
			 * ticks_slot does not take over the interval of
			 * the colliding ticker.
			 */
2506 ticks_to_expire = ticks_slot_window -
2507 ticks_slot;
2508 }
2509
2510 ticker_id_next = ticker_next->next;
2511 }
2512
2513 ext_data->ticks_drift += ticks_to_expire -
2514 ticker_resched->ticks_to_expire;
2515 ticker_resched->ticks_to_expire = ticks_to_expire;
2516
2517 /* Place the ticker node sorted by expiration time and adjust
2518 * delta times
2519 */
2520 ticker_id_next = ticker_resched->next;
2521 ticker_id_prev = TICKER_NULL;
2522 while (ticker_id_next != TICKER_NULL) {
2523 struct ticker_node *ticker_next;
2524
2525 ticker_next = &nodes[ticker_id_next];
2526 if (ticker_resched->ticks_to_expire >
2527 ticker_next->ticks_to_expire) {
2528 /* Node is after this - adjust delta */
2529 ticker_resched->ticks_to_expire -=
2530 ticker_next->ticks_to_expire;
2531 } else {
2532 /* Node is before this one */
2533 ticker_next->ticks_to_expire -=
2534 ticker_resched->ticks_to_expire;
2535 break;
2536 }
2537 ticker_id_prev = ticker_id_next;
2538 ticker_id_next = ticker_next->next;
2539 }
2540
2541 /* If the node moved in the list, insert it */
2542 if (ticker_id_prev != TICKER_NULL) {
2543 /* Remove node from its current position in list */
2544 if (ticker_id_resched_prev != TICKER_NULL) {
2545 /* Node was not at the head of the list */
2546 nodes[ticker_id_resched_prev].next =
2547 ticker_resched->next;
2548 } else {
2549 /* Node was at the head, move head forward */
2550 instance->ticker_id_head = ticker_resched->next;
2551 }
2552
2553 /* Link inserted node */
2554 ticker_resched->next = nodes[ticker_id_prev].next;
2555 nodes[ticker_id_prev].next = ticker_id_resched;
2556 }
2557
2558 /* Remove latency added in ticker_worker */
2559 ticker_resched->lazy_current--;
2560
2561 /* Prevent repeated re-scheduling */
2562 ext_data->reschedule_state =
2563 TICKER_RESCHEDULE_STATE_DONE;
2564
2565 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
2566 ticker_mark_expire_info_outdated(instance, ticker_id_resched);
2567 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
2568
		/* Iterate again to handle any other pending re-schedules,
		 * and note that a re-schedule occurred.
		 */
2570 rescheduling = 1U;
2571 rescheduled = 1U;
2572 }
2573
2574 return rescheduled;
2575 }
2576 #endif /* CONFIG_BT_TICKER_EXT && !CONFIG_BT_TICKER_SLOT_AGNOSTIC */
2577 #else /* !CONFIG_BT_TICKER_LOW_LAT */
2578
2579 /**
2580 * @brief Insert new ticker node
2581 *
2582 * @details Called by ticker_job to insert a new ticker node. If node collides
2583 * with existing ticker nodes, either the new node is postponed, or colliding
2584 * node is un-scheduled. Decision is based on latency and the force-state of
2585 * individual nodes.
2586 *
2587 * @param instance Pointer to ticker instance
2588 * @param id_insert Id of ticker to insert
2589 * @param ticker Pointer to ticker node to insert
2590 * @param insert_head Pointer to current head. Updated if colliding nodes
2591 * are un-scheduled
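 *
 * @note Collision resolution (a sketch of the policy implemented below):
 * the colliding node is un-scheduled in favour of the inserted node only
 * if it is periodic, has skipped at least as many intervals, and holds a
 * lower force value:
 * @code
 * if (ticker_collide->ticks_periodic &&
 *     (skip_collide <= skip) &&
 *     (ticker_collide->force < ticker->force)) {
 *         // un-schedule colliding node and queue it for re-insertion
 * }
 * @endcode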
2592 * @internal
2593 */
2594 static inline uint8_t ticker_job_insert(struct ticker_instance *instance,
2595 uint8_t id_insert,
2596 struct ticker_node *ticker,
2597 uint8_t *insert_head)
2598 {
2599 struct ticker_node *node = &instance->nodes[0];
2600 uint8_t id_collide;
2601 uint16_t skip;
2602
2603 /* Prepare to insert */
2604 ticker->next = TICKER_NULL;
2605
2606 /* No. of times ticker has skipped its interval */
2607 if (ticker->lazy_current > ticker->lazy_periodic) {
2608 skip = ticker->lazy_current -
2609 ticker->lazy_periodic;
2610 } else {
2611 skip = 0U;
2612 }
2613
2614 /* If insert collides, remove colliding or advance to next interval */
2615 while (id_insert !=
2616 (id_collide = ticker_enqueue(instance, id_insert))) {
2617 /* Check for collision */
2618 if (id_collide != TICKER_NULL) {
2619 struct ticker_node *ticker_collide = &node[id_collide];
2620 uint16_t skip_collide;
2621
2622 /* No. of times colliding ticker has skipped its
2623 * interval.
2624 */
2625 if (ticker_collide->lazy_current >
2626 ticker_collide->lazy_periodic) {
2627 skip_collide = ticker_collide->lazy_current -
2628 ticker_collide->lazy_periodic;
2629 } else {
2630 skip_collide = 0U;
2631 }
2632
2633 /* Check if colliding node should be un-scheduled */
2634 if (ticker_collide->ticks_periodic &&
2635 skip_collide <= skip &&
2636 ticker_collide->force < ticker->force) {
				/* Dequeue and get the remainder of ticks
				 * to expire.
				 */
2640 ticker_collide->ticks_to_expire =
2641 ticker_dequeue(instance, id_collide);
2642 /* Unschedule node */
2643 ticker_collide->req = ticker_collide->ack;
2644
2645 /* Enqueue for re-insertion */
2646 ticker_collide->next = *insert_head;
2647 *insert_head = id_collide;
2648
2649 continue;
2650 }
2651 }
2652
2653 /* occupied, try next interval */
2654 if (ticker->ticks_periodic != 0U) {
2655 ticker->ticks_to_expire += ticker->ticks_periodic +
2656 ticker_remainder_inc(ticker);
2657 ticker->lazy_current++;
2658
2659 /* No. of times ticker has skipped its interval */
2660 if (ticker->lazy_current > ticker->lazy_periodic) {
2661 skip = ticker->lazy_current -
2662 ticker->lazy_periodic;
2663 } else {
2664 skip = 0U;
2665 }
2666
2667 /* Remove any accumulated drift (possibly added due to
2668 * ticker job execution latencies).
2669 */
2670 if (ticker->ticks_to_expire >
2671 ticker->ticks_to_expire_minus) {
2672 ticker->ticks_to_expire -=
2673 ticker->ticks_to_expire_minus;
2674 ticker->ticks_to_expire_minus = 0U;
2675 } else {
2676 ticker->ticks_to_expire_minus -=
2677 ticker->ticks_to_expire;
2678 ticker->ticks_to_expire = 0U;
2679 }
2680 } else {
2681 return TICKER_STATUS_FAILURE;
2682 }
2683 }
2684
2685 /* Inserted/Scheduled */
2686 ticker->req = ticker->ack + 1;
2687
2688 return TICKER_STATUS_SUCCESS;
2689 }
2690 #endif /* !CONFIG_BT_TICKER_LOW_LAT */
2691
2692 /**
2693 * @brief Insert and start ticker nodes for all users
2694 *
 * @details Called by ticker_job to prepare, insert and start ticker nodes
 * for all users. Specifying an insert_head other than TICKER_NULL causes
 * that ticker node (and any nodes chained from it) to be inserted first.
 *
 * @param instance Pointer to ticker instance
 * @param insert_head Id of ticker node to insert, or TICKER_NULL if only
 * user operation inserts are to be handled
2702 * @internal
2703 */
2704 static inline void ticker_job_list_insert(struct ticker_instance *instance,
2705 uint8_t insert_head)
2706 {
2707 struct ticker_node *node;
2708 struct ticker_user *users;
2709 uint8_t count_user;
2710
2711 node = &instance->nodes[0];
2712 users = &instance->users[0];
2713 count_user = instance->count_user;
2714
2715 /* Iterate through all user ids */
2716 while (count_user--) {
2717 struct ticker_user_op *user_ops;
2718 struct ticker_user *user;
2719 uint8_t user_ops_first;
2720
2721 user = &users[count_user];
2722 user_ops = (void *)&user->user_op[0];
2723 user_ops_first = user->first;
2724 /* Traverse user operation queue - first to middle (wrap) */
2725 while ((insert_head != TICKER_NULL) ||
2726 (user_ops_first != user->middle)) {
2727 struct ticker_user_op *user_op;
2728 struct ticker_node *ticker;
2729 uint8_t id_insert;
2730 uint8_t status = TICKER_STATUS_SUCCESS;
2731
2732 if (insert_head != TICKER_NULL) {
2733 /* Prepare insert of ticker node specified by
2734 * insert_head
2735 */
2736 id_insert = insert_head;
2737 ticker = &node[id_insert];
2738 insert_head = ticker->next;
2739
2740 user_op = NULL;
2741 } else {
2742 /* Prepare insert of any ticker nodes requested
2743 * via user operation TICKER_USER_OP_TYPE_START
2744 */
2745 uint8_t first;
2746
2747 user_op = &user_ops[user_ops_first];
2748 first = user_ops_first + 1;
2749 if (first == user->count_user_op) {
2750 first = 0U;
2751 }
2752 user_ops_first = first;
2753
2754 id_insert = user_op->id;
2755 ticker = &node[id_insert];
2756 if (user_op->op != TICKER_USER_OP_TYPE_START) {
2757 /* User operation is not start - skip
2758 * to next operation
2759 */
2760 continue;
2761 }
2762
2763 #if defined(CONFIG_BT_TICKER_PREFER_START_BEFORE_STOP)
2764 ticker->start_pending = 0U;
2765 #endif /* CONFIG_BT_TICKER_PREFER_START_BEFORE_STOP */
2766
2767 if (((ticker->req -
2768 ticker->ack) & 0xff) != 0U) {
2769 ticker_job_op_cb(user_op,
2770 TICKER_STATUS_FAILURE);
2771 continue;
2772 }
2773
2774 /* Prepare ticker for start */
2775 status = ticker_job_op_start(instance, ticker, user_op,
2776 instance->ticks_current);
2777 }
2778
2779 if (!status) {
2780 /* Insert ticker node */
2781 status = ticker_job_insert(instance, id_insert, ticker,
2782 &insert_head);
2783 }
2784
2785 if (user_op) {
2786 ticker_job_op_cb(user_op, status);
2787
				if (!IS_ENABLED(CONFIG_BT_TICKER_LOW_LAT) &&
				    (ticker->ticks_periodic == 0U)) {
2791 ticker->fp_op_func =
2792 user_op->fp_op_func;
2793 ticker->op_context =
2794 user_op->op_context;
2795 }
2796 }
2797 }
2798
2799 #if !defined(CONFIG_BT_TICKER_JOB_IDLE_GET) && \
2800 !defined(CONFIG_BT_TICKER_NEXT_SLOT_GET) && \
2801 !defined(CONFIG_BT_TICKER_PRIORITY_SET)
2802 user->first = user_ops_first;
2803 #endif /* !CONFIG_BT_TICKER_JOB_IDLE_GET &&
2804 * !CONFIG_BT_TICKER_NEXT_SLOT_GET &&
2805 * !CONFIG_BT_TICKER_PRIORITY_SET
2806 */
2807
2808 }
2809 }
2810
2811 #if defined(CONFIG_BT_TICKER_JOB_IDLE_GET) || \
2812 defined(CONFIG_BT_TICKER_NEXT_SLOT_GET) || \
2813 defined(CONFIG_BT_TICKER_PRIORITY_SET)
2814 /**
2815 * @brief Perform inquiry for specific user operation
2816 *
2817 * @param instance Pointer to ticker instance
2818 * @param uop Pointer to user operation
2819 *
2820 * @internal
2821 */
2822 static inline void ticker_job_op_inquire(struct ticker_instance *instance,
2823 struct ticker_user_op *uop)
2824 {
2825 ticker_op_func fp_op_func;
2826
2827 fp_op_func = NULL;
2828 switch (uop->op) {
2829 #if defined(CONFIG_BT_TICKER_NEXT_SLOT_GET)
2830 case TICKER_USER_OP_TYPE_SLOT_GET:
2831 ticker_by_next_slot_get(instance,
2832 uop->params.slot_get.ticker_id,
2833 uop->params.slot_get.ticks_current,
2834 uop->params.slot_get.ticks_to_expire,
2835 #if defined(CONFIG_BT_TICKER_NEXT_SLOT_GET_MATCH)
2836 uop->params.slot_get.fp_match_op_func,
2837 uop->params.slot_get.match_op_context,
2838 #else
2839 NULL, NULL,
2840 #endif
2841 #if defined(CONFIG_BT_TICKER_REMAINDER_GET)
2842 uop->params.slot_get.remainder,
2843 #else /* !CONFIG_BT_TICKER_REMAINDER_GET */
2844 NULL,
2845 #endif /* !CONFIG_BT_TICKER_REMAINDER_GET */
2846 #if defined(CONFIG_BT_TICKER_LAZY_GET)
2847 uop->params.slot_get.lazy);
2848 #else /* !CONFIG_BT_TICKER_LAZY_GET */
2849 NULL);
2850 #endif /* !CONFIG_BT_TICKER_LAZY_GET */
2851 __fallthrough;
2852 #endif /* CONFIG_BT_TICKER_NEXT_SLOT_GET */
2853
2854 #if defined(CONFIG_BT_TICKER_JOB_IDLE_GET) || \
2855 defined(CONFIG_BT_TICKER_NEXT_SLOT_GET)
2856 case TICKER_USER_OP_TYPE_IDLE_GET:
2857 uop->status = TICKER_STATUS_SUCCESS;
2858 fp_op_func = uop->fp_op_func;
2859 break;
2860 #endif /* CONFIG_BT_TICKER_JOB_IDLE_GET ||
2861 * CONFIG_BT_TICKER_NEXT_SLOT_GET
2862 */
2863
2864 #if !defined(CONFIG_BT_TICKER_LOW_LAT) && \
2865 !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC) && \
2866 defined(CONFIG_BT_TICKER_PRIORITY_SET)
2867 case TICKER_USER_OP_TYPE_PRIORITY_SET:
2868 if (uop->id < instance->count_node) {
2869 struct ticker_node *node = instance->nodes;
2870
2871 node[uop->id].priority =
2872 uop->params.priority_set.priority;
2873 uop->status = TICKER_STATUS_SUCCESS;
2874 } else {
2875 uop->status = TICKER_STATUS_FAILURE;
2876 }
2877 fp_op_func = uop->fp_op_func;
2878 break;
#endif /* !CONFIG_BT_TICKER_LOW_LAT &&
	* !CONFIG_BT_TICKER_SLOT_AGNOSTIC &&
	* CONFIG_BT_TICKER_PRIORITY_SET
	*/
2883
2884 default:
2885 /* do nothing for other ops */
2886 break;
2887 }
2888
2889 if (fp_op_func) {
2890 fp_op_func(uop->status, uop->op_context);
2891 }
2892 }
2893
2894 /**
2895 * @brief Check for pending inquiries for all users
2896 *
 * @details Run through all user operation lists, checking for pending
 * inquiries. Currently only two types of inquiries are supported:
 * TICKER_USER_OP_TYPE_SLOT_GET and TICKER_USER_OP_TYPE_IDLE_GET. The
 * function also supports the user operation TICKER_USER_OP_TYPE_PRIORITY_SET.
 * The function modifies the user->first index, indicating that the user
 * operations are complete.
2903 *
2904 * @param instance Pointer to ticker instance
2905 *
2906 * @internal
2907 */
2908 static inline void ticker_job_list_inquire(struct ticker_instance *instance)
2909 {
2910 struct ticker_user *users;
2911 uint8_t count_user;
2912
2913 users = &instance->users[0];
2914 count_user = instance->count_user;
2915 /* Traverse user operation queue - first to last (with wrap) */
2916 while (count_user--) {
2917 struct ticker_user_op *user_op;
2918 struct ticker_user *user;
2919
2920 user = &users[count_user];
2921 user_op = &user->user_op[0];
2922 while (user->first != user->last) {
2923 uint8_t first;
2924
2925 ticker_job_op_inquire(instance, &user_op[user->first]);
2926
2927 first = user->first + 1;
2928 if (first == user->count_user_op) {
2929 first = 0U;
2930 }
2931 user->first = first;
2932 }
2933 }
2934 }
2935 #endif /* CONFIG_BT_TICKER_JOB_IDLE_GET ||
2936 * CONFIG_BT_TICKER_NEXT_SLOT_GET ||
2937 * CONFIG_BT_TICKER_PRIORITY_SET
2938 */
2939
2940 /**
2941 * @brief Update counter compare value (trigger)
2942 *
 * @details Updates the trigger to match the next expiring ticker node. The
 * function takes into consideration that it may be preempted in the process,
 * and makes sure - by iteration - that the compare value is set in the
 * future (with a margin).
2947 *
2948 * @param instance Pointer to ticker instance
 * @param ticker_id_old_head Previous ticker_id_head
 *
 * @return 1 if the ticker worker should be triggered immediately, else 0
2950 *
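 * @note Compare margin (a sketch of the calculation below): the new
 * compare value is placed at least the h/w set latency plus a minimum
 * offset ahead of the free-running counter:
 * @code
 * ticks_elapsed = ticks_diff + HAL_TICKER_CNTR_CMP_OFFSET_MIN +
 *                 HAL_TICKER_CNTR_SET_LATENCY;
 * cc += MAX(ticks_elapsed, ticks_to_expire);
 * cc &= HAL_TICKER_CNTR_MASK;
 * @endcode
 *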
2951 * @internal
2952 */
2953 static inline uint8_t
2954 ticker_job_compare_update(struct ticker_instance *instance,
2955 uint8_t ticker_id_old_head)
2956 {
2957 struct ticker_node *ticker;
2958 uint32_t ticks_to_expire;
2959 uint32_t ctr_curr;
2960 uint32_t ctr_prev;
2961 uint32_t cc;
2962 uint32_t i;
2963
2964 if (instance->ticker_id_head == TICKER_NULL) {
2965 if (cntr_stop() == 0) {
2966 #if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
2967 instance->ticks_slot_previous = 0U;
2968 #endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */
2969
2970 instance->ticks_current = cntr_cnt_get();
2971 }
2972
2973 return 0U;
2974 }
2975
2976 /* Check if this is the first update. If so, start the counter */
2977 if (ticker_id_old_head == TICKER_NULL) {
2978 uint32_t ticks_current;
2979
2980 ticks_current = cntr_cnt_get();
2981
2982 if (cntr_start() == 0) {
2983 instance->ticks_current = ticks_current;
2984 }
2985 }
2986
2987 ticker = &instance->nodes[instance->ticker_id_head];
2988 ticks_to_expire = ticker->ticks_to_expire;
2989
	/* If ticks_to_expire is zero, then trigger the worker immediately.
	 * Under BT_TICKER_LOW_LAT the mesh loopback test fails, pending
	 * investigation; hence the immediate trigger is not used for
	 * BT_TICKER_LOW_LAT.
	 */
2994 if (!IS_ENABLED(CONFIG_BT_TICKER_LOW_LAT) && !ticks_to_expire) {
2995 return 1U;
2996 }
2997
	/* Iterate a few times, if required, to ensure that the compare is
	 * correctly set to a future value. This is required in case the
	 * operation is pre-empted and the current h/w counter runs ahead of
	 * the compare value to be set.
	 */
3003 i = 10U;
3004 ctr_curr = cntr_cnt_get();
3005 do {
3006 uint32_t ticks_elapsed;
3007 uint32_t ticks_diff;
3008
3009 LL_ASSERT(i);
3010 i--;
3011
3012 cc = instance->ticks_current;
3013 ticks_diff = ticker_ticks_diff_get(ctr_curr, cc);
		/* Under BT_TICKER_LOW_LAT the bsim test fails, pending
		 * investigation; hence the immediate trigger is not used
		 * for BT_TICKER_LOW_LAT.
		 */
3018 if (!IS_ENABLED(CONFIG_BT_TICKER_LOW_LAT) &&
3019 (ticks_diff >= ticks_to_expire)) {
3020 return 1U;
3021 }
3022
3023 ticks_elapsed = ticks_diff + HAL_TICKER_CNTR_CMP_OFFSET_MIN +
3024 HAL_TICKER_CNTR_SET_LATENCY;
3025 cc += MAX(ticks_elapsed, ticks_to_expire);
3026 cc &= HAL_TICKER_CNTR_MASK;
3027 instance->trigger_set_cb(cc);
3028
3029 ctr_prev = ctr_curr;
3030 ctr_curr = cntr_cnt_get();
3031 } while ((ticker_ticks_diff_get(ctr_curr, ctr_prev) +
3032 HAL_TICKER_CNTR_CMP_OFFSET_MIN) >
3033 ticker_ticks_diff_get(cc, ctr_prev));
3034
3035 return 0U;
3036 }
3037
3038 /**
3039 * @brief Ticker job
3040 *
3041 * @details Runs the bottom half of the ticker, after ticker nodes have elapsed
3042 * or user operations requested. The ticker_job is responsible for removing and
3043 * re-inserting ticker nodes, based on next elapsing and periodicity of the
3044 * nodes. The ticker_job is also responsible for processing user operations,
3045 * i.e. requests for start, update, stop etc.
3046 * Invoked from the ticker job mayfly context (TICKER_MAYFLY_CALL_ID_JOB).
3047 *
3048 * @param param Pointer to ticker instance
3049 *
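 * @note Concurrency (a sketch mirroring the guards at the top of this
 * function): the job yields to a pending worker, and defers itself when
 * another job invocation is already in progress:
 * @code
 * if (instance->worker_trigger) {
 *         return; // worker pending; the job will be re-triggered
 * }
 *
 * if (instance->job_guard) {
 *         instance->sched_cb(TICKER_CALL_ID_JOB, TICKER_CALL_ID_JOB, 1,
 *                            instance);
 *         return;
 * }
 * @endcode
 *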
3050 * @internal
3051 */
3052 void ticker_job(void *param)
3053 {
3054 struct ticker_instance *instance = param;
3055 uint8_t flag_compare_update;
3056 uint8_t ticker_id_old_head;
3057 uint8_t compare_trigger;
3058 uint32_t ticks_previous;
3059 uint32_t ticks_elapsed;
3060 uint8_t flag_elapsed;
3061 uint8_t insert_head;
3062 uint32_t ticks_now;
3063 uint8_t pending;
3064
3065 DEBUG_TICKER_JOB(1);
3066
3067 /* Defer job, as worker is running */
3068 if (instance->worker_trigger) {
3069 DEBUG_TICKER_JOB(0);
3070 return;
3071 }
3072
3073 /* Defer job, as job is already running */
3074 if (instance->job_guard) {
3075 instance->sched_cb(TICKER_CALL_ID_JOB, TICKER_CALL_ID_JOB, 1,
3076 instance);
3077 return;
3078 }
3079 instance->job_guard = 1U;
3080
3081 /* Back up the previous known tick */
3082 ticks_previous = instance->ticks_current;
3083
3084 /* Update current tick with the elapsed value from queue, and dequeue */
3085 if (instance->ticks_elapsed_first != instance->ticks_elapsed_last) {
3086 ticker_next_elapsed(&instance->ticks_elapsed_first);
3087
3088 ticks_elapsed =
3089 instance->ticks_elapsed[instance->ticks_elapsed_first];
3090
3091 instance->ticks_current += ticks_elapsed;
3092 instance->ticks_current &= HAL_TICKER_CNTR_MASK;
3093
3094 flag_elapsed = 1U;
3095 } else {
3096 /* No elapsed value in queue */
3097 flag_elapsed = 0U;
3098 ticks_elapsed = 0U;
3099 }
3100
3101 /* Initialise internal re-insert list */
3102 insert_head = TICKER_NULL;
3103
3104 /* Initialise flag used to update next compare value */
3105 flag_compare_update = 0U;
3106
3107 /* Remember the old head, so as to decide if new compare needs to be
3108 * set.
3109 */
3110 ticker_id_old_head = instance->ticker_id_head;
3111
3112 /* Manage user operations (updates and deletions) in ticker list */
3113 ticks_now = cntr_cnt_get();
3114 pending = ticker_job_list_manage(instance, ticks_now, ticks_elapsed,
3115 &insert_head);
3116
3117 /* Detect change in head of the list */
3118 if (instance->ticker_id_head != ticker_id_old_head) {
3119 flag_compare_update = 1U;
3120 }
3121
3122 /* Handle expired tickers */
3123 if (flag_elapsed) {
3124 ticker_job_worker_bh(instance, ticks_now, ticks_previous,
3125 ticks_elapsed, &insert_head);
3126
3127 /* Detect change in head of the list */
3128 if (instance->ticker_id_head != ticker_id_old_head) {
3129 flag_compare_update = 1U;
3130 }
3131
3132 /* Handle insertions */
3133 ticker_job_list_insert(instance, insert_head);
3134
3135 #if defined(CONFIG_BT_TICKER_EXT) && !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC) &&\
3136 !defined(CONFIG_BT_TICKER_LOW_LAT)
3137 /* Re-schedule any pending nodes with slot_window */
3138 if (ticker_job_reschedule_in_window(instance, ticks_elapsed)) {
3139 flag_compare_update = 1U;
3140 }
#endif /* CONFIG_BT_TICKER_EXT && !CONFIG_BT_TICKER_SLOT_AGNOSTIC &&
	* !CONFIG_BT_TICKER_LOW_LAT
	*/
3142 } else {
3143 /* Handle insertions */
3144 ticker_job_list_insert(instance, insert_head);
3145 }
3146
3147 /* Detect change in head of the list */
3148 if (instance->ticker_id_head != ticker_id_old_head) {
3149 flag_compare_update = 1U;
3150 }
3151
3152 #if defined(CONFIG_BT_TICKER_JOB_IDLE_GET) || \
3153 defined(CONFIG_BT_TICKER_NEXT_SLOT_GET) || \
3154 defined(CONFIG_BT_TICKER_PRIORITY_SET)
3155 /* Process any list inquiries */
3156 if (!pending) {
3157 /* Handle inquiries */
3158 ticker_job_list_inquire(instance);
3159 }
3160 #endif /* CONFIG_BT_TICKER_JOB_IDLE_GET ||
3161 * CONFIG_BT_TICKER_NEXT_SLOT_GET ||
3162 * CONFIG_BT_TICKER_PRIORITY_SET
3163 */
3164
3165 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
3166 if (instance->expire_infos_outdated) {
3167 ticker_job_update_expire_infos(instance);
3168 }
3169 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
3170
3171 /* update compare if head changed */
3172 if (flag_compare_update) {
3173 compare_trigger = ticker_job_compare_update(instance,
3174 ticker_id_old_head);
3175 } else {
3176 compare_trigger = 0U;
3177 }
3178
3179 /* Permit worker to run */
3180 instance->job_guard = 0U;
3181
3182 /* trigger worker if deferred */
3183 cpu_dmb();
3184 if (instance->worker_trigger || compare_trigger) {
3185 instance->sched_cb(TICKER_CALL_ID_JOB, TICKER_CALL_ID_WORKER, 1,
3186 instance);
3187 }
3188
3189 DEBUG_TICKER_JOB(0);
3190 }
3191
3192 /*****************************************************************************
3193 * Public Interface
3194 ****************************************************************************/
3195
3196 /**
3197 * @brief Initialize ticker instance
3198 *
3199 * @details Called by ticker instance client once to initialize the ticker.
3200 *
3201 * @param instance_index Index of ticker instance
3202 * @param count_node Number of ticker nodes in node array
3203 * @param node Pointer to ticker node array
3204 * @param count_user Number of users in user array
3205 * @param user Pointer to user array of size count_user
3206 * @param count_op Number of user operations in user_op array
3207 * @param user_op Pointer to user operations array of size count_op
3208 * @param caller_id_get_cb Pointer to function for retrieving caller_id from
3209 * user id
3210 * @param sched_cb Pointer to function for scheduling ticker_worker
3211 * and ticker_job
3212 * @param trigger_set_cb Pointer to function for setting the compare trigger
3213 * ticks value
3214 *
3215 * @return TICKER_STATUS_SUCCESS if initialization was successful, otherwise
3216 * TICKER_STATUS_FAILURE
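 *
 * @note Usage sketch; the array sizes and callback names below are
 * hypothetical and are supplied by the ticker instance client. Each
 * users[i].count_user_op must be pre-set so that the per-user operation
 * queues together consume exactly the user_op array:
 * @code
 * static struct ticker_node node[TICKER_NODES_EXAMPLE];
 * static struct ticker_user user[TICKER_USERS_EXAMPLE];
 * static struct ticker_user_op user_op[TICKER_USER_OPS_EXAMPLE];
 *
 * uint8_t err = ticker_init(0U, TICKER_NODES_EXAMPLE, &node[0],
 *                           TICKER_USERS_EXAMPLE, &user[0],
 *                           TICKER_USER_OPS_EXAMPLE, &user_op[0],
 *                           caller_id_get_cb, sched_cb, trigger_set_cb);
 * LL_ASSERT(err == TICKER_STATUS_SUCCESS);
 * @endcode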
3217 */
3218 uint8_t ticker_init(uint8_t instance_index, uint8_t count_node, void *node,
3219 uint8_t count_user, void *user, uint8_t count_op, void *user_op,
3220 ticker_caller_id_get_cb_t caller_id_get_cb,
3221 ticker_sched_cb_t sched_cb,
3222 ticker_trigger_set_cb_t trigger_set_cb)
3223 {
3224 struct ticker_instance *instance = &_instance[instance_index];
3225 struct ticker_user_op *user_op_ = (void *)user_op;
3226 struct ticker_user *users;
3227
3228 if (instance_index >= TICKER_INSTANCE_MAX) {
3229 return TICKER_STATUS_FAILURE;
3230 }
3231
3232 instance->count_node = count_node;
3233 instance->nodes = node;
3234
3235 #if !defined(CONFIG_BT_TICKER_LOW_LAT) && \
3236 !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC) && \
3237 defined(CONFIG_BT_TICKER_PRIORITY_SET)
3238 while (count_node--) {
3239 instance->nodes[count_node].priority = 0;
3240 }
#endif /* !CONFIG_BT_TICKER_LOW_LAT &&
	* !CONFIG_BT_TICKER_SLOT_AGNOSTIC &&
	* CONFIG_BT_TICKER_PRIORITY_SET
	*/
3245
3246 instance->count_user = count_user;
3247 instance->users = user;
3248
3249 /** @todo check if enough ticker_user_op supplied */
3250
3251 users = &instance->users[0];
3252 while (count_user--) {
3253 users[count_user].user_op = user_op_;
3254 user_op_ += users[count_user].count_user_op;
3255 count_op -= users[count_user].count_user_op;
3256 }
3257
3258 if (count_op) {
3259 return TICKER_STATUS_FAILURE;
3260 }
3261
3262 instance->caller_id_get_cb = caller_id_get_cb;
3263 instance->sched_cb = sched_cb;
3264 instance->trigger_set_cb = trigger_set_cb;
3265
3266 instance->ticker_id_head = TICKER_NULL;
3267 instance->ticks_current = cntr_cnt_get();
3268 instance->ticks_elapsed_first = 0U;
3269 instance->ticks_elapsed_last = 0U;
3270
3271 #if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
3272 instance->ticker_id_slot_previous = TICKER_NULL;
3273 instance->ticks_slot_previous = 0U;
3274 #endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */
3275
3276 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
3277 for (int i = 0; i < TICKER_EXPIRE_INFO_MAX; i++) {
3278 instance->expire_infos[i].ticker_id = TICKER_NULL;
3279 instance->expire_infos[i].last = 1;
3280 }
3281 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
3282
3283 return TICKER_STATUS_SUCCESS;
3284 }
3285
3286 /**
3287 * @brief Check if ticker instance is initialized
3288 *
3289 * @param instance_index Index of ticker instance
3290 *
3291 * @return true if ticker instance is initialized, false otherwise
3292 */
3293 bool ticker_is_initialized(uint8_t instance_index)
3294 {
3295 return !!(_instance[instance_index].count_node);
3296 }
3297
3298 /**
3299 * @brief Trigger the ticker worker
3300 *
3301 * @details Schedules the ticker_worker upper half by invoking the
3302 * corresponding mayfly.
3303 *
3304 * @param instance_index Index of ticker instance
3305 */
3306 void ticker_trigger(uint8_t instance_index)
3307 {
3308 struct ticker_instance *instance;
3309
3310 DEBUG_TICKER_ISR(1);
3311
3312 instance = &_instance[instance_index];
3313 if (instance->sched_cb) {
3314 instance->sched_cb(TICKER_CALL_ID_TRIGGER,
3315 TICKER_CALL_ID_WORKER, 1, instance);
3316 }
3317
3318 DEBUG_TICKER_ISR(0);
3319 }
3320
3321 /**
3322 * @brief Start a ticker node
3323 *
3324 * @details Creates a new user operation of type TICKER_USER_OP_TYPE_START and
3325 * schedules the ticker_job.
3326 *
3327 * @param instance_index Index of ticker instance
3328 * @param user_id Ticker user id. Used for indexing user operations
3329 * and mapping to mayfly caller id
3330 * @param ticker_id Id of ticker node
3331 * @param ticks_anchor Absolute tick count as anchor point for
3332 * ticks_first
3333 * @param ticks_first Initial number of ticks before first timeout
3334 * @param ticks_periodic Number of ticks for a periodic ticker node. If 0,
3335 * ticker node is treated as one-shot
3336 * @param remainder_periodic Periodic ticks fraction
3337 * @param lazy Number of periods to skip (latency). A value of 1
3338 * causes skipping every other timeout
3339 * @param ticks_slot Slot reservation ticks for node (air-time)
3340 * @param ticks_slot_window Window in which the slot reservation may be
3341 * re-scheduled to avoid collision. Set to 0 for
3342 * legacy behavior
3343 * @param fp_timeout_func Function pointer of function to call at timeout
3344 * @param context Context passed in timeout call
3345 * @param fp_op_func Function pointer of user operation completion
3346 * function
3347 * @param op_context Context passed in operation completion call
3348 *
3349 * @return TICKER_STATUS_BUSY if start was successful but not yet completed.
3350 * TICKER_STATUS_FAILURE is returned if there are no more user operations
3351 * available, and TICKER_STATUS_SUCCESS is returned if ticker_job gets to
3352 * run before exiting ticker_start
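 *
 * @note Usage sketch; the ids, tick values and callbacks below are
 * hypothetical (HAL_TICKER_US_TO_TICKS and HAL_TICKER_REMAINDER are from
 * hal/ticker.h). Starts a periodic ticker with a 10 ms interval, anchored
 * at the current counter value, with no latency and no slot reservation:
 * @code
 * uint8_t ret;
 *
 * ret = ticker_start(0U, user_id, ticker_id,
 *                    cntr_cnt_get(),                // ticks_anchor
 *                    HAL_TICKER_US_TO_TICKS(10000), // ticks_first
 *                    HAL_TICKER_US_TO_TICKS(10000), // ticks_periodic
 *                    HAL_TICKER_REMAINDER(10000),   // remainder_periodic
 *                    0U, 0U,                        // lazy, ticks_slot
 *                    timeout_cb, NULL, op_cb, NULL);
 * LL_ASSERT((ret == TICKER_STATUS_SUCCESS) ||
 *           (ret == TICKER_STATUS_BUSY));
 * @endcode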
3353 */
3354 #if defined(CONFIG_BT_TICKER_EXT)
3355 uint8_t ticker_start(uint8_t instance_index, uint8_t user_id, uint8_t ticker_id,
3356 uint32_t ticks_anchor, uint32_t ticks_first, uint32_t ticks_periodic,
3357 uint32_t remainder_periodic, uint16_t lazy, uint32_t ticks_slot,
3358 ticker_timeout_func fp_timeout_func, void *context,
3359 ticker_op_func fp_op_func, void *op_context)
3360 {
3361 return ticker_start_ext(instance_index, user_id, ticker_id,
3362 ticks_anchor, ticks_first, ticks_periodic,
3363 remainder_periodic, lazy, ticks_slot,
3364 fp_timeout_func, context,
3365 fp_op_func, op_context,
3366 NULL);
3367 }
3368
3369 static uint8_t start_us(uint8_t instance_index, uint8_t user_id,
3370 uint8_t ticker_id, uint32_t ticks_anchor,
3371 uint32_t ticks_first, uint32_t remainder_first,
3372 uint32_t ticks_periodic, uint32_t remainder_periodic,
3373 uint16_t lazy, uint32_t ticks_slot,
3374 ticker_timeout_func fp_timeout_func, void *context,
3375 ticker_op_func fp_op_func, void *op_context,
3376 struct ticker_ext *ext_data);
3377
3378 uint8_t ticker_start_us(uint8_t instance_index, uint8_t user_id,
3379 uint8_t ticker_id, uint32_t ticks_anchor,
3380 uint32_t ticks_first, uint32_t remainder_first,
3381 uint32_t ticks_periodic, uint32_t remainder_periodic,
3382 uint16_t lazy, uint32_t ticks_slot,
3383 ticker_timeout_func fp_timeout_func, void *context,
3384 ticker_op_func fp_op_func, void *op_context)
3385 {
3386 return start_us(instance_index, user_id, ticker_id, ticks_anchor,
3387 ticks_first, remainder_first,
3388 ticks_periodic, remainder_periodic,
3389 lazy, ticks_slot,
3390 fp_timeout_func, context,
3391 fp_op_func, op_context,
3392 NULL);
3393 }
3394
3395 uint8_t ticker_start_ext(uint8_t instance_index, uint8_t user_id, uint8_t ticker_id,
3396 uint32_t ticks_anchor, uint32_t ticks_first,
3397 uint32_t ticks_periodic, uint32_t remainder_periodic,
3398 uint16_t lazy, uint32_t ticks_slot,
3399 ticker_timeout_func fp_timeout_func, void *context,
3400 ticker_op_func fp_op_func, void *op_context,
3401 struct ticker_ext *ext_data)
3402 {
3403 return start_us(instance_index, user_id, ticker_id, ticks_anchor,
3404 ticks_first, 0U, ticks_periodic, remainder_periodic,
3405 lazy, ticks_slot,
3406 fp_timeout_func, context,
3407 fp_op_func, op_context,
3408 ext_data);
3409 }
3410
3411 static uint8_t start_us(uint8_t instance_index, uint8_t user_id,
3412 uint8_t ticker_id, uint32_t ticks_anchor,
3413 uint32_t ticks_first, uint32_t remainder_first,
3414 uint32_t ticks_periodic, uint32_t remainder_periodic,
3415 uint16_t lazy, uint32_t ticks_slot,
3416 ticker_timeout_func fp_timeout_func, void *context,
3417 ticker_op_func fp_op_func, void *op_context,
3418 struct ticker_ext *ext_data)
3419
3420 #else /* !CONFIG_BT_TICKER_EXT */
3421 uint8_t ticker_start(uint8_t instance_index, uint8_t user_id, uint8_t ticker_id,
3422 uint32_t ticks_anchor, uint32_t ticks_first, uint32_t ticks_periodic,
3423 uint32_t remainder_periodic, uint16_t lazy, uint32_t ticks_slot,
3424 ticker_timeout_func fp_timeout_func, void *context,
3425 ticker_op_func fp_op_func, void *op_context)
3426 {
3427 return ticker_start_us(instance_index, user_id,
3428 ticker_id, ticks_anchor,
3429 ticks_first, 0U,
3430 ticks_periodic, remainder_periodic,
3431 lazy, ticks_slot,
3432 fp_timeout_func, context,
3433 fp_op_func, op_context);
3434 }
3435
3436 uint8_t ticker_start_us(uint8_t instance_index, uint8_t user_id,
3437 uint8_t ticker_id, uint32_t ticks_anchor,
3438 uint32_t ticks_first, uint32_t remainder_first,
3439 uint32_t ticks_periodic, uint32_t remainder_periodic,
3440 uint16_t lazy, uint32_t ticks_slot,
3441 ticker_timeout_func fp_timeout_func, void *context,
3442 ticker_op_func fp_op_func, void *op_context)
3443 #endif /* !CONFIG_BT_TICKER_EXT */
3444
3445 {
3446 struct ticker_instance *instance = &_instance[instance_index];
3447 struct ticker_user_op *user_op;
3448 struct ticker_user *user;
3449 uint8_t last;
3450
3451 user = &instance->users[user_id];
3452
3453 last = user->last + 1;
3454 if (last >= user->count_user_op) {
3455 last = 0U;
3456 }
3457
3458 if (last == user->first) {
3459 return TICKER_STATUS_FAILURE;
3460 }
3461
3462 user_op = &user->user_op[user->last];
3463 user_op->op = TICKER_USER_OP_TYPE_START;
3464 user_op->id = ticker_id;
3465 user_op->params.start.ticks_at_start = ticks_anchor;
3466 user_op->params.start.ticks_first = ticks_first;
3467 #if defined(CONFIG_BT_TICKER_REMAINDER)
3468 user_op->params.start.remainder_first = remainder_first;
#endif /* CONFIG_BT_TICKER_REMAINDER */
3470 user_op->params.start.ticks_periodic = ticks_periodic;
3471 user_op->params.start.remainder_periodic = remainder_periodic;
3472 #if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
3473 user_op->params.start.ticks_slot = ticks_slot;
3474 #endif
3475 user_op->params.start.lazy = lazy;
3476 #if defined(CONFIG_BT_TICKER_EXT)
3477 user_op->params.start.ext_data = ext_data;
3478 #endif
3479 user_op->params.start.fp_timeout_func = fp_timeout_func;
3480 user_op->params.start.context = context;
3481 user_op->status = TICKER_STATUS_BUSY;
3482 user_op->fp_op_func = fp_op_func;
3483 user_op->op_context = op_context;
3484
3485 /* Make sure transaction is completed before committing */
3486 cpu_dmb();
3487 user->last = last;
3488
3489 instance->sched_cb(instance->caller_id_get_cb(user_id),
3490 TICKER_CALL_ID_JOB, 0, instance);
3491
3492 return user_op->status;
3493 }
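
/*
 * Illustrative usage sketch, not part of this file: start a 10 ms periodic
 * ticker anchored at the current tick count. The instance/user/ticker ids
 * are assumed placeholders, and the timeout function signature must match
 * the ticker_timeout_func typedef in ticker.h for this tree; the
 * HAL_TICKER_US_TO_TICKS and HAL_TICKER_REMAINDER macros come from the HAL.
 *
 *	static void example_timeout(uint32_t ticks_at_expire, uint32_t ticks_drift,
 *				    uint32_t remainder, uint16_t lazy,
 *				    uint8_t force, void *context)
 *	{
 *		// Runs in ticker_worker context on each expiration
 *	}
 *
 *	uint8_t ret;
 *
 *	ret = ticker_start(0, 0, 0,                        // instance, user, ticker id
 *			   ticker_ticks_now_get(),         // anchor at "now"
 *			   HAL_TICKER_US_TO_TICKS(10000),  // first expiry in 10 ms
 *			   HAL_TICKER_US_TO_TICKS(10000),  // 10 ms period
 *			   HAL_TICKER_REMAINDER(10000),    // sub-tick remainder
 *			   0U,                             // lazy: no latency
 *			   0U,                             // ticks_slot: no reservation
 *			   example_timeout, NULL,          // timeout func + context
 *			   NULL, NULL);                    // no completion callback
 *	// TICKER_STATUS_BUSY means the op was enqueued for ticker_job
 */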

#if defined(CONFIG_BT_TICKER_UPDATE)
/**
 * @brief Update a ticker node
 *
 * @details Creates a new user operation of type TICKER_USER_OP_TYPE_UPDATE and
 * schedules the ticker_job.
 *
 * @param instance_index    Index of ticker instance
 * @param user_id           Ticker user id. Used for indexing user operations
 *                          and mapping to mayfly caller id
 * @param ticker_id         Id of ticker node
 * @param ticks_drift_plus  Number of ticks to add for drift compensation
 * @param ticks_drift_minus Number of ticks to subtract for drift compensation
 * @param ticks_slot_plus   Number of ticks to add to slot reservation
 * @param ticks_slot_minus  Number of ticks to subtract from slot reservation
 * @param lazy              Number of periods to skip (latency). A value of 0
 *                          means no action. 1 means no latency (normal). A
 *                          value >1 means latency = lazy - 1
 * @param force             Force update to take effect immediately. With
 *                          force = 0, update is scheduled to take effect as
 *                          soon as possible
 * @param fp_op_func        Function pointer of user operation completion
 *                          function
 * @param op_context        Context passed in operation completion call
 * @param must_expire       Disable, enable or ignore the must-expire state.
 *                          A value of 0 means no change, 1 means disable and
 *                          2 means enable.
 *
 * @return TICKER_STATUS_BUSY if update was successful but not yet completed.
 * TICKER_STATUS_FAILURE is returned if there are no more user operations
 * available, and TICKER_STATUS_SUCCESS is returned if ticker_job gets to run
 * before exiting ticker_update
 */
uint8_t ticker_update(uint8_t instance_index, uint8_t user_id,
		      uint8_t ticker_id, uint32_t ticks_drift_plus,
		      uint32_t ticks_drift_minus, uint32_t ticks_slot_plus,
		      uint32_t ticks_slot_minus, uint16_t lazy, uint8_t force,
		      ticker_op_func fp_op_func, void *op_context)
#if defined(CONFIG_BT_TICKER_EXT)
#if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
{
	return ticker_update_ext(instance_index, user_id, ticker_id,
				 ticks_drift_plus, ticks_drift_minus,
				 ticks_slot_plus, ticks_slot_minus, lazy,
				 force, fp_op_func, op_context, 0U, ticker_id);
}

uint8_t ticker_update_ext(uint8_t instance_index, uint8_t user_id,
			  uint8_t ticker_id, uint32_t ticks_drift_plus,
			  uint32_t ticks_drift_minus,
			  uint32_t ticks_slot_plus, uint32_t ticks_slot_minus,
			  uint16_t lazy, uint8_t force,
			  ticker_op_func fp_op_func, void *op_context,
			  uint8_t must_expire, uint8_t expire_info_id)
#else /* !CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
{
	return ticker_update_ext(instance_index, user_id, ticker_id,
				 ticks_drift_plus, ticks_drift_minus,
				 ticks_slot_plus, ticks_slot_minus, lazy,
				 force, fp_op_func, op_context, 0U);
}

uint8_t ticker_update_ext(uint8_t instance_index, uint8_t user_id,
			  uint8_t ticker_id, uint32_t ticks_drift_plus,
			  uint32_t ticks_drift_minus,
			  uint32_t ticks_slot_plus, uint32_t ticks_slot_minus,
			  uint16_t lazy, uint8_t force,
			  ticker_op_func fp_op_func, void *op_context,
			  uint8_t must_expire)
#endif /* !CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
#endif /* CONFIG_BT_TICKER_EXT */
{
	struct ticker_instance *instance = &_instance[instance_index];
	struct ticker_user_op *user_op;
	struct ticker_user *user;
	uint8_t last;

	user = &instance->users[user_id];

	last = user->last + 1;
	if (last >= user->count_user_op) {
		last = 0U;
	}

	if (last == user->first) {
		return TICKER_STATUS_FAILURE;
	}

	user_op = &user->user_op[user->last];
	user_op->op = TICKER_USER_OP_TYPE_UPDATE;
	user_op->id = ticker_id;
	user_op->params.update.ticks_drift_plus = ticks_drift_plus;
	user_op->params.update.ticks_drift_minus = ticks_drift_minus;
#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
	user_op->params.update.ticks_slot_plus = ticks_slot_plus;
	user_op->params.update.ticks_slot_minus = ticks_slot_minus;
#endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */
	user_op->params.update.lazy = lazy;
	user_op->params.update.force = force;
#if defined(CONFIG_BT_TICKER_EXT)
#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC) && \
	!defined(CONFIG_BT_TICKER_LOW_LAT)
	user_op->params.update.must_expire = must_expire;
#endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC && !CONFIG_BT_TICKER_LOW_LAT */
#if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
	user_op->params.update.expire_info_id = expire_info_id;
#endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
#endif /* CONFIG_BT_TICKER_EXT */
	user_op->status = TICKER_STATUS_BUSY;
	user_op->fp_op_func = fp_op_func;
	user_op->op_context = op_context;

	/* Make sure transaction is completed before committing */
	cpu_dmb();
	user->last = last;

	instance->sched_cb(instance->caller_id_get_cb(user_id),
			   TICKER_CALL_ID_JOB, 0, instance);

	return user_op->status;
}
#endif /* CONFIG_BT_TICKER_UPDATE */
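
/*
 * Illustrative usage sketch, not part of this file: compensate a ticker for
 * 50 us of measured drift without touching its slot or latency. The ids are
 * assumed placeholders; HAL_TICKER_US_TO_TICKS is the HAL conversion macro
 * used throughout this driver.
 *
 *	uint8_t ret;
 *
 *	ret = ticker_update(0, 0, 0,                     // instance, user, ticker id
 *			    HAL_TICKER_US_TO_TICKS(50),  // ticks_drift_plus
 *			    0U,                          // ticks_drift_minus
 *			    0U, 0U,                      // slot unchanged
 *			    0U,                          // lazy: 0 = no action
 *			    0U,                          // force: as soon as possible
 *			    NULL, NULL);                 // no completion callback
 */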

/**
 * @brief Yield a ticker node with supplied absolute ticks reference
 *
 * @details Creates a new user operation of type TICKER_USER_OP_TYPE_YIELD_ABS
 * and schedules the ticker_job.
 *
 * @param instance_index Index of ticker instance
 * @param user_id        Ticker user id. Used for indexing user operations
 *                       and mapping to mayfly caller id
 * @param ticker_id      Id of ticker node
 * @param ticks_at_yield Absolute tick count at ticker yield request
 * @param fp_op_func     Function pointer of user operation completion
 *                       function
 * @param op_context     Context passed in operation completion call
 *
 * @return TICKER_STATUS_BUSY if the yield was successful but not yet
 * completed. TICKER_STATUS_FAILURE is returned if there are no more user
 * operations available, and TICKER_STATUS_SUCCESS is returned if ticker_job
 * gets to run before exiting ticker_yield_abs
 */
uint8_t ticker_yield_abs(uint8_t instance_index, uint8_t user_id,
			 uint8_t ticker_id, uint32_t ticks_at_yield,
			 ticker_op_func fp_op_func, void *op_context)
{
	struct ticker_instance *instance = &_instance[instance_index];
	struct ticker_user_op *user_op;
	struct ticker_user *user;
	uint8_t last;

	user = &instance->users[user_id];

	last = user->last + 1;
	if (last >= user->count_user_op) {
		last = 0U;
	}

	if (last == user->first) {
		return TICKER_STATUS_FAILURE;
	}

	user_op = &user->user_op[user->last];
	user_op->op = TICKER_USER_OP_TYPE_YIELD_ABS;
	user_op->id = ticker_id;
	user_op->params.yield.ticks_at_yield = ticks_at_yield;
	user_op->status = TICKER_STATUS_BUSY;
	user_op->fp_op_func = fp_op_func;
	user_op->op_context = op_context;

	/* Make sure transaction is completed before committing */
	cpu_dmb();
	user->last = last;

	instance->sched_cb(instance->caller_id_get_cb(user_id),
			   TICKER_CALL_ID_JOB, 0, instance);

	return user_op->status;
}
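
/*
 * Illustrative usage sketch, not part of this file: yield a ticker node as
 * of the current tick count, giving up its reservation to competing nodes.
 * Ids are assumed placeholders.
 *
 *	uint8_t ret;
 *
 *	ret = ticker_yield_abs(0, 0, 0,                  // instance, user, ticker id
 *			       ticker_ticks_now_get(),   // absolute yield reference
 *			       NULL, NULL);              // no completion callback
 */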

/**
 * @brief Stop a ticker node
 *
 * @details Creates a new user operation of type TICKER_USER_OP_TYPE_STOP and
 * schedules the ticker_job.
 *
 * @param instance_index Index of ticker instance
 * @param user_id        Ticker user id. Used for indexing user operations
 *                       and mapping to mayfly caller id
 * @param ticker_id      Id of ticker node
 * @param fp_op_func     Function pointer of user operation completion
 *                       function
 * @param op_context     Context passed in operation completion call
 *
 * @return TICKER_STATUS_BUSY if stop was successful but not yet completed.
 * TICKER_STATUS_FAILURE is returned if there are no more user operations
 * available, and TICKER_STATUS_SUCCESS is returned if ticker_job gets to run
 * before exiting ticker_stop
 */
uint8_t ticker_stop(uint8_t instance_index, uint8_t user_id, uint8_t ticker_id,
		    ticker_op_func fp_op_func, void *op_context)
{
	struct ticker_instance *instance = &_instance[instance_index];
	struct ticker_user_op *user_op;
	struct ticker_user *user;
	uint8_t last;

	user = &instance->users[user_id];

	last = user->last + 1;
	if (last >= user->count_user_op) {
		last = 0U;
	}

	if (last == user->first) {
		return TICKER_STATUS_FAILURE;
	}

	user_op = &user->user_op[user->last];
	user_op->op = TICKER_USER_OP_TYPE_STOP;
	user_op->id = ticker_id;
	user_op->status = TICKER_STATUS_BUSY;
	user_op->fp_op_func = fp_op_func;
	user_op->op_context = op_context;

	/* Make sure transaction is completed before committing */
	cpu_dmb();
	user->last = last;

	instance->sched_cb(instance->caller_id_get_cb(user_id),
			   TICKER_CALL_ID_JOB, 0, instance);

	return user_op->status;
}
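
/*
 * Illustrative usage sketch, not part of this file: stop a ticker and get
 * notified once the operation has actually been processed by ticker_job.
 * Ids and the callback name are assumed placeholders; the callback follows
 * the ticker_op_func signature (status, op_context).
 *
 *	static void stop_op_cb(uint32_t status, void *op_context)
 *	{
 *		// status is TICKER_STATUS_SUCCESS when the node was stopped
 *	}
 *
 *	uint8_t ret;
 *
 *	ret = ticker_stop(0, 0, 0,             // instance, user, ticker id
 *			  stop_op_cb, NULL);   // completion callback + context
 *	// ret is TICKER_STATUS_BUSY until stop_op_cb reports the final status
 */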

/**
 * @brief Stop a ticker node with supplied absolute ticks reference
 *
 * @details Creates a new user operation of type TICKER_USER_OP_TYPE_STOP_ABS
 * and schedules the ticker_job.
 *
 * @param instance_index Index of ticker instance
 * @param user_id        Ticker user id. Used for indexing user operations
 *                       and mapping to mayfly caller id
 * @param ticker_id      Id of ticker node
 * @param ticks_at_stop  Absolute tick count at ticker stop request
 * @param fp_op_func     Function pointer of user operation completion
 *                       function
 * @param op_context     Context passed in operation completion call
 *
 * @return TICKER_STATUS_BUSY if stop was successful but not yet completed.
 * TICKER_STATUS_FAILURE is returned if there are no more user operations
 * available, and TICKER_STATUS_SUCCESS is returned if ticker_job gets to run
 * before exiting ticker_stop_abs
 */
uint8_t ticker_stop_abs(uint8_t instance_index, uint8_t user_id,
			uint8_t ticker_id, uint32_t ticks_at_stop,
			ticker_op_func fp_op_func, void *op_context)
{
	struct ticker_instance *instance = &_instance[instance_index];
	struct ticker_user_op *user_op;
	struct ticker_user *user;
	uint8_t last;

	user = &instance->users[user_id];

	last = user->last + 1;
	if (last >= user->count_user_op) {
		last = 0U;
	}

	if (last == user->first) {
		return TICKER_STATUS_FAILURE;
	}

	user_op = &user->user_op[user->last];
	user_op->op = TICKER_USER_OP_TYPE_STOP_ABS;
	user_op->id = ticker_id;
	/* Absolute stop shares the yield op's parameter layout */
	user_op->params.yield.ticks_at_yield = ticks_at_stop;
	user_op->status = TICKER_STATUS_BUSY;
	user_op->fp_op_func = fp_op_func;
	user_op->op_context = op_context;

	/* Make sure transaction is completed before committing */
	cpu_dmb();
	user->last = last;

	instance->sched_cb(instance->caller_id_get_cb(user_id),
			   TICKER_CALL_ID_JOB, 0, instance);

	return user_op->status;
}
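
/*
 * Illustrative usage sketch, not part of this file: stop a ticker against a
 * tick reference captured earlier, e.g. at the start of a radio event. Ids
 * and the ticks_at_event variable are assumed placeholders.
 *
 *	uint32_t ticks_at_event = ticker_ticks_now_get();
 *	uint8_t ret;
 *
 *	ret = ticker_stop_abs(0, 0, 0,         // instance, user, ticker id
 *			      ticks_at_event,  // absolute stop reference
 *			      NULL, NULL);     // no completion callback
 */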

#if defined(CONFIG_BT_TICKER_NEXT_SLOT_GET)
/**
 * @brief Get next ticker node slot
 *
 * @details Gets the next ticker which has slot ticks specified and returns
 * the ticker id and accumulated ticks until expiration. If no ticker nodes
 * have slot ticks, the next ticker node is returned. If no head id is
 * provided (TICKER_NULL) the first node is returned.
 *
 * @param instance_index  Index of ticker instance
 * @param user_id         Ticker user id. Used for indexing user operations
 *                        and mapping to mayfly caller id
 * @param ticker_id       Pointer to id of ticker node
 * @param ticks_current   Pointer to current ticks count
 * @param ticks_to_expire Pointer to ticks to expire
 * @param fp_op_func      Function pointer of user operation completion
 *                        function
 * @param op_context      Context passed in operation completion call
 *
 * @return TICKER_STATUS_BUSY if request was successful but not yet completed.
 * TICKER_STATUS_FAILURE is returned if there are no more user operations
 * available, and TICKER_STATUS_SUCCESS is returned if ticker_job gets to run
 * before exiting ticker_next_slot_get
 */
uint8_t ticker_next_slot_get(uint8_t instance_index, uint8_t user_id,
			     uint8_t *ticker_id, uint32_t *ticks_current,
			     uint32_t *ticks_to_expire,
			     ticker_op_func fp_op_func, void *op_context)
{
#if defined(CONFIG_BT_TICKER_LAZY_GET) || \
	defined(CONFIG_BT_TICKER_REMAINDER_GET) || \
	defined(CONFIG_BT_TICKER_NEXT_SLOT_GET_MATCH)
	return ticker_next_slot_get_ext(instance_index, user_id, ticker_id,
					ticks_current, ticks_to_expire, NULL,
					NULL, NULL, NULL, fp_op_func,
					op_context);
}

uint8_t ticker_next_slot_get_ext(uint8_t instance_index, uint8_t user_id,
				 uint8_t *ticker_id, uint32_t *ticks_current,
				 uint32_t *ticks_to_expire,
				 uint32_t *remainder, uint16_t *lazy,
				 ticker_op_match_func fp_match_op_func,
				 void *match_op_context,
				 ticker_op_func fp_op_func, void *op_context)
{
#endif /* CONFIG_BT_TICKER_LAZY_GET ||
	* CONFIG_BT_TICKER_REMAINDER_GET ||
	* CONFIG_BT_TICKER_NEXT_SLOT_GET_MATCH
	*/
	struct ticker_instance *instance = &_instance[instance_index];
	struct ticker_user_op *user_op;
	struct ticker_user *user;
	uint8_t last;

	user = &instance->users[user_id];

	last = user->last + 1;
	if (last >= user->count_user_op) {
		last = 0U;
	}

	if (last == user->first) {
		return TICKER_STATUS_FAILURE;
	}

	user_op = &user->user_op[user->last];
	user_op->op = TICKER_USER_OP_TYPE_SLOT_GET;
	user_op->id = TICKER_NULL;
	user_op->params.slot_get.ticker_id = ticker_id;
	user_op->params.slot_get.ticks_current = ticks_current;
	user_op->params.slot_get.ticks_to_expire = ticks_to_expire;
#if defined(CONFIG_BT_TICKER_REMAINDER_GET)
	user_op->params.slot_get.remainder = remainder;
#endif /* CONFIG_BT_TICKER_REMAINDER_GET */
#if defined(CONFIG_BT_TICKER_LAZY_GET)
	user_op->params.slot_get.lazy = lazy;
#endif /* CONFIG_BT_TICKER_LAZY_GET */
#if defined(CONFIG_BT_TICKER_NEXT_SLOT_GET_MATCH)
	user_op->params.slot_get.fp_match_op_func = fp_match_op_func;
	user_op->params.slot_get.match_op_context = match_op_context;
#endif /* CONFIG_BT_TICKER_NEXT_SLOT_GET_MATCH */
	user_op->status = TICKER_STATUS_BUSY;
	user_op->fp_op_func = fp_op_func;
	user_op->op_context = op_context;

	/* Make sure transaction is completed before committing */
	cpu_dmb();
	user->last = last;

	instance->sched_cb(instance->caller_id_get_cb(user_id),
			   TICKER_CALL_ID_JOB, 0, instance);

	return user_op->status;
}
#endif /* CONFIG_BT_TICKER_NEXT_SLOT_GET */
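
/*
 * Illustrative usage sketch, not part of this file: walk the expiration
 * list. Priming ticker_id with TICKER_NULL starts from the first node; each
 * completed operation advances it to the next. Ids are assumed
 * placeholders, and real callers wait for the completion callback before
 * reading the output parameters.
 *
 *	uint8_t id = TICKER_NULL;    // start from the first node
 *	uint32_t ticks_current;
 *	uint32_t ticks_to_expire;
 *	uint8_t ret;
 *
 *	ret = ticker_next_slot_get(0, 0,   // instance, user id
 *				   &id, &ticks_current, &ticks_to_expire,
 *				   NULL, NULL);
 *	// After the op completes, id holds the next ticker id, or
 *	// TICKER_NULL when the list is exhausted.
 */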

#if defined(CONFIG_BT_TICKER_JOB_IDLE_GET)
/**
 * @brief Get a callback at the end of ticker job execution
 *
 * @details Operation completion callback is called at the end of the
 * ticker_job execution. The user operation is immutable.
 *
 * @param instance_index Index of ticker instance
 * @param user_id        Ticker user id. Used for indexing user operations
 *                       and mapping to mayfly caller id
 * @param fp_op_func     Function pointer of user operation completion
 *                       function
 * @param op_context     Context passed in operation completion call
 *
 * @return TICKER_STATUS_BUSY if request was successful but not yet completed.
 * TICKER_STATUS_FAILURE is returned if there are no more user operations
 * available, and TICKER_STATUS_SUCCESS is returned if ticker_job gets to run
 * before exiting ticker_job_idle_get
 */
uint8_t ticker_job_idle_get(uint8_t instance_index, uint8_t user_id,
			    ticker_op_func fp_op_func, void *op_context)
{
	struct ticker_instance *instance = &_instance[instance_index];
	struct ticker_user_op *user_op;
	struct ticker_user *user;
	uint8_t last;

	user = &instance->users[user_id];

	last = user->last + 1;
	if (last >= user->count_user_op) {
		last = 0U;
	}

	if (last == user->first) {
		return TICKER_STATUS_FAILURE;
	}

	user_op = &user->user_op[user->last];
	user_op->op = TICKER_USER_OP_TYPE_IDLE_GET;
	user_op->id = TICKER_NULL;
	user_op->status = TICKER_STATUS_BUSY;
	user_op->fp_op_func = fp_op_func;
	user_op->op_context = op_context;

	/* Make sure transaction is completed before committing */
	cpu_dmb();
	user->last = last;

	instance->sched_cb(instance->caller_id_get_cb(user_id),
			   TICKER_CALL_ID_JOB, 0, instance);

	return user_op->status;
}
#endif /* CONFIG_BT_TICKER_JOB_IDLE_GET */
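
/*
 * Illustrative usage sketch, not part of this file: run a callback once the
 * ticker_job has processed all pending operations, e.g. to learn when it is
 * safe to enter a low-power state. The callback name is an assumed
 * placeholder.
 *
 *	static void job_idle_cb(uint32_t status, void *op_context)
 *	{
 *		// Called at the end of ticker_job execution
 *	}
 *
 *	uint8_t ret;
 *
 *	ret = ticker_job_idle_get(0, 0,                // instance, user id
 *				  job_idle_cb, NULL);  // callback + context
 */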

#if !defined(CONFIG_BT_TICKER_LOW_LAT) && \
	!defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC) && \
	defined(CONFIG_BT_TICKER_PRIORITY_SET)
/**
 * @brief Set ticker node priority
 *
 * @param instance_index Index of ticker instance
 * @param user_id        Ticker user id. Used for indexing user operations
 *                       and mapping to mayfly caller id
 * @param ticker_id      Id of ticker node to set priority on
 * @param priority       Priority to set. Range [-128..127], default is 0.
 *                       Lower value equals higher priority. Setting
 *                       priority to -128 (TICKER_PRIORITY_CRITICAL) makes
 *                       the node win all collision challenges. Only one
 *                       node can have this priority assigned.
 * @param fp_op_func     Function pointer of user operation completion
 *                       function
 * @param op_context     Context passed in operation completion call
 *
 * @return TICKER_STATUS_BUSY if request was successful but not yet completed.
 * TICKER_STATUS_FAILURE is returned if there are no more user operations
 * available, and TICKER_STATUS_SUCCESS is returned if ticker_job gets to run
 * before exiting ticker_priority_set
 */
uint8_t ticker_priority_set(uint8_t instance_index, uint8_t user_id, uint8_t ticker_id,
			    int8_t priority, ticker_op_func fp_op_func,
			    void *op_context)
{
	struct ticker_instance *instance = &_instance[instance_index];
	struct ticker_user_op *user_op;
	struct ticker_user *user;
	uint8_t last;

	user = &instance->users[user_id];

	last = user->last + 1;
	if (last >= user->count_user_op) {
		last = 0U;
	}

	if (last == user->first) {
		return TICKER_STATUS_FAILURE;
	}

	user_op = &user->user_op[user->last];
	user_op->op = TICKER_USER_OP_TYPE_PRIORITY_SET;
	user_op->id = ticker_id;
	user_op->params.priority_set.priority = priority;
	user_op->status = TICKER_STATUS_BUSY;
	user_op->fp_op_func = fp_op_func;
	user_op->op_context = op_context;

	/* Make sure transaction is completed before committing */
	cpu_dmb();
	user->last = last;

	instance->sched_cb(instance->caller_id_get_cb(user_id),
			   TICKER_CALL_ID_JOB, 0, instance);

	return user_op->status;
}
#endif /* !CONFIG_BT_TICKER_LOW_LAT &&
	* !CONFIG_BT_TICKER_SLOT_AGNOSTIC &&
	* CONFIG_BT_TICKER_PRIORITY_SET
	*/
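
/*
 * Illustrative usage sketch, not part of this file: give one ticker node
 * critical priority so it wins every collision challenge. Ids are assumed
 * placeholders; per the doc comment above, only one node may hold
 * TICKER_PRIORITY_CRITICAL.
 *
 *	uint8_t ret;
 *
 *	ret = ticker_priority_set(0, 0, 0,                  // instance, user, ticker id
 *				  TICKER_PRIORITY_CRITICAL, // -128: always wins
 *				  NULL, NULL);              // no completion callback
 */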

/**
 * @brief Schedule ticker job
 *
 * @param instance_index Index of ticker instance
 * @param user_id        Ticker user id. Maps to mayfly caller id
 */
void ticker_job_sched(uint8_t instance_index, uint8_t user_id)
{
	struct ticker_instance *instance = &_instance[instance_index];

	instance->sched_cb(instance->caller_id_get_cb(user_id),
			   TICKER_CALL_ID_JOB, 0, instance);
}

/**
 * @brief Get current absolute tick count
 *
 * @return Absolute tick count
 */
uint32_t ticker_ticks_now_get(void)
{
	return cntr_cnt_get();
}

/**
 * @brief Get difference between two tick counts
 *
 * @details Subtract two counts and truncate to correct HW dependent counter
 * bit width
 *
 * @param ticks_now Highest tick count (now)
 * @param ticks_old Tick count to subtract from ticks_now
 *
 * @return Difference between the two tick counts, truncated to the counter
 * bit width
 */
uint32_t ticker_ticks_diff_get(uint32_t ticks_now, uint32_t ticks_old)
{
	return ((ticks_now - ticks_old) & HAL_TICKER_CNTR_MASK);
}
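
/*
 * Illustrative note, not part of this file: the mask makes the subtraction
 * wrap-safe. Assuming a 24-bit counter (HAL_TICKER_CNTR_MASK == 0x00FFFFFF),
 * a now value of 0x000010 and an old value of 0xFFFFF0 give
 * (0x000010 - 0xFFFFF0) & 0x00FFFFFF = 0x000020, i.e. 32 ticks elapsed
 * across the counter wrap.
 *
 *	uint32_t elapsed = ticker_ticks_diff_get(ticker_ticks_now_get(),
 *						 ticks_old); // sampled earlier
 */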