1 /*
2  * Copyright (c) 2016-2018 Nordic Semiconductor ASA
3  * Copyright (c) 2016 Vinayak Kariappa Chettimada
4  *
5  * SPDX-License-Identifier: Apache-2.0
6  */
7 
8 #include <stdbool.h>
9 #include <zephyr/types.h>
10 #include <soc.h>
11 
12 #include "hal/cntr.h"
13 #include "hal/ticker.h"
14 #include "hal/cpu.h"
15 
16 #include "ticker.h"
17 
18 #include "hal/debug.h"
19 
20 /*****************************************************************************
21  * Defines
22  ****************************************************************************/
23 #define DOUBLE_BUFFER_SIZE 2
24 
25 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
26 #if !defined(CONFIG_BT_CTLR_ADV_AUX_SET)
27 #define BT_CTLR_ADV_AUX_SET 0
28 #else
29 #define BT_CTLR_ADV_AUX_SET CONFIG_BT_CTLR_ADV_AUX_SET
30 #endif
31 #if !defined(CONFIG_BT_CTLR_ADV_SYNC_SET)
32 #define BT_CTLR_ADV_SYNC_SET 0
33 #else
34 #define BT_CTLR_ADV_SYNC_SET CONFIG_BT_CTLR_ADV_SYNC_SET
35 #endif
36 #if defined(CONFIG_BT_CTLR_ADV_ISO)
37 #define TICKER_EXPIRE_INFO_MAX (BT_CTLR_ADV_AUX_SET + BT_CTLR_ADV_SYNC_SET*2)
38 #else
39 #define TICKER_EXPIRE_INFO_MAX (BT_CTLR_ADV_AUX_SET + BT_CTLR_ADV_SYNC_SET)
40 #endif /* !CONFIG_BT_CTLR_ADV_ISO */
41 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
42 
43 /*****************************************************************************
44  * Types
45  ****************************************************************************/
46 
/* Ticker node: one schedulable timeout entry. Nodes are linked in a
 * delta-sorted list (see ticker_enqueue/ticker_dequeue): each node's
 * ticks_to_expire is relative to the node before it.
 */
struct ticker_node {
	uint8_t  next;			    /* Next ticker node */

	uint8_t  req;			    /* Request counter */
	uint8_t  ack;			    /* Acknowledge counter. Imbalance
					     * between req and ack indicates
					     * ongoing operation
					     */
	uint8_t  force:1;		    /* If non-zero, node timeout should
					     * be forced at next expiration
					     */
#if defined(CONFIG_BT_TICKER_PREFER_START_BEFORE_STOP)
	uint8_t  start_pending:1;	    /* If non-zero, start is pending for
					     * bottom half of ticker_job.
					     */
#endif /* CONFIG_BT_TICKER_PREFER_START_BEFORE_STOP */
	uint32_t ticks_periodic;	    /* If non-zero, interval
					     * between expirations
					     */
	uint32_t ticks_to_expire;	    /* Ticks until expiration */
	ticker_timeout_func timeout_func;   /* User timeout function */
	void  *context;			    /* Context delivered to timeout
					     * function
					     */
	uint32_t ticks_to_expire_minus;	    /* Negative drift correction */
#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
	uint32_t ticks_slot;		    /* Air-time reservation for node */
#endif /* CONFIG_BT_TICKER_SLOT_AGNOSTIC */
	uint16_t lazy_periodic;		    /* Number of timeouts to allow
					     * skipping
					     */
	uint16_t lazy_current;		    /* Current number of timeouts
					     * skipped = peripheral latency
					     */
	/* NOTE(review): remainder_periodic and fp_op_func share storage;
	 * presumably the op-completion callback is only live while a user
	 * operation is in flight and the remainder only while the ticker is
	 * running — confirm before adding new uses of either member.
	 */
	union {
#if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
		uint32_t remainder_periodic;/* Sub-microsecond tick remainder
					     * for each period
					     */
#endif /* CONFIG_BT_TICKER_REMAINDER_SUPPORT */

		ticker_op_func fp_op_func;  /* Operation completion callback */
	};

	/* Same overlap rationale as the union above: remainder_current and
	 * op_context are not needed at the same time.
	 */
	union {
#if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
		uint32_t remainder_current; /* Current sub-microsecond tick
					     * remainder
					     */
#endif /* CONFIG_BT_TICKER_REMAINDER_SUPPORT */

		void  *op_context;	    /* Context passed in completion
					     * callback
					     */
	};

#if  defined(CONFIG_BT_TICKER_EXT)
	struct ticker_ext *ext_data;	    /* Ticker extension data */
#endif /* CONFIG_BT_TICKER_EXT */
#if !defined(CONFIG_BT_TICKER_LOW_LAT) && \
	!defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
	uint8_t  must_expire;		    /* Node must expire, even if it
					     * collides with other nodes
					     */
#if defined(CONFIG_BT_TICKER_PRIORITY_SET)
	int8_t  priority;		    /* Ticker node priority. 0 is
					     * default. Lower value is higher
					     * priority
					     */
#endif /* CONFIG_BT_TICKER_PRIORITY_SET */
#endif /* !CONFIG_BT_TICKER_LOW_LAT &&
	* !CONFIG_BT_TICKER_SLOT_AGNOSTIC
	*/
};
121 
/* Internal per-entry bookkeeping for the expire-info feature
 * (CONFIG_BT_TICKER_EXT_EXPIRE_INFO): caches expiry data about one
 * referenced ticker so it can be handed to a dependent ticker's timeout.
 */
struct ticker_expire_info_internal {
	uint32_t ticks_to_expire;	/* Ticks until referenced ticker expires */
#if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
	uint32_t remainder;		/* Sub-microsecond tick remainder of
					 * referenced ticker
					 */
#endif /* CONFIG_BT_TICKER_REMAINDER_SUPPORT */
	uint16_t lazy;			/* Lazy count of referenced ticker */
	uint8_t ticker_id;		/* Id of referenced ticker */
	uint8_t outdated:1;		/* Entry needs refresh — presumably by
					 * ticker_job; confirm against the
					 * expire_infos update path
					 */
	uint8_t found:1;		/* Referenced ticker located in node list */
	uint8_t last:1;			/* Marks last used entry in expire_infos[] */
};
133 
134 /* Operations to be performed in ticker_job.
135  * Possible values for field "op" in struct ticker_user_op
136  */
137 #define TICKER_USER_OP_TYPE_NONE         0
138 #define TICKER_USER_OP_TYPE_IDLE_GET     1
139 #define TICKER_USER_OP_TYPE_SLOT_GET     2
140 #define TICKER_USER_OP_TYPE_PRIORITY_SET 3
141 #define TICKER_USER_OP_TYPE_START        4
142 #define TICKER_USER_OP_TYPE_UPDATE       5
143 #define TICKER_USER_OP_TYPE_YIELD_ABS    6
144 #define TICKER_USER_OP_TYPE_STOP         7
145 #define TICKER_USER_OP_TYPE_STOP_ABS     8
146 
147 /* Slot window re-schedule states */
148 #define TICKER_RESCHEDULE_STATE_NONE     0
149 #define TICKER_RESCHEDULE_STATE_PENDING  1
150 #define TICKER_RESCHEDULE_STATE_DONE     2
151 
152 #if defined(CONFIG_BT_TICKER_EXT) && !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
153 #define TICKER_HAS_SLOT_WINDOW(_ticker) \
154 	((_ticker)->ext_data && ((_ticker)->ext_data->ticks_slot_window != 0U))
155 #define TICKER_RESCHEDULE_PENDING(_ticker) \
156 	(_ticker->ext_data && (_ticker->ext_data->reschedule_state == \
157 		TICKER_RESCHEDULE_STATE_PENDING))
158 #else
159 #define TICKER_HAS_SLOT_WINDOW(_ticker) 0
160 #define TICKER_RESCHEDULE_PENDING(_ticker) 0
161 #endif
162 
163 /* User operation data structure for start opcode. Used for passing start
164  * requests to ticker_job
165  */
struct ticker_user_op_start {
	uint32_t ticks_at_start;	/* Anchor ticks (absolute) */
	uint32_t ticks_first;		/* Initial timeout ticks */
	uint32_t ticks_periodic;	/* Ticker period ticks */

#if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
	uint32_t remainder_periodic;	/* Sub-microsecond tick remainder
					 * applied each period
					 */

#if defined(CONFIG_BT_TICKER_START_REMAINDER)
	uint32_t remainder_first;       /* Sub-microsecond tick remainder
					 * applied to the first expiry
					 */
#endif /* CONFIG_BT_TICKER_START_REMAINDER */
#endif /* CONFIG_BT_TICKER_REMAINDER_SUPPORT */

	uint16_t lazy;			/* Periodic latency in number of
					 * periods
					 */

#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
	uint32_t ticks_slot;		/* Air-time reservation ticks */
#endif /* CONFIG_BT_TICKER_SLOT_AGNOSTIC */

	ticker_timeout_func fp_timeout_func; /* Timeout callback function */
	void  *context;			/* Context passed in timeout callback */

#if defined(CONFIG_BT_TICKER_EXT)
	struct ticker_ext *ext_data;	/* Ticker extension data instance.
					 * NOTE(review): stored by reference —
					 * caller must keep it alive while the
					 * ticker runs; confirm ownership
					 */
#endif /* CONFIG_BT_TICKER_EXT */
};
194 
195 /* User operation data structure for update opcode. Used for passing update
196  * requests to ticker_job
197  */
struct ticker_user_op_update {
	uint32_t ticks_drift_plus;	/* Requested positive drift in ticks */
	uint32_t ticks_drift_minus;	/* Requested negative drift in ticks */
#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
	uint32_t ticks_slot_plus;	/* Number of ticks to add to slot
					 * reservation (air-time)
					 */
	uint32_t ticks_slot_minus;	/* Number of ticks to subtract from
					 * slot reservation (air-time)
					 */
#endif /* CONFIG_BT_TICKER_SLOT_AGNOSTIC */
	uint16_t lazy;			/* Peripheral latency:
					 *  0: Do nothing
					 *  1: latency = 0
					 * >1: latency = lazy - 1
					 */
	uint8_t  force;			/* Force update */
#if defined(CONFIG_BT_TICKER_EXT) && !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC) &&\
	!defined(CONFIG_BT_TICKER_LOW_LAT)
	uint8_t must_expire;		/* Node must expire, even if it
					 * collides with other nodes:
					 *  0x00: Do nothing
					 *  0x01: Disable must_expire
					 *  0x02: Enable must_expire
					 */
#endif
#if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
	uint8_t expire_info_id;		/* Id of ticker whose expire info the
					 * updated node depends on — presumably
					 * consumed by ticker_job to rebuild
					 * expire_infos[]; confirm
					 */
#endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
};
228 
229 /* User operation data structure for yield/stop opcode. Used for passing yield/
230  * stop requests with absolute tick to ticker_job
231  */
struct ticker_user_op_yield {
	uint32_t ticks_at_yield;        /* Anchor ticks (absolute) for the
					 * yield/stop reference point
					 */
};
235 
236 /* User operation data structure for slot_get opcode. Used for passing request
237  * to get next ticker with slot ticks via ticker_job
238  */
struct ticker_user_op_slot_get {
	uint8_t  *ticker_id;		/* [in/out] Id of found node; TICKER_NULL
					 * starts iteration from list head
					 */
	uint32_t *ticks_current;	/* [in/out] Instance ticks at iteration */
	uint32_t *ticks_to_expire;	/* [in/out] Accumulated ticks to expiry */
#if defined(CONFIG_BT_TICKER_REMAINDER_GET)
	uint32_t *remainder;		/* [out] Sub-microsecond remainder of
					 * found node, or NULL if unused
					 */
#endif /* CONFIG_BT_TICKER_REMAINDER_GET */
#if defined(CONFIG_BT_TICKER_LAZY_GET)
	uint16_t *lazy;			/* [out] lazy_current of found node,
					 * or NULL if unused
					 */
#endif /* CONFIG_BT_TICKER_LAZY_GET */
#if defined(CONFIG_BT_TICKER_NEXT_SLOT_GET_MATCH)
	ticker_op_match_func fp_match_op_func; /* Optional match predicate */
	void *match_op_context;		/* Context passed to match predicate */
#endif /* CONFIG_BT_TICKER_NEXT_SLOT_GET_MATCH */
};
254 
255 /* User operation data structure for priority_set opcode. Used for passing
256  * request to set ticker node priority via ticker_job
257  */
struct ticker_user_op_priority_set {
	int8_t priority;		/* Node priority. Defaults to 0. Lower
					 * value is higher priority (see
					 * struct ticker_node)
					 */
};
261 
262 /* User operation top level data structure. Used for passing requests to
263  * ticker_job
264  */
struct ticker_user_op {
	uint8_t op;			/* User operation
					 * (TICKER_USER_OP_TYPE_*)
					 */
	uint8_t id;			/* Ticker node id */
	uint8_t status;			/* Operation result — presumably
					 * written when ticker_job completes
					 * the operation; confirm
					 */
	union {
		struct ticker_user_op_start        start;
		struct ticker_user_op_update       update;
		struct ticker_user_op_yield        yield;
		struct ticker_user_op_slot_get     slot_get;
		struct ticker_user_op_priority_set priority_set;
	} params;			/* User operation parameters */
	ticker_op_func fp_op_func;	/* Operation completion callback */
	void  *op_context;		/* Context passed in completion callback */
};
279 
280 /* User data structure for operations
281  */
/* Per-user queue of pending operations.
 * NOTE(review): first/middle/last look like indices into a circular queue of
 * count_user_op entries — verify wrap-around handling against the producers
 * and ticker_job before relying on it.
 */
struct ticker_user {
	uint8_t count_user_op;		/* Number of user operation slots */
	uint8_t first;			/* Slot index of first user operation */
	uint8_t middle;			/* Slot index of last managed user op.
					 * Updated by ticker_job_list_manage
					 * for use in ticker_job_list_insert
					 */
	uint8_t last;			/* Slot index of last user operation */
	struct ticker_user_op *user_op; /* Pointer to user operation array */
};
292 
293 /* Ticker instance
294  */
struct ticker_instance {
	struct ticker_node *nodes;	/* Pointer to ticker nodes */
	struct ticker_user *users;	/* Pointer to user nodes */
	uint8_t  count_node;		/* Number of ticker nodes */
	uint8_t  count_user;		/* Number of user nodes */
	uint8_t  ticks_elapsed_first;	/* Index from which elapsed ticks count
					 * is pulled
					 */
	uint8_t  ticks_elapsed_last;	/* Index to which elapsed ticks count
					 * is pushed
					 */
	uint32_t ticks_elapsed[DOUBLE_BUFFER_SIZE]; /* Buffer for elapsed
						     * ticks
						     */
	uint32_t ticks_current;		/* Absolute ticks elapsed at last
					 * ticker_job
					 */
	uint8_t  ticker_id_head;	/* Index of first ticker node (next to
					 * expire)
					 */
	uint8_t  job_guard;		/* Flag preventing ticker_worker from
					 * running if ticker_job is active
					 */
	uint8_t  worker_trigger;	/* Flag preventing ticker_job from
					 * starting if ticker_worker was
					 * requested, and to trigger
					 * ticker_worker at end of job, if
					 * requested
					 */

#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
	uint8_t  ticker_id_slot_previous; /* Id of previous slot reserving
					   * ticker node
					   */
	uint32_t ticks_slot_previous;	/* Number of ticks previously reserved
					 * by a ticker node (active air-time)
					 */
#endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */

#if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
	struct ticker_expire_info_internal expire_infos[TICKER_EXPIRE_INFO_MAX];
					/* Cached expire info for referenced
					 * tickers
					 */
	bool expire_infos_outdated;	/* Set when any cached entry needs
					 * refresh — presumably serviced by
					 * ticker_job; confirm
					 */
#endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */

	ticker_caller_id_get_cb_t caller_id_get_cb; /* Function for retrieving
						     * the caller id from user
						     * id
						     */
	ticker_sched_cb_t         sched_cb;	    /* Function for scheduling
						     * ticker_worker and
						     * ticker_job
						     */
	ticker_trigger_set_cb_t   trigger_set_cb;   /* Function for setting
						     * the trigger (compare
						     * value)
						     */
};
352 
353 BUILD_ASSERT(sizeof(struct ticker_node)    == TICKER_NODE_T_SIZE);
354 BUILD_ASSERT(sizeof(struct ticker_user)    == TICKER_USER_T_SIZE);
355 BUILD_ASSERT(sizeof(struct ticker_user_op) == TICKER_USER_OP_T_SIZE);
356 
357 /*****************************************************************************
358  * Global instances
359  ****************************************************************************/
360 #define TICKER_INSTANCE_MAX 1
361 static struct ticker_instance _instance[TICKER_INSTANCE_MAX];
362 
363 /*****************************************************************************
364  * Static Functions
365  ****************************************************************************/
366 #if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
367 static inline uint8_t ticker_add_to_remainder(uint32_t *remainder, uint32_t to_add);
368 #endif /* CONFIG_BT_TICKER_REMAINDER_SUPPORT */
369 
370 /**
371  * @brief Update elapsed index
372  *
373  * @param ticks_elapsed_index Pointer to current index
374  *
375  * @internal
376  */
ticker_next_elapsed(uint8_t * ticks_elapsed_index)377 static inline void ticker_next_elapsed(uint8_t *ticks_elapsed_index)
378 {
379 	uint8_t idx = *ticks_elapsed_index + 1;
380 
381 	if (idx == DOUBLE_BUFFER_SIZE) {
382 		idx = 0U;
383 	}
384 	*ticks_elapsed_index = idx;
385 }
386 
387 #if defined(CONFIG_BT_TICKER_LOW_LAT)
388 /**
389  * @brief Get ticker expiring in a specific slot
390  *
391  * @details Searches for a ticker which expires in a specific slot starting
392  * at 'ticks_slot'.
393  *
394  * @param node           Pointer to ticker node array
395  * @param ticker_id_head Id of initial ticker node
396  * @param ticks_slot     Ticks indicating slot to get
397  *
398  * @return Id of ticker expiring within slot or TICKER_NULL
399  * @internal
400  */
ticker_by_slot_get(struct ticker_node * node,uint8_t ticker_id_head,uint32_t ticks_slot)401 static uint8_t ticker_by_slot_get(struct ticker_node *node, uint8_t ticker_id_head,
402 			       uint32_t ticks_slot)
403 {
404 	while (ticker_id_head != TICKER_NULL) {
405 		struct ticker_node *ticker;
406 		uint32_t ticks_to_expire;
407 
408 		ticker = &node[ticker_id_head];
409 		ticks_to_expire = ticker->ticks_to_expire;
410 
411 		if (ticks_slot <= ticks_to_expire) {
412 			/* Next ticker expiration is outside the checked slot */
413 			return TICKER_NULL;
414 		}
415 
416 		if (ticker->ticks_slot) {
417 			/* This ticker node has slot defined and expires within
418 			 * checked slot
419 			 */
420 			break;
421 		}
422 
423 		ticks_slot -= ticks_to_expire;
424 		ticker_id_head = ticker->next;
425 	}
426 
427 	return ticker_id_head;
428 }
429 #endif /* CONFIG_BT_TICKER_LOW_LAT */
430 
431 #if defined(CONFIG_BT_TICKER_NEXT_SLOT_GET)
432 /**
433  * @brief Get next ticker with slot ticks or match
434  *
435  * @details Iterates ticker nodes from ticker_id_head. If no head id is provided
436  * (TICKER_NULL), iteration starts from the first node.
437  * Operation details:
438  *
439  * NORMAL MODE (!CONFIG_BT_TICKER_SLOT_AGNOSTIC)
440  * - Gets the next ticker which has slot ticks specified and return the ticker
441  *   id and accumulated ticks until expiration.
442  * - If a matching function is provided, this function is called and node iteration
443  *   continues until match function returns true.
444  *
445  * SLOT AGNOSTIC MODE (CONFIG_BT_TICKER_SLOT_AGNOSTIC)
446  * - Gets the next ticker node.
447  * - If a matching function is provided, this function is called and node iteration
448  *   continues until match function returns true.
449  *
450  * @param instance          Pointer to ticker instance
451  * @param ticker_id_head    Pointer to id of first ticker node [in/out]
452  * @param ticks_current     Pointer to current ticks count [in/out]
453  * @param ticks_to_expire   Pointer to ticks to expire [in/out]
454  * @param fp_match_op_func  Pointer to match function or NULL if unused
455  * @param match_op_context  Pointer to operation context passed to match
456  *                          function or NULL if unused
457  * @param lazy              Pointer to lazy variable to receive lazy_current
458  *                          of found ticker node
459  * @internal
460  */
static void ticker_by_next_slot_get(struct ticker_instance *instance,
				    uint8_t *ticker_id_head,
				    uint32_t *ticks_current,
				    uint32_t *ticks_to_expire,
				    ticker_op_match_func fp_match_op_func,
				    void *match_op_context, uint32_t *remainder,
				    uint16_t *lazy)
{
	struct ticker_node *ticker;
	struct ticker_node *node;
	uint32_t _ticks_to_expire;
	uint8_t _ticker_id_head;

	node = instance->nodes;

	/* Resume iteration from the caller-supplied node, unless no node was
	 * given (TICKER_NULL) or the instance has moved on since the caller's
	 * snapshot (ticks_current mismatch) — then restart from the list head.
	 */
	_ticker_id_head = *ticker_id_head;
	_ticks_to_expire = *ticks_to_expire;
	if ((_ticker_id_head == TICKER_NULL) ||
	    (*ticks_current != instance->ticks_current)) {
		/* Initialize with instance head */
		_ticker_id_head = instance->ticker_id_head;
		*ticks_current = instance->ticks_current;
		_ticks_to_expire = 0U;
	} else {
		/* Get ticker id for next node */
		ticker = &node[_ticker_id_head];
		_ticker_id_head = ticker->next;
	}

	/* Find first ticker node with match or slot ticks */
	while (_ticker_id_head != TICKER_NULL) {
		ticker = &node[_ticker_id_head];

#if defined(CONFIG_BT_TICKER_NEXT_SLOT_GET_MATCH)
		if (fp_match_op_func) {
			uint32_t ticks_slot = 0;

#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
			ticks_slot += ticker->ticks_slot;
#endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */

			/* Match node id */
			if (fp_match_op_func(_ticker_id_head, ticks_slot,
					     _ticks_to_expire +
					     ticker->ticks_to_expire,
					     match_op_context)) {
				/* Match found */
				break;
			}
		} else
#else /* !CONFIG_BT_TICKER_NEXT_SLOT_GET_MATCH */
	ARG_UNUSED(fp_match_op_func);
	ARG_UNUSED(match_op_context);
#endif /* !CONFIG_BT_TICKER_NEXT_SLOT_GET_MATCH */

		/* NOTE(review): the block below pairs braces ACROSS the
		 * preprocessor branches — the `if`/bare-block opened inside
		 * either #if arm is closed by the shared `}` after the
		 * #endif, and it is also the `else` body of the match check
		 * above when matching is compiled in. Edit with extreme care.
		 */
#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
			if (ticker->ticks_slot) {
				/* Matching not used and node has slot ticks */
				break;
#else
			{
				/* Matching not used and slot agnostic */
				break;
#endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */
			}

		/* Accumulate expire ticks */
		_ticks_to_expire += ticker->ticks_to_expire;
		_ticker_id_head = ticker->next;
	}

	if (_ticker_id_head != TICKER_NULL) {
		/* Add ticks for found ticker */
		_ticks_to_expire += ticker->ticks_to_expire;

#if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
#if defined(CONFIG_BT_TICKER_REMAINDER_GET)
		if (remainder) {
			*remainder = ticker->remainder_current;
		}
#else /* !CONFIG_BT_TICKER_REMAINDER_GET */
		ARG_UNUSED(remainder);
#endif /* !CONFIG_BT_TICKER_REMAINDER_GET */
#else /* !CONFIG_BT_TICKER_REMAINDER_SUPPORT */
		ARG_UNUSED(remainder);
#endif /* !CONFIG_BT_TICKER_REMAINDER_SUPPORT */

#if defined(CONFIG_BT_TICKER_LAZY_GET)
		if (lazy) {
			*lazy = ticker->lazy_current;
		}
#else /* !CONFIG_BT_TICKER_LAZY_GET */
	ARG_UNUSED(lazy);
#endif /* !CONFIG_BT_TICKER_LAZY_GET */
	}

	/* Write results back to the caller's in/out parameters */
	*ticker_id_head = _ticker_id_head;
	*ticks_to_expire = _ticks_to_expire;
}
560 #endif /* CONFIG_BT_TICKER_NEXT_SLOT_GET */
561 
562 #if !defined(CONFIG_BT_TICKER_LOW_LAT)
563 /**
564  * @brief Enqueue ticker node
565  *
566  * @details Finds insertion point for new ticker node and inserts the
567  * node in the linked node list.
568  *
569  * @param instance Pointer to ticker instance
570  * @param id       Ticker node id to enqueue
571  *
572  * @return Id of enqueued ticker node
573  * @internal
574  */
575 static uint8_t ticker_enqueue(struct ticker_instance *instance, uint8_t id)
576 {
577 	struct ticker_node *ticker_current;
578 	struct ticker_node *ticker_new;
579 	uint32_t ticks_to_expire_current;
580 	struct ticker_node *node;
581 	uint32_t ticks_to_expire;
582 	uint8_t previous;
583 	uint8_t current;
584 
585 	node = &instance->nodes[0];
586 	ticker_new = &node[id];
587 	ticks_to_expire = ticker_new->ticks_to_expire;
588 	current = instance->ticker_id_head;
589 
590 	/* Find insertion point for new ticker node and adjust ticks_to_expire
591 	 * relative to insertion point
592 	 */
593 	previous = TICKER_NULL;
594 
595 	while ((current != TICKER_NULL) && (ticks_to_expire >=
596 		(ticks_to_expire_current =
597 		(ticker_current = &node[current])->ticks_to_expire))) {
598 
599 		ticks_to_expire -= ticks_to_expire_current;
600 
601 		/* Check for timeout in same tick - prioritize according to
602 		 * latency
603 		 */
604 		if (ticks_to_expire == 0 && (ticker_new->lazy_current >
605 					     ticker_current->lazy_current)) {
606 			ticks_to_expire = ticks_to_expire_current;
607 			break;
608 		}
609 
610 		previous = current;
611 		current = ticker_current->next;
612 	}
613 
614 	/* Link in new ticker node and adjust ticks_to_expire to relative value
615 	 */
616 	ticker_new->ticks_to_expire = ticks_to_expire;
617 	ticker_new->next = current;
618 
619 	if (previous == TICKER_NULL) {
620 		instance->ticker_id_head = id;
621 	} else {
622 		node[previous].next = id;
623 	}
624 
625 	if (current != TICKER_NULL) {
626 		node[current].ticks_to_expire -= ticks_to_expire;
627 	}
628 
629 	return id;
630 }
631 #else /* CONFIG_BT_TICKER_LOW_LAT */
632 
633 /**
634  * @brief Enqueue ticker node
635  *
636  * @details Finds insertion point for new ticker node and inserts the
637  * node in the linked node list. However, if the new ticker node collides
638  * with an existing node or the expiration is inside the previous slot,
639  * the node is not inserted.
640  *
641  * @param instance Pointer to ticker instance
642  * @param id       Ticker node id to enqueue
643  *
644  * @return Id of enqueued ticker node, or id of previous- or colliding
645  * ticker node if new node was not enqueued
646  * @internal
647  */
static uint8_t ticker_enqueue(struct ticker_instance *instance, uint8_t id)
{
	struct ticker_node *ticker_current;
	struct ticker_node *ticker_new;
	uint32_t ticks_to_expire_current;
	uint8_t ticker_id_slot_previous;
	uint32_t ticks_slot_previous;
	struct ticker_node *node;
	uint32_t ticks_to_expire;
	uint8_t previous;
	uint8_t current;
	uint8_t collide;

	node = &instance->nodes[0];
	ticker_new = &node[id];
	ticks_to_expire = ticker_new->ticks_to_expire;

	collide = ticker_id_slot_previous = TICKER_NULL;
	current = instance->ticker_id_head;
	/* previous == current marks "insert at head" below */
	previous = current;
	ticks_slot_previous = instance->ticks_slot_previous;

	/* Find insertion point for new ticker node and adjust ticks_to_expire
	 * relative to insertion point
	 */
	while ((current != TICKER_NULL) &&
	       (ticks_to_expire >
		(ticks_to_expire_current =
		 (ticker_current = &node[current])->ticks_to_expire))) {
		ticks_to_expire -= ticks_to_expire_current;

		/* Track the slot reservation that is still "active" at the
		 * insertion point: a reserving node restarts the window,
		 * otherwise the remaining reservation drains as list deltas
		 * are consumed.
		 */
		if (ticker_current->ticks_slot != 0U) {
			ticks_slot_previous = ticker_current->ticks_slot;
			ticker_id_slot_previous = current;
		} else {
			if (ticks_slot_previous > ticks_to_expire_current) {
				ticks_slot_previous -= ticks_to_expire_current;
			} else {
				ticks_slot_previous = 0U;
			}
		}
		previous = current;
		current = ticker_current->next;
	}

	/* Check for collision for new ticker node at insertion point */
	collide = ticker_by_slot_get(&node[0], current,
				     ticks_to_expire + ticker_new->ticks_slot);

	if ((ticker_new->ticks_slot == 0U) ||
	    ((ticks_slot_previous <= ticks_to_expire) &&
	     (collide == TICKER_NULL))) {
		/* New ticker node has no slot ticks or there is no collision -
		 * link it in and adjust ticks_to_expire to relative value
		 */
		ticker_new->ticks_to_expire = ticks_to_expire;
		ticker_new->next = current;

		if (previous == current) {
			instance->ticker_id_head = id;
		} else {
			node[previous].next = id;
		}

		if (current != TICKER_NULL) {
			node[current].ticks_to_expire -= ticks_to_expire;
		}
	} else {
		/* Collision - no ticker node insertion, set id to that of
		 * colliding node
		 */
		if (ticks_slot_previous > ticks_to_expire) {
			/* Overlap with the still-active previous reservation */
			id = ticker_id_slot_previous;
		} else {
			/* Overlap with a reserving node inside the new slot */
			id = collide;
		}
	}

	return id;
}
728 #endif /* CONFIG_BT_TICKER_LOW_LAT */
729 
730 /**
731  * @brief Dequeue ticker node
732  *
733  * @details Finds extraction point for ticker node to be dequeued, unlinks
734  * the node and adjusts the links and ticks_to_expire. Returns the ticks
735  * until expiration for dequeued ticker node.
736  *
737  * @param instance Pointer to ticker instance
738  * @param id       Ticker node id to dequeue
739  *
740  * @return Total ticks until expiration for dequeued ticker node, or 0 if
741  * node was not found
742  * @internal
743  */
744 static uint32_t ticker_dequeue(struct ticker_instance *instance, uint8_t id)
745 {
746 	struct ticker_node *ticker_current;
747 	struct ticker_node *node;
748 	uint8_t previous;
749 	uint32_t timeout;
750 	uint8_t current;
751 	uint32_t total;
752 
753 	/* Find the ticker's position in ticker node list while accumulating
754 	 * ticks_to_expire
755 	 */
756 	node = &instance->nodes[0];
757 	previous = instance->ticker_id_head;
758 	current = previous;
759 	total = 0U;
760 	ticker_current = 0;
761 	while (current != TICKER_NULL) {
762 		ticker_current = &node[current];
763 
764 		if (current == id) {
765 			break;
766 		}
767 
768 		total += ticker_current->ticks_to_expire;
769 		previous = current;
770 		current = ticker_current->next;
771 	}
772 
773 	if (current == TICKER_NULL) {
774 		/* Ticker not in active list */
775 		return 0;
776 	}
777 
778 	if (previous == current) {
779 		/* Ticker is the first in the list */
780 		instance->ticker_id_head = ticker_current->next;
781 	}
782 
783 	/* Remaining timeout between next timeout */
784 	timeout = ticker_current->ticks_to_expire;
785 
786 	/* Link previous ticker with next of this ticker
787 	 * i.e. removing the ticker from list
788 	 */
789 	node[previous].next = ticker_current->next;
790 
791 	/* If this is not the last ticker, increment the
792 	 * next ticker by this ticker timeout
793 	 */
794 	if (ticker_current->next != TICKER_NULL) {
795 		node[ticker_current->next].ticks_to_expire += timeout;
796 	}
797 
798 	return (total + timeout);
799 }
800 
801 #if !defined(CONFIG_BT_TICKER_LOW_LAT) && \
802 	!defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
803 /**
804  * @brief Resolve ticker node collision
805  *
806  * @details Evaluates the provided ticker node against other queued nodes
807  * and returns non-zero if the ticker node collides and should be skipped.
808  * The following rules are checked:
809  *   1) If the periodic latency is not yet exhausted, node is skipped
810  *   2) If the node has highest possible priority, node is never skipped
811  *   3) If the node will starve next node due to slot reservation
812  *      overlap, node is skipped if:
813  *      a) Next node has higher priority than current node
814  *      b) Next node has more accumulated latency than the current node
815  *      c) Next node is 'older' than current node and has same priority
816  *      d) Next node has force flag set, and the current does not
817  *   4) If using ticks slot window,
818  *      a) current node can be rescheduled later in the ticks slot window
819  *   5) If using ticks slot window under yield (build time configuration),
820  *      a) Current node can be rescheduled later in the ticks slot window when
821  *         next node can not be rescheduled later in its ticks slot window
822  *
823  * @param nodes         Pointer to ticker node array
824  * @param ticker        Pointer to ticker to resolve
825  *
826  * @return 0 if no collision was detected. 1 if ticker node collides
827  * with other ticker node of higher composite priority
828  * @internal
829  */
830 static uint8_t ticker_resolve_collision(struct ticker_node *nodes,
831 				     struct ticker_node *ticker)
832 {
833 #if defined(CONFIG_BT_TICKER_PRIORITY_SET)
834 	if ((ticker->priority != TICKER_PRIORITY_CRITICAL) &&
835 	    (ticker->next != TICKER_NULL)) {
836 
837 #else /* !CONFIG_BT_TICKER_PRIORITY_SET */
838 	if (ticker->next != TICKER_NULL) {
839 
840 #endif /* !CONFIG_BT_TICKER_PRIORITY_SET */
841 
842 		uint16_t lazy_current = ticker->lazy_current;
843 		uint32_t ticker_ticks_slot;
844 
845 		if (TICKER_HAS_SLOT_WINDOW(ticker) && !ticker->ticks_slot) {
846 			ticker_ticks_slot = HAL_TICKER_RESCHEDULE_MARGIN;
847 		} else {
848 			ticker_ticks_slot = ticker->ticks_slot;
849 		}
850 
851 		/* Check if this ticker node will starve next node which has
852 		 * latency or higher priority
853 		 */
854 		if (lazy_current >= ticker->lazy_periodic) {
855 			lazy_current -= ticker->lazy_periodic;
856 		}
857 		uint8_t  id_head = ticker->next;
858 		uint32_t acc_ticks_to_expire = 0U;
859 
860 		/* Age is time since last expiry */
861 		uint32_t current_age = ticker->ticks_periodic +
862 				    (lazy_current * ticker->ticks_periodic);
863 
864 		while (id_head != TICKER_NULL) {
865 			struct ticker_node *ticker_next = &nodes[id_head];
866 			uint32_t ticker_next_ticks_slot;
867 
868 			/* Accumulate ticks_to_expire for each node */
869 			acc_ticks_to_expire += ticker_next->ticks_to_expire;
870 			if (acc_ticks_to_expire > ticker_ticks_slot) {
871 				break;
872 			}
873 
874 			if (TICKER_HAS_SLOT_WINDOW(ticker_next) &&
875 			    (ticker_next->ticks_slot == 0U)) {
876 				ticker_next_ticks_slot =
877 					HAL_TICKER_RESCHEDULE_MARGIN;
878 			} else {
879 				ticker_next_ticks_slot =
880 					ticker_next->ticks_slot;
881 			}
882 
883 			/* We only care about nodes with slot reservation */
884 			if (ticker_next_ticks_slot == 0U) {
885 				id_head = ticker_next->next;
886 				continue;
887 			}
888 
889 			uint16_t lazy_next = ticker_next->lazy_current;
890 			uint8_t  lazy_next_periodic_skip =
891 				ticker_next->lazy_periodic > lazy_next;
892 
893 			if (!lazy_next_periodic_skip) {
894 				lazy_next -= ticker_next->lazy_periodic;
895 			}
896 
897 			/* Age is time since last expiry */
898 			uint32_t next_age = (ticker_next->ticks_periodic == 0U ?
899 					  0U :
900 					 (ticker_next->ticks_periodic -
901 					  ticker_next->ticks_to_expire)) +
902 					 (lazy_next *
903 					  ticker_next->ticks_periodic);
904 
905 			/* Was the current node scheduled earlier? */
906 			uint8_t current_is_older =
907 				(ticker->ticks_periodic == 0U) ||
908 				(current_age > next_age);
909 			/* Was next node scheduled earlier (legacy priority)? */
910 			uint8_t next_is_older =
911 				(ticker->ticks_periodic != 0U) &&
912 				(next_age > current_age);
913 
914 			/* Is the current and next node equal in force? */
915 			uint8_t equal_force =
916 				(ticker->force == ticker_next->force);
917 			/* Is force requested for next node (e.g. update) -
918 			 * more so than for current node?
919 			 */
920 			uint8_t next_force =
921 				(ticker_next->force > ticker->force);
922 
923 #if defined(CONFIG_BT_TICKER_PRIORITY_SET)
924 			/* Does next node have critical priority and should
925 			 * always be scheduled?
926 			 */
927 			uint8_t next_is_critical =
928 				(ticker_next->priority ==
929 				 TICKER_PRIORITY_CRITICAL);
930 
931 			/* Is the current and next node equal in priority? */
932 			uint8_t equal_priority =
933 				(ticker->priority == ticker_next->priority);
934 
935 #else /* !CONFIG_BT_TICKER_PRIORITY_SET */
936 			uint8_t next_is_critical = 0U;
937 			uint8_t equal_priority = 1U;
938 			uint8_t next_has_priority = 0U;
939 
940 #endif /* !CONFIG_BT_TICKER_PRIORITY_SET */
941 
942 #if defined(CONFIG_BT_TICKER_EXT)
943 #if defined(CONFIG_BT_TICKER_EXT_SLOT_WINDOW_YIELD)
944 #if defined(CONFIG_BT_TICKER_PRIORITY_SET)
945 			/* Does next node have higher priority? */
946 			uint8_t next_has_priority =
947 				(!TICKER_HAS_SLOT_WINDOW(ticker_next) &&
948 				((lazy_next - ticker_next->priority) >
949 				 (lazy_current - ticker->priority));
950 #endif /* CONFIG_BT_TICKER_PRIORITY_SET */
951 
952 			/* Colliding next ticker does not use ticks_slot_window
953 			 * or it does not fit after the current ticker within
954 			 * the ticks_slot_window.
955 			 */
956 			uint8_t next_not_ticks_slot_window =
957 					!TICKER_HAS_SLOT_WINDOW(ticker_next) ||
958 					(ticker_next->ext_data->is_drift_in_window &&
959 					 TICKER_HAS_SLOT_WINDOW(ticker)) ||
960 					((acc_ticks_to_expire +
961 					  ticker_next->ext_data->ticks_slot_window -
962 					  ticker_next->ticks_slot) <
963 					 ticker->ticks_slot);
964 
965 			/* Can the current ticker with ticks_slot_window be
966 			 * scheduled after the colliding ticker?
967 			 */
968 			uint8_t curr_has_ticks_slot_window =
969 					TICKER_HAS_SLOT_WINDOW(ticker) &&
970 					((acc_ticks_to_expire +
971 					  ticker_next->ticks_slot) <=
972 					 (ticker->ext_data->ticks_slot_window -
973 					  ticker->ticks_slot));
974 
975 #else /* !CONFIG_BT_TICKER_EXT_SLOT_WINDOW_YIELD */
976 #if defined(CONFIG_BT_TICKER_PRIORITY_SET)
977 			/* Does next node have higher priority? */
978 			uint8_t next_has_priority =
979 				(lazy_next - ticker_next->priority) >
980 				(lazy_current - ticker->priority);
981 
982 #endif /* CONFIG_BT_TICKER_PRIORITY_SET */
983 			uint8_t next_not_ticks_slot_window = 1U;
984 
985 			/* Can the current ticker with ticks_slot_window be
986 			 * scheduled after the colliding ticker?
987 			 * NOTE: Tickers with ticks_slot_window and no
988 			 *       ticks_slot (unreserved) be always scheduled
989 			 *       after the colliding ticker.
990 			 */
991 			uint8_t curr_has_ticks_slot_window =
992 				(TICKER_HAS_SLOT_WINDOW(ticker) &&
993 				 !ticker->ticks_slot &&
994 				 ((acc_ticks_to_expire +
995 				   ticker_next->ticks_slot) <=
996 				  (ticker->ext_data->ticks_slot_window)));
997 
998 #endif /* !CONFIG_BT_TICKER_EXT_SLOT_WINDOW_YIELD */
999 #else /* !CONFIG_BT_TICKER_EXT */
1000 #if defined(CONFIG_BT_TICKER_PRIORITY_SET)
1001 			/* Does next node have higher priority? */
1002 			uint8_t next_has_priority =
1003 				(lazy_next - ticker_next->priority) >
1004 				(lazy_current - ticker->priority);
1005 
1006 #endif /* CONFIG_BT_TICKER_PRIORITY_SET */
1007 			uint8_t next_not_ticks_slot_window = 1U;
1008 			uint8_t curr_has_ticks_slot_window = 0U;
1009 
1010 #endif /* !CONFIG_BT_TICKER_EXT */
1011 
1012 			/* Check if next node is within this reservation slot
1013 			 * and wins conflict resolution
1014 			 */
1015 			if ((curr_has_ticks_slot_window &&
1016 			     next_not_ticks_slot_window) ||
1017 			    (!lazy_next_periodic_skip &&
1018 			     (next_is_critical ||
1019 			      next_force ||
1020 			      (next_has_priority && !current_is_older) ||
1021 			      (equal_priority && equal_force && next_is_older &&
1022 			       next_not_ticks_slot_window)))) {
1023 				/* This node must be skipped - check window */
1024 				return 1U;
1025 			}
1026 			id_head = ticker_next->next;
1027 		}
1028 	}
1029 
1030 	return 0U;
1031 }
1032 #endif /* !CONFIG_BT_TICKER_LOW_LAT &&
1033 	* !CONFIG_BT_TICKER_SLOT_AGNOSTIC
1034 	*/
1035 
1036 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
1037 /**
1038  * @brief Get expiration delta from one ticker id to another ticker id
1039  *
1040  * @details Helper function to get expiration info between two tickers
1041  *
1042  * @param instance            Ticker instance
1043  * @param to_ticker_id        Target ticker id
1044  * @param from_ticker_id      Ticker id to compare with
1045  * @param expire_info         Pointer to ticker_expire_info that will
1046  *                            get populated with the result
1047  *
1048  * @internal
1049  */
static void ticker_get_expire_info(struct ticker_instance *instance, uint8_t to_ticker_id,
					  uint8_t from_ticker_id,
					  struct ticker_expire_info_internal *expire_info)
{
	struct ticker_node *current_node;
	uint32_t acc_ticks_to_expire = 0;
	uint8_t current_ticker_id;
	uint32_t from_ticks = 0;
	bool from_found = false;
	uint32_t to_ticks = 0;
	bool to_found = false;

	/* Traverse the ticker list; each node stores ticks_to_expire relative
	 * to the preceding node, so the running sum yields the absolute
	 * ticks-to-expire for the 'from' and 'to' tickers.
	 */
	current_ticker_id = instance->ticker_id_head;
	current_node = &instance->nodes[instance->ticker_id_head];
	while (current_ticker_id != TICKER_NULL && (!to_found || !from_found)) {
		/* Accumulate expire ticks */
		acc_ticks_to_expire += current_node->ticks_to_expire;

		if (current_ticker_id == from_ticker_id) {
			from_ticks = acc_ticks_to_expire;
			from_found = true;
		} else if (current_ticker_id == to_ticker_id) {
			to_ticks = acc_ticks_to_expire;
			to_found = true;
		}

		/* NOTE(review): when next is TICKER_NULL this computes
		 * &nodes[TICKER_NULL] without dereferencing it; the loop
		 * condition exits before any use - confirm intended.
		 */
		current_ticker_id = current_node->next;
		current_node = &instance->nodes[current_ticker_id];
	}

	if (to_found && from_found) {
		struct ticker_node *to_ticker = &instance->nodes[to_ticker_id];

#if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
		uint32_t to_remainder = to_ticker->remainder_current;
#endif /* CONFIG_BT_TICKER_REMAINDER_SUPPORT */

		if (from_ticks > to_ticks) {
			/* from ticker is scheduled after the to ticker - use period
			 * to give a result
			 */
			if (to_ticker->ticks_periodic == 0) {
				/* single shot ticker */
				expire_info->found = 0;
				return;
			}

			/* Advance the 'to' ticker by whole periods (carrying
			 * the sub-tick remainder) until it lands at or after
			 * the 'from' ticker.
			 */
			while (to_ticks < from_ticks) {
				to_ticks += to_ticker->ticks_periodic;

#if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
				to_ticks += ticker_add_to_remainder(&to_remainder,
								    to_ticker->remainder_periodic);
#endif /* CONFIG_BT_TICKER_REMAINDER_SUPPORT */
			}
		}

		expire_info->ticks_to_expire = to_ticks - from_ticks;
#if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
		expire_info->remainder = to_remainder;
#endif /* CONFIG_BT_TICKER_REMAINDER_SUPPORT */
		expire_info->lazy = to_ticker->lazy_current;
		expire_info->found = 1;
	} else {
		expire_info->found = 0;
	}
}
1116 
1117 /**
1118  * @brief Allocate an expire info for the given ticker ID
1119  *
1120  * @param instance            Ticker instance
1121  * @param ticker_id           Ticker ID to allocate for
1122  *
1123  * @return Returns TICKER_STATUS_SUCCESS if the allocation succeeded,
1124  *         TICKER_STATUS_FAILURE otherwise
1125  *
1126  * @internal
1127  */
1128 static uint32_t ticker_alloc_expire_info(struct ticker_instance *instance, uint8_t ticker_id)
1129 {
1130 	uint32_t status = TICKER_STATUS_FAILURE;
1131 	uint8_t is_last = 0;
1132 
1133 	for (int i = 0; i < TICKER_EXPIRE_INFO_MAX; i++) {
1134 		if (instance->expire_infos[i].ticker_id == TICKER_NULL) {
1135 			struct ticker_node *ticker = &instance->nodes[ticker_id];
1136 
1137 			instance->expire_infos[i].ticker_id = ticker_id;
1138 			instance->expire_infos[i].outdated = true;
1139 			instance->expire_infos[i].last = is_last;
1140 			ticker->ext_data->other_expire_info = &instance->expire_infos[i];
1141 			instance->expire_infos_outdated = true;
1142 			status = TICKER_STATUS_SUCCESS;
1143 			break;
1144 		} else if (instance->expire_infos[i].last && i < TICKER_EXPIRE_INFO_MAX - 1) {
1145 			instance->expire_infos[i].last = 0;
1146 			is_last = 1;
1147 		}
1148 	}
1149 
1150 	return status;
1151 }
1152 
1153 /**
1154  * @brief Free a previously allocated expire info for the given ticker ID
1155  *
1156  * @param instance            Ticker instance
1157  * @param ticker_id           Ticker ID to free up the allocation for
1158  *
1159  * @internal
1160  */
1161 static void ticker_free_expire_info(struct ticker_instance *instance, uint8_t ticker_id)
1162 {
1163 	uint8_t is_last = 0;
1164 	uint8_t index;
1165 
1166 	for (index = 0; index < TICKER_EXPIRE_INFO_MAX; index++) {
1167 		if (instance->expire_infos[index].ticker_id == ticker_id) {
1168 			instance->expire_infos[index].ticker_id = TICKER_NULL;
1169 			is_last = instance->expire_infos[index].last;
1170 			instance->expire_infos[index].last = 0;
1171 			break;
1172 		}
1173 	}
1174 
1175 	if (is_last) {
1176 		/* Find new last used element and mark it */
1177 		for (; index >= 0; index--) {
1178 			if (instance->expire_infos[index].ticker_id != TICKER_NULL || index == 0) {
1179 				instance->expire_infos[index].last = 1;
1180 				break;
1181 			}
1182 		}
1183 	}
1184 }
1185 
1186 /**
1187  * @brief Mark all expire infos involving a ticker ID as outdated
1188  *
1189  * @details If a ticker moves this function should be called to mark all expiration
1190  *          infos (if any) that involve that ticker as outdated and in need of re-calculation.
1191  *          If any expiration infos involving the ticker_id is found, the ticker instances
1192  *          expire_infos_outdated flag is also set.
1193  *
1194  * @param instance            Ticker instance
1195  * @param ticker_id           ID of ticker that has moved
1196  *
1197  * @internal
1198  */
1199 static void ticker_mark_expire_info_outdated(struct ticker_instance *instance, uint8_t ticker_id)
1200 {
1201 	for (int i = 0; i < TICKER_EXPIRE_INFO_MAX; i++) {
1202 		if (instance->expire_infos[i].ticker_id != TICKER_NULL) {
1203 			uint8_t current_id = instance->expire_infos[i].ticker_id;
1204 			struct ticker_node *ticker = &instance->nodes[current_id];
1205 
1206 			if (current_id == ticker_id ||
1207 			    ticker->ext_data->expire_info_id == ticker_id) {
1208 				instance->expire_infos[i].outdated = true;
1209 				instance->expire_infos_outdated = true;
1210 			}
1211 		}
1212 		if (instance->expire_infos[i].last) {
1213 			break;
1214 		}
1215 	}
1216 }
1217 
1218 /**
1219  * @brief Run through all expire infos and update them if needed
1220  *
1221  * @details Runs through all expire_infos and runs ticker_get_expire_info()
1222  *          for any that are marked as outdated. Clears the expire_infos_outdated
1223  *          flag when done
1224  *
1225  * @param param Pointer to ticker instance
1226  *
1227  * @internal
1228  */
1229 static void ticker_job_update_expire_infos(struct ticker_instance *instance)
1230 {
1231 	for (int i = 0; i < TICKER_EXPIRE_INFO_MAX; i++) {
1232 		struct ticker_expire_info_internal *info = &instance->expire_infos[i];
1233 
1234 		if (info->ticker_id != TICKER_NULL && info->outdated) {
1235 			struct ticker_node *ticker = &instance->nodes[info->ticker_id];
1236 
1237 			ticker_get_expire_info(instance, ticker->ext_data->expire_info_id,
1238 						info->ticker_id, info);
1239 			info->outdated = false;
1240 		}
1241 
1242 		if (info->last) {
1243 			break;
1244 		}
1245 	}
1246 
1247 	instance->expire_infos_outdated = false;
1248 }
1249 
1250 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
1251 
1252 /**
1253  * @brief Ticker worker
1254  *
1255  * @details Runs as upper half of ticker operation, triggered by a compare
1256  * match from the underlying counter HAL, via the ticker_trigger function.
1257  * Traverses ticker nodes to find tickers expired since last job
1258  * execution. Expired (requested) ticker nodes have their timeout callback
1259  * functions called. Finally, a ticker job is enqueued. Invoked from the
1260  * ticker worker mayfly context (TICKER_MAYFLY_CALL_ID_WORKER)
1261  *
1262  * @param param Pointer to ticker instance
1263  *
1264  */
void ticker_worker(void *param)
{
	struct ticker_instance *instance = param;
	struct ticker_node *node;
	uint32_t ticks_elapsed;
	uint32_t ticks_expired;
	uint8_t ticker_id_head;
	uint32_t ticks_now;

	/* Defer worker if job running */
	instance->worker_trigger = 1U;
	if (instance->job_guard) {
		return;
	}

	/* If no tickers queued (active), do nothing */
	if (instance->ticker_id_head == TICKER_NULL) {
		instance->worker_trigger = 0U;
		return;
	}

	ticks_now = cntr_cnt_get();

	/* Get ticks elapsed since last job execution */
	ticks_elapsed = ticker_ticks_diff_get(ticks_now,
					      instance->ticks_current);

	/* Initialize actual elapsed ticks being consumed */
	ticks_expired = 0U;

	/* Auto variable containing the head of tickers expiring */
	ticker_id_head = instance->ticker_id_head;

#if !defined(CONFIG_BT_TICKER_LOW_LAT) && \
	!defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
	/* Check if the previous ticker node which had air-time, is still
	 * active and has this time slot reserved
	 */
	uint8_t slot_reserved = 0;

	if (instance->ticks_slot_previous > ticks_elapsed) {
		/* This node intersects reserved slot */
		slot_reserved = 1;
	}
#endif /* !CONFIG_BT_TICKER_LOW_LAT &&
	* !CONFIG_BT_TICKER_SLOT_AGNOSTIC
	*/

	/* Expire all tickers within ticks_elapsed and collect ticks_expired */
	node = &instance->nodes[0];

	while (ticker_id_head != TICKER_NULL) {
		struct ticker_node *ticker;
		uint32_t ticks_to_expire;
		uint8_t must_expire_skip;
		uint32_t ticks_drift;

		ticker = &node[ticker_id_head];

		/* Stop if ticker did not expire */
		ticks_to_expire = ticker->ticks_to_expire;
		if (ticks_elapsed < ticks_to_expire) {
			break;
		}

		/* Decrement ticks_elapsed and collect expired ticks */
		ticks_elapsed -= ticks_to_expire;
		ticks_expired += ticks_to_expire;

		/* Move to next ticker node */
		ticker_id_head = ticker->next;
		must_expire_skip = 0U;

		/* Skip if not scheduled to execute: (req - ack) == 1 means an
		 * expiry is due; any other delta indicates a user operation
		 * (start/stop/update) is in flight for this node.
		 */
		if (((ticker->req - ticker->ack) & 0xff) != 1U) {
			continue;
		}

#if !defined(CONFIG_BT_TICKER_LOW_LAT) && \
	!defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
		uint32_t ticker_ticks_slot;

		if (TICKER_HAS_SLOT_WINDOW(ticker) &&
		    (ticker->ticks_slot == 0U)) {
			ticker_ticks_slot = HAL_TICKER_RESCHEDULE_MARGIN;
		} else {
			ticker_ticks_slot = ticker->ticks_slot;
		}

		/* Check if node has slot reservation and resolve any collision
		 * with other ticker nodes
		 */
		if ((ticker_ticks_slot != 0U) &&
		    (slot_reserved ||
		     (instance->ticks_slot_previous > ticks_expired) ||
		     ticker_resolve_collision(node, ticker))) {
#if defined(CONFIG_BT_TICKER_EXT)
			struct ticker_ext *ext_data = ticker->ext_data;

			if (ext_data &&
			    ext_data->ticks_slot_window != 0U &&
			    ext_data->reschedule_state ==
			    TICKER_RESCHEDULE_STATE_NONE &&
			   (ticker->lazy_periodic <= ticker->lazy_current)) {
				/* Mark node for re-scheduling in ticker_job */
				ext_data->reschedule_state =
					TICKER_RESCHEDULE_STATE_PENDING;
			} else if (ext_data) {
				/* Mark node as not re-scheduling */
				ext_data->reschedule_state =
					TICKER_RESCHEDULE_STATE_NONE;
			}
#endif /* CONFIG_BT_TICKER_EXT */
			/* Increment lazy_current to indicate skipped event. In case
			 * of re-scheduled node, the lazy count will be decremented in
			 * ticker_job_reschedule_in_window when completed.
			 */
			ticker->lazy_current++;

			if ((ticker->must_expire == 0U) ||
			    (ticker->lazy_periodic >= ticker->lazy_current) ||
			    TICKER_RESCHEDULE_PENDING(ticker)) {
				/* Not a must-expire node or this is periodic
				 * latency or pending re-schedule. Skip this
				 * ticker node. Mark it as elapsed.
				 */
				ticker->ack--;
				continue;
			}

			/* Continue but perform shallow expiry */
			must_expire_skip = 1U;
		}

#if defined(CONFIG_BT_TICKER_EXT)
		if (ticker->ext_data) {
			ticks_drift = ticker->ext_data->ticks_drift;
			ticker->ext_data->ticks_drift = 0U;
			/* Mark node as not re-scheduling */
			ticker->ext_data->reschedule_state =
				TICKER_RESCHEDULE_STATE_NONE;
		} else {
			ticks_drift = 0U;
		}

#else  /* !CONFIG_BT_TICKER_EXT */
		ticks_drift = 0U;
#endif /* !CONFIG_BT_TICKER_EXT */

#else  /* CONFIG_BT_TICKER_LOW_LAT ||
	* CONFIG_BT_TICKER_SLOT_AGNOSTIC
	*/
		ticks_drift = 0U;
#endif /* CONFIG_BT_TICKER_LOW_LAT ||
	* CONFIG_BT_TICKER_SLOT_AGNOSTIC
	*/

		/* Scheduled timeout is acknowledged to be complete */
		ticker->ack--;

		if (ticker->timeout_func) {
			uint32_t remainder_current;
			uint32_t ticks_at_expire;

			/* Absolute (masked) counter value at the node's
			 * nominal expiry, compensated for negative drift
			 */
			ticks_at_expire = (instance->ticks_current +
					   ticks_expired -
					   ticker->ticks_to_expire_minus) &
					   HAL_TICKER_CNTR_MASK;

#if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
			remainder_current = ticker->remainder_current;
#else /* !CONFIG_BT_TICKER_REMAINDER_SUPPORT */
			remainder_current = 0U;
#endif /* !CONFIG_BT_TICKER_REMAINDER_SUPPORT */

#if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
			if (ticker->ext_data &&
			    ticker->ext_data->ext_timeout_func) {
				struct ticker_expire_info_internal *expire_info;
				struct ticker_ext_context ext_context;
				ticker_timeout_func timeout_func;

				timeout_func = ticker->ext_data->ext_timeout_func;
				expire_info = ticker->ext_data->other_expire_info;
				if (ticker->ext_data->expire_info_id != TICKER_NULL) {
					LL_ASSERT(expire_info && !expire_info->outdated);
				}

				ext_context.context = ticker->context;
				if (expire_info && expire_info->found) {
					ext_context.other_expire_info = (void *)expire_info;
				} else {
					ext_context.other_expire_info = NULL;
				}

				DEBUG_TICKER_TASK(1);

				/* Invoke the timeout callback */
				timeout_func(ticks_at_expire,
					     ticks_drift,
					     remainder_current,
					     must_expire_skip ?
					     TICKER_LAZY_MUST_EXPIRE :
					     ticker->lazy_current,
					     ticker->force,
					     &ext_context);
				/* NOTE(review): unlike the non-ext path below,
				 * there is no matching DEBUG_TICKER_TASK(0)
				 * after this callback - confirm intended.
				 */
			} else
#endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
			{
				DEBUG_TICKER_TASK(1);

				/* Invoke the timeout callback */
				ticker->timeout_func(ticks_at_expire,
					     ticks_drift,
					     remainder_current,
					     must_expire_skip ?
					     TICKER_LAZY_MUST_EXPIRE :
					     ticker->lazy_current,
					     ticker->force,
					     ticker->context);
				DEBUG_TICKER_TASK(0);
			}

			if (!IS_ENABLED(CONFIG_BT_TICKER_LOW_LAT) &&
			   (must_expire_skip == 0U)) {
				/* Reset latency to periodic offset */
				ticker->lazy_current = 0U;
				ticker->force = 0U;

#if !defined(CONFIG_BT_TICKER_LOW_LAT) && \
	!defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
				if (ticker_ticks_slot != 0U) {
					/* Any further nodes will be skipped */
					slot_reserved = 1U;
				}
#endif /* !CONFIG_BT_TICKER_LOW_LAT &&
	* !CONFIG_BT_TICKER_SLOT_AGNOSTIC
	*/

			}
		}
	}

	/* Queue the elapsed ticks (double-buffered) for the ticker job
	 * enqueued below
	 */
	if (instance->ticks_elapsed_first == instance->ticks_elapsed_last) {
		ticker_next_elapsed(&instance->ticks_elapsed_last);
	}
	instance->ticks_elapsed[instance->ticks_elapsed_last] = ticks_expired;

	instance->worker_trigger = 0U;

	/* Enqueue the ticker job with chain=1 (do not inline) */
	instance->sched_cb(TICKER_CALL_ID_WORKER, TICKER_CALL_ID_JOB, 1,
			   instance);
}
1520 
1521 /**
1522  * @brief Prepare ticker node expiration
1523  *
1524  * @details Calculates the number of ticks until next expiration, taking
1525  * into consideration any negative drift correction.
1526  *
1527  * @param ticker         Pointer to ticker node
1528  * @param ticks_current  Current number of ticks (elapsed)
1529  * @param ticks_at_start Number of ticks at start (anchor)
1530  *
1531  * @internal
1532  */
1533 static void ticks_to_expire_prep(struct ticker_node *ticker,
1534 				 uint32_t ticks_current, uint32_t ticks_at_start)
1535 {
1536 	uint32_t ticks_to_expire = ticker->ticks_to_expire;
1537 	uint32_t ticks_to_expire_minus = ticker->ticks_to_expire_minus;
1538 
1539 	/* Calculate ticks to expire for this new node */
1540 	if (!((ticks_at_start - ticks_current) & BIT(HAL_TICKER_CNTR_MSBIT))) {
1541 		/* Most significant bit is 0 so ticks_at_start lies ahead of
1542 		 * ticks_current: ticks_at_start >= ticks_current
1543 		 */
1544 		ticks_to_expire += ticker_ticks_diff_get(ticks_at_start,
1545 							 ticks_current);
1546 	} else {
1547 		/* ticks_current > ticks_at_start
1548 		 */
1549 		uint32_t delta_current_start;
1550 
1551 		delta_current_start = ticker_ticks_diff_get(ticks_current,
1552 							    ticks_at_start);
1553 		if (ticks_to_expire > delta_current_start) {
1554 			/* There's still time until expiration - subtract
1555 			 * elapsed time
1556 			 */
1557 			ticks_to_expire -= delta_current_start;
1558 		} else {
1559 			/* Ticker node should have expired (we're late).
1560 			 * Add 'lateness' to negative drift correction
1561 			 * (ticks_to_expire_minus) and set ticks_to_expire
1562 			 * to 0
1563 			 */
1564 			ticks_to_expire_minus +=
1565 			    (delta_current_start - ticks_to_expire);
1566 			ticks_to_expire = 0U;
1567 		}
1568 	}
1569 
1570 	/* Handle negative drift correction */
1571 	if (ticks_to_expire > ticks_to_expire_minus) {
1572 		ticks_to_expire -= ticks_to_expire_minus;
1573 		ticks_to_expire_minus = 0U;
1574 	} else {
1575 		ticks_to_expire_minus -= ticks_to_expire;
1576 		ticks_to_expire = 0U;
1577 	}
1578 
1579 	/* Update ticker */
1580 	ticker->ticks_to_expire = ticks_to_expire;
1581 	ticker->ticks_to_expire_minus = ticks_to_expire_minus;
1582 }
1583 
1584 #if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
1585 /**
1586  * @brief Add to remainder
1587  *
 * @details Calculates whether the remainder should increment the expiration time
1589  * for above-microsecond precision counter HW. The remainder enables improved
1590  * ticker precision, but is disabled for sub-microsecond precision
1591  * configurations.
1592  * Note: This is the same functionality as ticker_remainder_inc(), except this
1593  * function allows doing the calculation without modifying any tickers
1594  *
1595  * @param remainder Pointer to remainder to add to
1596  * @param to_add    Remainder value to add
1597  *
1598  * @return Returns 1 to indicate ticks increment is due, otherwise 0
1599  * @internal
1600  */
1601 static inline uint8_t ticker_add_to_remainder(uint32_t *remainder, uint32_t to_add)
1602 {
1603 	*remainder += to_add;
1604 	if ((*remainder < BIT(31)) &&
1605 	    (*remainder > (HAL_TICKER_REMAINDER_RANGE >> 1))) {
1606 		*remainder -= HAL_TICKER_REMAINDER_RANGE;
1607 
1608 		return 1;
1609 	}
1610 
1611 	return 0;
1612 }
1613 
1614 /**
1615  * @brief Increment remainder
1616  *
 * @details Calculates whether the remainder should increment the expiration time
1618  * for above-microsecond precision counter HW. The remainder enables improved
1619  * ticker precision, but is disabled for sub-microsecond precision
1620  * configurations.
1621  *
1622  * @param ticker Pointer to ticker node
1623  *
1624  * @return Returns 1 to indicate increment is due, otherwise 0
1625  * @internal
1626  */
1627 static uint8_t ticker_remainder_inc(struct ticker_node *ticker)
1628 {
1629 	return ticker_add_to_remainder(&ticker->remainder_current, ticker->remainder_periodic);
1630 }
1631 
1632 /**
1633  * @brief Decrement remainder
1634  *
 * @details Calculates whether the remainder should decrement the expiration time
1636  * for above-microsecond precision counter HW. The remainder enables improved
1637  * ticker precision, but is disabled for sub-microsecond precision
1638  * configurations.
1639  *
1640  * @param ticker Pointer to ticker node
1641  *
1642  * @return Returns 1 to indicate decrement is due, otherwise 0
1643  * @internal
1644  */
1645 static uint8_t ticker_remainder_dec(struct ticker_node *ticker)
1646 {
1647 	uint8_t decrement = 0U;
1648 
1649 	if ((ticker->remainder_current >= BIT(31)) ||
1650 	    (ticker->remainder_current <= (HAL_TICKER_REMAINDER_RANGE >> 1))) {
1651 		decrement++;
1652 		ticker->remainder_current += HAL_TICKER_REMAINDER_RANGE;
1653 	}
1654 
1655 	ticker->remainder_current -= ticker->remainder_periodic;
1656 
1657 	return decrement;
1658 }
1659 #endif /* CONFIG_BT_TICKER_REMAINDER_SUPPORT */
1660 
1661 /**
1662  * @brief Invoke user operation callback
1663  *
1664  * @param user_op Pointer to user operation struct
1665  * @param status  User operation status to pass to callback
1666  *
1667  * @internal
1668  */
1669 static void ticker_job_op_cb(struct ticker_user_op *user_op, uint8_t status)
1670 {
1671 	user_op->op = TICKER_USER_OP_TYPE_NONE;
1672 	user_op->status = status;
1673 	if (user_op->fp_op_func) {
1674 		user_op->fp_op_func(user_op->status, user_op->op_context);
1675 	}
1676 }
1677 
1678 /**
1679  * @brief Update and insert ticker node
1680  *
1681  * @details Update ticker node with parameters passed in user operation.
1682  * After update, the ticker is inserted in front as new head.
1683  *
1684  * @param ticker	Pointer to ticker node
1685  * @param user_op	Pointer to user operation
1686  * @param ticks_current	Current ticker instance ticks
1687  * @param ticks_elapsed	Expired ticks at time of call
1688  * @param insert_head	Pointer to current head (id). Contains id
1689  *			from user operation upon exit
 * @return TICKER_STATUS_SUCCESS if the update was applied, otherwise the
 *         failure status from expire info allocation
 * @internal
 */
static inline uint32_t ticker_job_node_update(struct ticker_instance *instance,
					  struct ticker_node *ticker,
					  struct ticker_user_op *user_op,
					  uint32_t ticks_now,
					  uint32_t ticks_current,
					  uint32_t ticks_elapsed,
					  uint8_t *insert_head)
{
	uint32_t ticks_to_expire = ticker->ticks_to_expire;

	/* Normalize ticks_to_expire to 'now' by consuming the ticks elapsed
	 * since the instance's tick anchor.
	 */
	ticks_elapsed += ticker_ticks_diff_get(ticks_now, ticks_current);
	if (ticks_to_expire > ticks_elapsed) {
		ticks_to_expire -= ticks_elapsed;
	} else {
		/* Already late; account the lateness as negative drift */
		ticker->ticks_to_expire_minus += ticks_elapsed -
						 ticks_to_expire;
		ticks_to_expire = 0U;
	}

	/* Update ticks_to_expire from latency (lazy) input. A non-zero
	 * update 'lazy' is offset by one (0 means no change); normalize it.
	 */
	if ((ticker->ticks_periodic != 0U) &&
	    (user_op->params.update.lazy != 0U)) {
		user_op->params.update.lazy--;
		/* Pull the expiry earlier, one period at a time, while the
		 * node has more accumulated latency than requested
		 */
		while ((ticks_to_expire > ticker->ticks_periodic) &&
		       (ticker->lazy_current > user_op->params.update.lazy)) {
			ticks_to_expire -= ticker->ticks_periodic;

#if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
			ticks_to_expire -= ticker_remainder_dec(ticker);
#endif /* CONFIG_BT_TICKER_REMAINDER_SUPPORT */

			ticker->lazy_current--;
		}

		/* Push the expiry later while the node has less accumulated
		 * latency than requested
		 */
		while (ticker->lazy_current < user_op->params.update.lazy) {
			ticks_to_expire += ticker->ticks_periodic;

#if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
			ticks_to_expire += ticker_remainder_inc(ticker);
#endif /* CONFIG_BT_TICKER_REMAINDER_SUPPORT */

			ticker->lazy_current++;
		}
		ticker->lazy_periodic = user_op->params.update.lazy;
	}

	/* Update ticks_to_expire from drift input */
	ticker->ticks_to_expire = ticks_to_expire +
				  user_op->params.update.ticks_drift_plus;
	ticker->ticks_to_expire_minus +=
				user_op->params.update.ticks_drift_minus;

#if defined(CONFIG_BT_TICKER_EXT) && !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
	/* TODO: An improvement on this could be to only consider the drift
	 * (ADV => randomization) if re-scheduling fails. We would still store
	 * the drift ticks here, but not actually update the node. That would
	 * allow the ticker to use the full window for re-scheduling.
	 */
	struct ticker_ext *ext_data = ticker->ext_data;

	if (ext_data && ext_data->ticks_slot_window != 0U) {
		/* Record the net drift for slot-window re-scheduling */
		ext_data->ticks_drift =
			user_op->params.update.ticks_drift_plus -
			user_op->params.update.ticks_drift_minus;
	}
#endif /* CONFIG_BT_TICKER_EXT && !CONFIG_BT_TICKER_SLOT_AGNOSTIC */

	ticks_to_expire_prep(ticker, ticks_current, ticks_now);

#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
	/* Update ticks_slot parameter from plus/minus input */
	ticker->ticks_slot += user_op->params.update.ticks_slot_plus;
	if (ticker->ticks_slot > user_op->params.update.ticks_slot_minus) {
		ticker->ticks_slot -= user_op->params.update.ticks_slot_minus;
	} else {
		/* Clamp at zero rather than underflowing */
		ticker->ticks_slot = 0U;
	}
#endif /* CONFIG_BT_TICKER_SLOT_AGNOSTIC */

	/* Update force parameter */
	if (user_op->params.update.force != 0U) {
		ticker->force = user_op->params.update.force;
	}

#if defined(CONFIG_BT_TICKER_EXT) && !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC) &&\
	!defined(CONFIG_BT_TICKER_LOW_LAT)
	/* Update must_expire parameter */
	if (user_op->params.update.must_expire) {
		/* 1: disable, 2: enable */
		ticker->must_expire = (user_op->params.update.must_expire - 1);
	}
#endif /* CONFIG_BT_TICKER_EXT */

#if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
	/* Maintain the expire info pairing when the linked ticker id changes;
	 * a self-reference (expire_info_id == own id) is ignored.
	 */
	if (ticker->ext_data && user_op->params.update.expire_info_id != user_op->id) {
		if (user_op->params.update.expire_info_id != TICKER_NULL &&
		    !ticker->ext_data->other_expire_info) {
			uint32_t status;

			status = ticker_alloc_expire_info(instance, user_op->id);
			if (status) {
				/* No free expire info entry; fail the update */
				return status;
			}
		} else if (user_op->params.update.expire_info_id == TICKER_NULL &&
			 ticker->ext_data->other_expire_info) {
			ticker_free_expire_info(instance, user_op->id);
			ticker->ext_data->other_expire_info = NULL;
		}

		ticker->ext_data->expire_info_id = user_op->params.update.expire_info_id;
		if (ticker->ext_data->expire_info_id != TICKER_NULL) {
			ticker_mark_expire_info_outdated(instance, user_op->id);
		}
	}
#else /* !CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
	ARG_UNUSED(instance);
#endif /* !CONFIG_BT_TICKER_EXT_EXPIRE_INFO */

	/* Prepend the node to the caller's insert list */
	ticker->next = *insert_head;
	*insert_head = user_op->id;

	return TICKER_STATUS_SUCCESS;
}
1815 
1816 /**
1817  * @brief Manage user update operation
1818  *
1819  * @details Called by ticker_job to execute an update request, or set node
1820  * as done if request is not update. Invokes user operation callback before
1821  * exit.
1822  *
1823  * @param instance	Pointer to ticker instance
1824  * @param ticker	Pointer to ticker node
1825  * @param user_op	Pointer to user operation
1826  * @param ticks_elapsed Expired ticks at time of call
1827  * @param insert_head	Pointer to current head (id). For update operation,
1828  *			contains operation id upon exit
1829  * @internal
1830  */
1831 static inline void ticker_job_node_manage(struct ticker_instance *instance,
1832 					  struct ticker_node *ticker,
1833 					  struct ticker_user_op *user_op,
1834 					  uint32_t ticks_now,
1835 					  uint32_t ticks_elapsed,
1836 					  uint8_t *insert_head)
1837 {
1838 	/* Handle update of ticker by re-inserting it back. */
1839 	if (IS_ENABLED(CONFIG_BT_TICKER_UPDATE) &&
1840 	    (user_op->op == TICKER_USER_OP_TYPE_UPDATE)) {
1841 		/* Remove ticker node from list */
1842 		ticker->ticks_to_expire = ticker_dequeue(instance, user_op->id);
1843 
1844 		/* Update node and insert back */
1845 		ticker_job_node_update(instance, ticker, user_op, ticks_now,
1846 				       instance->ticks_current, ticks_elapsed,
1847 				       insert_head);
1848 
1849 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
1850 		ticker_mark_expire_info_outdated(instance, user_op->id);
1851 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
1852 
1853 		/* Set schedule status of node
1854 		 * as updating.
1855 		 */
1856 		ticker->req++;
1857 	} else {
1858 		/* If stop/stop_abs requested, then dequeue node */
1859 		if (user_op->op != TICKER_USER_OP_TYPE_YIELD_ABS) {
1860 			/* Remove ticker node from list */
1861 			ticker->ticks_to_expire = ticker_dequeue(instance,
1862 								 user_op->id);
1863 
1864 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
1865 			if (ticker->ext_data && ticker->ext_data->expire_info_id != TICKER_NULL) {
1866 				ticker_free_expire_info(instance, user_op->id);
1867 				ticker->ext_data->other_expire_info = NULL;
1868 			}
1869 
1870 			ticker_mark_expire_info_outdated(instance, user_op->id);
1871 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
1872 
1873 			/* Reset schedule status of node */
1874 			ticker->req = ticker->ack;
1875 		}
1876 
1877 #if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
1878 		/* If yield_abs/stop/stop_abs then adjust ticks_slot_previous */
1879 		if (instance->ticker_id_slot_previous == user_op->id) {
1880 			uint32_t ticks_current;
1881 			uint32_t ticks_at_yield;
1882 			uint32_t ticks_used;
1883 
1884 			if (user_op->op != TICKER_USER_OP_TYPE_YIELD_ABS) {
1885 				instance->ticker_id_slot_previous = TICKER_NULL;
1886 			}
1887 
1888 			if ((user_op->op == TICKER_USER_OP_TYPE_YIELD_ABS) ||
1889 			    (user_op->op == TICKER_USER_OP_TYPE_STOP_ABS)) {
1890 				ticks_at_yield =
1891 					user_op->params.yield.ticks_at_yield;
1892 			} else {
1893 				ticks_at_yield = ticks_now;
1894 			}
1895 
1896 			ticks_current = instance->ticks_current;
1897 			if (!((ticks_at_yield - ticks_current) &
1898 			      BIT(HAL_TICKER_CNTR_MSBIT))) {
1899 				ticks_used = ticks_elapsed +
1900 					ticker_ticks_diff_get(ticks_at_yield,
1901 							      ticks_current);
1902 			} else {
1903 				ticks_used =
1904 					ticker_ticks_diff_get(ticks_current,
1905 							      ticks_at_yield);
1906 				if (ticks_elapsed > ticks_used) {
1907 					ticks_used = ticks_elapsed -
1908 						     ticks_used;
1909 				} else {
1910 					ticks_used = 0;
1911 				}
1912 			}
1913 
1914 			if (instance->ticks_slot_previous > ticks_used) {
1915 				instance->ticks_slot_previous = ticks_used;
1916 			}
1917 		}
1918 #endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */
1919 
1920 	}
1921 
1922 	/* op success, @todo update may fail during
1923 	 * actual insert! need to design that yet.
1924 	 */
1925 	ticker_job_op_cb(user_op, TICKER_STATUS_SUCCESS);
1926 }
1927 
1928 /**
1929  * @brief Manage user operations list
1930  *
1931  * @details Called by ticker_job to execute requested user operations. A
1932  * number of operation may be queued since last ticker_job. Only update and
1933  * stop operations are handled. Start is handled implicitly by inserting
1934  * the ticker node in ticker_job_list_insert.
1935  *
1936  * @param instance	Pointer to ticker instance
1937  * @param ticks_elapsed Expired ticks at time of call
1938  * @param insert_head	Pointer to current head (id). For update operation,
1939  *			contains operation id upon exit
1940  * @return Returns 1 if operations is pending, 0 if all operations are done.
1941  * @internal
1942  */
1943 static inline uint8_t ticker_job_list_manage(struct ticker_instance *instance,
1944 					     uint32_t ticks_now,
1945 					     uint32_t ticks_elapsed,
1946 					     uint8_t *insert_head)
1947 {
1948 	uint8_t pending;
1949 	struct ticker_node *node;
1950 	struct ticker_user *users;
1951 	uint8_t count_user;
1952 
1953 	pending = 0U;
1954 	node = &instance->nodes[0];
1955 	users = &instance->users[0];
1956 	count_user = instance->count_user;
1957 	/* Traverse users - highest id first */
1958 	while (count_user--) {
1959 		struct ticker_user *user;
1960 		struct ticker_user_op *user_ops;
1961 
1962 		user = &users[count_user];
1963 		user_ops = &user->user_op[0];
1964 		/* Traverse user operation queue - middle to last (with wrap).
1965 		 * This operation updates user->middle to be the past the last
1966 		 * processed user operation. This is used later by
1967 		 * ticker_job_list_insert, for handling user->first to middle.
1968 		 */
1969 		while (user->middle != user->last) {
1970 			struct ticker_user_op *user_op;
1971 			struct ticker_node *ticker;
1972 			uint8_t state;
1973 			uint8_t prev;
1974 			uint8_t middle;
1975 
1976 			user_op = &user_ops[user->middle];
1977 
1978 			/* Increment index and handle wrapping */
1979 			prev = user->middle;
1980 			middle = user->middle + 1;
1981 			if (middle == user->count_user_op) {
1982 				middle = 0U;
1983 			}
1984 			user->middle = middle;
1985 
1986 			ticker = &node[user_op->id];
1987 
1988 			/* if op is start, then skip update and stop ops */
1989 			if (user_op->op < TICKER_USER_OP_TYPE_UPDATE) {
1990 #if defined(CONFIG_BT_TICKER_PREFER_START_BEFORE_STOP)
1991 				if (user_op->op == TICKER_USER_OP_TYPE_START) {
1992 					/* Set start pending to validate a
1993 					 * successive, inline stop operation.
1994 					 */
1995 					ticker->start_pending = 1U;
1996 				}
1997 #endif /* CONFIG_BT_TICKER_PREFER_START_BEFORE_STOP */
1998 
1999 				continue;
2000 			}
2001 
2002 			/* determine the ticker state */
2003 			state = (ticker->req - ticker->ack) & 0xff;
2004 
2005 			/* if not started or update not required,
2006 			 * set status and continue.
2007 			 */
2008 			if ((user_op->op > TICKER_USER_OP_TYPE_STOP_ABS) ||
2009 			    ((state == 0U) &&
2010 #if defined(CONFIG_BT_TICKER_PREFER_START_BEFORE_STOP)
2011 			     !ticker->start_pending &&
2012 #endif /* CONFIG_BT_TICKER_PREFER_START_BEFORE_STOP */
2013 			     (user_op->op != TICKER_USER_OP_TYPE_YIELD_ABS)) ||
2014 			    ((user_op->op == TICKER_USER_OP_TYPE_UPDATE) &&
2015 			     (user_op->params.update.ticks_drift_plus == 0U) &&
2016 			     (user_op->params.update.ticks_drift_minus == 0U) &&
2017 #if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
2018 			     (user_op->params.update.ticks_slot_plus == 0U) &&
2019 			     (user_op->params.update.ticks_slot_minus == 0U) &&
2020 #endif /* CONFIG_BT_TICKER_SLOT_AGNOSTIC */
2021 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
2022 			     (!ticker->ext_data ||
2023 				  user_op->params.update.expire_info_id == user_op->id) &&
2024 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
2025 			     (user_op->params.update.lazy == 0U) &&
2026 			     (user_op->params.update.force == 0U))) {
2027 				ticker_job_op_cb(user_op,
2028 						 TICKER_STATUS_FAILURE);
2029 				continue;
2030 			}
2031 
2032 			/* Delete or yield node, if not expired */
2033 			if ((state == 1U) ||
2034 			    (user_op->op == TICKER_USER_OP_TYPE_YIELD_ABS)) {
2035 				ticker_job_node_manage(instance, ticker,
2036 						       user_op, ticks_now,
2037 						       ticks_elapsed,
2038 						       insert_head);
2039 			} else {
2040 				/* Update on expired node requested, deferring
2041 				 * update until bottom half finishes.
2042 				 */
2043 				/* sched job to run after worker bottom half.
2044 				 */
2045 				instance->sched_cb(TICKER_CALL_ID_JOB,
2046 						   TICKER_CALL_ID_JOB, 1,
2047 						   instance);
2048 
2049 				/* Update the index upto which management is
2050 				 * complete.
2051 				 */
2052 				user->middle = prev;
2053 
2054 				pending = 1U;
2055 				break;
2056 			}
2057 		}
2058 	}
2059 
2060 	return pending;
2061 }
2062 
2063 /**
2064  * @brief Handle ticker node expirations
2065  *
2066  * @details Called by ticker_job to schedule next expirations. Expired ticker
2067  * nodes are removed from the active list, and re-inserted if periodic.
2068  *
2069  * @param instance	 Pointer to ticker instance
2070  * @param ticks_previous Absolute ticks at ticker_job start
2071  * @param ticks_elapsed  Expired ticks at time of call
2072  * @param insert_head	 Pointer to current head (id). Updated if nodes are
2073  *			 re-inserted
2074  * @internal
2075  */
2076 static inline void ticker_job_worker_bh(struct ticker_instance *instance,
2077 					uint32_t ticks_now,
2078 					uint32_t ticks_previous,
2079 					uint32_t ticks_elapsed,
2080 					uint8_t *insert_head)
2081 {
2082 	struct ticker_node *node;
2083 	uint32_t ticks_expired;
2084 	uint32_t ticks_latency;
2085 
2086 	ticks_latency = ticker_ticks_diff_get(ticks_now, ticks_previous);
2087 
2088 	node = &instance->nodes[0];
2089 	ticks_expired = 0U;
2090 	while (instance->ticker_id_head != TICKER_NULL) {
2091 		uint8_t skip_collision = 0U;
2092 		struct ticker_node *ticker;
2093 		uint32_t ticks_to_expire;
2094 		uint8_t id_expired;
2095 		uint8_t state;
2096 
2097 		/* auto variable for current ticker node */
2098 		id_expired = instance->ticker_id_head;
2099 		ticker = &node[id_expired];
2100 
2101 		/* Do nothing if ticker did not expire */
2102 		ticks_to_expire = ticker->ticks_to_expire;
2103 		if (ticks_elapsed < ticks_to_expire) {
2104 			ticker->ticks_to_expire -= ticks_elapsed;
2105 			break;
2106 		}
2107 
2108 		/* decrement ticks_elapsed and collect expired ticks */
2109 		ticks_elapsed -= ticks_to_expire;
2110 		ticks_latency -= ticks_to_expire;
2111 		ticks_expired += ticks_to_expire;
2112 
2113 		state = (ticker->req - ticker->ack) & 0xff;
2114 
2115 #if !defined(CONFIG_BT_TICKER_LOW_LAT)
2116 		/* Node with lazy count did not expire with callback, but
2117 		 * was either a collision or re-scheduled. This node should
2118 		 * not define the active slot reservation (slot_previous).
2119 		 */
2120 		skip_collision = (ticker->lazy_current != 0U);
2121 #endif /* !CONFIG_BT_TICKER_LOW_LAT */
2122 
2123 #if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
2124 		/* decrement ticks_slot_previous */
2125 		if (instance->ticks_slot_previous > ticks_to_expire) {
2126 			instance->ticks_slot_previous -= ticks_to_expire;
2127 		} else {
2128 			instance->ticker_id_slot_previous = TICKER_NULL;
2129 			instance->ticks_slot_previous = 0U;
2130 		}
2131 
2132 		uint32_t ticker_ticks_slot;
2133 
2134 		if (TICKER_HAS_SLOT_WINDOW(ticker) && !ticker->ticks_slot) {
2135 			ticker_ticks_slot = HAL_TICKER_RESCHEDULE_MARGIN;
2136 		} else {
2137 			ticker_ticks_slot = ticker->ticks_slot;
2138 		}
2139 
2140 		/* If a reschedule is set pending, we will need to keep
2141 		 * the slot_previous information
2142 		 */
2143 		if (ticker_ticks_slot && (state == 2U) && !skip_collision &&
2144 		    !TICKER_RESCHEDULE_PENDING(ticker)) {
2145 			instance->ticker_id_slot_previous = id_expired;
2146 			instance->ticks_slot_previous = ticker_ticks_slot;
2147 		}
2148 #endif /* CONFIG_BT_TICKER_SLOT_AGNOSTIC */
2149 
2150 		/* ticker expired, set ticks_to_expire zero */
2151 		ticker->ticks_to_expire = 0U;
2152 
2153 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
2154 		ticker_mark_expire_info_outdated(instance, instance->ticker_id_head);
2155 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
2156 
2157 		/* remove the expired ticker from head */
2158 		instance->ticker_id_head = ticker->next;
2159 
2160 		/* Ticker will be restarted if periodic or to be re-scheduled */
2161 		if ((ticker->ticks_periodic != 0U) ||
2162 		    TICKER_RESCHEDULE_PENDING(ticker)) {
2163 #if !defined(CONFIG_BT_TICKER_LOW_LAT)
2164 			if (TICKER_RESCHEDULE_PENDING(ticker)) {
2165 				/* Set the re-scheduled node to now. Will be
2166 				 * collision resolved after all nodes are
2167 				 * restarted
2168 				 */
2169 				ticker->ticks_to_expire = ticks_elapsed;
2170 
2171 				/* Reset ticker state, so that its put
2172 				 * back in requested state later down
2173 				 * in the code.
2174 				 */
2175 				ticker->req = ticker->ack;
2176 			} else {
2177 				uint16_t lazy_periodic;
2178 				uint32_t count;
2179 				uint16_t lazy;
2180 
2181 				/* If not skipped, apply lazy_periodic */
2182 				if (!ticker->lazy_current) {
2183 					lazy_periodic = ticker->lazy_periodic;
2184 				} else {
2185 					lazy_periodic = 0U;
2186 
2187 					/* Reset ticker state, so that its put
2188 					 * back in requested state later down
2189 					 * in the code.
2190 					 */
2191 					ticker->req = ticker->ack;
2192 				}
2193 
2194 				/* Reload ticks_to_expire with at least one
2195 				 * period.
2196 				 */
2197 				ticks_to_expire = 0U;
2198 				count = 1 + lazy_periodic;
2199 				while (count--) {
2200 					ticks_to_expire +=
2201 						ticker->ticks_periodic;
2202 
2203 #if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
2204 					ticks_to_expire +=
2205 						ticker_remainder_inc(ticker);
2206 #endif /* CONFIG_BT_TICKER_REMAINDER_SUPPORT */
2207 				}
2208 
2209 				/* Skip intervals that have elapsed w.r.t.
2210 				 * current ticks.
2211 				 */
2212 				lazy = 0U;
2213 
2214 				if (0) {
2215 #if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
2216 				} else if (!ticker->must_expire) {
2217 #else
2218 				} else {
2219 #endif /* CONFIG_BT_TICKER_SLOT_AGNOSTIC */
2220 					while (ticks_to_expire <
2221 					       ticks_latency) {
2222 						ticks_to_expire +=
2223 							ticker->ticks_periodic;
2224 
2225 #if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
2226 						ticks_to_expire +=
2227 						  ticker_remainder_inc(ticker);
2228 #endif /* CONFIG_BT_TICKER_REMAINDER_SUPPORT */
2229 
2230 						lazy++;
2231 					}
2232 				}
2233 
2234 				/* Use the calculated ticks to expire and
2235 				 * laziness.
2236 				 */
2237 				ticker->ticks_to_expire = ticks_to_expire;
2238 				ticker->lazy_current += (lazy_periodic + lazy);
2239 			}
2240 
2241 			ticks_to_expire_prep(ticker, instance->ticks_current,
2242 					     ((ticks_previous + ticks_expired) &
2243 					      HAL_TICKER_CNTR_MASK));
2244 #else /* CONFIG_BT_TICKER_LOW_LAT */
2245 			uint32_t count;
2246 			uint16_t lazy;
2247 
2248 			/* Prepare for next interval */
2249 			ticks_to_expire = 0U;
2250 			count = 1 + ticker->lazy_periodic;
2251 			while (count--) {
2252 				ticks_to_expire += ticker->ticks_periodic;
2253 #if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
2254 				ticks_to_expire += ticker_remainder_inc(ticker);
2255 #endif /* CONFIG_BT_TICKER_REMAINDER_SUPPORT */
2256 			}
2257 
2258 			/* Skip intervals that have elapsed w.r.t. current
2259 			 * ticks.
2260 			 */
2261 			lazy = 0U;
2262 
2263 			/* Schedule to a tick in the future */
2264 			while (ticks_to_expire < ticks_latency) {
2265 				ticks_to_expire += ticker->ticks_periodic;
2266 #if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
2267 				ticks_to_expire += ticker_remainder_inc(ticker);
2268 #endif /* CONFIG_BT_TICKER_REMAINDER_SUPPORT */
2269 				lazy++;
2270 			}
2271 
2272 			/* Use the calculated ticks to expire and laziness. */
2273 			ticker->ticks_to_expire = ticks_to_expire;
2274 			ticker->lazy_current = ticker->lazy_periodic + lazy;
2275 
2276 			ticks_to_expire_prep(ticker, instance->ticks_current,
2277 					     ((ticks_previous + ticks_expired) &
2278 					      HAL_TICKER_CNTR_MASK));
2279 
2280 			/* Reset force state of the node */
2281 			ticker->force = 0U;
2282 #endif /* CONFIG_BT_TICKER_LOW_LAT */
2283 
2284 			/* Add to insert list */
2285 			ticker->next = *insert_head;
2286 			*insert_head = id_expired;
2287 
2288 			/* set schedule status of node as restarting. */
2289 			ticker->req++;
2290 		} else {
2291 #if !defined(CONFIG_BT_TICKER_LOW_LAT)
2292 			/* A single-shot ticker in requested or skipped due to
2293 			 * collision shall generate a operation function
2294 			 * callback with failure status.
2295 			 */
2296 			if (state && ((state == 1U) || skip_collision) &&
2297 			    ticker->fp_op_func) {
2298 				ticker->fp_op_func(TICKER_STATUS_FAILURE,
2299 						   ticker->op_context);
2300 			}
2301 #endif /* !CONFIG_BT_TICKER_LOW_LAT */
2302 
2303 			/* reset schedule status of node */
2304 			ticker->req = ticker->ack;
2305 		}
2306 	}
2307 }
2308 
2309 /**
2310  * @brief Prepare ticker node start
2311  *
2312  * @details Called by ticker_job to prepare ticker node start operation.
2313  *
2314  * @param ticker	Pointer to ticker node
2315  * @param user_op	Pointer to user operation
2316  * @param ticks_current Expired ticks at time of call
2317  *
2318  * @internal
2319  */
2320 static inline uint32_t ticker_job_op_start(struct ticker_instance *instance,
2321 					   struct ticker_node *ticker,
2322 					   struct ticker_user_op *user_op,
2323 					   uint32_t ticks_current)
2324 {
2325 	struct ticker_user_op_start *start = (void *)&user_op->params.start;
2326 
2327 #if defined(CONFIG_BT_TICKER_LOW_LAT)
2328 	/* Must expire is not supported in compatibility mode */
2329 	LL_ASSERT(start->lazy < TICKER_LAZY_MUST_EXPIRE_KEEP);
2330 #else
2331 #if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
2332 	if (start->lazy != TICKER_LAZY_MUST_EXPIRE_KEEP) {
2333 		/* Update the must_expire state */
2334 		ticker->must_expire =
2335 			(start->lazy == TICKER_LAZY_MUST_EXPIRE) ? 1U : 0U;
2336 	}
2337 #endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */
2338 #endif /* CONFIG_BT_TICKER_LOW_LAT */
2339 
2340 #if defined(CONFIG_BT_TICKER_EXT)
2341 	ticker->ext_data = start->ext_data;
2342 
2343 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
2344 	if (ticker->ext_data) {
2345 		ticker->ext_data->other_expire_info = NULL;
2346 		if (ticker->ext_data->expire_info_id != TICKER_NULL) {
2347 			uint32_t status;
2348 
2349 			status = ticker_alloc_expire_info(instance, user_op->id);
2350 			if (status) {
2351 				return status;
2352 			}
2353 		}
2354 	}
2355 
2356 	ticker_mark_expire_info_outdated(instance, user_op->id);
2357 #else /* !CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
2358 	ARG_UNUSED(instance);
2359 #endif /* !CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
2360 #else /* !CONFIG_BT_TICKER_EXT */
2361 	ARG_UNUSED(instance);
2362 #endif /* !CONFIG_BT_TICKER_EXT */
2363 
2364 	ticker->ticks_periodic = start->ticks_periodic;
2365 
2366 #if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
2367 	ticker->remainder_periodic = start->remainder_periodic;
2368 
2369 #if defined(CONFIG_BT_TICKER_START_REMAINDER)
2370 	ticker->remainder_current = start->remainder_first;
2371 #else /* !CONFIG_BT_TICKER_START_REMAINDER */
2372 	ticker->remainder_current = 0U;
2373 #endif /* !CONFIG_BT_TICKER_START_REMAINDER */
2374 #endif /* CONFIG_BT_TICKER_REMAINDER_SUPPORT */
2375 
2376 	ticker->lazy_periodic =
2377 		(start->lazy < TICKER_LAZY_MUST_EXPIRE_KEEP) ? start->lazy :
2378 							       0U;
2379 #if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
2380 	ticker->ticks_slot = start->ticks_slot;
2381 #endif /* CONFIG_BT_TICKER_SLOT_AGNOSTIC */
2382 
2383 	ticker->timeout_func = start->fp_timeout_func;
2384 	ticker->context = start->context;
2385 	ticker->ticks_to_expire = start->ticks_first;
2386 	ticker->ticks_to_expire_minus = 0U;
2387 	ticks_to_expire_prep(ticker, ticks_current, start->ticks_at_start);
2388 
2389 	ticker->lazy_current = 0U;
2390 	ticker->force = 1U;
2391 
2392 	return TICKER_STATUS_SUCCESS;
2393 }
2394 
2395 #if !defined(CONFIG_BT_TICKER_LOW_LAT)
2396 /**
2397  * @brief Insert new ticker node
2398  *
2399  * @details Called by ticker_job to insert a new ticker node. If node collides
2400  * with existing ticker nodes, either the new node is postponed, or colliding
2401  * node is un-scheduled. Decision is based on latency and the force-state of
2402  * individual nodes.
2403  *
2404  * @param instance    Pointer to ticker instance
2405  * @param id_insert   Id of ticker to insert
2406  * @param ticker      Pointer to ticker node to insert
2407  * @param insert_head Pointer to current head. Updated if colliding nodes
2408  *		      are un-scheduled
2409  * @internal
2410  */
2411 static inline uint8_t ticker_job_insert(struct ticker_instance *instance,
2412 				      uint8_t id_insert,
2413 				      struct ticker_node *ticker,
2414 				      uint8_t *insert_head)
2415 {
2416 	ARG_UNUSED(insert_head);
2417 
2418 	/* Prepare to insert */
2419 	ticker->next = TICKER_NULL;
2420 
2421 	/* Enqueue the ticker node */
2422 	(void)ticker_enqueue(instance, id_insert);
2423 
2424 	/* Inserted/Scheduled */
2425 	ticker->req = ticker->ack + 1;
2426 
2427 	return TICKER_STATUS_SUCCESS;
2428 }
2429 
2430 #if defined(CONFIG_BT_TICKER_EXT) && !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
2431 /**
2432  * @brief Re-schedule ticker nodes within slot_window
2433  *
2434  * @details This function is responsible for re-scheduling ticker nodes
2435  * which have been marked for re-scheduling in ticker_worker. These nodes
2436  * have a non-zero ticks_slot_window configuration, which indicates a
2437  * valid range in which to re-schedule the node.
2438  * The function iterates over nodes, and handles one re-schedule at a
2439  * time. After a re-schedule, nodes are once again iterated until no more
2440  * nodes are marked for re-scheduling.
2441  *
2442  * @param instance      Pointer to ticker instance
2443  * @param ticks_elapsed Number of ticks elapsed since last ticker job
2444  *
2445  * @internal
2446  */
2447 static uint8_t ticker_job_reschedule_in_window(struct ticker_instance *instance)
2448 {
2449 	struct ticker_node *nodes;
2450 	uint8_t rescheduling;
2451 	uint8_t rescheduled;
2452 
2453 	nodes = &instance->nodes[0];
2454 
2455 	/* Do until all pending re-schedules handled */
2456 	rescheduling = 1U;
2457 	rescheduled = 0U;
2458 	while (rescheduling) {
2459 		struct ticker_node *ticker_resched;
2460 		uint32_t ticks_to_expire_offset;
2461 		uint8_t ticker_id_resched_prev;
2462 		struct ticker_ext  *ext_data;
2463 		uint32_t ticks_start_offset;
2464 		uint32_t window_start_ticks;
2465 		uint32_t ticks_slot_window;
2466 		uint8_t ticker_id_resched;
2467 		uint32_t ticks_to_expire;
2468 		uint8_t ticker_id_prev;
2469 		uint8_t ticker_id_next;
2470 		uint32_t ticks_slot;
2471 
2472 		rescheduling = 0U;
2473 
2474 		/* Find first pending re-schedule */
2475 		ticker_id_resched_prev = TICKER_NULL;
2476 		ticker_id_resched = instance->ticker_id_head;
2477 		while (ticker_id_resched != TICKER_NULL) {
2478 			ticker_resched = &nodes[ticker_id_resched];
2479 			if (TICKER_RESCHEDULE_PENDING(ticker_resched)) {
2480 				/* Pending reschedule found */
2481 				break;
2482 			}
2483 
2484 			ticker_id_resched_prev = ticker_id_resched;
2485 			ticker_id_resched = ticker_resched->next;
2486 		}
2487 
2488 		/* Exit if no tickers to be rescheduled */
2489 		if (ticker_id_resched == TICKER_NULL) {
2490 			break;
2491 		}
2492 
2493 		/* Ensure that resched ticker is expired */
2494 		LL_ASSERT(ticker_resched->ticks_to_expire == 0U);
2495 
2496 		/* Use ticker's reserved time ticks_slot, else for unreserved
2497 		 * tickers use the reschedule margin as ticks_slot.
2498 		 */
2499 		if (ticker_resched->ticks_slot) {
2500 			ticks_slot = ticker_resched->ticks_slot;
2501 		} else {
2502 			LL_ASSERT(TICKER_HAS_SLOT_WINDOW(ticker_resched));
2503 
2504 			ticks_slot = HAL_TICKER_RESCHEDULE_MARGIN;
2505 		}
2506 
2507 		/* Window start after intersection with already active node */
2508 		window_start_ticks = instance->ticks_slot_previous +
2509 				     HAL_TICKER_RESCHEDULE_MARGIN;
2510 
2511 		/* If drift was applied to this node, this must be
2512 		 * taken into consideration. Reduce the window with
2513 		 * the amount of drift already applied.
2514 		 *
2515 		 * TODO: An improvement on this could be to only consider the
2516 		 * drift (ADV => randomization) if re-sceduling fails. Then the
2517 		 * ticker would have the best possible window to re-schedule in
2518 		 * and not be restricted to ticks_slot_window - ticks_drift.
2519 		 */
2520 		ext_data = ticker_resched->ext_data;
2521 		if (IS_ENABLED(CONFIG_BT_TICKER_EXT_SLOT_WINDOW_YIELD) &&
2522 		    ticker_resched->ticks_slot &&
2523 		    !ext_data->ticks_drift &&
2524 		    !ext_data->is_drift_in_window) {
2525 			/* Use slot window after intersection include required
2526 			 * ticks_slot, and we do not take the interval of the
2527 			 * colliding ticker provided every expiry increments the
2528 			 * interval by random amount of ticks.
2529 			 */
2530 			ticks_slot_window = window_start_ticks + ticks_slot;
2531 
2532 			/* Window available, proceed to calculate further
2533 			 * drift
2534 			 */
2535 			ticker_id_next = ticker_resched->next;
2536 
2537 		} else if (ext_data->ticks_drift < ext_data->ticks_slot_window) {
2538 			/* Use reduced slot window */
2539 			ticks_slot_window = ext_data->ticks_slot_window -
2540 					    ext_data->ticks_drift;
2541 
2542 			/* Window available, proceed to calculate further
2543 			 * drift
2544 			 */
2545 			ticker_id_next = ticker_resched->next;
2546 
2547 		} else {
2548 			/* Window has been exhausted - we can't reschedule */
2549 			ticker_id_next = TICKER_NULL;
2550 
2551 			/* Assignment will be unused when TICKER_NULL */
2552 			ticks_slot_window = 0U;
2553 		}
2554 
2555 		/* Try to find available slot for re-scheduling */
2556 		ticks_to_expire_offset = 0U;
2557 		ticks_start_offset = 0U;
2558 		ticks_to_expire = 0U;
2559 		while ((ticker_id_next != TICKER_NULL) &&
2560 		       ((ticks_start_offset + ticks_slot) <=
2561 			ticks_slot_window)) {
2562 			struct ticker_node *ticker_next;
2563 			uint32_t window_end_ticks;
2564 
2565 			ticker_next = &nodes[ticker_id_next];
2566 			ticks_to_expire_offset += ticker_next->ticks_to_expire;
2567 
2568 			/* Calculate end of window. Since window may be aligned
2569 			 * with expiry of next node, we add a margin
2570 			 */
2571 			if (ticks_to_expire_offset >
2572 			    HAL_TICKER_RESCHEDULE_MARGIN) {
2573 				window_end_ticks =
2574 					MIN(ticks_slot_window,
2575 					    ticks_start_offset +
2576 					    ticks_to_expire_offset -
2577 					    HAL_TICKER_RESCHEDULE_MARGIN);
2578 			} else {
2579 				/* Next expiry is too close - try the next
2580 				 * node
2581 				 */
2582 				window_end_ticks = 0U;
2583 			}
2584 
2585 			/* Calculate new ticks_to_expire as end of window minus
2586 			 * slot size.
2587 			 */
2588 			if (((window_start_ticks + ticks_slot) <=
2589 			     ticks_slot_window) &&
2590 			    (window_end_ticks >= (ticks_start_offset +
2591 						 ticks_slot))) {
2592 				if (!ticker_resched->ticks_slot ||
2593 				    ext_data->is_drift_in_window) {
2594 					/* Place at start of window */
2595 					ticks_to_expire = window_start_ticks;
2596 				} else {
2597 					/* Place at end of window. This ensures
2598 					 * that ticker with slot window and that
2599 					 * uses ticks_slot does not take the
2600 					 * interval of the colliding ticker.
2601 					 */
2602 					ticks_to_expire = window_end_ticks -
2603 							  ticks_slot;
2604 				}
2605 			} else {
2606 				/* No space in window - try the next node */
2607 				ticks_to_expire = 0U;
2608 			}
2609 
2610 			/* Decide if the re-scheduling ticker node fits in the
2611 			 * slot found - break if it fits
2612 			 */
2613 			if ((ticks_to_expire != 0U) &&
2614 			    (ticks_to_expire >= window_start_ticks) &&
2615 			    (ticks_to_expire <= (window_end_ticks -
2616 						 ticks_slot))) {
2617 				/* Re-scheduled node fits before this node */
2618 				break;
2619 			} else {
2620 				/* Not inside the window */
2621 				ticks_to_expire = 0U;
2622 			}
2623 
2624 			/* Skip other pending re-schedule nodes and
2625 			 * tickers with no reservation or not periodic
2626 			 */
2627 			if (TICKER_RESCHEDULE_PENDING(ticker_next) ||
2628 			    !ticker_next->ticks_slot ||
2629 			    !ticker_next->ticks_periodic) {
2630 				ticker_id_next = ticker_next->next;
2631 
2632 				continue;
2633 			}
2634 
2635 			/* We din't find a valid slot for re-scheduling - try
2636 			 * the next node
2637 			 */
2638 			ticks_start_offset += ticks_to_expire_offset;
2639 			window_start_ticks  = ticks_start_offset +
2640 					      ticker_next->ticks_slot +
2641 					      HAL_TICKER_RESCHEDULE_MARGIN;
2642 			ticks_to_expire_offset = 0U;
2643 
2644 			if (!ticker_resched->ticks_slot ||
2645 			    ext_data->is_drift_in_window) {
2646 				if (!ticker_resched->ticks_slot ||
2647 				    (window_start_ticks <= (ticks_slot_window -
2648 							   ticks_slot))) {
2649 					/* Try at the end of the next node */
2650 					ticks_to_expire = window_start_ticks;
2651 				}
2652 			} else {
2653 				/* Try at the end of the slot window. This
2654 				 * ensures that ticker with slot window and that
2655 				 * uses ticks_slot does not take the interval of
2656 				 * the colliding ticker.
2657 				 */
2658 				ticks_to_expire = ticks_slot_window -
2659 						  ticks_slot;
2660 			}
2661 
2662 			ticker_id_next = ticker_next->next;
2663 		}
2664 
2665 		ext_data->ticks_drift += ticks_to_expire;
2666 
2667 		/* Place the ticker node sorted by expiration time and adjust
2668 		 * delta times
2669 		 */
2670 		ticker_id_next = ticker_resched->next;
2671 		ticker_id_prev = TICKER_NULL;
2672 		while (ticker_id_next != TICKER_NULL) {
2673 			struct ticker_node *ticker_next;
2674 
2675 			ticker_next = &nodes[ticker_id_next];
2676 			if (ticks_to_expire > ticker_next->ticks_to_expire) {
2677 				/* Node is after this - adjust delta */
2678 				ticks_to_expire -= ticker_next->ticks_to_expire;
2679 			} else {
2680 				/* Node is before this one */
2681 				ticker_next->ticks_to_expire -= ticks_to_expire;
2682 				break;
2683 			}
2684 			ticker_id_prev = ticker_id_next;
2685 			ticker_id_next = ticker_next->next;
2686 		}
2687 
2688 		ticker_resched->ticks_to_expire = ticks_to_expire;
2689 
2690 		/* If the node moved in the list, insert it */
2691 		if (ticker_id_prev != TICKER_NULL) {
2692 			/* Remove node from its current position in list */
2693 			if (ticker_id_resched_prev != TICKER_NULL) {
2694 				/* Node was not at the head of the list */
2695 				nodes[ticker_id_resched_prev].next =
2696 					ticker_resched->next;
2697 			} else {
2698 				/* Node was at the head, move head forward */
2699 				instance->ticker_id_head = ticker_resched->next;
2700 			}
2701 
2702 			/* Link inserted node */
2703 			ticker_resched->next = nodes[ticker_id_prev].next;
2704 			nodes[ticker_id_prev].next = ticker_id_resched;
2705 		}
2706 
2707 		/* Remove latency added in ticker_worker */
2708 		ticker_resched->lazy_current--;
2709 
2710 		/* Prevent repeated re-scheduling */
2711 		ext_data->reschedule_state =
2712 			TICKER_RESCHEDULE_STATE_DONE;
2713 
2714 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
2715 		ticker_mark_expire_info_outdated(instance, ticker_id_resched);
2716 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
2717 
2718 		/* Check for other pending re-schedules and set exit flag */
2719 		rescheduling = 1U;
2720 		rescheduled  = 1U;
2721 	}
2722 
2723 	return rescheduled;
2724 }
2725 #endif /* CONFIG_BT_TICKER_EXT && !CONFIG_BT_TICKER_SLOT_AGNOSTIC */
2726 #else  /* CONFIG_BT_TICKER_LOW_LAT */
2727 
2728 /**
2729  * @brief Insert new ticker node
2730  *
2731  * @details Called by ticker_job to insert a new ticker node. If node collides
2732  * with existing ticker nodes, either the new node is postponed, or colliding
2733  * node is un-scheduled. Decision is based on latency and the force-state of
2734  * individual nodes.
2735  *
2736  * @param instance    Pointer to ticker instance
2737  * @param id_insert   Id of ticker to insert
2738  * @param ticker      Pointer to ticker node to insert
2739  * @param insert_head Pointer to current head. Updated if colliding nodes
2740  *		      are un-scheduled
2741  * @internal
2742  */
static inline uint8_t ticker_job_insert(struct ticker_instance *instance,
				      uint8_t id_insert,
				      struct ticker_node *ticker,
				      uint8_t *insert_head)
{
	struct ticker_node *node = &instance->nodes[0];
	uint8_t id_collide;
	uint16_t skip;

	/* Prepare to insert */
	ticker->next = TICKER_NULL;

	/* No. of times ticker has skipped its interval */
	if (ticker->lazy_current > ticker->lazy_periodic) {
		skip = ticker->lazy_current -
		       ticker->lazy_periodic;
	} else {
		skip = 0U;
	}

	/* If insert collides, remove colliding or advance to next interval.
	 * ticker_enqueue() returns id_insert on successful insertion;
	 * otherwise the id of a colliding node, or TICKER_NULL when no
	 * specific colliding node is identified (see ticker_enqueue).
	 */
	while (id_insert !=
	       (id_collide = ticker_enqueue(instance, id_insert))) {
		/* Check for collision */
		if (id_collide != TICKER_NULL) {
			struct ticker_node *ticker_collide = &node[id_collide];
			uint16_t skip_collide;

			/* No. of times colliding ticker has skipped its
			 * interval.
			 */
			if (ticker_collide->lazy_current >
			    ticker_collide->lazy_periodic) {
				skip_collide = ticker_collide->lazy_current -
					       ticker_collide->lazy_periodic;
			} else {
				skip_collide = 0U;
			}

			/* Check if colliding node should be un-scheduled:
			 * only a periodic node that has skipped no more
			 * intervals than the new node and has a lower
			 * force-state yields its slot.
			 */
			if (ticker_collide->ticks_periodic &&
			    skip_collide <= skip &&
			    ticker_collide->force < ticker->force) {
				/* Dequeue and get the reminder of ticks
				 * to expire.
				 */
				ticker_collide->ticks_to_expire =
					ticker_dequeue(instance, id_collide);
				/* Unschedule node: balanced req/ack marks the
				 * node as having no ongoing operation.
				 */
				ticker_collide->req = ticker_collide->ack;

				/* Enqueue for re-insertion */
				ticker_collide->next = *insert_head;
				*insert_head = id_collide;

				continue;
			}
		}

		/* occupied, try next interval */
		if (ticker->ticks_periodic != 0U) {
			ticker->ticks_to_expire += ticker->ticks_periodic;
#if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
			/* Accumulate the sub-tick remainder carried by the
			 * periodic interval.
			 */
			ticker->ticks_to_expire += ticker_remainder_inc(ticker);
#endif /* CONFIG_BT_TICKER_REMAINDER_SUPPORT */
			ticker->lazy_current++;

			/* No. of times ticker has skipped its interval */
			if (ticker->lazy_current > ticker->lazy_periodic) {
				skip = ticker->lazy_current -
				       ticker->lazy_periodic;
			} else {
				skip = 0U;
			}

			/* Remove any accumulated drift (possibly added due to
			 * ticker job execution latencies).
			 */
			if (ticker->ticks_to_expire >
			    ticker->ticks_to_expire_minus) {
				ticker->ticks_to_expire -=
					ticker->ticks_to_expire_minus;
				ticker->ticks_to_expire_minus = 0U;
			} else {
				ticker->ticks_to_expire_minus -=
					ticker->ticks_to_expire;
				ticker->ticks_to_expire = 0U;
			}
		} else {
			/* One-shot node cannot be advanced to a next
			 * interval - fail the insert.
			 */
			return TICKER_STATUS_FAILURE;
		}
	}

	/* Inserted/Scheduled: req one ahead of ack marks an ongoing
	 * (scheduled) node - see the req/ack description in ticker_node.
	 */
	ticker->req = ticker->ack + 1;

	return TICKER_STATUS_SUCCESS;
}
2841 #endif /* CONFIG_BT_TICKER_LOW_LAT */
2842 
2843 /**
2844  * @brief Insert and start ticker nodes for all users
2845  *
2846  * @details Called by ticker_job to prepare, insert and start ticker nodes
2847  * for all users. Specifying insert_head to other than TICKER_NULL causes
2848  * that ticker node to be inserted first.
2849  *
2850  * @param instance    Pointer to ticker instance
2851  * @param insert_head Id of ticker node to insert, or TICKER_NULL if only
2852  *                    handle user operation inserts
2853  * @internal
2854  */
2855 static inline void ticker_job_list_insert(struct ticker_instance *instance,
2856 					  uint8_t insert_head)
2857 {
2858 	struct ticker_node *node;
2859 	struct ticker_user *users;
2860 	uint8_t count_user;
2861 
2862 	node = &instance->nodes[0];
2863 	users = &instance->users[0];
2864 	count_user = instance->count_user;
2865 
2866 	/* Iterate through all user ids */
2867 	while (count_user--) {
2868 		struct ticker_user_op *user_ops;
2869 		struct ticker_user *user;
2870 		uint8_t user_ops_first;
2871 
2872 		user = &users[count_user];
2873 		user_ops = (void *)&user->user_op[0];
2874 		user_ops_first = user->first;
2875 		/* Traverse user operation queue - first to middle (wrap) */
2876 		while ((insert_head != TICKER_NULL) ||
2877 		       (user_ops_first != user->middle)) {
2878 			struct ticker_user_op *user_op;
2879 			struct ticker_node *ticker;
2880 			uint8_t id_insert;
2881 			uint8_t status = TICKER_STATUS_SUCCESS;
2882 
2883 			if (insert_head != TICKER_NULL) {
2884 				/* Prepare insert of ticker node specified by
2885 				 * insert_head
2886 				 */
2887 				id_insert = insert_head;
2888 				ticker = &node[id_insert];
2889 				insert_head = ticker->next;
2890 
2891 				user_op = NULL;
2892 			} else {
2893 				/* Prepare insert of any ticker nodes requested
2894 				 * via user operation TICKER_USER_OP_TYPE_START
2895 				 */
2896 				uint8_t first;
2897 
2898 				user_op = &user_ops[user_ops_first];
2899 				first = user_ops_first + 1;
2900 				if (first == user->count_user_op) {
2901 					first = 0U;
2902 				}
2903 				user_ops_first = first;
2904 
2905 				id_insert = user_op->id;
2906 				ticker = &node[id_insert];
2907 				if (user_op->op != TICKER_USER_OP_TYPE_START) {
2908 					/* User operation is not start - skip
2909 					 * to next operation
2910 					 */
2911 					continue;
2912 				}
2913 
2914 #if defined(CONFIG_BT_TICKER_PREFER_START_BEFORE_STOP)
2915 				ticker->start_pending = 0U;
2916 #endif /* CONFIG_BT_TICKER_PREFER_START_BEFORE_STOP */
2917 
2918 				if (((ticker->req -
2919 				      ticker->ack) & 0xff) != 0U) {
2920 					ticker_job_op_cb(user_op,
2921 							 TICKER_STATUS_FAILURE);
2922 					continue;
2923 				}
2924 
2925 				/* Prepare ticker for start */
2926 				status = ticker_job_op_start(instance, ticker, user_op,
2927 						    instance->ticks_current);
2928 			}
2929 
2930 			if (!status) {
2931 				/* Insert ticker node */
2932 				status = ticker_job_insert(instance, id_insert, ticker,
2933 							   &insert_head);
2934 			}
2935 
2936 			if (user_op) {
2937 				ticker_job_op_cb(user_op, status);
2938 
2939 				if (!IS_ENABLED(CONFIG_BT_TICKER_LOW_LAT) &&
2940 				    (ticker->ticks_periodic == 0U) &&
2941 				    user_op) {
2942 					ticker->fp_op_func =
2943 						user_op->fp_op_func;
2944 					ticker->op_context =
2945 						user_op->op_context;
2946 				}
2947 			}
2948 		}
2949 
2950 #if !defined(CONFIG_BT_TICKER_JOB_IDLE_GET) && \
2951 	!defined(CONFIG_BT_TICKER_NEXT_SLOT_GET) && \
2952 	!defined(CONFIG_BT_TICKER_PRIORITY_SET)
2953 		user->first = user_ops_first;
2954 #endif /* !CONFIG_BT_TICKER_JOB_IDLE_GET &&
2955 	* !CONFIG_BT_TICKER_NEXT_SLOT_GET &&
2956 	* !CONFIG_BT_TICKER_PRIORITY_SET
2957 	*/
2958 
2959 	}
2960 }
2961 
2962 #if defined(CONFIG_BT_TICKER_JOB_IDLE_GET) || \
2963 	defined(CONFIG_BT_TICKER_NEXT_SLOT_GET) || \
2964 	defined(CONFIG_BT_TICKER_PRIORITY_SET)
2965 /**
2966  * @brief Perform inquiry for specific user operation
2967  *
2968  * @param instance Pointer to ticker instance
2969  * @param uop	   Pointer to user operation
2970  *
2971  * @internal
2972  */
static inline void ticker_job_op_inquire(struct ticker_instance *instance,
					 struct ticker_user_op *uop)
{
	ticker_op_func fp_op_func;

	/* Completion callback is only invoked for op types handled below */
	fp_op_func = NULL;
	switch (uop->op) {
#if defined(CONFIG_BT_TICKER_NEXT_SLOT_GET)
	case TICKER_USER_OP_TYPE_SLOT_GET:
		ticker_by_next_slot_get(instance,
					uop->params.slot_get.ticker_id,
					uop->params.slot_get.ticks_current,
					uop->params.slot_get.ticks_to_expire,
#if defined(CONFIG_BT_TICKER_NEXT_SLOT_GET_MATCH)
					uop->params.slot_get.fp_match_op_func,
					uop->params.slot_get.match_op_context,
#else
					NULL, NULL,
#endif
#if defined(CONFIG_BT_TICKER_REMAINDER_GET)
					uop->params.slot_get.remainder,
#else /* !CONFIG_BT_TICKER_REMAINDER_GET */
					NULL,
#endif /* !CONFIG_BT_TICKER_REMAINDER_GET */
#if defined(CONFIG_BT_TICKER_LAZY_GET)
					uop->params.slot_get.lazy);
#else /* !CONFIG_BT_TICKER_LAZY_GET */
					NULL);
#endif /* !CONFIG_BT_TICKER_LAZY_GET */
		/* Intentional fallthrough: complete the inquiry via the
		 * IDLE_GET path below, which sets the success status and
		 * picks up the completion callback.
		 */
		__fallthrough;
#endif /* CONFIG_BT_TICKER_NEXT_SLOT_GET */

#if defined(CONFIG_BT_TICKER_JOB_IDLE_GET) || \
	defined(CONFIG_BT_TICKER_NEXT_SLOT_GET)
	case TICKER_USER_OP_TYPE_IDLE_GET:
		uop->status = TICKER_STATUS_SUCCESS;
		fp_op_func = uop->fp_op_func;
		break;
#endif /* CONFIG_BT_TICKER_JOB_IDLE_GET ||
	* CONFIG_BT_TICKER_NEXT_SLOT_GET
	*/

#if !defined(CONFIG_BT_TICKER_LOW_LAT) && \
	!defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC) && \
	defined(CONFIG_BT_TICKER_PRIORITY_SET)
	case TICKER_USER_OP_TYPE_PRIORITY_SET:
		/* Validate the node id before writing its priority */
		if (uop->id < instance->count_node) {
			struct ticker_node *node = instance->nodes;

			node[uop->id].priority =
				uop->params.priority_set.priority;
			uop->status = TICKER_STATUS_SUCCESS;
		} else {
			uop->status = TICKER_STATUS_FAILURE;
		}
		fp_op_func = uop->fp_op_func;
		break;
#endif /* !CONFIG_BT_TICKER_LOW_LAT &&
	* !CONFIG_BT_TICKER_SLOT_AGNOSTIC &&
	* CONFIG_BT_TICKER_PRIORITY_SET
	*/

	default:
		/* do nothing for other ops */
		break;
	}

	/* Notify completion, if the op type provided a callback */
	if (fp_op_func) {
		fp_op_func(uop->status, uop->op_context);
	}
}
3044 
3045 /**
3046  * @brief Check for pending inquiries for all users
3047  *
3048  * @details Run through all user operation lists, checking for pending
 * inquiries. Currently only two types of inquiries are supported:
 * TICKER_USER_OP_TYPE_SLOT_GET and TICKER_USER_OP_TYPE_IDLE_GET. The
 * function also supports the user operation TICKER_USER_OP_TYPE_PRIORITY_SET.
 * Processing an operation advances the user->first index, indicating that
 * the user operation has been consumed.
3054  *
3055  * @param instance Pointer to ticker instance
3056  *
3057  * @internal
3058  */
3059 static inline void ticker_job_list_inquire(struct ticker_instance *instance)
3060 {
3061 	struct ticker_user *users;
3062 	uint8_t count_user;
3063 
3064 	users = &instance->users[0];
3065 	count_user = instance->count_user;
3066 	/* Traverse user operation queue - first to last (with wrap) */
3067 	while (count_user--) {
3068 		struct ticker_user_op *user_op;
3069 		struct ticker_user *user;
3070 
3071 		user = &users[count_user];
3072 		user_op = &user->user_op[0];
3073 		while (user->first != user->last) {
3074 			uint8_t first;
3075 
3076 			ticker_job_op_inquire(instance, &user_op[user->first]);
3077 
3078 			first = user->first + 1;
3079 			if (first == user->count_user_op) {
3080 				first = 0U;
3081 			}
3082 			user->first = first;
3083 		}
3084 	}
3085 }
3086 #endif /* CONFIG_BT_TICKER_JOB_IDLE_GET ||
3087 	* CONFIG_BT_TICKER_NEXT_SLOT_GET ||
3088 	* CONFIG_BT_TICKER_PRIORITY_SET
3089 	*/
3090 
3091 /**
3092  * @brief Update counter compare value (trigger)
3093  *
3094  * @details Updates trigger to the match next expiring ticker node. The
3095  * function takes into consideration that it may be preempted in the process,
3096  * and makes sure - by iteration - that compare value is set in the future
3097  * (with a margin).
3098  *
3099  * @param instance           Pointer to ticker instance
3100  * @param ticker_id_old_head Previous ticker_id_head
3101  *
3102  * @internal
3103  */
static inline uint8_t
ticker_job_compare_update(struct ticker_instance *instance,
			  uint8_t ticker_id_old_head)
{
	struct ticker_node *ticker;
	uint32_t ticks_to_expire;
	uint32_t ctr_curr;
	uint32_t ctr_prev;
	uint32_t cc;
	uint32_t i;

	/* No tickers active - stop the counter */
	if (instance->ticker_id_head == TICKER_NULL) {
		if (cntr_stop() == 0) {
#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
			instance->ticks_slot_previous = 0U;
#endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */

#if !defined(CONFIG_BT_TICKER_CNTR_FREE_RUNNING)
			/* Stopped counter value will be used as ticks_current
			 * for calculation to start new tickers.
			 */
			instance->ticks_current = cntr_cnt_get();
#endif /* !CONFIG_BT_TICKER_CNTR_FREE_RUNNING */
		}

		return 0U;
	}

	/* Check if this is the first update. If so, start the counter */
	if (ticker_id_old_head == TICKER_NULL) {
#if !defined(CONFIG_BT_TICKER_CNTR_FREE_RUNNING)
		uint32_t ticks_current;

		/* Sample while the counter is still in the stopped state */
		ticks_current = cntr_cnt_get();
#endif /* !CONFIG_BT_TICKER_CNTR_FREE_RUNNING */

		if (cntr_start() == 0) {
#if !defined(CONFIG_BT_TICKER_CNTR_FREE_RUNNING)
			/* Stopped counter value will be used as ticks_current
			 * for calculation to start new tickers.
			 * FIXME: The synchronization here may be unnecessary;
			 *        instead, verify that the counter value has
			 *        not changed since it was sampled while the
			 *        counter was in the stopped state.
			 */
			instance->ticks_current = ticks_current;
#endif /* !CONFIG_BT_TICKER_CNTR_FREE_RUNNING */
		}
	}

	ticker = &instance->nodes[instance->ticker_id_head];
	ticks_to_expire = ticker->ticks_to_expire;

	/* If ticks_to_expire is zero, then immediately trigger the worker.
	 */
	if (!ticks_to_expire) {
		return 1U;
	}

	/* Iterate few times, if required, to ensure that compare is
	 * correctly set to a future value. This is required in case
	 * the operation is pre-empted and current h/w counter runs
	 * ahead of compare value to be set.
	 */
	i = 10U;
	ctr_curr = cntr_cnt_get();
	do {
		uint32_t ticks_elapsed;
		uint32_t ticks_diff;

		/* Assert if the iteration budget is exhausted */
		LL_ASSERT(i);
		i--;

		/* Ticks elapsed since the synchronized ticks_current */
		cc = instance->ticks_current;
		ticks_diff = ticker_ticks_diff_get(ctr_curr, cc);
		if (ticks_diff >= ticks_to_expire) {
			/* Expiry already passed - trigger the worker rather
			 * than setting a compare in the past.
			 */
			return 1U;
		}

		/* Set compare at expiry, but no closer to the current counter
		 * value than the h/w minimum offset plus the set latency.
		 */
		ticks_elapsed = ticks_diff + HAL_TICKER_CNTR_CMP_OFFSET_MIN +
				HAL_TICKER_CNTR_SET_LATENCY;
		cc += MAX(ticks_elapsed, ticks_to_expire);
		cc &= HAL_TICKER_CNTR_MASK;
		instance->trigger_set_cb(cc);

		/* Re-sample; retry if the counter may have caught up with the
		 * compare value while it was being set.
		 */
		ctr_prev = ctr_curr;
		ctr_curr = cntr_cnt_get();
	} while ((ticker_ticks_diff_get(ctr_curr, ctr_prev) +
		  HAL_TICKER_CNTR_CMP_OFFSET_MIN) >
		  ticker_ticks_diff_get(cc, ctr_prev));

	return 0U;
}
3197 
3198 /**
3199  * @brief Ticker job
3200  *
3201  * @details Runs the bottom half of the ticker, after ticker nodes have elapsed
3202  * or user operations requested. The ticker_job is responsible for removing and
3203  * re-inserting ticker nodes, based on next elapsing and periodicity of the
3204  * nodes. The ticker_job is also responsible for processing user operations,
3205  * i.e. requests for start, update, stop etc.
3206  * Invoked from the ticker job mayfly context (TICKER_MAYFLY_CALL_ID_JOB).
3207  *
3208  * @param param Pointer to ticker instance
3209  *
3210  * @internal
3211  */
void ticker_job(void *param)
{
	struct ticker_instance *instance = param;
	uint8_t flag_compare_update;
	uint8_t ticker_id_old_head;
	uint8_t compare_trigger;
	uint32_t ticks_previous;
	uint32_t ticks_elapsed;
	uint8_t flag_elapsed;
	uint8_t insert_head;
	uint32_t ticks_now;
	uint8_t pending;

	DEBUG_TICKER_JOB(1);

	/* Defer job, as worker is running */
	if (instance->worker_trigger) {
		DEBUG_TICKER_JOB(0);
		return;
	}

	/* Defer job, as job is already running; re-schedule self */
	if (instance->job_guard) {
		instance->sched_cb(TICKER_CALL_ID_JOB, TICKER_CALL_ID_JOB, 1,
				   instance);
		return;
	}
	instance->job_guard = 1U;

	/* Back up the previous known tick */
	ticks_previous = instance->ticks_current;

	/* Update current tick with the elapsed value from queue, and dequeue */
	if (instance->ticks_elapsed_first != instance->ticks_elapsed_last) {
		ticker_next_elapsed(&instance->ticks_elapsed_first);

		ticks_elapsed =
		    instance->ticks_elapsed[instance->ticks_elapsed_first];

		instance->ticks_current += ticks_elapsed;
		instance->ticks_current &= HAL_TICKER_CNTR_MASK;

		flag_elapsed = 1U;
	} else {
		/* No elapsed value in queue */
		flag_elapsed = 0U;
		ticks_elapsed = 0U;
	}

	/* Initialise internal re-insert list */
	insert_head = TICKER_NULL;

	/* Initialise flag used to update next compare value */
	flag_compare_update = 0U;

	/* Remember the old head, so as to decide if new compare needs to be
	 * set.
	 */
	ticker_id_old_head = instance->ticker_id_head;

	/* Get current ticks, used in managing updates and expired tickers */
	ticks_now = cntr_cnt_get();

#if defined(CONFIG_BT_TICKER_CNTR_FREE_RUNNING)
	if (ticker_id_old_head == TICKER_NULL) {
		/* No tickers active, synchronize to the free running counter so
		 * that any new ticker started can have its ticks_to_expire
		 * relative to current free running counter value.
		 *
		 * Both current tick (new value) and previous tick (previously
		 * stored when all tickers stopped) is assigned to ticks_now.
		 * All new tickers are started from this synchronized value as
		 * the anchor/reference value.
		 *
		 * Note, this if clause is an overhead wherein the check is
		 * performed for every ticker_job() iteration!
		 */
		instance->ticks_current = ticks_now;
		ticks_previous = ticks_now;
	}
#endif /* CONFIG_BT_TICKER_CNTR_FREE_RUNNING */

	/* Manage user operations (updates and deletions) in ticker list */
	pending = ticker_job_list_manage(instance, ticks_now, ticks_elapsed,
					 &insert_head);

	/* Detect change in head of the list */
	if (instance->ticker_id_head != ticker_id_old_head) {
		flag_compare_update = 1U;
	}

	/* Handle expired tickers */
	if (flag_elapsed) {
		ticker_job_worker_bh(instance, ticks_now, ticks_previous,
				     ticks_elapsed, &insert_head);

		/* Detect change in head of the list */
		if (instance->ticker_id_head != ticker_id_old_head) {
			flag_compare_update = 1U;
		}

		/* Handle insertions */
		ticker_job_list_insert(instance, insert_head);

#if defined(CONFIG_BT_TICKER_EXT) && !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC) &&\
	!defined(CONFIG_BT_TICKER_LOW_LAT)
		/* Re-schedule any pending nodes with slot_window */
		if (ticker_job_reschedule_in_window(instance)) {
			flag_compare_update = 1U;
		}
#endif /* CONFIG_BT_TICKER_EXT && !CONFIG_BT_TICKER_SLOT_AGNOSTIC &&
	* !CONFIG_BT_TICKER_LOW_LAT
	*/
	} else {
		/* Handle insertions */
		ticker_job_list_insert(instance, insert_head);
	}

	/* Detect change in head of the list */
	if (instance->ticker_id_head != ticker_id_old_head) {
		flag_compare_update = 1U;
	}

#if defined(CONFIG_BT_TICKER_JOB_IDLE_GET) || \
	defined(CONFIG_BT_TICKER_NEXT_SLOT_GET) || \
	defined(CONFIG_BT_TICKER_PRIORITY_SET)
	/* Process any list inquiries, only when no other user operations are
	 * still pending.
	 */
	if (!pending) {
		/* Handle inquiries */
		ticker_job_list_inquire(instance);
	}
#else  /* !CONFIG_BT_TICKER_JOB_IDLE_GET &&
	* !CONFIG_BT_TICKER_NEXT_SLOT_GET &&
	* !CONFIG_BT_TICKER_PRIORITY_SET
	*/
	ARG_UNUSED(pending);
#endif /* !CONFIG_BT_TICKER_JOB_IDLE_GET &&
	* !CONFIG_BT_TICKER_NEXT_SLOT_GET &&
	* !CONFIG_BT_TICKER_PRIORITY_SET
	*/

#if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
	/* Refresh expire info structures marked outdated above */
	if (instance->expire_infos_outdated) {
		ticker_job_update_expire_infos(instance);
	}
#endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */

	/* update compare if head changed */
	if (flag_compare_update) {
		compare_trigger = ticker_job_compare_update(instance,
							    ticker_id_old_head);
	} else {
		compare_trigger = 0U;
	}

	/* Permit worker to run */
	instance->job_guard = 0U;

	/* trigger worker if deferred */
	cpu_dmb();
	if (instance->worker_trigger || compare_trigger) {
		instance->sched_cb(TICKER_CALL_ID_JOB, TICKER_CALL_ID_WORKER, 1,
				   instance);
	}

	DEBUG_TICKER_JOB(0);
}
3377 
3378 /*****************************************************************************
3379  * Public Interface
3380  ****************************************************************************/
3381 
3382 /**
3383  * @brief Initialize ticker instance
3384  *
3385  * @details Called by ticker instance client once to initialize the ticker.
3386  *
3387  * @param instance_index   Index of ticker instance
3388  * @param count_node	   Number of ticker nodes in node array
3389  * @param node		   Pointer to ticker node array
3390  * @param count_user	   Number of users in user array
3391  * @param user		   Pointer to user array of size count_user
3392  * @param count_op	   Number of user operations in user_op array
3393  * @param user_op	   Pointer to user operations array of size count_op
3394  * @param caller_id_get_cb Pointer to function for retrieving caller_id from
3395  *			   user id
3396  * @param sched_cb	   Pointer to function for scheduling ticker_worker
3397  *			   and ticker_job
3398  * @param trigger_set_cb   Pointer to function for setting the compare trigger
3399  *			   ticks value
3400  *
3401  * @return TICKER_STATUS_SUCCESS if initialization was successful, otherwise
3402  * TICKER_STATUS_FAILURE
3403  */
3404 uint8_t ticker_init(uint8_t instance_index, uint8_t count_node, void *node,
3405 		  uint8_t count_user, void *user, uint8_t count_op, void *user_op,
3406 		  ticker_caller_id_get_cb_t caller_id_get_cb,
3407 		  ticker_sched_cb_t sched_cb,
3408 		  ticker_trigger_set_cb_t trigger_set_cb)
3409 {
3410 	struct ticker_instance *instance = &_instance[instance_index];
3411 	struct ticker_user_op *user_op_ = (void *)user_op;
3412 	struct ticker_user *users;
3413 
3414 	if (instance_index >= TICKER_INSTANCE_MAX) {
3415 		return TICKER_STATUS_FAILURE;
3416 	}
3417 
3418 	instance->count_node = count_node;
3419 	instance->nodes = node;
3420 
3421 #if !defined(CONFIG_BT_TICKER_LOW_LAT) && \
3422 	!defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC) && \
3423 	defined(CONFIG_BT_TICKER_PRIORITY_SET)
3424 	while (count_node--) {
3425 		instance->nodes[count_node].priority = 0;
3426 	}
3427 #endif /* !CONFIG_BT_TICKER_LOW_LAT &&
3428 	* !CONFIG_BT_TICKER_SLOT_AGNOSTIC
3429 	* CONFIG_BT_TICKER_PRIORITY_SET
3430 	*/
3431 
3432 	instance->count_user = count_user;
3433 	instance->users = user;
3434 
3435 	/** @todo check if enough ticker_user_op supplied */
3436 
3437 	users = &instance->users[0];
3438 	while (count_user--) {
3439 		users[count_user].user_op = user_op_;
3440 		user_op_ += users[count_user].count_user_op;
3441 		count_op -= users[count_user].count_user_op;
3442 	}
3443 
3444 	if (count_op) {
3445 		return TICKER_STATUS_FAILURE;
3446 	}
3447 
3448 	instance->caller_id_get_cb = caller_id_get_cb;
3449 	instance->sched_cb = sched_cb;
3450 	instance->trigger_set_cb = trigger_set_cb;
3451 
3452 	instance->ticker_id_head = TICKER_NULL;
3453 #if defined(CONFIG_BT_TICKER_CNTR_FREE_RUNNING)
3454 	/* We will synchronize in ticker_job on first ticker start */
3455 	instance->ticks_current = 0U;
3456 #else /* !CONFIG_BT_TICKER_CNTR_FREE_RUNNING */
3457 	/* Synchronize to initialized (in stopped state) counter value */
3458 	instance->ticks_current = cntr_cnt_get();
3459 #endif /* !CONFIG_BT_TICKER_CNTR_FREE_RUNNING */
3460 	instance->ticks_elapsed_first = 0U;
3461 	instance->ticks_elapsed_last = 0U;
3462 
3463 #if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
3464 	instance->ticker_id_slot_previous = TICKER_NULL;
3465 	instance->ticks_slot_previous = 0U;
3466 #endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */
3467 
3468 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
3469 	for (int i = 0; i < TICKER_EXPIRE_INFO_MAX; i++) {
3470 		instance->expire_infos[i].ticker_id = TICKER_NULL;
3471 		instance->expire_infos[i].last = 1;
3472 	}
3473 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
3474 
3475 	return TICKER_STATUS_SUCCESS;
3476 }
3477 
3478 /**
3479  * @brief Check if ticker instance is initialized
3480  *
3481  * @param instance_index Index of ticker instance
3482  *
3483  * @return true if ticker instance is initialized, false otherwise
3484  */
3485 bool ticker_is_initialized(uint8_t instance_index)
3486 {
3487 	return !!(_instance[instance_index].count_node);
3488 }
3489 
3490 /**
3491  * @brief Trigger the ticker worker
3492  *
3493  * @details Schedules the ticker_worker upper half by invoking the
3494  * corresponding mayfly.
3495  *
3496  * @param instance_index Index of ticker instance
3497  */
3498 void ticker_trigger(uint8_t instance_index)
3499 {
3500 	struct ticker_instance *instance;
3501 
3502 	DEBUG_TICKER_ISR(1);
3503 
3504 	instance = &_instance[instance_index];
3505 	if (instance->sched_cb) {
3506 		instance->sched_cb(TICKER_CALL_ID_TRIGGER,
3507 				   TICKER_CALL_ID_WORKER, 1, instance);
3508 	}
3509 
3510 	DEBUG_TICKER_ISR(0);
3511 }
3512 
3513 /**
3514  * @brief Start a ticker node
3515  *
3516  * @details Creates a new user operation of type TICKER_USER_OP_TYPE_START and
3517  * schedules the ticker_job.
3518  *
3519  * @param instance_index     Index of ticker instance
3520  * @param user_id	     Ticker user id. Used for indexing user operations
3521  *			     and mapping to mayfly caller id
3522  * @param ticker_id	     Id of ticker node
3523  * @param ticks_anchor	     Absolute tick count as anchor point for
3524  *			     ticks_first
3525  * @param ticks_first	     Initial number of ticks before first timeout
3526  * @param ticks_periodic     Number of ticks for a periodic ticker node. If 0,
3527  *			     ticker node is treated as one-shot
3528  * @param remainder_periodic Periodic ticks fraction
3529  * @param lazy		     Number of periods to skip (latency). A value of 1
3530  *			     causes skipping every other timeout
3531  * @param ticks_slot	     Slot reservation ticks for node (air-time)
3532  * @param ticks_slot_window  Window in which the slot reservation may be
3533  *			     re-scheduled to avoid collision. Set to 0 for
3534  *			     legacy behavior
3535  * @param fp_timeout_func    Function pointer of function to call at timeout
3536  * @param context	     Context passed in timeout call
3537  * @param fp_op_func	     Function pointer of user operation completion
3538  *			     function
3539  * @param op_context	     Context passed in operation completion call
3540  *
3541  * @return TICKER_STATUS_BUSY if start was successful but not yet completed.
3542  * TICKER_STATUS_FAILURE is returned if there are no more user operations
3543  * available, and TICKER_STATUS_SUCCESS is returned if ticker_job gets to
3544  * run before exiting ticker_start
3545  */
3546 #if defined(CONFIG_BT_TICKER_EXT)
uint8_t ticker_start(uint8_t instance_index, uint8_t user_id, uint8_t ticker_id,
		   uint32_t ticks_anchor, uint32_t ticks_first, uint32_t ticks_periodic,
		   uint32_t remainder_periodic, uint16_t lazy, uint32_t ticks_slot,
		   ticker_timeout_func fp_timeout_func, void *context,
		   ticker_op_func fp_op_func, void *op_context)
{
	/* Thin wrapper: delegate to the extended variant with no extension
	 * data (legacy behavior).
	 */
	return ticker_start_ext(instance_index, user_id, ticker_id,
				ticks_anchor, ticks_first, ticks_periodic,
				remainder_periodic, lazy, ticks_slot,
				fp_timeout_func, context,
				fp_op_func, op_context,
				NULL);
}
3560 
/* Forward declaration of the common start implementation shared by
 * ticker_start_us() and ticker_start_ext(); defined further below.
 */
static uint8_t start_us(uint8_t instance_index, uint8_t user_id,
			uint8_t ticker_id, uint32_t ticks_anchor,
			uint32_t ticks_first, uint32_t remainder_first,
			uint32_t ticks_periodic, uint32_t remainder_periodic,
			uint16_t lazy, uint32_t ticks_slot,
			ticker_timeout_func fp_timeout_func, void *context,
			ticker_op_func fp_op_func, void *op_context,
			struct ticker_ext *ext_data);
3569 
uint8_t ticker_start_us(uint8_t instance_index, uint8_t user_id,
			uint8_t ticker_id, uint32_t ticks_anchor,
			uint32_t ticks_first, uint32_t remainder_first,
			uint32_t ticks_periodic, uint32_t remainder_periodic,
			uint16_t lazy, uint32_t ticks_slot,
			ticker_timeout_func fp_timeout_func, void *context,
			ticker_op_func fp_op_func, void *op_context)
{
	/* Thin wrapper: forward to the common start implementation with no
	 * extension data.
	 */
	return start_us(instance_index, user_id, ticker_id, ticks_anchor,
			ticks_first, remainder_first,
			ticks_periodic, remainder_periodic,
			lazy, ticks_slot,
			fp_timeout_func, context,
			fp_op_func, op_context,
			NULL);
}
3586 
uint8_t ticker_start_ext(uint8_t instance_index, uint8_t user_id, uint8_t ticker_id,
		       uint32_t ticks_anchor, uint32_t ticks_first,
		       uint32_t ticks_periodic, uint32_t remainder_periodic,
		       uint16_t lazy, uint32_t ticks_slot,
		       ticker_timeout_func fp_timeout_func, void *context,
		       ticker_op_func fp_op_func, void *op_context,
		       struct ticker_ext *ext_data)
{
	/* Thin wrapper: forward to the common start implementation with a
	 * zero initial remainder and the caller-supplied extension data.
	 */
	return start_us(instance_index, user_id, ticker_id, ticks_anchor,
			ticks_first, 0U, ticks_periodic, remainder_periodic,
			lazy, ticks_slot,
			fp_timeout_func, context,
			fp_op_func, op_context,
			ext_data);
}
3602 
3603 static uint8_t start_us(uint8_t instance_index, uint8_t user_id,
3604 			uint8_t ticker_id, uint32_t ticks_anchor,
3605 			uint32_t ticks_first, uint32_t remainder_first,
3606 			uint32_t ticks_periodic, uint32_t remainder_periodic,
3607 			uint16_t lazy, uint32_t ticks_slot,
3608 			ticker_timeout_func fp_timeout_func, void *context,
3609 			ticker_op_func fp_op_func, void *op_context,
3610 			struct ticker_ext *ext_data)
3611 
3612 #else /* !CONFIG_BT_TICKER_EXT */
uint8_t ticker_start(uint8_t instance_index, uint8_t user_id, uint8_t ticker_id,
		   uint32_t ticks_anchor, uint32_t ticks_first, uint32_t ticks_periodic,
		   uint32_t remainder_periodic, uint16_t lazy, uint32_t ticks_slot,
		   ticker_timeout_func fp_timeout_func, void *context,
		   ticker_op_func fp_op_func, void *op_context)
{
	/* Thin wrapper (non-EXT build): forward to ticker_start_us() with a
	 * zero initial remainder.
	 */
	return ticker_start_us(instance_index, user_id,
			       ticker_id, ticks_anchor,
			       ticks_first, 0U,
			       ticks_periodic, remainder_periodic,
			       lazy, ticks_slot,
			       fp_timeout_func, context,
			       fp_op_func, op_context);
}
3627 
3628 uint8_t ticker_start_us(uint8_t instance_index, uint8_t user_id,
3629 			uint8_t ticker_id, uint32_t ticks_anchor,
3630 			uint32_t ticks_first, uint32_t remainder_first,
3631 			uint32_t ticks_periodic, uint32_t remainder_periodic,
3632 			uint16_t lazy, uint32_t ticks_slot,
3633 			ticker_timeout_func fp_timeout_func, void *context,
3634 			ticker_op_func fp_op_func, void *op_context)
3635 #endif /* !CONFIG_BT_TICKER_EXT */
3636 
3637 {
3638 	struct ticker_instance *instance = &_instance[instance_index];
3639 	struct ticker_user_op *user_op;
3640 	struct ticker_user *user;
3641 	uint8_t last;
3642 
3643 	user = &instance->users[user_id];
3644 
3645 	last = user->last + 1;
3646 	if (last >= user->count_user_op) {
3647 		last = 0U;
3648 	}
3649 
3650 	if (last == user->first) {
3651 		return TICKER_STATUS_FAILURE;
3652 	}
3653 
3654 	user_op = &user->user_op[user->last];
3655 	user_op->op = TICKER_USER_OP_TYPE_START;
3656 	user_op->id = ticker_id;
3657 	user_op->params.start.ticks_at_start = ticks_anchor;
3658 	user_op->params.start.ticks_first = ticks_first;
3659 	user_op->params.start.ticks_periodic = ticks_periodic;
3660 
3661 #if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
3662 	user_op->params.start.remainder_periodic = remainder_periodic;
3663 
3664 #if defined(CONFIG_BT_TICKER_START_REMAINDER)
3665 	user_op->params.start.remainder_first = remainder_first;
3666 #else /* !CONFIG_BT_TICKER_START_REMAINDER */
3667 	ARG_UNUSED(remainder_first);
3668 #endif /* !CONFIG_BT_TICKER_START_REMAINDER */
3669 #endif /* CONFIG_BT_TICKER_REMAINDER_SUPPORT */
3670 
3671 #if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
3672 	user_op->params.start.ticks_slot = ticks_slot;
3673 #endif
3674 	user_op->params.start.lazy = lazy;
3675 #if defined(CONFIG_BT_TICKER_EXT)
3676 	user_op->params.start.ext_data = ext_data;
3677 #endif
3678 	user_op->params.start.fp_timeout_func = fp_timeout_func;
3679 	user_op->params.start.context = context;
3680 	user_op->status = TICKER_STATUS_BUSY;
3681 	user_op->fp_op_func = fp_op_func;
3682 	user_op->op_context = op_context;
3683 
3684 	/* Make sure transaction is completed before committing */
3685 	cpu_dmb();
3686 	user->last = last;
3687 
3688 	instance->sched_cb(instance->caller_id_get_cb(user_id),
3689 			   TICKER_CALL_ID_JOB, 0, instance);
3690 
3691 	return user_op->status;
3692 }
3693 
#if defined(CONFIG_BT_TICKER_UPDATE)
/**
 * @brief Update a ticker node
 *
 * @details Creates a new user operation of type TICKER_USER_OP_TYPE_UPDATE and
 * schedules the ticker_job.
 *
 * @param instance_index     Index of ticker instance
 * @param user_id	     Ticker user id. Used for indexing user operations
 *			     and mapping to mayfly caller id
 * @param ticker_id	     Id of ticker node
 * @param ticks_drift_plus   Number of ticks to add for drift compensation
 * @param ticks_drift_minus  Number of ticks to subtract for drift compensation
 * @param ticks_slot_plus    Number of ticks to add to slot reservation
 * @param ticks_slot_minus   Number of ticks to subtract from slot
 *			     reservation
 * @param lazy		     Number of periods to skip (latency). A value of 0
 *			     means no action. 1 means no latency (normal). A
 *			     value >1 means latency = lazy - 1
 * @param force		     Force update to take effect immediately. With
 *			     force = 0, update is scheduled to take effect as
 *			     soon as possible
 * @param fp_op_func	     Function pointer of user operation completion
 *			     function
 * @param op_context	     Context passed in operation completion call
 * @param must_expire	     Disable, enable or ignore the must-expire state.
 *			     A value of 0 means no change, 1 means disable and
 *			     2 means enable. (ticker_update_ext only)
 *
 * @return TICKER_STATUS_BUSY if update was successful but not yet completed.
 * TICKER_STATUS_FAILURE is returned if there are no more user operations
 * available, and TICKER_STATUS_SUCCESS is returned if ticker_job gets to run
 * before exiting ticker_update
 */
uint8_t ticker_update(uint8_t instance_index, uint8_t user_id,
		       uint8_t ticker_id, uint32_t ticks_drift_plus,
		       uint32_t ticks_drift_minus, uint32_t ticks_slot_plus,
		       uint32_t ticks_slot_minus, uint16_t lazy, uint8_t force,
		       ticker_op_func fp_op_func, void *op_context)
/* NOTE: With CONFIG_BT_TICKER_EXT, ticker_update() delegates to
 * ticker_update_ext() and the brace-enclosed body after the final #endif
 * completes ticker_update_ext(). Without CONFIG_BT_TICKER_EXT, that body
 * directly completes ticker_update() itself.
 */
#if defined(CONFIG_BT_TICKER_EXT)
#if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
{
	/* must_expire = 0 (no change); expire info id defaults to self */
	return ticker_update_ext(instance_index, user_id, ticker_id,
				 ticks_drift_plus, ticks_drift_minus,
				 ticks_slot_plus, ticks_slot_minus, lazy,
				 force, fp_op_func, op_context, 0U, ticker_id);
}

/* Signature only; completed by the shared body below */
uint8_t ticker_update_ext(uint8_t instance_index, uint8_t user_id,
			   uint8_t ticker_id, uint32_t ticks_drift_plus,
			   uint32_t ticks_drift_minus,
			   uint32_t ticks_slot_plus, uint32_t ticks_slot_minus,
			   uint16_t lazy, uint8_t force,
			   ticker_op_func fp_op_func, void *op_context,
			   uint8_t must_expire, uint8_t expire_info_id)
#else /* !CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
{
	/* must_expire = 0 (no change) */
	return ticker_update_ext(instance_index, user_id, ticker_id,
				 ticks_drift_plus, ticks_drift_minus,
				 ticks_slot_plus, ticks_slot_minus, lazy,
				 force, fp_op_func, op_context, 0U);
}

/* Signature only; completed by the shared body below */
uint8_t ticker_update_ext(uint8_t instance_index, uint8_t user_id,
			   uint8_t ticker_id, uint32_t ticks_drift_plus,
			   uint32_t ticks_drift_minus,
			   uint32_t ticks_slot_plus, uint32_t ticks_slot_minus,
			   uint16_t lazy, uint8_t force,
			   ticker_op_func fp_op_func, void *op_context,
			   uint8_t must_expire)
#endif /* !CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
#endif /* CONFIG_BT_TICKER_EXT */
{
	struct ticker_instance *instance = &_instance[instance_index];
	struct ticker_user_op *user_op;
	struct ticker_user *user;
	uint8_t last;

	user = &instance->users[user_id];

	/* Compute the next enqueue index in the circular user operation
	 * list, wrapping at the list capacity
	 */
	last = user->last + 1;
	if (last >= user->count_user_op) {
		last = 0U;
	}

	/* List is full when the next enqueue index would meet the dequeue
	 * index
	 */
	if (last == user->first) {
		return TICKER_STATUS_FAILURE;
	}

	user_op = &user->user_op[user->last];
	user_op->op = TICKER_USER_OP_TYPE_UPDATE;
	user_op->id = ticker_id;
	user_op->params.update.ticks_drift_plus = ticks_drift_plus;
	user_op->params.update.ticks_drift_minus = ticks_drift_minus;
#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
	user_op->params.update.ticks_slot_plus = ticks_slot_plus;
	user_op->params.update.ticks_slot_minus = ticks_slot_minus;
#endif /* CONFIG_BT_TICKER_SLOT_AGNOSTIC */
	user_op->params.update.lazy = lazy;
	user_op->params.update.force = force;
#if defined(CONFIG_BT_TICKER_EXT)
#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC) && !defined(CONFIG_BT_TICKER_LOW_LAT)
	user_op->params.update.must_expire = must_expire;
#endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC && !CONFIG_BT_TICKER_LOW_LAT */
#if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
	user_op->params.update.expire_info_id = expire_info_id;
#endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
#endif /* CONFIG_BT_TICKER_EXT */
	user_op->status = TICKER_STATUS_BUSY;
	user_op->fp_op_func = fp_op_func;
	user_op->op_context = op_context;

	/* Make sure transaction is completed before committing */
	cpu_dmb();
	user->last = last;

	instance->sched_cb(instance->caller_id_get_cb(user_id),
			   TICKER_CALL_ID_JOB, 0, instance);

	return user_op->status;
}
#endif /* CONFIG_BT_TICKER_UPDATE */
3816 
3817 /**
3818  * @brief Yield a ticker node with supplied absolute ticks reference
3819  *
3820  * @details Creates a new user operation of type TICKER_USER_OP_TYPE_YIELD_ABS
3821  * and schedules the ticker_job.
3822  *
3823  * @param instance_index     Index of ticker instance
3824  * @param user_id	     Ticker user id. Used for indexing user operations
3825  *			     and mapping to mayfly caller id
3826  * @param ticks_at_yield     Absolute tick count at ticker yield request
3827  * @param fp_op_func	     Function pointer of user operation completion
3828  *			     function
3829  * @param op_context	     Context passed in operation completion call
3830  *
3831  * @return TICKER_STATUS_BUSY if stop was successful but not yet completed.
3832  * TICKER_STATUS_FAILURE is returned if there are no more user operations
3833  * available, and TICKER_STATUS_SUCCESS is returned if ticker_job gets to run
3834  * before exiting ticker_stop
3835  */
3836 uint8_t ticker_yield_abs(uint8_t instance_index, uint8_t user_id,
3837 			  uint8_t ticker_id, uint32_t ticks_at_yield,
3838 			  ticker_op_func fp_op_func, void *op_context)
3839 {
3840 	struct ticker_instance *instance = &_instance[instance_index];
3841 	struct ticker_user_op *user_op;
3842 	struct ticker_user *user;
3843 	uint8_t last;
3844 
3845 	user = &instance->users[user_id];
3846 
3847 	last = user->last + 1;
3848 	if (last >= user->count_user_op) {
3849 		last = 0U;
3850 	}
3851 
3852 	if (last == user->first) {
3853 		return TICKER_STATUS_FAILURE;
3854 	}
3855 
3856 	user_op = &user->user_op[user->last];
3857 	user_op->op = TICKER_USER_OP_TYPE_YIELD_ABS;
3858 	user_op->id = ticker_id;
3859 	user_op->params.yield.ticks_at_yield = ticks_at_yield;
3860 	user_op->status = TICKER_STATUS_BUSY;
3861 	user_op->fp_op_func = fp_op_func;
3862 	user_op->op_context = op_context;
3863 
3864 	/* Make sure transaction is completed before committing */
3865 	cpu_dmb();
3866 	user->last = last;
3867 
3868 	instance->sched_cb(instance->caller_id_get_cb(user_id),
3869 			   TICKER_CALL_ID_JOB, 0, instance);
3870 
3871 	return user_op->status;
3872 }
3873 
3874 /**
3875  * @brief Stop a ticker node
3876  *
3877  * @details Creates a new user operation of type TICKER_USER_OP_TYPE_STOP and
3878  * schedules the ticker_job.
3879  *
3880  * @param instance_index     Index of ticker instance
3881  * @param user_id	     Ticker user id. Used for indexing user operations
3882  *			     and mapping to mayfly caller id
3883  * @param fp_op_func	     Function pointer of user operation completion
3884  *			     function
3885  * @param op_context	     Context passed in operation completion call
3886  *
3887  * @return TICKER_STATUS_BUSY if stop was successful but not yet completed.
3888  * TICKER_STATUS_FAILURE is returned if there are no more user operations
3889  * available, and TICKER_STATUS_SUCCESS is returned if ticker_job gets to run
3890  * before exiting ticker_stop
3891  */
3892 uint8_t ticker_stop(uint8_t instance_index, uint8_t user_id, uint8_t ticker_id,
3893 		  ticker_op_func fp_op_func, void *op_context)
3894 {
3895 	struct ticker_instance *instance = &_instance[instance_index];
3896 	struct ticker_user_op *user_op;
3897 	struct ticker_user *user;
3898 	uint8_t last;
3899 
3900 	user = &instance->users[user_id];
3901 
3902 	last = user->last + 1;
3903 	if (last >= user->count_user_op) {
3904 		last = 0U;
3905 	}
3906 
3907 	if (last == user->first) {
3908 		return TICKER_STATUS_FAILURE;
3909 	}
3910 
3911 	user_op = &user->user_op[user->last];
3912 	user_op->op = TICKER_USER_OP_TYPE_STOP;
3913 	user_op->id = ticker_id;
3914 	user_op->status = TICKER_STATUS_BUSY;
3915 	user_op->fp_op_func = fp_op_func;
3916 	user_op->op_context = op_context;
3917 
3918 	/* Make sure transaction is completed before committing */
3919 	cpu_dmb();
3920 	user->last = last;
3921 
3922 	instance->sched_cb(instance->caller_id_get_cb(user_id),
3923 			   TICKER_CALL_ID_JOB, 0, instance);
3924 
3925 	return user_op->status;
3926 }
3927 
3928 /**
3929  * @brief Stop a ticker node with supplied absolute ticks reference
3930  *
3931  * @details Creates a new user operation of type TICKER_USER_OP_TYPE_STOP_ABS
3932  * and schedules the ticker_job.
3933  *
3934  * @param instance_index     Index of ticker instance
3935  * @param user_id	     Ticker user id. Used for indexing user operations
3936  *			     and mapping to mayfly caller id
3937  * @param ticks_at_stop      Absolute tick count at ticker stop request
3938  * @param fp_op_func	     Function pointer of user operation completion
3939  *			     function
3940  * @param op_context	     Context passed in operation completion call
3941  *
3942  * @return TICKER_STATUS_BUSY if stop was successful but not yet completed.
3943  * TICKER_STATUS_FAILURE is returned if there are no more user operations
3944  * available, and TICKER_STATUS_SUCCESS is returned if ticker_job gets to run
3945  * before exiting ticker_stop
3946  */
3947 uint8_t ticker_stop_abs(uint8_t instance_index, uint8_t user_id,
3948 			 uint8_t ticker_id, uint32_t ticks_at_stop,
3949 			 ticker_op_func fp_op_func, void *op_context)
3950 {
3951 	struct ticker_instance *instance = &_instance[instance_index];
3952 	struct ticker_user_op *user_op;
3953 	struct ticker_user *user;
3954 	uint8_t last;
3955 
3956 	user = &instance->users[user_id];
3957 
3958 	last = user->last + 1;
3959 	if (last >= user->count_user_op) {
3960 		last = 0U;
3961 	}
3962 
3963 	if (last == user->first) {
3964 		return TICKER_STATUS_FAILURE;
3965 	}
3966 
3967 	user_op = &user->user_op[user->last];
3968 	user_op->op = TICKER_USER_OP_TYPE_STOP_ABS;
3969 	user_op->id = ticker_id;
3970 	user_op->params.yield.ticks_at_yield = ticks_at_stop;
3971 	user_op->status = TICKER_STATUS_BUSY;
3972 	user_op->fp_op_func = fp_op_func;
3973 	user_op->op_context = op_context;
3974 
3975 	/* Make sure transaction is completed before committing */
3976 	cpu_dmb();
3977 	user->last = last;
3978 
3979 	instance->sched_cb(instance->caller_id_get_cb(user_id),
3980 			   TICKER_CALL_ID_JOB, 0, instance);
3981 
3982 	return user_op->status;
3983 }
3984 
#if defined(CONFIG_BT_TICKER_NEXT_SLOT_GET)
/**
 * @brief Get next ticker node slot
 *
 * @details Gets the next ticker which has slot ticks specified and
 * return the ticker id and accumulated ticks until expiration. If no
 * ticker nodes have slot ticks, the next ticker node is returned.
 * If no head id is provided (TICKER_NULL) the first node is returned.
 * Results are delivered through the supplied pointers when the operation
 * completes.
 *
 * @param instance_index     Index of ticker instance
 * @param user_id	     Ticker user id. Used for indexing user operations
 *			     and mapping to mayfly caller id
 * @param ticker_id	     Pointer to id of ticker node
 * @param ticks_current	     Pointer to current ticks count
 * @param ticks_to_expire    Pointer to ticks to expire
 * @param fp_op_func	     Function pointer of user operation completion
 *			     function
 * @param op_context	     Context passed in operation completion call
 *
 * @return TICKER_STATUS_BUSY if request was successful but not yet completed.
 * TICKER_STATUS_FAILURE is returned if there are no more user operations
 * available, and TICKER_STATUS_SUCCESS is returned if ticker_job gets to run
 * before exiting ticker_next_slot_get
 */
uint8_t ticker_next_slot_get(uint8_t instance_index, uint8_t user_id,
			      uint8_t *ticker_id, uint32_t *ticks_current,
			      uint32_t *ticks_to_expire,
			      ticker_op_func fp_op_func, void *op_context)
{
/* NOTE: When any of the _ext features below is enabled, ticker_next_slot_get()
 * delegates to ticker_next_slot_get_ext() and the code after the #endif is
 * the body of ticker_next_slot_get_ext(). Otherwise the code after the
 * #endif directly completes ticker_next_slot_get() itself.
 */
#if defined(CONFIG_BT_TICKER_LAZY_GET) || \
	defined(CONFIG_BT_TICKER_REMAINDER_GET) || \
	defined(CONFIG_BT_TICKER_NEXT_SLOT_GET_MATCH)
	return ticker_next_slot_get_ext(instance_index, user_id, ticker_id,
					ticks_current, ticks_to_expire, NULL,
					NULL, NULL, NULL, fp_op_func,
					op_context);
}

/* Extended variant; additionally returns remainder and lazy count, and
 * optionally filters candidate nodes through fp_match_op_func (called with
 * match_op_context). Any of the extra pointers may be NULL.
 */
uint8_t ticker_next_slot_get_ext(uint8_t instance_index, uint8_t user_id,
				  uint8_t *ticker_id, uint32_t *ticks_current,
				  uint32_t *ticks_to_expire,
				  uint32_t *remainder, uint16_t *lazy,
				  ticker_op_match_func fp_match_op_func,
				  void *match_op_context,
				  ticker_op_func fp_op_func, void *op_context)
{
#endif /* CONFIG_BT_TICKER_LAZY_GET ||
	* CONFIG_BT_TICKER_REMAINDER_GET ||
	* CONFIG_BT_TICKER_NEXT_SLOT_GET_MATCH
	*/
	struct ticker_instance *instance = &_instance[instance_index];
	struct ticker_user_op *user_op;
	struct ticker_user *user;
	uint8_t last;

	user = &instance->users[user_id];

	/* Compute the next enqueue index in the circular user operation
	 * list, wrapping at the list capacity
	 */
	last = user->last + 1;
	if (last >= user->count_user_op) {
		last = 0U;
	}

	/* List is full when the next enqueue index would meet the dequeue
	 * index
	 */
	if (last == user->first) {
		return TICKER_STATUS_FAILURE;
	}

	user_op = &user->user_op[user->last];
	user_op->op = TICKER_USER_OP_TYPE_SLOT_GET;
	user_op->id = TICKER_NULL;
	user_op->params.slot_get.ticker_id = ticker_id;
	user_op->params.slot_get.ticks_current = ticks_current;
	user_op->params.slot_get.ticks_to_expire = ticks_to_expire;
#if defined(CONFIG_BT_TICKER_REMAINDER_GET)
	user_op->params.slot_get.remainder = remainder;
#endif /* CONFIG_BT_TICKER_REMAINDER_GET */
#if defined(CONFIG_BT_TICKER_LAZY_GET)
	user_op->params.slot_get.lazy = lazy;
#endif /* CONFIG_BT_TICKER_LAZY_GET */
#if defined(CONFIG_BT_TICKER_NEXT_SLOT_GET_MATCH)
	user_op->params.slot_get.fp_match_op_func = fp_match_op_func;
	user_op->params.slot_get.match_op_context = match_op_context;
#endif /* CONFIG_BT_TICKER_NEXT_SLOT_GET_MATCH */
	user_op->status = TICKER_STATUS_BUSY;
	user_op->fp_op_func = fp_op_func;
	user_op->op_context = op_context;

	/* Make sure transaction is completed before committing */
	cpu_dmb();
	user->last = last;

	instance->sched_cb(instance->caller_id_get_cb(user_id),
			   TICKER_CALL_ID_JOB, 0, instance);

	return user_op->status;
}
#endif /* CONFIG_BT_TICKER_NEXT_SLOT_GET */
4081 
4082 #if defined(CONFIG_BT_TICKER_JOB_IDLE_GET)
4083 /**
4084  * @brief Get a callback at the end of ticker job execution
4085  *
4086  * @details Operation completion callback is called at the end of the
4087  * ticker_job execution. The user operation is immutable.
4088  *
4089  * @param instance_index     Index of ticker instance
4090  * @param user_id	     Ticker user id. Used for indexing user operations
4091  *			     and mapping to mayfly caller id
4092  * @param fp_op_func	     Function pointer of user operation completion
4093  *			     function
4094  * @param op_context	     Context passed in operation completion call
4095  *
4096  * @return TICKER_STATUS_BUSY if request was successful but not yet completed.
4097  * TICKER_STATUS_FAILURE is returned if there are no more user operations
4098  * available, and TICKER_STATUS_SUCCESS is returned if ticker_job gets to run
4099  * before exiting ticker_job_idle_get
4100  */
4101 uint8_t ticker_job_idle_get(uint8_t instance_index, uint8_t user_id,
4102 			  ticker_op_func fp_op_func, void *op_context)
4103 {
4104 	struct ticker_instance *instance = &_instance[instance_index];
4105 	struct ticker_user_op *user_op;
4106 	struct ticker_user *user;
4107 	uint8_t last;
4108 
4109 	user = &instance->users[user_id];
4110 
4111 	last = user->last + 1;
4112 	if (last >= user->count_user_op) {
4113 		last = 0U;
4114 	}
4115 
4116 	if (last == user->first) {
4117 		return TICKER_STATUS_FAILURE;
4118 	}
4119 
4120 	user_op = &user->user_op[user->last];
4121 	user_op->op = TICKER_USER_OP_TYPE_IDLE_GET;
4122 	user_op->id = TICKER_NULL;
4123 	user_op->status = TICKER_STATUS_BUSY;
4124 	user_op->fp_op_func = fp_op_func;
4125 	user_op->op_context = op_context;
4126 
4127 	/* Make sure transaction is completed before committing */
4128 	cpu_dmb();
4129 	user->last = last;
4130 
4131 	instance->sched_cb(instance->caller_id_get_cb(user_id),
4132 			   TICKER_CALL_ID_JOB, 0, instance);
4133 
4134 	return user_op->status;
4135 }
4136 #endif /* CONFIG_BT_TICKER_JOB_IDLE_GET */
4137 
4138 #if !defined(CONFIG_BT_TICKER_LOW_LAT) && \
4139 	!defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC) && \
4140 	defined(CONFIG_BT_TICKER_PRIORITY_SET)
4141 /**
4142  * @brief Set ticker node priority
4143  *
4144  * @param instance_index     Index of ticker instance
4145  * @param user_id	     Ticker user id. Used for indexing user operations
4146  *			     and mapping to mayfly caller id
4147  * @param ticker_id	     Id of ticker node to set priority on
4148  * @param priority	     Priority to set. Range [-128..127], default is 0.
4149  *			     Lover value equals higher priority. Setting
4150  *			     priority to -128 (TICKER_PRIORITY_CRITICAL) makes
4151  *			     the node win all collision challenges. Only one
4152  *			     node can have this priority assigned.
4153  * @param fp_op_func	     Function pointer of user operation completion
4154  *			     function
4155  * @param op_context	     Context passed in operation completion call
4156  *
4157  * @return TICKER_STATUS_BUSY if request was successful but not yet completed.
4158  * TICKER_STATUS_FAILURE is returned if there are no more user operations
4159  * available, and TICKER_STATUS_SUCCESS is returned if ticker_job gets to run
4160  * before exiting ticker_priority_set
4161  */
4162 uint8_t ticker_priority_set(uint8_t instance_index, uint8_t user_id, uint8_t ticker_id,
4163 			  int8_t priority, ticker_op_func fp_op_func,
4164 			  void *op_context)
4165 {
4166 	struct ticker_instance *instance = &_instance[instance_index];
4167 	struct ticker_user_op *user_op;
4168 	struct ticker_user *user;
4169 	uint8_t last;
4170 
4171 	user = &instance->users[user_id];
4172 
4173 	last = user->last + 1;
4174 	if (last >= user->count_user_op) {
4175 		last = 0U;
4176 	}
4177 
4178 	if (last == user->first) {
4179 		return TICKER_STATUS_FAILURE;
4180 	}
4181 
4182 	user_op = &user->user_op[user->last];
4183 	user_op->op = TICKER_USER_OP_TYPE_PRIORITY_SET;
4184 	user_op->id = ticker_id;
4185 	user_op->params.priority_set.priority = priority;
4186 	user_op->status = TICKER_STATUS_BUSY;
4187 	user_op->fp_op_func = fp_op_func;
4188 	user_op->op_context = op_context;
4189 
4190 	/* Make sure transaction is completed before committing */
4191 	cpu_dmb();
4192 	user->last = last;
4193 
4194 	instance->sched_cb(instance->caller_id_get_cb(user_id),
4195 			   TICKER_CALL_ID_JOB, 0, instance);
4196 
4197 	return user_op->status;
4198 }
4199 #endif /* !CONFIG_BT_TICKER_LOW_LAT &&
4200 	* !CONFIG_BT_TICKER_SLOT_AGNOSTIC &&
4201 	* CONFIG_BT_TICKER_PRIORITY_SET
4202 	*/
4203 
4204 /**
4205  * @brief Schedule ticker job
4206  *
4207  * @param instance_index Index of ticker instance
4208  * @param user_id	 Ticker user id. Maps to mayfly caller id
4209  */
4210 void ticker_job_sched(uint8_t instance_index, uint8_t user_id)
4211 {
4212 	struct ticker_instance *instance = &_instance[instance_index];
4213 
4214 	instance->sched_cb(instance->caller_id_get_cb(user_id),
4215 			   TICKER_CALL_ID_JOB, 0, instance);
4216 }
4217 
4218 /**
4219  * @brief Get current absolute tick count
4220  *
4221  * @return Absolute tick count
4222  */
4223 uint32_t ticker_ticks_now_get(void)
4224 {
4225 	return cntr_cnt_get();
4226 }
4227 
4228 /**
4229  * @brief Get difference between two tick counts
4230  *
4231  * @details Subtract two counts and truncate to correct HW dependent counter
4232  * bit width
4233  *
4234  * @param ticks_now Highest tick count (now)
4235  * @param ticks_old Tick count to subtract from ticks_now
4236  */
4237 uint32_t ticker_ticks_diff_get(uint32_t ticks_now, uint32_t ticks_old)
4238 {
4239 	return ((ticks_now - ticks_old) & HAL_TICKER_CNTR_MASK);
4240 }
4241