1 /*
2  * Copyright (c) 2016-2018 Nordic Semiconductor ASA
3  * Copyright (c) 2016 Vinayak Kariappa Chettimada
4  *
5  * SPDX-License-Identifier: Apache-2.0
6  */
7 
8 #include <stdbool.h>
9 #include <zephyr/types.h>
10 #include <soc.h>
11 
12 #include "hal/cntr.h"
13 #include "hal/ticker.h"
14 #include "hal/cpu.h"
15 
16 #include "ticker.h"
17 
18 #include "hal/debug.h"
19 
20 /*****************************************************************************
21  * Defines
22  ****************************************************************************/
23 #define DOUBLE_BUFFER_SIZE 2
24 
25 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
26 #if !defined(CONFIG_BT_CTLR_ADV_AUX_SET)
27 #define BT_CTLR_ADV_AUX_SET 0
28 #else
29 #define BT_CTLR_ADV_AUX_SET CONFIG_BT_CTLR_ADV_AUX_SET
30 #endif
31 #if !defined(CONFIG_BT_CTLR_ADV_SYNC_SET)
32 #define BT_CTLR_ADV_SYNC_SET 0
33 #else
34 #define BT_CTLR_ADV_SYNC_SET CONFIG_BT_CTLR_ADV_SYNC_SET
35 #endif
36 #if defined(CONFIG_BT_CTLR_ADV_ISO)
37 #define TICKER_EXPIRE_INFO_MAX (BT_CTLR_ADV_AUX_SET + BT_CTLR_ADV_SYNC_SET*2)
38 #else
39 #define TICKER_EXPIRE_INFO_MAX (BT_CTLR_ADV_AUX_SET + BT_CTLR_ADV_SYNC_SET)
40 #endif /* !CONFIG_BT_CTLR_ADV_ISO */
41 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
42 
43 /*****************************************************************************
44  * Types
45  ****************************************************************************/
46 
/* Ticker node. Nodes are kept in a linked list ordered by expiration;
 * each node's ticks_to_expire is relative to the preceding node in the
 * list (see ticker_enqueue/ticker_dequeue). Size is asserted against
 * TICKER_NODE_T_SIZE below - do not change the field layout.
 */
struct ticker_node {
	uint8_t  next;			    /* Next ticker node */

	uint8_t  req;			    /* Request counter */
	uint8_t  ack;			    /* Acknowledge counter. Imbalance
					     * between req and ack indicates
					     * ongoing operation
					     */
	uint8_t  force:1;		    /* If non-zero, node timeout should
					     * be forced at next expiration
					     */
#if defined(CONFIG_BT_TICKER_PREFER_START_BEFORE_STOP)
	uint8_t  start_pending:1;	    /* If non-zero, start is pending for
					     * bottom half of ticker_job.
					     */
#endif /* CONFIG_BT_TICKER_PREFER_START_BEFORE_STOP */
	uint32_t ticks_periodic;	    /* If non-zero, interval
					     * between expirations
					     */
	uint32_t ticks_to_expire;	    /* Ticks until expiration */
	ticker_timeout_func timeout_func;   /* User timeout function */
	void  *context;			    /* Context delivered to timeout
					     * function
					     */
	uint32_t ticks_to_expire_minus;	    /* Negative drift correction */
#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
	uint32_t ticks_slot;		    /* Air-time reservation for node */
#endif /* CONFIG_BT_TICKER_SLOT_AGNOSTIC */
	uint16_t lazy_periodic;		    /* Number of timeouts to allow
					     * skipping
					     */
	uint16_t lazy_current;		    /* Current number of timeouts
					     * skipped = peripheral latency
					     */
	/* remainder_periodic shares storage with fp_op_func */
	union {
#if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
		uint32_t remainder_periodic;/* Sub-microsecond tick remainder
					     * for each period
					     */
#endif /* CONFIG_BT_TICKER_REMAINDER_SUPPORT */

		ticker_op_func fp_op_func;  /* Operation completion callback */
	};

	/* remainder_current shares storage with op_context */
	union {
#if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
		uint32_t remainder_current; /* Current sub-microsecond tick
					     * remainder
					     */
#endif /* CONFIG_BT_TICKER_REMAINDER_SUPPORT */

		void  *op_context;	    /* Context passed in completion
					     * callback
					     */
	};

#if  defined(CONFIG_BT_TICKER_EXT)
	struct ticker_ext *ext_data;	    /* Ticker extension data */
#endif /* CONFIG_BT_TICKER_EXT */
#if !defined(CONFIG_BT_TICKER_LOW_LAT) && \
	!defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
	uint8_t  must_expire;		    /* Node must expire, even if it
					     * collides with other nodes
					     */
#if defined(CONFIG_BT_TICKER_PRIORITY_SET)
	int8_t  priority;		    /* Ticker node priority. 0 is
					     * default. Lower value is higher
					     * priority
					     */
#endif /* CONFIG_BT_TICKER_PRIORITY_SET */
#endif /* !CONFIG_BT_TICKER_LOW_LAT &&
	* !CONFIG_BT_TICKER_SLOT_AGNOSTIC
	*/
};
121 
/* Expire info node, stored in the ticker instance expire_infos[] array
 * when CONFIG_BT_TICKER_EXT_EXPIRE_INFO is enabled.
 */
struct ticker_expire_info_internal {
	uint32_t ticks_to_expire;	/* Ticks until expiration */
	uint32_t remainder;		/* Sub-microsecond tick remainder */
	uint16_t lazy;			/* Number of timeouts skipped */
	uint8_t ticker_id;		/* Id of ticker node this info
					 * refers to
					 */
	uint8_t outdated:1;		/* If non-zero, info is stale and
					 * needs refresh (see
					 * expire_infos_outdated in instance)
					 */
	uint8_t found:1;		/* NOTE(review): presumably set when a
					 * matching ticker node was found -
					 * confirm against users of this flag
					 */
	uint8_t last:1;			/* NOTE(review): presumably marks the
					 * last used entry in the array -
					 * confirm
					 */
};
131 
132 /* Operations to be performed in ticker_job.
133  * Possible values for field "op" in struct ticker_user_op
134  */
135 #define TICKER_USER_OP_TYPE_NONE         0
136 #define TICKER_USER_OP_TYPE_IDLE_GET     1
137 #define TICKER_USER_OP_TYPE_SLOT_GET     2
138 #define TICKER_USER_OP_TYPE_PRIORITY_SET 3
139 #define TICKER_USER_OP_TYPE_START        4
140 #define TICKER_USER_OP_TYPE_UPDATE       5
141 #define TICKER_USER_OP_TYPE_YIELD_ABS    6
142 #define TICKER_USER_OP_TYPE_STOP         7
143 #define TICKER_USER_OP_TYPE_STOP_ABS     8
144 
/* Slot window re-schedule states */
#define TICKER_RESCHEDULE_STATE_NONE     0
#define TICKER_RESCHEDULE_STATE_PENDING  1
#define TICKER_RESCHEDULE_STATE_DONE     2

#if defined(CONFIG_BT_TICKER_EXT) && !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
/* Evaluates non-zero if the ticker node has extension data with a
 * re-schedule slot window configured
 */
#define TICKER_HAS_SLOT_WINDOW(_ticker) \
	((_ticker)->ext_data && ((_ticker)->ext_data->ticks_slot_window != 0U))
/* Evaluates non-zero if a slot window re-schedule is pending for the node.
 * The _ticker argument is parenthesized so any expression is safe to pass
 * (consistent with TICKER_HAS_SLOT_WINDOW above).
 */
#define TICKER_RESCHEDULE_PENDING(_ticker) \
	((_ticker)->ext_data && ((_ticker)->ext_data->reschedule_state == \
		TICKER_RESCHEDULE_STATE_PENDING))
#else
#define TICKER_HAS_SLOT_WINDOW(_ticker) 0
#define TICKER_RESCHEDULE_PENDING(_ticker) 0
#endif
160 
/* User operation data structure for start opcode. Used for passing start
 * requests to ticker_job
 */
struct ticker_user_op_start {
	uint32_t ticks_at_start;	/* Anchor ticks (absolute) */
	uint32_t ticks_first;		/* Initial timeout ticks */
	uint32_t ticks_periodic;	/* Ticker period ticks */

#if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
	uint32_t remainder_periodic;	/* Sub-microsecond tick remainder
					 * for each period
					 */

#if defined(CONFIG_BT_TICKER_START_REMAINDER)
	uint32_t remainder_first;       /* Sub-microsecond tick remainder
					 * for the first expiration
					 */
#endif /* CONFIG_BT_TICKER_START_REMAINDER */
#endif /* CONFIG_BT_TICKER_REMAINDER_SUPPORT */

	uint16_t lazy;			/* Periodic latency in number of
					 * periods
					 */

#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
	uint32_t ticks_slot;		/* Air-time reservation ticks */
#endif /* CONFIG_BT_TICKER_SLOT_AGNOSTIC */

	ticker_timeout_func fp_timeout_func; /* Timeout callback function */
	void  *context;			/* Context passed in timeout callback */

#if defined(CONFIG_BT_TICKER_EXT)
	struct ticker_ext *ext_data;	/* Ticker extension data instance */
#endif /* CONFIG_BT_TICKER_EXT */
};
192 
/* User operation data structure for update opcode. Used for passing update
 * requests to ticker_job
 */
struct ticker_user_op_update {
	uint32_t ticks_drift_plus;	/* Requested positive drift in ticks */
	uint32_t ticks_drift_minus;	/* Requested negative drift in ticks */
#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
	uint32_t ticks_slot_plus;	/* Number of ticks to add to slot
					 * reservation (air-time)
					 */
	uint32_t ticks_slot_minus;	/* Number of ticks to subtract from
					 * slot reservation (air-time)
					 */
#endif /* CONFIG_BT_TICKER_SLOT_AGNOSTIC */
	uint16_t lazy;			/* Peripheral latency:
					 *  0: Do nothing
					 *  1: latency = 0
					 * >1: latency = lazy - 1
					 */
	uint8_t  force;			/* Force update */
#if defined(CONFIG_BT_TICKER_EXT) && !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC) &&\
	!defined(CONFIG_BT_TICKER_LOW_LAT)
	uint8_t must_expire;		/* Node must expire, even if it
					 * collides with other nodes:
					 *  0x00: Do nothing
					 *  0x01: Disable must_expire
					 *  0x02: Enable must_expire
					 */
#endif
#if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
	uint8_t expire_info_id;		/* Id of ticker node whose expire
					 * info is affected by the update -
					 * TODO confirm against ticker_job
					 * users
					 */
#endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
};
226 
/* User operation data structure for yield/stop opcode. Used for passing yield/
 * stop requests with absolute tick to ticker_job
 */
struct ticker_user_op_yield {
	uint32_t ticks_at_yield;        /* Anchor ticks (absolute) at which
					 * the yield/stop takes effect
					 */
};
233 
/* User operation data structure for slot_get opcode. Used for passing request
 * to get next ticker with slot ticks via ticker_job.
 * Pointer fields are in/out parameters; see ticker_by_next_slot_get.
 */
struct ticker_user_op_slot_get {
	uint8_t  *ticker_id;		/* [in/out] Ticker node id */
	uint32_t *ticks_current;	/* [in/out] Absolute current ticks */
	uint32_t *ticks_to_expire;	/* [in/out] Accumulated ticks until
					 * expiration
					 */
#if defined(CONFIG_BT_TICKER_REMAINDER_GET)
	uint32_t *remainder;		/* [out] Sub-microsecond tick
					 * remainder of found node
					 */
#endif /* CONFIG_BT_TICKER_REMAINDER_GET */
#if defined(CONFIG_BT_TICKER_LAZY_GET)
	uint16_t *lazy;			/* [out] lazy_current of found node */
#endif /* CONFIG_BT_TICKER_LAZY_GET */
#if defined(CONFIG_BT_TICKER_NEXT_SLOT_GET_MATCH)
	ticker_op_match_func fp_match_op_func; /* Node match callback, or
						* NULL if unused
						*/
	void *match_op_context;		/* Context passed to match callback,
					 * or NULL if unused
					 */
#endif /* CONFIG_BT_TICKER_NEXT_SLOT_GET_MATCH */
};
252 
/* User operation data structure for priority_set opcode. Used for passing
 * request to set ticker node priority via ticker_job
 */
struct ticker_user_op_priority_set {
	int8_t priority;		/* Node priority. Defaults to 0.
					 * Lower value is higher priority
					 */
};
259 
/* User operation top level data structure. Used for passing requests to
 * ticker_job
 */
struct ticker_user_op {
	uint8_t op;			/* User operation
					 * (TICKER_USER_OP_TYPE_*)
					 */
	uint8_t id;			/* Ticker node id */
	uint8_t status;			/* Operation result */
	union {
		struct ticker_user_op_start        start;
		struct ticker_user_op_update       update;
		struct ticker_user_op_yield        yield;
		struct ticker_user_op_slot_get     slot_get;
		struct ticker_user_op_priority_set priority_set;
	} params;			/* User operation parameters;
					 * member selected by op
					 */
	ticker_op_func fp_op_func;	/* Operation completion callback */
	void  *op_context;		/* Context passed in completion callback */
};
277 
/* User data structure for operations. Holds a queue of pending user
 * operations, indexed by first/middle/last slot indices.
 */
struct ticker_user {
	uint8_t count_user_op;		/* Number of user operation slots */
	uint8_t first;			/* Slot index of first user operation */
	uint8_t middle;			/* Slot index of last managed user op.
					 * Updated by ticker_job_list_manage
					 * for use in ticker_job_list_insert
					 */
	uint8_t last;			/* Slot index of last user operation */
	struct ticker_user_op *user_op; /* Pointer to user operation array */
};
290 
/* Ticker instance
 */
struct ticker_instance {
	struct ticker_node *nodes;	/* Pointer to ticker nodes */
	struct ticker_user *users;	/* Pointer to user nodes */
	uint8_t  count_node;		/* Number of ticker nodes */
	uint8_t  count_user;		/* Number of user nodes */
	uint8_t  ticks_elapsed_first;	/* Index from which elapsed ticks count
					 * is pulled
					 */
	uint8_t  ticks_elapsed_last;	/* Index to which elapsed ticks count
					 * is pushed
					 */
	uint32_t ticks_elapsed[DOUBLE_BUFFER_SIZE]; /* Buffer for elapsed
						     * ticks
						     */
	uint32_t ticks_current;		/* Absolute ticks elapsed at last
					 * ticker_job
					 */
	uint8_t  ticker_id_head;	/* Index of first ticker node (next to
					 * expire)
					 */
	uint8_t  job_guard;		/* Flag preventing ticker_worker from
					 * running if ticker_job is active
					 */
	uint8_t  worker_trigger;	/* Flag preventing ticker_job from
					 * starting if ticker_worker was
					 * requested, and to trigger
					 * ticker_worker at end of job, if
					 * requested
					 */

#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
	uint8_t  ticker_id_slot_previous; /* Id of previous slot reserving
					   * ticker node
					   */
	uint32_t ticks_slot_previous;	/* Number of ticks previously reserved
					 * by a ticker node (active air-time)
					 */
#endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */

#if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
	struct ticker_expire_info_internal expire_infos[TICKER_EXPIRE_INFO_MAX];
					/* Expire info entries */
	bool expire_infos_outdated;	/* If true, one or more expire info
					 * entries need refresh
					 */
#endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */

	ticker_caller_id_get_cb_t caller_id_get_cb; /* Function for retrieving
						     * the caller id from user
						     * id
						     */
	ticker_sched_cb_t         sched_cb;	    /* Function for scheduling
						     * ticker_worker and
						     * ticker_job
						     */
	ticker_trigger_set_cb_t   trigger_set_cb;   /* Function for setting
						     * the trigger (compare
						     * value)
						     */
};
350 
351 BUILD_ASSERT(sizeof(struct ticker_node)    == TICKER_NODE_T_SIZE);
352 BUILD_ASSERT(sizeof(struct ticker_user)    == TICKER_USER_T_SIZE);
353 BUILD_ASSERT(sizeof(struct ticker_user_op) == TICKER_USER_OP_T_SIZE);
354 
355 /*****************************************************************************
356  * Global instances
357  ****************************************************************************/
#define TICKER_INSTANCE_MAX 1
/* Pool of ticker instances */
static struct ticker_instance _instance[TICKER_INSTANCE_MAX];
360 
361 /*****************************************************************************
362  * Static Functions
363  ****************************************************************************/
364 
365 static inline uint8_t ticker_add_to_remainder(uint32_t *remainder, uint32_t to_add);
366 
367 /**
368  * @brief Update elapsed index
369  *
370  * @param ticks_elapsed_index Pointer to current index
371  *
372  * @internal
373  */
ticker_next_elapsed(uint8_t * ticks_elapsed_index)374 static inline void ticker_next_elapsed(uint8_t *ticks_elapsed_index)
375 {
376 	uint8_t idx = *ticks_elapsed_index + 1;
377 
378 	if (idx == DOUBLE_BUFFER_SIZE) {
379 		idx = 0U;
380 	}
381 	*ticks_elapsed_index = idx;
382 }
383 
384 #if defined(CONFIG_BT_TICKER_LOW_LAT)
385 /**
386  * @brief Get ticker expiring in a specific slot
387  *
388  * @details Searches for a ticker which expires in a specific slot starting
389  * at 'ticks_slot'.
390  *
391  * @param node           Pointer to ticker node array
392  * @param ticker_id_head Id of initial ticker node
393  * @param ticks_slot     Ticks indicating slot to get
394  *
395  * @return Id of ticker expiring within slot or TICKER_NULL
396  * @internal
397  */
ticker_by_slot_get(struct ticker_node * node,uint8_t ticker_id_head,uint32_t ticks_slot)398 static uint8_t ticker_by_slot_get(struct ticker_node *node, uint8_t ticker_id_head,
399 			       uint32_t ticks_slot)
400 {
401 	while (ticker_id_head != TICKER_NULL) {
402 		struct ticker_node *ticker;
403 		uint32_t ticks_to_expire;
404 
405 		ticker = &node[ticker_id_head];
406 		ticks_to_expire = ticker->ticks_to_expire;
407 
408 		if (ticks_slot <= ticks_to_expire) {
409 			/* Next ticker expiration is outside the checked slot */
410 			return TICKER_NULL;
411 		}
412 
413 		if (ticker->ticks_slot) {
414 			/* This ticker node has slot defined and expires within
415 			 * checked slot
416 			 */
417 			break;
418 		}
419 
420 		ticks_slot -= ticks_to_expire;
421 		ticker_id_head = ticker->next;
422 	}
423 
424 	return ticker_id_head;
425 }
426 #endif /* CONFIG_BT_TICKER_LOW_LAT */
427 
428 #if defined(CONFIG_BT_TICKER_NEXT_SLOT_GET)
/**
 * @brief Get next ticker with slot ticks or match
 *
 * @details Iterates ticker nodes from ticker_id_head. If no head id is provided
 * (TICKER_NULL), iteration starts from the first node.
 * Operation details:
 *
 * NORMAL MODE (!CONFIG_BT_TICKER_SLOT_AGNOSTIC)
 * - Gets the next ticker which has slot ticks specified and return the ticker
 *   id and accumulated ticks until expiration.
 * - If a matching function is provided, this function is called and node iteration
 *   continues until match function returns true.
 *
 * SLOT AGNOSTIC MODE (CONFIG_BT_TICKER_SLOT_AGNOSTIC)
 * - Gets the next ticker node.
 * - If a matching function is provided, this function is called and node iteration
 *   continues until match function returns true.
 *
 * @param instance          Pointer to ticker instance
 * @param ticker_id_head    Pointer to id of first ticker node [in/out]
 * @param ticks_current     Pointer to current ticks count [in/out]
 * @param ticks_to_expire   Pointer to ticks to expire [in/out]
 * @param fp_match_op_func  Pointer to match function or NULL if unused
 * @param match_op_context  Pointer to operation context passed to match
 *                          function or NULL if unused
 * @param remainder         Pointer to remainder variable to receive
 *                          remainder_current of found ticker node, or NULL
 *                          if unused
 * @param lazy              Pointer to lazy variable to receive lazy_current
 *                          of found ticker node
 * @internal
 */
static void ticker_by_next_slot_get(struct ticker_instance *instance,
				    uint8_t *ticker_id_head,
				    uint32_t *ticks_current,
				    uint32_t *ticks_to_expire,
				    ticker_op_match_func fp_match_op_func,
				    void *match_op_context, uint32_t *remainder,
				    uint16_t *lazy)
{
	struct ticker_node *ticker;
	struct ticker_node *node;
	uint32_t _ticks_to_expire;
	uint8_t _ticker_id_head;

	node = instance->nodes;

	_ticker_id_head = *ticker_id_head;
	_ticks_to_expire = *ticks_to_expire;
	/* Restart from the instance head if no head id was provided or if
	 * the caller's tick context is stale w.r.t. the instance
	 */
	if ((_ticker_id_head == TICKER_NULL) ||
	    (*ticks_current != instance->ticks_current)) {
		/* Initialize with instance head */
		_ticker_id_head = instance->ticker_id_head;
		*ticks_current = instance->ticks_current;
		_ticks_to_expire = 0U;
	} else {
		/* Get ticker id for next node */
		ticker = &node[_ticker_id_head];
		_ticker_id_head = ticker->next;
	}

	/* Find first ticker node with match or slot ticks.
	 * NOTE: the if/else and brace structure below intentionally spans
	 * the preprocessor branches - keep them in sync when editing.
	 */
	while (_ticker_id_head != TICKER_NULL) {
		ticker = &node[_ticker_id_head];

#if defined(CONFIG_BT_TICKER_NEXT_SLOT_GET_MATCH)
		if (fp_match_op_func) {
			uint32_t ticks_slot = 0;

#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
			ticks_slot += ticker->ticks_slot;
#endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */

			/* Match node id */
			if (fp_match_op_func(_ticker_id_head, ticks_slot,
					     _ticks_to_expire +
					     ticker->ticks_to_expire,
					     match_op_context)) {
				/* Match found */
				break;
			}
		} else
#else /* !CONFIG_BT_TICKER_NEXT_SLOT_GET_MATCH */
	ARG_UNUSED(fp_match_op_func);
	ARG_UNUSED(match_op_context);
#endif /* !CONFIG_BT_TICKER_NEXT_SLOT_GET_MATCH */

#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
			if (ticker->ticks_slot) {
				/* Matching not used and node has slot ticks */
				break;
#else
			{
				/* Matching not used and slot agnostic */
				break;
#endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */
			}

		/* Accumulate expire ticks */
		_ticks_to_expire += ticker->ticks_to_expire;
		_ticker_id_head = ticker->next;
	}

	if (_ticker_id_head != TICKER_NULL) {
		/* Add ticks for found ticker */
		_ticks_to_expire += ticker->ticks_to_expire;

#if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
#if defined(CONFIG_BT_TICKER_REMAINDER_GET)
		if (remainder) {
			*remainder = ticker->remainder_current;
		}
#else /* !CONFIG_BT_TICKER_REMAINDER_GET */
		ARG_UNUSED(remainder);
#endif /* !CONFIG_BT_TICKER_REMAINDER_GET */
#else /* !CONFIG_BT_TICKER_REMAINDER_SUPPORT */
		ARG_UNUSED(remainder);
#endif /* !CONFIG_BT_TICKER_REMAINDER_SUPPORT */

#if defined(CONFIG_BT_TICKER_LAZY_GET)
		if (lazy) {
			*lazy = ticker->lazy_current;
		}
#else /* !CONFIG_BT_TICKER_LAZY_GET */
	ARG_UNUSED(lazy);
#endif /* !CONFIG_BT_TICKER_LAZY_GET */
	}

	*ticker_id_head = _ticker_id_head;
	*ticks_to_expire = _ticks_to_expire;
}
557 #endif /* CONFIG_BT_TICKER_NEXT_SLOT_GET */
558 
559 #if !defined(CONFIG_BT_TICKER_LOW_LAT)
560 /**
561  * @brief Enqueue ticker node
562  *
563  * @details Finds insertion point for new ticker node and inserts the
564  * node in the linked node list.
565  *
566  * @param instance Pointer to ticker instance
567  * @param id       Ticker node id to enqueue
568  *
569  * @return Id of enqueued ticker node
570  * @internal
571  */
572 static uint8_t ticker_enqueue(struct ticker_instance *instance, uint8_t id)
573 {
574 	struct ticker_node *ticker_current;
575 	struct ticker_node *ticker_new;
576 	uint32_t ticks_to_expire_current;
577 	struct ticker_node *node;
578 	uint32_t ticks_to_expire;
579 	uint8_t previous;
580 	uint8_t current;
581 
582 	node = &instance->nodes[0];
583 	ticker_new = &node[id];
584 	ticks_to_expire = ticker_new->ticks_to_expire;
585 	current = instance->ticker_id_head;
586 
587 	/* Find insertion point for new ticker node and adjust ticks_to_expire
588 	 * relative to insertion point
589 	 */
590 	previous = TICKER_NULL;
591 
592 	while ((current != TICKER_NULL) && (ticks_to_expire >=
593 		(ticks_to_expire_current =
594 		(ticker_current = &node[current])->ticks_to_expire))) {
595 
596 		ticks_to_expire -= ticks_to_expire_current;
597 
598 		/* Check for timeout in same tick - prioritize according to
599 		 * latency
600 		 */
601 		if (ticks_to_expire == 0 && (ticker_new->lazy_current >
602 					     ticker_current->lazy_current)) {
603 			ticks_to_expire = ticks_to_expire_current;
604 			break;
605 		}
606 
607 		previous = current;
608 		current = ticker_current->next;
609 	}
610 
611 	/* Link in new ticker node and adjust ticks_to_expire to relative value
612 	 */
613 	ticker_new->ticks_to_expire = ticks_to_expire;
614 	ticker_new->next = current;
615 
616 	if (previous == TICKER_NULL) {
617 		instance->ticker_id_head = id;
618 	} else {
619 		node[previous].next = id;
620 	}
621 
622 	if (current != TICKER_NULL) {
623 		node[current].ticks_to_expire -= ticks_to_expire;
624 	}
625 
626 	return id;
627 }
628 #else /* CONFIG_BT_TICKER_LOW_LAT */
629 
/**
 * @brief Enqueue ticker node
 *
 * @details Finds insertion point for new ticker node and inserts the
 * node in the linked node list. However, if the new ticker node collides
 * with an existing node or the expiration is inside the previous slot,
 * the node is not inserted.
 *
 * @param instance Pointer to ticker instance
 * @param id       Ticker node id to enqueue
 *
 * @return Id of enqueued ticker node, or id of previous- or colliding
 * ticker node if new node was not enqueued
 * @internal
 */
static uint8_t ticker_enqueue(struct ticker_instance *instance, uint8_t id)
{
	struct ticker_node *ticker_current;
	struct ticker_node *ticker_new;
	uint32_t ticks_to_expire_current;
	uint8_t ticker_id_slot_previous;
	uint32_t ticks_slot_previous;
	struct ticker_node *node;
	uint32_t ticks_to_expire;
	uint8_t previous;
	uint8_t current;
	uint8_t collide;

	node = &instance->nodes[0];
	ticker_new = &node[id];
	ticks_to_expire = ticker_new->ticks_to_expire;

	collide = ticker_id_slot_previous = TICKER_NULL;
	current = instance->ticker_id_head;
	/* previous == current marks insertion at list head below */
	previous = current;
	ticks_slot_previous = instance->ticks_slot_previous;

	/* Find insertion point for new ticker node and adjust ticks_to_expire
	 * relative to insertion point, while tracking the nearest preceding
	 * slot-reserving node and its remaining reserved ticks
	 */
	while ((current != TICKER_NULL) &&
	       (ticks_to_expire >
		(ticks_to_expire_current =
		 (ticker_current = &node[current])->ticks_to_expire))) {
		ticks_to_expire -= ticks_to_expire_current;

		if (ticker_current->ticks_slot != 0U) {
			ticks_slot_previous = ticker_current->ticks_slot;
			ticker_id_slot_previous = current;
		} else {
			/* Consume elapsed ticks from the previous slot
			 * reservation, saturating at zero
			 */
			if (ticks_slot_previous > ticks_to_expire_current) {
				ticks_slot_previous -= ticks_to_expire_current;
			} else {
				ticks_slot_previous = 0U;
			}
		}
		previous = current;
		current = ticker_current->next;
	}

	/* Check for collision for new ticker node at insertion point */
	collide = ticker_by_slot_get(&node[0], current,
				     ticks_to_expire + ticker_new->ticks_slot);

	if ((ticker_new->ticks_slot == 0U) ||
	    ((ticks_slot_previous <= ticks_to_expire) &&
	     (collide == TICKER_NULL))) {
		/* New ticker node has no slot ticks or there is no collision -
		 * link it in and adjust ticks_to_expire to relative value
		 */
		ticker_new->ticks_to_expire = ticks_to_expire;
		ticker_new->next = current;

		if (previous == current) {
			instance->ticker_id_head = id;
		} else {
			node[previous].next = id;
		}

		if (current != TICKER_NULL) {
			node[current].ticks_to_expire -= ticks_to_expire;
		}
	} else {
		/* Collision - no ticker node insertion, set id to that of
		 * colliding node
		 */
		if (ticks_slot_previous > ticks_to_expire) {
			id = ticker_id_slot_previous;
		} else {
			id = collide;
		}
	}

	return id;
}
725 #endif /* CONFIG_BT_TICKER_LOW_LAT */
726 
727 /**
728  * @brief Dequeue ticker node
729  *
730  * @details Finds extraction point for ticker node to be dequeued, unlinks
731  * the node and adjusts the links and ticks_to_expire. Returns the ticks
732  * until expiration for dequeued ticker node.
733  *
734  * @param instance Pointer to ticker instance
735  * @param id       Ticker node id to dequeue
736  *
737  * @return Total ticks until expiration for dequeued ticker node, or 0 if
738  * node was not found
739  * @internal
740  */
741 static uint32_t ticker_dequeue(struct ticker_instance *instance, uint8_t id)
742 {
743 	struct ticker_node *ticker_current;
744 	struct ticker_node *node;
745 	uint8_t previous;
746 	uint32_t timeout;
747 	uint8_t current;
748 	uint32_t total;
749 
750 	/* Find the ticker's position in ticker node list while accumulating
751 	 * ticks_to_expire
752 	 */
753 	node = &instance->nodes[0];
754 	previous = instance->ticker_id_head;
755 	current = previous;
756 	total = 0U;
757 	ticker_current = 0;
758 	while (current != TICKER_NULL) {
759 		ticker_current = &node[current];
760 
761 		if (current == id) {
762 			break;
763 		}
764 
765 		total += ticker_current->ticks_to_expire;
766 		previous = current;
767 		current = ticker_current->next;
768 	}
769 
770 	if (current == TICKER_NULL) {
771 		/* Ticker not in active list */
772 		return 0;
773 	}
774 
775 	if (previous == current) {
776 		/* Ticker is the first in the list */
777 		instance->ticker_id_head = ticker_current->next;
778 	}
779 
780 	/* Remaining timeout between next timeout */
781 	timeout = ticker_current->ticks_to_expire;
782 
783 	/* Link previous ticker with next of this ticker
784 	 * i.e. removing the ticker from list
785 	 */
786 	node[previous].next = ticker_current->next;
787 
788 	/* If this is not the last ticker, increment the
789 	 * next ticker by this ticker timeout
790 	 */
791 	if (ticker_current->next != TICKER_NULL) {
792 		node[ticker_current->next].ticks_to_expire += timeout;
793 	}
794 
795 	return (total + timeout);
796 }
797 
798 #if !defined(CONFIG_BT_TICKER_LOW_LAT) && \
799 	!defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
800 /**
801  * @brief Resolve ticker node collision
802  *
803  * @details Evaluates the provided ticker node against other queued nodes
804  * and returns non-zero if the ticker node collides and should be skipped.
805  * The following rules are checked:
806  *   1) If the periodic latency is not yet exhausted, node is skipped
807  *   2) If the node has highest possible priority, node is never skipped
808  *   3) If the node will starve next node due to slot reservation
809  *      overlap, node is skipped if:
810  *      a) Next node has higher priority than current node
811  *      b) Next node has more accumulated latency than the current node
812  *      c) Next node is 'older' than current node and has same priority
813  *      d) Next node has force flag set, and the current does not
814  *   4) If using ticks slot window,
815  *      a) current node can be rescheduled later in the ticks slot window
816  *   5) If using ticks slot window under yield (build time configuration),
817  *      a) Current node can be rescheduled later in the ticks slot window when
818  *         next node can not be rescheduled later in its ticks slot window
819  *
820  * @param nodes         Pointer to ticker node array
821  * @param ticker        Pointer to ticker to resolve
822  *
823  * @return 0 if no collision was detected. 1 if ticker node collides
824  * with other ticker node of higher composite priority
825  * @internal
826  */
827 static uint8_t ticker_resolve_collision(struct ticker_node *nodes,
828 				     struct ticker_node *ticker)
829 {
830 #if defined(CONFIG_BT_TICKER_PRIORITY_SET)
831 	if ((ticker->priority != TICKER_PRIORITY_CRITICAL) &&
832 	    (ticker->next != TICKER_NULL)) {
833 
834 #else /* !CONFIG_BT_TICKER_PRIORITY_SET */
835 	if (ticker->next != TICKER_NULL) {
836 
837 #endif /* !CONFIG_BT_TICKER_PRIORITY_SET */
838 
839 		uint16_t lazy_current = ticker->lazy_current;
840 		uint32_t ticker_ticks_slot;
841 
842 		if (TICKER_HAS_SLOT_WINDOW(ticker) && !ticker->ticks_slot) {
843 			ticker_ticks_slot = HAL_TICKER_RESCHEDULE_MARGIN;
844 		} else {
845 			ticker_ticks_slot = ticker->ticks_slot;
846 		}
847 
848 		/* Check if this ticker node will starve next node which has
849 		 * latency or higher priority
850 		 */
851 		if (lazy_current >= ticker->lazy_periodic) {
852 			lazy_current -= ticker->lazy_periodic;
853 		}
854 		uint8_t  id_head = ticker->next;
855 		uint32_t acc_ticks_to_expire = 0U;
856 
857 		/* Age is time since last expiry */
858 		uint32_t current_age = ticker->ticks_periodic +
859 				    (lazy_current * ticker->ticks_periodic);
860 
861 		while (id_head != TICKER_NULL) {
862 			struct ticker_node *ticker_next = &nodes[id_head];
863 			uint32_t ticker_next_ticks_slot;
864 
865 			/* Accumulate ticks_to_expire for each node */
866 			acc_ticks_to_expire += ticker_next->ticks_to_expire;
867 			if (acc_ticks_to_expire > ticker_ticks_slot) {
868 				break;
869 			}
870 
871 			if (TICKER_HAS_SLOT_WINDOW(ticker_next) &&
872 			    (ticker_next->ticks_slot == 0U)) {
873 				ticker_next_ticks_slot =
874 					HAL_TICKER_RESCHEDULE_MARGIN;
875 			} else {
876 				ticker_next_ticks_slot =
877 					ticker_next->ticks_slot;
878 			}
879 
880 			/* We only care about nodes with slot reservation */
881 			if (ticker_next_ticks_slot == 0U) {
882 				id_head = ticker_next->next;
883 				continue;
884 			}
885 
886 			uint16_t lazy_next = ticker_next->lazy_current;
887 			uint8_t  lazy_next_periodic_skip =
888 				ticker_next->lazy_periodic > lazy_next;
889 
890 			if (!lazy_next_periodic_skip) {
891 				lazy_next -= ticker_next->lazy_periodic;
892 			}
893 
894 			/* Age is time since last expiry */
895 			uint32_t next_age = (ticker_next->ticks_periodic == 0U ?
896 					  0U :
897 					 (ticker_next->ticks_periodic -
898 					  ticker_next->ticks_to_expire)) +
899 					 (lazy_next *
900 					  ticker_next->ticks_periodic);
901 
902 			/* Was the current node scheduled earlier? */
903 			uint8_t current_is_older =
904 				(ticker->ticks_periodic == 0U) ||
905 				(current_age > next_age);
906 			/* Was next node scheduled earlier (legacy priority)? */
907 			uint8_t next_is_older =
908 				(ticker->ticks_periodic != 0U) &&
909 				(next_age > current_age);
910 
911 			/* Is the current and next node equal in force? */
912 			uint8_t equal_force =
913 				(ticker->force == ticker_next->force);
914 			/* Is force requested for next node (e.g. update) -
915 			 * more so than for current node?
916 			 */
917 			uint8_t next_force =
918 				(ticker_next->force > ticker->force);
919 
920 #if defined(CONFIG_BT_TICKER_PRIORITY_SET)
921 			/* Does next node have critical priority and should
922 			 * always be scheduled?
923 			 */
924 			uint8_t next_is_critical =
925 				(ticker_next->priority ==
926 				 TICKER_PRIORITY_CRITICAL);
927 
928 			/* Is the current and next node equal in priority? */
929 			uint8_t equal_priority =
930 				(ticker->priority == ticker_next->priority);
931 
932 #else /* !CONFIG_BT_TICKER_PRIORITY_SET */
933 			uint8_t next_is_critical = 0U;
934 			uint8_t equal_priority = 1U;
935 			uint8_t next_has_priority = 0U;
936 
937 #endif /* !CONFIG_BT_TICKER_PRIORITY_SET */
938 
939 #if defined(CONFIG_BT_TICKER_EXT)
940 #if defined(CONFIG_BT_TICKER_EXT_SLOT_WINDOW_YIELD)
941 #if defined(CONFIG_BT_TICKER_PRIORITY_SET)
942 			/* Does next node have higher priority? */
943 			uint8_t next_has_priority =
944 				(!TICKER_HAS_SLOT_WINDOW(ticker_next) &&
945 				((lazy_next - ticker_next->priority) >
946 				 (lazy_current - ticker->priority));
947 #endif /* CONFIG_BT_TICKER_PRIORITY_SET */
948 
949 			/* Colliding next ticker does not use ticks_slot_window
950 			 * or it does not fit after the current ticker within
951 			 * the ticks_slot_window.
952 			 */
953 			uint8_t next_not_ticks_slot_window =
954 					!TICKER_HAS_SLOT_WINDOW(ticker_next) ||
955 					(ticker_next->ext_data->is_drift_in_window &&
956 					 TICKER_HAS_SLOT_WINDOW(ticker)) ||
957 					((acc_ticks_to_expire +
958 					  ticker_next->ext_data->ticks_slot_window -
959 					  ticker_next->ticks_slot) <
960 					 ticker->ticks_slot);
961 
962 			/* Can the current ticker with ticks_slot_window be
963 			 * scheduled after the colliding ticker?
964 			 */
965 			uint8_t curr_has_ticks_slot_window =
966 					TICKER_HAS_SLOT_WINDOW(ticker) &&
967 					((acc_ticks_to_expire +
968 					  ticker_next->ticks_slot) <=
969 					 (ticker->ext_data->ticks_slot_window -
970 					  ticker->ticks_slot));
971 
972 #else /* !CONFIG_BT_TICKER_EXT_SLOT_WINDOW_YIELD */
973 #if defined(CONFIG_BT_TICKER_PRIORITY_SET)
974 			/* Does next node have higher priority? */
975 			uint8_t next_has_priority =
976 				(lazy_next - ticker_next->priority) >
977 				(lazy_current - ticker->priority);
978 
979 #endif /* CONFIG_BT_TICKER_PRIORITY_SET */
980 			uint8_t next_not_ticks_slot_window = 1U;
981 
982 			/* Can the current ticker with ticks_slot_window be
983 			 * scheduled after the colliding ticker?
984 			 * NOTE: Tickers with ticks_slot_window and no
985 			 *       ticks_slot (unreserved) be always scheduled
986 			 *       after the colliding ticker.
987 			 */
988 			uint8_t curr_has_ticks_slot_window =
989 				(TICKER_HAS_SLOT_WINDOW(ticker) &&
990 				 !ticker->ticks_slot &&
991 				 ((acc_ticks_to_expire +
992 				   ticker_next->ticks_slot) <=
993 				  (ticker->ext_data->ticks_slot_window)));
994 
995 #endif /* !CONFIG_BT_TICKER_EXT_SLOT_WINDOW_YIELD */
996 #else /* !CONFIG_BT_TICKER_EXT */
997 #if defined(CONFIG_BT_TICKER_PRIORITY_SET)
998 			/* Does next node have higher priority? */
999 			uint8_t next_has_priority =
1000 				(lazy_next - ticker_next->priority) >
1001 				(lazy_current - ticker->priority);
1002 
1003 #endif /* CONFIG_BT_TICKER_PRIORITY_SET */
1004 			uint8_t next_not_ticks_slot_window = 1U;
1005 			uint8_t curr_has_ticks_slot_window = 0U;
1006 
1007 #endif /* !CONFIG_BT_TICKER_EXT */
1008 
1009 			/* Check if next node is within this reservation slot
1010 			 * and wins conflict resolution
1011 			 */
1012 			if ((curr_has_ticks_slot_window &&
1013 			     next_not_ticks_slot_window) ||
1014 			    (!lazy_next_periodic_skip &&
1015 			     (next_is_critical ||
1016 			      next_force ||
1017 			      (next_has_priority && !current_is_older) ||
1018 			      (equal_priority && equal_force && next_is_older &&
1019 			       next_not_ticks_slot_window)))) {
1020 				/* This node must be skipped - check window */
1021 				return 1U;
1022 			}
1023 			id_head = ticker_next->next;
1024 		}
1025 	}
1026 
1027 	return 0U;
1028 }
1029 #endif /* !CONFIG_BT_TICKER_LOW_LAT &&
1030 	* !CONFIG_BT_TICKER_SLOT_AGNOSTIC
1031 	*/
1032 
1033 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
1034 /**
1035  * @brief Get expiration delta from one ticker id to another ticker id
1036  *
1037  * @details Helper function to get expiration info between two tickers
1038  *
1039  * @param instance            Ticker instance
1040  * @param to_ticker_id        Target ticker id
1041  * @param from_ticker_id      Ticker id to compare with
1042  * @param expire_info         Pointer to ticker_expire_info that will
1043  *                            get populated with the result
1044  *
1045  * @internal
1046  */
1047 static void ticker_get_expire_info(struct ticker_instance *instance, uint8_t to_ticker_id,
1048 					  uint8_t from_ticker_id,
1049 					  struct ticker_expire_info_internal *expire_info)
1050 {
1051 	struct ticker_node *current_node;
1052 	uint32_t acc_ticks_to_expire = 0;
1053 	uint8_t current_ticker_id;
1054 	uint32_t from_ticks = 0;
1055 	bool from_found = false;
1056 	uint32_t to_ticks = 0;
1057 	bool to_found = false;
1058 
1059 	current_ticker_id = instance->ticker_id_head;
1060 	current_node = &instance->nodes[instance->ticker_id_head];
1061 	while (current_ticker_id != TICKER_NULL && (!to_found || !from_found)) {
1062 		/* Accumulate expire ticks */
1063 		acc_ticks_to_expire += current_node->ticks_to_expire;
1064 
1065 		if (current_ticker_id == from_ticker_id) {
1066 			from_ticks = acc_ticks_to_expire;
1067 			from_found = true;
1068 		} else if (current_ticker_id == to_ticker_id) {
1069 			to_ticks = acc_ticks_to_expire;
1070 			to_found = true;
1071 		}
1072 
1073 		current_ticker_id = current_node->next;
1074 		current_node = &instance->nodes[current_ticker_id];
1075 	}
1076 
1077 	if (to_found && from_found) {
1078 		struct ticker_node *to_ticker = &instance->nodes[to_ticker_id];
1079 
1080 #if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
1081 		uint32_t to_remainder = to_ticker->remainder_current;
1082 #endif /* CONFIG_BT_TICKER_REMAINDER_SUPPORT */
1083 
1084 		if (from_ticks > to_ticks) {
1085 			/* from ticker is scheduled after the to ticker - use period
1086 			 * to give an result
1087 			 */
1088 			if (to_ticker->ticks_periodic == 0) {
1089 				/* single shot ticker */
1090 				expire_info->found = 0;
1091 				return;
1092 			}
1093 			while (to_ticks < from_ticks) {
1094 				to_ticks += to_ticker->ticks_periodic;
1095 
1096 #if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
1097 				to_ticks += ticker_add_to_remainder(&to_remainder,
1098 								    to_ticker->remainder_periodic);
1099 #endif /* CONFIG_BT_TICKER_REMAINDER_SUPPORT */
1100 			}
1101 		}
1102 
1103 		expire_info->ticks_to_expire = to_ticks - from_ticks;
1104 		expire_info->remainder = to_remainder;
1105 		expire_info->lazy = to_ticker->lazy_current;
1106 		expire_info->found = 1;
1107 	} else {
1108 		expire_info->found = 0;
1109 	}
1110 }
1111 
1112 /**
1113  * @brief Allocate an expire info for the given ticker ID
1114  *
1115  * @param instance            Ticker instance
1116  * @param ticker_id           Ticker ID to allocate for
1117  *
1118  * @return Returns TICKER_STATUS_SUCCESS if the allocation succeeded,
1119  *         TICKER_STATUS_FAILURE otherwise
1120  *
1121  * @internal
1122  */
1123 static uint32_t ticker_alloc_expire_info(struct ticker_instance *instance, uint8_t ticker_id)
1124 {
1125 	uint32_t status = TICKER_STATUS_FAILURE;
1126 	uint8_t is_last = 0;
1127 
1128 	for (int i = 0; i < TICKER_EXPIRE_INFO_MAX; i++) {
1129 		if (instance->expire_infos[i].ticker_id == TICKER_NULL) {
1130 			struct ticker_node *ticker = &instance->nodes[ticker_id];
1131 
1132 			instance->expire_infos[i].ticker_id = ticker_id;
1133 			instance->expire_infos[i].outdated = true;
1134 			instance->expire_infos[i].last = is_last;
1135 			ticker->ext_data->other_expire_info = &instance->expire_infos[i];
1136 			instance->expire_infos_outdated = true;
1137 			status = TICKER_STATUS_SUCCESS;
1138 			break;
1139 		} else if (instance->expire_infos[i].last && i < TICKER_EXPIRE_INFO_MAX - 1) {
1140 			instance->expire_infos[i].last = 0;
1141 			is_last = 1;
1142 		}
1143 	}
1144 
1145 	return status;
1146 }
1147 
1148 /**
1149  * @brief Free a previously allocated expire info for the given ticker ID
1150  *
1151  * @param instance            Ticker instance
1152  * @param ticker_id           Ticker ID to free up the allocation for
1153  *
1154  * @internal
1155  */
1156 static void ticker_free_expire_info(struct ticker_instance *instance, uint8_t ticker_id)
1157 {
1158 	uint8_t is_last = 0;
1159 	uint8_t index;
1160 
1161 	for (index = 0; index < TICKER_EXPIRE_INFO_MAX; index++) {
1162 		if (instance->expire_infos[index].ticker_id == ticker_id) {
1163 			instance->expire_infos[index].ticker_id = TICKER_NULL;
1164 			is_last = instance->expire_infos[index].last;
1165 			instance->expire_infos[index].last = 0;
1166 			break;
1167 		}
1168 	}
1169 
1170 	if (is_last) {
1171 		/* Find new last used element and mark it */
1172 		for (; index >= 0; index--) {
1173 			if (instance->expire_infos[index].ticker_id != TICKER_NULL || index == 0) {
1174 				instance->expire_infos[index].last = 1;
1175 				break;
1176 			}
1177 		}
1178 	}
1179 }
1180 
1181 /**
1182  * @brief Mark all expire infos involving a ticker ID as outdated
1183  *
1184  * @details If a ticker moves this function should be called to mark all expiration
1185  *          infos (if any) that involve that ticker as outdated and in need of re-calculation.
1186  *          If any expiration infos involving the ticker_id is found, the ticker instances
1187  *          expire_infos_outdated flag is also set.
1188  *
1189  * @param instance            Ticker instance
1190  * @param ticker_id           ID of ticker that has moved
1191  *
1192  * @internal
1193  */
1194 static void ticker_mark_expire_info_outdated(struct ticker_instance *instance, uint8_t ticker_id)
1195 {
1196 	for (int i = 0; i < TICKER_EXPIRE_INFO_MAX; i++) {
1197 		if (instance->expire_infos[i].ticker_id != TICKER_NULL) {
1198 			uint8_t current_id = instance->expire_infos[i].ticker_id;
1199 			struct ticker_node *ticker = &instance->nodes[current_id];
1200 
1201 			if (current_id == ticker_id ||
1202 			    ticker->ext_data->expire_info_id == ticker_id) {
1203 				instance->expire_infos[i].outdated = true;
1204 				instance->expire_infos_outdated = true;
1205 			}
1206 		}
1207 		if (instance->expire_infos[i].last) {
1208 			break;
1209 		}
1210 	}
1211 }
1212 
1213 /**
1214  * @brief Run through all expire infos and update them if needed
1215  *
1216  * @details Runs through all expire_infos and runs ticker_get_expire_info()
1217  *          for any that are marked as outdated. Clears the expire_infos_outdated
1218  *          flag when done
1219  *
1220  * @param param Pointer to ticker instance
1221  *
1222  * @internal
1223  */
1224 static void ticker_job_update_expire_infos(struct ticker_instance *instance)
1225 {
1226 	for (int i = 0; i < TICKER_EXPIRE_INFO_MAX; i++) {
1227 		struct ticker_expire_info_internal *info = &instance->expire_infos[i];
1228 
1229 		if (info->ticker_id != TICKER_NULL && info->outdated) {
1230 			struct ticker_node *ticker = &instance->nodes[info->ticker_id];
1231 
1232 			ticker_get_expire_info(instance, ticker->ext_data->expire_info_id,
1233 						info->ticker_id, info);
1234 			info->outdated = false;
1235 		}
1236 
1237 		if (info->last) {
1238 			break;
1239 		}
1240 	}
1241 
1242 	instance->expire_infos_outdated = false;
1243 }
1244 
1245 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
1246 
1247 /**
1248  * @brief Ticker worker
1249  *
1250  * @details Runs as upper half of ticker operation, triggered by a compare
1251  * match from the underlying counter HAL, via the ticker_trigger function.
1252  * Traverses ticker nodes to find tickers expired since last job
1253  * execution. Expired (requested) ticker nodes have their timeout callback
1254  * functions called. Finally, a ticker job is enqueued. Invoked from the
1255  * ticker worker mayfly context (TICKER_MAYFLY_CALL_ID_WORKER)
1256  *
1257  * @param param Pointer to ticker instance
1258  *
1259  */
1260 void ticker_worker(void *param)
1261 {
1262 	struct ticker_instance *instance = param;
1263 	struct ticker_node *node;
1264 	uint32_t ticks_elapsed;
1265 	uint32_t ticks_expired;
1266 	uint8_t ticker_id_head;
1267 	uint32_t ticks_now;
1268 
1269 	/* Defer worker if job running */
1270 	instance->worker_trigger = 1U;
1271 	if (instance->job_guard) {
1272 		return;
1273 	}
1274 
1275 	/* If no tickers queued (active), do nothing */
1276 	if (instance->ticker_id_head == TICKER_NULL) {
1277 		instance->worker_trigger = 0U;
1278 		return;
1279 	}
1280 
1281 	ticks_now = cntr_cnt_get();
1282 
1283 	/* Get ticks elapsed since last job execution */
1284 	ticks_elapsed = ticker_ticks_diff_get(ticks_now,
1285 					      instance->ticks_current);
1286 
1287 	/* Initialize actual elapsed ticks being consumed */
1288 	ticks_expired = 0U;
1289 
1290 	/* Auto variable containing the head of tickers expiring */
1291 	ticker_id_head = instance->ticker_id_head;
1292 
1293 #if !defined(CONFIG_BT_TICKER_LOW_LAT) && \
1294 	!defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
1295 	/* Check if the previous ticker node which had air-time, is still
1296 	 * active and has this time slot reserved
1297 	 */
1298 	uint8_t slot_reserved = 0;
1299 
1300 	if (instance->ticks_slot_previous > ticks_elapsed) {
1301 		/* This node intersects reserved slot */
1302 		slot_reserved = 1;
1303 	}
1304 #endif /* !CONFIG_BT_TICKER_LOW_LAT &&
1305 	* !CONFIG_BT_TICKER_SLOT_AGNOSTIC
1306 	*/
1307 
1308 	/* Expire all tickers within ticks_elapsed and collect ticks_expired */
1309 	node = &instance->nodes[0];
1310 
1311 	while (ticker_id_head != TICKER_NULL) {
1312 		struct ticker_node *ticker;
1313 		uint32_t ticks_to_expire;
1314 		uint8_t must_expire_skip;
1315 		uint32_t ticks_drift;
1316 
1317 		ticker = &node[ticker_id_head];
1318 
1319 		/* Stop if ticker did not expire */
1320 		ticks_to_expire = ticker->ticks_to_expire;
1321 		if (ticks_elapsed < ticks_to_expire) {
1322 			break;
1323 		}
1324 
1325 		/* Decrement ticks_elapsed and collect expired ticks */
1326 		ticks_elapsed -= ticks_to_expire;
1327 		ticks_expired += ticks_to_expire;
1328 
1329 		/* Move to next ticker node */
1330 		ticker_id_head = ticker->next;
1331 		must_expire_skip = 0U;
1332 
1333 		/* Skip if not scheduled to execute */
1334 		if (((ticker->req - ticker->ack) & 0xff) != 1U) {
1335 			continue;
1336 		}
1337 
1338 #if !defined(CONFIG_BT_TICKER_LOW_LAT) && \
1339 	!defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
1340 		uint32_t ticker_ticks_slot;
1341 
1342 		if (TICKER_HAS_SLOT_WINDOW(ticker) &&
1343 		    (ticker->ticks_slot == 0U)) {
1344 			ticker_ticks_slot = HAL_TICKER_RESCHEDULE_MARGIN;
1345 		} else {
1346 			ticker_ticks_slot = ticker->ticks_slot;
1347 		}
1348 
1349 		/* Check if node has slot reservation and resolve any collision
1350 		 * with other ticker nodes
1351 		 */
1352 		if ((ticker_ticks_slot != 0U) &&
1353 		    (slot_reserved ||
1354 		     (instance->ticks_slot_previous > ticks_expired) ||
1355 		     ticker_resolve_collision(node, ticker))) {
1356 #if defined(CONFIG_BT_TICKER_EXT)
1357 			struct ticker_ext *ext_data = ticker->ext_data;
1358 
1359 			if (ext_data &&
1360 			    ext_data->ticks_slot_window != 0U &&
1361 			    ext_data->reschedule_state ==
1362 			    TICKER_RESCHEDULE_STATE_NONE &&
1363 			   (ticker->lazy_periodic <= ticker->lazy_current)) {
1364 				/* Mark node for re-scheduling in ticker_job */
1365 				ext_data->reschedule_state =
1366 					TICKER_RESCHEDULE_STATE_PENDING;
1367 			} else if (ext_data) {
1368 				/* Mark node as not re-scheduling */
1369 				ext_data->reschedule_state =
1370 					TICKER_RESCHEDULE_STATE_NONE;
1371 			}
1372 #endif /* CONFIG_BT_TICKER_EXT */
1373 			/* Increment lazy_current to indicate skipped event. In case
1374 			 * of re-scheduled node, the lazy count will be decremented in
1375 			 * ticker_job_reschedule_in_window when completed.
1376 			 */
1377 			ticker->lazy_current++;
1378 
1379 			if ((ticker->must_expire == 0U) ||
1380 			    (ticker->lazy_periodic >= ticker->lazy_current) ||
1381 			    TICKER_RESCHEDULE_PENDING(ticker)) {
1382 				/* Not a must-expire node or this is periodic
1383 				 * latency or pending re-schedule. Skip this
1384 				 * ticker node. Mark it as elapsed.
1385 				 */
1386 				ticker->ack--;
1387 				continue;
1388 			}
1389 
1390 			/* Continue but perform shallow expiry */
1391 			must_expire_skip = 1U;
1392 		}
1393 
1394 #if defined(CONFIG_BT_TICKER_EXT)
1395 		if (ticker->ext_data) {
1396 			ticks_drift = ticker->ext_data->ticks_drift;
1397 			ticker->ext_data->ticks_drift = 0U;
1398 			/* Mark node as not re-scheduling */
1399 			ticker->ext_data->reschedule_state =
1400 				TICKER_RESCHEDULE_STATE_NONE;
1401 		} else {
1402 			ticks_drift = 0U;
1403 		}
1404 
1405 #else  /* !CONFIG_BT_TICKER_EXT */
1406 		ticks_drift = 0U;
1407 #endif /* !CONFIG_BT_TICKER_EXT */
1408 
1409 #else  /* CONFIG_BT_TICKER_LOW_LAT ||
1410 	* CONFIG_BT_TICKER_SLOT_AGNOSTIC
1411 	*/
1412 		ticks_drift = 0U;
1413 #endif /* CONFIG_BT_TICKER_LOW_LAT ||
1414 	* CONFIG_BT_TICKER_SLOT_AGNOSTIC
1415 	*/
1416 
1417 		/* Scheduled timeout is acknowledged to be complete */
1418 		ticker->ack--;
1419 
1420 		if (ticker->timeout_func) {
1421 			uint32_t remainder_current;
1422 			uint32_t ticks_at_expire;
1423 
1424 			ticks_at_expire = (instance->ticks_current +
1425 					   ticks_expired -
1426 					   ticker->ticks_to_expire_minus) &
1427 					   HAL_TICKER_CNTR_MASK;
1428 
1429 #if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
1430 			remainder_current = ticker->remainder_current;
1431 #else /* !CONFIG_BT_TICKER_REMAINDER_SUPPORT */
1432 			remainder_current = 0U;
1433 #endif /* !CONFIG_BT_TICKER_REMAINDER_SUPPORT */
1434 
1435 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
1436 			if (ticker->ext_data &&
1437 			    ticker->ext_data->ext_timeout_func) {
1438 				struct ticker_expire_info_internal *expire_info;
1439 				struct ticker_ext_context ext_context;
1440 				ticker_timeout_func timeout_func;
1441 
1442 				timeout_func = ticker->ext_data->ext_timeout_func;
1443 				expire_info = ticker->ext_data->other_expire_info;
1444 				if (ticker->ext_data->expire_info_id != TICKER_NULL) {
1445 					LL_ASSERT(expire_info && !expire_info->outdated);
1446 				}
1447 
1448 				ext_context.context = ticker->context;
1449 				if (expire_info && expire_info->found) {
1450 					ext_context.other_expire_info = (void *)expire_info;
1451 				} else {
1452 					ext_context.other_expire_info = NULL;
1453 				}
1454 
1455 				DEBUG_TICKER_TASK(1);
1456 
1457 				/* Invoke the timeout callback */
1458 				timeout_func(ticks_at_expire,
1459 					     ticks_drift,
1460 					     remainder_current,
1461 					     must_expire_skip ?
1462 					     TICKER_LAZY_MUST_EXPIRE :
1463 					     ticker->lazy_current,
1464 					     ticker->force,
1465 					     &ext_context);
1466 			} else
1467 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
1468 			{
1469 				DEBUG_TICKER_TASK(1);
1470 
1471 				/* Invoke the timeout callback */
1472 				ticker->timeout_func(ticks_at_expire,
1473 					     ticks_drift,
1474 					     remainder_current,
1475 					     must_expire_skip ?
1476 					     TICKER_LAZY_MUST_EXPIRE :
1477 					     ticker->lazy_current,
1478 					     ticker->force,
1479 					     ticker->context);
1480 				DEBUG_TICKER_TASK(0);
1481 			}
1482 
1483 			if (!IS_ENABLED(CONFIG_BT_TICKER_LOW_LAT) &&
1484 			   (must_expire_skip == 0U)) {
1485 				/* Reset latency to periodic offset */
1486 				ticker->lazy_current = 0U;
1487 				ticker->force = 0U;
1488 
1489 #if !defined(CONFIG_BT_TICKER_LOW_LAT) && \
1490 	!defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
1491 				if (ticker_ticks_slot != 0U) {
1492 					/* Any further nodes will be skipped */
1493 					slot_reserved = 1U;
1494 				}
1495 #endif /* !CONFIG_BT_TICKER_LOW_LAT &&
1496 	* !CONFIG_BT_TICKER_SLOT_AGNOSTIC
1497 	*/
1498 
1499 			}
1500 		}
1501 	}
1502 
1503 	/* Queue the elapsed ticks */
1504 	if (instance->ticks_elapsed_first == instance->ticks_elapsed_last) {
1505 		ticker_next_elapsed(&instance->ticks_elapsed_last);
1506 	}
1507 	instance->ticks_elapsed[instance->ticks_elapsed_last] = ticks_expired;
1508 
1509 	instance->worker_trigger = 0U;
1510 
1511 	/* Enqueue the ticker job with chain=1 (do not inline) */
1512 	instance->sched_cb(TICKER_CALL_ID_WORKER, TICKER_CALL_ID_JOB, 1,
1513 			   instance);
1514 }
1515 
1516 /**
1517  * @brief Prepare ticker node expiration
1518  *
1519  * @details Calculates the number of ticks until next expiration, taking
1520  * into consideration any negative drift correction.
1521  *
1522  * @param ticker         Pointer to ticker node
1523  * @param ticks_current  Current number of ticks (elapsed)
1524  * @param ticks_at_start Number of ticks at start (anchor)
1525  *
1526  * @internal
1527  */
1528 static void ticks_to_expire_prep(struct ticker_node *ticker,
1529 				 uint32_t ticks_current, uint32_t ticks_at_start)
1530 {
1531 	uint32_t ticks_to_expire = ticker->ticks_to_expire;
1532 	uint32_t ticks_to_expire_minus = ticker->ticks_to_expire_minus;
1533 
1534 	/* Calculate ticks to expire for this new node */
1535 	if (!((ticks_at_start - ticks_current) & BIT(HAL_TICKER_CNTR_MSBIT))) {
1536 		/* Most significant bit is 0 so ticks_at_start lies ahead of
1537 		 * ticks_current: ticks_at_start >= ticks_current
1538 		 */
1539 		ticks_to_expire += ticker_ticks_diff_get(ticks_at_start,
1540 							 ticks_current);
1541 	} else {
1542 		/* ticks_current > ticks_at_start
1543 		 */
1544 		uint32_t delta_current_start;
1545 
1546 		delta_current_start = ticker_ticks_diff_get(ticks_current,
1547 							    ticks_at_start);
1548 		if (ticks_to_expire > delta_current_start) {
1549 			/* There's still time until expiration - subtract
1550 			 * elapsed time
1551 			 */
1552 			ticks_to_expire -= delta_current_start;
1553 		} else {
1554 			/* Ticker node should have expired (we're late).
1555 			 * Add 'lateness' to negative drift correction
1556 			 * (ticks_to_expire_minus) and set ticks_to_expire
1557 			 * to 0
1558 			 */
1559 			ticks_to_expire_minus +=
1560 			    (delta_current_start - ticks_to_expire);
1561 			ticks_to_expire = 0U;
1562 		}
1563 	}
1564 
1565 	/* Handle negative drift correction */
1566 	if (ticks_to_expire > ticks_to_expire_minus) {
1567 		ticks_to_expire -= ticks_to_expire_minus;
1568 		ticks_to_expire_minus = 0U;
1569 	} else {
1570 		ticks_to_expire_minus -= ticks_to_expire;
1571 		ticks_to_expire = 0U;
1572 	}
1573 
1574 	/* Update ticker */
1575 	ticker->ticks_to_expire = ticks_to_expire;
1576 	ticker->ticks_to_expire_minus = ticks_to_expire_minus;
1577 }
1578 
1579 /**
1580  * @brief Add to remainder
1581  *
 * @details Calculates whether the remainder should increment the expiration
1583  * for above-microsecond precision counter HW. The remainder enables improved
1584  * ticker precision, but is disabled for sub-microsecond precision
1585  * configurations.
1586  * Note: This is the same functionality as ticker_remainder_inc(), except this
1587  * function allows doing the calculation without modifying any tickers
1588  *
1589  * @param remainder Pointer to remainder to add to
1590  * @param to_add    Remainder value to add
1591  *
1592  * @return Returns 1 to indicate ticks increment is due, otherwise 0
1593  * @internal
1594  */
1595 static inline uint8_t ticker_add_to_remainder(uint32_t *remainder, uint32_t to_add)
1596 {
1597 	*remainder += to_add;
1598 	if ((*remainder < BIT(31)) &&
1599 	    (*remainder > (HAL_TICKER_REMAINDER_RANGE >> 1))) {
1600 		*remainder -= HAL_TICKER_REMAINDER_RANGE;
1601 
1602 		return 1;
1603 	}
1604 
1605 	return 0;
1606 }
1607 
1608 #if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
1609 /**
1610  * @brief Increment remainder
1611  *
 * @details Calculates whether the remainder should increment the expiration
1613  * for above-microsecond precision counter HW. The remainder enables improved
1614  * ticker precision, but is disabled for sub-microsecond precision
1615  * configurations.
1616  *
1617  * @param ticker Pointer to ticker node
1618  *
1619  * @return Returns 1 to indicate increment is due, otherwise 0
1620  * @internal
1621  */
1622 static uint8_t ticker_remainder_inc(struct ticker_node *ticker)
1623 {
1624 	return ticker_add_to_remainder(&ticker->remainder_current, ticker->remainder_periodic);
1625 }
1626 
1627 /**
1628  * @brief Decrement remainder
1629  *
 * @details Calculates whether the remainder should decrement the expiration
1631  * for above-microsecond precision counter HW. The remainder enables improved
1632  * ticker precision, but is disabled for sub-microsecond precision
1633  * configurations.
1634  *
1635  * @param ticker Pointer to ticker node
1636  *
1637  * @return Returns 1 to indicate decrement is due, otherwise 0
1638  * @internal
1639  */
1640 static uint8_t ticker_remainder_dec(struct ticker_node *ticker)
1641 {
1642 	uint8_t decrement = 0U;
1643 
1644 	if ((ticker->remainder_current >= BIT(31)) ||
1645 	    (ticker->remainder_current <= (HAL_TICKER_REMAINDER_RANGE >> 1))) {
1646 		decrement++;
1647 		ticker->remainder_current += HAL_TICKER_REMAINDER_RANGE;
1648 	}
1649 
1650 	ticker->remainder_current -= ticker->remainder_periodic;
1651 
1652 	return decrement;
1653 }
1654 #endif /* CONFIG_BT_TICKER_REMAINDER_SUPPORT */
1655 
1656 /**
1657  * @brief Invoke user operation callback
1658  *
1659  * @param user_op Pointer to user operation struct
1660  * @param status  User operation status to pass to callback
1661  *
1662  * @internal
1663  */
1664 static void ticker_job_op_cb(struct ticker_user_op *user_op, uint8_t status)
1665 {
1666 	user_op->op = TICKER_USER_OP_TYPE_NONE;
1667 	user_op->status = status;
1668 	if (user_op->fp_op_func) {
1669 		user_op->fp_op_func(user_op->status, user_op->op_context);
1670 	}
1671 }
1672 
1673 /**
1674  * @brief Update and insert ticker node
1675  *
1676  * @details Update ticker node with parameters passed in user operation.
1677  * After update, the ticker is inserted in front as new head.
1678  *
1679  * @param ticker	Pointer to ticker node
1680  * @param user_op	Pointer to user operation
1681  * @param ticks_current	Current ticker instance ticks
1682  * @param ticks_elapsed	Expired ticks at time of call
1683  * @param insert_head	Pointer to current head (id). Contains id
1684  *			from user operation upon exit
1685  * @internal
1686  */
static inline uint32_t ticker_job_node_update(struct ticker_instance *instance,
					  struct ticker_node *ticker,
					  struct ticker_user_op *user_op,
					  uint32_t ticks_now,
					  uint32_t ticks_current,
					  uint32_t ticks_elapsed,
					  uint8_t *insert_head)
{
	uint32_t ticks_to_expire = ticker->ticks_to_expire;

	/* Consume ticks elapsed since the instance's ticks_current; lateness
	 * beyond the pending expiry is accumulated as negative drift.
	 */
	ticks_elapsed += ticker_ticks_diff_get(ticks_now, ticks_current);
	if (ticks_to_expire > ticks_elapsed) {
		ticks_to_expire -= ticks_elapsed;
	} else {
		ticker->ticks_to_expire_minus += ticks_elapsed -
						 ticks_to_expire;
		ticks_to_expire = 0U;
	}

	/* Update ticks_to_expire from latency (lazy) input */
	if ((ticker->ticks_periodic != 0U) &&
	    (user_op->params.update.lazy != 0U)) {
		/* lazy == 0 is the "no change" sentinel; the requested value
		 * is decremented before use (appears to be passed as
		 * value + 1 by the caller - confirm against ticker_update).
		 */
		user_op->params.update.lazy--;
		/* Pull expiry earlier, one period at a time, while the node
		 * is lazier than requested.
		 */
		while ((ticks_to_expire > ticker->ticks_periodic) &&
		       (ticker->lazy_current > user_op->params.update.lazy)) {
			ticks_to_expire -= ticker->ticks_periodic;

#if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
			ticks_to_expire -= ticker_remainder_dec(ticker);
#endif /* CONFIG_BT_TICKER_REMAINDER_SUPPORT */

			ticker->lazy_current--;
		}

		/* Push expiry later until the requested latency is reached */
		while (ticker->lazy_current < user_op->params.update.lazy) {
			ticks_to_expire += ticker->ticks_periodic;

#if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
			ticks_to_expire += ticker_remainder_inc(ticker);
#endif /* CONFIG_BT_TICKER_REMAINDER_SUPPORT */

			ticker->lazy_current++;
		}
		ticker->lazy_periodic = user_op->params.update.lazy;
	}

	/* Update ticks_to_expire from drift input */
	ticker->ticks_to_expire = ticks_to_expire +
				  user_op->params.update.ticks_drift_plus;
	ticker->ticks_to_expire_minus +=
				user_op->params.update.ticks_drift_minus;

#if defined(CONFIG_BT_TICKER_EXT) && !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
	/* TODO: An improvement on this could be to only consider the drift
	 * (ADV => randomization) if re-scheduling fails. We would still store
	 * the drift ticks here, but not actually update the node. That would
	 * allow the ticker to use the full window for re-scheduling.
	 */
	struct ticker_ext *ext_data = ticker->ext_data;

	if (ext_data && ext_data->ticks_slot_window != 0U) {
		/* Record net drift (may wrap as unsigned when minus > plus)
		 * for slot-window re-scheduling in ticker_job.
		 */
		ext_data->ticks_drift =
			user_op->params.update.ticks_drift_plus -
			user_op->params.update.ticks_drift_minus;
	}
#endif /* CONFIG_BT_TICKER_EXT && !CONFIG_BT_TICKER_SLOT_AGNOSTIC */

	ticks_to_expire_prep(ticker, ticks_current, ticks_now);

#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
	/* Update ticks_slot parameter from plus/minus input */
	ticker->ticks_slot += user_op->params.update.ticks_slot_plus;
	if (ticker->ticks_slot > user_op->params.update.ticks_slot_minus) {
		ticker->ticks_slot -= user_op->params.update.ticks_slot_minus;
	} else {
		/* Clamp at zero; the slot reservation cannot go negative */
		ticker->ticks_slot = 0U;
	}
#endif /* CONFIG_BT_TICKER_SLOT_AGNOSTIC */

	/* Update force parameter */
	if (user_op->params.update.force != 0U) {
		ticker->force = user_op->params.update.force;
	}

#if defined(CONFIG_BT_TICKER_EXT) && !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC) &&\
	!defined(CONFIG_BT_TICKER_LOW_LAT)
	/* Update must_expire parameter */
	if (user_op->params.update.must_expire) {
		/* 1: disable, 2: enable */
		ticker->must_expire = (user_op->params.update.must_expire - 1);
	}
#endif /* CONFIG_BT_TICKER_EXT */

#if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
	/* Re-point the expire-info relation when the target id changes;
	 * allocate or free the backing pool element as needed.
	 */
	if (ticker->ext_data && user_op->params.update.expire_info_id != user_op->id) {
		if (user_op->params.update.expire_info_id != TICKER_NULL &&
		    !ticker->ext_data->other_expire_info) {
			uint32_t status;

			status = ticker_alloc_expire_info(instance, user_op->id);
			if (status) {
				/* Pool exhausted - propagate failure without
				 * re-inserting the node.
				 */
				return status;
			}
		} else if (user_op->params.update.expire_info_id == TICKER_NULL &&
			 ticker->ext_data->other_expire_info) {
			ticker_free_expire_info(instance, user_op->id);
			ticker->ext_data->other_expire_info = NULL;
		}

		ticker->ext_data->expire_info_id = user_op->params.update.expire_info_id;
		if (ticker->ext_data->expire_info_id != TICKER_NULL) {
			ticker_mark_expire_info_outdated(instance, user_op->id);
		}
	}
#else /* !CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
	ARG_UNUSED(instance);
#endif /* !CONFIG_BT_TICKER_EXT_EXPIRE_INFO */

	/* Link the node onto the insert list; the caller re-inserts it */
	ticker->next = *insert_head;
	*insert_head = user_op->id;

	return TICKER_STATUS_SUCCESS;
}
1810 
1811 /**
1812  * @brief Manage user update operation
1813  *
1814  * @details Called by ticker_job to execute an update request, or set node
1815  * as done if request is not update. Invokes user operation callback before
1816  * exit.
1817  *
 * @param instance	Pointer to ticker instance
 * @param ticker	Pointer to ticker node
 * @param user_op	Pointer to user operation
 * @param ticks_now	Current absolute tick count
 * @param ticks_elapsed Expired ticks at time of call
 * @param insert_head	Pointer to current head (id). For update operation,
 *			contains operation id upon exit
1824  * @internal
1825  */
static inline void ticker_job_node_manage(struct ticker_instance *instance,
					  struct ticker_node *ticker,
					  struct ticker_user_op *user_op,
					  uint32_t ticks_now,
					  uint32_t ticks_elapsed,
					  uint8_t *insert_head)
{
	/* Handle update of ticker by re-inserting it back. */
	if (IS_ENABLED(CONFIG_BT_TICKER_UPDATE) &&
	    (user_op->op == TICKER_USER_OP_TYPE_UPDATE)) {
		/* Remove ticker node from list */
		ticker->ticks_to_expire = ticker_dequeue(instance, user_op->id);

		/* Update node and insert back */
		ticker_job_node_update(instance, ticker, user_op, ticks_now,
				       instance->ticks_current, ticks_elapsed,
				       insert_head);

#if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
		ticker_mark_expire_info_outdated(instance, user_op->id);
#endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */

		/* Set schedule status of node
		 * as updating.
		 */
		ticker->req++;
	} else {
		/* If stop/stop_abs requested, then dequeue node */
		if (user_op->op != TICKER_USER_OP_TYPE_YIELD_ABS) {
			/* Remove ticker node from list */
			ticker->ticks_to_expire = ticker_dequeue(instance,
								 user_op->id);

#if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
			/* Node is stopping: free the expire info entry it
			 * held and clear its reference to the shared info.
			 */
			if (ticker->ext_data && ticker->ext_data->expire_info_id != TICKER_NULL) {
				ticker_free_expire_info(instance, user_op->id);
				ticker->ext_data->other_expire_info = NULL;
			}

			ticker_mark_expire_info_outdated(instance, user_op->id);
#endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */

			/* Reset schedule status of node */
			ticker->req = ticker->ack;
		}

#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
		/* If yield_abs/stop/stop_abs then adjust ticks_slot_previous */
		if (instance->ticker_id_slot_previous == user_op->id) {
			uint32_t ticks_current;
			uint32_t ticks_at_yield;
			uint32_t ticks_used;

			/* Stop variants invalidate the previous-slot
			 * reference; yield keeps it (node stays in list).
			 */
			if (user_op->op != TICKER_USER_OP_TYPE_YIELD_ABS) {
				instance->ticker_id_slot_previous = TICKER_NULL;
			}

			/* Absolute ops carry the yield timestamp in the op
			 * parameters; a plain stop uses the current time.
			 */
			if ((user_op->op == TICKER_USER_OP_TYPE_YIELD_ABS) ||
			    (user_op->op == TICKER_USER_OP_TYPE_STOP_ABS)) {
				ticks_at_yield =
					user_op->params.yield.ticks_at_yield;
			} else {
				ticks_at_yield = ticks_now;
			}

			/* Compute ticks of the reserved slot actually used.
			 * The MSBit of the unsigned difference indicates
			 * whether ticks_at_yield lies before or after
			 * ticks_current in the wrapping counter space.
			 */
			ticks_current = instance->ticks_current;
			if (!((ticks_at_yield - ticks_current) &
			      BIT(HAL_TICKER_CNTR_MSBIT))) {
				ticks_used = ticks_elapsed +
					ticker_ticks_diff_get(ticks_at_yield,
							      ticks_current);
			} else {
				ticks_used =
					ticker_ticks_diff_get(ticks_current,
							      ticks_at_yield);
				if (ticks_elapsed > ticks_used) {
					ticks_used = ticks_elapsed -
						     ticks_used;
				} else {
					ticks_used = 0;
				}
			}

			/* Shrink the remaining slot reservation to the time
			 * actually used.
			 */
			if (instance->ticks_slot_previous > ticks_used) {
				instance->ticks_slot_previous = ticks_used;
			}
		}
#endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */

	}

	/* op success, @todo update may fail during
	 * actual insert! need to design that yet.
	 */
	ticker_job_op_cb(user_op, TICKER_STATUS_SUCCESS);
}
1922 
1923 /**
1924  * @brief Manage user operations list
1925  *
1926  * @details Called by ticker_job to execute requested user operations. A
1927  * number of operation may be queued since last ticker_job. Only update and
1928  * stop operations are handled. Start is handled implicitly by inserting
1929  * the ticker node in ticker_job_list_insert.
1930  *
 * @param instance	Pointer to ticker instance
 * @param ticks_now	Current absolute tick count
 * @param ticks_elapsed Expired ticks at time of call
 * @param insert_head	Pointer to current head (id). For update operation,
 *			contains operation id upon exit
1935  * @return Returns 1 if operations is pending, 0 if all operations are done.
1936  * @internal
1937  */
static inline uint8_t ticker_job_list_manage(struct ticker_instance *instance,
					     uint32_t ticks_now,
					     uint32_t ticks_elapsed,
					     uint8_t *insert_head)
{
	uint8_t pending;
	struct ticker_node *node;
	struct ticker_user *users;
	uint8_t count_user;

	pending = 0U;
	node = &instance->nodes[0];
	users = &instance->users[0];
	count_user = instance->count_user;
	/* Traverse users - highest id first */
	while (count_user--) {
		struct ticker_user *user;
		struct ticker_user_op *user_ops;

		user = &users[count_user];
		user_ops = &user->user_op[0];
		/* Traverse user operation queue - middle to last (with wrap).
		 * This operation updates user->middle to be the past the last
		 * processed user operation. This is used later by
		 * ticker_job_list_insert, for handling user->first to middle.
		 */
		while (user->middle != user->last) {
			struct ticker_user_op *user_op;
			struct ticker_node *ticker;
			uint8_t state;
			uint8_t prev;
			uint8_t middle;

			user_op = &user_ops[user->middle];

			/* Increment index and handle wrapping */
			prev = user->middle;
			middle = user->middle + 1;
			if (middle == user->count_user_op) {
				middle = 0U;
			}
			user->middle = middle;

			ticker = &node[user_op->id];

			/* if op is start, then skip update and stop ops */
			if (user_op->op < TICKER_USER_OP_TYPE_UPDATE) {
#if defined(CONFIG_BT_TICKER_PREFER_START_BEFORE_STOP)
				if (user_op->op == TICKER_USER_OP_TYPE_START) {
					/* Set start pending to validate a
					 * successive, inline stop operation.
					 */
					ticker->start_pending = 1U;
				}
#endif /* CONFIG_BT_TICKER_PREFER_START_BEFORE_STOP */

				continue;
			}

			/* determine the ticker state: req/ack imbalance,
			 * zero meaning the node is not started.
			 */
			state = (ticker->req - ticker->ack) & 0xff;

			/* if not started or update not required,
			 * set status and continue.
			 * An update op whose drift/slot deltas, lazy, force
			 * (and, when supported, expire info) are all
			 * unchanged is a no-op and is failed back to the
			 * caller.
			 */
			if ((user_op->op > TICKER_USER_OP_TYPE_STOP_ABS) ||
			    ((state == 0U) &&
#if defined(CONFIG_BT_TICKER_PREFER_START_BEFORE_STOP)
			     !ticker->start_pending &&
#endif /* CONFIG_BT_TICKER_PREFER_START_BEFORE_STOP */
			     (user_op->op != TICKER_USER_OP_TYPE_YIELD_ABS)) ||
			    ((user_op->op == TICKER_USER_OP_TYPE_UPDATE) &&
			     (user_op->params.update.ticks_drift_plus == 0U) &&
			     (user_op->params.update.ticks_drift_minus == 0U) &&
#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
			     (user_op->params.update.ticks_slot_plus == 0U) &&
			     (user_op->params.update.ticks_slot_minus == 0U) &&
#endif /* CONFIG_BT_TICKER_SLOT_AGNOSTIC */
#if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
			     (!ticker->ext_data ||
				  user_op->params.update.expire_info_id == user_op->id) &&
#endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
			     (user_op->params.update.lazy == 0U) &&
			     (user_op->params.update.force == 0U))) {
				ticker_job_op_cb(user_op,
						 TICKER_STATUS_FAILURE);
				continue;
			}

			/* Delete or yield node, if not expired */
			if ((state == 1U) ||
			    (user_op->op == TICKER_USER_OP_TYPE_YIELD_ABS)) {
				ticker_job_node_manage(instance, ticker,
						       user_op, ticks_now,
						       ticks_elapsed,
						       insert_head);
			} else {
				/* Update on expired node requested, deferring
				 * update until bottom half finishes.
				 */
				/* sched job to run after worker bottom half.
				 */
				instance->sched_cb(TICKER_CALL_ID_JOB,
						   TICKER_CALL_ID_JOB, 1,
						   instance);

				/* Update the index upto which management is
				 * complete.
				 */
				user->middle = prev;

				pending = 1U;
				break;
			}
		}
	}

	return pending;
}
2057 
2058 /**
2059  * @brief Handle ticker node expirations
2060  *
2061  * @details Called by ticker_job to schedule next expirations. Expired ticker
2062  * nodes are removed from the active list, and re-inserted if periodic.
2063  *
 * @param instance	 Pointer to ticker instance
 * @param ticks_now	 Current absolute tick count
 * @param ticks_previous Absolute ticks at ticker_job start
 * @param ticks_elapsed  Expired ticks at time of call
 * @param insert_head	 Pointer to current head (id). Updated if nodes are
 *			 re-inserted
2069  * @internal
2070  */
static inline void ticker_job_worker_bh(struct ticker_instance *instance,
					uint32_t ticks_now,
					uint32_t ticks_previous,
					uint32_t ticks_elapsed,
					uint8_t *insert_head)
{
	struct ticker_node *node;
	uint32_t ticks_expired;
	uint32_t ticks_latency;

	/* Latency from the job's reference time to now; consumed below as
	 * each expired node's delta is processed.
	 */
	ticks_latency = ticker_ticks_diff_get(ticks_now, ticks_previous);

	node = &instance->nodes[0];
	ticks_expired = 0U;
	/* Consume expired nodes from the head of the sorted list */
	while (instance->ticker_id_head != TICKER_NULL) {
		uint8_t skip_collision = 0U;
		struct ticker_node *ticker;
		uint32_t ticks_to_expire;
		uint8_t id_expired;
		uint8_t state;

		/* auto variable for current ticker node */
		id_expired = instance->ticker_id_head;
		ticker = &node[id_expired];

		/* Do nothing if ticker did not expire */
		ticks_to_expire = ticker->ticks_to_expire;
		if (ticks_elapsed < ticks_to_expire) {
			/* Head not yet expired: account elapsed ticks into
			 * its delta and stop scanning.
			 */
			ticker->ticks_to_expire -= ticks_elapsed;
			break;
		}

		/* decrement ticks_elapsed and collect expired ticks */
		ticks_elapsed -= ticks_to_expire;
		ticks_latency -= ticks_to_expire;
		ticks_expired += ticks_to_expire;

		/* Scheduling state as req/ack imbalance */
		state = (ticker->req - ticker->ack) & 0xff;

#if !defined(CONFIG_BT_TICKER_LOW_LAT)
		/* Node with lazy count did not expire with callback, but
		 * was either a collision or re-scheduled. This node should
		 * not define the active slot reservation (slot_previous).
		 */
		skip_collision = (ticker->lazy_current != 0U);
#endif /* !CONFIG_BT_TICKER_LOW_LAT */

#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
		/* decrement ticks_slot_previous */
		if (instance->ticks_slot_previous > ticks_to_expire) {
			instance->ticks_slot_previous -= ticks_to_expire;
		} else {
			/* Previous reservation fully elapsed */
			instance->ticker_id_slot_previous = TICKER_NULL;
			instance->ticks_slot_previous = 0U;
		}

		uint32_t ticker_ticks_slot;

		/* Unreserved tickers with a slot window use the reschedule
		 * margin as their effective slot size.
		 */
		if (TICKER_HAS_SLOT_WINDOW(ticker) && !ticker->ticks_slot) {
			ticker_ticks_slot = HAL_TICKER_RESCHEDULE_MARGIN;
		} else {
			ticker_ticks_slot = ticker->ticks_slot;
		}

		/* If a reschedule is set pending, we will need to keep
		 * the slot_previous information
		 */
		if (ticker_ticks_slot && (state == 2U) && !skip_collision &&
		    !TICKER_RESCHEDULE_PENDING(ticker)) {
			instance->ticker_id_slot_previous = id_expired;
			instance->ticks_slot_previous = ticker_ticks_slot;
		}
#endif /* CONFIG_BT_TICKER_SLOT_AGNOSTIC */

		/* ticker expired, set ticks_to_expire zero */
		ticker->ticks_to_expire = 0U;

#if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
		ticker_mark_expire_info_outdated(instance, instance->ticker_id_head);
#endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */

		/* remove the expired ticker from head */
		instance->ticker_id_head = ticker->next;

		/* Ticker will be restarted if periodic or to be re-scheduled */
		if ((ticker->ticks_periodic != 0U) ||
		    TICKER_RESCHEDULE_PENDING(ticker)) {
#if !defined(CONFIG_BT_TICKER_LOW_LAT)
			if (TICKER_RESCHEDULE_PENDING(ticker)) {
				/* Set the re-scheduled node to now. Will be
				 * collision resolved after all nodes are
				 * restarted
				 */
				ticker->ticks_to_expire = ticks_elapsed;

				/* Reset ticker state, so that it is put
				 * back in requested state later down
				 * in the code.
				 */
				ticker->req = ticker->ack;
			} else {
				uint16_t lazy_periodic;
				uint32_t count;
				uint16_t lazy;

				/* If not skipped, apply lazy_periodic */
				if (!ticker->lazy_current) {
					lazy_periodic = ticker->lazy_periodic;
				} else {
					lazy_periodic = 0U;

					/* Reset ticker state, so that it is
					 * put back in requested state later
					 * down in the code.
					 */
					ticker->req = ticker->ack;
				}

				/* Reload ticks_to_expire with at least one
				 * period.
				 */
				ticks_to_expire = 0U;
				count = 1 + lazy_periodic;
				while (count--) {
					ticks_to_expire +=
						ticker->ticks_periodic;

#if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
					/* Accumulate sub-tick remainder */
					ticks_to_expire +=
						ticker_remainder_inc(ticker);
#endif /* CONFIG_BT_TICKER_REMAINDER_SUPPORT */
				}

				/* Skip intervals that have elapsed w.r.t.
				 * current ticks.
				 */
				lazy = 0U;

				if (0) {
#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
				/* must_expire tickers never skip intervals */
				} else if (!ticker->must_expire) {
#else
				} else {
#endif /* CONFIG_BT_TICKER_SLOT_AGNOSTIC */
					while (ticks_to_expire <
					       ticks_latency) {
						ticks_to_expire +=
							ticker->ticks_periodic;

#if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
						ticks_to_expire +=
						  ticker_remainder_inc(ticker);
#endif /* CONFIG_BT_TICKER_REMAINDER_SUPPORT */

						lazy++;
					}
				}

				/* Use the calculated ticks to expire and
				 * laziness.
				 */
				ticker->ticks_to_expire = ticks_to_expire;
				ticker->lazy_current += (lazy_periodic + lazy);
			}

			ticks_to_expire_prep(ticker, instance->ticks_current,
					     ((ticks_previous + ticks_expired) &
					      HAL_TICKER_CNTR_MASK));
#else /* CONFIG_BT_TICKER_LOW_LAT */
			uint32_t count;
			uint16_t lazy;

			/* Prepare for next interval */
			ticks_to_expire = 0U;
			count = 1 + ticker->lazy_periodic;
			while (count--) {
				ticks_to_expire += ticker->ticks_periodic;
				ticks_to_expire += ticker_remainder_inc(ticker);
			}

			/* Skip intervals that have elapsed w.r.t. current
			 * ticks.
			 */
			lazy = 0U;

			/* Schedule to a tick in the future */
			while (ticks_to_expire < ticks_latency) {
				ticks_to_expire += ticker->ticks_periodic;
				ticks_to_expire += ticker_remainder_inc(ticker);
				lazy++;
			}

			/* Use the calculated ticks to expire and laziness. */
			ticker->ticks_to_expire = ticks_to_expire;
			ticker->lazy_current = ticker->lazy_periodic + lazy;

			ticks_to_expire_prep(ticker, instance->ticks_current,
					     ((ticks_previous + ticks_expired) &
					      HAL_TICKER_CNTR_MASK));

			/* Reset force state of the node */
			ticker->force = 0U;
#endif /* CONFIG_BT_TICKER_LOW_LAT */

			/* Add to insert list */
			ticker->next = *insert_head;
			*insert_head = id_expired;

			/* set schedule status of node as restarting. */
			ticker->req++;
		} else {
#if !defined(CONFIG_BT_TICKER_LOW_LAT)
			/* A single-shot ticker in requested or skipped due to
			 * collision shall generate a operation function
			 * callback with failure status.
			 */
			if (state && ((state == 1U) || skip_collision) &&
			    ticker->fp_op_func) {
				ticker->fp_op_func(TICKER_STATUS_FAILURE,
						   ticker->op_context);
			}
#endif /* !CONFIG_BT_TICKER_LOW_LAT */

			/* reset schedule status of node */
			ticker->req = ticker->ack;
		}
	}
}
2299 
2300 /**
2301  * @brief Prepare ticker node start
2302  *
2303  * @details Called by ticker_job to prepare ticker node start operation.
2304  *
 * @param instance	Pointer to ticker instance
 * @param ticker	Pointer to ticker node
 * @param user_op	Pointer to user operation
 * @param ticks_current Absolute ticks at time of call
 *
 * @return TICKER_STATUS_SUCCESS on success, else error status from expire
 *	   info allocation
2308  *
2309  * @internal
2310  */
static inline uint32_t ticker_job_op_start(struct ticker_instance *instance,
					   struct ticker_node *ticker,
					   struct ticker_user_op *user_op,
					   uint32_t ticks_current)
{
	struct ticker_user_op_start *start = (void *)&user_op->params.start;

#if defined(CONFIG_BT_TICKER_LOW_LAT)
	/* Must expire is not supported in compatibility mode */
	LL_ASSERT(start->lazy < TICKER_LAZY_MUST_EXPIRE_KEEP);
#else
#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
	/* Lazy values at/above TICKER_LAZY_MUST_EXPIRE_KEEP encode a
	 * must_expire request instead of an actual lazy count.
	 */
	if (start->lazy != TICKER_LAZY_MUST_EXPIRE_KEEP) {
		/* Update the must_expire state */
		ticker->must_expire =
			(start->lazy == TICKER_LAZY_MUST_EXPIRE) ? 1U : 0U;
	}
#endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */
#endif /* CONFIG_BT_TICKER_LOW_LAT */

#if defined(CONFIG_BT_TICKER_EXT)
	ticker->ext_data = start->ext_data;

#if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
	if (ticker->ext_data) {
		ticker->ext_data->other_expire_info = NULL;
		/* Node wants expire info of another ticker; allocate the
		 * shared expire info entry up front.
		 */
		if (ticker->ext_data->expire_info_id != TICKER_NULL) {
			uint32_t status;

			status = ticker_alloc_expire_info(instance, user_op->id);
			if (status) {
				/* Allocation failed: abort the start */
				return status;
			}
		}
	}

	ticker_mark_expire_info_outdated(instance, user_op->id);
#else /* !CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
	ARG_UNUSED(instance);
#endif /* !CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
#else /* !CONFIG_BT_TICKER_EXT */
	ARG_UNUSED(instance);
#endif /* !CONFIG_BT_TICKER_EXT */

	ticker->ticks_periodic = start->ticks_periodic;

#if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
	ticker->remainder_periodic = start->remainder_periodic;

#if defined(CONFIG_BT_TICKER_START_REMAINDER)
	ticker->remainder_current = start->remainder_first;
#else /* !CONFIG_BT_TICKER_START_REMAINDER */
	ticker->remainder_current = 0U;
#endif /* !CONFIG_BT_TICKER_START_REMAINDER */
#endif /* CONFIG_BT_TICKER_REMAINDER_SUPPORT */

	/* Special lazy encodings do not carry a real lazy count */
	ticker->lazy_periodic =
		(start->lazy < TICKER_LAZY_MUST_EXPIRE_KEEP) ? start->lazy :
							       0U;
#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
	ticker->ticks_slot = start->ticks_slot;
#endif /* CONFIG_BT_TICKER_SLOT_AGNOSTIC */

	ticker->timeout_func = start->fp_timeout_func;
	ticker->context = start->context;
	ticker->ticks_to_expire = start->ticks_first;
	ticker->ticks_to_expire_minus = 0U;
	ticks_to_expire_prep(ticker, ticks_current, start->ticks_at_start);

	/* Newly started: no accumulated laziness, and force first expiry */
	ticker->lazy_current = 0U;
	ticker->force = 1U;

	return TICKER_STATUS_SUCCESS;
}
2385 
2386 #if !defined(CONFIG_BT_TICKER_LOW_LAT)
2387 /**
2388  * @brief Insert new ticker node
2389  *
 * @details Called by ticker_job to insert a new ticker node. In this
 * configuration the node is enqueued unconditionally and marked as
 * scheduled; collision handling is performed elsewhere (re-scheduling),
 * not at insertion time.
2394  *
2395  * @param instance    Pointer to ticker instance
2396  * @param id_insert   Id of ticker to insert
2397  * @param ticker      Pointer to ticker node to insert
2398  * @param insert_head Pointer to current head. Updated if colliding nodes
2399  *		      are un-scheduled
2400  * @internal
2401  */
2402 static inline uint8_t ticker_job_insert(struct ticker_instance *instance,
2403 				      uint8_t id_insert,
2404 				      struct ticker_node *ticker,
2405 				      uint8_t *insert_head)
2406 {
2407 	ARG_UNUSED(insert_head);
2408 
2409 	/* Prepare to insert */
2410 	ticker->next = TICKER_NULL;
2411 
2412 	/* Enqueue the ticker node */
2413 	(void)ticker_enqueue(instance, id_insert);
2414 
2415 	/* Inserted/Scheduled */
2416 	ticker->req = ticker->ack + 1;
2417 
2418 	return TICKER_STATUS_SUCCESS;
2419 }
2420 
2421 #if defined(CONFIG_BT_TICKER_EXT) && !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
2422 /**
2423  * @brief Re-schedule ticker nodes within slot_window
2424  *
2425  * @details This function is responsible for re-scheduling ticker nodes
2426  * which have been marked for re-scheduling in ticker_worker. These nodes
2427  * have a non-zero ticks_slot_window configuration, which indicates a
2428  * valid range in which to re-schedule the node.
2429  * The function iterates over nodes, and handles one re-schedule at a
2430  * time. After a re-schedule, nodes are once again iterated until no more
2431  * nodes are marked for re-scheduling.
2432  *
 * @param instance      Pointer to ticker instance
 *
 * @return 1 if any ticker node was re-scheduled, 0 otherwise
2435  *
2436  * @internal
2437  */
static uint8_t ticker_job_reschedule_in_window(struct ticker_instance *instance)
{
	struct ticker_node *nodes;
	uint8_t rescheduling;
	uint8_t rescheduled;

	nodes = &instance->nodes[0];

	/* Do until all pending re-schedules handled */
	rescheduling = 1U;
	rescheduled = 0U;
	while (rescheduling) {
		struct ticker_node *ticker_resched;
		uint32_t ticks_to_expire_offset;
		uint8_t ticker_id_resched_prev;
		struct ticker_ext  *ext_data;
		uint32_t ticks_start_offset;
		uint32_t window_start_ticks;
		uint32_t ticks_slot_window;
		uint8_t ticker_id_resched;
		uint32_t ticks_to_expire;
		uint8_t ticker_id_prev;
		uint8_t ticker_id_next;
		uint32_t ticks_slot;

		rescheduling = 0U;

		/* Find first pending re-schedule */
		ticker_id_resched_prev = TICKER_NULL;
		ticker_id_resched = instance->ticker_id_head;
		while (ticker_id_resched != TICKER_NULL) {
			ticker_resched = &nodes[ticker_id_resched];
			if (TICKER_RESCHEDULE_PENDING(ticker_resched)) {
				/* Pending reschedule found */
				break;
			}

			ticker_id_resched_prev = ticker_id_resched;
			ticker_id_resched = ticker_resched->next;
		}

		/* Exit if no tickers to be rescheduled */
		if (ticker_id_resched == TICKER_NULL) {
			break;
		}

		/* Ensure that resched ticker is expired */
		LL_ASSERT(ticker_resched->ticks_to_expire == 0U);

		/* Use ticker's reserved time ticks_slot, else for unreserved
		 * tickers use the reschedule margin as ticks_slot.
		 */
		if (ticker_resched->ticks_slot) {
			ticks_slot = ticker_resched->ticks_slot;
		} else {
			LL_ASSERT(TICKER_HAS_SLOT_WINDOW(ticker_resched));

			ticks_slot = HAL_TICKER_RESCHEDULE_MARGIN;
		}

		/* Window start after intersection with already active node */
		window_start_ticks = instance->ticks_slot_previous +
				     HAL_TICKER_RESCHEDULE_MARGIN;

		/* If drift was applied to this node, this must be
		 * taken into consideration. Reduce the window with
		 * the amount of drift already applied.
		 *
		 * TODO: An improvement on this could be to only consider the
		 * drift (ADV => randomization) if re-scheduling fails. Then the
		 * ticker would have the best possible window to re-schedule in
		 * and not be restricted to ticks_slot_window - ticks_drift.
		 */
		ext_data = ticker_resched->ext_data;
		if (IS_ENABLED(CONFIG_BT_TICKER_EXT_SLOT_WINDOW_YIELD) &&
		    ticker_resched->ticks_slot &&
		    !ext_data->ticks_drift &&
		    !ext_data->is_drift_in_window) {
			/* Use slot window after intersection include required
			 * ticks_slot, and we do not take the interval of the
			 * colliding ticker provided every expiry increments the
			 * interval by random amount of ticks.
			 */
			ticks_slot_window = window_start_ticks + ticks_slot;

			/* Window available, proceed to calculate further
			 * drift
			 */
			ticker_id_next = ticker_resched->next;

		} else if (ext_data->ticks_drift < ext_data->ticks_slot_window) {
			/* Use reduced slot window */
			ticks_slot_window = ext_data->ticks_slot_window -
					    ext_data->ticks_drift;

			/* Window available, proceed to calculate further
			 * drift
			 */
			ticker_id_next = ticker_resched->next;

		} else {
			/* Window has been exhausted - we can't reschedule */
			ticker_id_next = TICKER_NULL;

			/* Assignment will be unused when TICKER_NULL */
			ticks_slot_window = 0U;
		}

		/* Try to find available slot for re-scheduling: walk the
		 * following nodes looking for a gap that fits ticks_slot
		 * within the remaining slot window.
		 */
		ticks_to_expire_offset = 0U;
		ticks_start_offset = 0U;
		ticks_to_expire = 0U;
		while ((ticker_id_next != TICKER_NULL) &&
		       ((ticks_start_offset + ticks_slot) <=
			ticks_slot_window)) {
			struct ticker_node *ticker_next;
			uint32_t window_end_ticks;

			ticker_next = &nodes[ticker_id_next];
			ticks_to_expire_offset += ticker_next->ticks_to_expire;

			/* Calculate end of window. Since window may be aligned
			 * with expiry of next node, we add a margin
			 */
			if (ticks_to_expire_offset >
			    HAL_TICKER_RESCHEDULE_MARGIN) {
				window_end_ticks =
					MIN(ticks_slot_window,
					    ticks_start_offset +
					    ticks_to_expire_offset -
					    HAL_TICKER_RESCHEDULE_MARGIN);
			} else {
				/* Next expiry is too close - try the next
				 * node
				 */
				window_end_ticks = 0U;
			}

			/* Calculate new ticks_to_expire as end of window minus
			 * slot size.
			 */
			if (((window_start_ticks + ticks_slot) <=
			     ticks_slot_window) &&
			    (window_end_ticks >= (ticks_start_offset +
						 ticks_slot))) {
				if (!ticker_resched->ticks_slot ||
				    ext_data->is_drift_in_window) {
					/* Place at start of window */
					ticks_to_expire = window_start_ticks;
				} else {
					/* Place at end of window. This ensures
					 * that ticker with slot window and that
					 * uses ticks_slot does not take the
					 * interval of the colliding ticker.
					 */
					ticks_to_expire = window_end_ticks -
							  ticks_slot;
				}
			} else {
				/* No space in window - try the next node */
				ticks_to_expire = 0U;
			}

			/* Decide if the re-scheduling ticker node fits in the
			 * slot found - break if it fits
			 */
			if ((ticks_to_expire != 0U) &&
			    (ticks_to_expire >= window_start_ticks) &&
			    (ticks_to_expire <= (window_end_ticks -
						 ticks_slot))) {
				/* Re-scheduled node fits before this node */
				break;
			} else {
				/* Not inside the window */
				ticks_to_expire = 0U;
			}

			/* Skip other pending re-schedule nodes and
			 * tickers with no reservation or not periodic
			 */
			if (TICKER_RESCHEDULE_PENDING(ticker_next) ||
			    !ticker_next->ticks_slot ||
			    !ticker_next->ticks_periodic) {
				ticker_id_next = ticker_next->next;

				continue;
			}

			/* We didn't find a valid slot for re-scheduling - try
			 * the next node
			 */
			ticks_start_offset += ticks_to_expire_offset;
			window_start_ticks  = ticks_start_offset +
					      ticker_next->ticks_slot +
					      HAL_TICKER_RESCHEDULE_MARGIN;
			ticks_to_expire_offset = 0U;

			if (!ticker_resched->ticks_slot ||
			    ext_data->is_drift_in_window) {
				if (!ticker_resched->ticks_slot ||
				    (window_start_ticks <= (ticks_slot_window -
							   ticks_slot))) {
					/* Try at the end of the next node */
					ticks_to_expire = window_start_ticks;
				}
			} else {
				/* Try at the end of the slot window. This
				 * ensures that ticker with slot window and that
				 * uses ticks_slot does not take the interval of
				 * the colliding ticker.
				 */
				ticks_to_expire = ticks_slot_window -
						  ticks_slot;
			}

			ticker_id_next = ticker_next->next;
		}

		/* Record the additional drift applied by this re-schedule */
		ext_data->ticks_drift += ticks_to_expire;

		/* Place the ticker node sorted by expiration time and adjust
		 * delta times
		 */
		ticker_id_next = ticker_resched->next;
		ticker_id_prev = TICKER_NULL;
		while (ticker_id_next != TICKER_NULL) {
			struct ticker_node *ticker_next;

			ticker_next = &nodes[ticker_id_next];
			if (ticks_to_expire > ticker_next->ticks_to_expire) {
				/* Node is after this - adjust delta */
				ticks_to_expire -= ticker_next->ticks_to_expire;
			} else {
				/* Node is before this one */
				ticker_next->ticks_to_expire -= ticks_to_expire;
				break;
			}
			ticker_id_prev = ticker_id_next;
			ticker_id_next = ticker_next->next;
		}

		/* Remaining delta relative to the new predecessor */
		ticker_resched->ticks_to_expire = ticks_to_expire;

		/* If the node moved in the list, insert it */
		if (ticker_id_prev != TICKER_NULL) {
			/* Remove node from its current position in list */
			if (ticker_id_resched_prev != TICKER_NULL) {
				/* Node was not at the head of the list */
				nodes[ticker_id_resched_prev].next =
					ticker_resched->next;
			} else {
				/* Node was at the head, move head forward */
				instance->ticker_id_head = ticker_resched->next;
			}

			/* Link inserted node */
			ticker_resched->next = nodes[ticker_id_prev].next;
			nodes[ticker_id_prev].next = ticker_id_resched;
		}

		/* Remove latency added in ticker_worker */
		ticker_resched->lazy_current--;

		/* Prevent repeated re-scheduling */
		ext_data->reschedule_state =
			TICKER_RESCHEDULE_STATE_DONE;

#if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
		ticker_mark_expire_info_outdated(instance, ticker_id_resched);
#endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */

		/* Check for other pending re-schedules and set exit flag */
		rescheduling = 1U;
		rescheduled  = 1U;
	}

	return rescheduled;
}
2716 #endif /* CONFIG_BT_TICKER_EXT && !CONFIG_BT_TICKER_SLOT_AGNOSTIC */
2717 #else  /* CONFIG_BT_TICKER_LOW_LAT */
2718 
2719 /**
2720  * @brief Insert new ticker node
2721  *
2722  * @details Called by ticker_job to insert a new ticker node. If node collides
2723  * with existing ticker nodes, either the new node is postponed, or colliding
2724  * node is un-scheduled. Decision is based on latency and the force-state of
2725  * individual nodes.
2726  *
2727  * @param instance    Pointer to ticker instance
2728  * @param id_insert   Id of ticker to insert
2729  * @param ticker      Pointer to ticker node to insert
2730  * @param insert_head Pointer to current head. Updated if colliding nodes
2731  *		      are un-scheduled
2732  * @internal
2733  */
static inline uint8_t ticker_job_insert(struct ticker_instance *instance,
				      uint8_t id_insert,
				      struct ticker_node *ticker,
				      uint8_t *insert_head)
{
	struct ticker_node *node = &instance->nodes[0];
	uint8_t id_collide;
	uint16_t skip;

	/* Prepare to insert */
	ticker->next = TICKER_NULL;

	/* No. of times ticker has skipped its interval */
	if (ticker->lazy_current > ticker->lazy_periodic) {
		skip = ticker->lazy_current -
		       ticker->lazy_periodic;
	} else {
		skip = 0U;
	}

	/* If insert collides, remove colliding or advance to next interval.
	 * Loop until ticker_enqueue() reports success by returning id_insert
	 * itself; any other return value identifies an unresolved conflict.
	 * NOTE(review): exact return semantics of ticker_enqueue() (collider
	 * id vs. TICKER_NULL) are assumed from usage below - confirm against
	 * its definition.
	 */
	while (id_insert !=
	       (id_collide = ticker_enqueue(instance, id_insert))) {
		/* Check for collision */
		if (id_collide != TICKER_NULL) {
			struct ticker_node *ticker_collide = &node[id_collide];
			uint16_t skip_collide;

			/* No. of times colliding ticker has skipped its
			 * interval.
			 */
			if (ticker_collide->lazy_current >
			    ticker_collide->lazy_periodic) {
				skip_collide = ticker_collide->lazy_current -
					       ticker_collide->lazy_periodic;
			} else {
				skip_collide = 0U;
			}

			/* Check if colliding node should be un-scheduled:
			 * only periodic nodes that have not skipped more
			 * intervals than the new node, and whose force value
			 * is strictly lower, yield their slot.
			 */
			if (ticker_collide->ticks_periodic &&
			    skip_collide <= skip &&
			    ticker_collide->force < ticker->force) {
				/* Dequeue and get the reminder of ticks
				 * to expire.
				 */
				ticker_collide->ticks_to_expire =
					ticker_dequeue(instance, id_collide);
				/* Unschedule node */
				ticker_collide->req = ticker_collide->ack;

				/* Enqueue for re-insertion */
				ticker_collide->next = *insert_head;
				*insert_head = id_collide;

				continue;
			}
		}

		/* occupied, try next interval */
		if (ticker->ticks_periodic != 0U) {
			ticker->ticks_to_expire += ticker->ticks_periodic +
						   ticker_remainder_inc(ticker);
			ticker->lazy_current++;

			/* No. of times ticker has skipped its interval */
			if (ticker->lazy_current > ticker->lazy_periodic) {
				skip = ticker->lazy_current -
				       ticker->lazy_periodic;
			} else {
				skip = 0U;
			}

			/* Remove any accumulated drift (possibly added due to
			 * ticker job execution latencies).
			 */
			if (ticker->ticks_to_expire >
			    ticker->ticks_to_expire_minus) {
				ticker->ticks_to_expire -=
					ticker->ticks_to_expire_minus;
				ticker->ticks_to_expire_minus = 0U;
			} else {
				ticker->ticks_to_expire_minus -=
					ticker->ticks_to_expire;
				ticker->ticks_to_expire = 0U;
			}
		} else {
			/* One-shot node could not be placed - fail */
			return TICKER_STATUS_FAILURE;
		}
	}

	/* Inserted/Scheduled: a req/ack imbalance of one marks the node as
	 * having an ongoing (scheduled) operation.
	 */
	ticker->req = ticker->ack + 1;

	return TICKER_STATUS_SUCCESS;
}
2830 #endif /* CONFIG_BT_TICKER_LOW_LAT */
2831 
2832 /**
2833  * @brief Insert and start ticker nodes for all users
2834  *
2835  * @details Called by ticker_job to prepare, insert and start ticker nodes
2836  * for all users. Specifying insert_head to other than TICKER_NULL causes
2837  * that ticker node to be inserted first.
2838  *
2839  * @param instance    Pointer to ticker instance
2840  * @param insert_head Id of ticker node to insert, or TICKER_NULL if only
2841  *                    handle user operation inserts
2842  * @internal
2843  */
2844 static inline void ticker_job_list_insert(struct ticker_instance *instance,
2845 					  uint8_t insert_head)
2846 {
2847 	struct ticker_node *node;
2848 	struct ticker_user *users;
2849 	uint8_t count_user;
2850 
2851 	node = &instance->nodes[0];
2852 	users = &instance->users[0];
2853 	count_user = instance->count_user;
2854 
2855 	/* Iterate through all user ids */
2856 	while (count_user--) {
2857 		struct ticker_user_op *user_ops;
2858 		struct ticker_user *user;
2859 		uint8_t user_ops_first;
2860 
2861 		user = &users[count_user];
2862 		user_ops = (void *)&user->user_op[0];
2863 		user_ops_first = user->first;
2864 		/* Traverse user operation queue - first to middle (wrap) */
2865 		while ((insert_head != TICKER_NULL) ||
2866 		       (user_ops_first != user->middle)) {
2867 			struct ticker_user_op *user_op;
2868 			struct ticker_node *ticker;
2869 			uint8_t id_insert;
2870 			uint8_t status = TICKER_STATUS_SUCCESS;
2871 
2872 			if (insert_head != TICKER_NULL) {
2873 				/* Prepare insert of ticker node specified by
2874 				 * insert_head
2875 				 */
2876 				id_insert = insert_head;
2877 				ticker = &node[id_insert];
2878 				insert_head = ticker->next;
2879 
2880 				user_op = NULL;
2881 			} else {
2882 				/* Prepare insert of any ticker nodes requested
2883 				 * via user operation TICKER_USER_OP_TYPE_START
2884 				 */
2885 				uint8_t first;
2886 
2887 				user_op = &user_ops[user_ops_first];
2888 				first = user_ops_first + 1;
2889 				if (first == user->count_user_op) {
2890 					first = 0U;
2891 				}
2892 				user_ops_first = first;
2893 
2894 				id_insert = user_op->id;
2895 				ticker = &node[id_insert];
2896 				if (user_op->op != TICKER_USER_OP_TYPE_START) {
2897 					/* User operation is not start - skip
2898 					 * to next operation
2899 					 */
2900 					continue;
2901 				}
2902 
2903 #if defined(CONFIG_BT_TICKER_PREFER_START_BEFORE_STOP)
2904 				ticker->start_pending = 0U;
2905 #endif /* CONFIG_BT_TICKER_PREFER_START_BEFORE_STOP */
2906 
2907 				if (((ticker->req -
2908 				      ticker->ack) & 0xff) != 0U) {
2909 					ticker_job_op_cb(user_op,
2910 							 TICKER_STATUS_FAILURE);
2911 					continue;
2912 				}
2913 
2914 				/* Prepare ticker for start */
2915 				status = ticker_job_op_start(instance, ticker, user_op,
2916 						    instance->ticks_current);
2917 			}
2918 
2919 			if (!status) {
2920 				/* Insert ticker node */
2921 				status = ticker_job_insert(instance, id_insert, ticker,
2922 							   &insert_head);
2923 			}
2924 
2925 			if (user_op) {
2926 				ticker_job_op_cb(user_op, status);
2927 
2928 				if (!IS_ENABLED(CONFIG_BT_TICKER_LOW_LAT) &&
2929 				    (ticker->ticks_periodic == 0U) &&
2930 				    user_op) {
2931 					ticker->fp_op_func =
2932 						user_op->fp_op_func;
2933 					ticker->op_context =
2934 						user_op->op_context;
2935 				}
2936 			}
2937 		}
2938 
2939 #if !defined(CONFIG_BT_TICKER_JOB_IDLE_GET) && \
2940 	!defined(CONFIG_BT_TICKER_NEXT_SLOT_GET) && \
2941 	!defined(CONFIG_BT_TICKER_PRIORITY_SET)
2942 		user->first = user_ops_first;
2943 #endif /* !CONFIG_BT_TICKER_JOB_IDLE_GET &&
2944 	* !CONFIG_BT_TICKER_NEXT_SLOT_GET &&
2945 	* !CONFIG_BT_TICKER_PRIORITY_SET
2946 	*/
2947 
2948 	}
2949 }
2950 
2951 #if defined(CONFIG_BT_TICKER_JOB_IDLE_GET) || \
2952 	defined(CONFIG_BT_TICKER_NEXT_SLOT_GET) || \
2953 	defined(CONFIG_BT_TICKER_PRIORITY_SET)
2954 /**
2955  * @brief Perform inquiry for specific user operation
2956  *
2957  * @param instance Pointer to ticker instance
2958  * @param uop	   Pointer to user operation
2959  *
2960  * @internal
2961  */
static inline void ticker_job_op_inquire(struct ticker_instance *instance,
					 struct ticker_user_op *uop)
{
	ticker_op_func fp_op_func;

	fp_op_func = NULL;
	switch (uop->op) {
#if defined(CONFIG_BT_TICKER_NEXT_SLOT_GET)
	case TICKER_USER_OP_TYPE_SLOT_GET:
		ticker_by_next_slot_get(instance,
					uop->params.slot_get.ticker_id,
					uop->params.slot_get.ticks_current,
					uop->params.slot_get.ticks_to_expire,
#if defined(CONFIG_BT_TICKER_NEXT_SLOT_GET_MATCH)
					uop->params.slot_get.fp_match_op_func,
					uop->params.slot_get.match_op_context,
#else
					NULL, NULL,
#endif
#if defined(CONFIG_BT_TICKER_REMAINDER_GET)
					uop->params.slot_get.remainder,
#else /* !CONFIG_BT_TICKER_REMAINDER_GET */
					NULL,
#endif /* !CONFIG_BT_TICKER_REMAINDER_GET */
#if defined(CONFIG_BT_TICKER_LAZY_GET)
					uop->params.slot_get.lazy);
#else /* !CONFIG_BT_TICKER_LAZY_GET */
					NULL);
#endif /* !CONFIG_BT_TICKER_LAZY_GET */
		/* Intentional fall-through: SLOT_GET completes via the
		 * IDLE_GET path below (status + operation callback).
		 */
		__fallthrough;
#endif /* CONFIG_BT_TICKER_NEXT_SLOT_GET */

#if defined(CONFIG_BT_TICKER_JOB_IDLE_GET) || \
	defined(CONFIG_BT_TICKER_NEXT_SLOT_GET)
	case TICKER_USER_OP_TYPE_IDLE_GET:
		uop->status = TICKER_STATUS_SUCCESS;
		fp_op_func = uop->fp_op_func;
		break;
#endif /* CONFIG_BT_TICKER_JOB_IDLE_GET ||
	* CONFIG_BT_TICKER_NEXT_SLOT_GET
	*/

#if !defined(CONFIG_BT_TICKER_LOW_LAT) && \
	!defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC) && \
	defined(CONFIG_BT_TICKER_PRIORITY_SET)
	case TICKER_USER_OP_TYPE_PRIORITY_SET:
		/* Set node priority only for a valid node id */
		if (uop->id < instance->count_node) {
			struct ticker_node *node = instance->nodes;

			node[uop->id].priority =
				uop->params.priority_set.priority;
			uop->status = TICKER_STATUS_SUCCESS;
		} else {
			uop->status = TICKER_STATUS_FAILURE;
		}
		fp_op_func = uop->fp_op_func;
		break;
#endif /* !CONFIG_BT_TICKER_LOW_LAT &&
	* !CONFIG_BT_TICKER_SLOT_AGNOSTIC &&
	* CONFIG_BT_TICKER_PRIORITY_SET
	*/

	default:
		/* do nothing for other ops */
		break;
	}

	/* Invoke completion callback, if any, for the handled inquiry */
	if (fp_op_func) {
		fp_op_func(uop->status, uop->op_context);
	}
}
3033 
3034 /**
3035  * @brief Check for pending inquiries for all users
3036  *
3037  * @details Run through all user operation lists, checking for pending
3038  * inquiries. Currently only two types of inquiries are supported:
3039  * TICKER_USER_OP_TYPE_SLOT_GET and TICKER_USER_OP_TYPE_IDLE_GET. The
3040  * function also supports user operation TICKER_USER_OP_TYPE_PRIORITY_SET.
3041  * This operation modifies the user->first index, indicating user operations
3042  * are complete.
3043  *
3044  * @param instance Pointer to ticker instance
3045  *
3046  * @internal
3047  */
3048 static inline void ticker_job_list_inquire(struct ticker_instance *instance)
3049 {
3050 	struct ticker_user *users;
3051 	uint8_t count_user;
3052 
3053 	users = &instance->users[0];
3054 	count_user = instance->count_user;
3055 	/* Traverse user operation queue - first to last (with wrap) */
3056 	while (count_user--) {
3057 		struct ticker_user_op *user_op;
3058 		struct ticker_user *user;
3059 
3060 		user = &users[count_user];
3061 		user_op = &user->user_op[0];
3062 		while (user->first != user->last) {
3063 			uint8_t first;
3064 
3065 			ticker_job_op_inquire(instance, &user_op[user->first]);
3066 
3067 			first = user->first + 1;
3068 			if (first == user->count_user_op) {
3069 				first = 0U;
3070 			}
3071 			user->first = first;
3072 		}
3073 	}
3074 }
3075 #endif /* CONFIG_BT_TICKER_JOB_IDLE_GET ||
3076 	* CONFIG_BT_TICKER_NEXT_SLOT_GET ||
3077 	* CONFIG_BT_TICKER_PRIORITY_SET
3078 	*/
3079 
3080 /**
3081  * @brief Update counter compare value (trigger)
3082  *
3083  * @details Updates trigger to the match next expiring ticker node. The
3084  * function takes into consideration that it may be preempted in the process,
3085  * and makes sure - by iteration - that compare value is set in the future
3086  * (with a margin).
3087  *
3088  * @param instance           Pointer to ticker instance
3089  * @param ticker_id_old_head Previous ticker_id_head
3090  *
3091  * @internal
3092  */
static inline uint8_t
ticker_job_compare_update(struct ticker_instance *instance,
			  uint8_t ticker_id_old_head)
{
	struct ticker_node *ticker;
	uint32_t ticks_to_expire;
	uint32_t ctr_curr;
	uint32_t ctr_prev;
	uint32_t cc;
	uint32_t i;

	/* No tickers left: stop the counter instead of setting a compare */
	if (instance->ticker_id_head == TICKER_NULL) {
		if (cntr_stop() == 0) {
#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
			instance->ticks_slot_previous = 0U;
#endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */

#if !defined(CONFIG_BT_TICKER_CNTR_FREE_RUNNING)
			/* Stopped counter value will be used as ticks_current
			 * for calculation to start new tickers.
			 */
			instance->ticks_current = cntr_cnt_get();
#endif /* !CONFIG_BT_TICKER_CNTR_FREE_RUNNING */
		}

		return 0U;
	}

	/* Check if this is the first update. If so, start the counter */
	if (ticker_id_old_head == TICKER_NULL) {
#if !defined(CONFIG_BT_TICKER_CNTR_FREE_RUNNING)
		uint32_t ticks_current;

		/* Sample before starting so the anchor reflects the value
		 * the counter had while stopped.
		 */
		ticks_current = cntr_cnt_get();
#endif /* !CONFIG_BT_TICKER_CNTR_FREE_RUNNING */

		if (cntr_start() == 0) {
#if !defined(CONFIG_BT_TICKER_CNTR_FREE_RUNNING)
			/* Stopped counter value will be used as ticks_current
			 * for calculation to start new tickers.
			 * FIXME: We do not need to synchronize here, instead
			 *        replace with check to ensure the counter value
			 *        has not since that synchronization when the
			 *        counter with in stopped state.
			 */
			instance->ticks_current = ticks_current;
#endif /* !CONFIG_BT_TICKER_CNTR_FREE_RUNNING */
		}
	}

	ticker = &instance->nodes[instance->ticker_id_head];
	ticks_to_expire = ticker->ticks_to_expire;

	/* If ticks_to_expire is zero, then immediately trigger the worker.
	 */
	if (!ticks_to_expire) {
		return 1U;
	}

	/* Iterate few times, if required, to ensure that compare is
	 * correctly set to a future value. This is required in case
	 * the operation is pre-empted and current h/w counter runs
	 * ahead of compare value to be set.
	 */
	i = 10U;
	ctr_curr = cntr_cnt_get();
	do {
		uint32_t ticks_elapsed;
		uint32_t ticks_diff;

		/* Guard against never converging on a future compare value */
		LL_ASSERT(i);
		i--;

		cc = instance->ticks_current;
		ticks_diff = ticker_ticks_diff_get(ctr_curr, cc);
		if (ticks_diff >= ticks_to_expire) {
			/* Expiry already due; trigger the worker directly */
			return 1U;
		}

		/* Compare must be at least the minimum offset plus set
		 * latency ahead of the current counter value.
		 */
		ticks_elapsed = ticks_diff + HAL_TICKER_CNTR_CMP_OFFSET_MIN +
				HAL_TICKER_CNTR_SET_LATENCY;
		cc += MAX(ticks_elapsed, ticks_to_expire);
		cc &= HAL_TICKER_CNTR_MASK;
		instance->trigger_set_cb(cc);

		ctr_prev = ctr_curr;
		ctr_curr = cntr_cnt_get();
		/* Retry if the counter advanced to within the minimum
		 * compare offset of (or past) the value just set.
		 */
	} while ((ticker_ticks_diff_get(ctr_curr, ctr_prev) +
		  HAL_TICKER_CNTR_CMP_OFFSET_MIN) >
		  ticker_ticks_diff_get(cc, ctr_prev));

	return 0U;
}
3186 
3187 /**
3188  * @brief Ticker job
3189  *
3190  * @details Runs the bottom half of the ticker, after ticker nodes have elapsed
3191  * or user operations requested. The ticker_job is responsible for removing and
3192  * re-inserting ticker nodes, based on next elapsing and periodicity of the
3193  * nodes. The ticker_job is also responsible for processing user operations,
3194  * i.e. requests for start, update, stop etc.
3195  * Invoked from the ticker job mayfly context (TICKER_MAYFLY_CALL_ID_JOB).
3196  *
3197  * @param param Pointer to ticker instance
3198  *
3199  * @internal
3200  */
void ticker_job(void *param)
{
	struct ticker_instance *instance = param;
	uint8_t flag_compare_update;
	uint8_t ticker_id_old_head;
	uint8_t compare_trigger;
	uint32_t ticks_previous;
	uint32_t ticks_elapsed;
	uint8_t flag_elapsed;
	uint8_t insert_head;
	uint32_t ticks_now;
	uint8_t pending;

	DEBUG_TICKER_JOB(1);

	/* Defer job, as worker is running */
	if (instance->worker_trigger) {
		DEBUG_TICKER_JOB(0);
		return;
	}

	/* Defer job, as job is already running */
	if (instance->job_guard) {
		instance->sched_cb(TICKER_CALL_ID_JOB, TICKER_CALL_ID_JOB, 1,
				   instance);
		return;
	}
	/* Claim the job guard; released before exit to permit the worker */
	instance->job_guard = 1U;

	/* Back up the previous known tick */
	ticks_previous = instance->ticks_current;

	/* Update current tick with the elapsed value from queue, and dequeue */
	if (instance->ticks_elapsed_first != instance->ticks_elapsed_last) {
		ticker_next_elapsed(&instance->ticks_elapsed_first);

		ticks_elapsed =
		    instance->ticks_elapsed[instance->ticks_elapsed_first];

		instance->ticks_current += ticks_elapsed;
		instance->ticks_current &= HAL_TICKER_CNTR_MASK;

		flag_elapsed = 1U;
	} else {
		/* No elapsed value in queue */
		flag_elapsed = 0U;
		ticks_elapsed = 0U;
	}

	/* Initialise internal re-insert list */
	insert_head = TICKER_NULL;

	/* Initialise flag used to update next compare value */
	flag_compare_update = 0U;

	/* Remember the old head, so as to decide if new compare needs to be
	 * set.
	 */
	ticker_id_old_head = instance->ticker_id_head;

	/* Get current ticks, used in managing updates and expired tickers */
	ticks_now = cntr_cnt_get();

#if defined(CONFIG_BT_TICKER_CNTR_FREE_RUNNING)
	if (ticker_id_old_head == TICKER_NULL) {
		/* No tickers active, synchronize to the free running counter so
		 * that any new ticker started can have its ticks_to_expire
		 * relative to current free running counter value.
		 *
		 * Both current tick (new value) and previous tick (previously
		 * stored when all tickers stopped) is assigned to ticks_now.
		 * All new tickers are started from this synchronized value as
		 * the anchor/reference value.
		 *
		 * Note, this if clause is an overhead wherein the check is
		 * performed for every ticker_job() iteration!
		 */
		instance->ticks_current = ticks_now;
		ticks_previous = ticks_now;
	}
#endif /* CONFIG_BT_TICKER_CNTR_FREE_RUNNING */

	/* Manage user operations (updates and deletions) in ticker list */
	pending = ticker_job_list_manage(instance, ticks_now, ticks_elapsed,
					 &insert_head);

	/* Detect change in head of the list */
	if (instance->ticker_id_head != ticker_id_old_head) {
		flag_compare_update = 1U;
	}

	/* Handle expired tickers */
	if (flag_elapsed) {
		ticker_job_worker_bh(instance, ticks_now, ticks_previous,
				     ticks_elapsed, &insert_head);

		/* Detect change in head of the list */
		if (instance->ticker_id_head != ticker_id_old_head) {
			flag_compare_update = 1U;
		}

		/* Handle insertions */
		ticker_job_list_insert(instance, insert_head);

#if defined(CONFIG_BT_TICKER_EXT) && !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC) &&\
	!defined(CONFIG_BT_TICKER_LOW_LAT)
		/* Re-schedule any pending nodes with slot_window */
		if (ticker_job_reschedule_in_window(instance)) {
			flag_compare_update = 1U;
		}
#endif /* CONFIG_BT_TICKER_EXT */
	} else {
		/* Handle insertions */
		ticker_job_list_insert(instance, insert_head);
	}

	/* Detect change in head of the list */
	if (instance->ticker_id_head != ticker_id_old_head) {
		flag_compare_update = 1U;
	}

#if defined(CONFIG_BT_TICKER_JOB_IDLE_GET) || \
	defined(CONFIG_BT_TICKER_NEXT_SLOT_GET) || \
	defined(CONFIG_BT_TICKER_PRIORITY_SET)
	/* Process any list inquiries */
	if (!pending) {
		/* Handle inquiries */
		ticker_job_list_inquire(instance);
	}
#else  /* !CONFIG_BT_TICKER_JOB_IDLE_GET &&
	* !CONFIG_BT_TICKER_NEXT_SLOT_GET &&
	* !CONFIG_BT_TICKER_PRIORITY_SET
	*/
	ARG_UNUSED(pending);
#endif /* !CONFIG_BT_TICKER_JOB_IDLE_GET &&
	* !CONFIG_BT_TICKER_NEXT_SLOT_GET &&
	* !CONFIG_BT_TICKER_PRIORITY_SET
	*/

#if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
	if (instance->expire_infos_outdated) {
		ticker_job_update_expire_infos(instance);
	}
#endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */

	/* update compare if head changed */
	if (flag_compare_update) {
		compare_trigger = ticker_job_compare_update(instance,
							    ticker_id_old_head);
	} else {
		compare_trigger = 0U;
	}

	/* Permit worker to run */
	instance->job_guard = 0U;

	/* trigger worker if deferred */
	cpu_dmb();
	if (instance->worker_trigger || compare_trigger) {
		instance->sched_cb(TICKER_CALL_ID_JOB, TICKER_CALL_ID_WORKER, 1,
				   instance);
	}

	DEBUG_TICKER_JOB(0);
}
3366 
3367 /*****************************************************************************
3368  * Public Interface
3369  ****************************************************************************/
3370 
3371 /**
3372  * @brief Initialize ticker instance
3373  *
3374  * @details Called by ticker instance client once to initialize the ticker.
3375  *
3376  * @param instance_index   Index of ticker instance
3377  * @param count_node	   Number of ticker nodes in node array
3378  * @param node		   Pointer to ticker node array
3379  * @param count_user	   Number of users in user array
3380  * @param user		   Pointer to user array of size count_user
3381  * @param count_op	   Number of user operations in user_op array
3382  * @param user_op	   Pointer to user operations array of size count_op
3383  * @param caller_id_get_cb Pointer to function for retrieving caller_id from
3384  *			   user id
3385  * @param sched_cb	   Pointer to function for scheduling ticker_worker
3386  *			   and ticker_job
3387  * @param trigger_set_cb   Pointer to function for setting the compare trigger
3388  *			   ticks value
3389  *
3390  * @return TICKER_STATUS_SUCCESS if initialization was successful, otherwise
3391  * TICKER_STATUS_FAILURE
3392  */
3393 uint8_t ticker_init(uint8_t instance_index, uint8_t count_node, void *node,
3394 		  uint8_t count_user, void *user, uint8_t count_op, void *user_op,
3395 		  ticker_caller_id_get_cb_t caller_id_get_cb,
3396 		  ticker_sched_cb_t sched_cb,
3397 		  ticker_trigger_set_cb_t trigger_set_cb)
3398 {
3399 	struct ticker_instance *instance = &_instance[instance_index];
3400 	struct ticker_user_op *user_op_ = (void *)user_op;
3401 	struct ticker_user *users;
3402 
3403 	if (instance_index >= TICKER_INSTANCE_MAX) {
3404 		return TICKER_STATUS_FAILURE;
3405 	}
3406 
3407 	instance->count_node = count_node;
3408 	instance->nodes = node;
3409 
3410 #if !defined(CONFIG_BT_TICKER_LOW_LAT) && \
3411 	!defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC) && \
3412 	defined(CONFIG_BT_TICKER_PRIORITY_SET)
3413 	while (count_node--) {
3414 		instance->nodes[count_node].priority = 0;
3415 	}
3416 #endif /* !CONFIG_BT_TICKER_LOW_LAT &&
3417 	* !CONFIG_BT_TICKER_SLOT_AGNOSTIC
3418 	* CONFIG_BT_TICKER_PRIORITY_SET
3419 	*/
3420 
3421 	instance->count_user = count_user;
3422 	instance->users = user;
3423 
3424 	/** @todo check if enough ticker_user_op supplied */
3425 
3426 	users = &instance->users[0];
3427 	while (count_user--) {
3428 		users[count_user].user_op = user_op_;
3429 		user_op_ += users[count_user].count_user_op;
3430 		count_op -= users[count_user].count_user_op;
3431 	}
3432 
3433 	if (count_op) {
3434 		return TICKER_STATUS_FAILURE;
3435 	}
3436 
3437 	instance->caller_id_get_cb = caller_id_get_cb;
3438 	instance->sched_cb = sched_cb;
3439 	instance->trigger_set_cb = trigger_set_cb;
3440 
3441 	instance->ticker_id_head = TICKER_NULL;
3442 #if defined(CONFIG_BT_TICKER_CNTR_FREE_RUNNING)
3443 	/* We will synchronize in ticker_job on first ticker start */
3444 	instance->ticks_current = 0U;
3445 #else /* !CONFIG_BT_TICKER_CNTR_FREE_RUNNING */
3446 	/* Synchronize to initialized (in stopped state) counter value */
3447 	instance->ticks_current = cntr_cnt_get();
3448 #endif /* !CONFIG_BT_TICKER_CNTR_FREE_RUNNING */
3449 	instance->ticks_elapsed_first = 0U;
3450 	instance->ticks_elapsed_last = 0U;
3451 
3452 #if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
3453 	instance->ticker_id_slot_previous = TICKER_NULL;
3454 	instance->ticks_slot_previous = 0U;
3455 #endif /* !CONFIG_BT_TICKER_SLOT_AGNOSTIC */
3456 
3457 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
3458 	for (int i = 0; i < TICKER_EXPIRE_INFO_MAX; i++) {
3459 		instance->expire_infos[i].ticker_id = TICKER_NULL;
3460 		instance->expire_infos[i].last = 1;
3461 	}
3462 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
3463 
3464 	return TICKER_STATUS_SUCCESS;
3465 }
3466 
3467 /**
3468  * @brief Check if ticker instance is initialized
3469  *
3470  * @param instance_index Index of ticker instance
3471  *
3472  * @return true if ticker instance is initialized, false otherwise
3473  */
3474 bool ticker_is_initialized(uint8_t instance_index)
3475 {
3476 	return !!(_instance[instance_index].count_node);
3477 }
3478 
3479 /**
3480  * @brief Trigger the ticker worker
3481  *
3482  * @details Schedules the ticker_worker upper half by invoking the
3483  * corresponding mayfly.
3484  *
3485  * @param instance_index Index of ticker instance
3486  */
3487 void ticker_trigger(uint8_t instance_index)
3488 {
3489 	struct ticker_instance *instance;
3490 
3491 	DEBUG_TICKER_ISR(1);
3492 
3493 	instance = &_instance[instance_index];
3494 	if (instance->sched_cb) {
3495 		instance->sched_cb(TICKER_CALL_ID_TRIGGER,
3496 				   TICKER_CALL_ID_WORKER, 1, instance);
3497 	}
3498 
3499 	DEBUG_TICKER_ISR(0);
3500 }
3501 
3502 /**
3503  * @brief Start a ticker node
3504  *
3505  * @details Creates a new user operation of type TICKER_USER_OP_TYPE_START and
3506  * schedules the ticker_job.
3507  *
3508  * @param instance_index     Index of ticker instance
3509  * @param user_id	     Ticker user id. Used for indexing user operations
3510  *			     and mapping to mayfly caller id
3511  * @param ticker_id	     Id of ticker node
3512  * @param ticks_anchor	     Absolute tick count as anchor point for
3513  *			     ticks_first
3514  * @param ticks_first	     Initial number of ticks before first timeout
3515  * @param ticks_periodic     Number of ticks for a periodic ticker node. If 0,
3516  *			     ticker node is treated as one-shot
3517  * @param remainder_periodic Periodic ticks fraction
3518  * @param lazy		     Number of periods to skip (latency). A value of 1
3519  *			     causes skipping every other timeout
3520  * @param ticks_slot	     Slot reservation ticks for node (air-time)
3521  * @param ticks_slot_window  Window in which the slot reservation may be
3522  *			     re-scheduled to avoid collision. Set to 0 for
3523  *			     legacy behavior
3524  * @param fp_timeout_func    Function pointer of function to call at timeout
3525  * @param context	     Context passed in timeout call
3526  * @param fp_op_func	     Function pointer of user operation completion
3527  *			     function
3528  * @param op_context	     Context passed in operation completion call
3529  *
3530  * @return TICKER_STATUS_BUSY if start was successful but not yet completed.
3531  * TICKER_STATUS_FAILURE is returned if there are no more user operations
3532  * available, and TICKER_STATUS_SUCCESS is returned if ticker_job gets to
3533  * run before exiting ticker_start
3534  */
3535 #if defined(CONFIG_BT_TICKER_EXT)
uint8_t ticker_start(uint8_t instance_index, uint8_t user_id, uint8_t ticker_id,
		   uint32_t ticks_anchor, uint32_t ticks_first, uint32_t ticks_periodic,
		   uint32_t remainder_periodic, uint16_t lazy, uint32_t ticks_slot,
		   ticker_timeout_func fp_timeout_func, void *context,
		   ticker_op_func fp_op_func, void *op_context)
{
	/* Thin wrapper: delegate to the extended start with no ext_data */
	return ticker_start_ext(instance_index, user_id, ticker_id,
				ticks_anchor, ticks_first, ticks_periodic,
				remainder_periodic, lazy, ticks_slot,
				fp_timeout_func, context,
				fp_op_func, op_context,
				NULL);
}
3549 
/* Forward declaration of the common start implementation shared by
 * ticker_start_us() and ticker_start_ext(); defined below.
 */
static uint8_t start_us(uint8_t instance_index, uint8_t user_id,
			uint8_t ticker_id, uint32_t ticks_anchor,
			uint32_t ticks_first, uint32_t remainder_first,
			uint32_t ticks_periodic, uint32_t remainder_periodic,
			uint16_t lazy, uint32_t ticks_slot,
			ticker_timeout_func fp_timeout_func, void *context,
			ticker_op_func fp_op_func, void *op_context,
			struct ticker_ext *ext_data);
3558 
uint8_t ticker_start_us(uint8_t instance_index, uint8_t user_id,
			uint8_t ticker_id, uint32_t ticks_anchor,
			uint32_t ticks_first, uint32_t remainder_first,
			uint32_t ticks_periodic, uint32_t remainder_periodic,
			uint16_t lazy, uint32_t ticks_slot,
			ticker_timeout_func fp_timeout_func, void *context,
			ticker_op_func fp_op_func, void *op_context)
{
	/* Thin wrapper: common implementation, no extension data */
	return start_us(instance_index, user_id, ticker_id, ticks_anchor,
			ticks_first, remainder_first,
			ticks_periodic, remainder_periodic,
			lazy, ticks_slot,
			fp_timeout_func, context,
			fp_op_func, op_context,
			NULL);
}
3575 
uint8_t ticker_start_ext(uint8_t instance_index, uint8_t user_id, uint8_t ticker_id,
		       uint32_t ticks_anchor, uint32_t ticks_first,
		       uint32_t ticks_periodic, uint32_t remainder_periodic,
		       uint16_t lazy, uint32_t ticks_slot,
		       ticker_timeout_func fp_timeout_func, void *context,
		       ticker_op_func fp_op_func, void *op_context,
		       struct ticker_ext *ext_data)
{
	/* Thin wrapper: common implementation with zero first-tick
	 * remainder and caller-provided extension data.
	 */
	return start_us(instance_index, user_id, ticker_id, ticks_anchor,
			ticks_first, 0U, ticks_periodic, remainder_periodic,
			lazy, ticks_slot,
			fp_timeout_func, context,
			fp_op_func, op_context,
			ext_data);
}
3591 
/* NOTE: the function body following the #endif below is shared between two
 * alternate signatures - static start_us() when CONFIG_BT_TICKER_EXT is
 * enabled, and public ticker_start_us() otherwise. Only one signature is
 * compiled in; both are immediately followed by the common body.
 */
static uint8_t start_us(uint8_t instance_index, uint8_t user_id,
			uint8_t ticker_id, uint32_t ticks_anchor,
			uint32_t ticks_first, uint32_t remainder_first,
			uint32_t ticks_periodic, uint32_t remainder_periodic,
			uint16_t lazy, uint32_t ticks_slot,
			ticker_timeout_func fp_timeout_func, void *context,
			ticker_op_func fp_op_func, void *op_context,
			struct ticker_ext *ext_data)

#else /* !CONFIG_BT_TICKER_EXT */
uint8_t ticker_start(uint8_t instance_index, uint8_t user_id, uint8_t ticker_id,
		   uint32_t ticks_anchor, uint32_t ticks_first, uint32_t ticks_periodic,
		   uint32_t remainder_periodic, uint16_t lazy, uint32_t ticks_slot,
		   ticker_timeout_func fp_timeout_func, void *context,
		   ticker_op_func fp_op_func, void *op_context)
{
	/* Thin wrapper: delegate with zero first-tick remainder */
	return ticker_start_us(instance_index, user_id,
			       ticker_id, ticks_anchor,
			       ticks_first, 0U,
			       ticks_periodic, remainder_periodic,
			       lazy, ticks_slot,
			       fp_timeout_func, context,
			       fp_op_func, op_context);
}

uint8_t ticker_start_us(uint8_t instance_index, uint8_t user_id,
			uint8_t ticker_id, uint32_t ticks_anchor,
			uint32_t ticks_first, uint32_t remainder_first,
			uint32_t ticks_periodic, uint32_t remainder_periodic,
			uint16_t lazy, uint32_t ticks_slot,
			ticker_timeout_func fp_timeout_func, void *context,
			ticker_op_func fp_op_func, void *op_context)
#endif /* !CONFIG_BT_TICKER_EXT */

/* Common body: enqueue a TICKER_USER_OP_TYPE_START operation on the user's
 * queue and schedule ticker_job to process it.
 */
{
	struct ticker_instance *instance = &_instance[instance_index];
	struct ticker_user_op *user_op;
	struct ticker_user *user;
	uint8_t last;

	user = &instance->users[user_id];

	/* Compute next 'last' index with wrap; fail if queue is full */
	last = user->last + 1;
	if (last >= user->count_user_op) {
		last = 0U;
	}

	if (last == user->first) {
		return TICKER_STATUS_FAILURE;
	}

	/* Fill the operation slot at the current 'last' position */
	user_op = &user->user_op[user->last];
	user_op->op = TICKER_USER_OP_TYPE_START;
	user_op->id = ticker_id;
	user_op->params.start.ticks_at_start = ticks_anchor;
	user_op->params.start.ticks_first = ticks_first;
	user_op->params.start.ticks_periodic = ticks_periodic;

#if defined(CONFIG_BT_TICKER_REMAINDER_SUPPORT)
	user_op->params.start.remainder_periodic = remainder_periodic;

#if defined(CONFIG_BT_TICKER_START_REMAINDER)
	user_op->params.start.remainder_first = remainder_first;
#else /* !CONFIG_BT_TICKER_START_REMAINDER */
	ARG_UNUSED(remainder_first);
#endif /* !CONFIG_BT_TICKER_START_REMAINDER */
#endif /* CONFIG_BT_TICKER_REMAINDER_SUPPORT */

#if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
	user_op->params.start.ticks_slot = ticks_slot;
#endif
	user_op->params.start.lazy = lazy;
#if defined(CONFIG_BT_TICKER_EXT)
	user_op->params.start.ext_data = ext_data;
#endif
	user_op->params.start.fp_timeout_func = fp_timeout_func;
	user_op->params.start.context = context;
	user_op->status = TICKER_STATUS_BUSY;
	user_op->fp_op_func = fp_op_func;
	user_op->op_context = op_context;

	/* Make sure transaction is completed before committing */
	cpu_dmb();
	user->last = last;

	instance->sched_cb(instance->caller_id_get_cb(user_id),
			   TICKER_CALL_ID_JOB, 0, instance);

	/* Status may already have been updated if ticker_job ran before
	 * this returns; otherwise remains TICKER_STATUS_BUSY.
	 */
	return user_op->status;
}
3682 
3683 #if defined(CONFIG_BT_TICKER_UPDATE)
3684 /**
3685  * @brief Update a ticker node
3686  *
3687  * @details Creates a new user operation of type TICKER_USER_OP_TYPE_UPDATE and
3688  * schedules the ticker_job.
3689  *
3690  * @param instance_index     Index of ticker instance
3691  * @param user_id	     Ticker user id. Used for indexing user operations
3692  *			     and mapping to mayfly caller id
3693  * @param ticker_id	     Id of ticker node
3694  * @param ticks_drift_plus   Number of ticks to add for drift compensation
3695  * @param ticks_drift_minus  Number of ticks to subtract for drift compensation
3696  * @param ticks_slot_plus    Number of ticks to add to slot reservation
3697  * @param ticks_slot_minus   Number of ticks to add subtract from slot
3698  *			     reservation
3699  * @param lazy		     Number of periods to skip (latency). A value of 0
3700  *			     means no action. 1 means no latency (normal). A
3701  *			     value >1 means latency = lazy - 1
3702  * @param force		     Force update to take effect immediately. With
3703  *			     force = 0, update is scheduled to take effect as
3704  *			     soon as possible
3705  * @param fp_op_func	     Function pointer of user operation completion
3706  *			     function
3707  * @param op_context	     Context passed in operation completion call
3708  * @param must_expire	     Disable, enable or ignore the must-expire state.
3709  *			     A value of 0 means no change, 1 means disable and
3710  *			     2 means enable.
3711  *
3712  * @return TICKER_STATUS_BUSY if update was successful but not yet completed.
3713  * TICKER_STATUS_FAILURE is returned if there are no more user operations
3714  * available, and TICKER_STATUS_SUCCESS is returned if ticker_job gets to run
3715  * before exiting ticker_update
3716  */
3717 uint8_t ticker_update(uint8_t instance_index, uint8_t user_id,
3718 		       uint8_t ticker_id, uint32_t ticks_drift_plus,
3719 		       uint32_t ticks_drift_minus, uint32_t ticks_slot_plus,
3720 		       uint32_t ticks_slot_minus, uint16_t lazy, uint8_t force,
3721 		       ticker_op_func fp_op_func, void *op_context)
3722 #if defined(CONFIG_BT_TICKER_EXT)
3723 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
3724 {
3725 	return ticker_update_ext(instance_index, user_id, ticker_id,
3726 				 ticks_drift_plus, ticks_drift_minus,
3727 				 ticks_slot_plus, ticks_slot_minus, lazy,
3728 				 force, fp_op_func, op_context, 0U, ticker_id);
3729 }
3730 
3731 uint8_t ticker_update_ext(uint8_t instance_index, uint8_t user_id,
3732 			   uint8_t ticker_id, uint32_t ticks_drift_plus,
3733 			   uint32_t ticks_drift_minus,
3734 			   uint32_t ticks_slot_plus, uint32_t ticks_slot_minus,
3735 			   uint16_t lazy, uint8_t force,
3736 			   ticker_op_func fp_op_func, void *op_context,
3737 			   uint8_t must_expire, uint8_t expire_info_id)
3738 #else /* !CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
3739 {
3740 	return ticker_update_ext(instance_index, user_id, ticker_id,
3741 				 ticks_drift_plus, ticks_drift_minus,
3742 				 ticks_slot_plus, ticks_slot_minus, lazy,
3743 				 force, fp_op_func, op_context, 0U);
3744 }
3745 
3746 uint8_t ticker_update_ext(uint8_t instance_index, uint8_t user_id,
3747 			   uint8_t ticker_id, uint32_t ticks_drift_plus,
3748 			   uint32_t ticks_drift_minus,
3749 			   uint32_t ticks_slot_plus, uint32_t ticks_slot_minus,
3750 			   uint16_t lazy, uint8_t force,
3751 			   ticker_op_func fp_op_func, void *op_context,
3752 			   uint8_t must_expire)
3753 #endif /* !CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
3754 #endif /* CONFIG_BT_TICKER_EXT */
3755 {
3756 	struct ticker_instance *instance = &_instance[instance_index];
3757 	struct ticker_user_op *user_op;
3758 	struct ticker_user *user;
3759 	uint8_t last;
3760 
3761 	user = &instance->users[user_id];
3762 
3763 	last = user->last + 1;
3764 	if (last >= user->count_user_op) {
3765 		last = 0U;
3766 	}
3767 
3768 	if (last == user->first) {
3769 		return TICKER_STATUS_FAILURE;
3770 	}
3771 
3772 	user_op = &user->user_op[user->last];
3773 	user_op->op = TICKER_USER_OP_TYPE_UPDATE;
3774 	user_op->id = ticker_id;
3775 	user_op->params.update.ticks_drift_plus = ticks_drift_plus;
3776 	user_op->params.update.ticks_drift_minus = ticks_drift_minus;
3777 #if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC)
3778 	user_op->params.update.ticks_slot_plus = ticks_slot_plus;
3779 	user_op->params.update.ticks_slot_minus = ticks_slot_minus;
3780 #endif /* CONFIG_BT_TICKER_SLOT_AGNOSTIC */
3781 	user_op->params.update.lazy = lazy;
3782 	user_op->params.update.force = force;
3783 #if defined(CONFIG_BT_TICKER_EXT)
3784 #if !defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC) && !defined(CONFIG_BT_TICKER_LOW_LAT)
3785 	user_op->params.update.must_expire = must_expire;
3786 #endif /* CONFIG_BT_TICKER_EXT && !CONFIG_BT_TICKER_SLOT_AGNOSTIC && !CONFIG_BT_TICKER_LOW_LAT */
3787 #if defined(CONFIG_BT_TICKER_EXT_EXPIRE_INFO)
3788 	user_op->params.update.expire_info_id = expire_info_id;
3789 #endif /* CONFIG_BT_TICKER_EXT_EXPIRE_INFO */
3790 #endif /* CONFIG_BT_TICKER_EXT */
3791 	user_op->status = TICKER_STATUS_BUSY;
3792 	user_op->fp_op_func = fp_op_func;
3793 	user_op->op_context = op_context;
3794 
3795 	/* Make sure transaction is completed before committing */
3796 	cpu_dmb();
3797 	user->last = last;
3798 
3799 	instance->sched_cb(instance->caller_id_get_cb(user_id),
3800 			   TICKER_CALL_ID_JOB, 0, instance);
3801 
3802 	return user_op->status;
3803 }
3804 #endif /* CONFIG_BT_TICKER_UPDATE */
3805 
3806 /**
3807  * @brief Yield a ticker node with supplied absolute ticks reference
3808  *
3809  * @details Creates a new user operation of type TICKER_USER_OP_TYPE_YIELD_ABS
3810  * and schedules the ticker_job.
3811  *
3812  * @param instance_index     Index of ticker instance
3813  * @param user_id	     Ticker user id. Used for indexing user operations
3814  *			     and mapping to mayfly caller id
3815  * @param ticks_at_yield     Absolute tick count at ticker yield request
3816  * @param fp_op_func	     Function pointer of user operation completion
3817  *			     function
3818  * @param op_context	     Context passed in operation completion call
3819  *
3820  * @return TICKER_STATUS_BUSY if stop was successful but not yet completed.
3821  * TICKER_STATUS_FAILURE is returned if there are no more user operations
3822  * available, and TICKER_STATUS_SUCCESS is returned if ticker_job gets to run
3823  * before exiting ticker_stop
3824  */
3825 uint8_t ticker_yield_abs(uint8_t instance_index, uint8_t user_id,
3826 			  uint8_t ticker_id, uint32_t ticks_at_yield,
3827 			  ticker_op_func fp_op_func, void *op_context)
3828 {
3829 	struct ticker_instance *instance = &_instance[instance_index];
3830 	struct ticker_user_op *user_op;
3831 	struct ticker_user *user;
3832 	uint8_t last;
3833 
3834 	user = &instance->users[user_id];
3835 
3836 	last = user->last + 1;
3837 	if (last >= user->count_user_op) {
3838 		last = 0U;
3839 	}
3840 
3841 	if (last == user->first) {
3842 		return TICKER_STATUS_FAILURE;
3843 	}
3844 
3845 	user_op = &user->user_op[user->last];
3846 	user_op->op = TICKER_USER_OP_TYPE_YIELD_ABS;
3847 	user_op->id = ticker_id;
3848 	user_op->params.yield.ticks_at_yield = ticks_at_yield;
3849 	user_op->status = TICKER_STATUS_BUSY;
3850 	user_op->fp_op_func = fp_op_func;
3851 	user_op->op_context = op_context;
3852 
3853 	/* Make sure transaction is completed before committing */
3854 	cpu_dmb();
3855 	user->last = last;
3856 
3857 	instance->sched_cb(instance->caller_id_get_cb(user_id),
3858 			   TICKER_CALL_ID_JOB, 0, instance);
3859 
3860 	return user_op->status;
3861 }
3862 
3863 /**
3864  * @brief Stop a ticker node
3865  *
3866  * @details Creates a new user operation of type TICKER_USER_OP_TYPE_STOP and
3867  * schedules the ticker_job.
3868  *
3869  * @param instance_index     Index of ticker instance
3870  * @param user_id	     Ticker user id. Used for indexing user operations
3871  *			     and mapping to mayfly caller id
3872  * @param fp_op_func	     Function pointer of user operation completion
3873  *			     function
3874  * @param op_context	     Context passed in operation completion call
3875  *
3876  * @return TICKER_STATUS_BUSY if stop was successful but not yet completed.
3877  * TICKER_STATUS_FAILURE is returned if there are no more user operations
3878  * available, and TICKER_STATUS_SUCCESS is returned if ticker_job gets to run
3879  * before exiting ticker_stop
3880  */
3881 uint8_t ticker_stop(uint8_t instance_index, uint8_t user_id, uint8_t ticker_id,
3882 		  ticker_op_func fp_op_func, void *op_context)
3883 {
3884 	struct ticker_instance *instance = &_instance[instance_index];
3885 	struct ticker_user_op *user_op;
3886 	struct ticker_user *user;
3887 	uint8_t last;
3888 
3889 	user = &instance->users[user_id];
3890 
3891 	last = user->last + 1;
3892 	if (last >= user->count_user_op) {
3893 		last = 0U;
3894 	}
3895 
3896 	if (last == user->first) {
3897 		return TICKER_STATUS_FAILURE;
3898 	}
3899 
3900 	user_op = &user->user_op[user->last];
3901 	user_op->op = TICKER_USER_OP_TYPE_STOP;
3902 	user_op->id = ticker_id;
3903 	user_op->status = TICKER_STATUS_BUSY;
3904 	user_op->fp_op_func = fp_op_func;
3905 	user_op->op_context = op_context;
3906 
3907 	/* Make sure transaction is completed before committing */
3908 	cpu_dmb();
3909 	user->last = last;
3910 
3911 	instance->sched_cb(instance->caller_id_get_cb(user_id),
3912 			   TICKER_CALL_ID_JOB, 0, instance);
3913 
3914 	return user_op->status;
3915 }
3916 
3917 /**
3918  * @brief Stop a ticker node with supplied absolute ticks reference
3919  *
3920  * @details Creates a new user operation of type TICKER_USER_OP_TYPE_STOP_ABS
3921  * and schedules the ticker_job.
3922  *
3923  * @param instance_index     Index of ticker instance
3924  * @param user_id	     Ticker user id. Used for indexing user operations
3925  *			     and mapping to mayfly caller id
3926  * @param ticks_at_stop      Absolute tick count at ticker stop request
3927  * @param fp_op_func	     Function pointer of user operation completion
3928  *			     function
3929  * @param op_context	     Context passed in operation completion call
3930  *
3931  * @return TICKER_STATUS_BUSY if stop was successful but not yet completed.
3932  * TICKER_STATUS_FAILURE is returned if there are no more user operations
3933  * available, and TICKER_STATUS_SUCCESS is returned if ticker_job gets to run
3934  * before exiting ticker_stop
3935  */
3936 uint8_t ticker_stop_abs(uint8_t instance_index, uint8_t user_id,
3937 			 uint8_t ticker_id, uint32_t ticks_at_stop,
3938 			 ticker_op_func fp_op_func, void *op_context)
3939 {
3940 	struct ticker_instance *instance = &_instance[instance_index];
3941 	struct ticker_user_op *user_op;
3942 	struct ticker_user *user;
3943 	uint8_t last;
3944 
3945 	user = &instance->users[user_id];
3946 
3947 	last = user->last + 1;
3948 	if (last >= user->count_user_op) {
3949 		last = 0U;
3950 	}
3951 
3952 	if (last == user->first) {
3953 		return TICKER_STATUS_FAILURE;
3954 	}
3955 
3956 	user_op = &user->user_op[user->last];
3957 	user_op->op = TICKER_USER_OP_TYPE_STOP_ABS;
3958 	user_op->id = ticker_id;
3959 	user_op->params.yield.ticks_at_yield = ticks_at_stop;
3960 	user_op->status = TICKER_STATUS_BUSY;
3961 	user_op->fp_op_func = fp_op_func;
3962 	user_op->op_context = op_context;
3963 
3964 	/* Make sure transaction is completed before committing */
3965 	cpu_dmb();
3966 	user->last = last;
3967 
3968 	instance->sched_cb(instance->caller_id_get_cb(user_id),
3969 			   TICKER_CALL_ID_JOB, 0, instance);
3970 
3971 	return user_op->status;
3972 }
3973 
3974 #if defined(CONFIG_BT_TICKER_NEXT_SLOT_GET)
3975 /**
3976  * @brief Get next ticker node slot
3977  *
3978  * @details Gets the next ticker which has slot ticks specified and
3979  * return the ticker id and accumulated ticks until expiration. If no
3980  * ticker nodes have slot ticks, the next ticker node is returned.
3981  * If no head id is provided (TICKER_NULL) the first node is returned.
3982  *
3983  * @param instance_index     Index of ticker instance
3984  * @param user_id	     Ticker user id. Used for indexing user operations
3985  *			     and mapping to mayfly caller id
3986  * @param ticker_id	     Pointer to id of ticker node
3987  * @param ticks_current	     Pointer to current ticks count
3988  * @param ticks_to_expire    Pointer to ticks to expire
3989  * @param fp_op_func	     Function pointer of user operation completion
3990  *			     function
3991  * @param op_context	     Context passed in operation completion call
3992  *
3993  * @return TICKER_STATUS_BUSY if request was successful but not yet completed.
3994  * TICKER_STATUS_FAILURE is returned if there are no more user operations
3995  * available, and TICKER_STATUS_SUCCESS is returned if ticker_job gets to run
3996  * before exiting ticker_next_slot_get
3997  */
3998 uint8_t ticker_next_slot_get(uint8_t instance_index, uint8_t user_id,
3999 			      uint8_t *ticker_id, uint32_t *ticks_current,
4000 			      uint32_t *ticks_to_expire,
4001 			      ticker_op_func fp_op_func, void *op_context)
4002 {
4003 #if defined(CONFIG_BT_TICKER_LAZY_GET) || \
4004 	defined(CONFIG_BT_TICKER_REMAINDER_GET) || \
4005 	defined(CONFIG_BT_TICKER_NEXT_SLOT_GET_MATCH)
4006 	return ticker_next_slot_get_ext(instance_index, user_id, ticker_id,
4007 					ticks_current, ticks_to_expire, NULL,
4008 					NULL, NULL, NULL, fp_op_func,
4009 					op_context);
4010 }
4011 
4012 uint8_t ticker_next_slot_get_ext(uint8_t instance_index, uint8_t user_id,
4013 				  uint8_t *ticker_id, uint32_t *ticks_current,
4014 				  uint32_t *ticks_to_expire,
4015 				  uint32_t *remainder, uint16_t *lazy,
4016 				  ticker_op_match_func fp_match_op_func,
4017 				  void *match_op_context,
4018 				  ticker_op_func fp_op_func, void *op_context)
4019 {
4020 #endif /* CONFIG_BT_TICKER_LAZY_GET ||
4021 	* CONFIG_BT_TICKER_REMAINDER_GET ||
4022 	* CONFIG_BT_TICKER_NEXT_SLOT_GET_MATCH
4023 	*/
4024 	struct ticker_instance *instance = &_instance[instance_index];
4025 	struct ticker_user_op *user_op;
4026 	struct ticker_user *user;
4027 	uint8_t last;
4028 
4029 	user = &instance->users[user_id];
4030 
4031 	last = user->last + 1;
4032 	if (last >= user->count_user_op) {
4033 		last = 0U;
4034 	}
4035 
4036 	if (last == user->first) {
4037 		return TICKER_STATUS_FAILURE;
4038 	}
4039 
4040 	user_op = &user->user_op[user->last];
4041 	user_op->op = TICKER_USER_OP_TYPE_SLOT_GET;
4042 	user_op->id = TICKER_NULL;
4043 	user_op->params.slot_get.ticker_id = ticker_id;
4044 	user_op->params.slot_get.ticks_current = ticks_current;
4045 	user_op->params.slot_get.ticks_to_expire = ticks_to_expire;
4046 #if defined(CONFIG_BT_TICKER_REMAINDER_GET)
4047 	user_op->params.slot_get.remainder = remainder;
4048 #endif /* CONFIG_BT_TICKER_REMAINDER_GET */
4049 #if defined(CONFIG_BT_TICKER_LAZY_GET)
4050 	user_op->params.slot_get.lazy = lazy;
4051 #endif /* CONFIG_BT_TICKER_LAZY_GET */
4052 #if defined(CONFIG_BT_TICKER_NEXT_SLOT_GET_MATCH)
4053 	user_op->params.slot_get.fp_match_op_func = fp_match_op_func;
4054 	user_op->params.slot_get.match_op_context = match_op_context;
4055 #endif /* CONFIG_BT_TICKER_NEXT_SLOT_GET_MATCH */
4056 	user_op->status = TICKER_STATUS_BUSY;
4057 	user_op->fp_op_func = fp_op_func;
4058 	user_op->op_context = op_context;
4059 
4060 	/* Make sure transaction is completed before committing */
4061 	cpu_dmb();
4062 	user->last = last;
4063 
4064 	instance->sched_cb(instance->caller_id_get_cb(user_id),
4065 			   TICKER_CALL_ID_JOB, 0, instance);
4066 
4067 	return user_op->status;
4068 }
4069 #endif /* CONFIG_BT_TICKER_NEXT_SLOT_GET */
4070 
4071 #if defined(CONFIG_BT_TICKER_JOB_IDLE_GET)
4072 /**
4073  * @brief Get a callback at the end of ticker job execution
4074  *
4075  * @details Operation completion callback is called at the end of the
4076  * ticker_job execution. The user operation is immutable.
4077  *
4078  * @param instance_index     Index of ticker instance
4079  * @param user_id	     Ticker user id. Used for indexing user operations
4080  *			     and mapping to mayfly caller id
4081  * @param fp_op_func	     Function pointer of user operation completion
4082  *			     function
4083  * @param op_context	     Context passed in operation completion call
4084  *
4085  * @return TICKER_STATUS_BUSY if request was successful but not yet completed.
4086  * TICKER_STATUS_FAILURE is returned if there are no more user operations
4087  * available, and TICKER_STATUS_SUCCESS is returned if ticker_job gets to run
4088  * before exiting ticker_job_idle_get
4089  */
4090 uint8_t ticker_job_idle_get(uint8_t instance_index, uint8_t user_id,
4091 			  ticker_op_func fp_op_func, void *op_context)
4092 {
4093 	struct ticker_instance *instance = &_instance[instance_index];
4094 	struct ticker_user_op *user_op;
4095 	struct ticker_user *user;
4096 	uint8_t last;
4097 
4098 	user = &instance->users[user_id];
4099 
4100 	last = user->last + 1;
4101 	if (last >= user->count_user_op) {
4102 		last = 0U;
4103 	}
4104 
4105 	if (last == user->first) {
4106 		return TICKER_STATUS_FAILURE;
4107 	}
4108 
4109 	user_op = &user->user_op[user->last];
4110 	user_op->op = TICKER_USER_OP_TYPE_IDLE_GET;
4111 	user_op->id = TICKER_NULL;
4112 	user_op->status = TICKER_STATUS_BUSY;
4113 	user_op->fp_op_func = fp_op_func;
4114 	user_op->op_context = op_context;
4115 
4116 	/* Make sure transaction is completed before committing */
4117 	cpu_dmb();
4118 	user->last = last;
4119 
4120 	instance->sched_cb(instance->caller_id_get_cb(user_id),
4121 			   TICKER_CALL_ID_JOB, 0, instance);
4122 
4123 	return user_op->status;
4124 }
4125 #endif /* CONFIG_BT_TICKER_JOB_IDLE_GET */
4126 
4127 #if !defined(CONFIG_BT_TICKER_LOW_LAT) && \
4128 	!defined(CONFIG_BT_TICKER_SLOT_AGNOSTIC) && \
4129 	defined(CONFIG_BT_TICKER_PRIORITY_SET)
4130 /**
4131  * @brief Set ticker node priority
4132  *
4133  * @param instance_index     Index of ticker instance
4134  * @param user_id	     Ticker user id. Used for indexing user operations
4135  *			     and mapping to mayfly caller id
4136  * @param ticker_id	     Id of ticker node to set priority on
4137  * @param priority	     Priority to set. Range [-128..127], default is 0.
4138  *			     Lover value equals higher priority. Setting
4139  *			     priority to -128 (TICKER_PRIORITY_CRITICAL) makes
4140  *			     the node win all collision challenges. Only one
4141  *			     node can have this priority assigned.
4142  * @param fp_op_func	     Function pointer of user operation completion
4143  *			     function
4144  * @param op_context	     Context passed in operation completion call
4145  *
4146  * @return TICKER_STATUS_BUSY if request was successful but not yet completed.
4147  * TICKER_STATUS_FAILURE is returned if there are no more user operations
4148  * available, and TICKER_STATUS_SUCCESS is returned if ticker_job gets to run
4149  * before exiting ticker_priority_set
4150  */
4151 uint8_t ticker_priority_set(uint8_t instance_index, uint8_t user_id, uint8_t ticker_id,
4152 			  int8_t priority, ticker_op_func fp_op_func,
4153 			  void *op_context)
4154 {
4155 	struct ticker_instance *instance = &_instance[instance_index];
4156 	struct ticker_user_op *user_op;
4157 	struct ticker_user *user;
4158 	uint8_t last;
4159 
4160 	user = &instance->users[user_id];
4161 
4162 	last = user->last + 1;
4163 	if (last >= user->count_user_op) {
4164 		last = 0U;
4165 	}
4166 
4167 	if (last == user->first) {
4168 		return TICKER_STATUS_FAILURE;
4169 	}
4170 
4171 	user_op = &user->user_op[user->last];
4172 	user_op->op = TICKER_USER_OP_TYPE_PRIORITY_SET;
4173 	user_op->id = ticker_id;
4174 	user_op->params.priority_set.priority = priority;
4175 	user_op->status = TICKER_STATUS_BUSY;
4176 	user_op->fp_op_func = fp_op_func;
4177 	user_op->op_context = op_context;
4178 
4179 	/* Make sure transaction is completed before committing */
4180 	cpu_dmb();
4181 	user->last = last;
4182 
4183 	instance->sched_cb(instance->caller_id_get_cb(user_id),
4184 			   TICKER_CALL_ID_JOB, 0, instance);
4185 
4186 	return user_op->status;
4187 }
4188 #endif /* !CONFIG_BT_TICKER_LOW_LAT &&
4189 	* !CONFIG_BT_TICKER_SLOT_AGNOSTIC &&
4190 	* CONFIG_BT_TICKER_PRIORITY_SET
4191 	*/
4192 
4193 /**
4194  * @brief Schedule ticker job
4195  *
4196  * @param instance_index Index of ticker instance
4197  * @param user_id	 Ticker user id. Maps to mayfly caller id
4198  */
4199 void ticker_job_sched(uint8_t instance_index, uint8_t user_id)
4200 {
4201 	struct ticker_instance *instance = &_instance[instance_index];
4202 
4203 	instance->sched_cb(instance->caller_id_get_cb(user_id),
4204 			   TICKER_CALL_ID_JOB, 0, instance);
4205 }
4206 
4207 /**
4208  * @brief Get current absolute tick count
4209  *
4210  * @return Absolute tick count
4211  */
4212 uint32_t ticker_ticks_now_get(void)
4213 {
4214 	return cntr_cnt_get();
4215 }
4216 
4217 /**
4218  * @brief Get difference between two tick counts
4219  *
4220  * @details Subtract two counts and truncate to correct HW dependent counter
4221  * bit width
4222  *
4223  * @param ticks_now Highest tick count (now)
4224  * @param ticks_old Tick count to subtract from ticks_now
4225  */
4226 uint32_t ticker_ticks_diff_get(uint32_t ticks_now, uint32_t ticks_old)
4227 {
4228 	return ((ticks_now - ticks_old) & HAL_TICKER_CNTR_MASK);
4229 }
4230