/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <zephyr/init.h>
#include <zephyr/drivers/timer/system_timer.h>
#include <zephyr/sys_clock.h>
#include <zephyr/spinlock.h>
#include <cmsis_core.h>
#include <zephyr/irq.h>
#include <zephyr/sys/util.h>
#include <zephyr/drivers/counter.h>

#include "cortex_m_systick.h"

#define COUNTER_MAX 0x00ffffff
#define TIMER_STOPPED 0xff000000
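/* Note: TIMER_STOPPED is a sentinel that lies above the 24-bit SysTick
 * range (0xff000000 > COUNTER_MAX), so it can never collide with a real
 * last_load value.
 */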

#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME)
extern unsigned int z_clock_hw_cycles_per_sec;
/* CYC_PER_TICK must fit within the SysTick counter's limits (<1 GHz) */
#define CYC_PER_TICK (z_clock_hw_cycles_per_sec/CONFIG_SYS_CLOCK_TICKS_PER_SEC)
#else
#define CYC_PER_TICK (CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC/CONFIG_SYS_CLOCK_TICKS_PER_SEC)
#if (COUNTER_MAX / CYC_PER_TICK) == 1
#pragma message("tickless does nothing because CONFIG_SYS_CLOCK_TICKS_PER_SEC is too low")
#endif
#endif

/* Guard against MAX_TICKS being zero or negative when CYC_PER_TICK is large */
#define _MAX_TICKS (int)((k_ticks_t)(COUNTER_MAX / CYC_PER_TICK) - 1)
#define MAX_TICKS ((_MAX_TICKS > 0) ? _MAX_TICKS : 1)
#define MAX_CYCLES (MAX_TICKS * CYC_PER_TICK)
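/* Illustrative example (assumed clock values, not from any particular
 * SoC): with a 48 MHz core clock and 100 ticks/s, CYC_PER_TICK is
 * 480000, so MAX_TICKS = (0xffffff / 480000) - 1 = 33 and
 * MAX_CYCLES = 33 * 480000 = 15840000, which still fits in the 24-bit
 * LOAD register.
 */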

/* Minimum cycles in the future to try to program.  Note that this is
 * NOT simply "enough cycles to get the counter read and reprogrammed
 * reliably" -- it becomes the minimum value of the LOAD register, and
 * thus reflects how much time we can reliably see expire between
 * calls to elapsed() to read the COUNTFLAG bit.  So it needs to be
 * set to be larger than the maximum time the interrupt might be
 * masked.  Choosing a fraction of a tick is probably a good enough
 * default, with an absolute minimum of 1k cyc.
 */
#define MIN_DELAY MAX(1024U, ((uint32_t)CYC_PER_TICK/16U))
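/* Continuing the illustrative 48 MHz / 100 ticks/s example above:
 * CYC_PER_TICK / 16 = 30000 > 1024, so MIN_DELAY would be 30000 cycles,
 * i.e. 1/16 of a tick or 625 us.
 */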

static struct k_spinlock lock;

static uint32_t last_load;

#ifdef CONFIG_CORTEX_M_SYSTICK_64BIT_CYCLE_COUNTER
typedef uint64_t cycle_t;
#else
typedef uint32_t cycle_t;
#endif

/*
 * This local variable holds the number of elapsed SysTick HW cycles
 * and is updated in sys_clock_isr() and sys_clock_set_timeout().
 *
 * Note:
 *  At an arbitrary point in time the "current" value of the SysTick
 *  HW timer is calculated as:
 *
 * t = cycle_counter + elapsed();
 */
static cycle_t cycle_count;

/*
 * This local variable holds the amount of elapsed SysTick HW cycles
 * that have been announced to the kernel.
 *
 * Note:
 * Additions/subtractions/comparisons of 64-bit values on 32-bit systems
 * are very cheap. Divisions are not. Make sure the difference between
 * cycle_count and announced_cycles is stored in a 32-bit variable before
 * dividing it by CYC_PER_TICK.
 */
static cycle_t announced_cycles;
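/* A minimal sketch of the pattern described above (this is how the ISR
 * below consumes these counters): the subtraction is done in 64 bits
 * but narrowed to 32 bits before the division, so the compiler can
 * emit a cheap 32-bit divide:
 *
 *	uint32_t dcycles = (uint32_t)(cycle_count - announced_cycles);
 *	uint32_t dticks = dcycles / CYC_PER_TICK;
 *	announced_cycles += (cycle_t)dticks * CYC_PER_TICK;
 */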

/*
 * This local variable holds the amount of elapsed HW cycles due to
 * SysTick timer wraps ('overflows') and is used in the calculation
 * in the elapsed() function, as well as in the updates to cycle_count.
 *
 * Note:
 * Each time cycle_count is updated with the value from overflow_cyc,
 * the overflow_cyc must be reset to zero.
 */
static volatile uint32_t overflow_cyc;

#if !defined(CONFIG_CORTEX_M_SYSTICK_LPM_TIMER_NONE)
/* This local variable indicates that the timeout was set right before
 * entering idle state.
 *
 * It is used for chips that have to use a separate idle timer in such
 * a case because the Cortex-M SysTick is not clocked in the low power
 * mode state.
 */
static bool timeout_idle;

#if !defined(CONFIG_CORTEX_M_SYSTICK_RESET_BY_LPM)
/* Cycle counter before entering the idle state. */
static cycle_t cycle_pre_idle;
#endif /* !CONFIG_CORTEX_M_SYSTICK_RESET_BY_LPM */

#if defined(CONFIG_CORTEX_M_SYSTICK_LPM_TIMER_COUNTER)
/* Idle timer value before entering the idle state. */
static uint32_t idle_timer_pre_idle;

/* How many ticks the system was expected to sleep when
 * the idle timer was configured. Used to determine whether the
 * counter overflowed or not.
 */
static uint32_t idle_timer_scheduled_sleep_ticks;

/* Idle timer used for timekeeping while in the idle state */
static const struct device *idle_timer = DEVICE_DT_GET(DT_CHOSEN(zephyr_cortex_m_idle_timer));
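/* DEVICE_DT_GET(DT_CHOSEN(...)) resolves at build time, so the board
 * devicetree must provide the chosen node, e.g. (illustrative node
 * label; any device implementing the counter API works):
 *
 *	chosen {
 *		zephyr,cortex-m-idle-timer = &rtc0;
 *	};
 */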

/* Stub callback to satisfy Counter API (cannot be NULL) */
static void idle_timer_alarm_stub(const struct device *dev, uint8_t chan_id,
				uint32_t ticks, void *user_data)
{
	ARG_UNUSED(dev);
	ARG_UNUSED(chan_id);
	ARG_UNUSED(ticks);
	ARG_UNUSED(user_data);
}
#endif /* CONFIG_CORTEX_M_SYSTICK_LPM_TIMER_COUNTER */
#endif /* !CONFIG_CORTEX_M_SYSTICK_LPM_TIMER_NONE */

#if defined(CONFIG_CORTEX_M_SYSTICK_LPM_TIMER_COUNTER)
/**
 * To simplify the driver, implement the callout to the Counter API
 * as hooks that would be provided by platform drivers if
 * CONFIG_CORTEX_M_SYSTICK_LPM_TIMER_HOOKS was selected instead.
 */
void z_cms_lptim_hook_on_lpm_entry(uint64_t max_lpm_time_us)
{
	struct counter_alarm_cfg cfg = {
		.callback = idle_timer_alarm_stub,
		.ticks = counter_us_to_ticks(idle_timer, max_lpm_time_us),
		.user_data = NULL,
		.flags = 0,
	};

	/**
	 * Disable the counter alarm in case it was already running.
	 */
	counter_cancel_channel_alarm(idle_timer, 0);

	/* Set the alarm using the timer that runs while idle.
	 * Any needed ramp-up/setup time, lower accuracy etc. should be
	 * accounted for in the exit latency of the power state definition.
	 */
	counter_set_channel_alarm(idle_timer, 0, &cfg);

	/* Store current value of the selected timer to calculate a
	 * difference in measurements after exiting the idle state.
	 */
	counter_get_value(idle_timer, &idle_timer_pre_idle);

	idle_timer_scheduled_sleep_ticks = cfg.ticks;
}

uint64_t z_cms_lptim_hook_on_lpm_exit(void)
{
	/**
	 * Calculate how much time elapsed according to counter.
	 */
	uint32_t idle_timer_post, idle_timer_diff, idle_timer_top;
	bool idle_timer_int_pending, idle_timer_wrap;

	counter_get_value(idle_timer, &idle_timer_post);
	idle_timer_int_pending = counter_get_pending_int(idle_timer) ? true : false;
	idle_timer_top = counter_get_top_value(idle_timer);

	/**
	 * Check for counter timer overflow
	 * (TODO: this doesn't work for downcounting timers!)
	 */
	if (idle_timer_pre_idle > idle_timer_post) {
		/* Pre > Post: counter wrapped (overflow occurred) */
		idle_timer_wrap = true;
	} else if (idle_timer_pre_idle == idle_timer_post) {
		/* Pre == Post: consider wrap only if interrupt is pending */
		idle_timer_wrap = idle_timer_int_pending;
	} else {
		/* Pre < Post: normally no wrap; if interrupt pending and the
		 * expected sleep spans the counter top, treat as wrap.
		 */
		idle_timer_wrap = idle_timer_int_pending &&
			((uint64_t)idle_timer_pre_idle + idle_timer_scheduled_sleep_ticks
				>= idle_timer_top);
	}

	if (idle_timer_wrap) {
		idle_timer_diff = idle_timer_top - idle_timer_pre_idle + idle_timer_post + 1;
	} else {
		idle_timer_diff = idle_timer_post - idle_timer_pre_idle;
	}
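	/* Worked example with assumed values: top = 999, pre = 900,
	 * post = 100 and a detected wrap gives
	 * diff = (999 - 900) + 100 + 1 = 200 counter ticks, i.e. the
	 * 100 ticks up to and including the wrap plus the 100 ticks
	 * after it.
	 */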

	return (uint64_t)counter_ticks_to_us(idle_timer, idle_timer_diff);
}
#endif /* CONFIG_CORTEX_M_SYSTICK_LPM_TIMER_COUNTER */

/* This internal function calculates the amount of HW cycles that have
 * elapsed since the last time the absolute HW cycles counter has been
 * updated. 'cycle_count' may be updated either by the ISR, or when we
 * re-program the SysTick.LOAD register, in sys_clock_set_timeout().
 *
 * Additionally, the function updates the 'overflow_cyc' counter, that
 * holds the amount of elapsed HW cycles due to (possibly) multiple
 * timer wraps (overflows).
 *
 * Prerequisites:
 * - reprogramming of SysTick.LOAD must be clearing the SysTick.COUNTER
 *   register and the 'overflow_cyc' counter.
 * - ISR must be clearing the 'overflow_cyc' counter.
 * - no more than one counter-wrap has occurred between
 *     - the timer reset or the last time the function was called
 *     - and until the current call of the function is completed.
 * - the function is invoked with interrupts disabled.
 */
static uint32_t elapsed(void)
{
	uint32_t val1 = SysTick->VAL;	/* A */
	uint32_t ctrl = SysTick->CTRL;	/* B */
	uint32_t val2 = SysTick->VAL;	/* C */

	/* SysTick behavior: The counter wraps after zero automatically.
	 * The COUNTFLAG field of the CTRL register is set when it
	 * decrements from 1 to 0. Reading the control register
	 * automatically clears that field. When a timer is started,
	 * count begins at zero then wraps after the first cycle.
	 * Reference:
	 *  Armv6-m (B3.3.1) https://developer.arm.com/documentation/ddi0419
	 *  Armv7-m (B3.3.1) https://developer.arm.com/documentation/ddi0403
	 *  Armv8-m (B11.1)  https://developer.arm.com/documentation/ddi0553
	 *
	 * First, manually wrap/realign val1 and val2 from [0:last_load-1]
	 * to [1:last_load]. This allows subsequent code to assume that
	 * COUNTFLAG and wrapping occur on the same cycle.
	 *
	 * If the count wrapped...
	 * 1) Before A then COUNTFLAG will be set and val1 >= val2
	 * 2) Between A and B then COUNTFLAG will be set and val1 < val2
	 * 3) Between B and C then COUNTFLAG will be clear and val1 < val2
	 * 4) After C we'll see it next time
	 *
	 * So the count in val2 is post-wrap and last_load needs to be
	 * added if and only if COUNTFLAG is set or val1 < val2.
	 */
	if (val1 == 0) {
		val1 = last_load;
	}
	if (val2 == 0) {
		val2 = last_load;
	}

	if ((ctrl & SysTick_CTRL_COUNTFLAG_Msk)
	    || (val1 < val2)) {
		overflow_cyc += last_load;

		/* We know there was a wrap, but we might not have
		 * seen it in CTRL, so clear it. */
		(void)SysTick->CTRL;
	}

	return (last_load - val2) + overflow_cyc;
}
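/* Illustrative trace of case 2 above (assumed values): with
 * last_load = 1000, the counter wraps between reads A and B, so
 * val1 = 2 (pre-wrap), COUNTFLAG is set and val2 = 998 (post-wrap).
 * Since val1 < val2, last_load is folded into overflow_cyc and the
 * function returns (1000 - 998) + overflow_cyc.
 */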

/* sys_clock_isr is called directly from the platform's vector table.
 * However, using ISR_DIRECT_DECLARE() is not suitable here due to
 * possible tracing overflow, so this is a stripped-down version of it.
 */
ARCH_ISR_DIAG_OFF
__attribute__((interrupt("IRQ"))) void sys_clock_isr(void)
{
#ifdef CONFIG_TRACING_ISR
	sys_trace_isr_enter();
#endif /* CONFIG_TRACING_ISR */

	uint32_t dcycles;
	uint32_t dticks;

	/* Update overflow_cyc and clear COUNTFLAG by invoking elapsed() */
	elapsed();

	/* Increment the amount of HW cycles elapsed (complete counter
	 * cycles) and announce the progress to the kernel.
	 */
	cycle_count += overflow_cyc;
	overflow_cyc = 0;

#if defined(CONFIG_CORTEX_M_SYSTICK_LPM_TIMER_COUNTER)
	/* Rare case: the interrupt was triggered with the previously
	 * programmed LOAD value just before entering the idle mode
	 * (SysTick is clocked) or right after exiting the idle mode,
	 * before executing the procedure in the sys_clock_idle_exit
	 * function.
	 */
	if (timeout_idle) {
		ISR_DIRECT_PM();
		z_arm_int_exit();

		return;
	}
#endif /* CONFIG_CORTEX_M_SYSTICK_LPM_TIMER_COUNTER */

	if (IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		/* In TICKLESS mode, the SysTick.LOAD is re-programmed
		 * in sys_clock_set_timeout(), followed by resetting of
		 * the counter (VAL = 0).
		 *
		 * If a timer wrap occurs right when we re-program LOAD,
		 * the ISR is triggered immediately after sys_clock_set_timeout()
		 * returns; in that case we shall not increment the cycle_count
		 * because the value has been updated before LOAD re-program.
		 *
		 * We can assess if this is the case by inspecting COUNTFLAG.
		 */

		dcycles = cycle_count - announced_cycles;
		dticks = dcycles / CYC_PER_TICK;
		announced_cycles += dticks * CYC_PER_TICK;
		sys_clock_announce(dticks);
	} else {
		sys_clock_announce(1);
	}

	ISR_DIRECT_PM();

#ifdef CONFIG_TRACING_ISR
	sys_trace_isr_exit();
#endif /* CONFIG_TRACING_ISR */

	z_arm_int_exit();
}
ARCH_ISR_DIAG_ON
void sys_clock_set_timeout(int32_t ticks, bool idle)
{
	/* Fast CPUs and a 24 bit counter mean that even idle systems
	 * need to wake up multiple times per second.  If the kernel
	 * allows us to miss tick announcements in idle, then shut off
	 * the counter. (Note: we can assume if idle==true that
	 * interrupts are already disabled)
	 */
	if (IS_ENABLED(CONFIG_TICKLESS_KERNEL) && idle && ticks == K_TICKS_FOREVER) {
		SysTick->CTRL &= ~SysTick_CTRL_ENABLE_Msk;
		last_load = TIMER_STOPPED;
		return;
	}

#if !defined(CONFIG_CORTEX_M_SYSTICK_LPM_TIMER_NONE)
	if (idle) {
		uint64_t timeout_us =
			((uint64_t)ticks * USEC_PER_SEC) / CONFIG_SYS_CLOCK_TICKS_PER_SEC;

		timeout_idle = true;

		/**
		 * Invoke platform-specific layer to configure LPTIM
		 * such that system wakes up after timeout elapses.
		 */
		z_cms_lptim_hook_on_lpm_entry(timeout_us);

#if !defined(CONFIG_CORTEX_M_SYSTICK_RESET_BY_LPM)
		/* Store current value of SysTick counter to be able to
		 * calculate a difference in measurements after exiting
		 * the low-power state.
		 */
		cycle_pre_idle = cycle_count + elapsed();
#else /* CONFIG_CORTEX_M_SYSTICK_RESET_BY_LPM */
		/**
		 * SysTick will be placed under reset once we enter
		 * low-power mode. Turn it off right now and update
		 * the cycle counter, since we won't be able to
		 * do it after waking up.
		 */
		sys_clock_disable();

		cycle_count += elapsed();
		overflow_cyc = 0;
#endif /* !CONFIG_CORTEX_M_SYSTICK_RESET_BY_LPM */
		return;
	}
#endif /* !CONFIG_CORTEX_M_SYSTICK_LPM_TIMER_NONE */

#if defined(CONFIG_TICKLESS_KERNEL)
	uint32_t delay;
	uint32_t val1, val2;
	uint32_t last_load_ = last_load;

	ticks = (ticks == K_TICKS_FOREVER) ? MAX_TICKS : ticks;
	ticks = CLAMP(ticks - 1, 0, (int32_t)MAX_TICKS);
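	/* Note: subtracting one tick here, together with the round-up to
	 * a tick boundary below, makes the programmed expiry land on a
	 * tick boundary without overshooting the requested number of
	 * ticks.
	 */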

	k_spinlock_key_t key = k_spin_lock(&lock);

	uint32_t pending = elapsed();

	val1 = SysTick->VAL;

	cycle_count += pending;
	overflow_cyc = 0U;

	uint32_t unannounced = cycle_count - announced_cycles;

	if ((int32_t)unannounced < 0) {
		/* We haven't announced for more than half the 32-bit
		 * wrap duration, because new timeouts keep being set
		 * before the existing one fires.  Force an announce
		 * to avoid loss of a wrap event, making sure the
		 * delay is at least the minimum delay possible.
		 */
		last_load = MIN_DELAY;
	} else {
		/* Desired delay in the future */
		delay = ticks * CYC_PER_TICK;

		/* Round delay up to next tick boundary */
		delay += unannounced;
		delay = DIV_ROUND_UP(delay, CYC_PER_TICK) * CYC_PER_TICK;
		delay -= unannounced;
		delay = MAX(delay, MIN_DELAY);
		if (delay > MAX_CYCLES) {
			last_load = MAX_CYCLES;
		} else {
			last_load = delay;
		}
	}
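	/* Worked example of the round-up above (assumed values): with
	 * CYC_PER_TICK = 10000, unannounced = 2500 and ticks = 3:
	 * delay = 30000 + 2500 = 32500, rounded up to 40000, minus 2500
	 * gives 37500, so the counter expires exactly on the fourth tick
	 * boundary after the last announcement.
	 */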

	val2 = SysTick->VAL;

	SysTick->LOAD = last_load - 1;
	SysTick->VAL = 0; /* resets timer to last_load */

	/*
	 * Add elapsed cycles while computing the new load to cycle_count.
	 *
	 * Note that comparing val1 and val2 is normally not good enough to
	 * guess if the counter wrapped during this interval. Indeed, if val1
	 * is close to LOAD, there is little chance of catching val2 between
	 * val1 and LOAD after a wrap. COUNTFLAG should be checked in addition.
	 * But since the load computation is faster than MIN_DELAY, we
	 * don't need to worry about this case.
	 */
	if (val1 < val2) {
		cycle_count += (val1 + (last_load_ - val2));
	} else {
		cycle_count += (val1 - val2);
	}
	k_spin_unlock(&lock, key);
#endif
}

uint32_t sys_clock_elapsed(void)
{
	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		return 0;
	}

	k_spinlock_key_t key = k_spin_lock(&lock);
	uint32_t unannounced = cycle_count - announced_cycles;
	uint32_t cyc = elapsed() + unannounced;

	k_spin_unlock(&lock, key);
	return cyc / CYC_PER_TICK;
}

uint32_t sys_clock_cycle_get_32(void)
{
	k_spinlock_key_t key = k_spin_lock(&lock);
	uint32_t ret = cycle_count;

	ret += elapsed();
	k_spin_unlock(&lock, key);
	return ret;
}

#ifdef CONFIG_CORTEX_M_SYSTICK_64BIT_CYCLE_COUNTER
uint64_t sys_clock_cycle_get_64(void)
{
	k_spinlock_key_t key = k_spin_lock(&lock);
	uint64_t ret = cycle_count + elapsed();

	k_spin_unlock(&lock, key);
	return ret;
}
#endif

void sys_clock_idle_exit(void)
{
#if !defined(CONFIG_CORTEX_M_SYSTICK_LPM_TIMER_NONE)
	if (timeout_idle) {
		cycle_t systick_diff, missed_cycles;
		uint32_t dcycles, dticks;
		uint64_t systick_us, idle_timer_us;

#if !defined(CONFIG_CORTEX_M_SYSTICK_RESET_BY_LPM)
		/**
		 * Get current value for SysTick and calculate how
		 * much time has passed since last measurement.
		 */
		systick_diff = cycle_count + elapsed() - cycle_pre_idle;
		systick_us =
			((uint64_t)systick_diff * USEC_PER_SEC) / sys_clock_hw_cycles_per_sec();
#else /* CONFIG_CORTEX_M_SYSTICK_RESET_BY_LPM */
		/* SysTick was placed under reset so it didn't tick */
		systick_diff = systick_us = 0;
#endif /* !CONFIG_CORTEX_M_SYSTICK_RESET_BY_LPM */

		/**
		 * Query platform-specific code for elapsed time according to LPTIM.
		 */
		idle_timer_us = z_cms_lptim_hook_on_lpm_exit();

		/* Calculate difference in measurements to get how much time
		 * the SysTick missed in idle state.
		 */
		if (idle_timer_us < systick_us) {
			/* This case is possible when the time spent in low power
			 * mode is very short or zero. SysTick usually has a higher
			 * measurement resolution than the IDLE timer, so its
			 * measurement of the time passed since the
			 * sys_clock_set_timeout call can be higher.
			 */
			missed_cycles = 0;
		} else {
			uint64_t measurement_diff_us;

			measurement_diff_us = idle_timer_us - systick_us;
			missed_cycles = (sys_clock_hw_cycles_per_sec() * measurement_diff_us) /
					USEC_PER_SEC;
		}
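		/* Illustrative numbers (assumed 48 MHz clock): if the
		 * LPTIM reports 10000 us asleep while SysTick accounted
		 * for only 150 us, measurement_diff_us = 9850 and
		 * missed_cycles = 48000000 * 9850 / 1000000 = 472800
		 * cycles to fold back into cycle_count below.
		 */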

		/* Update the cycle counter to include the cycles missed in idle */
		cycle_count += missed_cycles;

		/* Announce the passed ticks to the kernel */
		dcycles = cycle_count + elapsed() - announced_cycles;
		dticks = dcycles / CYC_PER_TICK;
		announced_cycles += dticks * CYC_PER_TICK;
		sys_clock_announce(dticks);

		/* We've already performed all needed operations */
		timeout_idle = false;
	}
#endif /* !CONFIG_CORTEX_M_SYSTICK_LPM_TIMER_NONE */

	if (last_load == TIMER_STOPPED || IS_ENABLED(CONFIG_CORTEX_M_SYSTICK_RESET_BY_LPM)) {
		/* SysTick was stopped or placed under reset.
		 * Restart the timer from scratch.
		 */
		K_SPINLOCK(&lock) {
			last_load = CYC_PER_TICK;
			SysTick->LOAD = last_load - 1;
			SysTick->VAL = 0; /* resets timer to last_load */
			if (!IS_ENABLED(CONFIG_CORTEX_M_SYSTICK_RESET_BY_LPM)) {
				SysTick->CTRL |= SysTick_CTRL_ENABLE_Msk;
			} else {
				NVIC_SetPriority(SysTick_IRQn, _IRQ_PRIO_OFFSET);
				SysTick->CTRL |= (SysTick_CTRL_ENABLE_Msk |
						  SysTick_CTRL_TICKINT_Msk |
						  SysTick_CTRL_CLKSOURCE_Msk);
			}
		}
	}
}

void sys_clock_disable(void)
{
	SysTick->CTRL &= ~SysTick_CTRL_ENABLE_Msk;
}

static int sys_clock_driver_init(void)
{
	NVIC_SetPriority(SysTick_IRQn, _IRQ_PRIO_OFFSET);
	last_load = CYC_PER_TICK;
	overflow_cyc = 0U;
	SysTick->LOAD = last_load - 1;
	SysTick->VAL = 0; /* resets timer to last_load */
	SysTick->CTRL |= (SysTick_CTRL_ENABLE_Msk |
			  SysTick_CTRL_TICKINT_Msk |
			  SysTick_CTRL_CLKSOURCE_Msk);
	return 0;
}

SYS_INIT(sys_clock_driver_init, PRE_KERNEL_2,
	 CONFIG_SYSTEM_CLOCK_INIT_PRIORITY);