1 /*
2  * Copyright (c) 2019 Intel Corporation
3  * SPDX-License-Identifier: Apache-2.0
4  */
5 
6 #include <zephyr/init.h>
7 #include <zephyr/drivers/timer/system_timer.h>
8 #include <zephyr/sys_clock.h>
9 #include <zephyr/spinlock.h>
10 #include <zephyr/drivers/interrupt_controller/loapic.h>
11 #include <zephyr/irq.h>
12 
13 BUILD_ASSERT(!IS_ENABLED(CONFIG_SMP), "APIC timer doesn't support SMP");
14 
15 /*
16  * Overview:
17  *
18  * This driver enables the local APIC as the Zephyr system timer. It supports
19  * both legacy ("tickful") mode as well as TICKLESS_KERNEL. The driver will
20  * work with any APIC that has the ARAT "always running APIC timer" feature
21  * (CPUID 0x06, EAX bit 2); for the more accurate sys_clock_cycle_get_32(),
22  * the invariant TSC feature (CPUID 0x80000007: EDX bit 8) is also required.
23  * (Ultimately systems with invariant TSCs should use a TSC-based driver,
24  * and the TSC-related parts should be stripped from this implementation.)
25  *
26  * Configuration:
27  *
28  * CONFIG_APIC_TIMER=y			enables this timer driver.
29  * CONFIG_APIC_TIMER_IRQ=<irq>		which IRQ to configure for the timer.
30  * CONFIG_APIC_TIMER_IRQ_PRIORITY=<p>	priority for IRQ_CONNECT()
31  *
32  * CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC=<hz> must contain the frequency seen
33  *     by the local APIC timer block (before it gets to the timer divider).
34  *
35  * CONFIG_APIC_TIMER_TSC=y enables the more accurate TSC-based cycle counter
 *     for sys_clock_cycle_get_32(). This also requires the following options to be set.
37  *
38  * CONFIG_APIC_TIMER_TSC_N=<n>
39  * CONFIG_APIC_TIMER_TSC_M=<m>
40  *     When CONFIG_APIC_TIMER_TSC=y, these are set to indicate the ratio of
41  *     the TSC frequency to CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC. This can be
42  *     found via CPUID 0x15 (n = EBX, m = EAX) on most CPUs.
43  */
44 
45 /* These should be merged into include/drivers/interrupt_controller/loapic.h. */
46 
47 #define DCR_DIVIDER_MASK	0x0000000F	/* divider bits */
48 #define DCR_DIVIDER		0x0000000B	/* divide by 1 */
49 #define LVT_MODE_MASK		0x00060000	/* timer mode bits */
50 #define LVT_MODE		0x00000000	/* one-shot */
51 
52 #if defined(CONFIG_TEST)
53 const int32_t z_sys_timer_irq_for_test = CONFIG_APIC_TIMER_IRQ;
54 #endif
55 /*
56  * CYCLES_PER_TICK must always be at least '2', otherwise MAX_TICKS
57  * will overflow int32_t, which is how 'ticks' are currently represented.
58  */
59 
60 #define CYCLES_PER_TICK \
61 	(CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC / CONFIG_SYS_CLOCK_TICKS_PER_SEC)
62 
63 BUILD_ASSERT(CYCLES_PER_TICK >= 2, "APIC timer: bad CYCLES_PER_TICK");
64 
65 /* max number of ticks we can load into the timer in one shot */
66 
67 #define MAX_TICKS (0xFFFFFFFFU / CYCLES_PER_TICK)
68 
69 /*
70  * The spinlock protects all access to the local APIC timer registers,
71  * as well as 'total_cycles', 'last_announcement', and 'cached_icr'.
72  *
73  * One important invariant that must be observed: `total_cycles` + `cached_icr`
 * is always an integral multiple of CYCLES_PER_TICK; that is, timer interrupts
75  * are only ever scheduled to occur at tick boundaries.
76  */
77 
78 static struct k_spinlock lock;
79 static uint64_t total_cycles;
80 static uint32_t cached_icr = CYCLES_PER_TICK;
81 
82 #ifdef CONFIG_TICKLESS_KERNEL
83 
84 static uint64_t last_announcement;	/* last time we called sys_clock_announce() */
85 
sys_clock_set_timeout(int32_t n,bool idle)86 void sys_clock_set_timeout(int32_t n, bool idle)
87 {
88 	ARG_UNUSED(idle);
89 
90 	uint32_t ccr;
91 	int   full_ticks;	/* number of complete ticks we'll wait */
92 	uint32_t full_cycles;	/* full_ticks represented as cycles */
93 	uint32_t partial_cycles;	/* number of cycles to first tick boundary */
94 
95 	if (n < 1) {
96 		full_ticks = 0;
97 	} else if ((n == K_TICKS_FOREVER) || (n > MAX_TICKS)) {
98 		full_ticks = MAX_TICKS - 1;
99 	} else {
100 		full_ticks = n - 1;
101 	}
102 
103 	full_cycles = full_ticks * CYCLES_PER_TICK;
104 
105 	/*
106 	 * There's a wee race condition here. The timer may expire while
107 	 * we're busy reprogramming it; an interrupt will be queued at the
108 	 * local APIC and the ISR will be called too early, roughly right
109 	 * after we unlock, and not because the count we just programmed has
110 	 * counted down. Luckily this situation is easy to detect, which is
111 	 * why the ISR actually checks to be sure the CCR is 0 before acting.
112 	 */
113 
114 	k_spinlock_key_t key = k_spin_lock(&lock);
115 
116 	ccr = x86_read_loapic(LOAPIC_TIMER_CCR);
117 	total_cycles += (cached_icr - ccr);
118 	partial_cycles = CYCLES_PER_TICK - (total_cycles % CYCLES_PER_TICK);
119 	cached_icr = full_cycles + partial_cycles;
120 	x86_write_loapic(LOAPIC_TIMER_ICR, cached_icr);
121 
122 	k_spin_unlock(&lock, key);
123 }
124 
sys_clock_elapsed(void)125 uint32_t sys_clock_elapsed(void)
126 {
127 	uint32_t ccr;
128 	uint32_t ticks;
129 
130 	k_spinlock_key_t key = k_spin_lock(&lock);
131 	ccr = x86_read_loapic(LOAPIC_TIMER_CCR);
132 	ticks = total_cycles - last_announcement;
133 	ticks += cached_icr - ccr;
134 	k_spin_unlock(&lock, key);
135 	ticks /= CYCLES_PER_TICK;
136 
137 	return ticks;
138 }
139 
/*
 * Tickless-mode timer ISR.
 *
 * Accounts for the span that just expired, restarts the timer for the
 * longest representable span (a subsequent sys_clock_set_timeout() call
 * will shorten it), and announces the elapsed ticks to the kernel.
 *
 * @param arg IRQ_CONNECT() parameter; unused.
 */
static void isr(const void *arg)
{
	ARG_UNUSED(arg);

	uint32_t cycles;
	int32_t ticks;

	k_spinlock_key_t key = k_spin_lock(&lock);

	/*
	 * If we get here and the CCR isn't zero, then this interrupt is
	 * stale: it was queued while sys_clock_set_timeout() was setting
	 * a new counter. Just ignore it. See above for more info.
	 */

	if (x86_read_loapic(LOAPIC_TIMER_CCR) != 0) {
		k_spin_unlock(&lock, key);
		return;
	}

	/* Restart the timer as early as possible to minimize drift... */
	x86_write_loapic(LOAPIC_TIMER_ICR, MAX_TICKS * CYCLES_PER_TICK);

	/* The whole previously programmed span (cached_icr) has elapsed. */
	cycles = cached_icr;
	cached_icr = MAX_TICKS * CYCLES_PER_TICK;
	total_cycles += cycles;
	/* Both values are tick-aligned (see the invariant documented with
	 * the lock), so this division is exact.
	 */
	ticks = (total_cycles - last_announcement) / CYCLES_PER_TICK;
	last_announcement = total_cycles;
	k_spin_unlock(&lock, key);
	/* Announce with the lock released. */
	sys_clock_announce(ticks);
}
171 
172 #else
173 
isr(const void * arg)174 static void isr(const void *arg)
175 {
176 	ARG_UNUSED(arg);
177 
178 	k_spinlock_key_t key = k_spin_lock(&lock);
179 	total_cycles += CYCLES_PER_TICK;
180 	x86_write_loapic(LOAPIC_TIMER_ICR, cached_icr);
181 	k_spin_unlock(&lock, key);
182 
183 	sys_clock_announce(1);
184 }
185 
/*
 * In tickful mode every tick is announced from the ISR as it happens,
 * so there is never a partially-consumed tick count to report here.
 */
uint32_t sys_clock_elapsed(void)
{
	return 0;
}
190 
191 #endif /* CONFIG_TICKLESS_KERNEL */
192 
193 #ifdef CONFIG_APIC_TIMER_TSC
194 
sys_clock_cycle_get_32(void)195 uint32_t sys_clock_cycle_get_32(void)
196 {
197 	uint64_t tsc = z_tsc_read();
198 	uint32_t cycles;
199 
200 	cycles = (tsc * CONFIG_APIC_TIMER_TSC_M) / CONFIG_APIC_TIMER_TSC_N;
201 	return cycles;
202 }
203 
204 #else
205 
sys_clock_cycle_get_32(void)206 uint32_t sys_clock_cycle_get_32(void)
207 {
208 	uint32_t ret;
209 	uint32_t ccr;
210 
211 	k_spinlock_key_t key = k_spin_lock(&lock);
212 	ccr = x86_read_loapic(LOAPIC_TIMER_CCR);
213 	ret = total_cycles + (cached_icr - ccr);
214 	k_spin_unlock(&lock, key);
215 
216 	return ret;
217 }
218 
219 #endif
220 
sys_clock_driver_init(void)221 static int sys_clock_driver_init(void)
222 {
223 	uint32_t val;
224 
225 
226 	val = x86_read_loapic(LOAPIC_TIMER_CONFIG);	/* set divider */
227 	val &= ~DCR_DIVIDER_MASK;
228 	val |= DCR_DIVIDER;
229 	x86_write_loapic(LOAPIC_TIMER_CONFIG, val);
230 
231 	val = x86_read_loapic(LOAPIC_TIMER);		/* set timer mode */
232 	val &= ~LVT_MODE_MASK;
233 	val |= LVT_MODE;
234 	x86_write_loapic(LOAPIC_TIMER, val);
235 
236 	/* remember, wiring up the interrupt will mess with the LVT, too */
237 
238 	IRQ_CONNECT(CONFIG_APIC_TIMER_IRQ,
239 		CONFIG_APIC_TIMER_IRQ_PRIORITY,
240 		isr, 0, 0);
241 
242 	x86_write_loapic(LOAPIC_TIMER_ICR, cached_icr);
243 	irq_enable(CONFIG_APIC_TIMER_IRQ);
244 
245 	return 0;
246 }
247 
248 SYS_INIT(sys_clock_driver_init, PRE_KERNEL_2,
249 	 CONFIG_SYSTEM_CLOCK_INIT_PRIORITY);
250