/*
 * Copyright (c) 2019 Intel Corporation
 * SPDX-License-Identifier: Apache-2.0
 */

#include <drivers/timer/system_timer.h>
#include <sys_clock.h>
#include <spinlock.h>
#include <drivers/interrupt_controller/loapic.h>

BUILD_ASSERT(!IS_ENABLED(CONFIG_SMP), "APIC timer doesn't support SMP");

/*
 * Overview:
 *
 * This driver enables the local APIC as the Zephyr system timer. It supports
 * both legacy ("tickful") mode and TICKLESS_KERNEL. The driver will
 * work with any APIC that has the ARAT "always running APIC timer" feature
 * (CPUID 0x06, EAX bit 2); for the more accurate sys_clock_cycle_get_32(),
 * the invariant TSC feature (CPUID 0x80000007: EDX bit 8) is also required.
 * (Ultimately systems with invariant TSCs should use a TSC-based driver,
 * and the TSC-related parts should be stripped from this implementation.)
 *
 * Configuration:
 *
 * CONFIG_APIC_TIMER=y			enables this timer driver.
 * CONFIG_APIC_TIMER_IRQ=<irq>		which IRQ to configure for the timer.
 * CONFIG_APIC_TIMER_IRQ_PRIORITY=<p>	priority for IRQ_CONNECT()
 *
 * CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC=<hz> must contain the frequency seen
 *     by the local APIC timer block (before it reaches the timer divider).
 *
 * CONFIG_APIC_TIMER_TSC=y enables the more accurate TSC-based cycle counter
 *     for sys_clock_cycle_get_32(). It also requires that the following
 *     options be set:
 *
 * CONFIG_APIC_TIMER_TSC_N=<n>
 * CONFIG_APIC_TIMER_TSC_M=<m>
 *     When CONFIG_APIC_TIMER_TSC=y, these are set to indicate the ratio of
 *     the TSC frequency to CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC. This can be
 *     found via CPUID 0x15 (n = EBX, m = EAX) on most CPUs.
 */
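
/*
 * A minimal configuration sketch (the numeric values below are purely
 * illustrative, not defaults):
 *
 *	CONFIG_APIC_TIMER=y
 *	CONFIG_APIC_TIMER_IRQ=24
 *	CONFIG_APIC_TIMER_IRQ_PRIORITY=4
 *	CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC=25000000
 *	CONFIG_APIC_TIMER_TSC=y
 *	CONFIG_APIC_TIMER_TSC_N=2
 *	CONFIG_APIC_TIMER_TSC_M=1
 */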

/* These should be merged into include/drivers/interrupt_controller/loapic.h. */

#define DCR_DIVIDER_MASK	0x0000000F	/* divider bits */
#define DCR_DIVIDER		0x0000000B	/* divide by 1 */
#define LVT_MODE_MASK		0x00060000	/* timer mode bits */
#define LVT_MODE		0x00000000	/* one-shot */
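
/*
 * In the divide configuration register, bits [3] and [1:0] select the
 * divider; the 0b1011 pattern above means "divide by 1". In the timer
 * LVT entry, bits [18:17] select the mode; 0b00 is one-shot.
 */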

/*
 * CYCLES_PER_TICK must always be at least '2', otherwise MAX_TICKS
 * will overflow int32_t, which is how 'ticks' are currently represented.
 */

#define CYCLES_PER_TICK \
	(CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC / CONFIG_SYS_CLOCK_TICKS_PER_SEC)

BUILD_ASSERT(CYCLES_PER_TICK >= 2, "APIC timer: bad CYCLES_PER_TICK");

/* max number of ticks we can load into the timer in one shot */

#define MAX_TICKS (0xFFFFFFFFU / CYCLES_PER_TICK)
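
/*
 * For example, with CYCLES_PER_TICK == 2, MAX_TICKS is 0x7FFFFFFF,
 * which just fits in an int32_t; with CYCLES_PER_TICK == 1 it would be
 * 0xFFFFFFFF, which does not.
 */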

/*
 * The spinlock protects all access to the local APIC timer registers,
 * as well as 'total_cycles', 'last_announcement', and 'cached_icr'.
 *
 * One important invariant that must be observed: `total_cycles` + `cached_icr`
 * is always an integral multiple of CYCLES_PER_TICK; that is, timer interrupts
 * are only ever scheduled to occur at tick boundaries.
 */

static struct k_spinlock lock;
static uint64_t total_cycles;
static uint32_t cached_icr = CYCLES_PER_TICK;
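
/*
 * A sketch (not compiled in) of how the invariant could be checked
 * anywhere the lock is held:
 *
 *	__ASSERT(((total_cycles + cached_icr) % CYCLES_PER_TICK) == 0U,
 *		 "timer drifted off a tick boundary");
 */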

#ifdef CONFIG_TICKLESS_KERNEL

static uint64_t last_announcement;	/* last time we called sys_clock_announce() */

void sys_clock_set_timeout(int32_t n, bool idle)
{
	ARG_UNUSED(idle);

	uint32_t ccr;
	int full_ticks;		/* number of complete ticks we'll wait */
	uint32_t full_cycles;	/* full_ticks represented as cycles */
	uint32_t partial_cycles;	/* number of cycles to first tick boundary */

	if (n == K_TICKS_FOREVER) {
		full_ticks = MAX_TICKS - 1;	/* wait as long as we can */
	} else if (n < 1) {
		full_ticks = 0;
	} else if (n > MAX_TICKS) {
		full_ticks = MAX_TICKS - 1;
	} else {
		full_ticks = n - 1;
	}

	full_cycles = full_ticks * CYCLES_PER_TICK;

	/*
	 * There's a wee race condition here. The timer may expire while
	 * we're busy reprogramming it; an interrupt will be queued at the
	 * local APIC and the ISR will be called too early, roughly right
	 * after we unlock, and not because the count we just programmed has
	 * counted down. Luckily this situation is easy to detect, which is
	 * why the ISR actually checks to be sure the CCR is 0 before acting.
	 */

	k_spinlock_key_t key = k_spin_lock(&lock);

	ccr = x86_read_loapic(LOAPIC_TIMER_CCR);
	total_cycles += (cached_icr - ccr);
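	/*
	 * If total_cycles is now r cycles past a tick boundary, then
	 * partial_cycles is CYCLES_PER_TICK - r, so the new value of
	 * total_cycles + cached_icr lands back on a tick boundary,
	 * preserving the invariant described above.
	 */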
	partial_cycles = CYCLES_PER_TICK - (total_cycles % CYCLES_PER_TICK);
	cached_icr = full_cycles + partial_cycles;
	x86_write_loapic(LOAPIC_TIMER_ICR, cached_icr);

	k_spin_unlock(&lock, key);
}

uint32_t sys_clock_elapsed(void)
{
	uint32_t ccr;
	uint32_t ticks;

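	/*
	 * Cycles elapsed since the last announcement are those already
	 * banked in total_cycles, plus those consumed from the currently
	 * loaded ICR (cached_icr - CCR); the sum is then converted to
	 * whole ticks.
	 */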
	k_spinlock_key_t key = k_spin_lock(&lock);
	ccr = x86_read_loapic(LOAPIC_TIMER_CCR);
	ticks = total_cycles - last_announcement;
	ticks += cached_icr - ccr;
	k_spin_unlock(&lock, key);
	ticks /= CYCLES_PER_TICK;

	return ticks;
}

static void isr(const void *arg)
{
	ARG_UNUSED(arg);

	uint32_t cycles;
	int32_t ticks;

	k_spinlock_key_t key = k_spin_lock(&lock);

	/*
	 * If we get here and the CCR isn't zero, then this interrupt is
	 * stale: it was queued while sys_clock_set_timeout() was setting
	 * a new counter. Just ignore it. See above for more info.
	 */

	if (x86_read_loapic(LOAPIC_TIMER_CCR) != 0) {
		k_spin_unlock(&lock, key);
		return;
	}

	/* Restart the timer as early as possible to minimize drift... */
	x86_write_loapic(LOAPIC_TIMER_ICR, MAX_TICKS * CYCLES_PER_TICK);

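	/*
	 * Bank the cycles from the ICR value that just expired, then let
	 * the timer free-run at the maximum programmable interval until
	 * the kernel programs a new timeout.
	 */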
	cycles = cached_icr;
	cached_icr = MAX_TICKS * CYCLES_PER_TICK;
	total_cycles += cycles;
	ticks = (total_cycles - last_announcement) / CYCLES_PER_TICK;
	last_announcement = total_cycles;
	k_spin_unlock(&lock, key);
	sys_clock_announce(ticks);
}

#else

static void isr(const void *arg)
{
	ARG_UNUSED(arg);

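	/*
	 * Strictly tickful mode: every interrupt is exactly one tick, so
	 * just bank it and reload the same one-tick ICR value.
	 */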
	k_spinlock_key_t key = k_spin_lock(&lock);
	total_cycles += CYCLES_PER_TICK;
	x86_write_loapic(LOAPIC_TIMER_ICR, cached_icr);
	k_spin_unlock(&lock, key);

	sys_clock_announce(1);
}

uint32_t sys_clock_elapsed(void)
{
	return 0U;
}

#endif /* CONFIG_TICKLESS_KERNEL */

#ifdef CONFIG_APIC_TIMER_TSC

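/*
 * Scale the free-running TSC down to CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC
 * units: cycles = tsc * M / N. For example, if the TSC runs at twice the
 * declared hw-cycle rate (N=2, M=1), two TSC ticks advance the cycle
 * count by one. The result is truncated to 32 bits on return.
 */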
uint32_t sys_clock_cycle_get_32(void)
{
	uint64_t tsc = z_tsc_read();
	uint32_t cycles;

	cycles = (tsc * CONFIG_APIC_TIMER_TSC_M) / CONFIG_APIC_TIMER_TSC_N;
	return cycles;
}

#else

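/*
 * Without the TSC option, derive the cycle count from the timer state:
 * the cycles banked in total_cycles plus those consumed from the
 * currently loaded ICR. The 64-bit sum wraps naturally at 32 bits.
 */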
uint32_t sys_clock_cycle_get_32(void)
{
	uint32_t ret;
	uint32_t ccr;

	k_spinlock_key_t key = k_spin_lock(&lock);
	ccr = x86_read_loapic(LOAPIC_TIMER_CCR);
	ret = total_cycles + (cached_icr - ccr);
	k_spin_unlock(&lock, key);

	return ret;
}

#endif

int sys_clock_driver_init(const struct device *dev)
{
	uint32_t val;

	ARG_UNUSED(dev);

	val = x86_read_loapic(LOAPIC_TIMER_CONFIG);	/* set divider */
	val &= ~DCR_DIVIDER_MASK;
	val |= DCR_DIVIDER;
	x86_write_loapic(LOAPIC_TIMER_CONFIG, val);

	val = x86_read_loapic(LOAPIC_TIMER);		/* set timer mode */
	val &= ~LVT_MODE_MASK;
	val |= LVT_MODE;
	x86_write_loapic(LOAPIC_TIMER, val);

	/* remember, wiring up the interrupt will mess with the LVT, too */

	IRQ_CONNECT(CONFIG_APIC_TIMER_IRQ,
		CONFIG_APIC_TIMER_IRQ_PRIORITY,
		isr, 0, 0);

	x86_write_loapic(LOAPIC_TIMER_ICR, cached_icr);
	irq_enable(CONFIG_APIC_TIMER_IRQ);

	return 0;
}