/*
 * Copyright (c) 2018-2023 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <limits.h>

#include <zephyr/init.h>
#include <zephyr/devicetree.h>
#include <zephyr/drivers/timer/system_timer.h>
#include <zephyr/sys_clock.h>
#include <zephyr/spinlock.h>
#include <zephyr/irq.h>

/* andestech,machine-timer */
#if DT_HAS_COMPAT_STATUS_OKAY(andestech_machine_timer)
#define DT_DRV_COMPAT andestech_machine_timer

#define MTIME_REG DT_INST_REG_ADDR(0)
#define MTIMECMP_REG (DT_INST_REG_ADDR(0) + 8)
#define TIMER_IRQN DT_INST_IRQN(0)
/* neorv32-machine-timer */
#elif DT_HAS_COMPAT_STATUS_OKAY(neorv32_machine_timer)
#define DT_DRV_COMPAT neorv32_machine_timer

#define MTIME_REG DT_INST_REG_ADDR(0)
#define MTIMECMP_REG (DT_INST_REG_ADDR(0) + 8)
#define TIMER_IRQN DT_INST_IRQN(0)
/* nuclei,systimer */
#elif DT_HAS_COMPAT_STATUS_OKAY(nuclei_systimer)
#define DT_DRV_COMPAT nuclei_systimer

#define MTIME_REG DT_INST_REG_ADDR(0)
#define MTIMECMP_REG (DT_INST_REG_ADDR(0) + 8)
#define TIMER_IRQN DT_INST_IRQ_BY_IDX(0, 1, irq)
/* sifive,clint0 */
#elif DT_HAS_COMPAT_STATUS_OKAY(sifive_clint0)
#define DT_DRV_COMPAT sifive_clint0

#define MTIME_REG (DT_INST_REG_ADDR(0) + 0xbff8U)
#define MTIMECMP_REG (DT_INST_REG_ADDR(0) + 0x4000U)
#define TIMER_IRQN DT_INST_IRQ_BY_IDX(0, 1, irq)
/* telink,machine-timer */
#elif DT_HAS_COMPAT_STATUS_OKAY(telink_machine_timer)
#define DT_DRV_COMPAT telink_machine_timer

#define MTIME_REG DT_INST_REG_ADDR(0)
#define MTIMECMP_REG (DT_INST_REG_ADDR(0) + 8)
#define TIMER_IRQN DT_INST_IRQN(0)
/* lowrisc,machine-timer */
#elif DT_HAS_COMPAT_STATUS_OKAY(lowrisc_machine_timer)
#define DT_DRV_COMPAT lowrisc_machine_timer

#define MTIME_REG (DT_INST_REG_ADDR(0) + 0x110)
#define MTIMECMP_REG (DT_INST_REG_ADDR(0) + 0x118)
#define TIMER_IRQN DT_INST_IRQN(0)
/* niosv-machine-timer */
#elif DT_HAS_COMPAT_STATUS_OKAY(niosv_machine_timer)
#define DT_DRV_COMPAT niosv_machine_timer

#define MTIMECMP_REG DT_INST_REG_ADDR(0)
#define MTIME_REG (DT_INST_REG_ADDR(0) + 8)
#define TIMER_IRQN DT_INST_IRQN(0)
/* scr,machine-timer */
#elif DT_HAS_COMPAT_STATUS_OKAY(scr_machine_timer)
#define DT_DRV_COMPAT scr_machine_timer
#define MTIMER_HAS_DIVIDER

#define MTIMEDIV_REG (DT_INST_REG_ADDR_U64(0) + 4)
#define MTIME_REG (DT_INST_REG_ADDR_U64(0) + 8)
#define MTIMECMP_REG (DT_INST_REG_ADDR_U64(0) + 16)
#define TIMER_IRQN DT_INST_IRQN(0)
#endif

#define CYC_PER_TICK (uint32_t)(sys_clock_hw_cycles_per_sec() \
                                / CONFIG_SYS_CLOCK_TICKS_PER_SEC)

/* the unsigned long cast limits divisions to native CPU register width */
#define cycle_diff_t unsigned long

static struct k_spinlock lock;
static uint64_t last_count;
static uint64_t last_ticks;
static uint32_t last_elapsed;

#if defined(CONFIG_TEST)
const int32_t z_sys_timer_irq_for_test = TIMER_IRQN;
#endif

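/*
 * Each hart has its own 64-bit mtimecmp register; they are laid out
 * contiguously from MTIMECMP_REG at an 8-byte stride, so the current
 * hart's register is located by offsetting with its processor ID.
 */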
static uintptr_t get_hart_mtimecmp(void)
{
        return MTIMECMP_REG + (arch_proc_id() * 8);
}

static void set_mtimecmp(uint64_t time)
{
#ifdef CONFIG_64BIT
        *(volatile uint64_t *)get_hart_mtimecmp() = time;
#else
        volatile uint32_t *r = (uint32_t *)get_hart_mtimecmp();

        /* Per spec, the RISC-V MTIME/MTIMECMP registers are 64 bit,
         * but are NOT internally latched for multiword transfers. So
         * we have to be careful about sequencing to avoid triggering
         * spurious interrupts: always set the high word to a max
         * value first.
         */
        r[1] = 0xffffffff;
        r[0] = (uint32_t)time;
        r[1] = (uint32_t)(time >> 32);
#endif
}

static void set_divider(void)
{
#ifdef MTIMER_HAS_DIVIDER
        *(volatile uint32_t *)MTIMEDIV_REG =
                CONFIG_RISCV_MACHINE_TIMER_SYSTEM_CLOCK_DIVIDER;
#endif
}

static uint64_t mtime(void)
{
#ifdef CONFIG_64BIT
        return *(volatile uint64_t *)MTIME_REG;
#else
        volatile uint32_t *r = (uint32_t *)MTIME_REG;
        uint32_t lo, hi;

        /* Likewise, must guard against rollover when reading */
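        /*
         * If the low word rolls over between the two reads, the high word
         * changes as well and the lo/hi pair would be inconsistent; retry
         * until the high word reads the same before and after sampling lo.
         */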
        do {
                hi = r[1];
                lo = r[0];
        } while (r[1] != hi);

        return (((uint64_t)hi) << 32) | lo;
#endif
}

static void timer_isr(const void *arg)
{
        ARG_UNUSED(arg);

        k_spinlock_key_t key = k_spin_lock(&lock);

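        /*
         * Convert cycles elapsed since the last announcement into whole
         * ticks, and advance last_count by exactly that many ticks' worth
         * of cycles so the sub-tick remainder carries into the next round.
         */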
        uint64_t now = mtime();
        uint64_t dcycles = now - last_count;
        uint32_t dticks = (cycle_diff_t)dcycles / CYC_PER_TICK;

        last_count += (cycle_diff_t)dticks * CYC_PER_TICK;
        last_ticks += dticks;
        last_elapsed = 0;

        if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
                uint64_t next = last_count + CYC_PER_TICK;

                set_mtimecmp(next);
        }

        k_spin_unlock(&lock, key);
        sys_clock_announce(dticks);
}

void sys_clock_set_timeout(int32_t ticks, bool idle)
{
        ARG_UNUSED(idle);

        if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
                return;
        }

        if (ticks == K_TICKS_FOREVER) {
                set_mtimecmp(UINT64_MAX);
                return;
        }

        /*
         * Clamp the max period length to a number of cycles that can fit
         * in half the range of a cycle_diff_t for native width divisions
         * to be usable elsewhere. Also clamp it to half the range of an
         * int32_t as this is the type used for elapsed tick announcements.
         * The half range gives us extra room to cope with the unavoidable IRQ
         * servicing latency. The compiler should optimize away the least
         * restrictive of those tests automatically.
         */
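        /*
         * Hypothetical example: on a 32-bit target (cycle_diff_t is 32 bits
         * wide) with CYC_PER_TICK == 10000, the first clamp caps a request
         * at roughly (2^32 - 1) / 2 / 10000 ~= 214748 ticks.
         */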
        ticks = CLAMP(ticks, 0, (cycle_diff_t)-1 / 2 / CYC_PER_TICK);
        ticks = CLAMP(ticks, 0, INT32_MAX / 2);

        k_spinlock_key_t key = k_spin_lock(&lock);
        uint64_t cyc = (last_ticks + last_elapsed + ticks) * CYC_PER_TICK;

        set_mtimecmp(cyc);
        k_spin_unlock(&lock, key);
}

uint32_t sys_clock_elapsed(void)
{
        if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
                return 0;
        }

        k_spinlock_key_t key = k_spin_lock(&lock);
        uint64_t now = mtime();
        uint64_t dcycles = now - last_count;
        uint32_t dticks = (cycle_diff_t)dcycles / CYC_PER_TICK;

        last_elapsed = dticks;
        k_spin_unlock(&lock, key);
        return dticks;
}

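/*
 * The Kconfig divider is treated here as a power-of-two exponent: the timer
 * is assumed to count at the system clock rate divided by 2^DIVIDER, so the
 * raw count is shifted left by the divider to report cycles in system clock
 * units. A divider of 0 makes the shift a no-op.
 */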
uint32_t sys_clock_cycle_get_32(void)
{
        return ((uint32_t)mtime()) << CONFIG_RISCV_MACHINE_TIMER_SYSTEM_CLOCK_DIVIDER;
}

uint64_t sys_clock_cycle_get_64(void)
{
        return mtime() << CONFIG_RISCV_MACHINE_TIMER_SYSTEM_CLOCK_DIVIDER;
}

static int sys_clock_driver_init(void)
{
        set_divider();

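        /*
         * Align the bookkeeping with the current timer value: last_count is
         * rounded down to a tick boundary and the first compare is armed one
         * full tick into the future.
         */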
        IRQ_CONNECT(TIMER_IRQN, 0, timer_isr, NULL, 0);
        last_ticks = mtime() / CYC_PER_TICK;
        last_count = last_ticks * CYC_PER_TICK;
        set_mtimecmp(last_count + CYC_PER_TICK);
        irq_enable(TIMER_IRQN);
        return 0;
}

#ifdef CONFIG_SMP
void smp_timer_init(void)
{
        set_mtimecmp(last_count + CYC_PER_TICK);
        irq_enable(TIMER_IRQN);
}
#endif

SYS_INIT(sys_clock_driver_init, PRE_KERNEL_2,
         CONFIG_SYSTEM_CLOCK_INIT_PRIORITY);