/*
 * Copyright (c) 2024 MASSDRIVER EI (massdriver.space)
 * Copyright (c) 2018-2023 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <limits.h>

#include <zephyr/init.h>
#include <zephyr/devicetree.h>
#include <zephyr/drivers/timer/system_timer.h>
#include <zephyr/sys_clock.h>
#include <zephyr/spinlock.h>
#include <zephyr/irq.h>

#define DT_DRV_COMPAT riscv_machine_timer

#define MTIME_REG DT_INST_REG_ADDR_BY_IDX(0, 0)
#define MTIMECMP_REG DT_INST_REG_ADDR_BY_IDX(0, 1)
#define TIMER_IRQN DT_INST_IRQN(0)

#define CYC_PER_TICK (uint32_t)(sys_clock_hw_cycles_per_sec() / CONFIG_SYS_CLOCK_TICKS_PER_SEC)

/* the unsigned long cast limits divisions to native CPU register width */
#define cycle_diff_t unsigned long
#define CYCLE_DIFF_MAX (~(cycle_diff_t)0)

/*
 * We have two constraints on the maximum number of cycles we can wait for.
 *
 * 1) sys_clock_announce() accepts at most INT32_MAX ticks.
 *
 * 2) The number of cycles between two reports must fit in a cycle_diff_t
 *    variable before converting it to ticks.
 *
 * Then:
 *
 * 3) Pick the smallest between (1) and (2).
 *
 * 4) Take into account some room for the unavoidable IRQ servicing latency.
 *    Let's use 3/4 of the max range.
 *
 * Finally, let's add the LSB value to the result so as to clear out a bunch
 * of consecutive set bits coming from the original max values and produce a
 * nicer literal for assembly generation.
 */
#define CYCLES_MAX_1 ((uint64_t)INT32_MAX * (uint64_t)CYC_PER_TICK)
#define CYCLES_MAX_2 ((uint64_t)CYCLE_DIFF_MAX)
#define CYCLES_MAX_3 MIN(CYCLES_MAX_1, CYCLES_MAX_2)
#define CYCLES_MAX_4 (CYCLES_MAX_3 / 2 + CYCLES_MAX_3 / 4)
#define CYCLES_MAX (CYCLES_MAX_4 + LSB_GET(CYCLES_MAX_4))
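
/*
 * Illustrative arithmetic only (the numbers are an assumption, not taken
 * from this build): on a 32-bit target where cycle_diff_t is 32 bits wide,
 * with a 10 MHz mtime clock and CONFIG_SYS_CLOCK_TICKS_PER_SEC=100, the
 * chain above works out to:
 *
 *   CYC_PER_TICK = 10000000 / 100           =  100000
 *   CYCLES_MAX_1 = 0x7fffffff * 100000      ~= 2.1e14
 *   CYCLES_MAX_2 = 0xffffffff               ~= 4.3e9  (the binding limit)
 *   CYCLES_MAX_3 = 0xffffffff
 *   CYCLES_MAX_4 = 0x7fffffff + 0x3fffffff   = 0xbffffffe
 *   CYCLES_MAX   = 0xbffffffe + 2            = 0xc0000000
 */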

static struct k_spinlock lock;
static uint64_t last_count;
static uint64_t last_ticks;
static uint32_t last_elapsed;

#if defined(CONFIG_TEST)
const int32_t z_sys_timer_irq_for_test = TIMER_IRQN;
#endif

static uintptr_t get_hart_mtimecmp(void)
{
	return MTIMECMP_REG + (arch_proc_id() * 8);
}

static void set_mtimecmp(uint64_t time)
{
#ifdef CONFIG_64BIT
	*(volatile uint64_t *)get_hart_mtimecmp() = time;
#else
	volatile uint32_t *r = (uint32_t *)get_hart_mtimecmp();

	/* Per spec, the RISC-V MTIME/MTIMECMP registers are 64 bit,
	 * but are NOT internally latched for multiword transfers. So
	 * we have to be careful about sequencing to avoid triggering
	 * spurious interrupts: always set the high word to a max
	 * value first.
	 */
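	/*
	 * Illustration with made-up values: if the comparator currently
	 * holds 0x2_90000000, mtime is at 0x2_80000000, and the new target
	 * is 0x3_10000000, writing the low word first would briefly leave
	 * the comparator at 0x2_10000000, i.e. in the past, so an interrupt
	 * could fire before the high word lands. Parking the high word at
	 * 0xffffffff first keeps every intermediate value in the far future.
	 */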
	r[1] = 0xffffffff;
	r[0] = (uint32_t)time;
	r[1] = (uint32_t)(time >> 32);
#endif
}

static uint64_t mtime(void)
{
#ifdef CONFIG_64BIT
	return *(volatile uint64_t *)MTIME_REG;
#else
	volatile uint32_t *r = (uint32_t *)MTIME_REG;
	uint32_t lo, hi;

	/* Likewise, must guard against rollover when reading */
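	/*
	 * Hypothetical race this guards against: if r[0] wraps from
	 * 0xffffffff to 0 after r[1] is sampled but before r[0] is, the low
	 * word would be paired with a stale high word; rereading r[1] and
	 * retrying catches that.
	 */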
	do {
		hi = r[1];
		lo = r[0];
	} while (r[1] != hi);

	return (((uint64_t)hi) << 32) | lo;
#endif
}

static void timer_isr(const void *arg)
{
	ARG_UNUSED(arg);

	k_spinlock_key_t key = k_spin_lock(&lock);

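	/*
	 * Work out how many full ticks have elapsed and advance last_count
	 * by exactly that many ticks' worth of cycles, so it stays aligned
	 * on a tick boundary; any leftover fraction of a tick is carried
	 * into the next announcement.
	 */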
	uint64_t now = mtime();
	uint64_t dcycles = now - last_count;
	uint32_t dticks = (cycle_diff_t)dcycles / CYC_PER_TICK;

	last_count += (cycle_diff_t)dticks * CYC_PER_TICK;
	last_ticks += dticks;
	last_elapsed = 0;

	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		uint64_t next = last_count + CYC_PER_TICK;

		set_mtimecmp(next);
	}

	k_spin_unlock(&lock, key);
	sys_clock_announce(dticks);
}

void sys_clock_set_timeout(int32_t ticks, bool idle)
{
	ARG_UNUSED(idle);

	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		return;
	}

	k_spinlock_key_t key = k_spin_lock(&lock);
	uint64_t cyc;

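	/*
	 * Clamp the programmed deadline to CYCLES_MAX cycles past
	 * last_count: K_TICKS_FOREVER still gets a (distant) comparator
	 * value, and larger requests are bounded so the eventual cycle
	 * delta fits in cycle_diff_t and at most INT32_MAX ticks.
	 */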
	if (ticks == K_TICKS_FOREVER) {
		cyc = last_count + CYCLES_MAX;
	} else {
		cyc = (last_ticks + last_elapsed + ticks) * CYC_PER_TICK;
		if ((cyc - last_count) > CYCLES_MAX) {
			cyc = last_count + CYCLES_MAX;
		}
	}
	set_mtimecmp(cyc);

	k_spin_unlock(&lock, key);
}

uint32_t sys_clock_elapsed(void)
{
	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		return 0;
	}

	k_spinlock_key_t key = k_spin_lock(&lock);
	uint64_t now = mtime();
	uint64_t dcycles = now - last_count;
	uint32_t dticks = (cycle_diff_t)dcycles / CYC_PER_TICK;

	last_elapsed = dticks;
	k_spin_unlock(&lock, key);
	return dticks;
}

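/*
 * CONFIG_RISCV_MACHINE_TIMER_SYSTEM_CLOCK_DIVIDER is used as a shift count
 * below, i.e. the mtime clock is taken to run at the system clock divided by
 * a power of two; shifting the raw count back up reports cycles in system
 * clock units.
 */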
uint32_t sys_clock_cycle_get_32(void)
{
	return ((uint32_t)mtime()) << CONFIG_RISCV_MACHINE_TIMER_SYSTEM_CLOCK_DIVIDER;
}

uint64_t sys_clock_cycle_get_64(void)
{
	return mtime() << CONFIG_RISCV_MACHINE_TIMER_SYSTEM_CLOCK_DIVIDER;
}

static int sys_clock_driver_init(void)
{
	IRQ_CONNECT(TIMER_IRQN, 0, timer_isr, NULL, 0);
	last_ticks = mtime() / CYC_PER_TICK;
	last_count = last_ticks * CYC_PER_TICK;
	set_mtimecmp(last_count + CYC_PER_TICK);
	irq_enable(TIMER_IRQN);
	return 0;
}

#ifdef CONFIG_SMP
void smp_timer_init(void)
{
	set_mtimecmp(last_count + CYC_PER_TICK);
	irq_enable(TIMER_IRQN);
}
#endif

SYS_INIT(sys_clock_driver_init, PRE_KERNEL_2, CONFIG_SYSTEM_CLOCK_INIT_PRIORITY);