/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2018 ARM Limited
 */
#ifndef __ASM_VDSO_GETTIMEOFDAY_H
#define __ASM_VDSO_GETTIMEOFDAY_H

#ifndef __ASSEMBLY__

#include <asm/barrier.h>
#include <asm/unistd.h>
#include <asm/errno.h>

#include <asm/vdso/compat_barrier.h>

#define VDSO_HAS_CLOCK_GETRES		1

#define BUILD_VDSO32			1

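/*
 * The fallbacks below invoke the corresponding AArch32 compat syscalls
 * directly: per the ARM EABI convention, the syscall number goes in r7,
 * the arguments in r0/r1, the return value comes back in r0, and
 * "swi #0" traps into the kernel.
 */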
static __always_inline
int gettimeofday_fallback(struct __kernel_old_timeval *_tv,
			  struct timezone *_tz)
{
	register struct timezone *tz asm("r1") = _tz;
	register struct __kernel_old_timeval *tv asm("r0") = _tv;
	register long ret asm ("r0");
	register long nr asm("r7") = __NR_compat_gettimeofday;

	asm volatile(
	"	swi #0\n"
	: "=r" (ret)
	: "r" (tv), "r" (tz), "r" (nr)
	: "memory");

	return ret;
}

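/*
 * Note that this fallback uses the 64-bit time syscall
 * (__NR_compat_clock_gettime64), so the result is y2038-safe even for
 * 32-bit callers; clock_gettime32_fallback() below keeps the legacy
 * syscall for the old_timespec32 interface.
 */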
static __always_inline
long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	register struct __kernel_timespec *ts asm("r1") = _ts;
	register clockid_t clkid asm("r0") = _clkid;
	register long ret asm ("r0");
	register long nr asm("r7") = __NR_compat_clock_gettime64;

	asm volatile(
	"	swi #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "memory");

	return ret;
}

static __always_inline
long clock_gettime32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
{
	register struct old_timespec32 *ts asm("r1") = _ts;
	register clockid_t clkid asm("r0") = _clkid;
	register long ret asm ("r0");
	register long nr asm("r7") = __NR_compat_clock_gettime;

	asm volatile(
	"	swi #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "memory");

	return ret;
}

static __always_inline
int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	register struct __kernel_timespec *ts asm("r1") = _ts;
	register clockid_t clkid asm("r0") = _clkid;
	register long ret asm ("r0");
	register long nr asm("r7") = __NR_compat_clock_getres_time64;

	asm volatile(
	"	swi #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "memory");

	return ret;
}

static __always_inline
int clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
{
	register struct old_timespec32 *ts asm("r1") = _ts;
	register clockid_t clkid asm("r0") = _clkid;
	register long ret asm ("r0");
	register long nr asm("r7") = __NR_compat_clock_getres;

	asm volatile(
	"	swi #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "memory");

	return ret;
}
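
/*
 * Illustrative sketch, not part of this header: the generic vDSO code in
 * lib/vdso/gettimeofday.c is what ends up calling these fallbacks when the
 * fast path cannot be taken, roughly along these lines (simplified):
 *
 *	int ret = __cvdso_clock_gettime_common(vd, clock, ts);
 *
 *	if (unlikely(ret))
 *		return clock_gettime_fallback(clock, ts);
 */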

static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
						 const struct vdso_data *vd)
{
	u64 res;

	/*
	 * Core checks for mode already, so this raced against a concurrent
	 * update. Return something. Core will do another round and then
	 * see the mode change and fall back to the syscall.
	 */
	if (clock_mode != VDSO_CLOCKMODE_ARCHTIMER)
		return 0;

	/*
	 * This isb() is required to prevent the counter value from being
	 * read speculatively, ahead of the preceding seq lock check.
	 */
	isb();
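	/* Read the virtual counter (CNTVCT) via its AArch32 MRRC encoding. */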
	asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (res));
	/*
	 * This isb() is required to prevent the seq lock re-read from being
	 * speculated ahead of the counter read.
	 */
	isb();

	return res;
}
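
/*
 * Illustrative sketch, not part of this header: the generic vDSO code
 * converts the raw counter value returned above into nanoseconds using the
 * mult/shift pair published in the vdso_data page, roughly:
 *
 *	cycles = __arch_get_hw_counter(vd->clock_mode, vd);
 *	ns = vdso_ts->nsec;	// stored left-shifted by vd->shift
 *	ns += ((cycles - vd->cycle_last) & vd->mask) * vd->mult;
 *	ns >>= vd->shift;
 */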

static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
{
	const struct vdso_data *ret;

	/*
	 * This simply puts &_vdso_data into ret. The reason why we don't use
	 * `ret = _vdso_data` is that the compiler tends to optimise this in a
	 * very suboptimal way: instead of keeping &_vdso_data in a register,
	 * it goes through a relocation almost every time _vdso_data must be
	 * accessed (even in subfunctions). This is both time and space
	 * consuming: each relocation uses a word in the code section, and it
	 * has to be loaded at runtime.
	 *
	 * This trick hides the assignment from the compiler. Since it cannot
	 * track where the pointer comes from, it will only use one relocation
	 * where __arch_get_vdso_data() is called, and then keep the result in
	 * a register.
	 */
	asm volatile("mov %0, %1" : "=r"(ret) : "r"(_vdso_data));

	return ret;
}
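
/*
 * For illustration (simplified): with a plain `ret = _vdso_data;` the
 * compiler may rematerialise the address at every use site, e.g.
 *
 *	ldr	r3, .Lliteral	@ one literal-pool word + one load per site
 *
 * which is exactly the per-access relocation cost the asm() above avoids.
 */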

#ifdef CONFIG_TIME_NS
static __always_inline const struct vdso_data *__arch_get_timens_vdso_data(void)
{
	const struct vdso_data *ret;

	/* See __arch_get_vdso_data(). */
	asm volatile("mov %0, %1" : "=r"(ret) : "r"(_timens_data));

	return ret;
}
#endif

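/*
 * The compat vDSO can only read the arch timer (via the MRRC access in
 * __arch_get_hw_counter() above), so only accept VDSO_CLOCKMODE_ARCHTIMER
 * here rather than the generic "anything but VDSO_CLOCKMODE_NONE" check.
 */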
static inline bool vdso_clocksource_ok(const struct vdso_data *vd)
{
	return vd->clock_mode == VDSO_CLOCKMODE_ARCHTIMER;
}
#define vdso_clocksource_ok vdso_clocksource_ok

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_VDSO_GETTIMEOFDAY_H */