/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Userland implementation of gettimeofday() for 32-bit processes in a
 * ppc64 kernel for use in the vDSO
 *
 * Copyright (C) 2004 Benjamin Herrenschmidt (benh@kernel.crashing.org),
 *                    IBM Corp.
 */
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/vdso.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>

/* Offset for the low 32-bit part of a field of long type */
#ifdef CONFIG_PPC64
#define LOPART	4
#define TSPEC_TV_SEC	TSPC64_TV_SEC+LOPART
#else
#define LOPART	0
#define TSPEC_TV_SEC	TSPC32_TV_SEC
#endif
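/* Example: these offsets matter because this code runs big-endian. On
 * ppc64 a 64-bit long's low-order word sits at byte offset 4, so a
 * 32-bit lwz must add LOPART to reach it; on ppc32 a long is a single
 * word and the offset is 0.
 */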

	.text
/*
 * Exact prototype of gettimeofday
 *
 * int __kernel_gettimeofday(struct timeval *tv, struct timezone *tz);
 *
 */
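/* Userland normally reaches this through the vDSO image the kernel maps
 * into each process (see AT_SYSINFO_EHDR); libc looks the symbol up and
 * calls it like any function. A rough sketch, using a vdso_sym() lookup
 * helper in the style of the kernel's vDSO selftests (illustrative only):
 *
 *	int (*gtod)(struct timeval *, struct timezone *) =
 *		vdso_sym("LINUX_2.6.15", "__kernel_gettimeofday");
 *	struct timeval tv;
 *	if (gtod && gtod(&tv, NULL) == 0)
 *		... tv is filled without entering the kernel ...
 */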
V_FUNCTION_BEGIN(__kernel_gettimeofday)
  .cfi_startproc
	mflr	r12
  .cfi_register lr,r12

	mr	r10,r3			/* r10 saves tv */
	mr	r11,r4			/* r11 saves tz */
	bl	__get_datapage@local	/* get data page */
	mr	r9,r3			/* datapage ptr in r9 */
	cmplwi	r10,0			/* check if tv is NULL */
	beq	3f
	lis	r7,1000000@ha		/* load up USEC_PER_SEC */
	addi	r7,r7,1000000@l		/* so we get microseconds in r4 */
	bl	__do_get_tspec@local	/* get sec/usec from tb & kernel */
	stw	r3,TVAL32_TV_SEC(r10)
	stw	r4,TVAL32_TV_USEC(r10)

3:	cmplwi	r11,0			/* check if tz is NULL */
	beq	1f
	lwz	r4,CFG_TZ_MINUTEWEST(r9)	/* fill tz */
	lwz	r5,CFG_TZ_DSTTIME(r9)
	stw	r4,TZONE_TZ_MINWEST(r11)
	stw	r5,TZONE_TZ_DSTTIME(r11)

1:	mtlr	r12
	crclr	cr0*4+so		/* clear SO: no error to report */
	li	r3,0
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_gettimeofday)

/*
 * Exact prototype of clock_gettime()
 *
 * int __kernel_clock_gettime(clockid_t clock_id, struct timespec *tp);
 *
 */
V_FUNCTION_BEGIN(__kernel_clock_gettime)
  .cfi_startproc
	/* Check for supported clock IDs */
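	/* cr0 tests CLOCK_REALTIME, cr1 tests CLOCK_MONOTONIC; cror merges
	 * the two eq bits so a single branch rejects everything else, while
	 * cr1 is preserved to answer "monotonic?" later (bne cr1,80f below).
	 */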
	cmplwi	cr0,r3,CLOCK_REALTIME
	cmplwi	cr1,r3,CLOCK_MONOTONIC
	cror	cr0*4+eq,cr0*4+eq,cr1*4+eq
	bne	cr0,99f

	mflr	r12			/* r12 saves lr */
  .cfi_register lr,r12
	mr	r11,r4			/* r11 saves tp */
	bl	__get_datapage@local	/* get data page */
	mr	r9,r3			/* datapage ptr in r9 */
	lis	r7,NSEC_PER_SEC@h	/* want nanoseconds */
	ori	r7,r7,NSEC_PER_SEC@l
50:	bl	__do_get_tspec@local	/* get sec/nsec from tb & kernel */
	bne	cr1,80f			/* not monotonic -> all done */

	/*
	 * CLOCK_MONOTONIC
	 */

	/* now we must fix up using wall to monotonic. We need to snapshot
	 * that value and do the counter trick again. Fortunately, we still
	 * have the counter value in r8 that was returned by __do_get_tspec.
	 * At this point, r3,r4 contain our sec/nsec values, r5 and r6
	 * can be used, r7 contains NSEC_PER_SEC.
	 */

	lwz	r5,(WTOM_CLOCK_SEC+LOPART)(r9)
	lwz	r6,WTOM_CLOCK_NSEC(r9)

	/* We now have our offset in r5,r6. We create a fake dependency
	 * on that value and re-check the counter
	 */
	or	r0,r6,r5
	xor	r0,r0,r0
	add	r9,r9,r0
	lwz	r0,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
	cmplw	cr0,r8,r0		/* check if updated */
	bne-	50b

	/* Calculate and store result. Note that this mimics the C code,
	 * which may cause funny results if nsec goes negative... is that
	 * possible at all?
	 */
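	/* Roughly, in C (a sketch of the instructions below):
	 *
	 *	sec  += wtom_sec;
	 *	nsec += wtom_nsec;
	 *	if (nsec >= NSEC_PER_SEC) {
	 *		nsec -= NSEC_PER_SEC;
	 *		sec++;
	 *	} else if (nsec < 0) {
	 *		nsec += NSEC_PER_SEC;
	 *		sec--;
	 *	}
	 */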
	add	r3,r3,r5
	add	r4,r4,r6
	cmpw	cr0,r4,r7
	cmpwi	cr1,r4,0
	blt	1f
	subf	r4,r7,r4
	addi	r3,r3,1
1:	bge	cr1,80f
	addi	r3,r3,-1
	add	r4,r4,r7

80:	stw	r3,TSPC32_TV_SEC(r11)
	stw	r4,TSPC32_TV_NSEC(r11)

	mtlr	r12
	crclr	cr0*4+so
	li	r3,0
	blr

	/*
	 * syscall fallback
	 */
99:
	li	r0,__NR_clock_gettime
  .cfi_restore lr
	sc
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_clock_gettime)


/*
 * Exact prototype of clock_getres()
 *
 * int __kernel_clock_getres(clockid_t clock_id, struct timespec *res);
 *
 */
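/* Note: both supported clocks report the same fixed resolution of
 * CLOCK_REALTIME_RES nanoseconds, and res may be NULL, in which case
 * only the clock_id is validated.
 */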
V_FUNCTION_BEGIN(__kernel_clock_getres)
  .cfi_startproc
	/* Check for supported clock IDs */
	cmpwi	cr0,r3,CLOCK_REALTIME
	cmpwi	cr1,r3,CLOCK_MONOTONIC
	cror	cr0*4+eq,cr0*4+eq,cr1*4+eq
	bne	cr0,99f

	li	r3,0
	cmplwi	cr0,r4,0		/* res == NULL ? */
	crclr	cr0*4+so
	beqlr				/* if so, nothing to store */
	lis	r5,CLOCK_REALTIME_RES@h
	ori	r5,r5,CLOCK_REALTIME_RES@l
	stw	r3,TSPC32_TV_SEC(r4)
	stw	r5,TSPC32_TV_NSEC(r4)
	blr

	/*
	 * syscall fallback
	 */
99:
	li	r0,__NR_clock_getres
	sc
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_clock_getres)


/*
 * Exact prototype of time()
 *
 * time_t time(time_t *t);
 *
 */
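/* Note: this only needs the cached xtime seconds from the data page;
 * no timebase arithmetic is required for one-second granularity.
 */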
V_FUNCTION_BEGIN(__kernel_time)
  .cfi_startproc
	mflr	r12
  .cfi_register lr,r12

	mr	r11,r3			/* r11 holds t */
	bl	__get_datapage@local
	mr	r9,r3			/* datapage ptr in r9 */

	lwz	r3,STAMP_XTIME+TSPEC_TV_SEC(r9)

	cmplwi	r11,0			/* check if t is NULL */
	beq	2f
	stw	r3,0(r11)		/* store result at *t */
2:	mtlr	r12
	crclr	cr0*4+so
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_time)

/*
 * This is the core of clock_gettime() and gettimeofday():
 * it returns the current time in r3 (seconds) and r4.
 * On entry, r7 gives the resolution of r4, either USEC_PER_SEC
 * or NSEC_PER_SEC, making r4 microseconds or nanoseconds.
 * It expects the datapage ptr in r9 and doesn't clobber it.
 * It clobbers r0, r5 and r6.
 * On return, r8 contains the counter value that can be reused.
 * This clobbers cr0 but not any other cr field.
 */
__do_get_tspec:
  .cfi_startproc
	/* Check for update count & load values. We use the
	 * low-order 32 bits of the update count.
	 */
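	/* This is a seqcount-style read section; roughly, in C:
	 *
	 *	do {
	 *		while ((seq = data->tb_update_count) & 1)
	 *			;	(odd: writer mid-update, spin)
	 *		load stamps and scale factor, compute the time;
	 *	} while (data->tb_update_count != seq);
	 */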
1:	lwz	r8,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
	andi.	r0,r8,1			/* pending update ? loop */
	bne-	1b
	xor	r0,r8,r8		/* create dependency */
	add	r9,r9,r0
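	/* The xor of r8 with itself always yields 0, but only once r8 is
	 * available; folding that 0 into the datapage pointer makes every
	 * load below address-dependent on the count read above, ordering
	 * them without a full read barrier.
	 */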

	/* Load orig stamp (offset to TB) */
	lwz	r5,CFG_TB_ORIG_STAMP(r9)
	lwz	r6,(CFG_TB_ORIG_STAMP+4)(r9)

	/* Get a stable TB value */
2:	MFTBU(r3)
	MFTBL(r4)
	MFTBU(r0)
	cmplw	cr0,r3,r0
	bne-	2b
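	/* If the two mftbu reads differ, the low word wrapped in between
	 * and r3:r4 would be torn; loop so they form a single coherent
	 * 64-bit timebase sample.
	 */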

	/* Subtract tb orig stamp and shift left 12 bits. */
	subfc	r4,r6,r4
	subfe	r0,r5,r3
	slwi	r0,r0,12
	rlwimi.	r0,r4,12,20,31
	slwi	r4,r4,12
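	/* Why 12 bits: tb_to_xs converts timebase ticks to 2^-20 s "xsec"
	 * units with a 2^64 scale, so seconds as a 32.32 value are
	 * (delta * tb_to_xs) >> 52, i.e. ((delta << 12) * tb_to_xs) >> 64,
	 * which the mulhwu sequence below computes using only the high
	 * word of tb_to_xs.
	 */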

	/*
	 * Load scale factor & do multiplication.
	 * We only use the high 32 bits of the tb_to_xs value.
	 * Even with a 1GHz timebase clock, the high 32 bits of
	 * tb_to_xs will be at least 4 million, so the error from
	 * ignoring the low 32 bits will be no more than 0.25ppm.
	 * The error will just make the clock run very very slightly
	 * slow until the next time the kernel updates the VDSO data,
	 * at which point the clock will catch up to the kernel's value,
	 * so there is no long-term error accumulation.
	 */
	lwz	r5,CFG_TB_TO_XS(r9)	/* load values */
	mulhwu	r4,r4,r5
	li	r3,0

	beq+	4f			/* skip high part computation if 0 */
	mulhwu	r3,r0,r5
	mullw	r5,r0,r5
	addc	r4,r4,r5
	addze	r3,r3
4:
	/* At this point, we have seconds since the xtime stamp
	 * as a 32.32 fixed-point number in r3 and r4.
	 * Load & add the xtime stamp.
	 */
	lwz	r5,STAMP_XTIME+TSPEC_TV_SEC(r9)
	lwz	r6,STAMP_SEC_FRAC(r9)
	addc	r4,r4,r6
	adde	r3,r3,r5

	/* We create a fake dependency on the result in r3/r4
	 * and re-check the counter
	 */
	or	r6,r4,r3
	xor	r0,r6,r6
	add	r9,r9,r0
	lwz	r0,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
	cmplw	cr0,r8,r0		/* check if updated */
	bne-	1b

	/* Convert the 2^-32 fraction to micro or nanoseconds:
	 * r4 = (r4 * r7) >> 32
	 */
	mulhwu	r4,r4,r7

	blr
  .cfi_endproc