1/* SPDX-License-Identifier: GPL-2.0-or-later */
2/*
3 * Userland implementation of gettimeofday() for 64 bits processes in a
4 * ppc64 kernel for use in the vDSO
5 *
 * Copyright (C) 2004 Benjamin Herrenschmidt (benh@kernel.crashing.org),
7 *                    IBM Corp.
8 */
9#include <asm/processor.h>
10#include <asm/ppc_asm.h>
11#include <asm/vdso.h>
12#include <asm/asm-offsets.h>
13#include <asm/unistd.h>
14
15	.text
16/*
17 * Exact prototype of gettimeofday
18 *
19 * int __kernel_gettimeofday(struct timeval *tv, struct timezone *tz);
20 *
21 */
V_FUNCTION_BEGIN(__kernel_gettimeofday)
  .cfi_startproc
	mflr	r12			/* save caller's return address in r12 */
  .cfi_register lr,r12

	mr	r11,r3			/* r11 holds tv */
	mr	r10,r4			/* r10 holds tz */
	bl	V_LOCAL_FUNC(__get_datapage)	/* get data page; ptr returned in r3 */
	cmpldi	r11,0			/* check if tv is NULL */
	beq	2f			/* NULL tv: skip the time computation */
	lis	r7,1000000@ha		/* load up USEC_PER_SEC */
	addi	r7,r7,1000000@l		/* r7 = 1000000 => __do_get_tspec yields usec */
	bl	V_LOCAL_FUNC(__do_get_tspec) /* get sec/us from tb & kernel */
	std	r4,TVAL64_TV_SEC(r11)	/* store sec in tv */
	std	r5,TVAL64_TV_USEC(r11)	/* store usec in tv */
2:	cmpldi	r10,0			/* check if tz is NULL */
	beq	1f			/* NULL tz: skip timezone fill-in */
	lwz	r4,CFG_TZ_MINUTEWEST(r3)/* fill tz from the vdso data page */
	lwz	r5,CFG_TZ_DSTTIME(r3)
	stw	r4,TZONE_TZ_MINWEST(r10)
	stw	r5,TZONE_TZ_DSTTIME(r10)
1:	mtlr	r12			/* restore caller's return address */
	crclr	cr0*4+so		/* clear SO bit: success, per syscall convention */
	li	r3,0			/* always success */
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_gettimeofday)
49
50
51/*
52 * Exact prototype of clock_gettime()
53 *
54 * int __kernel_clock_gettime(clockid_t clock_id, struct timespec *tp);
55 *
56 */
V_FUNCTION_BEGIN(__kernel_clock_gettime)
  .cfi_startproc
	/* Check for supported clock IDs.  The individual cr fields stay
	 * live for the rest of the function: cr1.eq = MONOTONIC,
	 * cr5.eq = either coarse clock, cr6.eq = MONOTONIC_COARSE.
	 */
	cmpwi	cr0,r3,CLOCK_REALTIME
	cmpwi	cr1,r3,CLOCK_MONOTONIC
	cror	cr0*4+eq,cr0*4+eq,cr1*4+eq	/* cr0.eq = REALTIME | MONOTONIC */

	cmpwi	cr5,r3,CLOCK_REALTIME_COARSE
	cmpwi	cr6,r3,CLOCK_MONOTONIC_COARSE
	cror	cr5*4+eq,cr5*4+eq,cr6*4+eq	/* cr5.eq = either coarse clock */

	cror	cr0*4+eq,cr0*4+eq,cr5*4+eq	/* cr0.eq = any supported clock */
	bne	cr0,99f				/* unsupported: syscall fallback */

	mflr	r12			/* r12 saves lr */
  .cfi_register lr,r12
	mr	r11,r4			/* r11 saves tp */
	bl	V_LOCAL_FUNC(__get_datapage)	/* get data page; ptr in r3 */
	lis	r7,NSEC_PER_SEC@h	/* want nanoseconds */
	ori	r7,r7,NSEC_PER_SEC@l
	beq	cr5,70f			/* coarse clocks: read data page directly */
50:	bl	V_LOCAL_FUNC(__do_get_tspec)	/* get time from tb & kernel */
	bne	cr1,80f			/* if not monotonic, all done */

	/*
	 * CLOCK_MONOTONIC
	 */

	/* now we must fixup using wall to monotonic. We need to snapshot
	 * that value and do the counter trick again. Fortunately, we still
	 * have the counter value in r8 that was returned by __do_get_tspec.
	 * At this point, r4,r5 contain our sec/nsec values.
	 */

	ld	r6,WTOM_CLOCK_SEC(r3)
	lwa	r9,WTOM_CLOCK_NSEC(r3)

	/* We now have our result in r6,r9. We create a fake dependency
	 * on that result and re-check the counter
	 */
	or	r0,r6,r9		/* r0 depends on the loaded values... */
	xor	r0,r0,r0		/* ...and is now zero, dependency kept */
	add	r3,r3,r0		/* data page ptr unchanged, but ordered */
	ld	r0,CFG_TB_UPDATE_COUNT(r3)
        cmpld   cr0,r0,r8		/* check if updated */
	bne-	50b			/* kernel updated meanwhile: retry */
	b	78f			/* apply wall->monotonic offset */

	/*
	 * For coarse clocks we get data directly from the vdso data page, so
	 * we don't need to call __do_get_tspec, but we still need to do the
	 * counter trick.
	 */
70:	ld      r8,CFG_TB_UPDATE_COUNT(r3)
	andi.   r0,r8,1                 /* pending update ? loop */
	bne-    70b
	add     r3,r3,r0		/* r0 is already 0 */

	/*
	 * CLOCK_REALTIME_COARSE, below values are needed for MONOTONIC_COARSE
	 * too
	 */
	ld      r4,STAMP_XTIME+TSPC64_TV_SEC(r3)
	ld      r5,STAMP_XTIME+TSPC64_TV_NSEC(r3)
	bne     cr6,75f			/* not MONOTONIC_COARSE: skip wtom load */

	/* CLOCK_MONOTONIC_COARSE */
	ld	r6,WTOM_CLOCK_SEC(r3)
	lwa     r9,WTOM_CLOCK_NSEC(r3)

	/* check if counter has updated */
	or      r0,r6,r9		/* fold all loaded values into r0... */
75:	or	r0,r0,r4
	or	r0,r0,r5
	xor     r0,r0,r0		/* ...zero it, keeping the dependency */
	add     r3,r3,r0		/* orders the count reload after the data */
	ld      r0,CFG_TB_UPDATE_COUNT(r3)
	cmpld   cr0,r0,r8               /* check if updated */
	bne-    70b			/* kernel updated meanwhile: retry */

	/* Counter has not updated, so continue calculating proper values for
	 * sec and nsec if monotonic coarse, or just return with the proper
	 * values for realtime.
	 */
	bne     cr6,80f

	/* Add wall->monotonic offset and check for overflow or underflow */
78:	add     r4,r4,r6		/* sec += wtom sec */
	add     r5,r5,r9		/* nsec += wtom nsec (may leave [0,r7)) */
	cmpd    cr0,r5,r7		/* nsec >= NSEC_PER_SEC ? */
	cmpdi   cr1,r5,0		/* nsec < 0 ? */
	blt     79f
	subf    r5,r7,r5		/* overflow: nsec -= NSEC_PER_SEC */
	addi    r4,r4,1			/*           sec += 1 */
79:	bge     cr1,80f
	addi    r4,r4,-1		/* underflow: sec -= 1 */
	add     r5,r5,r7		/*            nsec += NSEC_PER_SEC */

80:	std	r4,TSPC64_TV_SEC(r11)	/* store result in *tp */
	std	r5,TSPC64_TV_NSEC(r11)

	mtlr	r12			/* restore caller's return address */
	crclr	cr0*4+so		/* clear SO bit: success */
	li	r3,0
	blr

	/*
	 * syscall fallback
	 */
99:
	li	r0,__NR_clock_gettime
  .cfi_restore lr
	sc				/* kernel sets r3/SO per syscall ABI */
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_clock_gettime)
173
174
175/*
176 * Exact prototype of clock_getres()
177 *
178 * int __kernel_clock_getres(clockid_t clock_id, struct timespec *res);
179 *
180 */
V_FUNCTION_BEGIN(__kernel_clock_getres)
  .cfi_startproc
	/* Check for supported clock IDs */
	cmpwi	cr0,r3,CLOCK_REALTIME
	cmpwi	cr1,r3,CLOCK_MONOTONIC
	cror	cr0*4+eq,cr0*4+eq,cr1*4+eq	/* cr0.eq = REALTIME | MONOTONIC */
	bne	cr0,99f				/* others: syscall fallback */

	li	r3,0			/* return value 0 = success */
	cmpldi	cr0,r4,0		/* res pointer NULL ? */
	crclr	cr0*4+so		/* clear SO bit: success */
	beqlr				/* NULL res: nothing to store, return */
	lis	r5,CLOCK_REALTIME_RES@h
	ori	r5,r5,CLOCK_REALTIME_RES@l
	std	r3,TSPC64_TV_SEC(r4)	/* res->tv_sec = 0 */
	std	r5,TSPC64_TV_NSEC(r4)	/* res->tv_nsec = CLOCK_REALTIME_RES */
	blr

	/*
	 * syscall fallback
	 */
99:
	li	r0,__NR_clock_getres
	sc				/* kernel sets r3/SO per syscall ABI */
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_clock_getres)
208
209/*
210 * Exact prototype of time()
211 *
 * time_t time(time_t *t);
213 *
214 */
V_FUNCTION_BEGIN(__kernel_time)
  .cfi_startproc
	mflr	r12			/* save caller's return address in r12 */
  .cfi_register lr,r12

	mr	r11,r3			/* r11 holds t */
	bl	V_LOCAL_FUNC(__get_datapage)	/* data page ptr in r3 */

	/* coarse seconds straight from the kernel-updated stamp */
	ld	r4,STAMP_XTIME+TSPC64_TV_SEC(r3)

	cmpldi	r11,0			/* check if t is NULL */
	beq	2f
	std	r4,0(r11)		/* store result at *t */
2:	mtlr	r12			/* restore caller's return address */
	crclr	cr0*4+so		/* clear SO bit: success */
	mr	r3,r4			/* also return the seconds value */
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_time)
234
235
236/*
237 * This is the core of clock_gettime() and gettimeofday(),
238 * it returns the current time in r4 (seconds) and r5.
239 * On entry, r7 gives the resolution of r5, either USEC_PER_SEC
240 * or NSEC_PER_SEC, giving r5 in microseconds or nanoseconds.
241 * It expects the datapage ptr in r3 and doesn't clobber it.
242 * It clobbers r0, r6 and r9.
243 * On return, r8 contains the counter value that can be reused.
244 * This clobbers cr0 but not any other cr field.
245 */
V_FUNCTION_BEGIN(__do_get_tspec)
  .cfi_startproc
	/* check for update count & load values */
1:	ld	r8,CFG_TB_UPDATE_COUNT(r3)
	andi.	r0,r8,1			/* pending update ? loop */
	bne-	1b
	xor	r0,r8,r8		/* create dependency */
	add	r3,r3,r0		/* r3 unchanged, loads below are ordered */

	/* Get TB & offset it. We use the MFTB macro which will generate
	 * workaround code for Cell.
	 */
	MFTB(r6)
	ld	r9,CFG_TB_ORIG_STAMP(r3)
	subf	r6,r9,r6		/* r6 = ticks since tb_orig_stamp */

	/* Scale result */
	ld	r5,CFG_TB_TO_XS(r3)
	sldi	r6,r6,12		/* compute time since stamp_xtime */
	mulhdu	r6,r6,r5		/* in units of 2^-32 seconds */

	/* Add stamp since epoch */
	ld	r4,STAMP_XTIME+TSPC64_TV_SEC(r3)
	lwz	r5,STAMP_SEC_FRAC(r3)	/* fractional second of the stamp */
	/* Fold all loaded values into r0, zero it, and add it to r3:
	 * a fake dependency that forces the update count reload below
	 * to happen after the data loads above.
	 */
	or	r0,r4,r5
	or	r0,r0,r6
	xor	r0,r0,r0
	add	r3,r3,r0
	ld	r0,CFG_TB_UPDATE_COUNT(r3)
	cmpld   r0,r8			/* check if updated */
	bne-	1b			/* reload if so */

	/* convert to seconds & nanoseconds and add to stamp */
	add	r6,r6,r5		/* add on fractional seconds of xtime */
	mulhwu	r5,r6,r7		/* compute micro or nanoseconds and */
	srdi	r6,r6,32		/* seconds since stamp_xtime */
	clrldi	r5,r5,32		/* keep only the low 32-bit result */
	add	r4,r4,r6		/* r4 = seconds since the epoch */
	blr
  .cfi_endproc
V_FUNCTION_END(__do_get_tspec)
287