/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Userland implementation of gettimeofday() for 64 bits processes in a
 * ppc64 kernel for use in the vDSO
 *
 * Copyright (C) 2004 Benjamin Herrenschmidt (benh@kernel.crashing.org),
 *                    IBM Corp.
 */
9#include <asm/processor.h>
10#include <asm/ppc_asm.h>
11#include <asm/vdso.h>
12#include <asm/vdso_datapage.h>
13#include <asm/asm-offsets.h>
14#include <asm/unistd.h>
15
16	.text
17/*
18 * Exact prototype of gettimeofday
19 *
20 * int __kernel_gettimeofday(struct timeval *tv, struct timezone *tz);
21 *
22 */
23V_FUNCTION_BEGIN(__kernel_gettimeofday)
24  .cfi_startproc
25	mflr	r12
26  .cfi_register lr,r12
27
28	mr	r11,r3			/* r11 holds tv */
29	mr	r10,r4			/* r10 holds tz */
30	get_datapage	r3, r0
31	cmpldi	r11,0			/* check if tv is NULL */
32	beq	2f
33	lis	r7,1000000@ha		/* load up USEC_PER_SEC */
34	addi	r7,r7,1000000@l
35	bl	V_LOCAL_FUNC(__do_get_tspec) /* get sec/us from tb & kernel */
36	std	r4,TVAL64_TV_SEC(r11)	/* store sec in tv */
37	std	r5,TVAL64_TV_USEC(r11)	/* store usec in tv */
382:	cmpldi	r10,0			/* check if tz is NULL */
39	beq	1f
40	lwz	r4,CFG_TZ_MINUTEWEST(r3)/* fill tz */
41	lwz	r5,CFG_TZ_DSTTIME(r3)
42	stw	r4,TZONE_TZ_MINWEST(r10)
43	stw	r5,TZONE_TZ_DSTTIME(r10)
441:	mtlr	r12
45	crclr	cr0*4+so
46	li	r3,0			/* always success */
47	blr
48  .cfi_endproc
49V_FUNCTION_END(__kernel_gettimeofday)
50
51
52/*
53 * Exact prototype of clock_gettime()
54 *
55 * int __kernel_clock_gettime(clockid_t clock_id, struct timespec *tp);
56 *
57 */
58V_FUNCTION_BEGIN(__kernel_clock_gettime)
59  .cfi_startproc
60	/* Check for supported clock IDs */
61	cmpwi	cr0,r3,CLOCK_REALTIME
62	cmpwi	cr1,r3,CLOCK_MONOTONIC
63	cror	cr0*4+eq,cr0*4+eq,cr1*4+eq
64
65	cmpwi	cr5,r3,CLOCK_REALTIME_COARSE
66	cmpwi	cr6,r3,CLOCK_MONOTONIC_COARSE
67	cror	cr5*4+eq,cr5*4+eq,cr6*4+eq
68
69	cror	cr0*4+eq,cr0*4+eq,cr5*4+eq
70	bne	cr0,99f
71
72	mflr	r12			/* r12 saves lr */
73  .cfi_register lr,r12
74	mr	r11,r4			/* r11 saves tp */
75	get_datapage	r3, r0
76	lis	r7,NSEC_PER_SEC@h	/* want nanoseconds */
77	ori	r7,r7,NSEC_PER_SEC@l
78	beq	cr5,70f
7950:	bl	V_LOCAL_FUNC(__do_get_tspec)	/* get time from tb & kernel */
80	bne	cr1,80f			/* if not monotonic, all done */
81
82	/*
83	 * CLOCK_MONOTONIC
84	 */
85
86	/* now we must fixup using wall to monotonic. We need to snapshot
87	 * that value and do the counter trick again. Fortunately, we still
88	 * have the counter value in r8 that was returned by __do_get_tspec.
89	 * At this point, r4,r5 contain our sec/nsec values.
90	 */
91
92	ld	r6,WTOM_CLOCK_SEC(r3)
93	lwa	r9,WTOM_CLOCK_NSEC(r3)
94
95	/* We now have our result in r6,r9. We create a fake dependency
96	 * on that result and re-check the counter
97	 */
98	or	r0,r6,r9
99	xor	r0,r0,r0
100	add	r3,r3,r0
101	ld	r0,CFG_TB_UPDATE_COUNT(r3)
102        cmpld   cr0,r0,r8		/* check if updated */
103	bne-	50b
104	b	78f
105
106	/*
107	 * For coarse clocks we get data directly from the vdso data page, so
108	 * we don't need to call __do_get_tspec, but we still need to do the
109	 * counter trick.
110	 */
11170:	ld      r8,CFG_TB_UPDATE_COUNT(r3)
112	andi.   r0,r8,1                 /* pending update ? loop */
113	bne-    70b
114	add     r3,r3,r0		/* r0 is already 0 */
115
116	/*
117	 * CLOCK_REALTIME_COARSE, below values are needed for MONOTONIC_COARSE
118	 * too
119	 */
120	ld      r4,STAMP_XTIME_SEC(r3)
121	ld      r5,STAMP_XTIME_NSEC(r3)
122	bne     cr6,75f
123
124	/* CLOCK_MONOTONIC_COARSE */
125	ld	r6,WTOM_CLOCK_SEC(r3)
126	lwa     r9,WTOM_CLOCK_NSEC(r3)
127
128	/* check if counter has updated */
129	or      r0,r6,r9
13075:	or	r0,r0,r4
131	or	r0,r0,r5
132	xor     r0,r0,r0
133	add     r3,r3,r0
134	ld      r0,CFG_TB_UPDATE_COUNT(r3)
135	cmpld   cr0,r0,r8               /* check if updated */
136	bne-    70b
137
138	/* Counter has not updated, so continue calculating proper values for
139	 * sec and nsec if monotonic coarse, or just return with the proper
140	 * values for realtime.
141	 */
142	bne     cr6,80f
143
144	/* Add wall->monotonic offset and check for overflow or underflow */
14578:	add     r4,r4,r6
146	add     r5,r5,r9
147	cmpd    cr0,r5,r7
148	cmpdi   cr1,r5,0
149	blt     79f
150	subf    r5,r7,r5
151	addi    r4,r4,1
15279:	bge     cr1,80f
153	addi    r4,r4,-1
154	add     r5,r5,r7
155
15680:	std	r4,TSPC64_TV_SEC(r11)
157	std	r5,TSPC64_TV_NSEC(r11)
158
159	mtlr	r12
160	crclr	cr0*4+so
161	li	r3,0
162	blr
163
164	/*
165	 * syscall fallback
166	 */
16799:
168	li	r0,__NR_clock_gettime
169  .cfi_restore lr
170	sc
171	blr
172  .cfi_endproc
173V_FUNCTION_END(__kernel_clock_gettime)
174
175
176/*
177 * Exact prototype of clock_getres()
178 *
179 * int __kernel_clock_getres(clockid_t clock_id, struct timespec *res);
180 *
181 */
182V_FUNCTION_BEGIN(__kernel_clock_getres)
183  .cfi_startproc
184	/* Check for supported clock IDs */
185	cmpwi	cr0,r3,CLOCK_REALTIME
186	cmpwi	cr1,r3,CLOCK_MONOTONIC
187	cror	cr0*4+eq,cr0*4+eq,cr1*4+eq
188	bne	cr0,99f
189
190	mflr	r12
191  .cfi_register lr,r12
192	get_datapage	r3, r0
193	lwz	r5, CLOCK_HRTIMER_RES(r3)
194	mtlr	r12
195	li	r3,0
196	cmpldi	cr0,r4,0
197	crclr	cr0*4+so
198	beqlr
199	std	r3,TSPC64_TV_SEC(r4)
200	std	r5,TSPC64_TV_NSEC(r4)
201	blr
202
203	/*
204	 * syscall fallback
205	 */
20699:
207	li	r0,__NR_clock_getres
208	sc
209	blr
210  .cfi_endproc
211V_FUNCTION_END(__kernel_clock_getres)
212
213/*
214 * Exact prototype of time()
215 *
216 * time_t time(time *t);
217 *
218 */
219V_FUNCTION_BEGIN(__kernel_time)
220  .cfi_startproc
221	mflr	r12
222  .cfi_register lr,r12
223
224	mr	r11,r3			/* r11 holds t */
225	get_datapage	r3, r0
226
227	ld	r4,STAMP_XTIME_SEC(r3)
228
229	cmpldi	r11,0			/* check if t is NULL */
230	beq	2f
231	std	r4,0(r11)		/* store result at *t */
2322:	mtlr	r12
233	crclr	cr0*4+so
234	mr	r3,r4
235	blr
236  .cfi_endproc
237V_FUNCTION_END(__kernel_time)
238
239
240/*
241 * This is the core of clock_gettime() and gettimeofday(),
242 * it returns the current time in r4 (seconds) and r5.
243 * On entry, r7 gives the resolution of r5, either USEC_PER_SEC
244 * or NSEC_PER_SEC, giving r5 in microseconds or nanoseconds.
245 * It expects the datapage ptr in r3 and doesn't clobber it.
246 * It clobbers r0, r6 and r9.
247 * On return, r8 contains the counter value that can be reused.
248 * This clobbers cr0 but not any other cr field.
249 */
250V_FUNCTION_BEGIN(__do_get_tspec)
251  .cfi_startproc
252	/* check for update count & load values */
2531:	ld	r8,CFG_TB_UPDATE_COUNT(r3)
254	andi.	r0,r8,1			/* pending update ? loop */
255	bne-	1b
256	xor	r0,r8,r8		/* create dependency */
257	add	r3,r3,r0
258
259	/* Get TB & offset it. We use the MFTB macro which will generate
260	 * workaround code for Cell.
261	 */
262	MFTB(r6)
263	ld	r9,CFG_TB_ORIG_STAMP(r3)
264	subf	r6,r9,r6
265
266	/* Scale result */
267	ld	r5,CFG_TB_TO_XS(r3)
268	sldi	r6,r6,12		/* compute time since stamp_xtime */
269	mulhdu	r6,r6,r5		/* in units of 2^-32 seconds */
270
271	/* Add stamp since epoch */
272	ld	r4,STAMP_XTIME_SEC(r3)
273	lwz	r5,STAMP_SEC_FRAC(r3)
274	or	r0,r4,r5
275	or	r0,r0,r6
276	xor	r0,r0,r0
277	add	r3,r3,r0
278	ld	r0,CFG_TB_UPDATE_COUNT(r3)
279	cmpld   r0,r8			/* check if updated */
280	bne-	1b			/* reload if so */
281
282	/* convert to seconds & nanoseconds and add to stamp */
283	add	r6,r6,r5		/* add on fractional seconds of xtime */
284	mulhwu	r5,r6,r7		/* compute micro or nanoseconds and */
285	srdi	r6,r6,32		/* seconds since stamp_xtime */
286	clrldi	r5,r5,32
287	add	r4,r4,r6
288	blr
289  .cfi_endproc
290V_FUNCTION_END(__do_get_tspec)
291