/*
 * Copyright (c) 2018 Friedt Professional Engineering Services, Inc
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <errno.h>
#include <stdint.h>
#include <time.h>

#include <zephyr/sys_clock.h>
#include <zephyr/ztest.h>

#define SELECT_NANOSLEEP       1
#define SELECT_CLOCK_NANOSLEEP 0

static inline int select_nanosleep(int selection, clockid_t clock_id, int flags,
				   const struct timespec *rqtp, struct timespec *rmtp)
{
	if (selection == SELECT_NANOSLEEP) {
		return nanosleep(rqtp, rmtp);
	}
	return clock_nanosleep(clock_id, flags, rqtp, rmtp);
}

static inline uint64_t cycle_get_64(void)
{
	if (IS_ENABLED(CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER)) {
		return k_cycle_get_64();
	} else {
		return k_cycle_get_32();
	}
}

static void common_errors(int selection, clockid_t clock_id, int flags)
{
	struct timespec rem = {};
	struct timespec req = {};

	/*
	 * invalid parameters
	 */
	zassert_equal(select_nanosleep(selection, clock_id, flags, NULL, NULL), -1);
	zassert_equal(errno, EFAULT);

	/* NULL request */
	errno = 0;
	zassert_equal(select_nanosleep(selection, clock_id, flags, NULL, &rem), -1);
	zassert_equal(errno, EFAULT);
	/* Expect rem to be unmodified when the function returns */
	zassert_equal(rem.tv_sec, 0, "actual: %d expected: %d", rem.tv_sec, 0);
	zassert_equal(rem.tv_nsec, 0, "actual: %d expected: %d", rem.tv_nsec, 0);

	/* negative times */
	errno = 0;
	req = (struct timespec){.tv_sec = -1, .tv_nsec = 0};
	zassert_equal(select_nanosleep(selection, clock_id, flags, &req, NULL), -1);
	zassert_equal(errno, EINVAL);

	errno = 0;
	req = (struct timespec){.tv_sec = 0, .tv_nsec = -1};
	zassert_equal(select_nanosleep(selection, clock_id, flags, &req, NULL), -1);
	zassert_equal(errno, EINVAL);

	errno = 0;
	req = (struct timespec){.tv_sec = -1, .tv_nsec = -1};
	zassert_equal(select_nanosleep(selection, clock_id, flags, &req, NULL), -1);
	zassert_equal(errno, EINVAL);

	/* nanoseconds too high */
	errno = 0;
	req = (struct timespec){.tv_sec = 0, .tv_nsec = 1000000000};
	zassert_equal(select_nanosleep(selection, clock_id, flags, &req, NULL), -1);
	zassert_equal(errno, EINVAL);

	/*
	 * Valid parameters
	 */
	errno = 0;

	/* Happy path, plus make sure the const input is unmodified */
	req = (struct timespec){.tv_sec = 1, .tv_nsec = 1};
	zassert_equal(select_nanosleep(selection, clock_id, flags, &req, NULL), 0);
	zassert_equal(errno, 0);
	zassert_equal(req.tv_sec, 1);
	zassert_equal(req.tv_nsec, 1);

	/* Sleep again with rem provided. Expect rem to be unmodified when the function returns */
	zassert_equal(select_nanosleep(selection, clock_id, flags, &req, &rem), 0);
	zassert_equal(errno, 0);
	zassert_equal(rem.tv_sec, 0, "actual: %d expected: %d", rem.tv_sec, 0);
	zassert_equal(rem.tv_nsec, 0, "actual: %d expected: %d", rem.tv_nsec, 0);

	/*
	 * req and rem point to the same timespec
	 *
	 * Normative spec says they may be the same.
	 * Expect rem to be zero after returning.
	 */
	req = (struct timespec){.tv_sec = 0, .tv_nsec = 1};
	zassert_equal(select_nanosleep(selection, clock_id, flags, &req, &req), 0);
	zassert_equal(errno, 0);
	zassert_equal(req.tv_sec, 0, "actual: %d expected: %d", req.tv_sec, 0);
	zassert_equal(req.tv_nsec, 0, "actual: %d expected: %d", req.tv_nsec, 0);
}

ZTEST(nanosleep, test_nanosleep_errors_errno)
{
	common_errors(SELECT_NANOSLEEP, CLOCK_REALTIME, 0);
}

ZTEST(nanosleep, test_clock_nanosleep_errors_errno)
{
	struct timespec rem = {};
	struct timespec req = {};

	common_errors(SELECT_CLOCK_NANOSLEEP, CLOCK_MONOTONIC, TIMER_ABSTIME);

	/* Absolute timeout in the past. */
	clock_gettime(CLOCK_MONOTONIC, &req);
	zassert_equal(clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &req, &rem), 0);
	zassert_equal(rem.tv_sec, 0, "actual: %d expected: %d", rem.tv_sec, 0);
	zassert_equal(rem.tv_nsec, 0, "actual: %d expected: %d", rem.tv_nsec, 0);

	/* Absolute timeout in the past relative to the realtime clock. */
	clock_gettime(CLOCK_REALTIME, &req);
	zassert_equal(clock_nanosleep(CLOCK_REALTIME, TIMER_ABSTIME, &req, &rem), 0);
	zassert_equal(rem.tv_sec, 0, "actual: %d expected: %d", rem.tv_sec, 0);
	zassert_equal(rem.tv_nsec, 0, "actual: %d expected: %d", rem.tv_nsec, 0);
}

/**
 * @brief Check that a call to nanosleep has yielded execution for some minimum time.
 *
 * Check that the actual time slept is >= the total time specified by @p s (in seconds) and
 * @p ns (in nanoseconds).
 *
 * @note The time specified by @p s and @p ns is assumed to be absolute (i.e. a time-point)
 * when @p selection is set to @ref SELECT_CLOCK_NANOSLEEP. The time is assumed to be relative
 * when @p selection is set to @ref SELECT_NANOSLEEP.
 *
 * @param selection Either @ref SELECT_CLOCK_NANOSLEEP or @ref SELECT_NANOSLEEP
 * @param clock_id The clock to test (e.g. @ref CLOCK_MONOTONIC or @ref CLOCK_REALTIME)
 * @param flags Flags to pass to @ref clock_nanosleep
 * @param s Partial lower bound for yielded time (in seconds)
 * @param ns Partial lower bound for yielded time (in nanoseconds)
 */
static void common_lower_bound_check(int selection, clockid_t clock_id, int flags, const uint32_t s,
				     uint32_t ns)
{
	int r;
	uint64_t actual_ns;
	uint64_t exp_ns;
	uint64_t now;
	uint64_t then;
	struct timespec rem = {0, 0};
	struct timespec req = {s, ns};

	errno = 0;
	then = cycle_get_64();
	r = select_nanosleep(selection, clock_id, flags, &req, &rem);
	now = cycle_get_64();

	zassert_equal(r, 0, "actual: %d expected: %d", r, 0);
	zassert_equal(errno, 0, "actual: %d expected: %d", errno, 0);
	zassert_equal(req.tv_sec, s, "actual: %d expected: %d", req.tv_sec, s);
	zassert_equal(req.tv_nsec, ns, "actual: %d expected: %d", req.tv_nsec, ns);
	zassert_equal(rem.tv_sec, 0, "actual: %d expected: %d", rem.tv_sec, 0);
	zassert_equal(rem.tv_nsec, 0, "actual: %d expected: %d", rem.tv_nsec, 0);

	switch (selection) {
	case SELECT_NANOSLEEP:
		/* exp_ns and actual_ns are relative (i.e. durations) */
		actual_ns = k_cyc_to_ns_ceil64(now - then);
		break;
	case SELECT_CLOCK_NANOSLEEP:
		/* exp_ns and actual_ns are absolute (i.e. time-points) */
		actual_ns = k_cyc_to_ns_ceil64(now);
		break;
	default:
		zassert_unreachable();
		break;
	}

	exp_ns = (uint64_t)s * NSEC_PER_SEC + ns;
	/* round up to the nearest microsecond for k_busy_wait() */
	exp_ns = DIV_ROUND_UP(exp_ns, NSEC_PER_USEC) * NSEC_PER_USEC;
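	/*
	 * Worked example: with s = 1 and ns = 1001 (as used in
	 * test_nanosleep_execution below), exp_ns = 1 * NSEC_PER_SEC + 1001 =
	 * 1000001001 ns, which the rounding above bumps up to the next whole
	 * microsecond, 1000002000 ns.
	 */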

	/* The comparison may be incorrect if a counter wrap happened. On the ARC HSDK platforms
	 * the counter clock frequency is high (500 MHz or 1 GHz), so a counter wrap is quite
	 * likely to happen if we wait long enough. As some test cases wait for more than 1 second,
	 * there is a significant chance of a false-positive assertion failure.
	 * TODO: switch the test to k_cycle_get_64() usage where available.
	 */
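	/*
	 * For reference, assuming a 32-bit cycle counter: at 1 GHz the counter
	 * wraps roughly every 2^32 / 1e9 ~= 4.3 s, and at 500 MHz roughly every
	 * 8.6 s, so a sleep near 1 s can straddle a wrap depending on the
	 * counter value when the sleep starts.
	 */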
#if !defined(CONFIG_SOC_ARC_HSDK) && !defined(CONFIG_SOC_ARC_HSDK4XD)
	/* lower bounds check */
	zassert_true(actual_ns >= exp_ns, "actual: %llu expected: %llu", actual_ns, exp_ns);
#endif

	/* TODO: Upper bounds check when hr timers are available */
}

ZTEST(nanosleep, test_nanosleep_execution)
{
	/* sleep for 1ns */
	common_lower_bound_check(SELECT_NANOSLEEP, 0, 0, 0, 1);

	/* sleep for 1us + 1ns */
	common_lower_bound_check(SELECT_NANOSLEEP, 0, 0, 0, 1001);

	/* sleep for 500000000ns */
	common_lower_bound_check(SELECT_NANOSLEEP, 0, 0, 0, 500000000);

	/* sleep for 1s */
	common_lower_bound_check(SELECT_NANOSLEEP, 0, 0, 1, 0);

	/* sleep for 1s + 1ns */
	common_lower_bound_check(SELECT_NANOSLEEP, 0, 0, 1, 1);

	/* sleep for 1s + 1us + 1ns */
	common_lower_bound_check(SELECT_NANOSLEEP, 0, 0, 1, 1001);
}

ZTEST(nanosleep, test_clock_nanosleep_execution)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);

	/* absolute sleeps with the monotonic clock and reference time ts */

	/* until 1s + 1ns past the reference time */
	common_lower_bound_check(SELECT_CLOCK_NANOSLEEP, CLOCK_MONOTONIC, TIMER_ABSTIME,
				 ts.tv_sec + 1, 1);

	/* until 1s + 1us past the reference time */
	common_lower_bound_check(SELECT_CLOCK_NANOSLEEP, CLOCK_MONOTONIC, TIMER_ABSTIME,
				 ts.tv_sec + 1, 1000);

	/* until 1s + 500000000ns past the reference time */
	common_lower_bound_check(SELECT_CLOCK_NANOSLEEP, CLOCK_MONOTONIC, TIMER_ABSTIME,
				 ts.tv_sec + 1, 500000000);

	/* until 2s past the reference time */
	common_lower_bound_check(SELECT_CLOCK_NANOSLEEP, CLOCK_MONOTONIC, TIMER_ABSTIME,
				 ts.tv_sec + 2, 0);

	/* until 2s + 1ns past the reference time */
	common_lower_bound_check(SELECT_CLOCK_NANOSLEEP, CLOCK_MONOTONIC, TIMER_ABSTIME,
				 ts.tv_sec + 2, 1);

	/* until 2s + 1us + 1ns past the reference time */
	common_lower_bound_check(SELECT_CLOCK_NANOSLEEP, CLOCK_MONOTONIC, TIMER_ABSTIME,
				 ts.tv_sec + 2, 1001);

	clock_gettime(CLOCK_REALTIME, &ts);

	/* absolute sleeps with the real time clock and adjusted reference time ts */

	/* until 1s + 1ns past the reference time */
	common_lower_bound_check(SELECT_CLOCK_NANOSLEEP, CLOCK_REALTIME, TIMER_ABSTIME,
				 ts.tv_sec + 1, 1);

	/* until 1s + 1us past the reference time */
	common_lower_bound_check(SELECT_CLOCK_NANOSLEEP, CLOCK_REALTIME, TIMER_ABSTIME,
				 ts.tv_sec + 1, 1000);

	/* until 1s + 500000000ns past the reference time */
	common_lower_bound_check(SELECT_CLOCK_NANOSLEEP, CLOCK_REALTIME, TIMER_ABSTIME,
				 ts.tv_sec + 1, 500000000);

	/* until 2s past the reference time */
	common_lower_bound_check(SELECT_CLOCK_NANOSLEEP, CLOCK_REALTIME, TIMER_ABSTIME,
				 ts.tv_sec + 2, 0);

	/* until 2s + 1ns past the reference time */
	common_lower_bound_check(SELECT_CLOCK_NANOSLEEP, CLOCK_REALTIME, TIMER_ABSTIME,
				 ts.tv_sec + 2, 1);

	/* until 2s + 1us + 1ns past the reference time */
	common_lower_bound_check(SELECT_CLOCK_NANOSLEEP, CLOCK_REALTIME, TIMER_ABSTIME,
				 ts.tv_sec + 2, 1001);
}

ZTEST_SUITE(nanosleep, NULL, NULL, NULL, NULL, NULL);