1 /*
2  * Copyright (c) 2017 Intel Corporation
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include <zephyr/ztest.h>
8 #include "test_sched.h"
9 
10 #ifdef CONFIG_TIMESLICING
11 
12 #define NUM_THREAD 3
13 
14 BUILD_ASSERT(NUM_THREAD <= MAX_NUM_THREAD);
15 
16 /* slice size in millisecond */
17 #define SLICE_SIZE 200
18 /* busy for more than one slice */
19 #define BUSY_MS (SLICE_SIZE + 20)
20 /* a half timeslice */
21 #define HALF_SLICE_SIZE (SLICE_SIZE >> 1)
22 #define HALF_SLICE_SIZE_CYCLES                                                 \
23 	((uint64_t)(HALF_SLICE_SIZE)*sys_clock_hw_cycles_per_sec() / 1000)
24 
25 /* Task switch tolerance ... */
26 #if CONFIG_SYS_CLOCK_TICKS_PER_SEC >= 1000
27 /* ... will not take more than 1 ms. */
28 #define TASK_SWITCH_TOLERANCE (1)
29 #else
30 /* ... 1ms is faster than a tick, loosen tolerance to 1 tick */
31 #define TASK_SWITCH_TOLERANCE (1000 / CONFIG_SYS_CLOCK_TICKS_PER_SEC)
32 #endif
33 
34 K_SEM_DEFINE(sema, 0, NUM_THREAD);
/* reference cycle timestamp; cycles_delta() measures the slice consumed since the last switch */
36 static uint32_t elapsed_slice;
37 static int thread_idx;
38 
/* Return the number of hardware cycles elapsed since *reftime, and
 * update *reftime to the current cycle counter so the next call
 * measures from here.
 */
static uint32_t cycles_delta(uint32_t *reftime)
{
	uint32_t current = k_cycle_get_32();
	uint32_t elapsed = current - *reftime;

	*reftime = current;

	return elapsed;
}
49 
/*
 * Body of each sliced test thread. Measures the CPU time (in hw cycles)
 * that elapsed since the previous context switch and asserts it matches
 * the configured slice within the task-switch tolerance. p1..p3 unused.
 */
static void thread_time_slice(void *p1, void *p2, void *p3)
{
	/* Must run first: capture cycles consumed before this thread got in */
	uint32_t t = cycles_delta(&elapsed_slice);
	uint32_t expected_slice_min, expected_slice_max;
	uint32_t switch_tolerance_ticks =
		k_ms_to_ticks_ceil32(TASK_SWITCH_TOLERANCE);

	if (thread_idx == 0) {
		/*
		 * Thread number 0 runs after the main thread consumed a half
		 * slice and relinquished the CPU, so it is expected to see
		 * roughly HALF_SLICE_SIZE, give or take the switch tolerance.
		 * Bounds are computed in cycles (ms * cycles-per-sec / 1000).
		 */
		expected_slice_min =
			(uint64_t)(HALF_SLICE_SIZE - TASK_SWITCH_TOLERANCE) *
			sys_clock_hw_cycles_per_sec() / 1000;
		expected_slice_max =
			(uint64_t)(HALF_SLICE_SIZE + TASK_SWITCH_TOLERANCE) *
			sys_clock_hw_cycles_per_sec() / 1000;
	} else {
		/*
		 * The other threads are sliced with tick granularity, so the
		 * bounds are derived via tick conversions (floor for the
		 * minimum, ceil for the maximum), widened by the switch
		 * tolerance expressed in ticks.
		 */
		expected_slice_min =
			(k_ms_to_ticks_floor32(SLICE_SIZE)
			 - switch_tolerance_ticks)
			* k_ticks_to_cyc_floor32(1);
		expected_slice_max =
			(k_ms_to_ticks_ceil32(SLICE_SIZE)
			 + switch_tolerance_ticks)
			* k_ticks_to_cyc_ceil32(1);
	}

#ifdef CONFIG_DEBUG
	TC_PRINT("thread[%d] elapsed slice: %d, expected: <%d, %d>\n",
		 thread_idx, t, expected_slice_min, expected_slice_max);
#endif

	/* Advance the index before the asserts; otherwise, on failure, the
	 * output would give the impression that the same thread ran more
	 * than once.
	 */
	thread_idx = (thread_idx + 1) % NUM_THREAD;

	/** TESTPOINT: timeslice should be reset for each preemptive thread */
#ifndef CONFIG_COVERAGE_GCOV
	zassert_true(t >= expected_slice_min,
		     "timeslice too small, expected %u got %u",
		     expected_slice_min, t);
	zassert_true(t <= expected_slice_max,
		     "timeslice too big, expected %u got %u",
		     expected_slice_max, t);
#else
	(void)t;
#endif /* CONFIG_COVERAGE_GCOV */

	/* Keep the current thread busy for more than one slice; when the
	 * timeslice is used up, the scheduler should preempt this thread
	 * and switch the next one in.
	 */
	spin_for_ms(BUSY_MS);
	k_sem_give(&sema);
}
112 
113 /* test cases */
114 /**
115  * @brief Check the behavior of preemptive threads when the
116  * time slice is disabled and enabled
117  *
118  * @details Create multiple preemptive threads with few different
119  * priorities and few with same priorities and enable the time slice.
120  * Ensure that each thread is given the time slice period to execute.
121  *
122  * @see k_sched_time_slice_set(), k_sem_reset(), k_cycle_get_32(),
123  *      k_uptime_get_32()
124  *
125  * @ingroup kernel_sched_tests
126  */
ZTEST(threads_scheduling, test_slice_reset)
{
	uint32_t t32;
	k_tid_t tid[NUM_THREAD];
	struct k_thread t[NUM_THREAD];
	/* remember caller priority so it can be restored at the end */
	int old_prio = k_thread_priority_get(k_current_get());

	thread_idx = 0;
	/* disable timeslice */
	k_sched_time_slice_set(0, K_PRIO_PREEMPT(0));

	/* The slice size needs to be set in ms (which get converted
	 * into ticks internally), but we want to loop over a half
	 * slice in cycles. That requires a bit of care to be sure the
	 * value divides properly.
	 */
	uint32_t slice_ticks = k_ms_to_ticks_ceil32(SLICE_SIZE);
	uint32_t half_slice_cyc = k_ticks_to_cyc_ceil32(slice_ticks / 2);

	if (slice_ticks % 2 != 0) {
		uint32_t deviation = k_ticks_to_cyc_ceil32(1);
		/* slice_ticks can't be divisible by two, so we add the
		 * (slice_ticks / 2) floating part back to half_slice_cyc.
		 */
		half_slice_cyc = half_slice_cyc + (deviation / 2);
	}

	/* run twice: once at K_PRIO_PREEMPT(0), once at K_PRIO_PREEMPT(1) */
	for (int j = 0; j < 2; j++) {
		k_sem_reset(&sema);

		/* update priority for current thread */
		k_thread_priority_set(k_current_get(), K_PRIO_PREEMPT(j));

		/* synchronize to tick boundary */
		k_usleep(1);

		/* create delayed threads with equal preemptive priority */
		for (int i = 0; i < NUM_THREAD; i++) {
			tid[i] = k_thread_create(&t[i], tstacks[i], STACK_SIZE,
						 thread_time_slice, NULL, NULL,
						 NULL, K_PRIO_PREEMPT(j), 0,
						 K_NO_WAIT);
		}

		/* enable time slice (and reset the counter!) */
		k_sched_time_slice_set(SLICE_SIZE, K_PRIO_PREEMPT(0));

		/* initialize reference timestamp */
		cycles_delta(&elapsed_slice);

		/* current thread (ztest native) consumed a half timeslice */
		t32 = k_cycle_get_32();
		while (k_cycle_get_32() - t32 < half_slice_cyc) {
			Z_SPIN_DELAY(50);
		}

		/* relinquish CPU and wait for each thread to complete;
		 * sleep long enough for every thread to get its slice
		 */
		k_sleep(K_TICKS(slice_ticks * (NUM_THREAD + 1)));
		for (int i = 0; i < NUM_THREAD; i++) {
			k_sem_take(&sema, K_FOREVER);
		}

		/* test case teardown */
		for (int i = 0; i < NUM_THREAD; i++) {
			k_thread_abort(tid[i]);
		}
		/* disable time slice */
		k_sched_time_slice_set(0, K_PRIO_PREEMPT(0));
	}
	/* restore the caller's original priority */
	k_thread_priority_set(k_current_get(), old_prio);
}
198 
199 #else /* CONFIG_TIMESLICING */
/* Time slicing disabled in this build: nothing to verify, skip the test. */
ZTEST(threads_scheduling, test_slice_reset)
{
	ztest_test_skip();
}
204 #endif /* CONFIG_TIMESLICING */
205