/*
 * Copyright (c) 2022 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/ztest.h>
#include "test_sched.h"

#ifdef CONFIG_TIMESLICING

/* The nRF51 has less RAM, so create fewer threads there */
#if CONFIG_SRAM_SIZE <= 24
#define NUM_THREAD 2
#elif (CONFIG_SRAM_SIZE <= 32) \
	|| defined(CONFIG_SOC_EMSK_EM7D)
#define NUM_THREAD 3
#else
#define NUM_THREAD 10
#endif
#define BASE_PRIORITY 0
#define ITERATION_COUNT 5

BUILD_ASSERT(NUM_THREAD <= MAX_NUM_THREAD);
/* slice size in milliseconds */
#define SLICE_SIZE 200
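/* Per-thread slice length in ticks, and the timing slop (in ticks)
 * allowed when checking the expiry callback.
 */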
#define PERTHREAD_SLICE_TICKS 64
#define TICK_SLOP 4
/* busy for more than one slice */
#define BUSY_MS (SLICE_SIZE + 20)
static struct k_thread t[NUM_THREAD];

static K_SEM_DEFINE(sema1, 0, NUM_THREAD);
/* uptime snapshot used to measure the slice taken by the last thread */
static int64_t elapsed_slice;

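/* Index of the thread expected to run next; used to verify round-robin order */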
static int thread_idx;

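/* Worker thread: on each pass it checks that it was scheduled in the
 * expected order and that the previous slice lasted roughly SLICE_SIZE,
 * then spins past its own slice so the next thread gets switched in.
 */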
static void thread_tslice(void *p1, void *p2, void *p3)
{
	int idx = POINTER_TO_INT(p1);

	/* Print a newline for the last thread */
	int thread_parameter = (idx == (NUM_THREAD - 1)) ? '\n' :
			       (idx + 'A');

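	/* Convert SLICE_SIZE to ticks and back to milliseconds, with one
	 * tick of margin on each side, to get the measurable slice bounds.
	 */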
	int64_t expected_slice_min = k_ticks_to_ms_floor64(k_ms_to_ticks_ceil32(SLICE_SIZE) - 1);
	int64_t expected_slice_max = k_ticks_to_ms_ceil64(k_ms_to_ticks_ceil32(SLICE_SIZE) + 1);

	/* Clumsy, but need to handle the precision loss with
	 * submillisecond ticks. It's always possible to alias and
	 * produce a tdelta of "1", no matter how fast ticks are.
	 */
	if (expected_slice_max == expected_slice_min) {
		expected_slice_max = expected_slice_min + 1;
	}

	while (1) {
		int64_t tdelta = k_uptime_delta(&elapsed_slice);

		TC_PRINT("%c", thread_parameter);
		/* The test fails if a thread exceeds its allocated time
		 * slice or if any thread is scheduled out of order.
		 */
		zassert_true(((tdelta >= expected_slice_min) &&
			      (tdelta <= expected_slice_max) &&
			      (idx == thread_idx)), NULL);
		thread_idx = (thread_idx + 1) % NUM_THREAD;

		/* Keep the current thread busy for more than one slice;
		 * once its time slice is used up, the next thread should
		 * be scheduled in.
		 */
		spin_for_ms(BUSY_MS);
		k_sem_give(&sema1);
	}
}

/* test cases */

/**
 * @brief Check the behavior of preemptive threads when the
 * time slice is disabled and enabled
 *
 * @details Create multiple preemptive threads with the same priority,
 * then enable the time slice. Ensure that each thread is given its
 * time slice period to execute.
 *
 * @ingroup kernel_sched_tests
 */
ZTEST(threads_scheduling, test_slice_scheduling)
{
	k_tid_t tid[NUM_THREAD];
	int old_prio = k_thread_priority_get(k_current_get());
	int count = 0;

	thread_idx = 0;

	/* disable timeslice */
	k_sched_time_slice_set(0, K_PRIO_PREEMPT(0));

	/* update priority for current thread */
	k_thread_priority_set(k_current_get(), K_PRIO_PREEMPT(BASE_PRIORITY));

	/* create threads with equal preemptive priority */
	for (int i = 0; i < NUM_THREAD; i++) {
		tid[i] = k_thread_create(&t[i], tstacks[i], STACK_SIZE,
					 thread_tslice,
					 INT_TO_POINTER(i), NULL, NULL,
					 K_PRIO_PREEMPT(BASE_PRIORITY), 0,
					 K_NO_WAIT);
	}

	/* enable time slice */
	k_sched_time_slice_set(SLICE_SIZE, K_PRIO_PREEMPT(BASE_PRIORITY));

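	/* Each iteration: burn one slice in the main thread, then wait for
	 * every worker thread to signal that it ran its slice and finished
	 * its busy period.
	 */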
	while (count < ITERATION_COUNT) {
		k_uptime_delta(&elapsed_slice);

		/* Keep the current thread busy for more than one slice;
		 * once its time slice is used up, the next thread should
		 * be scheduled in.
		 */
		spin_for_ms(BUSY_MS);

		/* relinquish CPU and wait for each thread to complete */
		for (int i = 0; i < NUM_THREAD; i++) {
			k_sem_take(&sema1, K_FOREVER);
		}
		count++;
	}

	/* test case teardown */
	for (int i = 0; i < NUM_THREAD; i++) {
		k_thread_abort(tid[i]);
	}

	/* disable time slice */
	k_sched_time_slice_set(0, K_PRIO_PREEMPT(0));

	k_thread_priority_set(k_current_get(), old_prio);
}

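/* State shared between the per-thread slice test and its expiry callback */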
static volatile int32_t perthread_count;
static volatile uint32_t last_cyc;
static volatile bool perthread_running;
static K_SEM_DEFINE(perthread_sem, 0, 1);

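/* Per-thread time slice expiry callback: verify the slice lasted
 * PERTHREAD_SLICE_TICKS within TICK_SLOP ticks, and on the second
 * expiry abort the thread and wake the test.
 */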
static void slice_expired(struct k_thread *thread, void *data)
{
	zassert_equal(thread, data, "wrong callback data pointer");

	uint32_t now = k_cycle_get_32();
	uint32_t dt = k_cyc_to_ticks_near32(now - last_cyc);

	zassert_true(perthread_running, "thread didn't start");
	zassert_true(dt >= (PERTHREAD_SLICE_TICKS - TICK_SLOP),
		     "slice expired >%d ticks too soon (dt=%d)", TICK_SLOP, dt);
	zassert_true((dt - PERTHREAD_SLICE_TICKS) <= TICK_SLOP,
		     "slice expired >%d ticks late (dt=%d)", TICK_SLOP, dt);

	last_cyc = now;

	/* First time through, just let the slice expire and keep
	 * running. Second time, abort the thread and wake up the
	 * main test function.
	 */
	if (perthread_count++ != 0) {
		k_thread_abort(thread);
		perthread_running = false;
		k_sem_give(&perthread_sem);
	}
}

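/* Worker for the per-thread slice test: spin forever, only flagging
 * that it has started so slice_expired() can check it actually ran.
 */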
static void slice_perthread_fn(void *a, void *b, void *c)
{
	ARG_UNUSED(a); ARG_UNUSED(b); ARG_UNUSED(c);

	while (true) {
		perthread_running = true;
		k_busy_wait(10);
	}
}

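/**
 * @brief Check per-thread time slice expiry
 *
 * @details Give a single thread its own time slice and an expiry
 * callback, then verify the callback fires on time (within TICK_SLOP
 * ticks) and can abort the thread from within the callback.
 *
 * @ingroup kernel_sched_tests
 */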
ZTEST(threads_scheduling, test_slice_perthread)
{
	if (!IS_ENABLED(CONFIG_TIMESLICE_PER_THREAD)) {
		ztest_test_skip();
		return;
	}

	/* Create the thread but don't start it */
	k_thread_create(&t[0], tstacks[0], STACK_SIZE,
			slice_perthread_fn, NULL, NULL, NULL,
			1, 0, K_FOREVER);
	k_thread_time_slice_set(&t[0], PERTHREAD_SLICE_TICKS, slice_expired, &t[0]);

	/* Tick align, set up, then start */
	k_usleep(1);
	last_cyc = k_cycle_get_32();
	k_thread_start(&t[0]);

	k_sem_take(&perthread_sem, K_FOREVER);
	zassert_false(perthread_running, "thread failed to suspend");
}

#else /* CONFIG_TIMESLICING */
ZTEST(threads_scheduling, test_slice_scheduling)
{
	ztest_test_skip();
}
ZTEST(threads_scheduling, test_slice_perthread)
{
	ztest_test_skip();
}
#endif /* CONFIG_TIMESLICING */