/*
 * Copyright (c) 2016-2017 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_KERNEL_INCLUDE_KSCHED_H_
#define ZEPHYR_KERNEL_INCLUDE_KSCHED_H_

#include <zephyr/kernel_structs.h>
#include <kernel_internal.h>
#include <timeout_q.h>
#include <kthread.h>
#include <zephyr/tracing/tracing.h>
#include <stdbool.h>
#include <priority_q.h>

BUILD_ASSERT(K_LOWEST_APPLICATION_THREAD_PRIO
	     >= K_HIGHEST_APPLICATION_THREAD_PRIO);

#ifdef CONFIG_MULTITHREADING
#define Z_VALID_PRIO(prio, entry_point)                                  \
	(((prio) == K_IDLE_PRIO && z_is_idle_thread_entry(entry_point)) || \
	 ((K_LOWEST_APPLICATION_THREAD_PRIO                              \
	   >= K_HIGHEST_APPLICATION_THREAD_PRIO)                         \
	  && (prio) >= K_HIGHEST_APPLICATION_THREAD_PRIO                 \
	  && (prio) <= K_LOWEST_APPLICATION_THREAD_PRIO))

#define Z_ASSERT_VALID_PRIO(prio, entry_point) do { \
	__ASSERT(Z_VALID_PRIO((prio), (entry_point)), \
		 "invalid priority (%d); allowed range: %d to %d", \
		 (prio), \
		 K_LOWEST_APPLICATION_THREAD_PRIO, \
		 K_HIGHEST_APPLICATION_THREAD_PRIO); \
	} while (false)
#else
#define Z_VALID_PRIO(prio, entry_point) ((prio) == -1)
#define Z_ASSERT_VALID_PRIO(prio, entry_point) __ASSERT((prio) == -1, "")
#endif /* CONFIG_MULTITHREADING */
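
/*
 * Illustrative sketch only ("setup_thread" and "my_entry" are hypothetical
 * names): a thread-setup path can validate a requested priority before
 * using it. Recall that in Zephyr cooperative priorities are negative and
 * preemptible priorities are non-negative, so "higher priority" means a
 * numerically lower value.
 *
 *     void setup_thread(int prio, k_thread_entry_t my_entry)
 *     {
 *         Z_ASSERT_VALID_PRIO(prio, my_entry);
 *         ...
 *     }
 */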

#if (CONFIG_MP_MAX_NUM_CPUS == 1)
#define LOCK_SCHED_SPINLOCK
#else
#define LOCK_SCHED_SPINLOCK K_SPINLOCK(&_sched_spinlock)
#endif
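
/*
 * LOCK_SCHED_SPINLOCK expands to a K_SPINLOCK() scoped-lock block on SMP
 * builds and to nothing on uniprocessor builds, where masked interrupts
 * already provide the needed exclusion. A sketch of how it reads at a
 * call site (see z_unpend_first_thread() below for a real one):
 *
 *     LOCK_SCHED_SPINLOCK {
 *         ...critical section; runs with _sched_spinlock held on SMP...
 *     }
 */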

#ifdef __cplusplus
extern "C" {
#endif

extern struct k_spinlock _sched_spinlock;

extern struct k_thread _thread_dummy;

void z_sched_init(void);
void z_unpend_thread_no_timeout(struct k_thread *thread);
struct k_thread *z_unpend1_no_timeout(_wait_q_t *wait_q);
int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
		_wait_q_t *wait_q, k_timeout_t timeout);
void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q,
		   k_timeout_t timeout);
void z_reschedule(struct k_spinlock *lock, k_spinlock_key_t key);
void z_reschedule_irqlock(uint32_t key);
void z_unpend_thread(struct k_thread *thread);
int z_unpend_all(_wait_q_t *wait_q);
bool z_thread_prio_set(struct k_thread *thread, int prio);
void *z_get_next_switch_handle(void *interrupted);

void z_time_slice(void);
void z_reset_time_slice(struct k_thread *curr);
void z_sched_start(struct k_thread *thread);
void z_ready_thread(struct k_thread *thread);
void z_requeue_current(struct k_thread *curr);
struct k_thread *z_swap_next_thread(void);
void move_current_to_end_of_prio_q(void);
bool thread_is_sliceable(struct k_thread *thread);

static inline void z_reschedule_unlocked(void)
{
	(void) z_reschedule_irqlock(arch_irq_lock());
}

static inline bool z_is_under_prio_ceiling(int prio)
{
	return prio >= CONFIG_PRIORITY_CEILING;
}

static inline int z_get_new_prio_with_ceiling(int prio)
{
	return z_is_under_prio_ceiling(prio) ? prio : CONFIG_PRIORITY_CEILING;
}

static inline bool z_is_prio1_higher_than_or_equal_to_prio2(int prio1, int prio2)
{
	return prio1 <= prio2;
}

static inline bool z_is_prio_higher_or_equal(int prio1, int prio2)
{
	return z_is_prio1_higher_than_or_equal_to_prio2(prio1, prio2);
}

static inline bool z_is_prio1_lower_than_or_equal_to_prio2(int prio1, int prio2)
{
	return prio1 >= prio2;
}

static inline bool z_is_prio1_higher_than_prio2(int prio1, int prio2)
{
	return prio1 < prio2;
}

static inline bool z_is_prio_higher(int prio, int test_prio)
{
	return z_is_prio1_higher_than_prio2(prio, test_prio);
}

static inline bool z_is_prio_lower_or_equal(int prio1, int prio2)
{
	return z_is_prio1_lower_than_or_equal_to_prio2(prio1, prio2);
}

static inline bool _is_valid_prio(int prio, k_thread_entry_t entry_point)
{
	if ((prio == K_IDLE_PRIO) && z_is_idle_thread_entry(entry_point)) {
		return true;
	}

	if (!z_is_prio_higher_or_equal(prio,
				       K_LOWEST_APPLICATION_THREAD_PRIO)) {
		return false;
	}

	if (!z_is_prio_lower_or_equal(prio,
				      K_HIGHEST_APPLICATION_THREAD_PRIO)) {
		return false;
	}

	return true;
}

static ALWAYS_INLINE _wait_q_t *pended_on_thread(struct k_thread *thread)
{
	__ASSERT_NO_MSG(thread->base.pended_on);

	return thread->base.pended_on;
}


static inline void unpend_thread_no_timeout(struct k_thread *thread)
{
	_priq_wait_remove(&pended_on_thread(thread)->waitq, thread);
	z_mark_thread_as_not_pending(thread);
	thread->base.pended_on = NULL;
}

/*
 * In a multiprocessor system, z_unpend_first_thread() must lock the scheduler
 * spinlock _sched_spinlock. However, in a uniprocessor system, that is not
 * necessary as the caller has already taken precautions (in the form of
 * locking interrupts).
 */
static ALWAYS_INLINE struct k_thread *z_unpend_first_thread(_wait_q_t *wait_q)
{
	struct k_thread *thread = NULL;

	__ASSERT_EVAL(, int key = arch_irq_lock(); arch_irq_unlock(key),
		      !arch_irq_unlocked(key), "");

	LOCK_SCHED_SPINLOCK {
		thread = _priq_wait_best(&wait_q->waitq);
		if (unlikely(thread != NULL)) {
			unpend_thread_no_timeout(thread);
			z_abort_thread_timeout(thread);
		}
	}

	return thread;
}

/*
 * APIs for working with the Zephyr kernel scheduler. Intended for use in
 * the management of IPC objects, either in the core kernel or in IPC
 * implementations provided by OS compatibility layers, offering basic
 * wait/wake operations with spinlocks used for synchronization.
 *
 * These APIs are public and will be treated as a contract, even if the
 * underlying scheduler implementation changes.
 */

/**
 * Wake up a thread pending on the provided wait queue
 *
 * Given a wait_q, wake up the highest priority thread on the queue. If the
 * queue was empty, just return false.
 *
 * Otherwise, do the following, in order, holding _sched_spinlock the entire
 * time so that the thread state is guaranteed not to change:
 * - Set the thread's swap return values to swap_retval and swap_data
 * - Un-pend and ready the thread, but do not invoke the scheduler.
 *
 * Repeatedly calling this function until it returns false is a suitable
 * way to wake all threads on the queue.
 *
 * It is up to the caller to implement locking such that the return value of
 * this function (whether a thread was woken up or not) does not immediately
 * become stale. Calls to wait and wake on the same wait_q object must have
 * synchronization. Calling this without holding any spinlock is a sign that
 * this API is not being used properly.
 *
 * @param wait_q Wait queue to wake up the highest prio thread
 * @param swap_retval Swap return value for woken thread
 * @param swap_data Data return value to supplement swap_retval. May be NULL.
 * @retval true If a thread was woken up
 * @retval false If the wait_q was empty
 */
bool z_sched_wake(_wait_q_t *wait_q, int swap_retval, void *swap_data);
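
/*
 * Usage sketch (illustrative; "my_obj" and its members are hypothetical):
 * the signaling side of an IPC object wakes one waiter while holding the
 * same spinlock that the waiting side pends under.
 *
 *     k_spinlock_key_t key = k_spin_lock(&my_obj.lock);
 *
 *     if (!z_sched_wake(&my_obj.wait_q, 0, &my_obj.payload)) {
 *         ...no thread was pending; record the event for later...
 *     }
 *     k_spin_unlock(&my_obj.lock, key);
 *
 * Since z_sched_wake() readies the thread without invoking the scheduler,
 * a real caller would typically release the lock via
 * z_reschedule(&my_obj.lock, key) instead of k_spin_unlock(), so that a
 * newly readied higher-priority thread runs promptly.
 */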

/**
 * Wakes the specified thread.
 *
 * Given a specific thread, wake it up. This routine assumes that the given
 * thread is not on the timeout queue.
 *
 * @param thread Given thread to wake up.
 * @param is_timeout True if called from the timer ISR; false otherwise.
 *
 */
void z_sched_wake_thread(struct k_thread *thread, bool is_timeout);

/**
 * Wake up all threads pending on the provided wait queue
 *
 * Convenience function to invoke z_sched_wake() on all threads in the queue
 * until there are no more to wake up.
 *
 * @param wait_q Wait queue from which to wake up all threads
 * @param swap_retval Swap return value for the woken threads
 * @param swap_data Data return value to supplement swap_retval. May be NULL.
 * @retval true If any threads were woken up
 * @retval false If the wait_q was empty
 */
static inline bool z_sched_wake_all(_wait_q_t *wait_q, int swap_retval,
				    void *swap_data)
{
	bool woken = false;

	while (z_sched_wake(wait_q, swap_retval, swap_data)) {
		woken = true;
	}

	/* True if we woke at least one thread up */
	return woken;
}

/**
 * Atomically put the current thread to sleep on a wait queue, with timeout
 *
 * The thread will be added to the provided waitqueue. The lock, which should
 * be held by the caller with the provided key, will be released once this is
 * completely done and we have swapped out.
 *
 * The return value and data pointer are set by whoever woke us up via
 * z_sched_wake.
 *
 * @param lock Address of spinlock to release when we swap out
 * @param key Key to the provided spinlock when it was locked
 * @param wait_q Wait queue to go to sleep on
 * @param timeout Waiting period to be woken up, or K_FOREVER to wait
 *                indefinitely.
 * @param data Storage location for data pointer set when thread was woken up.
 *             May be NULL if not used.
 * @retval Return value set by whatever woke us up, or -EAGAIN if the timeout
 *         expired without being woken up.
 */
int z_sched_wait(struct k_spinlock *lock, k_spinlock_key_t key,
		 _wait_q_t *wait_q, k_timeout_t timeout, void **data);
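
/*
 * Usage sketch (illustrative; same hypothetical "my_obj" as above): the
 * waiting side pends under the object's lock, which z_sched_wait() releases
 * atomically with the sleep, closing the race against the waker.
 *
 *     void *data;
 *     k_spinlock_key_t key = k_spin_lock(&my_obj.lock);
 *
 *     int ret = z_sched_wait(&my_obj.lock, key, &my_obj.wait_q,
 *                            K_MSEC(100), &data);
 *     if (ret == -EAGAIN) {
 *         ...timed out without being woken...
 *     } else {
 *         ...ret and data were set by the z_sched_wake() caller...
 *     }
 */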

/**
 * @brief Walks the wait queue invoking the callback on each waiting thread
 *
 * This function walks the wait queue invoking the callback function on each
 * waiting thread while holding _sched_spinlock. This can be useful for
 * routines that need to operate on multiple waiting threads.
 *
 * CAUTION! As a wait queue is of indeterminate length, the scheduler will be
 * locked for an indeterminate amount of time. This may impact system
 * performance. As such, care must be taken when using both this function and
 * the specified callback.
 *
 * @param wait_q Identifies the wait queue to walk
 * @param func Callback to invoke on each waiting thread
 * @param data Custom data passed to the callback
 *
 * @retval non-zero if walk is terminated by the callback; otherwise 0
 */
int z_sched_waitq_walk(_wait_q_t *wait_q,
		       int (*func)(struct k_thread *, void *), void *data);
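
/*
 * Sketch of a walk callback (illustrative; "count_waiter" and "my_obj" are
 * hypothetical): counting the threads on a queue. Per the contract above, a
 * non-zero return from the callback terminates the walk early; returning 0
 * continues to the next waiter.
 *
 *     static int count_waiter(struct k_thread *thread, void *data)
 *     {
 *         int *count = data;
 *
 *         ARG_UNUSED(thread);
 *         (*count)++;
 *         return 0;
 *     }
 *
 *     int count = 0;
 *     (void)z_sched_waitq_walk(&my_obj.wait_q, count_waiter, &count);
 */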

/** @brief Halt thread cycle usage accounting.
 *
 * Halts the accumulation of thread cycle usage and adds the current
 * total to the thread's counter. Called on context switch.
 *
 * Note that this function is idempotent. The core kernel code calls
 * it at the end of interrupt handlers (because that is where we have
 * a portable hook) where we are context switching, which will include
 * any cycles spent in the ISR in the per-thread accounting. But
 * architecture code can also call it earlier out of interrupt entry
 * to improve measurement fidelity.
 *
 * This function assumes local interrupts are masked (so that the
 * current CPU pointer and current thread are safe to modify), but
 * requires no other synchronization. Architecture layers don't need
 * to do anything more.
 */
void z_sched_usage_stop(void);

void z_sched_usage_start(struct k_thread *thread);

/**
 * @brief Retrieves CPU cycle usage data for specified core
 */
void z_sched_cpu_usage(uint8_t core_id, struct k_thread_runtime_stats *stats);

/**
 * @brief Retrieves thread cycle usage data for specified thread
 */
void z_sched_thread_usage(struct k_thread *thread,
			  struct k_thread_runtime_stats *stats);

static inline void z_sched_usage_switch(struct k_thread *thread)
{
	ARG_UNUSED(thread);
#ifdef CONFIG_SCHED_THREAD_USAGE
	z_sched_usage_stop();
	z_sched_usage_start(thread);
#endif /* CONFIG_SCHED_THREAD_USAGE */
}

#ifdef __cplusplus
}
#endif

#endif /* ZEPHYR_KERNEL_INCLUDE_KSCHED_H_ */