/*
 * Copyright (c) 2016-2017 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_KERNEL_INCLUDE_KSCHED_H_
#define ZEPHYR_KERNEL_INCLUDE_KSCHED_H_

#include <zephyr/kernel_structs.h>
#include <kernel_internal.h>
#include <timeout_q.h>
#include <zephyr/tracing/tracing.h>
#include <stdbool.h>

bool z_is_thread_essential(void);

BUILD_ASSERT(K_LOWEST_APPLICATION_THREAD_PRIO
	     >= K_HIGHEST_APPLICATION_THREAD_PRIO);

#ifdef CONFIG_MULTITHREADING
#define Z_VALID_PRIO(prio, entry_point)                                     \
	(((prio) == K_IDLE_PRIO && z_is_idle_thread_entry(entry_point)) || \
	 ((K_LOWEST_APPLICATION_THREAD_PRIO                                 \
	   >= K_HIGHEST_APPLICATION_THREAD_PRIO)                            \
	  && (prio) >= K_HIGHEST_APPLICATION_THREAD_PRIO                    \
	  && (prio) <= K_LOWEST_APPLICATION_THREAD_PRIO))

#define Z_ASSERT_VALID_PRIO(prio, entry_point) do { \
	__ASSERT(Z_VALID_PRIO((prio), (entry_point)), \
		 "invalid priority (%d); allowed range: %d to %d", \
		 (prio), \
		 K_LOWEST_APPLICATION_THREAD_PRIO, \
		 K_HIGHEST_APPLICATION_THREAD_PRIO); \
	} while (false)
#else
#define Z_VALID_PRIO(prio, entry_point) ((prio) == -1)
#define Z_ASSERT_VALID_PRIO(prio, entry_point) __ASSERT((prio) == -1, "")
#endif
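
/*
 * For illustration (a hedged sketch; the exact numeric bounds depend on the
 * Kconfig priority settings): an application thread priority must lie
 * between K_HIGHEST_APPLICATION_THREAD_PRIO (typically negative, i.e.
 * cooperative) and K_LOWEST_APPLICATION_THREAD_PRIO (preemptible), while
 * K_IDLE_PRIO is accepted only for the idle thread's entry point. The
 * my_entry name below is hypothetical:
 *
 *	Z_ASSERT_VALID_PRIO(0, my_entry);          passes on default configs
 *	Z_ASSERT_VALID_PRIO(K_IDLE_PRIO, idle);    passes (idle thread only)
 */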

void z_sched_init(void);
void z_move_thread_to_end_of_prio_q(struct k_thread *thread);
int z_is_thread_time_slicing(struct k_thread *thread);
void z_unpend_thread_no_timeout(struct k_thread *thread);
struct k_thread *z_unpend1_no_timeout(_wait_q_t *wait_q);
int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
		_wait_q_t *wait_q, k_timeout_t timeout);
int z_pend_curr_irqlock(uint32_t key, _wait_q_t *wait_q, k_timeout_t timeout);
void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q,
		   k_timeout_t timeout);
void z_reschedule(struct k_spinlock *lock, k_spinlock_key_t key);
void z_reschedule_irqlock(uint32_t key);
struct k_thread *z_unpend_first_thread(_wait_q_t *wait_q);
void z_unpend_thread(struct k_thread *thread);
int z_unpend_all(_wait_q_t *wait_q);
void z_thread_priority_set(struct k_thread *thread, int prio);
bool z_set_prio(struct k_thread *thread, int prio);
void *z_get_next_switch_handle(void *interrupted);
void idle(void *unused1, void *unused2, void *unused3);
void z_time_slice(void);
void z_reset_time_slice(struct k_thread *curr);
void z_sched_abort(struct k_thread *thread);
void z_sched_ipi(void);
void z_sched_start(struct k_thread *thread);
void z_ready_thread(struct k_thread *thread);
void z_requeue_current(struct k_thread *curr);
struct k_thread *z_swap_next_thread(void);
void z_thread_abort(struct k_thread *thread);

static inline void z_pend_curr_unlocked(_wait_q_t *wait_q, k_timeout_t timeout)
{
	(void) z_pend_curr_irqlock(arch_irq_lock(), wait_q, timeout);
}

static inline void z_reschedule_unlocked(void)
{
	z_reschedule_irqlock(arch_irq_lock());
}

static inline bool z_is_idle_thread_entry(void *entry_point)
{
	return entry_point == idle;
}

static inline bool z_is_idle_thread_object(struct k_thread *thread)
{
#ifdef CONFIG_MULTITHREADING
#ifdef CONFIG_SMP
	return thread->base.is_idle;
#else
	return thread == &z_idle_threads[0];
#endif
#else
	return false;
#endif /* CONFIG_MULTITHREADING */
}

static inline bool z_is_thread_suspended(struct k_thread *thread)
{
	return (thread->base.thread_state & _THREAD_SUSPENDED) != 0U;
}

static inline bool z_is_thread_pending(struct k_thread *thread)
{
	return (thread->base.thread_state & _THREAD_PENDING) != 0U;
}

static inline bool z_is_thread_prevented_from_running(struct k_thread *thread)
{
	uint8_t state = thread->base.thread_state;

	return (state & (_THREAD_PENDING | _THREAD_PRESTART | _THREAD_DEAD |
			 _THREAD_DUMMY | _THREAD_SUSPENDED)) != 0U;
}

static inline bool z_is_thread_timeout_active(struct k_thread *thread)
{
	return !z_is_inactive_timeout(&thread->base.timeout);
}

static inline bool z_is_thread_ready(struct k_thread *thread)
{
	return !(z_is_thread_prevented_from_running(thread) ||
		 z_is_thread_timeout_active(thread));
}

static inline bool z_has_thread_started(struct k_thread *thread)
{
	return (thread->base.thread_state & _THREAD_PRESTART) == 0U;
}

static inline bool z_is_thread_state_set(struct k_thread *thread, uint32_t state)
{
	return (thread->base.thread_state & state) != 0U;
}

static inline bool z_is_thread_queued(struct k_thread *thread)
{
	return z_is_thread_state_set(thread, _THREAD_QUEUED);
}

static inline void z_mark_thread_as_suspended(struct k_thread *thread)
{
	thread->base.thread_state |= _THREAD_SUSPENDED;

	SYS_PORT_TRACING_FUNC(k_thread, sched_suspend, thread);
}

static inline void z_mark_thread_as_not_suspended(struct k_thread *thread)
{
	thread->base.thread_state &= ~_THREAD_SUSPENDED;

	SYS_PORT_TRACING_FUNC(k_thread, sched_resume, thread);
}

static inline void z_mark_thread_as_started(struct k_thread *thread)
{
	thread->base.thread_state &= ~_THREAD_PRESTART;
}

static inline void z_mark_thread_as_pending(struct k_thread *thread)
{
	thread->base.thread_state |= _THREAD_PENDING;
}

static inline void z_mark_thread_as_not_pending(struct k_thread *thread)
{
	thread->base.thread_state &= ~_THREAD_PENDING;
}

static inline void z_set_thread_states(struct k_thread *thread, uint32_t states)
{
	thread->base.thread_state |= states;
}

static inline void z_reset_thread_states(struct k_thread *thread,
					 uint32_t states)
{
	thread->base.thread_state &= ~states;
}

static inline bool z_is_under_prio_ceiling(int prio)
{
	return prio >= CONFIG_PRIORITY_CEILING;
}

static inline int z_get_new_prio_with_ceiling(int prio)
{
	return z_is_under_prio_ceiling(prio) ? prio : CONFIG_PRIORITY_CEILING;
}

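/*
 * Priority comparison helpers. Zephyr thread priorities are numerically
 * inverted: a smaller integer means a higher scheduling priority, with
 * cooperative priorities being negative. For example,
 * z_is_prio1_higher_than_prio2(-2, 5) is true because -2 < 5.
 */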
static inline bool z_is_prio1_higher_than_or_equal_to_prio2(int prio1, int prio2)
{
	return prio1 <= prio2;
}

static inline bool z_is_prio_higher_or_equal(int prio1, int prio2)
{
	return z_is_prio1_higher_than_or_equal_to_prio2(prio1, prio2);
}

static inline bool z_is_prio1_lower_than_or_equal_to_prio2(int prio1, int prio2)
{
	return prio1 >= prio2;
}

static inline bool z_is_prio1_higher_than_prio2(int prio1, int prio2)
{
	return prio1 < prio2;
}

static inline bool z_is_prio_higher(int prio, int test_prio)
{
	return z_is_prio1_higher_than_prio2(prio, test_prio);
}

static inline bool z_is_prio_lower_or_equal(int prio1, int prio2)
{
	return z_is_prio1_lower_than_or_equal_to_prio2(prio1, prio2);
}

int32_t z_sched_prio_cmp(struct k_thread *thread_1, struct k_thread *thread_2);

static inline bool _is_valid_prio(int prio, void *entry_point)
{
	if (prio == K_IDLE_PRIO && z_is_idle_thread_entry(entry_point)) {
		return true;
	}

	if (!z_is_prio_higher_or_equal(prio,
				       K_LOWEST_APPLICATION_THREAD_PRIO)) {
		return false;
	}

	if (!z_is_prio_lower_or_equal(prio,
				      K_HIGHEST_APPLICATION_THREAD_PRIO)) {
		return false;
	}

	return true;
}

static inline void _ready_one_thread(_wait_q_t *wq)
{
	struct k_thread *thread = z_unpend_first_thread(wq);

	if (thread != NULL) {
		z_ready_thread(thread);
	}
}

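/*
 * Note: sched_locked is a per-thread nesting count that is decremented to
 * lock and incremented to unlock, so any non-zero value means the scheduler
 * is locked for the current thread. The assertions below catch use from ISR
 * context as well as lock-count overflow (locking) and underflow (unlocking
 * while not locked).
 */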
static inline void z_sched_lock(void)
{
	__ASSERT(!arch_is_in_isr(), "");
	__ASSERT(_current->base.sched_locked != 1U, "");

	--_current->base.sched_locked;

	compiler_barrier();
}

static ALWAYS_INLINE void z_sched_unlock_no_reschedule(void)
{
	__ASSERT(!arch_is_in_isr(), "");
	__ASSERT(_current->base.sched_locked != 0U, "");

	compiler_barrier();

	++_current->base.sched_locked;
}

/*
 * APIs for working with the Zephyr kernel scheduler. They are intended for
 * managing IPC objects, either in the core kernel or in IPC mechanisms
 * implemented by OS compatibility layers, and provide basic wait/wake
 * operations with spinlocks used for synchronization.
 *
 * These APIs are public and will be treated as a contract, even if the
 * underlying scheduler implementation changes.
 */

/**
 * Wake up a thread pending on the provided wait queue
 *
 * Given a wait_q, wake up the highest priority thread on the queue. If the
 * queue is empty, just return false.
 *
 * Otherwise, do the following, in order, holding sched_spinlock the entire
 * time so that the thread state is guaranteed not to change:
 * - Set the thread's swap return values to swap_retval and swap_data
 * - Un-pend and ready the thread, but do not invoke the scheduler.
 *
 * Calling this function repeatedly until it returns false is a suitable
 * way to wake all threads on the queue.
 *
 * It is up to the caller to implement locking such that the return value of
 * this function (whether a thread was woken up or not) does not immediately
 * become stale. Calls to wait and wake on the same wait_q object must be
 * synchronized. Calling this without holding any spinlock is a sign that
 * this API is not being used properly.
 *
 * @param wait_q Wait queue from which to wake the highest priority thread
 * @param swap_retval Swap return value for the woken thread
 * @param swap_data Data return value to supplement swap_retval. May be NULL.
 * @retval true If a thread was woken up
 * @retval false If the wait_q was empty
 */
bool z_sched_wake(_wait_q_t *wait_q, int swap_retval, void *swap_data);
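
/*
 * Illustrative sketch (not part of this API; the struct, field and function
 * names are hypothetical): a minimal "give" path for an IPC object, showing
 * the intended pairing of a caller-held spinlock with z_sched_wake(). If a
 * waiter was woken, z_reschedule() releases the lock and lets the scheduler
 * run it; otherwise the count is banked.
 *
 *	struct my_sem {
 *		struct k_spinlock lock;
 *		_wait_q_t wait_q;
 *		unsigned int count;
 *	};
 *
 *	void my_sem_give(struct my_sem *sem)
 *	{
 *		k_spinlock_key_t key = k_spin_lock(&sem->lock);
 *
 *		if (z_sched_wake(&sem->wait_q, 0, NULL)) {
 *			z_reschedule(&sem->lock, key);
 *		} else {
 *			sem->count++;
 *			k_spin_unlock(&sem->lock, key);
 *		}
 *	}
 */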

/**
 * Wakes the specified thread.
 *
 * Given a specific thread, wake it up. This routine assumes that the given
 * thread is not on the timeout queue.
 *
 * @param thread Given thread to wake up.
 * @param is_timeout True if called from the timer ISR; false otherwise.
 */
void z_sched_wake_thread(struct k_thread *thread, bool is_timeout);

/**
 * Wake up all threads pending on the provided wait queue
 *
 * Convenience function that invokes z_sched_wake() on all threads in the
 * queue until there are no more to wake up.
 *
 * @param wait_q Wait queue from which to wake all pending threads
 * @param swap_retval Swap return value for each woken thread
 * @param swap_data Data return value to supplement swap_retval. May be NULL.
 * @retval true If any threads were woken up
 * @retval false If the wait_q was empty
 */
static inline bool z_sched_wake_all(_wait_q_t *wait_q, int swap_retval,
				    void *swap_data)
{
	bool woken = false;

	while (z_sched_wake(wait_q, swap_retval, swap_data)) {
		woken = true;
	}

	/* True if we woke at least one thread up */
	return woken;
}

/**
 * Atomically put the current thread to sleep on a wait queue, with timeout
 *
 * The thread will be added to the provided wait queue. The lock, which should
 * be held by the caller with the provided key, will be released once this is
 * completely done and we have swapped out.
 *
 * The return value and data pointer are set by whoever woke us up via
 * z_sched_wake().
 *
 * @param lock Address of spinlock to release when we swap out
 * @param key Key to the provided spinlock when it was locked
 * @param wait_q Wait queue to go to sleep on
 * @param timeout Waiting period to be woken up, or K_FOREVER to wait
 * indefinitely.
 * @param data Storage location for the data pointer set when the thread was
 * woken up. May be NULL if not used.
 * @retval Return value set by whoever woke us up, or -EAGAIN if the timeout
 * expired without the thread being woken up.
 */
int z_sched_wait(struct k_spinlock *lock, k_spinlock_key_t key,
		 _wait_q_t *wait_q, k_timeout_t timeout, void **data);
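
/*
 * Illustrative sketch (hypothetical names, continuing the my_sem example
 * above): a blocking "take" built on z_sched_wait(). When a unit is
 * available it is consumed immediately; otherwise the thread pends until
 * my_sem_give() wakes it with a swap_retval of 0, or until the timeout
 * expires and z_sched_wait() returns -EAGAIN. Note that z_sched_wait()
 * releases the lock for us.
 *
 *	int my_sem_take(struct my_sem *sem, k_timeout_t timeout)
 *	{
 *		k_spinlock_key_t key = k_spin_lock(&sem->lock);
 *
 *		if (sem->count > 0) {
 *			sem->count--;
 *			k_spin_unlock(&sem->lock, key);
 *			return 0;
 *		}
 *
 *		return z_sched_wait(&sem->lock, key, &sem->wait_q,
 *				    timeout, NULL);
 *	}
 */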

/**
 * @brief Walks the wait queue invoking the callback on each waiting thread
 *
 * This function walks the wait queue invoking the callback function on each
 * waiting thread while holding sched_spinlock. This can be useful for routines
 * that need to operate on multiple waiting threads.
 *
 * CAUTION! As a wait queue is of indeterminate length, the scheduler will be
 * locked for an indeterminate amount of time. This may impact system
 * performance. As such, care must be taken when using both this function and
 * the specified callback.
 *
 * @param wait_q Identifies the wait queue to walk
 * @param func Callback to invoke on each waiting thread
 * @param data Custom data passed to the callback
 *
 * @retval non-zero if the walk is terminated by the callback; otherwise 0
 */
int z_sched_waitq_walk(_wait_q_t *wait_q,
		       int (*func)(struct k_thread *, void *), void *data);
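
/*
 * Illustrative sketch (hypothetical callback, for example only): counting
 * the threads pending on a wait queue. Returning 0 from the callback
 * continues the walk; returning non-zero would terminate it early.
 *
 *	static int count_waiter(struct k_thread *thread, void *data)
 *	{
 *		ARG_UNUSED(thread);
 *
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	...
 *	int nthreads = 0;
 *
 *	(void) z_sched_waitq_walk(&wait_q, count_waiter, &nthreads);
 */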

/** @brief Halt thread cycle usage accounting.
 *
 * Halts the accumulation of thread cycle usage and adds the current
 * total to the thread's counter. Called on context switch.
 *
 * Note that this function is idempotent. The core kernel code calls
 * it at the end of interrupt handlers (because that is where we have
 * a portable hook) when we are context switching, so that any cycles
 * spent in the ISR are included in the per-thread accounting. But
 * architecture code can also call it earlier, from interrupt entry,
 * to improve measurement fidelity.
 *
 * This function assumes local interrupts are masked (so that the
 * current CPU pointer and current thread are safe to modify), but
 * requires no other synchronization. Architecture layers don't need
 * to do anything more.
 */
void z_sched_usage_stop(void);

void z_sched_usage_start(struct k_thread *thread);

/**
 * @brief Retrieves CPU cycle usage data for specified core
 */
void z_sched_cpu_usage(uint8_t core_id, struct k_thread_runtime_stats *stats);

/**
 * @brief Retrieves thread cycle usage data for specified thread
 */
void z_sched_thread_usage(struct k_thread *thread,
			  struct k_thread_runtime_stats *stats);

static inline void z_sched_usage_switch(struct k_thread *thread)
{
	ARG_UNUSED(thread);
#ifdef CONFIG_SCHED_THREAD_USAGE
	z_sched_usage_stop();
	z_sched_usage_start(thread);
#endif
}

#endif /* ZEPHYR_KERNEL_INCLUDE_KSCHED_H_ */