/*
 * Copyright (c) 2016-2017 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_KERNEL_INCLUDE_KSCHED_H_
#define ZEPHYR_KERNEL_INCLUDE_KSCHED_H_

#include <zephyr/kernel_structs.h>
#include <kernel_internal.h>
#include <timeout_q.h>
#include <kthread.h>
#include <zephyr/tracing/tracing.h>
#include <stdbool.h>

BUILD_ASSERT(K_LOWEST_APPLICATION_THREAD_PRIO
	     >= K_HIGHEST_APPLICATION_THREAD_PRIO);

#ifdef CONFIG_MULTITHREADING
#define Z_VALID_PRIO(prio, entry_point) \
	(((prio) == K_IDLE_PRIO && z_is_idle_thread_entry(entry_point)) || \
	 ((K_LOWEST_APPLICATION_THREAD_PRIO \
	   >= K_HIGHEST_APPLICATION_THREAD_PRIO) \
	  && (prio) >= K_HIGHEST_APPLICATION_THREAD_PRIO \
	  && (prio) <= K_LOWEST_APPLICATION_THREAD_PRIO))

#define Z_ASSERT_VALID_PRIO(prio, entry_point) do { \
	__ASSERT(Z_VALID_PRIO((prio), (entry_point)), \
		 "invalid priority (%d); allowed range: %d to %d", \
		 (prio), \
		 K_LOWEST_APPLICATION_THREAD_PRIO, \
		 K_HIGHEST_APPLICATION_THREAD_PRIO); \
	} while (false)
#else
#define Z_VALID_PRIO(prio, entry_point) ((prio) == -1)
#define Z_ASSERT_VALID_PRIO(prio, entry_point) __ASSERT((prio) == -1, "")
#endif /* CONFIG_MULTITHREADING */
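
/*
 * Illustrative sketch (not part of this header): a thread-setup path would
 * typically validate the requested priority against these macros before
 * touching the thread object. The function below is hypothetical; only
 * Z_ASSERT_VALID_PRIO() comes from this header.
 *
 *	static void setup_thread_prio(struct k_thread *thread,
 *				      k_thread_entry_t entry, int prio)
 *	{
 *		Z_ASSERT_VALID_PRIO(prio, entry);
 *		thread->base.prio = prio;
 *	}
 */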

extern struct k_thread _thread_dummy;

void z_sched_init(void);
void z_move_thread_to_end_of_prio_q(struct k_thread *thread);
void z_unpend_thread_no_timeout(struct k_thread *thread);
struct k_thread *z_unpend1_no_timeout(_wait_q_t *wait_q);
int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
		_wait_q_t *wait_q, k_timeout_t timeout);
void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q,
		   k_timeout_t timeout);
void z_reschedule(struct k_spinlock *lock, k_spinlock_key_t key);
void z_reschedule_irqlock(uint32_t key);
struct k_thread *z_unpend_first_thread(_wait_q_t *wait_q);
void z_unpend_thread(struct k_thread *thread);
int z_unpend_all(_wait_q_t *wait_q);
bool z_thread_prio_set(struct k_thread *thread, int prio);
void *z_get_next_switch_handle(void *interrupted);

void z_time_slice(void);
void z_reset_time_slice(struct k_thread *curr);
void z_sched_ipi(void);
void z_sched_start(struct k_thread *thread);
void z_ready_thread(struct k_thread *thread);
void z_ready_thread_locked(struct k_thread *thread);
void z_requeue_current(struct k_thread *curr);
struct k_thread *z_swap_next_thread(void);
void z_thread_abort(struct k_thread *thread);
void move_thread_to_end_of_prio_q(struct k_thread *thread);
bool thread_is_sliceable(struct k_thread *thread);

static inline void z_reschedule_unlocked(void)
{
	z_reschedule_irqlock(arch_irq_lock());
}

static inline bool z_is_under_prio_ceiling(int prio)
{
	return prio >= CONFIG_PRIORITY_CEILING;
}

static inline int z_get_new_prio_with_ceiling(int prio)
{
	return z_is_under_prio_ceiling(prio) ? prio : CONFIG_PRIORITY_CEILING;
}
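
/*
 * Worked example (illustrative): numerically smaller priority values are
 * more important in Zephyr. With CONFIG_PRIORITY_CEILING configured as 0,
 * a preemptible priority of 5 satisfies z_is_under_prio_ceiling() and
 * passes through unchanged, while a cooperative priority of -3 does not
 * and is clamped to the ceiling:
 *
 *	int a = z_get_new_prio_with_ceiling(5);    a == 5
 *	int b = z_get_new_prio_with_ceiling(-3);   b == 0, the ceiling
 */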

static inline bool z_is_prio1_higher_than_or_equal_to_prio2(int prio1, int prio2)
{
	return prio1 <= prio2;
}

static inline bool z_is_prio_higher_or_equal(int prio1, int prio2)
{
	return z_is_prio1_higher_than_or_equal_to_prio2(prio1, prio2);
}

static inline bool z_is_prio1_lower_than_or_equal_to_prio2(int prio1, int prio2)
{
	return prio1 >= prio2;
}

static inline bool z_is_prio1_higher_than_prio2(int prio1, int prio2)
{
	return prio1 < prio2;
}

static inline bool z_is_prio_higher(int prio, int test_prio)
{
	return z_is_prio1_higher_than_prio2(prio, test_prio);
}

static inline bool z_is_prio_lower_or_equal(int prio1, int prio2)
{
	return z_is_prio1_lower_than_or_equal_to_prio2(prio1, prio2);
}

int32_t z_sched_prio_cmp(struct k_thread *thread_1, struct k_thread *thread_2);
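
/*
 * Illustrative note: the helpers above encode the convention that a
 * numerically smaller value is a higher (more important) priority, e.g. a
 * cooperative thread at -2 outranks a preemptible thread at 7:
 *
 *	z_is_prio_higher(-2, 7);            true,  because -2 < 7
 *	z_is_prio_lower_or_equal(7, -2);    true,  because 7 >= -2
 *	z_is_prio_higher_or_equal(7, -2);   false, because 7 > -2
 */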

static inline bool _is_valid_prio(int prio, void *entry_point)
{
	if ((prio == K_IDLE_PRIO) && z_is_idle_thread_entry(entry_point)) {
		return true;
	}

	if (!z_is_prio_higher_or_equal(prio,
				       K_LOWEST_APPLICATION_THREAD_PRIO)) {
		return false;
	}

	if (!z_is_prio_lower_or_equal(prio,
				      K_HIGHEST_APPLICATION_THREAD_PRIO)) {
		return false;
	}

	return true;
}

static inline void z_sched_lock(void)
{
	__ASSERT(!arch_is_in_isr(), "");
	__ASSERT(_current->base.sched_locked != 1U, "");

	--_current->base.sched_locked;

	compiler_barrier();
}
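
/*
 * Usage sketch (illustrative): sched_locked is an unsigned counter that is
 * decremented on lock, so calls nest and each z_sched_lock() must be
 * balanced by an unlock that increments the counter back; the public
 * k_sched_unlock() plays that role. The assertion above guards against
 * wrapping the counter back to 0 (the unlocked value) by over-nesting.
 *
 *	z_sched_lock();
 *	do_non_blocking_work();    hypothetical helper
 *	k_sched_unlock();
 */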

/*
 * APIs for working with the Zephyr kernel scheduler. Intended for use in
 * the management of IPC objects, either in the core kernel or in IPC
 * mechanisms implemented by OS compatibility layers, providing basic
 * wait/wake operations with spinlocks used for synchronization.
 *
 * These APIs are public and will be treated as contract, even if the
 * underlying scheduler implementation changes.
 */

/**
 * Wake up a thread pending on the provided wait queue
 *
 * Given a wait_q, wake up the highest priority thread on the queue. If the
 * queue was empty, just return false.
 *
 * Otherwise, do the following, in order, holding _sched_spinlock the entire
 * time so that the thread state is guaranteed not to change:
 * - Set the thread's swap return values to swap_retval and swap_data
 * - un-pend and ready the thread, but do not invoke the scheduler.
 *
 * Repeatedly calling this function until it returns false is a suitable
 * way to wake all threads on the queue.
 *
 * It is up to the caller to implement locking such that the return value of
 * this function (whether a thread was woken up or not) does not immediately
 * become stale. Calls to wait and wake on the same wait_q object must have
 * synchronization. Calling this without holding any spinlock is a sign that
 * this API is not being used properly.
 *
 * @param wait_q Wait queue to wake up the highest prio thread
 * @param swap_retval Swap return value for woken thread
 * @param swap_data Data return value to supplement swap_retval. May be NULL.
 * @retval true If a thread was woken up
 * @retval false If the wait_q was empty
 */
bool z_sched_wake(_wait_q_t *wait_q, int swap_retval, void *swap_data);
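
/*
 * Illustrative sketch (not part of this header): the "give" side of a
 * hypothetical IPC object. The struct my_obj type and its fields are
 * invented for the example; z_sched_wake() and z_reschedule() are the
 * calls declared in this header.
 *
 *	void my_obj_give(struct my_obj *obj)
 *	{
 *		k_spinlock_key_t key = k_spin_lock(&obj->lock);
 *
 *		if (!z_sched_wake(&obj->wait_q, 0, NULL)) {
 *			obj->count++;    nobody was waiting; bank the give
 *		}
 *		z_reschedule(&obj->lock, key);
 *	}
 */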

/**
 * Wakes the specified thread.
 *
 * Given a specific thread, wake it up. This routine assumes that the given
 * thread is not on the timeout queue.
 *
 * @param thread Given thread to wake up.
 * @param is_timeout True if called from the timer ISR; false otherwise.
 */
void z_sched_wake_thread(struct k_thread *thread, bool is_timeout);
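
/*
 * Illustrative sketch (not part of this header): a timeout expiry handler
 * waking the thread that owns it, with is_timeout set to true because the
 * call is made from timer ISR context. The container layout is assumed
 * for the example.
 *
 *	static void my_timeout_expired(struct _timeout *t)
 *	{
 *		struct k_thread *thread =
 *			CONTAINER_OF(t, struct k_thread, base.timeout);
 *
 *		z_sched_wake_thread(thread, true);
 *	}
 */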

/**
 * Wake up all threads pending on the provided wait queue
 *
 * Convenience function to invoke z_sched_wake() on all threads in the queue
 * until there are no more to wake up.
 *
 * @param wait_q Wait queue to wake all pending threads on
 * @param swap_retval Swap return value for each woken thread
 * @param swap_data Data return value to supplement swap_retval. May be NULL.
 * @retval true If any threads were woken up
 * @retval false If the wait_q was empty
 */
static inline bool z_sched_wake_all(_wait_q_t *wait_q, int swap_retval,
				    void *swap_data)
{
	bool woken = false;

	while (z_sched_wake(wait_q, swap_retval, swap_data)) {
		woken = true;
	}

	/* True if we woke at least one thread up */
	return woken;
}

/**
 * Atomically put the current thread to sleep on a wait queue, with timeout
 *
 * The thread will be added to the provided waitqueue. The lock, which should
 * be held by the caller with the provided key, will be released once this is
 * completely done and we have swapped out.
 *
 * The return value and data pointer are set by whoever woke us up via
 * z_sched_wake().
 *
 * @param lock Address of spinlock to release when we swap out
 * @param key Key to the provided spinlock when it was locked
 * @param wait_q Wait queue to go to sleep on
 * @param timeout Waiting period to be woken up, or K_FOREVER to wait
 *                indefinitely.
 * @param data Storage location for data pointer set when thread was woken up.
 *             May be NULL if not used.
 * @retval Return value set by whatever woke us up, or -EAGAIN if the timeout
 *         expired without being woken up.
 */
int z_sched_wait(struct k_spinlock *lock, k_spinlock_key_t key,
		 _wait_q_t *wait_q, k_timeout_t timeout, void **data);
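
/*
 * Illustrative sketch (not part of this header): the matching "take" side
 * of the hypothetical IPC object sketched after z_sched_wake(). The caller
 * takes the spinlock, and z_sched_wait() releases it atomically with
 * pending the thread, returning whatever swap_retval the waker supplied,
 * or -EAGAIN on timeout.
 *
 *	int my_obj_take(struct my_obj *obj, k_timeout_t timeout)
 *	{
 *		k_spinlock_key_t key = k_spin_lock(&obj->lock);
 *
 *		if (obj->count > 0) {
 *			obj->count--;
 *			k_spin_unlock(&obj->lock, key);
 *			return 0;
 *		}
 *
 *		return z_sched_wait(&obj->lock, key, &obj->wait_q,
 *				    timeout, NULL);
 *	}
 */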

/**
 * @brief Walks the wait queue invoking the callback on each waiting thread
 *
 * This function walks the wait queue invoking the callback function on each
 * waiting thread while holding _sched_spinlock. This can be useful for
 * routines that need to operate on multiple waiting threads.
 *
 * CAUTION! As a wait queue is of indeterminate length, the scheduler will be
 * locked for an indeterminate amount of time. This may impact system
 * performance. As such, care must be taken when using both this function and
 * the specified callback.
 *
 * @param wait_q Identifies the wait queue to walk
 * @param func Callback to invoke on each waiting thread
 * @param data Custom data passed to the callback
 *
 * @retval non-zero if walk is terminated by the callback; otherwise 0
 */
int z_sched_waitq_walk(_wait_q_t *wait_q,
		       int (*func)(struct k_thread *, void *), void *data);
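
/*
 * Illustrative sketch (not part of this header): a walk callback that
 * counts waiters and stops early at a limit. A non-zero return from the
 * callback terminates the walk and becomes the return value of
 * z_sched_waitq_walk(); all names below are invented for the example.
 *
 *	static int count_waiter(struct k_thread *thread, void *data)
 *	{
 *		int *count = data;
 *
 *		ARG_UNUSED(thread);
 *		return (++(*count) >= 10) ? 1 : 0;
 *	}
 *
 *	int count = 0;
 *	(void)z_sched_waitq_walk(&obj->wait_q, count_waiter, &count);
 */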

/** @brief Halt thread cycle usage accounting.
 *
 * Halts the accumulation of thread cycle usage and adds the current
 * total to the thread's counter. Called on context switch.
 *
 * Note that this function is idempotent. The core kernel code calls
 * it at the end of interrupt handlers (because that is where we have
 * a portable hook) when context switching, which charges any cycles
 * spent in the ISR to the per-thread accounting. But architecture
 * code can also call it earlier, out of interrupt entry, to improve
 * measurement fidelity.
 *
 * This function assumes local interrupts are masked (so that the
 * current CPU pointer and current thread are safe to modify), but
 * requires no other synchronization. Architecture layers don't need
 * to do anything more.
 */
void z_sched_usage_stop(void);

void z_sched_usage_start(struct k_thread *thread);

/**
 * @brief Retrieves CPU cycle usage data for specified core
 */
void z_sched_cpu_usage(uint8_t core_id, struct k_thread_runtime_stats *stats);

/**
 * @brief Retrieves thread cycle usage data for specified thread
 */
void z_sched_thread_usage(struct k_thread *thread,
			  struct k_thread_runtime_stats *stats);

static inline void z_sched_usage_switch(struct k_thread *thread)
{
	ARG_UNUSED(thread);
#ifdef CONFIG_SCHED_THREAD_USAGE
	z_sched_usage_stop();
	z_sched_usage_start(thread);
#endif /* CONFIG_SCHED_THREAD_USAGE */
}

#endif /* ZEPHYR_KERNEL_INCLUDE_KSCHED_H_ */