/*
 * Copyright (c) 2016-2017 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_KERNEL_INCLUDE_KSCHED_H_
#define ZEPHYR_KERNEL_INCLUDE_KSCHED_H_

#include <zephyr/kernel_structs.h>
#include <kernel_internal.h>
#include <timeout_q.h>
#include <kthread.h>
#include <zephyr/tracing/tracing.h>
#include <stdbool.h>
#include <priority_q.h>

BUILD_ASSERT(K_LOWEST_APPLICATION_THREAD_PRIO
	     >= K_HIGHEST_APPLICATION_THREAD_PRIO);

#ifdef CONFIG_MULTITHREADING
#define Z_VALID_PRIO(prio, entry_point)				     \
	(((prio) == K_IDLE_PRIO && z_is_idle_thread_entry(entry_point)) || \
	 ((K_LOWEST_APPLICATION_THREAD_PRIO			     \
	   >= K_HIGHEST_APPLICATION_THREAD_PRIO)		     \
	  && (prio) >= K_HIGHEST_APPLICATION_THREAD_PRIO	     \
	  && (prio) <= K_LOWEST_APPLICATION_THREAD_PRIO))

#define Z_ASSERT_VALID_PRIO(prio, entry_point) do { \
	__ASSERT(Z_VALID_PRIO((prio), (entry_point)), \
		 "invalid priority (%d); allowed range: %d to %d", \
		 (prio), \
		 K_LOWEST_APPLICATION_THREAD_PRIO, \
		 K_HIGHEST_APPLICATION_THREAD_PRIO); \
	} while (false)
#else
#define Z_VALID_PRIO(prio, entry_point) ((prio) == -1)
#define Z_ASSERT_VALID_PRIO(prio, entry_point) __ASSERT((prio) == -1, "")
#endif /* CONFIG_MULTITHREADING */
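
/*
 * Illustrative values (a sketch, not part of this header), assuming the
 * default Kconfig of CONFIG_NUM_COOP_PRIORITIES=16 and
 * CONFIG_NUM_PREEMPT_PRIORITIES=15, which yields
 * K_HIGHEST_APPLICATION_THREAD_PRIO == -16,
 * K_LOWEST_APPLICATION_THREAD_PRIO == 14 and K_IDLE_PRIO == 15:
 *
 *	Z_VALID_PRIO(-16, entry)	true (highest application priority)
 *	Z_VALID_PRIO(14, entry)		true (lowest application priority)
 *	Z_VALID_PRIO(15, entry)		true only for the idle thread entry
 *	Z_VALID_PRIO(-17, entry)	false (out of range)
 */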

#if (CONFIG_MP_MAX_NUM_CPUS == 1)
#define LOCK_SCHED_SPINLOCK
#else
#define LOCK_SCHED_SPINLOCK   K_SPINLOCK(&_sched_spinlock)
#endif
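
/*
 * Usage sketch: on SMP builds the braced body after LOCK_SCHED_SPINLOCK
 * runs with _sched_spinlock held; on uniprocessor builds the macro expands
 * to nothing and the braces form a plain block, the caller being expected
 * to have locked interrupts already (see z_unpend_first_thread() below):
 *
 *	LOCK_SCHED_SPINLOCK {
 *		thread = _priq_wait_best(&wait_q->waitq);
 *	}
 */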

extern struct k_spinlock _sched_spinlock;

extern struct k_thread _thread_dummy;

void z_sched_init(void);
void z_move_thread_to_end_of_prio_q(struct k_thread *thread);
void z_unpend_thread_no_timeout(struct k_thread *thread);
struct k_thread *z_unpend1_no_timeout(_wait_q_t *wait_q);
int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
	       _wait_q_t *wait_q, k_timeout_t timeout);
void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q,
		   k_timeout_t timeout);
void z_reschedule(struct k_spinlock *lock, k_spinlock_key_t key);
void z_reschedule_irqlock(uint32_t key);
void z_unpend_thread(struct k_thread *thread);
int z_unpend_all(_wait_q_t *wait_q);
bool z_thread_prio_set(struct k_thread *thread, int prio);
void *z_get_next_switch_handle(void *interrupted);

void z_time_slice(void);
void z_reset_time_slice(struct k_thread *curr);
void z_sched_ipi(void);
void z_sched_start(struct k_thread *thread);
void z_ready_thread(struct k_thread *thread);
void z_requeue_current(struct k_thread *curr);
struct k_thread *z_swap_next_thread(void);
void z_thread_abort(struct k_thread *thread);
void move_thread_to_end_of_prio_q(struct k_thread *thread);
bool thread_is_sliceable(struct k_thread *thread);

static inline void z_reschedule_unlocked(void)
{
	(void) z_reschedule_irqlock(arch_irq_lock());
}

static inline bool z_is_under_prio_ceiling(int prio)
{
	return prio >= CONFIG_PRIORITY_CEILING;
}

static inline int z_get_new_prio_with_ceiling(int prio)
{
	return z_is_under_prio_ceiling(prio) ? prio : CONFIG_PRIORITY_CEILING;
}

static inline bool z_is_prio1_higher_than_or_equal_to_prio2(int prio1, int prio2)
{
	return prio1 <= prio2;
}

static inline bool z_is_prio_higher_or_equal(int prio1, int prio2)
{
	return z_is_prio1_higher_than_or_equal_to_prio2(prio1, prio2);
}

static inline bool z_is_prio1_lower_than_or_equal_to_prio2(int prio1, int prio2)
{
	return prio1 >= prio2;
}

static inline bool z_is_prio1_higher_than_prio2(int prio1, int prio2)
{
	return prio1 < prio2;
}

static inline bool z_is_prio_higher(int prio, int test_prio)
{
	return z_is_prio1_higher_than_prio2(prio, test_prio);
}

static inline bool z_is_prio_lower_or_equal(int prio1, int prio2)
{
	return z_is_prio1_lower_than_or_equal_to_prio2(prio1, prio2);
}
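
/*
 * Reminder on the comparisons above: Zephyr priorities are inverted, i.e.
 * a numerically lower value means a higher scheduling priority, so
 * "higher than" maps to operator < on the raw values:
 *
 *	z_is_prio_higher(-1, 0)			true (cooperative -1 beats 0)
 *	z_is_prio_higher_or_equal(7, 7)		true
 *	z_is_prio_higher(7, -8)			false
 */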

static inline bool _is_valid_prio(int prio, k_thread_entry_t entry_point)
{
	if ((prio == K_IDLE_PRIO) && z_is_idle_thread_entry(entry_point)) {
		return true;
	}

	if (!z_is_prio_higher_or_equal(prio,
				       K_LOWEST_APPLICATION_THREAD_PRIO)) {
		return false;
	}

	if (!z_is_prio_lower_or_equal(prio,
				      K_HIGHEST_APPLICATION_THREAD_PRIO)) {
		return false;
	}

	return true;
}

static inline void z_sched_lock(void)
{
	__ASSERT(!arch_is_in_isr(), "");
	__ASSERT(_current->base.sched_locked != 1U, "");

	--_current->base.sched_locked;

	compiler_barrier();
}

static ALWAYS_INLINE _wait_q_t *pended_on_thread(struct k_thread *thread)
{
	__ASSERT_NO_MSG(thread->base.pended_on);

	return thread->base.pended_on;
}

static inline void unpend_thread_no_timeout(struct k_thread *thread)
{
	_priq_wait_remove(&pended_on_thread(thread)->waitq, thread);
	z_mark_thread_as_not_pending(thread);
	thread->base.pended_on = NULL;
}

/*
 * In a multiprocessor system, z_unpend_first_thread() must lock the scheduler
 * spinlock _sched_spinlock. However, in a uniprocessor system, that is not
 * necessary as the caller has already taken precautions (in the form of
 * locking interrupts).
 */
static ALWAYS_INLINE struct k_thread *z_unpend_first_thread(_wait_q_t *wait_q)
{
	struct k_thread *thread = NULL;

	__ASSERT_EVAL(, int key = arch_irq_lock(); arch_irq_unlock(key),
		      !arch_irq_unlocked(key), "");

	LOCK_SCHED_SPINLOCK {
		thread = _priq_wait_best(&wait_q->waitq);
		if (unlikely(thread != NULL)) {
			unpend_thread_no_timeout(thread);
			z_abort_thread_timeout(thread);
		}
	}

	return thread;
}

/*
 * APIs for working with the Zephyr kernel scheduler. Intended for use in
 * the management of IPC objects, either in the core kernel or in IPC
 * implementations provided by OS compatibility layers, offering basic
 * wait/wake operations with spinlocks used for synchronization.
 *
 * These APIs are public and will be treated as a contract, even if the
 * underlying scheduler implementation changes.
 */

/**
 * Wake up a thread pending on the provided wait queue
 *
 * Given a wait_q, wake up the highest priority thread on the queue. If the
 * queue was empty, just return false.
 *
 * Otherwise, do the following, in order, holding _sched_spinlock the entire
 * time so that the thread state is guaranteed not to change:
 * - Set the thread's swap return values to swap_retval and swap_data
 * - Un-pend and ready the thread, but do not invoke the scheduler
 *
 * Calling this function repeatedly until it returns false is a suitable
 * way to wake all threads on the queue.
 *
 * It is up to the caller to implement locking such that the return value of
 * this function (whether a thread was woken up or not) does not immediately
 * become stale. Calls to wait and wake on the same wait_q object must have
 * synchronization. Calling this without holding any spinlock is a sign that
 * this API is not being used properly.
 *
 * @param wait_q Wait queue to wake up the highest prio thread
 * @param swap_retval Swap return value for woken thread
 * @param swap_data Data return value to supplement swap_retval. May be NULL.
 * @retval true If a thread was woken up
 * @retval false If the wait_q was empty
 */
bool z_sched_wake(_wait_q_t *wait_q, int swap_retval, void *swap_data);
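
/*
 * A minimal signal-side sketch for a hypothetical IPC object (my_lock,
 * my_waitq and my_obj_signal() are illustrative names, not kernel APIs).
 * The object's own spinlock is held across the wake so that the result
 * cannot race with a concurrent waiter:
 *
 *	static struct k_spinlock my_lock;
 *	static _wait_q_t my_waitq;
 *
 *	void my_obj_signal(void *payload)
 *	{
 *		k_spinlock_key_t key = k_spin_lock(&my_lock);
 *
 *		if (!z_sched_wake(&my_waitq, 0, payload)) {
 *			// no waiter: record the signal in the object instead
 *		}
 *		k_spin_unlock(&my_lock, key);
 *	}
 */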

/**
 * Wakes the specified thread.
 *
 * Given a specific thread, wake it up. This routine assumes that the given
 * thread is not on the timeout queue.
 *
 * @param thread Given thread to wake up.
 * @param is_timeout True if called from the timer ISR; false otherwise.
 *
 */
void z_sched_wake_thread(struct k_thread *thread, bool is_timeout);

/**
 * Wake up all threads pending on the provided wait queue
 *
 * Convenience function to invoke z_sched_wake() on all threads in the queue
 * until there are no more to wake up.
 *
 * @param wait_q Wait queue whose pending threads will all be woken up
 * @param swap_retval Swap return value for each woken thread
 * @param swap_data Data return value to supplement swap_retval. May be NULL.
 * @retval true If any threads were woken up
 * @retval false If the wait_q was empty
 */
static inline bool z_sched_wake_all(_wait_q_t *wait_q, int swap_retval,
				    void *swap_data)
{
	bool woken = false;

	while (z_sched_wake(wait_q, swap_retval, swap_data)) {
		woken = true;
	}

	/* True if we woke at least one thread up */
	return woken;
}

/**
 * Atomically put the current thread to sleep on a wait queue, with timeout
 *
 * The thread will be added to the provided waitqueue. The lock, which should
 * be held by the caller with the provided key, will be released once this is
 * completely done and we have swapped out.
 *
 * The return value and data pointer are set by whoever woke us up via
 * z_sched_wake().
 *
 * @param lock Address of spinlock to release when we swap out
 * @param key Key to the provided spinlock when it was locked
 * @param wait_q Wait queue to go to sleep on
 * @param timeout Waiting period to be woken up, or K_FOREVER to wait
 *                indefinitely.
 * @param data Storage location for the data pointer set when the thread was
 *             woken up. May be NULL if not used.
 * @return Return value set by whatever woke us up, or -EAGAIN if the timeout
 *         expired without being woken up.
 */
int z_sched_wait(struct k_spinlock *lock, k_spinlock_key_t key,
		 _wait_q_t *wait_q, k_timeout_t timeout, void **data);
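
/*
 * The matching wait-side sketch for the same hypothetical object as above
 * (my_obj_take() is an illustrative name). The object lock taken by the
 * caller is handed to z_sched_wait(), which releases it atomically with
 * pending, closing the race between the state check and the sleep:
 *
 *	int my_obj_take(void **payload, k_timeout_t timeout)
 *	{
 *		k_spinlock_key_t key = k_spin_lock(&my_lock);
 *
 *		// ... if a signal is already recorded, consume it,
 *		// k_spin_unlock(&my_lock, key) and return 0 here ...
 *
 *		// Releases my_lock and swaps out; returns the waker's
 *		// swap_retval (0 above) or -EAGAIN on timeout.
 *		return z_sched_wait(&my_lock, key, &my_waitq, timeout,
 *				    payload);
 *	}
 */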

/**
 * @brief Walks the wait queue invoking the callback on each waiting thread
 *
 * This function walks the wait queue invoking the callback function on each
 * waiting thread while holding _sched_spinlock. This can be useful for
 * routines that need to operate on multiple waiting threads.
 *
 * CAUTION! As a wait queue is of indeterminate length, the scheduler will be
 * locked for an indeterminate amount of time. This may impact system
 * performance. As such, care must be taken when using both this function and
 * the specified callback.
 *
 * @param wait_q Identifies the wait queue to walk
 * @param func   Callback to invoke on each waiting thread
 * @param data   Custom data passed to the callback
 *
 * @retval non-zero if walk is terminated by the callback; otherwise 0
 */
int z_sched_waitq_walk(_wait_q_t *wait_q,
		       int (*func)(struct k_thread *, void *), void *data);
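
/*
 * An illustrative walk callback (count_waiter() is a hypothetical name):
 * returning 0 continues the walk, while a non-zero return stops it and
 * becomes z_sched_waitq_walk()'s return value.
 *
 *	static int count_waiter(struct k_thread *thread, void *data)
 *	{
 *		ARG_UNUSED(thread);
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int nwaiters = 0;
 *
 *	(void) z_sched_waitq_walk(&my_waitq, count_waiter, &nwaiters);
 */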

/** @brief Halt thread cycle usage accounting.
 *
 * Halts the accumulation of thread cycle usage and adds the current
 * total to the thread's counter.  Called on context switch.
 *
 * Note that this function is idempotent.  The core kernel code calls
 * it at the end of interrupt handlers (because that is where we have
 * a portable hook) where we are context switching, which will include
 * any cycles spent in the ISR in the per-thread accounting.  But
 * architecture code can also call it earlier out of interrupt entry
 * to improve measurement fidelity.
 *
 * This function assumes local interrupts are masked (so that the
 * current CPU pointer and current thread are safe to modify), but
 * requires no other synchronization.  Architecture layers don't need
 * to do anything more.
 */
void z_sched_usage_stop(void);

void z_sched_usage_start(struct k_thread *thread);

/**
 * @brief Retrieves CPU cycle usage data for specified core
 */
void z_sched_cpu_usage(uint8_t core_id, struct k_thread_runtime_stats *stats);

/**
 * @brief Retrieves thread cycle usage data for specified thread
 */
void z_sched_thread_usage(struct k_thread *thread,
			  struct k_thread_runtime_stats *stats);

static inline void z_sched_usage_switch(struct k_thread *thread)
{
	ARG_UNUSED(thread);
#ifdef CONFIG_SCHED_THREAD_USAGE
	z_sched_usage_stop();
	z_sched_usage_start(thread);
#endif /* CONFIG_SCHED_THREAD_USAGE */
}

#endif /* ZEPHYR_KERNEL_INCLUDE_KSCHED_H_ */