/*
 * Copyright (c) 2016-2017 Wind River Systems, Inc.
 * Copyright (c) 2024 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_KERNEL_INCLUDE_THREAD_H_
#define ZEPHYR_KERNEL_INCLUDE_THREAD_H_

#include <zephyr/kernel.h>
#include <kernel_internal.h>
#include <timeout_q.h>


#define Z_STATE_STR_DUMMY       "dummy"
#define Z_STATE_STR_PENDING     "pending"
#define Z_STATE_STR_SLEEPING    "sleeping"
#define Z_STATE_STR_DEAD        "dead"
#define Z_STATE_STR_SUSPENDED   "suspended"
#define Z_STATE_STR_ABORTING    "aborting"
#define Z_STATE_STR_SUSPENDING  "suspending"
#define Z_STATE_STR_QUEUED      "queued"

#ifdef CONFIG_THREAD_MONITOR
/* This lock protects the linked list of active threads; i.e. the
 * initial _kernel.threads pointer and the linked list made up of
 * thread->next_thread (until NULL)
 */
extern struct k_spinlock z_thread_monitor_lock;
#endif /* CONFIG_THREAD_MONITOR */
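/*
 * Illustrative sketch only (assumes CONFIG_THREAD_MONITOR is enabled and
 * uses the standard k_spin_lock()/k_spin_unlock() API): a walk of the
 * monitor list holds z_thread_monitor_lock for the whole traversal and
 * must not block while doing so:
 *
 *	k_spinlock_key_t key = k_spin_lock(&z_thread_monitor_lock);
 *
 *	for (struct k_thread *t = _kernel.threads; t != NULL;
 *	     t = t->next_thread) {
 *		// inspect t; do not sleep or pend here
 *	}
 *
 *	k_spin_unlock(&z_thread_monitor_lock, key);
 */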

#ifdef CONFIG_MULTITHREADING
extern struct k_thread z_idle_threads[CONFIG_MP_MAX_NUM_CPUS];
#endif /* CONFIG_MULTITHREADING */

void idle(void *unused1, void *unused2, void *unused3);

/* clean up when a thread is aborted */

#if defined(CONFIG_THREAD_MONITOR)
void z_thread_monitor_exit(struct k_thread *thread);
#else
#define z_thread_monitor_exit(thread) \
	do {/* nothing */    \
	} while (false)
#endif /* CONFIG_THREAD_MONITOR */

void z_thread_abort(struct k_thread *thread);

/* Start a freshly created thread immediately, or arm a timeout that
 * will start it after the requested delay.
 */
static inline void thread_schedule_new(struct k_thread *thread, k_timeout_t delay)
{
#ifdef CONFIG_SYS_CLOCK_EXISTS
	if (K_TIMEOUT_EQ(delay, K_NO_WAIT)) {
		k_thread_start(thread);
	} else {
		z_add_thread_timeout(thread, delay);
	}
#else
	ARG_UNUSED(delay);
	k_thread_start(thread);
#endif /* CONFIG_SYS_CLOCK_EXISTS */
}
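/*
 * Illustrative only, not a real call site: once a thread object is
 * initialized, the creator either starts it right away or defers it:
 *
 *	thread_schedule_new(thread, K_NO_WAIT);   // start immediately
 *	thread_schedule_new(thread, K_MSEC(10));  // start ~10 ms from now
 *
 * Without CONFIG_SYS_CLOCK_EXISTS there is no timeout machinery, so any
 * delay is ignored and the thread starts immediately.
 */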

static inline int thread_is_preemptible(const struct k_thread *thread)
{
	/* explanation in kernel_structs.h */
	return thread->base.preempt <= _PREEMPT_THRESHOLD;
}


static inline int thread_is_metairq(const struct k_thread *thread)
{
#if CONFIG_NUM_METAIRQ_PRIORITIES > 0
	return (thread->base.prio - K_HIGHEST_THREAD_PRIO)
		< CONFIG_NUM_METAIRQ_PRIORITIES;
#else
	ARG_UNUSED(thread);
	return 0;
#endif /* CONFIG_NUM_METAIRQ_PRIORITIES */
}
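/*
 * Worked example: with CONFIG_NUM_METAIRQ_PRIORITIES=2, the meta-IRQ band
 * is the two numerically lowest (i.e. most urgent) priorities, so threads
 * at K_HIGHEST_THREAD_PRIO and K_HIGHEST_THREAD_PRIO + 1 qualify.
 */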

static inline bool is_thread_dummy(const struct k_thread *thread)
{
	return (thread->base.thread_state & _THREAD_DUMMY) != 0U;
}

static inline bool z_is_thread_suspended(const struct k_thread *thread)
{
	return (thread->base.thread_state & _THREAD_SUSPENDED) != 0U;
}

static inline bool z_is_thread_pending(const struct k_thread *thread)
{
	return (thread->base.thread_state & _THREAD_PENDING) != 0U;
}

static inline bool z_is_thread_dead(const struct k_thread *thread)
{
	return (thread->base.thread_state & _THREAD_DEAD) != 0U;
}

/* Return true if the thread is aborting, else false */
static inline bool z_is_thread_aborting(const struct k_thread *thread)
{
	return (thread->base.thread_state & _THREAD_ABORTING) != 0U;
}

/* Return true if the thread is aborting or suspending, else false */
static inline bool z_is_thread_halting(const struct k_thread *thread)
{
	return (thread->base.thread_state &
		(_THREAD_ABORTING | _THREAD_SUSPENDING)) != 0U;
}


static inline bool z_is_thread_prevented_from_running(const struct k_thread *thread)
{
	uint8_t state = thread->base.thread_state;

	return (state & (_THREAD_PENDING | _THREAD_SLEEPING | _THREAD_DEAD |
			 _THREAD_DUMMY | _THREAD_SUSPENDED)) != 0U;
}

static inline bool z_is_thread_timeout_active(const struct k_thread *thread)
{
	return !z_is_inactive_timeout(&thread->base.timeout);
}

static inline bool z_is_thread_ready(const struct k_thread *thread)
{
	return !z_is_thread_prevented_from_running(thread);
}
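/*
 * Note: "ready" here means only that no blocking state bit is set; it does
 * not imply the thread is currently enqueued in the run queue, which is
 * tracked separately via _THREAD_QUEUED below.
 */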

static inline bool z_is_thread_state_set(const struct k_thread *thread, uint32_t state)
{
	return (thread->base.thread_state & state) != 0U;
}

static inline bool z_is_thread_queued(const struct k_thread *thread)
{
	return z_is_thread_state_set(thread, _THREAD_QUEUED);
}

static inline void z_mark_thread_as_queued(struct k_thread *thread)
{
	thread->base.thread_state |= _THREAD_QUEUED;
}

static inline void z_mark_thread_as_not_queued(struct k_thread *thread)
{
	thread->base.thread_state &= ~_THREAD_QUEUED;
}

static inline void z_mark_thread_as_suspended(struct k_thread *thread)
{
	thread->base.thread_state |= _THREAD_SUSPENDED;

	SYS_PORT_TRACING_FUNC(k_thread, sched_suspend, thread);
}

static inline void z_mark_thread_as_not_suspended(struct k_thread *thread)
{
	thread->base.thread_state &= ~_THREAD_SUSPENDED;

	SYS_PORT_TRACING_FUNC(k_thread, sched_resume, thread);
}

static inline void z_mark_thread_as_pending(struct k_thread *thread)
{
	thread->base.thread_state |= _THREAD_PENDING;
}

static inline void z_mark_thread_as_not_pending(struct k_thread *thread)
{
	thread->base.thread_state &= ~_THREAD_PENDING;
}

static inline bool z_is_thread_sleeping(struct k_thread *thread)
{
	return (thread->base.thread_state & _THREAD_SLEEPING) != 0U;
}

static inline void z_mark_thread_as_sleeping(struct k_thread *thread)
{
	thread->base.thread_state |= _THREAD_SLEEPING;
}

static inline void z_mark_thread_as_not_sleeping(struct k_thread *thread)
{
	thread->base.thread_state &= ~_THREAD_SLEEPING;
}
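/*
 * Note: the z_mark_thread_as_*() helpers above only flip state bits (plus,
 * for suspend/resume, a tracing hook); none of them touches the run queue.
 * Callers are responsible for the matching dequeue/enqueue operations.
 */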
193 
194 /*
195  * This function tags the current thread as essential to system operation.
196  * Exceptions raised by this thread will be treated as a fatal system error.
197  */
z_thread_essential_set(struct k_thread * thread)198 static inline void z_thread_essential_set(struct k_thread *thread)
199 {
200 	thread->base.user_options |= K_ESSENTIAL;
201 }
202 
203 /*
204  * This function tags the current thread as not essential to system operation.
205  * Exceptions raised by this thread may be recoverable.
206  * (This is the default tag for a thread.)
207  */
z_thread_essential_clear(struct k_thread * thread)208 static inline void z_thread_essential_clear(struct k_thread *thread)
209 {
210 	thread->base.user_options &= ~K_ESSENTIAL;
211 }
212 
213 /*
214  * This routine indicates if the current thread is an essential system thread.
215  *
216  * Returns true if current thread is essential, false if it is not.
217  */
z_is_thread_essential(const struct k_thread * thread)218 static inline bool z_is_thread_essential(const struct k_thread *thread)
219 {
220 	return (thread->base.user_options & K_ESSENTIAL) == K_ESSENTIAL;
221 }
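/*
 * Illustrative sketch only (not a real call site): essential tagging is a
 * per-thread option bit, so a fatal-error path might consult it like this:
 *
 *	z_thread_essential_set(thread);
 *	...
 *	if (z_is_thread_essential(thread)) {
 *		// an unhandled exception in this thread is fatal to the system
 *	}
 */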


static ALWAYS_INLINE bool should_preempt(const struct k_thread *thread,
					 int preempt_ok)
{
	/* Preemption is OK if it's being explicitly allowed by
	 * software state (e.g. the thread called k_yield())
	 */
	if (preempt_ok != 0) {
		return true;
	}

	__ASSERT(_current != NULL, "");

	/* Or if we're pended/suspended/dummy (duh) */
	if (z_is_thread_prevented_from_running(_current)) {
		return true;
	}

	/* Otherwise we have to be running a preemptible thread or
	 * switching to a metairq
	 */
	if (thread_is_preemptible(_current) || thread_is_metairq(thread)) {
		return true;
	}

	/* Edge case on ARM where a thread can be pended out of an
	 * interrupt handler before the "synchronous" swap starts
	 * context switching.  Platforms with atomic swap can never
	 * hit this.
	 */
	if (IS_ENABLED(CONFIG_SWAP_NONATOMIC)
	    && z_is_thread_timeout_active(thread)) {
		return true;
	}

	return false;
}
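/*
 * Illustrative only: the scheduler core is expected to consult this when a
 * thread becomes runnable, roughly along these lines (sketch, not the
 * actual scheduler code):
 *
 *	if (should_preempt(new_thread, preempt_ok)) {
 *		// switch to new_thread (or flag a reschedule)
 *	} else {
 *		// leave _current running; new_thread waits in the run queue
 *	}
 */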


static inline bool z_is_idle_thread_entry(k_thread_entry_t entry_point)
{
	return entry_point == idle;
}

static inline bool z_is_idle_thread_object(const struct k_thread *thread)
{
#ifdef CONFIG_MULTITHREADING
#ifdef CONFIG_SMP
	/* SMP has one idle thread per CPU, so check the per-thread flag */
	return thread->base.is_idle;
#else
	return thread == &z_idle_threads[0];
#endif /* CONFIG_SMP */
#else
	return false;
#endif /* CONFIG_MULTITHREADING */
}


#endif /* ZEPHYR_KERNEL_INCLUDE_THREAD_H_ */