/*
 * Copyright (c) 2016-2017 Wind River Systems, Inc.
 * Copyright (c) 2024 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_KERNEL_INCLUDE_THREAD_H_
#define ZEPHYR_KERNEL_INCLUDE_THREAD_H_

#include <zephyr/kernel.h>
#include <kernel_internal.h>
#include <timeout_q.h>


#define Z_STATE_STR_DUMMY       "dummy"
#define Z_STATE_STR_PENDING     "pending"
#define Z_STATE_STR_SLEEPING    "sleeping"
#define Z_STATE_STR_DEAD        "dead"
#define Z_STATE_STR_SUSPENDED   "suspended"
#define Z_STATE_STR_ABORTING    "aborting"
#define Z_STATE_STR_SUSPENDING  "suspending"
#define Z_STATE_STR_QUEUED      "queued"

#ifdef CONFIG_THREAD_MONITOR
/* This lock protects the linked list of active threads; i.e. the
 * initial _kernel.threads pointer and the linked list made up of
 * thread->next_thread (until NULL)
 */
extern struct k_spinlock z_thread_monitor_lock;
#endif /* CONFIG_THREAD_MONITOR */

void idle(void *unused1, void *unused2, void *unused3);

/* clean up when a thread is aborted */

#if defined(CONFIG_THREAD_MONITOR)
void z_thread_monitor_exit(struct k_thread *thread);
#else
#define z_thread_monitor_exit(thread) \
	do {/* nothing */    \
	} while (false)
#endif /* CONFIG_THREAD_MONITOR */

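/* Schedule a newly created thread for execution: start it immediately
 * when the delay is K_NO_WAIT (or when no system clock exists),
 * otherwise arm the thread's start timeout so it begins running once
 * the delay expires.
 */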
static inline void thread_schedule_new(struct k_thread *thread, k_timeout_t delay)
{
#ifdef CONFIG_SYS_CLOCK_EXISTS
	if (K_TIMEOUT_EQ(delay, K_NO_WAIT)) {
		k_thread_start(thread);
	} else {
		z_add_thread_timeout(thread, delay);
	}
#else
	ARG_UNUSED(delay);
	k_thread_start(thread);
#endif /* CONFIG_SYS_CLOCK_EXISTS */
}

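/* True if the thread runs at a preemptible priority, i.e. it can be
 * displaced by a higher-priority ready thread without yielding.
 */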
static inline int thread_is_preemptible(struct k_thread *thread)
{
	/* explanation in kernel_structs.h */
	return thread->base.preempt <= _PREEMPT_THRESHOLD;
}

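/* True if the thread's priority lies in the meta-IRQ band, i.e. one
 * of the CONFIG_NUM_METAIRQ_PRIORITIES highest (most negative)
 * cooperative priorities.
 */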
static inline int thread_is_metairq(struct k_thread *thread)
{
#if CONFIG_NUM_METAIRQ_PRIORITIES > 0
	return (thread->base.prio - K_HIGHEST_THREAD_PRIO)
		< CONFIG_NUM_METAIRQ_PRIORITIES;
#else
	ARG_UNUSED(thread);
	return 0;
#endif /* CONFIG_NUM_METAIRQ_PRIORITIES */
}

#ifdef CONFIG_ASSERT
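/* Dummy threads are placeholder thread contexts used during early
 * boot and CPU bring-up, before a real thread context exists; this
 * check is only needed by assertions.
 */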
static inline bool is_thread_dummy(struct k_thread *thread)
{
	return (thread->base.thread_state & _THREAD_DUMMY) != 0U;
}
#endif /* CONFIG_ASSERT */

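/* The predicates and markers below read and write bits in
 * thread->base.thread_state; each corresponds to one of the
 * _THREAD_* state flags (cf. the Z_STATE_STR_* names above).
 */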
static inline bool z_is_thread_suspended(struct k_thread *thread)
{
	return (thread->base.thread_state & _THREAD_SUSPENDED) != 0U;
}

static inline bool z_is_thread_pending(struct k_thread *thread)
{
	return (thread->base.thread_state & _THREAD_PENDING) != 0U;
}

static inline bool z_is_thread_prevented_from_running(struct k_thread *thread)
{
	uint8_t state = thread->base.thread_state;

	return (state & (_THREAD_PENDING | _THREAD_SLEEPING | _THREAD_DEAD |
			 _THREAD_DUMMY | _THREAD_SUSPENDED)) != 0U;
}

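/* True if the thread has an active kernel timeout armed, e.g. a
 * timed sleep or a wait with a finite timeout; see timeout_q.h.
 */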
static inline bool z_is_thread_timeout_active(struct k_thread *thread)
{
	return !z_is_inactive_timeout(&thread->base.timeout);
}

static inline bool z_is_thread_ready(struct k_thread *thread)
{
	return !z_is_thread_prevented_from_running(thread);
}

static inline bool z_is_thread_state_set(struct k_thread *thread, uint32_t state)
{
	return (thread->base.thread_state & state) != 0U;
}

static inline bool z_is_thread_queued(struct k_thread *thread)
{
	return z_is_thread_state_set(thread, _THREAD_QUEUED);
}

static inline void z_mark_thread_as_queued(struct k_thread *thread)
{
	thread->base.thread_state |= _THREAD_QUEUED;
}

static inline void z_mark_thread_as_not_queued(struct k_thread *thread)
{
	thread->base.thread_state &= ~_THREAD_QUEUED;
}

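/* Marking a thread suspended or resumed also emits the matching
 * scheduler tracing event.
 */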
static inline void z_mark_thread_as_suspended(struct k_thread *thread)
{
	thread->base.thread_state |= _THREAD_SUSPENDED;

	SYS_PORT_TRACING_FUNC(k_thread, sched_suspend, thread);
}

static inline void z_mark_thread_as_not_suspended(struct k_thread *thread)
{
	thread->base.thread_state &= ~_THREAD_SUSPENDED;

	SYS_PORT_TRACING_FUNC(k_thread, sched_resume, thread);
}

static inline void z_mark_thread_as_pending(struct k_thread *thread)
{
	thread->base.thread_state |= _THREAD_PENDING;
}

static inline void z_mark_thread_as_not_pending(struct k_thread *thread)
{
	thread->base.thread_state &= ~_THREAD_PENDING;
}

static inline bool z_is_thread_sleeping(struct k_thread *thread)
{
	return (thread->base.thread_state & _THREAD_SLEEPING) != 0U;
}

static inline void z_mark_thread_as_sleeping(struct k_thread *thread)
{
	thread->base.thread_state |= _THREAD_SLEEPING;
}

static inline void z_mark_thread_as_not_sleeping(struct k_thread *thread)
{
	thread->base.thread_state &= ~_THREAD_SLEEPING;
}

/*
 * This function tags the given thread as essential to system operation.
 * Exceptions raised by this thread will be treated as a fatal system error.
 */
static inline void z_thread_essential_set(struct k_thread *thread)
{
	thread->base.user_options |= K_ESSENTIAL;
}

/*
 * This function tags the given thread as not essential to system operation.
 * Exceptions raised by this thread may be recoverable.
 * (This is the default tag for a thread.)
 */
static inline void z_thread_essential_clear(struct k_thread *thread)
{
	thread->base.user_options &= ~K_ESSENTIAL;
}

/*
 * This routine indicates if the given thread is an essential system thread.
 *
 * Returns true if the thread is essential, false if it is not.
 */
static inline bool z_is_thread_essential(struct k_thread *thread)
{
	return (thread->base.user_options & K_ESSENTIAL) == K_ESSENTIAL;
}

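/* Decide whether 'thread' may preempt the currently running thread.
 * 'preempt_ok' is nonzero when the caller explicitly permits
 * preemption regardless of cooperative scheduling, e.g. at an
 * explicit yield or reschedule point.
 */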
static ALWAYS_INLINE bool should_preempt(struct k_thread *thread,
					 int preempt_ok)
{
	/* Preemption is OK if it's being explicitly allowed by
	 * software state (e.g. the thread called k_yield())
	 */
	if (preempt_ok != 0) {
		return true;
	}

	__ASSERT(_current != NULL, "");

	/* Or if we're pended/suspended/dummy (duh) */
	if (z_is_thread_prevented_from_running(_current)) {
		return true;
	}

	/* Otherwise we have to be running a preemptible thread or
	 * switching to a metairq
	 */
	if (thread_is_preemptible(_current) || thread_is_metairq(thread)) {
		return true;
	}

	/* Edge case on ARM where a thread can be pended out of an
	 * interrupt handler before the "synchronous" swap starts
	 * context switching.  Platforms with atomic swap can never
	 * hit this.
	 */
	if (IS_ENABLED(CONFIG_SWAP_NONATOMIC)
	    && z_is_thread_timeout_active(thread)) {
		return true;
	}

	return false;
}

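/* Identify the idle thread either by its entry point or by the
 * thread object itself; the SMP case keeps a per-thread flag, since
 * there is one idle thread per CPU.
 */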
static inline bool z_is_idle_thread_entry(k_thread_entry_t entry_point)
{
	return entry_point == idle;
}

static inline bool z_is_idle_thread_object(struct k_thread *thread)
{
#ifdef CONFIG_MULTITHREADING
#ifdef CONFIG_SMP
	return thread->base.is_idle;
#else
	return thread == &z_idle_threads[0];
#endif /* CONFIG_SMP */
#else
	return false;
#endif /* CONFIG_MULTITHREADING */
}

#endif /* ZEPHYR_KERNEL_INCLUDE_THREAD_H_ */