/*
 * Copyright (c) 2016-2017 Wind River Systems, Inc.
 * Copyright (c) 2024 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_KERNEL_INCLUDE_THREAD_H_
#define ZEPHYR_KERNEL_INCLUDE_THREAD_H_

#include <zephyr/kernel.h>
#include <kernel_internal.h>
#include <timeout_q.h>

#define Z_STATE_STR_DUMMY       "dummy"
#define Z_STATE_STR_PENDING     "pending"
#define Z_STATE_STR_SLEEPING    "sleeping"
#define Z_STATE_STR_DEAD        "dead"
#define Z_STATE_STR_SUSPENDED   "suspended"
#define Z_STATE_STR_ABORTING    "aborting"
#define Z_STATE_STR_SUSPENDING  "suspending"
#define Z_STATE_STR_QUEUED      "queued"
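
/* These strings name the _THREAD_* state bits for debug output; the
 * public k_thread_state_str() API composes them into a human-readable
 * description of a thread's state. Illustrative sketch (the buffer
 * size is an assumption):
 *
 *	char buf[32];
 *	printk("state: %s\n", k_thread_state_str(tid, buf, sizeof(buf)));
 */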

#ifdef CONFIG_THREAD_MONITOR
/* This lock protects the linked list of active threads; i.e. the
 * initial _kernel.threads pointer and the linked list made up of
 * thread->next_thread (until NULL)
 */
extern struct k_spinlock z_thread_monitor_lock;
#endif /* CONFIG_THREAD_MONITOR */
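
/* Usage sketch (illustrative; example_count_threads() is a
 * hypothetical helper, not an existing kernel function): any walk of
 * the monitor list must hold z_thread_monitor_lock, since threads may
 * be created or aborted concurrently:
 *
 *	static int example_count_threads(void)
 *	{
 *		int count = 0;
 *		k_spinlock_key_t key = k_spin_lock(&z_thread_monitor_lock);
 *
 *		for (struct k_thread *t = _kernel.threads; t != NULL;
 *		     t = t->next_thread) {
 *			count++;
 *		}
 *		k_spin_unlock(&z_thread_monitor_lock, key);
 *		return count;
 *	}
 */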

void idle(void *unused1, void *unused2, void *unused3);

/* clean up when a thread is aborted */

#if defined(CONFIG_THREAD_MONITOR)
void z_thread_monitor_exit(struct k_thread *thread);
#else
#define z_thread_monitor_exit(thread) \
	do {/* nothing */ \
	} while (false)
#endif /* CONFIG_THREAD_MONITOR */


static inline void thread_schedule_new(struct k_thread *thread, k_timeout_t delay)
{
#ifdef CONFIG_SYS_CLOCK_EXISTS
	if (K_TIMEOUT_EQ(delay, K_NO_WAIT)) {
		k_thread_start(thread);
	} else {
		z_add_thread_timeout(thread, delay);
	}
#else
	ARG_UNUSED(delay);
	k_thread_start(thread);
#endif /* CONFIG_SYS_CLOCK_EXISTS */
}
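
/* Usage sketch (illustrative): a thread created with a K_FOREVER start
 * delay is not started automatically; thread_schedule_new() either
 * starts it at once (K_NO_WAIT) or arms a timeout that starts it
 * later. my_thread, my_stack, my_entry and MY_PRIO are hypothetical:
 *
 *	k_tid_t tid = k_thread_create(&my_thread, my_stack,
 *				      K_THREAD_STACK_SIZEOF(my_stack),
 *				      my_entry, NULL, NULL, NULL,
 *				      MY_PRIO, 0, K_FOREVER);
 *	thread_schedule_new(tid, K_MSEC(100));
 */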

static inline int thread_is_preemptible(struct k_thread *thread)
{
	/* explanation in kernel_structs.h */
	return thread->base.preempt <= _PREEMPT_THRESHOLD;
}

static inline int thread_is_metairq(struct k_thread *thread)
{
#if CONFIG_NUM_METAIRQ_PRIORITIES > 0
	return (thread->base.prio - K_HIGHEST_THREAD_PRIO)
	       < CONFIG_NUM_METAIRQ_PRIORITIES;
#else
	ARG_UNUSED(thread);
	return 0;
#endif /* CONFIG_NUM_METAIRQ_PRIORITIES */
}
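
/* Worked example (illustrative, assuming CONFIG_NUM_METAIRQ_PRIORITIES=2
 * and K_HIGHEST_THREAD_PRIO=-16): priorities -16 and -15 are meta-IRQ,
 * since (-15) - (-16) = 1 < 2, while a thread at -14 is not, since
 * (-14) - (-16) = 2.
 */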

#ifdef CONFIG_ASSERT
static inline bool is_thread_dummy(struct k_thread *thread)
{
	return (thread->base.thread_state & _THREAD_DUMMY) != 0U;
}
#endif /* CONFIG_ASSERT */

static inline bool z_is_thread_suspended(struct k_thread *thread)
{
	return (thread->base.thread_state & _THREAD_SUSPENDED) != 0U;
}

static inline bool z_is_thread_pending(struct k_thread *thread)
{
	return (thread->base.thread_state & _THREAD_PENDING) != 0U;
}

static inline bool z_is_thread_prevented_from_running(struct k_thread *thread)
{
	uint8_t state = thread->base.thread_state;

	return (state & (_THREAD_PENDING | _THREAD_SLEEPING | _THREAD_DEAD |
			 _THREAD_DUMMY | _THREAD_SUSPENDED)) != 0U;
}

static inline bool z_is_thread_timeout_active(struct k_thread *thread)
{
	return !z_is_inactive_timeout(&thread->base.timeout);
}

static inline bool z_is_thread_ready(struct k_thread *thread)
{
	return !(z_is_thread_prevented_from_running(thread) ||
		 z_is_thread_timeout_active(thread));
}

static inline bool z_is_thread_state_set(struct k_thread *thread, uint32_t state)
{
	return (thread->base.thread_state & state) != 0U;
}

static inline bool z_is_thread_queued(struct k_thread *thread)
{
	return z_is_thread_state_set(thread, _THREAD_QUEUED);
}

static inline void z_mark_thread_as_suspended(struct k_thread *thread)
{
	thread->base.thread_state |= _THREAD_SUSPENDED;

	SYS_PORT_TRACING_FUNC(k_thread, sched_suspend, thread);
}

static inline void z_mark_thread_as_not_suspended(struct k_thread *thread)
{
	thread->base.thread_state &= ~_THREAD_SUSPENDED;

	SYS_PORT_TRACING_FUNC(k_thread, sched_resume, thread);
}

static inline void z_mark_thread_as_pending(struct k_thread *thread)
{
	thread->base.thread_state |= _THREAD_PENDING;
}

static inline void z_mark_thread_as_not_pending(struct k_thread *thread)
{
	thread->base.thread_state &= ~_THREAD_PENDING;
}

static inline bool z_is_thread_sleeping(struct k_thread *thread)
{
	return (thread->base.thread_state & _THREAD_SLEEPING) != 0U;
}

static inline void z_mark_thread_as_sleeping(struct k_thread *thread)
{
	thread->base.thread_state |= _THREAD_SLEEPING;
}

static inline void z_mark_thread_as_not_sleeping(struct k_thread *thread)
{
	thread->base.thread_state &= ~_THREAD_SLEEPING;
}

/*
 * This function tags the specified thread as essential to system operation.
 * Exceptions raised by this thread will be treated as a fatal system error.
 */
static inline void z_thread_essential_set(struct k_thread *thread)
{
	thread->base.user_options |= K_ESSENTIAL;
}

/*
 * This function tags the specified thread as not essential to system
 * operation. Exceptions raised by this thread may be recoverable.
 * (This is the default tag for a thread.)
 */
static inline void z_thread_essential_clear(struct k_thread *thread)
{
	thread->base.user_options &= ~K_ESSENTIAL;
}

/*
 * This routine indicates if the specified thread is an essential system
 * thread.
 *
 * Returns true if the thread is essential, false if it is not.
 */
static inline bool z_is_thread_essential(struct k_thread *thread)
{
	return (thread->base.user_options & K_ESSENTIAL) == K_ESSENTIAL;
}
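
/* Usage sketch (illustrative): a thread doing critical bring-up work
 * can mark itself essential so that a fault it raises is treated as
 * fatal to the system rather than to the thread alone:
 *
 *	z_thread_essential_set(arch_current_thread());
 *	... critical work ...
 *	z_thread_essential_clear(arch_current_thread());
 */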

static ALWAYS_INLINE bool should_preempt(struct k_thread *thread,
					 int preempt_ok)
{
	/* Preemption is OK if it's being explicitly allowed by
	 * software state (e.g. the thread called k_yield())
	 */
	if (preempt_ok != 0) {
		return true;
	}

	__ASSERT(arch_current_thread() != NULL, "");

	/* Or if we're pended/suspended/dummy (duh) */
	if (z_is_thread_prevented_from_running(arch_current_thread())) {
		return true;
	}

	/* Otherwise we have to be running a preemptible thread or
	 * switching to a metairq
	 */
	if (thread_is_preemptible(arch_current_thread()) || thread_is_metairq(thread)) {
		return true;
	}

	/* Edge case on ARM where a thread can be pended out of an
	 * interrupt handler before the "synchronous" swap starts
	 * context switching. Platforms with atomic swap can never
	 * hit this.
	 */
	if (IS_ENABLED(CONFIG_SWAP_NONATOMIC)
	    && z_is_thread_timeout_active(thread)) {
		return true;
	}

	return false;
}
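
/* Call-site sketch (illustrative; need_resched is a hypothetical
 * flag): a scheduler path that just made 'thread' runnable would
 * consult should_preempt() to decide whether to request a context
 * switch:
 *
 *	if (should_preempt(thread, preempt_ok)) {
 *		need_resched = true;
 *	}
 */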

static inline bool z_is_idle_thread_entry(k_thread_entry_t entry_point)
{
	return entry_point == idle;
}

static inline bool z_is_idle_thread_object(struct k_thread *thread)
{
#ifdef CONFIG_MULTITHREADING
#ifdef CONFIG_SMP
	return thread->base.is_idle;
#else
	return thread == &z_idle_threads[0];
#endif /* CONFIG_SMP */
#else
	return false;
#endif /* CONFIG_MULTITHREADING */
}


#endif /* ZEPHYR_KERNEL_INCLUDE_THREAD_H_ */