/*
 * Copyright (c) 2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/*
 * The purpose of this file is to provide essential/minimal kernel structure
 * definitions, so that they can be used without including kernel.h.
 *
 * The following rules must be observed:
 *  1. kernel_structs.h shall not depend on kernel.h, either directly or
 *     indirectly (i.e. it shall not include any header files that have
 *     kernel.h in their dependency chain).
 *  2. kernel.h shall imply kernel_structs.h, such that it shall not be
 *     necessary to include kernel_structs.h explicitly when kernel.h is
 *     included.
 */

#ifndef ZEPHYR_KERNEL_INCLUDE_KERNEL_STRUCTS_H_
#define ZEPHYR_KERNEL_INCLUDE_KERNEL_STRUCTS_H_

#if !defined(_ASMLANGUAGE)
#include <zephyr/sys/atomic.h>
#include <zephyr/types.h>
#include <zephyr/sys/dlist.h>
#include <zephyr/sys/util.h>
#include <zephyr/sys/sys_heap.h>
#include <zephyr/arch/structs.h>
#include <zephyr/kernel/stats.h>
#include <zephyr/kernel/obj_core.h>
#include <zephyr/sys/rb.h>
#endif

#define K_NUM_THREAD_PRIO (CONFIG_NUM_PREEMPT_PRIORITIES + CONFIG_NUM_COOP_PRIORITIES + 1)
#define PRIQ_BITMAP_SIZE  (DIV_ROUND_UP(K_NUM_THREAD_PRIO, BITS_PER_LONG))

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Bitmask definitions for the struct k_thread.thread_state field.
 *
 * These must be defined before kernel_arch_data.h is included, because that
 * header may need them to be already defined. An illustrative sketch of how
 * these bits are used follows the definitions.
 */

/* states: common states use low bits, arch-specific states use high bits */

/* Not a real thread */
#define _THREAD_DUMMY (BIT(0))

/* Thread is waiting on an object */
#define _THREAD_PENDING (BIT(1))

/* Thread is sleeping */
#define _THREAD_SLEEPING (BIT(2))

/* Thread has terminated */
#define _THREAD_DEAD (BIT(3))

/* Thread is suspended */
#define _THREAD_SUSPENDED (BIT(4))

/* Thread is in the process of aborting */
#define _THREAD_ABORTING (BIT(5))

/* Thread is in the process of suspending */
#define _THREAD_SUSPENDING (BIT(6))

/* Thread is present in the ready queue */
#define _THREAD_QUEUED (BIT(7))

/* end - states */
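
/*
 * Illustrative sketch (hypothetical helpers, not part of this header),
 * assuming the usual thread_state field in struct _thread_base, of how
 * these bits are tested and set:
 *
 *	static inline bool is_thread_queued(struct k_thread *thread)
 *	{
 *		return (thread->base.thread_state & _THREAD_QUEUED) != 0U;
 *	}
 *
 *	static inline void mark_thread_as_queued(struct k_thread *thread)
 *	{
 *		thread->base.thread_state |= _THREAD_QUEUED;
 *	}
 */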

#ifdef CONFIG_STACK_SENTINEL
/* Magic value stored in the lowest bytes of a thread's stack */
#define STACK_SENTINEL 0xF0F0F0F0
#endif
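
/*
 * A minimal sketch (simplified; the real check lives in the kernel proper)
 * of how the sentinel is used: if the word at the lowest address of a
 * thread's stack no longer holds STACK_SENTINEL, the stack has overflowed.
 *
 *	uint32_t *stack_bottom = (uint32_t *)thread->stack_info.start;
 *
 *	if (*stack_bottom != STACK_SENTINEL) {
 *		... report a fatal stack overflow ...
 *	}
 */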

/* lowest value of _thread_base.preempt at which a thread is non-preemptible */
#define _NON_PREEMPT_THRESHOLD 0x0080U

/* highest value of _thread_base.preempt at which a thread is preemptible */
#define _PREEMPT_THRESHOLD (_NON_PREEMPT_THRESHOLD - 1U)
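
/*
 * Sketch of the intended use of these thresholds (hypothetical helper):
 * a thread may be preempted only while its preempt field is at or below
 * _PREEMPT_THRESHOLD.
 *
 *	static inline bool thread_is_preemptible(struct k_thread *thread)
 *	{
 *		return thread->base.preempt <= _PREEMPT_THRESHOLD;
 *	}
 */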

#if !defined(_ASMLANGUAGE)

/* Two abstractions are defined here for "thread priority queues".
 *
 * One is a "dumb" list implementation appropriate for systems with small
 * numbers of threads that are sensitive to code size.  It is stored in
 * sorted order, taking an O(N) cost every time a thread is added to the
 * list.  This corresponds to the way the original _wait_q_t abstraction
 * worked, and is very fast as long as the number of threads is small.
 *
 * The other is a balanced tree "fast" implementation with a rather larger
 * code size (due to the data structure itself; the code here is just
 * stubs) and higher constant-factor performance overhead, but much better
 * O(log N) scaling in the presence of large numbers of threads.
 *
 * Each can be used for either the wait_q or the system ready queue,
 * configurable at build time. A sketch of the list insertion follows.
 */
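
/*
 * Sketch (hypothetical, simplified) of the O(N) sorted insert used by the
 * list implementation: walk the list and insert before the first thread of
 * lower priority, else append. The comparison helper is illustrative only.
 *
 *	SYS_DLIST_FOR_EACH_CONTAINER(pq, t, base.qnode_dlist) {
 *		if (higher_priority_than(thread, t)) {
 *			sys_dlist_insert(&t->base.qnode_dlist,
 *					 &thread->base.qnode_dlist);
 *			return;
 *		}
 *	}
 *	sys_dlist_append(pq, &thread->base.qnode_dlist);
 */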

struct _priq_rb {
	struct rbtree tree;
	int next_order_key;
};

/* Traditional/textbook "multi-queue" structure.  Separate lists for a
 * small number (K_NUM_THREAD_PRIO here) of fixed priorities.  This
 * corresponds to the original Zephyr scheduler.  RAM requirements are
 * comparatively high, but performance is very fast.  Won't work with
 * features like deadline scheduling, which need large priority spaces
 * to represent their requirements.  (An insertion sketch follows the
 * struct.)
 */
struct _priq_mq {
	sys_dlist_t queues[K_NUM_THREAD_PRIO];
	unsigned long bitmask[PRIQ_BITMAP_SIZE];
#ifndef CONFIG_SMP
	unsigned int cached_queue_index;
#endif
};
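
/*
 * Illustrative sketch (hypothetical, simplified): adding a thread at an
 * already-normalized priority index "prio" appends it to that priority's
 * list and sets the matching bitmask bit, so that the highest-priority
 * non-empty queue can later be located with a find-first-set operation:
 *
 *	sys_dlist_append(&pq->queues[prio], &thread->base.qnode_dlist);
 *	pq->bitmask[prio / BITS_PER_LONG] |= BIT(prio % BITS_PER_LONG);
 */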

struct _ready_q {
#ifndef CONFIG_SMP
	/* always contains the next thread to run: it cannot be NULL */
	struct k_thread *cache;
#endif

#if defined(CONFIG_SCHED_SIMPLE)
	sys_dlist_t runq;
#elif defined(CONFIG_SCHED_SCALABLE)
	struct _priq_rb runq;
#elif defined(CONFIG_SCHED_MULTIQ)
	struct _priq_mq runq;
#endif
};

typedef struct _ready_q _ready_q_t;

struct _cpu {
	/* nested interrupt count */
	uint32_t nested;

	/* interrupt stack pointer base */
	char *irq_stack;

	/* currently scheduled thread */
	struct k_thread *current;

	/* one assigned idle thread per CPU */
	struct k_thread *idle_thread;

#ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
	struct _ready_q ready_q;
#endif

#if (CONFIG_NUM_METAIRQ_PRIORITIES > 0)
	/* Coop thread preempted by current metairq, or NULL */
	struct k_thread *metairq_preempted;
#endif

	uint8_t id;

#if defined(CONFIG_FPU_SHARING)
	void *fp_ctx;
#endif

#ifdef CONFIG_SMP
	/* True when _current is allowed to context switch */
	uint8_t swap_ok;
#endif

#ifdef CONFIG_SCHED_THREAD_USAGE
	/*
	 * [usage0] is a timestamp marking the beginning of an execution
	 * window; a value of 0 is special and indicates that measurement
	 * has been stopped (but not disabled).
	 */

	uint32_t usage0;

#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
	struct k_cycle_stats *usage;
#endif
#endif

#ifdef CONFIG_OBJ_CORE_SYSTEM
	struct k_obj_core  obj_core;
#endif

#ifdef CONFIG_SCHED_IPI_SUPPORTED
	sys_dlist_t ipi_workq;
#endif

	/* Per CPU architecture specifics */
	struct _cpu_arch arch;
};

typedef struct _cpu _cpu_t;

struct z_kernel {
	struct _cpu cpus[CONFIG_MP_MAX_NUM_CPUS];

#ifdef CONFIG_PM
	int32_t idle; /* Number of ticks for kernel idling */
#endif

	/*
	 * ready queue: can be big, so keep it after the small fields, since
	 * some assembly code (e.g. on ARC) is limited in the offsets it can
	 * encode
	 */
#ifndef CONFIG_SCHED_CPU_MASK_PIN_ONLY
	struct _ready_q ready_q;
#endif

#if defined(CONFIG_THREAD_MONITOR)
	struct k_thread *threads; /* singly linked list of ALL threads */
#endif
#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
	struct k_cycle_stats usage[CONFIG_MP_MAX_NUM_CPUS];
#endif

#ifdef CONFIG_OBJ_CORE_SYSTEM
	struct k_obj_core  obj_core;
#endif

#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
	/* Identify CPUs to send IPIs to at the next scheduling point */
	atomic_t pending_ipi;
#endif
};

typedef struct z_kernel _kernel_t;

extern struct z_kernel _kernel;

extern atomic_t _cpus_active;

#ifdef CONFIG_SMP

/* True if the current context can be preempted and migrated to
 * another SMP CPU.
 */
bool z_smp_cpu_mobile(void);
#define _current_cpu ({ __ASSERT_NO_MSG(!z_smp_cpu_mobile()); \
			arch_curr_cpu(); })

__attribute_const__ struct k_thread *z_smp_current_get(void);
#define _current z_smp_current_get()

#else
#define _current_cpu (&_kernel.cpus[0])
#define _current _kernel.cpus[0].current
#endif

#define CPU_ID ((CONFIG_MP_MAX_NUM_CPUS == 1) ? 0 : _current_cpu->id)

/* This is always invoked from a context where preemption is disabled */
#define z_current_thread_set(thread) ({ _current_cpu->current = (thread); })

#ifdef CONFIG_ARCH_HAS_CUSTOM_CURRENT_IMPL
#undef _current
#define _current arch_current_thread()
#undef z_current_thread_set
#define z_current_thread_set(thread) \
	arch_current_thread_set(({ _current_cpu->current = (thread); }))
#endif

/* kernel wait queue record */
#ifdef CONFIG_WAITQ_SCALABLE

typedef struct {
	struct _priq_rb waitq;
} _wait_q_t;

/* defined in kernel/priority_queues.c */
bool z_priq_rb_lessthan(struct rbnode *a, struct rbnode *b);

#define Z_WAIT_Q_INIT(wait_q) { { { .lessthan_fn = z_priq_rb_lessthan } } }

#else

typedef struct {
	sys_dlist_t waitq;
} _wait_q_t;

#define Z_WAIT_Q_INIT(wait_q) { SYS_DLIST_STATIC_INIT(&(wait_q)->waitq) }

#endif /* CONFIG_WAITQ_SCALABLE */
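
/*
 * Usage sketch (hypothetical object; the names are illustrative): kernel
 * objects embed a _wait_q_t and initialize it statically with
 * Z_WAIT_Q_INIT, which works for either backing implementation, e.g.
 *
 *	struct my_obj {
 *		_wait_q_t wait_q;
 *	};
 *
 *	static struct my_obj obj = {
 *		.wait_q = Z_WAIT_Q_INIT(&obj.wait_q),
 *	};
 */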

/* kernel timeout record */
struct _timeout;
typedef void (*_timeout_func_t)(struct _timeout *t);

struct _timeout {
	sys_dnode_t node;
	_timeout_func_t fn;
#ifdef CONFIG_TIMEOUT_64BIT
	/* Can't use k_ticks_t for header dependency reasons */
	int64_t dticks;
#else
	int32_t dticks;
#endif
};
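
/*
 * Sketch of a timeout expiry function (hypothetical names): the callback
 * receives the struct _timeout itself; implementations typically recover
 * the enclosing object with CONTAINER_OF (from <zephyr/sys/util.h>):
 *
 *	static void my_expiry_fn(struct _timeout *t)
 *	{
 *		struct my_obj *obj = CONTAINER_OF(t, struct my_obj, timeout);
 *
 *		... act on obj ...
 *	}
 */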

typedef void (*k_thread_timeslice_fn_t)(struct k_thread *thread, void *data);

#ifdef __cplusplus
}
#endif

#endif /* _ASMLANGUAGE */

#endif /* ZEPHYR_KERNEL_INCLUDE_KERNEL_STRUCTS_H_ */