/*
 * Copyright (c) 2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/*
 * The purpose of this file is to provide essential/minimal kernel structure
 * definitions, so that they can be used without including kernel.h.
 *
 * The following rules must be observed:
 *  1. kernel_structs.h shall not depend on kernel.h, either directly or
 *     indirectly (i.e. it shall not include any header files that include
 *     kernel.h in their dependency chain).
 *  2. kernel.h shall imply kernel_structs.h, such that it shall not be
 *     necessary to include kernel_structs.h explicitly when kernel.h is
 *     included.
 */

#ifndef ZEPHYR_KERNEL_INCLUDE_KERNEL_STRUCTS_H_
#define ZEPHYR_KERNEL_INCLUDE_KERNEL_STRUCTS_H_

#if !defined(_ASMLANGUAGE)
#include <zephyr/sys/atomic.h>
#include <zephyr/types.h>
#include <zephyr/sys/dlist.h>
#include <zephyr/sys/util.h>
#include <zephyr/sys/sys_heap.h>
#include <zephyr/arch/structs.h>
#include <zephyr/kernel/stats.h>
#include <zephyr/kernel/obj_core.h>
#include <zephyr/sys/rb.h>
#endif

#define K_NUM_THREAD_PRIO (CONFIG_NUM_PREEMPT_PRIORITIES + CONFIG_NUM_COOP_PRIORITIES + 1)
#define PRIQ_BITMAP_SIZE  (DIV_ROUND_UP(K_NUM_THREAD_PRIO, BITS_PER_LONG))
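
/*
 * Worked example (illustrative, assuming the common Kconfig defaults of
 * CONFIG_NUM_PREEMPT_PRIORITIES=15 and CONFIG_NUM_COOP_PRIORITIES=16):
 * K_NUM_THREAD_PRIO = 15 + 16 + 1 = 32 distinct priority levels, where the
 * extra +1 covers the lowest-priority level used by the idle thread. With
 * BITS_PER_LONG == 32, PRIQ_BITMAP_SIZE = DIV_ROUND_UP(32, 32) = 1, i.e. a
 * single long is enough to hold one "queue non-empty" bit per priority.
 */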

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Bitmask definitions for the struct k_thread.thread_state field.
 *
 * These must be defined before kernel_arch_data.h is included, because that
 * header may rely on them already being defined.
 */

/* states: common states use the low bits, arch-specific ones the high bits */

/* Not a real thread */
#define _THREAD_DUMMY (BIT(0))

/* Thread is waiting on an object */
#define _THREAD_PENDING (BIT(1))

/* Thread is sleeping */
#define _THREAD_SLEEPING (BIT(2))

/* Thread has terminated */
#define _THREAD_DEAD (BIT(3))

/* Thread is suspended */
#define _THREAD_SUSPENDED (BIT(4))

/* Thread is in the process of aborting */
#define _THREAD_ABORTING (BIT(5))

/* Thread is in the process of suspending */
#define _THREAD_SUSPENDING (BIT(6))

/* Thread is present in the ready queue */
#define _THREAD_QUEUED (BIT(7))

/* end - states */
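
/*
 * Illustrative sketch (not code from this header): the state bits are ORed
 * together in the thread_state field, so a typical check-and-set sequence
 * might look like the following, assuming a `thread` pointer whose base
 * struct carries the thread_state byte:
 *
 *	if ((thread->base.thread_state & _THREAD_PENDING) != 0U) {
 *		// thread is blocked on a kernel object
 *	}
 *	thread->base.thread_state |= _THREAD_QUEUED;
 */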

#ifdef CONFIG_STACK_SENTINEL
/* Magic value in lowest bytes of the stack */
#define STACK_SENTINEL 0xF0F0F0F0
#endif

/* lowest value of _thread_base.preempt at which a thread is non-preemptible */
#define _NON_PREEMPT_THRESHOLD 0x0080U

/* highest value of _thread_base.preempt at which a thread is preemptible */
#define _PREEMPT_THRESHOLD (_NON_PREEMPT_THRESHOLD - 1U)
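
/*
 * Illustrative sketch (an assumption about usage, not a definition in this
 * header): preemptibility can then be decided with a single unsigned
 * comparison against the threshold, e.g.:
 *
 *	static inline bool is_preemptible(struct k_thread *thread)
 *	{
 *		return thread->base.preempt <= _PREEMPT_THRESHOLD;
 *	}
 *
 * A cooperative (negative) priority or a nonzero scheduler-lock count both
 * push the preempt value to _NON_PREEMPT_THRESHOLD or above, so the one
 * comparison covers both cases.
 */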

#if !defined(_ASMLANGUAGE)

/* Two abstractions are defined here for "thread priority queues".
 *
 * One is a "dumb" list implementation appropriate for systems with
 * small numbers of threads and sensitive to code size.  It is stored
 * in sorted order, taking an O(N) cost every time a thread is added
 * to the list.  This corresponds to the way the original _wait_q_t
 * abstraction worked and is very fast as long as the number of
 * threads is small.
 *
 * The other is a balanced-tree "fast" implementation with rather
 * larger code size (due to the data structure itself; the code here
 * is just stubs) and higher constant-factor performance overhead, but
 * much better O(log N) scaling in the presence of large numbers of
 * threads.
 *
 * Each can be used for either the wait_q or the system ready queue,
 * configurable at build time.
 */

struct _priq_rb {
	struct rbtree tree;
	int next_order_key;
};
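
/*
 * A sketch of the intent (an inference from the field names, not a contract
 * defined by this header): rbtree nodes for equal-priority threads still
 * need a total order, so each insertion can stamp the thread with a
 * monotonically increasing key, e.g.:
 *
 *	thread->base.order_key = pq->next_order_key++;
 *
 * which keeps same-priority threads in FIFO order inside the tree.
 */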

/* Traditional/textbook "multi-queue" structure.  Separate lists for a
 * fixed set of priorities (K_NUM_THREAD_PRIO of them here).  This
 * corresponds to the original Zephyr scheduler.  RAM requirements are
 * comparatively high, but performance is very fast.  It won't work with
 * features like deadline scheduling, which need large priority spaces
 * to represent their requirements.
 */
struct _priq_mq {
	sys_dlist_t queues[K_NUM_THREAD_PRIO];
	unsigned long bitmask[PRIQ_BITMAP_SIZE];
#ifndef CONFIG_SMP
	unsigned int cached_queue_index;
#endif
};
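
/*
 * Illustrative sketch (an assumption about how the bitmask is meant to be
 * used, not code from this file): each set bit in bitmask[] marks a
 * non-empty queue, so finding the best runnable priority is a word scan
 * plus a count-trailing-zeros, e.g. with the GCC/Clang builtin:
 *
 *	for (int i = 0; i < PRIQ_BITMAP_SIZE; i++) {
 *		if (pq->bitmask[i] != 0UL) {
 *			unsigned int prio = i * BITS_PER_LONG +
 *					    __builtin_ctzl(pq->bitmask[i]);
 *			return sys_dlist_peek_head(&pq->queues[prio]);
 *		}
 *	}
 *	return NULL;
 */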

struct _ready_q {
#ifndef CONFIG_SMP
	/* always contains next thread to run: cannot be NULL */
	struct k_thread *cache;
#endif

#if defined(CONFIG_SCHED_SIMPLE)
	sys_dlist_t runq;
#elif defined(CONFIG_SCHED_SCALABLE)
	struct _priq_rb runq;
#elif defined(CONFIG_SCHED_MULTIQ)
	struct _priq_mq runq;
#endif
};

typedef struct _ready_q _ready_q_t;

struct _cpu {
	/* nested interrupt count */
	uint32_t nested;

	/* interrupt stack pointer base */
	char *irq_stack;

	/* currently scheduled thread */
	struct k_thread *current;

	/* one assigned idle thread per CPU */
	struct k_thread *idle_thread;

#ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
	struct _ready_q ready_q;
#endif

#if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) &&                                                         \
	(CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES)
	/* Coop thread preempted by current metairq, or NULL */
	struct k_thread *metairq_preempted;
#endif

	uint8_t id;

#if defined(CONFIG_FPU_SHARING)
	void *fp_ctx;
#endif

#ifdef CONFIG_SMP
	/* True when _current is allowed to context switch */
	uint8_t swap_ok;
#endif

#ifdef CONFIG_SCHED_THREAD_USAGE
	/*
	 * [usage0] is used as a timestamp to mark the beginning of an
	 * execution window. A value of 0 is special, indicating that
	 * usage tracking has been stopped (but not disabled).
	 */

	uint32_t usage0;

#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
	struct k_cycle_stats *usage;
#endif
#endif

#ifdef CONFIG_OBJ_CORE_SYSTEM
	struct k_obj_core  obj_core;
#endif

	/* Per CPU architecture specifics */
	struct _cpu_arch arch;
};

typedef struct _cpu _cpu_t;

struct z_kernel {
	struct _cpu cpus[CONFIG_MP_MAX_NUM_CPUS];

#ifdef CONFIG_PM
	int32_t idle; /* Number of ticks for kernel idling */
#endif

	/*
	 * The ready queue can be large; keep it after the small fields, since
	 * the assembly on some architectures (e.g. ARC) is limited in the
	 * size of the offset it can encode.
	 */
#ifndef CONFIG_SCHED_CPU_MASK_PIN_ONLY
	struct _ready_q ready_q;
#endif

#if defined(CONFIG_THREAD_MONITOR)
	struct k_thread *threads; /* singly linked list of ALL threads */
#endif
#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
	struct k_cycle_stats usage[CONFIG_MP_MAX_NUM_CPUS];
#endif

#ifdef CONFIG_OBJ_CORE_SYSTEM
	struct k_obj_core  obj_core;
#endif

#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
	/* Identify CPUs to send IPIs to at the next scheduling point */
	atomic_t pending_ipi;
#endif
};

typedef struct z_kernel _kernel_t;

extern struct z_kernel _kernel;

extern atomic_t _cpus_active;

#ifdef CONFIG_SMP

/* True if the current context can be preempted and migrated to
 * another SMP CPU.
 */
bool z_smp_cpu_mobile(void);
#define _current_cpu ({ __ASSERT_NO_MSG(!z_smp_cpu_mobile()); \
			arch_curr_cpu(); })

__attribute_const__ struct k_thread *z_smp_current_get(void);
#define _current z_smp_current_get()

#else
#define _current_cpu (&_kernel.cpus[0])
#define _current _kernel.cpus[0].current
#endif
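
/*
 * Illustrative usage (a sketch, not code from this header): kernel code
 * reads the running thread through these macros rather than touching the
 * per-CPU structure directly, e.g.:
 *
 *	if (_current->base.prio < 0) {
 *		// running thread has a cooperative priority
 *	}
 *	int cpu_id = _current_cpu->id;
 *
 * On SMP builds the indirection matters: _current_cpu asserts that the
 * context cannot migrate, and _current goes through z_smp_current_get(),
 * so the lookup stays valid where a cached pointer could go stale.
 */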

/* This is always invoked from a context where preemption is disabled */
#define z_current_thread_set(thread) ({ _current_cpu->current = (thread); })

#ifdef CONFIG_ARCH_HAS_CUSTOM_CURRENT_IMPL
#undef _current
#define _current arch_current_thread()
#undef z_current_thread_set
#define z_current_thread_set(thread) \
	arch_current_thread_set(({ _current_cpu->current = (thread); }))
#endif

/* kernel wait queue record */
#ifdef CONFIG_WAITQ_SCALABLE

typedef struct {
	struct _priq_rb waitq;
} _wait_q_t;

/* defined in kernel/priority_queues.c */
bool z_priq_rb_lessthan(struct rbnode *a, struct rbnode *b);

#define Z_WAIT_Q_INIT(wait_q) { { { .lessthan_fn = z_priq_rb_lessthan } } }

#else

typedef struct {
	sys_dlist_t waitq;
} _wait_q_t;

#define Z_WAIT_Q_INIT(wait_q) { SYS_DLIST_STATIC_INIT(&(wait_q)->waitq) }

#endif /* CONFIG_WAITQ_SCALABLE */
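
/*
 * Illustrative usage (a sketch; the struct and field names are made up for
 * the example): the initializer takes a pointer to the wait queue being
 * initialized, so a kernel object embeds and initializes one like:
 *
 *	struct my_sem {
 *		_wait_q_t wait_q;
 *		unsigned int count;
 *	};
 *
 *	struct my_sem sem = {
 *		.wait_q = Z_WAIT_Q_INIT(&sem.wait_q),
 *		.count = 0,
 *	};
 *
 * The self-referential argument is what lets the dlist variant statically
 * point the list head at itself via SYS_DLIST_STATIC_INIT.
 */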

/* kernel timeout record */
struct _timeout;
typedef void (*_timeout_func_t)(struct _timeout *t);

struct _timeout {
	sys_dnode_t node;
	_timeout_func_t fn;
#ifdef CONFIG_TIMEOUT_64BIT
	/* Can't use k_ticks_t for header dependency reasons */
	int64_t dticks;
#else
	int32_t dticks;
#endif
};
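
/*
 * Illustrative sketch (the wrapper type and handler are hypothetical):
 * because struct _timeout is meant to be embedded in a larger object, an
 * expiry callback typically recovers its container with CONTAINER_OF from
 * <zephyr/sys/util.h>:
 *
 *	struct my_timer {
 *		struct _timeout timeout;
 *		int expiry_count;
 *	};
 *
 *	static void my_expiry_fn(struct _timeout *t)
 *	{
 *		struct my_timer *timer =
 *			CONTAINER_OF(t, struct my_timer, timeout);
 *
 *		timer->expiry_count++;
 *	}
 */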

typedef void (*k_thread_timeslice_fn_t)(struct k_thread *thread, void *data);

#ifdef __cplusplus
}
#endif

#endif /* _ASMLANGUAGE */

#endif /* ZEPHYR_KERNEL_INCLUDE_KERNEL_STRUCTS_H_ */