1 /*
2  * Copyright (c) 2016 Wind River Systems, Inc.
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 /*
8  * The purpose of this file is to provide essential/minimal kernel structure
9  * definitions, so that they can be used without including kernel.h.
10  *
11  * The following rules must be observed:
 *  1. kernel_structs.h shall not depend on kernel.h, either directly or
 *    indirectly (i.e. it shall not include any header files that have
 *    kernel.h in their dependency chain).
 *  2. kernel.h shall include kernel_structs.h, so that it is never
 *    necessary to include kernel_structs.h explicitly when kernel.h is
 *    included.
18  */
19 
20 #ifndef ZEPHYR_KERNEL_INCLUDE_KERNEL_STRUCTS_H_
21 #define ZEPHYR_KERNEL_INCLUDE_KERNEL_STRUCTS_H_
22 
23 #if !defined(_ASMLANGUAGE)
24 #include <zephyr/sys/atomic.h>
25 #include <zephyr/types.h>
26 #include <zephyr/kernel/internal/sched_priq.h>
27 #include <zephyr/sys/dlist.h>
28 #include <zephyr/sys/util.h>
29 #include <zephyr/sys/sys_heap.h>
30 #include <zephyr/arch/structs.h>
31 #include <zephyr/kernel/stats.h>
32 #include <zephyr/kernel/obj_core.h>
33 #endif
34 
35 #ifdef __cplusplus
36 extern "C" {
37 #endif
38 
39 /*
40  * Bitmask definitions for the struct k_thread.thread_state field.
41  *
 * These must be defined before kernel_arch_data.h is included, because
 * that header may rely on them already being defined.
44  */
45 
/* states: common states use the low bits, arch-specific ones use the high bits */
47 
48 /* Not a real thread */
49 #define _THREAD_DUMMY (BIT(0))
50 
51 /* Thread is waiting on an object */
52 #define _THREAD_PENDING (BIT(1))
53 
54 /* Thread has not yet started */
55 #define _THREAD_PRESTART (BIT(2))
56 
57 /* Thread has terminated */
58 #define _THREAD_DEAD (BIT(3))
59 
60 /* Thread is suspended */
61 #define _THREAD_SUSPENDED (BIT(4))
62 
63 /* Thread is in the process of aborting */
64 #define _THREAD_ABORTING (BIT(5))
65 
66 /* Thread is in the process of suspending */
67 #define _THREAD_SUSPENDING (BIT(6))
68 
69 /* Thread is present in the ready queue */
70 #define _THREAD_QUEUED (BIT(7))
71 
72 /* end - states */
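
/*
 * Example (illustrative only; assumes these bits live in
 * 'thread->base.thread_state', as they do throughout the kernel):
 * testing a state is a simple bitwise AND:
 *
 *   static inline bool is_thread_pending(struct k_thread *thread)
 *   {
 *           return (thread->base.thread_state & _THREAD_PENDING) != 0U;
 *   }
 */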
73 
74 #ifdef CONFIG_STACK_SENTINEL
/* Magic value written to the lowest bytes of the stack to detect overflow */
76 #define STACK_SENTINEL 0xF0F0F0F0
77 #endif
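
/*
 * Sketch of the sentinel check (illustrative, not the kernel's exact
 * implementation; assumes CONFIG_THREAD_STACK_INFO so that
 * 'stack_info.start' is available): the word at the lowest stack
 * address is compared against STACK_SENTINEL, and a mismatch means the
 * stack overflowed and overwrote it:
 *
 *   uint32_t *p = (uint32_t *)_current->stack_info.start;
 *
 *   if (*p != STACK_SENTINEL) {
 *           // handle the stack overflow, e.g. k_oops()
 *   }
 */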
78 
79 /* lowest value of _thread_base.preempt at which a thread is non-preemptible */
80 #define _NON_PREEMPT_THRESHOLD 0x0080U
81 
82 /* highest value of _thread_base.preempt at which a thread is preemptible */
83 #define _PREEMPT_THRESHOLD (_NON_PREEMPT_THRESHOLD - 1U)
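
/*
 * Sketch of how the scheduler can consume these thresholds (assumes
 * the 'preempt' field of struct _thread_base): a thread is preemptible
 * only while its 'preempt' value is at or below _PREEMPT_THRESHOLD,
 * i.e. it runs at a preemptible priority and has not taken the
 * scheduler lock:
 *
 *   static inline bool is_preempt(struct k_thread *thread)
 *   {
 *           return thread->base.preempt <= _PREEMPT_THRESHOLD;
 *   }
 */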
84 
85 #if !defined(_ASMLANGUAGE)
86 
87 struct _ready_q {
88 #ifndef CONFIG_SMP
89 	/* always contains next thread to run: cannot be NULL */
90 	struct k_thread *cache;
91 #endif
92 
93 #if defined(CONFIG_SCHED_DUMB)
94 	sys_dlist_t runq;
95 #elif defined(CONFIG_SCHED_SCALABLE)
96 	struct _priq_rb runq;
97 #elif defined(CONFIG_SCHED_MULTIQ)
98 	struct _priq_mq runq;
99 #endif
100 };
101 
102 typedef struct _ready_q _ready_q_t;
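
/*
 * Illustrative sketch: on uniprocessor builds the scheduler never has
 * to walk 'runq' to pick the next thread, because 'cache' is kept up
 * to date on every queue operation:
 *
 *   static inline struct k_thread *next_thread_to_run(void)
 *   {
 *           return _kernel.ready_q.cache;
 *   }
 */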
103 
104 struct _cpu {
105 	/* nested interrupt count */
106 	uint32_t nested;
107 
108 	/* interrupt stack pointer base */
109 	char *irq_stack;
110 
111 	/* currently scheduled thread */
112 	struct k_thread *current;
113 
114 	/* one assigned idle thread per CPU */
115 	struct k_thread *idle_thread;
116 
117 #ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
118 	struct _ready_q ready_q;
119 #endif
120 
121 #if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) &&                                                         \
122 	(CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES)
123 	/* Coop thread preempted by current metairq, or NULL */
124 	struct k_thread *metairq_preempted;
125 #endif
126 
127 	uint8_t id;
128 
129 #if defined(CONFIG_FPU_SHARING)
130 	void *fp_ctx;
131 #endif
132 
133 #ifdef CONFIG_SMP
134 	/* True when _current is allowed to context switch */
135 	uint8_t swap_ok;
136 #endif
137 
138 #ifdef CONFIG_SCHED_THREAD_USAGE
139 	/*
140 	 * [usage0] is used as a timestamp to mark the beginning of an
141 	 * execution window. [0] is a special value indicating that it
142 	 * has been stopped (but not disabled).
143 	 */
144 
145 	uint32_t usage0;
146 
147 #ifdef CONFIG_SCHED_THREAD_USAGE_ALL
148 	struct k_cycle_stats *usage;
149 #endif
150 #endif
151 
152 #ifdef CONFIG_OBJ_CORE_SYSTEM
153 	struct k_obj_core  obj_core;
154 #endif
155 
156 	/* Per CPU architecture specifics */
157 	struct _cpu_arch arch;
158 };
159 
160 typedef struct _cpu _cpu_t;
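
/*
 * Example (illustrative; several architectures implement their
 * interrupt-context test this way): 'nested' is incremented on
 * interrupt entry and decremented on exit, so a nonzero value means
 * the CPU is servicing an ISR:
 *
 *   static inline bool cpu_in_isr(const struct _cpu *cpu)
 *   {
 *           return cpu->nested != 0U;
 *   }
 */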
161 
162 struct z_kernel {
163 	struct _cpu cpus[CONFIG_MP_MAX_NUM_CPUS];
164 
165 #ifdef CONFIG_PM
166 	int32_t idle; /* Number of ticks for kernel idling */
167 #endif
168 
169 	/*
170 	 * ready queue: can be big, keep after small fields, since some
171 	 * assembly (e.g. ARC) are limited in the encoding of the offset
172 	 */
173 #ifndef CONFIG_SCHED_CPU_MASK_PIN_ONLY
174 	struct _ready_q ready_q;
175 #endif
176 
177 #ifdef CONFIG_FPU_SHARING
178 	/*
179 	 * A 'current_sse' field does not exist in addition to the 'current_fp'
180 	 * field since it's not possible to divide the IA-32 non-integer
181 	 * registers into 2 distinct blocks owned by differing threads.  In
182 	 * other words, given that the 'fxnsave/fxrstor' instructions
183 	 * save/restore both the X87 FPU and XMM registers, it's not possible
184 	 * for a thread to only "own" the XMM registers.
185 	 */
186 
187 	/* thread that owns the FP regs */
188 	struct k_thread *current_fp;
189 #endif
190 
191 #if defined(CONFIG_THREAD_MONITOR)
192 	struct k_thread *threads; /* singly linked list of ALL threads */
193 #endif
194 #ifdef CONFIG_SCHED_THREAD_USAGE_ALL
195 	struct k_cycle_stats usage[CONFIG_MP_MAX_NUM_CPUS];
196 #endif
197 
198 #ifdef CONFIG_OBJ_CORE_SYSTEM
199 	struct k_obj_core  obj_core;
200 #endif
201 
202 #if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
203 	/* Need to signal an IPI at the next scheduling point */
204 	bool pending_ipi;
205 #endif
206 };
207 
208 typedef struct z_kernel _kernel_t;
209 
210 extern struct z_kernel _kernel;
211 
212 extern atomic_t _cpus_active;
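
/*
 * Example (requires CONFIG_THREAD_MONITOR; 'next_thread' is the link
 * field kept in struct k_thread for this purpose): every thread in the
 * system can be visited by walking '_kernel.threads':
 *
 *   for (struct k_thread *t = _kernel.threads; t != NULL;
 *        t = t->next_thread) {
 *           // inspect 't'
 *   }
 */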
213 
214 #ifdef CONFIG_SMP
215 
216 /* True if the current context can be preempted and migrated to
217  * another SMP CPU.
218  */
219 bool z_smp_cpu_mobile(void);
220 
221 #define _current_cpu ({ __ASSERT_NO_MSG(!z_smp_cpu_mobile()); \
222 			arch_curr_cpu(); })
223 #define _current k_sched_current_thread_query()
224 
225 #else
226 #define _current_cpu (&_kernel.cpus[0])
227 #define _current _kernel.cpus[0].current
228 #endif
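
/*
 * Usage example: both definitions above resolve to the executing
 * thread, so kernel code reads the same on SMP and uniprocessor
 * builds:
 *
 *   if ((_current->base.thread_state & _THREAD_ABORTING) != 0U) {
 *           // the running thread is mid-abort
 *   }
 */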
229 
230 /* kernel wait queue record */
231 
232 #ifdef CONFIG_WAITQ_SCALABLE
233 
234 typedef struct {
235 	struct _priq_rb waitq;
236 } _wait_q_t;
237 
238 bool z_priq_rb_lessthan(struct rbnode *a, struct rbnode *b);
239 
240 #define Z_WAIT_Q_INIT(wait_q) { { { .lessthan_fn = z_priq_rb_lessthan } } }
241 
242 #else
243 
244 typedef struct {
245 	sys_dlist_t waitq;
246 } _wait_q_t;
247 
248 #define Z_WAIT_Q_INIT(wait_q) { SYS_DLIST_STATIC_INIT(&(wait_q)->waitq) }
249 
250 #endif
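
/*
 * Example (illustrative; mirrors how kernel object initializers use
 * this macro): either representation is initialized the same way, by
 * passing the address of the object's wait queue field:
 *
 *   struct k_sem sem = {
 *           .wait_q = Z_WAIT_Q_INIT(&sem.wait_q),
 *           .count = 0,
 *           .limit = 1,
 *   };
 */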
251 
252 /* kernel timeout record */
253 
254 struct _timeout;
255 typedef void (*_timeout_func_t)(struct _timeout *t);
256 
257 struct _timeout {
258 	sys_dnode_t node;
259 	_timeout_func_t fn;
260 #ifdef CONFIG_TIMEOUT_64BIT
261 	/* Can't use k_ticks_t for header dependency reasons */
262 	int64_t dticks;
263 #else
264 	int32_t dticks;
265 #endif
266 };
267 
268 typedef void (*k_thread_timeslice_fn_t)(struct k_thread *thread, void *data);
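
/*
 * Example (assumes CONFIG_TIMESLICE_PER_THREAD and registration via
 * k_thread_time_slice_set() in kernel.h): a callback of this type runs
 * when the thread's time slice expires:
 *
 *   static void slice_expired(struct k_thread *thread, void *data)
 *   {
 *           // 'thread' exhausted its slice; 'data' is the value
 *           // registered alongside this callback
 *   }
 */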
269 
270 #ifdef __cplusplus
271 }
272 #endif
273 
274 #endif /* _ASMLANGUAGE */
275 
276 #endif /* ZEPHYR_KERNEL_INCLUDE_KERNEL_STRUCTS_H_ */
277