/*
 * Copyright (c) 2016 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/*
 * The purpose of this file is to provide essential/minimal kernel structure
 * definitions, so that they can be used without including kernel.h.
 *
 * The following rules must be observed:
 *  1. kernel_structs.h shall not depend on kernel.h, either directly or
 *     indirectly (i.e. it shall not include any header files that include
 *     kernel.h in their dependency chain).
 *  2. kernel.h shall imply kernel_structs.h, such that it is not necessary
 *     to include kernel_structs.h explicitly when kernel.h is included.
 */

#ifndef ZEPHYR_KERNEL_INCLUDE_KERNEL_STRUCTS_H_
#define ZEPHYR_KERNEL_INCLUDE_KERNEL_STRUCTS_H_

#if !defined(_ASMLANGUAGE)
#include <zephyr/sys/atomic.h>
#include <zephyr/types.h>
#include <zephyr/kernel/internal/sched_priq.h>
#include <zephyr/sys/dlist.h>
#include <zephyr/sys/util.h>
#include <zephyr/sys/sys_heap.h>
#include <zephyr/arch/structs.h>
#include <zephyr/kernel/stats.h>
#include <zephyr/kernel/obj_core.h>
#endif

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Bitmask definitions for the struct k_thread.thread_state field.
 *
 * These must be defined before kernel_arch_data.h is included, because that
 * header may require them to already be defined.
 */

/* states: common states use the low bits, arch-specific states the high bits */

/* Not a real thread */
#define _THREAD_DUMMY (BIT(0))

/* Thread is waiting on an object */
#define _THREAD_PENDING (BIT(1))

/* Thread has not yet started */
#define _THREAD_PRESTART (BIT(2))

/* Thread has terminated */
#define _THREAD_DEAD (BIT(3))

/* Thread is suspended */
#define _THREAD_SUSPENDED (BIT(4))

/* Thread is being aborted */
#define _THREAD_ABORTING (BIT(5))

/* Thread is present in the ready queue */
#define _THREAD_QUEUED (BIT(7))

/* end - states */
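
/*
 * Illustrative sketch (not part of this header): kernel code tests and
 * updates these bits on the thread_state field of a thread's embedded
 * _thread_base, typically while holding the scheduler lock. For example:
 *
 *	if ((thread->base.thread_state & _THREAD_QUEUED) != 0U) {
 *		// thread is already linked into the ready queue
 *	}
 */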

#ifdef CONFIG_STACK_SENTINEL
/* Magic value in lowest bytes of the stack */
#define STACK_SENTINEL 0xF0F0F0F0
#endif
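
/*
 * Illustrative sketch (assumed usage, with CONFIG_THREAD_STACK_INFO also
 * enabled): the kernel seeds the lowest word of each thread stack with
 * STACK_SENTINEL, and a periodic check might look like:
 *
 *	uint32_t *sentinel = (uint32_t *)thread->stack_info.start;
 *
 *	if (*sentinel != STACK_SENTINEL) {
 *		// stack overflow: raise a fatal error
 *	}
 */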

/* lowest value of _thread_base.preempt at which a thread is non-preemptible */
#define _NON_PREEMPT_THRESHOLD 0x0080U

/* highest value of _thread_base.preempt at which a thread is preemptible */
#define _PREEMPT_THRESHOLD (_NON_PREEMPT_THRESHOLD - 1U)
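
/*
 * Illustrative sketch (an assumption about how these thresholds are
 * consumed, not a definition from this header): the scheduler can decide
 * whether a thread may be preempted with a simple comparison:
 *
 *	static inline bool thread_is_preemptible(struct k_thread *thread)
 *	{
 *		return thread->base.preempt <= _PREEMPT_THRESHOLD;
 *	}
 */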

#if !defined(_ASMLANGUAGE)

struct _ready_q {
#ifndef CONFIG_SMP
	/* always contains next thread to run: cannot be NULL */
	struct k_thread *cache;
#endif

#if defined(CONFIG_SCHED_DUMB)
	sys_dlist_t runq;
#elif defined(CONFIG_SCHED_SCALABLE)
	struct _priq_rb runq;
#elif defined(CONFIG_SCHED_MULTIQ)
	struct _priq_mq runq;
#endif
};

typedef struct _ready_q _ready_q_t;
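
/*
 * Illustrative sketch (assuming the CONFIG_SCHED_DUMB backend): with the
 * plain dlist run queue, the next ready thread is found with a head peek;
 * the node is embedded in struct k_thread and recovered via CONTAINER_OF:
 *
 *	sys_dnode_t *node = sys_dlist_peek_head(&_kernel.ready_q.runq);
 *
 * The scalable and multiq backends answer the same question with a
 * red/black tree lookup or a per-priority array of lists instead.
 */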

struct _cpu {
	/* nested interrupt count */
	uint32_t nested;

	/* interrupt stack pointer base */
	char *irq_stack;

	/* currently scheduled thread */
	struct k_thread *current;

	/* one assigned idle thread per CPU */
	struct k_thread *idle_thread;

#ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
	struct _ready_q ready_q;
#endif

#if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) &&                                                         \
	(CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES)
	/* Coop thread preempted by current metairq, or NULL */
	struct k_thread *metairq_preempted;
#endif

	uint8_t id;

#if defined(CONFIG_FPU_SHARING)
	void *fp_ctx;
#endif

#ifdef CONFIG_SMP
	/* True when _current is allowed to context switch */
	uint8_t swap_ok;
#endif

#ifdef CONFIG_SCHED_THREAD_USAGE
	/*
	 * 'usage0' is a timestamp marking the beginning of an execution
	 * window; the special value 0 indicates that usage tracking has
	 * been stopped (but not disabled).
	 */

	uint32_t usage0;

#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
	struct k_cycle_stats *usage;
#endif
#endif

#ifdef CONFIG_OBJ_CORE_SYSTEM
	struct k_obj_core  obj_core;
#endif

	/* Per CPU architecture specifics */
	struct _cpu_arch arch;
};

typedef struct _cpu _cpu_t;
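
/*
 * Illustrative sketch (assumed usage): the 'nested' counter is what lets
 * the kernel tell whether it is currently running in interrupt context:
 *
 *	bool in_isr = (arch_curr_cpu()->nested != 0U);
 */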

struct z_kernel {
	struct _cpu cpus[CONFIG_MP_MAX_NUM_CPUS];

#ifdef CONFIG_PM
	int32_t idle; /* Number of ticks for kernel idling */
#endif

	/*
	 * ready queue: can be big, keep after small fields, since some
	 * assembly (e.g. ARC) is limited in the encoding of the offset
	 */
#ifndef CONFIG_SCHED_CPU_MASK_PIN_ONLY
	struct _ready_q ready_q;
#endif

#ifdef CONFIG_FPU_SHARING
	/*
	 * A 'current_sse' field does not exist in addition to the 'current_fp'
	 * field since it's not possible to divide the IA-32 non-integer
	 * registers into two distinct blocks owned by different threads.  In
	 * other words, given that the 'fxsave/fxrstor' instructions
	 * save/restore both the X87 FPU and XMM registers, it's not possible
	 * for a thread to only "own" the XMM registers.
	 */

	/* thread that owns the FP regs */
	struct k_thread *current_fp;
#endif

#if defined(CONFIG_THREAD_MONITOR)
	struct k_thread *threads; /* singly linked list of ALL threads */
#endif
#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
	struct k_cycle_stats usage[CONFIG_MP_MAX_NUM_CPUS];
#endif

#ifdef CONFIG_OBJ_CORE_SYSTEM
	struct k_obj_core  obj_core;
#endif

#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
	/* Need to signal an IPI at the next scheduling point */
	bool pending_ipi;
#endif
};

typedef struct z_kernel _kernel_t;

extern struct z_kernel _kernel;

extern atomic_t _cpus_active;
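
/*
 * Illustrative sketch (assuming CONFIG_THREAD_MONITOR and the next_thread
 * link that struct k_thread carries in that configuration): every thread
 * in the system can be visited by walking the list rooted at
 * _kernel.threads:
 *
 *	for (struct k_thread *t = _kernel.threads; t != NULL;
 *	     t = t->next_thread) {
 *		// inspect t
 *	}
 */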

#ifdef CONFIG_SMP

/* True if the current context can be preempted and migrated to
 * another SMP CPU.
 */
bool z_smp_cpu_mobile(void);

#define _current_cpu ({ __ASSERT_NO_MSG(!z_smp_cpu_mobile()); \
			arch_curr_cpu(); })
#define _current k_sched_current_thread_query()

#else
#define _current_cpu (&_kernel.cpus[0])
#define _current _kernel.cpus[0].current
#endif
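
/*
 * Illustrative usage sketch: kernel-internal code reads the running thread
 * through these macros rather than dereferencing the per-CPU structs
 * directly, e.g. (negative priorities are cooperative in Zephyr):
 *
 *	if (_current->base.prio < 0) {
 *		// current thread is cooperative
 *	}
 */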

/* kernel wait queue record */

#ifdef CONFIG_WAITQ_SCALABLE

typedef struct {
	struct _priq_rb waitq;
} _wait_q_t;

extern bool z_priq_rb_lessthan(struct rbnode *a, struct rbnode *b);

#define Z_WAIT_Q_INIT(wait_q) { { { .lessthan_fn = z_priq_rb_lessthan } } }

#else

typedef struct {
	sys_dlist_t waitq;
} _wait_q_t;

#define Z_WAIT_Q_INIT(wait_q) { SYS_DLIST_STATIC_INIT(&(wait_q)->waitq) }

#endif
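
/*
 * Illustrative sketch: kernel objects embed a _wait_q_t and initialize it
 * with Z_WAIT_Q_INIT, which expands to the right initializer for whichever
 * backend is configured, e.g. (hypothetical object, for illustration only):
 *
 *	struct my_obj {
 *		_wait_q_t wait_q;
 *	};
 *
 *	struct my_obj obj = {
 *		.wait_q = Z_WAIT_Q_INIT(&obj.wait_q),
 *	};
 */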

/* kernel timeout record */

struct _timeout;
typedef void (*_timeout_func_t)(struct _timeout *t);

struct _timeout {
	sys_dnode_t node;
	_timeout_func_t fn;
#ifdef CONFIG_TIMEOUT_64BIT
	/* Can't use k_ticks_t for header dependency reasons */
	int64_t dticks;
#else
	int32_t dticks;
#endif
};
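
/*
 * Illustrative sketch (hypothetical 'my_timer' type): a struct _timeout is
 * normally embedded in a larger object, and the expiry callback recovers
 * the enclosing object with CONTAINER_OF:
 *
 *	struct my_timer {
 *		struct _timeout timeout;
 *		int expiries;
 *	};
 *
 *	static void my_expiry_fn(struct _timeout *t)
 *	{
 *		struct my_timer *timer =
 *			CONTAINER_OF(t, struct my_timer, timeout);
 *
 *		timer->expiries++;
 *	}
 */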

typedef void (*k_thread_timeslice_fn_t)(struct k_thread *thread, void *data);
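
/*
 * Illustrative sketch (assuming the per-thread time slice API that
 * consumes this type): a callback of this shape runs when a thread's
 * time slice expires:
 *
 *	static void slice_expired(struct k_thread *thread, void *data)
 *	{
 *		// e.g. record statistics or adjust the thread's priority
 *	}
 */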

#ifdef __cplusplus
}
#endif

#endif /* _ASMLANGUAGE */

#endif /* ZEPHYR_KERNEL_INCLUDE_KERNEL_STRUCTS_H_ */