1 /*
2  * Copyright (c) 2016 Wind River Systems, Inc.
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 /*
8  * The purpose of this file is to provide essential/minimal kernel structure
9  * definitions, so that they can be used without including kernel.h.
10  *
11  * The following rules must be observed:
12  *  1. kernel_structs.h shall not depend on kernel.h both directly and
13  *    indirectly (i.e. it shall not include any header files that include
14  *    kernel.h in their dependency chain).
15  *  2. kernel.h shall imply kernel_structs.h, such that it shall not be
16  *    necessary to include kernel_structs.h explicitly when kernel.h is
17  *    included.
18  */
19 
20 #ifndef ZEPHYR_KERNEL_INCLUDE_KERNEL_STRUCTS_H_
21 #define ZEPHYR_KERNEL_INCLUDE_KERNEL_STRUCTS_H_
22 
23 #if !defined(_ASMLANGUAGE)
24 #include <zephyr/sys/atomic.h>
25 #include <zephyr/types.h>
26 #include <zephyr/kernel/sched_priq.h>
27 #include <zephyr/sys/dlist.h>
28 #include <zephyr/sys/util.h>
29 #include <zephyr/sys/sys_heap.h>
30 #include <zephyr/arch/structs.h>
31 #include <zephyr/kernel/stats.h>
32 #endif
33 
34 #ifdef __cplusplus
35 extern "C" {
36 #endif
37 
/*
 * Bitmask definitions for the struct k_thread.thread_state field.
 *
 * Must be before kernel_arch_data.h because it might need them to be already
 * defined.
 */

/* states: common uses low bits, arch-specific use high bits */

/* Not a real thread */
#define _THREAD_DUMMY (BIT(0))

/* Thread is waiting on an object */
#define _THREAD_PENDING (BIT(1))

/* Thread has not yet started */
#define _THREAD_PRESTART (BIT(2))

/* Thread has terminated */
#define _THREAD_DEAD (BIT(3))

/* Thread is suspended */
#define _THREAD_SUSPENDED (BIT(4))

/* Thread is being aborted */
#define _THREAD_ABORTING (BIT(5))

/*
 * Thread is present in the ready queue
 *
 * NOTE(review): BIT(6) is deliberately skipped here — presumably a
 * retired/reserved state bit; confirm history before reusing it.
 */
#define _THREAD_QUEUED (BIT(7))

/* end - states */
69 
#ifdef CONFIG_STACK_SENTINEL
/* Magic value in lowest bytes of the stack; checked to detect overflow */
#define STACK_SENTINEL 0xF0F0F0F0
#endif

/* lowest value of _thread_base.preempt at which a thread is non-preemptible */
#define _NON_PREEMPT_THRESHOLD 0x0080U

/*
 * highest value of _thread_base.preempt at which a thread is preemptible;
 * by construction exactly one less than the non-preemptible threshold, so
 * the two ranges partition the preempt value space with no overlap.
 */
#define _PREEMPT_THRESHOLD (_NON_PREEMPT_THRESHOLD - 1U)
80 
81 #if !defined(_ASMLANGUAGE)
82 
/*
 * Scheduler ready queue. The backing container is selected at build time
 * by exactly one of the CONFIG_SCHED_* choices below.
 */
struct _ready_q {
#ifndef CONFIG_SMP
	/* always contains next thread to run: cannot be NULL */
	struct k_thread *cache;
#endif

#if defined(CONFIG_SCHED_DUMB)
	/* simple doubly-linked list scan */
	sys_dlist_t runq;
#elif defined(CONFIG_SCHED_SCALABLE)
	/* red/black tree keyed by priority */
	struct _priq_rb runq;
#elif defined(CONFIG_SCHED_MULTIQ)
	/* one list per priority level */
	struct _priq_mq runq;
#endif
};

typedef struct _ready_q _ready_q_t;
99 
/*
 * Per-CPU kernel state. One instance exists per CPU in _kernel.cpus[].
 * NOTE(review): field offsets may be consumed by generated arch/assembly
 * offsets — do not reorder without checking offset generation.
 */
struct _cpu {
	/* nested interrupt count */
	uint32_t nested;

	/* interrupt stack pointer base */
	char *irq_stack;

	/* currently scheduled thread */
	struct k_thread *current;

	/* one assigned idle thread per CPU */
	struct k_thread *idle_thread;

#ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
	/* per-CPU ready queue (threads pinned to this CPU only) */
	struct _ready_q ready_q;
#endif

#if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) && (CONFIG_NUM_COOP_PRIORITIES > 0)
	/* Coop thread preempted by current metairq, or NULL */
	struct k_thread *metairq_preempted;
#endif

	/* numeric ID of this CPU (index into _kernel.cpus[]) */
	uint8_t id;

#if defined(CONFIG_FPU_SHARING)
	/* saved floating-point context, opaque to common code */
	void *fp_ctx;
#endif

#ifdef CONFIG_SMP
	/* True when _current is allowed to context switch */
	uint8_t swap_ok;
#endif

#ifdef CONFIG_SCHED_THREAD_USAGE
	/*
	 * [usage0] is used as a timestamp to mark the beginning of an
	 * execution window. [0] is a special value indicating that it
	 * has been stopped (but not disabled).
	 */

	uint32_t usage0;

#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
	/* aggregate cycle statistics for all threads on this CPU */
	struct k_cycle_stats usage;
#endif
#endif

	/* Per CPU architecture specifics */
	struct _cpu_arch arch;
};

typedef struct _cpu _cpu_t;
152 
/*
 * Global kernel state singleton (see the _kernel definition below).
 * Field order is significant: see the ready_q comment inside.
 */
struct z_kernel {
	/* per-CPU state; index 0 is the boot CPU */
	struct _cpu cpus[CONFIG_MP_MAX_NUM_CPUS];

#ifdef CONFIG_PM
	int32_t idle; /* Number of ticks for kernel idling */
#endif

	/*
	 * ready queue: can be big, keep after small fields, since some
	 * assembly (e.g. ARC) are limited in the encoding of the offset
	 */
#ifndef CONFIG_SCHED_CPU_MASK_PIN_ONLY
	struct _ready_q ready_q;
#endif

#ifdef CONFIG_FPU_SHARING
	/*
	 * A 'current_sse' field does not exist in addition to the 'current_fp'
	 * field since it's not possible to divide the IA-32 non-integer
	 * registers into 2 distinct blocks owned by differing threads.  In
	 * other words, given that the 'fxnsave/fxrstor' instructions
	 * save/restore both the X87 FPU and XMM registers, it's not possible
	 * for a thread to only "own" the XMM registers.
	 */

	/* thread that owns the FP regs */
	struct k_thread *current_fp;
#endif

#if defined(CONFIG_THREAD_MONITOR)
	struct k_thread *threads; /* singly linked list of ALL threads */
#endif

#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
	/* Need to signal an IPI at the next scheduling point */
	bool pending_ipi;
#endif
};

typedef struct z_kernel _kernel_t;
193 
/* The single global kernel state instance */
extern struct z_kernel _kernel;

/* Count of CPUs currently active (not idle/powered down) —
 * presumably maintained by PM/SMP code; confirm at the definition site.
 */
extern atomic_t _cpus_active;

#ifdef CONFIG_SMP

/* True if the current context can be preempted and migrated to
 * another SMP CPU.
 */
bool z_smp_cpu_mobile(void);

/*
 * Accessing the per-CPU struct is only safe while the context cannot
 * migrate, hence the assertion before arch_curr_cpu().
 */
#define _current_cpu ({ __ASSERT_NO_MSG(!z_smp_cpu_mobile()); \
			arch_curr_cpu(); })
/* Current thread: resolved via a function call under SMP, since the
 * owning CPU can change out from under a raw memory read.
 */
#define _current z_current_get()

#else
/* Uniprocessor: CPU 0 is the only CPU, so direct access is safe */
#define _current_cpu (&_kernel.cpus[0])
#define _current _kernel.cpus[0].current
#endif
213 
/* kernel wait queue record
 *
 * The backing container is chosen at build time: a priority-ordered
 * red/black tree under CONFIG_WAITQ_SCALABLE, otherwise a simple
 * doubly-linked list. Z_WAIT_Q_INIT must match the chosen layout.
 */

#ifdef CONFIG_WAITQ_SCALABLE

typedef struct {
	struct _priq_rb waitq;
} _wait_q_t;

/* rbtree ordering predicate for waiters (defined in scheduler code) */
extern bool z_priq_rb_lessthan(struct rbnode *a, struct rbnode *b);

/* Static initializer; nesting mirrors _wait_q_t -> _priq_rb -> rbtree */
#define Z_WAIT_Q_INIT(wait_q) { { { .lessthan_fn = z_priq_rb_lessthan } } }

#else

typedef struct {
	sys_dlist_t waitq;
} _wait_q_t;

#define Z_WAIT_Q_INIT(wait_q) { SYS_DLIST_STATIC_INIT(&(wait_q)->waitq) }

#endif
235 
/* kernel timeout record */

struct _timeout;
/* Callback invoked when a timeout expires; receives the expired record */
typedef void (*_timeout_func_t)(struct _timeout *t);

struct _timeout {
	/* linkage in the kernel's timeout list */
	sys_dnode_t node;
	/* expiry callback */
	_timeout_func_t fn;
#ifdef CONFIG_TIMEOUT_64BIT
	/* Can't use k_ticks_t for header dependency reasons */
	int64_t dticks;
#else
	/* delta ticks relative to the preceding timeout in the list —
	 * presumably a delta-queue encoding; confirm in timeout.c
	 */
	int32_t dticks;
#endif
};

/* Callback type for time-slice expiration hooks */
typedef void (*k_thread_timeslice_fn_t)(struct k_thread *thread, void *data);
253 
254 #ifdef __cplusplus
255 }
256 #endif
257 
258 #endif /* _ASMLANGUAGE */
259 
260 #endif /* ZEPHYR_KERNEL_INCLUDE_KERNEL_STRUCTS_H_ */
261