1 /*
2  * Copyright (c) 2016, Wind River Systems, Inc.
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #ifndef ZEPHYR_INCLUDE_KERNEL_THREAD_H_
8 #define ZEPHYR_INCLUDE_KERNEL_THREAD_H_
9 
10 #ifdef CONFIG_DEMAND_PAGING_THREAD_STATS
11 #include <sys/mem_manage.h>
12 #endif
13 
14 /**
15  * @typedef k_thread_entry_t
16  * @brief Thread entry point function type.
17  *
18  * A thread's entry point function is invoked when the thread starts executing.
19  * Up to 3 argument values can be passed to the function.
20  *
21  * The thread terminates execution permanently if the entry point function
22  * returns. The thread is responsible for releasing any shared resources
23  * it may own (such as mutexes and dynamically allocated memory), prior to
24  * returning.
25  *
26  * @param p1 First argument.
27  * @param p2 Second argument.
28  * @param p3 Third argument.
29  *
30  * @return N/A
31  */
32 
#ifdef CONFIG_THREAD_MONITOR
/*
 * Record of how a thread was started: its entry point and the three
 * argument values passed to it. Kept only when the thread monitor is
 * enabled, so debug tooling can report each thread's origin.
 */
struct __thread_entry {
	k_thread_entry_t pEntry; /* thread entry point function */
	void *parameter1;        /* first argument passed to pEntry */
	void *parameter2;        /* second argument passed to pEntry */
	void *parameter3;        /* third argument passed to pEntry */
};
#endif
41 
/* can be used for creating 'dummy' threads, e.g. for pending on objects */
struct _thread_base {

	/* this thread's entry in a ready/wait queue; the dlist and rbtree
	 * representations are mutually exclusive, hence the union
	 */
	union {
		sys_dnode_t qnode_dlist;
		struct rbnode qnode_rb;
	};

	/* wait queue on which the thread is pended (needed only for
	 * trees, not dumb lists)
	 */
	_wait_q_t *pended_on;

	/* user facing 'thread options'; values defined in include/kernel.h */
	uint8_t user_options;

	/* thread state */
	uint8_t thread_state;

	/*
	 * scheduler lock count and thread priority
	 *
	 * These two fields control the preemptibility of a thread.
	 *
	 * When the scheduler is locked, sched_locked is decremented, which
	 * means that the scheduler is locked for values from 0xff to 0x01. A
	 * thread is coop if its prio is negative, thus 0x80 to 0xff when
	 * looking at the value as unsigned.
	 *
	 * By putting them end-to-end, this means that a thread is
	 * non-preemptible if the bundled value is greater than or equal to
	 * 0x0080.
	 */
	union {
		struct {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
			/* big-endian: sched_locked is the high byte of preempt */
			uint8_t sched_locked;
			int8_t prio;
#else /* LITTLE and PDP */
			/* little-endian: sched_locked is the high byte of preempt */
			int8_t prio;
			uint8_t sched_locked;
#endif
		};
		/* both fields read as one value for the preemptibility test */
		uint16_t preempt;
	};

#ifdef CONFIG_SCHED_DEADLINE
	/* per-thread deadline used to order runnable threads when
	 * deadline scheduling is enabled
	 */
	int prio_deadline;
#endif

	/* scheduler queue insertion key; presumably preserves FIFO order
	 * among equal-priority runnable threads — confirm against sched.c
	 */
	uint32_t order_key;

#ifdef CONFIG_SMP
	/* True for the per-CPU idle threads */
	uint8_t is_idle;

	/* CPU index on which thread was last run */
	uint8_t cpu;

	/* Recursive count of irq_lock() calls */
	uint8_t global_lock_count;

#endif

#ifdef CONFIG_SCHED_CPU_MASK
	/* "May run on" bits for each CPU */
	uint8_t cpu_mask;
#endif

	/* data returned by APIs */
	void *swap_data;

#ifdef CONFIG_SYS_CLOCK_EXISTS
	/* this thread's entry in a timeout queue */
	struct _timeout timeout;
#endif
};

typedef struct _thread_base _thread_base_t;
122 
#if defined(CONFIG_THREAD_STACK_INFO)
/* Contains the stack information of a thread */
struct _thread_stack_info {
	/* Stack start - Represents the start address of the thread-writable
	 * stack area.
	 */
	uintptr_t start;

	/* Thread writable stack buffer size. Represents the size of the actual
	 * buffer, starting from the 'start' member, that should be writable by
	 * the thread. This comprises of the thread stack area, any area reserved
	 * for local thread data storage, as well as any area left-out due to
	 * random adjustments applied to the initial thread stack pointer during
	 * thread initialization.
	 */
	size_t size;

	/* Adjustment value to the size member, removing any storage
	 * used for TLS or random stack base offsets. (start + size - delta)
	 * is the initial stack pointer for a thread. May be 0.
	 */
	size_t delta;
};

typedef struct _thread_stack_info _thread_stack_info_t;
#endif /* CONFIG_THREAD_STACK_INFO */
149 
#if defined(CONFIG_USERSPACE)
/*
 * Tracks a user-mode thread's membership in a memory domain: the domain
 * it belongs to plus the list node linking it into that domain's queue
 * of member threads.
 */
struct _mem_domain_info {
	/** memory domain queue node */
	sys_dnode_t mem_domain_q_node;
	/** memory domain of the thread */
	struct k_mem_domain *mem_domain;
};

#endif /* CONFIG_USERSPACE */
159 
#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
/*
 * Per-thread data that must be writable from user mode when userspace is
 * enabled (hence kept out of the kernel-only struct k_thread); presumably
 * allocated in a user-accessible region — confirm against thread setup code.
 */
struct _thread_userspace_local_data {
#if defined(CONFIG_ERRNO) && !defined(CONFIG_ERRNO_IN_TLS)
	/* per-thread errno, used when errno is not stored in TLS */
	int errno_var;
#endif
};
#endif
167 
#ifdef CONFIG_THREAD_RUNTIME_STATS
/* User-facing runtime statistics for a thread, returned by the
 * thread runtime stats APIs.
 */
struct k_thread_runtime_stats {
	/* Thread execution cycles */
#ifdef CONFIG_THREAD_RUNTIME_STATS_USE_TIMING_FUNCTIONS
	timing_t execution_cycles;
#else
	uint64_t execution_cycles;
#endif
};

typedef struct k_thread_runtime_stats k_thread_runtime_stats_t;

/* Internal bookkeeping for runtime stats: the accumulated public stats
 * plus the timestamp needed to account for the current execution slice.
 */
struct _thread_runtime_stats {
	/* Timestamp when last switched in
	 *
	 * NOTE(review): only 32 bits wide in the non-timing-function case,
	 * while execution_cycles is 64 bits — verify the cycle counter used
	 * here cannot wrap within a scheduling interval.
	 */
#ifdef CONFIG_THREAD_RUNTIME_STATS_USE_TIMING_FUNCTIONS
	timing_t last_switched_in;
#else
	uint32_t last_switched_in;
#endif

	k_thread_runtime_stats_t stats;
};
#endif
191 
/* Per-thread poll state (internal). */
struct z_poller {
	bool is_polling; /* true while this poller is actively waiting on events */
	uint8_t mode;    /* polling mode; values defined by the poll implementation */
};
196 
197 /**
198  * @ingroup thread_apis
199  * Thread Structure
200  */
201 struct k_thread {
202 
203 	struct _thread_base base;
204 
205 	/** defined by the architecture, but all archs need these */
206 	struct _callee_saved callee_saved;
207 
208 	/** static thread init data */
209 	void *init_data;
210 
211 	/** threads waiting in k_thread_join() */
212 	_wait_q_t join_queue;
213 
214 #if defined(CONFIG_POLL)
215 	struct z_poller poller;
216 #endif
217 
218 #if defined(CONFIG_THREAD_MONITOR)
219 	/** thread entry and parameters description */
220 	struct __thread_entry entry;
221 
222 	/** next item in list of all threads */
223 	struct k_thread *next_thread;
224 #endif
225 
226 #if defined(CONFIG_THREAD_NAME)
227 	/** Thread name */
228 	char name[CONFIG_THREAD_MAX_NAME_LEN];
229 #endif
230 
231 #ifdef CONFIG_THREAD_CUSTOM_DATA
232 	/** crude thread-local storage */
233 	void *custom_data;
234 #endif
235 
236 #ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
237 	struct _thread_userspace_local_data *userspace_local_data;
238 #endif
239 
240 #if defined(CONFIG_ERRNO) && !defined(CONFIG_ERRNO_IN_TLS)
241 #ifndef CONFIG_USERSPACE
242 	/** per-thread errno variable */
243 	int errno_var;
244 #endif
245 #endif
246 
247 #if defined(CONFIG_THREAD_STACK_INFO)
248 	/** Stack Info */
249 	struct _thread_stack_info stack_info;
250 #endif /* CONFIG_THREAD_STACK_INFO */
251 
252 #if defined(CONFIG_USERSPACE)
253 	/** memory domain info of the thread */
254 	struct _mem_domain_info mem_domain_info;
255 	/** Base address of thread stack */
256 	k_thread_stack_t *stack_obj;
257 	/** current syscall frame pointer */
258 	void *syscall_frame;
259 #endif /* CONFIG_USERSPACE */
260 
261 
262 #if defined(CONFIG_USE_SWITCH)
263 	/* When using __switch() a few previously arch-specific items
264 	 * become part of the core OS
265 	 */
266 
267 	/** z_swap() return value */
268 	int swap_retval;
269 
270 	/** Context handle returned via arch_switch() */
271 	void *switch_handle;
272 #endif
273 	/** resource pool */
274 	struct k_heap *resource_pool;
275 
276 #if defined(CONFIG_THREAD_LOCAL_STORAGE)
277 	/* Pointer to arch-specific TLS area */
278 	uintptr_t tls;
279 #endif /* CONFIG_THREAD_LOCAL_STORAGE */
280 
281 #ifdef CONFIG_THREAD_RUNTIME_STATS
282 	/** Runtime statistics */
283 	struct _thread_runtime_stats rt_stats;
284 #endif
285 
286 #ifdef CONFIG_DEMAND_PAGING_THREAD_STATS
287 	/** Paging statistics */
288 	struct k_mem_paging_stats_t paging_stats;
289 #endif
290 
291 	/** arch-specifics: must always be at the end */
292 	struct _thread_arch arch;
293 };
294 
295 typedef struct k_thread _thread_t;
296 typedef struct k_thread *k_tid_t;
297 
298 #endif
299