1 /*
2  * Copyright (c) 2016, Wind River Systems, Inc.
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #ifndef ZEPHYR_INCLUDE_KERNEL_THREAD_H_
8 #define ZEPHYR_INCLUDE_KERNEL_THREAD_H_
9 
10 #ifdef CONFIG_DEMAND_PAGING_THREAD_STATS
11 #include <zephyr/sys/mem_manage.h>
12 #endif
13 
14 #include <zephyr/kernel/stats.h>
15 
16 /**
17  * @typedef k_thread_entry_t
18  * @brief Thread entry point function type.
19  *
20  * A thread's entry point function is invoked when the thread starts executing.
21  * Up to 3 argument values can be passed to the function.
22  *
23  * The thread terminates execution permanently if the entry point function
24  * returns. The thread is responsible for releasing any shared resources
25  * it may own (such as mutexes and dynamically allocated memory), prior to
26  * returning.
27  *
28  * @param p1 First argument.
29  * @param p2 Second argument.
30  * @param p3 Third argument.
31  */
32 
#ifdef CONFIG_THREAD_MONITOR
/* Snapshot of how a thread was started: its entry point and the three
 * arguments passed to it. Kept only when thread monitoring is enabled so
 * debug/tracing tools can report thread origins.
 */
struct __thread_entry {
	k_thread_entry_t pEntry; /* thread entry point function */
	void *parameter1;        /* first argument given to pEntry */
	void *parameter2;        /* second argument given to pEntry */
	void *parameter3;        /* third argument given to pEntry */
};
#endif
41 
42 struct k_thread;
43 
44 /*
45  * This _pipe_desc structure is used by the pipes kernel module when
46  * CONFIG_PIPES has been selected.
47  */
48 
49 struct _pipe_desc {
50 	sys_dnode_t      node;
51 	unsigned char   *buffer;         /* Position in src/dest buffer */
52 	size_t           bytes_to_xfer;  /* # bytes left to transfer */
53 	struct k_thread *thread;         /* Back pointer to pended thread */
54 };
55 
/* Core, arch-independent bookkeeping for every thread. Field order is
 * significant (see the prio/sched_locked union below); do not reorder.
 *
 * can be used for creating 'dummy' threads, e.g. for pending on objects
 */
struct _thread_base {

	/* this thread's entry in a ready/wait queue */
	union {
		sys_dnode_t qnode_dlist; /* doubly-linked list node */
		struct rbnode qnode_rb;  /* red/black tree node */
	};

	/* wait queue on which the thread is pended (needed only for
	 * trees, not dumb lists)
	 */
	_wait_q_t *pended_on;

	/* user facing 'thread options'; values defined in include/kernel.h */
	uint8_t user_options;

	/* thread state */
	uint8_t thread_state;

	/*
	 * scheduler lock count and thread priority
	 *
	 * These two fields control the preemptibility of a thread.
	 *
	 * When the scheduler is locked, sched_locked is decremented, which
	 * means that the scheduler is locked for values from 0xff to 0x01. A
	 * thread is coop if its prio is negative, thus 0x80 to 0xff when
	 * looked at the value as unsigned.
	 *
	 * By putting them end-to-end, this means that a thread is
	 * non-preemptible if the bundled value is greater than or equal to
	 * 0x0080.
	 */
	union {
		struct {
#ifdef CONFIG_BIG_ENDIAN
			uint8_t sched_locked;
			int8_t prio;
#else /* Little Endian */
			int8_t prio;
			uint8_t sched_locked;
#endif
		};
		/* combined 16-bit view of prio + sched_locked enabling the
		 * single-compare preemptibility test described above
		 */
		uint16_t preempt;
	};

#ifdef CONFIG_SCHED_DEADLINE
	/* deadline value used to order threads of equal priority when
	 * deadline scheduling is enabled
	 */
	int prio_deadline;
#endif

	/* scheduler queue ordering key; NOTE(review): appears to preserve
	 * insertion/FIFO order among equal-priority threads -- confirm
	 * against the scheduler implementation
	 */
	uint32_t order_key;

#ifdef CONFIG_SMP
	/* True for the per-CPU idle threads */
	uint8_t is_idle;

	/* CPU index on which thread was last run */
	uint8_t cpu;

	/* Recursive count of irq_lock() calls */
	uint8_t global_lock_count;

#endif

#ifdef CONFIG_SCHED_CPU_MASK
	/* "May run on" bits for each CPU */
	uint8_t cpu_mask;
#endif

	/* data returned by APIs */
	void *swap_data;

#ifdef CONFIG_SYS_CLOCK_EXISTS
	/* this thread's entry in a timeout queue */
	struct _timeout timeout;
#endif

#ifdef CONFIG_TIMESLICE_PER_THREAD
	/* per-thread time slice length, in ticks */
	int32_t slice_ticks;
	/* callback invoked when this thread's time slice expires */
	k_thread_timeslice_fn_t slice_expired;
	/* opaque argument handed to slice_expired */
	void *slice_data;
#endif

#ifdef CONFIG_SCHED_THREAD_USAGE
	struct k_cycle_stats  usage;   /* Track thread usage statistics */
#endif
};

typedef struct _thread_base _thread_base_t;
146 
#if defined(CONFIG_THREAD_STACK_INFO)
/* Contains the stack information of a thread */
struct _thread_stack_info {
	/* Stack start - Represents the start address of the thread-writable
	 * stack area.
	 */
	uintptr_t start;

	/* Thread writable stack buffer size. Represents the size of the actual
	 * buffer, starting from the 'start' member, that should be writable by
	 * the thread. This comprises of the thread stack area, any area reserved
	 * for local thread data storage, as well as any area left-out due to
	 * random adjustments applied to the initial thread stack pointer during
	 * thread initialization.
	 */
	size_t size;

	/* Adjustment value to the size member, removing any storage
	 * used for TLS or random stack base offsets. (start + size - delta)
	 * is the initial stack pointer for a thread. May be 0.
	 */
	size_t delta;
};

typedef struct _thread_stack_info _thread_stack_info_t;
#endif /* CONFIG_THREAD_STACK_INFO */
173 
#if defined(CONFIG_USERSPACE)
/* Per-thread record of memory domain membership (userspace only). */
struct _mem_domain_info {
	/** memory domain queue node */
	sys_dnode_t mem_domain_q_node;
	/** memory domain of the thread */
	struct k_mem_domain *mem_domain;
};

#endif /* CONFIG_USERSPACE */
183 
#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
/* Per-thread data that must live in memory writable from user mode.
 * Currently holds only errno, and only when errno is neither provided by
 * the C library nor stored in TLS.
 */
struct _thread_userspace_local_data {
#if defined(CONFIG_ERRNO) && !defined(CONFIG_ERRNO_IN_TLS) && !defined(CONFIG_LIBC_ERRNO)
	int errno_var; /* per-thread errno storage */
#endif
};
#endif
191 
/* Runtime usage statistics, gathered either per-thread or per-CPU.
 * Which fields exist depends on the CONFIG_SCHED_THREAD_USAGE* options.
 */
typedef struct k_thread_runtime_stats {
#ifdef CONFIG_SCHED_THREAD_USAGE
	uint64_t execution_cycles;    /* see note below on its meaning */
	uint64_t total_cycles;        /* total # of non-idle cycles */
	/*
	 * In the context of thread statistics, [execution_cycles] is the same
	 * as the total # of non-idle cycles. In the context of CPU statistics,
	 * it refers to the sum of non-idle + idle cycles.
	 */
#endif

#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
	/*
	 * For threads, the following fields refer to the time spent executing
	 * as bounded by when the thread was scheduled in and scheduled out.
	 * For CPUs, the same fields refer to the time spent executing
	 * non-idle threads as bounded by the idle thread(s).
	 */

	uint64_t current_cycles;      /* current # of non-idle cycles */
	uint64_t peak_cycles;         /* peak # of non-idle cycles */
	uint64_t average_cycles;      /* average # of non-idle cycles */
#endif

#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
	/*
	 * This field is always zero for individual threads. It only comes
	 * into play when gathering statistics for the CPU. In that case it
	 * represents the total number of cycles spent idling.
	 */

	uint64_t idle_cycles;
#endif

#if defined(__cplusplus) && !defined(CONFIG_SCHED_THREAD_USAGE) &&                                 \
	!defined(CONFIG_SCHED_THREAD_USAGE_ANALYSIS) && !defined(CONFIG_SCHED_THREAD_USAGE_ALL)
	/* If none of the above Kconfig values are defined, this struct will have a size 0 in C
	 * which is not allowed in C++ (it'll have a size 1). To prevent this, we add a 1 byte dummy
	 * variable when the struct would otherwise be empty.
	 */
	uint8_t dummy;
#endif
}  k_thread_runtime_stats_t;
235 
/* Internal per-thread poller state used by the k_poll subsystem. */
struct z_poller {
	bool is_polling; /* true while the owner is actively polling */
	uint8_t mode;    /* poller operating mode (internal to k_poll) */
};
240 
241 /**
242  * @ingroup thread_apis
243  * Thread Structure
244  */
245 struct k_thread {
246 
247 	struct _thread_base base;
248 
249 	/** defined by the architecture, but all archs need these */
250 	struct _callee_saved callee_saved;
251 
252 	/** static thread init data */
253 	void *init_data;
254 
255 	/** threads waiting in k_thread_join() */
256 	_wait_q_t join_queue;
257 
258 #if defined(CONFIG_POLL)
259 	struct z_poller poller;
260 #endif
261 
262 #if defined(CONFIG_EVENTS)
263 	struct k_thread *next_event_link;
264 
265 	uint32_t   events;
266 	uint32_t   event_options;
267 
268 	/** true if timeout should not wake the thread */
269 	bool no_wake_on_timeout;
270 #endif
271 
272 #if defined(CONFIG_THREAD_MONITOR)
273 	/** thread entry and parameters description */
274 	struct __thread_entry entry;
275 
276 	/** next item in list of all threads */
277 	struct k_thread *next_thread;
278 #endif
279 
280 #if defined(CONFIG_THREAD_NAME)
281 	/** Thread name */
282 	char name[CONFIG_THREAD_MAX_NAME_LEN];
283 #endif
284 
285 #ifdef CONFIG_THREAD_CUSTOM_DATA
286 	/** crude thread-local storage */
287 	void *custom_data;
288 #endif
289 
290 #ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
291 	struct _thread_userspace_local_data *userspace_local_data;
292 #endif
293 
294 #if defined(CONFIG_ERRNO) && !defined(CONFIG_ERRNO_IN_TLS) && !defined(CONFIG_LIBC_ERRNO)
295 #ifndef CONFIG_USERSPACE
296 	/** per-thread errno variable */
297 	int errno_var;
298 #endif
299 #endif
300 
301 #if defined(CONFIG_THREAD_STACK_INFO)
302 	/** Stack Info */
303 	struct _thread_stack_info stack_info;
304 #endif /* CONFIG_THREAD_STACK_INFO */
305 
306 #if defined(CONFIG_USERSPACE)
307 	/** memory domain info of the thread */
308 	struct _mem_domain_info mem_domain_info;
309 	/** Base address of thread stack */
310 	k_thread_stack_t *stack_obj;
311 	/** current syscall frame pointer */
312 	void *syscall_frame;
313 #endif /* CONFIG_USERSPACE */
314 
315 
316 #if defined(CONFIG_USE_SWITCH)
317 	/* When using __switch() a few previously arch-specific items
318 	 * become part of the core OS
319 	 */
320 
321 	/** z_swap() return value */
322 	int swap_retval;
323 
324 	/** Context handle returned via arch_switch() */
325 	void *switch_handle;
326 #endif
327 	/** resource pool */
328 	struct k_heap *resource_pool;
329 
330 #if defined(CONFIG_THREAD_LOCAL_STORAGE)
331 	/* Pointer to arch-specific TLS area */
332 	uintptr_t tls;
333 #endif /* CONFIG_THREAD_LOCAL_STORAGE */
334 
335 #ifdef CONFIG_DEMAND_PAGING_THREAD_STATS
336 	/** Paging statistics */
337 	struct k_mem_paging_stats_t paging_stats;
338 #endif
339 
340 #ifdef CONFIG_PIPES
341 	/** Pipe descriptor used with blocking k_pipe operations */
342 	struct _pipe_desc pipe_desc;
343 #endif
344 
345 	/** arch-specifics: must always be at the end */
346 	struct _thread_arch arch;
347 };
348 
349 typedef struct k_thread _thread_t;
350 typedef struct k_thread *k_tid_t;
351 
/* Internal (z_-prefixed) kernel entry points; not part of the public API.
 * NOTE(review): semantics inferred from names -- confirm against kernel
 * sources before relying on these descriptions.
 */
/* initialize kernel state for CPU 'id' */
void z_init_cpu(int id);
/* handler invoked on receipt of a scheduling IPI */
void z_sched_ipi(void);
/* bring secondary CPU 'id' online (SMP) */
void z_smp_start_cpu(int id);
355 
356 #endif
357