/*
 * Copyright (c) 2016, Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_INCLUDE_KERNEL_THREAD_H_
#define ZEPHYR_INCLUDE_KERNEL_THREAD_H_

#ifdef CONFIG_DEMAND_PAGING_THREAD_STATS
#include <zephyr/kernel/mm/demand_paging.h>
#endif /* CONFIG_DEMAND_PAGING_THREAD_STATS */

#include <zephyr/kernel/stats.h>
#include <zephyr/arch/arch_interface.h>

/**
 * @typedef k_thread_entry_t
 * @brief Thread entry point function type.
 *
 * A thread's entry point function is invoked when the thread starts executing.
 * Up to 3 argument values can be passed to the function.
 *
 * The thread terminates execution permanently if the entry point function
 * returns. The thread is responsible for releasing any shared resources
 * it may own (such as mutexes and dynamically allocated memory), prior to
 * returning.
 *
 * @param p1 First argument.
 * @param p2 Second argument.
 * @param p3 Third argument.
 */
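/*
 * Illustrative sketch, not part of this header: a minimal entry point
 * and a k_thread_create() call passing the three arguments. The thread
 * and stack objects ("my_thread", "my_stack"), the stack size and the
 * priority are hypothetical values chosen for the example.
 *
 *     K_THREAD_STACK_DEFINE(my_stack, 1024);
 *     struct k_thread my_thread;
 *
 *     static void my_entry(void *p1, void *p2, void *p3)
 *     {
 *             (use p1/p2/p3; release any owned resources before returning)
 *     }
 *
 *     k_tid_t tid = k_thread_create(&my_thread, my_stack,
 *                                   K_THREAD_STACK_SIZEOF(my_stack),
 *                                   my_entry, NULL, NULL, NULL,
 *                                   5, 0, K_NO_WAIT);
 */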
#ifdef CONFIG_THREAD_MONITOR
struct __thread_entry {
	k_thread_entry_t pEntry;
	void *parameter1;
	void *parameter2;
	void *parameter3;
};
#endif /* CONFIG_THREAD_MONITOR */

struct k_thread;

/* can be used for creating 'dummy' threads, e.g. for pending on objects */
struct _thread_base {

	/* this thread's entry in a ready/wait queue */
	union {
		sys_dnode_t qnode_dlist;
		struct rbnode qnode_rb;
	};

	/* wait queue on which the thread is pended (needed only for
	 * trees, not dumb lists)
	 */
	_wait_q_t *pended_on;

	/* user facing 'thread options'; values defined in include/zephyr/kernel.h */
	uint8_t user_options;

	/* thread state */
	uint8_t thread_state;

	/*
	 * scheduler lock count and thread priority
	 *
	 * These two fields control the preemptibility of a thread.
	 *
	 * When the scheduler is locked, sched_locked is decremented, which
	 * means that the scheduler is locked for values from 0xff to 0x01. A
	 * thread is cooperative if its priority is negative, thus 0x80 to
	 * 0xff when the value is viewed as unsigned.
	 *
	 * By putting them end-to-end, this means that a thread is
	 * non-preemptible if the bundled value is greater than or equal to
	 * 0x0080.
	 */
	union {
		struct {
#ifdef CONFIG_BIG_ENDIAN
			uint8_t sched_locked;
			int8_t prio;
#else /* Little Endian */
			int8_t prio;
			uint8_t sched_locked;
#endif /* CONFIG_BIG_ENDIAN */
		};
		uint16_t preempt;
	};
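	/*
	 * Illustrative sketch, not a definition from this header: the
	 * layout above lets the scheduler test preemptibility with a
	 * single unsigned compare on the bundled 16-bit value.
	 * "is_preemptible" is a hypothetical helper name used only to
	 * illustrate the encoding described in the comment above.
	 *
	 *     static inline bool is_preemptible(struct k_thread *thread)
	 *     {
	 *             return thread->base.preempt < 0x0080;
	 *     }
	 */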
#ifdef CONFIG_SCHED_DEADLINE
	int prio_deadline;
#endif /* CONFIG_SCHED_DEADLINE */

#if defined(CONFIG_SCHED_SCALABLE) || defined(CONFIG_WAITQ_SCALABLE)
	uint32_t order_key;
#endif

#ifdef CONFIG_SMP
	/* True for the per-CPU idle threads */
	uint8_t is_idle;

	/* CPU index on which thread was last run */
	uint8_t cpu;

	/* Recursive count of irq_lock() calls */
	uint8_t global_lock_count;

#endif /* CONFIG_SMP */

#ifdef CONFIG_SCHED_CPU_MASK
	/* "May run on" bits for each CPU */
#if CONFIG_MP_MAX_NUM_CPUS <= 8
	uint8_t cpu_mask;
#else
	uint16_t cpu_mask;
#endif /* CONFIG_MP_MAX_NUM_CPUS */
#endif /* CONFIG_SCHED_CPU_MASK */

	/* data returned by APIs */
	void *swap_data;

#ifdef CONFIG_SYS_CLOCK_EXISTS
	/* this thread's entry in a timeout queue */
	struct _timeout timeout;
#endif /* CONFIG_SYS_CLOCK_EXISTS */

#ifdef CONFIG_TIMESLICE_PER_THREAD
	int32_t slice_ticks;
	k_thread_timeslice_fn_t slice_expired;
	void *slice_data;
#endif /* CONFIG_TIMESLICE_PER_THREAD */

#ifdef CONFIG_SCHED_THREAD_USAGE
	struct k_cycle_stats usage; /* Track thread usage statistics */
#endif /* CONFIG_SCHED_THREAD_USAGE */
};

typedef struct _thread_base _thread_base_t;

#if defined(CONFIG_THREAD_STACK_INFO)

#if defined(CONFIG_THREAD_RUNTIME_STACK_SAFETY)
struct _thread_stack_usage {
	size_t unused_threshold; /* Threshold below which to trigger hook */
};
#endif

/* Contains the stack information of a thread */
struct _thread_stack_info {
	/* Stack start - Represents the start address of the thread-writable
	 * stack area.
	 */
	uintptr_t start;

	/* Thread-writable stack buffer size. Represents the size of the actual
	 * buffer, starting from the 'start' member, that should be writable by
	 * the thread. This comprises the thread stack area, any area reserved
	 * for local thread data storage, as well as any area left out due to
	 * random adjustments applied to the initial thread stack pointer during
	 * thread initialization.
	 */
	size_t size;

	/* Adjustment value to the size member, removing any storage
	 * used for TLS or random stack base offsets. (start + size - delta)
	 * is the initial stack pointer for a thread. May be 0.
	 */
	size_t delta;

#if defined(CONFIG_THREAD_STACK_MEM_MAPPED)
	struct {
		/** Base address of the memory mapped thread stack */
		k_thread_stack_t *addr;

		/** Size of whole mapped stack object */
		size_t sz;
	} mapped;
#endif /* CONFIG_THREAD_STACK_MEM_MAPPED */

#if defined(CONFIG_THREAD_RUNTIME_STACK_SAFETY)
	struct _thread_stack_usage usage;
#endif
};

typedef struct _thread_stack_info _thread_stack_info_t;
#endif /* CONFIG_THREAD_STACK_INFO */
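/*
 * Illustrative sketch, not part of this header (assumes
 * CONFIG_THREAD_STACK_INFO=y, and CONFIG_INIT_STACKS=y for the
 * k_thread_stack_space_get() call): recovering the initial stack
 * pointer from the fields above, and querying never-touched stack
 * space for a thread "t".
 *
 *     struct k_thread *t = ...;
 *     uintptr_t init_sp = t->stack_info.start +
 *                         t->stack_info.size - t->stack_info.delta;
 *
 *     size_t unused;
 *     if (k_thread_stack_space_get(t, &unused) == 0) {
 *             ("unused" bytes of the writable area were never written)
 *     }
 */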
#if defined(CONFIG_USERSPACE)
struct _mem_domain_info {
#ifdef CONFIG_MEM_DOMAIN_HAS_THREAD_LIST
	/** memory domain queue node */
	sys_dnode_t thread_mem_domain_node;
#endif /* CONFIG_MEM_DOMAIN_HAS_THREAD_LIST */
	/** memory domain of the thread */
	struct k_mem_domain *mem_domain;
};

typedef struct _mem_domain_info _mem_domain_info_t;
#endif /* CONFIG_USERSPACE */

#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
struct _thread_userspace_local_data {
#if defined(CONFIG_ERRNO) && !defined(CONFIG_ERRNO_IN_TLS) && !defined(CONFIG_LIBC_ERRNO)
	int errno_var;
#endif /* CONFIG_ERRNO && !CONFIG_ERRNO_IN_TLS && !CONFIG_LIBC_ERRNO */
};
#endif /* CONFIG_THREAD_USERSPACE_LOCAL_DATA */

typedef struct k_thread_runtime_stats {
#ifdef CONFIG_SCHED_THREAD_USAGE
	/*
	 * For CPU stats, execution_cycles is the sum of non-idle + idle cycles.
	 * For thread stats, execution_cycles = total_cycles.
	 */
	uint64_t execution_cycles; /* total # of cycles (cpu: non-idle + idle) */
	uint64_t total_cycles;     /* total # of non-idle cycles */
#endif /* CONFIG_SCHED_THREAD_USAGE */

#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
	/*
	 * For threads, the following fields refer to the time spent executing
	 * as bounded by when the thread was scheduled in and scheduled out.
	 * For CPUs, the same fields refer to the time spent executing
	 * non-idle threads as bounded by the idle thread(s).
	 */

	uint64_t current_cycles; /* current # of non-idle cycles */
	uint64_t peak_cycles;    /* peak # of non-idle cycles */
	uint64_t average_cycles; /* average # of non-idle cycles */
#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */

#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
	/*
	 * This field is always zero for individual threads. It only comes
	 * into play when gathering statistics for the CPU. In that case it
	 * represents the total number of cycles spent idling.
	 */

	uint64_t idle_cycles;
#endif /* CONFIG_SCHED_THREAD_USAGE_ALL */

#if defined(__cplusplus) && !defined(CONFIG_SCHED_THREAD_USAGE) && \
	!defined(CONFIG_SCHED_THREAD_USAGE_ANALYSIS) && !defined(CONFIG_SCHED_THREAD_USAGE_ALL)
	/* If none of the above Kconfig options is enabled, this struct would
	 * have size 0 in C, which is not allowed in C++ (where an empty struct
	 * has size 1). To prevent this, add a 1-byte dummy member when the
	 * struct would otherwise be empty.
	 */
	uint8_t dummy;
#endif
} k_thread_runtime_stats_t;
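/*
 * Illustrative sketch, not part of this header (assumes
 * CONFIG_SCHED_THREAD_USAGE=y; the all-CPUs query additionally needs
 * CONFIG_SCHED_THREAD_USAGE_ALL=y): reading the statistics above for
 * one thread "tid", then for the whole system.
 *
 *     k_thread_runtime_stats_t stats;
 *
 *     k_thread_runtime_stats_get(tid, &stats);   (one thread)
 *     k_thread_runtime_stats_all_get(&stats);    (all CPUs)
 *
 * For the all-CPUs query, execution_cycles covers idle plus non-idle
 * time, while total_cycles counts only non-idle cycles.
 */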
struct z_poller {
	bool is_polling;
	uint8_t mode;
};

/**
 * @ingroup thread_apis
 * Thread Structure
 */
struct k_thread {

	struct _thread_base base;

	/** defined by the architecture, but all archs need these */
	struct _callee_saved callee_saved;

	/** static thread init data */
	void *init_data;

	/** threads waiting in k_thread_join() */
	_wait_q_t join_queue;

#if defined(CONFIG_POLL)
	struct z_poller poller;
#endif /* CONFIG_POLL */

#if defined(CONFIG_EVENTS)
	struct k_thread *next_event_link;

	uint32_t events; /* dual purpose - wait on and then received */
	uint32_t event_options;

	/** true if timeout should not wake the thread */
	bool no_wake_on_timeout;
#endif /* CONFIG_EVENTS */

#if defined(CONFIG_THREAD_MONITOR)
	/** thread entry and parameters description */
	struct __thread_entry entry;

	/** next item in list of all threads */
	struct k_thread *next_thread;
#endif /* CONFIG_THREAD_MONITOR */

#if defined(CONFIG_THREAD_NAME)
	/** Thread name */
	char name[CONFIG_THREAD_MAX_NAME_LEN];
#endif /* CONFIG_THREAD_NAME */

#ifdef CONFIG_THREAD_CUSTOM_DATA
	/** crude thread-local storage */
	void *custom_data;
#endif /* CONFIG_THREAD_CUSTOM_DATA */

#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
	struct _thread_userspace_local_data *userspace_local_data;
#endif /* CONFIG_THREAD_USERSPACE_LOCAL_DATA */

#if defined(CONFIG_ERRNO) && !defined(CONFIG_ERRNO_IN_TLS) && !defined(CONFIG_LIBC_ERRNO)
#ifndef CONFIG_USERSPACE
	/** per-thread errno variable */
	int errno_var;
#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_ERRNO && !CONFIG_ERRNO_IN_TLS && !CONFIG_LIBC_ERRNO */

#if defined(CONFIG_THREAD_STACK_INFO)
	/** Stack Info */
	struct _thread_stack_info stack_info;
#endif /* CONFIG_THREAD_STACK_INFO */

#if defined(CONFIG_USERSPACE)
	/** memory domain info of the thread */
	struct _mem_domain_info mem_domain_info;

	/**
	 * Base address of thread stack.
	 *
	 * If memory mapped stack (CONFIG_THREAD_STACK_MEM_MAPPED)
	 * is enabled, this is the physical address of the stack.
	 */
	k_thread_stack_t *stack_obj;

	/** current syscall frame pointer */
	void *syscall_frame;
#endif /* CONFIG_USERSPACE */

#if defined(CONFIG_USE_SWITCH)
	/* When using __switch() a few previously arch-specific items
	 * become part of the core OS
	 */

	/** z_swap() return value */
	int swap_retval;

	/** Context handle returned via arch_switch() */
	void *switch_handle;
#endif /* CONFIG_USE_SWITCH */

	/** resource pool */
	struct k_heap *resource_pool;

#if defined(CONFIG_THREAD_LOCAL_STORAGE)
	/* Pointer to arch-specific TLS area */
	uintptr_t tls;
#endif /* CONFIG_THREAD_LOCAL_STORAGE */

#ifdef CONFIG_DEMAND_PAGING_THREAD_STATS
	/** Paging statistics */
	struct k_mem_paging_stats_t paging_stats;
#endif /* CONFIG_DEMAND_PAGING_THREAD_STATS */

#ifdef CONFIG_OBJ_CORE_THREAD
	struct k_obj_core obj_core;
#endif /* CONFIG_OBJ_CORE_THREAD */

#ifdef CONFIG_SMP
	/** threads waiting in k_thread_suspend() */
	_wait_q_t halt_queue;
#endif /* CONFIG_SMP */

	/** arch-specifics: must always be at the end */
	struct _thread_arch arch;
};

typedef struct k_thread _thread_t;
typedef struct k_thread *k_tid_t;

#endif /* ZEPHYR_INCLUDE_KERNEL_THREAD_H_ */