/*
 * Copyright (c) 2016, Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_INCLUDE_KERNEL_THREAD_H_
#define ZEPHYR_INCLUDE_KERNEL_THREAD_H_

#ifdef CONFIG_DEMAND_PAGING_THREAD_STATS
#include <zephyr/kernel/mm/demand_paging.h>
#endif /* CONFIG_DEMAND_PAGING_THREAD_STATS */

#include <zephyr/kernel/stats.h>
#include <zephyr/arch/arch_interface.h>

/**
 * @typedef k_thread_entry_t
 * @brief Thread entry point function type.
 *
 * A thread's entry point function is invoked when the thread starts executing.
 * Up to 3 argument values can be passed to the function.
 *
 * The thread terminates execution permanently if the entry point function
 * returns. The thread is responsible for releasing any shared resources
 * it may own (such as mutexes and dynamically allocated memory), prior to
 * returning.
 *
 * @param p1 First argument.
 * @param p2 Second argument.
 * @param p3 Third argument.
 */

#ifdef CONFIG_THREAD_MONITOR
struct __thread_entry {
        k_thread_entry_t pEntry;
        void *parameter1;
        void *parameter2;
        void *parameter3;
};
#endif /* CONFIG_THREAD_MONITOR */

struct k_thread;

/*
 * This _pipe_desc structure is used by the pipes kernel module when
 * CONFIG_PIPES has been selected.
 */
struct _pipe_desc {
        sys_dnode_t node;
        unsigned char *buffer;           /* Position in src/dest buffer */
        size_t bytes_to_xfer;            /* # bytes left to transfer */
        struct k_thread *thread;         /* Back pointer to pended thread */
};

/* can be used for creating 'dummy' threads, e.g. for pending on objects */
struct _thread_base {

        /* this thread's entry in a ready/wait queue */
        union {
                sys_dnode_t qnode_dlist;
                struct rbnode qnode_rb;
        };

        /* wait queue on which the thread is pended (needed only for
         * trees, not dumb lists)
         */
        _wait_q_t *pended_on;

        /* user facing 'thread options'; values defined in include/kernel.h */
        uint8_t user_options;

        /* thread state */
        uint8_t thread_state;

        /*
         * scheduler lock count and thread priority
         *
         * These two fields control the preemptibility of a thread.
         *
         * When the scheduler is locked, sched_locked is decremented, which
         * means that the scheduler is locked for values from 0xff to 0x01.
         * A thread is cooperative if its priority is negative, thus 0x80 to
         * 0xff when the value is viewed as unsigned.
         *
         * By putting them end-to-end, this means that a thread is
         * non-preemptible if the bundled value is greater than or equal to
         * 0x0080.
         */
        union {
                struct {
#ifdef CONFIG_BIG_ENDIAN
                        uint8_t sched_locked;
                        int8_t prio;
#else /* Little Endian */
                        int8_t prio;
                        uint8_t sched_locked;
#endif /* CONFIG_BIG_ENDIAN */
                };
                uint16_t preempt;
        };
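
        /*
         * Illustrative sketch (not part of this header): because the
         * endianness-specific field ordering above always places prio in the
         * low byte and sched_locked in the high byte of 'preempt', a single
         * unsigned comparison covers both conditions. A hypothetical helper
         * might read:
         *
         *   static inline bool example_is_preemptible(struct _thread_base *b)
         *   {
         *           return b->preempt < 0x0080;
         *   }
         *
         * i.e. the thread is preemptible only when sched_locked == 0 and
         * prio >= 0. The scheduler's actual test may differ in detail.
         */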

#ifdef CONFIG_SCHED_DEADLINE
        int prio_deadline;
#endif /* CONFIG_SCHED_DEADLINE */

#if defined(CONFIG_SCHED_SCALABLE) || defined(CONFIG_WAITQ_SCALABLE)
        uint32_t order_key;
#endif

#ifdef CONFIG_SMP
        /* True for the per-CPU idle threads */
        uint8_t is_idle;

        /* CPU index on which thread was last run */
        uint8_t cpu;

        /* Recursive count of irq_lock() calls */
        uint8_t global_lock_count;

#endif /* CONFIG_SMP */

#ifdef CONFIG_SCHED_CPU_MASK
        /* "May run on" bits for each CPU */
#if CONFIG_MP_MAX_NUM_CPUS <= 8
        uint8_t cpu_mask;
#else
        uint16_t cpu_mask;
#endif /* CONFIG_MP_MAX_NUM_CPUS */
#endif /* CONFIG_SCHED_CPU_MASK */

        /* data returned by APIs */
        void *swap_data;

#ifdef CONFIG_SYS_CLOCK_EXISTS
        /* this thread's entry in a timeout queue */
        struct _timeout timeout;
#endif /* CONFIG_SYS_CLOCK_EXISTS */

#ifdef CONFIG_TIMESLICE_PER_THREAD
        int32_t slice_ticks;
        k_thread_timeslice_fn_t slice_expired;
        void *slice_data;
#endif /* CONFIG_TIMESLICE_PER_THREAD */

#ifdef CONFIG_SCHED_THREAD_USAGE
        struct k_cycle_stats usage; /* Track thread usage statistics */
#endif /* CONFIG_SCHED_THREAD_USAGE */
};

typedef struct _thread_base _thread_base_t;

#if defined(CONFIG_THREAD_STACK_INFO)
/* Contains the stack information of a thread */
struct _thread_stack_info {
        /* Stack start - Represents the start address of the thread-writable
         * stack area.
         */
        uintptr_t start;

        /* Thread-writable stack buffer size. Represents the size of the
         * actual buffer, starting from the 'start' member, that should be
         * writable by the thread. This comprises the thread stack area, any
         * area reserved for local thread data storage, as well as any area
         * left out due to random adjustments applied to the initial thread
         * stack pointer during thread initialization.
         */
        size_t size;

        /* Adjustment value to the size member, removing any storage
         * used for TLS or random stack base offsets. (start + size - delta)
         * is the initial stack pointer for a thread. May be 0.
         */
        size_t delta;

#if defined(CONFIG_THREAD_STACK_MEM_MAPPED)
        struct {
                /** Base address of the memory mapped thread stack */
                k_thread_stack_t *addr;

                /** Size of whole mapped stack object */
                size_t sz;
        } mapped;
#endif /* CONFIG_THREAD_STACK_MEM_MAPPED */
};

typedef struct _thread_stack_info _thread_stack_info_t;
#endif /* CONFIG_THREAD_STACK_INFO */
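
/*
 * Illustrative sketch (a hypothetical helper, not a kernel API): the
 * relationship between the three fields documented above can be written
 * out directly. A thread's initial stack pointer is derived as:
 *
 *   static inline uintptr_t
 *   example_initial_sp(const struct _thread_stack_info *si)
 *   {
 *           return si->start + si->size - si->delta;
 *   }
 *
 * and any stack pointer within the thread-writable area satisfies
 * start <= sp < start + size.
 */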

#if defined(CONFIG_USERSPACE)
struct _mem_domain_info {
        /** memory domain queue node */
        sys_dnode_t mem_domain_q_node;
        /** memory domain of the thread */
        struct k_mem_domain *mem_domain;
};

#endif /* CONFIG_USERSPACE */

#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
struct _thread_userspace_local_data {
#if defined(CONFIG_ERRNO) && !defined(CONFIG_ERRNO_IN_TLS) && !defined(CONFIG_LIBC_ERRNO)
        int errno_var;
#endif /* CONFIG_ERRNO && !CONFIG_ERRNO_IN_TLS && !CONFIG_LIBC_ERRNO */
};
#endif /* CONFIG_THREAD_USERSPACE_LOCAL_DATA */

typedef struct k_thread_runtime_stats {
#ifdef CONFIG_SCHED_THREAD_USAGE
        /*
         * For CPU stats, execution_cycles is the sum of non-idle + idle cycles.
         * For thread stats, execution_cycles = total_cycles.
         */
        uint64_t execution_cycles; /* total # of cycles (cpu: non-idle + idle) */
        uint64_t total_cycles;     /* total # of non-idle cycles */
#endif /* CONFIG_SCHED_THREAD_USAGE */

#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
        /*
         * For threads, the following fields refer to the time spent executing
         * as bounded by when the thread was scheduled in and scheduled out.
         * For CPUs, the same fields refer to the time spent executing
         * non-idle threads as bounded by the idle thread(s).
         */

        uint64_t current_cycles; /* current # of non-idle cycles */
        uint64_t peak_cycles;    /* peak # of non-idle cycles */
        uint64_t average_cycles; /* average # of non-idle cycles */
#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */

#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
        /*
         * This field is always zero for individual threads. It only comes
         * into play when gathering statistics for the CPU. In that case it
         * represents the total number of cycles spent idling.
         */

        uint64_t idle_cycles;
#endif /* CONFIG_SCHED_THREAD_USAGE_ALL */

#if defined(__cplusplus) && !defined(CONFIG_SCHED_THREAD_USAGE) && \
        !defined(CONFIG_SCHED_THREAD_USAGE_ANALYSIS) && !defined(CONFIG_SCHED_THREAD_USAGE_ALL)
        /* If none of the above Kconfig values are defined, this struct will
         * have size 0 in C, which is not allowed in C++ (there it would have
         * size 1). To prevent this mismatch, add a one-byte dummy member when
         * the struct would otherwise be empty.
         */
        uint8_t dummy;
#endif
} k_thread_runtime_stats_t;
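
/*
 * Usage sketch (illustrative; assumes CONFIG_SCHED_THREAD_USAGE is enabled
 * and uses k_thread_runtime_stats_get() as declared in <zephyr/kernel.h>):
 *
 *   k_thread_runtime_stats_t stats;
 *
 *   if (k_thread_runtime_stats_get(k_current_get(), &stats) == 0) {
 *           printk("ran for %llu cycles\n", stats.execution_cycles);
 *   }
 */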

struct z_poller {
        bool is_polling;
        uint8_t mode;
};

/**
 * @ingroup thread_apis
 * Thread Structure
 */
struct k_thread {

        struct _thread_base base;

        /** defined by the architecture, but all archs need these */
        struct _callee_saved callee_saved;

        /** static thread init data */
        void *init_data;

        /** threads waiting in k_thread_join() */
        _wait_q_t join_queue;

#if defined(CONFIG_POLL)
        struct z_poller poller;
#endif /* CONFIG_POLL */

#if defined(CONFIG_EVENTS)
        struct k_thread *next_event_link;

        uint32_t events;
        uint32_t event_options;

        /** true if timeout should not wake the thread */
        bool no_wake_on_timeout;
#endif /* CONFIG_EVENTS */

#if defined(CONFIG_THREAD_MONITOR)
        /** thread entry and parameters description */
        struct __thread_entry entry;

        /** next item in list of all threads */
        struct k_thread *next_thread;
#endif /* CONFIG_THREAD_MONITOR */

#if defined(CONFIG_THREAD_NAME)
        /** Thread name */
        char name[CONFIG_THREAD_MAX_NAME_LEN];
#endif /* CONFIG_THREAD_NAME */

#ifdef CONFIG_THREAD_CUSTOM_DATA
        /** crude thread-local storage */
        void *custom_data;
#endif /* CONFIG_THREAD_CUSTOM_DATA */

#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
        struct _thread_userspace_local_data *userspace_local_data;
#endif /* CONFIG_THREAD_USERSPACE_LOCAL_DATA */

#if defined(CONFIG_ERRNO) && !defined(CONFIG_ERRNO_IN_TLS) && !defined(CONFIG_LIBC_ERRNO)
#ifndef CONFIG_USERSPACE
        /** per-thread errno variable */
        int errno_var;
#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_ERRNO && !CONFIG_ERRNO_IN_TLS && !CONFIG_LIBC_ERRNO */

#if defined(CONFIG_THREAD_STACK_INFO)
        /** Stack Info */
        struct _thread_stack_info stack_info;
#endif /* CONFIG_THREAD_STACK_INFO */

#if defined(CONFIG_USERSPACE)
        /** memory domain info of the thread */
        struct _mem_domain_info mem_domain_info;

        /**
         * Base address of thread stack.
         *
         * If memory mapped stack (CONFIG_THREAD_STACK_MEM_MAPPED)
         * is enabled, this is the physical address of the stack.
         */
        k_thread_stack_t *stack_obj;

        /** current syscall frame pointer */
        void *syscall_frame;
#endif /* CONFIG_USERSPACE */
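
        /*
         * Usage sketch (illustrative; assumes CONFIG_THREAD_NAME and
         * CONFIG_THREAD_CUSTOM_DATA are enabled, and uses the
         * k_thread_name_set() and k_thread_custom_data_set()/get() APIs
         * from <zephyr/kernel.h>; 'my_ctx' is a hypothetical type):
         *
         *   k_thread_name_set(k_current_get(), "worker");
         *   k_thread_custom_data_set(&ctx_storage);
         *   struct my_ctx *ctx = k_thread_custom_data_get();
         */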

#if defined(CONFIG_USE_SWITCH)
        /* When using __switch() a few previously arch-specific items
         * become part of the core OS
         */

        /** z_swap() return value */
        int swap_retval;

        /** Context handle returned via arch_switch() */
        void *switch_handle;
#endif /* CONFIG_USE_SWITCH */
        /** resource pool */
        struct k_heap *resource_pool;

#if defined(CONFIG_THREAD_LOCAL_STORAGE)
        /* Pointer to arch-specific TLS area */
        uintptr_t tls;
#endif /* CONFIG_THREAD_LOCAL_STORAGE */

#ifdef CONFIG_DEMAND_PAGING_THREAD_STATS
        /** Paging statistics */
        struct k_mem_paging_stats_t paging_stats;
#endif /* CONFIG_DEMAND_PAGING_THREAD_STATS */

#ifdef CONFIG_PIPES
        /** Pipe descriptor used with blocking k_pipe operations */
        struct _pipe_desc pipe_desc;
#endif /* CONFIG_PIPES */

#ifdef CONFIG_OBJ_CORE_THREAD
        struct k_obj_core obj_core;
#endif /* CONFIG_OBJ_CORE_THREAD */

#ifdef CONFIG_SMP
        /** threads waiting in k_thread_suspend() */
        _wait_q_t halt_queue;
#endif /* CONFIG_SMP */

        /** arch-specifics: must always be at the end */
        struct _thread_arch arch;
};

typedef struct k_thread _thread_t;
typedef struct k_thread *k_tid_t;

#endif /* ZEPHYR_INCLUDE_KERNEL_THREAD_H_ */
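
/*
 * End-to-end usage sketch (illustrative only; k_thread_create() and
 * k_thread_join() are declared in <zephyr/kernel.h>, and 'my_stack',
 * 'my_thread' and 'worker_entry' are hypothetical names):
 *
 *   K_THREAD_STACK_DEFINE(my_stack, 1024);
 *   static struct k_thread my_thread;
 *
 *   static void worker_entry(void *p1, void *p2, void *p3)
 *   {
 *           // receives up to three arguments, per k_thread_entry_t
 *   }
 *
 *   k_tid_t tid = k_thread_create(&my_thread, my_stack,
 *                                 K_THREAD_STACK_SIZEOF(my_stack),
 *                                 worker_entry, NULL, NULL, NULL,
 *                                 5, 0, K_NO_WAIT);
 *
 *   k_thread_join(tid, K_FOREVER);  // pends on the thread's join_queue
 */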