/*
 * Copyright (c) 2019 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Internal kernel APIs implemented at the architecture layer.
 *
 * Not all architecture-specific defines are here; APIs that are used
 * by public functions and macros are defined in
 * include/zephyr/arch/arch_interface.h.
 *
 * For all inline functions prototyped here, the implementation is expected
 * to be provided by arch/ARCH/include/kernel_arch_func.h
 */
#ifndef ZEPHYR_KERNEL_INCLUDE_KERNEL_ARCH_INTERFACE_H_
#define ZEPHYR_KERNEL_INCLUDE_KERNEL_ARCH_INTERFACE_H_

#include <zephyr/kernel.h>
#include <zephyr/arch/arch_interface.h>

#ifndef _ASMLANGUAGE

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @defgroup arch-timing Architecture timing APIs
 * @ingroup arch-interface
 * @{
 */
#ifdef CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT
/**
 * Architecture-specific implementation of busy-waiting
 *
 * @param usec_to_wait Wait period, in microseconds
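 *
 * One possible implementation (a sketch only; ports commonly substitute a
 * dedicated platform timer) spins on the free-running cycle counter. This
 * assumes k_cycle_get_32() and sys_clock_hw_cycles_per_sec() are usable at
 * the point of the call:
 *
 *	void arch_busy_wait(uint32_t usec_to_wait)
 *	{
 *		uint32_t start = k_cycle_get_32();
 *		uint64_t cycles = (uint64_t)usec_to_wait *
 *				  sys_clock_hw_cycles_per_sec() / USEC_PER_SEC;
 *
 *		// Unsigned subtraction handles counter wrap-around
 *		while ((uint64_t)(k_cycle_get_32() - start) < cycles) {
 *			arch_nop();
 *		}
 *	}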
 */
void arch_busy_wait(uint32_t usec_to_wait);
#endif /* CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT */

/** @} */

/**
 * @defgroup arch-threads Architecture thread APIs
 * @ingroup arch-interface
 * @{
 */

/** Handle arch-specific logic for setting up new threads
 *
 * The stack and arch-specific thread state variables must be set up
 * such that a later attempt to switch to this thread will succeed
 * and we will enter z_thread_entry with the requested thread and
 * arguments as its parameters.
 *
 * At some point in this function's implementation, z_setup_new_thread() must
 * be called with the true bounds of the available stack buffer within the
 * thread's stack object.
 *
 * The provided stack pointer is guaranteed to be properly aligned with respect
 * to the CPU and ABI requirements. There may be space reserved between the
 * stack pointer and the bounds of the stack buffer for initial stack pointer
 * randomization and thread-local storage.
 *
 * Fields in thread->base will be initialized when this is called.
 *
 * @param thread Pointer to uninitialized struct k_thread
 * @param stack Pointer to the stack object
 * @param stack_ptr Aligned initial stack pointer
 * @param entry Thread entry function
 * @param p1 1st entry point parameter
 * @param p2 2nd entry point parameter
 * @param p3 3rd entry point parameter
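 *
 * A typical implementation (illustrative sketch only; the frame layout and
 * the struct arch_frame type with its pc/a0..a3 fields are hypothetical and
 * assume a full-descending stack) builds an initial frame that "resumes"
 * into z_thread_entry:
 *
 *	void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 *			     char *stack_ptr, k_thread_entry_t entry,
 *			     void *p1, void *p2, void *p3)
 *	{
 *		// Carve an initial frame just below the aligned stack pointer
 *		struct arch_frame *frame = (struct arch_frame *)stack_ptr - 1;
 *
 *		frame->pc = (uintptr_t)z_thread_entry;
 *		frame->a0 = (uintptr_t)entry;	// z_thread_entry's arguments
 *		frame->a1 = (uintptr_t)p1;
 *		frame->a2 = (uintptr_t)p2;
 *		frame->a3 = (uintptr_t)p3;
 *		thread->switch_handle = frame;	// see arch_switch() below
 *	}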
 */
void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
		     char *stack_ptr, k_thread_entry_t entry,
		     void *p1, void *p2, void *p3);

#ifdef CONFIG_USE_SWITCH
/** Cooperative context switch primitive
 *
 * The action of arch_switch() should be to switch to a new context
 * passed in the first argument, and save a pointer to the current
 * context into the address passed in the second argument.
 *
 * The actual type and interpretation of the switch handle is specified
 * by the architecture. It is the same data structure stored in the
 * "switch_handle" field of a newly-created thread in arch_new_thread(),
 * and passed to the kernel as the "interrupted" argument to
 * z_get_next_switch_handle().
 *
 * Note that on SMP systems, the kernel uses the store through the
 * second pointer as a synchronization point to detect when a thread
 * context is completely saved (so another CPU can know when it is
 * safe to switch). This store must be done AFTER all relevant state
 * is saved, and must include whatever memory barriers or cache
 * management code is required to be sure another CPU will see the
 * result correctly.
 *
 * The simplest implementation of arch_switch() is generally to push
 * state onto the thread stack and use the resulting stack pointer as the
 * switch handle. Some architectures may instead decide to use a pointer
 * into the thread struct as the "switch handle" type. These can legally
 * assume that the second argument to arch_switch() is the address of the
 * switch_handle field of struct k_thread and can use an offset on
 * this value to find other parts of the thread struct. For example a (C
 * pseudocode) implementation of arch_switch() might look like:
 *
 *	void arch_switch(void *switch_to, void **switched_from)
 *	{
 *		struct k_thread *new = switch_to;
 *		struct k_thread *old = CONTAINER_OF(switched_from,
 *						    struct k_thread,
 *						    switch_handle);
 *
 *		// save old context...
 *		*switched_from = old;
 *		// restore new context...
 *	}
 *
 * Note that the kernel manages the switch_handle field for
 * synchronization as described above. So it is not legal for
 * architecture code to assume that it has any particular value at any
 * other time. In particular it is not legal to read the field from the
 * address passed in the second argument.
 *
 * @param switch_to Incoming thread's switch handle
 * @param switched_from Pointer to outgoing thread's switch handle storage
 *        location, which must be updated.
 */
static inline void arch_switch(void *switch_to, void **switched_from);
#endif /* CONFIG_USE_SWITCH */

#if !defined(CONFIG_USE_SWITCH) || defined(__DOXYGEN__)
#if defined(__DOXYGEN__)
/**
 * Cooperatively context switch
 *
 * Must be called with interrupts locked with the provided key.
 * This is the older-style context switching method, which is incompatible
 * with SMP. New arch ports, either SMP or UP, are encouraged to implement
 * arch_switch() instead.
 *
 * @param key Interrupt locking key
 * @return If woken from blocking on some kernel object, the result of that
 *         blocking operation.
 */
int arch_swap(unsigned int key);
#endif /* __DOXYGEN__ */

/**
 * Set the return value for the specified thread.
 *
 * It is assumed that the specified @a thread is pending.
 *
 * @param thread Pointer to thread object
 * @param value value to set as return value
 */
static ALWAYS_INLINE void
arch_thread_return_value_set(struct k_thread *thread, unsigned int value);
#endif /* !CONFIG_USE_SWITCH || __DOXYGEN__ */

#ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
/**
 * Custom logic for entering main thread context at early boot
 *
 * Used by architectures where the typical trick of setting up a dummy thread
 * in early boot context to "switch out" of isn't workable.
 *
 * @param main_thread main thread object
 * @param stack_ptr Initial stack pointer
 * @param _main Entry point for application main function.
 */
void arch_switch_to_main_thread(struct k_thread *main_thread, char *stack_ptr,
				k_thread_entry_t _main);
#endif /* CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN */

/**
 * @brief Save coprocessor states on an IPI
 *
 * This function, invoked from the IPI handler, supports cross-CPU lazy
 * context switching. It saves the relevant coprocessor context(s) before
 * signalling the waiting CPU that it has finished.
 */
void arch_ipi_lazy_coprocessors_save(void);

#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
/**
 * @brief Disable floating point context preservation
 *
 * The function is used to disable the preservation of floating
 * point context information for a particular thread.
 *
 * @note For the ARM architecture, disabling floating point preservation may
 * only be requested for the current thread and cannot be requested in ISRs.
 *
 * @retval 0 On success.
 * @retval -EINVAL If the floating point disabling could not be performed.
 * @retval -ENOTSUP If the operation is not supported
 */
int arch_float_disable(struct k_thread *thread);

/**
 * @brief Enable floating point context preservation
 *
 * The function is used to enable the preservation of floating
 * point context information for a particular thread.
 * This API depends on each architecture implementation. If the architecture
 * does not support enabling, this API will always fail.
 *
 * The @a options parameter indicates which floating point register sets will
 * be used by the specified thread. Currently it is used by x86 only.
 *
 * @param thread ID of thread.
 * @param options architecture dependent options
 *
 * @retval 0 On success.
 * @retval -EINVAL If the floating point enabling could not be performed.
 * @retval -ENOTSUP If the operation is not supported
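 *
 * For example, a caller that wants FP preservation for a thread but can
 * tolerate architectures without runtime support might do the following
 * (illustrative only; the options value is architecture-dependent):
 *
 *	int ret = arch_float_enable(thread, 0);
 *
 *	if (ret == -ENOTSUP) {
 *		// Architecture cannot toggle FP preservation at runtime;
 *		// fall back to the build-time CONFIG_FPU_SHARING behavior.
 *	}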
 */
int arch_float_enable(struct k_thread *thread, unsigned int options);
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */

/**
 * @brief Disable coprocessor context preservation
 *
 * This function serves as a catchall for disabling the preservation of
 * coprocessor context information when aborting a thread.
 *
 * @param thread Pointer to thread object
 */
int arch_coprocessors_disable(struct k_thread *thread);

#if defined(CONFIG_USERSPACE) && defined(CONFIG_ARCH_HAS_THREAD_PRIV_STACK_SPACE_GET)
/**
 * @brief Obtain privileged stack usage information for the specified thread
 *
 * Must be called under supervisor mode.
 *
 * Some hardware may prevent inspection of a stack buffer currently in use.
 * If this API is called from supervisor mode, on the currently running thread,
 * on a platform which selects @kconfig{CONFIG_NO_UNUSED_STACK_INSPECTION}, an
 * error will be generated.
 *
 * @param[in] thread Thread to inspect stack information
 * @param[out] stack_size Filled in with the size of the stack space of
 *             the target thread in bytes.
 * @param[out] unused_ptr Filled in with the unused stack space of
 *             the target thread in bytes.
 *
 * @retval 0 on success
 * @retval -EBADF Bad thread object
 * @retval -EPERM No permissions on thread object
 * @retval -ENOTSUP Forbidden by hardware policy
 * @retval -EINVAL Thread is uninitialized or exited or not a user thread
 * @retval -EFAULT Bad memory address for unused_ptr
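 *
 * For example, a high-water-mark check might look like this (illustrative
 * sketch only):
 *
 *	size_t size, unused;
 *
 *	if (arch_thread_priv_stack_space_get(thread, &size, &unused) == 0) {
 *		// (size - unused) bytes of the privileged stack have been
 *		// touched at some point in the thread's execution.
 *	}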
 */
int arch_thread_priv_stack_space_get(const struct k_thread *thread, size_t *stack_size,
				     size_t *unused_ptr);
#endif /* CONFIG_USERSPACE && CONFIG_ARCH_HAS_THREAD_PRIV_STACK_SPACE_GET */

/** @} */

/**
 * @defgroup arch-pm Architecture-specific power management APIs
 * @ingroup arch-interface
 * @{
 */
/** Halt the system, optionally propagating a reason code */
FUNC_NORETURN void arch_system_halt(unsigned int reason);

/** @} */


/**
 * @defgroup arch-irq Architecture-specific IRQ APIs
 * @ingroup arch-interface
 * @{
 */

/**
 * Test if the current context is in interrupt context
 *
 * XXX: This is inconsistently handled among arches wrt exception context
 * See: #17656
 *
 * @return true if we are in interrupt context
 */
static inline bool arch_is_in_isr(void);

/** @} */

/**
 * @defgroup arch-mmu Architecture-specific memory-mapping APIs
 * @ingroup arch-interface
 * @{
 */

/**
 * Map physical memory into the virtual address space
 *
 * This is a low-level interface to mapping pages into the address space.
 * Behavior when providing unaligned addresses/sizes is undefined; these
 * are assumed to be aligned to CONFIG_MMU_PAGE_SIZE.
 *
 * The core kernel handles all management of the virtual address space;
 * by the time we invoke this function, we know exactly where this mapping
 * will be established. If the page tables already had mappings installed
 * for the virtual memory region, these will be overwritten.
 *
 * If the target architecture supports multiple page sizes, currently
 * only the smallest page size will be used.
 *
 * The memory range itself is never accessed by this operation.
 *
 * This API must be safe to call in ISRs or exception handlers. Calls
 * to this API are assumed to be serialized, and indeed all usage will
 * originate from kernel/mm.c which handles virtual memory management.
 *
 * Architectures are expected to pre-allocate page tables for the entire
 * address space, as defined by CONFIG_KERNEL_VM_BASE and
 * CONFIG_KERNEL_VM_SIZE. This operation should never require any kind of
 * allocation for paging structures.
 *
 * Validation of arguments should be done via assertions.
 *
 * This API is part of infrastructure still under development and may
 * change.
 *
 * @param virt Page-aligned destination virtual address to map
 * @param phys Page-aligned source physical address to map
 * @param size Page-aligned size of the mapped memory region in bytes
 * @param flags Caching, access and control flags, see K_MAP_* macros
 */
void arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags);

/**
 * Remove mappings for a provided virtual address range
 *
 * This is a low-level interface for un-mapping pages from the address space.
 * When this completes, the relevant page table entries will be updated as
 * if no mapping was ever made for that memory range. No previous context
 * needs to be preserved. This function must update mappings in all active
 * page tables.
 *
 * Behavior when providing unaligned addresses/sizes is undefined; these
 * are assumed to be aligned to CONFIG_MMU_PAGE_SIZE.
 *
 * Behavior when providing an address range that is not already mapped is
 * undefined.
 *
 * This function should never require memory allocations for paging structures,
 * and it is not necessary to free any paging structures. Empty page tables
 * due to all contained entries being un-mapped may remain in place.
 *
 * Implementations must invalidate TLBs as necessary.
 *
 * This API is part of infrastructure still under development and may change.
 *
 * @param addr Page-aligned base virtual address to un-map
 * @param size Page-aligned region size
 */
void arch_mem_unmap(void *addr, size_t size);

/**
 * Get the mapped physical memory address from virtual address.
 *
 * This function only needs to query the current set of page tables:
 * if multiple page tables are in use, the information it reports must be
 * common to all of them, so it is unnecessary to iterate over them all.
 *
 * Unless otherwise specified, virtual pages have the same mappings
 * across all page tables. Calling this function on data pages that are
 * exceptions to this rule (such as the scratch page) is undefined behavior.
 * Just check the currently installed page tables and return the information
 * found there.
 *
 * @param virt Page-aligned virtual address
 * @param[out] phys Mapped physical address (can be NULL if only checking
 *             if virtual address is mapped)
 *
 * @retval 0 if mapping is found and valid
 * @retval -EFAULT if virtual address is not mapped
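 *
 * For example (illustrative sketch only):
 *
 *	uintptr_t phys;
 *
 *	if (arch_page_phys_get(virt, &phys) == 0) {
 *		// virt is mapped at physical address phys
 *	} else {
 *		// -EFAULT: virt is not mapped in the current page tables
 *	}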
 */
int arch_page_phys_get(void *virt, uintptr_t *phys);

/**
 * Update page frame database with reserved pages
 *
 * Some page frames within system RAM may not be available for use. A good
 * example of this is reserved regions in the first megabyte on PC-like systems.
 *
 * Implementations of this function should mark all relevant entries in
 * k_mem_page_frames with K_PAGE_FRAME_RESERVED. This function is called at
 * early system initialization with mm_lock held.
 */
void arch_reserved_pages_update(void);

/**
 * Update all page tables for a paged-out data page
 *
 * This function:
 * - Sets the data page virtual address to trigger a fault if accessed that
 *   can be distinguished from access violations or un-mapped pages.
 * - Saves the provided location value so that it can be retrieved for that
 *   data page in the page fault handler.
 * - The location value semantics are undefined here, but the value will
 *   always be page-aligned. It could be 0.
 *
 * If multiple page tables are in use, this must update all page tables.
 * This function is called with interrupts locked.
 *
 * Calling this function on data pages which are already paged out is
 * undefined behavior.
 *
 * This API is part of infrastructure still under development and may change.
 */
void arch_mem_page_out(void *addr, uintptr_t location);

/**
 * Update all page tables for a paged-in data page
 *
 * This function:
 * - Maps the specified virtual data page address to the provided physical
 *   page frame address, such that future memory accesses will function as
 *   expected. Access and caching attributes are undisturbed.
 * - Clears any accounting for "accessed" and "dirty" states.
 *
 * If multiple page tables are in use, this must update all page tables.
 * This function is called with interrupts locked.
 *
 * Calling this function on data pages which are already paged in is
 * undefined behavior.
 *
 * This API is part of infrastructure still under development and may change.
 */
void arch_mem_page_in(void *addr, uintptr_t phys);

/**
 * Update current page tables for a temporary mapping
 *
 * Map a physical page frame address to a special virtual address
 * K_MEM_SCRATCH_PAGE, with read/write access to supervisor mode, such that
 * when this function returns, the calling context can read/write the page
 * frame's contents from the K_MEM_SCRATCH_PAGE address.
 *
 * This mapping only needs to be done on the current set of page tables,
 * as it is only used for a short period of time exclusively by the caller.
 * This function is called with interrupts locked.
 *
 * This API is part of infrastructure still under development and may change.
 */
void arch_mem_scratch(uintptr_t phys);

/**
 * Status of a particular page location.
 */
enum arch_page_location {
	/** The page has been evicted to the backing store. */
	ARCH_PAGE_LOCATION_PAGED_OUT,

	/** The page is resident in memory. */
	ARCH_PAGE_LOCATION_PAGED_IN,

	/** The page is not mapped. */
	ARCH_PAGE_LOCATION_BAD
};

/**
 * Fetch location information about a page at a particular address
 *
 * This function only needs to query the current set of page tables:
 * if multiple page tables are in use, the information it reports must be
 * common to all of them, so it is unnecessary to iterate over them all.
 * This may allow certain types of optimizations (such as reverse page
 * table mapping on x86).
 *
 * This function is called with interrupts locked, so that the reported
 * information can't become stale while decisions are being made based on it.
 *
 * Unless otherwise specified, virtual data pages have the same mappings
 * across all page tables. Calling this function on data pages that are
 * exceptions to this rule (such as the scratch page) is undefined behavior.
 * Just check the currently installed page tables and return the information
 * found there.
 *
 * @param addr Virtual data page address that took the page fault
 * @param[out] location In the case of ARCH_PAGE_LOCATION_PAGED_OUT, the
 *             backing store location value used to retrieve the data page.
 *             In the case of ARCH_PAGE_LOCATION_PAGED_IN, the physical
 *             address the page is mapped to.
 * @retval ARCH_PAGE_LOCATION_PAGED_OUT The page was evicted to the backing store.
 * @retval ARCH_PAGE_LOCATION_PAGED_IN The data page is resident in memory.
 * @retval ARCH_PAGE_LOCATION_BAD The page is un-mapped or otherwise has had
 *         invalid access
 */
enum arch_page_location arch_page_location_get(void *addr, uintptr_t *location);

/**
 * @def ARCH_DATA_PAGE_ACCESSED
 *
 * Bit indicating the data page was accessed since the value was last cleared.
 *
 * Used by marking eviction algorithms. Safe to set this if uncertain.
 *
 * This bit is undefined if ARCH_DATA_PAGE_LOADED is not set.
 */

/**
 * @def ARCH_DATA_PAGE_DIRTY
 *
 * Bit indicating the data page, if evicted, will need to be paged out.
 *
 * Set if the data page was modified since it was last paged out, or if
 * it has never been paged out before. Safe to set this if uncertain.
 *
 * This bit is undefined if ARCH_DATA_PAGE_LOADED is not set.
 */

/**
 * @def ARCH_DATA_PAGE_LOADED
 *
 * Bit indicating that the data page is loaded into a physical page frame.
 *
 * If un-set, the data page is paged out or not mapped.
 */

/**
 * @def ARCH_DATA_PAGE_NOT_MAPPED
 *
 * If ARCH_DATA_PAGE_LOADED is un-set, this will indicate that the page
 * is not mapped at all. This bit is undefined if ARCH_DATA_PAGE_LOADED is set.
 */

/**
 * Retrieve page characteristics from the page table(s)
 *
 * The architecture is responsible for maintaining "accessed" and "dirty"
 * states of data pages to support marking eviction algorithms. This can
 * either be directly supported by hardware or emulated by modifying
 * protection policy to generate faults on reads or writes. In all cases
 * the architecture must maintain this information in some way.
 *
 * For the provided virtual address, report the logical OR of the accessed
 * and dirty states for the relevant entries in all active page tables in
 * the system if the page is mapped and not paged out.
 *
 * If clear_accessed is true, the ARCH_DATA_PAGE_ACCESSED flag will be reset.
 * This function will report its prior state. If multiple page tables are in
 * use, this function clears accessed state in all of them.
 *
 * This function is called with interrupts locked, so that the reported
 * information can't become stale while decisions are being made based on it.
 *
 * The return value may have other bits set which the caller must ignore.
 *
 * Clearing accessed state for data pages that are not ARCH_DATA_PAGE_LOADED
 * is undefined behavior.
 *
 * ARCH_DATA_PAGE_DIRTY and ARCH_DATA_PAGE_ACCESSED bits in the return value
 * are only significant if ARCH_DATA_PAGE_LOADED is set, otherwise ignore
 * them.
 *
 * ARCH_DATA_PAGE_NOT_MAPPED bit in the return value is only significant
 * if ARCH_DATA_PAGE_LOADED is un-set, otherwise ignore it.
 *
 * Unless otherwise specified, virtual data pages have the same mappings
 * across all page tables. Calling this function on data pages that are
 * exceptions to this rule (such as the scratch page) is undefined behavior.
 *
 * This API is part of infrastructure still under development and may change.
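 *
 * A clock-style eviction scan might use this function as follows
 * (illustrative sketch only):
 *
 *	uintptr_t location;
 *	uintptr_t flags = arch_page_info_get(addr, &location, true);
 *
 *	if ((flags & ARCH_DATA_PAGE_LOADED) != 0U &&
 *	    (flags & ARCH_DATA_PAGE_ACCESSED) == 0U) {
 *		// Not touched since the previous scan: eviction candidate.
 *		// If ARCH_DATA_PAGE_DIRTY is also set, the page must be
 *		// written to the backing store before being evicted.
 *	}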
 *
 * @param addr Virtual address to look up in page tables
 * @param[out] location If non-NULL, updated with either physical page frame
 *             address or backing store location depending on
 *             ARCH_DATA_PAGE_LOADED state. This is not touched if
 *             ARCH_DATA_PAGE_NOT_MAPPED.
 * @param clear_accessed Whether to clear ARCH_DATA_PAGE_ACCESSED state
 * @return Value with ARCH_DATA_PAGE_* bits set reflecting the data page
 *         configuration
 */
uintptr_t arch_page_info_get(void *addr, uintptr_t *location,
			     bool clear_accessed);

/** @} */

/**
 * @defgroup arch-misc Miscellaneous architecture APIs
 * @ingroup arch-interface
 * @{
 */

/**
 * Early boot console output hook
 *
 * Definition of this function is optional. If implemented, any invocation
 * of printk() (or logging calls with CONFIG_LOG_MODE_MINIMAL, which are
 * backed by printk) will default to sending characters to this function.
 * It is useful for early boot debugging before main serial or console
 * drivers come up.
 *
 * This can be overridden at runtime with __printk_hook_install().
 *
 * The default __weak implementation of this does nothing.
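 *
 * A typical override (illustrative sketch; early_uart_tx() stands in for a
 * hypothetical polled transmit routine) pushes the character to an early
 * UART and returns it:
 *
 *	int arch_printk_char_out(int c)
 *	{
 *		early_uart_tx((unsigned char)c);
 *		return c;
 *	}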
 *
 * @param c Character to print
 * @return The character printed
 */
int arch_printk_char_out(int c);

#ifdef CONFIG_ARCH_HAS_THREAD_NAME_HOOK
/**
 * Set thread name hook
 *
 * If implemented, any invocation of a function setting a thread name
 * will invoke this function.
 *
 * @param thread Pointer to thread object
 * @param str The thread name
 *
 * @retval 0 On success.
 * @retval -EAGAIN If the operation could not be performed.
 */
int arch_thread_name_set(struct k_thread *thread, const char *str);
#endif /* CONFIG_ARCH_HAS_THREAD_NAME_HOOK */

/**
 * Architecture-specific kernel initialization hook
 *
 * This function is invoked near the top of z_cstart, for additional
 * architecture-specific setup before the rest of the kernel is brought up.
 */
static inline void arch_kernel_init(void);

/** Do nothing and return. Yawn. */
static inline void arch_nop(void);

/** @} */

/**
 * @defgroup arch-coredump Architecture-specific core dump APIs
 * @ingroup arch-interface
 * @{
 */

/**
 * @brief Architecture-specific handling during coredump
 *
 * This dumps architecture-specific information during coredump.
 *
 * @param esf Exception Stack Frame (arch-specific)
 */
void arch_coredump_info_dump(const struct arch_esf *esf);

/**
 * @brief Get the target code specified by the architecture.
 */
uint16_t arch_coredump_tgt_code_get(void);

/**
 * @brief Get the stack pointer of the thread.
 */
uintptr_t arch_coredump_stack_ptr_get(const struct k_thread *thread);

#if defined(CONFIG_USERSPACE) || defined(__DOXYGEN__)

/**
 * @brief Architecture-specific handling of dumping privileged stack
 *
 * This dumps the architecture-specific privileged stack during coredump.
 *
 * @param thread Pointer to thread object
 */
void arch_coredump_priv_stack_dump(struct k_thread *thread);

#endif /* CONFIG_USERSPACE || __DOXYGEN__ */

/** @} */

/**
 * @defgroup arch-tls Architecture-specific Thread Local Storage APIs
 * @ingroup arch-interface
 * @{
 */

/**
 * @brief Set up architecture-specific TLS area in stack
 *
 * This sets up the stack area for thread local storage.
 * The structure inside the TLS area is architecture specific.
 *
 * @param new_thread New thread object
 * @param stack_ptr Stack pointer
 * @return Number of bytes taken by the TLS area
 */
size_t arch_tls_stack_setup(struct k_thread *new_thread, char *stack_ptr);

/** @} */

/* Include arch-specific inline function implementation */
#include <kernel_arch_func.h>

#ifdef __cplusplus
}
#endif

#endif /* _ASMLANGUAGE */

#endif /* ZEPHYR_KERNEL_INCLUDE_KERNEL_ARCH_INTERFACE_H_ */