/*
 * Copyright (c) 2019 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Internal kernel APIs implemented at the architecture layer.
 *
 * Not all architecture-specific defines are here; APIs that are used
 * by public functions and macros are defined in include/zephyr/arch/arch_interface.h.
 *
 * For all inline functions prototyped here, the implementation is expected
 * to be provided by arch/ARCH/include/kernel_arch_func.h
 */
#ifndef ZEPHYR_KERNEL_INCLUDE_KERNEL_ARCH_INTERFACE_H_
#define ZEPHYR_KERNEL_INCLUDE_KERNEL_ARCH_INTERFACE_H_

#include <zephyr/kernel.h>
#include <zephyr/arch/arch_interface.h>

#ifndef _ASMLANGUAGE

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @defgroup arch-timing Architecture timing APIs
 * @ingroup arch-interface
 * @{
 */
#ifdef CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT
/**
 * Architecture-specific implementation of busy-waiting
 *
 * @param usec_to_wait Wait period, in microseconds
 */
void arch_busy_wait(uint32_t usec_to_wait);
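
/*
 * A minimal sketch of a custom busy-wait loop, assuming the port exposes a
 * free-running cycle counter through arch_k_cycle_get_32() and a
 * sys_clock_hw_cycles_per_sec() rate; real implementations typically use a
 * dedicated timer and handle counter rollover more carefully.
 *
 *   void arch_busy_wait(uint32_t usec_to_wait)
 *   {
 *       uint32_t start = arch_k_cycle_get_32();
 *       uint64_t cycles = (uint64_t)usec_to_wait *
 *                         sys_clock_hw_cycles_per_sec() / USEC_PER_SEC;
 *
 *       while ((uint32_t)(arch_k_cycle_get_32() - start) < cycles) {
 *           // spin
 *       }
 *   }
 */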
#endif /* CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT */

/** @} */

/**
 * @defgroup arch-threads Architecture thread APIs
 * @ingroup arch-interface
 * @{
 */

/** Handle arch-specific logic for setting up new threads
 *
 * The stack and arch-specific thread state variables must be set up
 * such that a later attempt to switch to this thread will succeed
 * and we will enter z_thread_entry with the requested thread and
 * arguments as its parameters.
 *
 * At some point in this function's implementation, z_setup_new_thread() must
 * be called with the true bounds of the available stack buffer within the
 * thread's stack object.
 *
 * The provided stack pointer is guaranteed to be properly aligned with respect
 * to the CPU and ABI requirements. There may be space reserved between the
 * stack pointer and the bounds of the stack buffer for initial stack pointer
 * randomization and thread-local storage.
 *
 * Fields in thread->base will be initialized when this is called.
 *
 * @param thread Pointer to uninitialized struct k_thread
 * @param stack Pointer to the stack object
 * @param stack_ptr Aligned initial stack pointer
 * @param entry Thread entry function
 * @param p1 1st entry point parameter
 * @param p2 2nd entry point parameter
 * @param p3 3rd entry point parameter
 */
void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
		     char *stack_ptr, k_thread_entry_t entry,
		     void *p1, void *p2, void *p3);
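
/*
 * For illustration, a pseudocode sketch of arch_new_thread() for a
 * hypothetical port that starts threads by building an initial frame at the
 * top of the stack; struct arch_init_frame and its fields are invented for
 * this example.
 *
 *   void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 *                        char *stack_ptr, k_thread_entry_t entry,
 *                        void *p1, void *p2, void *p3)
 *   {
 *       struct arch_init_frame *frame =
 *           (struct arch_init_frame *)stack_ptr - 1;
 *
 *       // The first switch to this thread "returns" into z_thread_entry()
 *       frame->pc = (uintptr_t)z_thread_entry;
 *       frame->arg0 = (uintptr_t)entry;
 *       frame->arg1 = (uintptr_t)p1;
 *       frame->arg2 = (uintptr_t)p2;
 *       frame->arg3 = (uintptr_t)p3;
 *
 *       thread->switch_handle = frame;  // consumed by arch_switch()
 *   }
 */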

#ifdef CONFIG_USE_SWITCH
/** Cooperative context switch primitive
 *
 * The action of arch_switch() should be to switch to a new context
 * passed in the first argument, and save a pointer to the current
 * context into the address passed in the second argument.
 *
 * The actual type and interpretation of the switch handle is specified
 * by the architecture.  It is the same data structure stored in the
 * "switch_handle" field of a newly-created thread in arch_new_thread(),
 * and passed to the kernel as the "interrupted" argument to
 * z_get_next_switch_handle().
 *
 * Note that on SMP systems, the kernel uses the store through the
 * second pointer as a synchronization point to detect when a thread
 * context is completely saved (so another CPU can know when it is
 * safe to switch).  This store must be done AFTER all relevant state
 * is saved, and must include whatever memory barriers or cache
 * management code is required to be sure another CPU will see the
 * result correctly.
 *
 * The simplest implementation of arch_switch() is generally to push
 * state onto the thread stack and use the resulting stack pointer as the
 * switch handle.  Some architectures may instead decide to use a pointer
 * into the thread struct as the "switch handle" type.  These can legally
 * assume that the second argument to arch_switch() is the address of the
 * switch_handle field of struct k_thread and can use an offset on
 * this value to find other parts of the thread struct.  For example a (C
 * pseudocode) implementation of arch_switch() might look like:
 *
 *   void arch_switch(void *switch_to, void **switched_from)
 *   {
 *       struct k_thread *new = switch_to;
 *       struct k_thread *old = CONTAINER_OF(switched_from, struct k_thread,
 *                                           switch_handle);
 *
 *       // save old context...
 *       *switched_from = old;
 *       // restore new context...
 *   }
 *
 * Note that the kernel manages the switch_handle field for
 * synchronization as described above.  So it is not legal for
 * architecture code to assume that it has any particular value at any
 * other time.  In particular it is not legal to read the field from the
 * address passed in the second argument.
 *
 * @param switch_to Incoming thread's switch handle
 * @param switched_from Pointer to outgoing thread's switch handle storage
 *        location, which must be updated.
 */
static inline void arch_switch(void *switch_to, void **switched_from);
#endif /* CONFIG_USE_SWITCH */

#if !defined(CONFIG_USE_SWITCH) || defined(__DOXYGEN__)
#if defined(__DOXYGEN__)
/**
 * Cooperatively context switch
 *
 * Must be called with interrupts locked with the provided key.
 * This is the older-style context switching method, which is incompatible
 * with SMP. New arch ports, either SMP or UP, are encouraged to implement
 * arch_switch() instead.
 *
 * @param key Interrupt locking key
 * @return If woken from blocking on some kernel object, the result of that
 *         blocking operation.
 */
int arch_swap(unsigned int key);
#endif /* __DOXYGEN__ */

/**
 * Set the return value for the specified thread.
 *
 * It is assumed that the specified @a thread is pending.
 *
 * @param thread Pointer to thread object
 * @param value value to set as return value
 */
static ALWAYS_INLINE void
arch_thread_return_value_set(struct k_thread *thread, unsigned int value);
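
/*
 * A typical implementation simply stashes the value where the swap return
 * path will pick it up, e.g. (sketch; the swap_return_value field mirrors
 * what several in-tree ports keep in struct _thread_arch):
 *
 *   static ALWAYS_INLINE void
 *   arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
 *   {
 *       thread->arch.swap_return_value = value;
 *   }
 */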
#endif /* !CONFIG_USE_SWITCH || __DOXYGEN__ */

#ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
/**
 * Custom logic for entering main thread context at early boot
 *
 * Used by architectures where the typical trick of setting up a dummy thread
 * in early boot context to "switch out" of isn't workable.
 *
 * @param main_thread main thread object
 * @param stack_ptr Initial stack pointer
 * @param _main Entry point for application main function.
 */
void arch_switch_to_main_thread(struct k_thread *main_thread, char *stack_ptr,
				k_thread_entry_t _main);
#endif /* CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN */

#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
/**
 * @brief Disable floating point context preservation
 *
 * The function is used to disable the preservation of floating
 * point context information for a particular thread.
 *
 * @note For ARM architecture, disabling floating point preservation may only
 * be requested for the current thread and cannot be requested in ISRs.
 *
 * @param thread Pointer to thread object
 *
 * @retval 0        On success.
 * @retval -EINVAL  If the floating point disabling could not be performed.
 * @retval -ENOTSUP If the operation is not supported
 */
int arch_float_disable(struct k_thread *thread);

/**
 * @brief Enable floating point context preservation
 *
 * The function is used to enable the preservation of floating
 * point context information for a particular thread.
 * Whether this is supported depends on each architecture's implementation. If
 * the architecture does not support enabling, this API will always fail.
 *
 * The @a options parameter indicates which floating point register sets will
 * be used by the specified thread. Currently it is used by x86 only.
 *
 * @param thread  Pointer to thread object
 * @param options Architecture-dependent options
 *
 * @retval 0        On success.
 * @retval -EINVAL  If the floating point enabling could not be performed.
 * @retval -ENOTSUP If the operation is not supported
 */
int arch_float_enable(struct k_thread *thread, unsigned int options);
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
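
/*
 * These hooks back the public k_float_disable()/k_float_enable() calls; a
 * portable caller might use them like this (sketch):
 *
 *   int ret = k_float_disable(k_current_get());
 *
 *   if (ret == -ENOTSUP) {
 *       // architecture cannot disable FP preservation at runtime
 *   }
 */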

#if defined(CONFIG_USERSPACE) && defined(CONFIG_ARCH_HAS_THREAD_PRIV_STACK_SPACE_GET)
/**
 * @brief Obtain privileged stack usage information for the specified thread
 *
 * Must be called under supervisor mode.
 *
 * Some hardware may prevent inspection of a stack buffer currently in use.
 * If this API is called from supervisor mode, on the currently running thread,
 * on a platform which selects @kconfig{CONFIG_NO_UNUSED_STACK_INSPECTION}, an
 * error will be generated.
 *
 * @param[in]  thread     Thread to inspect stack information
 * @param[out] stack_size Filled in with the size of the stack space of
 *                        the target thread in bytes.
 * @param[out] unused_ptr Filled in with the unused stack space of
 *                        the target thread in bytes.
 *
 * @return 0 on success
 * @return -EBADF Bad thread object
 * @return -EPERM No permissions on thread object
 * @return -ENOTSUP Forbidden by hardware policy
 * @return -EINVAL Thread is uninitialized or exited or not a user thread
 * @return -EFAULT Bad memory address for unused_ptr
 */
int arch_thread_priv_stack_space_get(const struct k_thread *thread, size_t *stack_size,
				     size_t *unused_ptr);
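
/*
 * A minimal sketch, assuming the privileged stack was seeded with a 0xAA
 * fill pattern at thread setup and that its bounds are tracked in
 * hypothetical thread->arch.priv_stack_start/priv_stack_size fields:
 *
 *   int arch_thread_priv_stack_space_get(const struct k_thread *thread,
 *                                        size_t *stack_size,
 *                                        size_t *unused_ptr)
 *   {
 *       const uint8_t *buf = (const uint8_t *)thread->arch.priv_stack_start;
 *       size_t unused = 0;
 *
 *       *stack_size = thread->arch.priv_stack_size;
 *
 *       // Untouched bytes at the low end of a descending stack still
 *       // carry the fill pattern.
 *       while (unused < *stack_size && buf[unused] == 0xAA) {
 *           unused++;
 *       }
 *
 *       *unused_ptr = unused;
 *       return 0;
 *   }
 */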
#endif /* CONFIG_USERSPACE && CONFIG_ARCH_HAS_THREAD_PRIV_STACK_SPACE_GET */

/** @} */

/**
 * @defgroup arch-pm Architecture-specific power management APIs
 * @ingroup arch-interface
 * @{
 */
/** Halt the system, optionally propagating a reason code */
FUNC_NORETURN void arch_system_halt(unsigned int reason);

/** @} */

/**
 * @defgroup arch-irq Architecture-specific IRQ APIs
 * @ingroup arch-interface
 * @{
 */

/**
 * Test if the current context is in interrupt context
 *
 * XXX: This is inconsistently handled among arches wrt exception context
 * See: #17656
 *
 * @return true if we are in interrupt context
 */
static inline bool arch_is_in_isr(void);
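
/*
 * Ports commonly answer this from an interrupt nesting counter kept in
 * per-CPU data, e.g. (sketch; "nested" mirrors the field several in-tree
 * ports maintain in their _cpu_t):
 *
 *   static inline bool arch_is_in_isr(void)
 *   {
 *       return arch_curr_cpu()->nested != 0U;
 *   }
 */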

/** @} */

/**
 * @defgroup arch-mmu Architecture-specific memory-mapping APIs
 * @ingroup arch-interface
 * @{
 */

/**
 * Map physical memory into the virtual address space
 *
 * This is a low-level interface to mapping pages into the address space.
 * Behavior when providing unaligned addresses/sizes is undefined; these
 * are assumed to be aligned to CONFIG_MMU_PAGE_SIZE.
 *
 * The core kernel handles all management of the virtual address space;
 * by the time we invoke this function, we know exactly where this mapping
 * will be established. If the page tables already had mappings installed
 * for the virtual memory region, these will be overwritten.
 *
 * If the target architecture supports multiple page sizes, currently
 * only the smallest page size will be used.
 *
 * The memory range itself is never accessed by this operation.
 *
 * This API must be safe to call in ISRs or exception handlers. Calls
 * to this API are assumed to be serialized, and indeed all usage will
 * originate from kernel/mm.c which handles virtual memory management.
 *
 * Architectures are expected to pre-allocate page tables for the entire
 * address space, as defined by CONFIG_KERNEL_VM_BASE and
 * CONFIG_KERNEL_VM_SIZE. This operation should never require any kind of
 * allocation for paging structures.
 *
 * Validation of arguments should be done via assertions.
 *
 * This API is part of infrastructure still under development and may
 * change.
 *
 * @param virt Page-aligned destination virtual address to map
 * @param phys Page-aligned source physical address to map
 * @param size Page-aligned size of the mapped memory region in bytes
 * @param flags Caching, access and control flags, see K_MAP_* macros
 */
void arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags);
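
/*
 * The shape of a typical implementation, in pseudocode; set_pte() and
 * flags_to_pte() are invented stand-ins for a port's page-table update and
 * flag translation logic:
 *
 *   void arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags)
 *   {
 *       for (size_t off = 0; off < size; off += CONFIG_MMU_PAGE_SIZE) {
 *           set_pte((uint8_t *)virt + off, phys + off, flags_to_pte(flags));
 *       }
 *       // ...plus whatever TLB invalidation the hardware requires...
 *   }
 */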

/**
 * Remove mappings for a provided virtual address range
 *
 * This is a low-level interface for un-mapping pages from the address space.
 * When this completes, the relevant page table entries will be updated as
 * if no mapping was ever made for that memory range. No previous context
 * needs to be preserved. This function must update mappings in all active
 * page tables.
 *
 * Behavior when providing unaligned addresses/sizes is undefined; these
 * are assumed to be aligned to CONFIG_MMU_PAGE_SIZE.
 *
 * Behavior when providing an address range that is not already mapped is
 * undefined.
 *
 * This function should never require memory allocations for paging structures,
 * and it is not necessary to free any paging structures. Empty page tables
 * due to all contained entries being un-mapped may remain in place.
 *
 * Implementations must invalidate TLBs as necessary.
 *
 * This API is part of infrastructure still under development and may change.
 *
 * @param addr Page-aligned base virtual address to un-map
 * @param size Page-aligned region size
 */
void arch_mem_unmap(void *addr, size_t size);

/**
 * Get the mapped physical memory address from virtual address.
 *
 * The function only needs to query the current set of page tables, as
 * the information it reports must be common to all of them if multiple
 * page tables are in use; it is unnecessary to iterate over all of them.
 *
 * Unless otherwise specified, virtual pages have the same mappings
 * across all page tables. Calling this function on data pages that are
 * exceptions to this rule (such as the scratch page) is undefined behavior:
 * just check the currently installed page tables and return the information
 * found there.
 *
 * @param virt Page-aligned virtual address
 * @param[out] phys Mapped physical address (can be NULL if only checking
 *                  if virtual address is mapped)
 *
 * @retval 0 if mapping is found and valid
 * @retval -EFAULT if virtual address is not mapped
 */
int arch_page_phys_get(void *virt, uintptr_t *phys);
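
/*
 * Example use, translating a page-aligned kernel virtual address before
 * handing it to a device that needs a physical address (sketch):
 *
 *   uintptr_t phys;
 *
 *   if (arch_page_phys_get(buf, &phys) == 0) {
 *       // buf is mapped; phys now holds the backing physical address
 *   }
 */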

/**
 * Update page frame database with reserved pages
 *
 * Some page frames within system RAM may not be available for use. A good
 * example of this is reserved regions in the first megabyte on PC-like systems.
 *
 * Implementations of this function should mark all relevant entries in
 * k_mem_page_frames with K_MEM_PAGE_FRAME_RESERVED. This function is called at
 * early system initialization with mm_lock held.
 */
void arch_reserved_pages_update(void);

/**
 * Update all page tables for a paged-out data page
 *
 * This function:
 * - Sets the data page virtual address to trigger a fault if accessed, in a
 *   way that can be distinguished from access violations or un-mapped pages.
 * - Saves the provided location value so that it can be retrieved for that
 *   data page in the page fault handler.
 * - The location value semantics are undefined here, but the value will
 *   always be page-aligned. It could be 0.
 *
 * If multiple page tables are in use, this must update all page tables.
 * This function is called with interrupts locked.
 *
 * Calling this function on data pages which are already paged out is
 * undefined behavior.
 *
 * This API is part of infrastructure still under development and may change.
 */
void arch_mem_page_out(void *addr, uintptr_t location);

/**
 * Update all page tables for a paged-in data page
 *
 * This function:
 * - Maps the specified virtual data page address to the provided physical
 *   page frame address, such that future memory accesses will function as
 *   expected. Access and caching attributes are undisturbed.
 * - Clears any accounting for "accessed" and "dirty" states.
 *
 * If multiple page tables are in use, this must update all page tables.
 * This function is called with interrupts locked.
 *
 * Calling this function on data pages which are already paged in is
 * undefined behavior.
 *
 * This API is part of infrastructure still under development and may change.
 */
void arch_mem_page_in(void *addr, uintptr_t phys);

/**
 * Update current page tables for a temporary mapping
 *
 * Map a physical page frame address to a special virtual address
 * K_MEM_SCRATCH_PAGE, with read/write access to supervisor mode, such that
 * when this function returns, the calling context can read/write the page
 * frame's contents from the K_MEM_SCRATCH_PAGE address.
 *
 * This mapping only needs to be done on the current set of page tables,
 * as it is only used for a short period of time exclusively by the caller.
 * This function is called with interrupts locked.
 *
 * This API is part of infrastructure still under development and may change.
 */
void arch_mem_scratch(uintptr_t phys);
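
/*
 * Example: how paging code might use the scratch mapping to zero a page
 * frame that has no permanent virtual mapping (sketch):
 *
 *   arch_mem_scratch(frame_phys);
 *   memset(K_MEM_SCRATCH_PAGE, 0, CONFIG_MMU_PAGE_SIZE);
 */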

/**
 * Status of a particular page location.
 */
enum arch_page_location {
	/** The page has been evicted to the backing store. */
	ARCH_PAGE_LOCATION_PAGED_OUT,

	/** The page is resident in memory. */
	ARCH_PAGE_LOCATION_PAGED_IN,

	/** The page is not mapped. */
	ARCH_PAGE_LOCATION_BAD
};

/**
 * Fetch location information about a page at a particular address
 *
 * The function only needs to query the current set of page tables, as
 * the information it reports must be common to all of them if multiple
 * page tables are in use; it is unnecessary to iterate over all of them.
 * This may allow certain types of optimizations (such as reverse page
 * table mapping on x86).
 *
 * This function is called with interrupts locked, so that the reported
 * information can't become stale while decisions are being made based on it.
 *
 * Unless otherwise specified, virtual data pages have the same mappings
 * across all page tables. Calling this function on data pages that are
 * exceptions to this rule (such as the scratch page) is undefined behavior:
 * just check the currently installed page tables and return the information
 * found there.
 *
 * @param addr Virtual data page address that took the page fault
 * @param[out] location In the case of ARCH_PAGE_LOCATION_PAGED_OUT, the backing
 *        store location value used to retrieve the data page. In the case of
 *        ARCH_PAGE_LOCATION_PAGED_IN, the physical address the page is mapped to.
 * @retval ARCH_PAGE_LOCATION_PAGED_OUT The page was evicted to the backing store.
 * @retval ARCH_PAGE_LOCATION_PAGED_IN The data page is resident in memory.
 * @retval ARCH_PAGE_LOCATION_BAD The page is un-mapped or otherwise has had
 *         invalid access
 */
enum arch_page_location arch_page_location_get(void *addr, uintptr_t *location);
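
/*
 * Example of how a page fault handler might consume this API (sketch;
 * page_in_from_backing_store() is an invented helper):
 *
 *   uintptr_t location;
 *
 *   switch (arch_page_location_get(fault_addr, &location)) {
 *   case ARCH_PAGE_LOCATION_PAGED_OUT:
 *       page_in_from_backing_store(fault_addr, location);
 *       break;
 *   case ARCH_PAGE_LOCATION_PAGED_IN:
 *       // mapped and resident: some other kind of fault
 *       break;
 *   case ARCH_PAGE_LOCATION_BAD:
 *       // genuinely bad access
 *       break;
 *   }
 */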

/**
 * @def ARCH_DATA_PAGE_ACCESSED
 *
 * Bit indicating the data page was accessed since the value was last cleared.
 *
 * Used by marking eviction algorithms. Safe to set this if uncertain.
 *
 * This bit is undefined if ARCH_DATA_PAGE_LOADED is not set.
 */

/**
 * @def ARCH_DATA_PAGE_DIRTY
 *
 * Bit indicating the data page, if evicted, will need to be paged out.
 *
 * Set if the data page was modified since it was last paged out, or if
 * it has never been paged out before. Safe to set this if uncertain.
 *
 * This bit is undefined if ARCH_DATA_PAGE_LOADED is not set.
 */

/**
 * @def ARCH_DATA_PAGE_LOADED
 *
 * Bit indicating that the data page is loaded into a physical page frame.
 *
 * If un-set, the data page is paged out or not mapped.
 */

/**
 * @def ARCH_DATA_PAGE_NOT_MAPPED
 *
 * If ARCH_DATA_PAGE_LOADED is un-set, this will indicate that the page
 * is not mapped at all. This bit is undefined if ARCH_DATA_PAGE_LOADED is set.
 */

/**
 * Retrieve page characteristics from the page table(s)
 *
 * The architecture is responsible for maintaining "accessed" and "dirty"
 * states of data pages to support marking eviction algorithms. This can
 * either be directly supported by hardware or emulated by modifying
 * protection policy to generate faults on reads or writes. In all cases
 * the architecture must maintain this information in some way.
 *
 * For the provided virtual address, report the logical OR of the accessed
 * and dirty states for the relevant entries in all active page tables in
 * the system if the page is mapped and not paged out.
 *
 * If clear_accessed is true, the ARCH_DATA_PAGE_ACCESSED flag will be reset.
 * This function will report its prior state. If multiple page tables are in
 * use, this function clears accessed state in all of them.
 *
 * This function is called with interrupts locked, so that the reported
 * information can't become stale while decisions are being made based on it.
 *
 * The return value may have other bits set which the caller must ignore.
 *
 * Clearing accessed state for data pages that are not ARCH_DATA_PAGE_LOADED
 * is undefined behavior.
 *
 * ARCH_DATA_PAGE_DIRTY and ARCH_DATA_PAGE_ACCESSED bits in the return value
 * are only significant if ARCH_DATA_PAGE_LOADED is set, otherwise ignore
 * them.
 *
 * ARCH_DATA_PAGE_NOT_MAPPED bit in the return value is only significant
 * if ARCH_DATA_PAGE_LOADED is un-set, otherwise ignore it.
 *
 * Unless otherwise specified, virtual data pages have the same mappings
 * across all page tables. Calling this function on data pages that are
 * exceptions to this rule (such as the scratch page) is undefined behavior.
 *
 * This API is part of infrastructure still under development and may change.
 *
 * @param addr Virtual address to look up in page tables
 * @param[out] location If non-NULL, updated with either the physical page
 *                   frame address or the backing store location, depending on
 *                   ARCH_DATA_PAGE_LOADED state. This is not touched if
 *                   ARCH_DATA_PAGE_NOT_MAPPED is set.
 * @param clear_accessed Whether to clear ARCH_DATA_PAGE_ACCESSED state
 * @return Value with ARCH_DATA_PAGE_* bits set reflecting the data page
 *         configuration
 */
uintptr_t arch_page_info_get(void *addr, uintptr_t *location,
			     bool clear_accessed);
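
/*
 * Example: a clock-style eviction sweep built on this API (sketch; the
 * surrounding candidate selection is omitted):
 *
 *   uintptr_t location;
 *   uintptr_t info = arch_page_info_get(virt, &location, true);
 *
 *   if ((info & ARCH_DATA_PAGE_LOADED) != 0U &&
 *       (info & ARCH_DATA_PAGE_ACCESSED) == 0U) {
 *       // Not referenced since the last sweep: an eviction candidate.
 *       // ARCH_DATA_PAGE_DIRTY indicates whether write-back is needed.
 *   }
 */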

/** @} */

/**
 * @defgroup arch-misc Miscellaneous architecture APIs
 * @ingroup arch-interface
 * @{
 */

/**
 * Early boot console output hook
 *
 * Definition of this function is optional. If implemented, any invocation
 * of printk() (or logging calls under CONFIG_LOG_MODE_MINIMAL, which are
 * backed by printk) will default to sending characters to this function. It
 * is useful for early boot debugging before the main serial or console
 * drivers come up.
 *
 * This can be overridden at runtime with __printk_hook_install().
 *
 * The default __weak implementation of this does nothing.
 *
 * @param c Character to print
 * @return The character printed
 */
int arch_printk_char_out(int c);
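
/*
 * A minimal sketch for a hypothetical memory-mapped UART; the register
 * names and polling protocol are invented for the example:
 *
 *   int arch_printk_char_out(int c)
 *   {
 *       while ((sys_read32(EARLY_UART_STATUS) & TX_READY) == 0U) {
 *           // spin until the transmitter can accept a character
 *       }
 *       sys_write32((uint32_t)c, EARLY_UART_DATA);
 *       return c;
 *   }
 */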

#ifdef CONFIG_ARCH_HAS_THREAD_NAME_HOOK
/**
 * Set thread name hook
 *
 * If implemented, any invocation of a function setting a thread name
 * will invoke this function.
 *
 * @param thread    Pointer to thread object
 * @param str       The thread name
 *
 * @retval 0        On success.
 * @retval -EAGAIN  If the operation could not be performed.
 */
int arch_thread_name_set(struct k_thread *thread, const char *str);
#endif /* CONFIG_ARCH_HAS_THREAD_NAME_HOOK */

/**
 * Architecture-specific kernel initialization hook
 *
 * This function is invoked near the top of z_cstart, for additional
 * architecture-specific setup before the rest of the kernel is brought up.
 */
static inline void arch_kernel_init(void);

/** Do nothing and return. Yawn. */
static inline void arch_nop(void);
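
/*
 * On most ports arch_nop() is a single instruction, e.g.:
 *
 *   static inline void arch_nop(void)
 *   {
 *       __asm__ volatile("nop");
 *   }
 */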

/** @} */

/**
 * @defgroup arch-coredump Architecture-specific core dump APIs
 * @ingroup arch-interface
 * @{
 */

/**
 * @brief Architecture-specific handling during coredump
 *
 * This dumps architecture-specific information during coredump.
 *
 * @param esf Exception Stack Frame (arch-specific)
 */
void arch_coredump_info_dump(const struct arch_esf *esf);

/**
 * @brief Get the target code specified by the architecture.
 */
uint16_t arch_coredump_tgt_code_get(void);

#if defined(CONFIG_USERSPACE) || defined(__DOXYGEN__)

/**
 * @brief Architecture-specific handling of dumping privileged stack
 *
 * This dumps the architecture-specific privileged stack during coredump.
 *
 * @param thread Pointer to thread object
 */
void arch_coredump_priv_stack_dump(struct k_thread *thread);

#endif /* CONFIG_USERSPACE || __DOXYGEN__ */

/** @} */

/**
 * @defgroup arch-tls Architecture-specific Thread Local Storage APIs
 * @ingroup arch-interface
 * @{
 */

/**
 * @brief Set up architecture-specific TLS area in stack
 *
 * This sets up the stack area for thread-local storage. The structure of
 * the TLS area is architecture-specific.
 *
 * @param new_thread New thread object
 * @param stack_ptr Stack pointer
 * @return Number of bytes taken by the TLS area
 */
size_t arch_tls_stack_setup(struct k_thread *new_thread, char *stack_ptr);
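
/*
 * A sketch of the common approach: carve the TLS block out of the stack
 * top, populate it, and record where the context switch code should load
 * the TLS pointer from. z_tls_copy() and z_tls_data_size() reflect the
 * common helpers used by in-tree ports; details vary per ABI.
 *
 *   size_t arch_tls_stack_setup(struct k_thread *new_thread, char *stack_ptr)
 *   {
 *       char *tls = stack_ptr - z_tls_data_size();
 *
 *       z_tls_copy(tls);                   // copy .tdata, zero .tbss
 *       new_thread->tls = (uintptr_t)tls;  // loaded on context switch
 *
 *       return z_tls_data_size();
 *   }
 */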

/** @} */

/* Include arch-specific inline function implementation */
#include <kernel_arch_func.h>

#ifdef __cplusplus
}
#endif

#endif /* _ASMLANGUAGE */

#endif /* ZEPHYR_KERNEL_INCLUDE_KERNEL_ARCH_INTERFACE_H_ */