1 /*
2  * Copyright (c) 2019 Intel Corporation.
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 /**
8  * @file
9  * @brief Internal kernel APIs implemented at the architecture layer.
10  *
 * Not all architecture-specific defines are here; APIs that are used
 * by public functions and macros are defined in
 * include/zephyr/sys/arch_interface.h.
13  *
14  * For all inline functions prototyped here, the implementation is expected
15  * to be provided by arch/ARCH/include/kernel_arch_func.h
16  */
17 #ifndef ZEPHYR_KERNEL_INCLUDE_KERNEL_ARCH_INTERFACE_H_
18 #define ZEPHYR_KERNEL_INCLUDE_KERNEL_ARCH_INTERFACE_H_
19 
20 #include <zephyr/kernel.h>
21 #include <zephyr/sys/arch_interface.h>
22 
23 #ifndef _ASMLANGUAGE
24 
25 #ifdef __cplusplus
26 extern "C" {
27 #endif
28 
29 /**
 * @defgroup arch-timing Architecture timing APIs
 * @ingroup arch-interface
 * @{
32  */
33 #ifdef CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT
34 /**
35  * Architecture-specific implementation of busy-waiting
36  *
37  * @param usec_to_wait Wait period, in microseconds
38  */
39 void arch_busy_wait(uint32_t usec_to_wait);
40 #endif
41 
42 /** @} */
43 
44 /**
45  * @defgroup arch-threads Architecture thread APIs
46  * @ingroup arch-interface
47  * @{
48  */
49 
50 /** Handle arch-specific logic for setting up new threads
51  *
52  * The stack and arch-specific thread state variables must be set up
53  * such that a later attempt to switch to this thread will succeed
54  * and we will enter z_thread_entry with the requested thread and
55  * arguments as its parameters.
56  *
 * At some point in this function's implementation, z_new_thread_init() must
58  * be called with the true bounds of the available stack buffer within the
59  * thread's stack object.
60  *
61  * The provided stack pointer is guaranteed to be properly aligned with respect
62  * to the CPU and ABI requirements. There may be space reserved between the
63  * stack pointer and the bounds of the stack buffer for initial stack pointer
64  * randomization and thread-local storage.
65  *
66  * Fields in thread->base will be initialized when this is called.
67  *
68  * @param thread Pointer to uninitialized struct k_thread
69  * @param stack Pointer to the stack object
70  * @param stack_ptr Aligned initial stack pointer
71  * @param entry Thread entry function
72  * @param p1 1st entry point parameter
73  * @param p2 2nd entry point parameter
74  * @param p3 3rd entry point parameter
75  */
76 void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
77 		     char *stack_ptr, k_thread_entry_t entry,
78 		     void *p1, void *p2, void *p3);
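
/* For illustration only, a hedged sketch of what an arch_new_thread()
 * implementation might look like on a hypothetical architecture whose
 * switch handle is a pointer to the saved context on the stack. The
 * "struct init_stack_frame" layout and register names are assumptions,
 * not a reference implementation:
 *
 *   struct init_stack_frame {
 *       void *pc;                 // will be loaded into the program counter
 *       void *r0, *r1, *r2, *r3;  // z_thread_entry() arguments
 *   };
 *
 *   void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 *                        char *stack_ptr, k_thread_entry_t entry,
 *                        void *p1, void *p2, void *p3)
 *   {
 *       struct init_stack_frame *iframe;
 *
 *       // carve the initial frame out of the aligned stack pointer
 *       iframe = (struct init_stack_frame *)stack_ptr - 1;
 *       iframe->pc = z_thread_entry;    // common C entry wrapper
 *       iframe->r0 = entry;
 *       iframe->r1 = p1;
 *       iframe->r2 = p2;
 *       iframe->r3 = p3;
 *       thread->switch_handle = iframe; // what arch_switch() will resume
 *   }
 */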
79 
80 #ifdef CONFIG_USE_SWITCH
81 /** Cooperative context switch primitive
82  *
83  * The action of arch_switch() should be to switch to a new context
84  * passed in the first argument, and save a pointer to the current
85  * context into the address passed in the second argument.
86  *
87  * The actual type and interpretation of the switch handle is specified
88  * by the architecture.  It is the same data structure stored in the
89  * "switch_handle" field of a newly-created thread in arch_new_thread(),
90  * and passed to the kernel as the "interrupted" argument to
91  * z_get_next_switch_handle().
92  *
93  * Note that on SMP systems, the kernel uses the store through the
94  * second pointer as a synchronization point to detect when a thread
95  * context is completely saved (so another CPU can know when it is
96  * safe to switch).  This store must be done AFTER all relevant state
97  * is saved, and must include whatever memory barriers or cache
98  * management code is required to be sure another CPU will see the
99  * result correctly.
100  *
101  * The simplest implementation of arch_switch() is generally to push
102  * state onto the thread stack and use the resulting stack pointer as the
103  * switch handle.  Some architectures may instead decide to use a pointer
104  * into the thread struct as the "switch handle" type.  These can legally
105  * assume that the second argument to arch_switch() is the address of the
106  * switch_handle field of struct thread_base and can use an offset on
107  * this value to find other parts of the thread struct.  For example a (C
108  * pseudocode) implementation of arch_switch() might look like:
109  *
110  *   void arch_switch(void *switch_to, void **switched_from)
111  *   {
112  *       struct k_thread *new = switch_to;
113  *       struct k_thread *old = CONTAINER_OF(switched_from, struct k_thread,
114  *                                           switch_handle);
115  *
116  *       // save old context...
117  *       *switched_from = old;
118  *       // restore new context...
119  *   }
120  *
121  * Note that the kernel manages the switch_handle field for
122  * synchronization as described above.  So it is not legal for
123  * architecture code to assume that it has any particular value at any
124  * other time.  In particular it is not legal to read the field from the
125  * address passed in the second argument.
126  *
127  * @param switch_to Incoming thread's switch handle
128  * @param switched_from Pointer to outgoing thread's switch handle storage
129  *        location, which must be updated.
130  */
131 static inline void arch_switch(void *switch_to, void **switched_from);
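
/* Expanding on the SMP note above, the save/publish ordering might be
 * sketched as follows, where memory_barrier() stands in for whatever
 * barrier or cache-management primitive the architecture actually needs:
 *
 *   // 1. save ALL outgoing thread state first...
 *   // 2. ...make it visible to other CPUs...
 *   memory_barrier();
 *   // 3. ...and only then publish the handle; at this point another
 *   //    CPU may legally begin running the outgoing thread
 *   *switched_from = old;
 *   // 4. finally, restore the incoming context from switch_to
 */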
132 #else
133 /**
134  * Cooperatively context switch
135  *
136  * Must be called with interrupts locked with the provided key.
137  * This is the older-style context switching method, which is incompatible
138  * with SMP. New arch ports, either SMP or UP, are encouraged to implement
139  * arch_switch() instead.
140  *
141  * @param key Interrupt locking key
142  * @return If woken from blocking on some kernel object, the result of that
143  *         blocking operation.
144  */
145 int arch_swap(unsigned int key);
146 
147 /**
148  * Set the return value for the specified thread.
149  *
150  * It is assumed that the specified @a thread is pending.
151  *
152  * @param thread Pointer to thread object
153  * @param value value to set as return value
154  */
155 static ALWAYS_INLINE void
156 arch_thread_return_value_set(struct k_thread *thread, unsigned int value);
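
/* This is typically a one-liner. A hedged sketch, assuming the
 * architecture keeps the swap return value in its per-thread arch
 * struct (the field name here is an assumption):
 *
 *   static ALWAYS_INLINE void
 *   arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
 *   {
 *       thread->arch.swap_return_value = value;
 *   }
 */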
#endif /* CONFIG_USE_SWITCH */
158 
159 #ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
160 /**
161  * Custom logic for entering main thread context at early boot
162  *
163  * Used by architectures where the typical trick of setting up a dummy thread
164  * in early boot context to "switch out" of isn't workable.
165  *
166  * @param main_thread main thread object
167  * @param stack_ptr Initial stack pointer
168  * @param _main Entry point for application main function.
169  */
170 void arch_switch_to_main_thread(struct k_thread *main_thread, char *stack_ptr,
171 				k_thread_entry_t _main);
172 #endif /* CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN */
173 
174 #if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
175 /**
176  * @brief Disable floating point context preservation
177  *
178  * The function is used to disable the preservation of floating
179  * point context information for a particular thread.
180  *
181  * @note For ARM architecture, disabling floating point preservation may only
182  * be requested for the current thread and cannot be requested in ISRs.
183  *
184  * @retval 0        On success.
185  * @retval -EINVAL  If the floating point disabling could not be performed.
186  * @retval -ENOTSUP If the operation is not supported
187  */
188 int arch_float_disable(struct k_thread *thread);
189 
190 /**
191  * @brief Enable floating point context preservation
192  *
193  * The function is used to enable the preservation of floating
194  * point context information for a particular thread.
 * This API depends on each architecture's implementation. If the
 * architecture does not support enabling, this API will always fail.
197  *
198  * The @a options parameter indicates which floating point register sets will
199  * be used by the specified thread. Currently it is used by x86 only.
200  *
201  * @param thread  ID of thread.
202  * @param options architecture dependent options
203  *
204  * @retval 0        On success.
205  * @retval -EINVAL  If the floating point enabling could not be performed.
206  * @retval -ENOTSUP If the operation is not supported
207  */
208 int arch_float_enable(struct k_thread *thread, unsigned int options);
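
/* Usage sketch: a thread that is done with floating point work might
 * drop FP context preservation for itself, tolerating architectures
 * that cannot do so (illustrative only):
 *
 *   int ret = arch_float_disable(_current);
 *
 *   if (ret == -ENOTSUP) {
 *       // this architecture cannot disable FP preservation at runtime
 *   }
 */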
209 #endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
210 
211 /** @} */
212 
213 /**
214  * @defgroup arch-pm Architecture-specific power management APIs
215  * @ingroup arch-interface
216  * @{
217  */
218 /** Halt the system, optionally propagating a reason code */
219 FUNC_NORETURN void arch_system_halt(unsigned int reason);
220 
221 /** @} */
222 
223 
224 /**
225  * @defgroup arch-irq Architecture-specific IRQ APIs
226  * @ingroup arch-interface
227  * @{
228  */
229 
230 /**
231  * Test if the current context is in interrupt context
232  *
233  * XXX: This is inconsistently handled among arches wrt exception context
234  * See: #17656
235  *
236  * @return true if we are in interrupt context
237  */
238 static inline bool arch_is_in_isr(void);
239 
240 /** @} */
241 
242 /**
243  * @defgroup arch-mmu Architecture-specific memory-mapping APIs
244  * @ingroup arch-interface
245  * @{
246  */
247 
248 /**
249  * Map physical memory into the virtual address space
250  *
251  * This is a low-level interface to mapping pages into the address space.
 * Behavior when providing unaligned addresses/sizes is undefined; these
 * are assumed to be aligned to CONFIG_MMU_PAGE_SIZE.
254  *
255  * The core kernel handles all management of the virtual address space;
256  * by the time we invoke this function, we know exactly where this mapping
257  * will be established. If the page tables already had mappings installed
258  * for the virtual memory region, these will be overwritten.
259  *
260  * If the target architecture supports multiple page sizes, currently
261  * only the smallest page size will be used.
262  *
263  * The memory range itself is never accessed by this operation.
264  *
265  * This API must be safe to call in ISRs or exception handlers. Calls
 * to this API are assumed to be serialized, and indeed all usage will
 * originate from kernel/mmu.c which handles virtual memory management.
268  *
269  * Architectures are expected to pre-allocate page tables for the entire
270  * address space, as defined by CONFIG_KERNEL_VM_BASE and
271  * CONFIG_KERNEL_VM_SIZE. This operation should never require any kind of
272  * allocation for paging structures.
273  *
274  * Validation of arguments should be done via assertions.
275  *
276  * This API is part of infrastructure still under development and may
277  * change.
278  *
 * @param virt Page-aligned destination virtual address to map
 * @param phys Page-aligned source physical address to map
281  * @param size Page-aligned size of the mapped memory region in bytes
282  * @param flags Caching, access and control flags, see K_MAP_* macros
283  */
284 void arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags);
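
/* Usage sketch: establishing a single uncached read/write page mapping.
 * The K_MEM_* flag names are an assumption about the memory-management
 * flag macros available in this tree:
 *
 *   arch_mem_map(virt_page, phys_page, CONFIG_MMU_PAGE_SIZE,
 *                K_MEM_PERM_RW | K_MEM_CACHE_NONE);
 */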
285 
286 /**
287  * Remove mappings for a provided virtual address range
288  *
289  * This is a low-level interface for un-mapping pages from the address space.
290  * When this completes, the relevant page table entries will be updated as
291  * if no mapping was ever made for that memory range. No previous context
292  * needs to be preserved. This function must update mappings in all active
293  * page tables.
294  *
 * Behavior when providing unaligned addresses/sizes is undefined; these
 * are assumed to be aligned to CONFIG_MMU_PAGE_SIZE.
297  *
298  * Behavior when providing an address range that is not already mapped is
299  * undefined.
300  *
301  * This function should never require memory allocations for paging structures,
302  * and it is not necessary to free any paging structures. Empty page tables
303  * due to all contained entries being un-mapped may remain in place.
304  *
305  * Implementations must invalidate TLBs as necessary.
306  *
307  * This API is part of infrastructure still under development and may change.
308  *
309  * @param addr Page-aligned base virtual address to un-map
310  * @param size Page-aligned region size
311  */
312 void arch_mem_unmap(void *addr, size_t size);
313 
314 /**
 * Get the mapped physical memory address from a virtual address.
316  *
317  * The function only needs to query the current set of page tables as
318  * the information it reports must be common to all of them if multiple
319  * page tables are in use. If multiple page tables are active it is unnecessary
320  * to iterate over all of them.
321  *
322  * Unless otherwise specified, virtual pages have the same mappings
323  * across all page tables. Calling this function on data pages that are
324  * exceptions to this rule (such as the scratch page) is undefined behavior.
 * Just check the currently installed page tables and return the information
 * found there.
327  *
328  * @param virt Page-aligned virtual address
329  * @param[out] phys Mapped physical address (can be NULL if only checking
330  *                  if virtual address is mapped)
331  *
332  * @retval 0 if mapping is found and valid
333  * @retval -EFAULT if virtual address is not mapped
334  */
335 int arch_page_phys_get(void *virt, uintptr_t *phys);
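
/* Usage sketch: probing whether a virtual address is currently mapped
 * and, if so, retrieving the backing physical address:
 *
 *   uintptr_t phys;
 *
 *   if (arch_page_phys_get(virt_addr, &phys) == 0) {
 *       // virt_addr is mapped at physical address phys
 *   } else {
 *       // -EFAULT: no mapping installed for virt_addr
 *   }
 */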
336 
337 /**
338  * Update page frame database with reserved pages
339  *
340  * Some page frames within system RAM may not be available for use. A good
341  * example of this is reserved regions in the first megabyte on PC-like systems.
342  *
343  * Implementations of this function should mark all relevant entries in
 * z_page_frames with Z_PAGE_FRAME_RESERVED. This function is called at
345  * early system initialization with mm_lock held.
346  */
347 void arch_reserved_pages_update(void);
348 
349 /**
350  * Update all page tables for a paged-out data page
351  *
352  * This function:
 * - Sets the data page virtual address to trigger a fault if accessed, in
 *   a way that can be distinguished from access violations or un-mapped
 *   pages.
 * - Saves the provided location value so that it can be retrieved for that
 *   data page in the page fault handler.
 * - The location value semantics are undefined here, but the value will
 *   always be page-aligned. It could be 0.
359  *
360  * If multiple page tables are in use, this must update all page tables.
361  * This function is called with interrupts locked.
362  *
363  * Calling this function on data pages which are already paged out is
364  * undefined behavior.
365  *
366  * This API is part of infrastructure still under development and may change.
367  */
368 void arch_mem_page_out(void *addr, uintptr_t location);
369 
370 /**
371  * Update all page tables for a paged-in data page
372  *
373  * This function:
374  * - Maps the specified virtual data page address to the provided physical
375  *   page frame address, such that future memory accesses will function as
376  *   expected. Access and caching attributes are undisturbed.
377  * - Clears any accounting for "accessed" and "dirty" states.
378  *
379  * If multiple page tables are in use, this must update all page tables.
380  * This function is called with interrupts locked.
381  *
382  * Calling this function on data pages which are already paged in is
383  * undefined behavior.
384  *
385  * This API is part of infrastructure still under development and may change.
386  */
387 void arch_mem_page_in(void *addr, uintptr_t phys);
388 
389 /**
390  * Update current page tables for a temporary mapping
391  *
392  * Map a physical page frame address to a special virtual address
393  * Z_SCRATCH_PAGE, with read/write access to supervisor mode, such that
394  * when this function returns, the calling context can read/write the page
395  * frame's contents from the Z_SCRATCH_PAGE address.
396  *
397  * This mapping only needs to be done on the current set of page tables,
398  * as it is only used for a short period of time exclusively by the caller.
399  * This function is called with interrupts locked.
400  *
401  * This API is part of infrastructure still under development and may change.
402  */
403 void arch_mem_scratch(uintptr_t phys);
404 
405 /**
406  * Status of a particular page location.
407  */
408 enum arch_page_location {
409 	/** The page has been evicted to the backing store. */
410 	ARCH_PAGE_LOCATION_PAGED_OUT,
411 
412 	/** The page is resident in memory. */
413 	ARCH_PAGE_LOCATION_PAGED_IN,
414 
415 	/** The page is not mapped. */
416 	ARCH_PAGE_LOCATION_BAD
417 };
418 
419 /**
420  * Fetch location information about a page at a particular address
421  *
422  * The function only needs to query the current set of page tables as
423  * the information it reports must be common to all of them if multiple
424  * page tables are in use. If multiple page tables are active it is unnecessary
425  * to iterate over all of them. This may allow certain types of optimizations
426  * (such as reverse page table mapping on x86).
427  *
428  * This function is called with interrupts locked, so that the reported
429  * information can't become stale while decisions are being made based on it.
430  *
431  * Unless otherwise specified, virtual data pages have the same mappings
432  * across all page tables. Calling this function on data pages that are
433  * exceptions to this rule (such as the scratch page) is undefined behavior.
 * Just check the currently installed page tables and return the information
 * found there.
436  *
437  * @param addr Virtual data page address that took the page fault
 * @param [out] location In the case of ARCH_PAGE_LOCATION_PAGED_OUT, the
 *        backing store location value used to retrieve the data page. In
 *        the case of ARCH_PAGE_LOCATION_PAGED_IN, the physical address the
 *        page is mapped to.
 * @retval ARCH_PAGE_LOCATION_PAGED_OUT The page was evicted to the backing
 *         store.
 * @retval ARCH_PAGE_LOCATION_PAGED_IN The data page is resident in memory.
 * @retval ARCH_PAGE_LOCATION_BAD The page is un-mapped or otherwise has had
 *         invalid access.
445  */
446 enum arch_page_location arch_page_location_get(void *addr, uintptr_t *location);
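
/* Usage sketch: a page fault handler deciding how to service a fault.
 * Everything except arch_page_location_get() and the enum values is
 * hypothetical:
 *
 *   uintptr_t location;
 *
 *   switch (arch_page_location_get(fault_addr, &location)) {
 *   case ARCH_PAGE_LOCATION_PAGED_OUT:
 *       // fetch the page from backing store at "location"
 *       break;
 *   case ARCH_PAGE_LOCATION_PAGED_IN:
 *       // page is mapped at physical address "location"; perhaps a
 *       // spurious fault already serviced on another CPU
 *       break;
 *   case ARCH_PAGE_LOCATION_BAD:
 *       // genuine access violation
 *       break;
 *   }
 */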
447 
448 /**
449  * @def ARCH_DATA_PAGE_ACCESSED
450  *
451  * Bit indicating the data page was accessed since the value was last cleared.
452  *
453  * Used by marking eviction algorithms. Safe to set this if uncertain.
454  *
455  * This bit is undefined if ARCH_DATA_PAGE_LOADED is not set.
456  */
457 
458  /**
459   * @def ARCH_DATA_PAGE_DIRTY
460   *
461   * Bit indicating the data page, if evicted, will need to be paged out.
462   *
463   * Set if the data page was modified since it was last paged out, or if
464   * it has never been paged out before. Safe to set this if uncertain.
465   *
466   * This bit is undefined if ARCH_DATA_PAGE_LOADED is not set.
467   */
468 
469  /**
470   * @def ARCH_DATA_PAGE_LOADED
471   *
472   * Bit indicating that the data page is loaded into a physical page frame.
473   *
474   * If un-set, the data page is paged out or not mapped.
475   */
476 
477 /**
478  * @def ARCH_DATA_PAGE_NOT_MAPPED
479  *
480  * If ARCH_DATA_PAGE_LOADED is un-set, this will indicate that the page
481  * is not mapped at all. This bit is undefined if ARCH_DATA_PAGE_LOADED is set.
482  */
483 
484 /**
485  * Retrieve page characteristics from the page table(s)
486  *
487  * The architecture is responsible for maintaining "accessed" and "dirty"
488  * states of data pages to support marking eviction algorithms. This can
489  * either be directly supported by hardware or emulated by modifying
490  * protection policy to generate faults on reads or writes. In all cases
491  * the architecture must maintain this information in some way.
492  *
493  * For the provided virtual address, report the logical OR of the accessed
494  * and dirty states for the relevant entries in all active page tables in
495  * the system if the page is mapped and not paged out.
496  *
497  * If clear_accessed is true, the ARCH_DATA_PAGE_ACCESSED flag will be reset.
498  * This function will report its prior state. If multiple page tables are in
499  * use, this function clears accessed state in all of them.
500  *
501  * This function is called with interrupts locked, so that the reported
502  * information can't become stale while decisions are being made based on it.
503  *
504  * The return value may have other bits set which the caller must ignore.
505  *
506  * Clearing accessed state for data pages that are not ARCH_DATA_PAGE_LOADED
507  * is undefined behavior.
508  *
509  * ARCH_DATA_PAGE_DIRTY and ARCH_DATA_PAGE_ACCESSED bits in the return value
510  * are only significant if ARCH_DATA_PAGE_LOADED is set, otherwise ignore
511  * them.
512  *
513  * ARCH_DATA_PAGE_NOT_MAPPED bit in the return value is only significant
514  * if ARCH_DATA_PAGE_LOADED is un-set, otherwise ignore it.
515  *
516  * Unless otherwise specified, virtual data pages have the same mappings
517  * across all page tables. Calling this function on data pages that are
518  * exceptions to this rule (such as the scratch page) is undefined behavior.
519  *
520  * This API is part of infrastructure still under development and may change.
521  *
522  * @param addr Virtual address to look up in page tables
523  * @param [out] location If non-NULL, updated with either physical page frame
524  *                   address or backing store location depending on
525  *                   ARCH_DATA_PAGE_LOADED state. This is not touched if
526  *                   ARCH_DATA_PAGE_NOT_MAPPED.
527  * @param clear_accessed Whether to clear ARCH_DATA_PAGE_ACCESSED state
528  * @retval Value with ARCH_DATA_PAGE_* bits set reflecting the data page
529  *         configuration
530  */
531 uintptr_t arch_page_info_get(void *addr, uintptr_t *location,
532 			     bool clear_accessed);
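
/* Usage sketch: one step of a clock-style eviction algorithm testing
 * and clearing the accessed state of a candidate data page (the policy
 * around it is hypothetical):
 *
 *   uintptr_t location;
 *   uintptr_t flags = arch_page_info_get(addr, &location, true);
 *
 *   if ((flags & ARCH_DATA_PAGE_LOADED) != 0 &&
 *       (flags & ARCH_DATA_PAGE_ACCESSED) == 0) {
 *       // not referenced since the last pass: a good eviction candidate.
 *       // ARCH_DATA_PAGE_DIRTY tells us whether a page-out is needed.
 *   }
 */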
533 
534 /** @} */
535 
536 /**
537  * @defgroup arch-misc Miscellaneous architecture APIs
538  * @ingroup arch-interface
539  * @{
540  */
541 
542 /**
543  * Early boot console output hook
544  *
545  * Definition of this function is optional. If implemented, any invocation
546  * of printk() (or logging calls with CONFIG_LOG_MODE_MINIMAL which are backed by
547  * printk) will default to sending characters to this function. It is
548  * useful for early boot debugging before main serial or console drivers
549  * come up.
550  *
551  * This can be overridden at runtime with __printk_hook_install().
552  *
553  * The default __weak implementation of this does nothing.
554  *
555  * @param c Character to print
556  * @return The character printed
557  */
558 int arch_printk_char_out(int c);
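
/* A hedged sketch of an implementation that spins on a hypothetical
 * memory-mapped UART (all register names are invented for illustration):
 *
 *   int arch_printk_char_out(int c)
 *   {
 *       while ((sys_read32(UART0_STATUS) & UART0_TX_READY) == 0) {
 *           // busy-wait until the transmitter can accept a byte
 *       }
 *       sys_write32((uint32_t)c, UART0_TXDATA);
 *       return c;
 *   }
 */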
559 
560 /**
561  * Architecture-specific kernel initialization hook
562  *
 * This function is invoked near the top of z_cstart(), for additional
564  * architecture-specific setup before the rest of the kernel is brought up.
565  */
566 static inline void arch_kernel_init(void);
567 
568 /** Do nothing and return. Yawn. */
569 static inline void arch_nop(void);
570 
571 /** @} */
572 
573 /**
574  * @defgroup arch-coredump Architecture-specific core dump APIs
575  * @ingroup arch-interface
576  * @{
577  */
578 
579 /**
580  * @brief Architecture-specific handling during coredump
581  *
582  * This dumps architecture-specific information during coredump.
583  *
584  * @param esf Exception Stack Frame (arch-specific)
585  */
586 void arch_coredump_info_dump(const z_arch_esf_t *esf);
587 
588 /**
589  * @brief Get the target code specified by the architecture.
590  */
591 uint16_t arch_coredump_tgt_code_get(void);
592 
593 /** @} */
594 
595 /**
596  * @defgroup arch-tls Architecture-specific Thread Local Storage APIs
597  * @ingroup arch-interface
598  * @{
599  */
600 
601 /**
602  * @brief Setup Architecture-specific TLS area in stack
603  *
 * This sets up the stack area for thread local storage.
 * The structure inside the TLS area is architecture specific.
606  *
607  * @param new_thread New thread object
608  * @param stack_ptr Stack pointer
609  * @return Number of bytes taken by the TLS area
610  */
611 size_t arch_tls_stack_setup(struct k_thread *new_thread, char *stack_ptr);
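
/* A hedged sketch for an ABI that places the TLS area immediately below
 * the initial stack pointer; the z_tls_data_size()/z_tls_copy() helpers
 * and the thread->tls field are assumptions about this tree's TLS
 * support:
 *
 *   size_t arch_tls_stack_setup(struct k_thread *new_thread, char *stack_ptr)
 *   {
 *       size_t tls_size = z_tls_data_size();
 *
 *       stack_ptr -= tls_size;
 *       z_tls_copy(stack_ptr);  // copy tdata image, zero tbss
 *       new_thread->tls = POINTER_TO_UINT(stack_ptr);
 *
 *       return tls_size;
 *   }
 */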
612 
613 /** @} */
614 
615 /* Include arch-specific inline function implementation */
616 #include <kernel_arch_func.h>
617 
618 #ifdef __cplusplus
619 }
620 #endif
621 
622 #endif /* _ASMLANGUAGE */
623 
624 #endif /* ZEPHYR_KERNEL_INCLUDE_KERNEL_ARCH_INTERFACE_H_ */
625