/*
 * Copyright (c) 2019 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Internal kernel APIs implemented at the architecture layer.
 *
 * Not all architecture-specific defines are here; APIs that are used
 * by public functions and macros are defined in include/sys/arch_interface.h.
 *
 * For all inline functions prototyped here, the implementation is expected
 * to be provided by arch/ARCH/include/kernel_arch_func.h
 */
#ifndef ZEPHYR_KERNEL_INCLUDE_KERNEL_ARCH_INTERFACE_H_
#define ZEPHYR_KERNEL_INCLUDE_KERNEL_ARCH_INTERFACE_H_

#include <kernel.h>
#include <sys/arch_interface.h>

#ifndef _ASMLANGUAGE

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @defgroup arch-timing Architecture timing APIs
 * @ingroup arch-interface
 * @{
 */
#ifdef CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT
/**
 * Architecture-specific implementation of busy-waiting
 *
 * @param usec_to_wait Wait period, in microseconds
 */
void arch_busy_wait(uint32_t usec_to_wait);
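
/*
 * A minimal sketch of one possible implementation, assuming the
 * architecture reports time through the standard k_cycle_get_32() and
 * sys_clock_hw_cycles_per_sec() interfaces (illustrative only, not a
 * requirement of this API):
 *
 *   void arch_busy_wait(uint32_t usec_to_wait)
 *   {
 *       uint32_t start = k_cycle_get_32();
 *       uint32_t cycles = (uint32_t)(((uint64_t)usec_to_wait *
 *                                     sys_clock_hw_cycles_per_sec()) /
 *                                    USEC_PER_SEC);
 *
 *       // Unsigned subtraction handles cycle counter wraparound.
 *       while ((k_cycle_get_32() - start) < cycles) {
 *           // spin
 *       }
 *   }
 */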
#endif

/** @} */

/**
 * @defgroup arch-threads Architecture thread APIs
 * @ingroup arch-interface
 * @{
 */

/** Handle arch-specific logic for setting up new threads
 *
 * The stack and arch-specific thread state variables must be set up
 * such that a later attempt to switch to this thread will succeed
 * and we will enter z_thread_entry with the requested thread and
 * arguments as its parameters.
 *
 * At some point in this function's implementation, z_setup_new_thread() must
 * be called with the true bounds of the available stack buffer within the
 * thread's stack object.
 *
 * The provided stack pointer is guaranteed to be properly aligned with respect
 * to the CPU and ABI requirements. There may be space reserved between the
 * stack pointer and the bounds of the stack buffer for initial stack pointer
 * randomization and thread-local storage.
 *
 * Fields in thread->base will be initialized when this is called.
 *
 * @param thread Pointer to uninitialized struct k_thread
 * @param stack Pointer to the stack object
 * @param stack_ptr Aligned initial stack pointer
 * @param entry Thread entry function
 * @param p1 1st entry point parameter
 * @param p2 2nd entry point parameter
 * @param p3 3rd entry point parameter
 */
void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
		     char *stack_ptr, k_thread_entry_t entry,
		     void *p1, void *p2, void *p3);
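
/*
 * A minimal sketch for a switch-based port. The init_stack_frame layout
 * and the Z_STACK_PTR_TO_FRAME() carve-out below are illustrative
 * assumptions, not requirements of this interface:
 *
 *   void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 *                        char *stack_ptr, k_thread_entry_t entry,
 *                        void *p1, void *p2, void *p3)
 *   {
 *       struct init_stack_frame *iframe;
 *
 *       // Carve the initial frame out of the aligned stack pointer,
 *       // arranging for the first switch to land in z_thread_entry().
 *       iframe = Z_STACK_PTR_TO_FRAME(struct init_stack_frame, stack_ptr);
 *       iframe->entry = z_thread_entry;
 *       iframe->arg0 = entry;
 *       iframe->arg1 = p1;
 *       iframe->arg2 = p2;
 *       iframe->arg3 = p3;
 *
 *       // On this hypothetical port the saved context doubles as the
 *       // switch handle.
 *       thread->switch_handle = iframe;
 *   }
 */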

#ifdef CONFIG_USE_SWITCH
/** Cooperative context switch primitive
 *
 * The action of arch_switch() should be to switch to a new context
 * passed in the first argument, and save a pointer to the current
 * context into the address passed in the second argument.
 *
 * The actual type and interpretation of the switch handle is specified
 * by the architecture.  It is the same data structure stored in the
 * "switch_handle" field of a newly-created thread in arch_new_thread(),
 * and passed to the kernel as the "interrupted" argument to
 * z_get_next_switch_handle().
 *
 * Note that on SMP systems, the kernel uses the store through the
 * second pointer as a synchronization point to detect when a thread
 * context is completely saved (so another CPU can know when it is
 * safe to switch).  This store must be done AFTER all relevant state
 * is saved, and must include whatever memory barriers or cache
 * management code is required to be sure another CPU will see the
 * result correctly.
 *
 * The simplest implementation of arch_switch() is generally to push
 * state onto the thread stack and use the resulting stack pointer as the
 * switch handle.  Some architectures may instead decide to use a pointer
 * into the thread struct as the "switch handle" type.  These can legally
 * assume that the second argument to arch_switch() is the address of the
 * switch_handle field of struct k_thread and can use an offset on
 * this value to find other parts of the thread struct.  For example a (C
 * pseudocode) implementation of arch_switch() might look like:
 *
 *   void arch_switch(void *switch_to, void **switched_from)
 *   {
 *       struct k_thread *new = switch_to;
 *       struct k_thread *old = CONTAINER_OF(switched_from, struct k_thread,
 *                                           switch_handle);
 *
 *       // save old context...
 *       *switched_from = old;
 *       // restore new context...
 *   }
 *
 * Note that the kernel manages the switch_handle field for
 * synchronization as described above.  So it is not legal for
 * architecture code to assume that it has any particular value at any
 * other time.  In particular it is not legal to read the field from the
 * address passed in the second argument.
 *
 * @param switch_to Incoming thread's switch handle
 * @param switched_from Pointer to outgoing thread's switch handle storage
 *        location, which must be updated.
 */
static inline void arch_switch(void *switch_to, void **switched_from);
#else
/**
 * Cooperatively context switch
 *
 * Must be called with interrupts locked with the provided key.
 * This is the older-style context switching method, which is incompatible
 * with SMP. New arch ports, either SMP or UP, are encouraged to implement
 * arch_switch() instead.
 *
 * @param key Interrupt locking key
 * @return If woken from blocking on some kernel object, the result of that
 *         blocking operation.
 */
int arch_swap(unsigned int key);

/**
 * Set the return value for the specified thread.
 *
 * It is assumed that the specified @a thread is pending.
 *
 * @param thread Pointer to thread object
 * @param value value to set as return value
 */
static ALWAYS_INLINE void
arch_thread_return_value_set(struct k_thread *thread, unsigned int value);
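
/*
 * A typical implementation stores the value in the slot that the swap
 * code reloads as arch_swap()'s return value; the arch.swap_return_value
 * field below is an illustrative assumption:
 *
 *   static ALWAYS_INLINE void
 *   arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
 *   {
 *       thread->arch.swap_return_value = value;
 *   }
 */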
#endif /* CONFIG_USE_SWITCH */

#ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
/**
 * Custom logic for entering main thread context at early boot
 *
 * Used by architectures where the typical trick of setting up a dummy thread
 * in early boot context to "switch out" of isn't workable.
 *
 * @param main_thread main thread object
 * @param stack_ptr Initial stack pointer
 * @param _main Entry point for application main function.
 */
void arch_switch_to_main_thread(struct k_thread *main_thread, char *stack_ptr,
				k_thread_entry_t _main);
#endif /* CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN */

#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
/**
 * @brief Disable floating point context preservation
 *
 * The function is used to disable the preservation of floating
 * point context information for a particular thread.
 *
 * @note For ARM architecture, disabling floating point preservation may only
 * be requested for the current thread and cannot be requested in ISRs.
 *
 * @param thread Pointer to thread object
 *
 * @retval 0        On success.
 * @retval -EINVAL  If the floating point disabling could not be performed.
 * @retval -ENOTSUP If the operation is not supported.
 */
int arch_float_disable(struct k_thread *thread);
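
/*
 * Illustrative call from thread context (sketch only; this API backs
 * the public k_float_disable()):
 *
 *   int ret = arch_float_disable(k_current_get());
 *
 *   if (ret == -ENOTSUP) {
 *       // FP context preservation cannot be toggled on this arch
 *   }
 */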

/**
 * @brief Enable floating point context preservation
 *
 * The function is used to enable the preservation of floating
 * point context information for a particular thread.
 * This API depends on each architecture's implementation. If the
 * architecture does not support enabling, this API will always fail.
 *
 * The @a options parameter indicates which floating point register sets will
 * be used by the specified thread. Currently it is used by x86 only.
 *
 * @param thread  Pointer to thread object
 * @param options architecture dependent options
 *
 * @retval 0        On success.
 * @retval -EINVAL  If the floating point enabling could not be performed.
 * @retval -ENOTSUP If the operation is not supported.
 */
int arch_float_enable(struct k_thread *thread, unsigned int options);
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */

/** @} */

/**
 * @defgroup arch-pm Architecture-specific power management APIs
 * @ingroup arch-interface
 * @{
 */
/** Halt the system, optionally propagating a reason code */
FUNC_NORETURN void arch_system_halt(unsigned int reason);

/** @} */


/**
 * @defgroup arch-irq Architecture-specific IRQ APIs
 * @ingroup arch-interface
 * @{
 */

/**
 * Test if the current context is in interrupt context
 *
 * XXX: This is inconsistently handled among arches wrt exception context
 * See: #17656
 *
 * @return true if we are in interrupt context
 */
static inline bool arch_is_in_isr(void);
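
/*
 * A common implementation pattern checks a per-CPU interrupt nesting
 * count (sketch only; the exact bookkeeping is up to the architecture):
 *
 *   static inline bool arch_is_in_isr(void)
 *   {
 *       return arch_curr_cpu()->nested != 0U;
 *   }
 */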

/** @} */

/**
 * @defgroup arch-mmu Architecture-specific memory-mapping APIs
 * @ingroup arch-interface
 * @{
 */

#ifdef CONFIG_MMU
/**
 * Map physical memory into the virtual address space
 *
 * This is a low-level interface to mapping pages into the address space.
 * Behavior when providing unaligned addresses/sizes is undefined; these
 * are assumed to be aligned to CONFIG_MMU_PAGE_SIZE.
 *
 * The core kernel handles all management of the virtual address space;
 * by the time we invoke this function, we know exactly where this mapping
 * will be established. If the page tables already had mappings installed
 * for the virtual memory region, these will be overwritten.
 *
 * If the target architecture supports multiple page sizes, currently
 * only the smallest page size will be used.
 *
 * The memory range itself is never accessed by this operation.
 *
 * This API must be safe to call in ISRs or exception handlers. Calls
 * to this API are assumed to be serialized, and indeed all usage will
 * originate from kernel/mm.c which handles virtual memory management.
 *
 * Architectures are expected to pre-allocate page tables for the entire
 * address space, as defined by CONFIG_KERNEL_VM_BASE and
 * CONFIG_KERNEL_VM_SIZE. This operation should never require any kind of
 * allocation for paging structures.
 *
 * Validation of arguments should be done via assertions.
 *
 * This API is part of infrastructure still under development and may
 * change.
 *
 * @param virt Page-aligned destination virtual address to map
 * @param phys Page-aligned source physical address to map
 * @param size Page-aligned size of the mapped memory region in bytes
 * @param flags Caching, access and control flags, see K_MAP_* macros
 */
void arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags);
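
/*
 * Illustrative invocation: map one page of device MMIO read/write and
 * uncached. The addresses and flag names here are examples only:
 *
 *   arch_mem_map((void *)0xA0000000UL, 0xFE000000UL, CONFIG_MMU_PAGE_SIZE,
 *                K_MEM_PERM_RW | K_MEM_CACHE_NONE);
 */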

/**
 * Remove mappings for a provided virtual address range
 *
 * This is a low-level interface for un-mapping pages from the address space.
 * When this completes, the relevant page table entries will be updated as
 * if no mapping was ever made for that memory range. No previous context
 * needs to be preserved. This function must update mappings in all active
 * page tables.
 *
 * Behavior when providing unaligned addresses/sizes is undefined; these
 * are assumed to be aligned to CONFIG_MMU_PAGE_SIZE.
 *
 * Behavior when providing an address range that is not already mapped is
 * undefined.
 *
 * This function should never require memory allocations for paging structures,
 * and it is not necessary to free any paging structures. Empty page tables
 * due to all contained entries being un-mapped may remain in place.
 *
 * Implementations must invalidate TLBs as necessary.
 *
 * This API is part of infrastructure still under development and may change.
 *
 * @param addr Page-aligned base virtual address to un-map
 * @param size Page-aligned region size
 */
void arch_mem_unmap(void *addr, size_t size);

/**
 * Get the mapped physical memory address from virtual address.
 *
 * The function only needs to query the current set of page tables, as
 * the information it reports must be common to all of them if multiple
 * page tables are in use; it is unnecessary to iterate over all of them.
 *
 * Unless otherwise specified, virtual pages have the same mappings
 * across all page tables. Calling this function on data pages that are
 * exceptions to this rule (such as the scratch page) is undefined
 * behavior: just check the currently installed page tables and return
 * the information found there.
 *
 * @param virt Page-aligned virtual address
 * @param[out] phys Mapped physical address (can be NULL if only checking
 *                  if virtual address is mapped)
 *
 * @retval 0 if mapping is found and valid
 * @retval -EFAULT if virtual address is not mapped
 */
int arch_page_phys_get(void *virt, uintptr_t *phys);
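
/*
 * Example usage (sketch): translate a virtual address, treating -EFAULT
 * as "not mapped":
 *
 *   uintptr_t phys;
 *
 *   if (arch_page_phys_get(virt_addr, &phys) == 0) {
 *       // virt_addr is mapped at physical address 'phys'
 *   }
 */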

#ifdef CONFIG_ARCH_HAS_RESERVED_PAGE_FRAMES
/**
 * Update page frame database with reserved pages
 *
 * Some page frames within system RAM may not be available for use. A good
 * example of this is reserved regions in the first megabyte on PC-like systems.
 *
 * Implementations of this function should mark all relevant entries in
 * z_page_frames with K_PAGE_FRAME_RESERVED. This function is called at
 * early system initialization with mm_lock held.
 */
void arch_reserved_pages_update(void);
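
/*
 * A minimal sketch for a PC-like system, reserving everything below 1MB.
 * The z_is_page_frame()/z_page_frame_get() accessors are illustrative
 * assumptions about the page frame database:
 *
 *   void arch_reserved_pages_update(void)
 *   {
 *       for (uintptr_t pos = 0; pos < MB(1); pos += CONFIG_MMU_PAGE_SIZE) {
 *           if (!z_is_page_frame(pos)) {
 *               continue;
 *           }
 *           z_page_frame_get(pos)->flags |= K_PAGE_FRAME_RESERVED;
 *       }
 *   }
 */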
#endif /* CONFIG_ARCH_HAS_RESERVED_PAGE_FRAMES */

#ifdef CONFIG_DEMAND_PAGING
/**
 * Update all page tables for a paged-out data page
 *
 * This function:
 * - Sets the data page virtual address to trigger a fault if accessed, in
 *   a way that can be distinguished from access violations or un-mapped
 *   pages.
 * - Saves the provided location value so that it can be retrieved for
 *   that data page in the page fault handler.
 *
 * The location value semantics are undefined here, but the value will
 * always be page-aligned. It could be 0.
 *
 * If multiple page tables are in use, this must update all page tables.
 * This function is called with interrupts locked.
 *
 * Calling this function on data pages which are already paged out is
 * undefined behavior.
 *
 * This API is part of infrastructure still under development and may change.
 */
void arch_mem_page_out(void *addr, uintptr_t location);

/**
 * Update all page tables for a paged-in data page
 *
 * This function:
 * - Maps the specified virtual data page address to the provided physical
 *   page frame address, such that future memory accesses will function as
 *   expected. Access and caching attributes are undisturbed.
 * - Clears any accounting for "accessed" and "dirty" states.
 *
 * If multiple page tables are in use, this must update all page tables.
 * This function is called with interrupts locked.
 *
 * Calling this function on data pages which are already paged in is
 * undefined behavior.
 *
 * This API is part of infrastructure still under development and may change.
 */
void arch_mem_page_in(void *addr, uintptr_t phys);

/**
 * Update current page tables for a temporary mapping
 *
 * Map a physical page frame address to a special virtual address
 * Z_SCRATCH_PAGE, with read/write access to supervisor mode, such that
 * when this function returns, the calling context can read/write the page
 * frame's contents from the Z_SCRATCH_PAGE address.
 *
 * This mapping only needs to be done on the current set of page tables,
 * as it is only used for a short period of time exclusively by the caller.
 * This function is called with interrupts locked.
 *
 * This API is part of infrastructure still under development and may change.
 */
void arch_mem_scratch(uintptr_t phys);
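
/*
 * Illustrative eviction sequence tying these hooks together (sketch
 * only; victim_phys, victim_virt, backing_store_buf and backing_location
 * are placeholder names, and the real orchestration lives in the core
 * kernel):
 *
 *   // Copy the victim frame out through the scratch mapping, then
 *   // retire its mappings using the backing store location token.
 *   arch_mem_scratch(victim_phys);
 *   memcpy(backing_store_buf, Z_SCRATCH_PAGE, CONFIG_MMU_PAGE_SIZE);
 *   arch_mem_page_out(victim_virt, backing_location);
 */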

enum arch_page_location {
	ARCH_PAGE_LOCATION_PAGED_OUT,
	ARCH_PAGE_LOCATION_PAGED_IN,
	ARCH_PAGE_LOCATION_BAD
};

/**
 * Fetch location information about a page at a particular address
 *
 * The function only needs to query the current set of page tables, as
 * the information it reports must be common to all of them if multiple
 * page tables are in use; it is unnecessary to iterate over all of them.
 * This may allow certain types of optimizations (such as reverse page
 * table mapping on x86).
 *
 * This function is called with interrupts locked, so that the reported
 * information can't become stale while decisions are being made based on it.
 *
 * Unless otherwise specified, virtual data pages have the same mappings
 * across all page tables. Calling this function on data pages that are
 * exceptions to this rule (such as the scratch page) is undefined
 * behavior: just check the currently installed page tables and return
 * the information found there.
 *
 * @param addr Virtual data page address that took the page fault
 * @param [out] location In the case of ARCH_PAGE_LOCATION_PAGED_OUT, the
 *        backing store location value used to retrieve the data page. In
 *        the case of ARCH_PAGE_LOCATION_PAGED_IN, the physical address the
 *        page is mapped to.
 * @retval ARCH_PAGE_LOCATION_PAGED_OUT The page was evicted to the backing
 *         store.
 * @retval ARCH_PAGE_LOCATION_PAGED_IN The data page is resident in memory.
 * @retval ARCH_PAGE_LOCATION_BAD The page is un-mapped or otherwise has had
 *         invalid access.
 */
enum arch_page_location arch_page_location_get(void *addr, uintptr_t *location);
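
/*
 * Example usage (sketch) from a page fault handler:
 *
 *   uintptr_t location;
 *
 *   switch (arch_page_location_get(fault_addr, &location)) {
 *   case ARCH_PAGE_LOCATION_PAGED_OUT:
 *       // 'location' is the backing store location token
 *       break;
 *   case ARCH_PAGE_LOCATION_PAGED_IN:
 *       // 'location' is the mapped physical address
 *       break;
 *   default:
 *       // un-mapped or otherwise invalid access
 *       break;
 *   }
 */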

/**
 * @def ARCH_DATA_PAGE_ACCESSED
 *
 * Bit indicating the data page was accessed since the value was last cleared.
 *
 * Used by marking eviction algorithms. Safe to set this if uncertain.
 *
 * This bit is undefined if ARCH_DATA_PAGE_LOADED is not set.
 */

/**
 * @def ARCH_DATA_PAGE_DIRTY
 *
 * Bit indicating the data page, if evicted, will need to be paged out.
 *
 * Set if the data page was modified since it was last paged out, or if
 * it has never been paged out before. Safe to set this if uncertain.
 *
 * This bit is undefined if ARCH_DATA_PAGE_LOADED is not set.
 */

/**
 * @def ARCH_DATA_PAGE_LOADED
 *
 * Bit indicating that the data page is loaded into a physical page frame.
 *
 * If un-set, the data page is paged out or not mapped.
 */

/**
 * @def ARCH_DATA_PAGE_NOT_MAPPED
 *
 * If ARCH_DATA_PAGE_LOADED is un-set, this will indicate that the page
 * is not mapped at all. This bit is undefined if ARCH_DATA_PAGE_LOADED is set.
 */

/**
 * Retrieve page characteristics from the page table(s)
 *
 * The architecture is responsible for maintaining "accessed" and "dirty"
 * states of data pages to support marking eviction algorithms. This can
 * either be directly supported by hardware or emulated by modifying
 * protection policy to generate faults on reads or writes. In all cases
 * the architecture must maintain this information in some way.
 *
 * For the provided virtual address, report the logical OR of the accessed
 * and dirty states for the relevant entries in all active page tables in
 * the system if the page is mapped and not paged out.
 *
 * If clear_accessed is true, the ARCH_DATA_PAGE_ACCESSED flag will be reset.
 * This function will report its prior state. If multiple page tables are in
 * use, this function clears accessed state in all of them.
 *
 * This function is called with interrupts locked, so that the reported
 * information can't become stale while decisions are being made based on it.
 *
 * The return value may have other bits set which the caller must ignore.
 *
 * Clearing accessed state for data pages that are not ARCH_DATA_PAGE_LOADED
 * is undefined behavior.
 *
 * The ARCH_DATA_PAGE_DIRTY and ARCH_DATA_PAGE_ACCESSED bits in the return
 * value are only significant if ARCH_DATA_PAGE_LOADED is set; otherwise
 * ignore them.
 *
 * The ARCH_DATA_PAGE_NOT_MAPPED bit in the return value is only significant
 * if ARCH_DATA_PAGE_LOADED is un-set; otherwise ignore it.
 *
 * Unless otherwise specified, virtual data pages have the same mappings
 * across all page tables. Calling this function on data pages that are
 * exceptions to this rule (such as the scratch page) is undefined behavior.
 *
 * This API is part of infrastructure still under development and may change.
 *
 * @param addr Virtual address to look up in page tables
 * @param [out] location If non-NULL, updated with either the physical page
 *                   frame address or the backing store location depending on
 *                   ARCH_DATA_PAGE_LOADED state. This is not touched if
 *                   ARCH_DATA_PAGE_NOT_MAPPED.
 * @param clear_accessed Whether to clear ARCH_DATA_PAGE_ACCESSED state
 * @return Value with ARCH_DATA_PAGE_* bits set reflecting the data page
 *         configuration
 */
uintptr_t arch_page_info_get(void *addr, uintptr_t *location,
			     bool clear_accessed);
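
/*
 * Example usage (sketch) in an eviction algorithm, clearing accessed
 * state so that the next scan reports fresh information:
 *
 *   uintptr_t location;
 *   uintptr_t flags = arch_page_info_get(addr, &location, true);
 *
 *   if ((flags & ARCH_DATA_PAGE_LOADED) != 0U &&
 *       (flags & ARCH_DATA_PAGE_ACCESSED) != 0U) {
 *       // page was touched since the last scan; prefer keeping it
 *   }
 */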
#endif /* CONFIG_DEMAND_PAGING */
#endif /* CONFIG_MMU */
/** @} */

/**
 * @defgroup arch-misc Miscellaneous architecture APIs
 * @ingroup arch-interface
 * @{
 */

/**
 * Early boot console output hook
 *
 * Definition of this function is optional. If implemented, any invocation
 * of printk() (or logging calls with CONFIG_LOG_MINIMAL, which are backed
 * by printk) will default to sending characters to this function. It is
 * useful for early boot debugging before the main serial or console drivers
 * come up.
 *
 * This can be overridden at runtime with __printk_hook_install().
 *
 * The default __weak implementation of this does nothing.
 *
 * @param c Character to print
 * @return The character printed
 */
int arch_printk_char_out(int c);
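
/*
 * A minimal sketch, assuming a hypothetical polled transmit routine
 * that works before the console driver is up (early_uart_tx_poll() is
 * illustrative, not a real API):
 *
 *   int arch_printk_char_out(int c)
 *   {
 *       early_uart_tx_poll((unsigned char)c);
 *       return c;
 *   }
 */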

/**
 * Architecture-specific kernel initialization hook
 *
 * This function is invoked near the top of _Cstart, for additional
 * architecture-specific setup before the rest of the kernel is brought up.
 *
 * TODO: Deprecate; most arches are using a prep_c() function to do the same
 * thing in a simpler way.
 */
static inline void arch_kernel_init(void);

/** Do nothing and return. Yawn. */
static inline void arch_nop(void);

/** @} */

/**
 * @defgroup arch-coredump Architecture-specific core dump APIs
 * @ingroup arch-interface
 * @{
 */

/**
 * @brief Architecture-specific handling during coredump
 *
 * This dumps architecture-specific information during coredump.
 *
 * @param esf Exception Stack Frame (arch-specific)
 */
void arch_coredump_info_dump(const z_arch_esf_t *esf);

/**
 * @brief Get the target code specified by the architecture.
 */
uint16_t arch_coredump_tgt_code_get(void);

/** @} */

/**
 * @defgroup arch-tls Architecture-specific Thread Local Storage APIs
 * @ingroup arch-interface
 * @{
 */

/**
 * @brief Setup Architecture-specific TLS area in stack
 *
 * This sets up the stack area for thread local storage.
 * The structure inside this area is architecture-specific.
 *
 * @param new_thread New thread object
 * @param stack_ptr Stack pointer
 * @return Number of bytes taken by the TLS area
 */
size_t arch_tls_stack_setup(struct k_thread *new_thread, char *stack_ptr);
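
/*
 * A minimal sketch using the common TLS helpers, assuming the port
 * keeps the TLS pointer in a 'tls' field of the thread object (layout
 * details are architecture-specific):
 *
 *   size_t arch_tls_stack_setup(struct k_thread *new_thread, char *stack_ptr)
 *   {
 *       size_t tls_size = z_tls_data_size();
 *
 *       // Carve the TLS area out of the top of the stack buffer and
 *       // populate it from the initialization image.
 *       stack_ptr -= tls_size;
 *       z_tls_copy(stack_ptr);
 *
 *       new_thread->tls = POINTER_TO_UINT(stack_ptr);
 *
 *       return tls_size;
 *   }
 */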

/** @} */

/* Include arch-specific inline function implementation */
#include <kernel_arch_func.h>

#ifdef __cplusplus
}
#endif

#endif /* _ASMLANGUAGE */

#endif /* ZEPHYR_KERNEL_INCLUDE_KERNEL_ARCH_INTERFACE_H_ */