/*
 * Copyright (c) 2019 Intel Corporation
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_ARCH_X86_INCLUDE_KERNEL_ARCH_FUNC_H_
#define ZEPHYR_ARCH_X86_INCLUDE_KERNEL_ARCH_FUNC_H_

#include <kernel_arch_data.h>
#include <zephyr/arch/x86/mmustructs.h>

#ifdef CONFIG_X86_64
#include <intel64/kernel_arch_func.h>
#else
#include <ia32/kernel_arch_func.h>
#endif

#ifndef _ASMLANGUAGE
static inline bool arch_is_in_isr(void)
{
#ifdef CONFIG_SMP
	/* On SMP the current CPU can change under us if we are
	 * preempted, so interrupts must be masked while inspecting the
	 * per-CPU data.  (Note the deliberate lack of a gcc size suffix
	 * on the instructions: this code must assemble for both 32- and
	 * 64-bit targets.)
	 */
	bool ret;

	__asm__ volatile ("pushf; cli");
	ret = arch_curr_cpu()->nested != 0;
	__asm__ volatile ("popf");
	return ret;
#else
	return _kernel.cpus[0].nested != 0U;
#endif
}

struct multiboot_info;

extern FUNC_NORETURN void z_prep_c(void *arg);

#ifdef CONFIG_X86_VERY_EARLY_CONSOLE
/* Set up an ultra-minimal serial driver for printk() */
void z_x86_early_serial_init(void);
#endif /* CONFIG_X86_VERY_EARLY_CONSOLE */

/* Called upon CPU exception that is unhandled and hence fatal; dump
 * interesting info and call z_x86_fatal_error()
 */
FUNC_NORETURN void z_x86_unhandled_cpu_exception(uintptr_t vector,
						 const struct arch_esf *esf);

/* Called upon unrecoverable error; dump registers and transfer control to
 * kernel via z_fatal_error()
 */
FUNC_NORETURN void z_x86_fatal_error(unsigned int reason,
				     const struct arch_esf *esf);

/* Common handling for page fault exceptions */
void z_x86_page_fault_handler(struct arch_esf *esf);
#ifdef CONFIG_THREAD_STACK_INFO
/**
 * @brief Check if a memory address range falls within the stack
 *
 * Given a memory address range, ensure that it falls within the bounds
 * of the faulting context's stack.
 *
 * @param addr Starting address
 * @param size Size of the region, or 0 if we just want to see if addr is
 *             in bounds
 * @param cs Code segment of faulting context
 * @return true if addr/size region is not within the thread stack
 */
bool z_x86_check_stack_bounds(uintptr_t addr, size_t size, uint16_t cs);
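
/*
 * Usage sketch (illustrative only, not part of this header): a fault
 * handler might classify a bad access as a stack overflow like this.
 * The fault_addr and esf locals, and the esf->cs field access, are
 * assumptions about the caller's context:
 *
 *	if (z_x86_check_stack_bounds(fault_addr, 0, esf->cs)) {
 *		reason = K_ERR_STACK_CHK_FAIL;
 *	}
 */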
#endif /* CONFIG_THREAD_STACK_INFO */

#ifdef CONFIG_USERSPACE
extern FUNC_NORETURN void z_x86_userspace_enter(k_thread_entry_t user_entry,
					       void *p1, void *p2, void *p3,
					       uintptr_t stack_end,
					       uintptr_t stack_start);

/* Preparation steps needed for all threads if user mode is turned on.
 *
 * Returns the initial entry point to swap into.
 */
void *z_x86_userspace_prepare_thread(struct k_thread *thread);

#endif /* CONFIG_USERSPACE */

void z_x86_do_kernel_oops(const struct arch_esf *esf);

/*
 * Find a free IRQ vector at the specified priority, or return -1 if none
 * are left. When multiple vectors are allocated one after another,
 * prev_vector can be used to speed up the search: fill it with the
 * previously allocated vector, or -1 to start over.
 */
int z_x86_allocate_vector(unsigned int priority, int prev_vector);

/*
 * Connect an IRQ to the given interrupt vector and install func/arg as
 * its handler.
 */
void z_x86_irq_connect_on_vector(unsigned int irq,
				 uint8_t vector,
				 void (*func)(const void *arg),
				 const void *arg);
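
/*
 * Usage sketch (illustrative only, not part of this header): allocate
 * two vectors at the same priority back to back, then attach a handler
 * to the second one. The priority/IRQ values and my_isr handler are
 * hypothetical:
 *
 *	int v1 = z_x86_allocate_vector(3, -1);
 *	int v2 = z_x86_allocate_vector(3, v1);
 *
 *	if (v2 >= 0) {
 *		z_x86_irq_connect_on_vector(25, (uint8_t)v2, my_isr, NULL);
 *	}
 */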

#endif /* !_ASMLANGUAGE */

#endif /* ZEPHYR_ARCH_X86_INCLUDE_KERNEL_ARCH_FUNC_H_ */