/*
 * Copyright (c) 2010-2012, 2014-2015 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Architecture-independent private kernel APIs
 *
 * This file contains private kernel APIs that are not architecture-specific.
 */

#ifndef ZEPHYR_KERNEL_INCLUDE_KERNEL_INTERNAL_H_
#define ZEPHYR_KERNEL_INCLUDE_KERNEL_INTERNAL_H_

#include <kernel.h>
#include <kernel_arch_interface.h>
#include <string.h>

#ifndef _ASMLANGUAGE

#ifdef __cplusplus
extern "C" {
#endif

/* Early boot functions */

void z_bss_zero(void);
#ifdef CONFIG_XIP
void z_data_copy(void);
#else
static inline void z_data_copy(void)
{
	/* Do nothing */
}
#endif

#ifdef CONFIG_LINKER_USE_BOOT_SECTION
void z_bss_zero_boot(void);
#else
static inline void z_bss_zero_boot(void)
{
	/* Do nothing */
}
#endif

#ifdef CONFIG_LINKER_USE_PINNED_SECTION
void z_bss_zero_pinned(void);
#else
static inline void z_bss_zero_pinned(void)
{
	/* Do nothing */
}
#endif

FUNC_NORETURN void z_cstart(void);

void z_device_state_init(void);

extern FUNC_NORETURN void z_thread_entry(k_thread_entry_t entry,
					 void *p1, void *p2, void *p3);

extern char *z_setup_new_thread(struct k_thread *new_thread,
				k_thread_stack_t *stack, size_t stack_size,
				k_thread_entry_t entry,
				void *p1, void *p2, void *p3,
				int prio, uint32_t options, const char *name);

/**
 * @brief Allocate aligned memory from the current thread's resource pool
 *
 * Threads may be assigned a resource pool, which will be used to allocate
 * memory on behalf of certain kernel and driver APIs. Memory reserved
 * in this way should be freed with k_free().
 *
 * If called from an ISR, the k_malloc() system heap will be used if it exists.
 *
 * @param align Required memory alignment
 * @param size Memory allocation size
 * @return A pointer to the allocated memory, or NULL if there is insufficient
 *         RAM in the pool or there is no pool to draw memory from
 */
void *z_thread_aligned_alloc(size_t align, size_t size);

/**
 * @brief Allocate some memory from the current thread's resource pool
 *
 * Threads may be assigned a resource pool, which will be used to allocate
 * memory on behalf of certain kernel and driver APIs. Memory reserved
 * in this way should be freed with k_free().
 *
 * If called from an ISR, the k_malloc() system heap will be used if it exists.
 *
 * @param size Memory allocation size
 * @return A pointer to the allocated memory, or NULL if there is insufficient
 *         RAM in the pool or there is no pool to draw memory from
 */
static inline void *z_thread_malloc(size_t size)
{
	return z_thread_aligned_alloc(0, size);
}
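
/* Example (illustrative only, not part of the kernel API surface): a
 * kernel or driver API allocating a temporary object on behalf of the
 * calling thread; `struct foo` is a hypothetical type.
 *
 *	struct foo *obj = z_thread_malloc(sizeof(*obj));
 *
 *	if (obj == NULL) {
 *		return -ENOMEM;
 *	}
 *	...use obj, then release it back to the pool...
 *	k_free(obj);
 */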

/* set and clear essential thread flag */

extern void z_thread_essential_set(void);
extern void z_thread_essential_clear(void);

/* clean up when a thread is aborted */

#if defined(CONFIG_THREAD_MONITOR)
extern void z_thread_monitor_exit(struct k_thread *thread);
#else
#define z_thread_monitor_exit(thread) \
	do {/* nothing */ \
	} while (false)
#endif /* CONFIG_THREAD_MONITOR */

#ifdef CONFIG_USE_SWITCH
/* This is traditionally an arch function, but when the switch-based
 * z_swap() is in use it's a simple inline provided by the kernel.
 */
static ALWAYS_INLINE void
arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
{
	thread->swap_retval = value;
}
#endif

static ALWAYS_INLINE void
z_thread_return_value_set_with_data(struct k_thread *thread,
				    unsigned int value,
				    void *data)
{
	arch_thread_return_value_set(thread, value);
	thread->base.swap_data = data;
}
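
/* Typical (illustrative) use: when waking a thread pended on a kernel
 * object, the waker publishes both the z_swap() return value and an
 * optional data pointer that the woken thread picks up from its
 * swap_data slot. `msg` below is a placeholder for the object-specific
 * payload:
 *
 *	z_thread_return_value_set_with_data(pending_thread, 0, msg);
 *	z_ready_thread(pending_thread);
 */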

#ifdef CONFIG_SMP
extern void z_smp_init(void);
extern void smp_timer_init(void);
#endif

extern void z_early_boot_rand_get(uint8_t *buf, size_t length);

#if CONFIG_STACK_POINTER_RANDOM
extern int z_stack_adjust_initialized;
#endif

extern struct k_thread z_main_thread;

#ifdef CONFIG_MULTITHREADING
extern struct k_thread z_idle_threads[CONFIG_MP_NUM_CPUS];
#endif
K_KERNEL_PINNED_STACK_ARRAY_EXTERN(z_interrupt_stacks, CONFIG_MP_NUM_CPUS,
				   CONFIG_ISR_STACK_SIZE);

#ifdef CONFIG_GEN_PRIV_STACKS
extern uint8_t *z_priv_stack_find(k_thread_stack_t *stack);
#endif

#ifdef CONFIG_USERSPACE
bool z_stack_is_user_capable(k_thread_stack_t *stack);

/* Memory domain setup hook, called from z_setup_new_thread() */
void z_mem_domain_init_thread(struct k_thread *thread);

/* Memory domain teardown hook, called from z_thread_abort() */
void z_mem_domain_exit_thread(struct k_thread *thread);

/* This spinlock:
 *
 * - Protects the full set of active k_mem_domain objects and their contents
 * - Serializes calls to arch_mem_domain_* APIs
 *
 * If architecture code needs to access k_mem_domain structures or the
 * partitions they contain at any other point, this spinlock should be held.
 * Uniprocessor systems can get away with just locking interrupts but this is
 * not recommended.
 */
extern struct k_spinlock z_mem_domain_lock;
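
/* Illustrative sketch of the expected locking discipline when arch code
 * inspects a domain's partitions outside the arch_mem_domain_* callbacks
 * (`domain` and `inspect_partition` are placeholders):
 *
 *	k_spinlock_key_t key = k_spin_lock(&z_mem_domain_lock);
 *
 *	for (int i = 0; i < CONFIG_MAX_DOMAIN_PARTITIONS; i++) {
 *		inspect_partition(&domain->partitions[i]);
 *	}
 *	k_spin_unlock(&z_mem_domain_lock, key);
 */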
#endif /* CONFIG_USERSPACE */

#ifdef CONFIG_GDBSTUB
struct gdb_ctx;

/* Should be called by the arch layer. This is the gdbstub main loop;
 * it communicates synchronously with gdb on the host.
 */
extern int z_gdb_main_loop(struct gdb_ctx *ctx, bool start);
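
/* Illustrative sketch only: an architecture's debug exception handler
 * would hand control to the stub roughly like this (handler name and
 * context variable are hypothetical):
 *
 *	static struct gdb_ctx debug_ctx;
 *
 *	void z_arch_debug_exception_handler(void)
 *	{
 *		(void)z_gdb_main_loop(&debug_ctx, false);
 *	}
 */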
#endif

#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
void z_thread_mark_switched_in(void);
void z_thread_mark_switched_out(void);
#else

/**
 * @brief Called after a thread has been selected to run
 */
#define z_thread_mark_switched_in()

/**
 * @brief Called before a thread has been selected to run
 */
#define z_thread_mark_switched_out()

#endif /* CONFIG_INSTRUMENT_THREAD_SWITCHING */
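
/* Illustrative sketch only: arch context-switch code is expected to
 * bracket the actual switch with these hooks, roughly as
 *
 *	z_thread_mark_switched_out();
 *	...save outgoing registers, restore incoming registers...
 *	z_thread_mark_switched_in();
 *
 * so the calls compile away entirely when
 * CONFIG_INSTRUMENT_THREAD_SWITCHING is disabled.
 */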

/* Init hook for page frame management, invoked immediately upon entry of
 * main thread, before POST_KERNEL tasks
 */
void z_mem_manage_init(void);

/**
 * @brief Finalize page frame management at the end of boot process.
 */
void z_mem_manage_boot_finish(void);

#define LOCKED(lck) for (k_spinlock_key_t __i = {}, \
			 __key = k_spin_lock(lck); \
			 !__i.key; \
			 k_spin_unlock(lck, __key), __i.key = 1)
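
/* LOCKED() executes its body exactly once with the given spinlock held,
 * releasing the lock when the body runs to completion (the body must not
 * break or return early). Illustrative use with placeholder names:
 *
 *	static struct k_spinlock obj_lock;
 *
 *	LOCKED(&obj_lock) {
 *		obj->ref_count++;
 *	}
 */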

#ifdef CONFIG_PM

/* When the kernel is about to go idle, it calls this function to notify the
 * power management subsystem that the kernel is ready to enter the idle state.
 *
 * At this point, the kernel has disabled interrupts and computed the maximum
 * time the system can remain idle. That idle time is passed in, and the SOC
 * interface performs whatever power operations fit within it. The power
 * management operations must halt execution of the CPU.
 *
 * This function assumes that a wake-up event has already been set up by the
 * application.
 *
 * This function is entered with interrupts disabled. It should re-enable
 * interrupts if it had entered a power state.
 */
enum pm_state pm_system_suspend(int32_t ticks);
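
/* Illustrative sketch of the expected call site, the kernel idle loop
 * (variable names are placeholders); interrupts are already locked and
 * `ticks` holds the computed maximum idle time. If no low-power state
 * was entered, the idle loop falls back to a plain CPU idle:
 *
 *	if (pm_system_suspend(ticks) == PM_STATE_ACTIVE) {
 *		k_cpu_idle();
 *	}
 */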

/**
 * Notify exit from kernel idling after PM operations
 *
 * This function notifies that the kernel has exited idling, provided the
 * corresponding pm_system_suspend() notification was handled and did not
 * return PM_STATE_ACTIVE.
 *
 * It is called from the ISR context of the event that caused the exit from
 * kernel idling, immediately after interrupts are enabled. This gives the
 * power management subsystem a chance to perform any operations before the
 * kernel switches tasks or processes nested interrupts. This is required for
 * CPU low-power states that need interrupts enabled while entering the low
 * power state, e.g. C1 on x86. In those cases, the ISR is invoked immediately
 * after the event wakes up the CPU, before the code following the CPU wait
 * gets a chance to execute. This notification can be ignored if no operation
 * needs to be done at the wake event. Alternatively,
 * pm_idle_exit_notification_disable() can be called in pm_system_suspend()
 * to disable this notification.
 */
void pm_system_resume(void);
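
/* Illustrative sketch only: the interrupt path handling the wake event
 * notifies the PM subsystem before normal processing continues
 * (`wake_event_isr` is a hypothetical handler name):
 *
 *	void wake_event_isr(const void *arg)
 *	{
 *		pm_system_resume();
 *		...handle the wake event...
 *	}
 */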

#endif

#ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
/**
 * Initialize the timing histograms for demand paging.
 */
void z_paging_histogram_init(void);

/**
 * Increment the counter in the timing histogram.
 *
 * @param hist The timing histogram to be updated.
 * @param cycles Time spent in measured operation.
 */
void z_paging_histogram_inc(struct k_mem_paging_histogram_t *hist,
			    uint32_t cycles);
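
/* Illustrative sketch of recording a timing sample in a demand-paging
 * path (`z_paging_histogram_eviction` and `do_eviction` are placeholder
 * names for a histogram instance and the measured operation):
 *
 *	uint32_t t0 = k_cycle_get_32();
 *
 *	do_eviction();
 *	z_paging_histogram_inc(&z_paging_histogram_eviction,
 *			       k_cycle_get_32() - t0);
 */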
#endif /* CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM */

#ifdef __cplusplus
}
#endif

#endif /* _ASMLANGUAGE */

#endif /* ZEPHYR_KERNEL_INCLUDE_KERNEL_INTERNAL_H_ */