/*
 * Copyright (c) 2010-2012, 2014-2015 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Architecture-independent private kernel APIs
 *
 * This file contains private kernel APIs that are not architecture-specific.
 */

#ifndef ZEPHYR_KERNEL_INCLUDE_KERNEL_INTERNAL_H_
#define ZEPHYR_KERNEL_INCLUDE_KERNEL_INTERNAL_H_

#include <zephyr/kernel.h>
#include <kernel_arch_interface.h>
#include <string.h>

#ifndef _ASMLANGUAGE

#ifdef __cplusplus
extern "C" {
#endif
/* Initialize per-CPU kernel data */
void z_init_cpu(int id);

/* Initialize a thread */
void z_init_thread_base(struct _thread_base *thread_base, int priority,
			uint32_t initial_state, unsigned int options);

/* Early boot functions */
void z_early_memset(void *dst, int c, size_t n);
void z_early_memcpy(void *dst, const void *src, size_t n);

void z_bss_zero(void);
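
/*
 * Illustrative sketch, not the actual implementation: z_bss_zero() is
 * expected to clear the .bss region using the early variant above, since
 * the regular memset() may itself reside in memory that is not yet
 * initialized. Assuming the usual Zephyr linker symbols __bss_start and
 * __bss_end delimit the region:
 *
 *	z_early_memset(__bss_start, 0, __bss_end - __bss_start);
 */
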
#ifdef CONFIG_XIP
void z_data_copy(void);
#else
static inline void z_data_copy(void)
{
	/* Do nothing */
}
#endif

#ifdef CONFIG_LINKER_USE_BOOT_SECTION
void z_bss_zero_boot(void);
#else
static inline void z_bss_zero_boot(void)
{
	/* Do nothing */
}
#endif

#ifdef CONFIG_LINKER_USE_PINNED_SECTION
void z_bss_zero_pinned(void);
#else
static inline void z_bss_zero_pinned(void)
{
	/* Do nothing */
}
#endif

FUNC_NORETURN void z_cstart(void);

void z_device_state_init(void);

extern FUNC_NORETURN void z_thread_entry(k_thread_entry_t entry,
			  void *p1, void *p2, void *p3);

extern char *z_setup_new_thread(struct k_thread *new_thread,
				k_thread_stack_t *stack, size_t stack_size,
				k_thread_entry_t entry,
				void *p1, void *p2, void *p3,
				int prio, uint32_t options, const char *name);

/**
 * @brief Allocate aligned memory from the current thread's resource pool
 *
 * Threads may be assigned a resource pool, which will be used to allocate
 * memory on behalf of certain kernel and driver APIs. Memory reserved
 * in this way should be freed with k_free().
 *
 * If called from an ISR, the k_malloc() system heap will be used if it exists.
 *
 * @param align Required memory alignment
 * @param size Memory allocation size
 * @return A pointer to the allocated memory, or NULL if there is insufficient
 * RAM in the pool or there is no pool to draw memory from
 */
void *z_thread_aligned_alloc(size_t align, size_t size);

/**
 * @brief Allocate some memory from the current thread's resource pool
 *
 * Threads may be assigned a resource pool, which will be used to allocate
 * memory on behalf of certain kernel and driver APIs. Memory reserved
 * in this way should be freed with k_free().
 *
 * If called from an ISR, the k_malloc() system heap will be used if it exists.
 *
 * @param size Memory allocation size
 * @return A pointer to the allocated memory, or NULL if there is insufficient
 * RAM in the pool or there is no pool to draw memory from
 */
static inline void *z_thread_malloc(size_t size)
{
	return z_thread_aligned_alloc(0, size);
}
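
/*
 * Illustrative sketch, not kernel code: an internal API drawing from the
 * caller's resource pool must tolerate allocation failure and release the
 * memory with k_free(). The struct name here is hypothetical:
 *
 *	struct foo_ctx *ctx = z_thread_malloc(sizeof(*ctx));
 *
 *	if (ctx == NULL) {
 *		return -ENOMEM;
 *	}
 *	...
 *	k_free(ctx);
 */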

/* set and clear essential thread flag */

extern void z_thread_essential_set(void);
extern void z_thread_essential_clear(void);

/* clean up when a thread is aborted */

#if defined(CONFIG_THREAD_MONITOR)
extern void z_thread_monitor_exit(struct k_thread *thread);
#else
#define z_thread_monitor_exit(thread) \
	do { /* nothing */ \
	} while (false)
#endif /* CONFIG_THREAD_MONITOR */

#ifdef CONFIG_USE_SWITCH
/* This is traditionally an arch function, but when the switch-based
 * z_swap() is in use it's a simple inline provided by the kernel.
 */
static ALWAYS_INLINE void
arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
{
	thread->swap_retval = value;
}
#endif

static ALWAYS_INLINE void
z_thread_return_value_set_with_data(struct k_thread *thread,
				   unsigned int value,
				   void *data)
{
	arch_thread_return_value_set(thread, value);
	thread->base.swap_data = data;
}
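
/*
 * Illustrative sketch, not the kernel's actual wakeup path: a blocking
 * primitive waking a waiter could hand back both a swap return value and
 * a payload pointer before making the thread runnable (z_ready_thread()
 * is the scheduler helper assumed here):
 *
 *	z_thread_return_value_set_with_data(waiter, 0, msg);
 *	z_ready_thread(waiter);
 */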

#ifdef CONFIG_SMP
extern void z_smp_init(void);
#ifdef CONFIG_SYS_CLOCK_EXISTS
extern void smp_timer_init(void);
#endif
#endif

extern void z_early_rand_get(uint8_t *buf, size_t length);

#if CONFIG_STACK_POINTER_RANDOM
extern int z_stack_adjust_initialized;
#endif

extern struct k_thread z_main_thread;

#ifdef CONFIG_MULTITHREADING
extern struct k_thread z_idle_threads[CONFIG_MP_MAX_NUM_CPUS];
#endif
K_KERNEL_PINNED_STACK_ARRAY_DECLARE(z_interrupt_stacks, CONFIG_MP_MAX_NUM_CPUS,
				    CONFIG_ISR_STACK_SIZE);

#ifdef CONFIG_GEN_PRIV_STACKS
extern uint8_t *z_priv_stack_find(k_thread_stack_t *stack);
#endif

/* Calculate stack usage. */
int z_stack_space_get(const uint8_t *stack_start, size_t size, size_t *unused_ptr);
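
/*
 * Illustrative sketch, assuming a thread with a populated stack_info (as
 * with CONFIG_THREAD_STACK_INFO): computing how much of its stack has
 * been used:
 *
 *	size_t unused;
 *
 *	if (z_stack_space_get((const uint8_t *)thread->stack_info.start,
 *			      thread->stack_info.size, &unused) == 0) {
 *		size_t used = thread->stack_info.size - unused;
 *	}
 */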

#ifdef CONFIG_USERSPACE
bool z_stack_is_user_capable(k_thread_stack_t *stack);

/* Memory domain setup hook, called from z_setup_new_thread() */
void z_mem_domain_init_thread(struct k_thread *thread);

/* Memory domain teardown hook, called from z_thread_abort() */
void z_mem_domain_exit_thread(struct k_thread *thread);

/* This spinlock:
 *
 * - Protects the full set of active k_mem_domain objects and their contents
 * - Serializes calls to arch_mem_domain_* APIs
 *
 * If architecture code needs to access k_mem_domain structures or the
 * partitions they contain at any other point, this spinlock should be held.
 * Uniprocessor systems can get away with just locking interrupts, but this
 * is not recommended.
 */
extern struct k_spinlock z_mem_domain_lock;
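
/*
 * Illustrative sketch, not taken from any arch port: architecture code
 * touching a domain's partitions outside the arch_mem_domain_* hooks
 * would bracket the access with this lock:
 *
 *	k_spinlock_key_t key = k_spin_lock(&z_mem_domain_lock);
 *
 *	... inspect or update domain->partitions[] ...
 *
 *	k_spin_unlock(&z_mem_domain_lock, key);
 */
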
#endif /* CONFIG_USERSPACE */

#ifdef CONFIG_GDBSTUB
struct gdb_ctx;

/* Should be called by the arch layer. This is the gdbstub main loop;
 * it synchronously communicates with GDB on the host.
 */
extern int z_gdb_main_loop(struct gdb_ctx *ctx);
#endif

#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
void z_thread_mark_switched_in(void);
void z_thread_mark_switched_out(void);
#else

/**
 * @brief Called after a thread has been selected to run
 */
#define z_thread_mark_switched_in()

/**
 * @brief Called before a thread is switched out
 */
#define z_thread_mark_switched_out()

#endif /* CONFIG_INSTRUMENT_THREAD_SWITCHING */
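
/*
 * Illustrative sketch, assuming instrumentation is enabled: the swap path
 * brackets the context switch with these hooks:
 *
 *	z_thread_mark_switched_out();
 *	... arch-specific context switch ...
 *	z_thread_mark_switched_in();
 */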

/* Init hook for page frame management, invoked immediately upon entry to
 * the main thread, before POST_KERNEL tasks
 */
void z_mem_manage_init(void);

/**
 * @brief Finalize page frame management at the end of the boot process.
 */
void z_mem_manage_boot_finish(void);

void z_handle_obj_poll_events(sys_dlist_t *events, uint32_t state);

#ifdef CONFIG_PM

/**
 * When the kernel is about to go idle, it calls this function to notify the
 * power management subsystem that the kernel is ready to enter the idle
 * state.
 *
 * At this point, the kernel has disabled interrupts and computed the maximum
 * time the system can remain idle; that time is passed in @a ticks. The SoC
 * interface performs whatever power operations can be completed in the
 * available time. The power management operations must halt execution of
 * the CPU.
 *
 * This function assumes that a wake-up event has already been set up by the
 * application.
 *
 * This function is entered with interrupts disabled. It must re-enable
 * interrupts if it entered a power state.
 *
 * @param ticks Maximum number of ticks the system may remain idle.
 *
 * @return True if the system suspended, otherwise false.
 */
bool pm_system_suspend(int32_t ticks);
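
/*
 * Illustrative sketch, simplified and assumed rather than copied from the
 * idle loop: the kernel falls back to a plain CPU wait when the PM
 * subsystem does not take over:
 *
 *	if (!pm_system_suspend(ticks)) {
 *		k_cpu_idle();
 *	}
 */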

/**
 * Notify exit from kernel idling after PM operations
 *
 * This function notifies exit from kernel idling if a corresponding
 * pm_system_suspend() notification was handled and did not return
 * PM_STATE_ACTIVE.
 *
 * It is called from the ISR context of the event that caused the exit from
 * kernel idling, immediately after interrupts are re-enabled. This gives
 * the SoC interface a chance to perform any operations before the kernel
 * switches tasks or processes nested interrupts. It is required for CPU
 * low-power states that need interrupts enabled while entering the state,
 * e.g. C1 on x86: in those cases the ISR is invoked immediately after the
 * event wakes the CPU, before the code following the CPU wait instruction
 * gets a chance to execute. This notification can be ignored if nothing
 * needs to be done at the wake event.
 */
void pm_system_resume(void);

#endif

#ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
/**
 * Initialize the timing histograms for demand paging.
 */
void z_paging_histogram_init(void);

/**
 * Increment the counter in the timing histogram.
 *
 * @param hist The timing histogram to be updated.
 * @param cycles Time spent in the measured operation, in cycles.
 */
void z_paging_histogram_inc(struct k_mem_paging_histogram_t *hist,
			    uint32_t cycles);
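
/*
 * Illustrative sketch, with a hypothetical histogram instance name: a
 * demand-paging path times an operation in cycles and records it:
 *
 *	uint32_t start = k_cycle_get_32();
 *
 *	... perform the eviction or backing-store operation ...
 *
 *	z_paging_histogram_inc(&z_paging_histogram_eviction,
 *			       k_cycle_get_32() - start);
 */
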
#endif /* CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM */

#ifdef CONFIG_OBJ_CORE_STATS_THREAD
int z_thread_stats_raw(struct k_obj_core *obj_core, void *stats);
int z_thread_stats_query(struct k_obj_core *obj_core, void *stats);
int z_thread_stats_reset(struct k_obj_core *obj_core);
int z_thread_stats_disable(struct k_obj_core *obj_core);
int z_thread_stats_enable(struct k_obj_core *obj_core);
#endif

#ifdef CONFIG_OBJ_CORE_STATS_SYSTEM
int z_cpu_stats_raw(struct k_obj_core *obj_core, void *stats);
int z_cpu_stats_query(struct k_obj_core *obj_core, void *stats);

int z_kernel_stats_raw(struct k_obj_core *obj_core, void *stats);
int z_kernel_stats_query(struct k_obj_core *obj_core, void *stats);
#endif

#ifdef __cplusplus
}
#endif

#endif /* _ASMLANGUAGE */

#endif /* ZEPHYR_KERNEL_INCLUDE_KERNEL_INTERNAL_H_ */