/*
 * Copyright (c) 2019 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @defgroup arch-interface Architecture Interface
 * @ingroup internal_api
 * @brief Internal kernel APIs with public scope
 *
 * Any public kernel APIs that are implemented as inline functions and need to
 * call architecture-specific APIs will have the prototypes for those
 * architecture-specific APIs here. Architecture APIs that aren't used in this
 * way go in kernel/include/kernel_arch_interface.h.
 *
 * The set of architecture-specific APIs used internally by public macros and
 * inline functions in public headers is also specified and documented here.
 *
 * For all macros and inline function prototypes described herein, <arch/cpu.h>
 * must eventually pull in full definitions for all of them (the actual macro
 * defines and inline function bodies).
 *
 * include/zephyr/kernel.h and other public headers depend on definitions in
 * this header.
 */
#ifndef ZEPHYR_INCLUDE_ARCH_ARCH_INTERFACE_H_
#define ZEPHYR_INCLUDE_ARCH_ARCH_INTERFACE_H_

#ifndef _ASMLANGUAGE
#include <zephyr/toolchain.h>
#include <stddef.h>
#include <zephyr/types.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/irq_offload.h>

#ifdef __cplusplus
extern "C" {
#endif

/* NOTE: We cannot pull in kernel.h here, need some forward declarations */
struct arch_esf;
struct k_thread;
struct k_mem_domain;

typedef struct z_thread_stack_element k_thread_stack_t;

typedef void (*k_thread_entry_t)(void *p1, void *p2, void *p3);

/**
 * @defgroup arch-timing Architecture timing APIs
 * @ingroup arch-interface
 * @{
 */
/**
 * Obtain the current cycle count, in units specified by
 * CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC.  While this is historically
 * specified as part of the architecture API, in practice virtually
 * all platforms forward it to the sys_clock_cycle_get_32() API
 * provided by the timer driver.
 *
 * @see k_cycle_get_32()
 *
 * @return The current cycle time.  This should count up monotonically
 * through the full 32 bit space, wrapping at 0xffffffff.  Hardware
 * with fewer bits of precision in the timer is expected to synthesize
 * a 32 bit count.
 */
static inline uint32_t arch_k_cycle_get_32(void);
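
/*
 * Illustrative usage sketch (not itself part of the arch API): measuring an
 * elapsed interval with the 32 bit counter. Unsigned subtraction yields the
 * correct delta across the 0xffffffff wrap, provided the measured interval
 * fits in 32 bits (do_work() is a hypothetical workload):
 *
 *	uint32_t start = arch_k_cycle_get_32();
 *
 *	do_work();
 *
 *	uint32_t delta = arch_k_cycle_get_32() - start;
 */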

/**
 * As for arch_k_cycle_get_32(), but with a 64 bit return value.  Not
 * all timer hardware has a 64 bit counter; this needs to be implemented
 * only if CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER is set.
 *
 * @see arch_k_cycle_get_32()
 *
 * @return The current cycle time.  This should count up monotonically
 * through the full 64 bit space, wrapping at 2^64-1.  Hardware with
 * fewer bits of precision in the timer is generally not expected to
 * implement this API.
 */
static inline uint64_t arch_k_cycle_get_64(void);

/** @} */


/**
 * @addtogroup arch-threads
 * @{
 */

/**
 * @def ARCH_THREAD_STACK_RESERVED
 *
 * @see K_THREAD_STACK_RESERVED
 */

/**
 * @def ARCH_STACK_PTR_ALIGN
 *
 * Required alignment of the CPU's stack pointer register value, dictated by
 * hardware constraints and the ABI calling convention.
 *
 * @see Z_STACK_PTR_ALIGN
 */

/**
 * @def ARCH_THREAD_STACK_OBJ_ALIGN(size)
 *
 * Required alignment of the lowest address of a stack object.
 *
 * Optional definition.
 *
 * @see Z_THREAD_STACK_OBJ_ALIGN
 */

/**
 * @def ARCH_THREAD_STACK_SIZE_ADJUST(size)
 * @brief Round up a stack buffer size to alignment constraints
 *
 * Adjust a requested stack buffer size to the true size of its underlying
 * buffer, defined as the area usable for thread stack context and thread-
 * local storage.
 *
 * The size value passed here does not include storage reserved for platform
 * data.
 *
 * The returned value is either the same size provided (if already properly
 * aligned), or rounded up to satisfy alignment constraints.  Calculations
 * performed here *must* be idempotent.
 *
 * Optional definition. If undefined, stack buffer sizes are either:
 * - Rounded up to the next power of two if user mode is enabled on an arch
 *   with an MPU that requires such alignment
 * - Rounded up to ARCH_STACK_PTR_ALIGN
 *
 * @see Z_THREAD_STACK_SIZE_ADJUST
 */

/**
 * @def ARCH_KERNEL_STACK_RESERVED
 * @brief MPU guard size for kernel-only stacks
 *
 * If MPU stack guards are used to catch stack overflows, specify the
 * amount of space reserved in kernel stack objects. If guard sizes are
 * context dependent, this should be the minimum guard size, with any
 * remaining space carved out as needed.
 *
 * Optional definition, defaults to 0.
 *
 * @see K_KERNEL_STACK_RESERVED
 */

/**
 * @def ARCH_KERNEL_STACK_OBJ_ALIGN
 * @brief Required alignment of the lowest address of a kernel-only stack.
 */

/** @} */

/**
 * @addtogroup arch-pm
 * @{
 */

/**
 * @brief Power save idle routine
 *
 * This function will be called by the kernel idle loop or possibly within
 * an implementation of z_pm_save_idle in the kernel when the
 * '_pm_save_flag' variable is non-zero.
 *
 * Architectures that do not implement power management instructions may
 * immediately return, otherwise a power-saving instruction should be
 * issued to wait for an interrupt.
 *
 * @note The function is expected to return after the interrupt that has
 * caused the CPU to exit power-saving mode has been serviced, although
 * this is not a firm requirement.
 *
 * @see k_cpu_idle()
 */
void arch_cpu_idle(void);

/**
 * @brief Atomically re-enable interrupts and enter low power mode
 *
 * The requirements for arch_cpu_atomic_idle() are as follows:
 *
 * -# Enabling interrupts and entering a low-power mode needs to be
 *    atomic, i.e. there should be no period of time where interrupts are
 *    enabled before the processor enters a low-power mode.  See the comments
 *    in k_lifo_get() for an example of the race condition that occurs
 *    if this requirement is not met.
 *
 * -# After waking up from the low-power mode, the interrupt lockout state
 *    must be restored as indicated in the 'key' input parameter.
 *
 * @see k_cpu_atomic_idle()
 *
 * @param key Lockout key returned by previous invocation of arch_irq_lock()
 */
void arch_cpu_atomic_idle(unsigned int key);
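
/*
 * Illustrative wait-for-event sketch enabled by this API ("event_pending"
 * is a hypothetical flag set from an ISR). Without the atomicity guarantee,
 * the wakeup interrupt could fire between re-enabling interrupts and the
 * idle instruction, and the CPU would sleep indefinitely:
 *
 *	unsigned int key = arch_irq_lock();
 *
 *	if (!event_pending) {
 *		arch_cpu_atomic_idle(key);
 *	} else {
 *		arch_irq_unlock(key);
 *	}
 */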

/** @} */


/**
 * @addtogroup arch-smp
 * @{
 */

/**
 * Per-cpu entry function
 *
 * @param data context parameter, implementation specific
 */
typedef void (*arch_cpustart_t)(void *data);

/**
 * @brief Start a numbered CPU on a MP-capable system
 *
 * This starts and initializes a specific CPU.  The main thread on startup is
 * running on CPU zero; other processors are numbered sequentially.  On return
 * from this function, the CPU is known to have begun operating and will enter
 * the provided function.  Its interrupts will be initialized but disabled such
 * that irq_unlock() with the provided key will work to enable them.
 *
 * Normally, in SMP mode this function will be called during kernel
 * initialization and should not be used as a user API.  But it is defined here
 * for special-purpose apps which want Zephyr running on one core while using
 * others for design-specific processing.
 *
 * @param cpu_num Integer number of the CPU
 * @param stack Stack memory for the CPU
 * @param sz Stack buffer size, in bytes
 * @param fn Function to begin running on the CPU.
 * @param arg Untyped argument to be passed to "fn"
 */
void arch_cpu_start(int cpu_num, k_thread_stack_t *stack, int sz,
		    arch_cpustart_t fn, void *arg);

/**
 * @brief Return CPU power status
 *
 * @param cpu_num Integer number of the CPU
 *
 * @return True if the specified CPU is currently active (powered up and
 * running), false otherwise
 */
bool arch_cpu_active(int cpu_num);

/** @} */


/**
 * @addtogroup arch-irq
 * @{
 */

/**
 * Lock interrupts on the current CPU
 *
 * @see irq_lock()
 */
static inline unsigned int arch_irq_lock(void);

/**
 * Unlock interrupts on the current CPU
 *
 * @see irq_unlock()
 */
static inline void arch_irq_unlock(unsigned int key);

/**
 * Test if calling arch_irq_unlock() with this key would unlock irqs
 *
 * @param key value returned by arch_irq_lock()
 * @return true if interrupts were unlocked prior to the arch_irq_lock()
 * call that produced the key argument.
 */
static inline bool arch_irq_unlocked(unsigned int key);
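
/*
 * Illustrative nesting sketch: lock/unlock pairs may nest, because each
 * unlock restores the state captured by its matching lock. Here
 * arch_irq_unlocked(key) would report whether interrupts were enabled
 * before the outermost lock was taken:
 *
 *	unsigned int key = arch_irq_lock();
 *	unsigned int key2 = arch_irq_lock();
 *
 *	arch_irq_unlock(key2);	// IRQs remain locked (inner state restored)
 *	arch_irq_unlock(key);	// original interrupt state restored
 */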

#ifdef CONFIG_ZERO_LATENCY_IRQS

/**
 * @brief Lock all interrupts including zero latency interrupts on the current CPU.
 *
 * @details Intended to be used when breaking the promise of zero latency interrupts is
 * unavoidable and necessary, like accessing shared core peripherals or powering down the
 * SoC.
 *
 * @warning This lock breaks the promise of zero latency interrupts.
 */
static inline unsigned int arch_zli_lock(void);

/**
 * @brief Unlock all interrupts including zero latency interrupts on the current CPU
 *
 * @see arch_zli_lock()
 */
static inline void arch_zli_unlock(unsigned int key);

#endif

/**
 * Disable the specified interrupt line
 *
 * @note The behavior of interrupts that arrive after this call
 * returns and before the corresponding call to arch_irq_enable() is
 * undefined.  The hardware is not required to latch and deliver such
 * an interrupt, though on some architectures that may work.  Other
 * architectures will simply lose such an interrupt and never deliver
 * it.  Many drivers and subsystems are not tolerant of such dropped
 * interrupts and it is the job of the application layer to ensure
 * that behavior remains correct.
 *
 * @see irq_disable()
 */
void arch_irq_disable(unsigned int irq);

/**
 * Enable the specified interrupt line
 *
 * @see irq_enable()
 */
void arch_irq_enable(unsigned int irq);

/**
 * Test if an interrupt line is enabled
 *
 * @see irq_is_enabled()
 */
int arch_irq_is_enabled(unsigned int irq);

/**
 * Arch-specific hook to install a dynamic interrupt.
 *
 * @param irq IRQ line number
 * @param priority Interrupt priority
 * @param routine Interrupt service routine
 * @param parameter ISR parameter
 * @param flags Arch-specific IRQ configuration flag
 *
 * @return The vector assigned to this interrupt
 */
int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
			     void (*routine)(const void *parameter),
			     const void *parameter, uint32_t flags);
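
/*
 * Illustrative sketch of installing a dynamic interrupt; applications would
 * normally use the portable irq_connect_dynamic() wrapper rather than this
 * arch hook directly. MY_IRQ, MY_PRIO and my_isr are hypothetical:
 *
 *	static void my_isr(const void *param)
 *	{
 *		// handle the interrupt
 *	}
 *
 *	arch_irq_connect_dynamic(MY_IRQ, MY_PRIO, my_isr, NULL, 0);
 *	arch_irq_enable(MY_IRQ);
 */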

/**
 * Arch-specific hook to dynamically uninstall a shared interrupt.
 * If the interrupt is not being shared, then the associated
 * _sw_isr_table entry will be replaced by (NULL, z_irq_spurious)
 * (default entry).
 *
 * @param irq IRQ line number
 * @param priority Interrupt priority
 * @param routine Interrupt service routine
 * @param parameter ISR parameter
 * @param flags Arch-specific IRQ configuration flag
 *
 * @return 0 in case of success, negative value otherwise
 */
int arch_irq_disconnect_dynamic(unsigned int irq, unsigned int priority,
				void (*routine)(const void *parameter),
				const void *parameter, uint32_t flags);

/**
 * @def ARCH_IRQ_CONNECT(irq, pri, isr, arg, flags)
 *
 * @see IRQ_CONNECT()
 */

#ifdef CONFIG_PCIE
/**
 * @def ARCH_PCIE_IRQ_CONNECT(bdf, irq, pri, isr, arg, flags)
 *
 * @see PCIE_IRQ_CONNECT()
 */
#endif /* CONFIG_PCIE */

/**
 * @def ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p)
 *
 * @see IRQ_DIRECT_CONNECT()
 */

/**
 * @def ARCH_ISR_DIRECT_PM()
 *
 * @see ISR_DIRECT_PM()
 */

/**
 * @def ARCH_ISR_DIRECT_HEADER()
 *
 * @see ISR_DIRECT_HEADER()
 */

/**
 * @def ARCH_ISR_DIRECT_FOOTER(swap)
 *
 * @see ISR_DIRECT_FOOTER()
 */

/**
 * @def ARCH_ISR_DIRECT_DECLARE(name)
 *
 * @see ISR_DIRECT_DECLARE()
 */

#ifndef CONFIG_PCIE_CONTROLLER
/**
 * @brief Arch-specific hook for allocating IRQs
 *
 * Note: the implementation should disable/enable IRQs as appropriate to
 * avoid concurrency issues. Also, an allocated IRQ is assumed to be in
 * use; a subsequent call to arch_irq_is_used() should return true for it.
 *
 * @return The newly allocated IRQ or UINT_MAX on error.
 */
unsigned int arch_irq_allocate(void);

/**
 * @brief Arch-specific hook for declaring an IRQ being used
 *
 * Note: the implementation should disable/enable IRQs as appropriate to
 * avoid concurrency issues.
 *
 * @param irq the IRQ to declare as being used
 */
void arch_irq_set_used(unsigned int irq);

/**
 * @brief Arch-specific hook for checking whether an IRQ is already in use
 *
 * @param irq the IRQ to check
 *
 * @return true if the IRQ is in use, false otherwise
 */
bool arch_irq_is_used(unsigned int irq);
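
/*
 * Illustrative allocation sketch: a newly allocated IRQ is implicitly
 * marked as in use, so no separate arch_irq_set_used() call is needed
 * for it.
 *
 *	unsigned int irq = arch_irq_allocate();
 *
 *	if (irq == UINT_MAX) {
 *		// no free IRQ line available
 *	}
 *	// arch_irq_is_used(irq) is now expected to return true
 */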

#endif /* CONFIG_PCIE_CONTROLLER */

/**
 * @def ARCH_EXCEPT(reason_p)
 *
 * Generate a software induced fatal error.
 *
 * If the caller is running in user mode, only K_ERR_KERNEL_OOPS or
 * K_ERR_STACK_CHK_FAIL may be induced.
 *
 * This should ideally generate a software trap, with exception context
 * indicating state when this was invoked. General purpose register state at
 * the time of trap should not be disturbed from the calling context.
 *
 * @param reason_p K_ERR_ scoped reason code for the fatal error.
 */

#ifdef CONFIG_IRQ_OFFLOAD
/**
 * Run a function in interrupt context.
 *
 * Implementations should invoke an exception such that the kernel goes through
 * its interrupt handling dispatch path, including switching to the interrupt
 * stack, and runs the provided routine and parameter.
 *
 * The only intended use-case for this function is for test code to verify
 * the correctness of kernel APIs in interrupt handling context. This API
 * is not intended for real applications.
 *
 * @see irq_offload()
 *
 * @param routine Function to run in interrupt context
 * @param parameter Value to pass to the function when invoked
 */
void arch_irq_offload(irq_offload_routine_t routine, const void *parameter);
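
/*
 * Illustrative test-code sketch: exercise a kernel API from interrupt
 * context. The offloaded routine runs on the interrupt stack via the
 * architecture's exception path:
 *
 *	static void offload_fn(const void *param)
 *	{
 *		// runs in interrupt context; k_is_in_isr() returns true here
 *	}
 *
 *	arch_irq_offload(offload_fn, NULL);
 */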


/**
 * Initialize the architecture-specific portion of the irq_offload subsystem
 */
void arch_irq_offload_init(void);

#endif /* CONFIG_IRQ_OFFLOAD */

/** @} */


/**
 * @defgroup arch-smp Architecture-specific SMP APIs
 * @ingroup arch-interface
 * @{
 */
#ifdef CONFIG_SMP
/** Return the CPU struct for the currently executing CPU */
static inline struct _cpu *arch_curr_cpu(void);


/**
 * @brief Processor hardware ID
 *
 * Most multiprocessor architectures have a low-level unique ID value
 * associated with the current CPU that can be retrieved rapidly and
 * efficiently in kernel context.  Note that while the numbering of
 * the CPUs is guaranteed to be unique, the values are
 * platform-defined. In particular, they are not guaranteed to match
 * Zephyr's own sequential CPU IDs (even though on some platforms they
 * do).
 *
 * @note There is an inherent race with this API: the system may
 * preempt the current thread and migrate it to another CPU before the
 * value is used.  Safe usage requires knowing the migration is
 * impossible (e.g. because the code is in interrupt context, holds a
 * spinlock, or cannot migrate due to k_cpu_mask state).
 *
 * @return Unique ID for currently-executing CPU
 */
static inline uint32_t arch_proc_id(void);

/**
 * Broadcast an interrupt to all CPUs
 *
 * This will invoke z_sched_ipi() on all other CPUs in the system.
 */
void arch_sched_broadcast_ipi(void);

/**
 * Direct IPIs to the specified CPUs
 *
 * This will invoke z_sched_ipi() on the CPUs identified by @a cpu_bitmap.
 *
 * @param cpu_bitmap A bitmap indicating which CPUs need the IPI
 */
void arch_sched_directed_ipi(uint32_t cpu_bitmap);

/**
 * @brief Architecture-specific hook for SMP initialization
 *
 * @return 0 on success, negative error code on failure
 */
int arch_smp_init(void);

#endif /* CONFIG_SMP */

/**
 * @brief Returns the number of CPUs
 *
 * For most systems this will be the same as CONFIG_MP_MAX_NUM_CPUS;
 * however, some systems may determine this at runtime instead.
 *
 * @return the number of CPUs
 */
static inline unsigned int arch_num_cpus(void);

/** @} */


/**
 * @defgroup arch-userspace Architecture-specific userspace APIs
 * @ingroup arch-interface
 * @{
 */

#ifdef CONFIG_USERSPACE
#include <zephyr/arch/syscall.h>

/**
 * Invoke a system call with 0 arguments.
 *
 * No general-purpose register state other than return value may be preserved
 * when transitioning from supervisor mode back down to user mode for
 * security reasons.
 *
 * It is required that all arguments be stored in registers when elevating
 * privileges from user to supervisor mode.
 *
 * Processing of the syscall takes place on a separate kernel stack. Interrupts
 * should be enabled when invoking the system call marshallers from the
 * dispatch table. Thread preemption may occur when handling system calls.
 *
 * Call IDs are untrusted and must be bounds-checked, as the value is used to
 * index the system call dispatch table, containing function pointers to the
 * specific system call code.
 *
 * @param call_id System call ID
 * @return Return value of the system call. Void system calls return 0 here.
 */
static inline uintptr_t arch_syscall_invoke0(uintptr_t call_id);
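
/*
 * Hypothetical sketch of how a user-mode syscall stub marshals its
 * arguments (real stubs are generated by the build system, and
 * K_SYSCALL_EXAMPLE is an invented ID):
 *
 *	static inline int example_syscall(int a, int b)
 *	{
 *		return (int)arch_syscall_invoke2((uintptr_t)a, (uintptr_t)b,
 *						 K_SYSCALL_EXAMPLE);
 *	}
 */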

/**
 * Invoke a system call with 1 argument.
 *
 * @see arch_syscall_invoke0()
 *
 * @param arg1 First argument to the system call.
 * @param call_id System call ID, will be bounds-checked and used to reference
 *	          the kernel-side dispatch table
 * @return Return value of the system call. Void system calls return 0 here.
 */
static inline uintptr_t arch_syscall_invoke1(uintptr_t arg1,
					     uintptr_t call_id);

/**
 * Invoke a system call with 2 arguments.
 *
 * @see arch_syscall_invoke0()
 *
 * @param arg1 First argument to the system call.
 * @param arg2 Second argument to the system call.
 * @param call_id System call ID, will be bounds-checked and used to reference
 *	          the kernel-side dispatch table
 * @return Return value of the system call. Void system calls return 0 here.
 */
static inline uintptr_t arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t call_id);

/**
 * Invoke a system call with 3 arguments.
 *
 * @see arch_syscall_invoke0()
 *
 * @param arg1 First argument to the system call.
 * @param arg2 Second argument to the system call.
 * @param arg3 Third argument to the system call.
 * @param call_id System call ID, will be bounds-checked and used to reference
 *	          the kernel-side dispatch table
 * @return Return value of the system call. Void system calls return 0 here.
 */
static inline uintptr_t arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3,
					     uintptr_t call_id);

/**
 * Invoke a system call with 4 arguments.
 *
 * @see arch_syscall_invoke0()
 *
 * @param arg1 First argument to the system call.
 * @param arg2 Second argument to the system call.
 * @param arg3 Third argument to the system call.
 * @param arg4 Fourth argument to the system call.
 * @param call_id System call ID, will be bounds-checked and used to reference
 *	          the kernel-side dispatch table
 * @return Return value of the system call. Void system calls return 0 here.
 */
static inline uintptr_t arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t call_id);

/**
 * Invoke a system call with 5 arguments.
 *
 * @see arch_syscall_invoke0()
 *
 * @param arg1 First argument to the system call.
 * @param arg2 Second argument to the system call.
 * @param arg3 Third argument to the system call.
 * @param arg4 Fourth argument to the system call.
 * @param arg5 Fifth argument to the system call.
 * @param call_id System call ID, will be bounds-checked and used to reference
 *	          the kernel-side dispatch table
 * @return Return value of the system call. Void system calls return 0 here.
 */
static inline uintptr_t arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t arg5,
					     uintptr_t call_id);

/**
 * Invoke a system call with 6 arguments.
 *
 * @see arch_syscall_invoke0()
 *
 * @param arg1 First argument to the system call.
 * @param arg2 Second argument to the system call.
 * @param arg3 Third argument to the system call.
 * @param arg4 Fourth argument to the system call.
 * @param arg5 Fifth argument to the system call.
 * @param arg6 Sixth argument to the system call.
 * @param call_id System call ID, will be bounds-checked and used to reference
 *	          the kernel-side dispatch table
 * @return Return value of the system call. Void system calls return 0 here.
 */
static inline uintptr_t arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t arg5, uintptr_t arg6,
					     uintptr_t call_id);

/**
 * Indicate whether we are currently running in user mode
 *
 * @return True if the CPU is currently running with user permissions
 */
static inline bool arch_is_user_context(void);

/**
 * @brief Get the maximum number of partitions for a memory domain
 *
 * @return Max number of partitions, or -1 if there is no limit
 */
int arch_mem_domain_max_partitions_get(void);

#ifdef CONFIG_ARCH_MEM_DOMAIN_DATA
/**
 *
 * @brief Architecture-specific hook for memory domain initialization
 *
 * Perform any tasks needed to initialize architecture-specific data within
 * the memory domain, such as reserving memory for page tables. All members
 * of the provided memory domain aside from `arch` will be initialized when
 * this is called, but no threads will be assigned yet.
 *
 * This function may fail if initializing the memory domain requires allocation,
 * such as for page tables.
 *
 * The associated function k_mem_domain_init() documents that making
 * multiple init calls to the same memory domain is undefined behavior,
 * but has no assertions in place to check this. If this matters, it may be
 * desirable to add checks for this in the implementation of this function.
 *
 * @param domain The memory domain to initialize
 * @retval 0 Success
 * @retval -ENOMEM Insufficient memory
 */
int arch_mem_domain_init(struct k_mem_domain *domain);
#endif /* CONFIG_ARCH_MEM_DOMAIN_DATA */

#ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
/**
 * @brief Add a thread to a memory domain (arch-specific)
 *
 * Architecture-specific hook to manage internal data structures or hardware
 * state when the provided thread has been added to a memory domain.
 *
 * The thread->mem_domain_info.mem_domain pointer will be set to the domain to
 * be added to before this is called. Implementations may assume that the
 * thread is not already a member of this domain.
 *
 * @param thread Thread which needs to be configured.
 *
 * @retval 0 if successful
 * @retval -EINVAL if invalid parameters supplied
 * @retval -ENOSPC if running out of space in internal structures
 *                    (e.g. translation tables)
 */
int arch_mem_domain_thread_add(struct k_thread *thread);

/**
 * @brief Remove a thread from a memory domain (arch-specific)
 *
 * Architecture-specific hook to manage internal data structures or hardware
 * state when the provided thread has been removed from a memory domain.
 *
 * The thread's memory domain pointer will be the domain that the thread
 * is being removed from.
 *
 * @param thread Thread being removed from its memory domain
 *
 * @retval 0 if successful
 * @retval -EINVAL if invalid parameters supplied
 */
int arch_mem_domain_thread_remove(struct k_thread *thread);

/**
 * @brief Remove a partition from the memory domain (arch-specific)
 *
 * Architecture-specific hook to manage internal data structures or hardware
 * state when a memory domain has had a partition removed.
 *
 * The domain's partition index data is cleared, and its count of configured
 * partitions decremented, only after this function runs.
 *
 * @param domain The memory domain structure
 * @param partition_id The partition index that needs to be deleted
 *
 * @retval 0 if successful
 * @retval -EINVAL if invalid parameters supplied
 * @retval -ENOENT if no matching partition found
 */
int arch_mem_domain_partition_remove(struct k_mem_domain *domain,
				     uint32_t partition_id);

/**
 * @brief Add a partition to the memory domain
 *
 * Architecture-specific hook to manage internal data structures or hardware
 * state when a memory domain has a partition added.
 *
 * @param domain The memory domain structure
 * @param partition_id The partition that needs to be added
 *
 * @retval 0 if successful
 * @retval -EINVAL if invalid parameters supplied
 */
int arch_mem_domain_partition_add(struct k_mem_domain *domain,
				  uint32_t partition_id);
#endif /* CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API */

/**
 * @brief Check memory region permissions
 *
 * Given a memory region, return whether the current memory management hardware
 * configuration would allow a user thread to read/write that region. Used by
 * system calls to validate buffers coming in from userspace.
 *
 * Notes:
 * The function is guaranteed to never return validation success if the entire
 * buffer area is not user-accessible.
 *
 * The function is guaranteed to correctly validate the permissions of the
 * supplied buffer, if the user access permissions of the entire buffer are
 * enforced by a single, enabled memory management region.
 *
 * In some architectures the validation will always return failure
 * if the supplied memory buffer spans multiple enabled memory management
 * regions (even if all such regions permit user access).
 *
 * @warning A buffer of size zero (0) has undefined behavior.
 *
 * @param addr start address of the buffer
 * @param size the size of the buffer
 * @param write If non-zero, additionally check if the area is writable.
 *	  Otherwise, just check if the memory can be read.
 *
 * @return nonzero if the permissions don't match.
 */
int arch_buffer_validate(const void *addr, size_t size, int write);
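
/*
 * Illustrative sketch of validation in a syscall handler (simplified; the
 * kernel's generated handlers wrap this primitive in higher-level helpers,
 * and "ssf" is the handler's syscall stack frame argument):
 *
 *	if (arch_buffer_validate(user_buf, len, 1) != 0) {
 *		// user thread may not write this buffer; fail the call
 *		arch_syscall_oops(ssf);
 *	}
 */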

/**
 * Get the optimal virtual region alignment to optimize the MMU table layout
 *
 * Some MMU hardware requires certain regions to be aligned to an intermediate
 * block size in order to reduce page table usage.
 * This call returns the optimal virtual address alignment to permit such
 * an optimization in the subsequent MMU mapping call.
 *
 * @param[in] phys Physical address of region to be mapped,
 *                 aligned to @kconfig{CONFIG_MMU_PAGE_SIZE}
 * @param[in] size Size of region to be mapped,
 *                 aligned to @kconfig{CONFIG_MMU_PAGE_SIZE}
 *
 * @return Alignment to apply on the virtual address of this region
 */
size_t arch_virt_region_align(uintptr_t phys, size_t size);

/**
 * Perform a one-way transition from supervisor to user mode.
 *
 * Implementations of this function must do the following:
 *
 * - Reset the thread's stack pointer to a suitable initial value. We do not
 *   need any prior context since this is a one-way operation.
 * - Set up any kernel stack region for the CPU to use during privilege
 *   elevation
 * - Put the CPU in whatever its equivalent of user mode is
 * - Transfer execution to arch_new_thread() passing along all the supplied
 *   arguments, in user mode.
 *
 * @param user_entry Entry point to start executing as a user thread
 * @param p1 1st parameter to user thread
 * @param p2 2nd parameter to user thread
 * @param p3 3rd parameter to user thread
 */
FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
					void *p1, void *p2, void *p3);

/**
 * @brief Induce a kernel oops that appears to come from a specific location
 *
 * Normally, k_oops() generates an exception that appears to come from the
 * call site of the k_oops() itself.
 *
 * However, when validating arguments to a system call, if there are problems
 * we want the oops to appear to come from where the system call was invoked
 * and not inside the validation function.
 *
 * @param ssf System call stack frame pointer. This gets passed as an argument
 *            to _k_syscall_handler_t functions and its contents are completely
 *            architecture specific.
 */
FUNC_NORETURN void arch_syscall_oops(void *ssf);

/**
 * @brief Safely take the length of a potentially bad string
 *
 * This must not fault; instead, the @p err parameter must have -1 written to
 * it. This function should otherwise work exactly like libc strnlen(). On
 * success, @p err should be set to 0.
 *
 * @param s String to measure
 * @param maxsize Max length of the string
 * @param err Error value to write
 * @return Length of the string, not counting the NUL terminator, up to maxsize
 */
size_t arch_user_string_nlen(const char *s, size_t maxsize, int *err);
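
/*
 * Illustrative sketch: safely measure a string handed in from user mode
 * before copying it (user_str is a hypothetical user-supplied pointer):
 *
 *	int err;
 *	size_t len = arch_user_string_nlen(user_str, 256, &err);
 *
 *	if (err != 0) {
 *		// faulted: user_str is not a valid, accessible string
 *	}
 */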
#endif /* CONFIG_USERSPACE */

/**
 * @brief Ensure cache coherence prior to context switch
 *
 * Required when CONFIG_KERNEL_COHERENCE is true.  On cache-incoherent
 * multiprocessor architectures, thread stacks are cached by default
 * for performance reasons.  They must therefore be flushed
 * appropriately on context switch.  The rules are:
 *
 * 1. The region containing live data in the old stack (generally the
 *    bytes between the current stack pointer and the top of the stack
 *    memory) must be flushed to underlying storage so a new CPU that
 *    runs the same thread sees the correct data.  This must happen
 *    before the assignment of the switch_handle field in the thread
 *    struct which signals the completion of context switch.
 *
 * 2. Any data areas to be read from the new stack (generally the same
 *    as the live region when it was saved) should be invalidated (and
 *    NOT flushed!) in the data cache.  This is because another CPU
 *    may have run or re-initialized the thread since this CPU
 *    suspended it, and any data present in cache will be stale.
 *
 * @note The kernel will call this function during interrupt exit when
 * a new thread has been chosen to run, and also immediately before
 * entering arch_switch() to effect a code-driven context switch.  In
 * the latter case, it is very likely that more data will be written
 * to the old_thread stack region after this function returns but
 * before the completion of the switch.  Simply flushing naively here
 * is not sufficient on many architectures and coordination with the
 * arch_switch() implementation is likely required.
 *
 * @param old_thread The old thread to be flushed before being allowed
 *                   to run on other CPUs.
 * @param old_switch_handle The switch handle to be stored into
 *                          old_thread (it will not be valid until the
 *                          cache is flushed so is not present yet).
 *                          This will be NULL if inside z_swap()
 *                          (because the arch_switch() has not saved it
 *                          yet).
 * @param new_thread The new thread to be invalidated before it runs locally.
 */
#ifndef CONFIG_KERNEL_COHERENCE
static inline void arch_cohere_stacks(struct k_thread *old_thread,
				      void *old_switch_handle,
				      struct k_thread *new_thread)
{
	ARG_UNUSED(old_thread);
	ARG_UNUSED(old_switch_handle);
	ARG_UNUSED(new_thread);
}
#endif

/** @} */

/**
 * @defgroup arch-gdbstub Architecture-specific gdbstub APIs
 * @ingroup arch-interface
 * @{
 */

#ifdef CONFIG_GDBSTUB
struct gdb_ctx;

/**
 * @brief Architecture layer debug start
 *
 * This function is called by @c gdb_init()
 */
void arch_gdb_init(void);

/**
 * @brief Continue running program
 *
 * Continue software execution.
 */
void arch_gdb_continue(void);

/**
 * @brief Continue with one step
 *
 * Continue software execution until it reaches the next statement.
 */
void arch_gdb_step(void);

/**
 * @brief Read all registers and output them as a hexadecimal string.
 *
 * This reads all CPU registers and outputs them as a hexadecimal string.
 * The output string must be parsable by GDB.
 *
 * @param ctx    GDB context
 * @param buf    Buffer to output hexadecimal string.
 * @param buflen Length of buffer.
 *
 * @return Length of hexadecimal string written.
 *         Return 0 if error or not supported.
 */
size_t arch_gdb_reg_readall(struct gdb_ctx *ctx, uint8_t *buf, size_t buflen);

/**
 * @brief Take a hexadecimal string and update all registers.
 *
 * This takes in a hexadecimal string as presented from GDB,
 * and updates all CPU registers with new values.
 *
 * @param ctx    GDB context
 * @param hex    Input hexadecimal string.
 * @param hexlen Length of hexadecimal string.
 *
 * @return Length of hexadecimal string parsed.
 *         Return 0 if error or not supported.
 */
size_t arch_gdb_reg_writeall(struct gdb_ctx *ctx, uint8_t *hex, size_t hexlen);

/**
 * @brief Read one register and output it as a hexadecimal string.
 *
 * This reads one CPU register and outputs it as a hexadecimal string.
 * The output string must be parsable by GDB.
 *
 * @param ctx    GDB context
 * @param buf    Buffer to output hexadecimal string.
 * @param buflen Length of buffer.
 * @param regno  Register number
 *
 * @return Length of hexadecimal string written.
 *         Return 0 if error or not supported.
 */
size_t arch_gdb_reg_readone(struct gdb_ctx *ctx, uint8_t *buf, size_t buflen,
			    uint32_t regno);

/**
 * @brief Take a hexadecimal string and update one register.
 *
 * This takes in a hexadecimal string as presented from GDB,
 * and updates one CPU register with a new value.
 *
 * @param ctx    GDB context
 * @param hex    Input hexadecimal string.
 * @param hexlen Length of hexadecimal string.
 * @param regno  Register number
 *
 * @return Length of hexadecimal string parsed.
 *         Return 0 if error or not supported.
 */
size_t arch_gdb_reg_writeone(struct gdb_ctx *ctx, uint8_t *hex, size_t hexlen,
			     uint32_t regno);

/**
 * @brief Add breakpoint or watchpoint.
 *
 * @param ctx GDB context
 * @param type Breakpoint or watchpoint type
 * @param addr Address of breakpoint or watchpoint
 * @param kind Size of breakpoint/watchpoint in bytes
 *
 * @retval 0  Operation successful
 * @retval -1 Error encountered
 * @retval -2 Not supported
 */
int arch_gdb_add_breakpoint(struct gdb_ctx *ctx, uint8_t type,
			    uintptr_t addr, uint32_t kind);

/**
 * @brief Remove breakpoint or watchpoint.
 *
 * @param ctx GDB context
 * @param type Breakpoint or watchpoint type
 * @param addr Address of breakpoint or watchpoint
 * @param kind Size of breakpoint/watchpoint in bytes
 *
 * @retval 0  Operation successful
 * @retval -1 Error encountered
 * @retval -2 Not supported
 */
int arch_gdb_remove_breakpoint(struct gdb_ctx *ctx, uint8_t type,
			       uintptr_t addr, uint32_t kind);

/**
 * @brief Post processing after memory write.
 *
 * @param[in] addr  Starting address of the memory region
 * @param[in] len   Size of the memory region
 * @param[in] align Write alignment of memory region
 */
void arch_gdb_post_memory_write(uintptr_t addr, size_t len, uint8_t align);

#endif
/** @} */

#ifdef CONFIG_TIMING_FUNCTIONS
#include <zephyr/timing/types.h>

/**
 * @brief Arch specific Timing Measurement APIs
 * @defgroup timing_api_arch Arch specific Timing Measurement APIs
 * @ingroup timing_api
 *
 * Implements the necessary bits to support timing measurement
 * using architecture specific timing measurement mechanism.
 *
 * @{
 */

/**
 * @brief Initialize the timing subsystem.
 *
 * Perform the necessary steps to initialize the timing subsystem.
 *
 * @see timing_init()
 */
void arch_timing_init(void);

/**
 * @brief Signal the start of the timing information gathering.
 *
 * Signal to the timing subsystem that timing information
 * will be gathered from this point forward.
 *
 * @note Any call to arch_timing_counter_get() must be done between
 * calls to arch_timing_start() and arch_timing_stop(), and on the
 * same CPU core.
 *
 * @see timing_start()
 */
void arch_timing_start(void);

/**
 * @brief Signal the end of the timing information gathering.
 *
 * Signal to the timing subsystem that timing information
 * is no longer being gathered from this point forward.
 *
 * @note Any call to arch_timing_counter_get() must be done between
 * calls to arch_timing_start() and arch_timing_stop(), and on the
 * same CPU core.
 *
 * @see timing_stop()
 */
void arch_timing_stop(void);

/**
 * @brief Return timing counter.
 *
 * @parblock
 *
 * @note Any call to arch_timing_counter_get() must be done between
 * calls to arch_timing_start() and arch_timing_stop(), and on the
 * same CPU core.
 *
 * @endparblock
 *
 * @parblock
 *
 * @note Not all architectures have a timing counter with 64 bit precision.
 * It is possible to see this value "go backwards" due to internal
 * rollover.  Timing code must either be prepared to address the rollover
 * (with platform-dependent code, e.g. by casting to a uint32_t before
 * subtraction) or use arch_timing_cycles_get(), which is required
 * to understand the distinction.
 *
 * @endparblock
 *
 * @return Timing counter.
 *
 * @see timing_counter_get()
 */
timing_t arch_timing_counter_get(void);

/**
 * @brief Get number of cycles between @p start and @p end.
 *
 * @note For some architectures, the raw numbers from the counter need
 * to be scaled to obtain the actual number of cycles, or may roll over
 * internally.  This function computes a positive-definite interval
 * between two returned cycle values.
 *
 * @param start Pointer to counter at start of a measured execution.
 * @param end Pointer to counter at end of a measured execution.
 * @return Number of cycles between start and end.
 *
 * @see timing_cycles_get()
 */
uint64_t arch_timing_cycles_get(volatile timing_t *const start,
				volatile timing_t *const end);

/**
 * @brief Get frequency of counter used (in Hz).
 *
 * @return Frequency of counter used for timing in Hz.
 *
 * @see timing_freq_get()
 */
uint64_t arch_timing_freq_get(void);

/**
 * @brief Convert number of @p cycles into nanoseconds.
 *
 * @param cycles Number of cycles
 * @return Converted time value
 *
 * @see timing_cycles_to_ns()
 */
uint64_t arch_timing_cycles_to_ns(uint64_t cycles);
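
/*
 * Illustrative measurement sketch using the timing APIs above (do_work()
 * is a hypothetical workload):
 *
 *	timing_t start, end;
 *	uint64_t cycles, ns;
 *
 *	arch_timing_init();
 *	arch_timing_start();
 *	start = arch_timing_counter_get();
 *	do_work();
 *	end = arch_timing_counter_get();
 *	cycles = arch_timing_cycles_get(&start, &end);
 *	ns = arch_timing_cycles_to_ns(cycles);
 *	arch_timing_stop();
 */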

/**
 * @brief Convert number of @p cycles into nanoseconds with averaging.
 *
 * @param cycles Number of cycles
 * @param count Number of accumulated cycle samples to average over
 * @return Converted time value
 *
 * @see timing_cycles_to_ns_avg()
 */
uint64_t arch_timing_cycles_to_ns_avg(uint64_t cycles, uint32_t count);

/**
 * @brief Get frequency of counter used (in MHz).
 *
 * @return Frequency of counter used for timing in MHz.
 *
 * @see timing_freq_get_mhz()
 */
uint32_t arch_timing_freq_get_mhz(void);

/** @} */

#endif /* CONFIG_TIMING_FUNCTIONS */

#ifdef CONFIG_PCIE_MSI_MULTI_VECTOR

struct msi_vector;
typedef struct msi_vector msi_vector_t;

/**
 * @brief Allocate vector(s) for the endpoint MSI message(s).
 *
 * @param priority the MSI vectors' base interrupt priority
 * @param vectors an array to fill with the allocated MSI vectors
 * @param n_vector the size of the MSI vectors array
 *
 * @return The number of allocated MSI vectors
 */
uint8_t arch_pcie_msi_vectors_allocate(unsigned int priority,
				       msi_vector_t *vectors,
				       uint8_t n_vector);

/**
 * @brief Connect an MSI vector to the given routine
 *
 * @param vector The MSI vector to connect to
 * @param routine Interrupt service routine
 * @param parameter ISR parameter
 * @param flags Arch-specific IRQ configuration flag
 *
 * @return True on success, false otherwise
 */
bool arch_pcie_msi_vector_connect(msi_vector_t *vector,
				  void (*routine)(const void *parameter),
				  const void *parameter,
				  uint32_t flags);

#endif /* CONFIG_PCIE_MSI_MULTI_VECTOR */

/**
 * @brief Perform architecture specific processing within spin loops
 *
 * This is invoked from busy loops with IRQs disabled such as the contended
 * spinlock loop. The default implementation is a weak function that calls
 * arch_nop(). Architectures may implement this function to perform extra
 * checks or power management tricks if needed.
 */
void arch_spin_relax(void);

/**
 * @defgroup arch-stackwalk Architecture-specific Stack Walk APIs
 * @ingroup arch-interface
 * @brief Architecture-specific Stack Walk APIs
 *
 * To add API support to an architecture, `arch_stack_walk()` should be implemented and a non-user
 * configurable Kconfig `ARCH_HAS_STACKWALK` that defaults to `y` should be created in the
 * architecture's top level Kconfig, with all the relevant dependencies.
 *
 * @{
 */

/**
 * stack_trace_callback_fn - Callback for @ref arch_stack_walk
 * @param cookie Caller supplied pointer handed back by @ref arch_stack_walk
 * @param addr The stack entry address to consume
 *
 * @return True if the entry was consumed or skipped; false if there is no
 * space left to store further entries
 */
typedef bool (*stack_trace_callback_fn)(void *cookie, unsigned long addr);

/**
 * @brief Architecture-specific function to walk the stack
 *
 * @param callback_fn Callback which is invoked by the architecture code for each entry.
 * @param cookie Caller supplied pointer which is handed back to @a callback_fn
 * @param thread Pointer to a k_thread struct, can be NULL
 * @param esf Pointer to an arch_esf struct, can be NULL
 *
 * ============ ======= ============================================
 * thread	esf	result
 * ============ ======= ============================================
 * thread	NULL	Stack trace from thread (can be _current)
 * thread	esf	Stack trace starting on esf
 * ============ ======= ============================================
 */
void arch_stack_walk(stack_trace_callback_fn callback_fn, void *cookie,
		     const struct k_thread *thread, const struct arch_esf *esf);
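
/*
 * Illustrative sketch of a stack trace consumer; the callback returns true
 * to keep walking (printk() is assumed to be available, and _current is
 * the running thread):
 *
 *	static bool print_entry(void *cookie, unsigned long addr)
 *	{
 *		printk("  0x%lx\n", addr);
 *		return true;
 *	}
 *
 *	arch_stack_walk(print_entry, NULL, _current, NULL);
 */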

/** @} */

#ifdef __cplusplus
}
#endif /* __cplusplus */

#include <zephyr/arch/arch_inlines.h>

#endif /* _ASMLANGUAGE */

#endif /* ZEPHYR_INCLUDE_ARCH_ARCH_INTERFACE_H_ */