/*
 * Copyright (c) 2019 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @defgroup arch-interface Architecture Interface
 * @brief Internal kernel APIs with public scope
 *
 * Any public kernel APIs that are implemented as inline functions and need to
 * call architecture-specific APIs will have the prototypes for those
 * architecture-specific APIs here. Architecture APIs that aren't used in this
 * way go in kernel/include/kernel_arch_interface.h.
 *
 * The set of architecture-specific APIs used internally by public macros and
 * inline functions in public headers is also specified and documented here.
 *
 * For all macros and inline function prototypes described herein,
 * <zephyr/arch/cpu.h> must eventually pull in their full definitions (the
 * actual macro defines and inline function bodies).
 *
 * include/kernel.h and other public headers depend on definitions in this
 * header.
 */
#ifndef ZEPHYR_INCLUDE_SYS_ARCH_INTERFACE_H_
#define ZEPHYR_INCLUDE_SYS_ARCH_INTERFACE_H_

#ifndef _ASMLANGUAGE
#include <zephyr/toolchain.h>
#include <stddef.h>
#include <zephyr/types.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/irq_offload.h>

#ifdef __cplusplus
extern "C" {
#endif

/* NOTE: We cannot pull in kernel.h here, need some forward declarations */
struct k_thread;
struct k_mem_domain;

typedef struct z_thread_stack_element k_thread_stack_t;

typedef void (*k_thread_entry_t)(void *p1, void *p2, void *p3);

/**
 * @defgroup arch-timing Architecture timing APIs
 * @ingroup arch-interface
 * @{
 */

/**
 * Obtain the current cycle count, in units specified by
 * CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC.  While this is historically
 * specified as part of the architecture API, in practice virtually
 * all platforms forward it to the sys_clock_cycle_get_32() API
 * provided by the timer driver.
 *
 * @see k_cycle_get_32()
 *
 * @return The current cycle count.  This should count up monotonically
 * through the full 32 bit space, wrapping at 0xffffffff.  Hardware
 * with fewer bits of precision in the timer is expected to synthesize
 * a 32 bit count.
 */
static inline uint32_t arch_k_cycle_get_32(void);

/**
 * As for arch_k_cycle_get_32(), but with a 64 bit return value.  Not
 * all timer hardware has a 64 bit counter; this needs to be implemented
 * only if CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER is set.
 *
 * @see arch_k_cycle_get_32()
 *
 * @return The current cycle count.  This should count up monotonically
 * through the full 64 bit space, wrapping at 2^64-1.  Hardware with
 * fewer bits of precision in the timer is generally not expected to
 * implement this API.
 */
static inline uint64_t arch_k_cycle_get_64(void);
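
/*
 * Example (illustrative sketch, not part of this interface): measuring an
 * elapsed interval with the 32 bit cycle counter.  do_work() is a
 * hypothetical function; unsigned subtraction stays correct across a
 * single counter wrap.
 *
 *	uint32_t start = arch_k_cycle_get_32();
 *
 *	do_work();
 *
 *	uint32_t elapsed_cycles = arch_k_cycle_get_32() - start;
 */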

/** @} */


/**
 * @addtogroup arch-threads
 * @{
 */

/**
 * @def ARCH_THREAD_STACK_RESERVED
 *
 * @see K_THREAD_STACK_RESERVED
 */

/**
 * @def ARCH_STACK_PTR_ALIGN
 *
 * Required alignment of the CPU's stack pointer register value, dictated by
 * hardware constraints and the ABI calling convention.
 *
 * @see Z_STACK_PTR_ALIGN
 */

/**
 * @def ARCH_THREAD_STACK_OBJ_ALIGN(size)
 *
 * Required alignment of the lowest address of a stack object.
 *
 * Optional definition.
 *
 * @see Z_THREAD_STACK_OBJ_ALIGN
 */

/**
 * @def ARCH_THREAD_STACK_SIZE_ADJUST(size)
 * @brief Round up a stack buffer size to alignment constraints
 *
 * Adjust a requested stack buffer size to the true size of its underlying
 * buffer, defined as the area usable for thread stack context and thread-
 * local storage.
 *
 * The size value passed here does not include storage reserved for platform
 * data.
 *
 * The returned value is either the same size provided (if already properly
 * aligned), or rounded up to satisfy alignment constraints.  Calculations
 * performed here *must* be idempotent.
 *
 * Optional definition. If undefined, stack buffer sizes are either:
 * - Rounded up to the next power of two if user mode is enabled on an arch
 *   with an MPU that requires such alignment
 * - Rounded up to ARCH_STACK_PTR_ALIGN
 *
 * @see Z_THREAD_STACK_SIZE_ADJUST
 */

/**
 * @def ARCH_KERNEL_STACK_RESERVED
 * @brief MPU guard size for kernel-only stacks
 *
 * If MPU stack guards are used to catch stack overflows, specify the
 * amount of space reserved in kernel stack objects. If guard sizes are
 * context dependent, this should be the minimum guard size, with
 * remaining space carved out if needed.
 *
 * Optional definition, defaults to 0.
 *
 * @see K_KERNEL_STACK_RESERVED
 */

/**
 * @def ARCH_KERNEL_STACK_OBJ_ALIGN
 * @brief Required alignment of the lowest address of a kernel-only stack.
 */

/** @} */

/**
 * @addtogroup arch-pm
 * @{
 */

/**
 * @brief Power save idle routine
 *
 * This function will be called by the kernel idle loop or possibly within
 * an implementation of z_pm_save_idle in the kernel when the
 * '_pm_save_flag' variable is non-zero.
 *
 * Architectures that do not implement power management instructions may
 * immediately return; otherwise a power-saving instruction should be
 * issued to wait for an interrupt.
 *
 * @note The function is expected to return after the interrupt that has
 * caused the CPU to exit power-saving mode has been serviced, although
 * this is not a firm requirement.
 *
 * @see k_cpu_idle()
 */
void arch_cpu_idle(void);

/**
 * @brief Atomically re-enable interrupts and enter low power mode
 *
 * The requirements for arch_cpu_atomic_idle() are as follows:
 *
 * -# Enabling interrupts and entering a low-power mode needs to be
 *    atomic, i.e. there should be no period of time where interrupts are
 *    enabled before the processor enters a low-power mode.  See the comments
 *    in k_lifo_get(), for example, describing the race condition that
 *    occurs if this requirement is not met.
 *
 * -# After waking up from the low-power mode, the interrupt lockout state
 *    must be restored as indicated in the 'key' input parameter.
 *
 * @see k_cpu_atomic_idle()
 *
 * @param key Lockout key returned by previous invocation of arch_irq_lock()
 */
void arch_cpu_atomic_idle(unsigned int key);
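
/*
 * Example (illustrative sketch, not part of this interface): the usual
 * calling pattern.  work_available() is a hypothetical check performed
 * under the interrupt lock; if there is nothing to do, interrupts are
 * atomically re-enabled and the CPU waits for the next one.
 *
 *	unsigned int key = arch_irq_lock();
 *
 *	if (!work_available()) {
 *		arch_cpu_atomic_idle(key);
 *	} else {
 *		arch_irq_unlock(key);
 *	}
 */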

/** @} */


/**
 * @addtogroup arch-smp
 * @{
 */

/**
 * Per-cpu entry function
 *
 * @param data context parameter, implementation specific
 */
typedef FUNC_NORETURN void (*arch_cpustart_t)(void *data);

/**
 * @brief Start a numbered CPU on a MP-capable system
 *
 * This starts and initializes a specific CPU.  The main thread on startup is
 * running on CPU zero, other processors are numbered sequentially.  On return
 * from this function, the CPU is known to have begun operating and will enter
 * the provided function.  Its interrupts will be initialized but disabled such
 * that irq_unlock() with the provided key will work to enable them.
 *
 * Normally, in SMP mode this function will be called by the kernel
 * initialization and should not be used as a user API.  But it is defined here
 * for special-purpose apps which want Zephyr running on one core and to use
 * others for design-specific processing.
 *
 * @param cpu_num Integer number of the CPU
 * @param stack Stack memory for the CPU
 * @param sz Stack buffer size, in bytes
 * @param fn Function to begin running on the CPU.
 * @param arg Untyped argument to be passed to "fn"
 */
void arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
		    arch_cpustart_t fn, void *arg);
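
/*
 * Example (illustrative sketch, not part of this interface): bringing up a
 * second core from a special-purpose application.  cpu1_entry and
 * do_design_specific_work() are hypothetical; the stack macros are the
 * regular kernel stack helpers.
 *
 *	K_KERNEL_STACK_DEFINE(cpu1_stack, 1024);
 *
 *	static FUNC_NORETURN void cpu1_entry(void *arg)
 *	{
 *		for (;;) {
 *			do_design_specific_work(arg);
 *		}
 *	}
 *
 *	arch_start_cpu(1, cpu1_stack, K_KERNEL_STACK_SIZEOF(cpu1_stack),
 *		       cpu1_entry, NULL);
 */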

/**
 * @brief Return CPU power status
 *
 * @param cpu_num Integer number of the CPU
 *
 * @return true if the CPU is currently active (powered up), false otherwise
 */
bool arch_cpu_active(int cpu_num);

/** @} */


/**
 * @addtogroup arch-irq
 * @{
 */

/**
 * Lock interrupts on the current CPU
 *
 * @see irq_lock()
 */
static inline unsigned int arch_irq_lock(void);

/**
 * Unlock interrupts on the current CPU
 *
 * @see irq_unlock()
 */
static inline void arch_irq_unlock(unsigned int key);

/**
 * Test if calling arch_irq_unlock() with this key would unlock irqs
 *
 * @param key value returned by arch_irq_lock()
 * @return true if interrupts were unlocked prior to the arch_irq_lock()
 * call that produced the key argument.
 */
static inline bool arch_irq_unlocked(unsigned int key);
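
/*
 * Example (illustrative sketch, not part of this interface): a nestable
 * critical section on the current CPU using the lock key, mirroring what
 * the public irq_lock()/irq_unlock() API does.  update_shared_state() is a
 * hypothetical function.
 *
 *	unsigned int key = arch_irq_lock();
 *
 *	update_shared_state();
 *
 *	arch_irq_unlock(key);
 *
 * Nesting works because an inner arch_irq_unlock() with a key taken while
 * interrupts were already locked leaves them locked.
 */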

/**
 * Disable the specified interrupt line
 *
 * @note The behavior of interrupts that arrive after this call
 * returns and before the corresponding call to arch_irq_enable() is
 * undefined.  The hardware is not required to latch and deliver such
 * an interrupt, though on some architectures that may work.  Other
 * architectures will simply lose such an interrupt and never deliver
 * it.  Many drivers and subsystems are not tolerant of such dropped
 * interrupts and it is the job of the application layer to ensure
 * that behavior remains correct.
 *
 * @see irq_disable()
 */
void arch_irq_disable(unsigned int irq);

/**
 * Enable the specified interrupt line
 *
 * @see irq_enable()
 */
void arch_irq_enable(unsigned int irq);

/**
 * Test if an interrupt line is enabled
 *
 * @see irq_is_enabled()
 */
int arch_irq_is_enabled(unsigned int irq);

/**
 * Arch-specific hook to install a dynamic interrupt.
 *
 * @param irq IRQ line number
 * @param priority Interrupt priority
 * @param routine Interrupt service routine
 * @param parameter ISR parameter
 * @param flags Arch-specific IRQ configuration flag
 *
 * @return The vector assigned to this interrupt
 */
int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
			     void (*routine)(const void *parameter),
			     const void *parameter, uint32_t flags);
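
/*
 * Example (illustrative sketch, not part of this interface): installing and
 * enabling a dynamic interrupt.  MY_IRQ, MY_IRQ_PRIO, my_isr, my_dev and the
 * handler it calls are hypothetical names supplied by the caller.
 *
 *	static void my_isr(const void *param)
 *	{
 *		const struct device *dev = param;
 *
 *		handle_device_interrupt(dev);
 *	}
 *
 *	(void)arch_irq_connect_dynamic(MY_IRQ, MY_IRQ_PRIO, my_isr, my_dev, 0);
 *	arch_irq_enable(MY_IRQ);
 */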

/**
 * Arch-specific hook to dynamically uninstall a shared interrupt.
 * If the interrupt is not being shared, then the associated
 * _sw_isr_table entry will be replaced by (NULL, z_irq_spurious)
 * (default entry).
 *
 * @param irq IRQ line number
 * @param priority Interrupt priority
 * @param routine Interrupt service routine
 * @param parameter ISR parameter
 * @param flags Arch-specific IRQ configuration flag
 *
 * @return 0 in case of success, negative value otherwise
 */
int arch_irq_disconnect_dynamic(unsigned int irq, unsigned int priority,
				void (*routine)(const void *parameter),
				const void *parameter, uint32_t flags);

/**
 * @def ARCH_IRQ_CONNECT(irq, pri, isr, arg, flags)
 *
 * @see IRQ_CONNECT()
 */

#ifdef CONFIG_PCIE
/**
 * @def ARCH_PCIE_IRQ_CONNECT(bdf, irq, pri, isr, arg, flags)
 *
 * @see PCIE_IRQ_CONNECT()
 */
#endif /* CONFIG_PCIE */

/**
 * @def ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p)
 *
 * @see IRQ_DIRECT_CONNECT()
 */

/**
 * @def ARCH_ISR_DIRECT_PM()
 *
 * @see ISR_DIRECT_PM()
 */

/**
 * @def ARCH_ISR_DIRECT_HEADER()
 *
 * @see ISR_DIRECT_HEADER()
 */

/**
 * @def ARCH_ISR_DIRECT_FOOTER(swap)
 *
 * @see ISR_DIRECT_FOOTER()
 */

/**
 * @def ARCH_ISR_DIRECT_DECLARE(name)
 *
 * @see ISR_DIRECT_DECLARE()
 */

#ifndef CONFIG_PCIE_CONTROLLER
/**
 * @brief Arch-specific hook for allocating IRQs
 *
 * Note: the implementation should disable/enable interrupts as needed to
 * avoid concurrency issues. Also, an allocated IRQ is assumed to be in use,
 * so a subsequent call to arch_irq_is_used() on it should return true.
 *
 * @return The newly allocated IRQ or UINT_MAX on error.
 */
unsigned int arch_irq_allocate(void);

/**
 * @brief Arch-specific hook for declaring an IRQ being used
 *
 * Note: the implementation should disable/enable interrupts as needed to
 * avoid concurrency issues.
 *
 * @param irq the IRQ to declare being used
 */
void arch_irq_set_used(unsigned int irq);

/**
 * @brief Arch-specific hook for checking if an IRQ is being used already
 *
 * @param irq the IRQ to check
 *
 * @return true if the IRQ is being used, false otherwise
 */
bool arch_irq_is_used(unsigned int irq);
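
/*
 * Example (illustrative sketch, not part of this interface): pairing IRQ
 * allocation with a dynamic connection.  my_isr and my_dev are hypothetical.
 *
 *	unsigned int irq = arch_irq_allocate();
 *
 *	if (irq != UINT_MAX) {
 *		(void)arch_irq_connect_dynamic(irq, 1, my_isr, my_dev, 0);
 *		arch_irq_enable(irq);
 *	}
 */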

#endif /* CONFIG_PCIE_CONTROLLER */

/**
 * @def ARCH_EXCEPT(reason_p)
 *
 * Generate a software induced fatal error.
 *
 * If the caller is running in user mode, only K_ERR_KERNEL_OOPS or
 * K_ERR_STACK_CHK_FAIL may be induced.
 *
 * This should ideally generate a software trap, with exception context
 * indicating state when this was invoked. General purpose register state at
 * the time of trap should not be disturbed from the calling context.
 *
 * @param reason_p K_ERR_ scoped reason code for the fatal error.
 */

#ifdef CONFIG_IRQ_OFFLOAD
/**
 * Run a function in interrupt context.
 *
 * Implementations should invoke an exception such that the kernel goes through
 * its interrupt handling dispatch path, to include switching to the interrupt
 * stack, and runs the provided routine and parameter.
 *
 * The only intended use-case for this function is for test code to verify
 * the correctness of kernel APIs in interrupt handling context. This API
 * is not intended for real applications.
 *
 * @see irq_offload()
 *
 * @param routine Function to run in interrupt context
 * @param parameter Value to pass to the function when invoked
 */
void arch_irq_offload(irq_offload_routine_t routine, const void *parameter);
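
/*
 * Example (illustrative sketch, not part of this interface): how a test
 * might exercise a kernel API from ISR context.  offload_fn and test_sem
 * are assumptions made for illustration.
 *
 *	static void offload_fn(const void *param)
 *	{
 *		k_sem_give((struct k_sem *)param);
 *	}
 *
 *	arch_irq_offload(offload_fn, &test_sem);
 */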
#endif /* CONFIG_IRQ_OFFLOAD */

/** @} */


/**
 * @defgroup arch-smp Architecture-specific SMP APIs
 * @ingroup arch-interface
 * @{
 */
#ifdef CONFIG_SMP
/** Return the CPU struct for the currently executing CPU */
static inline struct _cpu *arch_curr_cpu(void);


/**
 * @brief Processor hardware ID
 *
 * Most multiprocessor architectures have a low-level unique ID value
 * associated with the current CPU that can be retrieved rapidly and
 * efficiently in kernel context.  Note that while the numbering of
 * the CPUs is guaranteed to be unique, the values are
 * platform-defined. In particular, they are not guaranteed to match
 * Zephyr's own sequential CPU IDs (even though on some platforms they
 * do).
 *
 * @note There is an inherent race with this API: the system may
 * preempt the current thread and migrate it to another CPU before the
 * value is used.  Safe usage requires knowing the migration is
 * impossible (e.g. because the code is in interrupt context, holds a
 * spinlock, or cannot migrate due to k_cpu_mask state).
 *
 * @return Unique ID for currently-executing CPU
 */
static inline uint32_t arch_proc_id(void);

/**
 * Broadcast an interrupt to all CPUs
 *
 * This will invoke z_sched_ipi() on other CPUs in the system.
 */
void arch_sched_ipi(void);

#endif /* CONFIG_SMP */

/**
 * @brief Returns the number of CPUs
 *
 * For most systems this will be the same as CONFIG_MP_MAX_NUM_CPUS;
 * however, some systems may determine this at runtime instead.
 *
 * @return the number of CPUs
 */
static inline unsigned int arch_num_cpus(void);

/** @} */


/**
 * @defgroup arch-userspace Architecture-specific userspace APIs
 * @ingroup arch-interface
 * @{
 */

#ifdef CONFIG_USERSPACE
/**
 * Invoke a system call with 0 arguments.
 *
 * No general-purpose register state other than return value may be preserved
 * when transitioning from supervisor mode back down to user mode for
 * security reasons.
 *
 * It is required that all arguments be stored in registers when elevating
 * privileges from user to supervisor mode.
 *
 * Processing of the syscall takes place on a separate kernel stack. Interrupts
 * should be enabled when invoking the system call marshallers from the
 * dispatch table. Thread preemption may occur when handling system calls.
 *
 * Call IDs are untrusted and must be bounds-checked, as the value is used to
 * index the system call dispatch table, containing function pointers to the
 * specific system call code.
 *
 * @param call_id System call ID
 * @return Return value of the system call. Void system calls return 0 here.
 */
static inline uintptr_t arch_syscall_invoke0(uintptr_t call_id);

/**
 * Invoke a system call with 1 argument.
 *
 * @see arch_syscall_invoke0()
 *
 * @param arg1 First argument to the system call.
 * @param call_id System call ID, will be bounds-checked and used to reference
 *	          kernel-side dispatch table
 * @return Return value of the system call. Void system calls return 0 here.
 */
static inline uintptr_t arch_syscall_invoke1(uintptr_t arg1,
					     uintptr_t call_id);

/**
 * Invoke a system call with 2 arguments.
 *
 * @see arch_syscall_invoke0()
 *
 * @param arg1 First argument to the system call.
 * @param arg2 Second argument to the system call.
 * @param call_id System call ID, will be bounds-checked and used to reference
 *	          kernel-side dispatch table
 * @return Return value of the system call. Void system calls return 0 here.
 */
static inline uintptr_t arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t call_id);

/**
 * Invoke a system call with 3 arguments.
 *
 * @see arch_syscall_invoke0()
 *
 * @param arg1 First argument to the system call.
 * @param arg2 Second argument to the system call.
 * @param arg3 Third argument to the system call.
 * @param call_id System call ID, will be bounds-checked and used to reference
 *	          kernel-side dispatch table
 * @return Return value of the system call. Void system calls return 0 here.
 */
static inline uintptr_t arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3,
					     uintptr_t call_id);

/**
 * Invoke a system call with 4 arguments.
 *
 * @see arch_syscall_invoke0()
 *
 * @param arg1 First argument to the system call.
 * @param arg2 Second argument to the system call.
 * @param arg3 Third argument to the system call.
 * @param arg4 Fourth argument to the system call.
 * @param call_id System call ID, will be bounds-checked and used to reference
 *	          kernel-side dispatch table
 * @return Return value of the system call. Void system calls return 0 here.
 */
static inline uintptr_t arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t call_id);

/**
 * Invoke a system call with 5 arguments.
 *
 * @see arch_syscall_invoke0()
 *
 * @param arg1 First argument to the system call.
 * @param arg2 Second argument to the system call.
 * @param arg3 Third argument to the system call.
 * @param arg4 Fourth argument to the system call.
 * @param arg5 Fifth argument to the system call.
 * @param call_id System call ID, will be bounds-checked and used to reference
 *	          kernel-side dispatch table
 * @return Return value of the system call. Void system calls return 0 here.
 */
static inline uintptr_t arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t arg5,
					     uintptr_t call_id);

/**
 * Invoke a system call with 6 arguments.
 *
 * @see arch_syscall_invoke0()
 *
 * @param arg1 First argument to the system call.
 * @param arg2 Second argument to the system call.
 * @param arg3 Third argument to the system call.
 * @param arg4 Fourth argument to the system call.
 * @param arg5 Fifth argument to the system call.
 * @param arg6 Sixth argument to the system call.
 * @param call_id System call ID, will be bounds-checked and used to reference
 *	          kernel-side dispatch table
 * @return Return value of the system call. Void system calls return 0 here.
 */
static inline uintptr_t arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t arg5, uintptr_t arg6,
					     uintptr_t call_id);
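
/*
 * Example (illustrative sketch, not part of this interface): the shape of a
 * user-mode wrapper invoking a two-argument system call.  k_foo() and
 * K_SYSCALL_FOO are hypothetical; real wrappers are generated by the
 * syscall machinery.
 *
 *	static inline int k_foo(uint32_t count, void *buf)
 *	{
 *		return (int)arch_syscall_invoke2((uintptr_t)count,
 *						 (uintptr_t)buf,
 *						 K_SYSCALL_FOO);
 *	}
 */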

/**
 * Indicate whether we are currently running in user mode
 *
 * @return True if the CPU is currently running with user permissions
 */
static inline bool arch_is_user_context(void);

/**
 * @brief Get the maximum number of partitions for a memory domain
 *
 * @return Max number of partitions, or -1 if there is no limit
 */
int arch_mem_domain_max_partitions_get(void);

#ifdef CONFIG_ARCH_MEM_DOMAIN_DATA
/**
 *
 * @brief Architecture-specific hook for memory domain initialization
 *
 * Perform any tasks needed to initialize architecture-specific data within
 * the memory domain, such as reserving memory for page tables. All members
 * of the provided memory domain aside from `arch` will be initialized when
 * this is called, but no threads will be assigned yet.
 *
 * This function may fail if initializing the memory domain requires allocation,
 * such as for page tables.
 *
 * The associated function k_mem_domain_init() documents that making
 * multiple init calls to the same memory domain is undefined behavior,
 * but has no assertions in place to check this. If this matters, it may be
 * desirable to add checks for this in the implementation of this function.
 *
 * @param domain The memory domain to initialize
 * @retval 0 Success
 * @retval -ENOMEM Insufficient memory
 */
int arch_mem_domain_init(struct k_mem_domain *domain);
#endif /* CONFIG_ARCH_MEM_DOMAIN_DATA */

#ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
/**
 * @brief Add a thread to a memory domain (arch-specific)
 *
 * Architecture-specific hook to manage internal data structures or hardware
 * state when the provided thread has been added to a memory domain.
 *
 * The thread->mem_domain_info.mem_domain pointer will be set to the domain to
 * be added to before this is called. Implementations may assume that the
 * thread is not already a member of this domain.
 *
 * @param thread Thread which needs to be configured.
 *
 * @retval 0 if successful
 * @retval -EINVAL if invalid parameters supplied
 * @retval -ENOSPC if running out of space in internal structures
 *                    (e.g. translation tables)
 */
int arch_mem_domain_thread_add(struct k_thread *thread);

/**
 * @brief Remove a thread from a memory domain (arch-specific)
 *
 * Architecture-specific hook to manage internal data structures or hardware
 * state when the provided thread has been removed from a memory domain.
 *
 * The thread's memory domain pointer will be the domain that the thread
 * is being removed from.
 *
 * @param thread Thread being removed from its memory domain
 *
 * @retval 0 if successful
 * @retval -EINVAL if invalid parameters supplied
 */
int arch_mem_domain_thread_remove(struct k_thread *thread);

/**
 * @brief Remove a partition from the memory domain (arch-specific)
 *
 * Architecture-specific hook to manage internal data structures or hardware
 * state when a memory domain has had a partition removed.
 *
 * The partition index data is not cleared, and the configured partition
 * count is not decremented, in the domain until after this function runs.
 *
 * @param domain The memory domain structure
 * @param partition_id The partition index that needs to be deleted
 *
 * @retval 0 if successful
 * @retval -EINVAL if invalid parameters supplied
 * @retval -ENOENT if no matching partition found
 */
int arch_mem_domain_partition_remove(struct k_mem_domain *domain,
				     uint32_t partition_id);

/**
 * @brief Add a partition to the memory domain
 *
 * Architecture-specific hook to manage internal data structures or hardware
 * state when a memory domain has a partition added.
 *
 * @param domain The memory domain structure
 * @param partition_id The partition that needs to be added
 *
 * @retval 0 if successful
 * @retval -EINVAL if invalid parameters supplied
 */
int arch_mem_domain_partition_add(struct k_mem_domain *domain,
				  uint32_t partition_id);
#endif /* CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API */

/**
 * @brief Check memory region permissions
 *
 * Given a memory region, return whether the current memory management hardware
 * configuration would allow a user thread to read/write that region. Used by
 * system calls to validate buffers coming in from userspace.
 *
 * Notes:
 * The function is guaranteed to never return validation success if the entire
 * buffer area is not user accessible.
 *
 * The function is guaranteed to correctly validate the permissions of the
 * supplied buffer, if the user access permissions of the entire buffer are
 * enforced by a single, enabled memory management region.
 *
 * In some architectures the validation will always return failure
 * if the supplied memory buffer spans multiple enabled memory management
 * regions (even if all such regions permit user access).
 *
 * @warning Buffer of size zero (0) has undefined behavior.
 *
 * @param addr start address of the buffer
 * @param size the size of the buffer
 * @param write If non-zero, additionally check if the area is writable.
 *	  Otherwise, just check if the memory can be read.
 *
 * @return nonzero if the permissions don't match.
 */
int arch_buffer_validate(void *addr, size_t size, int write);
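
/*
 * Example (illustrative sketch, not part of this interface): how a syscall
 * verification path might reject an unwritable user buffer before using it.
 * In practice the kernel wraps this in higher-level syscall memory helpers.
 * user_buf and len are the untrusted pointer and size received from user
 * mode; -EFAULT is one plausible error choice.
 *
 *	if (arch_buffer_validate(user_buf, len, 1) != 0) {
 *		return -EFAULT;
 *	}
 */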

/**
 * Get the optimal virtual region alignment to optimize the MMU table layout
 *
 * Some MMU hardware requires a region to be aligned to one of its
 * intermediate block sizes in order to reduce table usage.
 * This call returns the optimal virtual address alignment in order to permit
 * such optimization in the following MMU mapping call.
 *
 * @param[in] phys Physical address of region to be mapped,
 *                 aligned to @kconfig{CONFIG_MMU_PAGE_SIZE}
 * @param[in] size Size of region to be mapped,
 *                 aligned to @kconfig{CONFIG_MMU_PAGE_SIZE}
 *
 * @return Alignment to apply on the virtual address of this region
 */
size_t arch_virt_region_align(uintptr_t phys, size_t size);

/**
 * Perform a one-way transition from supervisor to user mode.
 *
 * Implementations of this function must do the following:
 *
 * - Reset the thread's stack pointer to a suitable initial value. We do not
 *   need any prior context since this is a one-way operation.
 * - Set up any kernel stack region for the CPU to use during privilege
 *   elevation
 * - Put the CPU in whatever its equivalent of user mode is
 * - Transfer execution to the thread entry function, passing along user_entry
 *   and all the supplied arguments, in user mode.
 *
 * @param user_entry Entry point to start executing as a user thread
 * @param p1 1st parameter to user thread
 * @param p2 2nd parameter to user thread
 * @param p3 3rd parameter to user thread
 */
FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
					void *p1, void *p2, void *p3);

/**
 * @brief Induce a kernel oops that appears to come from a specific location
 *
 * Normally, k_oops() generates an exception that appears to come from the
 * call site of the k_oops() itself.
 *
 * However, when validating arguments to a system call, if there are problems
 * we want the oops to appear to come from where the system call was invoked
 * and not inside the validation function.
 *
 * @param ssf System call stack frame pointer. This gets passed as an argument
 *            to _k_syscall_handler_t functions and its contents are completely
 *            architecture specific.
 */
FUNC_NORETURN void arch_syscall_oops(void *ssf);

/**
 * @brief Safely take the length of a potentially bad string
 *
 * This must not fault; instead the @p err parameter must have -1 written to
 * it. This function otherwise should work exactly like libc strnlen(). On
 * success @p err should be set to 0.
 *
 * @param s String to measure
 * @param maxsize Max length of the string
 * @param err Error value to write
 * @return Length of the string, not counting the NUL terminator, up to maxsize
 */
size_t arch_user_string_nlen(const char *s, size_t maxsize, int *err);
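
/*
 * Example (illustrative sketch, not part of this interface): safely sizing a
 * string handed in from user mode before copying it.  user_str and max_len
 * are the untrusted pointer and the caller's bound; -EFAULT is one plausible
 * error choice.
 *
 *	int err;
 *	size_t len = arch_user_string_nlen(user_str, max_len, &err);
 *
 *	if (err != 0) {
 *		return -EFAULT;
 *	}
 */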
#endif /* CONFIG_USERSPACE */

/**
 * @brief Detect memory coherence type
 *
 * Required when ARCH_HAS_COHERENCE is true.  This function returns
 * true if the byte pointed to lies within an architecture-defined
 * "coherence region" (typically implemented with uncached memory) and
 * can safely be used in multiprocessor code without explicit flush or
 * invalidate operations.
 *
 * @note The result is for only the single byte at the specified
 * address, this API is not required to check region boundaries or to
 * expect aligned pointers.  The expectation is that the code above
 * will have queried the appropriate address(es).
 */
#ifndef CONFIG_ARCH_HAS_COHERENCE
static inline bool arch_mem_coherent(void *ptr)
{
	ARG_UNUSED(ptr);
	return true;
}
#endif
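
/*
 * Example (illustrative sketch, not part of this interface): asserting that
 * a shared object handed to cross-CPU code lives in coherent memory.  The
 * use of __ASSERT() here and the shared_obj pointer are assumptions made
 * for illustration.
 *
 *	__ASSERT(arch_mem_coherent(shared_obj),
 *		 "object must be in coherent memory");
 */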

/**
 * @brief Ensure cache coherence prior to context switch
 *
 * Required when ARCH_HAS_COHERENCE is true.  On cache-incoherent
 * multiprocessor architectures, thread stacks are cached by default
 * for performance reasons.  They must therefore be flushed
 * appropriately on context switch.  The rules are:
 *
 * 1. The region containing live data in the old stack (generally the
 *    bytes between the current stack pointer and the top of the stack
 *    memory) must be flushed to underlying storage so a new CPU that
 *    runs the same thread sees the correct data.  This must happen
 *    before the assignment of the switch_handle field in the thread
 *    struct which signals the completion of context switch.
 *
 * 2. Any data areas to be read from the new stack (generally the same
 *    as the live region when it was saved) should be invalidated (and
 *    NOT flushed!) in the data cache.  This is because another CPU
 *    may have run or re-initialized the thread since this CPU
 *    suspended it, and any data present in cache will be stale.
 *
 * @note The kernel will call this function during interrupt exit when
 * a new thread has been chosen to run, and also immediately before
 * entering arch_switch() to effect a code-driven context switch.  In
 * the latter case, it is very likely that more data will be written
 * to the old_thread stack region after this function returns but
 * before the completion of the switch.  Simply flushing naively here
 * is not sufficient on many architectures and coordination with the
 * arch_switch() implementation is likely required.
 *
 * @param old_thread The old thread to be flushed before being allowed
 *                   to run on other CPUs.
 * @param old_switch_handle The switch handle to be stored into
 *                          old_thread (it will not be valid until the
 *                          cache is flushed so is not present yet).
 *                          This will be NULL if inside z_swap()
 *                          (because the arch_switch() has not saved it
 *                          yet).
 * @param new_thread The new thread to be invalidated before it runs locally.
 */
#ifndef CONFIG_KERNEL_COHERENCE
static inline void arch_cohere_stacks(struct k_thread *old_thread,
				      void *old_switch_handle,
				      struct k_thread *new_thread)
{
	ARG_UNUSED(old_thread);
	ARG_UNUSED(old_switch_handle);
	ARG_UNUSED(new_thread);
}
#endif

/** @} */

/**
 * @defgroup arch-gdbstub Architecture-specific gdbstub APIs
 * @ingroup arch-interface
 * @{
 */

#ifdef CONFIG_GDBSTUB
struct gdb_ctx;

/**
 * @brief Architecture layer debug start
 *
 * This function is called by @c gdb_init()
 */
void arch_gdb_init(void);

/**
 * @brief Continue running program
 *
 * Continue software execution.
 */
void arch_gdb_continue(void);

/**
 * @brief Continue with one step
 *
 * Continue software execution until it reaches the next statement.
 */
void arch_gdb_step(void);

/**
 * @brief Read all registers and output them as a hexadecimal string.
 *
 * This reads all CPU registers and outputs them as a hexadecimal string.
 * The output string must be parsable by GDB.
 *
 * @param ctx    GDB context
 * @param buf    Buffer to output hexadecimal string.
 * @param buflen Length of buffer.
 *
 * @return Length of hexadecimal string written.
 *         Return 0 if error or not supported.
 */
size_t arch_gdb_reg_readall(struct gdb_ctx *ctx, uint8_t *buf, size_t buflen);

/**
 * @brief Take a hexadecimal string and update all registers.
 *
 * This takes in a hexadecimal string as presented from GDB,
 * and updates all CPU registers with new values.
 *
 * @param ctx    GDB context
 * @param hex    Input hexadecimal string.
 * @param hexlen Length of hexadecimal string.
 *
 * @return Length of hexadecimal string parsed.
 *         Return 0 if error or not supported.
 */
size_t arch_gdb_reg_writeall(struct gdb_ctx *ctx, uint8_t *hex, size_t hexlen);

/**
 * @brief Read one register and output it as a hexadecimal string.
 *
 * This reads one CPU register and outputs it as a hexadecimal string.
 * The output string must be parsable by GDB.
 *
 * @param ctx    GDB context
 * @param buf    Buffer to output hexadecimal string.
 * @param buflen Length of buffer.
 * @param regno  Register number
 *
 * @return Length of hexadecimal string written.
 *         Return 0 if error or not supported.
 */
size_t arch_gdb_reg_readone(struct gdb_ctx *ctx, uint8_t *buf, size_t buflen,
			    uint32_t regno);

/**
 * @brief Take a hexadecimal string and update one register.
 *
 * This takes in a hexadecimal string as presented from GDB,
 * and updates one CPU register with the new value.
 *
 * @param ctx    GDB context
 * @param hex    Input hexadecimal string.
 * @param hexlen Length of hexadecimal string.
 * @param regno  Register number
 *
 * @return Length of hexadecimal string parsed.
 *         Return 0 if error or not supported.
 */
size_t arch_gdb_reg_writeone(struct gdb_ctx *ctx, uint8_t *hex, size_t hexlen,
			     uint32_t regno);

/**
 * @brief Add breakpoint or watchpoint.
 *
 * @param ctx GDB context
 * @param type Breakpoint or watchpoint type
 * @param addr Address of breakpoint or watchpoint
 * @param kind Size of breakpoint/watchpoint in bytes
 *
 * @retval 0  Operation successful
 * @retval -1 Error encountered
 * @retval -2 Not supported
 */
int arch_gdb_add_breakpoint(struct gdb_ctx *ctx, uint8_t type,
			    uintptr_t addr, uint32_t kind);

/**
 * @brief Remove breakpoint or watchpoint.
 *
 * @param ctx GDB context
 * @param type Breakpoint or watchpoint type
 * @param addr Address of breakpoint or watchpoint
 * @param kind Size of breakpoint/watchpoint in bytes
 *
 * @retval 0  Operation successful
 * @retval -1 Error encountered
 * @retval -2 Not supported
 */
int arch_gdb_remove_breakpoint(struct gdb_ctx *ctx, uint8_t type,
			       uintptr_t addr, uint32_t kind);

#endif /* CONFIG_GDBSTUB */
/** @} */

#ifdef CONFIG_TIMING_FUNCTIONS
#include <zephyr/timing/types.h>

/**
 * @ingroup arch-timing
 * @{
 */

/**
 * @brief Initialize the timing subsystem.
 *
 * Perform the necessary steps to initialize the timing subsystem.
 *
 * @see timing_init()
 */
void arch_timing_init(void);

/**
 * @brief Signal the start of the timing information gathering.
 *
 * Signal to the timing subsystem that timing information
 * will be gathered from this point forward.
 *
 * @note Any call to arch_timing_counter_get() must be done between
 * calls to arch_timing_start() and arch_timing_stop(), and on the
 * same CPU core.
 *
 * @see timing_start()
 */
void arch_timing_start(void);

/**
 * @brief Signal the end of the timing information gathering.
 *
 * Signal to the timing subsystem that timing information
 * is no longer being gathered from this point forward.
 *
 * @note Any call to arch_timing_counter_get() must be done between
 * calls to arch_timing_start() and arch_timing_stop(), and on the
 * same CPU core.
 *
 * @see timing_stop()
 */
void arch_timing_stop(void);

/**
 * @brief Return timing counter.
 *
 * @note Any call to arch_timing_counter_get() must be done between
 * calls to arch_timing_start() and arch_timing_stop(), and on the
 * same CPU core.
 *
 * @note Not all platforms have a timing counter with 64 bit precision.  It
 * is possible to see this value "go backwards" due to internal
 * rollover.  Timing code must be prepared to address the rollover, either
 * with platform-dependent code (e.g. by casting to a uint32_t before
 * subtraction) or by using arch_timing_cycles_get(), which is required
 * to understand the distinction.
 *
 * @return Timing counter.
 *
 * @see timing_counter_get()
 */
timing_t arch_timing_counter_get(void);

/**
 * @brief Get number of cycles between @p start and @p end.
 *
 * For some architectures or SoCs, the raw numbers from the counter need
 * to be scaled to obtain the actual number of cycles, or may roll over
 * internally.  This function computes a positive-definite interval
 * between two returned cycle values.
 *
 * @param start Pointer to counter at start of a measured execution.
 * @param end Pointer to counter at stop of a measured execution.
 * @return Number of cycles between start and end.
 *
 * @see timing_cycles_get()
 */
uint64_t arch_timing_cycles_get(volatile timing_t *const start,
				volatile timing_t *const end);

/**
 * @brief Get frequency of counter used (in Hz).
 *
 * @return Frequency of counter used for timing in Hz.
 *
 * @see timing_freq_get()
 */
uint64_t arch_timing_freq_get(void);

/**
 * @brief Convert number of @p cycles into nanoseconds.
 *
 * @param cycles Number of cycles
 * @return Converted time value
 *
 * @see timing_cycles_to_ns()
 */
uint64_t arch_timing_cycles_to_ns(uint64_t cycles);
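
/*
 * Example (illustrative sketch, not part of this interface): timing a code
 * section with the raw counters and converting to nanoseconds.
 * measured_code() is a hypothetical function.
 *
 *	timing_t start, end;
 *
 *	arch_timing_init();
 *	arch_timing_start();
 *
 *	start = arch_timing_counter_get();
 *	measured_code();
 *	end = arch_timing_counter_get();
 *
 *	uint64_t ns = arch_timing_cycles_to_ns(
 *		arch_timing_cycles_get(&start, &end));
 *
 *	arch_timing_stop();
 */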

/**
 * @brief Convert number of @p cycles into nanoseconds with averaging.
 *
 * @param cycles Number of cycles
 * @param count Number of accumulated cycle samples to average over
 * @return Converted time value
 *
 * @see timing_cycles_to_ns_avg()
 */
uint64_t arch_timing_cycles_to_ns_avg(uint64_t cycles, uint32_t count);

/**
 * @brief Get frequency of counter used (in MHz).
 *
 * @return Frequency of counter used for timing in MHz.
 *
 * @see timing_freq_get_mhz()
 */
uint32_t arch_timing_freq_get_mhz(void);

/** @} */

#endif /* CONFIG_TIMING_FUNCTIONS */

#ifdef CONFIG_PCIE_MSI_MULTI_VECTOR

struct msi_vector;
typedef struct msi_vector msi_vector_t;

/**
 * @brief Allocate vector(s) for the endpoint MSI message(s).
 *
 * @param priority the MSI vectors base interrupt priority
 * @param vectors an array to fill with allocated MSI vectors
 * @param n_vector the size of the MSI vectors array
 *
 * @return The number of allocated MSI vectors
 */
uint8_t arch_pcie_msi_vectors_allocate(unsigned int priority,
				       msi_vector_t *vectors,
				       uint8_t n_vector);

/**
 * @brief Connect an MSI vector to the given routine
 *
 * @param vector The MSI vector to connect to
 * @param routine Interrupt service routine
 * @param parameter ISR parameter
 * @param flags Arch-specific IRQ configuration flag
 *
 * @return True on success, false otherwise
 */
bool arch_pcie_msi_vector_connect(msi_vector_t *vector,
				  void (*routine)(const void *parameter),
				  const void *parameter,
				  uint32_t flags);

#endif /* CONFIG_PCIE_MSI_MULTI_VECTOR */

/**
 * @brief Perform architecture specific processing within spin loops
 *
 * This is invoked from busy loops with IRQs disabled such as the contended
 * spinlock loop. The default implementation is a weak function that calls
 * arch_nop(). Architectures may implement this function to perform extra
 * checks or power management tricks if needed.
 */
void arch_spin_relax(void);

#ifdef __cplusplus
}
#endif /* __cplusplus */

#include <zephyr/arch/arch_inlines.h>

#endif /* _ASMLANGUAGE */

#endif /* ZEPHYR_INCLUDE_SYS_ARCH_INTERFACE_H_ */