1 /*
2  * Copyright (c) 2019 Intel Corporation.
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 /**
8  * @defgroup arch-interface Architecture Interface
9  * @brief Internal kernel APIs with public scope
10  *
11  * Any public kernel APIs that are implemented as inline functions and need to
 * call architecture-specific APIs will have the prototypes for those
 * architecture-specific APIs here. Architecture APIs that aren't used in this
14  * way go in kernel/include/kernel_arch_interface.h.
15  *
 * The set of architecture-specific APIs used internally by public macros and
 * inline functions in public headers is also specified and documented here.
18  *
19  * For all macros and inline function prototypes described herein, <arch/cpu.h>
20  * must eventually pull in full definitions for all of them (the actual macro
 * defines and inline function bodies).
22  *
23  * include/kernel.h and other public headers depend on definitions in this
24  * header.
25  */
26 #ifndef ZEPHYR_INCLUDE_SYS_ARCH_INTERFACE_H_
27 #define ZEPHYR_INCLUDE_SYS_ARCH_INTERFACE_H_
28 
29 #ifndef _ASMLANGUAGE
30 #include <toolchain.h>
31 #include <stddef.h>
32 #include <zephyr/types.h>
33 #include <arch/cpu.h>
34 #include <irq_offload.h>
35 
36 #ifdef __cplusplus
37 extern "C" {
38 #endif
39 
40 /* NOTE: We cannot pull in kernel.h here, need some forward declarations  */
41 struct k_thread;
42 struct k_mem_domain;
43 
44 typedef struct z_thread_stack_element k_thread_stack_t;
45 
46 typedef void (*k_thread_entry_t)(void *p1, void *p2, void *p3);
47 
48 /**
49  * @defgroup arch-timing Architecture timing APIs
50  * @ingroup arch-interface
51  * @{
52  */
53 
54 /**
55  * Obtain the current cycle count, in units that are hardware-specific
56  *
57  * @see k_cycle_get_32()
58  */
59 static inline uint32_t arch_k_cycle_get_32(void);
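
/*
 * Example: a minimal sketch of an implementation backed by a
 * memory-mapped, free-running counter. CYCLE_COUNT_REG is a
 * hypothetical register address; real implementations read whatever
 * cycle source the architecture provides.
 *
 *	static inline uint32_t arch_k_cycle_get_32(void)
 *	{
 *		return sys_read32(CYCLE_COUNT_REG);
 *	}
 */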
60 
61 /** @} */
62 
63 
64 /**
65  * @addtogroup arch-threads
66  * @{
67  */
68 
69 /**
70  * @def ARCH_THREAD_STACK_RESERVED
71  *
72  * @see K_THREAD_STACK_RESERVED
73  */
74 
75 /**
76  * @def ARCH_STACK_PTR_ALIGN
77  *
78  * Required alignment of the CPU's stack pointer register value, dictated by
79  * hardware constraints and the ABI calling convention.
80  *
81  * @see Z_STACK_PTR_ALIGN
82  */
83 
84 /**
85  * @def ARCH_THREAD_STACK_OBJ_ALIGN(size)
86  *
87  * Required alignment of the lowest address of a stack object.
88  *
89  * Optional definition.
90  *
91  * @see Z_THREAD_STACK_OBJ_ALIGN
92  */
93 
94 /**
95  * @def ARCH_THREAD_STACK_SIZE_ADJUST(size)
96  * @brief Round up a stack buffer size to alignment constraints
97  *
98  * Adjust a requested stack buffer size to the true size of its underlying
99  * buffer, defined as the area usable for thread stack context and thread-
100  * local storage.
101  *
102  * The size value passed here does not include storage reserved for platform
103  * data.
104  *
105  * The returned value is either the same size provided (if already properly
106  * aligned), or rounded up to satisfy alignment constraints.  Calculations
107  * performed here *must* be idempotent.
108  *
109  * Optional definition. If undefined, stack buffer sizes are either:
110  * - Rounded up to the next power of two if user mode is enabled on an arch
111  *   with an MPU that requires such alignment
112  * - Rounded up to ARCH_STACK_PTR_ALIGN
113  *
114  * @see Z_THREAD_STACK_SIZE_ADJUST
115  */
116 
117 /**
118  * @def ARCH_KERNEL_STACK_RESERVED
119  * @brief MPU guard size for kernel-only stacks
120  *
121  * If MPU stack guards are used to catch stack overflows, specify the
122  * amount of space reserved in kernel stack objects. If guard sizes are
 * context dependent, this should be the minimum guard size, with
124  * remaining space carved out if needed.
125  *
126  * Optional definition, defaults to 0.
127  *
128  * @see K_KERNEL_STACK_RESERVED
129  */
130 
131 /**
132  * @def ARCH_KERNEL_STACK_OBJ_ALIGN
133  * @brief Required alignment of the lowest address of a kernel-only stack.
134  */
135 
136 /** @} */
137 
138 /**
139  * @addtogroup arch-pm
140  * @{
141  */
142 
143 /**
144  * @brief Power save idle routine
145  *
146  * This function will be called by the kernel idle loop or possibly within
147  * an implementation of z_pm_save_idle in the kernel when the
148  * '_pm_save_flag' variable is non-zero.
149  *
150  * Architectures that do not implement power management instructions may
151  * immediately return, otherwise a power-saving instruction should be
152  * issued to wait for an interrupt.
153  *
154  * @note The function is expected to return after the interrupt that has
155  * caused the CPU to exit power-saving mode has been serviced, although
156  * this is not a firm requirement.
157  *
158  * @see k_cpu_idle()
159  */
160 void arch_cpu_idle(void);
161 
162 /**
163  * @brief Atomically re-enable interrupts and enter low power mode
164  *
165  * The requirements for arch_cpu_atomic_idle() are as follows:
166  *
167  * -# Enabling interrupts and entering a low-power mode needs to be
168  *    atomic, i.e. there should be no period of time where interrupts are
 *    enabled before the processor enters a low-power mode.  See the comments
 *    in k_lifo_get() for an example of the race condition that occurs if
 *    this requirement is not met.
172  *
173  * -# After waking up from the low-power mode, the interrupt lockout state
174  *    must be restored as indicated in the 'key' input parameter.
175  *
176  * @see k_cpu_atomic_idle()
177  *
178  * @param key Lockout key returned by previous invocation of arch_irq_lock()
179  */
180 void arch_cpu_atomic_idle(unsigned int key);
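
/*
 * Example: a sketch of the wait-loop pattern this function enables,
 * where data_available is a hypothetical flag set by an ISR. Without
 * the atomicity guarantee, an interrupt arriving between re-enabling
 * interrupts and issuing the idle instruction would be missed and the
 * CPU could sleep forever.
 *
 *	unsigned int key = arch_irq_lock();
 *
 *	while (!data_available) {
 *		arch_cpu_atomic_idle(key);
 *		key = arch_irq_lock();
 *	}
 *	arch_irq_unlock(key);
 */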
181 
182 /** @} */
183 
184 
185 /**
186  * @addtogroup arch-smp
187  * @{
188  */
189 
190 /**
191  * Per-cpu entry function
192  *
193  * @param data context parameter, implementation specific
194  */
195 typedef FUNC_NORETURN void (*arch_cpustart_t)(void *data);
196 
197 /**
198  * @brief Start a numbered CPU on a MP-capable system
199  *
 * This starts and initializes a specific CPU.  The main thread on startup is
 * running on CPU zero; other processors are numbered sequentially.  On return
202  * from this function, the CPU is known to have begun operating and will enter
203  * the provided function.  Its interrupts will be initialized but disabled such
204  * that irq_unlock() with the provided key will work to enable them.
205  *
 * Normally, in SMP mode this function will be called during kernel
 * initialization and should not be used as a user API.  But it is defined here
 * for special-purpose apps which want Zephyr running on one core while using
 * the others for design-specific processing.
210  *
211  * @param cpu_num Integer number of the CPU
212  * @param stack Stack memory for the CPU
213  * @param sz Stack buffer size, in bytes
214  * @param fn Function to begin running on the CPU.
215  * @param arg Untyped argument to be passed to "fn"
216  */
217 void arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
218 		    arch_cpustart_t fn, void *arg);
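
/*
 * Example: a sketch of a special-purpose app bringing up a second core;
 * the stack size, cpu1_entry and do_design_specific_work() are
 * placeholders.
 *
 *	K_KERNEL_STACK_DEFINE(cpu1_stack, 1024);
 *
 *	FUNC_NORETURN void cpu1_entry(void *arg)
 *	{
 *		ARG_UNUSED(arg);
 *
 *		for (;;) {
 *			do_design_specific_work();
 *		}
 *	}
 *
 *	arch_start_cpu(1, cpu1_stack, K_KERNEL_STACK_SIZEOF(cpu1_stack),
 *		       cpu1_entry, NULL);
 */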
219 
220 /**
221  * @brief Return CPU power status
222  *
223  * @param cpu_num Integer number of the CPU
224  */
225 bool arch_cpu_active(int cpu_num);
226 
227 /** @} */
228 
229 
230 /**
231  * @addtogroup arch-irq
232  * @{
233  */
234 
235 /**
236  * Lock interrupts on the current CPU
237  *
238  * @see irq_lock()
239  */
240 static inline unsigned int arch_irq_lock(void);
241 
242 /**
243  * Unlock interrupts on the current CPU
244  *
245  * @see irq_unlock()
246  */
247 static inline void arch_irq_unlock(unsigned int key);
248 
249 /**
250  * Test if calling arch_irq_unlock() with this key would unlock irqs
251  *
252  * @param key value returned by arch_irq_lock()
253  * @return true if interrupts were unlocked prior to the arch_irq_lock()
254  * call that produced the key argument.
255  */
256 static inline bool arch_irq_unlocked(unsigned int key);
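
/*
 * Example: because the key records prior state, locking nests
 * naturally. In this sketch the inner unlock restores the still-locked
 * state recorded in its key (arch_irq_unlocked(inner) is false), so
 * interrupts stay disabled until the outer unlock.
 *
 *	unsigned int outer = arch_irq_lock();
 *	unsigned int inner = arch_irq_lock();
 *
 *	arch_irq_unlock(inner);
 *	arch_irq_unlock(outer);
 */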
257 
258 /**
259  * Disable the specified interrupt line
260  *
 * @note The behavior of interrupts that arrive after this call
262  * returns and before the corresponding call to arch_irq_enable() is
263  * undefined.  The hardware is not required to latch and deliver such
264  * an interrupt, though on some architectures that may work.  Other
265  * architectures will simply lose such an interrupt and never deliver
266  * it.  Many drivers and subsystems are not tolerant of such dropped
267  * interrupts and it is the job of the application layer to ensure
268  * that behavior remains correct.
269  *
270  * @see irq_disable()
271  */
272 void arch_irq_disable(unsigned int irq);
273 
274 /**
275  * Enable the specified interrupt line
276  *
277  * @see irq_enable()
278  */
279 void arch_irq_enable(unsigned int irq);
280 
281 /**
282  * Test if an interrupt line is enabled
283  *
284  * @see irq_is_enabled()
285  */
286 int arch_irq_is_enabled(unsigned int irq);
287 
288 /**
289  * Arch-specific hook to install a dynamic interrupt.
290  *
291  * @param irq IRQ line number
292  * @param priority Interrupt priority
293  * @param routine Interrupt service routine
294  * @param parameter ISR parameter
295  * @param flags Arch-specific IRQ configuration flag
296  *
297  * @return The vector assigned to this interrupt
298  */
299 int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
300 			     void (*routine)(const void *parameter),
301 			     const void *parameter, uint32_t flags);
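
/*
 * Example: a sketch of installing and enabling a dynamic ISR; the IRQ
 * line (25), priority (2) and my_isr are arbitrary placeholders.
 *
 *	static void my_isr(const void *arg)
 *	{
 *		ARG_UNUSED(arg);
 *	}
 *
 *	(void)arch_irq_connect_dynamic(25, 2, my_isr, NULL, 0);
 *	arch_irq_enable(25);
 */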
302 
303 /**
304  * @def ARCH_IRQ_CONNECT(irq, pri, isr, arg, flags)
305  *
306  * @see IRQ_CONNECT()
307  */
308 
309 /**
310  * @def ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p)
311  *
312  * @see IRQ_DIRECT_CONNECT()
313  */
314 
315 /**
316  * @def ARCH_ISR_DIRECT_PM()
317  *
318  * @see ISR_DIRECT_PM()
319  */
320 
321 /**
322  * @def ARCH_ISR_DIRECT_HEADER()
323  *
324  * @see ISR_DIRECT_HEADER()
325  */
326 
327 /**
328  * @def ARCH_ISR_DIRECT_FOOTER(swap)
329  *
330  * @see ISR_DIRECT_FOOTER()
331  */
332 
333 /**
334  * @def ARCH_ISR_DIRECT_DECLARE(name)
335  *
336  * @see ISR_DIRECT_DECLARE()
337  */
338 
339 /**
340  * @def ARCH_EXCEPT(reason_p)
341  *
342  * Generate a software induced fatal error.
343  *
344  * If the caller is running in user mode, only K_ERR_KERNEL_OOPS or
345  * K_ERR_STACK_CHK_FAIL may be induced.
346  *
347  * This should ideally generate a software trap, with exception context
348  * indicating state when this was invoked. General purpose register state at
349  * the time of trap should not be disturbed from the calling context.
350  *
351  * @param reason_p K_ERR_ scoped reason code for the fatal error.
352  */
353 
354 #ifdef CONFIG_IRQ_OFFLOAD
355 /**
356  * Run a function in interrupt context.
357  *
358  * Implementations should invoke an exception such that the kernel goes through
359  * its interrupt handling dispatch path, to include switching to the interrupt
360  * stack, and runs the provided routine and parameter.
361  *
362  * The only intended use-case for this function is for test code to simulate
363  * the correctness of kernel APIs in interrupt handling context. This API
364  * is not intended for real applications.
365  *
366  * @see irq_offload()
367  *
368  * @param routine Function to run in interrupt context
369  * @param parameter Value to pass to the function when invoked
370  */
371 void arch_irq_offload(irq_offload_routine_t routine, const void *parameter);
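
/*
 * Example: a ztest-style sketch of how test code uses this to verify
 * behavior in interrupt context.
 *
 *	static void offload_fn(const void *param)
 *	{
 *		zassert_true(k_is_in_isr(), "expected ISR context");
 *	}
 *
 *	arch_irq_offload(offload_fn, NULL);
 */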
372 #endif /* CONFIG_IRQ_OFFLOAD */
373 
374 /** @} */
375 
376 
377 /**
378  * @defgroup arch-smp Architecture-specific SMP APIs
379  * @ingroup arch-interface
380  * @{
381  */
382 #ifdef CONFIG_SMP
383 /** Return the CPU struct for the currently executing CPU */
384 static inline struct _cpu *arch_curr_cpu(void);
385 
386 /**
387  * Broadcast an interrupt to all CPUs
388  *
389  * This will invoke z_sched_ipi() on other CPUs in the system.
390  */
391 void arch_sched_ipi(void);
392 #endif /* CONFIG_SMP */
393 
394 /** @} */
395 
396 
397 /**
398  * @defgroup arch-userspace Architecture-specific userspace APIs
399  * @ingroup arch-interface
400  * @{
401  */
402 
403 #ifdef CONFIG_USERSPACE
404 /**
405  * Invoke a system call with 0 arguments.
406  *
 * No general-purpose register state other than the return value may be
 * preserved when transitioning from supervisor mode back down to user mode for
409  * security reasons.
410  *
411  * It is required that all arguments be stored in registers when elevating
412  * privileges from user to supervisor mode.
413  *
414  * Processing of the syscall takes place on a separate kernel stack. Interrupts
415  * should be enabled when invoking the system call marshallers from the
416  * dispatch table. Thread preemption may occur when handling system calls.
417  *
418  * Call ids are untrusted and must be bounds-checked, as the value is used to
419  * index the system call dispatch table, containing function pointers to the
420  * specific system call code.
421  *
422  * @param call_id System call ID
423  * @return Return value of the system call. Void system calls return 0 here.
424  */
425 static inline uintptr_t arch_syscall_invoke0(uintptr_t call_id);
426 
427 /**
428  * Invoke a system call with 1 argument.
429  *
430  * @see arch_syscall_invoke0()
431  *
432  * @param arg1 First argument to the system call.
433  * @param call_id System call ID, will be bounds-checked and used to reference
434  *	          kernel-side dispatch table
435  * @return Return value of the system call. Void system calls return 0 here.
436  */
437 static inline uintptr_t arch_syscall_invoke1(uintptr_t arg1,
438 					     uintptr_t call_id);
439 
440 /**
441  * Invoke a system call with 2 arguments.
442  *
443  * @see arch_syscall_invoke0()
444  *
445  * @param arg1 First argument to the system call.
446  * @param arg2 Second argument to the system call.
447  * @param call_id System call ID, will be bounds-checked and used to reference
448  *	          kernel-side dispatch table
449  * @return Return value of the system call. Void system calls return 0 here.
450  */
451 static inline uintptr_t arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2,
452 					     uintptr_t call_id);
453 
454 /**
455  * Invoke a system call with 3 arguments.
456  *
457  * @see arch_syscall_invoke0()
458  *
459  * @param arg1 First argument to the system call.
460  * @param arg2 Second argument to the system call.
461  * @param arg3 Third argument to the system call.
462  * @param call_id System call ID, will be bounds-checked and used to reference
463  *	          kernel-side dispatch table
464  * @return Return value of the system call. Void system calls return 0 here.
465  */
466 static inline uintptr_t arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2,
467 					     uintptr_t arg3,
468 					     uintptr_t call_id);
469 
470 /**
471  * Invoke a system call with 4 arguments.
472  *
473  * @see arch_syscall_invoke0()
474  *
475  * @param arg1 First argument to the system call.
476  * @param arg2 Second argument to the system call.
477  * @param arg3 Third argument to the system call.
478  * @param arg4 Fourth argument to the system call.
479  * @param call_id System call ID, will be bounds-checked and used to reference
480  *	          kernel-side dispatch table
481  * @return Return value of the system call. Void system calls return 0 here.
482  */
483 static inline uintptr_t arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2,
484 					     uintptr_t arg3, uintptr_t arg4,
485 					     uintptr_t call_id);
486 
487 /**
488  * Invoke a system call with 5 arguments.
489  *
490  * @see arch_syscall_invoke0()
491  *
492  * @param arg1 First argument to the system call.
493  * @param arg2 Second argument to the system call.
494  * @param arg3 Third argument to the system call.
495  * @param arg4 Fourth argument to the system call.
496  * @param arg5 Fifth argument to the system call.
497  * @param call_id System call ID, will be bounds-checked and used to reference
498  *	          kernel-side dispatch table
499  * @return Return value of the system call. Void system calls return 0 here.
500  */
501 static inline uintptr_t arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2,
502 					     uintptr_t arg3, uintptr_t arg4,
503 					     uintptr_t arg5,
504 					     uintptr_t call_id);
505 
506 /**
507  * Invoke a system call with 6 arguments.
508  *
509  * @see arch_syscall_invoke0()
510  *
511  * @param arg1 First argument to the system call.
512  * @param arg2 Second argument to the system call.
513  * @param arg3 Third argument to the system call.
514  * @param arg4 Fourth argument to the system call.
515  * @param arg5 Fifth argument to the system call.
516  * @param arg6 Sixth argument to the system call.
517  * @param call_id System call ID, will be bounds-checked and used to reference
518  *	          kernel-side dispatch table
519  * @return Return value of the system call. Void system calls return 0 here.
520  */
521 static inline uintptr_t arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2,
522 					     uintptr_t arg3, uintptr_t arg4,
523 					     uintptr_t arg5, uintptr_t arg6,
524 					     uintptr_t call_id);
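
/*
 * Example: a sketch of how a user-mode stub might marshal a
 * two-argument call through this interface; K_SYSCALL_MY_CALL is a
 * hypothetical ID indexing the dispatch table.
 *
 *	static inline int my_call(int a, int b)
 *	{
 *		return (int)arch_syscall_invoke2((uintptr_t)a, (uintptr_t)b,
 *						 K_SYSCALL_MY_CALL);
 *	}
 */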
525 
526 /**
527  * Indicate whether we are currently running in user mode
528  *
529  * @return true if the CPU is currently running with user permissions
530  */
531 static inline bool arch_is_user_context(void);
532 
533 /**
534  * @brief Get the maximum number of partitions for a memory domain
535  *
536  * @return Max number of partitions, or -1 if there is no limit
537  */
538 int arch_mem_domain_max_partitions_get(void);
539 
540 #ifdef CONFIG_ARCH_MEM_DOMAIN_DATA
541 /**
542  *
543  * @brief Architecture-specific hook for memory domain initialization
544  *
545  * Perform any tasks needed to initialize architecture-specific data within
546  * the memory domain, such as reserving memory for page tables. All members
547  * of the provided memory domain aside from `arch` will be initialized when
 * this is called, but no threads will be assigned yet.
549  *
550  * This function may fail if initializing the memory domain requires allocation,
551  * such as for page tables.
552  *
553  * The associated function k_mem_domain_init() documents that making
554  * multiple init calls to the same memory domain is undefined behavior,
 * but has no assertions in place to check this. If this matters, it may be
 * desirable to add such a check in the implementation of this function.
557  *
558  * @param domain The memory domain to initialize
559  * @retval 0 Success
560  * @retval -ENOMEM Insufficient memory
561  */
562 int arch_mem_domain_init(struct k_mem_domain *domain);
563 #endif /* CONFIG_ARCH_MEM_DOMAIN_DATA */
564 
565 #ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
566 /**
567  * @brief Add a thread to a memory domain (arch-specific)
568  *
569  * Architecture-specific hook to manage internal data structures or hardware
570  * state when the provided thread has been added to a memory domain.
571  *
 * The thread->mem_domain_info.mem_domain pointer will be set to the target
 * domain before this is called. Implementations may assume that the
 * thread is not already a member of this domain.
575  *
576  * @param thread Thread which needs to be configured.
577  */
578 void arch_mem_domain_thread_add(struct k_thread *thread);
579 
580 /**
581  * @brief Remove a thread from a memory domain (arch-specific)
582  *
583  * Architecture-specific hook to manage internal data structures or hardware
584  * state when the provided thread has been removed from a memory domain.
585  *
586  * The thread's memory domain pointer will be the domain that the thread
587  * is being removed from.
588  *
589  * @param thread Thread being removed from its memory domain
590  */
591 void arch_mem_domain_thread_remove(struct k_thread *thread);
592 
593 /**
594  * @brief Remove a partition from the memory domain (arch-specific)
595  *
596  * Architecture-specific hook to manage internal data structures or hardware
597  * state when a memory domain has had a partition removed.
598  *
 * The partition index data is not cleared, and the count of configured
 * partitions is not decremented, in the domain until after this function
 * runs.
602  *
603  * @param domain The memory domain structure
604  * @param partition_id The partition index that needs to be deleted
605  */
606 void arch_mem_domain_partition_remove(struct k_mem_domain *domain,
607 				      uint32_t partition_id);
608 
609 /**
610  * @brief Add a partition to the memory domain
611  *
612  * Architecture-specific hook to manage internal data structures or hardware
613  * state when a memory domain has a partition added.
614  *
615  * @param domain The memory domain structure
616  * @param partition_id The partition that needs to be added
617  */
618 void arch_mem_domain_partition_add(struct k_mem_domain *domain,
619 				   uint32_t partition_id);
620 #endif /* CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API */
621 
622 /**
623  * @brief Check memory region permissions
624  *
625  * Given a memory region, return whether the current memory management hardware
626  * configuration would allow a user thread to read/write that region. Used by
627  * system calls to validate buffers coming in from userspace.
628  *
629  * Notes:
 * The function is guaranteed to never return validation success if the entire
 * buffer area is not user-accessible.
632  *
633  * The function is guaranteed to correctly validate the permissions of the
634  * supplied buffer, if the user access permissions of the entire buffer are
635  * enforced by a single, enabled memory management region.
636  *
637  * In some architectures the validation will always return failure
638  * if the supplied memory buffer spans multiple enabled memory management
639  * regions (even if all such regions permit user access).
640  *
 * @warning A zero-sized buffer has undefined behavior.
642  *
643  * @param addr start address of the buffer
644  * @param size the size of the buffer
645  * @param write If nonzero, additionally check if the area is writable.
646  *	  Otherwise, just check if the memory can be read.
647  *
648  * @return nonzero if the permissions don't match.
649  */
650 int arch_buffer_validate(void *addr, size_t size, int write);
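
/*
 * Example: a sketch of kernel-side syscall code validating a
 * user-supplied output buffer before writing to it; user_buf, len,
 * kernel_data and ssf are placeholders from the surrounding handler.
 *
 *	if (arch_buffer_validate(user_buf, len, 1) != 0) {
 *		arch_syscall_oops(ssf);
 *	}
 *	memcpy(user_buf, kernel_data, len);
 */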
651 
652 /**
 * Perform a one-way transition from supervisor to user mode.
654  *
655  * Implementations of this function must do the following:
656  *
657  * - Reset the thread's stack pointer to a suitable initial value. We do not
658  *   need any prior context since this is a one-way operation.
659  * - Set up any kernel stack region for the CPU to use during privilege
660  *   elevation
661  * - Put the CPU in whatever its equivalent of user mode is
 * - Transfer execution to the provided user_entry function, passing along all
 *   the supplied arguments, in user mode.
664  *
665  * @param user_entry Entry point to start executing as a user thread
666  * @param p1 1st parameter to user thread
667  * @param p2 2nd parameter to user thread
668  * @param p3 3rd parameter to user thread
669  */
670 FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
671 					void *p1, void *p2, void *p3);
672 
673 /**
674  * @brief Induce a kernel oops that appears to come from a specific location
675  *
676  * Normally, k_oops() generates an exception that appears to come from the
677  * call site of the k_oops() itself.
678  *
679  * However, when validating arguments to a system call, if there are problems
680  * we want the oops to appear to come from where the system call was invoked
681  * and not inside the validation function.
682  *
683  * @param ssf System call stack frame pointer. This gets passed as an argument
684  *            to _k_syscall_handler_t functions and its contents are completely
685  *            architecture specific.
686  */
687 FUNC_NORETURN void arch_syscall_oops(void *ssf);
688 
689 /**
690  * @brief Safely take the length of a potentially bad string
691  *
 * This must not fault; instead, the err parameter must have -1 written to it.
 * This function should otherwise work exactly like libc strnlen(). On success,
 * *err should be set to 0.
695  *
696  * @param s String to measure
697  * @param maxsize Max length of the string
698  * @param err Error value to write
 * @return Length of the string, not counting the NUL terminator, up to maxsize
700  */
701 size_t arch_user_string_nlen(const char *s, size_t maxsize, int *err);
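
/*
 * Example: a sketch of the expected calling pattern when measuring an
 * untrusted user string; a fault is reported through err instead of
 * crashing the kernel.
 *
 *	int err;
 *	size_t len = arch_user_string_nlen(user_str, 256, &err);
 *
 *	if (err != 0) {
 *		return -EFAULT;
 *	}
 */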
702 #endif /* CONFIG_USERSPACE */
703 
704 /**
705  * @brief Detect memory coherence type
706  *
707  * Required when ARCH_HAS_COHERENCE is true.  This function returns
708  * true if the byte pointed to lies within an architecture-defined
709  * "coherence region" (typically implemented with uncached memory) and
710  * can safely be used in multiprocessor code without explicit flush or
711  * invalidate operations.
712  *
713  * @note The result is for only the single byte at the specified
714  * address, this API is not required to check region boundaries or to
715  * expect aligned pointers.  The expectation is that the code above
716  * will have queried the appropriate address(es).
717  */
718 #ifndef CONFIG_ARCH_HAS_COHERENCE
static inline bool arch_mem_coherent(void *ptr)
720 {
721 	ARG_UNUSED(ptr);
722 	return true;
723 }
724 #endif
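
/*
 * Example: a sketch of a guard an SMP-aware primitive might apply to
 * memory it is handed; the queue object is a placeholder.
 *
 *	__ASSERT(arch_mem_coherent(queue), "queue must be coherent");
 */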
725 
726 /**
727  * @brief Ensure cache coherence prior to context switch
728  *
729  * Required when ARCH_HAS_COHERENCE is true.  On cache-incoherent
730  * multiprocessor architectures, thread stacks are cached by default
731  * for performance reasons.  They must therefore be flushed
732  * appropriately on context switch.  The rules are:
733  *
734  * 1. The region containing live data in the old stack (generally the
735  *    bytes between the current stack pointer and the top of the stack
736  *    memory) must be flushed to underlying storage so a new CPU that
737  *    runs the same thread sees the correct data.  This must happen
738  *    before the assignment of the switch_handle field in the thread
739  *    struct which signals the completion of context switch.
740  *
741  * 2. Any data areas to be read from the new stack (generally the same
742  *    as the live region when it was saved) should be invalidated (and
743  *    NOT flushed!) in the data cache.  This is because another CPU
744  *    may have run or re-initialized the thread since this CPU
745  *    suspended it, and any data present in cache will be stale.
746  *
747  * @note The kernel will call this function during interrupt exit when
748  * a new thread has been chosen to run, and also immediately before
749  * entering arch_switch() to effect a code-driven context switch.  In
750  * the latter case, it is very likely that more data will be written
751  * to the old_thread stack region after this function returns but
752  * before the completion of the switch.  Simply flushing naively here
753  * is not sufficient on many architectures and coordination with the
754  * arch_switch() implementation is likely required.
755  *
 * @param old_thread The old thread to be flushed before being allowed
 *                   to run on other CPUs.
 * @param old_switch_handle The switch handle to be stored into
 *                          old_thread (it will not be valid until the
 *                          cache is flushed, so it is not present yet).
 *                          This will be NULL if inside z_swap()
 *                          (because arch_switch() has not saved it
 *                          yet).
 * @param new_thread The new thread to be invalidated before it runs locally.
765  */
766 #ifndef CONFIG_KERNEL_COHERENCE
static inline void arch_cohere_stacks(struct k_thread *old_thread,
768 				      void *old_switch_handle,
769 				      struct k_thread *new_thread)
770 {
771 	ARG_UNUSED(old_thread);
772 	ARG_UNUSED(old_switch_handle);
773 	ARG_UNUSED(new_thread);
774 }
775 #endif
776 
777 /** @} */
778 
779 /**
780  * @defgroup arch-gdbstub Architecture-specific gdbstub APIs
781  * @ingroup arch-interface
782  * @{
783  */
784 
785 /**
786  * @def ARCH_GDB_NUM_REGISTERS
787  *
 * ARCH_GDB_NUM_REGISTERS is architecture-specific; this symbol must be
 * defined in an architecture-specific header.
790  */
791 
792 #ifdef CONFIG_GDBSTUB
793 /**
794  * @brief Architecture layer debug start
795  *
796  * This function is called by @c gdb_init()
797  */
798 void arch_gdb_init(void);
799 
800 /**
801  * @brief Continue running program
802  *
803  * Continue software execution.
804  */
805 void arch_gdb_continue(void);
806 
807 /**
808  * @brief Continue with one step
809  *
 * Continue software execution until it reaches the next statement.
811  */
812 void arch_gdb_step(void);
813 
814 #endif
815 /** @} */
816 
817 /**
818  * @defgroup arch_cache Architecture-specific cache functions
819  * @ingroup arch-interface
820  * @{
821  */
822 
823 #if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_HAS_ARCH_CACHE)
824 /**
825  *
826  * @brief Enable d-cache
827  *
828  * @see arch_dcache_enable
829  */
830 void arch_dcache_enable(void);
831 
832 /**
833  *
834  * @brief Disable d-cache
835  *
836  * @see arch_dcache_disable
837  */
838 void arch_dcache_disable(void);
839 
840 /**
841  *
842  * @brief Enable i-cache
843  *
844  * @see arch_icache_enable
845  */
846 void arch_icache_enable(void);
847 
848 /**
849  *
850  * @brief Enable i-cache
851  *
852  * @see arch_dcache_disable
853  */
854 void arch_dcache_disable(void);
855 
856 /**
857  *
858  * @brief Write-back / Invalidate / Write-back + Invalidate all d-cache
859  *
860  * @see arch_dcache_all
861  */
862 int arch_dcache_all(int op);
863 
864 /**
865  *
866  * @brief Write-back / Invalidate / Write-back + Invalidate d-cache lines
867  *
868  * @see arch_dcache_range
869  */
870 int arch_dcache_range(void *addr, size_t size, int op);
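
/*
 * Example: a sketch of the DMA buffer pattern these operations support,
 * assuming the K_CACHE_WB and K_CACHE_INVD operation codes: write dirty
 * lines back before a device reads tx_buf, and invalidate before the
 * CPU reads what the device wrote into rx_buf.
 *
 *	arch_dcache_range(tx_buf, tx_len, K_CACHE_WB);
 *	(device transmits from tx_buf)
 *
 *	(device fills rx_buf)
 *	arch_dcache_range(rx_buf, rx_len, K_CACHE_INVD);
 */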
871 
872 /**
873  *
874  * @brief Write-back / Invalidate / Write-back + Invalidate all i-cache
875  *
876  * @see arch_icache_all
877  */
878 int arch_icache_all(int op);
879 
880 /**
881  *
882  * @brief Write-back / Invalidate / Write-back + Invalidate i-cache lines
883  *
884  * @see arch_icache_range
885  */
886 int arch_icache_range(void *addr, size_t size, int op);
887 
888 #ifdef CONFIG_DCACHE_LINE_SIZE_DETECT
889 /**
890  *
891  * @brief Get d-cache line size
892  *
893  * @see sys_cache_data_line_size_get
894  */
895 size_t arch_dcache_line_size_get(void);
896 #endif /* CONFIG_DCACHE_LINE_SIZE_DETECT */
897 
898 #ifdef CONFIG_ICACHE_LINE_SIZE_DETECT
899 /**
900  *
901  * @brief Get i-cache line size
902  *
903  * @see sys_cache_instr_line_size_get
904  */
905 size_t arch_icache_line_size_get(void);
906 #endif /* CONFIG_ICACHE_LINE_SIZE_DETECT */
907 
908 #endif /* CONFIG_CACHE_MANAGEMENT && CONFIG_HAS_ARCH_CACHE */
909 
910 /** @} */
911 
912 #ifdef CONFIG_TIMING_FUNCTIONS
913 #include <timing/types.h>
914 
915 /**
916  * @ingroup arch-timing
917  * @{
918  */
919 
920 /**
921  * @brief Initialize the timing subsystem.
922  *
923  * Perform the necessary steps to initialize the timing subsystem.
924  *
925  * @see timing_init()
926  */
927 void arch_timing_init(void);
928 
929 /**
930  * @brief Signal the start of the timing information gathering.
931  *
932  * Signal to the timing subsystem that timing information
933  * will be gathered from this point forward.
934  *
935  * @see timing_start()
936  */
937 void arch_timing_start(void);
938 
939 /**
940  * @brief Signal the end of the timing information gathering.
941  *
942  * Signal to the timing subsystem that timing information
943  * is no longer being gathered from this point forward.
944  *
945  * @see timing_stop()
946  */
947 void arch_timing_stop(void);
948 
949 /**
950  * @brief Return timing counter.
951  *
952  * @return Timing counter.
953  *
954  * @see timing_counter_get()
955  */
956 timing_t arch_timing_counter_get(void);
957 
958 /**
959  * @brief Get number of cycles between @p start and @p end.
960  *
 * For some architectures or SoCs, the raw numbers from the counter
 * need to be scaled to obtain the actual number of cycles.
963  *
964  * @param start Pointer to counter at start of a measured execution.
 * @param end Pointer to counter at end of a measured execution.
966  * @return Number of cycles between start and end.
967  *
968  * @see timing_cycles_get()
969  */
970 uint64_t arch_timing_cycles_get(volatile timing_t *const start,
971 				volatile timing_t *const end);
972 
973 /**
974  * @brief Get frequency of counter used (in Hz).
975  *
976  * @return Frequency of counter used for timing in Hz.
977  *
978  * @see timing_freq_get()
979  */
980 uint64_t arch_timing_freq_get(void);
981 
982 /**
983  * @brief Convert number of @p cycles into nanoseconds.
984  *
985  * @param cycles Number of cycles
986  * @return Converted time value
987  *
988  * @see timing_cycles_to_ns()
989  */
990 uint64_t arch_timing_cycles_to_ns(uint64_t cycles);
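
/*
 * Example: a sketch of measuring a code region with these hooks; the
 * public timing_* wrappers follow the same sequence.
 *
 *	timing_t start, end;
 *	uint64_t ns;
 *
 *	arch_timing_init();
 *	arch_timing_start();
 *	start = arch_timing_counter_get();
 *	(code under measurement)
 *	end = arch_timing_counter_get();
 *	arch_timing_stop();
 *
 *	ns = arch_timing_cycles_to_ns(arch_timing_cycles_get(&start, &end));
 */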
991 
992 /**
993  * @brief Convert number of @p cycles into nanoseconds with averaging.
994  *
995  * @param cycles Number of cycles
996  * @param count Times of accumulated cycles to average over
997  * @return Converted time value
998  *
999  * @see timing_cycles_to_ns_avg()
1000  */
1001 uint64_t arch_timing_cycles_to_ns_avg(uint64_t cycles, uint32_t count);
1002 
1003 /**
1004  * @brief Get frequency of counter used (in MHz).
1005  *
1006  * @return Frequency of counter used for timing in MHz.
1007  *
1008  * @see timing_freq_get_mhz()
1009  */
1010 uint32_t arch_timing_freq_get_mhz(void);
1011 
1012 /** @} */
1013 
1014 #endif /* CONFIG_TIMING_FUNCTIONS */
1015 
1016 #ifdef CONFIG_PCIE_MSI_MULTI_VECTOR
1017 
1018 struct msi_vector;
1019 typedef struct msi_vector msi_vector_t;
1020 
1021 /**
1022  * @brief Allocate vector(s) for the endpoint MSI message(s).
1023  *
1024  * @param priority the MSI vectors base interrupt priority
1025  * @param vectors an array to fill with allocated MSI vectors
 * @param n_vector the size of the MSI vectors array
1027  *
1028  * @return The number of allocated MSI vectors
1029  */
1030 uint8_t arch_pcie_msi_vectors_allocate(unsigned int priority,
1031 				       msi_vector_t *vectors,
1032 				       uint8_t n_vector);
1033 
1034 /**
1035  * @brief Connect an MSI vector to the given routine
1036  *
1037  * @param vector The MSI vector to connect to
1038  * @param routine Interrupt service routine
1039  * @param parameter ISR parameter
1040  * @param flags Arch-specific IRQ configuration flag
1041  *
1042  * @return True on success, false otherwise
1043  */
1044 bool arch_pcie_msi_vector_connect(msi_vector_t *vector,
1045 				  void (*routine)(const void *parameter),
1046 				  const void *parameter,
1047 				  uint32_t flags);
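
/*
 * Example: a sketch of the allocate-then-connect sequence an
 * MSI-capable driver might follow; msi_isr, the device pointer and the
 * priority value are placeholders.
 *
 *	msi_vector_t vectors[2];
 *	uint8_t n = arch_pcie_msi_vectors_allocate(1, vectors, 2);
 *
 *	for (uint8_t i = 0; i < n; i++) {
 *		(void)arch_pcie_msi_vector_connect(&vectors[i], msi_isr,
 *						   dev, 0);
 *	}
 */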
1048 
1049 #endif /* CONFIG_PCIE_MSI_MULTI_VECTOR */
1050 
1051 #ifdef __cplusplus
1052 }
1053 #endif /* __cplusplus */
1054 
1055 #include <arch/arch_inlines.h>
1056 
1057 #endif /* _ASMLANGUAGE */
1058 
1059 #endif /* ZEPHYR_INCLUDE_SYS_ARCH_INTERFACE_H_ */
1060