/*
 * Copyright (c) 2019 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @defgroup arch-interface Architecture Interface
 * @brief Internal kernel APIs with public scope
 *
 * Any public kernel APIs that are implemented as inline functions and need to
 * call architecture-specific APIs will have the prototypes for those
 * architecture-specific APIs here. Architecture APIs that aren't used in this
 * way go in kernel/include/kernel_arch_interface.h.
 *
 * The set of architecture-specific APIs used internally by public macros and
 * inline functions in public headers is also specified and documented here.
 *
 * For all macros and inline function prototypes described herein, <arch/cpu.h>
 * must eventually pull in full definitions for all of them (the actual macro
 * defines and inline function bodies).
 *
 * include/kernel.h and other public headers depend on definitions in this
 * header.
 */
#ifndef ZEPHYR_INCLUDE_SYS_ARCH_INTERFACE_H_
#define ZEPHYR_INCLUDE_SYS_ARCH_INTERFACE_H_

#ifndef _ASMLANGUAGE
#include <zephyr/toolchain.h>
#include <stddef.h>
#include <zephyr/types.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/irq_offload.h>

#ifdef __cplusplus
extern "C" {
#endif

/* NOTE: We cannot pull in kernel.h here, need some forward declarations */
struct k_thread;
struct k_mem_domain;

typedef struct z_thread_stack_element k_thread_stack_t;

typedef void (*k_thread_entry_t)(void *p1, void *p2, void *p3);

/**
 * @defgroup arch-timing Architecture timing APIs
 * @ingroup arch-interface
 * @{
 */

/**
 * Obtain the current cycle count, in units specified by
 * CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC. While this is historically
 * specified as part of the architecture API, in practice virtually
 * all platforms forward it to the sys_clock_cycle_get_32() API
 * provided by the timer driver.
 *
 * @see k_cycle_get_32()
 *
 * @return The current cycle time. This should count up monotonically
 * through the full 32 bit space, wrapping at 0xffffffff. Hardware
 * with fewer bits of precision in the timer is expected to synthesize
 * a 32 bit count.
 */
static inline uint32_t arch_k_cycle_get_32(void);

/**
 * As for arch_k_cycle_get_32(), but with a 64 bit return value. Not
 * all timer hardware has a 64 bit counter; this needs to be implemented
 * only if CONFIG_TIMER_HAS_64BIT_CYCLE_COUNTER is set.
 *
 * @see arch_k_cycle_get_32()
 *
 * @return The current cycle time. This should count up monotonically
 * through the full 64 bit space, wrapping at 2^64-1. Hardware with
 * fewer bits of precision in the timer is generally not expected to
 * implement this API.
 */
static inline uint64_t arch_k_cycle_get_64(void);
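
/*
 * Illustrative usage sketch (not part of this header): measuring an elapsed
 * interval with the 32 bit cycle counter. Unsigned subtraction yields a
 * correct delta even across a single counter wraparound; do_work() is a
 * hypothetical placeholder.
 *
 *     uint32_t start = arch_k_cycle_get_32();
 *
 *     do_work();
 *     uint32_t delta = arch_k_cycle_get_32() - start;
 */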

/** @} */


/**
 * @addtogroup arch-threads
 * @{
 */

/**
 * @def ARCH_THREAD_STACK_RESERVED
 *
 * @see K_THREAD_STACK_RESERVED
 */

/**
 * @def ARCH_STACK_PTR_ALIGN
 *
 * Required alignment of the CPU's stack pointer register value, dictated by
 * hardware constraints and the ABI calling convention.
 *
 * @see Z_STACK_PTR_ALIGN
 */

/**
 * @def ARCH_THREAD_STACK_OBJ_ALIGN(size)
 *
 * Required alignment of the lowest address of a stack object.
 *
 * Optional definition.
 *
 * @see Z_THREAD_STACK_OBJ_ALIGN
 */

/**
 * @def ARCH_THREAD_STACK_SIZE_ADJUST(size)
 * @brief Round up a stack buffer size to alignment constraints
 *
 * Adjust a requested stack buffer size to the true size of its underlying
 * buffer, defined as the area usable for thread stack context and thread-
 * local storage.
 *
 * The size value passed here does not include storage reserved for platform
 * data.
 *
 * The returned value is either the same size provided (if already properly
 * aligned), or rounded up to satisfy alignment constraints. Calculations
 * performed here *must* be idempotent.
 *
 * Optional definition. If undefined, stack buffer sizes are either:
 * - Rounded up to the next power of two if user mode is enabled on an arch
 *   with an MPU that requires such alignment
 * - Rounded up to ARCH_STACK_PTR_ALIGN
 *
 * @see Z_THREAD_STACK_SIZE_ADJUST
 */

/**
 * @def ARCH_KERNEL_STACK_RESERVED
 * @brief MPU guard size for kernel-only stacks
 *
 * If MPU stack guards are used to catch stack overflows, specify the
 * amount of space reserved in kernel stack objects. If guard sizes are
 * context dependent, this should be the minimum guard size, with
 * remaining space carved out if needed.
 *
 * Optional definition, defaults to 0.
 *
 * @see K_KERNEL_STACK_RESERVED
 */

/**
 * @def ARCH_KERNEL_STACK_OBJ_ALIGN
 * @brief Required alignment of the lowest address of a kernel-only stack.
 */

/** @} */

/**
 * @addtogroup arch-pm
 * @{
 */

/**
 * @brief Power save idle routine
 *
 * This function will be called by the kernel idle loop or possibly within
 * an implementation of z_pm_save_idle in the kernel when the
 * '_pm_save_flag' variable is non-zero.
 *
 * Architectures that do not implement power management instructions may
 * immediately return; otherwise, a power-saving instruction should be
 * issued to wait for an interrupt.
 *
 * @note The function is expected to return after the interrupt that has
 * caused the CPU to exit power-saving mode has been serviced, although
 * this is not a firm requirement.
 *
 * @see k_cpu_idle()
 */
void arch_cpu_idle(void);

/**
 * @brief Atomically re-enable interrupts and enter low power mode
 *
 * The requirements for arch_cpu_atomic_idle() are as follows:
 *
 * -# Enabling interrupts and entering a low-power mode needs to be
 *    atomic, i.e. there should be no period of time where interrupts are
 *    enabled before the processor enters a low-power mode. See the comments
 *    in k_lifo_get(), for example, describing the race condition that
 *    occurs if this requirement is not met.
 *
 * -# After waking up from the low-power mode, the interrupt lockout state
 *    must be restored as indicated in the 'key' input parameter.
 *
 * @see k_cpu_atomic_idle()
 *
 * @param key Lockout key returned by previous invocation of arch_irq_lock()
 */
void arch_cpu_atomic_idle(unsigned int key);
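
/*
 * Illustrative usage sketch (not part of this header), assuming interrupts
 * were unlocked on entry and data_ready is a hypothetical flag set by an
 * ISR: the wait condition is tested with interrupts locked, and the idle
 * entry re-enables them atomically so a wakeup cannot be missed.
 *
 *     unsigned int key = arch_irq_lock();
 *
 *     while (!data_ready) {
 *         arch_cpu_atomic_idle(key);
 *         key = arch_irq_lock();
 *     }
 *     arch_irq_unlock(key);
 */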

/** @} */


/**
 * @addtogroup arch-smp
 * @{
 */

/**
 * Per-cpu entry function
 *
 * @param data context parameter, implementation specific
 */
typedef FUNC_NORETURN void (*arch_cpustart_t)(void *data);

/**
 * @brief Start a numbered CPU on an MP-capable system
 *
 * This starts and initializes a specific CPU. The main thread on startup
 * runs on CPU zero; other processors are numbered sequentially. On return
 * from this function, the CPU is known to have begun operating and will enter
 * the provided function. Its interrupts will be initialized but disabled such
 * that irq_unlock() with the provided key will work to enable them.
 *
 * Normally, in SMP mode this function will be called during kernel
 * initialization and should not be used as a user API. But it is defined here
 * for special-purpose apps that want Zephyr running on one core and use the
 * others for design-specific processing.
 *
 * @param cpu_num Integer number of the CPU
 * @param stack Stack memory for the CPU
 * @param sz Stack buffer size, in bytes
 * @param fn Function to begin running on the CPU.
 * @param arg Untyped argument to be passed to "fn"
 */
void arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
                    arch_cpustart_t fn, void *arg);

/**
 * @brief Return CPU power status
 *
 * @param cpu_num Integer number of the CPU
 */
bool arch_cpu_active(int cpu_num);

/** @} */


/**
 * @addtogroup arch-irq
 * @{
 */

/**
 * Lock interrupts on the current CPU
 *
 * @see irq_lock()
 */
static inline unsigned int arch_irq_lock(void);

/**
 * Unlock interrupts on the current CPU
 *
 * @see irq_unlock()
 */
static inline void arch_irq_unlock(unsigned int key);

/**
 * Test if calling arch_irq_unlock() with this key would unlock irqs
 *
 * @param key value returned by arch_irq_lock()
 * @return true if interrupts were unlocked prior to the arch_irq_lock()
 * call that produced the key argument.
 */
static inline bool arch_irq_unlocked(unsigned int key);
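
/*
 * Illustrative usage sketch (not part of this header): lock keys nest,
 * because each unlock restores exactly the state captured by the matching
 * lock, and arch_irq_unlocked() reports whether that captured state had
 * interrupts enabled.
 *
 *     unsigned int key = arch_irq_lock();
 *
 *     if (arch_irq_unlocked(key)) {
 *         ...               entered from an unlocked (outermost) context
 *     }
 *     arch_irq_unlock(key);
 */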

/**
 * Disable the specified interrupt line
 *
 * @note: The behavior of interrupts that arrive after this call
 * returns and before the corresponding call to arch_irq_enable() is
 * undefined. The hardware is not required to latch and deliver such
 * an interrupt, though on some architectures that may work. Other
 * architectures will simply lose such an interrupt and never deliver
 * it. Many drivers and subsystems are not tolerant of such dropped
 * interrupts and it is the job of the application layer to ensure
 * that behavior remains correct.
 *
 * @see irq_disable()
 */
void arch_irq_disable(unsigned int irq);

/**
 * Enable the specified interrupt line
 *
 * @see irq_enable()
 */
void arch_irq_enable(unsigned int irq);

/**
 * Test if an interrupt line is enabled
 *
 * @see irq_is_enabled()
 */
int arch_irq_is_enabled(unsigned int irq);

/**
 * Arch-specific hook to install a dynamic interrupt.
 *
 * @param irq IRQ line number
 * @param priority Interrupt priority
 * @param routine Interrupt service routine
 * @param parameter ISR parameter
 * @param flags Arch-specific IRQ configuration flag
 *
 * @return The vector assigned to this interrupt
 */
int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
                             void (*routine)(const void *parameter),
                             const void *parameter, uint32_t flags);
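
/*
 * Illustrative sketch (not part of this header): installing and enabling
 * a dynamic interrupt. MY_IRQ_LINE, MY_IRQ_PRIO and my_isr() are
 * hypothetical placeholders.
 *
 *     static void my_isr(const void *arg)
 *     {
 *         ...
 *     }
 *
 *     (void)arch_irq_connect_dynamic(MY_IRQ_LINE, MY_IRQ_PRIO,
 *                                    my_isr, NULL, 0);
 *     arch_irq_enable(MY_IRQ_LINE);
 */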

/**
 * @def ARCH_IRQ_CONNECT(irq, pri, isr, arg, flags)
 *
 * @see IRQ_CONNECT()
 */

#ifdef CONFIG_PCIE
/**
 * @def ARCH_PCIE_IRQ_CONNECT(bdf, irq, pri, isr, arg, flags)
 *
 * @see PCIE_IRQ_CONNECT()
 */
#endif /* CONFIG_PCIE */

/**
 * @def ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p)
 *
 * @see IRQ_DIRECT_CONNECT()
 */

/**
 * @def ARCH_ISR_DIRECT_PM()
 *
 * @see ISR_DIRECT_PM()
 */

/**
 * @def ARCH_ISR_DIRECT_HEADER()
 *
 * @see ISR_DIRECT_HEADER()
 */

/**
 * @def ARCH_ISR_DIRECT_FOOTER(swap)
 *
 * @see ISR_DIRECT_FOOTER()
 */

/**
 * @def ARCH_ISR_DIRECT_DECLARE(name)
 *
 * @see ISR_DIRECT_DECLARE()
 */

#ifndef CONFIG_PCIE_CONTROLLER
/**
 * @brief Arch-specific hook for allocating IRQs
 *
 * Note: Implementations should disable/enable interrupts as appropriate
 * inside such a function to avoid concurrency issues. An allocated IRQ
 * is assumed to be in use; a subsequent call to arch_irq_is_used() should
 * thus return true.
 *
 * @return The newly allocated IRQ or UINT_MAX on error.
 */
unsigned int arch_irq_allocate(void);

/**
 * @brief Arch-specific hook for declaring an IRQ being used
 *
 * Note: Implementations should disable/enable interrupts as appropriate
 * inside such a function to avoid concurrency issues.
 *
 * @param irq the IRQ to declare being used
 */
void arch_irq_set_used(unsigned int irq);

/**
 * @brief Arch-specific hook for checking if an IRQ is already in use
 *
 * @param irq the IRQ to check
 *
 * @return true if in use, false otherwise
 */
bool arch_irq_is_used(unsigned int irq);

#endif /* CONFIG_PCIE_CONTROLLER */

/**
 * @def ARCH_EXCEPT(reason_p)
 *
 * Generate a software induced fatal error.
 *
 * If the caller is running in user mode, only K_ERR_KERNEL_OOPS or
 * K_ERR_STACK_CHK_FAIL may be induced.
 *
 * This should ideally generate a software trap, with exception context
 * indicating state when this was invoked. General purpose register state at
 * the time of trap should not be disturbed from the calling context.
 *
 * @param reason_p K_ERR_ scoped reason code for the fatal error.
 */

#ifdef CONFIG_IRQ_OFFLOAD
/**
 * Run a function in interrupt context.
 *
 * Implementations should invoke an exception such that the kernel goes through
 * its interrupt handling dispatch path, to include switching to the interrupt
 * stack, and runs the provided routine and parameter.
 *
 * The only intended use-case for this function is for test code to simulate
 * the correctness of kernel APIs in interrupt handling context. This API
 * is not intended for real applications.
 *
 * @see irq_offload()
 *
 * @param routine Function to run in interrupt context
 * @param parameter Value to pass to the function when invoked
 */
void arch_irq_offload(irq_offload_routine_t routine, const void *parameter);
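
/*
 * Illustrative sketch (not part of this header): how a test might run a
 * routine in interrupt context, using ztest assertions; offload_fn is a
 * hypothetical placeholder.
 *
 *     static void offload_fn(const void *param)
 *     {
 *         zassert_true(k_is_in_isr(), "should be in ISR context");
 *     }
 *
 *     arch_irq_offload(offload_fn, NULL);
 */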
#endif /* CONFIG_IRQ_OFFLOAD */

/** @} */


/**
 * @defgroup arch-smp Architecture-specific SMP APIs
 * @ingroup arch-interface
 * @{
 */
#ifdef CONFIG_SMP
/** Return the CPU struct for the currently executing CPU */
static inline struct _cpu *arch_curr_cpu(void);


/**
 * @brief Processor hardware ID
 *
 * Most multiprocessor architectures have a low-level unique ID value
 * associated with the current CPU that can be retrieved rapidly and
 * efficiently in kernel context. Note that while the numbering of
 * the CPUs is guaranteed to be unique, the values are
 * platform-defined. In particular, they are not guaranteed to match
 * Zephyr's own sequential CPU IDs (even though on some platforms they
 * do).
 *
 * @note There is an inherent race with this API: the system may
 * preempt the current thread and migrate it to another CPU before the
 * value is used. Safe usage requires knowing the migration is
 * impossible (e.g. because the code is in interrupt context, holds a
 * spinlock, or cannot migrate due to k_cpu_mask state).
 *
 * @return Unique ID for currently-executing CPU
 */
static inline uint32_t arch_proc_id(void);
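
/*
 * Illustrative usage sketch (not part of this header), with a hypothetical
 * spinlock named lock: holding it pins the thread, making the ID safe to
 * use.
 *
 *     static struct k_spinlock lock;
 *
 *     k_spinlock_key_t key = k_spin_lock(&lock);
 *     uint32_t hwid = arch_proc_id();
 *
 *     ...               hwid is stable while the lock is held
 *     k_spin_unlock(&lock, key);
 */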

/**
 * Broadcast an interrupt to all CPUs
 *
 * This will invoke z_sched_ipi() on other CPUs in the system.
 */
void arch_sched_ipi(void);

#endif /* CONFIG_SMP */

/**
 * @brief Returns the number of CPUs
 *
 * For most systems this will be the same as CONFIG_MP_MAX_NUM_CPUS;
 * however, some systems may determine this at runtime instead.
 *
 * @return the number of CPUs
 */
static inline unsigned int arch_num_cpus(void);

/** @} */


/**
 * @defgroup arch-userspace Architecture-specific userspace APIs
 * @ingroup arch-interface
 * @{
 */

#ifdef CONFIG_USERSPACE
/**
 * Invoke a system call with 0 arguments.
 *
 * No general-purpose register state other than return value may be preserved
 * when transitioning from supervisor mode back down to user mode for
 * security reasons.
 *
 * It is required that all arguments be stored in registers when elevating
 * privileges from user to supervisor mode.
 *
 * Processing of the syscall takes place on a separate kernel stack. Interrupts
 * should be enabled when invoking the system call marshallers from the
 * dispatch table. Thread preemption may occur when handling system calls.
 *
 * Call ids are untrusted and must be bounds-checked, as the value is used to
 * index the system call dispatch table, containing function pointers to the
 * specific system call code.
 *
 * @param call_id System call ID
 * @return Return value of the system call. Void system calls return 0 here.
 */
static inline uintptr_t arch_syscall_invoke0(uintptr_t call_id);

/**
 * Invoke a system call with 1 argument.
 *
 * @see arch_syscall_invoke0()
 *
 * @param arg1 First argument to the system call.
 * @param call_id System call ID, will be bounds-checked and used to reference
 *                kernel-side dispatch table
 * @return Return value of the system call. Void system calls return 0 here.
 */
static inline uintptr_t arch_syscall_invoke1(uintptr_t arg1,
                                             uintptr_t call_id);

/**
 * Invoke a system call with 2 arguments.
 *
 * @see arch_syscall_invoke0()
 *
 * @param arg1 First argument to the system call.
 * @param arg2 Second argument to the system call.
 * @param call_id System call ID, will be bounds-checked and used to reference
 *                kernel-side dispatch table
 * @return Return value of the system call. Void system calls return 0 here.
 */
static inline uintptr_t arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2,
                                             uintptr_t call_id);

/**
 * Invoke a system call with 3 arguments.
 *
 * @see arch_syscall_invoke0()
 *
 * @param arg1 First argument to the system call.
 * @param arg2 Second argument to the system call.
 * @param arg3 Third argument to the system call.
 * @param call_id System call ID, will be bounds-checked and used to reference
 *                kernel-side dispatch table
 * @return Return value of the system call. Void system calls return 0 here.
 */
static inline uintptr_t arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2,
                                             uintptr_t arg3,
                                             uintptr_t call_id);

/**
 * Invoke a system call with 4 arguments.
 *
 * @see arch_syscall_invoke0()
 *
 * @param arg1 First argument to the system call.
 * @param arg2 Second argument to the system call.
 * @param arg3 Third argument to the system call.
 * @param arg4 Fourth argument to the system call.
 * @param call_id System call ID, will be bounds-checked and used to reference
 *                kernel-side dispatch table
 * @return Return value of the system call. Void system calls return 0 here.
 */
static inline uintptr_t arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2,
                                             uintptr_t arg3, uintptr_t arg4,
                                             uintptr_t call_id);

/**
 * Invoke a system call with 5 arguments.
 *
 * @see arch_syscall_invoke0()
 *
 * @param arg1 First argument to the system call.
 * @param arg2 Second argument to the system call.
 * @param arg3 Third argument to the system call.
 * @param arg4 Fourth argument to the system call.
 * @param arg5 Fifth argument to the system call.
 * @param call_id System call ID, will be bounds-checked and used to reference
 *                kernel-side dispatch table
 * @return Return value of the system call. Void system calls return 0 here.
 */
static inline uintptr_t arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2,
                                             uintptr_t arg3, uintptr_t arg4,
                                             uintptr_t arg5,
                                             uintptr_t call_id);

/**
 * Invoke a system call with 6 arguments.
 *
 * @see arch_syscall_invoke0()
 *
 * @param arg1 First argument to the system call.
 * @param arg2 Second argument to the system call.
 * @param arg3 Third argument to the system call.
 * @param arg4 Fourth argument to the system call.
 * @param arg5 Fifth argument to the system call.
 * @param arg6 Sixth argument to the system call.
 * @param call_id System call ID, will be bounds-checked and used to reference
 *                kernel-side dispatch table
 * @return Return value of the system call. Void system calls return 0 here.
 */
static inline uintptr_t arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2,
                                             uintptr_t arg3, uintptr_t arg4,
                                             uintptr_t arg5, uintptr_t arg6,
                                             uintptr_t call_id);
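
/*
 * Illustrative sketch (not part of this header): how a user-mode wrapper
 * might marshal a two-argument system call; K_SYSCALL_MY_CALL is a
 * hypothetical call ID.
 *
 *     static inline int my_call(int a, int b)
 *     {
 *         return (int)arch_syscall_invoke2((uintptr_t)a, (uintptr_t)b,
 *                                          K_SYSCALL_MY_CALL);
 *     }
 */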

/**
 * Indicate whether we are currently running in user mode
 *
 * @return true if the CPU is currently running with user permissions
 */
static inline bool arch_is_user_context(void);

/**
 * @brief Get the maximum number of partitions for a memory domain
 *
 * @return Max number of partitions, or -1 if there is no limit
 */
int arch_mem_domain_max_partitions_get(void);

#ifdef CONFIG_ARCH_MEM_DOMAIN_DATA
/**
 *
 * @brief Architecture-specific hook for memory domain initialization
 *
 * Perform any tasks needed to initialize architecture-specific data within
 * the memory domain, such as reserving memory for page tables. All members
 * of the provided memory domain aside from `arch` will be initialized when
 * this is called, but no threads will be assigned yet.
 *
 * This function may fail if initializing the memory domain requires allocation,
 * such as for page tables.
 *
 * The associated function k_mem_domain_init() documents that making
 * multiple init calls to the same memory domain is undefined behavior,
 * but has no assertions in place to check this. If this matters, it may be
 * desirable to add checks for this in the implementation of this function.
 *
 * @param domain The memory domain to initialize
 * @retval 0 Success
 * @retval -ENOMEM Insufficient memory
 */
int arch_mem_domain_init(struct k_mem_domain *domain);
#endif /* CONFIG_ARCH_MEM_DOMAIN_DATA */

#ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
/**
 * @brief Add a thread to a memory domain (arch-specific)
 *
 * Architecture-specific hook to manage internal data structures or hardware
 * state when the provided thread has been added to a memory domain.
 *
 * The thread->mem_domain_info.mem_domain pointer will be set to the domain to
 * be added to before this is called. Implementations may assume that the
 * thread is not already a member of this domain.
 *
 * @param thread Thread which needs to be configured.
 *
 * @retval 0 if successful
 * @retval -EINVAL if invalid parameters supplied
 * @retval -ENOSPC if running out of space in internal structures
 *                 (e.g. translation tables)
 */
int arch_mem_domain_thread_add(struct k_thread *thread);

/**
 * @brief Remove a thread from a memory domain (arch-specific)
 *
 * Architecture-specific hook to manage internal data structures or hardware
 * state when the provided thread has been removed from a memory domain.
 *
 * The thread's memory domain pointer will be the domain that the thread
 * is being removed from.
 *
 * @param thread Thread being removed from its memory domain
 *
 * @retval 0 if successful
 * @retval -EINVAL if invalid parameters supplied
 */
int arch_mem_domain_thread_remove(struct k_thread *thread);

/**
 * @brief Remove a partition from the memory domain (arch-specific)
 *
 * Architecture-specific hook to manage internal data structures or hardware
 * state when a memory domain has had a partition removed.
 *
 * The partition index data is not cleared, nor is the configured partition
 * count decremented, in the domain until after this function runs.
 *
 * @param domain The memory domain structure
 * @param partition_id The partition index that needs to be deleted
 *
 * @retval 0 if successful
 * @retval -EINVAL if invalid parameters supplied
 * @retval -ENOENT if no matching partition found
 */
int arch_mem_domain_partition_remove(struct k_mem_domain *domain,
                                     uint32_t partition_id);

/**
 * @brief Add a partition to the memory domain
 *
 * Architecture-specific hook to manage internal data structures or hardware
 * state when a memory domain has a partition added.
 *
 * @param domain The memory domain structure
 * @param partition_id The partition that needs to be added
 *
 * @retval 0 if successful
 * @retval -EINVAL if invalid parameters supplied
 */
int arch_mem_domain_partition_add(struct k_mem_domain *domain,
                                  uint32_t partition_id);
#endif /* CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API */

/**
 * @brief Check memory region permissions
 *
 * Given a memory region, return whether the current memory management hardware
 * configuration would allow a user thread to read/write that region. Used by
 * system calls to validate buffers coming in from userspace.
 *
 * Notes:
 * The function is guaranteed to never return validation success if the
 * entire buffer area is not user-accessible.
 *
 * The function is guaranteed to correctly validate the permissions of the
 * supplied buffer, if the user access permissions of the entire buffer are
 * enforced by a single, enabled memory management region.
 *
 * In some architectures the validation will always return failure
 * if the supplied memory buffer spans multiple enabled memory management
 * regions (even if all such regions permit user access).
 *
 * @warning 0 size buffer has undefined behavior.
 *
 * @param addr start address of the buffer
 * @param size the size of the buffer
 * @param write If nonzero, additionally check if the area is writable.
 *              Otherwise, just check if the memory can be read.
 *
 * @return nonzero if the permissions don't match.
 */
int arch_buffer_validate(void *addr, size_t size, int write);
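
/*
 * Illustrative sketch (not part of this header): a syscall handler
 * checking an incoming user buffer before writing to it; buf and len are
 * hypothetical values received from user mode.
 *
 *     if (arch_buffer_validate(buf, len, 1) != 0) {
 *         ...               reject: buffer is not user-writable
 *     }
 */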

/**
 * Get the optimal virtual region alignment to optimize the MMU table layout
 *
 * Some MMU hardware requires regions to be aligned to an intermediate block
 * boundary in order to reduce page table usage.
 * This call returns the optimal virtual address alignment permitting such
 * optimization in the subsequent MMU mapping call.
 *
 * @param[in] phys Physical address of region to be mapped, aligned to MMU_PAGE_SIZE
 * @param[in] size Size of region to be mapped, aligned to MMU_PAGE_SIZE
 *
 * @retval alignment to apply on the virtual address of this region
 */
size_t arch_virt_region_align(uintptr_t phys, size_t size);

/**
 * Perform a one-way transition from supervisor to user mode.
 *
 * Implementations of this function must do the following:
 *
 * - Reset the thread's stack pointer to a suitable initial value. We do not
 *   need any prior context since this is a one-way operation.
 * - Set up any kernel stack region for the CPU to use during privilege
 *   elevation
 * - Put the CPU in whatever its equivalent of user mode is
 * - Transfer execution to arch_new_thread() passing along all the supplied
 *   arguments, in user mode.
 *
 * @param user_entry Entry point to start executing as a user thread
 * @param p1 1st parameter to user thread
 * @param p2 2nd parameter to user thread
 * @param p3 3rd parameter to user thread
 */
FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
                                        void *p1, void *p2, void *p3);

/**
 * @brief Induce a kernel oops that appears to come from a specific location
 *
 * Normally, k_oops() generates an exception that appears to come from the
 * call site of the k_oops() itself.
 *
 * However, when validating arguments to a system call, if there are problems
 * we want the oops to appear to come from where the system call was invoked
 * and not inside the validation function.
 *
 * @param ssf System call stack frame pointer. This gets passed as an argument
 *            to _k_syscall_handler_t functions and its contents are completely
 *            architecture specific.
 */
FUNC_NORETURN void arch_syscall_oops(void *ssf);

/**
 * @brief Safely take the length of a potentially bad string
 *
 * This must not fault; instead, the err parameter must have -1 written to it.
 * This function otherwise should work exactly like libc strnlen(). On success
 * *err should be set to 0.
 *
 * @param s String to measure
 * @param maxsize Max length of the string
 * @param err Error value to write
 * @return Length of the string, not counting NULL byte, up to maxsize
 */
size_t arch_user_string_nlen(const char *s, size_t maxsize, int *err);
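
/*
 * Illustrative sketch (not part of this header): measuring a user-supplied
 * string inside a syscall handler; name and MAX_NAME_LEN are hypothetical.
 *
 *     int err;
 *     size_t len = arch_user_string_nlen(name, MAX_NAME_LEN, &err);
 *
 *     if (err != 0) {
 *         ...               the access faulted; reject the string
 *     }
 */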
#endif /* CONFIG_USERSPACE */

/**
 * @brief Detect memory coherence type
 *
 * Required when ARCH_HAS_COHERENCE is true. This function returns
 * true if the byte pointed to lies within an architecture-defined
 * "coherence region" (typically implemented with uncached memory) and
 * can safely be used in multiprocessor code without explicit flush or
 * invalidate operations.
 *
 * @note The result is for only the single byte at the specified
 * address; this API is not required to check region boundaries or to
 * expect aligned pointers. The expectation is that the code above
 * will have queried the appropriate address(es).
 */
#ifndef CONFIG_ARCH_HAS_COHERENCE
static inline bool arch_mem_coherent(void *ptr)
{
	ARG_UNUSED(ptr);
	return true;
}
#endif

/**
 * @brief Ensure cache coherence prior to context switch
 *
 * Required when ARCH_HAS_COHERENCE is true. On cache-incoherent
 * multiprocessor architectures, thread stacks are cached by default
 * for performance reasons. They must therefore be flushed
 * appropriately on context switch. The rules are:
 *
 * 1. The region containing live data in the old stack (generally the
 *    bytes between the current stack pointer and the top of the stack
 *    memory) must be flushed to underlying storage so a new CPU that
 *    runs the same thread sees the correct data. This must happen
 *    before the assignment of the switch_handle field in the thread
 *    struct which signals the completion of context switch.
 *
 * 2. Any data areas to be read from the new stack (generally the same
 *    as the live region when it was saved) should be invalidated (and
 *    NOT flushed!) in the data cache. This is because another CPU
 *    may have run or re-initialized the thread since this CPU
 *    suspended it, and any data present in cache will be stale.
 *
 * @note The kernel will call this function during interrupt exit when
 * a new thread has been chosen to run, and also immediately before
 * entering arch_switch() to effect a code-driven context switch. In
 * the latter case, it is very likely that more data will be written
 * to the old_thread stack region after this function returns but
 * before the completion of the switch. Simply flushing naively here
 * is not sufficient on many architectures and coordination with the
 * arch_switch() implementation is likely required.
 *
 * @param old_thread The old thread to be flushed before being allowed
 *                   to run on other CPUs.
 * @param old_switch_handle The switch handle to be stored into
 *                          old_thread (it will not be valid until the
 *                          cache is flushed so is not present yet).
 *                          This will be NULL if inside z_swap()
 *                          (because the arch_switch() has not saved it
 *                          yet).
 * @param new_thread The new thread to be invalidated before it runs locally.
 */
#ifndef CONFIG_KERNEL_COHERENCE
static inline void arch_cohere_stacks(struct k_thread *old_thread,
                                      void *old_switch_handle,
                                      struct k_thread *new_thread)
{
	ARG_UNUSED(old_thread);
	ARG_UNUSED(old_switch_handle);
	ARG_UNUSED(new_thread);
}
#endif

/** @} */

/**
 * @defgroup arch-gdbstub Architecture-specific gdbstub APIs
 * @ingroup arch-interface
 * @{
 */

#ifdef CONFIG_GDBSTUB
struct gdb_ctx;

/**
 * @brief Architecture layer debug start
 *
 * This function is called by @c gdb_init()
 */
void arch_gdb_init(void);

/**
 * @brief Continue running program
 *
 * Continue software execution.
 */
void arch_gdb_continue(void);

/**
 * @brief Continue with one step
 *
 * Continue software execution until it reaches the next statement.
 */
void arch_gdb_step(void);

/**
 * @brief Read all registers and output them as a hexadecimal string.
 *
 * This reads all CPU registers and outputs them as a hexadecimal string.
 * The output string must be parsable by GDB.
 *
 * @param ctx GDB context
 * @param buf Buffer to output hexadecimal string.
 * @param buflen Length of buffer.
 *
 * @return Length of hexadecimal string written.
 *         Return 0 if error or not supported.
 */
size_t arch_gdb_reg_readall(struct gdb_ctx *ctx, uint8_t *buf, size_t buflen);

/**
 * @brief Take a hexadecimal string and update all registers.
 *
 * This takes in a hexadecimal string as presented from GDB,
 * and updates all CPU registers with new values.
 *
 * @param ctx GDB context
 * @param hex Input hexadecimal string.
 * @param hexlen Length of hexadecimal string.
 *
 * @return Length of hexadecimal string parsed.
 *         Return 0 if error or not supported.
 */
size_t arch_gdb_reg_writeall(struct gdb_ctx *ctx, uint8_t *hex, size_t hexlen);

/**
 * @brief Read one register and output it as a hexadecimal string.
 *
 * This reads one CPU register and outputs it as a hexadecimal string.
 * The output string must be parsable by GDB.
 *
 * @param ctx GDB context
 * @param buf Buffer to output hexadecimal string.
 * @param buflen Length of buffer.
 * @param regno Register number
 *
 * @return Length of hexadecimal string written.
 *         Return 0 if error or not supported.
 */
size_t arch_gdb_reg_readone(struct gdb_ctx *ctx, uint8_t *buf, size_t buflen,
                            uint32_t regno);

/**
 * @brief Take a hexadecimal string and update one register.
 *
 * This takes in a hexadecimal string as presented from GDB,
 * and updates one CPU register with a new value.
 *
 * @param ctx GDB context
 * @param hex Input hexadecimal string.
 * @param hexlen Length of hexadecimal string.
 * @param regno Register number
 *
 * @return Length of hexadecimal string parsed.
 *         Return 0 if error or not supported.
 */
size_t arch_gdb_reg_writeone(struct gdb_ctx *ctx, uint8_t *hex, size_t hexlen,
                             uint32_t regno);

/**
 * @brief Add breakpoint or watchpoint.
 *
 * @param ctx GDB context
 * @param type Breakpoint or watchpoint type
 * @param addr Address of breakpoint or watchpoint
 * @param kind Size of breakpoint/watchpoint in bytes
 *
 * @retval 0 Operation successful
 * @retval -1 Error encountered
 * @retval -2 Not supported
 */
int arch_gdb_add_breakpoint(struct gdb_ctx *ctx, uint8_t type,
                            uintptr_t addr, uint32_t kind);

/**
 * @brief Remove breakpoint or watchpoint.
 *
 * @param ctx GDB context
 * @param type Breakpoint or watchpoint type
 * @param addr Address of breakpoint or watchpoint
 * @param kind Size of breakpoint/watchpoint in bytes
 *
 * @retval 0 Operation successful
 * @retval -1 Error encountered
 * @retval -2 Not supported
 */
int arch_gdb_remove_breakpoint(struct gdb_ctx *ctx, uint8_t type,
                               uintptr_t addr, uint32_t kind);

#endif
/** @} */

#ifdef CONFIG_TIMING_FUNCTIONS
#include <zephyr/timing/types.h>

/**
 * @ingroup arch-timing
 * @{
 */

/**
 * @brief Initialize the timing subsystem.
 *
 * Perform the necessary steps to initialize the timing subsystem.
 *
 * @see timing_init()
 */
void arch_timing_init(void);

/**
 * @brief Signal the start of the timing information gathering.
 *
 * Signal to the timing subsystem that timing information
 * will be gathered from this point forward.
 *
 * @note Any call to arch_timing_counter_get() must be done between
 * calls to arch_timing_start() and arch_timing_stop(), and on the
 * same CPU core.
 *
 * @see timing_start()
 */
void arch_timing_start(void);

/**
 * @brief Signal the end of the timing information gathering.
 *
 * Signal to the timing subsystem that timing information
 * is no longer being gathered from this point forward.
 *
 * @note Any call to arch_timing_counter_get() must be done between
 * calls to arch_timing_start() and arch_timing_stop(), and on the
 * same CPU core.
 *
 * @see timing_stop()
 */
void arch_timing_stop(void);

/**
 * @brief Return timing counter.
 *
 * @note Any call to arch_timing_counter_get() must be done between
 * calls to arch_timing_start() and arch_timing_stop(), and on the
 * same CPU core.
 *
 * @note Not all platforms have a timing counter with 64 bit precision. It
 * is possible to see this value "go backwards" due to internal
 * rollover. Timing code must be prepared to address the rollover
 * (with platform-dependent code, e.g. by casting to a uint32_t before
 * subtraction) or by using arch_timing_cycles_get() which is required
 * to understand the distinction.
 *
 * @return Timing counter.
 *
 * @see timing_counter_get()
 */
timing_t arch_timing_counter_get(void);

/**
 * @brief Get number of cycles between @p start and @p end.
 *
 * For some architectures or SoCs, the raw numbers from the counter
 * need to be scaled to obtain the actual number of cycles, or may roll
 * over internally. This function computes a positive-definite interval
 * between two returned cycle values.
 *
 * @param start Pointer to counter at start of a measured execution.
 * @param end Pointer to counter at stop of a measured execution.
 * @return Number of cycles between start and end.
 *
 * @see timing_cycles_get()
 */
uint64_t arch_timing_cycles_get(volatile timing_t *const start,
                                volatile timing_t *const end);
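
/*
 * Illustrative sketch (not part of this header): a typical measurement
 * flow through the timing API; workload() is a hypothetical placeholder.
 *
 *     timing_t begin, finish;
 *
 *     arch_timing_init();
 *     arch_timing_start();
 *
 *     begin = arch_timing_counter_get();
 *     workload();
 *     finish = arch_timing_counter_get();
 *
 *     uint64_t ns = arch_timing_cycles_to_ns(
 *             arch_timing_cycles_get(&begin, &finish));
 *
 *     arch_timing_stop();
 */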

/**
 * @brief Get frequency of counter used (in Hz).
 *
 * @return Frequency of counter used for timing in Hz.
 *
 * @see timing_freq_get()
 */
uint64_t arch_timing_freq_get(void);

/**
 * @brief Convert number of @p cycles into nanoseconds.
 *
 * @param cycles Number of cycles
 * @return Converted time value
 *
 * @see timing_cycles_to_ns()
 */
uint64_t arch_timing_cycles_to_ns(uint64_t cycles);

/**
 * @brief Convert number of @p cycles into nanoseconds with averaging.
 *
 * @param cycles Number of cycles
 * @param count Number of accumulated cycle samples to average over
 * @return Converted time value
 *
 * @see timing_cycles_to_ns_avg()
 */
uint64_t arch_timing_cycles_to_ns_avg(uint64_t cycles, uint32_t count);

/**
 * @brief Get frequency of counter used (in MHz).
 *
 * @return Frequency of counter used for timing in MHz.
 *
 * @see timing_freq_get_mhz()
 */
uint32_t arch_timing_freq_get_mhz(void);

/** @} */

#endif /* CONFIG_TIMING_FUNCTIONS */

#ifdef CONFIG_PCIE_MSI_MULTI_VECTOR

struct msi_vector;
typedef struct msi_vector msi_vector_t;

/**
 * @brief Allocate vector(s) for the endpoint MSI message(s).
 *
 * @param priority the MSI vectors base interrupt priority
 * @param vectors an array to fill with allocated MSI vectors
 * @param n_vector the size of MSI vectors array
 *
 * @return The number of allocated MSI vectors
 */
uint8_t arch_pcie_msi_vectors_allocate(unsigned int priority,
                                       msi_vector_t *vectors,
                                       uint8_t n_vector);

/**
 * @brief Connect an MSI vector to the given routine
 *
 * @param vector The MSI vector to connect to
 * @param routine Interrupt service routine
 * @param parameter ISR parameter
 * @param flags Arch-specific IRQ configuration flag
 *
 * @return True on success, false otherwise
 */
bool arch_pcie_msi_vector_connect(msi_vector_t *vector,
                                  void (*routine)(const void *parameter),
                                  const void *parameter,
                                  uint32_t flags);
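
/*
 * Illustrative sketch (not part of this header): allocating MSI vectors
 * and attaching a handler; msi_isr() and MSI_PRIO are hypothetical
 * placeholders.
 *
 *     msi_vector_t vectors[2];
 *     uint8_t n = arch_pcie_msi_vectors_allocate(MSI_PRIO, vectors, 2);
 *
 *     for (uint8_t i = 0; i < n; i++) {
 *         (void)arch_pcie_msi_vector_connect(&vectors[i], msi_isr,
 *                                            NULL, 0);
 *     }
 */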

#endif /* CONFIG_PCIE_MSI_MULTI_VECTOR */

/**
 * @brief Perform architecture specific processing within spin loops
 *
 * This is invoked from busy loops with IRQs disabled such as the contended
 * spinlock loop. The default implementation is a weak function that calls
 * arch_nop(). Architectures may implement this function to perform extra
 * checks or power management tricks if needed.
 */
void arch_spin_relax(void);

#ifdef __cplusplus
}
#endif /* __cplusplus */

#include <zephyr/arch/arch_inlines.h>

#endif /* _ASMLANGUAGE */

#endif /* ZEPHYR_INCLUDE_SYS_ARCH_INTERFACE_H_ */