/*
 * Copyright (c) 2019-2020 Cobham Gaisler AB
 *
 * SPDX-License-Identifier: Apache-2.0
 */
6
/**
 * @file
 * @brief SPARC specific kernel interface header
 *
 * This header contains the SPARC specific kernel interface. It is
 * included by the generic kernel interface header (arch/cpu.h).
 */
13
14 #ifndef ZEPHYR_INCLUDE_ARCH_SPARC_ARCH_H_
15 #define ZEPHYR_INCLUDE_ARCH_SPARC_ARCH_H_
16
17 #include <zephyr/arch/sparc/exception.h>
18 #include <zephyr/arch/sparc/thread.h>
19 #include <zephyr/arch/sparc/sparc.h>
20 #include <zephyr/arch/common/sys_bitops.h>
21 #include <zephyr/arch/common/sys_io.h>
22 #include <zephyr/arch/common/ffs.h>
23
24 #include <zephyr/irq.h>
25 #include <zephyr/sw_isr_table.h>
26 #include <soc.h>
27 #include <zephyr/devicetree.h>
28
/* Stacks: the SPARC architecture requires 8-byte stack alignment */
30 #define ARCH_STACK_PTR_ALIGN 8
31
/*
 * Software trap numbers.
 * Assembly usage: "ta SPARC_SW_TRAP_<TYPE>"
 */
36 #define SPARC_SW_TRAP_FLUSH_WINDOWS 0x03
37 #define SPARC_SW_TRAP_SET_PIL 0x09
38 #define SPARC_SW_TRAP_EXCEPT 0x0F
39
40 #ifndef _ASMLANGUAGE
41 #include <zephyr/sys/util.h>
42
43 #ifdef __cplusplus
44 extern "C" {
45 #endif
46
47 #define STACK_ROUND_UP(x) ROUND_UP(x, ARCH_STACK_PTR_ALIGN)
48
/*
 * SOC specific function to translate from processor interrupt request level
 * (1..15) to logical interrupt source number, for example by probing the
 * interrupt controller.
 */
54 int z_sparc_int_get_source(int irl);
55 void z_irq_spurious(const void *unused);
56
57
/*
 * Connect an ISR to an interrupt at build time.
 *
 * Registers isr_p/isr_param_p for irq_p in the software ISR table via
 * Z_ISR_DECLARE. priority_p and flags_p are accepted for API
 * compatibility but are not used by this macro (the ISR table entry is
 * declared with flag 0).
 */
#define ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
{ \
	Z_ISR_DECLARE(irq_p, 0, isr_p, isr_param_p); \
}
62
63
/*
 * Set the processor interrupt level (PIL) via the SET_PIL software trap
 * and return the previous level.
 *
 * @param newpil New interrupt level to install (passed to the trap
 *               handler in %o0).
 * @return Previous interrupt level (returned by the trap handler in
 *         %o0).
 */
static ALWAYS_INLINE unsigned int z_sparc_set_pil_inline(unsigned int newpil)
{
	/* Trap calling convention: %o0 carries the new PIL in and the old PIL out. */
	register uint32_t oldpil __asm__ ("o0") = newpil;

	/* "memory" clobber orders the trap against surrounding memory accesses. */
	__asm__ volatile (
		"ta %1\nnop\n" :
		"=r" (oldpil) :
		"i" (SPARC_SW_TRAP_SET_PIL), "r" (oldpil) :
		"memory"
	);
	return oldpil;
}
76
arch_irq_lock(void)77 static ALWAYS_INLINE unsigned int arch_irq_lock(void)
78 {
79 return z_sparc_set_pil_inline(15);
80 }
81
arch_irq_unlock(unsigned int key)82 static ALWAYS_INLINE void arch_irq_unlock(unsigned int key)
83 {
84 z_sparc_set_pil_inline(key);
85 }
86
arch_irq_unlocked(unsigned int key)87 static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key)
88 {
89 return key == 0;
90 }
91
/* Execute a single architectural no-operation instruction. */
static ALWAYS_INLINE void arch_nop(void)
{
	__asm__ volatile ("nop");
}
96
97 extern uint32_t sys_clock_cycle_get_32(void);
98
/*
 * Architecture hook for the 32-bit hardware cycle counter.
 *
 * @return Current cycle count as reported by the system clock driver.
 */
static inline uint32_t arch_k_cycle_get_32(void)
{
	uint32_t cycles = sys_clock_cycle_get_32();

	return cycles;
}
103
104 extern uint64_t sys_clock_cycle_get_64(void);
105
/*
 * Architecture hook for the 64-bit hardware cycle counter.
 *
 * @return Current cycle count as reported by the system clock driver.
 */
static inline uint64_t arch_k_cycle_get_64(void)
{
	uint64_t cycles = sys_clock_cycle_get_64();

	return cycles;
}
110
/*
 * Raise a fatal software exception with the given reason code.
 *
 * The reason is placed in register %g1 and the EXCEPT software trap is
 * issued; the trap handler picks the reason up from %g1. Control never
 * returns past the trap (CODE_UNREACHABLE).
 */
#define ARCH_EXCEPT(reason_p) \
	do { \
		register uint32_t _g1 __asm__("g1") = reason_p; \
		\
		__asm__ volatile ( \
			"ta %[vector]\n\t" \
			: \
			: [vector] "i" (SPARC_SW_TRAP_EXCEPT), "r" (_g1) \
			: "memory" \
		); \
		CODE_UNREACHABLE; \
	} while (false)
123
124 #ifdef __cplusplus
125 }
126 #endif
127
128 #endif /*_ASMLANGUAGE */
129
130 #endif /* ZEPHYR_INCLUDE_ARCH_SPARC_ARCH_H_ */
131