1 /*
2  * Copyright (c) 2021 Carlo Caione <ccaione@baylibre.com>
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include <kernel_internal.h>
8 #include <zephyr/sys/barrier.h>
9 #include "boot.h"
10 
11 void z_arm64_el2_init(void);
12 
/*
 * Weak hook run while executing at the highest implemented EL.
 * Platforms/SoCs may override it to do early EL-specific setup.
 */
void __weak z_arm64_el_highest_plat_init(void)
{
	/* do nothing */
}
17 
/*
 * Weak hook run from EL3 init. Platforms/SoCs may override it to
 * perform additional EL3 configuration (e.g. secure-world setup).
 */
void __weak z_arm64_el3_plat_init(void)
{
	/* do nothing */
}
22 
/*
 * Weak hook run from EL2 init. Platforms/SoCs may override it to
 * perform additional EL2 configuration.
 */
void __weak z_arm64_el2_plat_init(void)
{
	/* do nothing */
}
27 
/*
 * Weak hook run from EL1 init. Platforms/SoCs may override it to
 * perform additional EL1 configuration.
 */
void __weak z_arm64_el1_plat_init(void)
{
	/* do nothing */
}
32 
/*
 * Init performed at the highest implemented exception level.
 *
 * Programs the system counter frequency register (CNTFRQ_EL0) — which
 * is only writable from the highest implemented EL — then gives the
 * platform hook a chance to run, and finishes with an instruction
 * synchronization barrier so the new state is visible before any
 * subsequent code executes.
 */
void z_arm64_el_highest_init(void)
{
	if (is_el_highest_implemented()) {
		write_cntfrq_el0(CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC);
	}

	z_arm64_el_highest_plat_init();

	barrier_isync_fence_full();
}
43 
/* Destination exception level when dropping out of EL3. */
enum el3_next_el {
	EL3_TO_EL2,		/* Drop from EL3 into EL2 */
	EL3_TO_EL1_NO_EL2,	/* Drop to EL1: EL2 is not implemented */
	EL3_TO_EL1_SKIP_EL2	/* Drop to EL1: EL2 exists but must be skipped */
};
49 
el3_get_next_el(void)50 static inline enum el3_next_el el3_get_next_el(void)
51 {
52 	if (!is_el_implemented(2)) {
53 		return EL3_TO_EL1_NO_EL2;
54 	} else if (is_in_secure_state() && !is_el2_sec_supported()) {
55 		/*
56 		 * Is considered an illegal return "[..] a return to EL2 when EL3 is
57 		 * implemented and the value of the SCR_EL3.NS bit is 0 if
58 		 * ARMv8.4-SecEL2 is not implemented" (D1.11.2 from ARM DDI 0487E.a)
59 		 */
60 		return EL3_TO_EL1_SKIP_EL2;
61 	} else {
62 		return EL3_TO_EL2;
63 	}
64 }
65 
/*
 * EL3 initialization.
 *
 * Installs the exception vector table, disables EL3 traps on FP/SIMD
 * and system-register accesses (CPTR_EL3), configures the Secure
 * Configuration Register (SCR_EL3) for an AArch64 lower EL with HVC/SMC
 * usable, enables the GICv3 system-register interface when selected,
 * and runs the platform EL3 hook. If EL2 will be skipped on the way to
 * EL1, EL2 state is still initialized from here.
 */
void z_arm64_el3_init(void)
{
	uint64_t reg;

	/* Setup vector table */
	write_vbar_el3((uint64_t)_vector_table);
	barrier_isync_fence_full();

	reg = 0U;			/* Mostly RES0 */
	reg &= ~(CPTR_TTA_BIT |		/* Do not trap sysreg accesses */
		 CPTR_TFP_BIT |		/* Do not trap SVE, SIMD and FP */
		 CPTR_TCPAC_BIT);	/* Do not trap CPTR_EL2 / CPACR_EL1 accesses */
	write_cptr_el3(reg);

	reg = 0U;			/* Reset */
#ifdef CONFIG_ARMV8_A_NS
	reg |= SCR_NS_BIT;		/* EL2 / EL3 non-secure */
#else
	if (is_in_secure_state() && is_el2_sec_supported()) {
		reg |= SCR_EEL2_BIT;    /* Enable EL2 secure */
	}
#endif
	reg |= (SCR_RES1 |		/* RES1 */
		SCR_RW_BIT |		/* EL2 execution state is AArch64 */
		SCR_ST_BIT |		/* Do not trap EL1 accesses to timer */
		SCR_HCE_BIT |		/* Do not trap HVC */
		SCR_SMD_BIT);		/* Do not trap SMC */
	write_scr_el3(reg);

#if defined(CONFIG_GIC_V3)
	reg = read_sysreg(ICC_SRE_EL3);
	reg |= (ICC_SRE_ELx_DFB_BIT |	/* Disable FIQ bypass */
		ICC_SRE_ELx_DIB_BIT |	/* Disable IRQ bypass */
		ICC_SRE_ELx_SRE_BIT |	/* System register interface is used */
		ICC_SRE_EL3_EN_BIT);	/* Enables lower Exception level access to ICC_SRE_EL1 */
	write_sysreg(reg, ICC_SRE_EL3);
#endif

	z_arm64_el3_plat_init();

	/* Ensure all sysreg writes above take effect before continuing */
	barrier_isync_fence_full();

	if (el3_get_next_el() == EL3_TO_EL1_SKIP_EL2) {
		/*
		 * handle EL2 init in EL3, as it still needs to be done,
		 * but we are going to be skipping EL2.
		 */
		z_arm64_el2_init();
	}
}
116 
/*
 * EL2 initialization.
 *
 * Enables the i-cache and SP alignment checking at EL2 (SCTLR_EL2),
 * selects AArch64 for EL1 (HCR_EL2.RW), disables EL2 traps on FP/SIMD
 * and CPACR_EL1 accesses (CPTR_EL2), zeroes the virtual timer offset
 * and hypervisor timer control registers, optionally mirrors MPIDR_EL1
 * into VMPIDR_EL2, then runs the platform EL2 hook.
 *
 * Note: may be called from EL3 when EL2 is being skipped (see
 * z_arm64_el3_init()).
 */
void z_arm64_el2_init(void)
{
	uint64_t reg;

	reg = read_sctlr_el2();
	reg |= (SCTLR_EL2_RES1 |	/* RES1 */
		SCTLR_I_BIT |		/* Enable i-cache */
		SCTLR_SA_BIT);		/* Enable SP alignment check */
	write_sctlr_el2(reg);

	reg = read_hcr_el2();
	reg |= HCR_RW_BIT;		/* EL1 Execution state is AArch64 */
	write_hcr_el2(reg);

	reg = 0U;			/* RES0 */
	reg |= CPTR_EL2_RES1;		/* RES1 */
	reg &= ~(CPTR_TFP_BIT |		/* Do not trap SVE, SIMD and FP */
		 CPTR_TCPAC_BIT);	/* Do not trap CPACR_EL1 accesses */
	write_cptr_el2(reg);

	zero_cntvoff_el2();		/* Set 64-bit virtual timer offset to 0 */
	zero_cnthctl_el2();		/* Reset hypervisor timer control */
#ifdef CONFIG_CPU_AARCH64_CORTEX_R
	zero_cnthps_ctl_el2();
#else
	zero_cnthp_ctl_el2();
#endif

#ifdef CONFIG_ARM64_SET_VMPIDR_EL2
	/* Expose the real MPIDR to EL1 reads of (virtualized) MPIDR_EL1 */
	reg = read_mpidr_el1();
	write_vmpidr_el2(reg);
#endif

	/*
	 * Enable this if/when we use the hypervisor timer.
	 * write_cnthp_cval_el2(~(uint64_t)0);
	 */

	z_arm64_el2_plat_init();

	barrier_isync_fence_full();
}
159 
/*
 * EL1 initialization.
 *
 * Installs the exception vector table, disables FP/SIMD trapping
 * (CPACR_EL1), enables the i-cache, d-cache and SP alignment checking
 * (SCTLR_EL1), parks the virtual timer compare value at its maximum,
 * and runs the platform EL1 hook.
 */
void z_arm64_el1_init(void)
{
	uint64_t reg;

	/* Setup vector table */
	write_vbar_el1((uint64_t)_vector_table);
	barrier_isync_fence_full();

	reg = 0U;			/* RES0 */
	reg |= CPACR_EL1_FPEN_NOTRAP;	/* Do not trap NEON/SIMD/FP initially */
					/* TODO: CONFIG_FLOAT_*_FORBIDDEN */
	write_cpacr_el1(reg);

	reg = read_sctlr_el1();
	reg |= (SCTLR_EL1_RES1 |	/* RES1 */
		SCTLR_I_BIT |		/* Enable i-cache */
		SCTLR_C_BIT |		/* Enable d-cache */
		SCTLR_SA_BIT);		/* Enable SP alignment check */
	write_sctlr_el1(reg);

	/* Park the virtual timer: far-future compare value, no interrupt */
	write_cntv_cval_el0(~(uint64_t)0);
	/*
	 * Enable these if/when we use the corresponding timers.
	 * write_cntp_cval_el0(~(uint64_t)0);
	 * write_cntps_cval_el1(~(uint64_t)0);
	 */

	z_arm64_el1_plat_init();

	barrier_isync_fence_full();
}
191 
/*
 * Prepare the EL3 exception-return state for dropping to the next EL.
 *
 * Sets ELR_EL3 to @switch_addr (where execution resumes after ERET) and
 * builds SPSR_EL3 with all DAIF exceptions masked and the target mode
 * chosen by el3_get_next_el(): EL2t when EL2 is next, EL1t otherwise.
 */
void z_arm64_el3_get_next_el(uint64_t switch_addr)
{
	/* Resume address for the upcoming ERET */
	write_elr_el3(switch_addr);

	/* Mask the DAIF and select the destination mode */
	uint64_t spsr = SPSR_DAIF_MASK |
			((el3_get_next_el() == EL3_TO_EL2) ? SPSR_MODE_EL2T
							   : SPSR_MODE_EL1T);

	write_spsr_el3(spsr);
}
211