/*
 * Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/offsets.h>
#include "boot.h"
#include "macro_priv.inc"

_ASM_FILE_PROLOGUE

/*
 * Platform-specific pre-C init code
 *
 * Note: - Stack is not yet available
 *       - x23, x24 and x25 must be preserved
 */

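/*
 * These hooks are weak no-op stubs (WTEXT); SoC/board code may override
 * them to perform early, EL-specific setup before any C code runs.
 */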
WTEXT(z_arm64_el3_plat_prep_c)
SECTION_FUNC(TEXT,z_arm64_el3_plat_prep_c)
	ret

WTEXT(z_arm64_el2_plat_prep_c)
SECTION_FUNC(TEXT,z_arm64_el2_plat_prep_c)
	ret

WTEXT(z_arm64_el1_plat_prep_c)
SECTION_FUNC(TEXT,z_arm64_el1_plat_prep_c)
	ret

/*
 * Set the minimum necessary to safely call C code
 */

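/*
 * On entry: x24 holds the stack top to use, lr the return address.
 * On exit: SP_EL0 is selected and set to x24, fp is cleared, and SP_EL1
 * is programmed with the same stack.
 */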
GTEXT(__reset_prep_c)
SECTION_SUBSEC_FUNC(TEXT,_reset_section,__reset_prep_c)
	/* return address: x23 */
	mov	x23, lr

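	/*
	 * switch_el (macro_priv.inc) reads CurrentEL and branches to the
	 * label matching the exception level we booted at: 3f, 2f or 1f.
	 */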
	switch_el x0, 3f, 2f, 1f
3:
#if !defined(CONFIG_ARMV8_R)
	/* Reinitialize SCTLR from scratch in EL3 */
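	/*
	 * RES1 bits plus the I-cache (I) and SP alignment check (SA) bits;
	 * the MMU and data cache stay disabled.
	 */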
	ldr	w0, =(SCTLR_EL3_RES1 | SCTLR_I_BIT | SCTLR_SA_BIT)
	msr	sctlr_el3, x0
	isb

	/* Custom plat prep_c init */
	bl	z_arm64_el3_plat_prep_c

	/* Set SP_EL1 */
	msr     sp_el1, x24
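	/*
	 * Program SP_EL1 now so the stack is already valid when __reset
	 * later switches to SPSel=1 at EL1.
	 */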

	b	out
#endif /* CONFIG_ARMV8_R */
2:
	/* Disable alignment fault checking */
	mrs	x0, sctlr_el2
	bic	x0, x0, SCTLR_A_BIT
	msr	sctlr_el2, x0
	isb

	/* Custom plat prep_c init */
	bl	z_arm64_el2_plat_prep_c

	/* Set SP_EL1 */
	msr     sp_el1, x24

	b	out
1:
	/* Disable alignment fault checking */
	mrs	x0, sctlr_el1
	bic	x0, x0, SCTLR_A_BIT
	msr	sctlr_el1, x0
	isb

	/* Custom plat prep_c init */
	bl	z_arm64_el1_plat_prep_c

	/* Set SP_EL1. We cannot use sp_el1 at EL1 */
	msr     SPSel, #1
	mov     sp, x24
out:
	isb

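	/*
	 * The rest of the boot (and the early C code) runs on SP_EL0;
	 * SP_EL1 holds the same stack, so the later switch to SPSel=1 at
	 * EL1 is seamless.
	 */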
	/* Select SP_EL0 */
	msr	SPSel, #0

	/* Initialize stack */
	mov	sp, x24

	/* fp = NULL */
	mov	fp, xzr

	ret	x23

/*
 * Reset vector
 *
 * Run when the system comes out of reset. Execution starts at the highest
 * Exception Level the core was released in (EL3, EL2 or EL1). At this point,
 * neither SP_EL0 nor SP_ELx points to a valid area in SRAM.
 */

GTEXT(__reset)
SECTION_SUBSEC_FUNC(TEXT,_reset_section,__reset)

GTEXT(__start)
SECTION_SUBSEC_FUNC(TEXT,_reset_section,__start)

#ifdef CONFIG_WAIT_AT_RESET_VECTOR
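	/*
	 * Park the core right out of reset; typically a debugger attaches
	 * here and resumes the boot by moving the PC past this loop.
	 */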
resetwait:
	wfe
	b       resetwait
#endif

	/* Mask all exceptions */
	msr	DAIFSet, #0xf

#if CONFIG_MP_MAX_NUM_CPUS > 1

	/*
	 * Multiple cores may boot simultaneously and race to become the
	 * primary core. Use a voting lock [1], which places only reasonable,
	 * minimal requirements on the memory system, to make sure exactly one
	 * core wins.
	 *
	 * [1] kernel.org/doc/html/next/arch/arm/vlocks.html
	 */
	ldr	x0, =arm64_cpu_boot_params

	/*
	 * Get the "logic" id statically defined by cpu_node_list; the voting
	 * lock uses it for self-identification. Note that this is NOT the
	 * final logic id (arch_curr_cpu()->id).
	 */
	get_cpu_logic_id	x1, x2, x3, x4	//x1: MPID, x2: logic id

	add	x4, x0, #BOOT_PARAM_VOTING_OFFSET

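	/*
	 * A vote is one byte per CPU, indexed by the logic id in x2;
	 * arm64_cpu_boot_params.mpid is -1 while no primary has been elected.
	 */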
	/* signal our desire to vote */
	mov	w5, #1
	strb	w5, [x4, x2]
	ldr	x3, [x0, #BOOT_PARAM_MPID_OFFSET]
	cmn	x3, #1
	beq	1f

	/* another core already won; release our vote */
	strb	wzr, [x4, x2]
	b	secondary_core

	/* propose the current core as primary, then release our vote */
1:	str	x1, [x0, #BOOT_PARAM_MPID_OFFSET]
	strb	wzr, [x4, x2]
	dmb	ish

	/* then wait until every other core is done voting */
	mov	x5, #0
2:	ldrb	w3, [x4, x5]
	tst	w3, #255
	/* wait */
	bne	2b
	add	x5, x5, #1
	cmp	x5, #CONFIG_MP_MAX_NUM_CPUS
	bne	2b

	/* check if current core won */
	dmb	ish
	ldr	x3, [x0, #BOOT_PARAM_MPID_OFFSET]
	cmp	x3, x1
	beq	primary_core
	/* fall through to the secondary path */

	/* loop until our turn comes */
secondary_core:
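	/*
	 * Spin until this core's MPIDR shows up in arm64_cpu_boot_params.mpid,
	 * which is written from C (along with the stack below) when the
	 * secondary CPUs are started.
	 */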
	dmb	ish
	ldr	x2, [x0, #BOOT_PARAM_MPID_OFFSET]
	cmp	x1, x2
	bne	secondary_core

	/* we can now load our stack pointer value and move on */
	ldr	x24, [x0, #BOOT_PARAM_SP_OFFSET]
	ldr	x25, =z_arm64_secondary_prep_c
	b	boot

primary_core:
#endif
	/* load primary stack and entry point */
	ldr	x24, =(z_interrupt_stacks + __z_interrupt_stack_SIZEOF)
	ldr	x25, =z_prep_c
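	/*
	 * x24 (stack top) and x25 (C entry point) must survive
	 * __reset_prep_c and the per-EL init calls below; see the note above
	 * the plat_prep_c hooks.
	 */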
boot:
	/* Prepare for calling C code */
	bl	__reset_prep_c

	/*
	 * Initialize the interrupt stack with 0xaa so stack utilization
	 * can be measured. This needs to be done before using the stack
	 * so that we don't clobber any data.
	 */
#ifdef CONFIG_INIT_STACKS
	mov_imm	x0, CONFIG_ISR_STACK_SIZE
	sub	x0, sp, x0
	sub     x9, sp, #8
	mov     x10, 0xaaaaaaaaaaaaaaaa
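	/* fill [sp - CONFIG_ISR_STACK_SIZE, sp - 8) with the pattern, 8 bytes at a time */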
stack_init_loop:
	cmp     x0, x9
	beq     stack_init_done
	str     x10, [x0], #8
	b       stack_init_loop
stack_init_done:
#endif

	/* Platform hook for highest EL */
	bl	z_arm64_el_highest_init

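	/*
	 * This label is also passed to z_arm64_el3_get_next_el as the eret
	 * target, so the EL dispatch below runs again once EL3 drops to a
	 * lower exception level.
	 */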
switch_el:
	switch_el x0, 3f, 2f, 1f

3:
#if !defined(CONFIG_ARMV8_R)
	/* EL3 init */
	bl	z_arm64_el3_init

	/* Get next EL */
	adr	x0, switch_el
	bl	z_arm64_el3_get_next_el
	eret
#endif /* CONFIG_ARMV8_R */

2:
	/* EL2 init */
	bl	z_arm64_el2_init

	/* Move to EL1 with all exceptions masked */
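	/* EL1t: run EL1 on SP_EL0, with D, A, I and F all masked */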
	mov_imm	x0, (SPSR_DAIF_MASK | SPSR_MODE_EL1T)
	msr	spsr_el2, x0

	adr	x0, 1f
	msr	elr_el2, x0
	eret

1:
	/* EL1 init */
	bl	z_arm64_el1_init

	/* We want to use SP_ELx from now on */
	msr	SPSel, #1

	/* Enable SError interrupts */
	msr	DAIFClr, #(DAIFCLR_ABT_BIT)
	isb
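	/*
	 * Only SError (asynchronous aborts) is unmasked here; IRQ and FIQ
	 * stay masked until the kernel enables interrupt handling.
	 */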

	ret	x25  /* either z_prep_c or z_arm64_secondary_prep_c */