1/*
2 * Copyright (c) 2013-2022, ARM Limited and Contributors. All rights reserved.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7#include <platform_def.h>
8
9#include <arch.h>
10#include <common/bl_common.h>
11#include <el3_common_macros.S>
12#include <lib/pmf/aarch64/pmf_asm_macros.S>
13#include <lib/runtime_instr.h>
14#include <lib/xlat_tables/xlat_mmu_helpers.h>
15
16	.globl	bl31_entrypoint
17	.globl	bl31_warm_entrypoint
18
19	/* -----------------------------------------------------
20	 * bl31_entrypoint() is the cold boot entrypoint,
21	 * executed only by the primary cpu.
22	 * -----------------------------------------------------
23	 */
24
func bl31_entrypoint
	/* ---------------------------------------------------------------
	 * Stash the previous bootloader arguments x0 - x3 for later use.
	 * x20-x23 are callee-saved under AAPCS64, so the values survive
	 * the 'bl' calls performed inside el3_entrypoint_common (C runtime
	 * init etc.) until they are handed to bl31_setup() below.
	 * ---------------------------------------------------------------
	 */
	mov	x20, x0
	mov	x21, x1
	mov	x22, x2
	mov	x23, x3

#if !RESET_TO_BL31
	/* ---------------------------------------------------------------------
	 * For !RESET_TO_BL31 systems, only the primary CPU ever reaches
	 * bl31_entrypoint() during the cold boot flow, so the cold/warm boot
	 * and primary/secondary CPU logic should not be executed in this case.
	 *
	 * Also, assume that the previous bootloader has already initialised the
	 * SCTLR_EL3, including the endianness, and has initialised the memory.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_init_sctlr=0					\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=1				\
		_exception_vectors=runtime_exceptions		\
		_pie_fixup_size=BL31_LIMIT - BL31_BASE
#else

	/* ---------------------------------------------------------------------
	 * For RESET_TO_BL31 systems which have a programmable reset address,
	 * bl31_entrypoint() is executed only on the cold boot path so we can
	 * skip the warm boot mailbox mechanism.
	 *
	 * Here BL31 comes straight out of reset, so it must set up SCTLR_EL3
	 * and memory itself, and (when the reset address is fixed) park
	 * secondary CPUs / poll the warm boot mailbox.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_init_sctlr=1					\
		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
		_init_memory=1					\
		_init_c_runtime=1				\
		_exception_vectors=runtime_exceptions		\
		_pie_fixup_size=BL31_LIMIT - BL31_BASE

#if !RESET_TO_BL31_WITH_PARAMS
	/* ---------------------------------------------------------------------
	 * For RESET_TO_BL31 systems, BL31 is the first bootloader to run so
	 * there's no argument to relay from a previous bootloader. Zero the
	 * arguments passed to the platform layer to reflect that.
	 * ---------------------------------------------------------------------
	 */
	mov	x20, 0
	mov	x21, 0
	mov	x22, 0
	mov	x23, 0
#endif /* RESET_TO_BL31_WITH_PARAMS */
#endif /* RESET_TO_BL31 */

	/* --------------------------------------------------------------------
	 * Perform BL31 setup: restore the stashed bootloader arguments into
	 * the AAPCS64 argument registers and call into C.
	 * --------------------------------------------------------------------
	 */
	mov	x0, x20
	mov	x1, x21
	mov	x2, x22
	mov	x3, x23
	bl	bl31_setup

#if ENABLE_PAUTH
	/* --------------------------------------------------------------------
	 * Program APIAKey_EL1 and enable pointer authentication.
	 * Done after bl31_setup() and before bl31_main() so that the rest of
	 * the cold boot flow runs with PAuth active.
	 * --------------------------------------------------------------------
	 */
	bl	pauth_init_enable_el3
#endif /* ENABLE_PAUTH */

	/* --------------------------------------------------------------------
	 * Jump to main function
	 * --------------------------------------------------------------------
	 */
	bl	bl31_main

	/* --------------------------------------------------------------------
	 * Clean the .data & .bss sections to main memory. This ensures
	 * that any global data which was initialised by the primary CPU
	 * is visible to secondary CPUs before they enable their data
	 * caches and participate in coherency.
	 *
	 * Section bounds are taken from linker-defined symbols via the
	 * adrp/:lo12: pair (PC-relative, PIE-safe); x1 is converted from an
	 * end address into a byte length for clean_dcache_range().
	 * --------------------------------------------------------------------
	 */
	adrp	x0, __DATA_START__
	add	x0, x0, :lo12:__DATA_START__
	adrp	x1, __DATA_END__
	add	x1, x1, :lo12:__DATA_END__
	sub	x1, x1, x0
	bl	clean_dcache_range

	adrp	x0, __BSS_START__
	add	x0, x0, :lo12:__BSS_START__
	adrp	x1, __BSS_END__
	add	x1, x1, :lo12:__BSS_END__
	sub	x1, x1, x0
	bl	clean_dcache_range

	/* Tail-branch into the exception-return path; does not come back.
	 * NOTE(review): el3_exit is presumed to ERET into the next image
	 * prepared by bl31_main() - defined outside this file. */
	b	el3_exit
endfunc bl31_entrypoint
131
132	/* --------------------------------------------------------------------
133	 * This CPU has been physically powered up. It is either resuming from
134	 * suspend or has simply been turned on. In both cases, call the BL31
135	 * warmboot entrypoint
136	 * --------------------------------------------------------------------
137	 */
func bl31_warm_entrypoint
#if ENABLE_RUNTIME_INSTRUMENTATION

	/*
	 * This timestamp update happens with cache off.  The next
	 * timestamp collection will need to do cache maintenance prior
	 * to timestamp update.
	 *
	 * pmf_calc_timestamp_addr leaves the slot address in x0; the raw
	 * counter value is stored straight to memory.
	 */
	pmf_calc_timestamp_addr rt_instr_svc, RT_INSTR_EXIT_HW_LOW_PWR
	mrs	x1, cntpct_el0
	str	x1, [x0]
#endif

	/*
	 * On the warm boot path, most of the EL3 initialisations performed by
	 * 'el3_entrypoint_common' must be skipped:
	 *
	 *  - Only when the platform bypasses the BL1/BL31 entrypoint by
	 *    programming the reset address do we need to initialise SCTLR_EL3.
	 *    In other cases, we assume this has been taken care by the
	 *    entrypoint code.
	 *
	 *  - No need to determine the type of boot, we know it is a warm boot.
	 *
	 *  - Do not try to distinguish between primary and secondary CPUs, this
	 *    notion only exists for a cold boot.
	 *
	 *  - No need to initialise the memory or the C runtime environment,
	 *    it has been done once and for all on the cold boot path.
	 *
	 *  - PIE fixups were already applied on the cold boot path, hence
	 *    _pie_fixup_size=0 here.
	 */
	el3_entrypoint_common					\
		_init_sctlr=PROGRAMMABLE_RESET_ADDRESS		\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=0				\
		_exception_vectors=runtime_exceptions		\
		_pie_fixup_size=0

	/*
	 * We're about to enable MMU and participate in PSCI state coordination.
	 *
	 * The PSCI implementation invokes platform routines that enable CPUs to
	 * participate in coherency. On a system where CPUs are not
	 * cache-coherent without appropriate platform specific programming,
	 * having caches enabled until such time might lead to coherency issues
	 * (resulting from stale data getting speculatively fetched, among
	 * others). Therefore we keep data caches disabled even after enabling
	 * the MMU for such platforms.
	 *
	 * On systems with hardware-assisted coherency, or on single cluster
	 * platforms, such platform specific programming is not required to
	 * enter coherency (as CPUs already are); and there's no reason to have
	 * caches disabled either.
	 */
#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
	mov	x0, xzr			/* x0 = 0: enable D-cache with MMU */
#else
	mov	x0, #DISABLE_DCACHE	/* keep D-cache off until coherency entry */
#endif
	bl	bl31_plat_enable_mmu

#if ENABLE_RME
	/*
	 * At warm boot GPT data structures have already been initialized in RAM
	 * but the sysregs for this CPU need to be initialized. Note that the GPT
	 * accesses are controlled attributes in GPCCR and do not depend on the
	 * SCR_EL3.C bit.
	 *
	 * gpt_enable() returns 0 on success; any failure is fatal.
	 */
	bl	gpt_enable
	cbz	x0, 1f
	no_ret plat_panic_handler
1:
#endif

#if ENABLE_PAUTH
	/* --------------------------------------------------------------------
	 * Program APIAKey_EL1 and enable pointer authentication
	 * --------------------------------------------------------------------
	 */
	bl	pauth_init_enable_el3
#endif /* ENABLE_PAUTH */

	bl	psci_warmboot_entrypoint

#if ENABLE_RUNTIME_INSTRUMENTATION
	pmf_calc_timestamp_addr rt_instr_svc, RT_INSTR_EXIT_PSCI
	mov	x19, x0			/* x19 = timestamp slot, live across calls */

	/*
	 * Invalidate before updating timestamp to ensure previous timestamp
	 * updates on the same cache line with caches disabled are properly
	 * seen by the same core. Without the cache invalidate, the core might
	 * write into a stale cache line.
	 *
	 * x30 (LR) is manually preserved in callee-saved x20 around the call,
	 * since this leaf-style path keeps no stack frame of its own.
	 */
	mov	x1, #PMF_TS_SIZE
	mov	x20, x30
	bl	inv_dcache_range
	mov	x30, x20

	mrs	x0, cntpct_el0
	str	x0, [x19]
#endif
	/* Tail-branch into the exception-return path; does not come back. */
	b	el3_exit
endfunc bl31_warm_entrypoint
243