1/*
2 * Copyright (c) 2013-2014 Wind River Systems, Inc.
3 * Copyright (c) 2019 Stephanos Ioannidis <root@stephanos.io>
4 * Copyright 2024 Arm Limited and/or its affiliates <open-source-office@arm.com>
5 *
6 * SPDX-License-Identifier: Apache-2.0
7 */
8
9/**
10 * @file
11 * @brief Reset handler
12 *
13 * Reset handler that prepares the system for running C code.
14 */
15
16#include <zephyr/toolchain.h>
17#include <zephyr/linker/sections.h>
18#include <zephyr/arch/cpu.h>
19#include <offsets_short.h>
20#include <cortex_a_r/tcm.h>
21#include "vector_table.h"
22#include "boot.h"
23#include "macro_priv.inc"
24
25_ASM_FILE_PROLOGUE
26
27GTEXT(z_arm_reset)
28GDATA(z_interrupt_stacks)
29GDATA(z_arm_svc_stack)
30GDATA(z_arm_sys_stack)
31GDATA(z_arm_fiq_stack)
32GDATA(z_arm_abort_stack)
33GDATA(z_arm_undef_stack)
34#if defined(CONFIG_SOC_RESET_HOOK)
35GTEXT(soc_reset_hook)
36#endif
37
38/**
39 *
40 * @brief Reset vector
41 *
 * Runs when the system comes out of reset. The processor is in Supervisor mode
43 * and interrupts are disabled. The processor architectural registers are in
44 * an indeterminate state.
45 *
46 * When these steps are completed, jump to z_prep_c(), which will finish
47 * setting up the system for running C code.
48 *
49 */
SECTION_SUBSEC_FUNC(TEXT, _reset_section, z_arm_reset)
SECTION_SUBSEC_FUNC(TEXT, _reset_section, __start)
#if defined(CONFIG_AARCH32_ARMV8_R)
    /* Check if we are starting in HYP mode (Armv8-R cores may reset into EL2) */
    mrs r0, cpsr
    and r0, r0, #MODE_MASK
    cmp r0, #MODE_HYP
    bne EL1_Reset_Handler

    /*
     * The HSCTLR register provides top-level control of system operation in Hyp mode.
     * Since the OS is not running in Hyp mode, and considering the Armv8-R AArch32
     * architecture profile, there's no need to modify HSCTLR configuration unless
     * Fast Interrupts need to be enabled.
     */

    /* Init HACTLR: Enable EL1 access to all IMP DEF registers */
    ldr r0, =HACTLR_INIT
    mcr p15, 4, r0, c1, c0, 1

    /* Go to SVC mode: build the target CPSR (current flags, mode field = SVC) */
    mrs r0, cpsr
    bic r0, #MODE_MASK
    orr r0, #MODE_SVC
    msr spsr_cxsf, r0

    /*
     * Exception-return from Hyp mode: ELR_hyp holds the return address and
     * SPSR (set above) supplies the target mode, so ERET lands in
     * EL1_Reset_Handler running in SVC mode. DSB/ISB ensure the system
     * register writes above have completed before the ERET.
     */
    ldr r0, =EL1_Reset_Handler
    msr elr_hyp, r0
    dsb
    isb
    eret

EL1_Reset_Handler:

#endif
85
#if defined(CONFIG_DCLS)
    /*
     * Initialise CPU registers to a defined state if the processor is
     * configured as Dual-redundant Core Lock-step (DCLS). This is required
     * for state convergence of the two parallel executing cores.
     */

    /* Common and SVC mode registers */
    mov r0,  #0
    mov r1,  #0
    mov r2,  #0
    mov r3,  #0
    mov r4,  #0
    mov r5,  #0
    mov r6,  #0
    mov r7,  #0
    mov r8,  #0
    mov r9,  #0
    mov r10, #0
    mov r11, #0
    mov r12, #0
    mov r13, #0         /* r13_svc */
    mov r14, #0         /* r14_svc */
    /* Copy current CPSR into the banked SPSR so it too has a defined value */
    mrs r0,  cpsr
    msr spsr_cxsf, r0   /* spsr_svc */

    /* FIQ mode registers (banked r8-r14 plus spsr) */
    cps #MODE_FIQ
    mov r8,  #0         /* r8_fiq */
    mov r9,  #0         /* r9_fiq */
    mov r10, #0         /* r10_fiq */
    mov r11, #0         /* r11_fiq */
    mov r12, #0         /* r12_fiq */
    mov r13, #0         /* r13_fiq */
    mov r14, #0         /* r14_fiq */
    mrs r0,  cpsr
    msr spsr_cxsf, r0   /* spsr_fiq */

    /* IRQ mode registers */
    cps #MODE_IRQ
    mov r13, #0         /* r13_irq */
    mov r14, #0         /* r14_irq */
    mrs r0,  cpsr
    msr spsr_cxsf, r0   /* spsr_irq */

    /* ABT mode registers */
    cps #MODE_ABT
    mov r13, #0         /* r13_abt */
    mov r14, #0         /* r14_abt */
    mrs r0,  cpsr
    msr spsr_cxsf, r0   /* spsr_abt */

    /* UND mode registers */
    cps #MODE_UND
    mov r13, #0         /* r13_und */
    mov r14, #0         /* r14_und */
    mrs r0,  cpsr
    msr spsr_cxsf, r0   /* spsr_und */

    /* SYS mode registers (shared with USR); execution continues in SYS mode */
    cps #MODE_SYS
    mov r13, #0         /* r13_sys */
    mov r14, #0         /* r14_sys */

#if defined(CONFIG_FPU) && defined(CONFIG_CPU_HAS_VFP)
    /*
     * Initialise FPU registers to a defined state.
     * NOTE: r1 is still zero from the integer-register init above and is
     * used as the all-zeroes source for the d-registers below.
     */

    /* Allow VFP coprocessor access */
    mrc p15, 0, r0, c1, c0, 2
    orr r0, r0, #(CPACR_CP10(CPACR_FA) | CPACR_CP11(CPACR_FA))
    mcr p15, 0, r0, c1, c0, 2

    /* Enable VFP so the d-registers can be written */
    mov  r0, #FPEXC_EN
    vmsr fpexc, r0

    /* Initialise VFP registers */
    fmdrr d0,  r1, r1
    fmdrr d1,  r1, r1
    fmdrr d2,  r1, r1
    fmdrr d3,  r1, r1
    fmdrr d4,  r1, r1
    fmdrr d5,  r1, r1
    fmdrr d6,  r1, r1
    fmdrr d7,  r1, r1
    fmdrr d8,  r1, r1
    fmdrr d9,  r1, r1
    fmdrr d10, r1, r1
    fmdrr d11, r1, r1
    fmdrr d12, r1, r1
    fmdrr d13, r1, r1
    fmdrr d14, r1, r1
    fmdrr d15, r1, r1
#if defined(CONFIG_VFP_FEATURE_REGS_S64_D32)
    fmdrr d16, r1, r1
    fmdrr d17, r1, r1
    fmdrr d18, r1, r1
    fmdrr d19, r1, r1
    fmdrr d20, r1, r1
    fmdrr d21, r1, r1
    fmdrr d22, r1, r1
    fmdrr d23, r1, r1
    fmdrr d24, r1, r1
    fmdrr d25, r1, r1
    fmdrr d26, r1, r1
    fmdrr d27, r1, r1
    fmdrr d28, r1, r1
    fmdrr d29, r1, r1
    fmdrr d30, r1, r1
#endif /* CONFIG_VFP_FEATURE_REGS_S64_D32 */

    /* Zero FPSCR, then clear FPEXC (disables VFP again; re-enabled later) */
    vmsr fpscr, r1
    vmsr fpexc, r1
#endif /* CONFIG_FPU && CONFIG_CPU_HAS_VFP */

#endif /* CONFIG_DCLS */
205
#if defined(CONFIG_CPU_CORTEX_R52_CACHE_SEGREGATION)
    /*
     * Program IMP_CSCTLR (Cortex-R52 IMPLEMENTATION DEFINED Cache Segregation
     * Control Register) with the configured I-cache/D-cache way split.
     * NOTE(review): encoding p15, 1, c9, c1, 0 is Cortex-R52 specific —
     * confirm against the R52 TRM if reused on another core.
     */
    ldr r0, =IMP_CSCTLR(CONFIG_CPU_CORTEX_R52_ICACHE_FLASH_WAY,
                        CONFIG_CPU_CORTEX_R52_DCACHE_FLASH_WAY)
    mcr p15, 1, r0, c9, c1, 0
#endif
211
212    ldr r0, =arm_cpu_boot_params
213
214#if CONFIG_MP_MAX_NUM_CPUS > 1
215    /*
216     * This code uses voting locks, like arch/arm64/core/reset.S, to determine primary CPU.
217     */
218
219    /*
220     * Get the "logic" id defined by cpu_node_list statically for voting lock self-identify.
221     * It is worth noting that this is NOT the final logic id (arch_curr_cpu()->id)
222     */
223    get_cpu_logic_id r1, r2, r3, r4 // r1: MPID, r2: logic id
224
225    add r4, r0, #BOOT_PARAM_VOTING_OFFSET
226
227    /* signal our desire to vote */
228    mov r5, #1
229    strb r5, [r4, r2]
230    ldr r3, [r0, #BOOT_PARAM_MPID_OFFSET]
231    cmn r3, #1
232    beq 1f
233
234    /* some core already won, release */
235    mov r7, #0
236    strb r7, [r4, r2]
237    b _secondary_core
238
239    /* suggest current core then release */
2401:  str r1, [r0, #BOOT_PARAM_MPID_OFFSET]
241    strb r7, [r4, r2]
242    dmb
243
244    /* then wait until every core else is done voting */
245    mov r5, #0
2462:  ldrb r3, [r4, r5]
247    tst r3, #255
248    /* wait */
249    bne 2b
250    add r5, r5, #1
251    cmp r5, #CONFIG_MP_MAX_NUM_CPUS
252    bne 2b
253
254    /* check if current core won */
255    dmb
256    ldr r3, [r0, #BOOT_PARAM_MPID_OFFSET]
257    cmp r3, r1
258    beq _primary_core
259    /* fallthrough secondary */
260
261    /* loop until our turn comes */
262_secondary_core:
263    dmb
264    ldr r2, [r0, #BOOT_PARAM_MPID_OFFSET]
265    cmp r1, r2
266    bne _secondary_core
267
268    /* we can now load our stack pointer values and move on */
269    ldr r4, =arch_secondary_cpu_init
270    ldr r5, [r0, #BOOT_PARAM_FIQ_SP_OFFSET]
271    ldr r6, [r0, #BOOT_PARAM_IRQ_SP_OFFSET]
272    ldr r7, [r0, #BOOT_PARAM_ABT_SP_OFFSET]
273    ldr r8, [r0, #BOOT_PARAM_UDF_SP_OFFSET]
274    ldr r9, [r0, #BOOT_PARAM_SVC_SP_OFFSET]
275    ldr r10, [r0, #BOOT_PARAM_SYS_SP_OFFSET]
276    b 2f
277
278_primary_core:
279#endif
280
281    ldr r4, =z_prep_c
282    ldr r5, =(z_arm_fiq_stack + CONFIG_ARMV7_FIQ_STACK_SIZE)
283    ldr r6, =(z_interrupt_stacks + CONFIG_ISR_STACK_SIZE)
284    ldr r7, =(z_arm_abort_stack + CONFIG_ARMV7_EXCEPTION_STACK_SIZE)
285    ldr r8, =(z_arm_undef_stack + CONFIG_ARMV7_EXCEPTION_STACK_SIZE)
286    ldr r9, =(z_arm_svc_stack + CONFIG_ARMV7_SVC_STACK_SIZE)
287    ldr r10, =(z_arm_sys_stack + CONFIG_ARMV7_SYS_STACK_SIZE)
288
2892:
290    /*
291     * Configure stack.
292     */
293
294    /* FIQ mode stack */
295    msr CPSR_c, #(MODE_FIQ | I_BIT | F_BIT)
296    mov sp, r5
297
298    /* IRQ mode stack */
299    msr CPSR_c, #(MODE_IRQ | I_BIT | F_BIT)
300    mov sp, r6
301
302    /* ABT mode stack */
303    msr CPSR_c, #(MODE_ABT | I_BIT | F_BIT)
304    mov sp, r7
305
306    /* UND mode stack */
307    msr CPSR_c, #(MODE_UND | I_BIT | F_BIT)
308    mov sp, r8
309
310    /* SVC mode stack */
311    msr CPSR_c, #(MODE_SVC | I_BIT | F_BIT)
312    mov sp, r9
313
314    /* SYS mode stack */
315    msr CPSR_c, #(MODE_SYS | I_BIT | F_BIT)
316    mov sp, r10
317
318#if defined(CONFIG_SOC_RESET_HOOK)
319    /* Execute platform-specific initialisation if applicable */
320    bl soc_reset_hook
321#endif
322
323#if defined(CONFIG_WDOG_INIT)
324    /* board-specific watchdog initialization is necessary */
325    bl z_arm_watchdog_init
326#endif
327
328#if defined(CONFIG_DISABLE_TCM_ECC)
329    bl z_arm_tcm_disable_ecc
330#endif
331
332    bl z_arm_relocate_vector_table
333
334    bx r4
335