1/*
2 * Copyright (c) 2019-2023, Arm Limited. All rights reserved.
3 * Copyright (c) 2021-2023, NVIDIA Corporation. All rights reserved.
4 *
5 * SPDX-License-Identifier: BSD-3-Clause
6 */
7
8#include <arch.h>
9#include <asm_macros.S>
10#include <common/bl_common.h>
11#include <cortex_a78_ae.h>
12#include <cpu_macros.S>
13#include <plat_macros.S>
14#include "wa_cve_2022_23960_bhb_vector.S"
15
/* Hardware handled coherency */
#if HW_ASSISTED_COHERENCY == 0
#error "cortex_a78_ae must be compiled with HW_ASSISTED_COHERENCY enabled"
#endif

#if WORKAROUND_CVE_2022_23960
	/*
	 * Instantiate the Spectre-BHB (CVE-2022-23960) mitigation vector
	 * table for this core. The macro emits an exception vector table
	 * whose entries execute a branch loop of
	 * CORTEX_A78_AE_BHB_LOOP_COUNT iterations before falling through
	 * to the regular vectors; it is installed at reset below (see the
	 * override_vector_table in the CVE workaround block).
	 */
	wa_cve_2022_23960_bhb_vector_table CORTEX_A78_AE_BHB_LOOP_COUNT, cortex_a78_ae
#endif /* WORKAROUND_CVE_2022_23960 */
24
/*
 * Erratum 1941500: applied at reset when ERRATA_A78_AE_1941500 is
 * enabled. The fix is a single write setting bit 8 of CPUECTLR_EL1.
 * Per the check below, the erratum applies to revisions r0p0-r0p1.
 */
workaround_reset_start cortex_a78_ae, ERRATUM(1941500), ERRATA_A78_AE_1941500
	sysreg_bit_set CORTEX_A78_AE_CPUECTLR_EL1, CORTEX_A78_AE_CPUECTLR_EL1_BIT_8
workaround_reset_end cortex_a78_ae, ERRATUM(1941500)

/* 1941500 affects revisions <= r0p1 (CPU_REV(0, 1)). */
check_erratum_ls cortex_a78_ae, ERRATUM(1941500), CPU_REV(0, 1)
30
/*
 * Erratum 1951502: applied at reset when ERRATA_A78_AE_1951502 is
 * enabled; affects revisions r0p0-r0p1 (see check below).
 *
 * The workaround programs three entries via the IMPLEMENTATION
 * DEFINED S3_6_c15_c8_{0..3} system registers. The pattern repeats
 * three times: write an entry selector (0, 1, 2) to S3_6_c15_c8_0,
 * then load the corresponding data values into S3_6_c15_c8_2,
 * S3_6_c15_c8_3 and S3_6_c15_c8_1. The 64-bit constants are the
 * Arm-specified values from the erratum notice — do not alter them
 * or reorder the writes.
 *
 * NOTE(review): register/selector semantics are IMP DEF and not
 * derivable from this file — confirm against the Cortex-A78AE
 * Software Developer Errata Notice before modifying.
 */
workaround_reset_start cortex_a78_ae, ERRATUM(1951502), ERRATA_A78_AE_1951502
	/* Entry 0 */
	msr	S3_6_c15_c8_0, xzr
	ldr	x0, =0x10E3900002
	msr	S3_6_c15_c8_2, x0
	ldr	x0, =0x10FFF00083
	msr	S3_6_c15_c8_3, x0
	ldr	x0, =0x2001003FF
	msr	S3_6_c15_c8_1, x0

	/* Entry 1 */
	mov	x0, #1
	msr	S3_6_c15_c8_0, x0
	ldr	x0, =0x10E3800082
	msr	S3_6_c15_c8_2, x0
	ldr	x0, =0x10FFF00083
	msr	S3_6_c15_c8_3, x0
	ldr	x0, =0x2001003FF
	msr	S3_6_c15_c8_1, x0

	/* Entry 2 */
	mov	x0, #2
	msr	S3_6_c15_c8_0, x0
	ldr	x0, =0x10E3800200
	msr	S3_6_c15_c8_2, x0
	ldr	x0, =0x10FFF003E0
	msr	S3_6_c15_c8_3, x0
	ldr	x0, =0x2001003FF
	msr	S3_6_c15_c8_1, x0
workaround_reset_end cortex_a78_ae, ERRATUM(1951502)

/* 1951502 affects revisions <= r0p1 (CPU_REV(0, 1)). */
check_erratum_ls cortex_a78_ae, ERRATUM(1951502), CPU_REV(0, 1)
60
/*
 * Erratum 2376748: applied at reset when ERRATA_A78_AE_2376748 is
 * enabled; affects revisions r0p0-r0p2 (see check below).
 */
workaround_reset_start cortex_a78_ae, ERRATUM(2376748), ERRATA_A78_AE_2376748
	/* -------------------------------------------------------
	 * Set CPUACTLR2_EL1[0] to 1 to force PLDW/PFRM ST to
	 * behave like PLD/PRFM LD and not cause invalidations to
	 * other PE caches. There might be a small performance
	 * degradation to this workaround for certain workloads
	 * that share data.
	 * -------------------------------------------------------
	 */
	sysreg_bit_set CORTEX_A78_AE_ACTLR2_EL1, CORTEX_A78_AE_ACTLR2_EL1_BIT_0
workaround_reset_end cortex_a78_ae, ERRATUM(2376748)

/* 2376748 affects revisions <= r0p2 (CPU_REV(0, 2)). */
check_erratum_ls cortex_a78_ae, ERRATUM(2376748), CPU_REV(0, 2)
74
/*
 * Erratum 2395408: applied at reset when ERRATA_A78_AE_2395408 is
 * enabled; affects revisions r0p0-r0p1 (see check below).
 */
workaround_reset_start cortex_a78_ae, ERRATUM(2395408), ERRATA_A78_AE_2395408
	/* --------------------------------------------------------
	 * Disable folding of demand requests into older prefetches
	 * with L2 miss requests outstanding by setting the
	 * CPUACTLR2_EL1[40] to 1.
	 * --------------------------------------------------------
	 */
	sysreg_bit_set CORTEX_A78_AE_ACTLR2_EL1, CORTEX_A78_AE_ACTLR2_EL1_BIT_40
workaround_reset_end cortex_a78_ae, ERRATUM(2395408)

/* 2395408 affects revisions <= r0p1 (CPU_REV(0, 1)). */
check_erratum_ls cortex_a78_ae, ERRATUM(2395408), CPU_REV(0, 1)
86
/*
 * CVE-2022-23960 (Spectre-BHB): install the mitigation vector table
 * built above so that the BHB-clearing loop runs on exception entry.
 * Only relevant in BL31, which owns the EL3 exception vectors.
 */
workaround_reset_start cortex_a78_ae, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
#if IMAGE_BL31
	/*
	 * The Cortex-A78AE generic vectors are overridden to apply errata
	 * mitigation on exception entry from lower ELs.
	 */
	override_vector_table wa_cve_vbar_cortex_a78_ae
#endif /* IMAGE_BL31 */
workaround_reset_end cortex_a78_ae, CVE(2022, 23960)

/* Workaround status is reported from the build-time flag, not a revision check. */
check_erratum_chosen cortex_a78_ae, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
98
/*
 * Reset handler: runs the erratum workarounds registered above (via
 * the cpu_reset_func machinery) and, when FEAT_AMU support is built
 * in, opens up AMU access and enables the architected counters.
 *
 * NOTE(review): this deliberately reuses the CORTEX_A78_* (non-AE)
 * macro names for the AMU bits/registers — presumably the A78AE
 * shares the A78 definitions; confirm in cortex_a78_ae.h / cortex_a78.h.
 */
cpu_reset_func_start cortex_a78_ae
#if ENABLE_FEAT_AMU
	/* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
	sysreg_bit_clear actlr_el3, CORTEX_A78_ACTLR_TAM_BIT

	/* Make sure accesses from non-secure EL0/EL1 are not trapped to EL2 */
	sysreg_bit_clear actlr_el2, CORTEX_A78_ACTLR_TAM_BIT

	/* Enable group0 counters */
	mov	x0, #CORTEX_A78_AMU_GROUP0_MASK
	msr	CPUAMCNTENSET0_EL0, x0

	/* Enable group1 counters */
	mov	x0, #CORTEX_A78_AMU_GROUP1_MASK
	msr	CPUAMCNTENSET1_EL0, x0
#endif
cpu_reset_func_end cortex_a78_ae
116
117	/* -------------------------------------------------------
118	 * HW will do the cache maintenance while powering down
119	 * -------------------------------------------------------
120	 */
121func cortex_a78_ae_core_pwr_dwn
122	/* -------------------------------------------------------
123	 * Enable CPU power down bit in power control register
124	 * -------------------------------------------------------
125	 */
126	sysreg_bit_set CORTEX_A78_CPUPWRCTLR_EL1, CORTEX_A78_CPUPWRCTLR_EL1_CORE_PWRDN_EN_BIT
127	isb
128	ret
129endfunc cortex_a78_ae_core_pwr_dwn
130
/* Generate the (legacy) errata reporting function for this CPU. */
errata_report_shim cortex_a78_ae

	/* -------------------------------------------------------
	 * This function provides cortex_a78_ae specific
	 * register information for crash reporting.
	 * It needs to return with x6 pointing to
	 * a list of register names in ascii and
	 * x8 - x15 having values of registers to be
	 * reported.
	 * -------------------------------------------------------
	 */
/* "aS": allocatable section of zero-terminated strings (mergeable). */
.section .rodata.cortex_a78_ae_regs, "aS"
cortex_a78_ae_regs:  /* The ascii list of register names to be reported */
	.asciz	"cpuectlr_el1", ""	/* Empty string terminates the list */

func cortex_a78_ae_cpu_reg_dump
	adr	x6, cortex_a78_ae_regs	/* x6 = name list for the crash reporter */
	mrs	x8, CORTEX_A78_CPUECTLR_EL1	/* x8 = first (and only) reported value */
	ret
endfunc cortex_a78_ae_cpu_reg_dump
151
/*
 * Register the Cortex-A78AE cpu_ops: matched against MIDR at runtime,
 * wiring up the reset handler and the core power-down hook above.
 */
declare_cpu_ops cortex_a78_ae, CORTEX_A78_AE_MIDR, \
	cortex_a78_ae_reset_func, \
	cortex_a78_ae_core_pwr_dwn
155