/*
 * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <cortex_a72.h>
#include <cpu_macros.S>
#include <platform_def.h>

#define K3_BOOT_REASON_COLD_RESET 0x1

	/* ------------------------------------------------------------------
	 *  uintptr_t plat_get_my_entrypoint(void)
	 * ------------------------------------------------------------------
	 *
	 * This function is called with the MMU and caches disabled
	 * (SCTLR_EL3.M = 0 and SCTLR_EL3.C = 0). It is responsible for
	 * distinguishing between a warm and cold reset for the current CPU
	 * using platform-specific means. If it's a warm reset, then it
	 * returns the warm reset entrypoint provided to plat_setup_psci_ops()
	 * during BL31 initialization. If it's a cold reset then this function
	 * must return zero.
	 *
	 * This function does not follow the Procedure Call Standard used by
	 * the Application Binary Interface for the ARM 64-bit architecture.
	 * The caller should not assume that callee saved registers are
	 * preserved across a call to this function.
	 */
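	/*
	 * Illustrative sketch only (not part of the build): the body below
	 * behaves roughly like the following C, using this file's own
	 * k3_boot_reason_data_store word. The warm-reset path is not
	 * implemented yet, so anything other than a cold reset spins in
	 * plat_unsupported_boot.
	 *
	 *	if (k3_boot_reason_data_store != K3_BOOT_REASON_COLD_RESET)
	 *		for (;;)
	 *			;		// unsupported boot
	 *	return 0;			// cold boot: no warm entrypoint
	 */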
	.globl	plat_get_my_entrypoint
func plat_get_my_entrypoint
	ldr	x0, k3_boot_reason_data_store
	cmp	x0, #K3_BOOT_REASON_COLD_RESET

	/* We ONLY support cold boot at this point */
	bne	plat_unsupported_boot
	mov	x0, #0
	ret

	/*
	 * We manage our boot reason ourselves. At load time it holds the
	 * default reason, which is cold reset.
	 */
k3_boot_reason_data_store:
	.word	K3_BOOT_REASON_COLD_RESET

plat_unsupported_boot:
	b	plat_unsupported_boot

endfunc plat_get_my_entrypoint

	/* ------------------------------------------------------------------
	 * unsigned int plat_my_core_pos(void)
	 * ------------------------------------------------------------------
	 *
	 * This function returns the index of the calling CPU which is used as
	 * a CPU-specific linear index into blocks of memory (for example while
	 * allocating per-CPU stacks). This function will be invoked very early
	 * in the initialization sequence which mandates that this function
	 * should be implemented in assembly and should not rely on the
	 * availability of a C runtime environment. This function can clobber
	 * x0 - x8 and must preserve x9 - x29.
	 *
	 * This function plays a crucial role in the power domain topology
	 * framework in PSCI and details of this can be found in Power Domain
	 * Topology Design.
	 */
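	/*
	 * Illustrative sketch only: assuming MPIDR Aff1 is the cluster and
	 * Aff0 is the core within the cluster, the body below computes:
	 *
	 *	pos = aff0;
	 *	if (cluster >= 1) pos += K3_CLUSTER0_CORE_COUNT;
	 *	if (cluster >= 2) pos += K3_CLUSTER1_CORE_COUNT;
	 *	if (cluster >= 3) pos += K3_CLUSTER2_CORE_COUNT;
	 *
	 * For example, with two cores per cluster, cluster 1 core 0 maps to
	 * linear index 2.
	 */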
	.globl plat_my_core_pos
func plat_my_core_pos
	mrs	x0, MPIDR_EL1

	and	x1, x0, #MPIDR_CLUSTER_MASK
	lsr	x1, x1, #MPIDR_AFF1_SHIFT
	and	x0, x0, #MPIDR_CPU_MASK

	cmp	x1, 0
	b.eq	out
	add	x0, x0, #K3_CLUSTER0_CORE_COUNT

	cmp	x1, 1
	b.eq	out
	add	x0, x0, #K3_CLUSTER1_CORE_COUNT

	cmp	x1, 2
	b.eq	out
	add	x0, x0, #K3_CLUSTER2_CORE_COUNT

out:
	ret
endfunc plat_my_core_pos

	/* --------------------------------------------------------------------
	 * This handler does the following for Cortex-A72 cores:
	 * - Set the L2 data RAM latency to 3 cycles, or 4 cycles when
	 *   K3_DATA_RAM_4_LATENCY is set
	 * - Enable L2 ECC and parity with inline data
	 * - Enable L2 UniqueClean evictions with data
	 * - Optionally set snoop-delayed exclusive handling when
	 *   K3_EXCLUSIVE_SNOOP_DELAY is set
	 * --------------------------------------------------------------------
	 */
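	/*
	 * Note: the L2CTLR_EL1 data RAM latency field encodes (cycles - 1),
	 * so the 3-cycle setting below programs the value 2 and the 4-cycle
	 * setting programs the value 3.
	 */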
	.globl plat_reset_handler
func plat_reset_handler
	/* Only on Cortex-A72 */
	jump_if_cpu_midr CORTEX_A72_MIDR, a72
	ret

	/* Cortex-A72 specific settings */
a72:
	mrs	x0, CORTEX_A72_L2CTLR_EL1
#if K3_DATA_RAM_4_LATENCY
	/* Set L2 cache data RAM latency to 4 cycles */
	orr	x0, x0, #(CORTEX_A72_L2_DATA_RAM_LATENCY_4_CYCLES << \
			CORTEX_A72_L2CTLR_DATA_RAM_LATENCY_SHIFT)
#else
	/* Set L2 cache data RAM latency to 3 cycles */
	orr	x0, x0, #(CORTEX_A72_L2_DATA_RAM_LATENCY_3_CYCLES << \
			CORTEX_A72_L2CTLR_DATA_RAM_LATENCY_SHIFT)
#endif
	/* Enable L2 ECC and parity with inline data */
	orr	x0, x0, #CORTEX_A72_L2CTLR_EL1_ECC_AND_PARITY_ENABLE
	orr	x0, x0, #CORTEX_A72_L2CTLR_EL1_DATA_INLINE_ECC_ENABLE
	msr	CORTEX_A72_L2CTLR_EL1, x0

	mrs	x0, CORTEX_A72_L2ACTLR_EL1
	/* Enable L2 UniqueClean evictions with data */
	orr	x0, x0, #CORTEX_A72_L2ACTLR_ENABLE_UNIQUE_CLEAN
	msr	CORTEX_A72_L2ACTLR_EL1, x0

#if K3_EXCLUSIVE_SNOOP_DELAY
	mrs	x0, CORTEX_A72_CPUACTLR_EL1
	/* Set snoop-delayed exclusive handling */
	orr	x0, x0, #CORTEX_A72_CPUACTLR_EL1_DELAY_EXCLUSIVE_SNOOP
	msr	CORTEX_A72_CPUACTLR_EL1, x0
#endif

	isb
	ret
endfunc plat_reset_handler

	/* ---------------------------------------------
	 * int plat_crash_console_init(void)
	 * Function to initialize the crash console
	 * without a C Runtime to print crash report.
	 * Clobber list : x0 - x4
	 * ---------------------------------------------
	 */
	.globl plat_crash_console_init
func plat_crash_console_init
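	/*
	 * Argument setup for the tail call below, assuming the TF-A 16550
	 * console driver contract: x0 = UART base, x1 = UART input clock in
	 * Hz, x2 = baud rate.
	 */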
	mov_imm	x0, CRASH_CONSOLE_BASE
	mov_imm	x1, CRASH_CONSOLE_CLK
	mov_imm	x2, CRASH_CONSOLE_BAUD_RATE
	mov	w3, #0x0
	b	console_16550_core_init
endfunc plat_crash_console_init

	/* ---------------------------------------------
	 * int plat_crash_console_putc(int c)
	 * Function to print a character on the crash
	 * console without a C Runtime.
	 * Clobber list : x1, x2
	 * ---------------------------------------------
	 */
	.globl plat_crash_console_putc
func plat_crash_console_putc
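	/*
	 * The character to print arrives in w0 and is passed through
	 * unchanged; only the UART base needs to be loaded into x1 for
	 * console_16550_core_putc.
	 */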
	mov_imm	x1, CRASH_CONSOLE_BASE
	b	console_16550_core_putc
endfunc plat_crash_console_putc

	/* ---------------------------------------------
	 * void plat_crash_console_flush()
	 * Function to force a write of all buffered
	 * data that hasn't been output.
	 * Out : void.
	 * Clobber list : x0, x1
	 * ---------------------------------------------
	 */
	.globl plat_crash_console_flush
func plat_crash_console_flush
	mov_imm	x0, CRASH_CONSOLE_BASE
	b	console_16550_core_flush
endfunc plat_crash_console_flush