/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019 Intel Corporation. All rights reserved.
 */


#include <sof/bit.h>
#include <sof/common.h>
#include <sof/drivers/interrupt.h>
#if CONFIG_XT_BOOT_LOADER && !CONFIG_VM_ROM
#include <sof/lib/memory.h>
#include <sof/lib/shim.h>
#endif
#include <xtensa/cacheattrasm.h>
#include <xtensa/cacheasm.h>
#include <xtensa/coreasm.h>
#include <xtensa/config/system.h>
#include <xtensa/xdm-regs.h>
#include <xtensa/xtensa-xer.h>
#include <xtensa/xtruntime-core-state.h>

#include "xtos-internal.h"

.type   secondary_core_init, @function
.type   shared_vecbase_ptr, @object

.section        .AlternateResetVector.text, "ax"

	/* Alignment and padding to 4 dwords are required to meet
	 * the LPSRAM header requirements. */
_LpsramHeader:
	j    _AltResetVector
	.align 4
	/* Magic value. */
	.long 0
	/* Lp-restore vector address. */
	.long 0
	/* Reserved. */
	.long 0

	.size    _LpsramHeader, . - _LpsramHeader

.align 4
_AltResetVectorLiterals:
	.literal_position

	.size    _AltResetVectorLiterals, . - _AltResetVectorLiterals

.align 4
.global _AltResetVector

.align 4
_AltResetVector:
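	/* Reset the register window state: WINDOWBASE = 0 and only bit 0 of
	 * WINDOWSTART set, so a0-a15 form a single valid frame before any
	 * windowed code runs. */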
	movi a0, 0
	movi a1, 1
	wsr  a1, WINDOWSTART
	wsr  a0, WINDOWBASE
	rsync

	get_prid a5
	movi a6, PLATFORM_PRIMARY_CORE_ID
	bne a5, a6, secondary_init // core 0 handles restore, core 1 always starts from scratch
	/* Currently no implementation for core 0:
	   - shutoff restore should be put either here or in L1SRAM */
	j alt_boot_error_loop

secondary_init:
	/* Block interrupts but do not enable WOE */
	movi a3, PS_UM | PS_INTLEVEL(5)
	wsr a3, PS
	rsync

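	/* a2 = 0 selects the "all ways" cache configuration in
	 * _l1_cache_init (defined below); call0 is used because the
	 * register window machinery is not set up yet. */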
	movi a2, 0
	call0 _l1_cache_init
	movi a0, 0

alt_boot_secondary_core_idc_receiver:
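	/* Poll this core's IDC message register:
	 * a3 = IPC_DSP_BASE(0) + prid * IPC_DSP_SIZE.
	 * Bit 31 of the first dword is the BUSY flag that signals a
	 * pending wake message. */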
	get_prid a5
	movi a3, IPC_DSP_BASE(0)
	movi a4, IPC_DSP_SIZE
	mull a4, a4, a5
	add a3, a3, a4

	l32i a2, a3, 0
	bbsi a2, 31, alt_boot_validate_idc_msg

alt_boot_secondary_core_enter_waiti:

	/* Set up the interrupt vector base to point at the local LPSRAM copy */
	movi a2, _LpsramHeader
	wsr a2, vecbase

	get_prid a5
	// Get this core's interrupt control register block
	// a5 contains the processor id
	movi a3, IRQ_CPU_OFFSET
	mull a3, a3, a5
	movi a2, IRQ_BASE
	add  a3, a2, a3

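	/* Mask every source in this core's interrupt controller except the
	 * level 2 IDC bit. The four stores below are assumed to each target
	 * a per-level mask register (offsets 0x0, 0x10, 0x20, 0x30). */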
	movi a2, 0xffffffff & ~(BIT(IRQ_BIT_LVL2_IDC))
	s32i a2, a3, 0
	movi a2, 0xffffffff
	s32i a2, a3, 0x10
	movi a2, 0xffffffff
	s32i a2, a3, 0x20
	movi a2, 0xffffffff
	s32i a2, a3, 0x30
	memw // from here on, use l32i to read these registers back
	// Unmask the Tensilica L2 interrupt
	movi a2, IRQ_MASK_EXT_LEVEL2

	/* Enable the level 2 (external) level-triggered interrupt */
	movi a2, BIT(6)
	wsr a2, INTENABLE

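	/* Burn roughly 128 loop iterations before waiti, presumably to let
	 * the interrupt mask and enable writes above settle. */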
	movi a2, 128

alt_boot_secondary_wait_for_waiti:
	addi a2, a2, -1
	nop
	bnez a2, alt_boot_secondary_wait_for_waiti

alt_boot_secondary_enter_waiti:
	isync
	extw
	waiti 1
	j alt_boot_secondary_core_idc_receiver

alt_boot_validate_idc_msg:
	/* Core wake version: bits 0-8 (9 bits) - the core wake version must be 0x2. */
	l32i a2, a3, 0
	movi a4, 0x1FF
	and a2, a2, a4
	bnei a2, 0x2, alt_boot_handle_incorrect_idc
	/* ROM control version: bits 24-28 (5 bits) - the ROM control version must be 0x1. */
	l32i a2, a3, 0
	srli a2, a2, 24
	movi a4, 0x1F
	and a2, a2, a4
	bnei a2, 0x1, alt_boot_handle_incorrect_idc

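	/* init_vector: for each configured interrupt level (other than the
	 * debug level) point the level's EXCSAVE register at the weak
	 * _Level<n>FromVector dispatcher, and for levels above
	 * XCHAL_EXCM_LEVEL also publish a weak _Pri_<n>_HandlerAddress so a
	 * user-provided handler does not drag in the XTOS one. */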
	.macro	init_vector	level
	  .if GREATERTHAN(XCHAL_NUM_INTLEVELS+1,\level)
	    .if XCHAL_DEBUGLEVEL-\level
	      .weak   _Level&level&FromVector
	      movi    a4, _Level&level&FromVector
	      writesr excsave \level a4
	      .if GREATERTHAN(\level,XCHAL_EXCM_LEVEL)
		movi    a5, _Pri_&level&_HandlerAddress
		s32i    a4, a5, 0
		/*  If user provides their own handler, that handler might
		 *  not provide its own _Pri_<n>_HandlerAddress variable for
		 *  linking handlers.  In that case, the reference below
		 *  would pull in the XTOS handler anyway, causing a conflict.
		 *  To avoid that, provide a weak version of it here:
		 */
		.pushsection .data, "aw"
		.global  _Pri_&level&_HandlerAddress
		.weak   _Pri_&level&_HandlerAddress
		.align	4
		_Pri_&level&_HandlerAddress: .space 4
		.popsection
	      .endif
	    .endif
	  .endif
	.endm


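/* shared_vecbase_ptr holds the alternate vector base set up by the linker
 * scripts; if it is still zero there is nothing to switch to and boot
 * cannot continue. */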
lp_reset_setup_vecbase:
	movi a2, shared_vecbase_ptr
	l32i a2, a2, 0
	beqz a2, alt_boot_error_loop
	/* Apply the alternate vector base provided by the linker scripts. */
	wsr a2, vecbase

	init_vector    2
	init_vector    3
	init_vector    4
	init_vector    5

alt_boot_secondary_core_wakeup:
	movi a2, 0
	wsr a2, INTENABLE
	rsync

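	/* The second IDC dword (offset 4) carries the wake-up entry point;
	 * it is assumed to be stored right-shifted by 2, hence the slli
	 * below. Writing the first dword back clears the BUSY bit. */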
	// Compute the address to jump to
	l32i a2, a3, 4
	slli a2, a2, 2
	// Clear the BUSY bit
	l32i a4, a3, 0
	s32i a4, a3, 0
	memw

	jx a2
alt_boot_handle_incorrect_idc:
	// Clear BUSY
	l32i a2, a3, 0
	s32i a2, a3, 0
	memw
	// HW limitation: read the register back
	l32i a4, a3, 0
	j alt_boot_secondary_core_idc_receiver

alt_boot_error_loop:
	// TODO: consider some kind of status reporting here.
	j alt_boot_error_loop

	.size _AltResetVector, . - _AltResetVector

.section        .AlternateResetL2IntVector.text, "ax"

/* Note: at the moment it is essential that this is linked at
 * _LpsramHeader + 0x180 */
.align 4
.global _AltResetL2IntHandler
_AltResetL2IntHandler:
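	/* Level 2 handler used while waiting in LPSRAM: preserve a2 via
	 * EXCSAVE2, clear INTENABLE so waiti falls through, restore a2 and
	 * return; the main loop then re-polls the IDC register. */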
	xsr a2, excsave2
	xor a2, a2, a2
	wsr a2, intenable
	xsr a2, excsave2
	rfi 2
	.size _AltResetL2IntHandler, . - _AltResetL2IntHandler

.section        .LpsramCode.text, "ax"

.literal_position

.global    _l1_cache_init
.align 4

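/* _l1_cache_init: bring up the L1 caches for this core.
 * a2 == 0 enables all cache ways, a2 != 0 enables a single way.
 * Must be reached with call0; clobbers a2-a9 (cacheattr_set alone
 * clobbers a2-a8). */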
_l1_cache_init:
	mov a9, a2
	movi a3, CxL1CCFG
	bnez a9, l1_cache_enable_one_way

l1_cache_way_enable_all:
	movi a2, L1_CACHE_ALL_WAY_ENABLED_MASK
	movi a4, L1_CACHE_ALL_WAY_ACTIVE_MASK
	j l1_cache_enable_write

l1_cache_enable_one_way:
	movi a2, L1_CACHE_ONE_WAY_ENABLED_MASK
	movi a4, L1_CACHE_ONE_WAY_ACTIVE_MASK

l1_cache_enable_write:
	s32i a2, a3, 0

l1_cache_wait_for_way_enable_loop:
	l32i a2, a3, 0
	and a2, a2, a4
	bne a2, a4, l1_cache_wait_for_way_enable_loop

#if XCHAL_HAVE_PREFETCH
l1_cache_pref_ebb_enable:
	movi a3, CxL1PCFG
	movi a2, L1_CACHE_PREFETCHER_ENABLED
	movi a4, L1_CACHE_PREFETCHER_ACTIVE
	s32i a2, a3, 0

l1_cache_wait_for_prefetcher:
	l32i a2, a3, 0
	and a2, a2, a4
	bne a4, a2, l1_cache_wait_for_prefetcher
#endif

l1_cache_inv_unlock:
#if ! XCHAL_HAVE_ICACHE_DYN_WAYS
	icache_reset    a2, a3
#endif

#if ! XCHAL_HAVE_DCACHE_DYN_WAYS
	dcache_reset    a2, a3
#endif

l1_cache_set_prefctl:
#if XCHAL_HAVE_PREFETCH
	/* Enable cache prefetch if present. */
	movi a2, L1_CACHE_PREFCTL_VALUE
	wsr a2, PREFCTL
#endif

l1_cache_setup_memprotection:
	movi    a2, _memmap_cacheattr_reset
	/* NOTE: CLOBBERS a2 - a8 !!! */
	cacheattr_set
#if XCHAL_USE_MEMCTL
	bnez a9, l1_cache_init_program_memctl_one_way

l1_cache_init_program_memctl_all_ways:
	movi a3, ((~MEMCTL_SNOOP_EN))
	j l1_cache_init_program_memctl

l1_cache_init_program_memctl_one_way:
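	/* Single-way MEMCTL value; the literal bit positions below are
	 * assumed to match the Xtensa MEMCTL way-count fields (data and
	 * instruction cache ways in use/allocatable) plus the
	 * invalidate-on-way-change bit, alongside MEMCTL_L0IBUF_EN. */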
	movi a3, ((1 << 8) | (1 << 13) | (1 << 18) | MEMCTL_L0IBUF_EN | 1 << 23)

l1_cache_init_program_memctl:
	wsr a3, MEMCTL
	rsync
	/* Enable the zero-overhead loop instruction buffer and snoop responses, if configured. */
	movi    a3, (MEMCTL_SNOOP_EN | MEMCTL_L0IBUF_EN)
	rsr a2, MEMCTL
	or  a2, a2, a3
	wsr a2, MEMCTL
	rsync
#endif
	ret

	.size _l1_cache_init, . - _l1_cache_init
