// reset-vector.S  --  Xtensa Reset Vector
// $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/xtos/reset-vector.S#1 $

// Copyright (c) 1999-2013 Tensilica Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

#include <sof/common.h>
#if CONFIG_XT_BOOT_LOADER && !CONFIG_VM_ROM
#include <sof/lib/memory.h>
#endif
#include <xtensa/coreasm.h>
#include <xtensa/corebits.h>
#include <xtensa/cacheasm.h>
#include <xtensa/cacheattrasm.h>
#include <xtensa/xtensa-xer.h>
#include <xtensa/xdm-regs.h>
#include <xtensa/config/system.h>	/* for XSHAL_USE_ABSOLUTE_LITERALS only */
#include <xtensa/xtruntime-core-state.h>
#include "xtos-internal.h"

// The following reset vector avoids initializing certain registers already
// initialized by processor reset.  But it does initialize some of them
// anyway, for minimal support of warm restart (restarting in software by
// jumping to the reset vector rather than asserting hardware reset).


	//  _ResetVector: the very first code executed out of reset.  It must
	//  fit the (possibly tiny) reset vector area, so it only dispatches
	//  to the full reset handler (_ResetHandler) defined further below.
	.begin	literal_prefix	.ResetVector
	.section		.ResetVector.text, "ax"

	.align	4
	.global	_ResetVector
_ResetVector:

#if (!XCHAL_HAVE_HALT || defined(XTOS_UNPACK)) && XCHAL_HAVE_IMEM_LOADSTORE
	//  NOTE:
	//
	//  IMPORTANT:  If you move the _ResetHandler portion to a section
	//  other than .ResetVector.text that is outside the range of
	//  the reset vector's 'j' instruction, the _ResetHandler symbol
	//  and a more elaborate j/movi/jx sequence are needed in
	//  .ResetVector.text to dispatch to the new location.

#if CONFIG_XT_HAVE_RESET_VECTOR_ROM
	j	_ResetHandler		// handler is within 'j' range
#else
	// This is our VM ROM, it simply jumps to the reset handler.
	// The handler may be out of 'j' range, so go through a literal.
	j	.sram_jump		// jump over the literals

	.align	4
	.literal_position	// tells the assembler/linker to place literals here

_reset_sram:
	.word	_ResetHandler	// literal: absolute address of the SRAM reset handler
	.align	4
.sram_jump:
	l32r	a0, _reset_sram	// load SRAM reset handler address
	jx		a0				// jump to the handler
#endif
	.size	_ResetVector, . - _ResetVector

# if XCHAL_HAVE_HALT
	//  Xtensa TX: reset vector segment is only 4 bytes, so must place the
	//  unpacker code elsewhere in the memory that contains the reset vector.
#  if XCHAL_RESET_VECTOR_VADDR == XCHAL_INSTRAM0_VADDR
	.section .iram0.text, "ax"
#  elif XCHAL_RESET_VECTOR_VADDR == XCHAL_INSTROM0_VADDR
	.section .irom0.text, "ax"
#  elif XCHAL_RESET_VECTOR_VADDR == XCHAL_URAM0_VADDR
	.section .uram0.text, "ax"
#  else
#   warning "Xtensa TX reset vector not at start of iram0, irom0, or uram0 -- ROMing LSPs may not work"
	.text
#  endif
# endif

	.extern	__memctl_default

#if CONFIG_XT_BOOT_LOADER || CONFIG_VM_ROM
	.section .ResetHandler.text, "ax"
	j	_ResetHandler
#endif
	.align	4
	.literal_position	// tells the assembler/linker to place literals here

	//  For MPU empty background map -- see XCHAL_HAVE_MPU code further below.
	//  Cannot put this in .rodata (not unpacked before MPU init).
	//  Each entry is (access-rights | memtype) plus the MPU entry index bias.
# if XCHAL_HAVE_MPU && XCHAL_MPU_ENTRIES >= 8 && XCHAL_MPU_BACKGROUND_ENTRIES <= 2
	.global _xtos_mpu_attribs
	.align 4
_xtos_mpu_attribs:
	.word   0x00006000+XCHAL_MPU_ENTRIES-8	// Illegal	(---)
	.word   0x000F7700+XCHAL_MPU_ENTRIES-8	// Writeback	(rwx Cacheable Non-shareable wb rd-alloc wr-alloc)
	.word   0x000D5700+XCHAL_MPU_ENTRIES-8	// WBNA		(rwx Cacheable Non-shareable wb rd-alloc)
	.word   0x000C4700+XCHAL_MPU_ENTRIES-8	// Writethru	(rwx Cacheable Non-shareable wt rd-alloc)
	.word   0x00006700+XCHAL_MPU_ENTRIES-8	// Bypass	(rwx Device non-interruptible system-shareable)
# endif

	.align	4
	.global	_ResetHandler
_ResetHandler:
#endif

#if !XCHAL_HAVE_HALT

	/*
	 *  Even if the processor supports the non-PC-relative L32R option,
	 *  it will always start up in PC-relative mode.  We take advantage of
	 *  this, and use PC-relative mode at least until we're sure the .lit4
	 *  section is in place (which is sometimes only after unpacking).
	 */
	.begin	no-absolute-literals

	// If we have dynamic cache way support, init the caches as soon
	// as we can, which is now. Except, if we are waking up from a
	// PSO event, then we need to do this slightly later.

#if XCHAL_HAVE_ICACHE_DYN_WAYS || XCHAL_HAVE_DCACHE_DYN_WAYS
# if XCHAL_HAVE_PSO_CDM && !XCHAL_HAVE_PSO_FULL_RETENTION
	// Do this later on in the code -- see below
# else
	movi	a0, __memctl_default
	wsr.memctl	a0
# endif
#endif

	// If we have PSO support, then we must check for a warm start with
	// caches left powered on. If the caches had been left powered on,
	// we must restore the state of MEMCTL to the saved state if any.
	// Note that MEMCTL may not be present depending on config.

#if XCHAL_HAVE_PSO_CDM && !XCHAL_HAVE_PSO_FULL_RETENTION
	movi	a2, XDM_MISC_PWRSTAT		// Read PWRSTAT
	movi	a3, _xtos_pso_savearea		// Save area address - retained for later
	movi	a5, CORE_STATE_SIGNATURE	// Signature for compare - retained for later
	rer	a7, a2				// PWRSTAT value - retained for later
	extui	a4, a7, 1, 2			// Now bottom 2 bits are core wakeup and cache power lost
	bnei	a4, 1, .Lcold_start		// a4==1 means PSO wakeup, caches did not lose power
	l32i	a4, a3, CS_SA_signature		// Load save area signature field
	sub	a4, a4, a5			// a4 == 0 iff save area signature is valid
	bnez	a4, .Lcold_start		// If signature mismatch then do cold start
#if XCHAL_USE_MEMCTL
	l32i	a4, a3, CS_SA_memctl		// Load saved MEMCTL value
	movi	a0, ~MEMCTL_INV_EN
	and	a0, a4, a0			// Clear invalidate bit (caches still hold valid data)
	wsr.memctl	a0
#endif
	j	.Lwarm_start

.Lcold_start:

#if XCHAL_HAVE_ICACHE_DYN_WAYS || XCHAL_HAVE_DCACHE_DYN_WAYS
	// Enable and invalidate all ways of both caches. If there is no
	// dynamic way support then this write will have no effect.

	movi	a0, __memctl_default
	wsr.memctl	a0
#endif

.Lwarm_start:

#endif

	movi	a0, 0		// a0 is always 0 in this code, used to initialize lots of things

#if XCHAL_HAVE_INTERRUPTS	// technically this should be under !FULL_RESET, assuming hard reset
	wsr.intenable	a0	// make sure that interrupts are shut off (*before* we lower PS.INTLEVEL and PS.EXCM!)
#endif

#if !XCHAL_HAVE_FULL_RESET
	//  Registers below are not cleared by hardware on this config, so
	//  initialize them in software to support warm restart.

#if XCHAL_HAVE_CCOUNT && (XCHAL_HW_MIN_VERSION < XTENSA_HWVERSION_RB_2006_0)	/* pre-LX2 cores only */
	wsr.ccount	a0	// not really necessary, but nice; best done very early
#endif

	// For full MMU configs, put page table at an unmapped virtual address.
	// This ensures that accesses outside the static maps result
	// in miss exceptions rather than random behaviour.
	// Assumes XCHAL_SEG_MAPPABLE_VADDR == 0 (true in released MMU).
#if XCHAL_ITLB_ARF_WAYS > 0 || XCHAL_DTLB_ARF_WAYS > 0
	wsr.ptevaddr	a0
#endif

	// Debug initialization
	//
	// NOTE: DBREAKCn must be initialized before the combination of these two things:
	//       any load/store, and a lowering of PS.INTLEVEL below DEBUG_LEVEL.
	//       The processor already resets IBREAKENABLE appropriately.
	//
#if XCHAL_HAVE_DEBUG
# if XCHAL_NUM_DBREAK
#  if XCHAL_NUM_DBREAK >= 2
	wsr.dbreakc1	a0
#  endif
	wsr.dbreakc0	a0
	dsync			// wait for WSRs to DBREAKCn to complete
# endif

# if XCHAL_HW_MIN_VERSION < XTENSA_HWVERSION_RA_2004_1	/* pre-LX cores only */
	//  Starting in Xtensa LX, ICOUNTLEVEL resets to zero (not 15), so no need to initialize it.
	//  Prior to that we do, otherwise we get an ICOUNT exception, 2^32 instructions after reset.
	rsr.icountlevel	a2	// are we being debugged? (detected by ICOUNTLEVEL not 15, or dropped below 12)
	bltui	a2, 12, 1f	// if so, avoid initializing ICOUNTLEVEL which drops single-steps through here
	wsr.icountlevel	a0	// avoid ICOUNT exceptions
	isync			// wait for WSR to ICOUNTLEVEL to complete
1:
# endif
#endif

#endif /* !XCHAL_HAVE_FULL_RESET */

#if XCHAL_HAVE_ABSOLUTE_LITERALS
	//  Technically, this only needs to be done under !FULL_RESET, assuming hard reset:
	wsr.litbase	a0	// clear LITBASE: stay in PC-relative L32R mode for now
	rsync
#endif

#if XCHAL_HAVE_PSO_CDM && ! XCHAL_HAVE_PSO_FULL_RETENTION
	// If we're powering up from a temporary power shut-off (PSO),
	// restore state saved just prior to shut-off. Note that the
	// MEMCTL register was already restored earlier, and as a side
	// effect, registers a3, a5, a7 are now preloaded with values
	// that we will use here.
	// a3 - pointer to save area base address (_xtos_pso_savearea)
	// a5 - saved state signature (CORE_STATE_SIGNATURE)
	// a7 - contents of PWRSTAT register

	l32i	a4, a3, CS_SA_signature		// load save area signature
	sub	a4, a4, a5			// compare signature with expected one (a4==0 if match)
# if XTOS_PSO_TEST
	movi	a7, PWRSTAT_WAKEUP_RESET	// pretend PSO warm start with warm caches
# endif
	bbci.l	a7, PWRSTAT_WAKEUP_RESET_SHIFT, 1f	// wakeup from PSO? (branch if not)
	//  Yes, wakeup from PSO.  Check whether state was properly saved.
	addi	a5, a7, - PWRSTAT_WAKEUP_RESET		// speculatively clear PSO-wakeup bit
	movnez	a7, a5, a4	// if state not saved (corrupted?), mark as cold start
	bnez	a4, 1f		// if state not saved, just continue with reset
	//  Wakeup from PSO with good signature.  Now check cache status:
	bbci.l	a7, PWRSTAT_CACHES_LOST_POWER_SHIFT, .Lpso_restore	// if caches warm, restore now
	//  Caches got shutoff.  Continue reset, we'll end up initializing caches, and check again later for PSO.
# if XCHAL_HAVE_PRID && XCHAL_HAVE_S32C1I
	j	.Ldonesync	// skip reset sync, only done for cold start
# endif
1:	//  Cold start.  (Not PSO wakeup.)  Proceed with normal full reset.
#endif

#if XCHAL_HAVE_PRID && XCHAL_HAVE_S32C1I
	/* Core 0 initializes the XMP synchronization variable, if present. This operation needs to
	   happen as early as possible in the startup sequence so that the other cores can be released
	   from reset.	*/
	.weak _ResetSync			// optional; zero address if not linked in
	movi 	a2, _ResetSync	// address of sync variable
	rsr.prid a3		// core and multiprocessor ID
	extui 	a3, a3, 0, 8	// extract core ID (FIXME: need proper constants for PRID bits to extract)
	beqz	a2, .Ldonesync	// skip if no sync variable
	bnez	a3, .Ldonesync	// only do this on core 0
	s32i	a0, a2, 0	// clear sync variable
.Ldonesync:
#endif
#if XCHAL_HAVE_EXTERN_REGS && XCHAL_HAVE_MP_RUNSTALL
	/* On core 0, this releases other cores.  On other cores this has no effect, because
	   runstall control is unconnected.  */
	movi	a2, XER_MPSCORE
	wer	a0, a2
#endif

	/*
	 *  For processors with relocatable vectors, apply any alternate
	 *  vector base given to xt-genldscripts, which sets the
	 *  _memmap_vecbase_reset symbol accordingly.
	 */
#if XCHAL_HAVE_VECBASE
	movi	a2, _memmap_vecbase_reset	/* note: absolute symbol, not a ptr */
	wsr.vecbase	a2
#endif

#if XCHAL_HAVE_S32C1I && (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0)	/* have ATOMCTL ? */
# if XCHAL_DCACHE_IS_COHERENT
	movi	a3, 0x25		/* MX -- internal for writeback, RCW otherwise */
# else
	movi	a3, 0x15		/* non-MX -- always RCW */
# endif
	wsr.atomctl	a3
#endif

#if XCHAL_HAVE_INTERRUPTS && XCHAL_HAVE_DEBUG
	rsil	a2, 1		// lower PS.INTLEVEL here to make reset vector easier to debug
#endif

	/* If either of the caches does not have dynamic way support, then
	 * use the old (slow) method to init them. If the cache is absent
	 * the macros will expand to empty.
	 */
#if ! XCHAL_HAVE_ICACHE_DYN_WAYS
	icache_reset	a2, a3		// macro from <xtensa/cacheasm.h>; clobbers a2, a3
#endif
#if ! XCHAL_HAVE_DCACHE_DYN_WAYS
	dcache_reset	a2, a3		// macro from <xtensa/cacheasm.h>; clobbers a2, a3
#endif

#if XCHAL_HAVE_PSO_CDM && ! XCHAL_HAVE_PSO_FULL_RETENTION
	//  Here, a7 still contains status from the power status register,
	//  or zero if signature check failed.
	bbci.l	a7, PWRSTAT_WAKEUP_RESET_SHIFT, .Lcoldstart	// wakeup from PSO with good signature?
	//  Yes, wakeup from PSO.  Caches had been powered down, now are initialized.
.Lpso_restore:
	//  Assume memory still initialized, so all code still unpacked etc.
	//  So we can just jump/call to relevant state restore code (wherever located).
	movi	a2, 0				// make shutoff routine return zero
	movi	a3, _xtos_pso_savearea
	//  Here, as below for _start, call0 is used as an unlimited-range jump.
	call0	_xtos_core_restore_nw
	//  (does not return)
.Lcoldstart:
#endif

#if XCHAL_HAVE_PREFETCH
	/* Enable cache prefetch if present.  Platform-specific PREFCTL
	 * values -- presumably tuned per SoC; confirm against platform docs. */
#if CONFIG_APOLLOLAKE

#if CONFIG_SKYLAKE || CONFIG_KABYLAKE
	movi.n	a2, 0	/* skylake and kabylake */
#else
	movi.n	a2, 34  /* apollolake */
#endif

#else
	movi.n	a2, 68  /* everything else */
#endif
	wsr	a2, PREFCTL
#endif

	/*
	 *  Now setup the memory attributes.  On some cores this "enables" caches.
	 *  We do this ahead of unpacking, so it can proceed more efficiently.
	 *
	 *  The _memmap_cacheattr_reset symbol's value (address) is defined
	 *  by the LSP's linker script, as generated by xt-genldscripts.
	 *  It defines 4-bit attributes for eight 512MB regions.
	 *
	 *  (NOTE:  for cores with the older MMU v1 or v2, or without any memory
	 *   protection mechanism, the following code has no effect.)
	 */
#if XCHAL_HAVE_MPU
	/*  If there's an empty background map, setup foreground maps to mimic region protection:  */
# if XCHAL_MPU_ENTRIES >= 8 && XCHAL_MPU_BACKGROUND_ENTRIES <= 2
	// We assume reset state:  all MPU entries zeroed and disabled.
	// Otherwise we'd need a loop to zero everything.

	movi	a2, _memmap_cacheattr_reset	// note: absolute symbol, not a ptr
	movi	a3, _xtos_mpu_attribs		// see literal area at start of reset vector
	movi	a4, 0x20000000			// 512 MB delta
	movi	a6, 8				// loop counter: eight regions
	movi	a7, 1				// MPU entry vaddr 0, with valid bit set
	movi	a9, 0				// cacheadrdis value
	wsr.cacheadrdis a9			// enable everything temporarily while MPU updates

	// Write eight MPU entries, from the last one going backwards (entries n-1 thru n-8)
	//
2:	extui	a8, a2, 28, 4			// get next attribute nibble (msb first)
	extui	a5, a8, 0, 2			// lower two bit indicate whether cached
	slli	a9, a9, 1			// add a bit to cacheadrdis...
	addi	a10, a9, 1			// set that new bit if...
	moveqz	a9, a10, a5			// ... that region is non-cacheable
	addx4	a5, a8, a3			// index into _xtos_mpu_attribs table
	addi	a8, a8, -5			// make valid attrib indices negative
	movgez	a5, a3, a8			// if not valid attrib, use Illegal
	l32i	a5, a5, 0			// load access rights, memtype from table entry
	slli	a2, a2, 4			// shift out consumed attribute nibble
	sub	a7, a7, a4			// next 512MB region (last to first)
	addi	a6, a6, -1			// decrement region counter
	add	a5, a5, a6			// add the index
	wptlb	a5, a7				// write the MPU entry
	bnez	a6, 2b				// loop until done
# else
	movi	a9, XCHAL_MPU_BG_CACHEADRDIS	// default value of CACHEADRDIS for bgnd map
# endif
	wsr.cacheadrdis a9			// update cacheadrdis
#elif XCHAL_HAVE_CACHEATTR || XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR \
		|| (XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY)
	movi	a2, _memmap_cacheattr_reset	/* note: absolute symbol, not a ptr */
	cacheattr_set				/* set CACHEATTR from a2 (clobbers a3-a8) */
#endif

	/*  Now that caches are initialized, cache coherency can be enabled.  */
#if XCHAL_DCACHE_IS_COHERENT
# if XCHAL_HAVE_EXTERN_REGS && XCHAL_HAVE_MX && (XCHAL_HW_MIN_VERSION < XTENSA_HWVERSION_RE_2012_0)
	/* Opt into coherence for MX (for backward compatibility / testing).  */
	movi	a3, 1
	movi	a2, XER_CCON
	wer	a3, a2
# endif
#endif

	/*  Enable zero-overhead loop instr buffer and snoop responses if configured. */
	/*  If HW erratum 453 fix is to be applied then disable loop instr buffer. */
#if XCHAL_USE_MEMCTL && (XCHAL_SNOOP_LB_MEMCTL_DEFAULT || XCHAL_ERRATUM_453)
	rsr.memctl	a2
#if XCHAL_SNOOP_LB_MEMCTL_DEFAULT
	movi	a3, XCHAL_SNOOP_LB_MEMCTL_DEFAULT
	or	a2, a2, a3			/* set snoop / loop-buffer default bits */
#endif
#if XCHAL_ERRATUM_453
	srli	a2, a2, 1			/* clear bit 0 (ZOL buffer enable) */
	slli	a2, a2, 1
#endif
	wsr.memctl	a2
#endif

	/* Caches are all up and running, clear PWRCTL.ShutProcOffOnPWait. */
#if XCHAL_HAVE_PSO_CDM
	movi	a2, XDM_MISC_PWRCTL
	movi	a4, ~PWRCTL_CORE_SHUTOFF
	rer	a3, a2				/* read-modify-write of PWRCTL via external regs */
	and	a3, a3, a4
	wer	a3, a2
#endif

#endif /* !XCHAL_HAVE_HALT */

	/*
	 * At this point we can unpack code and data (e.g. copy segments from
	 * ROM to RAM, vectors into their proper location, etc.). However,
	 *
	 * 1) the destination of the unpack may require some setup,
	 *    for instance a DDR controller may need to be initialized
	 *    and enabled before anything is unpacked into DDR.
	 * 2) users may wish to provide their own unpack code which works
	 *    faster or in a different way than the default unpack code.
	 *
	 * To support such uses, we provide a user hook at this point.
	 * If the user hook function is defined, then it is called from
	 * here, and its return value (in a2) is checked. If the return
	 * value is non-zero, then we assume that code unpacking has been
	 * completed. The user hook function must be written in assembly
	 * and should make minimal assumptions about system state.
	 */

	.weak	__reset_user_init	// optional; address is zero when not defined

	movi	a2, __reset_user_init
	beqz	a2, 1f			// no user hook
	callx0	a2			// execute user hook
	movi	a0, 0			// ensure a0 continues to hold 0
	bnez	a2, unpackdone		// if a2 != 0 then unpack is done
1:

#if defined(XTOS_UNPACK)
	//  Default unpack: walk _rom_store_table, an array of
	//  (start, end, store) triplets, copying words until a terminating
	//  entry (both start and store zero) is reached.
	movi	a2, _rom_store_table
	beqz	a2, unpackdone
unpack:	l32i	a3, a2, 0	// start vaddr
	l32i	a4, a2, 4	// end vaddr
	l32i	a5, a2, 8	// store vaddr
	addi	a2, a2, 12
	bgeu	a3, a4, upnext	// skip unless start < end
uploop:	l32i 	a6, a5, 0	// load word from packed (store) image
	addi	a5, a5, 4
	s32i	a6, a3, 0	// store word to its runtime address
	addi	a3, a3, 4
	bltu	a3, a4, uploop
	j	unpack
upnext:	bnez	a3, unpack	// non-zero start: not the terminator, keep going
	bnez	a5, unpack	// terminator only when start AND store are both zero
#endif /* XTOS_UNPACK */

unpackdone:

#if defined(XTOS_UNPACK) || defined(XTOS_MP)
	/*
	 *  If writeback caches are configured and enabled, unpacked data must be
	 *  written out to memory before trying to execute it:
	 */
	dcache_writeback_all	a2, a3, a4, 0
	icache_sync		a2	// ensure data written back is visible to i-fetch
	/*
	 *  Note:  no need to invalidate the i-cache after the above, because we
	 *  already invalidated it further above and did not execute anything within
	 *  unpacked regions afterwards.  [Strictly speaking, if an unpacked region
	 *  follows this code very closely, it's possible for cache-ahead to have
	 *  cached a bit of that unpacked region, so in the future we may need to
	 *  invalidate the entire i-cache here again anyway.]
	 */
#endif

#if !XCHAL_HAVE_HALT	/* skip for TX */

	/*
	 *  Now that we know the .lit4 section is present (if got unpacked)
	 *  (and if absolute literals are used), initialize LITBASE to use it.
	 */
#if XCHAL_HAVE_ABSOLUTE_LITERALS && XSHAL_USE_ABSOLUTE_LITERALS
	/*
	 *  Switch from PC-relative to absolute (litbase-relative) L32R mode.
	 *  Set LITBASE to 256 kB beyond the start of the literals in .lit4
	 *  (aligns to the nearest 4 kB boundary, LITBASE does not have bits 1..11)
	 *  and set the enable bit (_lit4_start is assumed 4-byte aligned).
	 */
	movi	a2, _lit4_start + 0x40001
	wsr.litbase	a2
	rsync
#endif /* have and use absolute literals */
	.end	no-absolute-literals		// we can now start using absolute literals


//  Technically, this only needs to be done pre-LX2, assuming hard reset:
# if XCHAL_HAVE_WINDOWED && defined(__XTENSA_WINDOWED_ABI__)
	//  Windowed register init, so we can call windowed code (eg. C code).
	movi	a1, 1
	wsr.windowstart	a1
	//  The processor always clears WINDOWBASE at reset, so no need to clear it here.
	//  It resets WINDOWSTART to 1 starting with LX2.0/X7.0 (RB-2006.0).
	//  However, assuming hard reset is not yet always practical, so do this anyway:
	wsr.windowbase	a0
	rsync
	movi	a0, 0			// possibly a different a0, clear it
# endif

#if XCHAL_HW_MIN_VERSION < XTENSA_HWVERSION_RB_2006_0	/* only pre-LX2 needs this */
	// Coprocessor option initialization
# if XCHAL_HAVE_CP
	//movi	a2, XCHAL_CP_MASK	// enable existing CPs
	//  To allow creating new coprocessors using TC that are not known
	//  at GUI build time without having to explicitly enable them,
	//  all CPENABLE bits must be set, even though they may not always
	//  correspond to a coprocessor.
	movi	a2, 0xFF	// enable *all* bits, to allow dynamic TIE
	wsr.cpenable	a2
# endif

	// Floating point coprocessor option initialization (at least
	// rounding mode, so that floating point ops give predictable results)
# if XCHAL_HAVE_FP && !XCHAL_HAVE_VECTORFPU2005
	rsync		/* wait for WSR to CPENABLE to complete before accessing FP coproc state */
	wur.fcr	a0	/* clear FCR (default rounding mode, round-nearest) */
	wur.fsr	a0	/* clear FSR */
# endif
#endif /* pre-LX2 */


	//  Initialize memory error handler address.
	//  Putting this address in a register allows multiple instances of
	//  the same configured core (with separate program images but shared
	//  code memory, thus forcing memory error vector to be shared given
	//  it is not VECBASE relative) to have the same memory error vector,
	//  yet each have their own handler and associated data save area.
#if XCHAL_HAVE_MEM_ECC_PARITY_IGNORE
	movi	a4, _MemErrorHandler
	wsr.mesave	a4
#endif

	/*
	 *  Initialize medium and high priority interrupt dispatchers:
	 */
#if HAVE_XSR && (XCHAL_HAVE_XEA1 || XCHAL_HAVE_XEA2)

#if !CONFIG_XT_BOOT_LOADER || CONFIG_VM_ROM
# ifndef XCHAL_DEBUGLEVEL		/* debug option not selected? */
#  define XCHAL_DEBUGLEVEL	99	/* bogus value outside 2..6 */
# endif

	//  init_vector <level>:
	//  Point EXCSAVE<level> at the (weak) dispatcher _Level<level>FromVector,
	//  and, for levels above EXCM_LEVEL, also store the dispatcher address
	//  into _Pri_<level>_HandlerAddress.  The debug level is skipped
	//  (XCHAL_DEBUGLEVEL-\level test) so OCD's use of EXCSAVE is preserved.
	.macro	init_vector	level
	  .if GREATERTHAN(XCHAL_NUM_INTLEVELS+1,\level)
	    .if XCHAL_DEBUGLEVEL-\level
	      .weak   _Level&level&FromVector
	      movi    a4, _Level&level&FromVector
	      writesr excsave \level a4
	      .if GREATERTHAN(\level,XCHAL_EXCM_LEVEL)
		movi    a5, _Pri_&level&_HandlerAddress
		s32i    a4, a5, 0
		/*  If user provides their own handler, that handler might
		 *  not provide its own _Pri_<n>_HandlerAddress variable for
		 *  linking handlers.  In that case, the reference below
		 *  would pull in the XTOS handler anyway, causing a conflict.
		 *  To avoid that, provide a weak version of it here:
		 */
		.pushsection .data, "aw"
		.global  _Pri_&level&_HandlerAddress
		.weak   _Pri_&level&_HandlerAddress
		.align	4
		_Pri_&level&_HandlerAddress: .space 4
		.popsection
	      .endif
	    .endif
	  .endif
	.endm

	init_vector	2
	init_vector	3
	init_vector	4
	init_vector	5
	init_vector	6
#endif
#endif /*HAVE_XSR*/

	/*
	 *  Complete reset initialization outside the vector,
	 *  to avoid requiring a vector that is larger than necessary.
	 *  This 2nd-stage startup code sets up the C Run-Time (CRT) and calls main().
	 *
	 *  Here we use call0 not because we expect any return, but
	 *  because the assembler/linker dynamically sizes call0 as
	 *  needed (with -mlongcalls) which it doesn't with j or jx.
	 *  Note:  This needs to be call0 regardless of the selected ABI.
	 */

#if CONFIG_XT_BOOT_LOADER && !CONFIG_VM_ROM
	movi	a0, SOF_TEXT_BASE	// boot loader case: enter main image directly
	callx0	a0
#else
	call0	_start		// jump to _start (in crt1-*.S)
#endif
	/* does not return */

#else /* XCHAL_HAVE_HALT */

	j	_start		// jump to _start (in crt1-*.S)
				// (TX has max 64kB IRAM, so J always in range)

	//  Paranoia -- double-check requirements / assumptions of this Xtensa TX code:
# if !defined(__XTENSA_CALL0_ABI__) || !XCHAL_HAVE_FULL_RESET || XCHAL_HAVE_INTERRUPTS || XCHAL_HAVE_CCOUNT || XCHAL_DTLB_ARF_WAYS || XCHAL_HAVE_DEBUG || XCHAL_HAVE_S32C1I || XCHAL_HAVE_ABSOLUTE_LITERALS || XCHAL_DCACHE_SIZE || XCHAL_ICACHE_SIZE || XCHAL_HAVE_PIF || XCHAL_HAVE_WINDOWED
#  error "Halt architecture (Xtensa TX) requires: call0 ABI, all flops reset, no exceptions or interrupts, no TLBs, no debug, no S32C1I, no LITBASE, no cache, no PIF, no windowed regs"
# endif

#endif /* XCHAL_HAVE_HALT */


#if (!XCHAL_HAVE_HALT || defined(XTOS_UNPACK)) && XCHAL_HAVE_IMEM_LOADSTORE
	.size	_ResetHandler, . - _ResetHandler
#else
	.size	_ResetVector, . - _ResetVector
#endif

	.text
	.global xthals_hw_configid0, xthals_hw_configid1
	.global xthals_release_major, xthals_release_minor
	.end	literal_prefix
