/*
 * Copyright (c) 2016 Cadence Design Systems, Inc.
 * SPDX-License-Identifier: Apache-2.0
 */

#include <xtensa/coreasm.h>
#include <xtensa/corebits.h>
#include <xtensa/cacheasm.h>
#include <xtensa/cacheattrasm.h>
#include <xtensa/xtensa-xer.h>
#include <xtensa/xdm-regs.h>
#include <xtensa/config/specreg.h>
#include <xtensa/config/system.h>  /* for XSHAL_USE_ABSOLUTE_LITERALS only */
#include <xtensa/xtruntime-core-state.h>

/*
 * The following reset vector avoids initializing certain registers already
 * initialized by processor reset.  But it does initialize some of them
 * anyway, for minimal support of warm restart (restarting in software by
 * jumping to the reset vector rather than asserting hardware reset).
 */

	.begin	literal_prefix	.ResetVector
	.section		.ResetVector.text, "ax"

	.align	4
	.global	__start
__start:

#if (!XCHAL_HAVE_HALT || defined(XTOS_UNPACK)) && XCHAL_HAVE_IMEM_LOADSTORE
	/*
	 *  NOTE:
	 *
	 *  IMPORTANT:  If you move the _ResetHandler portion to a section
	 *  other than .ResetVector.text that is outside the range of
	 *  the reset vector's 'j' instruction, the _ResetHandler symbol
	 *  and a more elaborate j/movi/jx sequence are needed in
	 *  .ResetVector.text to dispatch to the new location.
	 */
	j	_ResetHandler

	.size	__start, . - __start

#if XCHAL_HAVE_HALT
	/*
	 *  Xtensa TX: reset vector segment is only 4 bytes, so must place the
	 *  unpacker code elsewhere in the memory that contains the reset
	 *  vector.
	 */
#if XCHAL_RESET_VECTOR_VADDR == XCHAL_INSTRAM0_VADDR
	.section .iram0.text, "ax"
#elif XCHAL_RESET_VECTOR_VADDR == XCHAL_INSTROM0_VADDR
	.section .irom0.text, "ax"
#elif XCHAL_RESET_VECTOR_VADDR == XCHAL_URAM0_VADDR
	.section .uram0.text, "ax"
#else
#warning "Xtensa TX reset vector not at start of iram0, irom0, or uram0 -- ROMing LSPs may not work"
	.text
#endif
#endif /* XCHAL_HAVE_HALT */

	.extern	__memctl_default
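	/* Note: like _memmap_vecbase_reset below, __memctl_default is an
	 * absolute symbol: its value (not its contents) is the MEMCTL
	 * setting written by the movi/wsr sequences that follow.
	 */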

	.align	4

	/* tells the assembler/linker to place literals here */
	.literal_position
	.align	4
	.global	_ResetHandler
_ResetHandler:
#endif

#if !XCHAL_HAVE_HALT

	/*
	 *  Even if the processor supports the non-PC-relative L32R option,
	 *  it will always start up in PC-relative mode.  We take advantage of
	 *  this, and use PC-relative mode at least until we're sure the .lit4
	 *  section is in place (which is sometimes only after unpacking).
	 */
	.begin	no-absolute-literals

	/*
	 * If we have dynamic cache way support, init the caches as soon
	 * as we can, which is now. Except, if we are waking up from a
	 * PSO event, then we need to do this slightly later.
	 */
#if XCHAL_HAVE_ICACHE_DYN_WAYS || XCHAL_HAVE_DCACHE_DYN_WAYS
# if XCHAL_HAVE_PSO_CDM && !XCHAL_HAVE_PSO_FULL_RETENTION
	/* Do this later on in the code -- see below */
# else
	movi	a0, __memctl_default
	wsr	a0, MEMCTL
# endif
#endif

	/*
	 * If we have PSO support, then we must check for a warm start with
	 * caches left powered on. If the caches had been left powered on,
	 * we must restore the state of MEMCTL to the saved state if any.
	 * Note that MEMCTL may not be present depending on config.
	 */
#if XCHAL_HAVE_PSO_CDM && !XCHAL_HAVE_PSO_FULL_RETENTION
	/* Read PWRSTAT */
	movi	a2, XDM_MISC_PWRSTAT
	/* Save area address - retained for later */
	movi	a3, _xtos_pso_savearea
	/* Signature for compare - retained for later */
	movi	a5, CORE_STATE_SIGNATURE
	/* PWRSTAT value - retained for later */
	rer	a7, a2
	/* Now bottom 2 bits are core wakeup and cache power lost */
	extui	a4, a7, 1, 2
	/* a4==1 means PSO wakeup, caches did not lose power */
	bnei	a4, 1, .Lcold_start
	/* Load save area signature field */
	l32i	a4, a3, CS_SA_signature
	sub	a4, a4, a5
	/* If signature mismatch then do cold start */
	bnez	a4, .Lcold_start
#if XCHAL_USE_MEMCTL
	/* Load saved MEMCTL value */
	l32i	a4, a3, CS_SA_memctl
	movi	a0, ~MEMCTL_INV_EN
	/* Clear invalidate bit */
	and	a0, a4, a0
	wsr	a0, MEMCTL
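	/* The invalidate bit is cleared so that restoring MEMCTL does not
	 * wipe the ways that kept power; the warm cache contents stay valid.
	 */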
#endif
	j	.Lwarm_start

.Lcold_start:

#if XCHAL_HAVE_ICACHE_DYN_WAYS || XCHAL_HAVE_DCACHE_DYN_WAYS
	/*
	 * Enable and invalidate all ways of both caches. If there is no
	 * dynamic way support then this write will have no effect.
	 */
	movi	a0, __memctl_default
	wsr	a0, MEMCTL
#endif

.Lwarm_start:

#endif
	/* a0 is always 0 in this code, used to initialize lots of things */
	movi	a0, 0

/* technically this should be under !FULL_RESET, assuming hard reset */
#if XCHAL_HAVE_INTERRUPTS
	/* make sure that interrupts are shut off (*before* we lower
	 * PS.INTLEVEL and PS.EXCM!)
	 */
	wsr	a0, INTENABLE
#endif

#if !XCHAL_HAVE_FULL_RESET

/* pre-LX2 cores only */
#if XCHAL_HAVE_CCOUNT && (XCHAL_HW_MIN_VERSION < XTENSA_HWVERSION_RB_2006_0)
	/* not really necessary, but nice; best done very early */
	wsr	a0, CCOUNT
#endif

	/*
	 * For full MMU configs, put page table at an unmapped virtual address.
	 * This ensures that accesses outside the static maps result
	 * in miss exceptions rather than random behaviour.
	 * Assumes XCHAL_SEG_MAPPABLE_VADDR == 0 (true in released MMU).
	 */
#if XCHAL_ITLB_ARF_WAYS > 0 || XCHAL_DTLB_ARF_WAYS > 0
	wsr	a0, PTEVADDR
#endif

	/*
	 * Debug initialization
	 *
	 * NOTE: DBREAKCn must be initialized before the combination of these
	 * two things: any load/store, and a lowering of PS.INTLEVEL below
	 * DEBUG_LEVEL.  The processor already resets IBREAKENABLE
	 * appropriately.
	 */
#if XCHAL_HAVE_DEBUG
#if XCHAL_NUM_DBREAK
#if XCHAL_NUM_DBREAK >= 2
	wsr	a0, DBREAKC1
#endif
	wsr	a0, DBREAKC0
	dsync			 /* wait for WSRs to DBREAKCn to complete */
#endif /* XCHAL_NUM_DBREAK */

/* pre-LX cores only */
# if XCHAL_HW_MIN_VERSION < XTENSA_HWVERSION_RA_2004_1
	/*
	 *  Starting in Xtensa LX, ICOUNTLEVEL resets to zero (not 15), so no
	 *  need to initialize it.  Prior to that we do, otherwise we get an
	 *  ICOUNT exception, 2^32 instructions after reset.
	 */

	/* are we being debugged? (detected by ICOUNTLEVEL not 15, or dropped
	 * below 12)
	 */
	rsr	a2, ICOUNTLEVEL
	/* if so, avoid initializing ICOUNTLEVEL, which would drop
	 * single-steps through here
	 */
	bltui	a2, 12, 1f
	/* avoid ICOUNT exceptions */
	wsr	a0, ICOUNTLEVEL
	/* wait for WSR to ICOUNTLEVEL to complete */
	isync
1:
#endif
#endif /* XCHAL_HAVE_DEBUG */

#endif /* !XCHAL_HAVE_FULL_RESET */

#if XCHAL_HAVE_ABSOLUTE_LITERALS
	/* Technically, this only needs to be done under !FULL_RESET,
	 * assuming hard reset:
	 */
	wsr	a0, LITBASE
	rsync
#endif

#if XCHAL_HAVE_PSO_CDM && ! XCHAL_HAVE_PSO_FULL_RETENTION
	/*
	 * If we're powering up from a temporary power shut-off (PSO),
	 * restore state saved just prior to shut-off. Note that the
	 * MEMCTL register was already restored earlier, and as a side
	 * effect, registers a3, a5, a7 are now preloaded with values
	 * that we will use here.
	 * a3 - pointer to save area base address (_xtos_pso_savearea)
	 * a5 - saved state signature (CORE_STATE_SIGNATURE)
	 * a7 - contents of PWRSTAT register
	 */

	/* load save area signature */
	l32i	a4, a3, CS_SA_signature
	/* compare signature with expected one */
	sub	a4, a4, a5
# if XTOS_PSO_TEST
	/* pretend PSO warm start with warm caches */
	movi	a7, PWRSTAT_WAKEUP_RESET
# endif
	/* wakeup from PSO? (branch if not) */
	bbci.l	a7, PWRSTAT_WAKEUP_RESET_SHIFT, 1f
	/* Yes, wakeup from PSO.  Check whether state was properly saved.
	 * Speculatively clear the PSO-wakeup bit.
	 */
	addi	a5, a7, - PWRSTAT_WAKEUP_RESET
	/* if state not saved (corrupted?), mark as cold start */
	movnez	a7, a5, a4
	/* if state not saved, just continue with reset */
	bnez	a4, 1f
	/* Wakeup from PSO with good signature.  Now check cache status:
	 * if caches are warm, restore now.
	 */
	bbci.l	a7, PWRSTAT_CACHES_LOST_POWER_SHIFT, .Lpso_restore
	/* Caches got shut off.  Continue reset; we'll end up initializing
	 * caches, and check again later for PSO.
	 */
# if XCHAL_HAVE_PRID && XCHAL_HAVE_S32C1I
	j	.Ldonesync	 /* skip reset sync, only done for cold start */
# endif
1:	/*  Cold start.  (Not PSO wakeup.)  Proceed with normal full reset. */
#endif

#if XCHAL_HAVE_PRID && XCHAL_HAVE_S32C1I
	/* Core 0 initializes the XMP synchronization variable, if present.
	 * This operation needs to happen as early as possible in the startup
	 * sequence so that the other cores can be released from reset.
	 */
	.weak _ResetSync
	movi	a2, _ResetSync	 /* address of sync variable */
	rsr.prid a3		 /* core and multiprocessor ID */
	extui	a3, a3, 0, 8	 /* extract core ID (FIXME: need proper
				  * constants for PRID bits to extract) */
	beqz	a2, .Ldonesync	 /* skip if no sync variable */
	bnez	a3, .Ldonesync	 /* only do this on core 0 */
	s32i	a0, a2, 0	 /* clear sync variable */
.Ldonesync:
#endif
#if XCHAL_HAVE_EXTERN_REGS && XCHAL_HAVE_MP_RUNSTALL
	/* On core 0, this releases other cores.  On other cores this has no
	 * effect, because runstall control is unconnected.
	 */
	movi	a2, XER_MPSCORE
	wer	a0, a2
#endif

	/*
	 * For processors with relocatable vectors, apply any alternate
	 * vector base given to xt-genldscripts, which sets the
	 * _memmap_vecbase_reset symbol accordingly.
	 */
#if XCHAL_HAVE_VECBASE
	/* note: absolute symbol, not a ptr */
	movi	a2, _memmap_vecbase_reset
	wsr	a2, vecbase
#endif

/* have ATOMCTL ? */
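/* Background on the ATOMCTL encoding (per the Xtensa ISA reference): each
 * memory type gets a 2-bit field -- bits 1..0 bypass, 3..2 writethrough,
 * 5..4 writeback -- where 1 = use an RCW bus transaction and 2 = handle the
 * atomic internally.  Hence 0x25 below: internal for writeback, RCW
 * otherwise; and 0x15: RCW for everything.
 */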
#if XCHAL_HAVE_S32C1I && (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0)
#if XCHAL_DCACHE_IS_COHERENT
	/* MX -- internal for writeback, RCW otherwise */
	movi	a3, 0x25
#else
	/* non-MX -- always RCW */
	movi	a3, 0x15
#endif /* XCHAL_DCACHE_IS_COHERENT */
	wsr	a3, ATOMCTL
#endif

#if XCHAL_HAVE_INTERRUPTS && XCHAL_HAVE_DEBUG
	/* lower PS.INTLEVEL here to make reset vector easier to debug */
	rsil	a2, 1
#endif

	/* If either of the caches does not have dynamic way support, then
	 * use the old (slow) method to init them. If the cache is absent
	 * the macros will expand to empty.
	 */
#if ! XCHAL_HAVE_ICACHE_DYN_WAYS
	icache_reset	a2, a3
#endif
#if ! XCHAL_HAVE_DCACHE_DYN_WAYS
	dcache_reset	a2, a3
#endif
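	/* icache_reset and dcache_reset are macros from <xtensa/cacheasm.h>
	 * (included above); they initialize every line of the respective
	 * cache, using the two registers passed in as scratch.
	 */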

#if XCHAL_HAVE_PSO_CDM && ! XCHAL_HAVE_PSO_FULL_RETENTION
	/* Here, a7 still contains status from the power status register,
	 * or zero if signature check failed.
	 */

	/* wakeup from PSO with good signature? */
	bbci.l	a7, PWRSTAT_WAKEUP_RESET_SHIFT, .Lcoldstart
	/* Yes, wakeup from PSO.  Caches had been powered down, now are
	 * initialized.
	 */
.Lpso_restore:
	/* Assume memory still initialized, so all code still unpacked etc.
	 * So we can just jump/call to relevant state restore code (wherever
	 * located).
	 */

	/* make shutoff routine return zero */
	movi	a2, 0
	movi	a3, _xtos_pso_savearea
	/* Here, as below for _start, call0 is used as an unlimited-range
	 * jump.
	 */
	call0	_xtos_core_restore_nw
	/*  (does not return) */
.Lcoldstart:
#endif

#if XCHAL_HAVE_PREFETCH
	/* Enable cache prefetch if present.  */
	movi	a2, XCHAL_CACHE_PREFCTL_DEFAULT
	wsr	a2, PREFCTL
#endif

	/*
	 *  Now setup the memory attributes.  On some cores this "enables"
	 *  caches.  We do this ahead of unpacking, so it can proceed more
	 *  efficiently.
	 *
	 *  The _memmap_cacheattr_reset symbol's value (address) is defined by
	 *  the LSP's linker script, as generated by xt-genldscripts.  It
	 *  defines 4-bit attributes for eight 512MB regions.
	 *
	 *  (NOTE:  for cores with the older MMU v1 or v2, or without any
	 *  memory protection mechanism, the following code has no effect.)
	 */
#if XCHAL_HAVE_MPU
	/*  If there's an empty background map, setup foreground maps to mimic
	 *  region protection:
	 */
# if XCHAL_MPU_ENTRIES >= 8 && XCHAL_MPU_BACKGROUND_ENTRIES <= 2
	.pushsection .rodata, "a"
	.global _xtos_mpu_attribs
	.align 4
_xtos_mpu_attribs:
	/*  Illegal	(---) */
	.word   0x00006000+XCHAL_MPU_ENTRIES-8
	/* Writeback	(rwx Cacheable Non-shareable wb rd-alloc wr-alloc) */
	.word   0x000F7700+XCHAL_MPU_ENTRIES-8
	/* WBNA		(rwx Cacheable Non-shareable wb rd-alloc) */
	.word   0x000D5700+XCHAL_MPU_ENTRIES-8
	/* Writethru	(rwx Cacheable Non-shareable wt rd-alloc) */
	.word   0x000C4700+XCHAL_MPU_ENTRIES-8
	/* Bypass	(rwx Device non-interruptible system-shareable) */
	.word   0x00006700+XCHAL_MPU_ENTRIES-8
	.popsection

	/*
	 * We assume reset state:  all MPU entries zeroed and disabled.
	 * Otherwise we'd need a loop to zero everything.
	 */
	/* note: absolute symbol, not a ptr */
	movi	a2, _memmap_cacheattr_reset
	movi	a3, _xtos_mpu_attribs
	movi	a4, 0x20000000	/* 512 MB delta */
	movi	a6, 8
	movi	a7, 1		/* MPU entry vaddr 0, with valid bit set */
	movi	a9, 0		/* cacheadrdis value */
	/* enable everything temporarily while MPU updates */
	wsr.cacheadrdis a9

	/* Write eight MPU entries, from the last one going backwards
	 * (entries n-1 thru n-8)
	 */
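	/* a7 starts at 1 (vaddr 0 with the valid bit); the first "sub" below
	 * wraps it to 0xE0000001, so entries are written for the top region
	 * (0xE0000000) down to region 0, matching the msb-first order in
	 * which the attribute nibbles of a2 are consumed.
	 */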
2:	extui	a8, a2, 28, 4	/* get next attribute nibble (msb first) */
	extui	a5, a8, 0, 2	/* lower two bits indicate whether cached */
	slli	a9, a9, 1	/* add a bit to cacheadrdis... */
	addi	a10, a9, 1	/* set that new bit if... */
	moveqz	a9, a10, a5	/* ... that region is non-cacheable */
	addx4	a5, a8, a3	/* index into _xtos_mpu_attribs table */
	addi	a8, a8, -5	/* make valid attrib indices negative */
	movgez	a5, a3, a8	/* if not valid attrib, use Illegal */
	l32i	a5, a5, 0	/* load access rights, memtype from table entry */
	slli	a2, a2, 4
	sub	a7, a7, a4	/* next 512MB region (last to first) */
	addi	a6, a6, -1
	add	a5, a5, a6	/* add the index */
	wptlb	a5, a7		/* write the MPU entry */
	bnez	a6, 2b		/* loop until done */
# else
	/* default value of CACHEADRDIS for bgnd map */
	movi	a9, XCHAL_MPU_BG_CACHEADRDIS
# endif
	wsr.cacheadrdis a9			 /* update cacheadrdis */
#elif XCHAL_HAVE_CACHEATTR || XCHAL_HAVE_MIMIC_CACHEATTR \
		|| XCHAL_HAVE_XLT_CACHEATTR \
		|| (XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY)
	/* note: absolute symbol, not a ptr */
	movi	a2, _memmap_cacheattr_reset
	/* set CACHEATTR from a2 (clobbers a3-a8) */
	cacheattr_set
#endif

	/* Now that caches are initialized, cache coherency can be enabled. */
#if XCHAL_DCACHE_IS_COHERENT
# if XCHAL_HAVE_EXTERN_REGS && XCHAL_HAVE_MX && \
		(XCHAL_HW_MIN_VERSION < XTENSA_HWVERSION_RE_2012_0)
	/* Opt into coherence for MX (for backward compatibility / testing). */
	movi	a3, 1
	movi	a2, XER_CCON
	wer	a3, a2
# endif
#endif

	/* Enable zero-overhead loop instr buffer, and snoop responses, if
	 * configured.  If HW erratum 453 fix is to be applied, then don't
	 * enable loop instr buffer.
	 */
#if XCHAL_USE_MEMCTL && XCHAL_SNOOP_LB_MEMCTL_DEFAULT
	movi	a3, XCHAL_SNOOP_LB_MEMCTL_DEFAULT
	rsr	a2, MEMCTL
	or	a2, a2, a3
	wsr	a2, MEMCTL
#endif

	/* Caches are all up and running, clear PWRCTL.ShutProcOffOnPWait. */
#if XCHAL_HAVE_PSO_CDM
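	/* PWRCTL, like PWRSTAT earlier, is an XDM (debug module) register,
	 * hence the rer/wer (external-register read/write) sequence below
	 * instead of rsr/wsr.
	 */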
	movi	a2, XDM_MISC_PWRCTL
	movi	a4, ~PWRCTL_CORE_SHUTOFF
	rer	a3, a2
	and	a3, a3, a4
	wer	a3, a2
#endif

#endif /* !XCHAL_HAVE_HALT */

	/*
	 *  Unpack code and data (eg. copy ROMed segments to RAM, vectors into
	 *  their proper location, etc).
	 */

#if defined(XTOS_UNPACK)
	movi	a2, _rom_store_table
	beqz	a2, unpackdone
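	/* Each _rom_store_table entry is three words -- start vaddr, end
	 * vaddr, store (source) vaddr -- and the loop below copies
	 * [store..store+(end-start)) to [start..end).  An entry with zero
	 * start and store vaddrs terminates the table.
	 */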
unpack:	l32i	a3, a2, 0	 /* start vaddr */
	l32i	a4, a2, 4	 /* end vaddr */
	l32i	a5, a2, 8	 /* store vaddr */
	addi	a2, a2, 12
	bgeu	a3, a4, upnext	 /* skip unless start < end */
uploop:	l32i	a6, a5, 0
	addi	a5, a5, 4
	s32i	a6, a3, 0
	addi	a3, a3, 4
	bltu	a3, a4, uploop
	j	unpack
upnext:	bnez	a3, unpack
	bnez	a5, unpack
#endif /* XTOS_UNPACK */

unpackdone:

#if defined(XTOS_UNPACK) || defined(XTOS_MP)
	/*
	 * If writeback caches are configured and enabled, unpacked data must
	 * be written out to memory before trying to execute it:
	 */
	dcache_writeback_all	a2, a3, a4, 0
	/* ensure data written back is visible to i-fetch */
	icache_sync		a2
	/*
	 * Note:  no need to invalidate the i-cache after the above, because
	 * we already invalidated it further above and did not execute
	 * anything within unpacked regions afterwards.  [Strictly speaking,
	 * if an unpacked region follows this code very closely, it's possible
	 * for cache-ahead to have cached a bit of that unpacked region, so in
	 * the future we may need to invalidate the entire i-cache here again
	 * anyway.]
	 */
#endif


#if !XCHAL_HAVE_HALT	/* skip for TX */

	/*
	 *  Now that we know the .lit4 section is present (if it got unpacked)
	 *  (and if absolute literals are used), initialize LITBASE to use it.
	 */
#if XCHAL_HAVE_ABSOLUTE_LITERALS && XSHAL_USE_ABSOLUTE_LITERALS
	/*
	 *  Switch from PC-relative to absolute (litbase-relative) L32R mode.
	 *  Set LITBASE to 256 kB beyond the start of the literals in .lit4
	 *  (aligns to the nearest 4 kB boundary, LITBASE does not have bits
	 *  1..11) and set the enable bit (_lit4_start is assumed 4-byte
	 *  aligned).
	 */
	movi	a2, _lit4_start + 0x40001
	wsr	a2, LITBASE
	rsync
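	/* For example (hypothetical address): with _lit4_start at 0x60000000,
	 * LITBASE becomes 0x60040001 -- literal base 0x60040000 plus the
	 * enable bit -- so the 256 kB of negative L32R offsets from that base
	 * cover .lit4 exactly.
	 */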
#endif /* have and use absolute literals */
	/* we can now start using absolute literals */
	.end	no-absolute-literals

	/* Technically, this only needs to be done pre-LX2, assuming hard
	 * reset:
	 */
# if XCHAL_HAVE_WINDOWED && defined(__XTENSA_WINDOWED_ABI__)
	/* Windowed register init, so we can call windowed code (eg. C code). */
	movi	a1, 1
	wsr	a1, WINDOWSTART
	/*
	 *  The processor always clears WINDOWBASE at reset, so no need to
	 *  clear it here.  It resets WINDOWSTART to 1 starting with LX2.0/X7.0
	 *  (RB-2006.0).  However, assuming hard reset is not yet always
	 *  practical, so do this anyway:
	 */
	wsr	a0, WINDOWBASE
	rsync
	movi	a0, 0			 /* possibly a different a0, clear it */
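	/* (The WINDOWBASE write rotates the register window, so the a0 now
	 * visible may be a different physical register than the one cleared
	 * earlier -- hence the re-clear.)
	 */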
# endif

/* only pre-LX2 needs this */
#if XCHAL_HW_MIN_VERSION < XTENSA_HWVERSION_RB_2006_0
	/* Coprocessor option initialization */
# if XCHAL_HAVE_CP
	/*
	 * To allow creating new coprocessors using TC that are not known
	 * at GUI build time without having to explicitly enable them,
	 * all CPENABLE bits must be set, even though they may not always
	 * correspond to a coprocessor.
	 */
	movi	a2, 0xFF	 /* enable *all* bits, to allow dynamic TIE */
	wsr	a2, CPENABLE
# endif

	/*
	 * Floating point coprocessor option initialization (at least
	 * rounding mode, so that floating point ops give predictable results)
	 */
# if XCHAL_HAVE_FP && !XCHAL_HAVE_VECTORFPU2005
/* floating-point control register (user register number) */
#  define FCR	232
/* floating-point status register (user register number) */
#  define FSR	233
	/* wait for WSR to CPENABLE to complete before accessing FP coproc
	 * state
	 */
	rsync
	wur	a0, FCR	/* clear FCR (default rounding mode, round-nearest) */
	wur	a0, FSR	/* clear FSR */
# endif
#endif /* pre-LX2 */


	/*
	 *  Initialize memory error handler address.
	 *  Putting this address in a register allows multiple instances of
	 *  the same configured core (with separate program images but shared
	 *  code memory, thus forcing memory error vector to be shared given
	 *  it is not VECBASE relative) to have the same memory error vector,
	 *  yet each have their own handler and associated data save area.
	 */
#if XCHAL_HAVE_MEM_ECC_PARITY
	movi	a4, _MemErrorHandler
	wsr	a4, MESAVE
#endif


	/*
	 *  Initialize medium and high priority interrupt dispatchers:
	 */
#if HAVE_XSR

/*  For asm macros; works for positive a,b smaller than 1000:  */
# define GREATERTHAN(a,b)	(((b)-(a)) & ~0xFFF)
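/*  Why this works: for small positive a,b the masked value is nonzero
 *  exactly when (b)-(a) is negative, i.e. when a > b; the trick holds only
 *  while a and b stay within 4096 of each other, hence the restriction
 *  noted above.
 */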

# ifndef XCHAL_DEBUGLEVEL		/* debug option not selected? */
#  define XCHAL_DEBUGLEVEL	99	/* bogus value outside 2..6 */
# endif

	.macro	init_vector	level
	  .if GREATERTHAN(XCHAL_NUM_INTLEVELS+1,\level)
	    .if XCHAL_DEBUGLEVEL-\level
	      .weak   _Level&level&FromVector
	      movi    a4, _Level&level&FromVector
	      wsr     a4, EXCSAVE+\level
	      .if GREATERTHAN(\level,XCHAL_EXCM_LEVEL)
		movi    a5, _Pri_&level&_HandlerAddress
		s32i    a4, a5, 0
		/*  If user provides their own handler, that handler might
		 *  not provide its own _Pri_<n>_HandlerAddress variable for
		 *  linking handlers.  In that case, the reference below
		 *  would pull in the XTOS handler anyway, causing a conflict.
		 *  To avoid that, provide a weak version of it here:
		 */
		.pushsection .data, "aw"
		.global  _Pri_&level&_HandlerAddress
		.weak   _Pri_&level&_HandlerAddress
		.align	4
		_Pri_&level&_HandlerAddress: .space 4
		.popsection
	      .endif
	    .endif
	  .endif
	.endm
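	/*
	 * For example, on a config with XCHAL_NUM_INTLEVELS >= 2 and
	 * XCHAL_DEBUGLEVEL != 2, "init_vector 2" expands to roughly:
	 *
	 *	.weak	_Level2FromVector
	 *	movi	a4, _Level2FromVector
	 *	wsr	a4, EXCSAVE+2
	 *
	 * plus, for levels above XCHAL_EXCM_LEVEL, the store of that address
	 * into the (weak) _Pri_2_HandlerAddress variable.
	 */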

	init_vector	2
	init_vector	3
	init_vector	4
	init_vector	5
	init_vector	6

#endif /*HAVE_XSR*/


	/*
	 *  Complete reset initialization outside the vector, to avoid
	 *  requiring a vector that is larger than necessary.  This 2nd-stage
	 *  startup code sets up the C Run-Time (CRT) and calls main().
	 *
	 *  Here we use call0 not because we expect any return, but because the
	 *  assembler/linker dynamically sizes call0 as needed (with
	 *  -mlongcalls) which it doesn't with j or jx.  Note:  This needs to
	 *  be call0 regardless of the selected ABI.
	 */
	call0	_start		 /* jump to _start (in crt1-*.S) */
	/* does not return */

#else /* XCHAL_HAVE_HALT */

	j	_start	/* jump to _start (in crt1-*.S) */
			/* (TX has max 64kB IRAM, so J always in range) */

	/* Paranoia -- double-check requirements / assumptions of this Xtensa
	 * TX code:
	 */
# if !defined(__XTENSA_CALL0_ABI__) || !XCHAL_HAVE_FULL_RESET \
		|| XCHAL_HAVE_INTERRUPTS || XCHAL_HAVE_CCOUNT \
		|| XCHAL_DTLB_ARF_WAYS || XCHAL_HAVE_DEBUG \
		|| XCHAL_HAVE_S32C1I || XCHAL_HAVE_ABSOLUTE_LITERALS \
		|| XCHAL_DCACHE_SIZE || XCHAL_ICACHE_SIZE || XCHAL_HAVE_PIF \
		|| XCHAL_HAVE_WINDOWED
#  error "Halt architecture (Xtensa TX) requires: call0 ABI, all flops reset, no exceptions or interrupts, no TLBs, no debug, no S32C1I, no LITBASE, no cache, no PIF, no windowed regs"
# endif

#endif /* XCHAL_HAVE_HALT */


#if (!XCHAL_HAVE_HALT || defined(XTOS_UNPACK)) && XCHAL_HAVE_IMEM_LOADSTORE
	.size	_ResetHandler, . - _ResetHandler
#else
	.size	__start, . - __start
#endif

	.text
	.global xthals_hw_configid0, xthals_hw_configid1
	.global xthals_release_major, xthals_release_minor
	.end	literal_prefix