// reset-vector.S -- Xtensa Reset Vector
// $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/xtos/reset-vector.S#1 $

// Copyright (c) 1999-2013 Tensilica Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.


#include <sof/common.h>
#if CONFIG_XT_BOOT_LOADER && !CONFIG_VM_ROM
#include <sof/lib/memory.h>
#endif
#include <xtensa/coreasm.h>
#include <xtensa/corebits.h>
#include <xtensa/cacheasm.h>
#include <xtensa/cacheattrasm.h>
#include <xtensa/xtensa-xer.h>
#include <xtensa/xdm-regs.h>
#include <xtensa/config/system.h>	/* for XSHAL_USE_ABSOLUTE_LITERALS only */
#include <xtensa/xtruntime-core-state.h>
#include "xtos-internal.h"

#if XCHAL_HAVE_MPU
/* for mpu_write_map opcode */
#include <xtensa/mpuasm.h>
#endif

// The following reset vector avoids initializing certain registers already
// initialized by processor reset.  But it does initialize some of them
// anyway, for minimal support of warm restart (restarting in software by
// jumping to the reset vector rather than asserting hardware reset).


	.begin	literal_prefix	.ResetVector
	.section	.ResetVector.text, "ax"

	.align	4
	.global	_ResetVector
_ResetVector:

#if (!XCHAL_HAVE_HALT || defined(XTOS_UNPACK)) && XCHAL_HAVE_IMEM_LOADSTORE
	// NOTE:
	//
	// IMPORTANT: If you move the _ResetHandler portion to a section
	// other than .ResetVector.text that is outside the range of
	// the reset vector's 'j' instruction, the _ResetHandler symbol
	// and a more elaborate j/movi/jx sequence are needed in
	// .ResetVector.text to dispatch to the new location.

#if CONFIG_XT_HAVE_RESET_VECTOR_ROM
	j	_ResetHandler
#else
	// This is our VM ROM, it simply jumps to the reset handler.
	j	.sram_jump		// jump over the literals

	.align	4
	.literal_position	// tells the assembler/linker to place literals here

_reset_sram:
	.word	_ResetHandler
	.align	4
.sram_jump:
	l32r	a0, _reset_sram		// load SRAM reset handler address
	jx	a0			// jump to the handler
#endif
	.size	_ResetVector, . - _ResetVector

# if XCHAL_HAVE_HALT
	// Xtensa TX: reset vector segment is only 4 bytes, so must place the
	// unpacker code elsewhere in the memory that contains the reset vector.
# if XCHAL_RESET_VECTOR_VADDR == XCHAL_INSTRAM0_VADDR
	.section .iram0.text, "ax"
# elif XCHAL_RESET_VECTOR_VADDR == XCHAL_INSTROM0_VADDR
	.section .irom0.text, "ax"
# elif XCHAL_RESET_VECTOR_VADDR == XCHAL_URAM0_VADDR
	.section .uram0.text, "ax"
# else
# warning "Xtensa TX reset vector not at start of iram0, irom0, or uram0 -- ROMing LSPs may not work"
	.text
# endif
# endif

	.extern	__memctl_default

#if CONFIG_XT_BOOT_LOADER || CONFIG_VM_ROM
	.section .ResetHandler.text, "ax"
	j	_ResetHandler
#endif
	.align	4
	.literal_position	// tells the assembler/linker to place literals here

	// For MPU empty background map -- see XCHAL_HAVE_MPU code further below.
	// Cannot put this in .rodata (not unpacked before MPU init).
# if XCHAL_HAVE_MPU && XCHAL_MPU_ENTRIES >= 8 && XCHAL_MPU_BACKGROUND_ENTRIES <= 2
	.global _xtos_mpu_attribs
	.align 4
_xtos_mpu_attribs:
	.word	0x00006000+XCHAL_MPU_ENTRIES-8	// Illegal	(---)
	.word	0x000F7700+XCHAL_MPU_ENTRIES-8	// Writeback	(rwx Cacheable Non-shareable wb rd-alloc wr-alloc)
	.word	0x000D5700+XCHAL_MPU_ENTRIES-8	// WBNA		(rwx Cacheable Non-shareable wb rd-alloc)
	.word	0x000C4700+XCHAL_MPU_ENTRIES-8	// Writethru	(rwx Cacheable Non-shareable wt rd-alloc)
	.word	0x00006700+XCHAL_MPU_ENTRIES-8	// Bypass	(rwx Device non-interruptible system-shareable)
# endif

	.align	4
	.global	_ResetHandler
_ResetHandler:
#endif

#if !XCHAL_HAVE_HALT

	/*
	 *  Even if the processor supports the non-PC-relative L32R option,
	 *  it will always start up in PC-relative mode.  We take advantage of
	 *  this, and use PC-relative mode at least until we're sure the .lit4
	 *  section is in place (which is sometimes only after unpacking).
	 */
	.begin	no-absolute-literals

	// If we have dynamic cache way support, init the caches as soon
	// as we can, which is now.  Except, if we are waking up from a
	// PSO event, then we need to do this slightly later.

#if XCHAL_HAVE_ICACHE_DYN_WAYS || XCHAL_HAVE_DCACHE_DYN_WAYS
# if XCHAL_HAVE_PSO_CDM && !XCHAL_HAVE_PSO_FULL_RETENTION
	// Do this later on in the code -- see below
# else
	movi	a0, __memctl_default
	wsr.memctl	a0
# endif
#endif

	// If we have PSO support, then we must check for a warm start with
	// caches left powered on.  If the caches had been left powered on,
	// we must restore the state of MEMCTL to the saved state if any.
	// Note that MEMCTL may not be present depending on config.

#if XCHAL_HAVE_PSO_CDM && !XCHAL_HAVE_PSO_FULL_RETENTION
	movi	a2, XDM_MISC_PWRSTAT		// Read PWRSTAT
	movi	a3, _xtos_pso_savearea		// Save area address - retained for later
	movi	a5, CORE_STATE_SIGNATURE	// Signature for compare - retained for later
	rer	a7, a2				// PWRSTAT value - retained for later
	extui	a4, a7, 1, 2			// Now bottom 2 bits are core wakeup and cache power lost
	bnei	a4, 1, .Lcold_start		// a4==1 means PSO wakeup, caches did not lose power
	l32i	a4, a3, CS_SA_signature		// Load save area signature field
	sub	a4, a4, a5
	bnez	a4, .Lcold_start		// If signature mismatch then do cold start
#if XCHAL_USE_MEMCTL
	l32i	a4, a3, CS_SA_memctl		// Load saved MEMCTL value
	movi	a0, ~MEMCTL_INV_EN
	and	a0, a4, a0			// Clear invalidate bit
	wsr.memctl	a0
#endif
	j	.Lwarm_start

.Lcold_start:

#if XCHAL_HAVE_ICACHE_DYN_WAYS || XCHAL_HAVE_DCACHE_DYN_WAYS
	// Enable and invalidate all ways of both caches.  If there is no
	// dynamic way support then this write will have no effect.

	movi	a0, __memctl_default
	wsr.memctl	a0
#endif

.Lwarm_start:

#endif

	movi	a0, 0		// a0 is always 0 in this code, used to initialize lots of things

#if XCHAL_HAVE_INTERRUPTS	// technically this should be under !FULL_RESET, assuming hard reset
	wsr.intenable	a0	// make sure that interrupts are shut off (*before* we lower PS.INTLEVEL and PS.EXCM!)
#endif

#if !XCHAL_HAVE_FULL_RESET

#if XCHAL_HAVE_CCOUNT && (XCHAL_HW_MIN_VERSION < XTENSA_HWVERSION_RB_2006_0)	/* pre-LX2 cores only */
	wsr.ccount	a0	// not really necessary, but nice; best done very early
#endif

	// For full MMU configs, put page table at an unmapped virtual address.
	// This ensures that accesses outside the static maps result
	// in miss exceptions rather than random behaviour.
	// Assumes XCHAL_SEG_MAPPABLE_VADDR == 0 (true in released MMU).
#if XCHAL_ITLB_ARF_WAYS > 0 || XCHAL_DTLB_ARF_WAYS > 0
	wsr.ptevaddr	a0
#endif

	// Debug initialization
	//
	// NOTE: DBREAKCn must be initialized before the combination of these two things:
	//       any load/store, and a lowering of PS.INTLEVEL below DEBUG_LEVEL.
	//       The processor already resets IBREAKENABLE appropriately.
	//
#if XCHAL_HAVE_DEBUG
# if XCHAL_NUM_DBREAK
# if XCHAL_NUM_DBREAK >= 2
	wsr.dbreakc1	a0
# endif
	wsr.dbreakc0	a0
	dsync			// wait for WSRs to DBREAKCn to complete
# endif

# if XCHAL_HW_MIN_VERSION < XTENSA_HWVERSION_RA_2004_1	/* pre-LX cores only */
	// Starting in Xtensa LX, ICOUNTLEVEL resets to zero (not 15), so no need to initialize it.
	// Prior to that we do, otherwise we get an ICOUNT exception, 2^32 instructions after reset.
	rsr.icountlevel	a2	// are we being debugged? (detected by ICOUNTLEVEL not 15, or dropped below 12)
	bltui	a2, 12, 1f	// if so, avoid initializing ICOUNTLEVEL which drops single-steps through here
	wsr.icountlevel	a0	// avoid ICOUNT exceptions
	isync			// wait for WSR to ICOUNTLEVEL to complete
1:
# endif
#endif

#endif /* !XCHAL_HAVE_FULL_RESET */

#if XCHAL_HAVE_ABSOLUTE_LITERALS
	// Technically, this only needs to be done under !FULL_RESET, assuming hard reset:
	wsr.litbase	a0
	rsync
#endif

#if XCHAL_HAVE_PSO_CDM && ! XCHAL_HAVE_PSO_FULL_RETENTION
	// If we're powering up from a temporary power shut-off (PSO),
	// restore state saved just prior to shut-off.  Note that the
	// MEMCTL register was already restored earlier, and as a side
	// effect, registers a3, a5, a7 are now preloaded with values
	// that we will use here.
	//	a3 - pointer to save area base address (_xtos_pso_savearea)
	//	a5 - saved state signature (CORE_STATE_SIGNATURE)
	//	a7 - contents of PWRSTAT register

	l32i	a4, a3, CS_SA_signature		// load save area signature
	sub	a4, a4, a5			// compare signature with expected one
# if XTOS_PSO_TEST
	movi	a7, PWRSTAT_WAKEUP_RESET	// pretend PSO warm start with warm caches
# endif
	bbci.l	a7, PWRSTAT_WAKEUP_RESET_SHIFT, 1f	// wakeup from PSO? (branch if not)
	// Yes, wakeup from PSO.  Check whether state was properly saved.
	addi	a5, a7, - PWRSTAT_WAKEUP_RESET	// speculatively clear PSO-wakeup bit
	movnez	a7, a5, a4			// if state not saved (corrupted?), mark as cold start
	bnez	a4, 1f				// if state not saved, just continue with reset
	// Wakeup from PSO with good signature.  Now check cache status:
	bbci.l	a7, PWRSTAT_CACHES_LOST_POWER_SHIFT, .Lpso_restore	// if caches warm, restore now
	// Caches got shutoff.  Continue reset, we'll end up initializing caches, and check again later for PSO.
# if XCHAL_HAVE_PRID && XCHAL_HAVE_S32C1I
	j	.Ldonesync	// skip reset sync, only done for cold start
# endif
1:	// Cold start.  (Not PSO wakeup.)  Proceed with normal full reset.
#endif

#if XCHAL_HAVE_PRID && XCHAL_HAVE_S32C1I
	/* Core 0 initializes the XMP synchronization variable, if present.  This operation needs to
	   happen as early as possible in the startup sequence so that the other cores can be released
	   from reset. */
	.weak	_ResetSync
	movi	a2, _ResetSync	// address of sync variable
	rsr.prid	a3	// core and multiprocessor ID
	extui	a3, a3, 0, 8	// extract core ID (FIXME: need proper constants for PRID bits to extract)
	beqz	a2, .Ldonesync	// skip if no sync variable
	bnez	a3, .Ldonesync	// only do this on core 0
	s32i	a0, a2, 0	// clear sync variable
.Ldonesync:
#endif
#if XCHAL_HAVE_EXTERN_REGS && XCHAL_HAVE_MP_RUNSTALL
	/* On core 0, this releases other cores.  On other cores this has no effect, because
	   runstall control is unconnected. */
	movi	a2, XER_MPSCORE
	wer	a0, a2
#endif

	/*
	 *  For processors with relocatable vectors, apply any alternate
	 *  vector base given to xt-genldscripts, which sets the
	 *  _memmap_vecbase_reset symbol accordingly.
	 */
#if XCHAL_HAVE_VECBASE
	movi	a2, _memmap_vecbase_reset	/* note: absolute symbol, not a ptr */
	wsr.vecbase	a2
#endif

#if XCHAL_HAVE_S32C1I && (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0)	/* have ATOMCTL ? */
# if XCHAL_DCACHE_IS_COHERENT
	movi	a3, 0x25	/* MX -- internal for writeback, RCW otherwise */
# else
	movi	a3, 0x15	/* non-MX -- always RCW */
# endif
	wsr.atomctl	a3
#endif

#if XCHAL_HAVE_INTERRUPTS && XCHAL_HAVE_DEBUG
	rsil	a2, 1		// lower PS.INTLEVEL here to make reset vector easier to debug
#endif

	/* If either of the caches does not have dynamic way support, then
	 * use the old (slow) method to init them.  If the cache is absent
	 * the macros will expand to empty.
	 */
#if ! XCHAL_HAVE_ICACHE_DYN_WAYS
	icache_reset	a2, a3
#endif
#if ! XCHAL_HAVE_DCACHE_DYN_WAYS
	dcache_reset	a2, a3
#endif

#if XCHAL_HAVE_PSO_CDM && ! XCHAL_HAVE_PSO_FULL_RETENTION
	// Here, a7 still contains status from the power status register,
	// or zero if signature check failed.
	bbci.l	a7, PWRSTAT_WAKEUP_RESET_SHIFT, .Lcoldstart	// wakeup from PSO with good signature?
	// Yes, wakeup from PSO.  Caches had been powered down, now are initialized.
.Lpso_restore:
	// Assume memory still initialized, so all code still unpacked etc.
	// So we can just jump/call to relevant state restore code (wherever located).
	movi	a2, 0			// make shutoff routine return zero
	movi	a3, _xtos_pso_savearea
	// Here, as below for _start, call0 is used as an unlimited-range jump.
	call0	_xtos_core_restore_nw
	// (does not return)
.Lcoldstart:
#endif

#if XCHAL_HAVE_PREFETCH
	/* Enable cache prefetch if present. */
	// NOTE(review): 68 is a magic PREFCTL value -- presumably a prefetch
	// aggressiveness setting; confirm against the PREFCTL description in
	// the Xtensa ISA reference for this core before changing.
	movi.n	a2, 68
	wsr	a2, PREFCTL
#endif

	/*
	 *  Now setup the memory attributes.  On some cores this "enables" caches.
	 *  We do this ahead of unpacking, so it can proceed more efficiently.
	 *
	 *  The _memmap_cacheattr_reset symbol's value (address) is defined
	 *  by the LSP's linker script, as generated by xt-genldscripts.
	 *  It defines 4-bit attributes for eight 512MB regions.
	 *
	 *  (NOTE:  for cores with the older MMU v1 or v2, or without any memory
	 *   protection mechanism, the following code has no effect.)
	 */
#if XCHAL_HAVE_MPU
	// If there is a user-provided MPU table, then we will program the MPU
	// with it now.  Can't call xthal_write_map_raw() because code sections
	// haven't been unpacked yet.  For romable images, the MPU table values
	// and the table size must reside in a section that does not need to be
	// unpacked (.ResetHandler.text or .srom.text).
	// NOTE: This will set CACHEADRDIS to all zeros, because computing a
	// useful nonzero value from the user settings is too complex and slow
	// to implement here.

	.weak __xt_mpu_init_table	// Table of MPU entries
	.weak __xt_mpu_init_table_size	// Number of entries in table

	movi	a2, __xt_mpu_init_table		// non-zero if user defined
	movi	a3, __xt_mpu_init_table_size	// non-zero if user defined
	beqz	a2, .Lno_user_mpu
	beqz	a3, .Lno_user_mpu
	l32i	a3, a3, 0
	beqz	a3, .Lno_user_mpu		// skip if size = 0
	mpu_write_map	a2, a3, a12, a13, a14, a15
	j	.Lno_default_mpu

.Lno_user_mpu:
	/* If there's an empty background map, setup foreground maps to mimic region protection: */
# if XCHAL_MPU_ENTRIES >= 8 && XCHAL_MPU_BACKGROUND_ENTRIES <= 2
	// We assume reset state: all MPU entries zeroed and disabled.
	// Otherwise we'd need a loop to zero everything.

	movi	a2, _memmap_cacheattr_reset	// note: absolute symbol, not a ptr
	movi	a3, _xtos_mpu_attribs		// see literal area at start of reset vector
	movi	a4, 0x20000000			// 512 MB delta
	movi	a6, 8
	movi	a7, 1				// MPU entry vaddr 0, with valid bit set
	movi	a9, 0				// cacheadrdis value
	wsr.cacheadrdis a9			// enable everything temporarily while MPU updates

	// Write eight MPU entries, from the last one going backwards (entries n-1 thru n-8)
	//
2:	extui	a8, a2, 28, 4		// get next attribute nibble (msb first)
	extui	a5, a8, 0, 2		// lower two bits indicate whether cached
	slli	a9, a9, 1		// add a bit to cacheadrdis...
	addi	a10, a9, 1		// set that new bit if...
	moveqz	a9, a10, a5		// ... that region is non-cacheable
	addx4	a5, a8, a3		// index into _xtos_mpu_attribs table
	addi	a8, a8, -5		// make valid attrib indices negative
	movgez	a5, a3, a8		// if not valid attrib, use Illegal
	l32i	a5, a5, 0		// load access rights, memtype from table entry
	slli	a2, a2, 4
	sub	a7, a7, a4		// next 512MB region (last to first)
	addi	a6, a6, -1
	add	a5, a5, a6		// add the index
	wptlb	a5, a7			// write the MPU entry
	bnez	a6, 2b			// loop until done
# else
	movi	a9, XCHAL_MPU_BG_CACHEADRDIS	// default value of CACHEADRDIS for bgnd map
# endif
	wsr.cacheadrdis a9			// update cacheadrdis
.Lno_default_mpu:
#elif XCHAL_HAVE_CACHEATTR || XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR \
	|| (XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY)
	movi	a2, _memmap_cacheattr_reset	/* note: absolute symbol, not a ptr */
	cacheattr_set				/* set CACHEATTR from a2 (clobbers a3-a8) */
#endif

	/* Now that caches are initialized, cache coherency can be enabled. */
#if XCHAL_DCACHE_IS_COHERENT
# if XCHAL_HAVE_EXTERN_REGS && XCHAL_HAVE_MX && (XCHAL_HW_MIN_VERSION < XTENSA_HWVERSION_RE_2012_0)
	/* Opt into coherence for MX (for backward compatibility / testing). */
	movi	a3, 1
	movi	a2, XER_CCON
	wer	a3, a2
# endif
#endif

	/* Enable zero-overhead loop instr buffer and snoop responses if configured. */
	/* If HW erratum 453 fix is to be applied then disable loop instr buffer. */
#if XCHAL_USE_MEMCTL && (XCHAL_SNOOP_LB_MEMCTL_DEFAULT || XCHAL_ERRATUM_453)
	rsr.memctl	a2
#if XCHAL_SNOOP_LB_MEMCTL_DEFAULT
	movi	a3, XCHAL_SNOOP_LB_MEMCTL_DEFAULT
	or	a2, a2, a3
#endif
#if XCHAL_ERRATUM_453
	srli	a2, a2, 1	/* clear bit 0 (ZOL buffer enable) */
	slli	a2, a2, 1
#endif
	wsr.memctl	a2
#endif

	/* Caches are all up and running, clear PWRCTL.ShutProcOffOnPWait. */
#if XCHAL_HAVE_PSO_CDM
	movi	a2, XDM_MISC_PWRCTL
	movi	a4, ~PWRCTL_CORE_SHUTOFF
	rer	a3, a2
	and	a3, a3, a4
	wer	a3, a2
#endif

#endif /* !XCHAL_HAVE_HALT */

	/*
	 *  At this point we can unpack code and data (e.g. copy segments from
	 *  ROM to RAM, vectors into their proper location, etc.).  However,
	 *
	 *  1) the destination of the unpack may require some setup,
	 *     for instance a DDR controller may need to be initialized
	 *     and enabled before anything is unpacked into DDR.
	 *  2) users may wish to provide their own unpack code which works
	 *     faster or in a different way than the default unpack code.
	 *
	 *  To support such uses, we provide a user hook at this point.
	 *  If the user hook function is defined, then it is called from
	 *  here, and its return value (in a2) is checked.  If the return
	 *  value is non-zero, then we assume that code unpacking has been
	 *  completed.  The user hook function must be written in assembly
	 *  and should make minimal assumptions about system state.
	 */

	.weak	__reset_user_init

	movi	a2, __reset_user_init
	beqz	a2, 1f			// no user hook
	callx0	a2			// execute user hook
	movi	a0, 0			// ensure a0 continues to hold 0
	bnez	a2, unpackdone		// if a2 != 0 then unpack is done
1:

#if defined(XTOS_UNPACK)
	movi	a2, _rom_store_table
	beqz	a2, unpackdone
unpack:	l32i	a3, a2, 0	// start vaddr
	l32i	a4, a2, 4	// end vaddr
	l32i	a5, a2, 8	// store vaddr
	addi	a2, a2, 12
	bgeu	a3, a4, upnext	// skip unless start < end
uploop:	l32i	a6, a5, 0
	addi	a5, a5, 4
	s32i	a6, a3, 0
	addi	a3, a3, 4
	bltu	a3, a4, uploop
	j	unpack
upnext:	bnez	a3, unpack
	bnez	a5, unpack
#endif /* XTOS_UNPACK */

unpackdone:

#if defined(XTOS_UNPACK) || defined(XTOS_MP)
	/*
	 *  If writeback caches are configured and enabled, unpacked data must be
	 *  written out to memory before trying to execute it:
	 */
	dcache_writeback_all	a2, a3, a4, 0
	icache_sync		a2	// ensure data written back is visible to i-fetch
	/*
	 *  Note:  no need to invalidate the i-cache after the above, because we
	 *  already invalidated it further above and did not execute anything within
	 *  unpacked regions afterwards.  [Strictly speaking, if an unpacked region
	 *  follows this code very closely, it's possible for cache-ahead to have
	 *  cached a bit of that unpacked region, so in the future we may need to
	 *  invalidate the entire i-cache here again anyway.]
	 */
#endif


#if !XCHAL_HAVE_HALT	/* skip for TX */

	/*
	 *  Now that we know the .lit4 section is present (if got unpacked)
	 *  (and if absolute literals are used), initialize LITBASE to use it.
	 */
#if XCHAL_HAVE_ABSOLUTE_LITERALS && XSHAL_USE_ABSOLUTE_LITERALS
	/*
	 *  Switch from PC-relative to absolute (litbase-relative) L32R mode.
	 *  Set LITBASE to 256 kB beyond the start of the literals in .lit4
	 *  (aligns to the nearest 4 kB boundary, LITBASE does not have bits 1..11)
	 *  and set the enable bit (_lit4_start is assumed 4-byte aligned).
	 */
	movi	a2, _lit4_start + 0x40001
	wsr.litbase	a2
	rsync
#endif /* have and use absolute literals */
	.end	no-absolute-literals	// we can now start using absolute literals


// Technically, this only needs to be done pre-LX2, assuming hard reset:
# if XCHAL_HAVE_WINDOWED && defined(__XTENSA_WINDOWED_ABI__)
	// Windowed register init, so we can call windowed code (eg. C code).
	movi	a1, 1
	wsr.windowstart	a1
	// The processor always clears WINDOWBASE at reset, so no need to clear it here.
	// It resets WINDOWSTART to 1 starting with LX2.0/X7.0 (RB-2006.0).
	// However, assuming hard reset is not yet always practical, so do this anyway:
	wsr.windowbase	a0
	rsync
	movi	a0, 0			// possibly a different a0, clear it
# endif

#if XCHAL_HW_MIN_VERSION < XTENSA_HWVERSION_RB_2006_0	/* only pre-LX2 needs this */
	// Coprocessor option initialization
# if XCHAL_HAVE_CP
	//movi	a2, XCHAL_CP_MASK	// enable existing CPs
	// To allow creating new coprocessors using TC that are not known
	// at GUI build time without having to explicitly enable them,
	// all CPENABLE bits must be set, even though they may not always
	// correspond to a coprocessor.
	movi	a2, 0xFF	// enable *all* bits, to allow dynamic TIE
	wsr.cpenable	a2
# endif

	// Floating point coprocessor option initialization (at least
	// rounding mode, so that floating point ops give predictable results)
# if XCHAL_HAVE_FP && !XCHAL_HAVE_VECTORFPU2005
	rsync		/* wait for WSR to CPENABLE to complete before accessing FP coproc state */
	wur.fcr	a0	/* clear FCR (default rounding mode, round-nearest) */
	wur.fsr	a0	/* clear FSR */
# endif
#endif /* pre-LX2 */


	// Initialize memory error handler address.
	// Putting this address in a register allows multiple instances of
	// the same configured core (with separate program images but shared
	// code memory, thus forcing memory error vector to be shared given
	// it is not VECBASE relative) to have the same memory error vector,
	// yet each have their own handler and associated data save area.
#if XCHAL_HAVE_MEM_ECC_PARITY_IGNORE
	movi	a4, _MemErrorHandler
	wsr.mesave	a4
#endif


	/*
	 *  Initialize medium and high priority interrupt dispatchers:
	 */
#if HAVE_XSR && (XCHAL_HAVE_XEA1 || XCHAL_HAVE_XEA2)

#if !CONFIG_XT_BOOT_LOADER || CONFIG_VM_ROM
# ifndef XCHAL_DEBUGLEVEL		/* debug option not selected? */
# define XCHAL_DEBUGLEVEL	99	/* bogus value outside 2..6 */
# endif

	.macro	init_vector	level
	  .if GREATERTHAN(XCHAL_NUM_INTLEVELS+1,\level)
	    .if XCHAL_DEBUGLEVEL-\level
	      .weak	_Level&level&FromVector
	      movi	a4, _Level&level&FromVector
	      writesr	excsave \level a4
	      .if GREATERTHAN(\level,XCHAL_EXCM_LEVEL)
		movi	a5, _Pri_&level&_HandlerAddress
		s32i	a4, a5, 0
		/*  If user provides their own handler, that handler might
		 *  not provide its own _Pri_<n>_HandlerAddress variable for
		 *  linking handlers.  In that case, the reference below
		 *  would pull in the XTOS handler anyway, causing a conflict.
		 *  To avoid that, provide a weak version of it here:
		 */
		.pushsection .data, "aw"
		.global	_Pri_&level&_HandlerAddress
		.weak	_Pri_&level&_HandlerAddress
		.align	4
_Pri_&level&_HandlerAddress:	.space	4
		.popsection
	      .endif
	    .endif
	  .endif
	.endm

	init_vector	2
	init_vector	3
	init_vector	4
	init_vector	5
	init_vector	6
#endif
#endif /*HAVE_XSR*/


	/*
	 *  Complete reset initialization outside the vector,
	 *  to avoid requiring a vector that is larger than necessary.
	 *  This 2nd-stage startup code sets up the C Run-Time (CRT) and calls main().
	 *
	 *  Here we use call0 not because we expect any return, but
	 *  because the assembler/linker dynamically sizes call0 as
	 *  needed (with -mlongcalls) which it doesn't with j or jx.
	 *  Note:  This needs to be call0 regardless of the selected ABI.
	 */

#if CONFIG_XT_BOOT_LOADER && !CONFIG_VM_ROM
	movi	a0, SOF_TEXT_BASE
	callx0	a0
#else
	call0	_start		// jump to _start (in crt1-*.S)
#endif
	/* does not return */

#else /* XCHAL_HAVE_HALT */

	j	_start		// jump to _start (in crt1-*.S)
				// (TX has max 64kB IRAM, so J always in range)

	// Paranoia -- double-check requirements / assumptions of this Xtensa TX code:
# if !defined(__XTENSA_CALL0_ABI__) || !XCHAL_HAVE_FULL_RESET || XCHAL_HAVE_INTERRUPTS || XCHAL_HAVE_CCOUNT || XCHAL_DTLB_ARF_WAYS || XCHAL_HAVE_DEBUG || XCHAL_HAVE_S32C1I || XCHAL_HAVE_ABSOLUTE_LITERALS || XCHAL_DCACHE_SIZE || XCHAL_ICACHE_SIZE || XCHAL_HAVE_PIF || XCHAL_HAVE_WINDOWED
# error "Halt architecture (Xtensa TX) requires: call0 ABI, all flops reset, no exceptions or interrupts, no TLBs, no debug, no S32C1I, no LITBASE, no cache, no PIF, no windowed regs"
# endif

#endif /* XCHAL_HAVE_HALT */


#if (!XCHAL_HAVE_HALT || defined(XTOS_UNPACK)) && XCHAL_HAVE_IMEM_LOADSTORE
	.size	_ResetHandler, . - _ResetHandler
#else
	.size	_ResetVector, . - _ResetVector
#endif

	.text
	.global	xthals_hw_configid0, xthals_hw_configid1
	.global	xthals_release_major, xthals_release_minor
	.end	literal_prefix