/*
 * Copyright (c) 2013-2022, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <platform_def.h>

#include <arch.h>
#include <asm_macros.S>
#include <bl31/ea_handle.h>
#include <bl31/interrupt_mgmt.h>
#include <common/runtime_svc.h>
#include <context.h>
#include <el3_common_macros.S>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/smccc.h>

	.globl	runtime_exceptions

	.globl	sync_exception_sp_el0
	.globl	irq_sp_el0
	.globl	fiq_sp_el0
	.globl	serror_sp_el0

	.globl	sync_exception_sp_elx
	.globl	irq_sp_elx
	.globl	fiq_sp_elx
	.globl	serror_sp_elx

	.globl	sync_exception_aarch64
	.globl	irq_aarch64
	.globl	fiq_aarch64
	.globl	serror_aarch64

	.globl	sync_exception_aarch32
	.globl	irq_aarch32
	.globl	fiq_aarch32
	.globl	serror_aarch32

	/*
	 * Macro that prepares entry to EL3 upon taking an exception.
	 *
	 * With RAS_EXTENSION, this macro synchronizes pending errors with an
	 * ESB instruction. When an error is thus synchronized, the handling is
	 * delegated to the platform EA handler.
	 *
	 * Without RAS_EXTENSION, this macro synchronizes pending errors using
	 * a DSB, unmasks Asynchronous External Aborts and saves X30 before
	 * setting the flag CTX_IS_IN_EL3.
	 */
	.macro check_and_unmask_ea
#if RAS_EXTENSION
	/* Synchronize pending External Aborts */
	esb

	/* Unmask the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	/*
	 * Explicitly save x30 so as to free up a register and to enable
	 * branching
	 */
	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]

	/* Check for SErrors synchronized by the ESB instruction */
	mrs	x30, DISR_EL1
	tbz	x30, #DISR_A_BIT, 1f

	/*
	 * Save general purpose and ARMv8.3-PAuth registers (if enabled).
	 * If the Secure Cycle Counter is not disabled in MDCR_EL3 when
	 * ARMv8.5-PMU is implemented, save PMCR_EL0 and disable the Cycle
	 * Counter. Also set the PSTATE to a known state.
	 */
	bl	prepare_el3_entry

	bl	handle_lower_el_ea_esb

	/* Restore general purpose, PMCR_EL0 and ARMv8.3-PAuth registers */
	bl	restore_gp_pmcr_pauth_regs
1:
#else
	/*
	 * For SoCs which do not implement RAS, use DSB as a barrier to
	 * synchronize pending external aborts.
	 */
	dsb	sy

	/* Unmask the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	/* Use ISB for the above unmask operation to take effect immediately */
	isb

	/*
	 * Refer to Note 1. There is no need to restore X30, as both the
	 * handle_sync_exception and handle_interrupt_exception macros that
	 * follow this macro modify X30 anyway.
	 */
	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	mov	x30, #1
	str	x30, [sp, #CTX_EL3STATE_OFFSET + CTX_IS_IN_EL3]
	dmb	sy
#endif
	.endm

#if !RAS_EXTENSION
	/*
	 * Note 1: The explicit DSB at the entry of various exception vectors
	 * for handling exceptions from lower ELs can inadvertently trigger an
	 * SError exception in EL3 due to pending asynchronous aborts in lower
	 * ELs. This will end up being handled by serror_sp_elx, which will
	 * ultimately panic and die.
	 * The way to work around this is to update a flag to indicate whether
	 * the exception truly came from EL3. This flag is allocated in the
	 * cpu_context structure and is located at offset
	 * "CTX_EL3STATE_OFFSET + CTX_IS_IN_EL3".
	 * This is not a bulletproof solution to the problem at hand because
	 * we assume that the instructions following the "isb" that help to
	 * update the flag execute without causing further exceptions.
	 */
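
	/*
	 * A rough sketch of the CTX_IS_IN_EL3 protocol, inferred from the
	 * code in this file (illustrative only, not an authoritative
	 * specification):
	 *
	 *   Lower-EL vector entry:  dsb sy; unmask SError; isb;
	 *                           CTX_IS_IN_EL3 := 1; dmb sy
	 *   serror_sp_elx:          flag == 1 -> the SError was genuinely
	 *                           taken in EL3; fall through to
	 *                           plat_handle_el3_ea.
	 *                           flag == 0 -> a pending lower-EL abort was
	 *                           surfaced by the DSB; delegate to
	 *                           handle_lower_el_async_ea.
	 *
	 * The exception return path is expected to clear the flag again
	 * before the ERET to a lower EL.
	 */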

	/* ---------------------------------------------------------------------
	 * This macro handles Asynchronous External Aborts.
	 * ---------------------------------------------------------------------
	 */
	.macro	handle_async_ea
	/*
	 * Use a barrier to synchronize pending external aborts.
	 */
	dsb	sy

	/* Unmask the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	/* Use ISB for the above unmask operation to take effect immediately */
	isb

	/* Refer to Note 1 */
	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	mov	x30, #1
	str	x30, [sp, #CTX_EL3STATE_OFFSET + CTX_IS_IN_EL3]
	dmb	sy

	b	handle_lower_el_async_ea
	.endm

	/*
	 * This macro checks whether the exception was taken due to an SError
	 * in EL3 or because of pending asynchronous external aborts from a
	 * lower EL that were triggered by the explicit synchronization in EL3.
	 * Refer to Note 1.
	 */
	.macro check_if_serror_from_EL3
	/* Assumes SP_EL3 on entry */
	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	ldr	x30, [sp, #CTX_EL3STATE_OFFSET + CTX_IS_IN_EL3]
	cbnz	x30, exp_from_EL3

	/* Handle the asynchronous external abort from the lower EL */
	b	handle_lower_el_async_ea

exp_from_EL3:
	/* Fall through to plat_handle_el3_ea, which does not return */
	.endm
#endif

	/* ---------------------------------------------------------------------
	 * This macro handles Synchronous exceptions.
	 * Only SMC exceptions are supported.
	 * ---------------------------------------------------------------------
	 */
	.macro	handle_sync_exception
#if ENABLE_RUNTIME_INSTRUMENTATION
	/*
	 * Read the timestamp value and store it in per-cpu data. The value
	 * will be extracted from per-cpu data by the C level SMC handler and
	 * saved to the PMF timestamp region.
	 */
	mrs	x30, cntpct_el0
	str	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
	mrs	x29, tpidr_el3
	str	x30, [x29, #CPU_DATA_PMF_TS0_OFFSET]
	ldr	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
#endif

	mrs	x30, esr_el3
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH

	/* Handle SMC exceptions separately from other synchronous exceptions */
	cmp	x30, #EC_AARCH32_SMC
	b.eq	smc_handler32

	cmp	x30, #EC_AARCH64_SMC
	b.eq	smc_handler64

	/* Synchronous exceptions other than the above are assumed to be EA */
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	b	enter_lower_el_sync_ea
	.endm


	/* ---------------------------------------------------------------------
	 * This macro handles FIQ or IRQ interrupts i.e. EL3, S-EL1 and NS
	 * interrupts.
	 * ---------------------------------------------------------------------
	 */
	.macro	handle_interrupt_exception label

	/*
	 * Save general purpose and ARMv8.3-PAuth registers (if enabled).
	 * If the Secure Cycle Counter is not disabled in MDCR_EL3 when
	 * ARMv8.5-PMU is implemented, save PMCR_EL0 and disable the Cycle
	 * Counter. Also set the PSTATE to a known state.
	 */
	bl	prepare_el3_entry

#if ENABLE_PAUTH
	/* Load and program APIAKey firmware key */
	bl	pauth_load_bl31_apiakey
#endif

	/* Save the EL3 system registers needed to return from this exception */
	mrs	x0, spsr_el3
	mrs	x1, elr_el3
	stp	x0, x1, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]

	/* Switch to the runtime stack i.e. SP_EL0 */
	ldr	x2, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
	mov	x20, sp
	msr	spsel, #MODE_SP_EL0
	mov	sp, x2

	/*
	 * Find out whether this is a valid interrupt type.
	 * If the interrupt controller reports a spurious interrupt then return
	 * to where we came from.
	 */
	bl	plat_ic_get_pending_interrupt_type
	cmp	x0, #INTR_TYPE_INVAL
	b.eq	interrupt_exit_\label

	/*
	 * Get the registered handler for this interrupt type.
	 * A NULL return value could be because of one of the following
	 * conditions:
	 *
	 * a. An interrupt of a type was routed correctly but a handler for its
	 *    type was not registered.
	 *
	 * b. An interrupt of a type was not routed correctly so a handler for
	 *    its type was not registered.
	 *
	 * c. An interrupt of a type was routed correctly to EL3, but was
	 *    deasserted before its pending state could be read. Another
	 *    interrupt of a different type pended at the same time and its
	 *    type was reported as pending instead. However, a handler for this
	 *    type was not registered.
	 *
	 * a. and b. can only happen due to a programming error. The
	 * occurrence of c. could be beyond the control of Trusted Firmware.
	 * It makes sense to return from this exception instead of reporting an
	 * error.
	 */
	bl	get_interrupt_type_handler
	cbz	x0, interrupt_exit_\label
	mov	x21, x0

	mov	x0, #INTR_ID_UNAVAILABLE

	/* Set the current security state in the 'flags' parameter */
	mrs	x2, scr_el3
	ubfx	x1, x2, #0, #1

	/* Restore the reference to the 'handle' i.e. SP_EL3 */
	mov	x2, x20

	/* x3 will point to a cookie (not used now) */
	mov	x3, xzr

	/* Call the interrupt type handler */
	blr	x21

interrupt_exit_\label:
	/* Return from exception, possibly in a different security state */
	b	el3_exit

	.endm
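
	/*
	 * For reference, the registered handler invoked above is expected to
	 * match the interrupt_type_handler_t signature declared in
	 * include/bl31/interrupt_mgmt.h (shown here as a sketch; the header
	 * is authoritative):
	 *
	 *   uint64_t handler(uint32_t id,     // x0: INTR_ID_UNAVAILABLE here
	 *                    uint32_t flags,  // x1: bit[0] = SCR_EL3.NS
	 *                    void *handle,    // x2: cpu_context, i.e. SP_EL3
	 *                    void *cookie);   // x3: unused, NULL
	 */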

vector_base runtime_exceptions
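
	/*
	 * Layout note (architectural background, for orientation): the
	 * AArch64 vector table consists of four groups of four entries, each
	 * entry 0x80 bytes long, giving the 0x800-byte table mapped out by
	 * the address ranges in the comments below. The vector_entry and
	 * end_vector_entry macros are expected to check that each handler
	 * fits within its 0x80-byte slot.
	 */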
	/* ---------------------------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x200
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_el0
#ifdef MONITOR_TRAPS
	stp	x29, x30, [sp, #-16]!

	mrs	x30, esr_el3
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH

	/* Check for BRK */
	cmp	x30, #EC_BRK
	b.eq	brk_handler

	ldp	x29, x30, [sp], #16
#endif /* MONITOR_TRAPS */

	/* We don't expect any synchronous exceptions from EL3 */
	b	report_unhandled_exception
end_vector_entry sync_exception_sp_el0

vector_entry irq_sp_el0
	/*
	 * EL3 code is non-reentrant. Any asynchronous exception is a serious
	 * error. Loop infinitely.
	 */
	b	report_unhandled_interrupt
end_vector_entry irq_sp_el0

vector_entry fiq_sp_el0
	b	report_unhandled_interrupt
end_vector_entry fiq_sp_el0

vector_entry serror_sp_el0
	no_ret	plat_handle_el3_ea
end_vector_entry serror_sp_el0

	/* ---------------------------------------------------------------------
	 * Current EL with SP_ELx: 0x200 - 0x400
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_elx
	/*
	 * This exception will trigger if anything went wrong during a previous
	 * exception entry or exit or while handling an earlier unexpected
	 * synchronous exception. There is a high probability that SP_EL3 is
	 * corrupted.
	 */
	b	report_unhandled_exception
end_vector_entry sync_exception_sp_elx

vector_entry irq_sp_elx
	b	report_unhandled_interrupt
end_vector_entry irq_sp_elx

vector_entry fiq_sp_elx
	b	report_unhandled_interrupt
end_vector_entry fiq_sp_elx

vector_entry serror_sp_elx
#if !RAS_EXTENSION
	check_if_serror_from_EL3
#endif
	no_ret	plat_handle_el3_ea
end_vector_entry serror_sp_elx

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x600
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch64
	/*
	 * Most commonly, this exception vector will be the entry point for
	 * SMCs and for traps that are not handled at lower ELs. SP_EL3 should
	 * point to a valid cpu context where the general purpose and system
	 * register state can be saved.
	 */
	apply_at_speculative_wa
	check_and_unmask_ea
	handle_sync_exception
end_vector_entry sync_exception_aarch64

vector_entry irq_aarch64
	apply_at_speculative_wa
	check_and_unmask_ea
	handle_interrupt_exception irq_aarch64
end_vector_entry irq_aarch64

vector_entry fiq_aarch64
	apply_at_speculative_wa
	check_and_unmask_ea
	handle_interrupt_exception fiq_aarch64
end_vector_entry fiq_aarch64

vector_entry serror_aarch64
	apply_at_speculative_wa
#if RAS_EXTENSION
	msr	daifclr, #DAIF_ABT_BIT
	b	enter_lower_el_async_ea
#else
	handle_async_ea
#endif
end_vector_entry serror_aarch64

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x800
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch32
	/*
	 * Most commonly, this exception vector will be the entry point for
	 * SMCs and for traps that are not handled at lower ELs. SP_EL3 should
	 * point to a valid cpu context where the general purpose and system
	 * register state can be saved.
	 */
	apply_at_speculative_wa
	check_and_unmask_ea
	handle_sync_exception
end_vector_entry sync_exception_aarch32

vector_entry irq_aarch32
	apply_at_speculative_wa
	check_and_unmask_ea
	handle_interrupt_exception irq_aarch32
end_vector_entry irq_aarch32

vector_entry fiq_aarch32
	apply_at_speculative_wa
	check_and_unmask_ea
	handle_interrupt_exception fiq_aarch32
end_vector_entry fiq_aarch32

vector_entry serror_aarch32
	apply_at_speculative_wa
#if RAS_EXTENSION
	msr	daifclr, #DAIF_ABT_BIT
	b	enter_lower_el_async_ea
#else
	handle_async_ea
#endif
end_vector_entry serror_aarch32

#ifdef MONITOR_TRAPS
	.section .rodata.brk_string, "aS"
brk_location:
	.asciz "Error at instruction 0x"
brk_message:
	.asciz "Unexpected BRK instruction with value 0x"
#endif /* MONITOR_TRAPS */

	/* ---------------------------------------------------------------------
	 * The following code handles secure monitor calls.
	 * Depending upon the execution state from which the SMC has been
	 * invoked, it frees some general purpose registers to perform the
	 * remaining tasks. These involve finding the runtime service handler
	 * that is the target of the SMC and switching to the runtime stack
	 * (SP_EL0) before calling the handler.
	 *
	 * Note that x30 has been explicitly saved and can be used here.
	 * ---------------------------------------------------------------------
	 */
func smc_handler
smc_handler32:
	/* Check whether an AArch32 caller issued an SMC64 */
	tbnz	x0, #FUNCID_CC_SHIFT, smc_prohibited

smc_handler64:
	/* NOTE: The code below must preserve x0-x4 */

	/*
	 * Save general purpose and ARMv8.3-PAuth registers (if enabled).
	 * If the Secure Cycle Counter is not disabled in MDCR_EL3 when
	 * ARMv8.5-PMU is implemented, save PMCR_EL0 and disable the Cycle
	 * Counter. Also set the PSTATE to a known state.
	 */
	bl	prepare_el3_entry

#if ENABLE_PAUTH
	/* Load and program APIAKey firmware key */
	bl	pauth_load_bl31_apiakey
#endif

	/*
	 * Populate the parameters for the SMC handler.
	 * We already have x0-x4 in place. x5 will point to a cookie (not used
	 * now). x6 will point to the context structure (SP_EL3) and x7 will
	 * contain flags we need to pass to the handler.
	 */
	mov	x5, xzr
	mov	x6, sp

	/*
	 * Restore the saved C runtime stack value which will become the new
	 * SP_EL0 i.e. the EL3 runtime stack. It was saved in the 'cpu_context'
	 * structure prior to the last ERET from EL3.
	 */
	ldr	x12, [x6, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]

	/* Switch to SP_EL0 */
	msr	spsel, #MODE_SP_EL0

	/*
	 * Save SPSR_EL3, ELR_EL3 and SCR_EL3 in case there is a world switch
	 * during SMC handling.
	 * TODO: Revisit if all system registers can be saved later.
	 */
	mrs	x16, spsr_el3
	mrs	x17, elr_el3
	mrs	x18, scr_el3
	stp	x16, x17, [x6, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
	str	x18, [x6, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]

	/* Clear the flags register */
	mov	x7, xzr

#if ENABLE_RME
	/* Copy the SCR_EL3.NSE bit to the flags to indicate the caller's security */
	ubfx	x7, x18, #SCR_NSE_SHIFT, #1

	/*
	 * Shift the copied SCR_EL3.NSE bit left by 5 to create space for the
	 * SCR_EL3.NS bit. Bit 5 of the flags corresponds to the SCR_EL3.NSE
	 * bit.
	 */
	lsl	x7, x7, #5
#endif /* ENABLE_RME */

	/* Copy the SCR_EL3.NS bit to the flags to indicate the caller's security */
	bfi	x7, x18, #0, #1
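
	/*
	 * Illustrative summary of the resulting flags encoding under
	 * ENABLE_RME, assuming the usual FEAT_RME interpretation of the
	 * SCR_EL3.{NSE, NS} pair (illustrative only; the code consuming the
	 * flags is authoritative):
	 *
	 *   World       NSE  NS    flags (bit5 = NSE, bit0 = NS)
	 *   Secure       0    0    0b000000
	 *   Non-secure   0    1    0b000001
	 *   Realm        1    1    0b100001
	 */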

	mov	sp, x12

	/* Get the unique owning entity number */
	ubfx	x16, x0, #FUNCID_OEN_SHIFT, #FUNCID_OEN_WIDTH
	ubfx	x15, x0, #FUNCID_TYPE_SHIFT, #FUNCID_TYPE_WIDTH
	orr	x16, x16, x15, lsl #FUNCID_OEN_WIDTH
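
	/*
	 * Worked example (for illustration only): for the SMC64 fast call
	 * PSCI_CPU_ON, function ID 0xC4000003, the OEN field (bits [29:24])
	 * yields 4 and the call type field (bit [31]) yields 1, so
	 * x16 = 4 | (1 << FUNCID_OEN_WIDTH) = 0x44, the index used for the
	 * descriptor lookup below.
	 */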

	/* Load the descriptor index from the array of indices */
	adrp	x14, rt_svc_descs_indices
	add	x14, x14, :lo12:rt_svc_descs_indices
	ldrb	w15, [x14, x16]

	/* Any index greater than 127 is invalid. Check bit 7. */
	tbnz	w15, 7, smc_unknown

	/*
	 * Get the descriptor using the index
	 * x11 = (base + off), w15 = index
	 *
	 * handler = (base + off) + (index << log2(size))
	 */
	adr	x11, (__RT_SVC_DESCS_START__ + RT_SVC_DESC_HANDLE)
	lsl	w10, w15, #RT_SVC_SIZE_LOG2
	ldr	x15, [x11, w10, uxtw]

	/*
	 * Call the Secure Monitor Call handler and then drop directly into
	 * el3_exit() which will program any remaining architectural state
	 * prior to issuing the ERET to the desired lower EL.
	 */
#if DEBUG
	cbz	x15, rt_svc_fw_critical_error
#endif
	blr	x15

	b	el3_exit

smc_unknown:
	/*
	 * Unknown SMC call. Populate the return value with SMC_UNK and call
	 * el3_exit(), which will restore the remaining architectural state,
	 * i.e. the SYS, GP and PAuth registers (if any), prior to issuing the
	 * ERET to the desired lower EL.
	 */
	mov	x0, #SMC_UNK
	str	x0, [x6, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	b	el3_exit

smc_prohibited:
	restore_ptw_el1_sys_regs
	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	mov	x0, #SMC_UNK
	exception_return

#if DEBUG
rt_svc_fw_critical_error:
	/* Switch to SP_ELx */
	msr	spsel, #MODE_SP_ELX
	no_ret	report_unhandled_exception
#endif
endfunc smc_handler

	/* ---------------------------------------------------------------------
	 * The following code handles exceptions caused by BRK instructions.
	 * Following a BRK instruction, the only real valid course of action is
	 * to print some information and panic, as the code that caused it is
	 * likely in an inconsistent internal state.
	 *
	 * This is initially intended to be used in conjunction with
	 * __builtin_trap.
	 * ---------------------------------------------------------------------
	 */
#ifdef MONITOR_TRAPS
func brk_handler
	/* Extract the ISS */
	mrs	x10, esr_el3
	ubfx	x10, x10, #ESR_ISS_SHIFT, #ESR_ISS_LENGTH

	/* Ensure the console is initialized */
	bl	plat_crash_console_init

	adr	x4, brk_location
	bl	asm_print_str
	mrs	x4, elr_el3
	bl	asm_print_hex
	bl	asm_print_newline

	adr	x4, brk_message
	bl	asm_print_str
	mov	x4, x10
	mov	x5, #28
	bl	asm_print_hex_bits
	bl	asm_print_newline

	no_ret	plat_panic_handler
endfunc brk_handler
#endif /* MONITOR_TRAPS */