/*
 * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <platform_def.h>

#include <arch.h>
#include <asm_macros.S>
#include <bl31/ea_handle.h>
#include <bl31/interrupt_mgmt.h>
#include <bl31/sync_handle.h>
#include <common/runtime_svc.h>
#include <context.h>
#include <cpu_macros.S>
#include <el3_common_macros.S>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/smccc.h>

	.globl	runtime_exceptions

	.globl	sync_exception_sp_el0
	.globl	irq_sp_el0
	.globl	fiq_sp_el0
	.globl	serror_sp_el0

	.globl	sync_exception_sp_elx
	.globl	irq_sp_elx
	.globl	fiq_sp_elx
	.globl	serror_sp_elx

	.globl	sync_exception_aarch64
	.globl	irq_aarch64
	.globl	fiq_aarch64
	.globl	serror_aarch64

	.globl	sync_exception_aarch32
	.globl	irq_aarch32
	.globl	fiq_aarch32
	.globl	serror_aarch32

	/*
	 * Save LR and make x30 available, as most of the routines in the
	 * vector entries need a free register.
	 */
	.macro save_x30
	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	.endm

	.macro restore_x30
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	.endm

	/*
	 * Macro that synchronizes errors (EA) and checks for a pending SError.
	 * On detecting a pending SError it either reflects it back to the
	 * lower EL (Kernel First Handling, KFH) or handles it in EL3
	 * (Firmware First Handling, FFH), based on the EA routing model.
	 */
	.macro	sync_and_handle_pending_serror
	synchronize_errors
	mrs	x30, ISR_EL1
	tbz	x30, #ISR_A_SHIFT, 2f
#if FFH_SUPPORT
	mrs	x30, scr_el3
	tst	x30, #SCR_EA_BIT
	b.eq	1f
	bl	handle_pending_async_ea
	b	2f
#endif
1:
	/*
	 * This function never returns, but a bl is used so it has LR
	 * available for decision making.
	 */
	bl	reflect_pending_async_ea_to_lower_el
2:
	.endm
	/* ---------------------------------------------------------------------
	 * This macro handles synchronous exceptions.
	 * SMCs, system register traps and implementation-defined EL3
	 * exceptions are dispatched to their handlers; any other synchronous
	 * exception is reported as unhandled.
	 * ---------------------------------------------------------------------
	 */
	.macro	handle_sync_exception
#if ENABLE_RUNTIME_INSTRUMENTATION
	/*
	 * Read the timestamp value and store it in per-cpu data. The value
	 * will be extracted from per-cpu data by the C level SMC handler and
	 * saved to the PMF timestamp region.
	 */
	mrs	x30, cntpct_el0
	str	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
	mrs	x29, tpidr_el3
	str	x30, [x29, #CPU_DATA_PMF_TS0_OFFSET]
	ldr	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
#endif

	mrs	x30, esr_el3
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH

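	/*
	 * For reference, the ESR_EL3 exception class (EC) values tested below
	 * are architecturally defined: 0x13 (SMC from AArch32), 0x17 (SMC
	 * from AArch64), 0x18 (trapped MSR/MRS/system instruction from
	 * AArch64) and 0x1f (IMPLEMENTATION DEFINED exception taken to EL3).
	 */
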
	/* Handle SMC exceptions separately from other synchronous exceptions */
	cmp	x30, #EC_AARCH32_SMC
	b.eq	smc_handler32

	cmp	x30, #EC_AARCH64_SMC
	b.eq	sync_handler64

	cmp	x30, #EC_AARCH64_SYS
	b.eq	sync_handler64

	cmp	x30, #EC_IMP_DEF_EL3
	b.eq	imp_def_el3_handler

	/* If FFH support is enabled, try to handle lower EL EA exceptions. */
#if FFH_SUPPORT
	mrs	x30, scr_el3
	tst	x30, #SCR_EA_BIT
	b.eq	1f
	b	handle_lower_el_sync_ea
#endif
1:
	/* Synchronous exceptions other than the above are unhandled */
	b	report_unhandled_exception
	.endm

vector_base runtime_exceptions

	/* ---------------------------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x200
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_el0
#ifdef MONITOR_TRAPS
	stp	x29, x30, [sp, #-16]!

	mrs	x30, esr_el3
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH

	/* Check for BRK */
	cmp	x30, #EC_BRK
	b.eq	brk_handler

	ldp	x29, x30, [sp], #16
#endif /* MONITOR_TRAPS */

	/* We don't expect any synchronous exceptions from EL3 */
	b	report_unhandled_exception
end_vector_entry sync_exception_sp_el0

vector_entry irq_sp_el0
	/*
	 * EL3 code is non-reentrant. Any asynchronous exception is a serious
	 * error. Loop infinitely.
	 */
	b	report_unhandled_interrupt
end_vector_entry irq_sp_el0

vector_entry fiq_sp_el0
	b	report_unhandled_interrupt
end_vector_entry fiq_sp_el0

vector_entry serror_sp_el0
	no_ret	plat_handle_el3_ea
end_vector_entry serror_sp_el0

	/* ---------------------------------------------------------------------
	 * Current EL with SP_ELx: 0x200 - 0x400
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_elx
	/*
	 * This exception will trigger if anything went wrong during a previous
	 * exception entry or exit or while handling an earlier unexpected
	 * synchronous exception. There is a high probability that SP_EL3 is
	 * corrupted.
	 */
	b	report_unhandled_exception
end_vector_entry sync_exception_sp_elx

vector_entry irq_sp_elx
	b	report_unhandled_interrupt
end_vector_entry irq_sp_elx

vector_entry fiq_sp_elx
	b	report_unhandled_interrupt
end_vector_entry fiq_sp_elx

vector_entry serror_sp_elx
#if FFH_SUPPORT
	/*
	 * This will trigger if the exception was taken due to an SError in
	 * EL3, or because of pending asynchronous external aborts from a
	 * lower EL that were triggered by implicit/explicit synchronization
	 * in EL3 (SCR_EL3.EA=1) during EL3 entry. For the former case we
	 * continue with "plat_handle_el3_ea". The latter case occurs when the
	 * PSTATE.A bit is cleared in "handle_pending_async_ea", which means
	 * we are taking a nested exception in EL3. Call the handler for async
	 * EAs, which will eret back to the original EL3 handler if it is a
	 * nested exception. Also, unmask EAs so that we catch any further EAs
	 * that arise while handling this nested exception at EL3.
	 */
	save_x30
	ldr	x30, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
	cbz	x30, 1f
	/*
	 * This is nested exception handling; clear the flag to avoid taking
	 * this path for further exceptions caused by EA handling.
	 */
	str	xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
	unmask_async_ea
	b	handle_lower_el_async_ea
1:
	restore_x30
#endif
	no_ret	plat_handle_el3_ea

end_vector_entry serror_sp_elx

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x600
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch64
	/*
	 * This exception vector is most commonly the entry point for SMCs
	 * and for traps that are unhandled at lower ELs. SP_EL3 should point
	 * to a valid cpu context where the general purpose and system
	 * register state can be saved.
	 */
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	handle_sync_exception
end_vector_entry sync_exception_aarch64

vector_entry irq_aarch64
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	b	handle_interrupt_exception
end_vector_entry irq_aarch64

vector_entry fiq_aarch64
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	b	handle_interrupt_exception
end_vector_entry fiq_aarch64

	/*
	 * Any outstanding SErrors need to be synchronized, since errors can
	 * arrive in bursts. Reuse the synchronization mechanism to catch any
	 * further errors that are still pending.
	 */
vector_entry serror_aarch64
#if FFH_SUPPORT
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	b	handle_lower_el_async_ea
#else
	b	report_unhandled_exception
#endif
end_vector_entry serror_aarch64

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x800
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch32
	/*
	 * This exception vector is most commonly the entry point for SMCs
	 * and for traps that are unhandled at lower ELs. SP_EL3 should point
	 * to a valid cpu context where the general purpose and system
	 * register state can be saved.
	 */
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	handle_sync_exception
end_vector_entry sync_exception_aarch32

vector_entry irq_aarch32
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	b	handle_interrupt_exception
end_vector_entry irq_aarch32

vector_entry fiq_aarch32
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	b	handle_interrupt_exception
end_vector_entry fiq_aarch32

	/*
	 * Any outstanding SErrors need to be synchronized, since errors can
	 * arrive in bursts. Reuse the synchronization mechanism to catch any
	 * further errors that are still pending.
	 */
vector_entry serror_aarch32
#if FFH_SUPPORT
	save_x30
	apply_at_speculative_wa
	sync_and_handle_pending_serror
	unmask_async_ea
	b	handle_lower_el_async_ea
#else
	b	report_unhandled_exception
#endif
end_vector_entry serror_aarch32

#ifdef MONITOR_TRAPS
	.section .rodata.brk_string, "aS"
brk_location:
	.asciz "Error at instruction 0x"
brk_message:
	.asciz "Unexpected BRK instruction with value 0x"
#endif /* MONITOR_TRAPS */

	/* ---------------------------------------------------------------------
	 * The following code handles secure monitor calls.
	 * Depending upon the execution state from which the SMC has been
	 * invoked, it frees some general purpose registers to perform the
	 * remaining tasks. These involve finding the runtime service handler
	 * that is the target of the SMC and switching to the runtime stack
	 * (SP_EL0) before calling the handler.
	 *
	 * Note that x30 has been explicitly saved and can be used here.
	 * ---------------------------------------------------------------------
	 */
func sync_exception_handler
smc_handler32:
	/* Check whether an AArch32 caller issued an SMC64 call */
	tbnz	x0, #FUNCID_CC_SHIFT, smc_prohibited

sync_handler64:
	/* NOTE: The code below must preserve x0-x4 */

	/*
	 * Save general purpose and ARMv8.3-PAuth registers (if enabled).
	 * Also save PMCR_EL0 and set the PSTATE to a known state.
	 */
	bl	prepare_el3_entry

#if ENABLE_PAUTH
	/* Load and program APIAKey firmware key */
	bl	pauth_load_bl31_apiakey
#endif

	/*
	 * Populate the parameters for the SMC handler.
	 * We already have x0-x4 in place. x5 will point to a cookie (not used
	 * now). x6 will point to the context structure (SP_EL3) and x7 will
	 * contain flags we need to pass to the handler.
	 */
	mov	x5, xzr
	mov	x6, sp
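
	/*
	 * For reference, these registers line up with the C prototype of the
	 * runtime service handler (rt_svc_handle_t in runtime_svc.h):
	 *
	 * uintptr_t handler(uint32_t smc_fid, u_register_t x1, x2, x3, x4,
	 *                   void *cookie, void *handle, u_register_t flags);
	 */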

	/*
	 * Restore the saved C runtime stack value which will become the new
	 * SP_EL0 i.e. EL3 runtime stack. It was saved in the 'cpu_context'
	 * structure prior to the last ERET from EL3.
	 */
	ldr	x12, [x6, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]

	/* Switch to SP_EL0 */
	msr	spsel, #MODE_SP_EL0

	/*
	 * Save the SPSR_EL3 and ELR_EL3 in case there is a world
	 * switch during SMC handling.
	 * TODO: Revisit if all system registers can be saved later.
	 */
	mrs	x16, spsr_el3
	mrs	x17, elr_el3
	stp	x16, x17, [x6, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]

	/* Load SCR_EL3 */
	mrs	x18, scr_el3

	/* Check for system register traps */
	mrs	x16, esr_el3
	ubfx	x17, x16, #ESR_EC_SHIFT, #ESR_EC_LENGTH
	cmp	x17, #EC_AARCH64_SYS
	b.eq	sysreg_handler64

	/* Clear flag register */
	mov	x7, xzr

#if ENABLE_RME
	/* Copy SCR_EL3.NSE bit to the flag to indicate caller's security */
	ubfx	x7, x18, #SCR_NSE_SHIFT, #1

	/*
	 * Shift the copied SCR_EL3.NSE bit by 5 to create space for the
	 * SCR_EL3.NS bit. Bit 5 of the flag corresponds to the SCR_EL3.NSE
	 * bit.
	 */
	lsl	x7, x7, #5
#endif /* ENABLE_RME */

	/* Copy SCR_EL3.NS bit to the flag to indicate caller's security */
	bfi	x7, x18, #0, #1
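
	/*
	 * The resulting flags encoding, for reference: bit 0 holds SCR_EL3.NS
	 * and (with RME) bit 5 holds SCR_EL3.NSE. The architectural {NSE, NS}
	 * pairs map to the caller's world as follows: {0, 0} Secure,
	 * {0, 1} Non-secure, {1, 1} Realm; {1, 0} is reserved.
	 */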

	mov	sp, x12

	/*
	 * Per the SMCCC documentation, bits [23:17] must be zero for Fast
	 * SMCs. Other values are reserved for future use. Ensure that these
	 * bits are zeroes; if not, report it as an unknown SMC.
	 */
	tbz	x0, #FUNCID_TYPE_SHIFT, 2f	/* Skip the check if it's a Yielding Call */
	tst	x0, #(FUNCID_FC_RESERVED_MASK << FUNCID_FC_RESERVED_SHIFT)
	b.ne	smc_unknown

	/*
	 * Per SMCCC v1.3 a caller can set the SVE hint bit in the SMC FID
	 * passed through x0. Copy the SVE hint bit to the flags and mask the
	 * bit in the smc_fid passed to the standard service dispatcher.
	 * A service/dispatcher can retrieve the SVE hint bit state from the
	 * flags using the appropriate helper.
	 */
2:
	and	x16, x0, #(FUNCID_SVE_HINT_MASK << FUNCID_SVE_HINT_SHIFT)
	orr	x7, x7, x16
	bic	x0, x0, #(FUNCID_SVE_HINT_MASK << FUNCID_SVE_HINT_SHIFT)

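	/*
	 * For reference, the SMCCC function identifier layout assumed by the
	 * FUNCID_* masks used above and below:
	 *   bit  31    - call type (1 = Fast, 0 = Yielding)
	 *   bit  30    - calling convention (1 = SMC64, 0 = SMC32)
	 *   bits 29:24 - owning entity number (OEN)
	 *   bits 23:17 - must be zero for Fast calls
	 *   bit  16    - SVE hint (SMCCC v1.3+)
	 *   bits 15:0  - function number
	 */
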
	/* Get the unique owning entity number */
	ubfx	x16, x0, #FUNCID_OEN_SHIFT, #FUNCID_OEN_WIDTH
	ubfx	x15, x0, #FUNCID_TYPE_SHIFT, #FUNCID_TYPE_WIDTH
	orr	x16, x16, x15, lsl #FUNCID_OEN_WIDTH
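
	/*
	 * x16 is now (call_type << FUNCID_OEN_WIDTH) | OEN, i.e. an index
	 * into a table with one entry per (type, OEN) pair. With a 6-bit OEN
	 * and a 1-bit type this gives 128 possible indices, which is why any
	 * index above 127 is rejected below.
	 */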

	/* Load descriptor index from array of indices */
	adrp	x14, rt_svc_descs_indices
	add	x14, x14, :lo12:rt_svc_descs_indices
	ldrb	w15, [x14, x16]

	/* Any index greater than 127 is invalid. Check bit 7. */
	tbnz	w15, 7, smc_unknown

	/*
	 * Get the descriptor using the index
	 * x11 = (base + off), w15 = index
	 *
	 * handler = (base + off) + (index << log2(size))
	 */
	adr	x11, (__RT_SVC_DESCS_START__ + RT_SVC_DESC_HANDLE)
	lsl	w10, w15, #RT_SVC_SIZE_LOG2
	ldr	x15, [x11, w10, uxtw]
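
	/*
	 * As an illustration: for index 3, the load above reads the word at
	 * __RT_SVC_DESCS_START__ + RT_SVC_DESC_HANDLE +
	 * 3 * (1 << RT_SVC_SIZE_LOG2), i.e. the 'handle' member of the fourth
	 * runtime service descriptor. x15 now holds the handler's address.
	 */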

	/*
	 * Call the Secure Monitor Call handler and then drop directly into
	 * el3_exit() which will program any remaining architectural state
	 * prior to issuing the ERET to the desired lower EL.
	 */
#if DEBUG
	cbz	x15, rt_svc_fw_critical_error
#endif
	blr	x15

	b	el3_exit

sysreg_handler64:
	mov	x0, x16		/* ESR_EL3, containing syndrome information */
	mov	x1, x6		/* lower EL's context */
	mov	x19, x6		/* save context pointer for after the call */
	mov	sp, x12		/* EL3 runtime stack, as loaded above */

	/* int handle_sysreg_trap(uint64_t esr_el3, cpu_context_t *ctx); */
	bl	handle_sysreg_trap
	/*
	 * returns:
	 *   -1: unhandled trap, panic
	 *    0: handled trap, return to the trapping instruction (repeating it)
	 *    1: handled trap, return to the next instruction
	 */

	tst	w0, w0
	b.mi	elx_panic	/* negative return value: panic */
	b.eq	1f		/* zero: do not change ELR_EL3 */

	/* Advance the PC to continue after the instruction */
	ldr	x1, [x19, #CTX_EL3STATE_OFFSET + CTX_ELR_EL3]
	add	x1, x1, #4
	str	x1, [x19, #CTX_EL3STATE_OFFSET + CTX_ELR_EL3]
1:
	b	el3_exit

smc_unknown:
	/*
	 * Unknown SMC call. Populate the return value with SMC_UNK and call
	 * el3_exit(), which will restore the remaining architectural state,
	 * i.e. the SYS, GP and PAuth registers (if any), prior to issuing the
	 * ERET to the desired lower EL.
	 */
	mov	x0, #SMC_UNK
	str	x0, [x6, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	b	el3_exit

smc_prohibited:
	restore_ptw_el1_sys_regs
	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	mov	x0, #SMC_UNK
	exception_return

#if DEBUG
rt_svc_fw_critical_error:
	/* Switch to SP_ELx */
	msr	spsel, #MODE_SP_ELX
	no_ret	report_unhandled_exception
#endif
endfunc sync_exception_handler

	/* ---------------------------------------------------------------------
	 * This function handles FIQ and IRQ interrupts, i.e. EL3, S-EL1 and
	 * NS interrupts.
	 *
	 * Note that x30 has been explicitly saved and can be used here.
	 * ---------------------------------------------------------------------
	 */
func handle_interrupt_exception
	/*
	 * Save general purpose and ARMv8.3-PAuth registers (if enabled).
	 * Also save PMCR_EL0 and set the PSTATE to a known state.
	 */
	bl	prepare_el3_entry

#if ENABLE_PAUTH
	/* Load and program APIAKey firmware key */
	bl	pauth_load_bl31_apiakey
#endif

	/* Save the EL3 system registers needed to return from this exception */
	mrs	x0, spsr_el3
	mrs	x1, elr_el3
	stp	x0, x1, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]

	/* Switch to the runtime stack i.e. SP_EL0 */
	ldr	x2, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
	mov	x20, sp
	msr	spsel, #MODE_SP_EL0
	mov	sp, x2

	/*
	 * Find out whether this is a valid interrupt type.
	 * If the interrupt controller reports a spurious interrupt then return
	 * to where we came from.
	 */
	bl	plat_ic_get_pending_interrupt_type
	cmp	x0, #INTR_TYPE_INVAL
	b.eq	interrupt_exit

	/*
	 * Get the registered handler for this interrupt type.
	 * A NULL return value could be because of the following conditions:
	 *
	 * a. An interrupt of a type was routed correctly but a handler for its
	 *    type was not registered.
	 *
	 * b. An interrupt of a type was not routed correctly so a handler for
	 *    its type was not registered.
	 *
	 * c. An interrupt of a type was routed correctly to EL3, but was
	 *    deasserted before its pending state could be read. Another
	 *    interrupt of a different type pended at the same time and its
	 *    type was reported as pending instead. However, a handler for this
	 *    type was not registered.
	 *
	 * a. and b. can only happen due to a programming error. The
	 * occurrence of c. could be beyond the control of Trusted Firmware.
	 * It makes sense to return from this exception instead of reporting an
	 * error.
	 */
	bl	get_interrupt_type_handler
	cbz	x0, interrupt_exit
	mov	x21, x0
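
	/*
	 * The arguments set up below match the C prototype of the registered
	 * handler (interrupt_type_handler_t in interrupt_mgmt.h):
	 *
	 * uint64_t handler(uint32_t id, uint32_t flags, void *handle,
	 *                  void *cookie);
	 */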

	mov	x0, #INTR_ID_UNAVAILABLE

	/* Set the current security state in the 'flags' parameter */
	mrs	x2, scr_el3
	ubfx	x1, x2, #0, #1

	/* Restore the reference to the 'handle' i.e. SP_EL3 */
	mov	x2, x20

	/* x3 will point to a cookie (not used now) */
	mov	x3, xzr

	/* Call the interrupt type handler */
	blr	x21

interrupt_exit:
	/* Return from exception, possibly in a different security state */
	b	el3_exit
endfunc handle_interrupt_exception

func imp_def_el3_handler
	/* Save GP registers */
	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]

	/* Get the cpu_ops pointer */
	bl	get_cpu_ops_ptr

	/* Get the cpu_ops exception handler */
	ldr	x0, [x0, #CPU_E_HANDLER_FUNC]

	/*
	 * If the reserved function pointer is NULL, this CPU does not have an
	 * implementation-defined exception handler function.
	 */
	cbz	x0, el3_handler_exit
	mrs	x1, esr_el3
	ubfx	x1, x1, #ESR_EC_SHIFT, #ESR_EC_LENGTH
	blr	x0
el3_handler_exit:
	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	ldp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
	restore_x30
	no_ret	report_unhandled_exception
endfunc imp_def_el3_handler

/*
 * Handler for an async EA from a lower EL synchronized at EL3 entry in KFH
 * mode.
 *
 * This scenario may arise when there is an error (EA) in the system which is
 * not yet signaled to the PE while executing in a lower EL. During entry into
 * EL3, the errors are synchronized either implicitly or explicitly, causing an
 * async EA to pend at EL3.
 *
 * On detecting the pending EA (via ISR_EL1.A), and if the EA routing model is
 * KFH (SCR_EL3.EA = 0), this handler reflects the error back to the lower EL.
 *
 * This function assumes x30 has been saved.
 */
func reflect_pending_async_ea_to_lower_el
	/*
	 * As the original exception was not handled, we need to ensure that we
	 * return to the instruction which caused the exception. To achieve
	 * that, eret to "elr-4" (label "subtract_elr_el3") for an SMC, or
	 * simply eret otherwise (label "skip_smc_check").
	 *
	 * LIMITATION: The async EA may be masked at the target exception
	 * level, or its priority with respect to EL3/secure interrupts may be
	 * lower, causing the PE to bounce back and forth between the lower EL
	 * and EL3. To detect this cycle, we track the loop count in
	 * "CTX_NESTED_EA_FLAG" and compare against the previous ELR saved in
	 * "CTX_SAVED_ELR_EL3", panicking if the count grows too large (label
	 * "check_loop_ctr"). While in this cycle the loop counter retains its
	 * value, but a normal el3_exit clears the flag. However, setting
	 * SCR_EL3.IESB = 1 should give priority to SError handling, as per the
	 * AArch64.TakeException pseudocode in the Arm ARM.
	 *
	 * TODO: In the future, if EL3 gains the capability to inject a virtual
	 * SError into lower ELs, we can remove the el3_panic, handle the
	 * original exception first and inject the SError into the lower EL
	 * before ereting back.
	 */
	stp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	ldr	x29, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_ELR_EL3]
	mrs	x28, elr_el3
	cmp	x29, x28
	b.eq	check_loop_ctr
	str	x28, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_ELR_EL3]
	/* Zero the loop counter */
	str	xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
	b	skip_loop_ctr
check_loop_ctr:
	ldr	x29, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
	add	x29, x29, #1
	str	x29, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
	cmp	x29, #ASYNC_EA_REPLAY_COUNTER
	b.ge	el3_panic
skip_loop_ctr:
	/*
	 * Logic to distinguish whether we came from an SMC or from any other
	 * exception. Use the offset within the vector table to determine which
	 * exception we are handling. Each group of vector entries spans 0x200
	 * bytes: offsets "0x0-0x80" within a group hold the synchronous entry
	 * and "0x80-0x200" the asynchronous entries. Use the vector base
	 * address (vbar_el3) and the exception offset (LR) to calculate
	 * whether the address we came from is any of "0x0-0x80", "0x200-0x280",
	 * "0x400-0x480" or "0x600-0x680".
	 */
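	/*
	 * Worked example: an SMC from a lower EL using AArch64 enters the
	 * table at vbar_el3 + 0x400, so (LR - vbar_el3) & 0x1ff is below 0x80
	 * and the SMC check below is taken; an IRQ from the same source
	 * enters at vbar_el3 + 0x480, yielding 0x80 or more, so the check is
	 * skipped.
	 */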
	mrs	x29, vbar_el3
	sub	x30, x30, x29
	and	x30, x30, #0x1ff
	cmp	x30, #0x80
	b.ge	skip_smc_check
	/* It's a synchronous exception; now check whether it is an SMC */
	mrs	x30, esr_el3
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH
	cmp	x30, #EC_AARCH32_SMC
	b.eq	subtract_elr_el3
	cmp	x30, #EC_AARCH64_SMC
	b.eq	subtract_elr_el3
	b	skip_smc_check
subtract_elr_el3:
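	/*
	 * An SMC is a 4-byte instruction and ELR_EL3 points past it; step the
	 * ELR back by one instruction so the SMC is re-executed once the
	 * reflected SError has been handled at the lower EL.
	 */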
	sub	x28, x28, #4
skip_smc_check:
	msr	elr_el3, x28
	ldp	x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	exception_return
endfunc reflect_pending_async_ea_to_lower_el

	/* ---------------------------------------------------------------------
	 * The following code handles exceptions caused by BRK instructions.
	 * Following a BRK instruction, the only valid course of action is to
	 * print some information and panic, as the code that caused it is
	 * likely in an inconsistent internal state.
	 *
	 * This is initially intended to be used in conjunction with
	 * __builtin_trap.
	 * ---------------------------------------------------------------------
	 */
#ifdef MONITOR_TRAPS
func brk_handler
	/* Extract the ISS */
	mrs	x10, esr_el3
	ubfx	x10, x10, #ESR_ISS_SHIFT, #ESR_ISS_LENGTH

	/* Ensure the console is initialized */
	bl	plat_crash_console_init

	adr	x4, brk_location
	bl	asm_print_str
	mrs	x4, elr_el3
	bl	asm_print_hex
	bl	asm_print_newline

	adr	x4, brk_message
	bl	asm_print_str
	mov	x4, x10
	mov	x5, #28
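	/*
	 * For reference: the ISS field is 25 bits wide, so printing 28 bits
	 * (7 hex digits) covers it in whole nibbles.
	 */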
	bl	asm_print_hex_bits
	bl	asm_print_newline

	no_ret	plat_panic_handler
endfunc brk_handler
#endif /* MONITOR_TRAPS */