// SPDX-License-Identifier: GPL-2.0
/*
 * Exception handling code
 *
 * Copyright (C) 2019 ARM Ltd.
 */

#include <linux/context_tracking.h>
#include <linux/kasan.h>
#include <linux/linkage.h>
#include <linux/lockdep.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/thread_info.h>

#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/irq_regs.h>
#include <asm/kprobes.h>
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/sdei.h>
#include <asm/stacktrace.h>
#include <asm/sysreg.h>
#include <asm/system_misc.h>

/*
 * Handle IRQ/context state management when entering from kernel mode.
 * Before this function is called it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 *
 * This is intended to match the logic in irqentry_enter(), handling the kernel
 * mode transitions only.
 */
static __always_inline void __enter_from_kernel_mode(struct pt_regs *regs)
{
	regs->exit_rcu = false;

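	/*
	 * An exception taken from the idle task may arrive while RCU is not
	 * watching. Tell RCU about the IRQ and remember (via regs->exit_rcu)
	 * to undo this on the return path, mirroring irqentry_enter().
	 */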
	if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
		lockdep_hardirqs_off(CALLER_ADDR0);
		ct_irq_enter();
		trace_hardirqs_off_finish();

		regs->exit_rcu = true;
		return;
	}

	lockdep_hardirqs_off(CALLER_ADDR0);
	rcu_irq_enter_check_tick();
	trace_hardirqs_off_finish();
}

static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
{
	__enter_from_kernel_mode(regs);
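	/*
	 * Check for asynchronous MTE tag check faults recorded prior to
	 * entry, and clear Tag Check Override so subsequent kernel accesses
	 * are tag-checked again.
	 */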
	mte_check_tfsr_entry();
	mte_disable_tco_entry(current);
}

/*
 * Handle IRQ/context state management when exiting to kernel mode.
 * After this function returns it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 *
 * This is intended to match the logic in irqentry_exit(), handling the kernel
 * mode transitions only, and with preemption handled elsewhere.
 */
static __always_inline void __exit_to_kernel_mode(struct pt_regs *regs)
{
	lockdep_assert_irqs_disabled();

	if (interrupts_enabled(regs)) {
		if (regs->exit_rcu) {
			trace_hardirqs_on_prepare();
			lockdep_hardirqs_on_prepare();
			ct_irq_exit();
			lockdep_hardirqs_on(CALLER_ADDR0);
			return;
		}

		trace_hardirqs_on();
	} else {
		if (regs->exit_rcu)
			ct_irq_exit();
	}
}

static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
{
	mte_check_tfsr_exit();
	__exit_to_kernel_mode(regs);
}

/*
 * Handle IRQ/context state management when entering from user mode.
 * Before this function is called it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 */
static __always_inline void __enter_from_user_mode(void)
{
	lockdep_hardirqs_off(CALLER_ADDR0);
	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit_irqoff();
	trace_hardirqs_off_finish();
	mte_disable_tco_entry(current);
}

static __always_inline void enter_from_user_mode(struct pt_regs *regs)
{
	__enter_from_user_mode();
}

/*
 * Handle IRQ/context state management when exiting to user mode.
 * After this function returns it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 */
static __always_inline void __exit_to_user_mode(void)
{
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare();
	user_enter_irqoff();
	lockdep_hardirqs_on(CALLER_ADDR0);
}

static __always_inline void prepare_exit_to_user_mode(struct pt_regs *regs)
{
	unsigned long flags;

	local_daif_mask();

	flags = read_thread_flags();
	if (unlikely(flags & _TIF_WORK_MASK))
		do_notify_resume(regs, flags);
}

static __always_inline void exit_to_user_mode(struct pt_regs *regs)
{
	prepare_exit_to_user_mode(regs);
	mte_check_tfsr_exit();
	__exit_to_user_mode();
}

asmlinkage void noinstr asm_exit_to_user_mode(struct pt_regs *regs)
{
	exit_to_user_mode(regs);
}

/*
 * Handle IRQ/context state management when entering an NMI from user/kernel
 * mode. Before this function is called it is not safe to call regular kernel
 * code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_enter_nmi(struct pt_regs *regs)
{
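	/* Save lockdep's hardirq state so arm64_exit_nmi() can restore it. */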
	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

	__nmi_enter();
	lockdep_hardirqs_off(CALLER_ADDR0);
	lockdep_hardirq_enter();
	ct_nmi_enter();

	trace_hardirqs_off_finish();
	ftrace_nmi_enter();
}

/*
 * Handle IRQ/context state management when exiting an NMI from user/kernel
 * mode. After this function returns it is not safe to call regular kernel
 * code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_exit_nmi(struct pt_regs *regs)
{
	bool restore = regs->lockdep_hardirqs;

	ftrace_nmi_exit();
	if (restore) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare();
	}

	ct_nmi_exit();
	lockdep_hardirq_exit();
	if (restore)
		lockdep_hardirqs_on(CALLER_ADDR0);
	__nmi_exit();
}

/*
 * Handle IRQ/context state management when entering a debug exception from
 * kernel mode. Before this function is called it is not safe to call regular
 * kernel code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
{
	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

	lockdep_hardirqs_off(CALLER_ADDR0);
	ct_nmi_enter();

	trace_hardirqs_off_finish();
}

/*
 * Handle IRQ/context state management when exiting a debug exception from
 * kernel mode. After this function returns it is not safe to call regular
 * kernel code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
{
	bool restore = regs->lockdep_hardirqs;

	if (restore) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare();
	}

	ct_nmi_exit();
	if (restore)
		lockdep_hardirqs_on(CALLER_ADDR0);
}

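/*
 * With CONFIG_PREEMPT_DYNAMIC, whether we preempt on IRQ exit is selected at
 * runtime via a static key; otherwise it is fixed by CONFIG_PREEMPTION.
 */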
#ifdef CONFIG_PREEMPT_DYNAMIC
DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
#define need_irq_preemption() \
	(static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
#else
#define need_irq_preemption()	(IS_ENABLED(CONFIG_PREEMPTION))
#endif

static void __sched arm64_preempt_schedule_irq(void)
{
	if (!need_irq_preemption())
		return;

	/*
	 * Note: thread_info::preempt_count includes both thread_info::count
	 * and thread_info::need_resched, and is not equivalent to
	 * preempt_count().
	 */
	if (READ_ONCE(current_thread_info()->preempt_count) != 0)
		return;

	/*
	 * DAIF.DA are cleared at the start of IRQ/FIQ handling, and when GIC
	 * priority masking is used the GIC irqchip driver will clear DAIF.IF
	 * using gic_arch_enable_irqs() for normal IRQs. If anything is set in
	 * DAIF we must have handled an NMI, so skip preemption.
	 */
	if (system_uses_irq_prio_masking() && read_sysreg(daif))
		return;

	/*
	 * Preempting a task from an IRQ means we leave copies of PSTATE
	 * on the stack. cpufeature's enable calls may modify PSTATE, but
	 * resuming one of these preempted tasks would undo those changes.
	 *
	 * Only allow a task to be preempted once cpufeatures have been
	 * enabled.
	 */
	if (system_capabilities_finalized())
		preempt_schedule_irq();
}

static void do_interrupt_handler(struct pt_regs *regs,
				 void (*handler)(struct pt_regs *))
{
	struct pt_regs *old_regs = set_irq_regs(regs);

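	/*
	 * Interrupts taken on the task stack are handled on the per-CPU IRQ
	 * stack, bounding their footprint on kernel stacks; interrupts taken
	 * while already on a special stack are handled in place.
	 */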
	if (on_thread_stack())
		call_on_irq_stack(regs, handler);
	else
		handler(regs);

	set_irq_regs(old_regs);
}

extern void (*handle_arch_irq)(struct pt_regs *);
extern void (*handle_arch_fiq)(struct pt_regs *);

static void noinstr __panic_unhandled(struct pt_regs *regs, const char *vector,
				      unsigned long esr)
{
	arm64_enter_nmi(regs);

	console_verbose();

	pr_crit("Unhandled %s exception on CPU%d, ESR 0x%016lx -- %s\n",
		vector, smp_processor_id(), esr,
		esr_get_class_string(esr));

	__show_regs(regs);
	panic("Unhandled exception");
}

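/*
 * Generate a panicking stub handler for an exception vector which should
 * never be taken, e.g. the el1t (EL1 with SP_EL0) vectors below.
 */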
#define UNHANDLED(el, regsize, vector)						\
asmlinkage void noinstr el##_##regsize##_##vector##_handler(struct pt_regs *regs)	\
{										\
	const char *desc = #regsize "-bit " #el " " #vector;			\
	__panic_unhandled(regs, desc, read_sysreg(esr_el1));			\
}

#ifdef CONFIG_ARM64_ERRATUM_1463225
static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

static void cortex_a76_erratum_1463225_svc_handler(void)
{
	u32 reg, val;

	if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
		return;

	if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225)))
		return;

	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
	reg = read_sysreg(mdscr_el1);
	val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE;
	write_sysreg(val, mdscr_el1);
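	/*
	 * Unmask debug exceptions (PSTATE.D); with MDSCR_EL1.SS set, the
	 * single-step exception is taken once the following isb() completes.
	 */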
	asm volatile("msr daifclr, #8");
	isb();

	/* We will have taken a single-step exception by this point */

	write_sysreg(reg, mdscr_el1);
	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
}

static __always_inline bool
cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
	if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
		return false;

	/*
	 * We've taken a dummy step exception from the kernel to ensure
	 * that interrupts are re-enabled on the syscall path. Return back
	 * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions
	 * masked so that we can safely restore the mdscr and get on with
	 * handling the syscall.
	 */
	regs->pstate |= PSR_D_BIT;
	return true;
}
#else /* CONFIG_ARM64_ERRATUM_1463225 */
static void cortex_a76_erratum_1463225_svc_handler(void) { }
static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
	return false;
}
#endif /* CONFIG_ARM64_ERRATUM_1463225 */

UNHANDLED(el1t, 64, sync)
UNHANDLED(el1t, 64, irq)
UNHANDLED(el1t, 64, fiq)
UNHANDLED(el1t, 64, error)

static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_mem_abort(far, esr, regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_sp_pc_abort(far, esr, regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_undef(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_undefinstr(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_bti(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_bti(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	arm64_enter_el1_dbg(regs);
	if (!cortex_a76_erratum_1463225_debug_handler(regs))
		do_debug_exception(far, esr, regs);
	arm64_exit_el1_dbg(regs);
}

static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_fpac(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_DABT_CUR:
	case ESR_ELx_EC_IABT_CUR:
		el1_abort(regs, esr);
		break;
	/*
	 * We don't handle ESR_ELx_EC_SP_ALIGN, since we will have hit a
	 * recursive exception when trying to push the initial pt_regs.
	 */
	case ESR_ELx_EC_PC_ALIGN:
		el1_pc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_UNKNOWN:
		el1_undef(regs, esr);
		break;
	case ESR_ELx_EC_BTI:
		el1_bti(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_CUR:
	case ESR_ELx_EC_SOFTSTP_CUR:
	case ESR_ELx_EC_WATCHPT_CUR:
	case ESR_ELx_EC_BRK64:
		el1_dbg(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el1_fpac(regs, esr);
		break;
	default:
		__panic_unhandled(regs, "64-bit el1h sync", esr);
	}
}

static __always_inline void __el1_pnmi(struct pt_regs *regs,
				       void (*handler)(struct pt_regs *))
{
	arm64_enter_nmi(regs);
	do_interrupt_handler(regs, handler);
	arm64_exit_nmi(regs);
}

static __always_inline void __el1_irq(struct pt_regs *regs,
				      void (*handler)(struct pt_regs *))
{
	enter_from_kernel_mode(regs);

	irq_enter_rcu();
	do_interrupt_handler(regs, handler);
	irq_exit_rcu();

	arm64_preempt_schedule_irq();

	exit_to_kernel_mode(regs);
}

static void noinstr el1_interrupt(struct pt_regs *regs,
				  void (*handler)(struct pt_regs *))
{
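	/* Unmask Debug and SError, but keep IRQ/FIQ masked while dispatching. */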
	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);

	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
		__el1_pnmi(regs, handler);
	else
		__el1_irq(regs, handler);
}

asmlinkage void noinstr el1h_64_irq_handler(struct pt_regs *regs)
{
	el1_interrupt(regs, handle_arch_irq);
}

asmlinkage void noinstr el1h_64_fiq_handler(struct pt_regs *regs)
{
	el1_interrupt(regs, handle_arch_fiq);
}

asmlinkage void noinstr el1h_64_error_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	local_daif_restore(DAIF_ERRCTX);
	arm64_enter_nmi(regs);
	do_serror(regs, esr);
	arm64_exit_nmi(regs);
}

static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(far, esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	/*
	 * We've taken an instruction abort from userspace and not yet
	 * re-enabled IRQs. If the address is a kernel address, apply
	 * BP hardening prior to enabling IRQs and pre-emption.
	 */
	if (!is_ttbr0_addr(far))
		arm64_apply_bp_hardening();

	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(far, esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_acc(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sve_acc(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_sme_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sme_acc(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_exc(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_sys(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sysinstr(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	if (!is_ttbr0_addr(instruction_pointer(regs)))
		arm64_apply_bp_hardening();

	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(far, esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(regs->sp, esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_undef(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_undefinstr(regs, esr);
	exit_to_user_mode(regs);
}

static void noinstr el0_bti(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_bti(regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	bad_el0_sync(regs, 0, esr);
	exit_to_user_mode(regs);
}

static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
{
	/* Only watchpoints write FAR_EL1, otherwise it's UNKNOWN */
	unsigned long far = read_sysreg(far_el1);

	enter_from_user_mode(regs);
	do_debug_exception(far, esr, regs);
	local_daif_restore(DAIF_PROCCTX);
	exit_to_user_mode(regs);
}

static void noinstr el0_svc(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	cortex_a76_erratum_1463225_svc_handler();
	do_el0_svc(regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_fpac(regs, esr);
	exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC64:
		el0_svc(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_SVE:
		el0_sve_acc(regs, esr);
		break;
	case ESR_ELx_EC_SME:
		el0_sme_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC64:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_WFx:
		el0_sys(regs, esr);
		break;
	case ESR_ELx_EC_SP_ALIGN:
		el0_sp(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
		el0_undef(regs, esr);
		break;
	case ESR_ELx_EC_BTI:
		el0_bti(regs);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_WATCHPT_LOW:
	case ESR_ELx_EC_BRK64:
		el0_dbg(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el0_fpac(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}

static void noinstr el0_interrupt(struct pt_regs *regs,
				  void (*handler)(struct pt_regs *))
{
	enter_from_user_mode(regs);

	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);

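	/*
	 * A PC with bit 55 set is a TTBR1 (kernel) address; a user branch
	 * into the kernel half warrants branch predictor hardening.
	 */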
	if (regs->pc & BIT(55))
		arm64_apply_bp_hardening();

	irq_enter_rcu();
	do_interrupt_handler(regs, handler);
	irq_exit_rcu();

	exit_to_user_mode(regs);
}

static void noinstr __el0_irq_handler_common(struct pt_regs *regs)
{
	el0_interrupt(regs, handle_arch_irq);
}

asmlinkage void noinstr el0t_64_irq_handler(struct pt_regs *regs)
{
	__el0_irq_handler_common(regs);
}

static void noinstr __el0_fiq_handler_common(struct pt_regs *regs)
{
	el0_interrupt(regs, handle_arch_fiq);
}

asmlinkage void noinstr el0t_64_fiq_handler(struct pt_regs *regs)
{
	__el0_fiq_handler_common(regs);
}

static void noinstr __el0_error_handler_common(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	enter_from_user_mode(regs);
	local_daif_restore(DAIF_ERRCTX);
	arm64_enter_nmi(regs);
	do_serror(regs, esr);
	arm64_exit_nmi(regs);
	local_daif_restore(DAIF_PROCCTX);
	exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_64_error_handler(struct pt_regs *regs)
{
	__el0_error_handler_common(regs);
}

#ifdef CONFIG_COMPAT
static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_cp15instr(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_svc_compat(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	cortex_a76_erratum_1463225_svc_handler();
	do_el0_svc_compat(regs);
	exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC32:
		el0_svc_compat(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC32:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_LS:
	case ESR_ELx_EC_CP14_64:
		el0_undef(regs, esr);
		break;
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		el0_cp15(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_WATCHPT_LOW:
	case ESR_ELx_EC_BKPT32:
		el0_dbg(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}

asmlinkage void noinstr el0t_32_irq_handler(struct pt_regs *regs)
{
	__el0_irq_handler_common(regs);
}

asmlinkage void noinstr el0t_32_fiq_handler(struct pt_regs *regs)
{
	__el0_fiq_handler_common(regs);
}

asmlinkage void noinstr el0t_32_error_handler(struct pt_regs *regs)
{
	__el0_error_handler_common(regs);
}
#else /* CONFIG_COMPAT */
UNHANDLED(el0t, 32, sync)
UNHANDLED(el0t, 32, irq)
UNHANDLED(el0t, 32, fiq)
UNHANDLED(el0t, 32, error)
#endif /* CONFIG_COMPAT */

#ifdef CONFIG_VMAP_STACK
asmlinkage void noinstr handle_bad_stack(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);
	unsigned long far = read_sysreg(far_el1);

	arm64_enter_nmi(regs);
	panic_bad_stack(regs, esr, far);
}
#endif /* CONFIG_VMAP_STACK */

#ifdef CONFIG_ARM_SDE_INTERFACE
asmlinkage noinstr unsigned long
__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
{
	unsigned long ret;

	/*
	 * We didn't take an exception to get here, so the HW hasn't
	 * set/cleared bits in PSTATE that we may rely on.
	 *
	 * The original SDEI spec (ARM DEN 0054A) can be read ambiguously as to
	 * whether PSTATE bits are inherited unchanged or generated from
	 * scratch, and the TF-A implementation always clears PAN and always
	 * clears UAO. There are no other known implementations.
	 *
	 * Subsequent revisions (ARM DEN 0054B) follow the usual rules for how
	 * PSTATE is modified upon architectural exceptions, and so PAN is
	 * either inherited or set per SCTLR_ELx.SPAN, and UAO is always
	 * cleared.
	 *
	 * We must explicitly reset PAN to the expected state, including
	 * clearing it when the host isn't using it, in case a VM had it set.
	 */
	if (system_uses_hw_pan())
		set_pstate_pan(1);
	else if (cpu_has_pan())
		set_pstate_pan(0);

	arm64_enter_nmi(regs);
	ret = do_sdei_event(regs, arg);
	arm64_exit_nmi(regs);

	return ret;
}
#endif /* CONFIG_ARM_SDE_INTERFACE */