// SPDX-License-Identifier: GPL-2.0
/*
 * Exception handling code
 *
 * Copyright (C) 2019 ARM Ltd.
 */

#include <linux/context_tracking.h>
#include <linux/ptrace.h>
#include <linux/thread_info.h>

#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/kprobes.h>
#include <asm/mmu.h>
#include <asm/sysreg.h>

/*
 * This is intended to match the logic in irqentry_enter(), handling the kernel
 * mode transitions only.
 */
static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
{
	regs->exit_rcu = false;

	/*
	 * An idle task may be in an RCU extended quiescent state, so tell
	 * RCU to start watching before any instrumentable code runs, and
	 * remember to undo this in exit_to_kernel_mode().
	 */
	if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
		lockdep_hardirqs_off(CALLER_ADDR0);
		rcu_irq_enter();
		trace_hardirqs_off_finish();

		regs->exit_rcu = true;
		return;
	}

	lockdep_hardirqs_off(CALLER_ADDR0);
	rcu_irq_enter_check_tick();
	trace_hardirqs_off_finish();
}

/*
 * This is intended to match the logic in irqentry_exit(), handling the kernel
 * mode transitions only, and with preemption handled elsewhere.
 */
static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
{
	lockdep_assert_irqs_disabled();

	if (interrupts_enabled(regs)) {
		if (regs->exit_rcu) {
			trace_hardirqs_on_prepare();
			lockdep_hardirqs_on_prepare(CALLER_ADDR0);
			rcu_irq_exit();
			lockdep_hardirqs_on(CALLER_ADDR0);
			return;
		}

		trace_hardirqs_on();
	} else {
		if (regs->exit_rcu)
			rcu_irq_exit();
	}
}

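/*
 * NMI entry bookkeeping: stash the interrupted context's lockdep hardirq
 * state, then tell lockdep, RCU, and ftrace that we are in NMI context.
 * Undone in reverse order by arm64_exit_nmi().
 */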
void noinstr arm64_enter_nmi(struct pt_regs *regs)
{
	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

	__nmi_enter();
	lockdep_hardirqs_off(CALLER_ADDR0);
	lockdep_hardirq_enter();
	rcu_nmi_enter();

	trace_hardirqs_off_finish();
	ftrace_nmi_enter();
}

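/*
 * Tear down the NMI context set up by arm64_enter_nmi(), restoring the
 * interrupted context's lockdep hardirq state if it had IRQs enabled.
 */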
void noinstr arm64_exit_nmi(struct pt_regs *regs)
{
	bool restore = regs->lockdep_hardirqs;

	ftrace_nmi_exit();
	if (restore) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	}

	rcu_nmi_exit();
	lockdep_hardirq_exit();
	if (restore)
		lockdep_hardirqs_on(CALLER_ADDR0);
	__nmi_exit();
}

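/*
 * With pseudo-NMIs enabled, an IRQ taken while the interrupted context had
 * interrupts masked can only be a pseudo-NMI, so account for it as an NMI;
 * otherwise treat it as an ordinary kernel-mode entry.
 */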
asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
{
	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
		arm64_enter_nmi(regs);
	else
		enter_from_kernel_mode(regs);
}

asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
{
	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
		arm64_exit_nmi(regs);
	else
		exit_to_kernel_mode(regs);
}

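/*
 * Data and instruction aborts taken from EL1. FAR_EL1 is read up front,
 * before entry accounting runs, since instrumentable code could otherwise
 * take an exception of its own and clobber it.
 */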
static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	far = untagged_addr(far);
	do_mem_abort(far, esr, regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_sp_pc_abort(far, esr, regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_undef(struct pt_regs *regs)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_undefinstr(regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_inv(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	bad_mode(regs, 0, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

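/*
 * Debug exceptions from EL1 may be taken with interrupts masked and must
 * not block, so they are accounted much like NMIs for the purposes of
 * lockdep and RCU.
 */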
static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
{
	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

	lockdep_hardirqs_off(CALLER_ADDR0);
	rcu_nmi_enter();

	trace_hardirqs_off_finish();
}

static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
{
	bool restore = regs->lockdep_hardirqs;

	if (restore) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	}

	rcu_nmi_exit();
	if (restore)
		lockdep_hardirqs_on(CALLER_ADDR0);
}

static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	/*
	 * The CPU masked interrupts, and we are leaving them masked during
	 * do_debug_exception(). Update PMR as if we had called
	 * local_daif_mask().
	 */
	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);

	arm64_enter_el1_dbg(regs);
	do_debug_exception(far, esr, regs);
	arm64_exit_el1_dbg(regs);
}

static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_ptrauth_fault(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

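/*
 * Dispatch synchronous exceptions taken from EL1 according to the
 * exception class in ESR_EL1.
 */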
asmlinkage void noinstr el1_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_DABT_CUR:
	case ESR_ELx_EC_IABT_CUR:
		el1_abort(regs, esr);
		break;
	/*
	 * We don't handle ESR_ELx_EC_SP_ALIGN, since we will have hit a
	 * recursive exception when trying to push the initial pt_regs.
	 */
	case ESR_ELx_EC_PC_ALIGN:
		el1_pc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_UNKNOWN:
		el1_undef(regs);
		break;
	case ESR_ELx_EC_BREAKPT_CUR:
	case ESR_ELx_EC_SOFTSTP_CUR:
	case ESR_ELx_EC_WATCHPT_CUR:
	case ESR_ELx_EC_BRK64:
		el1_dbg(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el1_fpac(regs, esr);
		break;
	default:
		el1_inv(regs, esr);
	}
}

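/*
 * Handle entry from userspace: tell lockdep that IRQs are masked, leave
 * the user context-tracking state, and finish the hardirq trace events
 * that lockdep_hardirqs_off() began.
 */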
asmlinkage void noinstr enter_from_user_mode(void)
{
	lockdep_hardirqs_off(CALLER_ADDR0);
	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit_irqoff();
	trace_hardirqs_off_finish();
}

asmlinkage void noinstr exit_to_user_mode(void)
{
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	user_enter_irqoff();
	lockdep_hardirqs_on(CALLER_ADDR0);
}

static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	far = untagged_addr(far);
	do_mem_abort(far, esr, regs);
}

static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	/*
	 * We've taken an instruction abort from userspace and not yet
	 * re-enabled IRQs. If the address is a kernel address, apply
	 * BP hardening prior to enabling IRQs and pre-emption.
	 */
	if (!is_ttbr0_addr(far))
		arm64_apply_bp_hardening();

	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(far, esr, regs);
}

static void noinstr el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_acc(esr, regs);
}

static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_sve_acc(esr, regs);
}

static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_exc(esr, regs);
}

static void noinstr el0_sys(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_sysinstr(esr, regs);
}

static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	/*
	 * As in el0_ia(), apply BP hardening before IRQs are re-enabled if
	 * the faulting PC is a kernel address.
	 */
	if (!is_ttbr0_addr(instruction_pointer(regs)))
		arm64_apply_bp_hardening();

	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(far, esr, regs);
}

static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(regs->sp, esr, regs);
}

static void noinstr el0_undef(struct pt_regs *regs)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_undefinstr(regs);
}

static void noinstr el0_bti(struct pt_regs *regs)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_bti(regs);
}

static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	bad_el0_sync(regs, 0, esr);
}

static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
{
	/* Only watchpoints write FAR_EL1, otherwise it's UNKNOWN */
	unsigned long far = read_sysreg(far_el1);

	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);

	enter_from_user_mode();
	do_debug_exception(far, esr, regs);
	local_daif_restore(DAIF_PROCCTX_NOIRQ);
}

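/*
 * The CPU masked interrupts on taking the SVC; update PMR to match
 * before any instrumentable code runs, as in el1_dbg().
 */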
static void noinstr el0_svc(struct pt_regs *regs)
{
	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);

	enter_from_user_mode();
	do_el0_svc(regs);
}

static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_ptrauth_fault(regs, esr);
}

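/*
 * Dispatch synchronous exceptions taken from EL0 according to the
 * exception class in ESR_EL1.
 */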
asmlinkage void noinstr el0_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC64:
		el0_svc(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_SVE:
		el0_sve_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC64:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_WFx:
		el0_sys(regs, esr);
		break;
	case ESR_ELx_EC_SP_ALIGN:
		el0_sp(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
		el0_undef(regs);
		break;
	case ESR_ELx_EC_BTI:
		el0_bti(regs);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_WATCHPT_LOW:
	case ESR_ELx_EC_BRK64:
		el0_dbg(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el0_fpac(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}

#ifdef CONFIG_COMPAT
static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_cp15instr(esr, regs);
}

static void noinstr el0_svc_compat(struct pt_regs *regs)
{
	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);

	enter_from_user_mode();
	do_el0_svc_compat(regs);
}

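/*
 * Dispatch synchronous exceptions taken from 32-bit (compat) EL0
 * according to the exception class in ESR_EL1.
 */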
asmlinkage void noinstr el0_sync_compat_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC32:
		el0_svc_compat(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC32:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_LS:
	case ESR_ELx_EC_CP14_64:
		el0_undef(regs);
		break;
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		el0_cp15(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_WATCHPT_LOW:
	case ESR_ELx_EC_BKPT32:
		el0_dbg(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}
#endif /* CONFIG_COMPAT */