// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/kernel/panic.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 */

/*
 * This function is used throughout the kernel (including mm and fs)
 * to indicate a major problem.
 */
#include <linux/debug_locks.h>
#include <linux/sched/debug.h>
#include <linux/interrupt.h>
#include <linux/kgdb.h>
#include <linux/kmsg_dump.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/vt_kern.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/ftrace.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/kexec.h>
#include <linux/panic_notifier.h>
#include <linux/sched.h>
#include <linux/sysrq.h>
#include <linux/init.h>
#include <linux/nmi.h>
#include <linux/console.h>
#include <linux/bug.h>
#include <linux/ratelimit.h>
#include <linux/debugfs.h>
#include <trace/events/error_report.h>
#include <asm/sections.h>

#define PANIC_TIMER_STEP 100
#define PANIC_BLINK_SPD 18

#ifdef CONFIG_SMP
/*
 * Should we dump all CPUs backtraces in an oops event?
 * Defaults to 0, can be changed via sysctl.
 */
static unsigned int __read_mostly sysctl_oops_all_cpu_backtrace;
#else
#define sysctl_oops_all_cpu_backtrace 0
#endif /* CONFIG_SMP */

int panic_on_oops = CONFIG_PANIC_ON_OOPS_VALUE;
static unsigned long tainted_mask =
	IS_ENABLED(CONFIG_RANDSTRUCT) ? (1 << TAINT_RANDSTRUCT) : 0;
static int pause_on_oops;
static int pause_on_oops_flag;
static DEFINE_SPINLOCK(pause_on_oops_lock);
bool crash_kexec_post_notifiers;
int panic_on_warn __read_mostly;
unsigned long panic_on_taint;
bool panic_on_taint_nousertaint = false;

int panic_timeout = CONFIG_PANIC_TIMEOUT;
EXPORT_SYMBOL_GPL(panic_timeout);

#define PANIC_PRINT_TASK_INFO		0x00000001
#define PANIC_PRINT_MEM_INFO		0x00000002
#define PANIC_PRINT_TIMER_INFO		0x00000004
#define PANIC_PRINT_LOCK_INFO		0x00000008
#define PANIC_PRINT_FTRACE_INFO		0x00000010
#define PANIC_PRINT_ALL_PRINTK_MSG	0x00000020
#define PANIC_PRINT_ALL_CPU_BT		0x00000040
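
/*
 * panic_print below is a bitmask of the PANIC_PRINT_* flags above; it can
 * be set with the "panic_print=" boot parameter (see the core_param() at
 * the bottom of this file). For example, panic_print=0x41, i.e.
 * TASK_INFO | ALL_CPU_BT, requests the task list plus a backtrace from
 * every CPU at panic time.
 */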
unsigned long panic_print;

ATOMIC_NOTIFIER_HEAD(panic_notifier_list);

EXPORT_SYMBOL(panic_notifier_list);

#if defined(CONFIG_SMP) && defined(CONFIG_SYSCTL)
static struct ctl_table kern_panic_table[] = {
	{
		.procname	= "oops_all_cpu_backtrace",
		.data		= &sysctl_oops_all_cpu_backtrace,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{ }
};

static __init int kernel_panic_sysctls_init(void)
{
	register_sysctl_init("kernel", kern_panic_table);
	return 0;
}
late_initcall(kernel_panic_sysctls_init);
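
/*
 * The table above exposes the knob as
 * /proc/sys/kernel/oops_all_cpu_backtrace; e.g.
 * "sysctl -w kernel.oops_all_cpu_backtrace=1" asks oops_enter() below to
 * dump a backtrace from every online CPU, not only the oopsing one.
 */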
#endif

static long no_blink(int state)
{
	return 0;
}

/* Returns how long it waited in ms */
long (*panic_blink)(int state);
EXPORT_SYMBOL(panic_blink);

/*
 * Stop ourself in panic -- architecture code may override this
 */
void __weak panic_smp_self_stop(void)
{
	while (1)
		cpu_relax();
}

/*
 * Stop ourselves in NMI context if another CPU has already panicked. Arch code
 * may override this to prepare for crash dumping, e.g. save regs info.
 */
void __weak nmi_panic_self_stop(struct pt_regs *regs)
{
	panic_smp_self_stop();
}

/*
 * Stop other CPUs in panic. Architecture dependent code may override this
 * with more suitable version. For example, if the architecture supports
 * crash dump, it should save registers of each stopped CPU and disable
 * per-CPU features such as virtualization extensions.
 */
void __weak crash_smp_send_stop(void)
{
	static int cpus_stopped;

	/*
	 * This function can be called twice in panic path, but obviously
	 * we execute this only once.
	 */
	if (cpus_stopped)
		return;

	/*
	 * Note smp_send_stop is the usual smp shutdown function, which
	 * unfortunately means it may not be hardened to work in a panic
	 * situation.
	 */
	smp_send_stop();
	cpus_stopped = 1;
}

atomic_t panic_cpu = ATOMIC_INIT(PANIC_CPU_INVALID);

/*
 * A variant of panic() called from NMI context. We return if we've already
 * panicked on this CPU. If another CPU already panicked, loop in
 * nmi_panic_self_stop() which can provide architecture dependent code such
 * as saving register state for crash dump.
 */
void nmi_panic(struct pt_regs *regs, const char *msg)
{
	int old_cpu, cpu;

	cpu = raw_smp_processor_id();
	old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, cpu);

	if (old_cpu == PANIC_CPU_INVALID)
		panic("%s", msg);
	else if (old_cpu != cpu)
		nmi_panic_self_stop(regs);
}
EXPORT_SYMBOL(nmi_panic);
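
/*
 * Illustrative note (callers live outside this file): NMI-context
 * reporters such as the hard lockup detector typically do something like
 *
 *	nmi_panic(regs, "Hard LOCKUP");
 *
 * so that the first CPU to report actually panics, while any other CPU
 * racing into the same path is parked in nmi_panic_self_stop().
 */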

static void panic_print_sys_info(bool console_flush)
{
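	/*
	 * Called twice from panic(): first with console_flush == false,
	 * before kmsg_dump(), to emit the extra debug info selected in
	 * panic_print, and again with console_flush == true, after
	 * console_flush_on_panic(), to optionally replay the whole log
	 * buffer on the consoles.
	 */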
	if (console_flush) {
		if (panic_print & PANIC_PRINT_ALL_PRINTK_MSG)
			console_flush_on_panic(CONSOLE_REPLAY_ALL);
		return;
	}

	if (panic_print & PANIC_PRINT_ALL_CPU_BT)
		trigger_all_cpu_backtrace();

	if (panic_print & PANIC_PRINT_TASK_INFO)
		show_state();

	if (panic_print & PANIC_PRINT_MEM_INFO)
		show_mem(0, NULL);

	if (panic_print & PANIC_PRINT_TIMER_INFO)
		sysrq_timer_list_show();

	if (panic_print & PANIC_PRINT_LOCK_INFO)
		debug_show_all_locks();

	if (panic_print & PANIC_PRINT_FTRACE_INFO)
		ftrace_dump(DUMP_ALL);
}

/**
 * panic - halt the system
 * @fmt: The text string to print
 *
 * Display a message, then perform cleanups.
 *
 * This function never returns.
 */
void panic(const char *fmt, ...)
{
	static char buf[1024];
	va_list args;
	long i, i_next = 0, len;
	int state = 0;
	int old_cpu, this_cpu;
	bool _crash_kexec_post_notifiers = crash_kexec_post_notifiers;

	if (panic_on_warn) {
		/*
		 * This thread may hit another WARN() in the panic path.
		 * Resetting this prevents additional WARN() from panicking the
		 * system on this thread. Other threads are blocked by the
		 * panic_mutex in panic().
		 */
		panic_on_warn = 0;
	}

	/*
	 * Disable local interrupts. This will prevent panic_smp_self_stop
	 * from deadlocking the first cpu that invokes the panic, since
	 * there is nothing to prevent an interrupt handler (that runs
	 * after setting panic_cpu) from invoking panic() again.
	 */
	local_irq_disable();
	preempt_disable_notrace();

	/*
	 * It's possible to come here directly from a panic-assertion and
	 * not have preempt disabled. Some functions called from here want
	 * preempt to be disabled. No point enabling it later though...
	 *
	 * Only one CPU is allowed to execute the panic code from here. For
	 * multiple parallel invocations of panic, all other CPUs either
	 * stop themselves or will wait until they are stopped by the 1st CPU
	 * with smp_send_stop().
	 *
	 * `old_cpu == PANIC_CPU_INVALID' means this is the 1st CPU which
	 * comes here, so go ahead.
	 * `old_cpu == this_cpu' means we came from nmi_panic() which sets
	 * panic_cpu to this CPU. In this case, this is also the 1st CPU.
	 */
	this_cpu = raw_smp_processor_id();
	old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);

	if (old_cpu != PANIC_CPU_INVALID && old_cpu != this_cpu)
		panic_smp_self_stop();

	console_verbose();
	bust_spinlocks(1);
	va_start(args, fmt);
	len = vscnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	if (len && buf[len - 1] == '\n')
		buf[len - 1] = '\0';

	pr_emerg("Kernel panic - not syncing: %s\n", buf);
#ifdef CONFIG_DEBUG_BUGVERBOSE
	/*
	 * Avoid nested stack-dumping if a panic occurs during oops processing
	 */
	if (!test_taint(TAINT_DIE) && oops_in_progress <= 1)
		dump_stack();
#endif

	/*
	 * If kgdb is enabled, give it a chance to run before we stop all
	 * the other CPUs or else we won't be able to debug processes left
	 * running on them.
	 */
	kgdb_panic(buf);

	/*
	 * If we have crashed and we have a crash kernel loaded let it handle
	 * everything else.
	 * If we want to run this after calling panic_notifiers, pass
	 * the "crash_kexec_post_notifiers" option to the kernel.
	 *
	 * Bypass the panic_cpu check and call __crash_kexec directly.
	 */
	if (!_crash_kexec_post_notifiers) {
		__crash_kexec(NULL);

		/*
		 * Note smp_send_stop is the usual smp shutdown function, which
		 * unfortunately means it may not be hardened to work in a
		 * panic situation.
		 */
		smp_send_stop();
	} else {
		/*
		 * If we want to do crash dump after notifier calls and
		 * kmsg_dump, we will need architecture dependent extra
		 * works in addition to stopping other CPUs.
		 */
		crash_smp_send_stop();
	}

	/*
	 * Run any panic handlers, including those that might need to
	 * add information to the kmsg dump output.
	 */
	atomic_notifier_call_chain(&panic_notifier_list, 0, buf);

	panic_print_sys_info(false);

	kmsg_dump(KMSG_DUMP_PANIC);

	/*
	 * If you doubt kdump always works fine in any situation,
	 * "crash_kexec_post_notifiers" offers you a chance to run
	 * panic_notifiers and dumping kmsg before kdump.
	 * Note: since some panic_notifiers can make crashed kernel
	 * more unstable, it can increase risks of the kdump failure too.
	 *
	 * Bypass the panic_cpu check and call __crash_kexec directly.
	 */
	if (_crash_kexec_post_notifiers)
		__crash_kexec(NULL);

	console_unblank();

	/*
	 * We may have ended up stopping the CPU holding the lock (in
	 * smp_send_stop()) while still having some valuable data in the console
	 * buffer. Try to acquire the lock then release it regardless of the
	 * result. The release will also print the buffers out. Locks debug
	 * should be disabled to avoid reporting bad unlock balance when
	 * panic() is not being called from OOPS.
	 */
	debug_locks_off();
	console_flush_on_panic(CONSOLE_FLUSH_PENDING);

	panic_print_sys_info(true);

	if (!panic_blink)
		panic_blink = no_blink;

	if (panic_timeout > 0) {
		/*
		 * Delay timeout seconds before rebooting the machine.
		 * We can't use the "normal" timers since we just panicked.
		 */
		pr_emerg("Rebooting in %d seconds..\n", panic_timeout);

		for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) {
			touch_nmi_watchdog();
			if (i >= i_next) {
				i += panic_blink(state ^= 1);
				i_next = i + 3600 / PANIC_BLINK_SPD;
			}
			mdelay(PANIC_TIMER_STEP);
		}
	}
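	/*
	 * Note: a negative panic_timeout skips the delay above and reboots
	 * immediately via the branch below, while panic_timeout == 0 falls
	 * through to the infinite blink loop at the end of this function.
	 */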
	if (panic_timeout != 0) {
		/*
		 * This will not be a clean reboot, with everything
		 * shutting down. But if there is a chance of
		 * rebooting the system it will be rebooted.
		 */
		if (panic_reboot_mode != REBOOT_UNDEFINED)
			reboot_mode = panic_reboot_mode;
		emergency_restart();
	}
#ifdef __sparc__
	{
		extern int stop_a_enabled;
		/* Make sure the user can actually press Stop-A (L1-A) */
		stop_a_enabled = 1;
		pr_emerg("Press Stop-A (L1-A) from sun keyboard or send break\n"
			 "twice on console to return to the boot prom\n");
	}
#endif
#if defined(CONFIG_S390)
	disabled_wait();
#endif
	pr_emerg("---[ end Kernel panic - not syncing: %s ]---\n", buf);

	/* Do not scroll important messages printed above */
	suppress_printk = 1;
	local_irq_enable();
	for (i = 0; ; i += PANIC_TIMER_STEP) {
		touch_softlockup_watchdog();
		if (i >= i_next) {
			i += panic_blink(state ^= 1);
			i_next = i + 3600 / PANIC_BLINK_SPD;
		}
		mdelay(PANIC_TIMER_STEP);
	}
}

EXPORT_SYMBOL(panic);

/*
 * TAINT_FORCED_RMMOD could be a per-module flag but the module
 * is being removed anyway.
 */
const struct taint_flag taint_flags[TAINT_FLAGS_COUNT] = {
	[ TAINT_PROPRIETARY_MODULE ]	= { 'P', 'G', true },
	[ TAINT_FORCED_MODULE ]		= { 'F', ' ', true },
	[ TAINT_CPU_OUT_OF_SPEC ]	= { 'S', ' ', false },
	[ TAINT_FORCED_RMMOD ]		= { 'R', ' ', false },
	[ TAINT_MACHINE_CHECK ]		= { 'M', ' ', false },
	[ TAINT_BAD_PAGE ]		= { 'B', ' ', false },
	[ TAINT_USER ]			= { 'U', ' ', false },
	[ TAINT_DIE ]			= { 'D', ' ', false },
	[ TAINT_OVERRIDDEN_ACPI_TABLE ]	= { 'A', ' ', false },
	[ TAINT_WARN ]			= { 'W', ' ', false },
	[ TAINT_CRAP ]			= { 'C', ' ', true },
	[ TAINT_FIRMWARE_WORKAROUND ]	= { 'I', ' ', false },
	[ TAINT_OOT_MODULE ]		= { 'O', ' ', true },
	[ TAINT_UNSIGNED_MODULE ]	= { 'E', ' ', true },
	[ TAINT_SOFTLOCKUP ]		= { 'L', ' ', false },
	[ TAINT_LIVEPATCH ]		= { 'K', ' ', true },
	[ TAINT_AUX ]			= { 'X', ' ', true },
	[ TAINT_RANDSTRUCT ]		= { 'T', ' ', true },
	[ TAINT_TEST ]			= { 'N', ' ', true },
};
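
/*
 * Example of the output produced by print_tainted() below: with
 * TAINT_PROPRIETARY_MODULE and TAINT_OOT_MODULE set, the string starts
 * "Tainted: P" and carries an 'O' at the TAINT_OOT_MODULE position; when
 * the mask is non-zero but the proprietary flag is clear, that slot shows
 * its c_false character 'G'. A completely untainted kernel simply reports
 * "Not tainted".
 */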

/**
 * print_tainted - return a string to represent the kernel taint state.
 *
 * For individual taint flag meanings, see Documentation/admin-guide/sysctl/kernel.rst
 *
 * The string is overwritten by the next call to print_tainted(),
 * but is always NULL terminated.
 */
const char *print_tainted(void)
{
	static char buf[TAINT_FLAGS_COUNT + sizeof("Tainted: ")];

	BUILD_BUG_ON(ARRAY_SIZE(taint_flags) != TAINT_FLAGS_COUNT);

	if (tainted_mask) {
		char *s;
		int i;

		s = buf + sprintf(buf, "Tainted: ");
		for (i = 0; i < TAINT_FLAGS_COUNT; i++) {
			const struct taint_flag *t = &taint_flags[i];
			*s++ = test_bit(i, &tainted_mask) ?
				t->c_true : t->c_false;
		}
		*s = 0;
	} else
		snprintf(buf, sizeof(buf), "Not tainted");

	return buf;
}

int test_taint(unsigned flag)
{
	return test_bit(flag, &tainted_mask);
}
EXPORT_SYMBOL(test_taint);

unsigned long get_taint(void)
{
	return tainted_mask;
}

/**
 * add_taint: add a taint flag if not already set.
 * @flag: one of the TAINT_* constants.
 * @lockdep_ok: whether lock debugging is still OK.
 *
 * If something bad has gone wrong, you'll want @lockdep_ok = false, but for
 * some noteworthy-but-not-corrupting cases, it can be set to true.
 */
void add_taint(unsigned flag, enum lockdep_ok lockdep_ok)
{
	if (lockdep_ok == LOCKDEP_NOW_UNRELIABLE && __debug_locks_off())
		pr_warn("Disabling lock debugging due to kernel taint\n");

	set_bit(flag, &tainted_mask);

	if (tainted_mask & panic_on_taint) {
		panic_on_taint = 0;
		panic("panic_on_taint set ...");
	}
}
EXPORT_SYMBOL(add_taint);
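
/*
 * Note on the panic_on_taint check above: the mask is cleared before
 * calling panic() so that taints added while panicking cannot re-enter
 * panic(). The mask itself comes from the "panic_on_taint=" boot
 * parameter parsed in panic_on_taint_setup() at the bottom of this file;
 * e.g. a mask that includes TAINT_DIE turns the first event that sets the
 * 'D' taint (an oops) into a full panic.
 */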

static void spin_msec(int msecs)
{
	int i;

	for (i = 0; i < msecs; i++) {
		touch_nmi_watchdog();
		mdelay(1);
	}
}

/*
 * It just happens that oops_enter() and oops_exit() are identically
 * implemented...
 */
static void do_oops_enter_exit(void)
{
	unsigned long flags;
	static int spin_counter;

	if (!pause_on_oops)
		return;

	spin_lock_irqsave(&pause_on_oops_lock, flags);
	if (pause_on_oops_flag == 0) {
		/* This CPU may now print the oops message */
		pause_on_oops_flag = 1;
	} else {
		/* We need to stall this CPU */
		if (!spin_counter) {
			/* This CPU gets to do the counting */
			spin_counter = pause_on_oops;
			do {
				spin_unlock(&pause_on_oops_lock);
				spin_msec(MSEC_PER_SEC);
				spin_lock(&pause_on_oops_lock);
			} while (--spin_counter);
			pause_on_oops_flag = 0;
		} else {
			/* This CPU waits for a different one */
			while (spin_counter) {
				spin_unlock(&pause_on_oops_lock);
				spin_msec(1);
				spin_lock(&pause_on_oops_lock);
			}
		}
	}
	spin_unlock_irqrestore(&pause_on_oops_lock, flags);
}

/*
 * Return true if the calling CPU is allowed to print oops-related info.
 * This is a bit racy..
 */
bool oops_may_print(void)
{
	return pause_on_oops_flag == 0;
}

/*
 * Called when the architecture enters its oops handler, before it prints
 * anything. If this is the first CPU to oops, and it's oopsing the first
 * time then let it proceed.
 *
 * This is all enabled by the pause_on_oops kernel boot option. We do all
 * this to ensure that oopses don't scroll off the screen. It has the
 * side-effect of preventing later-oopsing CPUs from mucking up the display,
 * too.
 *
 * It turns out that the CPU which is allowed to print ends up pausing for
 * the right duration, whereas all the other CPUs pause for twice as long:
 * once in oops_enter(), once in oops_exit().
 */
void oops_enter(void)
{
	tracing_off();
	/* can't trust the integrity of the kernel anymore: */
	debug_locks_off();
	do_oops_enter_exit();

	if (sysctl_oops_all_cpu_backtrace)
		trigger_all_cpu_backtrace();
}

static void print_oops_end_marker(void)
{
	pr_warn("---[ end trace %016llx ]---\n", 0ULL);
}

/*
 * Called when the architecture exits its oops handler, after printing
 * everything.
 */
void oops_exit(void)
{
	do_oops_enter_exit();
	print_oops_end_marker();
	kmsg_dump(KMSG_DUMP_OOPS);
}

struct warn_args {
	const char *fmt;
	va_list args;
};

void __warn(const char *file, int line, void *caller, unsigned taint,
	    struct pt_regs *regs, struct warn_args *args)
{
	disable_trace_on_warning();

	if (file)
		pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS\n",
			raw_smp_processor_id(), current->pid, file, line,
			caller);
	else
		pr_warn("WARNING: CPU: %d PID: %d at %pS\n",
			raw_smp_processor_id(), current->pid, caller);

	if (args)
		vprintk(args->fmt, args->args);

	print_modules();

	if (regs)
		show_regs(regs);

	if (panic_on_warn)
		panic("panic_on_warn set ...\n");

	if (!regs)
		dump_stack();

	print_irqtrace_events(current);

	print_oops_end_marker();
	trace_error_report_end(ERROR_DETECTOR_WARN, (unsigned long)caller);

	/* Just a warning, don't kill lockdep. */
	add_taint(taint, LOCKDEP_STILL_OK);
}

#ifndef __WARN_FLAGS
void warn_slowpath_fmt(const char *file, int line, unsigned taint,
		       const char *fmt, ...)
{
	struct warn_args args;

	pr_warn(CUT_HERE);

	if (!fmt) {
		__warn(file, line, __builtin_return_address(0), taint,
		       NULL, NULL);
		return;
	}

	args.fmt = fmt;
	va_start(args.args, fmt);
	__warn(file, line, __builtin_return_address(0), taint, NULL, &args);
	va_end(args.args);
}
EXPORT_SYMBOL(warn_slowpath_fmt);
#else
void __warn_printk(const char *fmt, ...)
{
	va_list args;

	pr_warn(CUT_HERE);

	va_start(args, fmt);
	vprintk(fmt, args);
	va_end(args);
}
EXPORT_SYMBOL(__warn_printk);
#endif

#ifdef CONFIG_BUG

/* Support resetting WARN*_ONCE state */

static int clear_warn_once_set(void *data, u64 val)
{
	generic_bug_clear_once();
	memset(__start_once, 0, __end_once - __start_once);
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(clear_warn_once_fops, NULL, clear_warn_once_set,
			 "%lld\n");

static __init int register_warn_debugfs(void)
{
	/* Don't care about failure */
	debugfs_create_file_unsafe("clear_warn_once", 0200, NULL, NULL,
				   &clear_warn_once_fops);
	return 0;
}

device_initcall(register_warn_debugfs);
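
/*
 * With debugfs mounted in the usual place, writing any value, e.g.
 * "echo 1 > /sys/kernel/debug/clear_warn_once", re-arms all WARN_ONCE()
 * and WARN_ON_ONCE() sites so they can trigger again.
 */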
#endif

#ifdef CONFIG_STACKPROTECTOR

/*
 * Called when gcc's -fstack-protector feature is used, and
 * gcc detects corruption of the on-stack canary value
 */
__visible noinstr void __stack_chk_fail(void)
{
	instrumentation_begin();
	panic("stack-protector: Kernel stack is corrupted in: %pB",
	      __builtin_return_address(0));
	instrumentation_end();
}
EXPORT_SYMBOL(__stack_chk_fail);

#endif

core_param(panic, panic_timeout, int, 0644);
core_param(panic_print, panic_print, ulong, 0644);
core_param(pause_on_oops, pause_on_oops, int, 0644);
core_param(panic_on_warn, panic_on_warn, int, 0644);
core_param(crash_kexec_post_notifiers, crash_kexec_post_notifiers, bool, 0644);
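
/*
 * Each core_param() above accepts the same name on the kernel command line
 * (e.g. "panic=30" reboots 30 seconds after a panic) and, with mode 0644,
 * is also readable and writable at runtime under
 * /sys/module/kernel/parameters/.
 */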

static int __init oops_setup(char *s)
{
	if (!s)
		return -EINVAL;
	if (!strcmp(s, "panic"))
		panic_on_oops = 1;
	return 0;
}
early_param("oops", oops_setup);

static int __init panic_on_taint_setup(char *s)
{
	char *taint_str;

	if (!s)
		return -EINVAL;

	taint_str = strsep(&s, ",");
	if (kstrtoul(taint_str, 16, &panic_on_taint))
		return -EINVAL;

	/* make sure panic_on_taint doesn't hold out-of-range TAINT flags */
	panic_on_taint &= TAINT_FLAGS_MAX;

	if (!panic_on_taint)
		return -EINVAL;

	if (s && !strcmp(s, "nousertaint"))
		panic_on_taint_nousertaint = true;

	pr_info("panic_on_taint: bitmask=0x%lx nousertaint_mode=%sabled\n",
		panic_on_taint, panic_on_taint_nousertaint ? "en" : "dis");

	return 0;
}
early_param("panic_on_taint", panic_on_taint_setup);