// SPDX-License-Identifier: GPL-2.0
/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/tick.h>
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>
#include <linux/sched/isolation.h>
#include <linux/stop_machine.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>

static DEFINE_MUTEX(watchdog_mutex);

#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HARDLOCKUP_DETECTOR_SPARC64)
# define WATCHDOG_HARDLOCKUP_DEFAULT	1
#else
# define WATCHDOG_HARDLOCKUP_DEFAULT	0
#endif

unsigned long __read_mostly watchdog_enabled;
int __read_mostly watchdog_user_enabled = 1;
static int __read_mostly watchdog_hardlockup_user_enabled = WATCHDOG_HARDLOCKUP_DEFAULT;
static int __read_mostly watchdog_softlockup_user_enabled = 1;
int __read_mostly watchdog_thresh = 10;
static int __read_mostly watchdog_hardlockup_available;

struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);

#ifdef CONFIG_HARDLOCKUP_DETECTOR

# ifdef CONFIG_SMP
int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
# endif /* CONFIG_SMP */

/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
unsigned int __read_mostly hardlockup_panic =
			IS_ENABLED(CONFIG_BOOTPARAM_HARDLOCKUP_PANIC);
/*
 * We may not want to enable hard lockup detection by default in all cases,
 * for example when running the kernel as a guest on a hypervisor. In these
 * cases this function can be called to disable hard lockup detection. This
 * function should only be executed once by the boot processor before the
 * kernel command line parameters are parsed, because otherwise it is not
 * possible to override this in hardlockup_panic_setup().
 */
void __init hardlockup_detector_disable(void)
{
	watchdog_hardlockup_user_enabled = 0;
}

static int __init hardlockup_panic_setup(char *str)
{
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	else if (!strncmp(str, "nopanic", 7))
		hardlockup_panic = 0;
	else if (!strncmp(str, "0", 1))
		watchdog_hardlockup_user_enabled = 0;
	else if (!strncmp(str, "1", 1))
		watchdog_hardlockup_user_enabled = 1;
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
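
/*
 * For example, booting with "nmi_watchdog=panic" requests a panic on hard
 * lockup, "nmi_watchdog=nopanic" a warning only, and "nmi_watchdog=0" /
 * "nmi_watchdog=1" disable/enable the hard lockup detector; each maps onto
 * one of the strncmp() branches in hardlockup_panic_setup() above.
 */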

#endif /* CONFIG_HARDLOCKUP_DETECTOR */

#if defined(CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER)

static DEFINE_PER_CPU(atomic_t, hrtimer_interrupts);
static DEFINE_PER_CPU(int, hrtimer_interrupts_saved);
static DEFINE_PER_CPU(bool, watchdog_hardlockup_warned);
static DEFINE_PER_CPU(bool, watchdog_hardlockup_touched);
static unsigned long watchdog_hardlockup_all_cpu_dumped;

notrace void arch_touch_nmi_watchdog(void)
{
	/*
	 * Using __raw here because some code paths have
	 * preemption enabled.  If preemption is enabled
	 * then interrupts should be enabled too, in which
	 * case we shouldn't have to worry about the watchdog
	 * going off.
	 */
	raw_cpu_write(watchdog_hardlockup_touched, true);
}
EXPORT_SYMBOL(arch_touch_nmi_watchdog);

void watchdog_hardlockup_touch_cpu(unsigned int cpu)
{
	per_cpu(watchdog_hardlockup_touched, cpu) = true;
}

static bool is_hardlockup(unsigned int cpu)
{
	int hrint = atomic_read(&per_cpu(hrtimer_interrupts, cpu));

	if (per_cpu(hrtimer_interrupts_saved, cpu) == hrint)
		return true;

	/*
	 * NOTE: we don't need any fancy atomic_t or READ_ONCE/WRITE_ONCE
	 * for hrtimer_interrupts_saved. hrtimer_interrupts_saved is
	 * written/read by a single CPU.
	 */
	per_cpu(hrtimer_interrupts_saved, cpu) = hrint;

	return false;
}
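
/*
 * Worked example: with the defaults, watchdog_timer_fn() fires roughly every
 * 4 seconds and bumps hrtimer_interrupts via watchdog_hardlockup_kick(). If
 * two consecutive hardlockup checks on a CPU observe the same counter value,
 * that CPU has not serviced its timer interrupt for a whole sample period
 * and is_hardlockup() reports true.
 */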

static void watchdog_hardlockup_kick(void)
{
	int new_interrupts;

	new_interrupts = atomic_inc_return(this_cpu_ptr(&hrtimer_interrupts));
	watchdog_buddy_check_hardlockup(new_interrupts);
}

void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs)
{
	if (per_cpu(watchdog_hardlockup_touched, cpu)) {
		per_cpu(watchdog_hardlockup_touched, cpu) = false;
		return;
	}

	/*
	 * Check for a hardlockup by making sure the CPU's timer
	 * interrupt is incrementing. The timer interrupt should have
	 * fired multiple times before we overflowed. If it hasn't,
	 * this is a good indication that the CPU is stuck.
	 */
	if (is_hardlockup(cpu)) {
		unsigned int this_cpu = smp_processor_id();

		/* Only print hardlockups once. */
		if (per_cpu(watchdog_hardlockup_warned, cpu))
			return;

		pr_emerg("Watchdog detected hard LOCKUP on cpu %d\n", cpu);
		print_modules();
		print_irqtrace_events(current);
		if (cpu == this_cpu) {
			if (regs)
				show_regs(regs);
			else
				dump_stack();
		} else {
			trigger_single_cpu_backtrace(cpu);
		}

		/*
		 * Perform the multi-CPU dump only once to avoid multiple
		 * hardlockups generating interleaving traces.
		 */
		if (sysctl_hardlockup_all_cpu_backtrace &&
		    !test_and_set_bit(0, &watchdog_hardlockup_all_cpu_dumped))
			trigger_allbutcpu_cpu_backtrace(cpu);

		if (hardlockup_panic)
			nmi_panic(regs, "Hard LOCKUP");

		per_cpu(watchdog_hardlockup_warned, cpu) = true;
	} else {
		per_cpu(watchdog_hardlockup_warned, cpu) = false;
	}
}

#else /* CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER */

static inline void watchdog_hardlockup_kick(void) { }

#endif /* !CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER */

/*
 * These functions can be overridden based on the configured hardlockup detector.
 *
 * watchdog_hardlockup_enable/disable can be implemented to start and stop
 * when the softlockup watchdog starts and stops. The detector must select
 * the SOFTLOCKUP_DETECTOR Kconfig.
 */
void __weak watchdog_hardlockup_enable(unsigned int cpu) { }

void __weak watchdog_hardlockup_disable(unsigned int cpu) { }

/*
 * Watchdog-detector specific API.
 *
 * Return 0 when the hardlockup watchdog is available, a negative value
 * otherwise. Note that a negative value means that a delayed probe might
 * succeed later.
 */
int __weak __init watchdog_hardlockup_probe(void)
{
	return -ENODEV;
}

/**
 * watchdog_hardlockup_stop - Stop the watchdog for reconfiguration
 *
 * The reconfiguration steps are:
 * watchdog_hardlockup_stop();
 * update_variables();
 * watchdog_hardlockup_start();
 */
void __weak watchdog_hardlockup_stop(void) { }

/**
 * watchdog_hardlockup_start - Start the watchdog after reconfiguration
 *
 * Counterpart to watchdog_hardlockup_stop().
 *
 * The following variables have been updated in update_variables() and
 * contain the currently valid configuration:
 * - watchdog_enabled
 * - watchdog_thresh
 * - watchdog_cpumask
 */
void __weak watchdog_hardlockup_start(void) { }

/**
 * lockup_detector_update_enable - Update the sysctl enable bit
 *
 * Caller needs to make sure that the hard watchdogs are off, so this
 * can't race with watchdog_hardlockup_disable().
 */
static void lockup_detector_update_enable(void)
{
	watchdog_enabled = 0;
	if (!watchdog_user_enabled)
		return;
	if (watchdog_hardlockup_available && watchdog_hardlockup_user_enabled)
		watchdog_enabled |= WATCHDOG_HARDLOCKUP_ENABLED;
	if (watchdog_softlockup_user_enabled)
		watchdog_enabled |= WATCHDOG_SOFTOCKUP_ENABLED;
}
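
/*
 * Note that watchdog_enabled is a bitmask: with both detectors available and
 * enabled it holds WATCHDOG_HARDLOCKUP_ENABLED | WATCHDOG_SOFTOCKUP_ENABLED
 * (the bit values come from <linux/nmi.h>), while clearing
 * watchdog_user_enabled zeroes the whole mask regardless of the
 * per-detector flags.
 */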

#ifdef CONFIG_SOFTLOCKUP_DETECTOR

/*
 * Delay the softlockup report when running known slow code.
 * It does _not_ affect the timestamp of the last successful reschedule.
 */
#define SOFTLOCKUP_DELAY_REPORT	ULONG_MAX

#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;
#endif

static struct cpumask watchdog_allowed_mask __read_mostly;

/* Global variables, exported for sysctl */
unsigned int __read_mostly softlockup_panic =
			IS_ENABLED(CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC);

static bool softlockup_initialized __read_mostly;
static u64 __read_mostly sample_period;

/* Timestamp taken after the last successful reschedule. */
static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
/* Timestamp of the last softlockup report. */
static DEFINE_PER_CPU(unsigned long, watchdog_report_ts);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static unsigned long soft_lockup_nmi_warn;

static int __init nowatchdog_setup(char *str)
{
	watchdog_user_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);

static int __init nosoftlockup_setup(char *str)
{
	watchdog_softlockup_user_enabled = 0;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);

static int __init watchdog_thresh_setup(char *str)
{
	get_option(&str, &watchdog_thresh);
	return 1;
}
__setup("watchdog_thresh=", watchdog_thresh_setup);
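
/*
 * For example, booting with "watchdog_thresh=30" raises the hard lockup
 * threshold to 30 seconds and, via get_softlockup_thresh() below, the soft
 * lockup threshold to 60 seconds; "nowatchdog" disables both detectors and
 * "nosoftlockup" only the soft one.
 */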

static void __lockup_detector_cleanup(void);

/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions, so they
 * generally warrant a higher threshold than hard lockups. The two thresholds
 * are therefore coupled by a factor: the soft threshold is twice the hard
 * threshold.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}
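
/*
 * Worked example: with the default watchdog_thresh of 10, hard lockups are
 * reported after roughly 10 seconds, while soft lockups are reported only
 * after 2 * 10 = 20 seconds without a successful reschedule.
 */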

/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(void)
{
	return running_clock() >> 30LL; /* 2^30 ~= 10^9 */
}
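
/*
 * The shift trades accuracy for speed: dividing by 2^30 instead of 10^9
 * undercounts by about 7% (60 real seconds read as roughly 56 "watchdog
 * seconds"), which is harmless for thresholds measured in tens of seconds.
 */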

static void set_sample_period(void)
{
	/*
	 * Convert watchdog_thresh from seconds to ns. The divide by 5 gives
	 * the hrtimer several chances (two or three with the current relation
	 * between the soft and hard thresholds) to increment before the
	 * hardlockup detector generates a warning.
	 */
	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
	watchdog_update_hrtimer_threshold(sample_period);
}
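
/*
 * Worked example: with the default watchdog_thresh of 10,
 * get_softlockup_thresh() returns 20, so sample_period becomes
 * 20 * (10^9 / 5) ns = 4 * 10^9 ns, i.e. the watchdog hrtimer fires
 * five times per 20-second soft lockup window.
 */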

static void update_report_ts(void)
{
	__this_cpu_write(watchdog_report_ts, get_timestamp());
}

/* Commands for resetting the watchdog */
static void update_touch_ts(void)
{
	__this_cpu_write(watchdog_touch_ts, get_timestamp());
	update_report_ts();
}

/**
 * touch_softlockup_watchdog_sched - touch watchdog on scheduler stalls
 *
 * Call when the scheduler may have stalled for legitimate reasons
 * preventing the watchdog task from executing - e.g. the scheduler
 * entering idle state.  This should only be used for scheduler events.
 * Use touch_softlockup_watchdog() for everything else.
 */
notrace void touch_softlockup_watchdog_sched(void)
{
	/*
	 * Preemption can be enabled.  It doesn't matter which CPU's watchdog
	 * report period gets restarted here, so use the raw_ operation.
	 */
	raw_cpu_write(watchdog_report_ts, SOFTLOCKUP_DELAY_REPORT);
}

notrace void touch_softlockup_watchdog(void)
{
	touch_softlockup_watchdog_sched();
	wq_watchdog_touch(raw_smp_processor_id());
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * watchdog_mutex cannot be taken here, as this might be called
	 * from (soft)interrupt context, so the access to
	 * watchdog_allowed_cpumask might race with a concurrent update.
	 *
	 * The watchdog time stamp can race against a concurrent real
	 * update as well; the only side effect might be a cycle delay for
	 * the softlockup check.
	 */
	for_each_cpu(cpu, &watchdog_allowed_mask) {
		per_cpu(watchdog_report_ts, cpu) = SOFTLOCKUP_DELAY_REPORT;
		wq_watchdog_touch(cpu);
	}
}

void touch_softlockup_watchdog_sync(void)
{
	__this_cpu_write(softlockup_touch_sync, true);
	__this_cpu_write(watchdog_report_ts, SOFTLOCKUP_DELAY_REPORT);
}

static int is_softlockup(unsigned long touch_ts,
			 unsigned long period_ts,
			 unsigned long now)
{
	if ((watchdog_enabled & WATCHDOG_SOFTOCKUP_ENABLED) && watchdog_thresh) {
		/* Warn about unreasonable delays. */
		if (time_after(now, period_ts + get_softlockup_thresh()))
			return now - touch_ts;
	}
	return 0;
}
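
/*
 * Worked example: with watchdog_thresh = 10 the soft threshold is 20s. If
 * the current report period started at period_ts = 100 and the last
 * successful reschedule happened at touch_ts = 95, a check at now = 121
 * satisfies time_after() and yields a duration of 121 - 95 = 26 seconds,
 * the value printed by the "stuck for NNs!" message below.
 */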

/* watchdog detector functions */
static DEFINE_PER_CPU(struct completion, softlockup_completion);
static DEFINE_PER_CPU(struct cpu_stop_work, softlockup_stop_work);

/*
 * The watchdog feed function - touches the timestamp.
 *
 * It only runs once every sample_period (4 seconds by default) to
 * reset the softlockup timestamp. If this gets delayed for more than
 * 2*watchdog_thresh seconds then the debug-printout triggers in
 * watchdog_timer_fn().
 */
static int softlockup_fn(void *data)
{
	update_touch_ts();
	complete(this_cpu_ptr(&softlockup_completion));

	return 0;
}

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts, period_ts, now;
	struct pt_regs *regs = get_irq_regs();
	int duration;
	int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;

	if (!watchdog_enabled)
		return HRTIMER_NORESTART;

	watchdog_hardlockup_kick();

	/* kick the softlockup detector */
	if (completion_done(this_cpu_ptr(&softlockup_completion))) {
		reinit_completion(this_cpu_ptr(&softlockup_completion));
		stop_one_cpu_nowait(smp_processor_id(),
				softlockup_fn, NULL,
				this_cpu_ptr(&softlockup_stop_work));
	}

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	/*
	 * Read the current timestamp first. It might become invalid anytime
	 * when a virtual machine is stopped by the host or when the watchdog
	 * is touched from NMI.
	 */
	now = get_timestamp();
	/*
	 * If a virtual machine is stopped by the host it can look to
	 * the watchdog like a soft lockup. This function touches the watchdog.
	 */
	kvm_check_and_clear_guest_paused();
	/*
	 * The stored timestamp is comparable with @now only when not touched.
	 * It might get touched anytime from NMI. Make sure that is_softlockup()
	 * uses the same (valid) value.
	 */
	period_ts = READ_ONCE(*this_cpu_ptr(&watchdog_report_ts));

	/* Reset the interval when touched by known problematic code. */
	if (period_ts == SOFTLOCKUP_DELAY_REPORT) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		update_report_ts();
		return HRTIMER_RESTART;
	}

	/* Check for a softlockup. */
	touch_ts = __this_cpu_read(watchdog_touch_ts);
	duration = is_softlockup(touch_ts, period_ts, now);
	if (unlikely(duration)) {
		/*
		 * Prevent multiple soft-lockup reports if one cpu is already
		 * engaged in dumping all cpu back traces.
		 */
		if (softlockup_all_cpu_backtrace) {
			if (test_and_set_bit_lock(0, &soft_lockup_nmi_warn))
				return HRTIMER_RESTART;
		}

		/* Start period for the next softlockup warning. */
		update_report_ts();

		pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_all_cpu_backtrace) {
			trigger_allbutcpu_cpu_backtrace(smp_processor_id());
			clear_bit_unlock(0, &soft_lockup_nmi_warn);
		}

		add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
		if (softlockup_panic)
			panic("softlockup: hung tasks");
	}

	return HRTIMER_RESTART;
}

static void watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);
	struct completion *done = this_cpu_ptr(&softlockup_completion);

	WARN_ON_ONCE(cpu != smp_processor_id());

	init_completion(done);
	complete(done);

	/*
	 * Start the timer first to prevent the hardlockup watchdog triggering
	 * before the timer has a chance to fire.
	 */
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	hrtimer->function = watchdog_timer_fn;
	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
		      HRTIMER_MODE_REL_PINNED_HARD);

	/* Initialize timestamp */
	update_touch_ts();
	/* Enable the hardlockup detector */
	if (watchdog_enabled & WATCHDOG_HARDLOCKUP_ENABLED)
		watchdog_hardlockup_enable(cpu);
}

static void watchdog_disable(unsigned int cpu)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);

	WARN_ON_ONCE(cpu != smp_processor_id());

	/*
	 * Disable the hardlockup detector first. That prevents a large delay
	 * between disabling the timer and disabling the hardlockup detector
	 * from causing a false positive.
	 */
	watchdog_hardlockup_disable(cpu);
	hrtimer_cancel(hrtimer);
	wait_for_completion(this_cpu_ptr(&softlockup_completion));
}

static int softlockup_stop_fn(void *data)
{
	watchdog_disable(smp_processor_id());
	return 0;
}

static void softlockup_stop_all(void)
{
	int cpu;

	if (!softlockup_initialized)
		return;

	for_each_cpu(cpu, &watchdog_allowed_mask)
		smp_call_on_cpu(cpu, softlockup_stop_fn, NULL, false);

	cpumask_clear(&watchdog_allowed_mask);
}

static int softlockup_start_fn(void *data)
{
	watchdog_enable(smp_processor_id());
	return 0;
}

static void softlockup_start_all(void)
{
	int cpu;

	cpumask_copy(&watchdog_allowed_mask, &watchdog_cpumask);
	for_each_cpu(cpu, &watchdog_allowed_mask)
		smp_call_on_cpu(cpu, softlockup_start_fn, NULL, false);
}

int lockup_detector_online_cpu(unsigned int cpu)
{
	if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
		watchdog_enable(cpu);
	return 0;
}

int lockup_detector_offline_cpu(unsigned int cpu)
{
	if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
		watchdog_disable(cpu);
	return 0;
}

static void __lockup_detector_reconfigure(void)
{
	cpus_read_lock();
	watchdog_hardlockup_stop();

	softlockup_stop_all();
	set_sample_period();
	lockup_detector_update_enable();
	if (watchdog_enabled && watchdog_thresh)
		softlockup_start_all();

	watchdog_hardlockup_start();
	cpus_read_unlock();
	/*
	 * Must be called outside the cpus locked section to prevent
	 * recursive locking in the perf code.
	 */
	__lockup_detector_cleanup();
}

void lockup_detector_reconfigure(void)
{
	mutex_lock(&watchdog_mutex);
	__lockup_detector_reconfigure();
	mutex_unlock(&watchdog_mutex);
}

/*
 * Create the watchdog infrastructure and configure the detector(s).
 */
static __init void lockup_detector_setup(void)
{
	/*
	 * If sysctl is off and watchdog got disabled on the command line,
	 * nothing to do here.
	 */
	lockup_detector_update_enable();

	if (!IS_ENABLED(CONFIG_SYSCTL) &&
	    !(watchdog_enabled && watchdog_thresh))
		return;

	mutex_lock(&watchdog_mutex);
	__lockup_detector_reconfigure();
	softlockup_initialized = true;
	mutex_unlock(&watchdog_mutex);
}

#else /* CONFIG_SOFTLOCKUP_DETECTOR */
static void __lockup_detector_reconfigure(void)
{
	cpus_read_lock();
	watchdog_hardlockup_stop();
	lockup_detector_update_enable();
	watchdog_hardlockup_start();
	cpus_read_unlock();
}
void lockup_detector_reconfigure(void)
{
	__lockup_detector_reconfigure();
}
static inline void lockup_detector_setup(void)
{
	__lockup_detector_reconfigure();
}
#endif /* !CONFIG_SOFTLOCKUP_DETECTOR */

static void __lockup_detector_cleanup(void)
{
	lockdep_assert_held(&watchdog_mutex);
	hardlockup_detector_perf_cleanup();
}

/**
 * lockup_detector_cleanup - Cleanup after cpu hotplug or sysctl changes
 *
 * Caller must not hold the cpu hotplug rwsem.
 */
void lockup_detector_cleanup(void)
{
	mutex_lock(&watchdog_mutex);
	__lockup_detector_cleanup();
	mutex_unlock(&watchdog_mutex);
}

/**
 * lockup_detector_soft_poweroff - Interface to stop lockup detector(s)
 *
 * Special interface for parisc. It prevents lockup detector warnings from
 * the default pm_power_off() function which busy loops forever.
 */
void lockup_detector_soft_poweroff(void)
{
	watchdog_enabled = 0;
}

#ifdef CONFIG_SYSCTL

/* Propagate any changes to the watchdog infrastructure */
static void proc_watchdog_update(void)
{
	/* Remove impossible cpus to keep sysctl output clean. */
	cpumask_and(&watchdog_cpumask, &watchdog_cpumask, cpu_possible_mask);
	__lockup_detector_reconfigure();
}

/*
 * common function for watchdog, nmi_watchdog and soft_watchdog parameter
 *
 * caller             | table->data points to            | 'which'
 * -------------------|----------------------------------|-------------------------------
 * proc_watchdog      | watchdog_user_enabled            | WATCHDOG_HARDLOCKUP_ENABLED |
 *                    |                                  | WATCHDOG_SOFTOCKUP_ENABLED
 * -------------------|----------------------------------|-------------------------------
 * proc_nmi_watchdog  | watchdog_hardlockup_user_enabled | WATCHDOG_HARDLOCKUP_ENABLED
 * -------------------|----------------------------------|-------------------------------
 * proc_soft_watchdog | watchdog_softlockup_user_enabled | WATCHDOG_SOFTOCKUP_ENABLED
 */
static int proc_watchdog_common(int which, struct ctl_table *table, int write,
				void *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old, *param = table->data;

	mutex_lock(&watchdog_mutex);

	if (!write) {
		/*
		 * On read synchronize the userspace interface. This is a
		 * racy snapshot.
		 */
		*param = (watchdog_enabled & which) != 0;
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	} else {
		old = READ_ONCE(*param);
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
		if (!err && old != READ_ONCE(*param))
			proc_watchdog_update();
	}
	mutex_unlock(&watchdog_mutex);
	return err;
}

/*
 * /proc/sys/kernel/watchdog
 */
int proc_watchdog(struct ctl_table *table, int write,
		  void *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(WATCHDOG_HARDLOCKUP_ENABLED |
				    WATCHDOG_SOFTOCKUP_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/nmi_watchdog
 */
int proc_nmi_watchdog(struct ctl_table *table, int write,
		      void *buffer, size_t *lenp, loff_t *ppos)
{
	if (!watchdog_hardlockup_available && write)
		return -ENOTSUPP;
	return proc_watchdog_common(WATCHDOG_HARDLOCKUP_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/soft_watchdog
 */
int proc_soft_watchdog(struct ctl_table *table, int write,
		       void *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(WATCHDOG_SOFTOCKUP_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/watchdog_thresh
 */
int proc_watchdog_thresh(struct ctl_table *table, int write,
			 void *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old;

	mutex_lock(&watchdog_mutex);

	old = READ_ONCE(watchdog_thresh);
	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (!err && write && old != READ_ONCE(watchdog_thresh))
		proc_watchdog_update();

	mutex_unlock(&watchdog_mutex);
	return err;
}

/*
 * The cpumask is the mask of possible cpus that the watchdog can run
 * on, not the mask of cpus it is actually running on.  This allows the
 * user to specify a mask that will include cpus that have not yet
 * been brought online, if desired.
 */
int proc_watchdog_cpumask(struct ctl_table *table, int write,
			  void *buffer, size_t *lenp, loff_t *ppos)
{
	int err;

	mutex_lock(&watchdog_mutex);

	err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
	if (!err && write)
		proc_watchdog_update();

	mutex_unlock(&watchdog_mutex);
	return err;
}
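
/*
 * Usage example: the bitmap is parsed by proc_do_large_bitmap(), which
 * accepts cpu range lists, so e.g.
 *
 *	echo 0-3,8 > /proc/sys/kernel/watchdog_cpumask
 *
 * restricts the watchdog to CPUs 0-3 and 8 after the next reconfiguration.
 */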

static const int sixty = 60;

static struct ctl_table watchdog_sysctls[] = {
	{
		.procname	= "watchdog",
		.data		= &watchdog_user_enabled,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_watchdog,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{
		.procname	= "watchdog_thresh",
		.data		= &watchdog_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_watchdog_thresh,
		.extra1		= SYSCTL_ZERO,
		.extra2		= (void *)&sixty,
	},
	{
		.procname	= "watchdog_cpumask",
		.data		= &watchdog_cpumask_bits,
		.maxlen		= NR_CPUS,
		.mode		= 0644,
		.proc_handler	= proc_watchdog_cpumask,
	},
#ifdef CONFIG_SOFTLOCKUP_DETECTOR
	{
		.procname	= "soft_watchdog",
		.data		= &watchdog_softlockup_user_enabled,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_soft_watchdog,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{
		.procname	= "softlockup_panic",
		.data		= &softlockup_panic,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
#ifdef CONFIG_SMP
	{
		.procname	= "softlockup_all_cpu_backtrace",
		.data		= &sysctl_softlockup_all_cpu_backtrace,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
#endif /* CONFIG_SMP */
#endif
#ifdef CONFIG_HARDLOCKUP_DETECTOR
	{
		.procname	= "hardlockup_panic",
		.data		= &hardlockup_panic,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
#ifdef CONFIG_SMP
	{
		.procname	= "hardlockup_all_cpu_backtrace",
		.data		= &sysctl_hardlockup_all_cpu_backtrace,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
#endif /* CONFIG_SMP */
#endif
	{}
};

static struct ctl_table watchdog_hardlockup_sysctl[] = {
	{
		.procname	= "nmi_watchdog",
		.data		= &watchdog_hardlockup_user_enabled,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= proc_nmi_watchdog,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{}
};

static void __init watchdog_sysctl_init(void)
{
	register_sysctl_init("kernel", watchdog_sysctls);

	if (watchdog_hardlockup_available)
		watchdog_hardlockup_sysctl[0].mode = 0644;
	register_sysctl_init("kernel", watchdog_hardlockup_sysctl);
}
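
/*
 * Note that "nmi_watchdog" is registered read-only (0444) and only flipped
 * to 0644 once a hard lockup detector has successfully probed, so reading
 * /proc/sys/kernel/nmi_watchdog always works, while writes on systems
 * without a hardlockup detector fail (proc_nmi_watchdog() additionally
 * returns -ENOTSUPP in that case).
 */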

#else
#define watchdog_sysctl_init() do { } while (0)
#endif /* CONFIG_SYSCTL */

static void __init lockup_detector_delay_init(struct work_struct *work);
static bool allow_lockup_detector_init_retry __initdata;

static struct work_struct detector_work __initdata =
		__WORK_INITIALIZER(detector_work, lockup_detector_delay_init);

static void __init lockup_detector_delay_init(struct work_struct *work)
{
	int ret;

	ret = watchdog_hardlockup_probe();
	if (ret) {
		pr_info("Delayed init of the lockup detector failed: %d\n", ret);
		pr_info("Hard watchdog permanently disabled\n");
		return;
	}

	allow_lockup_detector_init_retry = false;

	watchdog_hardlockup_available = true;
	lockup_detector_setup();
}

/*
 * lockup_detector_retry_init - retry lockup detector init if possible.
 *
 * Retry hardlockup detector init. It is useful when the hardlockup detector
 * depends on some functionality that has to be initialized later on a
 * particular platform.
 */
void __init lockup_detector_retry_init(void)
{
	/* Must be called before late init calls. */
	if (!allow_lockup_detector_init_retry)
		return;

	schedule_work(&detector_work);
}

/*
 * Ensure that the optional delayed hardlockup init has completed before
 * the init code and memory are freed.
 */
static int __init lockup_detector_check(void)
{
	/* Prevent any later retry. */
	allow_lockup_detector_init_retry = false;

	/* Make sure no work is pending. */
	flush_work(&detector_work);

	watchdog_sysctl_init();

	return 0;
}
late_initcall_sync(lockup_detector_check);

void __init lockup_detector_init(void)
{
	if (tick_nohz_full_enabled())
		pr_info("Disabling watchdog on nohz_full cores by default\n");

	cpumask_copy(&watchdog_cpumask,
		     housekeeping_cpumask(HK_TYPE_TIMER));

	if (!watchdog_hardlockup_probe())
		watchdog_hardlockup_available = true;
	else
		allow_lockup_detector_init_retry = true;

	lockup_detector_setup();
}