// SPDX-License-Identifier: GPL-2.0
/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "watchdog: " fmt

#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/smpboot.h>
#include <linux/sched/rt.h>
#include <uapi/linux/sched/types.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
#include <linux/kthread.h>
static DEFINE_MUTEX(watchdog_mutex);
#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HAVE_NMI_WATCHDOG) \
	|| defined(CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU)
# define WATCHDOG_DEFAULT	(SOFT_WATCHDOG_ENABLED | NMI_WATCHDOG_ENABLED)
# define NMI_WATCHDOG_DEFAULT	1
#else
# define WATCHDOG_DEFAULT	(SOFT_WATCHDOG_ENABLED)
# define NMI_WATCHDOG_DEFAULT	0
#endif
unsigned long __read_mostly watchdog_enabled;
int __read_mostly watchdog_user_enabled = 1;
int __read_mostly nmi_watchdog_user_enabled = NMI_WATCHDOG_DEFAULT;
int __read_mostly soft_watchdog_user_enabled = 1;
int __read_mostly watchdog_thresh = 10;
int __read_mostly nmi_watchdog_available;
#if defined(CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU)
int __read_mostly watchdog_other_cpu_available = WATCHDOG_DEFAULT;
#endif

struct cpumask watchdog_allowed_mask __read_mostly;

struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU)
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
unsigned int __read_mostly hardlockup_panic =
			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
/*
 * We may not want to enable hard lockup detection by default in all cases,
 * for example when running the kernel as a guest on a hypervisor. In these
 * cases this function can be called to disable hard lockup detection. This
 * function should only be executed once by the boot processor before the
 * kernel command line parameters are parsed, because otherwise it is not
 * possible to override this in hardlockup_panic_setup().
 */
void __init hardlockup_detector_disable(void)
{
	nmi_watchdog_user_enabled = 0;
}

static int __init hardlockup_panic_setup(char *str)
{
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	else if (!strncmp(str, "nopanic", 7))
		hardlockup_panic = 0;
	else if (!strncmp(str, "0", 1))
		nmi_watchdog_user_enabled = 0;
	else if (!strncmp(str, "1", 1))
		nmi_watchdog_user_enabled = 1;
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);

# ifdef CONFIG_SMP
int __read_mostly sysctl_hardlockup_all_cpu_backtrace;

static int __init hardlockup_all_cpu_backtrace_setup(char *str)
{
	sysctl_hardlockup_all_cpu_backtrace = !!simple_strtol(str, NULL, 0);
	return 1;
}
__setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup);
# endif /* CONFIG_SMP */
#endif /* CONFIG_HARDLOCKUP_DETECTOR */
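/*
 * Illustrative examples (derived from the __setup() hooks above): the hard
 * lockup detector can be tuned from the kernel command line, e.g.
 * "nmi_watchdog=panic" to panic on a hard lockup, "nmi_watchdog=0" to
 * disable the detector, and on SMP kernels "hardlockup_all_cpu_backtrace=1"
 * to dump backtraces of all CPUs when a hard lockup is reported.
 */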
/*
 * These functions can be overridden if an architecture implements its
 * own hardlockup detector.
 *
 * watchdog_nmi_enable/disable can be implemented to start and stop when
 * softlockup watchdog threads start and stop. The arch must select the
 * SOFTLOCKUP_DETECTOR Kconfig.
 */
#ifdef CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU
static int watchdog_nmi_enable(unsigned int cpu);
static void watchdog_nmi_disable(unsigned int cpu);
#else
int __weak watchdog_nmi_enable(unsigned int cpu)
{
	hardlockup_detector_perf_enable();
	return 0;
}

void __weak watchdog_nmi_disable(unsigned int cpu)
{
	hardlockup_detector_perf_disable();
}
#endif

/* Return 0, if a NMI watchdog is available. Error code otherwise */
int __weak __init watchdog_nmi_probe(void)
{
	return hardlockup_detector_perf_init();
}
/**
 * watchdog_nmi_stop - Stop the watchdog for reconfiguration
 *
 * The reconfiguration steps are:
 * watchdog_nmi_stop();
 * update_variables();
 * watchdog_nmi_start();
 */
void __weak watchdog_nmi_stop(void) { }

/**
 * watchdog_nmi_start - Start the watchdog after reconfiguration
 *
 * Counterpart to watchdog_nmi_stop().
 *
 * The following variables have been updated in update_variables() and
 * contain the currently valid configuration:
 * - watchdog_enabled
 * - watchdog_thresh
 * - watchdog_cpumask
 */
void __weak watchdog_nmi_start(void) { }
/**
 * lockup_detector_update_enable - Update the sysctl enable bit
 *
 * Caller needs to make sure that the NMI/perf watchdogs are off, so this
 * can't race with watchdog_nmi_disable().
 */
static void lockup_detector_update_enable(void)
{
	watchdog_enabled = 0;
	if (!watchdog_user_enabled)
		return;
#if defined(CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU)
	if (watchdog_other_cpu_available && nmi_watchdog_user_enabled)
		watchdog_enabled |= NMI_WATCHDOG_ENABLED;
#endif
	if (nmi_watchdog_available && nmi_watchdog_user_enabled)
		watchdog_enabled |= NMI_WATCHDOG_ENABLED;
	if (soft_watchdog_user_enabled)
		watchdog_enabled |= SOFT_WATCHDOG_ENABLED;
}
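/*
 * Worked example (assuming the NMI_WATCHDOG_ENABLED/SOFT_WATCHDOG_ENABLED
 * bit definitions from <linux/nmi.h>): with all defaults and an available
 * hard lockup detector, the function above leaves
 *
 *	watchdog_enabled == NMI_WATCHDOG_ENABLED | SOFT_WATCHDOG_ENABLED
 *
 * whereas booting with "nowatchdog" clears watchdog_user_enabled and
 * leaves watchdog_enabled == 0, disabling both detectors.
 */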
#ifdef CONFIG_SOFTLOCKUP_DETECTOR

/* Global variables, exported for sysctl */
unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static bool softlockup_threads_initialized __read_mostly;
static u64 __read_mostly sample_period;
static unsigned long __read_mostly hardlockup_thresh;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(unsigned long, hardlockup_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static unsigned long soft_lockup_nmi_warn;
static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);
	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
	watchdog_user_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);

static int __init nosoftlockup_setup(char *str)
{
	soft_watchdog_user_enabled = 0;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);

#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;

static int __init softlockup_all_cpu_backtrace_setup(char *str)
{
	sysctl_softlockup_all_cpu_backtrace = !!simple_strtol(str, NULL, 0);
	return 1;
}
__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
#endif

static void __lockup_detector_cleanup(void);
/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions. So we generally
 * want a higher threshold for soft lockups than for hard lockups. So we couple
 * the thresholds with a factor: we make the soft threshold twice the amount of
 * time the hard threshold is.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}
/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30 ns == 1.074 s.
 */
static unsigned long get_timestamp(void)
{
	return running_clock() >> 30LL;	/* 2^30 ~= 10^9 */
}
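/*
 * Worked example: running_clock() returns nanoseconds, and
 * 2^30 = 1073741824 ~= 10^9, so "ns >> 30" yields roughly seconds
 * (low by ~7%), trading an expensive 64-bit divide for a cheap shift.
 */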
static void set_sample_period(void)
{
	/*
	 * convert watchdog_thresh from seconds to ns
	 * the divide by 5 is to give hrtimer several chances (two
	 * or three with the current relation between the soft
	 * and hard thresholds) to increment before the
	 * hardlockup detector generates a warning
	 */
	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
	watchdog_update_hrtimer_threshold(sample_period);
	hardlockup_thresh = sample_period * 3 / NSEC_PER_SEC;
}
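/*
 * Worked example with the default watchdog_thresh = 10:
 *
 *	get_softlockup_thresh()	= 10 * 2		= 20 s
 *	sample_period		= 20 * (10^9 / 5) ns	= 4 s
 *	hardlockup_thresh	= 4 s * 3		= 12 s
 *
 * so the hrtimer fires every 4 s, and a CPU whose hrtimer count has not
 * moved for 12 s is considered hard locked up.
 */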
/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
	__this_cpu_write(watchdog_touch_ts, get_timestamp());
	__this_cpu_write(hardlockup_touch_ts, get_timestamp());
}

/**
 * touch_softlockup_watchdog_sched - touch watchdog on scheduler stalls
 *
 * Call when the scheduler may have stalled for legitimate reasons
 * preventing the watchdog task from executing - e.g. the scheduler
 * entering idle state.  This should only be used for scheduler events.
 * Use touch_softlockup_watchdog() for everything else.
 */
void touch_softlockup_watchdog_sched(void)
{
	/*
	 * Preemption can be enabled.  It doesn't matter which CPU's timestamp
	 * gets zeroed here, so use the raw_ operation.
	 */
	raw_cpu_write(watchdog_touch_ts, 0);
}

void touch_softlockup_watchdog(void)
{
	touch_softlockup_watchdog_sched();
	wq_watchdog_touch(raw_smp_processor_id());
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * watchdog_mutex cannot be taken here, as this might be called
	 * from (soft)interrupt context, so the access to
	 * watchdog_allowed_cpumask might race with a concurrent update.
	 *
	 * The watchdog time stamp can race against a concurrent real
	 * update as well, the only side effect might be a cycle delay for
	 * the softlockup check.
	 */
	for_each_cpu(cpu, &watchdog_allowed_mask)
		per_cpu(watchdog_touch_ts, cpu) = 0;
	wq_watchdog_touch(-1);
}

void touch_softlockup_watchdog_sync(void)
{
	__this_cpu_write(softlockup_touch_sync, true);
	__this_cpu_write(watchdog_touch_ts, 0);
}
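/*
 * All of the touch_*() helpers above work by writing 0 into
 * watchdog_touch_ts rather than a real timestamp: watchdog_timer_fn()
 * treats a zero timestamp as "recently touched", re-arms the detector
 * via __touch_watchdog() and skips the softlockup check for that
 * sample period.
 */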
#ifdef CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU
static void watchdog_check_hardlockup_other_cpu(void);
#else
static inline void watchdog_check_hardlockup_other_cpu(void) { return; }
#endif

static int is_softlockup(unsigned long touch_ts)
{
	unsigned long now = get_timestamp();

	if ((watchdog_enabled & SOFT_WATCHDOG_ENABLED) && watchdog_thresh) {
		/* Warn about unreasonable delays. */
		if (time_after(now, touch_ts + get_softlockup_thresh()))
			return now - touch_ts;
	}
	return 0;
}
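/*
 * Note: a non-zero return value from is_softlockup() is the stall
 * duration in seconds (now - touch_ts); it is what watchdog_timer_fn()
 * prints in the "stuck for %us" message below.
 */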
/* watchdog detector functions */
bool is_hardlockup(void)
{
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return true;

	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return false;
}

static void watchdog_interrupt_count(void)
{
	__this_cpu_inc(hrtimer_interrupts);
}
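/*
 * hrtimer_interrupts is the per-cpu heartbeat: it is incremented from
 * the hrtimer callback, and the hard lockup checks compare it against
 * hrtimer_interrupts_saved. If the counter has not moved between two
 * consecutive checks, timer interrupts are no longer being serviced on
 * that CPU and a hard lockup is assumed.
 */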
/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;
	int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
	/* try to enable log_kevent of exynos-snapshot if log_kevent was off because of rcu stall */
	dbg_snapshot_try_enable("log_kevent", NSEC_PER_SEC * 15);
	if (!watchdog_enabled)
		return HRTIMER_NORESTART;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* test for hardlockups on the next cpu */
	watchdog_check_hardlockup_other_cpu();

	/* kick the softlockup detector */
	wake_up_process(__this_cpu_read(softlockup_watchdog));

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	if (touch_ts == 0) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		/* Clear the guest paused flag on watchdog reset */
		kvm_check_and_clear_guest_paused();
		__touch_watchdog();
		return HRTIMER_RESTART;
	}
	/* check for a softlockup
	 * This is done by making sure a high priority task is
	 * being scheduled.  The task touches the watchdog to
	 * indicate it is getting cpu time.  If it hasn't then
	 * this is a good indication some task is hogging the cpu
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like a soft lockup, check to see if the host
		 * stopped the vm before we issue the warning
		 */
		if (kvm_check_and_clear_guest_paused())
			return HRTIMER_RESTART;

		/* only warn once */
		if (__this_cpu_read(soft_watchdog_warn) == true) {
			/*
			 * When multiple processes are causing softlockups the
			 * softlockup detector only warns on the first one
			 * because the code relies on a full quiet cycle to
			 * re-arm.  The second process prevents the quiet cycle
			 * and never gets reported.  Use task pointers to detect
			 * this.
			 */
			if (__this_cpu_read(softlockup_task_ptr_saved) !=
			    current) {
				__this_cpu_write(soft_watchdog_warn, false);
				__touch_watchdog();
			}
			return HRTIMER_RESTART;
		}

		if (softlockup_all_cpu_backtrace) {
			/* Prevent multiple soft-lockup reports if one cpu is already
			 * engaged in dumping cpu back traces
			 */
			if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
				/* Someone else will report us. Let's give up */
				__this_cpu_write(soft_watchdog_warn, true);
				return HRTIMER_RESTART;
			}
		}

		pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		__this_cpu_write(softlockup_task_ptr_saved, current);
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_all_cpu_backtrace) {
			/* Avoid generating two back traces for current
			 * given that one is already made above
			 */
			trigger_allbutself_cpu_backtrace();

			clear_bit(0, &soft_lockup_nmi_warn);
			/* Barrier to sync with other cpus */
			smp_mb__after_atomic();
		}

		add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
		if (softlockup_panic)
			panic("softlockup: hung tasks");
		__this_cpu_write(soft_watchdog_warn, true);
	} else
		__this_cpu_write(soft_watchdog_warn, false);

	return HRTIMER_RESTART;
}
static void watchdog_set_prio(unsigned int policy, unsigned int prio)
{
	struct sched_param param = { .sched_priority = prio };

	sched_setscheduler(current, policy, &param);
}
static void watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);

	/*
	 * Start the timer first to prevent the NMI watchdog triggering
	 * before the timer has a chance to fire.
	 */
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = watchdog_timer_fn;
	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
		      HRTIMER_MODE_REL_PINNED);

	/* Initialize timestamp */
	__touch_watchdog();
	/* Enable the perf event */
	if (watchdog_enabled & NMI_WATCHDOG_ENABLED)
		watchdog_nmi_enable(cpu);

	watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
}
static void watchdog_disable(unsigned int cpu)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);

	watchdog_set_prio(SCHED_NORMAL, 0);
	/*
	 * Disable the perf event first. That prevents that a large delay
	 * between disabling the timer and disabling the perf event causes
	 * the perf NMI to detect a false positive.
	 */
	watchdog_nmi_disable(cpu);
	hrtimer_cancel(hrtimer);
}
static void watchdog_cleanup(unsigned int cpu, bool online)
{
	watchdog_disable(cpu);
}

static int watchdog_should_run(unsigned int cpu)
{
	return __this_cpu_read(hrtimer_interrupts) !=
		__this_cpu_read(soft_lockup_hrtimer_cnt);
}

/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every sample_period seconds (4 seconds by
 * default) to reset the softlockup timestamp. If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static void watchdog(unsigned int cpu)
{
	__this_cpu_write(soft_lockup_hrtimer_cnt,
			 __this_cpu_read(hrtimer_interrupts));
	__touch_watchdog();
}
static struct smp_hotplug_thread watchdog_threads = {
	.store			= &softlockup_watchdog,
	.thread_should_run	= watchdog_should_run,
	.thread_fn		= watchdog,
	.thread_comm		= "watchdog/%u",
	.setup			= watchdog_enable,
	.cleanup		= watchdog_cleanup,
	.park			= watchdog_disable,
	.unpark			= watchdog_enable,
};
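/*
 * The smpboot infrastructure creates one "watchdog/%u" kthread per CPU
 * and drives it through the hooks above: .setup/.unpark arm the hrtimer
 * (and perf event) when a CPU comes online or is unparked, while
 * .park/.cleanup tear them down again. Enabling and disabling the
 * detector at runtime therefore amounts to parking and unparking these
 * threads via the cpumask helpers below.
 */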
static void softlockup_update_smpboot_threads(void)
{
	lockdep_assert_held(&watchdog_mutex);

	if (!softlockup_threads_initialized)
		return;

	smpboot_update_cpumask_percpu_thread(&watchdog_threads,
					     &watchdog_allowed_mask);
}

/* Temporarily park all watchdog threads */
static void softlockup_park_all_threads(void)
{
	cpumask_clear(&watchdog_allowed_mask);
	softlockup_update_smpboot_threads();
}

/* Unpark enabled threads */
static void softlockup_unpark_threads(void)
{
	cpumask_copy(&watchdog_allowed_mask, &watchdog_cpumask);
	softlockup_update_smpboot_threads();
}
static void lockup_detector_reconfigure(void)
{
	cpus_read_lock();
	watchdog_nmi_stop();
	softlockup_park_all_threads();
	set_sample_period();
	lockup_detector_update_enable();
	if (watchdog_enabled && watchdog_thresh)
		softlockup_unpark_threads();
	watchdog_nmi_start();
	cpus_read_unlock();
	/*
	 * Must be called outside the cpus locked section to prevent
	 * recursive locking in the perf code.
	 */
	__lockup_detector_cleanup();
}
/*
 * Create the watchdog thread infrastructure and configure the detector(s).
 *
 * The threads are not unparked as watchdog_allowed_mask is empty.  When
 * the threads are successfully initialized, take the proper locks and
 * unpark the threads in the watchdog_cpumask if the watchdog is enabled.
 */
static __init void lockup_detector_setup(void)
{
	int ret;

	/*
	 * If sysctl is off and watchdog got disabled on the command line,
	 * nothing to do here.
	 */
	lockup_detector_update_enable();

	if (!IS_ENABLED(CONFIG_SYSCTL) &&
	    !(watchdog_enabled && watchdog_thresh))
		return;

	ret = smpboot_register_percpu_thread_cpumask(&watchdog_threads,
						     &watchdog_allowed_mask);
	if (ret) {
		pr_err("Failed to initialize soft lockup detector threads\n");
		return;
	}

	mutex_lock(&watchdog_mutex);
	softlockup_threads_initialized = true;
	lockup_detector_reconfigure();
	mutex_unlock(&watchdog_mutex);
}
#else /* CONFIG_SOFTLOCKUP_DETECTOR */
static inline int watchdog_park_threads(void) { return 0; }
static inline void watchdog_unpark_threads(void) { }
static inline int watchdog_enable_all_cpus(void) { return 0; }
static inline void watchdog_disable_all_cpus(void) { }
static void lockup_detector_reconfigure(void)
{
	cpus_read_lock();
	watchdog_nmi_stop();
	lockup_detector_update_enable();
	watchdog_nmi_start();
	cpus_read_unlock();
}
static inline void lockup_detector_setup(void)
{
	lockup_detector_reconfigure();
}
#endif /* !CONFIG_SOFTLOCKUP_DETECTOR */
static void __lockup_detector_cleanup(void)
{
	lockdep_assert_held(&watchdog_mutex);
	hardlockup_detector_perf_cleanup();
}

/**
 * lockup_detector_cleanup - Cleanup after cpu hotplug or sysctl changes
 *
 * Caller must not hold the cpu hotplug rwsem.
 */
void lockup_detector_cleanup(void)
{
	mutex_lock(&watchdog_mutex);
	__lockup_detector_cleanup();
	mutex_unlock(&watchdog_mutex);
}

/**
 * lockup_detector_soft_poweroff - Interface to stop lockup detector(s)
 *
 * Special interface for parisc. It prevents lockup detector warnings from
 * the default pm_poweroff() function which busy loops forever.
 */
void lockup_detector_soft_poweroff(void)
{
	watchdog_enabled = 0;
}
#ifdef CONFIG_SYSCTL

/* Propagate any changes to the watchdog threads */
static void proc_watchdog_update(void)
{
	/* Remove impossible cpus to keep sysctl output clean. */
	cpumask_and(&watchdog_cpumask, &watchdog_cpumask, cpu_possible_mask);
	lockup_detector_reconfigure();
}
/*
 * common function for watchdog, nmi_watchdog and soft_watchdog parameter
 *
 * caller             | table->data points to      | 'which'
 * -------------------|----------------------------|--------------------------
 * proc_watchdog      | watchdog_user_enabled      | NMI_WATCHDOG_ENABLED |
 *                    |                            | SOFT_WATCHDOG_ENABLED
 * -------------------|----------------------------|--------------------------
 * proc_nmi_watchdog  | nmi_watchdog_user_enabled  | NMI_WATCHDOG_ENABLED
 * -------------------|----------------------------|--------------------------
 * proc_soft_watchdog | soft_watchdog_user_enabled | SOFT_WATCHDOG_ENABLED
 */
static int proc_watchdog_common(int which, struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old, *param = table->data;

	mutex_lock(&watchdog_mutex);

	if (!write) {
		/*
		 * On read synchronize the userspace interface. This is a
		 * racy snapshot.
		 */
		*param = (watchdog_enabled & which) != 0;
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	} else {
		old = READ_ONCE(*param);
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
		if (!err && old != READ_ONCE(*param))
			proc_watchdog_update();
	}
	mutex_unlock(&watchdog_mutex);
	return err;
}
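/*
 * Illustrative usage from userspace (these knobs are wired up in
 * kernel/sysctl.c):
 *
 *	echo 0 > /proc/sys/kernel/nmi_watchdog   # hard lockup detector off
 *	echo 1 > /proc/sys/kernel/soft_watchdog  # soft lockup detector on
 *	cat      /proc/sys/kernel/watchdog       # racy snapshot, see above
 */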
/*
 * /proc/sys/kernel/watchdog
 */
int proc_watchdog(struct ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED|SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}
/*
 * /proc/sys/kernel/nmi_watchdog
 */
int proc_nmi_watchdog(struct ctl_table *table, int write,
		      void __user *buffer, size_t *lenp, loff_t *ppos)
{
	if (!nmi_watchdog_available && write)
		return -ENOTSUPP;
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}
/*
 * /proc/sys/kernel/soft_watchdog
 */
int proc_soft_watchdog(struct ctl_table *table, int write,
		       void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}
/*
 * /proc/sys/kernel/watchdog_thresh
 */
int proc_watchdog_thresh(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old;

	mutex_lock(&watchdog_mutex);

	old = READ_ONCE(watchdog_thresh);
	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (!err && write && old != READ_ONCE(watchdog_thresh))
		proc_watchdog_update();

	mutex_unlock(&watchdog_mutex);
	return err;
}
/*
 * The cpumask is the mask of possible cpus that the watchdog can run
 * on, not the mask of cpus it is actually running on.  This allows the
 * user to specify a mask that will include cpus that have not yet
 * been brought online, if desired.
 */
int proc_watchdog_cpumask(struct ctl_table *table, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err;

	mutex_lock(&watchdog_mutex);

	err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
	if (!err && write)
		proc_watchdog_update();

	mutex_unlock(&watchdog_mutex);
	return err;
}
#endif /* CONFIG_SYSCTL */
void __init lockup_detector_init(void)
{
#ifdef CONFIG_NO_HZ_FULL
	if (tick_nohz_full_enabled()) {
		pr_info("Disabling watchdog on nohz_full cores by default\n");
		cpumask_copy(&watchdog_cpumask, housekeeping_mask);
	} else
		cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
#else
	cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
#endif

	if (!watchdog_nmi_probe())
		nmi_watchdog_available = true;
	lockup_detector_setup();
}
#ifdef CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static cpumask_t __read_mostly watchdog_cpus;
ATOMIC_NOTIFIER_HEAD(hardlockup_notifier_list);
EXPORT_SYMBOL(hardlockup_notifier_list);
static unsigned int watchdog_next_cpu(unsigned int cpu)
{
	cpumask_t cpus = watchdog_cpus;
	unsigned int next_cpu;

	next_cpu = cpumask_next(cpu, &cpus);
	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(&cpus);

	if (next_cpu == cpu)
		return nr_cpu_ids;

	return next_cpu;
}
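/*
 * The hard lockup check forms a ring: each CPU in watchdog_cpus checks
 * its successor. For online CPUs {0, 1, 2, 3}: 0 checks 1, 1 checks 2,
 * 2 checks 3, and 3 wraps around to check 0. With a single CPU online
 * there is nobody else to do the checking, so nr_cpu_ids is returned
 * and the check is skipped.
 */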
static int is_hardlockup_other_cpu(unsigned int cpu)
{
	unsigned long hrint = per_cpu(hrtimer_interrupts, cpu);

	if (per_cpu(hrtimer_interrupts_saved, cpu) == hrint) {
		unsigned long now = get_timestamp();
		unsigned long touch_ts = per_cpu(hardlockup_touch_ts, cpu);

		if (time_after(now, touch_ts) &&
		    (now - touch_ts >= hardlockup_thresh))
			return 1;
	}

	per_cpu(hrtimer_interrupts_saved, cpu) = hrint;
	return 0;
}
static void watchdog_check_hardlockup_other_cpu(void)
{
	unsigned int next_cpu;

	/*
	 * Test for hardlockups every 3 samples. The sample period is
	 * watchdog_thresh * 2 / 5, so 3 samples gets us back to slightly over
	 * watchdog_thresh (over by 20%).
	 */
	if (__this_cpu_read(hrtimer_interrupts) % 3 != 0)
		return;

	/* check for a hardlockup on the next cpu */
	next_cpu = watchdog_next_cpu(smp_processor_id());
	if (next_cpu >= nr_cpu_ids)
		return;

	smp_rmb();

	if (per_cpu(watchdog_nmi_touch, next_cpu) == true) {
		per_cpu(watchdog_nmi_touch, next_cpu) = false;
		return;
	}

	if (is_hardlockup_other_cpu(next_cpu)) {
		/* only warn once */
		if (per_cpu(hard_watchdog_warn, next_cpu) == true)
			return;

		if (hardlockup_panic) {
			dbg_snapshot_set_hardlockup(hardlockup_panic);
			atomic_notifier_call_chain(&hardlockup_notifier_list, 0, (void *)&next_cpu);
			panic("Watchdog detected hard LOCKUP on cpu %u", next_cpu);
		} else {
			WARN(1, "Watchdog detected hard LOCKUP on cpu %u", next_cpu);
		}

		per_cpu(hard_watchdog_warn, next_cpu) = true;
	} else {
		per_cpu(hard_watchdog_warn, next_cpu) = false;
	}
}
void touch_nmi_watchdog(void)
{
	/*
	 * Using __raw here because some code paths have
	 * preemption enabled.  If preemption is enabled
	 * then interrupts should be enabled too, in which
	 * case we shouldn't have to worry about the watchdog
	 * going off.
	 */
	raw_cpu_write(watchdog_nmi_touch, true);
	arch_touch_nmi_watchdog();
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);
static int watchdog_nmi_enable(unsigned int cpu)
{
	/*
	 * The new cpu will be marked online before the first hrtimer interrupt
	 * runs on it.  If another cpu tests for a hardlockup on the new cpu
	 * before it has run its first hrtimer, it will get a false positive.
	 * Touch the watchdog on the new cpu to delay the first check for at
	 * least 3 sampling periods to guarantee one hrtimer has run on the new
	 * cpu.
	 */
	per_cpu(watchdog_nmi_touch, cpu) = true;
	smp_wmb();
	cpumask_set_cpu(cpu, &watchdog_cpus);
	return 0;
}
static void watchdog_nmi_disable(unsigned int cpu)
{
	unsigned int next_cpu = watchdog_next_cpu(cpu);

	/*
	 * Offlining this cpu will cause the cpu before this one to start
	 * checking the one after this one. If this cpu just finished checking
	 * the next cpu and updating hrtimer_interrupts_saved, and then the
	 * previous cpu checks it within one sample period, it will trigger a
	 * false positive. Touch the watchdog on the next cpu to prevent it.
	 */
	if (next_cpu < nr_cpu_ids)
		per_cpu(watchdog_nmi_touch, next_cpu) = true;
	smp_wmb();
	cpumask_clear_cpu(cpu, &watchdog_cpus);
}
#endif /* CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU */