/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "NMI watchdog: " fmt
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/smpboot.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
#include <linux/perf_event.h>
#include <linux/kthread.h>
#include <linux/sec_debug.h>

#include <linux/exynos-ss.h>
#include <linux/irqflags.h>
#ifdef CONFIG_SEC_DEBUG
static const char * const hl_to_name[] = {
	"NONE", "TASK STUCK", "IRQ STUCK",
	"IDLE STUCK", "SMCCALL STUCK", "IRQ STORM",
	"HRTIMER ERROR", "UNKNOWN STUCK"
};

static const char * const sl_to_name[] = {
	"NONE", "SOFTIRQ STUCK", "TASK STUCK", "UNKNOWN STUCK"
};
#endif
/*
 * The run state of the lockup detectors is controlled by the content of the
 * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
 * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
 *
 * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled'
 * are variables that are only used as an 'interface' between the parameters
 * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. The
 * 'watchdog_thresh' variable is handled differently because its value is not
 * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh'
 * equals zero.
 */
#define NMI_WATCHDOG_ENABLED_BIT   0
#define SOFT_WATCHDOG_ENABLED_BIT  1
#define NMI_WATCHDOG_ENABLED      (1 << NMI_WATCHDOG_ENABLED_BIT)
#define SOFT_WATCHDOG_ENABLED     (1 << SOFT_WATCHDOG_ENABLED_BIT)

static DEFINE_MUTEX(watchdog_proc_mutex);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED|NMI_WATCHDOG_ENABLED;
#else
static unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED;
#endif
int __read_mostly nmi_watchdog_enabled;
int __read_mostly soft_watchdog_enabled;
int __read_mostly watchdog_user_enabled;
int __read_mostly watchdog_thresh = 10;
#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;
int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
#else
#define sysctl_softlockup_all_cpu_backtrace 0
#define sysctl_hardlockup_all_cpu_backtrace 0
#endif
static struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);

/* Helper for online, unparked cpus. */
#define for_each_watchdog_cpu(cpu) \
	for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask)
/*
 * The 'watchdog_running' variable is set to 1 when the watchdog threads
 * are registered/started and is set to 0 when the watchdog threads are
 * unregistered/stopped, so it is an indicator whether the threads exist.
 */
static int __read_mostly watchdog_running;
/*
 * If a subsystem has a need to deactivate the watchdog temporarily, it
 * can use the suspend/resume interface to achieve this. The content of
 * the 'watchdog_suspended' variable reflects this state. Existing threads
 * are parked/unparked by the lockup_detector_{suspend|resume} functions
 * (see comment blocks pertaining to those functions for further details).
 *
 * 'watchdog_suspended' also prevents threads from being registered/started
 * or unregistered/stopped via parameters in /proc/sys/kernel, so the state
 * of 'watchdog_running' cannot change while the watchdog is deactivated
 * temporarily (see related code in 'proc' handlers).
 */
static int __read_mostly watchdog_suspended;
static u64 __read_mostly sample_period;
static unsigned long __read_mostly hardlockup_thresh;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(unsigned long, hardlockup_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
#endif
#ifdef CONFIG_SEC_DEBUG
static DEFINE_PER_CPU(struct softlockup_info, percpu_sl_info);
static void check_softlockup_type(void);

#ifdef CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU
static DEFINE_PER_CPU(struct hardlockup_info, percpu_hl_info);
static void check_hardlockup_type(unsigned int cpu);
#endif
#endif
#ifdef CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU
static cpumask_t __read_mostly watchdog_cpus;
ATOMIC_NOTIFIER_HEAD(hardlockup_notifier_list);
EXPORT_SYMBOL(hardlockup_notifier_list);
#endif

#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
#endif

static unsigned long soft_lockup_nmi_warn;
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
#ifdef CONFIG_HARDLOCKUP_DETECTOR
unsigned int __read_mostly hardlockup_panic =
			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
static unsigned long __maybe_unused hardlockup_allcpu_dumped;
/*
 * We may not want to enable hard lockup detection by default in all cases,
 * for example when running the kernel as a guest on a hypervisor. In these
 * cases this function can be called to disable hard lockup detection. This
 * function should only be executed once by the boot processor before the
 * kernel command line parameters are parsed, because otherwise it is not
 * possible to override this in hardlockup_panic_setup().
 */
void hardlockup_detector_disable(void)
{
	watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
}
static int __init hardlockup_panic_setup(char *str)
{
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	else if (!strncmp(str, "nopanic", 7))
		hardlockup_panic = 0;
	else if (!strncmp(str, "0", 1))
		watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
	else if (!strncmp(str, "1", 1))
		watchdog_enabled |= NMI_WATCHDOG_ENABLED;
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
#endif
unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);

	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);
static int __init nowatchdog_setup(char *str)
{
	watchdog_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);

static int __init nosoftlockup_setup(char *str)
{
	watchdog_enabled &= ~SOFT_WATCHDOG_ENABLED;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);
#ifdef CONFIG_SMP
static int __init softlockup_all_cpu_backtrace_setup(char *str)
{
	sysctl_softlockup_all_cpu_backtrace =
		!!simple_strtol(str, NULL, 0);
	return 1;
}
__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);

static int __init hardlockup_all_cpu_backtrace_setup(char *str)
{
	sysctl_hardlockup_all_cpu_backtrace =
		!!simple_strtol(str, NULL, 0);
	return 1;
}
__setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup);
#endif
/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions, so we generally
 * want a higher threshold for soft lockups than for hard lockups. The two
 * thresholds are therefore coupled by a factor: the soft threshold is twice
 * the hard threshold.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}
/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(void)
{
	return running_clock() >> 30LL;  /* 2^30 ~= 10^9 */
}
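/*
 * Note on the shift above: dividing by 2^30 (~1.074e9) instead of 1e9 makes
 * each reported "second" run about 7% long, which is fine for threshold
 * comparisons and avoids a 64-bit division on a hot path.
 */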
static void set_sample_period(void)
{
	/*
	 * convert watchdog_thresh from seconds to ns
	 * the divide by 5 is to give hrtimer several chances (two
	 * or three with the current relation between the soft
	 * and hard thresholds) to increment before the
	 * hardlockup detector generates a warning
	 */
	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
	hardlockup_thresh = sample_period * 3 / NSEC_PER_SEC;
}
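/*
 * Worked example with the default watchdog_thresh of 10 seconds:
 *   get_softlockup_thresh() = 20 s
 *   sample_period           = 20 * (1e9 / 5) ns = 4 s
 *   hardlockup_thresh       = 4e9 * 3 / 1e9    = 12 (seconds)
 * i.e. the watchdog hrtimer fires every 4 s and a CPU whose timer interrupt
 * stops incrementing is declared hard-locked after roughly three missed
 * samples.
 */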
/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
	__this_cpu_write(watchdog_touch_ts, get_timestamp());
	__this_cpu_write(hardlockup_touch_ts, get_timestamp());
}
void touch_softlockup_watchdog(void)
{
	/*
	 * Preemption can be enabled.  It doesn't matter which CPU's timestamp
	 * gets zeroed here, so use the raw_ operation.
	 */
	raw_cpu_write(watchdog_touch_ts, 0);
}
EXPORT_SYMBOL(touch_softlockup_watchdog);
void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * this is done lockless
	 * do we care if a 0 races with a timestamp?
	 * all it means is the softlock check starts one cycle later
	 */
	for_each_watchdog_cpu(cpu)
		per_cpu(watchdog_touch_ts, cpu) = 0;
}
#ifdef CONFIG_HARDLOCKUP_DETECTOR
void touch_nmi_watchdog(void)
{
	/*
	 * Using __raw here because some code paths have
	 * preemption enabled.  If preemption is enabled
	 * then interrupts should be enabled too, in which
	 * case we shouldn't have to worry about the watchdog
	 * going off.
	 */
	raw_cpu_write(watchdog_nmi_touch, true);
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);
#endif
void touch_softlockup_watchdog_sync(void)
{
	__this_cpu_write(softlockup_touch_sync, true);
	__this_cpu_write(watchdog_touch_ts, 0);
}
#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
/* watchdog detector functions */
static bool is_hardlockup(void)
{
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return true;

	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return false;
}
#endif
#ifdef CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU
static unsigned int watchdog_next_cpu(unsigned int cpu)
{
	cpumask_t cpus = watchdog_cpus;
	unsigned int next_cpu;

	next_cpu = cpumask_next(cpu, &cpus);
	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(&cpus);

	if (next_cpu == cpu)
		return nr_cpu_ids;

	return next_cpu;
}
static int is_hardlockup_other_cpu(unsigned int cpu)
{
	unsigned long hrint = per_cpu(hrtimer_interrupts, cpu);

	if (per_cpu(hrtimer_interrupts_saved, cpu) == hrint) {
		unsigned long now = get_timestamp();
		unsigned long touch_ts = per_cpu(hardlockup_touch_ts, cpu);

		if (time_after(now, touch_ts) &&
				(now - touch_ts >= hardlockup_thresh))
			return 1;
	}

	per_cpu(hrtimer_interrupts_saved, cpu) = hrint;
	return 0;
}
static void watchdog_check_hardlockup_other_cpu(void)
{
	unsigned int next_cpu;

	/*
	 * Test for hardlockups every 3 samples.  The sample period is
	 * watchdog_thresh * 2 / 5, so 3 samples gets us back to slightly over
	 * watchdog_thresh (over by 20%).
	 */
	if (__this_cpu_read(hrtimer_interrupts) % 3 != 0)
		return;

	/* check for a hardlockup on the next cpu */
	next_cpu = watchdog_next_cpu(smp_processor_id());
	if (next_cpu >= nr_cpu_ids)
		return;

	smp_rmb();

	if (per_cpu(watchdog_nmi_touch, next_cpu) == true) {
		per_cpu(watchdog_nmi_touch, next_cpu) = false;
		return;
	}

	if (is_hardlockup_other_cpu(next_cpu)) {
#ifdef CONFIG_SEC_DEBUG
		check_hardlockup_type(next_cpu);
#endif
		/* only warn once */
		if (per_cpu(hard_watchdog_warn, next_cpu) == true)
			return;

		if (hardlockup_panic) {
			exynos_ss_set_hardlockup(hardlockup_panic);
			atomic_notifier_call_chain(&hardlockup_notifier_list, 0, (void *)&next_cpu);
			panic("Watchdog detected hard LOCKUP on cpu %u", next_cpu);
		} else {
			WARN(1, "Watchdog detected hard LOCKUP on cpu %u", next_cpu);
		}

		per_cpu(hard_watchdog_warn, next_cpu) = true;
	} else {
		per_cpu(hard_watchdog_warn, next_cpu) = false;
	}
}
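/*
 * With the defaults this check runs on every third 4 s sample, i.e. every
 * 12 s, and only reports once hrtimer_interrupts on the neighbour has been
 * frozen and its hardlockup_touch_ts is at least hardlockup_thresh (12)
 * seconds old.
 */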
#else
static inline void watchdog_check_hardlockup_other_cpu(void) { return; }
#endif
static int is_softlockup(unsigned long touch_ts)
{
	unsigned long now = get_timestamp();

	if ((watchdog_enabled & SOFT_WATCHDOG_ENABLED) && watchdog_thresh) {
		/* Warn about unreasonable delays. */
		if (time_after(now, touch_ts + get_softlockup_thresh()))
			return now - touch_ts;
	}
	return 0;
}
#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI

static struct perf_event_attr wd_hw_attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.size		= sizeof(struct perf_event_attr),
	.pinned		= 1,
	.disabled	= 1,
};
/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event,
		 struct perf_sample_data *data,
		 struct pt_regs *regs)
{
	/* Ensure the watchdog never gets throttled */
	event->hw.interrupts = 0;

	if (__this_cpu_read(watchdog_nmi_touch) == true) {
		__this_cpu_write(watchdog_nmi_touch, false);
		return;
	}

	/* check for a hardlockup
	 * This is done by making sure our timer interrupt
	 * is incrementing.  The timer interrupt should have
	 * fired multiple times before we overflow'd.  If it hasn't
	 * then this is a good indication the cpu is stuck
	 */
	if (is_hardlockup()) {
		int this_cpu = smp_processor_id();

		/* only print hardlockups once */
		if (__this_cpu_read(hard_watchdog_warn) == true)
			return;

		pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		/*
		 * Perform all-CPU dump only once to avoid multiple hardlockups
		 * generating interleaving traces
		 */
		if (sysctl_hardlockup_all_cpu_backtrace &&
				!test_and_set_bit(0, &hardlockup_allcpu_dumped))
			trigger_allbutself_cpu_backtrace();

		if (hardlockup_panic) {
			exynos_ss_set_hardlockup(hardlockup_panic);
			panic("Hard LOCKUP");
		}

		__this_cpu_write(hard_watchdog_warn, true);
		return;
	}

	__this_cpu_write(hard_watchdog_warn, false);
	return;
}
#endif /* CONFIG_HARDLOCKUP_DETECTOR_NMI */
static void watchdog_interrupt_count(void)
{
	__this_cpu_inc(hrtimer_interrupts);
}

static int watchdog_nmi_enable(unsigned int cpu);
static void watchdog_nmi_disable(unsigned int cpu);

static int watchdog_enable_all_cpus(void);
static void watchdog_disable_all_cpus(void);
/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;
	int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;

	/* try to enable log_kevent of exynos-snapshot if log_kevent was off because of rcu stall */
	exynos_ss_try_enable("log_kevent", NSEC_PER_SEC * 15);

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* test for hardlockups on the next cpu */
	watchdog_check_hardlockup_other_cpu();

	/* kick the softlockup detector */
	wake_up_process(__this_cpu_read(softlockup_watchdog));

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	if (touch_ts == 0) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		/* Clear the guest paused flag on watchdog reset */
		kvm_check_and_clear_guest_paused();
		__touch_watchdog();
		return HRTIMER_RESTART;
	}

	/* check for a softlockup
	 * This is done by making sure a high priority task is
	 * being scheduled.  The task touches the watchdog to
	 * indicate it is getting cpu time.  If it hasn't then
	 * this is a good indication some task is hogging the cpu
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like a soft lockup, check to see if the host
		 * stopped the vm before we issue the warning
		 */
		if (kvm_check_and_clear_guest_paused())
			return HRTIMER_RESTART;

		/* only warn once */
		if (__this_cpu_read(soft_watchdog_warn) == true) {
			/*
			 * When multiple processes are causing softlockups the
			 * softlockup detector only warns on the first one
			 * because the code relies on a full quiet cycle to
			 * re-arm.  The second process prevents the quiet cycle
			 * and never gets reported.  Use task pointers to detect
			 * this.
			 */
			if (__this_cpu_read(softlockup_task_ptr_saved) !=
			    current) {
				__this_cpu_write(soft_watchdog_warn, false);
				__touch_watchdog();
			}
			return HRTIMER_RESTART;
		}

		if (softlockup_all_cpu_backtrace) {
			/* Prevent multiple soft-lockup reports if one cpu is already
			 * engaged in dumping cpu back traces
			 */
			if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
				/* Someone else will report us. Let's give up */
				__this_cpu_write(soft_watchdog_warn, true);
				return HRTIMER_RESTART;
			}
		}

		pr_auto(ASL1, "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
#ifdef CONFIG_SEC_DEBUG
		check_softlockup_type();
#endif
		__this_cpu_write(softlockup_task_ptr_saved, current);
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_all_cpu_backtrace) {
			/* Avoid generating two back traces for current
			 * given that one is already made above
			 */
			trigger_allbutself_cpu_backtrace();

			clear_bit(0, &soft_lockup_nmi_warn);
			/* Barrier to sync with other cpus */
			smp_mb__after_atomic();
		}

		add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
		if (softlockup_panic) {
#ifdef CONFIG_SEC_DEBUG_EXTRA_INFO
			if (regs) {
				sec_debug_set_extra_info_fault(WATCHDOG_FAULT, (unsigned long)regs->pc, regs);
				sec_debug_set_extra_info_backtrace(regs);
			}
#endif
			panic("softlockup: hung tasks");
		}
		__this_cpu_write(soft_watchdog_warn, true);
	} else {
		__this_cpu_write(soft_watchdog_warn, false);
	}

	return HRTIMER_RESTART;
}
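/*
 * Timing sketch with the defaults: the hrtimer above fires every 4 s and
 * wakes the per-cpu watchdog thread, which refreshes watchdog_touch_ts.
 * The soft lockup warning therefore fires only once the thread has been
 * starved for get_softlockup_thresh() (20 s), i.e. about five consecutive
 * missed wakeups.
 */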
static void watchdog_set_prio(unsigned int policy, unsigned int prio)
{
	struct sched_param param = { .sched_priority = prio };

	sched_setscheduler(current, policy, &param);
}
static void watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);

	/* kick off the timer for the hardlockup detector */
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = watchdog_timer_fn;

	/* Enable the perf event */
	watchdog_nmi_enable(cpu);

	/* done here because hrtimer_start can only pin to smp_processor_id() */
	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
		      HRTIMER_MODE_REL_PINNED);

	/* initialize timestamp */
	watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
	__touch_watchdog();
}
static void watchdog_disable(unsigned int cpu)
{
	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);

	watchdog_set_prio(SCHED_NORMAL, 0);
	hrtimer_cancel(hrtimer);
	/* disable the perf event */
	watchdog_nmi_disable(cpu);
}
static void watchdog_cleanup(unsigned int cpu, bool online)
{
	watchdog_disable(cpu);
}

static int watchdog_should_run(unsigned int cpu)
{
	return __this_cpu_read(hrtimer_interrupts) !=
		__this_cpu_read(soft_lockup_hrtimer_cnt);
}
/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every sample_period seconds (4 seconds by
 * default) to reset the softlockup timestamp.  If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static void watchdog(unsigned int cpu)
{
	__this_cpu_write(soft_lockup_hrtimer_cnt,
			 __this_cpu_read(hrtimer_interrupts));
	__touch_watchdog();

	/*
	 * watchdog_nmi_enable() clears the NMI_WATCHDOG_ENABLED bit in the
	 * failure path.  Check for failures that can occur asynchronously -
	 * for example, when CPUs are on-lined - and shut down the hardware
	 * perf event on each CPU accordingly.
	 *
	 * The only non-obvious place this bit can be cleared is through
	 * watchdog_nmi_enable(), so a pr_info() is placed there.  Placing a
	 * pr_info here would be too noisy as it would result in a message
	 * every few seconds if the hardlockup was disabled but the softlockup
	 * enabled.
	 */
	if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
		watchdog_nmi_disable(cpu);
}
#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
/*
 * People like the simple clean cpu node info on boot.
 * Reduce the watchdog noise by only printing messages
 * that are different from what cpu0 displayed.
 */
static unsigned long cpu0_err;
static int watchdog_nmi_enable(unsigned int cpu)
{
	struct perf_event_attr *wd_attr;
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	/* nothing to do if the hard lockup detector is disabled */
	if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
		goto out;

	/* is it already setup and enabled? */
	if (event && event->state > PERF_EVENT_STATE_OFF)
		goto out;

	/* it is setup but not enabled */
	if (event != NULL)
		goto out_enable;

	wd_attr = &wd_hw_attr;
	wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);

	/* Try to register using hardware perf events */
	event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);

	/* save cpu0 error for future comparison */
	if (cpu == 0 && IS_ERR(event))
		cpu0_err = PTR_ERR(event);

	if (!IS_ERR(event)) {
		/* only print for cpu0 or different than cpu0 */
		if (cpu == 0 || cpu0_err)
			pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
		goto out_save;
	}

	/*
	 * Disable the hard lockup detector if _any_ CPU fails to set up
	 * the hardware perf event.  The watchdog() function checks
	 * the NMI_WATCHDOG_ENABLED bit periodically.
	 *
	 * The barriers are for syncing up watchdog_enabled across all the
	 * cpus, as clear_bit() does not use barriers.
	 */
	smp_mb__before_atomic();
	clear_bit(NMI_WATCHDOG_ENABLED_BIT, &watchdog_enabled);
	smp_mb__after_atomic();

	/* skip displaying the same error again */
	if (cpu > 0 && (PTR_ERR(event) == cpu0_err))
		return PTR_ERR(event);

	/* vary the KERN level based on the returned errno */
	if (PTR_ERR(event) == -EOPNOTSUPP)
		pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
	else if (PTR_ERR(event) == -ENOENT)
		pr_warn("disabled (cpu%i): hardware events not enabled\n",
			cpu);
	else
		pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
			cpu, PTR_ERR(event));

	pr_info("Shutting down hard lockup detector on all cpus\n");

	return PTR_ERR(event);

	/* success path */
out_save:
	per_cpu(watchdog_ev, cpu) = event;
out_enable:
	perf_event_enable(per_cpu(watchdog_ev, cpu));
out:
	return 0;
}
static void watchdog_nmi_disable(unsigned int cpu)
{
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	if (event) {
		perf_event_disable(event);
		per_cpu(watchdog_ev, cpu) = NULL;

		/* should be in cleanup, but blocks oprofile */
		perf_event_release_kernel(event);
	}
	if (cpu == 0) {
		/* watchdog_nmi_enable() expects this to be zero initially. */
		cpu0_err = 0;
	}
}

#else
#ifdef CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU
static int watchdog_nmi_enable(unsigned int cpu)
{
	/*
	 * The new cpu will be marked online before the first hrtimer interrupt
	 * runs on it.  If another cpu tests for a hardlockup on the new cpu
	 * before it has run its first hrtimer, it will get a false positive.
	 * Touch the watchdog on the new cpu to delay the first check for at
	 * least 3 sampling periods to guarantee one hrtimer has run on the
	 * new cpu.
	 */
	per_cpu(watchdog_nmi_touch, cpu) = true;
	smp_wmb();
	cpumask_set_cpu(cpu, &watchdog_cpus);
	return 0;
}
static void watchdog_nmi_disable(unsigned int cpu)
{
	unsigned int next_cpu = watchdog_next_cpu(cpu);

	/*
	 * Offlining this cpu will cause the cpu before this one to start
	 * checking the one after this one.  If this cpu just finished checking
	 * the next cpu and updating hrtimer_interrupts_saved, and then the
	 * previous cpu checks it within one sample period, it will trigger a
	 * false positive.  Touch the watchdog on the next cpu to prevent it.
	 */
	if (next_cpu < nr_cpu_ids)
		per_cpu(watchdog_nmi_touch, next_cpu) = true;
	smp_wmb();
	cpumask_clear_cpu(cpu, &watchdog_cpus);
}
#else
static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
static void watchdog_nmi_disable(unsigned int cpu) { return; }
#endif /* CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU */
#endif /* CONFIG_HARDLOCKUP_DETECTOR_NMI */
static struct smp_hotplug_thread watchdog_threads = {
	.store			= &softlockup_watchdog,
	.thread_should_run	= watchdog_should_run,
	.thread_fn		= watchdog,
	.thread_comm		= "watchdog/%u",
	.setup			= watchdog_enable,
	.cleanup		= watchdog_cleanup,
	.park			= watchdog_disable,
	.unpark			= watchdog_enable,
};
/*
 * park all watchdog threads that are specified in 'watchdog_cpumask'
 *
 * This function returns an error if kthread_park() of a watchdog thread
 * fails. In this situation, the watchdog threads of some CPUs can already
 * be parked and the watchdog threads of other CPUs can still be runnable.
 * Callers are expected to handle this special condition as appropriate in
 * their context.
 *
 * This function may only be called in a context that is protected against
 * races with CPU hotplug - for example, via get_online_cpus().
 */
static int watchdog_park_threads(void)
{
	int cpu, ret = 0;

	for_each_watchdog_cpu(cpu) {
		ret = kthread_park(per_cpu(softlockup_watchdog, cpu));
		if (ret)
			break;
	}

	return ret;
}
/*
 * unpark all watchdog threads that are specified in 'watchdog_cpumask'
 *
 * This function may only be called in a context that is protected against
 * races with CPU hotplug - for example, via get_online_cpus().
 */
static void watchdog_unpark_threads(void)
{
	int cpu;

	for_each_watchdog_cpu(cpu)
		kthread_unpark(per_cpu(softlockup_watchdog, cpu));
}
/*
 * Suspend the hard and soft lockup detector by parking the watchdog threads.
 */
int lockup_detector_suspend(void)
{
	int ret = 0;

	get_online_cpus();
	mutex_lock(&watchdog_proc_mutex);
	/*
	 * Multiple suspend requests can be active in parallel (counted by
	 * the 'watchdog_suspended' variable). If the watchdog threads are
	 * running, the first caller takes care that they will be parked.
	 * The state of 'watchdog_running' cannot change while a suspend
	 * request is active (see related code in 'proc' handlers).
	 */
	if (watchdog_running && !watchdog_suspended)
		ret = watchdog_park_threads();

	if (ret == 0)
		watchdog_suspended++;
	else {
		watchdog_disable_all_cpus();
		pr_err("Failed to suspend lockup detectors, disabled\n");
		watchdog_enabled = 0;
	}

	mutex_unlock(&watchdog_proc_mutex);

	return ret;
}
/*
 * Resume the hard and soft lockup detector by unparking the watchdog threads.
 */
void lockup_detector_resume(void)
{
	mutex_lock(&watchdog_proc_mutex);

	watchdog_suspended--;
	/*
	 * The watchdog threads are unparked if they were previously running
	 * and if there is no more active suspend request.
	 */
	if (watchdog_running && !watchdog_suspended)
		watchdog_unpark_threads();

	mutex_unlock(&watchdog_proc_mutex);
	put_online_cpus();
}
static int update_watchdog_all_cpus(void)
{
	int ret;

	ret = watchdog_park_threads();
	if (ret)
		return ret;

	watchdog_unpark_threads();

	return 0;
}
static int watchdog_enable_all_cpus(void)
{
	int err = 0;

	if (!watchdog_running) {
		err = smpboot_register_percpu_thread_cpumask(&watchdog_threads,
							     &watchdog_cpumask);
		if (err)
			pr_err("Failed to create watchdog threads, disabled\n");
		else
			watchdog_running = 1;
	} else {
		/*
		 * Enable/disable the lockup detectors or
		 * change the sample period 'on the fly'.
		 */
		err = update_watchdog_all_cpus();

		if (err) {
			watchdog_disable_all_cpus();
			pr_err("Failed to update lockup detectors, disabled\n");
		}
	}

	if (err)
		watchdog_enabled = 0;

	return err;
}
static void watchdog_disable_all_cpus(void)
{
	if (watchdog_running) {
		watchdog_running = 0;
		smpboot_unregister_percpu_thread(&watchdog_threads);
	}
}
#ifdef CONFIG_SYSCTL

/*
 * Update the run state of the lockup detectors.
 */
static int proc_watchdog_update(void)
{
	int err = 0;

	/*
	 * Watchdog threads won't be started if they are already active.
	 * The 'watchdog_running' variable in watchdog_*_all_cpus() takes
	 * care of this.  If those threads are already active, the sample
	 * period will be updated and the lockup detectors will be enabled
	 * or disabled 'on the fly'.
	 */
	if (watchdog_enabled && watchdog_thresh)
		err = watchdog_enable_all_cpus();
	else
		watchdog_disable_all_cpus();

	return err;
}
/*
 * common function for watchdog, nmi_watchdog and soft_watchdog parameter
 *
 * caller             | table->data points to | 'which' contains the flag(s)
 * -------------------|-----------------------|-----------------------------
 * proc_watchdog      | watchdog_user_enabled | NMI_WATCHDOG_ENABLED or'ed
 *                    |                       | with SOFT_WATCHDOG_ENABLED
 * -------------------|-----------------------|-----------------------------
 * proc_nmi_watchdog  | nmi_watchdog_enabled  | NMI_WATCHDOG_ENABLED
 * -------------------|-----------------------|-----------------------------
 * proc_soft_watchdog | soft_watchdog_enabled | SOFT_WATCHDOG_ENABLED
 */
static int proc_watchdog_common(int which, struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old, new;
	int *watchdog_param = (int *)table->data;

	mutex_lock(&watchdog_proc_mutex);

	if (watchdog_suspended) {
		/* no parameter changes allowed while watchdog is suspended */
		err = -EAGAIN;
		goto out;
	}

	/*
	 * If the parameter is being read return the state of the corresponding
	 * bit(s) in 'watchdog_enabled', else update 'watchdog_enabled' and the
	 * run state of the lockup detectors.
	 */
	if (!write) {
		*watchdog_param = (watchdog_enabled & which) != 0;
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	} else {
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
		if (err)
			goto out;

		/*
		 * There is a race window between fetching the current value
		 * from 'watchdog_enabled' and storing the new value.  During
		 * this race window, watchdog_nmi_enable() can sneak in and
		 * clear the NMI_WATCHDOG_ENABLED bit in 'watchdog_enabled'.
		 * The 'cmpxchg' detects this race and the loop retries.
		 */
		do {
			old = watchdog_enabled;
			/*
			 * If the parameter value is not zero set the
			 * corresponding bit(s), else clear it(them).
			 */
			if (*watchdog_param)
				new = old | which;
			else
				new = old & ~which;
		} while (cmpxchg(&watchdog_enabled, old, new) != old);

		/*
		 * Update the run state of the lockup detectors.  There is _no_
		 * need to check the value returned by proc_watchdog_update()
		 * and to restore the previous value of 'watchdog_enabled' as
		 * both lockup detectors are disabled if proc_watchdog_update()
		 * returns an error.
		 */
		err = proc_watchdog_update();
	}
out:
	mutex_unlock(&watchdog_proc_mutex);
	return err;
}
/*
 * /proc/sys/kernel/watchdog
 */
int proc_watchdog(struct ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED|SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}
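/*
 * For example, "echo 0 > /proc/sys/kernel/watchdog" clears both enable bits
 * and stops the watchdog threads, while writing 1 sets NMI_WATCHDOG_ENABLED
 * and SOFT_WATCHDOG_ENABLED and (re)starts them.
 */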
/*
 * /proc/sys/kernel/nmi_watchdog
 */
int proc_nmi_watchdog(struct ctl_table *table, int write,
		      void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}
/*
 * /proc/sys/kernel/soft_watchdog
 */
int proc_soft_watchdog(struct ctl_table *table, int write,
		       void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}
/*
 * /proc/sys/kernel/watchdog_thresh
 */
int proc_watchdog_thresh(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old, new;

	mutex_lock(&watchdog_proc_mutex);

	if (watchdog_suspended) {
		/* no parameter changes allowed while watchdog is suspended */
		err = -EAGAIN;
		goto out;
	}

	old = ACCESS_ONCE(watchdog_thresh);
	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (err || !write)
		goto out;

	/*
	 * Update the sample period.  Restore on failure.
	 */
	new = ACCESS_ONCE(watchdog_thresh);
	if (old == new)
		goto out;

	set_sample_period();
	err = proc_watchdog_update();
	if (err) {
		watchdog_thresh = old;
		set_sample_period();
	}
out:
	mutex_unlock(&watchdog_proc_mutex);
	return err;
}
/*
 * The cpumask is the mask of possible cpus that the watchdog can run
 * on, not the mask of cpus it is actually running on.  This allows the
 * user to specify a mask that will include cpus that have not yet
 * been brought online, if desired.
 */
int proc_watchdog_cpumask(struct ctl_table *table, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err;

	mutex_lock(&watchdog_proc_mutex);

	if (watchdog_suspended) {
		/* no parameter changes allowed while watchdog is suspended */
		err = -EAGAIN;
		goto out;
	}

	err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
	if (!err && write) {
		/* Remove impossible cpus to keep sysctl output cleaner. */
		cpumask_and(&watchdog_cpumask, &watchdog_cpumask,
			    cpu_possible_mask);

		if (watchdog_running) {
			/*
			 * Failure would be due to being unable to allocate
			 * a temporary cpumask, so we are likely not in a
			 * position to do much else to make things better.
			 */
			if (smpboot_update_cpumask_percpu_thread(
				    &watchdog_threads, &watchdog_cpumask) != 0)
				pr_err("cpumask update failed\n");
		}
	}
out:
	mutex_unlock(&watchdog_proc_mutex);
	return err;
}
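/*
 * Example: "echo 0-3 > /proc/sys/kernel/watchdog_cpumask" restricts the
 * watchdog to cpus 0-3; cpus outside the mask are skipped by
 * for_each_watchdog_cpu() and their threads are parked by smpboot.
 */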
#endif /* CONFIG_SYSCTL */
void __init lockup_detector_init(void)
{
	set_sample_period();

#ifdef CONFIG_NO_HZ_FULL
	if (tick_nohz_full_enabled()) {
		pr_info("Disabling watchdog on nohz_full cores by default\n");
		cpumask_copy(&watchdog_cpumask, housekeeping_mask);
	} else
		cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
#else
	cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
#endif

	if (watchdog_enabled)
		watchdog_enable_all_cpus();
}
#ifdef CONFIG_SEC_DEBUG
void sl_softirq_entry(const char *softirq_type, void *fn)
{
	struct softlockup_info *sl_info = per_cpu_ptr(&percpu_sl_info, smp_processor_id());

	if (softirq_type) {
		strncpy(sl_info->softirq_info.softirq_type, softirq_type, sizeof(sl_info->softirq_info.softirq_type) - 1);
		sl_info->softirq_info.softirq_type[SOFTIRQ_TYPE_LEN - 1] = '\0';
	}
	sl_info->softirq_info.last_arrival = local_clock();
	sl_info->softirq_info.fn = fn;
}
void sl_softirq_exit(void)
{
	struct softlockup_info *sl_info = per_cpu_ptr(&percpu_sl_info, smp_processor_id());

	sl_info->softirq_info.last_arrival = 0;
	sl_info->softirq_info.fn = (void *)0;
	sl_info->softirq_info.softirq_type[0] = '\0';
}
void check_softlockup_type(void)
{
	int cpu = smp_processor_id();
	struct softlockup_info *sl_info = per_cpu_ptr(&percpu_sl_info, cpu);

	sl_info->preempt_count = preempt_count();
	if (softirq_count() &&
	    sl_info->softirq_info.last_arrival != 0 && sl_info->softirq_info.fn != NULL) {
		sl_info->delay_time = local_clock() - sl_info->softirq_info.last_arrival;
		sl_info->sl_type = SL_SOFTIRQ_STUCK;
		pr_auto(ASL9, "Softlockup state: %s, Latency: %lluns, Softirq type: %s, Func: %pf, preempt_count: %x\n",
			sl_to_name[sl_info->sl_type], sl_info->delay_time, sl_info->softirq_info.softirq_type, sl_info->softirq_info.fn, sl_info->preempt_count);
	} else {
		exynos_ss_get_softlockup_info(cpu, sl_info);
		if (!(preempt_count() & PREEMPT_MASK) || softirq_count())
			sl_info->sl_type = SL_UNKNOWN_STUCK;
		pr_auto(ASL9, "Softlockup state: %s, Latency: %lluns, Task: %s, preempt_count: %x\n",
			sl_to_name[sl_info->sl_type], sl_info->delay_time, sl_info->task_info.task_comm, sl_info->preempt_count);
	}
}
unsigned long long get_ess_softlockup_thresh(void)
{
	return watchdog_thresh * 2 * NSEC_PER_SEC;
}
EXPORT_SYMBOL(get_ess_softlockup_thresh);
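/*
 * With the default watchdog_thresh of 10 this reports 20e9 ns, i.e. the
 * 20 s soft lockup window expressed in nanoseconds for exynos-snapshot.
 */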
#ifdef CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU
static void check_hardlockup_type(unsigned int cpu)
{
	struct hardlockup_info *hl_info = per_cpu_ptr(&percpu_hl_info, cpu);

	exynos_ss_get_hardlockup_info(cpu, hl_info);

	if (hl_info->hl_type == HL_TASK_STUCK) {
		pr_auto(ASL9, "Hardlockup state: %s, Latency: %lluns, TASK: %s\n",
			hl_to_name[hl_info->hl_type], hl_info->delay_time, hl_info->task_info.task_comm);
	} else if (hl_info->hl_type == HL_IRQ_STUCK) {
		pr_auto(ASL9, "Hardlockup state: %s, Latency: %lluns, IRQ: %d, Func: %pf\n",
			hl_to_name[hl_info->hl_type], hl_info->delay_time, hl_info->irq_info.irq, hl_info->irq_info.fn);
	} else if (hl_info->hl_type == HL_IDLE_STUCK) {
		pr_auto(ASL9, "Hardlockup state: %s, Latency: %lluns, mode: %s\n",
			hl_to_name[hl_info->hl_type], hl_info->delay_time, hl_info->cpuidle_info.mode);
	} else if (hl_info->hl_type == HL_SMC_CALL_STUCK) {
		pr_auto(ASL9, "Hardlockup state: %s, Latency: %lluns, CMD: %u\n",
			hl_to_name[hl_info->hl_type], hl_info->delay_time, hl_info->smc_info.cmd);
	} else if (hl_info->hl_type == HL_IRQ_STORM) {
		pr_auto(ASL9, "Hardlockup state: %s, Latency: %lluns, IRQ: %d, Func: %pf, Avg period: %lluns\n",
			hl_to_name[hl_info->hl_type], hl_info->delay_time, hl_info->irq_info.irq, hl_info->irq_info.fn, hl_info->irq_info.avg_period);
	} else if (hl_info->hl_type == HL_UNKNOWN_STUCK) {
		pr_auto(ASL9, "Hardlockup state: %s, Latency: %lluns, TASK: %s\n",
			hl_to_name[hl_info->hl_type], hl_info->delay_time, hl_info->task_info.task_comm);
	}
}
void update_hardlockup_type(unsigned int cpu)
{
	struct hardlockup_info *hl_info = per_cpu_ptr(&percpu_hl_info, cpu);

	if (hl_info->hl_type == HL_TASK_STUCK && !irqs_disabled()) {
		hl_info->hl_type = HL_UNKNOWN_STUCK;
		pr_info("Unknown stuck: IRQs were enabled, but no IRQ was serviced\n");
	}
}
EXPORT_SYMBOL(update_hardlockup_type);
unsigned long long get_hardlockup_thresh(void)
{
	return (hardlockup_thresh * NSEC_PER_SEC - sample_period);
}
EXPORT_SYMBOL(get_hardlockup_thresh);
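/*
 * With the defaults (hardlockup_thresh = 12, sample_period = 4e9 ns) this
 * reports 12e9 - 4e9 = 8e9 ns: one sample period short of the full hard
 * lockup window.
 */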
#endif /* CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU */
#endif /* CONFIG_SEC_DEBUG */