kernel/watchdog.c
/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "NMI watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/lockdep.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/smpboot.h>
#include <linux/sched/rt.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
#include <linux/perf_event.h>

int watchdog_enabled = 1;
int __read_mostly watchdog_thresh = 10;
static int __read_mostly watchdog_disabled;
static u64 __read_mostly sample_period;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
#endif
#ifdef CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU
static cpumask_t __read_mostly watchdog_cpus;
#endif
#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
#endif

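/*
 * Overview: two hardlockup detector flavours are supported, chosen at
 * build time.  With CONFIG_HARDLOCKUP_DETECTOR_NMI each CPU arms a perf
 * cycle counter whose NMI overflow callback checks that the local hrtimer
 * is still firing.  With CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU (for
 * systems without an NMI-capable PMU) the online CPUs form a ring in
 * watchdog_cpus and each CPU instead checks that its successor's
 * hrtimer_interrupts counter keeps advancing.
 */
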
/* boot commands */
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static int hardlockup_panic =
			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;

static int __init hardlockup_panic_setup(char *str)
{
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	else if (!strncmp(str, "nopanic", 7))
		hardlockup_panic = 0;
	else if (!strncmp(str, "0", 1))
		watchdog_enabled = 0;
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
#endif
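
/*
 * Illustrative kernel command lines for the handler above (values follow
 * the strncmp() checks in hardlockup_panic_setup()):
 *
 *   nmi_watchdog=panic    - panic on a detected hard lockup
 *   nmi_watchdog=nopanic  - warn but keep running
 *   nmi_watchdog=0        - disable the watchdog entirely
 */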

unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);

	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
	watchdog_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);

/* deprecated */
static int __init nosoftlockup_setup(char *str)
{
	watchdog_enabled = 0;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);

/*
 * Hard-lockup warnings should be triggered after just a few seconds.
 * Soft-lockups can have false positives under extreme conditions, so we
 * generally want a higher threshold for soft lockups than for hard
 * lockups.  The two thresholds are therefore coupled by a fixed factor:
 * the soft-lockup threshold is twice the hard-lockup threshold.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}

/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(void)
{
	return local_clock() >> 30LL;	/* 2^30 ~= 10^9 */
}
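
/*
 * Worked example of the shift above (illustrative numbers): with
 * local_clock() = 60 * 10^9 ns of uptime, 60e9 >> 30 = 55, i.e. one
 * "second" here is really 2^30 ns = 1.074 s, about 7% long.  The error
 * is harmless; it merely makes the softlockup threshold comparison about
 * 7% more lenient in wall-clock terms.
 */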

static void set_sample_period(void)
{
	/*
	 * Convert watchdog_thresh from seconds to ns.
	 * The divide by 5 gives the hrtimer several chances (two
	 * or three with the current relation between the soft
	 * and hard thresholds) to increment before the
	 * hardlockup detector generates a warning.
	 */
	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
}
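
/*
 * Worked example with the defaults (a sketch, assuming watchdog_thresh
 * = 10): get_softlockup_thresh() = 20 s, so sample_period =
 * 20 * (10^9 / 5) ns = 4 * 10^9 ns = 4 s.  The watchdog hrtimer thus
 * fires five times per soft-lockup threshold window.
 */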

/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
	__this_cpu_write(watchdog_touch_ts, get_timestamp());
}

void touch_softlockup_watchdog(void)
{
	__this_cpu_write(watchdog_touch_ts, 0);
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * This is done lockless.  Do we care if a 0 races with a
	 * timestamp?  All it means is that the softlockup check
	 * starts one cycle later.
	 */
	for_each_online_cpu(cpu)
		per_cpu(watchdog_touch_ts, cpu) = 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
void touch_nmi_watchdog(void)
{
	if (watchdog_enabled) {
		unsigned cpu;

		for_each_present_cpu(cpu) {
			if (per_cpu(watchdog_nmi_touch, cpu) != true)
				per_cpu(watchdog_nmi_touch, cpu) = true;
		}
	}
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);

#endif

void touch_softlockup_watchdog_sync(void)
{
	__raw_get_cpu_var(softlockup_touch_sync) = true;
	__raw_get_cpu_var(watchdog_touch_ts) = 0;
}
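
/*
 * Usage note (a hedged sketch; the callers live outside this file): the
 * _sync variant is for contexts where the scheduler clock may be stale,
 * e.g. kgdb resuming after the machine was stopped in a debugger.
 * Setting softlockup_touch_sync makes watchdog_timer_fn() call
 * sched_clock_tick() to update the clock before taking a fresh timestamp.
 */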

#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
/* watchdog detector functions */
static int is_hardlockup(void)
{
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return 1;

	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return 0;
}
#endif

#ifdef CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU
static unsigned int watchdog_next_cpu(unsigned int cpu)
{
	cpumask_t cpus = watchdog_cpus;
	unsigned int next_cpu;

	next_cpu = cpumask_next(cpu, &cpus);
	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(&cpus);

	if (next_cpu == cpu)
		return nr_cpu_ids;

	return next_cpu;
}
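
/*
 * Illustrative walk of the ring (hypothetical CPU set): with
 * watchdog_cpus = {0, 1, 3}, watchdog_next_cpu(1) returns 3 and
 * watchdog_next_cpu(3) wraps around to 0.  If only one CPU is in the
 * mask, the wrap lands back on the caller and nr_cpu_ids is returned:
 * a CPU never cross-checks itself.
 */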

static int is_hardlockup_other_cpu(unsigned int cpu)
{
	unsigned long hrint = per_cpu(hrtimer_interrupts, cpu);

	if (per_cpu(hrtimer_interrupts_saved, cpu) == hrint)
		return 1;

	per_cpu(hrtimer_interrupts_saved, cpu) = hrint;
	return 0;
}

static void watchdog_check_hardlockup_other_cpu(void)
{
	unsigned int next_cpu;

	/*
	 * Test for hardlockups every 3 samples.  The sample period is
	 * watchdog_thresh * 2 / 5, so 3 samples gets us back to slightly
	 * over watchdog_thresh (over by 20%).
	 */
	if (__this_cpu_read(hrtimer_interrupts) % 3 != 0)
		return;

	/* check for a hardlockup on the next cpu */
	next_cpu = watchdog_next_cpu(smp_processor_id());
	if (next_cpu >= nr_cpu_ids)
		return;

	smp_rmb();

	if (per_cpu(watchdog_nmi_touch, next_cpu) == true) {
		per_cpu(watchdog_nmi_touch, next_cpu) = false;
		return;
	}

	if (is_hardlockup_other_cpu(next_cpu)) {
		/* only warn once */
		if (per_cpu(hard_watchdog_warn, next_cpu) == true)
			return;

		if (hardlockup_panic)
			panic("Watchdog detected hard LOCKUP on cpu %u", next_cpu);
		else
			WARN(1, "Watchdog detected hard LOCKUP on cpu %u", next_cpu);

		per_cpu(hard_watchdog_warn, next_cpu) = true;
	} else {
		per_cpu(hard_watchdog_warn, next_cpu) = false;
	}
}
#else
static inline void watchdog_check_hardlockup_other_cpu(void) { return; }
#endif
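
/*
 * Timing sketch for the "% 3" cadence above, assuming the default
 * watchdog_thresh = 10: samples arrive every 4 s, so a cross-check runs
 * on every 3rd sample, i.e. every 12 s = 1.2 * watchdog_thresh, the
 * "over by 20%" from the comment.  A peer whose hrtimer_interrupts count
 * is unchanged between two consecutive checks is declared hard-locked.
 */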

static int is_softlockup(unsigned long touch_ts)
{
	unsigned long now = get_timestamp();

	/* Warn about unreasonable delays: */
	if (time_after(now, touch_ts + get_softlockup_thresh()))
		return now - touch_ts;

	return 0;
}
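
/*
 * Example return value (illustrative numbers): with touch_ts = 100,
 * now = 125 and get_softlockup_thresh() = 20, the 25 s delay exceeds the
 * threshold and 25 is returned, which watchdog_timer_fn() reports as the
 * stall duration.  Anything within the threshold yields 0 ("no lockup").
 */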

#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI

static struct perf_event_attr wd_hw_attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.size		= sizeof(struct perf_event_attr),
	.pinned		= 1,
	.disabled	= 1,
};

/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event,
		 struct perf_sample_data *data,
		 struct pt_regs *regs)
{
	/* Ensure the watchdog never gets throttled */
	event->hw.interrupts = 0;

	if (__this_cpu_read(watchdog_nmi_touch) == true) {
		__this_cpu_write(watchdog_nmi_touch, false);
		return;
	}

	/*
	 * Check for a hardlockup by making sure our timer interrupt is
	 * incrementing.  The timer interrupt should have fired multiple
	 * times before we overflowed.  If it hasn't, that is a good
	 * indication the cpu is stuck.
	 */
	if (is_hardlockup()) {
		int this_cpu = smp_processor_id();

		/* only print hardlockups once */
		if (__this_cpu_read(hard_watchdog_warn) == true)
			return;

		if (hardlockup_panic)
			panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
		else
			WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);

		__this_cpu_write(hard_watchdog_warn, true);
		return;
	}

	__this_cpu_write(hard_watchdog_warn, false);
	return;
}
#endif /* CONFIG_HARDLOCKUP_DETECTOR_NMI */

static void watchdog_interrupt_count(void)
{
	__this_cpu_inc(hrtimer_interrupts);
}

static int watchdog_nmi_enable(unsigned int cpu);
static void watchdog_nmi_disable(unsigned int cpu);

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* test for hardlockups on the next cpu */
	watchdog_check_hardlockup_other_cpu();

	/* kick the softlockup detector */
	wake_up_process(__this_cpu_read(softlockup_watchdog));

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	if (touch_ts == 0) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		/* Clear the guest paused flag on watchdog reset */
		kvm_check_and_clear_guest_paused();
		__touch_watchdog();
		return HRTIMER_RESTART;
	}

	/*
	 * Check for a softlockup by making sure a high priority task is
	 * being scheduled.  The task touches the watchdog to indicate it
	 * is getting cpu time.  If it hasn't, some task is hogging the cpu.
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like a soft lockup; check to see if the host
		 * stopped the vm before we issue the warning.
		 */
		if (kvm_check_and_clear_guest_paused())
			return HRTIMER_RESTART;

		/* only warn once */
		if (__this_cpu_read(soft_watchdog_warn) == true)
			return HRTIMER_RESTART;

		printk(KERN_EMERG "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_panic)
			panic("softlockup: hung tasks");
		__this_cpu_write(soft_watchdog_warn, true);
	} else
		__this_cpu_write(soft_watchdog_warn, false);

	return HRTIMER_RESTART;
}
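
/*
 * Example console output from the softlockup path above (illustrative
 * values, following the printk format string exactly):
 *
 *   BUG: soft lockup - CPU#2 stuck for 23s! [worker:1234]
 *
 * followed by the module list, the interrupted registers (or a stack
 * dump), and a panic if softlockup_panic is set.
 */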

static void watchdog_set_prio(unsigned int policy, unsigned int prio)
{
	struct sched_param param = { .sched_priority = prio };

	sched_setscheduler(current, policy, &param);
}

static void watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);

	/* kick off the timer for the hardlockup detector */
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = watchdog_timer_fn;

	if (!watchdog_enabled) {
		kthread_park(current);
		return;
	}

	/* Enable the perf event */
	watchdog_nmi_enable(cpu);

	/* done here because hrtimer_start can only pin to smp_processor_id() */
	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
		      HRTIMER_MODE_REL_PINNED);

	/* initialize timestamp */
	watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
	__touch_watchdog();
}

static void watchdog_disable(unsigned int cpu)
{
	struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);

	watchdog_set_prio(SCHED_NORMAL, 0);
	hrtimer_cancel(hrtimer);
	/* disable the perf event */
	watchdog_nmi_disable(cpu);
}

static int watchdog_should_run(unsigned int cpu)
{
	return __this_cpu_read(hrtimer_interrupts) !=
		__this_cpu_read(soft_lockup_hrtimer_cnt);
}

/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once per sample period (4 seconds by default) to
 * reset the softlockup timestamp.  If this gets delayed for more
 * than 2*watchdog_thresh seconds then the debug-printout triggers
 * in watchdog_timer_fn().
 */
static void watchdog(unsigned int cpu)
{
	__this_cpu_write(soft_lockup_hrtimer_cnt,
			 __this_cpu_read(hrtimer_interrupts));
	__touch_watchdog();
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
/*
 * People like the simple clean cpu node info on boot.
 * Reduce the watchdog noise by only printing messages
 * that are different from what cpu0 displayed.
 */
static unsigned long cpu0_err;

static int watchdog_nmi_enable(unsigned int cpu)
{
	struct perf_event_attr *wd_attr;
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	/* is it already setup and enabled? */
	if (event && event->state > PERF_EVENT_STATE_OFF)
		goto out;

	/* it is setup but not enabled */
	if (event != NULL)
		goto out_enable;

	wd_attr = &wd_hw_attr;
	wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);

	/* Try to register using hardware perf events */
	event = perf_event_create_kernel_counter(wd_attr, cpu, NULL,
						 watchdog_overflow_callback, NULL);

	/* save cpu0 error for future comparison */
	if (cpu == 0 && IS_ERR(event))
		cpu0_err = PTR_ERR(event);

	if (!IS_ERR(event)) {
		/* only print for cpu0 or if different from cpu0 */
		if (cpu == 0 || cpu0_err)
			pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
		goto out_save;
	}

	/* skip displaying the same error again */
	if (cpu > 0 && (PTR_ERR(event) == cpu0_err))
		return PTR_ERR(event);

	/* vary the KERN level based on the returned errno */
	if (PTR_ERR(event) == -EOPNOTSUPP)
		pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
	else if (PTR_ERR(event) == -ENOENT)
		pr_warning("disabled (cpu%i): hardware events not enabled\n",
			   cpu);
	else
		pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
		       cpu, PTR_ERR(event));
	return PTR_ERR(event);

	/* success path */
out_save:
	per_cpu(watchdog_ev, cpu) = event;
out_enable:
	perf_event_enable(per_cpu(watchdog_ev, cpu));
out:
	return 0;
}

static void watchdog_nmi_disable(unsigned int cpu)
{
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	if (event) {
		perf_event_disable(event);
		per_cpu(watchdog_ev, cpu) = NULL;

		/* should be in cleanup, but blocks oprofile */
		perf_event_release_kernel(event);
	}
	return;
}
#else
#ifdef CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU
static int watchdog_nmi_enable(unsigned int cpu)
{
	/*
	 * The new cpu will be marked online before the first hrtimer interrupt
	 * runs on it.  If another cpu tests for a hardlockup on the new cpu
	 * before it has run its first hrtimer, it will get a false positive.
	 * Touch the watchdog on the new cpu to delay the first check for at
	 * least 3 sampling periods to guarantee one hrtimer has run on the
	 * new cpu.
	 */
	per_cpu(watchdog_nmi_touch, cpu) = true;
	smp_wmb();
	cpumask_set_cpu(cpu, &watchdog_cpus);
	return 0;
}

static void watchdog_nmi_disable(unsigned int cpu)
{
	unsigned int next_cpu = watchdog_next_cpu(cpu);

	/*
	 * Offlining this cpu will cause the cpu before this one to start
	 * checking the one after this one.  If this cpu just finished
	 * checking the next cpu and updating hrtimer_interrupts_saved, and
	 * then the previous cpu checks it within one sample period, it will
	 * trigger a false positive.  Touch the watchdog on the next cpu to
	 * prevent it.
	 */
	if (next_cpu < nr_cpu_ids)
		per_cpu(watchdog_nmi_touch, next_cpu) = true;
	smp_wmb();
	cpumask_clear_cpu(cpu, &watchdog_cpus);
}
#else
static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
static void watchdog_nmi_disable(unsigned int cpu) { return; }
#endif /* CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU */
#endif /* CONFIG_HARDLOCKUP_DETECTOR_NMI */

/* prepare/enable/disable routines */
/* sysctl functions */
#ifdef CONFIG_SYSCTL
static void watchdog_enable_all_cpus(void)
{
	unsigned int cpu;

	if (watchdog_disabled) {
		watchdog_disabled = 0;
		for_each_online_cpu(cpu)
			kthread_unpark(per_cpu(softlockup_watchdog, cpu));
	}
}

static void watchdog_disable_all_cpus(void)
{
	unsigned int cpu;

	if (!watchdog_disabled) {
		watchdog_disabled = 1;
		for_each_online_cpu(cpu)
			kthread_park(per_cpu(softlockup_watchdog, cpu));
	}
}

/*
 * proc handler for /proc/sys/kernel/nmi_watchdog and watchdog_thresh
 */

int proc_dowatchdog(struct ctl_table *table, int write,
		    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	if (watchdog_disabled < 0)
		return -ENODEV;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	set_sample_period();
	/*
	 * The watchdog threads are only parked or unparked on an actual
	 * state change; the 'watchdog_disabled' check in the
	 * watchdog_*_all_cpus() functions takes care of this.
	 */
	if (watchdog_enabled && watchdog_thresh)
		watchdog_enable_all_cpus();
	else
		watchdog_disable_all_cpus();

	return ret;
}
#endif /* CONFIG_SYSCTL */
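
/*
 * Illustrative shell usage of the proc interface above (paths as named
 * in the comment on proc_dowatchdog(); values are examples only):
 *
 *   # echo 20 > /proc/sys/kernel/watchdog_thresh   (soft threshold 40 s)
 *   # echo 0  > /proc/sys/kernel/nmi_watchdog      (park watchdog threads)
 *
 * Writes funnel through proc_dowatchdog(), which recomputes
 * sample_period and then parks or unparks the per-cpu threads.
 */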

static struct smp_hotplug_thread watchdog_threads = {
	.store			= &softlockup_watchdog,
	.thread_should_run	= watchdog_should_run,
	.thread_fn		= watchdog,
	.thread_comm		= "watchdog/%u",
	.setup			= watchdog_enable,
	.park			= watchdog_disable,
	.unpark			= watchdog_enable,
};

void __init lockup_detector_init(void)
{
	set_sample_period();
	if (smpboot_register_percpu_thread(&watchdog_threads)) {
		pr_err("Failed to create watchdog threads, disabled\n");
		watchdog_disabled = -ENODEV;
	}
}