/* local apic based NMI watchdog for various CPUs.
   This file also handles reservation of performance counters for coordination
   with other users (like oprofile).

   Note that these events normally don't tick when the CPU idles. This means
   the frequency varies with CPU load.

   Original code for K7/P6 written by Keith Owens */
#include <linux/percpu.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/nmi.h>
#include <asm/apic.h>
#include <asm/intel_arch_perfmon.h>
struct nmi_watchdog_ctlblk {
	unsigned int cccr_msr;
	unsigned int perfctr_msr;	/* the MSR to reset in NMI handler */
	unsigned int evntsel_msr;	/* the MSR to select the events to handle */
};
/* Interface defining a CPU specific perfctr watchdog */
struct wd_ops {
	int (*reserve)(void);
	void (*unreserve)(void);
	int (*setup)(unsigned nmi_hz);
	void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
	void (*stop)(void);
	unsigned perfctr;
	unsigned evntsel;
	u64 checkbit;
};

static struct wd_ops *wd_ops;
/* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and its
 * offset from MSR_P4_BSU_ESCR0. It will be the max for all platforms (for now)
 */
#define NMI_MAX_COUNTER_BITS 66
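
/*
 * Worked arithmetic (added for clarity): MSR_P4_CRU_ESCR5 is 0x3e2 and
 * MSR_P4_BSU_ESCR0 is 0x3a0, so the largest bit index the evntsel
 * mapping below can produce is 0x3e2 - 0x3a0 = 0x42 = 66, hence 66 bits
 * in each ownership bitmap.
 */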
/* perfctr_nmi_owner tracks the ownership of the perfctr registers:
 * evtsel_nmi_owner tracks the ownership of the event selection registers
 * - different performance counters / event selection registers may be
 *   reserved by different subsystems; this reservation system just tries
 *   to coordinate things a little
 */
static DECLARE_BITMAP(perfctr_nmi_owner, NMI_MAX_COUNTER_BITS);
static DECLARE_BITMAP(evntsel_nmi_owner, NMI_MAX_COUNTER_BITS);
static DEFINE_PER_CPU(struct nmi_watchdog_ctlblk, nmi_watchdog_ctlblk);
/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
{
	/* returns the bit offset of the performance counter register */
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return (msr - MSR_K7_PERFCTR0);
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
			return (msr - MSR_ARCH_PERFMON_PERFCTR0);

		switch (boot_cpu_data.x86) {
		case 6:
			return (msr - MSR_P6_PERFCTR0);
		case 15:
			return (msr - MSR_P4_BPU_PERFCTR0);
		}
	}
	return 0;
}
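
/*
 * Example (added for clarity, using the architectural MSR layout): on
 * AMD, MSR_K7_PERFCTR0 is 0xc0010004, so MSR_K7_PERFCTR1 (0xc0010005)
 * maps to reservation bit 1 in perfctr_nmi_owner.
 */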
/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
{
	/* returns the bit offset of the event selection register */
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return (msr - MSR_K7_EVNTSEL0);
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
			return (msr - MSR_ARCH_PERFMON_EVENTSEL0);

		switch (boot_cpu_data.x86) {
		case 6:
			return (msr - MSR_P6_EVNTSEL0);
		case 15:
			return (msr - MSR_P4_BSU_ESCR0);
		}
	}
	return 0;
}
/* checks for a bit availability (hack for oprofile) */
int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
{
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	return (!test_bit(counter, perfctr_nmi_owner));
}
/* checks an msr for availability */
int avail_to_resrv_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	return (!test_bit(counter, perfctr_nmi_owner));
}
int reserve_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	if (!test_and_set_bit(counter, perfctr_nmi_owner))
		return 1;
	return 0;
}
void release_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	clear_bit(counter, perfctr_nmi_owner);
}
int reserve_evntsel_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_evntsel_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	if (!test_and_set_bit(counter, evntsel_nmi_owner))
		return 1;
	return 0;
}
void release_evntsel_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_evntsel_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	clear_bit(counter, evntsel_nmi_owner);
}
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi);
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
EXPORT_SYMBOL(reserve_perfctr_nmi);
EXPORT_SYMBOL(release_perfctr_nmi);
EXPORT_SYMBOL(reserve_evntsel_nmi);
EXPORT_SYMBOL(release_evntsel_nmi);
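
/*
 * Illustrative usage sketch (added commentary, not part of the original
 * file): an external user such as oprofile would claim a counter/evntsel
 * pair through the exported API above before touching the hardware,
 * and release both when done, e.g.:
 *
 *	if (reserve_perfctr_nmi(MSR_K7_PERFCTR0)) {
 *		if (reserve_evntsel_nmi(MSR_K7_EVNTSEL0)) {
 *			... program and use the counter ...
 *			release_evntsel_nmi(MSR_K7_EVNTSEL0);
 *		}
 *		release_perfctr_nmi(MSR_K7_PERFCTR0);
 *	}
 */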
void disable_lapic_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);

	if (atomic_read(&nmi_active) <= 0)
		return;

	on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1);
	wd_ops->unreserve();

	BUG_ON(atomic_read(&nmi_active) != 0);
}
void enable_lapic_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);

	/* are we already enabled */
	if (atomic_read(&nmi_active) != 0)
		return;

	/* are we lapic aware */
	if (!wd_ops)
		return;
	if (!wd_ops->reserve()) {
		printk(KERN_ERR "NMI watchdog: cannot reserve perfctrs\n");
		return;
	}

	on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1);
	touch_nmi_watchdog();
}
/*
 * Activate the NMI watchdog via the local APIC.
 */

static unsigned int adjust_for_32bit_ctr(unsigned int hz)
{
	u64 counter_val;
	unsigned int retval = hz;

	/*
	 * On Intel CPUs with P6/ARCH_PERFMON only 32 bits in the counter
	 * are writable, with higher bits sign extending from bit 31.
	 * So we can only program the counter with 31 bit values, and the
	 * 32nd bit must be 1 so that bits 33..63 sign extend to 1.
	 * Find the appropriate nmi_hz.
	 */
	counter_val = (u64)cpu_khz * 1000;
	do_div(counter_val, retval);
	if (counter_val > 0x7fffffffULL) {
		u64 count = (u64)cpu_khz * 1000;
		do_div(count, 0x7fffffffUL);
		retval = count + 1;
	}
	return retval;
}
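
/*
 * Worked example (added for clarity): on a 3 GHz CPU with hz == 1,
 * counter_val = 3,000,000,000 which exceeds 0x7fffffff (2,147,483,647),
 * so retval becomes 3,000,000,000 / 2,147,483,647 + 1 = 2; the watchdog
 * then fires twice per second so each period fits in 31 bits.
 */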
static void write_watchdog_counter(unsigned int perfctr_msr,
				   const char *descr, unsigned nmi_hz)
{
	u64 count = (u64)cpu_khz * 1000;

	do_div(count, nmi_hz);
	if (descr)
		Dprintk("setting %s to -0x%08Lx\n", descr, count);
	wrmsrl(perfctr_msr, 0 - count);
}
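
/*
 * Added note: the counter counts up and raises its interrupt on
 * overflow, so programming it to -(cpu_khz * 1000 / nmi_hz) makes it
 * overflow after 1/nmi_hz seconds of unhalted cycles, e.g. about
 * -2,000,000,000 on a 2 GHz CPU with nmi_hz == 1.
 */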
static void write_watchdog_counter32(unsigned int perfctr_msr,
				     const char *descr, unsigned nmi_hz)
{
	u64 count = (u64)cpu_khz * 1000;

	do_div(count, nmi_hz);
	if (descr)
		Dprintk("setting %s to -0x%08Lx\n", descr, count);
	wrmsr(perfctr_msr, (u32)(-count), 0);
}
/* AMD K7/K8/Family10h/Family11h support. AMD keeps this interface
   nicely stable so there is not much variety */

#define K7_EVNTSEL_ENABLE	(1 << 22)
#define K7_EVNTSEL_INT		(1 << 20)
#define K7_EVNTSEL_OS		(1 << 17)
#define K7_EVNTSEL_USR		(1 << 16)
#define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING	0x76
#define K7_NMI_EVENT		K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING
static int setup_k7_watchdog(unsigned nmi_hz)
{
	unsigned int perfctr_msr, evntsel_msr;
	unsigned int evntsel;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	perfctr_msr = MSR_K7_PERFCTR0;
	evntsel_msr = MSR_K7_EVNTSEL0;

	wrmsrl(perfctr_msr, 0UL);

	evntsel = K7_EVNTSEL_INT
		| K7_EVNTSEL_OS
		| K7_EVNTSEL_USR
		| K7_NMI_EVENT;

	/* setup the timer */
	wrmsr(evntsel_msr, evntsel, 0);
	write_watchdog_counter(perfctr_msr, "K7_PERFCTR0", nmi_hz);
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	evntsel |= K7_EVNTSEL_ENABLE;
	wrmsr(evntsel_msr, evntsel, 0);

	wd->perfctr_msr = perfctr_msr;
	wd->evntsel_msr = evntsel_msr;
	wd->cccr_msr = 0;	/* unused */
	return 1;
}
static void single_msr_stop_watchdog(void)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	wrmsr(wd->evntsel_msr, 0, 0);
}
static int single_msr_reserve(void)
{
	if (!reserve_perfctr_nmi(wd_ops->perfctr))
		return 0;

	if (!reserve_evntsel_nmi(wd_ops->evntsel)) {
		release_perfctr_nmi(wd_ops->perfctr);
		return 0;
	}
	return 1;
}
static void single_msr_unreserve(void)
{
	release_evntsel_nmi(wd_ops->evntsel);
	release_perfctr_nmi(wd_ops->perfctr);
}
static void single_msr_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
{
	/* start the cycle over again */
	write_watchdog_counter(wd->perfctr_msr, NULL, nmi_hz);
}
static struct wd_ops k7_wd_ops = {
	.reserve = single_msr_reserve,
	.unreserve = single_msr_unreserve,
	.setup = setup_k7_watchdog,
	.rearm = single_msr_rearm,
	.stop = single_msr_stop_watchdog,
	.perfctr = MSR_K7_PERFCTR0,
	.evntsel = MSR_K7_EVNTSEL0,
	.checkbit = 1ULL << 63,
};
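
/*
 * Note on .checkbit (added commentary, inferred from lapic_wd_event()
 * below): the perfctr is programmed with a negative value, and checkbit
 * is the sign bit the NMI handler tests to decide whether the counter
 * has overflowed yet; while it is still set the NMI was not ours. K7
 * checks bit 63 of the MSR image; the 40-bit Intel counters below use
 * bit 39 instead.
 */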
/* Intel Model 6 (PPro+,P2,P3,P-M,Core1) */

#define P6_EVNTSEL0_ENABLE	(1 << 22)
#define P6_EVNTSEL_INT		(1 << 20)
#define P6_EVNTSEL_OS		(1 << 17)
#define P6_EVNTSEL_USR		(1 << 16)
#define P6_EVENT_CPU_CLOCKS_NOT_HALTED	0x79
#define P6_NMI_EVENT		P6_EVENT_CPU_CLOCKS_NOT_HALTED
static int setup_p6_watchdog(unsigned nmi_hz)
{
	unsigned int perfctr_msr, evntsel_msr;
	unsigned int evntsel;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	perfctr_msr = MSR_P6_PERFCTR0;
	evntsel_msr = MSR_P6_EVNTSEL0;

	wrmsrl(perfctr_msr, 0UL);

	evntsel = P6_EVNTSEL_INT
		| P6_EVNTSEL_OS
		| P6_EVNTSEL_USR
		| P6_NMI_EVENT;

	/* setup the timer */
	wrmsr(evntsel_msr, evntsel, 0);
	nmi_hz = adjust_for_32bit_ctr(nmi_hz);
	write_watchdog_counter32(perfctr_msr, "P6_PERFCTR0", nmi_hz);
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	evntsel |= P6_EVNTSEL0_ENABLE;
	wrmsr(evntsel_msr, evntsel, 0);

	wd->perfctr_msr = perfctr_msr;
	wd->evntsel_msr = evntsel_msr;
	wd->cccr_msr = 0;	/* unused */
	return 1;
}
static void p6_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
{
	/* P6 based Pentium M needs to re-unmask
	 * the apic vector but it doesn't hurt
	 * other P6 variants.
	 * ArchPerfmon/Core Duo also needs this */
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	/* P6/ARCH_PERFMON has 32 bit counter write */
	write_watchdog_counter32(wd->perfctr_msr, NULL, nmi_hz);
}
static struct wd_ops p6_wd_ops = {
	.reserve = single_msr_reserve,
	.unreserve = single_msr_unreserve,
	.setup = setup_p6_watchdog,
	.rearm = p6_rearm,
	.stop = single_msr_stop_watchdog,
	.perfctr = MSR_P6_PERFCTR0,
	.evntsel = MSR_P6_EVNTSEL0,
	.checkbit = 1ULL << 39,
};
/* Intel P4 performance counters. By far the most complicated of all. */

#define MSR_P4_MISC_ENABLE_PERF_AVAIL	(1 << 7)
#define P4_ESCR_EVENT_SELECT(N)	((N) << 25)
#define P4_ESCR_OS		(1 << 3)
#define P4_ESCR_USR		(1 << 2)
#define P4_CCCR_OVF_PMI0	(1 << 26)
#define P4_CCCR_OVF_PMI1	(1 << 27)
#define P4_CCCR_THRESHOLD(N)	((N) << 20)
#define P4_CCCR_COMPLEMENT	(1 << 19)
#define P4_CCCR_COMPARE		(1 << 18)
#define P4_CCCR_REQUIRED	(3 << 16)
#define P4_CCCR_ESCR_SELECT(N)	((N) << 13)
#define P4_CCCR_ENABLE		(1 << 12)
#define P4_CCCR_OVF		(1 << 31)
/* Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter
   CRU_ESCR0 (with any non-null event selector) through a complemented
   max threshold. [IA32-Vol3, Section 14.9.9] */
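
/*
 * Added commentary (an interpretation of the setup below, not original
 * text): with P4_CCCR_COMPARE and P4_CCCR_COMPLEMENT set and a
 * threshold of 15, the "count <= threshold" comparison is true on every
 * cycle, so IQ_COUNTER0 increments once per cycle like a clock no
 * matter which event CRU_ESCR0 actually selects.
 */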
static int setup_p4_watchdog(unsigned nmi_hz)
{
	unsigned int perfctr_msr, evntsel_msr, cccr_msr;
	unsigned int evntsel, cccr_val;
	unsigned int misc_enable, dummy;
	unsigned int ht_num;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	rdmsr(MSR_IA32_MISC_ENABLE, misc_enable, dummy);
	if (!(misc_enable & MSR_P4_MISC_ENABLE_PERF_AVAIL))
		return 0;

#ifdef CONFIG_SMP
	/* detect which hyperthread we are on */
	if (smp_num_siblings == 2) {
		unsigned int ebx, apicid;

		ebx = cpuid_ebx(1);
		apicid = (ebx >> 24) & 0xff;
		ht_num = apicid & 1;
	} else
#endif
		ht_num = 0;

	/* performance counters are shared resources
	 * assign each hyperthread its own set
	 * (re-use the ESCR0 register, seems safe
	 * and keeps the cccr_val the same)
	 */
	if (!ht_num) {
		/* logical cpu 0 */
		perfctr_msr = MSR_P4_IQ_PERFCTR0;
		evntsel_msr = MSR_P4_CRU_ESCR0;
		cccr_msr = MSR_P4_IQ_CCCR0;
		cccr_val = P4_CCCR_OVF_PMI0 | P4_CCCR_ESCR_SELECT(4);
	} else {
		/* logical cpu 1 */
		perfctr_msr = MSR_P4_IQ_PERFCTR1;
		evntsel_msr = MSR_P4_CRU_ESCR0;
		cccr_msr = MSR_P4_IQ_CCCR1;
		cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4);
	}

	evntsel = P4_ESCR_EVENT_SELECT(0x3F)
		| P4_ESCR_OS
		| P4_ESCR_USR;

	cccr_val |= P4_CCCR_THRESHOLD(15)
		 | P4_CCCR_COMPLEMENT
		 | P4_CCCR_COMPARE
		 | P4_CCCR_REQUIRED;

	wrmsr(evntsel_msr, evntsel, 0);
	wrmsr(cccr_msr, cccr_val, 0);
	write_watchdog_counter(perfctr_msr, "P4_IQ_COUNTER0", nmi_hz);
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	cccr_val |= P4_CCCR_ENABLE;
	wrmsr(cccr_msr, cccr_val, 0);
	wd->perfctr_msr = perfctr_msr;
	wd->evntsel_msr = evntsel_msr;
	wd->cccr_msr = cccr_msr;
	return 1;
}
static void stop_p4_watchdog(void)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	wrmsr(wd->cccr_msr, 0, 0);
	wrmsr(wd->evntsel_msr, 0, 0);
}
static int p4_reserve(void)
{
	if (!reserve_perfctr_nmi(MSR_P4_IQ_PERFCTR0))
		return 0;
#ifdef CONFIG_SMP
	if (smp_num_siblings > 1 && !reserve_perfctr_nmi(MSR_P4_IQ_PERFCTR1))
		goto fail1;
#endif
	if (!reserve_evntsel_nmi(MSR_P4_CRU_ESCR0))
		goto fail2;
	/* RED-PEN why is ESCR1 not reserved here? */
	return 1;
 fail2:
#ifdef CONFIG_SMP
	if (smp_num_siblings > 1)
		release_perfctr_nmi(MSR_P4_IQ_PERFCTR1);
 fail1:
#endif
	release_perfctr_nmi(MSR_P4_IQ_PERFCTR0);
	return 0;
}
static void p4_unreserve(void)
{
#ifdef CONFIG_SMP
	if (smp_num_siblings > 1)
		release_perfctr_nmi(MSR_P4_IQ_PERFCTR1);
#endif
	release_evntsel_nmi(MSR_P4_CRU_ESCR0);
	release_perfctr_nmi(MSR_P4_IQ_PERFCTR0);
}
static void p4_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
{
	unsigned dummy;
	/*
	 * P4 quirks:
	 * - An overflown perfctr will assert its interrupt
	 *   until the OVF flag in its CCCR is cleared.
	 * - LVTPC is masked on interrupt and must be
	 *   unmasked by the LVTPC handler.
	 */
	rdmsrl(wd->cccr_msr, dummy);
	dummy &= ~P4_CCCR_OVF;
	wrmsrl(wd->cccr_msr, dummy);
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	/* start the cycle over again */
	write_watchdog_counter(wd->perfctr_msr, NULL, nmi_hz);
}
static struct wd_ops p4_wd_ops = {
	.reserve = p4_reserve,
	.unreserve = p4_unreserve,
	.setup = setup_p4_watchdog,
	.rearm = p4_rearm,
	.stop = stop_p4_watchdog,
	/* RED-PEN this is wrong for the other sibling */
	.perfctr = MSR_P4_BPU_PERFCTR0,
	.evntsel = MSR_P4_BSU_ESCR0,
	.checkbit = 1ULL << 39,
};
/* Watchdog using the Intel architected PerfMon. Used for Core2 and hopefully
   all future Intel CPUs. */

#define ARCH_PERFMON_NMI_EVENT_SEL	ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
#define ARCH_PERFMON_NMI_EVENT_UMASK	ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
static int setup_intel_arch_watchdog(unsigned nmi_hz)
{
	unsigned int ebx;
	union cpuid10_eax eax;
	unsigned int unused;
	unsigned int perfctr_msr, evntsel_msr;
	unsigned int evntsel;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	/*
	 * Check whether the Architectural PerfMon supports
	 * Unhalted Core Cycles Event or not.
	 * NOTE: Corresponding bit = 0 in ebx indicates event present.
	 */
	cpuid(10, &(eax.full), &ebx, &unused, &unused);
	if ((eax.split.mask_length < (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX+1)) ||
	    (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
		return 0;

	perfctr_msr = MSR_ARCH_PERFMON_PERFCTR1;
	evntsel_msr = MSR_ARCH_PERFMON_EVENTSEL1;

	wrmsrl(perfctr_msr, 0UL);

	evntsel = ARCH_PERFMON_EVENTSEL_INT
		| ARCH_PERFMON_EVENTSEL_OS
		| ARCH_PERFMON_EVENTSEL_USR
		| ARCH_PERFMON_NMI_EVENT_SEL
		| ARCH_PERFMON_NMI_EVENT_UMASK;

	/* setup the timer */
	wrmsr(evntsel_msr, evntsel, 0);
	nmi_hz = adjust_for_32bit_ctr(nmi_hz);
	write_watchdog_counter32(perfctr_msr, "INTEL_ARCH_PERFCTR0", nmi_hz);
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
	wrmsr(evntsel_msr, evntsel, 0);

	wd->perfctr_msr = perfctr_msr;
	wd->evntsel_msr = evntsel_msr;
	wd->cccr_msr = 0;	/* unused */
	wd_ops->checkbit = 1ULL << (eax.split.bit_width - 1);
	return 1;
}
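
/*
 * Added note on the cpuid(10, ...) check above (interpretation, not
 * original text): leaf 0xA reports in eax.split.mask_length how many
 * architectural events the CPU enumerates, and ebx carries one
 * "event unavailable" bit per event, so unhalted core cycles is usable
 * only if its index is below mask_length and its ebx bit is clear.
 */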
static struct wd_ops intel_arch_wd_ops = {
	.reserve = single_msr_reserve,
	.unreserve = single_msr_unreserve,
	.setup = setup_intel_arch_watchdog,
	.rearm = p6_rearm,
	.stop = single_msr_stop_watchdog,
	.perfctr = MSR_ARCH_PERFMON_PERFCTR0,
	.evntsel = MSR_ARCH_PERFMON_EVENTSEL0,
};
static void probe_nmi_watchdog(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		if (boot_cpu_data.x86 != 6 && boot_cpu_data.x86 != 15 &&
		    boot_cpu_data.x86 != 16)
			return;
		wd_ops = &k7_wd_ops;
		break;
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
			wd_ops = &intel_arch_wd_ops;
			break;
		}
		switch (boot_cpu_data.x86) {
		case 6:
			if (boot_cpu_data.x86_model > 0xd)
				return;

			wd_ops = &p6_wd_ops;
			break;
		case 15:
			if (boot_cpu_data.x86_model > 0x4)
				return;

			wd_ops = &p4_wd_ops;
			break;
		}
		return;
	}
}
/* Interface to nmi.c */

int lapic_watchdog_init(unsigned nmi_hz)
{
	if (!wd_ops) {
		probe_nmi_watchdog();
		if (!wd_ops)
			return -1;

		if (!wd_ops->reserve()) {
			printk(KERN_ERR
				"NMI watchdog: cannot reserve perfctrs\n");
			return -1;
		}
	}

	if (!(wd_ops->setup(nmi_hz))) {
		printk(KERN_ERR "Cannot setup NMI watchdog on CPU %d\n",
			raw_smp_processor_id());
		return -1;
	}

	return 0;
}
void lapic_watchdog_stop(void)
{
	if (wd_ops)
		wd_ops->stop();
}
unsigned lapic_adjust_nmi_hz(unsigned hz)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	if (wd->perfctr_msr == MSR_P6_PERFCTR0 ||
	    wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR1)
		hz = adjust_for_32bit_ctr(hz);
	return hz;
}
int lapic_wd_event(unsigned nmi_hz)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
	u64 ctr;

	rdmsrl(wd->perfctr_msr, ctr);
	if (ctr & wd_ops->checkbit) /* perfctr still running? */
		return 0;

	wd_ops->rearm(wd, nmi_hz);
	return 1;
}
int lapic_watchdog_ok(void)
{
	return wd_ops != NULL;
}