};
void machine_check_poll(enum mcp_flags flags, mce_banks_t *b);
-int mce_notify_user(void);
+int mce_notify_irq(void);
DECLARE_PER_CPU(struct mce, injectm);
extern struct file_operations mce_chrdev_ops;
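/* Set every bit so the poll below scans all machine check banks */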
memset(&b, 0xff, sizeof(mce_banks_t));
printk(KERN_INFO "Starting machine check poll CPU %d\n", cpu);
machine_check_poll(0, &b);
- mce_notify_user();
+ mce_notify_irq();
printk(KERN_INFO "Finished machine check poll on CPU %d\n",
cpu);
}
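/* Acknowledge the APIC and run the notifier inside a proper irq_enter()/irq_exit() section */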
ack_APIC_irq();
exit_idle();
irq_enter();
- mce_notify_user();
+ mce_notify_irq();
irq_exit();
}
#endif
static void mce_report_event(struct pt_regs *regs)
{
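/* If the interrupted context had interrupts enabled (or was vm86), it is safe to notify directly */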
if (regs->flags & (X86_VM_MASK|X86_EFLAGS_IF)) {
- mce_notify_user();
+ mce_notify_irq();
return;
}
* polling interval, otherwise increase the polling interval.
*/
n = &__get_cpu_var(next_interval);
- if (mce_notify_user())
+ if (mce_notify_irq())
*n = max(*n/2, HZ/100);
else
*n = min(*n*2, (int)round_jiffies_relative(check_interval*HZ));
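The two lines above are the entire adaptive policy: a poll that finds events halves the delay (floored at HZ/100, i.e. 10ms at HZ=1000), while an idle poll doubles it back toward the configured check_interval. Below is a minimal userspace sketch of the same clamped halve/double policy; HZ, CHECK_INTERVAL and next_interval() are illustrative stand-ins, and the kernel's round_jiffies_relative() rounding is omitted.

#include <stdio.h>

#define HZ             1000       /* assumed tick rate, for illustration */
#define CHECK_INTERVAL (5 * 60)   /* seconds; the kernel default is 5 minutes */

/* Poll faster while events keep arriving, back off once they stop. */
static int next_interval(int cur, int events_found)
{
	if (events_found) {
		int fast = cur / 2;
		return fast > HZ / 100 ? fast : HZ / 100;  /* floor: 10ms */
	} else {
		int slow = cur * 2;
		int cap = CHECK_INTERVAL * HZ;             /* ceiling: default interval */
		return slow < cap ? slow : cap;
	}
}

int main(void)
{
	int n = CHECK_INTERVAL * HZ;

	for (int i = 0; i < 5; i++)   /* a burst of events drags the interval down... */
		printf("event: next poll in %d ticks\n", n = next_interval(n, 1));
	for (int i = 0; i < 5; i++)   /* ...quiet polls let it recover */
		printf("quiet: next poll in %d ticks\n", n = next_interval(n, 0));
	return 0;
}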
* Can be called from interrupt context, but not from machine check/NMI
* context.
*/
-int mce_notify_user(void)
+int mce_notify_irq(void)
{
/* Not more than two messages every minute */
static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
}
return 0;
}
-EXPORT_SYMBOL_GPL(mce_notify_user);
+EXPORT_SYMBOL_GPL(mce_notify_irq);
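The DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2) above is what enforces the "not more than two messages every minute" rule. Here is a self-contained userspace sketch of that fixed-window policy; struct ratelimit and ratelimit_ok() are illustrative names rather than the kernel's __ratelimit() implementation, and wall-clock seconds stand in for jiffies.

#include <stdio.h>
#include <time.h>

/* Allow at most `burst` events per `interval`-second window. */
struct ratelimit {
	time_t window_start;
	int    interval;  /* window length in seconds */
	int    burst;     /* events allowed per window */
	int    used;      /* events consumed so far in this window */
};

static int ratelimit_ok(struct ratelimit *r)
{
	time_t now = time(NULL);

	if (now - r->window_start >= r->interval) {  /* window expired: reset */
		r->window_start = now;
		r->used = 0;
	}
	if (r->used < r->burst) {
		r->used++;
		return 1;  /* allowed */
	}
	return 0;          /* suppressed */
}

int main(void)
{
	struct ratelimit r = { time(NULL), 60, 2, 0 };

	for (int i = 0; i < 5; i++)  /* only the first two get through */
		printf("message %d: %s\n", i, ratelimit_ok(&r) ? "printed" : "suppressed");
	return 0;
}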
/*
* Initialize Machine Checks for a CPU.
static void intel_threshold_interrupt(void)
{
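/* CMCI threshold handler: poll only the banks this CPU owns, logging events with timestamps */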
machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
- mce_notify_user();
+ mce_notify_irq();
}
static void print_update(char *type, int *hdr, int num)
#ifdef CONFIG_X86_NEW_MCE
/* notify userspace of pending MCEs */
if (thread_info_flags & _TIF_MCE_NOTIFY)
- mce_notify_user();
+ mce_notify_irq();
#endif /* CONFIG_X86_NEW_MCE */
/* deal with pending signal delivery */