/*
 * Intel specific MCE features.
 * Copyright 2004 Zwane Mwaikambo <zwane@linuxpower.ca>
 * Copyright (C) 2008, 2009 Intel Corporation
 */

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/msr.h>
#include <asm/mce.h>
#include <asm/hw_irq.h>
#include <asm/idle.h>
#include <asm/therm_throt.h>
asmlinkage void smp_thermal_interrupt(void)
{
	__u64 msr_val;

	ack_APIC_irq();
	exit_idle();
	irq_enter();

	rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
	if (therm_throt_process(msr_val & THERM_STATUS_PROCHOT))
		mce_log_therm_throt_event(msr_val);

	inc_irq_stat(irq_thermal_count);
	irq_exit();
}
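/*
 * Only events that therm_throt_process() accepts (it also rate-limits the
 * console throttling warnings) get a thermal-throttle record added to the
 * MCE log.
 */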
/*
 * Support for Intel Corrected Machine Check Interrupts (CMCI). This allows
 * the CPU to raise an interrupt when a corrected machine check happened.
 * Normally we pick those up using a regular polling timer.
 * Also supports reliable discovery of shared banks.
 */

static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned);

/*
 * cmci_discover_lock protects against parallel discovery attempts
 * which could race against each other.
 */
static DEFINE_SPINLOCK(cmci_discover_lock);

#define CMCI_THRESHOLD	1
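/*
 * A threshold of 1 in the IA32_MCi_CTL2 corrected-error count field means
 * a CMCI is raised for every corrected error in a bank this CPU owns.
 */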
static int cmci_supported(int *banks)
{
	u64 cap;

	if (mce_cmci_disabled || mce_ignore_ce)
		return 0;

	/*
	 * Vendor check is not strictly needed, but the initial
	 * initialization is vendor keyed and this makes sure none of
	 * the backdoors are entered otherwise.
	 */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return 0;
	if (!cpu_has_apic || lapic_get_maxlvt() < 6)
		return 0;
	rdmsrl(MSR_IA32_MCG_CAP, cap);
	*banks = min_t(unsigned, MAX_NR_BANKS, cap & 0xff);
	return !!(cap & MCG_CMCI_P);
}
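/*
 * The low byte of IA32_MCG_CAP is the number of reporting banks and
 * MCG_CMCI_P advertises CMCI support; the lapic_get_maxlvt() check makes
 * sure the local APIC actually provides the CMCI LVT entry.
 */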
/*
 * The interrupt handler. This is called on every event.
 * Just call the poller directly to log any events.
 * This could in theory increase the threshold under high load,
 * but doesn't for now.
 */
static void intel_threshold_interrupt(void)
{
	machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
}
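/*
 * Only banks owned by this CPU (mce_banks_owned) are polled here; banks
 * that did not get CMCI ownership remain in mce_poll_banks and are still
 * covered by the regular polling timer.
 */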
static void print_update(char *type, int *hdr, int num)
{
	if (*hdr == 0)
		printk(KERN_INFO "CPU %d MCA banks", smp_processor_id());
	*hdr = 1;
	printk(KERN_CONT " %s:%d", type, num);
}
/*
 * Enable CMCI (Corrected Machine Check Interrupt) for available MCE banks
 * on this CPU. Use the algorithm recommended in the SDM to discover shared
 * banks.
 */
static void cmci_discover(int banks, int boot)
{
	unsigned long *owned = (void *)&__get_cpu_var(mce_banks_owned);
	unsigned long flags;
	int hdr = 0;
	int i;

	spin_lock_irqsave(&cmci_discover_lock, flags);
	for (i = 0; i < banks; i++) {
		u64 val;

		if (test_bit(i, owned))
			continue;

		rdmsrl(MSR_IA32_MC0_CTL2 + i, val);

		/* Already owned by someone else? */
		if (val & CMCI_EN) {
			if (test_and_clear_bit(i, owned) || boot)
				print_update("SHD", &hdr, i);
			__clear_bit(i, __get_cpu_var(mce_poll_banks));
			continue;
		}

		val |= CMCI_EN | CMCI_THRESHOLD;
		wrmsrl(MSR_IA32_MC0_CTL2 + i, val);
		rdmsrl(MSR_IA32_MC0_CTL2 + i, val);

		/* Did the enable bit stick? -- the bank supports CMCI */
		if (val & CMCI_EN) {
			if (!test_and_set_bit(i, owned) || boot)
				print_update("CMCI", &hdr, i);
			__clear_bit(i, __get_cpu_var(mce_poll_banks));
		} else {
			WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks)));
		}
	}
	spin_unlock_irqrestore(&cmci_discover_lock, flags);
	if (hdr)
		printk(KERN_CONT "\n");
}
/*
 * Just in case we missed an event during initialization, check all
 * the CMCI owned banks.
 */
void cmci_recheck(void)
{
	unsigned long flags;
	int banks;

	if (!mce_available(&current_cpu_data) || !cmci_supported(&banks))
		return;
	local_irq_save(flags);
	machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
	local_irq_restore(flags);
}
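/*
 * The poll runs with interrupts disabled so it does not interleave with a
 * CMCI arriving on this CPU for the same banks.
 */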
/*
 * Disable CMCI on this CPU for all banks it owns when it goes down.
 * This allows other CPUs to claim the banks on rediscovery.
 */
void cmci_clear(void)
{
	unsigned long flags;
	u64 val;
	int i;
	int banks;

	if (!cmci_supported(&banks))
		return;
	spin_lock_irqsave(&cmci_discover_lock, flags);
	for (i = 0; i < banks; i++) {
		if (!test_bit(i, __get_cpu_var(mce_banks_owned)))
			continue;
		/* Disable CMCI */
		rdmsrl(MSR_IA32_MC0_CTL2 + i, val);
		val &= ~(CMCI_EN|CMCI_THRESHOLD_MASK);
		wrmsrl(MSR_IA32_MC0_CTL2 + i, val);
		__clear_bit(i, __get_cpu_var(mce_banks_owned));
	}
	spin_unlock_irqrestore(&cmci_discover_lock, flags);
}
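/*
 * Clearing CMCI_EN releases the bank: the next CPU to run cmci_discover()
 * sees the enable bit unset and can claim ownership.
 */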
/*
 * After a CPU went down, cycle through all the other CPUs and rediscover.
 * Must run in process context.
 */
void cmci_rediscover(int dying)
{
	int banks;
	int cpu;
	cpumask_var_t old;

	if (!cmci_supported(&banks))
		return;
	if (!alloc_cpumask_var(&old, GFP_KERNEL))
		return;
	cpumask_copy(old, &current->cpus_allowed);

	for_each_online_cpu(cpu) {
		if (cpu == dying)
			continue;
		if (set_cpus_allowed_ptr(current, cpumask_of(cpu)))
			continue;
		/* Recheck banks in case CPUs don't all have the same */
		if (cmci_supported(&banks))
			cmci_discover(banks, 0);
	}

	set_cpus_allowed_ptr(current, old);
	free_cpumask_var(old);
}
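/*
 * The IA32_MCi_CTL2 MSRs are strictly per-CPU, so rediscovery temporarily
 * binds the current task to each remaining online CPU and runs
 * cmci_discover() there; that is why this must run in process context.
 */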
/*
 * Reenable CMCI on this CPU in case a CPU down failed.
 */
void cmci_reenable(void)
{
	int banks;

	if (cmci_supported(&banks))
		cmci_discover(banks, 0);
}
static void intel_init_cmci(void)
{
	int banks;

	if (!cmci_supported(&banks))
		return;

	mce_threshold_vector = intel_threshold_interrupt;
	cmci_discover(banks, 1);
	/*
	 * For CPU #0 this runs with a still disabled APIC, but that's
	 * ok because only the vector is set up. We still do another
	 * check for the banks later for CPU #0 just to make sure
	 * to not miss any events.
	 */
	apic_write(APIC_LVTCMCI, THRESHOLD_APIC_VECTOR|APIC_DM_FIXED);
	cmci_recheck();
}
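/*
 * The CMCI LVT entry is programmed as a fixed interrupt on
 * THRESHOLD_APIC_VECTOR, whose handler dispatches through
 * mce_threshold_vector (set to intel_threshold_interrupt above).
 */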
void mce_intel_feature_init(struct cpuinfo_x86 *c)
{
	intel_init_thermal(c);
	intel_init_cmci();
}