x86/mce: Include the PPIN in MCE records when available
authorTony Luck <tony.luck@intel.com>
Fri, 18 Nov 2016 17:48:36 +0000 (09:48 -0800)
committerThomas Gleixner <tglx@linutronix.de>
Wed, 23 Nov 2016 15:51:52 +0000 (16:51 +0100)
Intel Xeons from Ivy Bridge onwards support a processor identification
number set in the factory. To the user this is a handy unique number to
identify a particular CPU. Intel can decode this to the fab/production
run to track errors. On systems that have it, include it in the machine
check record. I'm told that this would be helpful for users that run
large data centers with multi-socket servers to keep track of which CPUs
are seeing errors.

Boris:
* Add some clarifying comments and spacing.
* Mask out [63:2] in the disabled-but-not-locked case.
* Call the MSR variable "val" for more readability.

Signed-off-by: Tony Luck <tony.luck@intel.com>
Cc: Ashok Raj <ashok.raj@intel.com>
Cc: linux-edac <linux-edac@vger.kernel.org>
Cc: x86-ml <x86@kernel.org>
Link: http://lkml.kernel.org/r/20161123114855.njguoaygp3qnbkia@pd.tnic
Signed-off-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
arch/x86/include/asm/cpufeatures.h
arch/x86/include/asm/msr-index.h
arch/x86/include/uapi/asm/mce.h
arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/kernel/cpu/mcheck/mce_intel.c

index a39629206864e5bb74aaddea15ca1ab762877042..d625b651e526605dad64ab369b13bbd450fcba3e 100644 (file)
 #define X86_FEATURE_HW_PSTATE  ( 7*32+ 8) /* AMD HW-PState */
 #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
 
+#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
 #define X86_FEATURE_INTEL_PT   ( 7*32+15) /* Intel Processor Trace */
 #define X86_FEATURE_AVX512_4VNNIW (7*32+16) /* AVX-512 Neural Network Instructions */
 #define X86_FEATURE_AVX512_4FMAPS (7*32+17) /* AVX-512 Multiply Accumulation Single precision */
index 78f3760ca1f2985cfcbb278d59fbe71f92f69ea7..710273c617b8d3d735d9aaacdc3349d599717204 100644 (file)
 #define EFER_FFXSR             (1<<_EFER_FFXSR)
 
 /* Intel MSRs. Some also available on other CPUs */
+
+#define MSR_PPIN_CTL                   0x0000004e
+#define MSR_PPIN                       0x0000004f
+
 #define MSR_IA32_PERFCTR0              0x000000c1
 #define MSR_IA32_PERFCTR1              0x000000c2
 #define MSR_FSB_FREQ                   0x000000cd
index 69a6e07e3149a5851757550c2a0c2780d955bd76..eb6247a7009b00d0900ef90f365ca16939c6e7ea 100644 (file)
@@ -28,6 +28,7 @@ struct mce {
        __u64 mcgcap;   /* MCGCAP MSR: machine check capabilities of CPU */
        __u64 synd;     /* MCA_SYND MSR: only valid on SMCA systems */
        __u64 ipid;     /* MCA_IPID MSR: only valid on SMCA systems */
+       __u64 ppin;     /* Protected Processor Inventory Number */
 };
 
 #define MCE_GET_RECORD_LEN   _IOR('M', 1, int)
index aab96f8d52b0cf3f66afe5ea13451c851b1eb111..a3cb27af4f9b0608b7ba191b0410ce688dc30a30 100644 (file)
@@ -43,6 +43,7 @@
 #include <linux/export.h>
 #include <linux/jump_label.h>
 
+#include <asm/intel-family.h>
 #include <asm/processor.h>
 #include <asm/traps.h>
 #include <asm/tlbflush.h>
@@ -135,6 +136,9 @@ void mce_setup(struct mce *m)
        m->socketid = cpu_data(m->extcpu).phys_proc_id;
        m->apicid = cpu_data(m->extcpu).initial_apicid;
        rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap);
+
+       if (this_cpu_has(X86_FEATURE_INTEL_PPIN))
+               rdmsrl(MSR_PPIN, m->ppin);
 }
 
 DEFINE_PER_CPU(struct mce, injectm);
index be0b2fad47c5c6d3adab0247d2acce649d1a74e7..190b3e6cef4d1e1af17c5c77a4ad94e63ed662e3 100644 (file)
@@ -11,6 +11,8 @@
 #include <linux/sched.h>
 #include <linux/cpumask.h>
 #include <asm/apic.h>
+#include <asm/cpufeature.h>
+#include <asm/intel-family.h>
 #include <asm/processor.h>
 #include <asm/msr.h>
 #include <asm/mce.h>
@@ -464,11 +466,46 @@ static void intel_clear_lmce(void)
        wrmsrl(MSR_IA32_MCG_EXT_CTL, val);
 }
 
+/*
+ * Detect and, where possible, enable the Protected Processor Inventory
+ * Number (PPIN) on known server parts. On success this sets
+ * X86_FEATURE_INTEL_PPIN so that mce_setup() will read MSR_PPIN into
+ * each machine check record.
+ */
+static void intel_ppin_init(struct cpuinfo_x86 *c)
+{
+       unsigned long long val;
+
+       /*
+        * Even if testing the presence of the MSR would be enough, we don't
+        * want to risk the situation where other models reuse this MSR for
+        * other purposes.
+        */
+       switch (c->x86_model) {
+       case INTEL_FAM6_IVYBRIDGE_X:
+       case INTEL_FAM6_HASWELL_X:
+       case INTEL_FAM6_BROADWELL_XEON_D:
+       case INTEL_FAM6_BROADWELL_X:
+       case INTEL_FAM6_SKYLAKE_X:
+               /* The MSR may still be absent (e.g. under a hypervisor): */
+               if (rdmsrl_safe(MSR_PPIN_CTL, &val))
+                       return;
+
+               /*
+                * NOTE(review): MSR_PPIN_CTL[1:0] is treated here as
+                * bit 0 = lock, bit 1 = enable (per Intel SDM — confirm).
+                * 01b means disabled AND locked, so enabling is futile
+                * until the next reset:
+                */
+               if ((val & 3UL) == 1UL) {
+                       /* PPIN available but disabled: */
+                       return;
+               }
+
+               /* If PPIN is disabled, but not locked, try to enable: */
+               if (!(val & 3UL)) {
+                       wrmsrl_safe(MSR_PPIN_CTL,  val | 2UL);
+                       rdmsrl_safe(MSR_PPIN_CTL, &val);
+               }
+
+               /* Advertise the feature only if the enable bit stuck: */
+               if ((val & 3UL) == 2UL)
+                       set_cpu_cap(c, X86_FEATURE_INTEL_PPIN);
+       }
+}
+
 void mce_intel_feature_init(struct cpuinfo_x86 *c)
 {
        intel_init_thermal(c);
        intel_init_cmci();
        intel_init_lmce();
+       /* Set X86_FEATURE_INTEL_PPIN so mce_setup() records the PPIN: */
+       intel_ppin_init(c);
 }
 
 void mce_intel_feature_clear(struct cpuinfo_x86 *c)