}
display_cacheinfo(c);
+
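+ /* On AMD, ECX[7:0] of CPUID leaf 0x80000008 holds the core count
+    minus one. The APIC ID decoding assumes a power-of-2 core count,
+    so fall back to a single core for anything else. */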
+ if (cpuid_eax(0x80000000) >= 0x80000008) {
+ c->x86_num_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
+ if (c->x86_num_cores & (c->x86_num_cores - 1))
+ c->x86_num_cores = 1;
+ }
+
detect_ht(c);
#ifdef CONFIG_X86_HT
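+ /* CMP_LEGACY means the HT flag advertises cores, not hyper-threads,
+    so there are no thread siblings to account for. */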
if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
smp_num_siblings = 1;
#endif
-
- if (cpuid_eax(0x80000000) >= 0x80000008) {
- c->x86_num_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
- if (c->x86_num_cores & (c->x86_num_cores - 1))
- c->x86_num_cores = 1;
- }
}
static unsigned int amd_size_cache(struct cpuinfo_x86 * c, unsigned int size)
void __init detect_ht(struct cpuinfo_x86 *c)
{
u32 eax, ebx, ecx, edx;
- int index_lsb, index_msb, tmp;
+ int index_msb, tmp;
int cpu = smp_processor_id();
if (!cpu_has(c, X86_FEATURE_HT))
if (smp_num_siblings == 1) {
printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
} else if (smp_num_siblings > 1 ) {
- index_lsb = 0;
index_msb = 31;
if (smp_num_siblings > NR_CPUS) {
return;
}
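+ /* Compute ceil(log2(smp_num_siblings)): the scan below finds the
+    highest set bit, and the power-of-2 test afterwards rounds up,
+    yielding the number of low APIC ID bits that address the logical
+    CPUs within a package. */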
tmp = smp_num_siblings;
- while ((tmp & 1) == 0) {
- tmp >>=1 ;
- index_lsb++;
- }
- tmp = smp_num_siblings;
while ((tmp & 0x80000000 ) == 0) {
tmp <<=1 ;
index_msb--;
}
- if (index_lsb != index_msb )
+ if (smp_num_siblings & (smp_num_siblings - 1))
index_msb++;
phys_proc_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
phys_proc_id[cpu]);
+
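+ /* smp_num_siblings so far counts all logical CPUs in the package
+    (CPUID.1:EBX[23:16]); divide by the core count to get the number
+    of hyper-threads per core. */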
+ smp_num_siblings = smp_num_siblings / c->x86_num_cores;
+
+ tmp = smp_num_siblings;
+ index_msb = 31;
+ while ((tmp & 0x80000000) == 0) {
+ tmp <<= 1;
+ index_msb--;
+ }
+
+ if (smp_num_siblings & (smp_num_siblings - 1))
+ index_msb++;
+
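+ /* Shifting the APIC ID past the thread bits gives an ID that is
+    unique per core, usable for grouping thread siblings. */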
+ cpu_core_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
+
+ if (c->x86_num_cores > 1)
+ printk(KERN_INFO "CPU: Processor Core ID: %d\n",
+ cpu_core_id[cpu]);
}
}
#endif
}
+/*
+ * find out the number of processor cores on the die
+ */
+static int __init num_cpu_cores(struct cpuinfo_x86 *c)
+{
+ unsigned int eax, ecx;
+
+ if (c->cpuid_level < 4)
+ return 1;
+
+ /* CPUID leaf 4 takes a subleaf index in ECX and overwrites it,
+    so tie ECX to an output rather than passing it as plain input. */
+ __asm__("cpuid"
+ : "=a" (eax), "=c" (ecx)
+ : "0" (4), "1" (0)
+ : "bx", "dx");
+
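+ /* A nonzero cache type in EAX[4:0] means leaf 4 is implemented;
+    EAX[31:26] then holds the maximum core count minus one. */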
+ if (eax & 0x1f)
+ return ((eax >> 26) + 1);
+ else
+ return 1;
+}
+
static void __init init_intel(struct cpuinfo_x86 *c)
{
unsigned int l2 = 0;
if ( p )
strcpy(c->x86_model_id, p);
+ c->x86_num_cores = num_cpu_cores(c);
+
detect_ht(c);
/* Work around errata */
seq_printf(m, "\nbogomips\t: %lu.%02lu\n\n",
c->loops_per_jiffy/(500000/HZ),
(c->loops_per_jiffy/(5000/HZ)) % 100);
+
+#ifdef CONFIG_SMP
+ /* Put new fields at the end to lower the probability of
+ breaking user space parsers. */
+ seq_printf(m, "core id\t\t: %d\n", cpu_core_id[n]);
+ seq_printf(m, "cpu cores\t: %d\n", c->x86_num_cores);
+#endif
+
return 0;
}
int smp_num_siblings = 1;
int phys_proc_id[NR_CPUS]; /* Package ID of each logical CPU */
EXPORT_SYMBOL(phys_proc_id);
+int cpu_core_id[NR_CPUS]; /* Core ID of each logical CPU */
+EXPORT_SYMBOL(cpu_core_id);
/* bitmap of online cpus */
cpumask_t cpu_online_map;
void *xquad_portio;
cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
+cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
static void __init smp_boot_cpus(unsigned int max_cpus)
{
cpus_clear(cpu_sibling_map[0]);
cpu_set(0, cpu_sibling_map[0]);
+ cpus_clear(cpu_core_map[0]);
+ cpu_set(0, cpu_core_map[0]);
+
/*
* If we couldn't find an SMP configuration at boot time,
* get out of here now!
printk(KERN_NOTICE "Local APIC not detected."
" Using dummy APIC emulation.\n");
map_cpu_to_logical_apicid();
+ cpu_set(0, cpu_sibling_map[0]);
+ cpu_set(0, cpu_core_map[0]);
return;
}
printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
smpboot_clear_io_apic_irqs();
phys_cpu_present_map = physid_mask_of_physid(0);
+ cpu_set(0, cpu_sibling_map[0]);
+ cpu_set(0, cpu_core_map[0]);
return;
}
printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
smpboot_clear_io_apic_irqs();
phys_cpu_present_map = physid_mask_of_physid(0);
+ cpu_set(0, cpu_sibling_map[0]);
+ cpu_set(0, cpu_core_map[0]);
return;
}
* construct cpu_sibling_map[], so that we can tell sibling CPUs
* efficiently.
*/
- for (cpu = 0; cpu < NR_CPUS; cpu++)
+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
cpus_clear(cpu_sibling_map[cpu]);
+ cpus_clear(cpu_core_map[cpu]);
+ }
for (cpu = 0; cpu < NR_CPUS; cpu++) {
+ struct cpuinfo_x86 *c = cpu_data + cpu;
int siblings = 0;
int i;
if (!cpu_isset(cpu, cpu_callout_map))
for (i = 0; i < NR_CPUS; i++) {
if (!cpu_isset(i, cpu_callout_map))
continue;
- if (phys_proc_id[cpu] == phys_proc_id[i]) {
+ if (cpu_core_id[cpu] == cpu_core_id[i]) {
siblings++;
cpu_set(i, cpu_sibling_map[cpu]);
}
if (siblings != smp_num_siblings)
printk(KERN_WARNING "WARNING: %d siblings found for CPU%d, should be %d\n", siblings, cpu, smp_num_siblings);
+
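+ /* Group every CPU in the same physical package into cpu_core_map;
+    single-core packages just reuse the thread sibling map. */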
+ if (c->x86_num_cores > 1) {
+ for (i = 0; i < NR_CPUS; i++) {
+ if (!cpu_isset(i, cpu_callout_map))
+ continue;
+ if (phys_proc_id[cpu] == phys_proc_id[i]) {
+ cpu_set(i, cpu_core_map[cpu]);
+ }
+ }
+ } else {
+ cpu_core_map[cpu] = cpu_sibling_map[cpu];
+ }
}
if (nmi_watchdog == NMI_LOCAL_APIC)
{
#ifdef CONFIG_SMP
u32 eax, ebx, ecx, edx;
- int index_lsb, index_msb, tmp;
+ int index_msb, tmp;
int cpu = smp_processor_id();
if (!cpu_has(c, X86_FEATURE_HT))
if (smp_num_siblings == 1) {
printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
} else if (smp_num_siblings > 1) {
- index_lsb = 0;
index_msb = 31;
/*
* At this point we only support two siblings per
return;
}
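+ /* Compute ceil(log2(smp_num_siblings)): find the highest set bit,
+    then round up below if the count is not a power of two. */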
tmp = smp_num_siblings;
- while ((tmp & 1) == 0) {
- tmp >>=1 ;
- index_lsb++;
- }
- tmp = smp_num_siblings;
while ((tmp & 0x80000000 ) == 0) {
tmp <<=1 ;
index_msb--;
}
- if (index_lsb != index_msb )
+ if (smp_num_siblings & (smp_num_siblings - 1))
index_msb++;
phys_proc_id[cpu] = phys_pkg_id(index_msb);
printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
phys_proc_id[cpu]);
+
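+ /* smp_num_siblings counted all logical CPUs per package; divide by
+    the core count to get threads per core. */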
+ smp_num_siblings = smp_num_siblings / c->x86_num_cores;
+
+ tmp = smp_num_siblings;
+ index_msb = 31;
+ while ((tmp & 0x80000000) == 0) {
+ tmp <<= 1;
+ index_msb--;
+ }
+ if (smp_num_siblings & (smp_num_siblings - 1))
+ index_msb++;
+
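+ /* Strip the thread bits from the APIC ID to get a per-core ID. */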
+ cpu_core_id[cpu] = phys_pkg_id(index_msb);
+
+ if (c->x86_num_cores > 1)
+ printk(KERN_INFO "CPU: Processor Core ID: %d\n",
+ cpu_core_id[cpu]);
}
#endif
}
smp_num_siblings = 1;
#endif
}
-
+
+/*
+ * find out the number of processor cores on the die
+ */
+static int __init intel_num_cpu_cores(struct cpuinfo_x86 *c)
+{
+ unsigned int eax, ecx;
+
+ if (c->cpuid_level < 4)
+ return 1;
+
+ /* CPUID leaf 4 takes a subleaf index in ECX and overwrites it,
+    so tie ECX to an output rather than passing it as plain input. */
+ __asm__("cpuid"
+ : "=a" (eax), "=c" (ecx)
+ : "0" (4), "1" (0)
+ : "bx", "dx");
+
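+ /* A nonzero cache type in EAX[4:0] means leaf 4 is implemented;
+    EAX[31:26] then holds the maximum core count minus one. */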
+ if (eax & 0x1f)
+ return ((eax >> 26) + 1);
+ else
+ return 1;
+}
+
static void __init init_intel(struct cpuinfo_x86 *c)
{
/* Cache sizes */
c->x86_cache_alignment = c->x86_clflush_size * 2;
if (c->x86 >= 15)
set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
+ c->x86_num_cores = intel_num_cpu_cores(c);
}
void __init get_cpu_vendor(struct cpuinfo_x86 *c)
seq_printf(m, " [%d]", i);
}
}
- seq_printf(m, "\n");
- if (c->x86_num_cores > 1)
- seq_printf(m, "cpu cores\t: %d\n", c->x86_num_cores);
-
- seq_printf(m, "\n\n");
+ seq_printf(m, "\n");
+#ifdef CONFIG_SMP
+ /* Put new fields at the end to lower the probability of
+ breaking user space parsers. */
+ seq_printf(m, "core id\t\t: %d\n", cpu_core_id[c - cpu_data]);
+ seq_printf(m, "cpu cores\t: %d\n", c->x86_num_cores);
+#endif
+ seq_printf(m, "\n");
return 0;
}
int smp_num_siblings = 1;
/* Package ID of each logical CPU */
u8 phys_proc_id[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
+/* Core ID of each logical CPU */
+u8 cpu_core_id[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
EXPORT_SYMBOL(phys_proc_id);
+EXPORT_SYMBOL(cpu_core_id);
/* Bitmask of currently online CPUs */
cpumask_t cpu_online_map;
struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
+cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
/*
* Trampoline 80x86 program as an array.
io_apic_irqs = 0;
cpu_online_map = cpumask_of_cpu(0);
cpu_set(0, cpu_sibling_map[0]);
+ cpu_set(0, cpu_core_map[0]);
phys_cpu_present_map = physid_mask_of_physid(0);
if (APIC_init_uniprocessor())
printk(KERN_NOTICE "Local APIC not detected."
io_apic_irqs = 0;
cpu_online_map = cpumask_of_cpu(0);
cpu_set(0, cpu_sibling_map[0]);
+ cpu_set(0, cpu_core_map[0]);
phys_cpu_present_map = physid_mask_of_physid(0);
disable_apic = 1;
goto smp_done;
io_apic_irqs = 0;
cpu_online_map = cpumask_of_cpu(0);
cpu_set(0, cpu_sibling_map[0]);
+ cpu_set(0, cpu_core_map[0]);
phys_cpu_present_map = physid_mask_of_physid(0);
disable_apic = 1;
goto smp_done;
* Construct cpu_sibling_map[], so that we can tell the
* sibling CPU efficiently.
*/
- for (cpu = 0; cpu < NR_CPUS; cpu++)
+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
cpus_clear(cpu_sibling_map[cpu]);
+ cpus_clear(cpu_core_map[cpu]);
+ }
for (cpu = 0; cpu < NR_CPUS; cpu++) {
+ struct cpuinfo_x86 *c = cpu_data + cpu;
int siblings = 0;
int i;
if (!cpu_isset(cpu, cpu_callout_map))
for (i = 0; i < NR_CPUS; i++) {
if (!cpu_isset(i, cpu_callout_map))
continue;
- if (phys_proc_id[cpu] == phys_proc_id[i]) {
+ if (cpu_core_id[cpu] == cpu_core_id[i]) {
siblings++;
cpu_set(i, cpu_sibling_map[cpu]);
}
siblings, cpu, smp_num_siblings);
smp_num_siblings = siblings;
}
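+ /* Cores that share a physical package become core siblings;
+    single-core packages reuse the thread sibling map. */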
+ if (c->x86_num_cores > 1) {
+ for (i = 0; i < NR_CPUS; i++) {
+ if (!cpu_isset(i, cpu_callout_map))
+ continue;
+ if (phys_proc_id[cpu] == phys_proc_id[i]) {
+ cpu_set(i, cpu_core_map[cpu]);
+ }
+ }
+ } else
+ cpu_core_map[cpu] = cpu_sibling_map[cpu];
}
Dprintk("Boot done.\n");
#endif
extern int phys_proc_id[NR_CPUS];
+extern int cpu_core_id[NR_CPUS];
extern char ignore_fpu_irq;
extern void identify_cpu(struct cpuinfo_x86 *);
extern int pic_mode;
extern int smp_num_siblings;
extern cpumask_t cpu_sibling_map[];
+extern cpumask_t cpu_core_map[];
extern void smp_flush_tlb(void);
extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
extern void zap_low_mappings(void);
void smp_stop_cpu(void);
extern cpumask_t cpu_sibling_map[NR_CPUS];
+extern cpumask_t cpu_core_map[NR_CPUS];
extern u8 phys_proc_id[NR_CPUS];
+extern u8 cpu_core_id[NR_CPUS];
#define SMP_TRAMPOLINE_BASE 0x6000