unsigned long bh_count; /* number of times bh was invoked */
unsigned long fp_rev;
unsigned long fp_model;
+ unsigned long cpu_num; /* CPU number from PAT firmware */
+ unsigned long cpu_loc; /* CPU location from PAT firmware */
unsigned int state;
struct parisc_device *dev;
unsigned long loops_per_jiffy;
unsigned long txn_addr;
unsigned long cpuid;
struct cpuinfo_parisc *p;
- struct pdc_pat_cpu_num cpu_info __maybe_unused;
+ struct pdc_pat_cpu_num cpu_info = { };
#ifdef CONFIG_SMP
if (num_online_cpus() >= nr_cpu_ids) {
*/
cpuid = boot_cpu_data.cpu_count;
txn_addr = dev->hpa.start; /* for legacy PDC */
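+ /* default to the logical cpuid; the PAT firmware path below may overwrite these with real values */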
+ cpu_info.cpu_num = cpu_info.cpu_loc = cpuid;
#ifdef CONFIG_64BIT
if (is_pdc_pat()) {
p->hpa = dev->hpa.start; /* save CPU hpa */
p->cpuid = cpuid; /* save CPU id */
p->txn_addr = txn_addr; /* save CPU IRQ address */
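+ /* keep the firmware-reported CPU number/location in per-cpu data; cpu_loc is checked later by init_cr16_clocksource() */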
+ p->cpu_num = cpu_info.cpu_num;
+ p->cpu_loc = cpu_info.cpu_loc;
#ifdef CONFIG_SMP
/*
** FIXME: review if any other initialization is clobbered
static int __init init_cr16_clocksource(void)
{
/*
- * The cr16 interval timers are not syncronized across CPUs, so mark
- * them unstable and lower rating on SMP systems.
+ * The cr16 interval timers are not synchronized across CPUs on
+ * different sockets, so mark them unstable and lower rating on
+ * multi-socket SMP systems.
*/
if (num_online_cpus() > 1) {
- clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
- clocksource_cr16.rating = 0;
+ int cpu;
+ unsigned long cpu0_loc;
+ cpu0_loc = per_cpu(cpu_data, 0).cpu_loc;
+
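+ /* compare each online CPU's firmware-reported location against CPU 0 */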
+ for_each_online_cpu(cpu) {
+ if (cpu0_loc == per_cpu(cpu_data, cpu).cpu_loc)
+ continue;
+
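+ /* CPUs sit at different physical locations: cr16 cannot serve as a stable clocksource */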
+ clocksource_cr16.name = "cr16_unstable";
+ clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
+ clocksource_cr16.rating = 0;
+ break;
+ }
}
+ /* XXX: We may want to mark sched_clock stable here if cr16 clocks are
+ * in sync:
+ * (clocksource_cr16.flags == CLOCK_SOURCE_IS_CONTINUOUS) */
+
/* register at clocksource framework */
clocksource_register_hz(&clocksource_cr16,
100 * PAGE0->mem_10msec);