#endif
};
-DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info);
+DECLARE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);
/*
* The "local" data variable. It refers to the per-CPU data of the currently executing
* Do not use the address of local_cpu_data, since it will be different from
* cpu_data(smp_processor_id())!
*/
-#define local_cpu_data (&__ia64_per_cpu_var(cpu_info))
-#define cpu_data(cpu) (&per_cpu(cpu_info, cpu))
+#define local_cpu_data (&__ia64_per_cpu_var(ia64_cpu_info))
+#define cpu_data(cpu) (&per_cpu(ia64_cpu_info, cpu))
extern void print_cpu_info (struct cpuinfo_ia64 *);
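For reference, a minimal usage sketch of the two accessors renamed above; the
cyc_per_usec field is taken from this patch's sn2 hunk, the local variable
names are purely illustrative:

	struct cpuinfo_ia64 *here  = local_cpu_data;                /* fixed per-CPU mapping */
	struct cpuinfo_ia64 *canon = cpu_data(smp_processor_id()); /* canonical address     */
	/* The two pointers differ, yet name the same per-CPU object --
	 * exactly why the comment above warns against comparing addresses. */
	unsigned long freq = here->cyc_per_usec;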
* intermediate precision so that we can produce a full 64-bit result.
*/
GLOBAL_ENTRY(ia64_native_sched_clock)
- addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
+ addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
mov.m r9=ar.itc // fetch cycle-counter (35 cyc)
;;
ldf8 f8=[r8]
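In C terms, ia64_native_sched_clock computes roughly the following. This is a
hedged sketch, not the assembly: __uint128_t stands in for the 128-bit multiply
the routine carries out in floating-point registers (f8 is loaded above), the
helper name is illustrative, and IA64_NSEC_PER_CYC_SHIFT comes from the kernel
headers:

	static inline u64 sched_clock_sketch(u64 itc, u64 nsec_per_cyc)
	{
		/* full 128-bit product, then shift back down to a 64-bit result */
		return (u64)(((__uint128_t)itc * nsec_per_cyc)
			     >> IA64_NSEC_PER_CYC_SHIFT);
	}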
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
GLOBAL_ENTRY(cycle_to_cputime)
alloc r16=ar.pfs,1,0,0,0
- addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
+ addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
;;
ldf8 f8=[r8]
;;
#endif
#include <asm/processor.h>
-EXPORT_SYMBOL(per_cpu__cpu_info);
+EXPORT_SYMBOL(per_cpu__ia64_cpu_info);
#ifdef CONFIG_SMP
EXPORT_SYMBOL(per_cpu__local_per_cpu_offset);
#endif
ia64_do_tlb_purge:
#define O(member) IA64_CPUINFO_##member##_OFFSET
- GET_THIS_PADDR(r2, cpu_info) // load phys addr of cpu_info into r2
+ GET_THIS_PADDR(r2, ia64_cpu_info) // load phys addr of ia64_cpu_info into r2
;;
addl r17=O(PTCE_STRIDE),r2
addl r2=O(PTCE_BASE),r2
// purge all TC entries
#define O(member) IA64_CPUINFO_##member##_OFFSET
- GET_THIS_PADDR(r2, cpu_info) // load phys addr of cpu_info into r2
+ GET_THIS_PADDR(r2, ia64_cpu_info) // load phys addr of ia64_cpu_info into r2
;;
addl r17=O(PTCE_STRIDE),r2
addl r2=O(PTCE_BASE),r2
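Both hunks above feed the same (unshown) translation-cache purge loop; in
rough C, and with the cpuinfo_ia64 field types here being an assumption, they
do no more than:

	/* Sketch: ci is the *physical* address of this CPU's cpuinfo, since
	 * the MCA/purge path runs with translation off -- hence GET_THIS_PADDR. */
	unsigned long base   = ci->ptce_base;    /* addl r2=O(PTCE_BASE),r2    */
	unsigned int *stride = ci->ptce_stride;  /* addl r17=O(PTCE_STRIDE),r2 */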
EXPORT_SYMBOL(__per_cpu_offset);
#endif
-DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
+DEFINE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);
DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
unsigned long ia64_cycles_per_usec;
struct ia64_boot_param *ia64_boot_param;
* depends on the data returned by identify_cpu(). We break the dependency by
* accessing cpu_data() through the canonical per-CPU address.
*/
- cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start);
+ cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(ia64_cpu_info) - __per_cpu_start);
identify_cpu(cpu_info);
#ifdef CONFIG_MCKINLEY
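The arithmetic in the changed line deserves a commented restatement (no new
names beyond what the patch already uses):

	/* offset of ia64_cpu_info inside the per-CPU template section... */
	((char *)&__ia64_per_cpu_var(ia64_cpu_info) - __per_cpu_start)
	/* ...added to cpu_data, this CPU's per-CPU base, yields this CPU's
	 * cpuinfo_ia64 before cpu_data()/local_cpu_data are usable. */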
/* Set the node_data pointer for each per-cpu struct */
for_each_possible_early_cpu(cpu) {
node = node_cpuid[cpu].nid;
- per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data;
+ per_cpu(ia64_cpu_info, cpu).node_data =
+ mem_data[node].node_data;
}
#else
{
cpu = 0;
node = node_cpuid[cpu].nid;
cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start +
- ((char *)&per_cpu__cpu_info - __per_cpu_start));
+ ((char *)&per_cpu__ia64_cpu_info - __per_cpu_start));
cpu0_cpu_info->node_data = mem_data[node].node_data;
}
#endif /* CONFIG_SMP */
seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l,
stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed,
stat->deadlocks,
- 1000 * stat->lock_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec,
- 1000 * stat->shub_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec,
- 1000 * stat->shub_itc_clocks_max / per_cpu(cpu_info, cpu).cyc_per_usec,
+ 1000 * stat->lock_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec,
+ 1000 * stat->shub_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec,
+ 1000 * stat->shub_itc_clocks_max / per_cpu(ia64_cpu_info, cpu).cyc_per_usec,
stat->shub_ptc_flushes_not_my_mm,
stat->deadlocks2,
stat->shub_ipi_flushes,
- 1000 * stat->shub_ipi_flushes_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec);
+ 1000 * stat->shub_ipi_flushes_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec);
}
return 0;
}
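The computed columns above convert ITC cycle counts to nanoseconds; a hedged
helper showing the unit math (the name itc_to_ns is illustrative, the fields
are from this patch):

	static inline unsigned long itc_to_ns(unsigned long cycles, int cpu)
	{
		/* cyc_per_usec is cycles per microsecond, so cycles /
		 * cyc_per_usec is microseconds; the factor 1000 scales to ns.
		 * Multiplying first preserves precision, as in the code above. */
		return 1000 * cycles / per_cpu(ia64_cpu_info, cpu).cyc_per_usec;
	}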