struct smp_ops smp_ops = {
	.smp_send_reschedule = native_smp_send_reschedule,
	.smp_call_function_mask = native_smp_call_function_mask,
+	.cpu_up = native_cpu_up,
};
EXPORT_SYMBOL_GPL(smp_ops);
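For illustration only (not part of the patch): routing cpu_up through smp_ops is what lets a paravirtualized platform install its own CPU bring-up hook at init time while keeping the native callbacks for everything else. The names below (example_pv_cpu_up, example_pv_smp_init) are hypothetical placeholders; this is a minimal sketch assuming only the smp_ops layout shown above.

/* Hypothetical paravirt backend: only the cpu_up hook is replaced.
 * Function names are illustrative, not taken from this patch. */
static int example_pv_cpu_up(unsigned int cpu)
{
	/* hypervisor-specific vCPU bring-up would go here */
	return 0;
}

static void __init example_pv_smp_init(void)
{
	smp_ops.cpu_up = example_pv_cpu_up;	/* the other ops stay native */
}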
/*
 * Entry point to boot a CPU.
 */
-int __cpuinit __cpu_up(unsigned int cpu)
+int __cpuinit native_cpu_up(unsigned int cpu)
{
	int apicid = cpu_present_to_apicid(cpu);
	unsigned long flags;
#ifdef CONFIG_SMP
extern struct smp_ops smp_ops;
+static inline int __cpu_up(unsigned int cpu)
+{
+	return smp_ops.cpu_up(cpu);
+}
+
static inline void smp_send_reschedule(int cpu)
{
	smp_ops.smp_send_reschedule(cpu);
}

static inline int smp_call_function_mask(cpumask_t mask,
					 void (*func)(void *info), void *info,
					 int wait)
{
	return smp_ops.smp_call_function_mask(mask, func, info, wait);
}
+
+int native_cpu_up(unsigned int cpunum);
#endif
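A sketch of the resulting call path (assumed for illustration, not code from this patch): generic hotplug code that calls __cpu_up() now goes through the smp_ops indirection and reaches native_cpu_up() unless a backend has replaced the hook. example_boot_secondary below is a hypothetical caller.

/* Illustrative only: the indirection introduced above in action. */
int example_boot_secondary(unsigned int cpu)
{
	/* __cpu_up(cpu) -> smp_ops.cpu_up(cpu) -> native_cpu_up(cpu) */
	return __cpu_up(cpu);
}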
#ifdef CONFIG_X86_32
static inline void smp_prepare_cpus(unsigned int max_cpus)
{
	smp_ops.smp_prepare_cpus(max_cpus);
}
-static inline int __cpu_up(unsigned int cpu)
-{
-	return smp_ops.cpu_up(cpu);
-}
static inline void smp_cpus_done(unsigned int max_cpus)
{
	smp_ops.smp_cpus_done(max_cpus);
}

void native_smp_prepare_boot_cpu(void);
void native_smp_prepare_cpus(unsigned int max_cpus);
-int native_cpu_up(unsigned int cpunum);
void native_smp_cpus_done(unsigned int max_cpus);
#ifndef CONFIG_PARAVIRT