x86: standalone trampoline code
author Ingo Molnar <mingo@elte.hu>
Fri, 21 Mar 2008 14:23:19 +0000 (15:23 +0100)
committer Ingo Molnar <mingo@elte.hu>
Thu, 17 Apr 2008 15:41:37 +0000 (17:41 +0200)
move the trampoline setup code out of smpboot.c into its own file - UP
kernels can have suspend support too, and the suspend/resume path needs
the trampoline code even when SMP is disabled.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/x86/kernel/Makefile
arch/x86/kernel/smpboot.c
arch/x86/kernel/trampoline.c [new file with mode: 0644]

index fdd8395e0ed3d15975e2ff7f129a9f4a0d93741a..530ed6a4a031824158b34ee1d217b7924be85c74 100644 (file)
@@ -28,6 +28,7 @@ obj-y                 += alternative.o i8253.o
 obj-$(CONFIG_X86_64)   += pci-nommu_64.o bugs_64.o
 obj-y                  += tsc_$(BITS).o io_delay.o rtc.o
 
+obj-$(CONFIG_X86_TRAMPOLINE)   += trampoline.o
 obj-y                          += i387.o
 obj-y                          += ptrace.o
 obj-y                          += ds.o
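
The new object is built only when CONFIG_X86_TRAMPOLINE is set; that symbol is
expected to be defined in the arch Kconfig so it is enabled both for SMP kernels
and for UP kernels that need the trampoline for suspend/resume (the Kconfig
change is not part of this diff). The interface trampoline.o provides to its
callers is sketched below, assuming <asm/trampoline.h> declares roughly the
following; the header itself is not shown in this patch:

/*
 * Assumed shape of <asm/trampoline.h> -- an illustrative sketch,
 * not part of this patch.
 */

/* Real-mode bootstrap blob, provided by the trampoline assembly code. */
extern unsigned char trampoline_data[];
extern unsigned char trampoline_end[];

/* Low-memory page the bootstrap blob is copied into. */
extern unsigned char *trampoline_base;

/* Copy the bootstrap into place; returns its physical address. */
extern unsigned long setup_trampoline(void);

/*
 * TRAMPOLINE_BASE is a fixed low-memory physical address defined
 * elsewhere; trampoline_base is statically initialized from it and
 * may be overwritten once the real allocation is done.
 */
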
index 424600e671bd100b42018cce819833e1f4aa1b05..e6abe8a49b1fa0b63cccf4b4e1ebfb03c6de8fde 100644 (file)
@@ -140,9 +140,6 @@ static atomic_t init_deasserted;
 
 static int boot_cpu_logical_apicid;
 
-/* ready for x86_64, no harm for x86, since it will overwrite after alloc */
-unsigned char *trampoline_base = __va(TRAMPOLINE_BASE);
-
 /* representing cpus for which sibling maps can be computed */
 static cpumask_t cpu_sibling_setup_map;
 
@@ -550,18 +547,6 @@ cpumask_t cpu_coregroup_map(int cpu)
                return c->llc_shared_map;
 }
 
-/*
- * Currently trivial. Write the real->protected mode
- * bootstrap into the page concerned. The caller
- * has made sure it's suitably aligned.
- */
-unsigned long setup_trampoline(void)
-{
-       memcpy(trampoline_base, trampoline_data,
-              trampoline_end - trampoline_data);
-       return virt_to_phys(trampoline_base);
-}
-
 #ifdef CONFIG_X86_32
 /*
  * We are called very early to get the low memory for the
diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c
new file mode 100644 (file)
index 0000000..abbf199
--- /dev/null
@@ -0,0 +1,18 @@
+#include <linux/io.h>
+
+#include <asm/trampoline.h>
+
+/* ready for x86_64, no harm for x86, since it will overwrite after alloc */
+unsigned char *trampoline_base = __va(TRAMPOLINE_BASE);
+
+/*
+ * Currently trivial. Write the real->protected mode
+ * bootstrap into the page concerned. The caller
+ * has made sure it's suitably aligned.
+ */
+unsigned long setup_trampoline(void)
+{
+       memcpy(trampoline_base, trampoline_data,
+              trampoline_end - trampoline_data);
+       return virt_to_phys(trampoline_base);
+}
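
For context, here is a minimal sketch of how a caller is expected to use
setup_trampoline(): copy the real->protected mode bootstrap into low memory and
use the returned physical address as the real-mode entry point for the CPU being
started or woken. The function below is hypothetical; the real callers are the
SMP bringup and suspend/resume paths.

#include <asm/trampoline.h>

/* Hypothetical caller -- a sketch only, not part of this patch. */
static unsigned long example_prepare_cpu_start(void)
{
	/*
	 * setup_trampoline() copies trampoline_data..trampoline_end into
	 * the page at trampoline_base and returns the physical address
	 * of that copy.
	 */
	unsigned long start_eip = setup_trampoline();

	/*
	 * The address is in low memory, so it is usable as the real-mode
	 * startup vector for an application processor (INIT/SIPI) or for
	 * resume from suspend.
	 */
	return start_eip;
}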