x86-64: Fill unused parts of the vsyscall page with 0xcc
author     Andy Lutomirski <luto@MIT.EDU>
Sun, 5 Jun 2011 17:50:23 +0000 (13:50 -0400)
committer  Ingo Molnar <mingo@elte.hu>
Mon, 6 Jun 2011 07:43:14 +0000 (09:43 +0200)
Jumping to 0x00 might execute something, depending on the bytes that
follow, since 0x00 can begin a valid x86 instruction.  Jumping to 0xcc
traps immediately: it is the one-byte int3 breakpoint instruction.  So
fill the unused parts of the vsyscall page with 0xcc to make it useless
for exploits to jump there.
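
Purely as an illustration of the 0xcc-vs-0x00 point above (none of this
is in the patch; the mmap()ed page, the handler, and all names are made
up for the sketch): a userspace program that jumps into a page filled
with 0xcc takes an immediate SIGTRAP, whereas a run of 0x00 bytes would
decode as real instructions (0x00 0x00 is "add %al,(%rax)") and
execution would keep going.

  #include <setjmp.h>
  #include <signal.h>
  #include <stdio.h>
  #include <string.h>
  #include <sys/mman.h>

  static sigjmp_buf env;

  static void on_trap(int sig)
  {
          siglongjmp(env, sig);           /* bail out of the int3 trap */
  }

  int main(void)
  {
          unsigned char *page = mmap(NULL, 4096,
                                     PROT_READ | PROT_WRITE | PROT_EXEC,
                                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
          if (page == MAP_FAILED)
                  return 1;

          memset(page, 0xcc, 4096);       /* fill the page with int3 */
          signal(SIGTRAP, on_trap);

          if (sigsetjmp(env, 1) == 0)
                  ((void (*)(void))page)();   /* "exploit" jumps into padding */
          else
                  printf("landed on 0xcc padding -> SIGTRAP, as intended\n");

          return 0;
  }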

Signed-off-by: Andy Lutomirski <luto@mit.edu>
Cc: Jesper Juhl <jj@chaosbits.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Jan Beulich <JBeulich@novell.com>
Cc: richard -rw- weinberger <richard.weinberger@gmail.com>
Cc: Mikael Pettersson <mikpe@it.uu.se>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Louis Rilling <Louis.Rilling@kerlabs.com>
Cc: Valdis.Kletnieks@vt.edu
Cc: pageexec@freemail.hu
Link: http://lkml.kernel.org/r/ed54bfcfbe50a9070d20ec1edbe0d149e22a4568.1307292171.git.luto@mit.edu
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 4f90082fd640d286f338b978208725c421e2ccb4..80174719910c9fcb43d6e2c0ad934861d3f6e76f 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -166,22 +166,20 @@ SECTIONS
 	__vsyscall_0 = .;
 
 	. = VSYSCALL_ADDR;
-	.vsyscall_0 : AT(VLOAD(.vsyscall_0)) {
+	.vsyscall : AT(VLOAD(.vsyscall)) {
 		*(.vsyscall_0)
-	} :user
 
-	. = ALIGN(L1_CACHE_BYTES);
-	.vsyscall_fn : AT(VLOAD(.vsyscall_fn)) {
+		. = ALIGN(L1_CACHE_BYTES);
 		*(.vsyscall_fn)
-	}
 
-	.vsyscall_1 ADDR(.vsyscall_0) + 1024: AT(VLOAD(.vsyscall_1)) {
+		. = 1024;
 		*(.vsyscall_1)
-	}
-	.vsyscall_2 ADDR(.vsyscall_0) + 2048: AT(VLOAD(.vsyscall_2)) {
+
+		. = 2048;
 		*(.vsyscall_2)
-	}
 
+		. = 4096;  /* Pad the whole page. */
+	} :user =0xcc
 	. = ALIGN(__vsyscall_0 + PAGE_SIZE, PAGE_SIZE);
 
 #undef VSYSCALL_ADDR
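
As a hedged illustration of what the new "} :user =0xcc" output-section
fill means at runtime (again not part of the patch; it assumes an x86-64
kernel of this era where the legacy vsyscall page at 0xffffffffff600000
is still mapped readable from userspace, so on kernels with
vsyscall=none or a later execute-only mapping the read simply faults):
the gaps around the entries at offsets 0, 1024 and 2048 should now read
back as 0xcc.

  #include <stdio.h>

  int main(void)
  {
          /* Fixed legacy vsyscall address on x86-64 (VSYSCALL_ADDR). */
          const unsigned char *vsyscall =
                  (const unsigned char *)0xffffffffff600000UL;
          unsigned long off, filled = 0;

          /* Count how much of the page reads back as the 0xcc fill byte;
           * everything outside the short entry stubs should be padding
           * after this patch. */
          for (off = 0; off < 4096; off++)
                  if (vsyscall[off] == 0xcc)
                          filled++;

          printf("%lu of 4096 vsyscall-page bytes are 0xcc\n", filled);
          return 0;
  }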