Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86-64: Give vvars their own page

Move vvars out of the vsyscall page into their own page and mark
it NX.

Without this patch, an attacker who can force a daemon to call
some fixed address in the vsyscall page could wait until the stored
time value contains, say, the bytes 0xCD80 (an `int $0x80` syscall
instruction), and then cause those time bytes to be executed as code.

Signed-off-by: Andy Lutomirski <luto@mit.edu>
Cc: Jesper Juhl <jj@chaosbits.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Jan Beulich <JBeulich@novell.com>
Cc: richard -rw- weinberger <richard.weinberger@gmail.com>
Cc: Mikael Pettersson <mikpe@it.uu.se>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Louis Rilling <Louis.Rilling@kerlabs.com>
Cc: Valdis.Kletnieks@vt.edu
Cc: pageexec@freemail.hu
Link: http://lkml.kernel.org/r/b1460f81dc4463d66ea3f2b5ce240f58d48effec.1307292171.git.luto@mit.edu
Signed-off-by: Ingo Molnar <mingo@elte.hu>

Authored by Andy Lutomirski; committed by Ingo Molnar.
9fd67b4e 8b4777a4

+35 -23
+1
arch/x86/include/asm/fixmap.h
··· 78 78 VSYSCALL_LAST_PAGE, 79 79 VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE 80 80 + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1, 81 + VVAR_PAGE, 81 82 VSYSCALL_HPET, 82 83 #endif 83 84 FIX_DBGP_BASE,
+2
arch/x86/include/asm/pgtable_types.h
··· 108 108 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD) 109 109 #define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER) 110 110 #define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT) 111 + #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER) 111 112 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE) 112 113 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE) 113 114 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE) ··· 131 130 #define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC) 132 131 #define PAGE_KERNEL_VSYSCALL __pgprot(__PAGE_KERNEL_VSYSCALL) 133 132 #define PAGE_KERNEL_VSYSCALL_NOCACHE __pgprot(__PAGE_KERNEL_VSYSCALL_NOCACHE) 133 + #define PAGE_KERNEL_VVAR __pgprot(__PAGE_KERNEL_VVAR) 134 134 135 135 #define PAGE_KERNEL_IO __pgprot(__PAGE_KERNEL_IO) 136 136 #define PAGE_KERNEL_IO_NOCACHE __pgprot(__PAGE_KERNEL_IO_NOCACHE)
+10 -12
arch/x86/include/asm/vvar.h
··· 10 10 * In normal kernel code, they are used like any other variable. 11 11 * In user code, they are accessed through the VVAR macro. 12 12 * 13 - * Each of these variables lives in the vsyscall page, and each 14 - * one needs a unique offset within the little piece of the page 15 - * reserved for vvars. Specify that offset in DECLARE_VVAR. 16 - * (There are 896 bytes available. If you mess up, the linker will 17 - * catch it.) 13 + * These variables live in a page of kernel data that has an extra RO 14 + * mapping for userspace. Each variable needs a unique offset within 15 + * that page; specify that offset with the DECLARE_VVAR macro. (If 16 + * you mess up, the linker will catch it.) 18 17 */ 19 18 20 - /* Offset of vars within vsyscall page */ 21 - #define VSYSCALL_VARS_OFFSET (3072 + 128) 19 + /* Base address of vvars. This is not ABI. */ 20 + #define VVAR_ADDRESS (-10*1024*1024 - 4096) 22 21 23 22 #if defined(__VVAR_KERNEL_LDS) 24 23 ··· 25 26 * right place. 26 27 */ 27 28 #define DECLARE_VVAR(offset, type, name) \ 28 - EMIT_VVAR(name, VSYSCALL_VARS_OFFSET + offset) 29 + EMIT_VVAR(name, offset) 29 30 30 31 #else 31 32 32 33 #define DECLARE_VVAR(offset, type, name) \ 33 34 static type const * const vvaraddr_ ## name = \ 34 - (void *)(VSYSCALL_START + VSYSCALL_VARS_OFFSET + (offset)); 35 + (void *)(VVAR_ADDRESS + (offset)); 35 36 36 37 #define DEFINE_VVAR(type, name) \ 37 - type __vvar_ ## name \ 38 - __attribute__((section(".vsyscall_var_" #name), aligned(16))) 38 + type name \ 39 + __attribute__((section(".vvar_" #name), aligned(16))) 39 40 40 41 #define VVAR(name) (*vvaraddr_ ## name) 41 42 ··· 48 49 DECLARE_VVAR(128, struct vsyscall_gtod_data, vsyscall_gtod_data) 49 50 50 51 #undef DECLARE_VVAR 51 - #undef VSYSCALL_VARS_OFFSET
+17 -11
arch/x86/kernel/vmlinux.lds.S
··· 161 161 162 162 #define VVIRT_OFFSET (VSYSCALL_ADDR - __vsyscall_0) 163 163 #define VVIRT(x) (ADDR(x) - VVIRT_OFFSET) 164 - #define EMIT_VVAR(x, offset) .vsyscall_var_ ## x \ 165 - ADDR(.vsyscall_0) + offset \ 166 - : AT(VLOAD(.vsyscall_var_ ## x)) { \ 167 - *(.vsyscall_var_ ## x) \ 168 - } \ 169 - x = VVIRT(.vsyscall_var_ ## x); 170 164 171 165 . = ALIGN(4096); 172 166 __vsyscall_0 = .; ··· 186 192 *(.vsyscall_3) 187 193 } 188 194 189 - #define __VVAR_KERNEL_LDS 190 - #include <asm/vvar.h> 191 - #undef __VVAR_KERNEL_LDS 192 - 193 - . = __vsyscall_0 + PAGE_SIZE; 195 + . = ALIGN(__vsyscall_0 + PAGE_SIZE, PAGE_SIZE); 194 196 195 197 #undef VSYSCALL_ADDR 196 198 #undef VLOAD_OFFSET 197 199 #undef VLOAD 198 200 #undef VVIRT_OFFSET 199 201 #undef VVIRT 202 + 203 + __vvar_page = .; 204 + 205 + .vvar : AT(ADDR(.vvar) - LOAD_OFFSET) { 206 + 207 + /* Place all vvars at the offsets in asm/vvar.h. */ 208 + #define EMIT_VVAR(name, offset) \ 209 + . = offset; \ 210 + *(.vvar_ ## name) 211 + #define __VVAR_KERNEL_LDS 212 + #include <asm/vvar.h> 213 + #undef __VVAR_KERNEL_LDS 200 214 #undef EMIT_VVAR 215 + 216 + } :data 217 + 218 + . = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE); 201 219 202 220 #endif /* CONFIG_X86_64 */ 203 221
+5
arch/x86/kernel/vsyscall_64.c
··· 284 284 { 285 285 extern char __vsyscall_0; 286 286 unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0); 287 + extern char __vvar_page; 288 + unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page); 287 289 288 290 /* Note that VSYSCALL_MAPPED_PAGES must agree with the code below. */ 289 291 __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL); 292 + __set_fixmap(VVAR_PAGE, physaddr_vvar_page, PAGE_KERNEL_VVAR); 293 + BUILD_BUG_ON((unsigned long)__fix_to_virt(VVAR_PAGE) != 294 + (unsigned long)VVAR_ADDRESS); 290 295 } 291 296 292 297 static int __init vsyscall_init(void)