Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arm64,ia64,ppc,s390,sh,tile,um,x86,mm: remove default gate area

The core mm code will provide a default gate area based on
FIXADDR_USER_START and FIXADDR_USER_END if
!defined(__HAVE_ARCH_GATE_AREA) && defined(AT_SYSINFO_EHDR).

This default is only useful for ia64. arm64, ppc, s390, sh, tile, 64-bit
UML, and x86_32 have their own code just to disable it. arm, 32-bit UML,
and x86_64 have gate areas, but they have their own implementations.

This gets rid of the default and moves the code into ia64.

This should save some code on architectures without a gate area: it's now
possible to inline the gate_area functions in the default case.

Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Acked-by: Nathan Lynch <nathan_lynch@mentor.com>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org> [in principle]
Acked-by: Richard Weinberger <richard@nod.at> [for um]
Acked-by: Will Deacon <will.deacon@arm.com> [for arm64]
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Nathan Lynch <Nathan_Lynch@mentor.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Andy Lutomirski and committed by Linus Torvalds.
a6c19dfe e0d9bf4c

+53 -182
-3
arch/arm64/include/asm/page.h
··· 28 28 #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) 29 29 #define PAGE_MASK (~(PAGE_SIZE-1)) 30 30 31 - /* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */ 32 - #define __HAVE_ARCH_GATE_AREA 1 33 - 34 31 /* 35 32 * The idmap and swapper page tables need some space reserved in the kernel 36 33 * image. Both require pgd, pud (4 levels only) and pmd tables to (section)
-19
arch/arm64/kernel/vdso.c
··· 195 195 } 196 196 197 197 /* 198 - * We define AT_SYSINFO_EHDR, so we need these function stubs to keep 199 - * Linux happy. 200 - */ 201 - int in_gate_area_no_mm(unsigned long addr) 202 - { 203 - return 0; 204 - } 205 - 206 - int in_gate_area(struct mm_struct *mm, unsigned long addr) 207 - { 208 - return 0; 209 - } 210 - 211 - struct vm_area_struct *get_gate_vma(struct mm_struct *mm) 212 - { 213 - return NULL; 214 - } 215 - 216 - /* 217 198 * Update the vDSO data page to keep in sync with kernel timekeeping. 218 199 */ 219 200 void update_vsyscall(struct timekeeper *tk)
+2
arch/ia64/include/asm/page.h
··· 231 231 #define PERCPU_ADDR (-PERCPU_PAGE_SIZE) 232 232 #define LOAD_OFFSET (KERNEL_START - KERNEL_TR_PAGE_SIZE) 233 233 234 + #define __HAVE_ARCH_GATE_AREA 1 235 + 234 236 #endif /* _ASM_IA64_PAGE_H */
+31
arch/ia64/mm/init.c
··· 278 278 ia64_patch_gate(); 279 279 } 280 280 281 + static struct vm_area_struct gate_vma; 282 + 283 + static int __init gate_vma_init(void) 284 + { 285 + gate_vma.vm_mm = NULL; 286 + gate_vma.vm_start = FIXADDR_USER_START; 287 + gate_vma.vm_end = FIXADDR_USER_END; 288 + gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC; 289 + gate_vma.vm_page_prot = __P101; 290 + 291 + return 0; 292 + } 293 + __initcall(gate_vma_init); 294 + 295 + struct vm_area_struct *get_gate_vma(struct mm_struct *mm) 296 + { 297 + return &gate_vma; 298 + } 299 + 300 + int in_gate_area_no_mm(unsigned long addr) 301 + { 302 + if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END)) 303 + return 1; 304 + return 0; 305 + } 306 + 307 + int in_gate_area(struct mm_struct *mm, unsigned long addr) 308 + { 309 + return in_gate_area_no_mm(addr); 310 + } 311 + 281 312 void ia64_mmu_init(void *my_cpu_data) 282 313 { 283 314 unsigned long pta, impl_va_bits;
-3
arch/powerpc/include/asm/page.h
··· 48 48 #define HUGE_MAX_HSTATE (MMU_PAGE_COUNT-1) 49 49 #endif 50 50 51 - /* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */ 52 - #define __HAVE_ARCH_GATE_AREA 1 53 - 54 51 /* 55 52 * Subtle: (1 << PAGE_SHIFT) is an int, not an unsigned long. So if we 56 53 * assign PAGE_MASK to a larger type it gets extended the way we want
-16
arch/powerpc/kernel/vdso.c
··· 840 840 return 0; 841 841 } 842 842 arch_initcall(vdso_init); 843 - 844 - int in_gate_area_no_mm(unsigned long addr) 845 - { 846 - return 0; 847 - } 848 - 849 - int in_gate_area(struct mm_struct *mm, unsigned long addr) 850 - { 851 - return 0; 852 - } 853 - 854 - struct vm_area_struct *get_gate_vma(struct mm_struct *mm) 855 - { 856 - return NULL; 857 - } 858 -
-2
arch/s390/include/asm/page.h
··· 162 162 #include <asm-generic/memory_model.h> 163 163 #include <asm-generic/getorder.h> 164 164 165 - #define __HAVE_ARCH_GATE_AREA 1 166 - 167 165 #endif /* _S390_PAGE_H */
-15
arch/s390/kernel/vdso.c
··· 316 316 return 0; 317 317 } 318 318 early_initcall(vdso_init); 319 - 320 - int in_gate_area_no_mm(unsigned long addr) 321 - { 322 - return 0; 323 - } 324 - 325 - int in_gate_area(struct mm_struct *mm, unsigned long addr) 326 - { 327 - return 0; 328 - } 329 - 330 - struct vm_area_struct *get_gate_vma(struct mm_struct *mm) 331 - { 332 - return NULL; 333 - }
-5
arch/sh/include/asm/page.h
··· 186 186 #include <asm-generic/memory_model.h> 187 187 #include <asm-generic/getorder.h> 188 188 189 - /* vDSO support */ 190 - #ifdef CONFIG_VSYSCALL 191 - #define __HAVE_ARCH_GATE_AREA 192 - #endif 193 - 194 189 /* 195 190 * Some drivers need to perform DMA into kmalloc'ed buffers 196 191 * and so we have to increase the kmalloc minalign for this.
-15
arch/sh/kernel/vsyscall/vsyscall.c
··· 92 92 93 93 return NULL; 94 94 } 95 - 96 - struct vm_area_struct *get_gate_vma(struct mm_struct *mm) 97 - { 98 - return NULL; 99 - } 100 - 101 - int in_gate_area(struct mm_struct *mm, unsigned long address) 102 - { 103 - return 0; 104 - } 105 - 106 - int in_gate_area_no_mm(unsigned long address) 107 - { 108 - return 0; 109 - }
-6
arch/tile/include/asm/page.h
··· 39 39 #define HPAGE_MASK (~(HPAGE_SIZE - 1)) 40 40 41 41 /* 42 - * We do define AT_SYSINFO_EHDR to support vDSO, 43 - * but don't use the gate mechanism. 44 - */ 45 - #define __HAVE_ARCH_GATE_AREA 1 46 - 47 - /* 48 42 * If the Kconfig doesn't specify, set a maximum zone order that 49 43 * is enough so that we can create huge pages from small pages given 50 44 * the respective sizes of the two page types. See <linux/mmzone.h>.
-15
arch/tile/kernel/vdso.c
··· 121 121 return NULL; 122 122 } 123 123 124 - struct vm_area_struct *get_gate_vma(struct mm_struct *mm) 125 - { 126 - return NULL; 127 - } 128 - 129 - int in_gate_area(struct mm_struct *mm, unsigned long address) 130 - { 131 - return 0; 132 - } 133 - 134 - int in_gate_area_no_mm(unsigned long address) 135 - { 136 - return 0; 137 - } 138 - 139 124 int setup_vdso_pages(void) 140 125 { 141 126 struct page **pagelist;
+5
arch/um/include/asm/page.h
··· 119 119 #include <asm-generic/getorder.h> 120 120 121 121 #endif /* __ASSEMBLY__ */ 122 + 123 + #ifdef CONFIG_X86_32 124 + #define __HAVE_ARCH_GATE_AREA 1 125 + #endif 126 + 122 127 #endif /* __UM_PAGE_H */
-1
arch/x86/include/asm/page.h
··· 70 70 #include <asm-generic/memory_model.h> 71 71 #include <asm-generic/getorder.h> 72 72 73 - #define __HAVE_ARCH_GATE_AREA 1 74 73 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA 75 74 76 75 #endif /* __KERNEL__ */
+2
arch/x86/include/asm/page_64.h
··· 39 39 40 40 #endif /* !__ASSEMBLY__ */ 41 41 42 + #define __HAVE_ARCH_GATE_AREA 1 43 + 42 44 #endif /* _ASM_X86_PAGE_64_H */
-1
arch/x86/um/asm/elf.h
··· 216 216 #define ELF_HWCAP (elf_aux_hwcap) 217 217 218 218 #define SET_PERSONALITY(ex) do ; while(0) 219 - #define __HAVE_ARCH_GATE_AREA 1 220 219 221 220 #endif
-15
arch/x86/um/mem_64.c
··· 9 9 10 10 return NULL; 11 11 } 12 - 13 - struct vm_area_struct *get_gate_vma(struct mm_struct *mm) 14 - { 15 - return NULL; 16 - } 17 - 18 - int in_gate_area(struct mm_struct *mm, unsigned long addr) 19 - { 20 - return 0; 21 - } 22 - 23 - int in_gate_area_no_mm(unsigned long addr) 24 - { 25 - return 0; 26 - }
+1 -18
arch/x86/vdso/vdso32-setup.c
··· 115 115 return 0; 116 116 } 117 117 __initcall(ia32_binfmt_init); 118 - #endif 119 - 120 - #else /* CONFIG_X86_32 */ 121 - 122 - struct vm_area_struct *get_gate_vma(struct mm_struct *mm) 123 - { 124 - return NULL; 125 - } 126 - 127 - int in_gate_area(struct mm_struct *mm, unsigned long addr) 128 - { 129 - return 0; 130 - } 131 - 132 - int in_gate_area_no_mm(unsigned long addr) 133 - { 134 - return 0; 135 - } 118 + #endif /* CONFIG_SYSCTL */ 136 119 137 120 #endif /* CONFIG_X86_64 */
+12 -5
include/linux/mm.h
··· 2014 2014 #endif /* CONFIG_HIBERNATION */ 2015 2015 #endif 2016 2016 2017 + #ifdef __HAVE_ARCH_GATE_AREA 2017 2018 extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm); 2018 - #ifdef __HAVE_ARCH_GATE_AREA 2019 - int in_gate_area_no_mm(unsigned long addr); 2020 - int in_gate_area(struct mm_struct *mm, unsigned long addr); 2019 + extern int in_gate_area_no_mm(unsigned long addr); 2020 + extern int in_gate_area(struct mm_struct *mm, unsigned long addr); 2021 2021 #else 2022 - int in_gate_area_no_mm(unsigned long addr); 2023 - #define in_gate_area(mm, addr) ({(void)mm; in_gate_area_no_mm(addr);}) 2022 + static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm) 2023 + { 2024 + return NULL; 2025 + } 2026 + static inline int in_gate_area_no_mm(unsigned long addr) { return 0; } 2027 + static inline int in_gate_area(struct mm_struct *mm, unsigned long addr) 2028 + { 2029 + return 0; 2030 + } 2024 2031 #endif /* __HAVE_ARCH_GATE_AREA */ 2025 2032 2026 2033 #ifdef CONFIG_SYSCTL
-38
mm/memory.c
··· 3430 3430 } 3431 3431 #endif /* __PAGETABLE_PMD_FOLDED */ 3432 3432 3433 - #if !defined(__HAVE_ARCH_GATE_AREA) 3434 - 3435 - #if defined(AT_SYSINFO_EHDR) 3436 - static struct vm_area_struct gate_vma; 3437 - 3438 - static int __init gate_vma_init(void) 3439 - { 3440 - gate_vma.vm_mm = NULL; 3441 - gate_vma.vm_start = FIXADDR_USER_START; 3442 - gate_vma.vm_end = FIXADDR_USER_END; 3443 - gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC; 3444 - gate_vma.vm_page_prot = __P101; 3445 - 3446 - return 0; 3447 - } 3448 - __initcall(gate_vma_init); 3449 - #endif 3450 - 3451 - struct vm_area_struct *get_gate_vma(struct mm_struct *mm) 3452 - { 3453 - #ifdef AT_SYSINFO_EHDR 3454 - return &gate_vma; 3455 - #else 3456 - return NULL; 3457 - #endif 3458 - } 3459 - 3460 - int in_gate_area_no_mm(unsigned long addr) 3461 - { 3462 - #ifdef AT_SYSINFO_EHDR 3463 - if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END)) 3464 - return 1; 3465 - #endif 3466 - return 0; 3467 - } 3468 - 3469 - #endif /* __HAVE_ARCH_GATE_AREA */ 3470 - 3471 3433 static int __follow_pte(struct mm_struct *mm, unsigned long address, 3472 3434 pte_t **ptepp, spinlock_t **ptlp) 3473 3435 {
-5
mm/nommu.c
··· 1981 1981 return -ENOMEM; 1982 1982 } 1983 1983 1984 - int in_gate_area_no_mm(unsigned long addr) 1985 - { 1986 - return 0; 1987 - } 1988 - 1989 1984 int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 1990 1985 { 1991 1986 BUG();