Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: remove free_area_cache

Since all architectures have been converted to use vm_unmapped_area(),
there is no remaining use for the free_area_cache.

Signed-off-by: Michel Lespinasse <walken@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: "James E.J. Bottomley" <jejb@parisc-linux.org>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Howells <dhowells@redhat.com>
Cc: Helge Deller <deller@gmx.de>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Richard Henderson <rth@twiddle.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Michel Lespinasse and committed by Linus Torvalds.
Commit: 98d1e64f (parent: 61b0d760)

-66
-2
arch/arm/mm/mmap.c
··· 181 181 if (mmap_is_legacy()) { 182 182 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; 183 183 mm->get_unmapped_area = arch_get_unmapped_area; 184 - mm->unmap_area = arch_unmap_area; 185 184 } else { 186 185 mm->mmap_base = mmap_base(random_factor); 187 186 mm->get_unmapped_area = arch_get_unmapped_area_topdown; 188 - mm->unmap_area = arch_unmap_area_topdown; 189 187 } 190 188 } 191 189
-2
arch/arm64/mm/mmap.c
··· 90 90 if (mmap_is_legacy()) { 91 91 mm->mmap_base = TASK_UNMAPPED_BASE; 92 92 mm->get_unmapped_area = arch_get_unmapped_area; 93 - mm->unmap_area = arch_unmap_area; 94 93 } else { 95 94 mm->mmap_base = mmap_base(); 96 95 mm->get_unmapped_area = arch_get_unmapped_area_topdown; 97 - mm->unmap_area = arch_unmap_area_topdown; 98 96 } 99 97 } 100 98 EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);
-2
arch/mips/mm/mmap.c
··· 158 158 if (mmap_is_legacy()) { 159 159 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; 160 160 mm->get_unmapped_area = arch_get_unmapped_area; 161 - mm->unmap_area = arch_unmap_area; 162 161 } else { 163 162 mm->mmap_base = mmap_base(random_factor); 164 163 mm->get_unmapped_area = arch_get_unmapped_area_topdown; 165 - mm->unmap_area = arch_unmap_area_topdown; 166 164 } 167 165 } 168 166
-2
arch/powerpc/mm/mmap.c
··· 92 92 if (mmap_is_legacy()) { 93 93 mm->mmap_base = TASK_UNMAPPED_BASE; 94 94 mm->get_unmapped_area = arch_get_unmapped_area; 95 - mm->unmap_area = arch_unmap_area; 96 95 } else { 97 96 mm->mmap_base = mmap_base(); 98 97 mm->get_unmapped_area = arch_get_unmapped_area_topdown; 99 - mm->unmap_area = arch_unmap_area_topdown; 100 98 } 101 99 }
-4
arch/s390/mm/mmap.c
··· 91 91 if (mmap_is_legacy()) { 92 92 mm->mmap_base = TASK_UNMAPPED_BASE; 93 93 mm->get_unmapped_area = arch_get_unmapped_area; 94 - mm->unmap_area = arch_unmap_area; 95 94 } else { 96 95 mm->mmap_base = mmap_base(); 97 96 mm->get_unmapped_area = arch_get_unmapped_area_topdown; 98 - mm->unmap_area = arch_unmap_area_topdown; 99 97 } 100 98 } 101 99 ··· 174 176 if (mmap_is_legacy()) { 175 177 mm->mmap_base = TASK_UNMAPPED_BASE; 176 178 mm->get_unmapped_area = s390_get_unmapped_area; 177 - mm->unmap_area = arch_unmap_area; 178 179 } else { 179 180 mm->mmap_base = mmap_base(); 180 181 mm->get_unmapped_area = s390_get_unmapped_area_topdown; 181 - mm->unmap_area = arch_unmap_area_topdown; 182 182 } 183 183 } 184 184
-2
arch/sparc/kernel/sys_sparc_64.c
··· 290 290 sysctl_legacy_va_layout) { 291 291 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; 292 292 mm->get_unmapped_area = arch_get_unmapped_area; 293 - mm->unmap_area = arch_unmap_area; 294 293 } else { 295 294 /* We know it's 32-bit */ 296 295 unsigned long task_size = STACK_TOP32; ··· 301 302 302 303 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor); 303 304 mm->get_unmapped_area = arch_get_unmapped_area_topdown; 304 - mm->unmap_area = arch_unmap_area_topdown; 305 305 } 306 306 } 307 307
-2
arch/tile/mm/mmap.c
··· 66 66 if (!is_32bit || rlimit(RLIMIT_STACK) == RLIM_INFINITY) { 67 67 mm->mmap_base = TASK_UNMAPPED_BASE; 68 68 mm->get_unmapped_area = arch_get_unmapped_area; 69 - mm->unmap_area = arch_unmap_area; 70 69 } else { 71 70 mm->mmap_base = mmap_base(mm); 72 71 mm->get_unmapped_area = arch_get_unmapped_area_topdown; 73 - mm->unmap_area = arch_unmap_area_topdown; 74 72 } 75 73 }
-2
arch/x86/ia32/ia32_aout.c
··· 308 308 (current->mm->start_data = N_DATADDR(ex)); 309 309 current->mm->brk = ex.a_bss + 310 310 (current->mm->start_brk = N_BSSADDR(ex)); 311 - current->mm->free_area_cache = TASK_UNMAPPED_BASE; 312 - current->mm->cached_hole_size = 0; 313 311 314 312 retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT); 315 313 if (retval < 0) {
-2
arch/x86/mm/mmap.c
··· 115 115 if (mmap_is_legacy()) { 116 116 mm->mmap_base = mmap_legacy_base(); 117 117 mm->get_unmapped_area = arch_get_unmapped_area; 118 - mm->unmap_area = arch_unmap_area; 119 118 } else { 120 119 mm->mmap_base = mmap_base(); 121 120 mm->get_unmapped_area = arch_get_unmapped_area_topdown; 122 - mm->unmap_area = arch_unmap_area_topdown; 123 121 } 124 122 }
-2
fs/binfmt_aout.c
··· 255 255 (current->mm->start_data = N_DATADDR(ex)); 256 256 current->mm->brk = ex.a_bss + 257 257 (current->mm->start_brk = N_BSSADDR(ex)); 258 - current->mm->free_area_cache = current->mm->mmap_base; 259 - current->mm->cached_hole_size = 0; 260 258 261 259 retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT); 262 260 if (retval < 0) {
-2
fs/binfmt_elf.c
··· 738 738 739 739 /* Do this so that we can load the interpreter, if need be. We will 740 740 change some of these later */ 741 - current->mm->free_area_cache = current->mm->mmap_base; 742 - current->mm->cached_hole_size = 0; 743 741 retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP), 744 742 executable_stack); 745 743 if (retval < 0) {
-3
include/linux/mm_types.h
··· 330 330 unsigned long (*get_unmapped_area) (struct file *filp, 331 331 unsigned long addr, unsigned long len, 332 332 unsigned long pgoff, unsigned long flags); 333 - void (*unmap_area) (struct mm_struct *mm, unsigned long addr); 334 333 #endif 335 334 unsigned long mmap_base; /* base of mmap area */ 336 335 unsigned long task_size; /* size of task vm space */ 337 - unsigned long cached_hole_size; /* if non-zero, the largest hole below free_area_cache */ 338 - unsigned long free_area_cache; /* first hole of size cached_hole_size or larger */ 339 336 unsigned long highest_vm_end; /* highest vma end address */ 340 337 pgd_t * pgd; 341 338 atomic_t mm_users; /* How many users with user space? */
-2
include/linux/sched.h
··· 322 322 arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, 323 323 unsigned long len, unsigned long pgoff, 324 324 unsigned long flags); 325 - extern void arch_unmap_area(struct mm_struct *, unsigned long); 326 - extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long); 327 325 #else 328 326 static inline void arch_pick_mmap_layout(struct mm_struct *mm) {} 329 327 #endif
-4
kernel/fork.c
··· 365 365 mm->locked_vm = 0; 366 366 mm->mmap = NULL; 367 367 mm->mmap_cache = NULL; 368 - mm->free_area_cache = oldmm->mmap_base; 369 - mm->cached_hole_size = ~0UL; 370 368 mm->map_count = 0; 371 369 cpumask_clear(mm_cpumask(mm)); 372 370 mm->mm_rb = RB_ROOT; ··· 538 540 mm->nr_ptes = 0; 539 541 memset(&mm->rss_stat, 0, sizeof(mm->rss_stat)); 540 542 spin_lock_init(&mm->page_table_lock); 541 - mm->free_area_cache = TASK_UNMAPPED_BASE; 542 - mm->cached_hole_size = ~0UL; 543 543 mm_init_aio(mm); 544 544 mm_init_owner(mm, p); 545 545
-28
mm/mmap.c
··· 1878 1878 } 1879 1879 #endif 1880 1880 1881 - void arch_unmap_area(struct mm_struct *mm, unsigned long addr) 1882 - { 1883 - /* 1884 - * Is this a new hole at the lowest possible address? 1885 - */ 1886 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) 1887 - mm->free_area_cache = addr; 1888 - } 1889 - 1890 1881 /* 1891 1882 * This mmap-allocator allocates new areas top-down from below the 1892 1883 * stack's low limit (the base): ··· 1933 1942 return addr; 1934 1943 } 1935 1944 #endif 1936 - 1937 - void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr) 1938 - { 1939 - /* 1940 - * Is this a new hole at the highest possible address? 1941 - */ 1942 - if (addr > mm->free_area_cache) 1943 - mm->free_area_cache = addr; 1944 - 1945 - /* dont allow allocations above current base */ 1946 - if (mm->free_area_cache > mm->mmap_base) 1947 - mm->free_area_cache = mm->mmap_base; 1948 - } 1949 1945 1950 1946 unsigned long 1951 1947 get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, ··· 2354 2376 { 2355 2377 struct vm_area_struct **insertion_point; 2356 2378 struct vm_area_struct *tail_vma = NULL; 2357 - unsigned long addr; 2358 2379 2359 2380 insertion_point = (prev ? &prev->vm_next : &mm->mmap); 2360 2381 vma->vm_prev = NULL; ··· 2370 2393 } else 2371 2394 mm->highest_vm_end = prev ? prev->vm_end : 0; 2372 2395 tail_vma->vm_next = NULL; 2373 - if (mm->unmap_area == arch_unmap_area) 2374 - addr = prev ? prev->vm_end : mm->mmap_base; 2375 - else 2376 - addr = vma ? vma->vm_start : mm->mmap_base; 2377 - mm->unmap_area(mm, addr); 2378 2396 mm->mmap_cache = NULL; /* Kill the cache. */ 2379 2397 } 2380 2398
-4
mm/nommu.c
··· 1871 1871 return -ENOMEM; 1872 1872 } 1873 1873 1874 - void arch_unmap_area(struct mm_struct *mm, unsigned long addr) 1875 - { 1876 - } 1877 - 1878 1874 void unmap_mapping_range(struct address_space *mapping, 1879 1875 loff_t const holebegin, loff_t const holelen, 1880 1876 int even_cows)
-1
mm/util.c
··· 295 295 { 296 296 mm->mmap_base = TASK_UNMAPPED_BASE; 297 297 mm->get_unmapped_area = arch_get_unmapped_area; 298 - mm->unmap_area = arch_unmap_area; 299 298 } 300 299 #endif 301 300