Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm, arch: remove empty_bad_page*

empty_bad_page() and empty_bad_pte_table() seem to be relics from the old
days which have not been used by any code for a long time. I have tried to
find out exactly when, but this is not really all that straightforward due to
many code movements - the traces disappear around the 2.4 era.

Anyway, no code really references either empty_bad_page or
empty_bad_pte_table. We only allocate the storage, which is not used by
anybody, so remove them.

Link: http://lkml.kernel.org/r/20171004150045.30755-1-mhocko@kernel.org
Signed-off-by: Michal Hocko <mhocko@suse.com>
Acked-by: Ralf Baechle <ralf@linus-mips.org>
Acked-by: Ingo Molnar <mingo@kernel.org>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Cc: David Howells <dhowells@redhat.com>
Cc: Rich Felker <dalias@libc.org>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Richard Weinberger <richard@nod.at>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Michal Hocko and committed by
Linus Torvalds
8745808f a2e16731

+2 -54
-14
arch/frv/mm/init.c
··· 42 42 #undef DEBUG 43 43 44 44 /* 45 - * BAD_PAGE is the page that is used for page faults when linux 46 - * is out-of-memory. Older versions of linux just did a 47 - * do_exit(), but using this instead means there is less risk 48 - * for a process dying in kernel mode, possibly leaving a inode 49 - * unused etc.. 50 - * 51 - * BAD_PAGETABLE is the accompanying page-table: it is initialized 52 - * to point to BAD_PAGE entries. 53 - * 54 45 * ZERO_PAGE is a special page that is used for zero-initialized 55 46 * data and COW. 56 47 */ 57 - static unsigned long empty_bad_page_table; 58 - static unsigned long empty_bad_page; 59 - 60 48 unsigned long empty_zero_page; 61 49 EXPORT_SYMBOL(empty_zero_page); 62 50 ··· 60 72 unsigned long zones_size[MAX_NR_ZONES] = {0, }; 61 73 62 74 /* allocate some pages for kernel housekeeping tasks */ 63 - empty_bad_page_table = (unsigned long) alloc_bootmem_pages(PAGE_SIZE); 64 - empty_bad_page = (unsigned long) alloc_bootmem_pages(PAGE_SIZE); 65 75 empty_zero_page = (unsigned long) alloc_bootmem_pages(PAGE_SIZE); 66 76 67 77 memset((void *) empty_zero_page, 0, PAGE_SIZE);
-13
arch/h8300/mm/init.c
··· 40 40 #include <asm/sections.h> 41 41 42 42 /* 43 - * BAD_PAGE is the page that is used for page faults when linux 44 - * is out-of-memory. Older versions of linux just did a 45 - * do_exit(), but using this instead means there is less risk 46 - * for a process dying in kernel mode, possibly leaving a inode 47 - * unused etc.. 48 - * 49 - * BAD_PAGETABLE is the accompanying page-table: it is initialized 50 - * to point to BAD_PAGE entries. 51 - * 52 43 * ZERO_PAGE is a special page that is used for zero-initialized 53 44 * data and COW. 54 45 */ 55 - static unsigned long empty_bad_page_table; 56 - static unsigned long empty_bad_page; 57 46 unsigned long empty_zero_page; 58 47 59 48 /* ··· 67 78 * Initialize the bad page table and bad page to point 68 79 * to a couple of allocated pages. 69 80 */ 70 - empty_bad_page_table = (unsigned long)alloc_bootmem_pages(PAGE_SIZE); 71 - empty_bad_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE); 72 81 empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE); 73 82 memset((void *)empty_zero_page, 0, PAGE_SIZE); 74 83
+1 -7
arch/mips/include/asm/pgtable-64.h
··· 31 31 * tables. Each page table is also a single 4K page, giving 512 (== 32 32 * PTRS_PER_PTE) 8 byte ptes. Each pud entry is initialized to point to 33 33 * invalid_pmd_table, each pmd entry is initialized to point to 34 - * invalid_pte_table, each pte is initialized to 0. When memory is low, 35 - * and a pmd table or a page table allocation fails, empty_bad_pmd_table 36 - * and empty_bad_page_table is returned back to higher layer code, so 37 - * that the failure is recognized later on. Linux does not seem to 38 - * handle these failures very well though. The empty_bad_page_table has 39 - * invalid pte entries in it, to force page faults. 34 + * invalid_pte_table, each pte is initialized to 0. 40 35 * 41 36 * Kernel mappings: kernel mappings are held in the swapper_pg_table. 42 37 * The layout is identical to userspace except it's indexed with the ··· 170 175 printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e)) 171 176 172 177 extern pte_t invalid_pte_table[PTRS_PER_PTE]; 173 - extern pte_t empty_bad_page_table[PTRS_PER_PTE]; 174 178 175 179 #ifndef __PAGETABLE_PUD_FOLDED 176 180 /*
-8
arch/mn10300/kernel/head.S
··· 434 434 .space PAGE_SIZE 435 435 436 436 .balign PAGE_SIZE 437 - ENTRY(empty_bad_page) 438 - .space PAGE_SIZE 439 - 440 - .balign PAGE_SIZE 441 - ENTRY(empty_bad_pte_table) 442 - .space PAGE_SIZE 443 - 444 - .balign PAGE_SIZE 445 437 ENTRY(large_page_table) 446 438 .space PAGE_SIZE 447 439
-8
arch/sh/kernel/head_64.S
··· 101 101 mmu_pdtp_cache: 102 102 .space PAGE_SIZE, 0 103 103 104 - .global empty_bad_page 105 - empty_bad_page: 106 - .space PAGE_SIZE, 0 107 - 108 - .global empty_bad_pte_table 109 - empty_bad_pte_table: 110 - .space PAGE_SIZE, 0 111 - 112 104 .global fpu_in_use 113 105 fpu_in_use: .quad 0 114 106
-3
arch/um/kernel/mem.c
··· 22 22 /* allocated in paging_init, zeroed in mem_init, and unchanged thereafter */ 23 23 unsigned long *empty_zero_page = NULL; 24 24 EXPORT_SYMBOL(empty_zero_page); 25 - /* allocated in paging_init and unchanged thereafter */ 26 - static unsigned long *empty_bad_page = NULL; 27 25 28 26 /* 29 27 * Initialized during boot, and readonly for initializing page tables ··· 144 146 int i; 145 147 146 148 empty_zero_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE); 147 - empty_bad_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE); 148 149 for (i = 0; i < ARRAY_SIZE(zones_size); i++) 149 150 zones_size[i] = 0; 150 151
+1 -1
include/linux/page-flags.h
··· 18 18 * Various page->flags bits: 19 19 * 20 20 * PG_reserved is set for special pages, which can never be swapped out. Some 21 - * of them might not even exist (eg empty_bad_page)... 21 + * of them might not even exist... 22 22 * 23 23 * The PG_private bitflag is set on pagecache pages if they contain filesystem 24 24 * specific data (which is normally at page->private). It can be used by