Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86, kexec: Replace ident_mapping_init and init_level4_page

Currently, ident_mapping_init checks whether the pgd/pud entry is present
for every 2M page. Since several 2M pages fall within the same PUD, it
repeatedly re-checks the same pud entry.

init_level4_page, on the other hand, does not check for existing pgd/pud
entries at all.

Replace both of these home-grown local functions with the generic
kernel_ident_mapping_init(), passing the appropriate settings via the
x86_mapping_info structure.

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Link: http://lkml.kernel.org/r/1359058816-7615-24-git-send-email-yinghai@kernel.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>

authored by

Yinghai Lu and committed by
H. Peter Anvin
9ebdc79f 084d1283

+26 -135
+26 -135
arch/x86/kernel/machine_kexec_64.c
··· 16 16 #include <linux/io.h> 17 17 #include <linux/suspend.h> 18 18 19 + #include <asm/init.h> 19 20 #include <asm/pgtable.h> 20 21 #include <asm/tlbflush.h> 21 22 #include <asm/mmu_context.h> 22 23 #include <asm/debugreg.h> 23 - 24 - static int init_one_level2_page(struct kimage *image, pgd_t *pgd, 25 - unsigned long addr) 26 - { 27 - pud_t *pud; 28 - pmd_t *pmd; 29 - struct page *page; 30 - int result = -ENOMEM; 31 - 32 - addr &= PMD_MASK; 33 - pgd += pgd_index(addr); 34 - if (!pgd_present(*pgd)) { 35 - page = kimage_alloc_control_pages(image, 0); 36 - if (!page) 37 - goto out; 38 - pud = (pud_t *)page_address(page); 39 - clear_page(pud); 40 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE)); 41 - } 42 - pud = pud_offset(pgd, addr); 43 - if (!pud_present(*pud)) { 44 - page = kimage_alloc_control_pages(image, 0); 45 - if (!page) 46 - goto out; 47 - pmd = (pmd_t *)page_address(page); 48 - clear_page(pmd); 49 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE)); 50 - } 51 - pmd = pmd_offset(pud, addr); 52 - if (!pmd_present(*pmd)) 53 - set_pmd(pmd, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC)); 54 - result = 0; 55 - out: 56 - return result; 57 - } 58 - 59 - static int ident_mapping_init(struct kimage *image, pgd_t *level4p, 60 - unsigned long mstart, unsigned long mend) 61 - { 62 - int result; 63 - 64 - mstart = round_down(mstart, PMD_SIZE); 65 - mend = round_up(mend - 1, PMD_SIZE); 66 - 67 - while (mstart < mend) { 68 - result = init_one_level2_page(image, level4p, mstart); 69 - if (result) 70 - return result; 71 - 72 - mstart += PMD_SIZE; 73 - } 74 - 75 - return 0; 76 - } 77 - 78 - static void init_level2_page(pmd_t *level2p, unsigned long addr) 79 - { 80 - unsigned long end_addr; 81 - 82 - addr &= PAGE_MASK; 83 - end_addr = addr + PUD_SIZE; 84 - while (addr < end_addr) { 85 - set_pmd(level2p++, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC)); 86 - addr += PMD_SIZE; 87 - } 88 - } 89 - 90 - static int init_level3_page(struct kimage *image, pud_t *level3p, 91 - unsigned long addr, 
unsigned long last_addr) 92 - { 93 - unsigned long end_addr; 94 - int result; 95 - 96 - result = 0; 97 - addr &= PAGE_MASK; 98 - end_addr = addr + PGDIR_SIZE; 99 - while ((addr < last_addr) && (addr < end_addr)) { 100 - struct page *page; 101 - pmd_t *level2p; 102 - 103 - page = kimage_alloc_control_pages(image, 0); 104 - if (!page) { 105 - result = -ENOMEM; 106 - goto out; 107 - } 108 - level2p = (pmd_t *)page_address(page); 109 - init_level2_page(level2p, addr); 110 - set_pud(level3p++, __pud(__pa(level2p) | _KERNPG_TABLE)); 111 - addr += PUD_SIZE; 112 - } 113 - /* clear the unused entries */ 114 - while (addr < end_addr) { 115 - pud_clear(level3p++); 116 - addr += PUD_SIZE; 117 - } 118 - out: 119 - return result; 120 - } 121 - 122 - 123 - static int init_level4_page(struct kimage *image, pgd_t *level4p, 124 - unsigned long addr, unsigned long last_addr) 125 - { 126 - unsigned long end_addr; 127 - int result; 128 - 129 - result = 0; 130 - addr &= PAGE_MASK; 131 - end_addr = addr + (PTRS_PER_PGD * PGDIR_SIZE); 132 - while ((addr < last_addr) && (addr < end_addr)) { 133 - struct page *page; 134 - pud_t *level3p; 135 - 136 - page = kimage_alloc_control_pages(image, 0); 137 - if (!page) { 138 - result = -ENOMEM; 139 - goto out; 140 - } 141 - level3p = (pud_t *)page_address(page); 142 - result = init_level3_page(image, level3p, addr, last_addr); 143 - if (result) 144 - goto out; 145 - set_pgd(level4p++, __pgd(__pa(level3p) | _KERNPG_TABLE)); 146 - addr += PGDIR_SIZE; 147 - } 148 - /* clear the unused entries */ 149 - while (addr < end_addr) { 150 - pgd_clear(level4p++); 151 - addr += PGDIR_SIZE; 152 - } 153 - out: 154 - return result; 155 - } 156 24 157 25 static void free_transition_pgtable(struct kimage *image) 158 26 { ··· 71 203 return result; 72 204 } 73 205 206 + static void *alloc_pgt_page(void *data) 207 + { 208 + struct kimage *image = (struct kimage *)data; 209 + struct page *page; 210 + void *p = NULL; 211 + 212 + page = kimage_alloc_control_pages(image, 
0); 213 + if (page) { 214 + p = page_address(page); 215 + clear_page(p); 216 + } 217 + 218 + return p; 219 + } 220 + 74 221 static int init_pgtable(struct kimage *image, unsigned long start_pgtable) 75 222 { 223 + struct x86_mapping_info info = { 224 + .alloc_pgt_page = alloc_pgt_page, 225 + .context = image, 226 + .pmd_flag = __PAGE_KERNEL_LARGE_EXEC, 227 + }; 76 228 unsigned long mstart, mend; 77 229 pgd_t *level4p; 78 230 int result; 79 231 int i; 80 232 81 233 level4p = (pgd_t *)__va(start_pgtable); 82 - result = init_level4_page(image, level4p, 0, max_pfn << PAGE_SHIFT); 234 + clear_page(level4p); 235 + result = kernel_ident_mapping_init(&info, level4p, 236 + 0, max_pfn << PAGE_SHIFT); 83 237 if (result) 84 238 return result; 85 239 ··· 115 225 mstart = image->segment[i].mem; 116 226 mend = mstart + image->segment[i].memsz; 117 227 118 - result = ident_mapping_init(image, level4p, mstart, mend); 228 + result = kernel_ident_mapping_init(&info, 229 + level4p, mstart, mend); 119 230 120 231 if (result) 121 232 return result;