Merge branch 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
x86, mm: Initialize initial_page_table before paravirt jumps
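
The patch reorders head_32.S so that the initial page tables are built before
the CONFIG_PARAVIRT hardware_subarch dispatch, rather than after it. The
lguest and Xen entry points therefore start with initial_page_table already
populated, which lets lguest drop its verbatim copy of the page-table setup
code. A simplified sketch of the resulting boot flow (label and symbol names
are taken from head_32.S, but this is not the literal code):

	startup_32:
		/* ... copy boot_params, OLPC OFW setup ... */
		/* build identity + kernel mappings in initial_page_table */
	#ifdef CONFIG_PARAVIRT
		movl	pa(boot_params + BP_hardware_subarch), %eax
		cmpl	$num_subarch_entries, %eax
		jae	bad_subarch
		movl	pa(subarch_entries)(,%eax,4), %eax
		subl	$__PAGE_OFFSET, %eax
		jmp	*%eax		/* default_entry, lguest_entry or xen_entry */
	#else
		jmp	default_entry
	#endif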

+39 -143
+39 -38
arch/x86/kernel/head_32.S
···
 	movl %eax, pa(olpc_ofw_pgd)
 #endif
 
-#ifdef CONFIG_PARAVIRT
-	/* This is can only trip for a broken bootloader... */
-	cmpw $0x207, pa(boot_params + BP_version)
-	jb default_entry
-
-	/* Paravirt-compatible boot parameters.  Look to see what architecture
-	   we're booting under. */
-	movl pa(boot_params + BP_hardware_subarch), %eax
-	cmpl $num_subarch_entries, %eax
-	jae bad_subarch
-
-	movl pa(subarch_entries)(,%eax,4), %eax
-	subl $__PAGE_OFFSET, %eax
-	jmp *%eax
-
-bad_subarch:
-WEAK(lguest_entry)
-WEAK(xen_entry)
-	/* Unknown implementation; there's really
-	   nothing we can do at this point. */
-	ud2a
-
-	__INITDATA
-
-subarch_entries:
-	.long default_entry		/* normal x86/PC */
-	.long lguest_entry		/* lguest hypervisor */
-	.long xen_entry			/* Xen hypervisor */
-	.long default_entry		/* Moorestown MID */
-num_subarch_entries = (. - subarch_entries) / 4
-.previous
-#endif /* CONFIG_PARAVIRT */
-
 /*
  * Initialize page tables.  This creates a PDE and a set of page
  * tables, which are located immediately beyond __brk_base.  The variable
···
  *
  * Note that the stack is not yet set up!
  */
-default_entry:
 #ifdef CONFIG_X86_PAE
 
 	/*
···
 	movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
 	movl %eax,pa(initial_page_table+0xffc)
 #endif
-	jmp 3f
+
+#ifdef CONFIG_PARAVIRT
+	/* This is can only trip for a broken bootloader... */
+	cmpw $0x207, pa(boot_params + BP_version)
+	jb default_entry
+
+	/* Paravirt-compatible boot parameters.  Look to see what architecture
+	   we're booting under. */
+	movl pa(boot_params + BP_hardware_subarch), %eax
+	cmpl $num_subarch_entries, %eax
+	jae bad_subarch
+
+	movl pa(subarch_entries)(,%eax,4), %eax
+	subl $__PAGE_OFFSET, %eax
+	jmp *%eax
+
+bad_subarch:
+WEAK(lguest_entry)
+WEAK(xen_entry)
+	/* Unknown implementation; there's really
+	   nothing we can do at this point. */
+	ud2a
+
+	__INITDATA
+
+subarch_entries:
+	.long default_entry		/* normal x86/PC */
+	.long lguest_entry		/* lguest hypervisor */
+	.long xen_entry			/* Xen hypervisor */
+	.long default_entry		/* Moorestown MID */
+num_subarch_entries = (. - subarch_entries) / 4
+.previous
+#else
+	jmp default_entry
+#endif /* CONFIG_PARAVIRT */
+
 /*
  * Non-boot CPU entry point; entered from trampoline.S
  * We can't lgdt here, because lgdt itself uses a data segment, but
···
 	movl %eax,%fs
 	movl %eax,%gs
 #endif /* CONFIG_SMP */
-3:
+default_entry:
 
 /*
  * New page tables may be in 4Mbyte page mode and may
···
 	__PAGE_ALIGNED_BSS
 	.align PAGE_SIZE_asm
 #ifdef CONFIG_X86_PAE
-ENTRY(initial_pg_pmd)
+initial_pg_pmd:
 	.fill 1024*KPMDS,4,0
 #else
 ENTRY(initial_page_table)
 	.fill 1024,4,0
 #endif
-ENTRY(initial_pg_fixmap)
+initial_pg_fixmap:
 	.fill 1024,4,0
 ENTRY(empty_zero_page)
 	.fill 4096,1,0
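
Two smaller changes ride along at the bottom of this diff: the numeric "3:"
label that the old "jmp 3f" targeted becomes the named default_entry: label,
and initial_pg_pmd / initial_pg_fixmap are demoted from ENTRY() to bare
labels, presumably because the copy of this code in
arch/x86/lguest/i386_head.S that referenced them is deleted below. The
difference (ENTRY() is the linkage.h macro):

	ENTRY(initial_pg_fixmap)	/* .globl initial_pg_fixmap + ALIGN + label: visible to other objects */
	initial_pg_fixmap:		/* bare label: no .globl, so nothing outside head_32.S can link against it */
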
-105
arch/x86/lguest/i386_head.S
···
 #include <asm/asm-offsets.h>
 #include <asm/thread_info.h>
 #include <asm/processor-flags.h>
-#include <asm/pgtable.h>
 
 /*G:020
  * Our story starts with the kernel booting into startup_32 in
···
 	/* Set up the initial stack so we can run C code. */
 	movl $(init_thread_union+THREAD_SIZE),%esp
 
-	call init_pagetables
-
 	/* Jumps are relative: we're running __PAGE_OFFSET too low. */
 	jmp lguest_init+__PAGE_OFFSET
-
-/*
- * Initialize page tables.  This creates a PDE and a set of page
- * tables, which are located immediately beyond __brk_base.  The variable
- * _brk_end is set up to point to the first "safe" location.
- * Mappings are created both at virtual address 0 (identity mapping)
- * and PAGE_OFFSET for up to _end.
- *
- * FIXME: This code is taken verbatim from arch/x86/kernel/head_32.S: they
- * don't have a stack at this point, so we can't just use call and ret.
- */
-init_pagetables:
-#if PTRS_PER_PMD > 1
-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
-#else
-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
-#endif
-#define pa(X) ((X) - __PAGE_OFFSET)
-
-/* Enough space to fit pagetables for the low memory linear map */
-MAPPING_BEYOND_END = \
-	PAGE_TABLE_SIZE(((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT) << PAGE_SHIFT
-#ifdef CONFIG_X86_PAE
-
-/*
- * In PAE mode initial_page_table is statically defined to contain
- * enough entries to cover the VMSPLIT option (that is the top 1, 2 or 3
- * entries). The identity mapping is handled by pointing two PGD entries
- * to the first kernel PMD.
- *
- * Note the upper half of each PMD or PTE are always zero at this stage.
- */
-
-#define KPMDS (((-__PAGE_OFFSET) >> 30) & 3) /* Number of kernel PMDs */
-
-	xorl %ebx,%ebx				/* %ebx is kept at zero */
-
-	movl $pa(__brk_base), %edi
-	movl $pa(initial_pg_pmd), %edx
-	movl $PTE_IDENT_ATTR, %eax
-10:
-	leal PDE_IDENT_ATTR(%edi),%ecx		/* Create PMD entry */
-	movl %ecx,(%edx)			/* Store PMD entry */
-						/* Upper half already zero */
-	addl $8,%edx
-	movl $512,%ecx
-11:
-	stosl
-	xchgl %eax,%ebx
-	stosl
-	xchgl %eax,%ebx
-	addl $0x1000,%eax
-	loop 11b
-
-	/*
-	 * End condition: we must map up to the end + MAPPING_BEYOND_END.
-	 */
-	movl $pa(_end) + MAPPING_BEYOND_END + PTE_IDENT_ATTR, %ebp
-	cmpl %ebp,%eax
-	jb 10b
-1:
-	addl $__PAGE_OFFSET, %edi
-	movl %edi, pa(_brk_end)
-	shrl $12, %eax
-	movl %eax, pa(max_pfn_mapped)
-
-	/* Do early initialization of the fixmap area */
-	movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
-	movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
-#else	/* Not PAE */
-
-page_pde_offset = (__PAGE_OFFSET >> 20);
-
-	movl $pa(__brk_base), %edi
-	movl $pa(initial_page_table), %edx
-	movl $PTE_IDENT_ATTR, %eax
-10:
-	leal PDE_IDENT_ATTR(%edi),%ecx		/* Create PDE entry */
-	movl %ecx,(%edx)			/* Store identity PDE entry */
-	movl %ecx,page_pde_offset(%edx)		/* Store kernel PDE entry */
-	addl $4,%edx
-	movl $1024, %ecx
-11:
-	stosl
-	addl $0x1000,%eax
-	loop 11b
-	/*
-	 * End condition: we must map up to the end + MAPPING_BEYOND_END.
-	 */
-	movl $pa(_end) + MAPPING_BEYOND_END + PTE_IDENT_ATTR, %ebp
-	cmpl %ebp,%eax
-	jb 10b
-	addl $__PAGE_OFFSET, %edi
-	movl %edi, pa(_brk_end)
-	shrl $12, %eax
-	movl %eax, pa(max_pfn_mapped)
-
-	/* Do early initialization of the fixmap area */
-	movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
-	movl %eax,pa(initial_page_table+0xffc)
-#endif
-	ret
 
 /*G:055
  * We create a macro which puts the assembler code between lgstart_ and lgend_
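
With head_32.S now doing this work before the paravirt jump, lguest's startup
stub shrinks to setting up a stack and jumping to lguest_init; the FIXME'd
verbatim copy of the head_32.S page-table code (duplicated only because there
was no stack to call/ret through at that point) is gone. What remains of the
entry path, per the diff above:

	/* Set up the initial stack so we can run C code. */
	movl	$(init_thread_union+THREAD_SIZE),%esp

	/* Jumps are relative: we're running __PAGE_OFFSET too low. */
	jmp	lguest_init+__PAGE_OFFSET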