Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen:
xen: remove duplicated #include
xen: x86/32: perform initial startup on initial_page_table

 arch/x86/xen/enlighten.c | -2
 arch/x86/xen/mmu.c       | +56 -13
 arch/x86/xen/setup.c     | -1
 3 files changed, 56 insertions(+), 16 deletions(-)

diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1200,8 +1200,6 @@
 	/* Allocate and initialize top and mid mfn levels for p2m structure */
 	xen_build_mfn_list_list();
 
-	init_mm.pgd = pgd;
-
 	/* keep using Xen gdt for now; no urgent need to change it */
 
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -2133,43 +2133,82 @@
 	return pgd;
 }
 #else	/* !CONFIG_X86_64 */
-static RESERVE_BRK_ARRAY(pmd_t, level2_kernel_pgt, PTRS_PER_PMD);
+static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
+static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD);
+
+static __init void xen_write_cr3_init(unsigned long cr3)
+{
+	unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));
+
+	BUG_ON(read_cr3() != __pa(initial_page_table));
+	BUG_ON(cr3 != __pa(swapper_pg_dir));
+
+	/*
+	 * We are switching to swapper_pg_dir for the first time (from
+	 * initial_page_table) and therefore need to mark that page
+	 * read-only and then pin it.
+	 *
+	 * Xen disallows sharing of kernel PMDs for PAE
+	 * guests. Therefore we must copy the kernel PMD from
+	 * initial_page_table into a new kernel PMD to be used in
+	 * swapper_pg_dir.
+	 */
+	swapper_kernel_pmd =
+		extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
+	memcpy(swapper_kernel_pmd, initial_kernel_pmd,
+	       sizeof(pmd_t) * PTRS_PER_PMD);
+	swapper_pg_dir[KERNEL_PGD_BOUNDARY] =
+		__pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT);
+	set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO);
+
+	set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
+	xen_write_cr3(cr3);
+	pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn);
+
+	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE,
+			  PFN_DOWN(__pa(initial_page_table)));
+	set_page_prot(initial_page_table, PAGE_KERNEL);
+	set_page_prot(initial_kernel_pmd, PAGE_KERNEL);
+
+	pv_mmu_ops.write_cr3 = &xen_write_cr3;
+}
 
 __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
 					 unsigned long max_pfn)
 {
 	pmd_t *kernel_pmd;
 
-	level2_kernel_pgt = extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
+	initial_kernel_pmd =
+		extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
 
 	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
 				  xen_start_info->nr_pt_frames * PAGE_SIZE +
 				  512*1024);
 
 	kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
-	memcpy(level2_kernel_pgt, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
+	memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
 
-	xen_map_identity_early(level2_kernel_pgt, max_pfn);
+	xen_map_identity_early(initial_kernel_pmd, max_pfn);
 
-	memcpy(swapper_pg_dir, pgd, sizeof(pgd_t) * PTRS_PER_PGD);
-	set_pgd(&swapper_pg_dir[KERNEL_PGD_BOUNDARY],
-			__pgd(__pa(level2_kernel_pgt) | _PAGE_PRESENT));
+	memcpy(initial_page_table, pgd, sizeof(pgd_t) * PTRS_PER_PGD);
+	initial_page_table[KERNEL_PGD_BOUNDARY] =
+		__pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT);
 
-	set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
-	set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
+	set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO);
+	set_page_prot(initial_page_table, PAGE_KERNEL_RO);
 	set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
 
 	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
 
-	xen_write_cr3(__pa(swapper_pg_dir));
-
-	pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(swapper_pg_dir)));
+	pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE,
+			  PFN_DOWN(__pa(initial_page_table)));
+	xen_write_cr3(__pa(initial_page_table));
 
 	memblock_x86_reserve_range(__pa(xen_start_info->pt_base),
 				   __pa(xen_start_info->pt_base +
 					xen_start_info->nr_pt_frames * PAGE_SIZE),
 				   "XEN PAGETABLES");
 
-	return swapper_pg_dir;
+	return initial_page_table;
 }
 #endif	/* CONFIG_X86_64 */
@@ -2343,7 +2304,11 @@
 	.write_cr2 = xen_write_cr2,
 
 	.read_cr3 = xen_read_cr3,
+#ifdef CONFIG_X86_32
+	.write_cr3 = xen_write_cr3_init,
+#else
 	.write_cr3 = xen_write_cr3,
+#endif
 
 	.flush_tlb_user = xen_flush_tlb,
 	.flush_tlb_kernel = xen_flush_tlb,
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -23,7 +23,6 @@
 #include <xen/interface/callback.h>
 #include <xen/interface/memory.h>
 #include <xen/interface/physdev.h>
-#include <xen/interface/memory.h>
 #include <xen/features.h>
 
 #include "xen-ops.h"