x86: fix 1:1 mapping init on 64-bit (memory hotplug case)

While I don't have a hotplug-capable system at hand, I think two issues need
fixing:

- pud_phys (in kernel_physical_mapping_init()) would remain uninitialized in
the after_bootmem case (see the sketch after this list)

- the locking done just around phys_pmd_{init,update}() would leave out pgd
updates, and it needlessly covered code portions that do allocations (with
the lock narrowed, a more friendly gfp value in alloc_low_page() would
perhaps become possible); the resulting locking pattern is sketched after
the diff below

Signed-off-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

commit 8ae3a5a8 (parent 38cc1c3d), authored by Jan Beulich, committed by Ingo Molnar

 arch/x86/mm/init_64.c | 32 ++++++++++++++++++--------------
 1 file changed, 18 insertions(+), 14 deletions(-)
···
 		}
 
 		if (pmd_val(*pmd)) {
-			if (!pmd_large(*pmd))
+			if (!pmd_large(*pmd)) {
+				spin_lock(&init_mm.page_table_lock);
 				last_map_addr = phys_pte_update(pmd, address,
-								 end);
+								end);
+				spin_unlock(&init_mm.page_table_lock);
+			}
 			/* Count entries we're using from level2_ident_pgt */
 			if (start == 0)
 				pages++;
···
 
 		if (page_size_mask & (1<<PG_LEVEL_2M)) {
 			pages++;
+			spin_lock(&init_mm.page_table_lock);
 			set_pte((pte_t *)pmd,
 				pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
+			spin_unlock(&init_mm.page_table_lock);
 			last_map_addr = (address & PMD_MASK) + PMD_SIZE;
 			continue;
 		}
···
 		last_map_addr = phys_pte_init(pte, address, end);
 		unmap_low_page(pte);
 
+		spin_lock(&init_mm.page_table_lock);
 		pmd_populate_kernel(&init_mm, pmd, __va(pte_phys));
+		spin_unlock(&init_mm.page_table_lock);
 	}
 	update_page_count(PG_LEVEL_2M, pages);
 	return last_map_addr;
···
 	pmd_t *pmd = pmd_offset(pud, 0);
 	unsigned long last_map_addr;
 
-	spin_lock(&init_mm.page_table_lock);
 	last_map_addr = phys_pmd_init(pmd, address, end, page_size_mask);
-	spin_unlock(&init_mm.page_table_lock);
 	__flush_tlb_all();
 	return last_map_addr;
 }
···
 
 		if (page_size_mask & (1<<PG_LEVEL_1G)) {
 			pages++;
+			spin_lock(&init_mm.page_table_lock);
 			set_pte((pte_t *)pud,
 				pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
+			spin_unlock(&init_mm.page_table_lock);
 			last_map_addr = (addr & PUD_MASK) + PUD_SIZE;
 			continue;
 		}
 
 		pmd = alloc_low_page(&pmd_phys);
-
-		spin_lock(&init_mm.page_table_lock);
 		last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask);
 		unmap_low_page(pmd);
+
+		spin_lock(&init_mm.page_table_lock);
 		pud_populate(&init_mm, pud, __va(pmd_phys));
 		spin_unlock(&init_mm.page_table_lock);
-
 	}
 	__flush_tlb_all();
 	update_page_count(PG_LEVEL_1G, pages);
···
 			continue;
 		}
 
-		if (after_bootmem)
-			pud = pud_offset(pgd, start & PGDIR_MASK);
-		else
-			pud = alloc_low_page(&pud_phys);
-
+		pud = alloc_low_page(&pud_phys);
 		last_map_addr = phys_pud_init(pud, __pa(start), __pa(next),
 						 page_size_mask);
 		unmap_low_page(pud);
-		pgd_populate(&init_mm, pgd_offset_k(start),
-			     __va(pud_phys));
+
+		spin_lock(&init_mm.page_table_lock);
+		pgd_populate(&init_mm, pgd, __va(pud_phys));
+		spin_unlock(&init_mm.page_table_lock);
 	}
 
 	return last_map_addr;
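
For reference, a minimal userspace sketch of the locking discipline the patch
moves to; the pthread mutex and the helpers alloc_table()/fill_table() are
stand-ins for init_mm.page_table_lock, alloc_low_page() and phys_pmd_init(),
not kernel APIs. Allocation and construction of the lower-level table happen
outside the lock, and only the publish into the parent entry (the
*_populate()/set_pte() step) is serialized:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t page_table_lock = PTHREAD_MUTEX_INITIALIZER;
static void *parent_entry;              /* stands in for a pgd/pud/pmd slot */

/* Stand-ins for alloc_low_page() / phys_pmd_init(); both may allocate,
 * which is why they should not run under the spinlock. */
static void *alloc_table(void)
{
        return calloc(1, 4096);
}

static void fill_table(void *table)
{
        (void)table;                    /* build the lower-level entries */
}

int main(void)
{
        /* Allocation and construction happen outside the lock ... */
        void *table = alloc_table();
        fill_table(table);

        /* ... and only the publishing step (the *_populate() call) is locked. */
        pthread_mutex_lock(&page_table_lock);
        parent_entry = table;
        pthread_mutex_unlock(&page_table_lock);

        printf("parent entry now points at %p\n", parent_entry);
        return 0;
}

Keeping the allocations outside the lock is also what would make a more
friendly gfp value in alloc_low_page() possible, as noted above.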