x86: cpa, use page pool

Switch split_large_page() to take its page from the page pool instead
of allocating it directly (GFP_KERNEL normally, GFP_ATOMIC when
DEBUG_PAGEALLOC is enabled). We use the pool unconditionally to avoid
different behaviour with and without DEBUG_PAGEALLOC enabled.
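
The hunks below show only the consumer side in split_large_page().
For context, here is a minimal sketch of the pool they draw from.
The list and counter names (page_pool, pool_pages, pool_low,
pool_used) match the diff; the refill helper, its name and POOL_SIZE
are illustrative assumptions, not the actual implementation:

	static LIST_HEAD(page_pool);	 /* free pages, linked via page->lru */
	static unsigned long pool_pages; /* pages currently in the pool */
	static unsigned long pool_low;	 /* low watermark, statistics only */
	static unsigned long pool_used;	 /* pages consumed by splits */

	#define POOL_SIZE 32		 /* illustrative target fill level */

	/* Illustrative refill helper, called from non-atomic context: */
	static void cpa_refill_pool(void)
	{
		struct page *p;
		unsigned long flags;

		/*
		 * Allocate with GFP_KERNEL outside pgd_lock so that the
		 * split path never has to allocate under the spinlock.
		 * The unlocked pool_pages check is racy, but harmless
		 * for a best-effort refill.
		 */
		while (pool_pages < POOL_SIZE) {
			p = alloc_pages(GFP_KERNEL, 0);
			if (!p)
				return;
			spin_lock_irqsave(&pgd_lock, flags);
			list_add(&p->lru, &page_pool);
			pool_pages++;
			spin_unlock_irqrestore(&pgd_lock, flags);
		}
	}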

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

+26 -11
arch/x86/mm/pageattr.c
···
 static int split_large_page(pte_t *kpte, unsigned long address)
 {
 	unsigned long flags, pfn, pfninc = 1;
-	gfp_t gfp_flags = GFP_KERNEL;
 	unsigned int i, level;
 	pte_t *pbase, *tmp;
 	pgprot_t ref_prot;
 	struct page *base;
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
-	gfp_flags = GFP_ATOMIC | __GFP_NOWARN;
-#endif
-	base = alloc_pages(gfp_flags, 0);
-	if (!base)
-		return -ENOMEM;
-
+	/*
+	 * Get a page from the pool. The pool list is protected by the
+	 * pgd_lock, which we have to take anyway for the split
+	 * operation:
+	 */
 	spin_lock_irqsave(&pgd_lock, flags);
+	if (list_empty(&page_pool)) {
+		spin_unlock_irqrestore(&pgd_lock, flags);
+		return -ENOMEM;
+	}
+
+	base = list_first_entry(&page_pool, struct page, lru);
+	list_del(&base->lru);
+	pool_pages--;
+
+	if (pool_pages < pool_low)
+		pool_low = pool_pages;
+
 	/*
 	 * Check for races, another CPU might have split this page
 	 * up for us already:
···
 	base = NULL;
 
 out_unlock:
+	/*
+	 * If we dropped out via the lookup_address check under
+	 * pgd_lock then stick the page back into the pool:
+	 */
+	if (base) {
+		list_add(&base->lru, &page_pool);
+		pool_pages++;
+	} else
+		pool_used++;
 	spin_unlock_irqrestore(&pgd_lock, flags);
-
-	if (base)
-		__free_pages(base, 0);
 
 	return 0;
 }