x86: cpa, micro-optimization

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

authored by Thomas Gleixner and committed by Ingo Molnar 7b610eec 87f7f8fe

+2 -4
arch/x86/mm/pageattr.c
···
 237 	if (!SHARED_KERNEL_PMD) {
 238 		struct page *page;
 239 
 240 		list_for_each_entry(page, &pgd_list, lru) {
 241 			pgd_t *pgd;
 242 			pud_t *pud;
···
 352 
 353 	static int split_large_page(pte_t *kpte, unsigned long address)
 354 	{
 355 -		unsigned long flags, addr, pfn, pfninc = 1;
 356 		gfp_t gfp_flags = GFP_KERNEL;
 357 		unsigned int i, level;
 358 		pte_t *pbase, *tmp;
···
 375 		if (tmp != kpte)
 376 			goto out_unlock;
 377 
 378 -		address = __pa(address);
 379 -		addr = address & PMD_PAGE_MASK;
 380 		pbase = (pte_t *)page_address(base);
 381 	#ifdef CONFIG_X86_32
 382 		paravirt_alloc_pt(&init_mm, page_to_pfn(base));
···
 385 		if (level == PG_LEVEL_1G) {
 386 			pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
 387 			pgprot_val(ref_prot) |= _PAGE_PSE;
 388 -			addr &= PUD_PAGE_MASK;
 389 		}
 390 	#endif
 391 
···
 237 	if (!SHARED_KERNEL_PMD) {
 238 		struct page *page;
 239 
 240 +		address = __pa(address);
 241 		list_for_each_entry(page, &pgd_list, lru) {
 242 			pgd_t *pgd;
 243 			pud_t *pud;
···
 351 
 352 	static int split_large_page(pte_t *kpte, unsigned long address)
 353 	{
 354 +		unsigned long flags, pfn, pfninc = 1;
 355 		gfp_t gfp_flags = GFP_KERNEL;
 356 		unsigned int i, level;
 357 		pte_t *pbase, *tmp;
···
 374 		if (tmp != kpte)
 375 			goto out_unlock;
 376 
 377 		pbase = (pte_t *)page_address(base);
 378 	#ifdef CONFIG_X86_32
 379 		paravirt_alloc_pt(&init_mm, page_to_pfn(base));
···
 386 		if (level == PG_LEVEL_1G) {
 387 			pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
 388 			pgprot_val(ref_prot) |= _PAGE_PSE;
 389 		}
 390 	#endif
 391 