x86: cpa, cleanups

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

 arch/x86/mm/pageattr.c | 23 ++++++++++++++++-------
 1 file changed, 16 insertions(+), 7 deletions(-)
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -16,6 +16,9 @@
 #include <asm/uaccess.h>
 #include <asm/pgalloc.h>
 
+/*
+ * The current flushing context - we pass it instead of 5 arguments:
+ */
 struct cpa_data {
 	unsigned long	vaddr;
 	pgprot_t	mask_set;
@@ -206,6 +209,7 @@
 
 	if (pgd_none(*pgd))
 		return NULL;
+
 	pud = pud_offset(pgd, address);
 	if (pud_none(*pud))
 		return NULL;
@@ -223,9 +227,13 @@
 		return (pte_t *)pmd;
 
 	*level = PG_LEVEL_4K;
+
 	return pte_offset_kernel(pmd, address);
 }
 
+/*
+ * Set the new pmd in all the pgds we know about:
+ */
 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
 {
 	/* change init_mm */
@@ -248,8 +256,9 @@
 #endif
 }
 
-static int try_preserve_large_page(pte_t *kpte, unsigned long address,
-				   struct cpa_data *cpa)
+static int
+try_preserve_large_page(pte_t *kpte, unsigned long address,
+			struct cpa_data *cpa)
 {
 	unsigned long nextpage_addr, numpages, pmask, psize, flags;
 	pte_t new_pte, old_pte, *tmp;
@@ -341,17 +350,18 @@
 
 out_unlock:
 	spin_unlock_irqrestore(&pgd_lock, flags);
+
 	return res;
 }
 
 static int split_large_page(pte_t *kpte, unsigned long address)
 {
-	pgprot_t ref_prot;
-	gfp_t gfp_flags = GFP_KERNEL;
 	unsigned long flags, addr, pfn, pfninc = 1;
-	pte_t *pbase, *tmp;
-	struct page *base;
+	gfp_t gfp_flags = GFP_KERNEL;
 	unsigned int i, level;
+	pte_t *pbase, *tmp;
+	pgprot_t ref_prot;
+	struct page *base;
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
 	gfp_flags = GFP_ATOMIC | __GFP_NOWARN;
@@ -505,7 +515,6 @@
  *
  * Modules and drivers should use the set_memory_* APIs instead.
  */
-
 static int change_page_attr_addr(struct cpa_data *cpa)
 {
 	int err;
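
The comment added above struct cpa_data names the idea behind the cleanup: thread one context struct through the call chain instead of repeating the same five parameters in every prototype. Below is a minimal standalone sketch of that pattern, not the kernel code itself: set_attr_args()/set_attr_ctx() are made-up names, pgprot_t is stubbed as an integer type, and the numpages/flushtlb fields are assumed from the "5 arguments" remark, since the hunk above truncates the struct.

#include <stdio.h>

typedef unsigned long pgprot_t;		/* stub for the kernel type */

/* Before: five separate arguments, repeated in every prototype. */
static int set_attr_args(unsigned long vaddr, pgprot_t mask_set,
			 pgprot_t mask_clr, int numpages, int flushtlb)
{
	return flushtlb ? numpages : 0;
}

/* After: one context struct, filled in once by the top-level caller. */
struct cpa_data {
	unsigned long	vaddr;
	pgprot_t	mask_set;
	pgprot_t	mask_clr;
	int		numpages;	/* assumed field, see note above */
	int		flushtlb;	/* assumed field, see note above */
};

static int set_attr_ctx(struct cpa_data *cpa)
{
	return cpa->flushtlb ? cpa->numpages : 0;
}

int main(void)
{
	struct cpa_data cpa = {
		.vaddr		= 0xffff0000UL,
		.mask_set	= 0x1,
		.mask_clr	= 0x2,
		.numpages	= 4,
		.flushtlb	= 1,
	};

	printf("args: %d, ctx: %d\n",
	       set_attr_args(cpa.vaddr, cpa.mask_set, cpa.mask_clr,
			     cpa.numpages, cpa.flushtlb),
	       set_attr_ctx(&cpa));
	return 0;
}

With the struct, every helper down the chain takes a single pointer, so adding another field later means touching one struct definition rather than every function signature in the path.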