x86: implement gbpages support in change_page_attr()

Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

Author: Andi Kleen; committed by Ingo Molnar — commit f07333fd (parent b5360222)

+15 -2
arch/x86/mm/pageattr.c
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -281,7 +281,12 @@
 		psize = PMD_PAGE_SIZE;
 		pmask = PMD_PAGE_MASK;
 		break;
+#ifdef CONFIG_X86_64
 	case PG_LEVEL_1G:
+		psize = PUD_PAGE_SIZE;
+		pmask = PUD_PAGE_MASK;
+		break;
+#endif
 	default:
 		res = -EINVAL;
 		goto out_unlock;
@@ -343,7 +348,7 @@
 {
 	pgprot_t ref_prot;
 	gfp_t gfp_flags = GFP_KERNEL;
-	unsigned long flags, addr, pfn;
+	unsigned long flags, addr, pfn, pfninc = 1;
 	pte_t *pbase, *tmp;
 	struct page *base;
 	unsigned int i, level;
@@ -372,11 +377,19 @@
 #endif
 	ref_prot = pte_pgprot(pte_clrhuge(*kpte));
 
+#ifdef CONFIG_X86_64
+	if (level == PG_LEVEL_1G) {
+		pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
+		pgprot_val(ref_prot) |= _PAGE_PSE;
+		addr &= PUD_PAGE_MASK;
+	}
+#endif
+
 	/*
 	 * Get the target pfn from the original entry:
 	 */
 	pfn = pte_pfn(*kpte);
-	for (i = 0; i < PTRS_PER_PTE; i++, pfn++)
+	for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
 		set_pte(&pbase[i], pfn_pte(pfn, ref_prot));
 
 	/*