x86: implement gbpages support in change_page_attr()

Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

Authored by Andi Kleen, committed by Ingo Molnar
f07333fd b5360222

arch/x86/mm/pageattr.c | +15 -2
···
 		psize = PMD_PAGE_SIZE;
 		pmask = PMD_PAGE_MASK;
 		break;
+#ifdef CONFIG_X86_64
 	case PG_LEVEL_1G:
+		psize = PMD_PAGE_SIZE;
+		pmask = PMD_PAGE_MASK;
+		break;
+#endif
 	default:
 		res = -EINVAL;
 		goto out_unlock;
···
 {
 	pgprot_t ref_prot;
 	gfp_t gfp_flags = GFP_KERNEL;
-	unsigned long flags, addr, pfn;
+	unsigned long flags, addr, pfn, pfninc = 1;
 	pte_t *pbase, *tmp;
 	struct page *base;
 	unsigned int i, level;
···
 #endif
 	ref_prot = pte_pgprot(pte_clrhuge(*kpte));
 
+#ifdef CONFIG_X86_64
+	if (level == PG_LEVEL_1G) {
+		pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
+		pgprot_val(ref_prot) |= _PAGE_PSE;
+		addr &= PUD_PAGE_MASK;
+	}
+#endif
+
 	/*
 	 * Get the target pfn from the original entry:
 	 */
 	pfn = pte_pfn(*kpte);
-	for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
+	for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
 		set_pte(&pbase[i], pfn_pte(pfn, ref_prot));
 
 	/*
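
Editor's note (not part of the commit): the key change is the pfn stride in split_large_page(). When a 1 GB (gbpage) mapping is split, the 512 new entries are themselves 2 MB PMD pages, so each entry's pfn must advance by PMD_PAGE_SIZE >> PAGE_SHIFT (512) rather than by 1, and _PAGE_PSE is set on the new entries. The same loop then serves both the 2 MB -> 4 KB and the 1 GB -> 2 MB split; only the stride and the PSE bit differ. Below is a minimal user-space C sketch of that arithmetic. The constants assume x86-64 with 4 KB base pages and 2 MB PMD pages, and the start pfn is a hypothetical, 1 GB aligned value chosen for illustration; none of this code is taken from the kernel.

/* Sketch of the pfn stepping used when splitting a 1 GB page. */
#include <stdio.h>

#define PAGE_SHIFT      12
#define PMD_PAGE_SIZE   (1UL << 21)     /* 2 MB */
#define PUD_PAGE_SIZE   (1UL << 30)     /* 1 GB */
#define PTRS_PER_PTE    512

int main(void)
{
        unsigned long pfn = 0x40000;    /* hypothetical 1 GB aligned start pfn */
        unsigned long pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;    /* 512 */
        unsigned int i;

        /* The 512 new PMD-sized entries cover the original 1 GB range. */
        for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc) {
                if (i < 3 || i == PTRS_PER_PTE - 1)
                        printf("entry %3u -> pfn 0x%lx (phys 0x%lx)\n",
                               i, pfn, pfn << PAGE_SHIFT);
        }

        printf("range covered: %lu bytes (expected %lu)\n",
               (unsigned long)PTRS_PER_PTE * PMD_PAGE_SIZE, PUD_PAGE_SIZE);
        return 0;
}

Running the sketch shows the pfn advancing by 512 per entry and the 512 entries covering exactly PUD_PAGE_SIZE bytes, which is the invariant the pfninc change preserves.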