Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc/mm: move __find_linux_pte() out of hugetlbpage.c

__find_linux_pte() is the only function in hugetlbpage.c
which is compiled in regardless of CONFIG_HUGETLB_PAGE

This patch moves it into pgtable.c.

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>

authored by

Christophe Leroy and committed by
Michael Ellerman
0caed4de 3dea7332

+104 -103
-103
arch/powerpc/mm/hugetlbpage.c
··· 756 756 757 757 #endif /* CONFIG_HUGETLB_PAGE */ 758 758 759 - /* 760 - * We have 4 cases for pgds and pmds: 761 - * (1) invalid (all zeroes) 762 - * (2) pointer to next table, as normal; bottom 6 bits == 0 763 - * (3) leaf pte for huge page _PAGE_PTE set 764 - * (4) hugepd pointer, _PAGE_PTE = 0 and bits [2..6] indicate size of table 765 - * 766 - * So long as we atomically load page table pointers we are safe against teardown, 767 - * we can follow the address down to the the page and take a ref on it. 768 - * This function need to be called with interrupts disabled. We use this variant 769 - * when we have MSR[EE] = 0 but the paca->irq_soft_mask = IRQS_ENABLED 770 - */ 771 - pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea, 772 - bool *is_thp, unsigned *hpage_shift) 773 - { 774 - pgd_t pgd, *pgdp; 775 - pud_t pud, *pudp; 776 - pmd_t pmd, *pmdp; 777 - pte_t *ret_pte; 778 - hugepd_t *hpdp = NULL; 779 - unsigned pdshift = PGDIR_SHIFT; 780 - 781 - if (hpage_shift) 782 - *hpage_shift = 0; 783 - 784 - if (is_thp) 785 - *is_thp = false; 786 - 787 - pgdp = pgdir + pgd_index(ea); 788 - pgd = READ_ONCE(*pgdp); 789 - /* 790 - * Always operate on the local stack value. This make sure the 791 - * value don't get updated by a parallel THP split/collapse, 792 - * page fault or a page unmap. The return pte_t * is still not 793 - * stable. So should be checked there for above conditions. 
794 - */ 795 - if (pgd_none(pgd)) 796 - return NULL; 797 - else if (pgd_huge(pgd)) { 798 - ret_pte = (pte_t *) pgdp; 799 - goto out; 800 - } else if (is_hugepd(__hugepd(pgd_val(pgd)))) 801 - hpdp = (hugepd_t *)&pgd; 802 - else { 803 - /* 804 - * Even if we end up with an unmap, the pgtable will not 805 - * be freed, because we do an rcu free and here we are 806 - * irq disabled 807 - */ 808 - pdshift = PUD_SHIFT; 809 - pudp = pud_offset(&pgd, ea); 810 - pud = READ_ONCE(*pudp); 811 - 812 - if (pud_none(pud)) 813 - return NULL; 814 - else if (pud_huge(pud)) { 815 - ret_pte = (pte_t *) pudp; 816 - goto out; 817 - } else if (is_hugepd(__hugepd(pud_val(pud)))) 818 - hpdp = (hugepd_t *)&pud; 819 - else { 820 - pdshift = PMD_SHIFT; 821 - pmdp = pmd_offset(&pud, ea); 822 - pmd = READ_ONCE(*pmdp); 823 - /* 824 - * A hugepage collapse is captured by pmd_none, because 825 - * it mark the pmd none and do a hpte invalidate. 826 - */ 827 - if (pmd_none(pmd)) 828 - return NULL; 829 - 830 - if (pmd_trans_huge(pmd) || pmd_devmap(pmd)) { 831 - if (is_thp) 832 - *is_thp = true; 833 - ret_pte = (pte_t *) pmdp; 834 - goto out; 835 - } 836 - /* 837 - * pmd_large check below will handle the swap pmd pte 838 - * we need to do both the check because they are config 839 - * dependent. 840 - */ 841 - if (pmd_huge(pmd) || pmd_large(pmd)) { 842 - ret_pte = (pte_t *) pmdp; 843 - goto out; 844 - } else if (is_hugepd(__hugepd(pmd_val(pmd)))) 845 - hpdp = (hugepd_t *)&pmd; 846 - else 847 - return pte_offset_kernel(&pmd, ea); 848 - } 849 - } 850 - if (!hpdp) 851 - return NULL; 852 - 853 - ret_pte = hugepte_offset(*hpdp, ea, pdshift); 854 - pdshift = hugepd_shift(*hpdp); 855 - out: 856 - if (hpage_shift) 857 - *hpage_shift = pdshift; 858 - return ret_pte; 859 - } 860 - EXPORT_SYMBOL_GPL(__find_linux_pte); 861 - 862 759 int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr, 863 760 unsigned long end, int write, struct page **pages, int *nr) 864 761 {
+104
arch/powerpc/mm/pgtable.c
··· 30 30 #include <asm/pgalloc.h> 31 31 #include <asm/tlbflush.h> 32 32 #include <asm/tlb.h> 33 + #include <asm/hugetlb.h> 33 34 34 35 static inline int is_exec_fault(void) 35 36 { ··· 300 299 return __pa(pfn_to_kaddr(pfn)) + offset_in_page(va); 301 300 } 302 301 EXPORT_SYMBOL_GPL(vmalloc_to_phys); 302 + 303 + /* 304 + * We have 4 cases for pgds and pmds: 305 + * (1) invalid (all zeroes) 306 + * (2) pointer to next table, as normal; bottom 6 bits == 0 307 + * (3) leaf pte for huge page _PAGE_PTE set 308 + * (4) hugepd pointer, _PAGE_PTE = 0 and bits [2..6] indicate size of table 309 + * 310 + * So long as we atomically load page table pointers we are safe against teardown, 311 + * we can follow the address down to the the page and take a ref on it. 312 + * This function need to be called with interrupts disabled. We use this variant 313 + * when we have MSR[EE] = 0 but the paca->irq_soft_mask = IRQS_ENABLED 314 + */ 315 + pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea, 316 + bool *is_thp, unsigned *hpage_shift) 317 + { 318 + pgd_t pgd, *pgdp; 319 + pud_t pud, *pudp; 320 + pmd_t pmd, *pmdp; 321 + pte_t *ret_pte; 322 + hugepd_t *hpdp = NULL; 323 + unsigned pdshift = PGDIR_SHIFT; 324 + 325 + if (hpage_shift) 326 + *hpage_shift = 0; 327 + 328 + if (is_thp) 329 + *is_thp = false; 330 + 331 + pgdp = pgdir + pgd_index(ea); 332 + pgd = READ_ONCE(*pgdp); 333 + /* 334 + * Always operate on the local stack value. This make sure the 335 + * value don't get updated by a parallel THP split/collapse, 336 + * page fault or a page unmap. The return pte_t * is still not 337 + * stable. So should be checked there for above conditions. 
338 + */ 339 + if (pgd_none(pgd)) 340 + return NULL; 341 + else if (pgd_huge(pgd)) { 342 + ret_pte = (pte_t *) pgdp; 343 + goto out; 344 + } else if (is_hugepd(__hugepd(pgd_val(pgd)))) 345 + hpdp = (hugepd_t *)&pgd; 346 + else { 347 + /* 348 + * Even if we end up with an unmap, the pgtable will not 349 + * be freed, because we do an rcu free and here we are 350 + * irq disabled 351 + */ 352 + pdshift = PUD_SHIFT; 353 + pudp = pud_offset(&pgd, ea); 354 + pud = READ_ONCE(*pudp); 355 + 356 + if (pud_none(pud)) 357 + return NULL; 358 + else if (pud_huge(pud)) { 359 + ret_pte = (pte_t *) pudp; 360 + goto out; 361 + } else if (is_hugepd(__hugepd(pud_val(pud)))) 362 + hpdp = (hugepd_t *)&pud; 363 + else { 364 + pdshift = PMD_SHIFT; 365 + pmdp = pmd_offset(&pud, ea); 366 + pmd = READ_ONCE(*pmdp); 367 + /* 368 + * A hugepage collapse is captured by pmd_none, because 369 + * it mark the pmd none and do a hpte invalidate. 370 + */ 371 + if (pmd_none(pmd)) 372 + return NULL; 373 + 374 + if (pmd_trans_huge(pmd) || pmd_devmap(pmd)) { 375 + if (is_thp) 376 + *is_thp = true; 377 + ret_pte = (pte_t *) pmdp; 378 + goto out; 379 + } 380 + /* 381 + * pmd_large check below will handle the swap pmd pte 382 + * we need to do both the check because they are config 383 + * dependent. 384 + */ 385 + if (pmd_huge(pmd) || pmd_large(pmd)) { 386 + ret_pte = (pte_t *) pmdp; 387 + goto out; 388 + } else if (is_hugepd(__hugepd(pmd_val(pmd)))) 389 + hpdp = (hugepd_t *)&pmd; 390 + else 391 + return pte_offset_kernel(&pmd, ea); 392 + } 393 + } 394 + if (!hpdp) 395 + return NULL; 396 + 397 + ret_pte = hugepte_offset(*hpdp, ea, pdshift); 398 + pdshift = hugepd_shift(*hpdp); 399 + out: 400 + if (hpage_shift) 401 + *hpage_shift = pdshift; 402 + return ret_pte; 403 + } 404 + EXPORT_SYMBOL_GPL(__find_linux_pte);