Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ARC: mm: switch pgtable_t back to struct page *

So far ARC pgtable_t has not been struct page based to avoid extra
page_address() calls involved. However the differences are down to
noise and get in the way of using generic code, hence this patch.

This also allows us to reuse generic THP deposit/withdraw code.

There's some additional consideration for PGDIR_SHIFT in 4K page config.
Now due to page tables being PAGE_SIZE deep only, the address split
can't be really arbitrary.

Tested-by: kernel test robot <lkp@intel.com>
Suggested-by: Mike Rapoport <rppt@linux.ibm.com>
Acked-by: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Vineet Gupta <vgupta@kernel.org>

+28 -87
-8
arch/arc/include/asm/hugepage.h
··· 58 58 extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr, 59 59 pmd_t *pmd); 60 60 61 - /* Generic variants assume pgtable_t is struct page *, hence need for these */ 62 - #define __HAVE_ARCH_PGTABLE_DEPOSIT 63 - extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, 64 - pgtable_t pgtable); 65 - 66 - #define __HAVE_ARCH_PGTABLE_WITHDRAW 67 - extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp); 68 - 69 61 #define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE 70 62 extern void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start, 71 63 unsigned long end);
+1 -1
arch/arc/include/asm/page.h
··· 60 60 #define __pgprot(x) ((pgprot_t) { (x) }) 61 61 #define pte_pgprot(x) __pgprot(pte_val(x)) 62 62 63 - typedef pte_t * pgtable_t; 63 + typedef struct page *pgtable_t; 64 64 65 65 /* 66 66 * Use virt_to_pfn with caution:
+17 -40
arch/arc/include/asm/pgalloc.h
··· 45 45 set_pmd(pmd, __pmd((unsigned long)pte)); 46 46 } 47 47 48 - static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte) 48 + static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte_page) 49 49 { 50 - set_pmd(pmd, __pmd((unsigned long)pte)); 51 - } 52 - 53 - static inline int __get_order_pgd(void) 54 - { 55 - return get_order(PTRS_PER_PGD * sizeof(pgd_t)); 50 + set_pmd(pmd, __pmd((unsigned long)page_address(pte_page))); 56 51 } 57 52 58 53 static inline pgd_t *pgd_alloc(struct mm_struct *mm) 59 54 { 60 - int num, num2; 61 - pgd_t *ret = (pgd_t *) __get_free_pages(GFP_KERNEL, __get_order_pgd()); 55 + pgd_t *ret = (pgd_t *) __get_free_page(GFP_KERNEL); 62 56 63 57 if (ret) { 58 + int num, num2; 64 59 num = USER_PTRS_PER_PGD + USER_KERNEL_GUTTER / PGDIR_SIZE; 65 60 memzero(ret, num * sizeof(pgd_t)); 66 61 ··· 71 76 72 77 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) 73 78 { 74 - free_pages((unsigned long)pgd, __get_order_pgd()); 75 - } 76 - 77 - 78 - /* 79 - * With software-only page-tables, addr-split for traversal is tweakable and 80 - * that directly governs how big tables would be at each level. 81 - * Further, the MMU page size is configurable. 82 - * Thus we need to programatically assert the size constraint 83 - * All of this is const math, allowing gcc to do constant folding/propagation. 
84 - */ 85 - 86 - static inline int __get_order_pte(void) 87 - { 88 - return get_order(PTRS_PER_PTE * sizeof(pte_t)); 79 + free_page((unsigned long)pgd); 89 80 } 90 81 91 82 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) 92 83 { 93 84 pte_t *pte; 94 85 95 - pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_ZERO, 96 - __get_order_pte()); 86 + pte = (pte_t *) __get_free_page(GFP_KERNEL | __GFP_ZERO); 97 87 98 88 return pte; 99 89 } 100 90 101 - static inline pgtable_t 102 - pte_alloc_one(struct mm_struct *mm) 91 + static inline pgtable_t pte_alloc_one(struct mm_struct *mm) 103 92 { 104 - pgtable_t pte_pg; 105 93 struct page *page; 106 94 107 - pte_pg = (pgtable_t)__get_free_pages(GFP_KERNEL, __get_order_pte()); 108 - if (!pte_pg) 109 - return 0; 110 - memzero((void *)pte_pg, PTRS_PER_PTE * sizeof(pte_t)); 111 - page = virt_to_page(pte_pg); 95 + page = (pgtable_t)alloc_page(GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT); 96 + if (!page) 97 + return NULL; 98 + 112 99 if (!pgtable_pte_page_ctor(page)) { 113 100 __free_page(page); 114 - return 0; 101 + return NULL; 115 102 } 116 103 117 - return pte_pg; 104 + return page; 118 105 } 119 106 120 107 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) 121 108 { 122 - free_pages((unsigned long)pte, __get_order_pte()); /* takes phy addr */ 109 + free_page((unsigned long)pte); 123 110 } 124 111 125 - static inline void pte_free(struct mm_struct *mm, pgtable_t ptep) 112 + static inline void pte_free(struct mm_struct *mm, pgtable_t pte_page) 126 113 { 127 - pgtable_pte_page_dtor(virt_to_page(ptep)); 128 - free_pages((unsigned long)ptep, __get_order_pte()); 114 + pgtable_pte_page_dtor(pte_page); 115 + __free_page(pte_page); 129 116 } 130 117 131 118 #define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
+7 -1
arch/arc/include/asm/pgtable-levels.h
··· 35 35 #else 36 36 /* 37 37 * No Super page case 38 - * Default value provides 11:8:13 (8K), 11:9:12 (4K) 38 + * Default value provides 11:8:13 (8K), 10:10:12 (4K) 39 + * Limits imposed by pgtable_t only PAGE_SIZE long 40 + * (so 4K page can only have 1K entries: or 10 bits) 39 41 */ 42 + #ifdef CONFIG_ARC_PAGE_SIZE_4K 43 + #define PGDIR_SHIFT 22 44 + #else 40 45 #define PGDIR_SHIFT 21 46 + #endif 41 47 42 48 #endif 43 49
+3
arch/arc/mm/init.c
··· 189 189 { 190 190 memblock_free_all(); 191 191 highmem_init(); 192 + 193 + BUILD_BUG_ON((PTRS_PER_PGD * sizeof(pgd_t)) > PAGE_SIZE); 194 + BUILD_BUG_ON((PTRS_PER_PTE * sizeof(pte_t)) > PAGE_SIZE); 192 195 } 193 196 194 197 #ifdef CONFIG_HIGHMEM
-37
arch/arc/mm/tlb.c
··· 534 534 update_mmu_cache(vma, addr, &pte); 535 535 } 536 536 537 - void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, 538 - pgtable_t pgtable) 539 - { 540 - struct list_head *lh = (struct list_head *) pgtable; 541 - 542 - assert_spin_locked(&mm->page_table_lock); 543 - 544 - /* FIFO */ 545 - if (!pmd_huge_pte(mm, pmdp)) 546 - INIT_LIST_HEAD(lh); 547 - else 548 - list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp)); 549 - pmd_huge_pte(mm, pmdp) = pgtable; 550 - } 551 - 552 - pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp) 553 - { 554 - struct list_head *lh; 555 - pgtable_t pgtable; 556 - 557 - assert_spin_locked(&mm->page_table_lock); 558 - 559 - pgtable = pmd_huge_pte(mm, pmdp); 560 - lh = (struct list_head *) pgtable; 561 - if (list_empty(lh)) 562 - pmd_huge_pte(mm, pmdp) = NULL; 563 - else { 564 - pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next; 565 - list_del(lh); 566 - } 567 - 568 - pte_val(pgtable[0]) = 0; 569 - pte_val(pgtable[1]) = 0; 570 - 571 - return pgtable; 572 - } 573 - 574 537 void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start, 575 538 unsigned long end) 576 539 {