Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: treewide: remove unused address argument from pte_alloc functions

Patch series "Add support for fast mremap".

This series speeds up the mremap(2) syscall by copying page tables at
the PMD level even for non-THP systems. There is concern that the extra
'address' argument that mremap passes to pte_alloc may do something
subtle architecture related in the future that may make the scheme not
work. Also, we find that there is no point in passing the 'address' to
pte_alloc since it's unused. This patch therefore removes this argument
tree-wide resulting in a nice negative diff as well. Also ensuring
along the way that the enabled architectures do not do anything funky
with the 'address' argument that goes unnoticed by the optimization.

Build and boot tested on x86-64. Build tested on arm64. The config
enablement patch for arm64 will be posted in the future after more
testing.

The changes were obtained by applying the following Coccinelle script.
(thanks Julia for answering all Coccinelle questions!).
The following fixups were done manually:
* Removal of address argument from pte_fragment_alloc
* Removal of pte_alloc_one_fast definitions from m68k and microblaze.

// Options: --include-headers --no-includes
// Note: I split the 'identifier fn' line, so if you are manually
// running it, please unsplit it so it runs for you.

virtual patch

@pte_alloc_func_def depends on patch exists@
identifier E2;
identifier fn =~
"^(__pte_alloc|pte_alloc_one|pte_alloc|__pte_alloc_kernel|pte_alloc_one_kernel)$";
type T2;
@@

fn(...
- , T2 E2
)
{ ... }

@pte_alloc_func_proto_noarg depends on patch exists@
type T1, T2, T3, T4;
identifier fn =~ "^(__pte_alloc|pte_alloc_one|pte_alloc|__pte_alloc_kernel|pte_alloc_one_kernel)$";
@@

(
- T3 fn(T1, T2);
+ T3 fn(T1);
|
- T3 fn(T1, T2, T4);
+ T3 fn(T1, T2);
)

@pte_alloc_func_proto depends on patch exists@
identifier E1, E2, E4;
type T1, T2, T3, T4;
identifier fn =~
"^(__pte_alloc|pte_alloc_one|pte_alloc|__pte_alloc_kernel|pte_alloc_one_kernel)$";
@@

(
- T3 fn(T1 E1, T2 E2);
+ T3 fn(T1 E1);
|
- T3 fn(T1 E1, T2 E2, T4 E4);
+ T3 fn(T1 E1, T2 E2);
)

@pte_alloc_func_call depends on patch exists@
expression E2;
identifier fn =~
"^(__pte_alloc|pte_alloc_one|pte_alloc|__pte_alloc_kernel|pte_alloc_one_kernel)$";
@@

fn(...
-, E2
)

@pte_alloc_macro depends on patch exists@
identifier fn =~
"^(__pte_alloc|pte_alloc_one|pte_alloc|__pte_alloc_kernel|pte_alloc_one_kernel)$";
identifier a, b, c;
expression e;
position p;
@@

(
- #define fn(a, b, c) e
+ #define fn(a, b) e
|
- #define fn(a, b) e
+ #define fn(a) e
)

Link: http://lkml.kernel.org/r/20181108181201.88826-2-joelaf@google.com
Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Suggested-by: Kirill A. Shutemov <kirill@shutemov.name>
Acked-by: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Julia Lawall <Julia.Lawall@lip6.fr>
Cc: Kirill A. Shutemov <kirill@shutemov.name>
Cc: William Kucharski <william.kucharski@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Joel Fernandes (Google) and committed by
Linus Torvalds
4cf58924 ff1522bb

+101 -151
+3 -3
arch/alpha/include/asm/pgalloc.h
··· 52 52 } 53 53 54 54 static inline pte_t * 55 - pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) 55 + pte_alloc_one_kernel(struct mm_struct *mm) 56 56 { 57 57 pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO); 58 58 return pte; ··· 65 65 } 66 66 67 67 static inline pgtable_t 68 - pte_alloc_one(struct mm_struct *mm, unsigned long address) 68 + pte_alloc_one(struct mm_struct *mm) 69 69 { 70 - pte_t *pte = pte_alloc_one_kernel(mm, address); 70 + pte_t *pte = pte_alloc_one_kernel(mm); 71 71 struct page *page; 72 72 73 73 if (!pte)
+2 -3
arch/arc/include/asm/pgalloc.h
··· 90 90 return get_order(PTRS_PER_PTE * sizeof(pte_t)); 91 91 } 92 92 93 - static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 94 - unsigned long address) 93 + static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) 95 94 { 96 95 pte_t *pte; 97 96 ··· 101 102 } 102 103 103 104 static inline pgtable_t 104 - pte_alloc_one(struct mm_struct *mm, unsigned long address) 105 + pte_alloc_one(struct mm_struct *mm) 105 106 { 106 107 pgtable_t pte_pg; 107 108 struct page *page;
+2 -2
arch/arm/include/asm/pgalloc.h
··· 81 81 * +------------+ 82 82 */ 83 83 static inline pte_t * 84 - pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) 84 + pte_alloc_one_kernel(struct mm_struct *mm) 85 85 { 86 86 pte_t *pte; 87 87 ··· 93 93 } 94 94 95 95 static inline pgtable_t 96 - pte_alloc_one(struct mm_struct *mm, unsigned long addr) 96 + pte_alloc_one(struct mm_struct *mm) 97 97 { 98 98 struct page *pte; 99 99
+2 -2
arch/arm64/include/asm/pgalloc.h
··· 91 91 extern void pgd_free(struct mm_struct *mm, pgd_t *pgdp); 92 92 93 93 static inline pte_t * 94 - pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) 94 + pte_alloc_one_kernel(struct mm_struct *mm) 95 95 { 96 96 return (pte_t *)__get_free_page(PGALLOC_GFP); 97 97 } 98 98 99 99 static inline pgtable_t 100 - pte_alloc_one(struct mm_struct *mm, unsigned long addr) 100 + pte_alloc_one(struct mm_struct *mm) 101 101 { 102 102 struct page *pte; 103 103
+2 -4
arch/hexagon/include/asm/pgalloc.h
··· 59 59 free_page((unsigned long) pgd); 60 60 } 61 61 62 - static inline struct page *pte_alloc_one(struct mm_struct *mm, 63 - unsigned long address) 62 + static inline struct page *pte_alloc_one(struct mm_struct *mm) 64 63 { 65 64 struct page *pte; 66 65 ··· 74 75 } 75 76 76 77 /* _kernel variant gets to use a different allocator */ 77 - static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 78 - unsigned long address) 78 + static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) 79 79 { 80 80 gfp_t flags = GFP_KERNEL | __GFP_ZERO; 81 81 return (pte_t *) __get_free_page(flags);
+2 -3
arch/ia64/include/asm/pgalloc.h
··· 83 83 pmd_val(*pmd_entry) = __pa(pte); 84 84 } 85 85 86 - static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr) 86 + static inline pgtable_t pte_alloc_one(struct mm_struct *mm) 87 87 { 88 88 struct page *page; 89 89 void *pg; ··· 99 99 return page; 100 100 } 101 101 102 - static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 103 - unsigned long addr) 102 + static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) 104 103 { 105 104 return quicklist_alloc(0, GFP_KERNEL, NULL); 106 105 }
+2 -6
arch/m68k/include/asm/mcf_pgalloc.h
··· 12 12 13 13 extern const char bad_pmd_string[]; 14 14 15 - extern inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 16 - unsigned long address) 15 + extern inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) 17 16 { 18 17 unsigned long page = __get_free_page(GFP_DMA); 19 18 ··· 31 32 #define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); }) 32 33 #define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); }) 33 34 34 - #define pte_alloc_one_fast(mm, addr) pte_alloc_one(mm, addr) 35 - 36 35 #define pmd_populate(mm, pmd, page) (pmd_val(*pmd) = \ 37 36 (unsigned long)(page_address(page))) 38 37 ··· 47 50 48 51 #define __pmd_free_tlb(tlb, pmd, address) do { } while (0) 49 52 50 - static inline struct page *pte_alloc_one(struct mm_struct *mm, 51 - unsigned long address) 53 + static inline struct page *pte_alloc_one(struct mm_struct *mm) 52 54 { 53 55 struct page *page = alloc_pages(GFP_DMA, 0); 54 56 pte_t *pte;
+2 -2
arch/m68k/include/asm/motorola_pgalloc.h
··· 8 8 extern pmd_t *get_pointer_table(void); 9 9 extern int free_pointer_table(pmd_t *); 10 10 11 - static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) 11 + static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) 12 12 { 13 13 pte_t *pte; 14 14 ··· 28 28 free_page((unsigned long) pte); 29 29 } 30 30 31 - static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) 31 + static inline pgtable_t pte_alloc_one(struct mm_struct *mm) 32 32 { 33 33 struct page *page; 34 34 pte_t *pte;
+2 -4
arch/m68k/include/asm/sun3_pgalloc.h
··· 35 35 tlb_remove_page((tlb), pte); \ 36 36 } while (0) 37 37 38 - static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 39 - unsigned long address) 38 + static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) 40 39 { 41 40 unsigned long page = __get_free_page(GFP_KERNEL); 42 41 ··· 46 47 return (pte_t *) (page); 47 48 } 48 49 49 - static inline pgtable_t pte_alloc_one(struct mm_struct *mm, 50 - unsigned long address) 50 + static inline pgtable_t pte_alloc_one(struct mm_struct *mm) 51 51 { 52 52 struct page *page = alloc_pages(GFP_KERNEL, 0); 53 53
+2 -17
arch/microblaze/include/asm/pgalloc.h
··· 108 108 #define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); }) 109 109 #define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); }) 110 110 111 - extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr); 111 + extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm); 112 112 113 - static inline struct page *pte_alloc_one(struct mm_struct *mm, 114 - unsigned long address) 113 + static inline struct page *pte_alloc_one(struct mm_struct *mm) 115 114 { 116 115 struct page *ptepage; 117 116 ··· 129 130 return NULL; 130 131 } 131 132 return ptepage; 132 - } 133 - 134 - static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm, 135 - unsigned long address) 136 - { 137 - unsigned long *ret; 138 - 139 - ret = pte_quicklist; 140 - if (ret != NULL) { 141 - pte_quicklist = (unsigned long *)(*ret); 142 - ret[0] = 0; 143 - pgtable_cache_size--; 144 - } 145 - return (pte_t *)ret; 146 133 } 147 134 148 135 static inline void pte_free_fast(pte_t *pte)
+1 -2
arch/microblaze/mm/pgtable.c
··· 235 235 return pa; 236 236 } 237 237 238 - __ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 239 - unsigned long address) 238 + __ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm) 240 239 { 241 240 pte_t *pte; 242 241 if (mem_init_done) {
+2 -4
arch/mips/include/asm/pgalloc.h
··· 50 50 free_pages((unsigned long)pgd, PGD_ORDER); 51 51 } 52 52 53 - static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 54 - unsigned long address) 53 + static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) 55 54 { 56 55 return (pte_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, PTE_ORDER); 57 56 } 58 57 59 - static inline struct page *pte_alloc_one(struct mm_struct *mm, 60 - unsigned long address) 58 + static inline struct page *pte_alloc_one(struct mm_struct *mm) 61 59 { 62 60 struct page *pte; 63 61
+2 -3
arch/nds32/include/asm/pgalloc.h
··· 22 22 23 23 #define check_pgt_cache() do { } while (0) 24 24 25 - static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 26 - unsigned long addr) 25 + static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) 27 26 { 28 27 pte_t *pte; 29 28 ··· 33 34 return pte; 34 35 } 35 36 36 - static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr) 37 + static inline pgtable_t pte_alloc_one(struct mm_struct *mm) 37 38 { 38 39 pgtable_t pte; 39 40
+2 -4
arch/nios2/include/asm/pgalloc.h
··· 37 37 free_pages((unsigned long)pgd, PGD_ORDER); 38 38 } 39 39 40 - static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 41 - unsigned long address) 40 + static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) 42 41 { 43 42 pte_t *pte; 44 43 ··· 46 47 return pte; 47 48 } 48 49 49 - static inline pgtable_t pte_alloc_one(struct mm_struct *mm, 50 - unsigned long address) 50 + static inline pgtable_t pte_alloc_one(struct mm_struct *mm) 51 51 { 52 52 struct page *pte; 53 53
+2 -3
arch/openrisc/include/asm/pgalloc.h
··· 70 70 free_page((unsigned long)pgd); 71 71 } 72 72 73 - extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address); 73 + extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm); 74 74 75 - static inline struct page *pte_alloc_one(struct mm_struct *mm, 76 - unsigned long address) 75 + static inline struct page *pte_alloc_one(struct mm_struct *mm) 77 76 { 78 77 struct page *pte; 79 78 pte = alloc_pages(GFP_KERNEL, 0);
+1 -2
arch/openrisc/mm/ioremap.c
··· 118 118 * the memblock infrastructure. 119 119 */ 120 120 121 - pte_t __ref *pte_alloc_one_kernel(struct mm_struct *mm, 122 - unsigned long address) 121 + pte_t __ref *pte_alloc_one_kernel(struct mm_struct *mm) 123 122 { 124 123 pte_t *pte; 125 124
+2 -2
arch/parisc/include/asm/pgalloc.h
··· 122 122 #define pmd_pgtable(pmd) pmd_page(pmd) 123 123 124 124 static inline pgtable_t 125 - pte_alloc_one(struct mm_struct *mm, unsigned long address) 125 + pte_alloc_one(struct mm_struct *mm) 126 126 { 127 127 struct page *page = alloc_page(GFP_KERNEL|__GFP_ZERO); 128 128 if (!page) ··· 135 135 } 136 136 137 137 static inline pte_t * 138 - pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) 138 + pte_alloc_one_kernel(struct mm_struct *mm) 139 139 { 140 140 pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO); 141 141 return pte;
+3 -3
arch/powerpc/include/asm/book3s/32/pgalloc.h
··· 61 61 62 62 #define pmd_pgtable(pmd) ((pgtable_t)pmd_page_vaddr(pmd)) 63 63 64 - extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr); 65 - extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr); 64 + extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm); 65 + extern pgtable_t pte_alloc_one(struct mm_struct *mm); 66 66 void pte_frag_destroy(void *pte_frag); 67 - pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel); 67 + pte_t *pte_fragment_alloc(struct mm_struct *mm, int kernel); 68 68 void pte_fragment_free(unsigned long *table, int kernel); 69 69 70 70 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+5 -7
arch/powerpc/include/asm/book3s/64/pgalloc.h
··· 39 39 extern struct kmem_cache *pgtable_cache[]; 40 40 #define PGT_CACHE(shift) pgtable_cache[shift] 41 41 42 - extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int); 42 + extern pte_t *pte_fragment_alloc(struct mm_struct *, int); 43 43 extern pmd_t *pmd_fragment_alloc(struct mm_struct *, unsigned long); 44 44 extern void pte_fragment_free(unsigned long *, int); 45 45 extern void pmd_fragment_free(unsigned long *); ··· 190 190 return (pgtable_t)pmd_page_vaddr(pmd); 191 191 } 192 192 193 - static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 194 - unsigned long address) 193 + static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) 195 194 { 196 - return (pte_t *)pte_fragment_alloc(mm, address, 1); 195 + return (pte_t *)pte_fragment_alloc(mm, 1); 197 196 } 198 197 199 - static inline pgtable_t pte_alloc_one(struct mm_struct *mm, 200 - unsigned long address) 198 + static inline pgtable_t pte_alloc_one(struct mm_struct *mm) 201 199 { 202 - return (pgtable_t)pte_fragment_alloc(mm, address, 0); 200 + return (pgtable_t)pte_fragment_alloc(mm, 0); 203 201 } 204 202 205 203 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+3 -3
arch/powerpc/include/asm/nohash/32/pgalloc.h
··· 79 79 #define pmd_pgtable(pmd) ((pgtable_t)pmd_page_vaddr(pmd)) 80 80 #endif 81 81 82 - extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr); 83 - extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr); 82 + extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm); 83 + extern pgtable_t pte_alloc_one(struct mm_struct *mm); 84 84 void pte_frag_destroy(void *pte_frag); 85 - pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel); 85 + pte_t *pte_fragment_alloc(struct mm_struct *mm, int kernel); 86 86 void pte_fragment_free(unsigned long *table, int kernel); 87 87 88 88 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+2 -4
arch/powerpc/include/asm/nohash/64/pgalloc.h
··· 93 93 } 94 94 95 95 96 - static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 97 - unsigned long address) 96 + static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) 98 97 { 99 98 return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO); 100 99 } 101 100 102 - static inline pgtable_t pte_alloc_one(struct mm_struct *mm, 103 - unsigned long address) 101 + static inline pgtable_t pte_alloc_one(struct mm_struct *mm) 104 102 { 105 103 struct page *page; 106 104 pte_t *pte;
+1 -1
arch/powerpc/mm/pgtable-frag.c
··· 95 95 return (pte_t *)ret; 96 96 } 97 97 98 - pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel) 98 + pte_t *pte_fragment_alloc(struct mm_struct *mm, int kernel) 99 99 { 100 100 pte_t *pte; 101 101
+4 -4
arch/powerpc/mm/pgtable_32.c
··· 43 43 44 44 extern char etext[], _stext[], _sinittext[], _einittext[]; 45 45 46 - __ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) 46 + __ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm) 47 47 { 48 48 if (!slab_is_available()) 49 49 return memblock_alloc(PTE_FRAG_SIZE, PTE_FRAG_SIZE); 50 50 51 - return (pte_t *)pte_fragment_alloc(mm, address, 1); 51 + return (pte_t *)pte_fragment_alloc(mm, 1); 52 52 } 53 53 54 - pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) 54 + pgtable_t pte_alloc_one(struct mm_struct *mm) 55 55 { 56 - return (pgtable_t)pte_fragment_alloc(mm, address, 0); 56 + return (pgtable_t)pte_fragment_alloc(mm, 0); 57 57 } 58 58 59 59 void __iomem *
+2 -4
arch/riscv/include/asm/pgalloc.h
··· 82 82 83 83 #endif /* __PAGETABLE_PMD_FOLDED */ 84 84 85 - static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 86 - unsigned long address) 85 + static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) 87 86 { 88 87 return (pte_t *)__get_free_page( 89 88 GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_ZERO); 90 89 } 91 90 92 - static inline struct page *pte_alloc_one(struct mm_struct *mm, 93 - unsigned long address) 91 + static inline struct page *pte_alloc_one(struct mm_struct *mm) 94 92 { 95 93 struct page *pte; 96 94
+2 -2
arch/s390/include/asm/pgalloc.h
··· 139 139 /* 140 140 * page table entry allocation/free routines. 141 141 */ 142 - #define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm)) 143 - #define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm)) 142 + #define pte_alloc_one_kernel(mm) ((pte_t *)page_table_alloc(mm)) 143 + #define pte_alloc_one(mm) ((pte_t *)page_table_alloc(mm)) 144 144 145 145 #define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte) 146 146 #define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)
+2 -4
arch/sh/include/asm/pgalloc.h
··· 32 32 /* 33 33 * Allocate and free page tables. 34 34 */ 35 - static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 36 - unsigned long address) 35 + static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) 37 36 { 38 37 return quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL); 39 38 } 40 39 41 - static inline pgtable_t pte_alloc_one(struct mm_struct *mm, 42 - unsigned long address) 40 + static inline pgtable_t pte_alloc_one(struct mm_struct *mm) 43 41 { 44 42 struct page *page; 45 43 void *pg;
+2 -3
arch/sparc/include/asm/pgalloc_32.h
··· 58 58 void pmd_set(pmd_t *pmdp, pte_t *ptep); 59 59 #define pmd_populate_kernel(MM, PMD, PTE) pmd_set(PMD, PTE) 60 60 61 - pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address); 61 + pgtable_t pte_alloc_one(struct mm_struct *mm); 62 62 63 - static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 64 - unsigned long address) 63 + static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) 65 64 { 66 65 return srmmu_get_nocache(PTE_SIZE, PTE_SIZE); 67 66 }
+2 -4
arch/sparc/include/asm/pgalloc_64.h
··· 60 60 kmem_cache_free(pgtable_cache, pmd); 61 61 } 62 62 63 - pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 64 - unsigned long address); 65 - pgtable_t pte_alloc_one(struct mm_struct *mm, 66 - unsigned long address); 63 + pte_t *pte_alloc_one_kernel(struct mm_struct *mm); 64 + pgtable_t pte_alloc_one(struct mm_struct *mm); 67 65 void pte_free_kernel(struct mm_struct *mm, pte_t *pte); 68 66 void pte_free(struct mm_struct *mm, pgtable_t ptepage); 69 67
+2 -4
arch/sparc/mm/init_64.c
··· 2925 2925 : : "r" (pstate)); 2926 2926 } 2927 2927 2928 - pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 2929 - unsigned long address) 2928 + pte_t *pte_alloc_one_kernel(struct mm_struct *mm) 2930 2929 { 2931 2930 struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO); 2932 2931 pte_t *pte = NULL; ··· 2936 2937 return pte; 2937 2938 } 2938 2939 2939 - pgtable_t pte_alloc_one(struct mm_struct *mm, 2940 - unsigned long address) 2940 + pgtable_t pte_alloc_one(struct mm_struct *mm) 2941 2941 { 2942 2942 struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO); 2943 2943 if (!page)
+2 -2
arch/sparc/mm/srmmu.c
··· 364 364 * Alignments up to the page size are the same for physical and virtual 365 365 * addresses of the nocache area. 366 366 */ 367 - pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) 367 + pgtable_t pte_alloc_one(struct mm_struct *mm) 368 368 { 369 369 unsigned long pte; 370 370 struct page *page; 371 371 372 - if ((pte = (unsigned long)pte_alloc_one_kernel(mm, address)) == 0) 372 + if ((pte = (unsigned long)pte_alloc_one_kernel(mm)) == 0) 373 373 return NULL; 374 374 page = pfn_to_page(__nocache_pa(pte) >> PAGE_SHIFT); 375 375 if (!pgtable_page_ctor(page)) {
+2 -2
arch/um/include/asm/pgalloc.h
··· 25 25 extern pgd_t *pgd_alloc(struct mm_struct *); 26 26 extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); 27 27 28 - extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long); 29 - extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long); 28 + extern pte_t *pte_alloc_one_kernel(struct mm_struct *); 29 + extern pgtable_t pte_alloc_one(struct mm_struct *); 30 30 31 31 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) 32 32 {
+2 -2
arch/um/kernel/mem.c
··· 199 199 free_page((unsigned long) pgd); 200 200 } 201 201 202 - pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) 202 + pte_t *pte_alloc_one_kernel(struct mm_struct *mm) 203 203 { 204 204 pte_t *pte; 205 205 ··· 207 207 return pte; 208 208 } 209 209 210 - pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) 210 + pgtable_t pte_alloc_one(struct mm_struct *mm) 211 211 { 212 212 struct page *pte; 213 213
+2 -2
arch/unicore32/include/asm/pgalloc.h
··· 34 34 * Allocate one PTE table. 35 35 */ 36 36 static inline pte_t * 37 - pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) 37 + pte_alloc_one_kernel(struct mm_struct *mm) 38 38 { 39 39 pte_t *pte; 40 40 ··· 46 46 } 47 47 48 48 static inline pgtable_t 49 - pte_alloc_one(struct mm_struct *mm, unsigned long addr) 49 + pte_alloc_one(struct mm_struct *mm) 50 50 { 51 51 struct page *pte; 52 52
+2 -2
arch/x86/include/asm/pgalloc.h
··· 47 47 extern pgd_t *pgd_alloc(struct mm_struct *); 48 48 extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); 49 49 50 - extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long); 51 - extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long); 50 + extern pte_t *pte_alloc_one_kernel(struct mm_struct *); 51 + extern pgtable_t pte_alloc_one(struct mm_struct *); 52 52 53 53 /* Should really implement gc for free page table pages. This could be 54 54 done with a reference count in struct page. */
+2 -2
arch/x86/mm/pgtable.c
··· 23 23 24 24 gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP; 25 25 26 - pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) 26 + pte_t *pte_alloc_one_kernel(struct mm_struct *mm) 27 27 { 28 28 return (pte_t *)__get_free_page(PGALLOC_GFP & ~__GFP_ACCOUNT); 29 29 } 30 30 31 - pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) 31 + pgtable_t pte_alloc_one(struct mm_struct *mm) 32 32 { 33 33 struct page *pte; 34 34
+3 -5
arch/xtensa/include/asm/pgalloc.h
··· 38 38 free_page((unsigned long)pgd); 39 39 } 40 40 41 - static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 42 - unsigned long address) 41 + static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) 43 42 { 44 43 pte_t *ptep; 45 44 int i; ··· 51 52 return ptep; 52 53 } 53 54 54 - static inline pgtable_t pte_alloc_one(struct mm_struct *mm, 55 - unsigned long addr) 55 + static inline pgtable_t pte_alloc_one(struct mm_struct *mm) 56 56 { 57 57 pte_t *pte; 58 58 struct page *page; 59 59 60 - pte = pte_alloc_one_kernel(mm, addr); 60 + pte = pte_alloc_one_kernel(mm); 61 61 if (!pte) 62 62 return NULL; 63 63 page = virt_to_page(pte);
+6 -7
include/linux/mm.h
··· 1873 1873 static inline void mm_dec_nr_ptes(struct mm_struct *mm) {} 1874 1874 #endif 1875 1875 1876 - int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address); 1877 - int __pte_alloc_kernel(pmd_t *pmd, unsigned long address); 1876 + int __pte_alloc(struct mm_struct *mm, pmd_t *pmd); 1877 + int __pte_alloc_kernel(pmd_t *pmd); 1878 1878 1879 1879 /* 1880 1880 * The following ifdef needed to get the 4level-fixup.h header to work. ··· 2005 2005 pte_unmap(pte); \ 2006 2006 } while (0) 2007 2007 2008 - #define pte_alloc(mm, pmd, address) \ 2009 - (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd, address)) 2008 + #define pte_alloc(mm, pmd) (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd)) 2010 2009 2011 2010 #define pte_alloc_map(mm, pmd, address) \ 2012 - (pte_alloc(mm, pmd, address) ? NULL : pte_offset_map(pmd, address)) 2011 + (pte_alloc(mm, pmd) ? NULL : pte_offset_map(pmd, address)) 2013 2012 2014 2013 #define pte_alloc_map_lock(mm, pmd, address, ptlp) \ 2015 - (pte_alloc(mm, pmd, address) ? \ 2014 + (pte_alloc(mm, pmd) ? \ 2016 2015 NULL : pte_offset_map_lock(mm, pmd, address, ptlp)) 2017 2016 2018 2017 #define pte_alloc_kernel(pmd, address) \ 2019 - ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \ 2018 + ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd))? \ 2020 2019 NULL: pte_offset_kernel(pmd, address)) 2021 2020 2022 2021 #if USE_SPLIT_PMD_PTLOCKS
+4 -4
mm/huge_memory.c
··· 568 568 return VM_FAULT_FALLBACK; 569 569 } 570 570 571 - pgtable = pte_alloc_one(vma->vm_mm, haddr); 571 + pgtable = pte_alloc_one(vma->vm_mm); 572 572 if (unlikely(!pgtable)) { 573 573 ret = VM_FAULT_OOM; 574 574 goto release; ··· 702 702 struct page *zero_page; 703 703 bool set; 704 704 vm_fault_t ret; 705 - pgtable = pte_alloc_one(vma->vm_mm, haddr); 705 + pgtable = pte_alloc_one(vma->vm_mm); 706 706 if (unlikely(!pgtable)) 707 707 return VM_FAULT_OOM; 708 708 zero_page = mm_get_huge_zero_page(vma->vm_mm); ··· 791 791 return VM_FAULT_SIGBUS; 792 792 793 793 if (arch_needs_pgtable_deposit()) { 794 - pgtable = pte_alloc_one(vma->vm_mm, addr); 794 + pgtable = pte_alloc_one(vma->vm_mm); 795 795 if (!pgtable) 796 796 return VM_FAULT_OOM; 797 797 } ··· 927 927 if (!vma_is_anonymous(vma)) 928 928 return 0; 929 929 930 - pgtable = pte_alloc_one(dst_mm, addr); 930 + pgtable = pte_alloc_one(dst_mm); 931 931 if (unlikely(!pgtable)) 932 932 goto out; 933 933
+1 -1
mm/kasan/init.c
··· 123 123 pte_t *p; 124 124 125 125 if (slab_is_available()) 126 - p = pte_alloc_one_kernel(&init_mm, addr); 126 + p = pte_alloc_one_kernel(&init_mm); 127 127 else 128 128 p = early_alloc(PAGE_SIZE, NUMA_NO_NODE); 129 129 if (!p)
+8 -9
mm/memory.c
··· 400 400 } 401 401 } 402 402 403 - int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address) 403 + int __pte_alloc(struct mm_struct *mm, pmd_t *pmd) 404 404 { 405 405 spinlock_t *ptl; 406 - pgtable_t new = pte_alloc_one(mm, address); 406 + pgtable_t new = pte_alloc_one(mm); 407 407 if (!new) 408 408 return -ENOMEM; 409 409 ··· 434 434 return 0; 435 435 } 436 436 437 - int __pte_alloc_kernel(pmd_t *pmd, unsigned long address) 437 + int __pte_alloc_kernel(pmd_t *pmd) 438 438 { 439 - pte_t *new = pte_alloc_one_kernel(&init_mm, address); 439 + pte_t *new = pte_alloc_one_kernel(&init_mm); 440 440 if (!new) 441 441 return -ENOMEM; 442 442 ··· 2896 2896 * 2897 2897 * Here we only have down_read(mmap_sem). 2898 2898 */ 2899 - if (pte_alloc(vma->vm_mm, vmf->pmd, vmf->address)) 2899 + if (pte_alloc(vma->vm_mm, vmf->pmd)) 2900 2900 return VM_FAULT_OOM; 2901 2901 2902 2902 /* See the comment in pte_alloc_one_map() */ ··· 3043 3043 pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); 3044 3044 spin_unlock(vmf->ptl); 3045 3045 vmf->prealloc_pte = NULL; 3046 - } else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd, vmf->address))) { 3046 + } else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) { 3047 3047 return VM_FAULT_OOM; 3048 3048 } 3049 3049 map_pte: ··· 3122 3122 * related to pte entry. Use the preallocated table for that. 
3123 3123 */ 3124 3124 if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) { 3125 - vmf->prealloc_pte = pte_alloc_one(vma->vm_mm, vmf->address); 3125 + vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); 3126 3126 if (!vmf->prealloc_pte) 3127 3127 return VM_FAULT_OOM; 3128 3128 smp_wmb(); /* See comment in __pte_alloc() */ ··· 3360 3360 start_pgoff + nr_pages - 1); 3361 3361 3362 3362 if (pmd_none(*vmf->pmd)) { 3363 - vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm, 3364 - vmf->address); 3363 + vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm); 3365 3364 if (!vmf->prealloc_pte) 3366 3365 goto out; 3367 3366 smp_wmb(); /* See comment in __pte_alloc() */
+1 -1
mm/migrate.c
··· 2636 2636 * 2637 2637 * Here we only have down_read(mmap_sem). 2638 2638 */ 2639 - if (pte_alloc(mm, pmdp, addr)) 2639 + if (pte_alloc(mm, pmdp)) 2640 2640 goto abort; 2641 2641 2642 2642 /* See the comment in pte_alloc_one_map() */
+1 -1
mm/mremap.c
··· 236 236 if (pmd_trans_unstable(old_pmd)) 237 237 continue; 238 238 } 239 - if (pte_alloc(new_vma->vm_mm, new_pmd, new_addr)) 239 + if (pte_alloc(new_vma->vm_mm, new_pmd)) 240 240 break; 241 241 next = (new_addr + PMD_SIZE) & PMD_MASK; 242 242 if (extent > next - new_addr)
+1 -1
mm/userfaultfd.c
··· 550 550 break; 551 551 } 552 552 if (unlikely(pmd_none(dst_pmdval)) && 553 - unlikely(__pte_alloc(dst_mm, dst_pmd, dst_addr))) { 553 + unlikely(__pte_alloc(dst_mm, dst_pmd))) { 554 554 err = -ENOMEM; 555 555 break; 556 556 }
+1 -1
virt/kvm/arm/mmu.c
··· 647 647 BUG_ON(pmd_sect(*pmd)); 648 648 649 649 if (pmd_none(*pmd)) { 650 - pte = pte_alloc_one_kernel(NULL, addr); 650 + pte = pte_alloc_one_kernel(NULL); 651 651 if (!pte) { 652 652 kvm_err("Cannot allocate Hyp pte\n"); 653 653 return -ENOMEM;