Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc: get rid of superfluous __GFP_REPEAT

__GFP_REPEAT has rather weak semantics and, since its introduction
around 2.6.12, it has been ignored for low order allocations.

{pud,pmd}_alloc_one are allocating from {PGT,PUD}_CACHE initialized in
pgtable_cache_init which doesn't have larger than sizeof(void *) << 12
size and that fits into !costly allocation request size.

PGALLOC_GFP is used only in radix__pgd_alloc which uses either order-0
or order-4 requests. The first one doesn't need the flag while the
second does. Drop __GFP_REPEAT from PGALLOC_GFP and add it for the
order-4 one.

This means that this flag has never been actually useful here because it
has always been used only for !PAGE_ALLOC_COSTLY requests, for which it
is ignored.

Link: http://lkml.kernel.org/r/1464599699-30131-12-git-send-email-mhocko@kernel.org
Signed-off-by: Michal Hocko <mhocko@suse.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Michal Hocko and committed by
Linus Torvalds
2379a23e a4135b93

+7 -11
+4 -6
arch/powerpc/include/asm/book3s/64/pgalloc.h
··· 41 41 pgtable_cache[(shift) - 1]; \ 42 42 }) 43 43 44 - #define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO 44 + #define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO 45 45 46 46 extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int); 47 47 extern void pte_fragment_free(unsigned long *, int); ··· 56 56 return (pgd_t *)__get_free_page(PGALLOC_GFP); 57 57 #else 58 58 struct page *page; 59 - page = alloc_pages(PGALLOC_GFP, 4); 59 + page = alloc_pages(PGALLOC_GFP | __GFP_REPEAT, 4); 60 60 if (!page) 61 61 return NULL; 62 62 return (pgd_t *) page_address(page); ··· 93 93 94 94 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) 95 95 { 96 - return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE), 97 - GFP_KERNEL|__GFP_REPEAT); 96 + return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE), GFP_KERNEL); 98 97 } 99 98 100 99 static inline void pud_free(struct mm_struct *mm, pud_t *pud) ··· 114 115 115 116 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) 116 117 { 117 - return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX), 118 - GFP_KERNEL|__GFP_REPEAT); 118 + return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX), GFP_KERNEL); 119 119 } 120 120 121 121 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+2 -4
arch/powerpc/include/asm/nohash/64/pgalloc.h
··· 57 57 58 58 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) 59 59 { 60 - return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE), 61 - GFP_KERNEL|__GFP_REPEAT); 60 + return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE), GFP_KERNEL); 62 61 } 63 62 64 63 static inline void pud_free(struct mm_struct *mm, pud_t *pud) ··· 189 190 190 191 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) 191 192 { 192 - return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX), 193 - GFP_KERNEL|__GFP_REPEAT); 193 + return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX), GFP_KERNEL); 194 194 } 195 195 196 196 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+1 -1
arch/powerpc/mm/hugetlbpage.c
··· 73 73 cachep = PGT_CACHE(pdshift - pshift); 74 74 #endif 75 75 76 - new = kmem_cache_zalloc(cachep, GFP_KERNEL|__GFP_REPEAT); 76 + new = kmem_cache_zalloc(cachep, GFP_KERNEL); 77 77 78 78 BUG_ON(pshift > HUGEPD_SHIFT_MASK); 79 79 BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);