Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc: port 64 bits pgtable_cache to 32 bits

Today powerpc64 uses a set of pgtable_caches, while powerpc32 uses
standard pages when using 4k pages and a single pgtable_cache
when using other page sizes.

In preparation for implementing huge pages on the 8xx, this patch
replaces the powerpc32-specific handling with the 64-bit approach.

This is done by:
* moving 64 bits pgtable_cache_add() and pgtable_cache_init()
in a new file called init-common.c
* modifying pgtable_cache_init() to also handle the case
without PMD
* removing the 32 bits version of pgtable_cache_add() and
pgtable_cache_init()
* copying related header contents from 64 bits into both the
book3s/32 and nohash/32 header files

On the 8xx, the following cache sizes will be used:
* 4k pages mode:
- PGT_CACHE(10) for PGD
- PGT_CACHE(3) for 512k hugepage tables
* 16k pages mode:
- PGT_CACHE(6) for PGD
- PGT_CACHE(7) for 512k hugepage tables
- PGT_CACHE(3) for 8M hugepage tables

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Scott Wood <oss@buserror.net>

authored by

Christophe Leroy and committed by
Scott Wood
9b081e10 d7544424

+227 -174
+38 -6
arch/powerpc/include/asm/book3s/32/pgalloc.h
··· 2 2 #define _ASM_POWERPC_BOOK3S_32_PGALLOC_H 3 3 4 4 #include <linux/threads.h> 5 + #include <linux/slab.h> 5 6 6 - /* For 32-bit, all levels of page tables are just drawn from get_free_page() */ 7 - #define MAX_PGTABLE_INDEX_SIZE 0 7 + /* 8 + * Functions that deal with pagetables that could be at any level of 9 + * the table need to be passed an "index_size" so they know how to 10 + * handle allocation. For PTE pages (which are linked to a struct 11 + * page for now, and drawn from the main get_free_pages() pool), the 12 + * allocation size will be (2^index_size * sizeof(pointer)) and 13 + * allocations are drawn from the kmem_cache in PGT_CACHE(index_size). 14 + * 15 + * The maximum index size needs to be big enough to allow any 16 + * pagetable sizes we need, but small enough to fit in the low bits of 17 + * any page table pointer. In other words all pagetables, even tiny 18 + * ones, must be aligned to allow at least enough low 0 bits to 19 + * contain this value. This value is also used as a mask, so it must 20 + * be one less than a power of two. 
21 + */ 22 + #define MAX_PGTABLE_INDEX_SIZE 0xf 8 23 9 24 extern void __bad_pte(pmd_t *pmd); 10 25 11 - extern pgd_t *pgd_alloc(struct mm_struct *mm); 12 - extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); 26 + extern struct kmem_cache *pgtable_cache[]; 27 + #define PGT_CACHE(shift) ({ \ 28 + BUG_ON(!(shift)); \ 29 + pgtable_cache[(shift) - 1]; \ 30 + }) 31 + 32 + static inline pgd_t *pgd_alloc(struct mm_struct *mm) 33 + { 34 + return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL); 35 + } 36 + 37 + static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) 38 + { 39 + kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd); 40 + } 13 41 14 42 /* 15 43 * We don't have any real pmd's, and this code never triggers because ··· 96 68 97 69 static inline void pgtable_free(void *table, unsigned index_size) 98 70 { 99 - BUG_ON(index_size); /* 32-bit doesn't use this */ 100 - free_page((unsigned long)table); 71 + if (!index_size) { 72 + free_page((unsigned long)table); 73 + } else { 74 + BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE); 75 + kmem_cache_free(PGT_CACHE(index_size), table); 76 + } 101 77 } 102 78 103 79 #define check_pgt_cache() do { } while (0)
+20 -20
arch/powerpc/include/asm/book3s/32/pgtable.h
··· 8 8 /* And here we include common definitions */ 9 9 #include <asm/pte-common.h> 10 10 11 + #define PTE_INDEX_SIZE PTE_SHIFT 12 + #define PMD_INDEX_SIZE 0 13 + #define PUD_INDEX_SIZE 0 14 + #define PGD_INDEX_SIZE (32 - PGDIR_SHIFT) 15 + 16 + #define PMD_CACHE_INDEX PMD_INDEX_SIZE 17 + 18 + #ifndef __ASSEMBLY__ 19 + #define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE) 20 + #define PMD_TABLE_SIZE 0 21 + #define PUD_TABLE_SIZE 0 22 + #define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE) 23 + #endif /* __ASSEMBLY__ */ 24 + 25 + #define PTRS_PER_PTE (1 << PTE_INDEX_SIZE) 26 + #define PTRS_PER_PGD (1 << PGD_INDEX_SIZE) 27 + 11 28 /* 12 29 * The normal case is that PTEs are 32-bits and we have a 1-page 13 30 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus ··· 36 19 * -Matt 37 20 */ 38 21 /* PGDIR_SHIFT determines what a top-level page table entry can map */ 39 - #define PGDIR_SHIFT (PAGE_SHIFT + PTE_SHIFT) 22 + #define PGDIR_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE) 40 23 #define PGDIR_SIZE (1UL << PGDIR_SHIFT) 41 24 #define PGDIR_MASK (~(PGDIR_SIZE-1)) 42 - 43 - #define PTRS_PER_PTE (1 << PTE_SHIFT) 44 - #define PTRS_PER_PMD 1 45 - #define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT)) 46 25 47 26 #define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE) 48 27 /* ··· 95 82 96 83 extern unsigned long ioremap_bot; 97 84 98 - /* 99 - * entries per page directory level: our page-table tree is two-level, so 100 - * we don't really have any PMD directory. 
101 - */ 102 - #define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_SHIFT) 103 - #define PGD_TABLE_SIZE (sizeof(pgd_t) << (32 - PGDIR_SHIFT)) 85 + /* Bits to mask out from a PGD to get to the PUD page */ 86 + #define PGD_MASKED_BITS 0 104 87 105 88 #define pte_ERROR(e) \ 106 89 pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \ ··· 291 282 #define __swp_entry(type, offset) ((swp_entry_t) { (type) | ((offset) << 5) }) 292 283 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 }) 293 284 #define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 }) 294 - 295 - #ifndef CONFIG_PPC_4K_PAGES 296 - void pgtable_cache_init(void); 297 - #else 298 - /* 299 - * No page table caches to initialise 300 - */ 301 - #define pgtable_cache_init() do { } while (0) 302 - #endif 303 285 304 286 extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, 305 287 pmd_t **pmdp);
-3
arch/powerpc/include/asm/book3s/64/pgtable.h
··· 789 789 #define pgd_ERROR(e) \ 790 790 pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) 791 791 792 - void pgtable_cache_add(unsigned shift, void (*ctor)(void *)); 793 - void pgtable_cache_init(void); 794 - 795 792 static inline int map_kernel_page(unsigned long ea, unsigned long pa, 796 793 unsigned long flags) 797 794 {
+38 -6
arch/powerpc/include/asm/nohash/32/pgalloc.h
··· 2 2 #define _ASM_POWERPC_PGALLOC_32_H 3 3 4 4 #include <linux/threads.h> 5 + #include <linux/slab.h> 5 6 6 - /* For 32-bit, all levels of page tables are just drawn from get_free_page() */ 7 - #define MAX_PGTABLE_INDEX_SIZE 0 7 + /* 8 + * Functions that deal with pagetables that could be at any level of 9 + * the table need to be passed an "index_size" so they know how to 10 + * handle allocation. For PTE pages (which are linked to a struct 11 + * page for now, and drawn from the main get_free_pages() pool), the 12 + * allocation size will be (2^index_size * sizeof(pointer)) and 13 + * allocations are drawn from the kmem_cache in PGT_CACHE(index_size). 14 + * 15 + * The maximum index size needs to be big enough to allow any 16 + * pagetable sizes we need, but small enough to fit in the low bits of 17 + * any page table pointer. In other words all pagetables, even tiny 18 + * ones, must be aligned to allow at least enough low 0 bits to 19 + * contain this value. This value is also used as a mask, so it must 20 + * be one less than a power of two. 
21 + */ 22 + #define MAX_PGTABLE_INDEX_SIZE 0xf 8 23 9 24 extern void __bad_pte(pmd_t *pmd); 10 25 11 - extern pgd_t *pgd_alloc(struct mm_struct *mm); 12 - extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); 26 + extern struct kmem_cache *pgtable_cache[]; 27 + #define PGT_CACHE(shift) ({ \ 28 + BUG_ON(!(shift)); \ 29 + pgtable_cache[(shift) - 1]; \ 30 + }) 31 + 32 + static inline pgd_t *pgd_alloc(struct mm_struct *mm) 33 + { 34 + return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL); 35 + } 36 + 37 + static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) 38 + { 39 + kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd); 40 + } 13 41 14 42 /* 15 43 * We don't have any real pmd's, and this code never triggers because ··· 96 68 97 69 static inline void pgtable_free(void *table, unsigned index_size) 98 70 { 99 - BUG_ON(index_size); /* 32-bit doesn't use this */ 100 - free_page((unsigned long)table); 71 + if (!index_size) { 72 + free_page((unsigned long)table); 73 + } else { 74 + BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE); 75 + kmem_cache_free(PGT_CACHE(index_size), table); 76 + } 101 77 } 102 78 103 79 #define check_pgt_cache() do { } while (0)
+20 -22
arch/powerpc/include/asm/nohash/32/pgtable.h
··· 16 16 17 17 #endif /* __ASSEMBLY__ */ 18 18 19 + #define PTE_INDEX_SIZE PTE_SHIFT 20 + #define PMD_INDEX_SIZE 0 21 + #define PUD_INDEX_SIZE 0 22 + #define PGD_INDEX_SIZE (32 - PGDIR_SHIFT) 23 + 24 + #define PMD_CACHE_INDEX PMD_INDEX_SIZE 25 + 26 + #ifndef __ASSEMBLY__ 27 + #define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE) 28 + #define PMD_TABLE_SIZE 0 29 + #define PUD_TABLE_SIZE 0 30 + #define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE) 31 + #endif /* __ASSEMBLY__ */ 32 + 33 + #define PTRS_PER_PTE (1 << PTE_INDEX_SIZE) 34 + #define PTRS_PER_PGD (1 << PGD_INDEX_SIZE) 35 + 19 36 /* 20 37 * The normal case is that PTEs are 32-bits and we have a 1-page 21 38 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus ··· 44 27 * -Matt 45 28 */ 46 29 /* PGDIR_SHIFT determines what a top-level page table entry can map */ 47 - #define PGDIR_SHIFT (PAGE_SHIFT + PTE_SHIFT) 30 + #define PGDIR_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE) 48 31 #define PGDIR_SIZE (1UL << PGDIR_SHIFT) 49 32 #define PGDIR_MASK (~(PGDIR_SIZE-1)) 50 33 51 - /* 52 - * entries per page directory level: our page-table tree is two-level, so 53 - * we don't really have any PMD directory. 
54 - */ 55 - #ifndef __ASSEMBLY__ 56 - #define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_SHIFT) 57 - #define PGD_TABLE_SIZE (sizeof(pgd_t) << (32 - PGDIR_SHIFT)) 58 - #endif /* __ASSEMBLY__ */ 59 - 60 - #define PTRS_PER_PTE (1 << PTE_SHIFT) 61 - #define PTRS_PER_PMD 1 62 - #define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT)) 34 + /* Bits to mask out from a PGD to get to the PUD page */ 35 + #define PGD_MASKED_BITS 0 63 36 64 37 #define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE) 65 38 #define FIRST_USER_ADDRESS 0UL ··· 334 327 #define __swp_entry(type, offset) ((swp_entry_t) { (type) | ((offset) << 5) }) 335 328 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 }) 336 329 #define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 }) 337 - 338 - #ifndef CONFIG_PPC_4K_PAGES 339 - void pgtable_cache_init(void); 340 - #else 341 - /* 342 - * No page table caches to initialise 343 - */ 344 - #define pgtable_cache_init() do { } while (0) 345 - #endif 346 330 347 331 extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, 348 332 pmd_t **pmdp);
-2
arch/powerpc/include/asm/nohash/64/pgtable.h
··· 346 346 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) }) 347 347 #define __swp_entry_to_pte(x) __pte((x).val) 348 348 349 - void pgtable_cache_add(unsigned shift, void (*ctor)(void *)); 350 - void pgtable_cache_init(void); 351 349 extern int map_kernel_page(unsigned long ea, unsigned long pa, 352 350 unsigned long flags); 353 351 extern int __meminit vmemmap_create_mapping(unsigned long start,
+2
arch/powerpc/include/asm/pgtable.h
··· 78 78 79 79 unsigned long vmalloc_to_phys(void *vmalloc_addr); 80 80 81 + void pgtable_cache_add(unsigned shift, void (*ctor)(void *)); 82 + void pgtable_cache_init(void); 81 83 #endif /* __ASSEMBLY__ */ 82 84 83 85 #endif /* _ASM_POWERPC_PGTABLE_H */
+2 -1
arch/powerpc/mm/Makefile
··· 7 7 ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC) 8 8 9 9 obj-y := fault.o mem.o pgtable.o mmap.o \ 10 - init_$(BITS).o pgtable_$(BITS).o 10 + init_$(BITS).o pgtable_$(BITS).o \ 11 + init-common.o 11 12 obj-$(CONFIG_PPC_MMU_NOHASH) += mmu_context_nohash.o tlb_nohash.o \ 12 13 tlb_nohash_low.o 13 14 obj-$(CONFIG_PPC_BOOK3E) += tlb_low_$(BITS)e.o
+107
arch/powerpc/mm/init-common.c
··· 1 + /* 2 + * PowerPC version 3 + * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 4 + * 5 + * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) 6 + * and Cort Dougan (PReP) (cort@cs.nmt.edu) 7 + * Copyright (C) 1996 Paul Mackerras 8 + * 9 + * Derived from "arch/i386/mm/init.c" 10 + * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds 11 + * 12 + * Dave Engebretsen <engebret@us.ibm.com> 13 + * Rework for PPC64 port. 14 + * 15 + * This program is free software; you can redistribute it and/or 16 + * modify it under the terms of the GNU General Public License 17 + * as published by the Free Software Foundation; either version 18 + * 2 of the License, or (at your option) any later version. 19 + * 20 + */ 21 + 22 + #undef DEBUG 23 + 24 + #include <linux/string.h> 25 + #include <asm/pgalloc.h> 26 + #include <asm/pgtable.h> 27 + 28 + static void pgd_ctor(void *addr) 29 + { 30 + memset(addr, 0, PGD_TABLE_SIZE); 31 + } 32 + 33 + static void pud_ctor(void *addr) 34 + { 35 + memset(addr, 0, PUD_TABLE_SIZE); 36 + } 37 + 38 + static void pmd_ctor(void *addr) 39 + { 40 + memset(addr, 0, PMD_TABLE_SIZE); 41 + } 42 + 43 + struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE]; 44 + 45 + /* 46 + * Create a kmem_cache() for pagetables. This is not used for PTE 47 + * pages - they're linked to struct page, come from the normal free 48 + * pages pool and have a different entry size (see real_pte_t) to 49 + * everything else. Caches created by this function are used for all 50 + * the higher level pagetables, and for hugepage pagetables. 51 + */ 52 + void pgtable_cache_add(unsigned shift, void (*ctor)(void *)) 53 + { 54 + char *name; 55 + unsigned long table_size = sizeof(void *) << shift; 56 + unsigned long align = table_size; 57 + 58 + /* When batching pgtable pointers for RCU freeing, we store 59 + * the index size in the low bits. Table alignment must be 60 + * big enough to fit it. 
61 + * 62 + * Likewise, hugeapge pagetable pointers contain a (different) 63 + * shift value in the low bits. All tables must be aligned so 64 + * as to leave enough 0 bits in the address to contain it. */ 65 + unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1, 66 + HUGEPD_SHIFT_MASK + 1); 67 + struct kmem_cache *new; 68 + 69 + /* It would be nice if this was a BUILD_BUG_ON(), but at the 70 + * moment, gcc doesn't seem to recognize is_power_of_2 as a 71 + * constant expression, so so much for that. */ 72 + BUG_ON(!is_power_of_2(minalign)); 73 + BUG_ON((shift < 1) || (shift > MAX_PGTABLE_INDEX_SIZE)); 74 + 75 + if (PGT_CACHE(shift)) 76 + return; /* Already have a cache of this size */ 77 + 78 + align = max_t(unsigned long, align, minalign); 79 + name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift); 80 + new = kmem_cache_create(name, table_size, align, 0, ctor); 81 + kfree(name); 82 + pgtable_cache[shift - 1] = new; 83 + pr_debug("Allocated pgtable cache for order %d\n", shift); 84 + } 85 + 86 + 87 + void pgtable_cache_init(void) 88 + { 89 + pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor); 90 + 91 + if (PMD_INDEX_SIZE && !PGT_CACHE(PMD_INDEX_SIZE)) 92 + pgtable_cache_add(PMD_CACHE_INDEX, pmd_ctor); 93 + /* 94 + * In all current configs, when the PUD index exists it's the 95 + * same size as either the pgd or pmd index except with THP enabled 96 + * on book3s 64 97 + */ 98 + if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE)) 99 + pgtable_cache_add(PUD_INDEX_SIZE, pud_ctor); 100 + 101 + if (!PGT_CACHE(PGD_INDEX_SIZE)) 102 + panic("Couldn't allocate pgd cache"); 103 + if (PMD_INDEX_SIZE && !PGT_CACHE(PMD_INDEX_SIZE)) 104 + panic("Couldn't allocate pmd pgtable caches"); 105 + if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE)) 106 + panic("Couldn't allocate pud pgtable caches"); 107 + }
-77
arch/powerpc/mm/init_64.c
··· 80 80 phys_addr_t kernstart_addr; 81 81 EXPORT_SYMBOL_GPL(kernstart_addr); 82 82 83 - static void pgd_ctor(void *addr) 84 - { 85 - memset(addr, 0, PGD_TABLE_SIZE); 86 - } 87 - 88 - static void pud_ctor(void *addr) 89 - { 90 - memset(addr, 0, PUD_TABLE_SIZE); 91 - } 92 - 93 - static void pmd_ctor(void *addr) 94 - { 95 - memset(addr, 0, PMD_TABLE_SIZE); 96 - } 97 - 98 - struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE]; 99 - 100 - /* 101 - * Create a kmem_cache() for pagetables. This is not used for PTE 102 - * pages - they're linked to struct page, come from the normal free 103 - * pages pool and have a different entry size (see real_pte_t) to 104 - * everything else. Caches created by this function are used for all 105 - * the higher level pagetables, and for hugepage pagetables. 106 - */ 107 - void pgtable_cache_add(unsigned shift, void (*ctor)(void *)) 108 - { 109 - char *name; 110 - unsigned long table_size = sizeof(void *) << shift; 111 - unsigned long align = table_size; 112 - 113 - /* When batching pgtable pointers for RCU freeing, we store 114 - * the index size in the low bits. Table alignment must be 115 - * big enough to fit it. 116 - * 117 - * Likewise, hugeapge pagetable pointers contain a (different) 118 - * shift value in the low bits. All tables must be aligned so 119 - * as to leave enough 0 bits in the address to contain it. */ 120 - unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1, 121 - HUGEPD_SHIFT_MASK + 1); 122 - struct kmem_cache *new; 123 - 124 - /* It would be nice if this was a BUILD_BUG_ON(), but at the 125 - * moment, gcc doesn't seem to recognize is_power_of_2 as a 126 - * constant expression, so so much for that. 
*/ 127 - BUG_ON(!is_power_of_2(minalign)); 128 - BUG_ON((shift < 1) || (shift > MAX_PGTABLE_INDEX_SIZE)); 129 - 130 - if (PGT_CACHE(shift)) 131 - return; /* Already have a cache of this size */ 132 - 133 - align = max_t(unsigned long, align, minalign); 134 - name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift); 135 - new = kmem_cache_create(name, table_size, align, 0, ctor); 136 - kfree(name); 137 - pgtable_cache[shift - 1] = new; 138 - pr_debug("Allocated pgtable cache for order %d\n", shift); 139 - } 140 - 141 - 142 - void pgtable_cache_init(void) 143 - { 144 - pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor); 145 - pgtable_cache_add(PMD_CACHE_INDEX, pmd_ctor); 146 - /* 147 - * In all current configs, when the PUD index exists it's the 148 - * same size as either the pgd or pmd index except with THP enabled 149 - * on book3s 64 150 - */ 151 - if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE)) 152 - pgtable_cache_add(PUD_INDEX_SIZE, pud_ctor); 153 - 154 - if (!PGT_CACHE(PGD_INDEX_SIZE) || !PGT_CACHE(PMD_CACHE_INDEX)) 155 - panic("Couldn't allocate pgtable caches"); 156 - if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE)) 157 - panic("Couldn't allocate pud pgtable caches"); 158 - } 159 - 160 83 #ifdef CONFIG_SPARSEMEM_VMEMMAP 161 84 /* 162 85 * Given an address within the vmemmap, determine the pfn of the page that
-37
arch/powerpc/mm/pgtable_32.c
··· 42 42 43 43 extern char etext[], _stext[], _sinittext[], _einittext[]; 44 44 45 - #define PGDIR_ORDER (32 + PGD_T_LOG2 - PGDIR_SHIFT) 46 - 47 - #ifndef CONFIG_PPC_4K_PAGES 48 - static struct kmem_cache *pgtable_cache; 49 - 50 - void pgtable_cache_init(void) 51 - { 52 - pgtable_cache = kmem_cache_create("PGDIR cache", 1 << PGDIR_ORDER, 53 - 1 << PGDIR_ORDER, 0, NULL); 54 - if (pgtable_cache == NULL) 55 - panic("Couldn't allocate pgtable caches"); 56 - } 57 - #endif 58 - 59 - pgd_t *pgd_alloc(struct mm_struct *mm) 60 - { 61 - pgd_t *ret; 62 - 63 - /* pgdir take page or two with 4K pages and a page fraction otherwise */ 64 - #ifndef CONFIG_PPC_4K_PAGES 65 - ret = kmem_cache_alloc(pgtable_cache, GFP_KERNEL | __GFP_ZERO); 66 - #else 67 - ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, 68 - PGDIR_ORDER - PAGE_SHIFT); 69 - #endif 70 - return ret; 71 - } 72 - 73 - void pgd_free(struct mm_struct *mm, pgd_t *pgd) 74 - { 75 - #ifndef CONFIG_PPC_4K_PAGES 76 - kmem_cache_free(pgtable_cache, (void *)pgd); 77 - #else 78 - free_pages((unsigned long)pgd, PGDIR_ORDER - PAGE_SHIFT); 79 - #endif 80 - } 81 - 82 45 __ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) 83 46 { 84 47 pte_t *pte;