Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc/mm/book3e/64: Remove unsupported 64K page size from 64-bit booke

We have in Kconfig

config PPC_64K_PAGES
bool "64k page size"
depends on !PPC_FSL_BOOK3E && (44x || PPC_BOOK3S_64 || PPC_BOOK3E_64)
select HAVE_ARCH_SOFT_DIRTY if PPC_BOOK3S_64

The only supported 64-bit BOOK3E platform is FSL_BOOK3E. Remove the dead 64K
page support code from 64-bit nohash.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>

authored by

Aneesh Kumar K.V and committed by
Michael Ellerman
7820856a 8ce74cff

+4 -127
-6
arch/powerpc/include/asm/mmu-book3e.h
··· 230 230 unsigned int id; 231 231 unsigned int active; 232 232 unsigned long vdso_base; 233 - #ifdef CONFIG_PPC_64K_PAGES 234 - /* for 4K PTE fragment support */ 235 - void *pte_frag; 236 - #endif 237 233 } mm_context_t; 238 234 239 235 /* Page size definitions, common between 32 and 64-bit ··· 271 275 */ 272 276 #if defined(CONFIG_PPC_4K_PAGES) 273 277 #define mmu_virtual_psize MMU_PAGE_4K 274 - #elif defined(CONFIG_PPC_64K_PAGES) 275 - #define mmu_virtual_psize MMU_PAGE_64K 276 278 #else 277 279 #error Unsupported page size 278 280 #endif
-60
arch/powerpc/include/asm/nohash/64/pgalloc.h
··· 52 52 kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd); 53 53 } 54 54 55 - #ifndef CONFIG_PPC_64K_PAGES 56 - 57 55 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, (unsigned long)PUD) 58 56 59 57 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) ··· 128 130 tlb_flush_pgtable(tlb, address); 129 131 pgtable_free_tlb(tlb, page_address(table), 0); 130 132 } 131 - 132 - #else /* if CONFIG_PPC_64K_PAGES */ 133 - 134 - extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int); 135 - extern void pte_fragment_free(unsigned long *, int); 136 - extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift); 137 - #ifdef CONFIG_SMP 138 - extern void __tlb_remove_table(void *_table); 139 - #endif 140 - 141 - #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd) 142 - 143 - static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, 144 - pte_t *pte) 145 - { 146 - pmd_set(pmd, (unsigned long)pte); 147 - } 148 - 149 - static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, 150 - pgtable_t pte_page) 151 - { 152 - pmd_set(pmd, (unsigned long)pte_page); 153 - } 154 - 155 - static inline pgtable_t pmd_pgtable(pmd_t pmd) 156 - { 157 - return (pgtable_t)(pmd_val(pmd) & ~PMD_MASKED_BITS); 158 - } 159 - 160 - static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 161 - unsigned long address) 162 - { 163 - return (pte_t *)pte_fragment_alloc(mm, address, 1); 164 - } 165 - 166 - static inline pgtable_t pte_alloc_one(struct mm_struct *mm, 167 - unsigned long address) 168 - { 169 - return (pgtable_t)pte_fragment_alloc(mm, address, 0); 170 - } 171 - 172 - static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) 173 - { 174 - pte_fragment_free((unsigned long *)pte, 1); 175 - } 176 - 177 - static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage) 178 - { 179 - pte_fragment_free((unsigned long *)ptepage, 0); 180 - } 181 - 182 - static inline void __pte_free_tlb(struct 
mmu_gather *tlb, pgtable_t table, 183 - unsigned long address) 184 - { 185 - tlb_flush_pgtable(tlb, address); 186 - pgtable_free_tlb(tlb, table, 0); 187 - } 188 - #endif /* CONFIG_PPC_64K_PAGES */ 189 133 190 134 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) 191 135 {
-57
arch/powerpc/include/asm/nohash/64/pgtable-64k.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - #ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H 3 - #define _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H 4 - 5 - #define __ARCH_USE_5LEVEL_HACK 6 - #include <asm-generic/pgtable-nopud.h> 7 - 8 - 9 - #define PTE_INDEX_SIZE 8 10 - #define PMD_INDEX_SIZE 10 11 - #define PUD_INDEX_SIZE 0 12 - #define PGD_INDEX_SIZE 12 13 - 14 - /* 15 - * we support 32 fragments per PTE page of 64K size 16 - */ 17 - #define PTE_FRAG_NR 32 18 - /* 19 - * We use a 2K PTE page fragment and another 2K for storing 20 - * real_pte_t hash index 21 - */ 22 - #define PTE_FRAG_SIZE_SHIFT 11 23 - #define PTE_FRAG_SIZE (1UL << PTE_FRAG_SIZE_SHIFT) 24 - 25 - #ifndef __ASSEMBLY__ 26 - #define PTE_TABLE_SIZE PTE_FRAG_SIZE 27 - #define PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE) 28 - #define PUD_TABLE_SIZE (0) 29 - #define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE) 30 - #endif /* __ASSEMBLY__ */ 31 - 32 - #define PTRS_PER_PTE (1 << PTE_INDEX_SIZE) 33 - #define PTRS_PER_PMD (1 << PMD_INDEX_SIZE) 34 - #define PTRS_PER_PGD (1 << PGD_INDEX_SIZE) 35 - 36 - /* PMD_SHIFT determines what a second-level page table entry can map */ 37 - #define PMD_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE) 38 - #define PMD_SIZE (1UL << PMD_SHIFT) 39 - #define PMD_MASK (~(PMD_SIZE-1)) 40 - 41 - /* PGDIR_SHIFT determines what a third-level page table entry can map */ 42 - #define PGDIR_SHIFT (PMD_SHIFT + PMD_INDEX_SIZE) 43 - #define PGDIR_SIZE (1UL << PGDIR_SHIFT) 44 - #define PGDIR_MASK (~(PGDIR_SIZE-1)) 45 - 46 - /* 47 - * Bits to mask out from a PMD to get to the PTE page 48 - * PMDs point to PTE table fragments which are PTE_FRAG_SIZE aligned. 49 - */ 50 - #define PMD_MASKED_BITS (PTE_FRAG_SIZE - 1) 51 - /* Bits to mask out from a PGD/PUD to get to the PMD page */ 52 - #define PUD_MASKED_BITS 0x1ff 53 - 54 - #define pgd_pte(pgd) (pud_pte(((pud_t){ pgd }))) 55 - #define pte_pgd(pte) ((pgd_t)pte_pud(pte)) 56 - 57 - #endif /* _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H */
+4 -4
arch/powerpc/include/asm/nohash/64/pgtable.h
··· 6 6 * the ppc64 hashed page table. 7 7 */ 8 8 9 - #ifdef CONFIG_PPC_64K_PAGES 10 - #include <asm/nohash/64/pgtable-64k.h> 11 - #else 12 9 #include <asm/nohash/64/pgtable-4k.h> 13 - #endif 14 10 #include <asm/barrier.h> 11 + 12 + #ifdef CONFIG_PPC_64K_PAGES 13 + #error "Page size not supported" 14 + #endif 15 15 16 16 #define FIRST_USER_ADDRESS 0UL 17 17