Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc/8xx: add support for huge pages on VMAP and VMALLOC

powerpc 8xx has 4 page sizes:
- 4k
- 16k
- 512k
- 8M

Currently, vmalloc and vmap only support huge pages that are leaf
at PMD level.

Here the PMD level is 4M; it doesn't correspond to any supported page
size.

For now, implement use of 16k and 512k pages, which is done at PTE level.

Support of 8M pages will be implemented later, it requires vmalloc to
support hugepd tables.

Link: https://lkml.kernel.org/r/8b972f1c03fb6bd59953035f0a3e4d26659de4f8.1620795204.git.christophe.leroy@csgroup.eu
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Uladzislau Rezki <uladzislau.rezki@sony.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Christophe Leroy and committed by
Linus Torvalds
a6a8f7c4 3382bbee

+44 -1
+1 -1
arch/powerpc/Kconfig
··· 187 187 select GENERIC_VDSO_TIME_NS 188 188 select HAVE_ARCH_AUDITSYSCALL 189 189 select HAVE_ARCH_HUGE_VMALLOC if HAVE_ARCH_HUGE_VMAP 190 - select HAVE_ARCH_HUGE_VMAP if PPC_BOOK3S_64 && PPC_RADIX_MMU 190 + select HAVE_ARCH_HUGE_VMAP if PPC_RADIX_MMU || PPC_8xx 191 191 select HAVE_ARCH_JUMP_LABEL 192 192 select HAVE_ARCH_JUMP_LABEL_RELATIVE 193 193 select HAVE_ARCH_KASAN if PPC32 && PPC_PAGE_SHIFT <= 14
+43
arch/powerpc/include/asm/nohash/32/mmu-8xx.h
··· 178 178 #ifndef __ASSEMBLY__ 179 179 180 180 #include <linux/mmdebug.h> 181 + #include <linux/sizes.h> 181 182 182 183 void mmu_pin_tlb(unsigned long top, bool readonly); 183 184 ··· 225 224 return mmu_psize_defs[mmu_psize].shift; 226 225 BUG(); 227 226 } 227 + 228 + static inline bool arch_vmap_try_size(unsigned long addr, unsigned long end, u64 pfn, 229 + unsigned int max_page_shift, unsigned long size) 230 + { 231 + if (end - addr < size) 232 + return false; 233 + 234 + if ((1UL << max_page_shift) < size) 235 + return false; 236 + 237 + if (!IS_ALIGNED(addr, size)) 238 + return false; 239 + 240 + if (!IS_ALIGNED(PFN_PHYS(pfn), size)) 241 + return false; 242 + 243 + return true; 244 + } 245 + 246 + static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end, 247 + u64 pfn, unsigned int max_page_shift) 248 + { 249 + if (arch_vmap_try_size(addr, end, pfn, max_page_shift, SZ_512K)) 250 + return SZ_512K; 251 + if (PAGE_SIZE == SZ_16K) 252 + return SZ_16K; 253 + if (arch_vmap_try_size(addr, end, pfn, max_page_shift, SZ_16K)) 254 + return SZ_16K; 255 + return PAGE_SIZE; 256 + } 257 + #define arch_vmap_pte_range_map_size arch_vmap_pte_range_map_size 258 + 259 + static inline int arch_vmap_pte_supported_shift(unsigned long size) 260 + { 261 + if (size >= SZ_512K) 262 + return 19; 263 + else if (size >= SZ_16K) 264 + return 14; 265 + else 266 + return PAGE_SHIFT; 267 + } 268 + #define arch_vmap_pte_supported_shift arch_vmap_pte_supported_shift 228 269 229 270 /* patch sites */ 230 271 extern s32 patch__itlbmiss_exit_1, patch__dtlbmiss_exit_1;