Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc/64s: Move hash MMU support code under CONFIG_PPC_64S_HASH_MMU

Compiling out hash support code when CONFIG_PPC_64S_HASH_MMU=n saves
128kB kernel image size (90kB text) on powernv_defconfig minus KVM,
350kB on pseries_defconfig minus KVM, 40kB on a tiny config.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
[mpe: Fixup defined(ARCH_HAS_MEMREMAP_COMPAT_ALIGN), which needs CONFIG.
Fix radix_enabled() use in setup_initial_memory_limit(). Add some
stubs to reduce number of ifdefs.]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20211201144153.2456614-18-npiggin@gmail.com

Authored by Nicholas Piggin and committed by Michael Ellerman
387e220a c2857374

+172 -57
+1 -1
arch/powerpc/Kconfig
··· 129 129 select ARCH_HAS_KCOV 130 130 select ARCH_HAS_MEMBARRIER_CALLBACKS 131 131 select ARCH_HAS_MEMBARRIER_SYNC_CORE 132 - select ARCH_HAS_MEMREMAP_COMPAT_ALIGN if PPC_BOOK3S_64 132 + select ARCH_HAS_MEMREMAP_COMPAT_ALIGN if PPC_64S_HASH_MMU 133 133 select ARCH_HAS_MMIOWB if PPC64 134 134 select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE 135 135 select ARCH_HAS_PHYS_TO_DMA
+7 -1
arch/powerpc/include/asm/book3s/64/mmu-hash.h
··· 523 523 void slb_dump_contents(struct slb_entry *slb_ptr); 524 524 525 525 extern void slb_vmalloc_update(void); 526 - extern void slb_set_size(u16 size); 527 526 void preload_new_slb_context(unsigned long start, unsigned long sp); 527 + 528 + #ifdef CONFIG_PPC_64S_HASH_MMU 529 + void slb_set_size(u16 size); 530 + #else 531 + static inline void slb_set_size(u16 size) { } 532 + #endif 533 + 528 534 #endif /* __ASSEMBLY__ */ 529 535 530 536 /*
+18 -3
arch/powerpc/include/asm/book3s/64/mmu.h
··· 105 105 * from EA and new context ids to build the new VAs. 106 106 */ 107 107 mm_context_id_t id; 108 + #ifdef CONFIG_PPC_64S_HASH_MMU 108 109 mm_context_id_t extended_id[TASK_SIZE_USER64/TASK_CONTEXT_SIZE]; 110 + #endif 109 111 }; 110 112 111 113 /* Number of bits in the mm_cpumask */ ··· 119 117 /* Number of user space windows opened in process mm_context */ 120 118 atomic_t vas_windows; 121 119 120 + #ifdef CONFIG_PPC_64S_HASH_MMU 122 121 struct hash_mm_context *hash_context; 122 + #endif 123 123 124 124 void __user *vdso; 125 125 /* ··· 144 140 #endif 145 141 } mm_context_t; 146 142 143 + #ifdef CONFIG_PPC_64S_HASH_MMU 147 144 static inline u16 mm_ctx_user_psize(mm_context_t *ctx) 148 145 { 149 146 return ctx->hash_context->user_psize; ··· 205 200 extern int mmu_linear_psize; 206 201 extern int mmu_virtual_psize; 207 202 extern int mmu_vmalloc_psize; 208 - extern int mmu_vmemmap_psize; 209 203 extern int mmu_io_psize; 204 + #else /* CONFIG_PPC_64S_HASH_MMU */ 205 + #ifdef CONFIG_PPC_64K_PAGES 206 + #define mmu_virtual_psize MMU_PAGE_64K 207 + #else 208 + #define mmu_virtual_psize MMU_PAGE_4K 209 + #endif 210 + #endif 211 + extern int mmu_vmemmap_psize; 210 212 211 213 /* MMU initialization */ 212 214 void mmu_early_init_devtree(void); ··· 252 240 * know which translations we will pick. Hence go with hash 253 241 * restrictions. 
254 242 */ 255 - return hash__setup_initial_memory_limit(first_memblock_base, 256 - first_memblock_size); 243 + if (!early_radix_enabled()) 244 + hash__setup_initial_memory_limit(first_memblock_base, 245 + first_memblock_size); 257 246 } 258 247 259 248 #ifdef CONFIG_PPC_PSERIES ··· 275 262 void cleanup_cpu_mmu_context(void); 276 263 #endif 277 264 265 + #ifdef CONFIG_PPC_64S_HASH_MMU 278 266 static inline int get_user_context(mm_context_t *ctx, unsigned long ea) 279 267 { 280 268 int index = ea >> MAX_EA_BITS_PER_CONTEXT; ··· 295 281 296 282 return get_vsid(context, ea, ssize); 297 283 } 284 + #endif 298 285 299 286 #endif /* __ASSEMBLY__ */ 300 287 #endif /* _ASM_POWERPC_BOOK3S_64_MMU_H_ */
+6
arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
··· 112 112 113 113 struct mmu_gather; 114 114 extern void hash__tlb_flush(struct mmu_gather *tlb); 115 + void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr); 116 + 117 + #ifdef CONFIG_PPC_64S_HASH_MMU 115 118 /* Private function for use by PCI IO mapping code */ 116 119 extern void __flush_hash_table_range(unsigned long start, unsigned long end); 117 120 extern void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd, 118 121 unsigned long addr); 122 + #else 123 + static inline void __flush_hash_table_range(unsigned long start, unsigned long end) { } 124 + #endif 119 125 #endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H */
+4
arch/powerpc/include/asm/book3s/pgtable.h
··· 25 25 unsigned long size, pgprot_t vma_prot); 26 26 #define __HAVE_PHYS_MEM_ACCESS_PROT 27 27 28 + #if defined(CONFIG_PPC32) || defined(CONFIG_PPC_64S_HASH_MMU) 28 29 /* 29 30 * This gets called at the end of handling a page fault, when 30 31 * the kernel has put a new PTE into the page table for the process. ··· 36 35 * waiting for the inevitable extra hash-table miss exception. 37 36 */ 38 37 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep); 38 + #else 39 + static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) {} 40 + #endif 39 41 40 42 #endif /* __ASSEMBLY__ */ 41 43 #endif
+2
arch/powerpc/include/asm/mmu_context.h
··· 75 75 extern void __destroy_context(int context_id); 76 76 static inline void mmu_context_init(void) { } 77 77 78 + #ifdef CONFIG_PPC_64S_HASH_MMU 78 79 static inline int alloc_extended_context(struct mm_struct *mm, 79 80 unsigned long ea) 80 81 { ··· 101 100 return true; 102 101 return false; 103 102 } 103 + #endif 104 104 105 105 #else 106 106 extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
+8
arch/powerpc/include/asm/paca.h
··· 97 97 /* this becomes non-zero. */ 98 98 u8 kexec_state; /* set when kexec down has irqs off */ 99 99 #ifdef CONFIG_PPC_BOOK3S_64 100 + #ifdef CONFIG_PPC_64S_HASH_MMU 100 101 struct slb_shadow *slb_shadow_ptr; 102 + #endif 101 103 struct dtl_entry *dispatch_log; 102 104 struct dtl_entry *dispatch_log_end; 103 105 #endif ··· 112 110 /* used for most interrupts/exceptions */ 113 111 u64 exgen[EX_SIZE] __attribute__((aligned(0x80))); 114 112 113 + #ifdef CONFIG_PPC_64S_HASH_MMU 115 114 /* SLB related definitions */ 116 115 u16 vmalloc_sllp; 117 116 u8 slb_cache_ptr; ··· 123 120 u32 slb_used_bitmap; /* Bitmaps for first 32 SLB entries. */ 124 121 u32 slb_kern_bitmap; 125 122 u32 slb_cache[SLB_CACHE_ENTRIES]; 123 + #endif 126 124 #endif /* CONFIG_PPC_BOOK3S_64 */ 127 125 128 126 #ifdef CONFIG_PPC_BOOK3E ··· 153 149 #endif /* CONFIG_PPC_BOOK3E */ 154 150 155 151 #ifdef CONFIG_PPC_BOOK3S 152 + #ifdef CONFIG_PPC_64S_HASH_MMU 156 153 #ifdef CONFIG_PPC_MM_SLICES 157 154 unsigned char mm_ctx_low_slices_psize[BITS_PER_LONG / BITS_PER_BYTE]; 158 155 unsigned char mm_ctx_high_slices_psize[SLICE_ARRAY_SIZE]; 159 156 #else 160 157 u16 mm_ctx_user_psize; 161 158 u16 mm_ctx_sllp; 159 + #endif 162 160 #endif 163 161 #endif 164 162 ··· 274 268 #endif /* CONFIG_PPC_PSERIES */ 275 269 276 270 #ifdef CONFIG_PPC_BOOK3S_64 271 + #ifdef CONFIG_PPC_64S_HASH_MMU 277 272 /* Capture SLB related old contents in MCE handler. */ 278 273 struct slb_entry *mce_faulty_slbs; 279 274 u16 slb_save_cache_ptr; 275 + #endif 280 276 #endif /* CONFIG_PPC_BOOK3S_64 */ 281 277 #ifdef CONFIG_STACKPROTECTOR 282 278 unsigned long canary;
+2
arch/powerpc/kernel/asm-offsets.c
··· 218 218 OFFSET(PACA_EXGEN, paca_struct, exgen); 219 219 OFFSET(PACA_EXMC, paca_struct, exmc); 220 220 OFFSET(PACA_EXNMI, paca_struct, exnmi); 221 + #ifdef CONFIG_PPC_64S_HASH_MMU 221 222 OFFSET(PACA_SLBSHADOWPTR, paca_struct, slb_shadow_ptr); 222 223 OFFSET(SLBSHADOW_STACKVSID, slb_shadow, save_area[SLB_NUM_BOLTED - 1].vsid); 223 224 OFFSET(SLBSHADOW_STACKESID, slb_shadow, save_area[SLB_NUM_BOLTED - 1].esid); 224 225 OFFSET(SLBSHADOW_SAVEAREA, slb_shadow, save_area); 226 + #endif 225 227 OFFSET(LPPACA_PMCINUSE, lppaca, pmcregs_in_use); 226 228 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE 227 229 OFFSET(PACA_PMCINUSE, paca_struct, pmcregs_in_use);
+2 -2
arch/powerpc/kernel/entry_64.S
··· 180 180 #endif 181 181 182 182 ld r8,KSP(r4) /* new stack pointer */ 183 - #ifdef CONFIG_PPC_BOOK3S_64 183 + #ifdef CONFIG_PPC_64S_HASH_MMU 184 184 BEGIN_MMU_FTR_SECTION 185 185 b 2f 186 186 END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX) ··· 232 232 slbmte r7,r0 233 233 isync 234 234 2: 235 - #endif /* CONFIG_PPC_BOOK3S_64 */ 235 + #endif /* CONFIG_PPC_64S_HASH_MMU */ 236 236 237 237 clrrdi r7, r8, THREAD_SHIFT /* base of new stack */ 238 238 /* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
+16
arch/powerpc/kernel/exceptions-64s.S
··· 1367 1367 addi r3,r1,STACK_FRAME_OVERHEAD 1368 1368 andis. r0,r4,DSISR_DABRMATCH@h 1369 1369 bne- 1f 1370 + #ifdef CONFIG_PPC_64S_HASH_MMU 1370 1371 BEGIN_MMU_FTR_SECTION 1371 1372 bl do_hash_fault 1372 1373 MMU_FTR_SECTION_ELSE 1373 1374 bl do_page_fault 1374 1375 ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) 1376 + #else 1377 + bl do_page_fault 1378 + #endif 1375 1379 b interrupt_return_srr 1376 1380 1377 1381 1: bl do_break ··· 1418 1414 EXC_VIRT_END(data_access_slb, 0x4380, 0x80) 1419 1415 EXC_COMMON_BEGIN(data_access_slb_common) 1420 1416 GEN_COMMON data_access_slb 1417 + #ifdef CONFIG_PPC_64S_HASH_MMU 1421 1418 BEGIN_MMU_FTR_SECTION 1422 1419 /* HPT case, do SLB fault */ 1423 1420 addi r3,r1,STACK_FRAME_OVERHEAD ··· 1431 1426 /* Radix case, access is outside page table range */ 1432 1427 li r3,-EFAULT 1433 1428 ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) 1429 + #else 1430 + li r3,-EFAULT 1431 + #endif 1434 1432 std r3,RESULT(r1) 1435 1433 addi r3,r1,STACK_FRAME_OVERHEAD 1436 1434 bl do_bad_segment_interrupt ··· 1468 1460 EXC_COMMON_BEGIN(instruction_access_common) 1469 1461 GEN_COMMON instruction_access 1470 1462 addi r3,r1,STACK_FRAME_OVERHEAD 1463 + #ifdef CONFIG_PPC_64S_HASH_MMU 1471 1464 BEGIN_MMU_FTR_SECTION 1472 1465 bl do_hash_fault 1473 1466 MMU_FTR_SECTION_ELSE 1474 1467 bl do_page_fault 1475 1468 ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) 1469 + #else 1470 + bl do_page_fault 1471 + #endif 1476 1472 b interrupt_return_srr 1477 1473 1478 1474 ··· 1506 1494 EXC_VIRT_END(instruction_access_slb, 0x4480, 0x80) 1507 1495 EXC_COMMON_BEGIN(instruction_access_slb_common) 1508 1496 GEN_COMMON instruction_access_slb 1497 + #ifdef CONFIG_PPC_64S_HASH_MMU 1509 1498 BEGIN_MMU_FTR_SECTION 1510 1499 /* HPT case, do SLB fault */ 1511 1500 addi r3,r1,STACK_FRAME_OVERHEAD ··· 1519 1506 /* Radix case, access is outside page table range */ 1520 1507 li r3,-EFAULT 1521 1508 ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) 1509 + #else 1510 + li r3,-EFAULT 1511 + #endif 1522 1512 std r3,RESULT(r1) 1523 1513 addi r3,r1,STACK_FRAME_OVERHEAD 1524 1514 bl do_bad_segment_interrupt
+1 -1
arch/powerpc/kernel/mce.c
··· 586 586 mc_error_class[evt->error_class] : "Unknown"; 587 587 printk("%sMCE: CPU%d: %s\n", level, evt->cpu, subtype); 588 588 589 - #ifdef CONFIG_PPC_BOOK3S_64 589 + #ifdef CONFIG_PPC_64S_HASH_MMU 590 590 /* Display faulty slb contents for SLB errors. */ 591 591 if (evt->error_type == MCE_ERROR_TYPE_SLB && !in_guest) 592 592 slb_dump_contents(local_paca->mce_faulty_slbs);
+7 -3
arch/powerpc/kernel/mce_power.c
··· 77 77 } 78 78 79 79 /* flush SLBs and reload */ 80 - #ifdef CONFIG_PPC_BOOK3S_64 80 + #ifdef CONFIG_PPC_64S_HASH_MMU 81 81 void flush_and_reload_slb(void) 82 82 { 83 83 if (early_radix_enabled()) ··· 99 99 100 100 void flush_erat(void) 101 101 { 102 - #ifdef CONFIG_PPC_BOOK3S_64 102 + #ifdef CONFIG_PPC_64S_HASH_MMU 103 103 if (!early_cpu_has_feature(CPU_FTR_ARCH_300)) { 104 104 flush_and_reload_slb(); 105 105 return; ··· 114 114 115 115 static int mce_flush(int what) 116 116 { 117 - #ifdef CONFIG_PPC_BOOK3S_64 117 + #ifdef CONFIG_PPC_64S_HASH_MMU 118 118 if (what == MCE_FLUSH_SLB) { 119 119 flush_and_reload_slb(); 120 120 return 1; ··· 499 499 /* attempt to correct the error */ 500 500 switch (table[i].error_type) { 501 501 case MCE_ERROR_TYPE_SLB: 502 + #ifdef CONFIG_PPC_64S_HASH_MMU 502 503 if (local_paca->in_mce == 1) 503 504 slb_save_contents(local_paca->mce_faulty_slbs); 505 + #endif 504 506 handled = mce_flush(MCE_FLUSH_SLB); 505 507 break; 506 508 case MCE_ERROR_TYPE_ERAT: ··· 590 588 /* attempt to correct the error */ 591 589 switch (table[i].error_type) { 592 590 case MCE_ERROR_TYPE_SLB: 591 + #ifdef CONFIG_PPC_64S_HASH_MMU 593 592 if (local_paca->in_mce == 1) 594 593 slb_save_contents(local_paca->mce_faulty_slbs); 594 + #endif 595 595 if (mce_flush(MCE_FLUSH_SLB)) 596 596 handled = 1; 597 597 break;
+7 -11
arch/powerpc/kernel/paca.c
··· 139 139 } 140 140 #endif /* CONFIG_PPC_PSERIES */ 141 141 142 - #ifdef CONFIG_PPC_BOOK3S_64 143 - 142 + #ifdef CONFIG_PPC_64S_HASH_MMU 144 143 /* 145 144 * 3 persistent SLBs are allocated here. The buffer will be zero 146 145 * initially, hence will all be invaild until we actually write them. ··· 168 169 169 170 return s; 170 171 } 171 - 172 - #endif /* CONFIG_PPC_BOOK3S_64 */ 172 + #endif /* CONFIG_PPC_64S_HASH_MMU */ 173 173 174 174 #ifdef CONFIG_PPC_PSERIES 175 175 /** ··· 224 226 new_paca->kexec_state = KEXEC_STATE_NONE; 225 227 new_paca->__current = &init_task; 226 228 new_paca->data_offset = 0xfeeeeeeeeeeeeeeeULL; 227 - #ifdef CONFIG_PPC_BOOK3S_64 229 + #ifdef CONFIG_PPC_64S_HASH_MMU 228 230 new_paca->slb_shadow_ptr = NULL; 229 231 #endif 230 232 ··· 305 307 #ifdef CONFIG_PPC_PSERIES 306 308 paca->lppaca_ptr = new_lppaca(cpu, limit); 307 309 #endif 308 - #ifdef CONFIG_PPC_BOOK3S_64 310 + #ifdef CONFIG_PPC_64S_HASH_MMU 309 311 paca->slb_shadow_ptr = new_slb_shadow(cpu, limit); 310 312 #endif 311 313 #ifdef CONFIG_PPC_PSERIES ··· 326 328 paca_nr_cpu_ids = nr_cpu_ids; 327 329 paca_ptrs_size = new_ptrs_size; 328 330 329 - #ifdef CONFIG_PPC_BOOK3S_64 331 + #ifdef CONFIG_PPC_64S_HASH_MMU 330 332 if (early_radix_enabled()) { 331 333 /* Ugly fixup, see new_slb_shadow() */ 332 334 memblock_phys_free(__pa(paca_ptrs[boot_cpuid]->slb_shadow_ptr), ··· 339 341 paca_ptrs_size + paca_struct_size, nr_cpu_ids); 340 342 } 341 343 344 + #ifdef CONFIG_PPC_64S_HASH_MMU 342 345 void copy_mm_to_paca(struct mm_struct *mm) 343 346 { 344 - #ifdef CONFIG_PPC_BOOK3S 345 347 mm_context_t *context = &mm->context; 346 348 347 349 #ifdef CONFIG_PPC_MM_SLICES ··· 354 356 get_paca()->mm_ctx_user_psize = context->user_psize; 355 357 get_paca()->mm_ctx_sllp = context->sllp; 356 358 #endif 357 - #else /* !CONFIG_PPC_BOOK3S */ 358 - return; 359 - #endif 360 359 } 360 + #endif /* CONFIG_PPC_64S_HASH_MMU */
+7 -6
arch/powerpc/kernel/process.c
··· 1240 1240 { 1241 1241 struct thread_struct *new_thread, *old_thread; 1242 1242 struct task_struct *last; 1243 - #ifdef CONFIG_PPC_BOOK3S_64 1243 + #ifdef CONFIG_PPC_64S_HASH_MMU 1244 1244 struct ppc64_tlb_batch *batch; 1245 1245 #endif 1246 1246 ··· 1249 1249 1250 1250 WARN_ON(!irqs_disabled()); 1251 1251 1252 - #ifdef CONFIG_PPC_BOOK3S_64 1252 + #ifdef CONFIG_PPC_64S_HASH_MMU 1253 1253 batch = this_cpu_ptr(&ppc64_tlb_batch); 1254 1254 if (batch->active) { 1255 1255 current_thread_info()->local_flags |= _TLF_LAZY_MMU; ··· 1328 1328 */ 1329 1329 1330 1330 #ifdef CONFIG_PPC_BOOK3S_64 1331 + #ifdef CONFIG_PPC_64S_HASH_MMU 1331 1332 /* 1332 1333 * This applies to a process that was context switched while inside 1333 1334 * arch_enter_lazy_mmu_mode(), to re-activate the batch that was ··· 1340 1339 batch = this_cpu_ptr(&ppc64_tlb_batch); 1341 1340 batch->active = 1; 1342 1341 } 1342 + #endif 1343 1343 1344 1344 /* 1345 1345 * Math facilities are masked out of the child MSR in copy_thread. ··· 1691 1689 1692 1690 static void setup_ksp_vsid(struct task_struct *p, unsigned long sp) 1693 1691 { 1694 - #ifdef CONFIG_PPC_BOOK3S_64 1692 + #ifdef CONFIG_PPC_64S_HASH_MMU 1695 1693 unsigned long sp_vsid; 1696 1694 unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp; 1697 1695 ··· 2335 2333 * the heap, we can put it above 1TB so it is backed by a 1TB 2336 2334 * segment. Otherwise the heap will be in the bottom 1TB 2337 2335 * which always uses 256MB segments and this may result in a 2338 - * performance penalty. We don't need to worry about radix. For 2339 - * radix, mmu_highuser_ssize remains unchanged from 256MB. 2336 + * performance penalty. 2340 2337 */ 2341 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T)) 2338 + if (!radix_enabled() && !is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T)) 2342 2339 base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T); 2343 2340 #endif 2344 2341
+1 -1
arch/powerpc/kernel/prom.c
··· 231 231 ibm_pa_features, ARRAY_SIZE(ibm_pa_features)); 232 232 } 233 233 234 - #ifdef CONFIG_PPC_BOOK3S_64 234 + #ifdef CONFIG_PPC_64S_HASH_MMU 235 235 static void __init init_mmu_slb_size(unsigned long node) 236 236 { 237 237 const __be32 *slb_size_ptr;
+1 -1
arch/powerpc/kernel/setup_64.c
··· 886 886 atom_size = SZ_1M; 887 887 } else if (radix_enabled()) { 888 888 atom_size = PAGE_SIZE; 889 - } else { 889 + } else if (IS_ENABLED(CONFIG_PPC_64S_HASH_MMU)) { 890 890 /* 891 891 * Linear mapping is one of 4K, 1M and 16M. For 4K, no need 892 892 * to group units. For larger mappings, use 1M atom which
+2 -2
arch/powerpc/kexec/core_64.c
··· 378 378 /* NOTREACHED */ 379 379 } 380 380 381 - #ifdef CONFIG_PPC_BOOK3S_64 381 + #ifdef CONFIG_PPC_64S_HASH_MMU 382 382 /* Values we need to export to the second kernel via the device tree. */ 383 383 static unsigned long htab_base; 384 384 static unsigned long htab_size; ··· 420 420 return 0; 421 421 } 422 422 late_initcall(export_htab_values); 423 - #endif /* CONFIG_PPC_BOOK3S_64 */ 423 + #endif /* CONFIG_PPC_64S_HASH_MMU */
+1 -1
arch/powerpc/kexec/ranges.c
··· 296 296 return ret; 297 297 } 298 298 299 - #ifdef CONFIG_PPC_BOOK3S_64 299 + #ifdef CONFIG_PPC_64S_HASH_MMU 300 300 /** 301 301 * add_htab_mem_range - Adds htab range to the given memory ranges list, 302 302 * if it exists
+9 -6
arch/powerpc/mm/book3s64/Makefile
··· 2 2 3 3 ccflags-y := $(NO_MINIMAL_TOC) 4 4 5 + obj-y += mmu_context.o pgtable.o trace.o 6 + ifdef CONFIG_PPC_64S_HASH_MMU 5 7 CFLAGS_REMOVE_slb.o = $(CC_FLAGS_FTRACE) 6 - 7 - obj-y += hash_pgtable.o hash_utils.o slb.o \ 8 - mmu_context.o pgtable.o hash_tlb.o trace.o 8 + obj-y += hash_pgtable.o hash_utils.o hash_tlb.o slb.o 9 9 obj-$(CONFIG_PPC_HASH_MMU_NATIVE) += hash_native.o 10 - obj-$(CONFIG_PPC_RADIX_MMU) += radix_pgtable.o radix_tlb.o 11 10 obj-$(CONFIG_PPC_4K_PAGES) += hash_4k.o 12 11 obj-$(CONFIG_PPC_64K_PAGES) += hash_64k.o 12 + obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += hash_hugepage.o 13 + obj-$(CONFIG_PPC_SUBPAGE_PROT) += subpage_prot.o 14 + endif 15 + 13 16 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o 17 + 18 + obj-$(CONFIG_PPC_RADIX_MMU) += radix_pgtable.o radix_tlb.o 14 19 ifdef CONFIG_HUGETLB_PAGE 15 20 obj-$(CONFIG_PPC_RADIX_MMU) += radix_hugetlbpage.o 16 21 endif 17 - obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += hash_hugepage.o 18 - obj-$(CONFIG_PPC_SUBPAGE_PROT) += subpage_prot.o 19 22 obj-$(CONFIG_SPAPR_TCE_IOMMU) += iommu_api.o 20 23 obj-$(CONFIG_PPC_PKEY) += pkeys.o 21 24
+2
arch/powerpc/mm/book3s64/hugetlbpage.c
··· 16 16 unsigned int hpage_shift; 17 17 EXPORT_SYMBOL(hpage_shift); 18 18 19 + #ifdef CONFIG_PPC_64S_HASH_MMU 19 20 int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid, 20 21 pte_t *ptep, unsigned long trap, unsigned long flags, 21 22 int ssize, unsigned int shift, unsigned int mmu_psize) ··· 123 122 *ptep = __pte(new_pte & ~H_PAGE_BUSY); 124 123 return 0; 125 124 } 125 + #endif 126 126 127 127 pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, 128 128 unsigned long addr, pte_t *ptep)
+26 -6
arch/powerpc/mm/book3s64/mmu_context.c
··· 31 31 return ida_alloc_range(&mmu_context_ida, min_id, max_id, GFP_KERNEL); 32 32 } 33 33 34 + #ifdef CONFIG_PPC_64S_HASH_MMU 34 35 void hash__reserve_context_id(int id) 35 36 { 36 37 int result = ida_alloc_range(&mmu_context_ida, id, id, GFP_KERNEL); ··· 51 50 return alloc_context_id(MIN_USER_CONTEXT, max); 52 51 } 53 52 EXPORT_SYMBOL_GPL(hash__alloc_context_id); 53 + #endif 54 54 55 + #ifdef CONFIG_PPC_64S_HASH_MMU 55 56 static int realloc_context_ids(mm_context_t *ctx) 56 57 { 57 58 int i, id; ··· 153 150 154 151 slb_setup_new_exec(); 155 152 } 153 + #else 154 + static inline int hash__init_new_context(struct mm_struct *mm) 155 + { 156 + BUILD_BUG(); 157 + return 0; 158 + } 159 + #endif 156 160 157 161 static int radix__init_new_context(struct mm_struct *mm) 158 162 { ··· 185 175 */ 186 176 asm volatile("ptesync;isync" : : : "memory"); 187 177 178 + #ifdef CONFIG_PPC_64S_HASH_MMU 188 179 mm->context.hash_context = NULL; 180 + #endif 189 181 190 182 return index; 191 183 } ··· 225 213 226 214 static void destroy_contexts(mm_context_t *ctx) 227 215 { 228 - int index, context_id; 216 + if (radix_enabled()) { 217 + ida_free(&mmu_context_ida, ctx->id); 218 + } else { 219 + #ifdef CONFIG_PPC_64S_HASH_MMU 220 + int index, context_id; 229 221 230 - for (index = 0; index < ARRAY_SIZE(ctx->extended_id); index++) { 231 - context_id = ctx->extended_id[index]; 232 - if (context_id) 233 - ida_free(&mmu_context_ida, context_id); 222 + for (index = 0; index < ARRAY_SIZE(ctx->extended_id); index++) { 223 + context_id = ctx->extended_id[index]; 224 + if (context_id) 225 + ida_free(&mmu_context_ida, context_id); 226 + } 227 + kfree(ctx->hash_context); 228 + #else 229 + BUILD_BUG(); // radix_enabled() should be constant true 230 + #endif 234 231 } 235 - kfree(ctx->hash_context); 236 232 } 237 233 238 234 static void pmd_frag_destroy(void *pmd_frag)
+1 -1
arch/powerpc/mm/book3s64/pgtable.c
··· 529 529 } 530 530 arch_initcall(pgtable_debugfs_setup); 531 531 532 - #ifdef CONFIG_ZONE_DEVICE 532 + #if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_ARCH_HAS_MEMREMAP_COMPAT_ALIGN) 533 533 /* 534 534 * Override the generic version in mm/memremap.c. 535 535 *
+3 -1
arch/powerpc/mm/book3s64/radix_pgtable.c
··· 334 334 u64 i; 335 335 336 336 /* We don't support slb for radix */ 337 - mmu_slb_size = 0; 337 + slb_set_size(0); 338 338 339 339 /* 340 340 * Create the linear mapping ··· 565 565 { 566 566 unsigned long lpcr; 567 567 568 + #ifdef CONFIG_PPC_64S_HASH_MMU 568 569 #ifdef CONFIG_PPC_64K_PAGES 569 570 /* PAGE_SIZE mappings */ 570 571 mmu_virtual_psize = MMU_PAGE_64K; ··· 582 581 mmu_vmemmap_psize = MMU_PAGE_2M; 583 582 } else 584 583 mmu_vmemmap_psize = mmu_virtual_psize; 584 + #endif 585 585 #endif 586 586 /* 587 587 * initialize page table size
+2
arch/powerpc/mm/copro_fault.c
··· 82 82 } 83 83 EXPORT_SYMBOL_GPL(copro_handle_mm_fault); 84 84 85 + #ifdef CONFIG_PPC_64S_HASH_MMU 85 86 int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb) 86 87 { 87 88 u64 vsid, vsidkey; ··· 147 146 cxl_slbia(mm); 148 147 } 149 148 EXPORT_SYMBOL_GPL(copro_flush_all_slbs); 149 + #endif
+1 -1
arch/powerpc/mm/ptdump/Makefile
··· 10 10 11 11 ifdef CONFIG_PTDUMP_DEBUGFS 12 12 obj-$(CONFIG_PPC_BOOK3S_32) += bats.o segment_regs.o 13 - obj-$(CONFIG_PPC_BOOK3S_64) += hashpagetable.o 13 + obj-$(CONFIG_PPC_64S_HASH_MMU) += hashpagetable.o 14 14 endif
+2
arch/powerpc/platforms/powernv/idle.c
··· 491 491 492 492 mtspr(SPRN_SPRG3, local_paca->sprg_vdso); 493 493 494 + #ifdef CONFIG_PPC_64S_HASH_MMU 494 495 /* 495 496 * The SLB has to be restored here, but it sometimes still 496 497 * contains entries, so the __ variant must be used to prevent 497 498 * multi hits. 498 499 */ 499 500 __slb_restore_bolted_realmode(); 501 + #endif 500 502 501 503 return srr1; 502 504 }
+2
arch/powerpc/platforms/powernv/setup.c
··· 211 211 #endif 212 212 add_preferred_console("hvc", 0, NULL); 213 213 214 + #ifdef CONFIG_PPC_64S_HASH_MMU 214 215 if (!radix_enabled()) { 215 216 size_t size = sizeof(struct slb_entry) * mmu_slb_size; 216 217 int i; ··· 224 223 cpu_to_node(i)); 225 224 } 226 225 } 226 + #endif 227 227 } 228 228 229 229 static void __init pnv_init_IRQ(void)
+9 -2
arch/powerpc/platforms/pseries/lpar.c
··· 58 58 EXPORT_SYMBOL(plpar_hcall9); 59 59 EXPORT_SYMBOL(plpar_hcall_norets); 60 60 61 + #ifdef CONFIG_PPC_64S_HASH_MMU 61 62 /* 62 63 * H_BLOCK_REMOVE supported block size for this page size in segment who's base 63 64 * page size is that page size. ··· 67 66 * page size. 68 67 */ 69 68 static int hblkrm_size[MMU_PAGE_COUNT][MMU_PAGE_COUNT] __ro_after_init; 69 + #endif 70 70 71 71 /* 72 72 * Due to the involved complexity, and that the current hypervisor is only ··· 691 689 return; 692 690 } 693 691 694 - #ifdef CONFIG_PPC_BOOK3S_64 692 + #ifdef CONFIG_PPC_64S_HASH_MMU 695 693 /* 696 694 * PAPR says this feature is SLB-Buffer but firmware never 697 695 * reports that. All SPLPAR support SLB shadow buffer. ··· 704 702 "cpu %d (hw %d) of area %lx failed with %ld\n", 705 703 cpu, hwcpu, addr, ret); 706 704 } 707 - #endif /* CONFIG_PPC_BOOK3S_64 */ 705 + #endif /* CONFIG_PPC_64S_HASH_MMU */ 708 706 709 707 /* 710 708 * Register dispatch trace log, if one has been allocated. ··· 741 739 } 742 740 return rc; 743 741 } 742 + 743 + #ifdef CONFIG_PPC_64S_HASH_MMU 744 744 745 745 static long pSeries_lpar_hpte_insert(unsigned long hpte_group, 746 746 unsigned long vpn, unsigned long pa, ··· 1734 1730 if (cpu_has_feature(CPU_FTR_ARCH_300)) 1735 1731 pseries_lpar_register_process_table(0, 0, 0); 1736 1732 } 1733 + #endif /* CONFIG_PPC_64S_HASH_MMU */ 1737 1734 1738 1735 #ifdef CONFIG_PPC_RADIX_MMU 1739 1736 void radix_init_pseries(void) ··· 1937 1932 return rc; 1938 1933 } 1939 1934 1935 + #ifdef CONFIG_PPC_64S_HASH_MMU 1940 1936 static unsigned long vsid_unscramble(unsigned long vsid, int ssize) 1941 1937 { 1942 1938 unsigned long protovsid; ··· 1998 1992 return 0; 1999 1993 } 2000 1994 machine_device_initcall(pseries, reserve_vrma_context_id); 1995 + #endif 2001 1996 2002 1997 #ifdef CONFIG_DEBUG_FS 2003 1998 /* debugfs file interface for vpa data */
+1 -1
arch/powerpc/platforms/pseries/lparcfg.c
··· 531 531 seq_printf(m, "shared_processor_mode=%d\n", 532 532 lppaca_shared_proc(get_lppaca())); 533 533 534 - #ifdef CONFIG_PPC_BOOK3S_64 534 + #ifdef CONFIG_PPC_64S_HASH_MMU 535 535 if (!radix_enabled()) 536 536 seq_printf(m, "slb_size=%d\n", mmu_slb_size); 537 537 #endif
+4
arch/powerpc/platforms/pseries/mobility.c
··· 451 451 452 452 static u16 clamp_slb_size(void) 453 453 { 454 + #ifdef CONFIG_PPC_64S_HASH_MMU 454 455 u16 prev = mmu_slb_size; 455 456 456 457 slb_set_size(SLB_MIN_SIZE); 457 458 458 459 return prev; 460 + #else 461 + return 0; 462 + #endif 459 463 } 460 464 461 465 static int do_suspend(void)
+5
arch/powerpc/platforms/pseries/pseries.h
··· 113 113 114 114 extern u32 pseries_security_flavor; 115 115 void pseries_setup_security_mitigations(void); 116 + 117 + #ifdef CONFIG_PPC_64S_HASH_MMU 116 118 void pseries_lpar_read_hblkrm_characteristics(void); 119 + #else 120 + static inline void pseries_lpar_read_hblkrm_characteristics(void) { } 121 + #endif 117 122 118 123 #endif /* _PSERIES_PSERIES_H */
+2
arch/powerpc/platforms/pseries/ras.c
··· 526 526 disposition = RTAS_DISP_FULLY_RECOVERED; 527 527 break; 528 528 case MC_ERROR_TYPE_SLB: 529 + #ifdef CONFIG_PPC_64S_HASH_MMU 529 530 /* 530 531 * Store the old slb content in paca before flushing. 531 532 * Print this when we go to virtual mode. ··· 539 538 slb_save_contents(local_paca->mce_faulty_slbs); 540 539 flush_and_reload_slb(); 541 540 disposition = RTAS_DISP_FULLY_RECOVERED; 541 + #endif 542 542 break; 543 543 default: 544 544 break;
+4 -2
arch/powerpc/platforms/pseries/setup.c
··· 112 112 u8 *mce_data_buf; 113 113 unsigned int i; 114 114 int nr_cpus = num_possible_cpus(); 115 - #ifdef CONFIG_PPC_BOOK3S_64 115 + #ifdef CONFIG_PPC_64S_HASH_MMU 116 116 struct slb_entry *slb_ptr; 117 117 size_t size; 118 118 #endif ··· 152 152 (RTAS_ERROR_LOG_MAX * i); 153 153 } 154 154 155 - #ifdef CONFIG_PPC_BOOK3S_64 155 + #ifdef CONFIG_PPC_64S_HASH_MMU 156 156 if (!radix_enabled()) { 157 157 /* Allocate per cpu area to save old slb contents during MCE */ 158 158 size = sizeof(struct slb_entry) * mmu_slb_size * nr_cpus; ··· 801 801 fwnmi_init(); 802 802 803 803 pseries_setup_security_mitigations(); 804 + #ifdef CONFIG_PPC_64S_HASH_MMU 804 805 pseries_lpar_read_hblkrm_characteristics(); 806 + #endif 805 807 806 808 /* By default, only probe PCI (can be overridden by rtas_pci) */ 807 809 pci_add_flags(PCI_PROBE_ONLY);
+5 -3
arch/powerpc/xmon/xmon.c
··· 1159 1159 case 'P': 1160 1160 show_tasks(); 1161 1161 break; 1162 - #ifdef CONFIG_PPC_BOOK3S 1162 + #if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_64S_HASH_MMU) 1163 1163 case 'u': 1164 1164 dump_segments(); 1165 1165 break; ··· 2614 2614 static void dump_one_paca(int cpu) 2615 2615 { 2616 2616 struct paca_struct *p; 2617 - #ifdef CONFIG_PPC_BOOK3S_64 2617 + #ifdef CONFIG_PPC_64S_HASH_MMU 2618 2618 int i = 0; 2619 2619 #endif 2620 2620 ··· 2656 2656 DUMP(p, cpu_start, "%#-*x"); 2657 2657 DUMP(p, kexec_state, "%#-*x"); 2658 2658 #ifdef CONFIG_PPC_BOOK3S_64 2659 + #ifdef CONFIG_PPC_64S_HASH_MMU 2659 2660 if (!early_radix_enabled()) { 2660 2661 for (i = 0; i < SLB_NUM_BOLTED; i++) { 2661 2662 u64 esid, vsid; ··· 2684 2683 22, "slb_cache", i, p->slb_cache[i]); 2685 2684 } 2686 2685 } 2686 + #endif 2687 2687 2688 2688 DUMP(p, rfi_flush_fallback_area, "%-*px"); 2689 2689 #endif ··· 3748 3746 printf("%s", after); 3749 3747 } 3750 3748 3751 - #ifdef CONFIG_PPC_BOOK3S_64 3749 + #ifdef CONFIG_PPC_64S_HASH_MMU 3752 3750 void dump_segments(void) 3753 3751 { 3754 3752 int i;
+1 -1
drivers/misc/lkdtm/core.c
··· 182 182 CRASHTYPE(FORTIFIED_SUBOBJECT), 183 183 CRASHTYPE(FORTIFIED_STRSCPY), 184 184 CRASHTYPE(DOUBLE_FAULT), 185 - #ifdef CONFIG_PPC_BOOK3S_64 185 + #ifdef CONFIG_PPC_64S_HASH_MMU 186 186 CRASHTYPE(PPC_SLB_MULTIHIT), 187 187 #endif 188 188 };