Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc/64s: Replace CONFIG_PPC_STD_MMU_64 with CONFIG_PPC_BOOK3S_64

CONFIG_PPC_STD_MMU_64 indicates support for the "standard" powerpc MMU
on 64-bit CPUs. The "standard" MMU refers to the hash page table MMU
found in "server" processors, mainly from IBM.

Currently CONFIG_PPC_STD_MMU_64 is == CONFIG_PPC_BOOK3S_64. While it's
annoying to have two symbols that always have the same value, it's not
quite annoying enough to bother removing one.

However with the arrival of Power9, we now have the situation where
CONFIG_PPC_STD_MMU_64 is enabled, but the kernel is running using the
Radix MMU - *not* the "standard" MMU. So it is now actively confusing
to use it, because it implies that code is disabled or inactive when
the Radix MMU is in use; however, that is not necessarily true.

So s/CONFIG_PPC_STD_MMU_64/CONFIG_PPC_BOOK3S_64/, and do some minor
formatting updates of some of the affected lines.

This will be a pain for backports, but c'est la vie.

Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>

+68 -72
+3 -3
arch/powerpc/Kconfig
··· 334 334 default n 335 335 336 336 config ARCH_SUPPORTS_DEBUG_PAGEALLOC 337 - depends on PPC32 || PPC_STD_MMU_64 337 + depends on PPC32 || PPC_BOOK3S_64 338 338 def_bool y 339 339 340 340 config ARCH_SUPPORTS_UPROBES ··· 721 721 722 722 config PPC_64K_PAGES 723 723 bool "64k page size" 724 - depends on !PPC_FSL_BOOK3E && (44x || PPC_STD_MMU_64 || PPC_BOOK3E_64) 724 + depends on !PPC_FSL_BOOK3E && (44x || PPC_BOOK3S_64 || PPC_BOOK3E_64) 725 725 select HAVE_ARCH_SOFT_DIRTY if PPC_BOOK3S_64 726 726 727 727 config PPC_256K_PAGES ··· 780 780 781 781 config PPC_SUBPAGE_PROT 782 782 bool "Support setting protections for 4k subpages" 783 - depends on PPC_STD_MMU_64 && PPC_64K_PAGES 783 + depends on PPC_BOOK3S_64 && PPC_64K_PAGES 784 784 help 785 785 This option adds support for a system call to allow user programs 786 786 to set access permissions (read/write, readonly, or no access)
+1 -1
arch/powerpc/include/asm/nohash/64/pgtable.h
··· 203 203 if (!huge) 204 204 assert_pte_locked(mm, addr); 205 205 206 - #ifdef CONFIG_PPC_STD_MMU_64 206 + #ifdef CONFIG_PPC_BOOK3S_64 207 207 if (old & _PAGE_HASHPTE) 208 208 hpte_need_flush(mm, addr, ptep, old, huge); 209 209 #endif
+5 -5
arch/powerpc/include/asm/paca.h
··· 91 91 u8 cpu_start; /* At startup, processor spins until */ 92 92 /* this becomes non-zero. */ 93 93 u8 kexec_state; /* set when kexec down has irqs off */ 94 - #ifdef CONFIG_PPC_STD_MMU_64 94 + #ifdef CONFIG_PPC_BOOK3S_64 95 95 struct slb_shadow *slb_shadow_ptr; 96 96 struct dtl_entry *dispatch_log; 97 97 struct dtl_entry *dispatch_log_end; 98 - #endif /* CONFIG_PPC_STD_MMU_64 */ 98 + #endif 99 99 u64 dscr_default; /* per-CPU default DSCR */ 100 100 101 - #ifdef CONFIG_PPC_STD_MMU_64 101 + #ifdef CONFIG_PPC_BOOK3S_64 102 102 /* 103 103 * Now, starting in cacheline 2, the exception save areas 104 104 */ ··· 110 110 u16 vmalloc_sllp; 111 111 u16 slb_cache_ptr; 112 112 u32 slb_cache[SLB_CACHE_ENTRIES]; 113 - #endif /* CONFIG_PPC_STD_MMU_64 */ 113 + #endif /* CONFIG_PPC_BOOK3S_64 */ 114 114 115 115 #ifdef CONFIG_PPC_BOOK3E 116 116 u64 exgen[8] __aligned(0x40); ··· 192 192 struct stop_sprs stop_sprs; 193 193 #endif 194 194 195 - #ifdef CONFIG_PPC_STD_MMU_64 195 + #ifdef CONFIG_PPC_BOOK3S_64 196 196 /* Non-maskable exceptions that are not performance critical */ 197 197 u64 exnmi[EX_SIZE]; /* used for system reset (nmi) */ 198 198 u64 exmc[EX_SIZE]; /* used for machine checks */
+3 -3
arch/powerpc/include/asm/page_64.h
··· 117 117 #endif /* __ASSEMBLY__ */ 118 118 #else 119 119 #define slice_init() 120 - #ifdef CONFIG_PPC_STD_MMU_64 120 + #ifdef CONFIG_PPC_BOOK3S_64 121 121 #define get_slice_psize(mm, addr) ((mm)->context.user_psize) 122 122 #define slice_set_user_psize(mm, psize) \ 123 123 do { \ 124 124 (mm)->context.user_psize = (psize); \ 125 125 (mm)->context.sllp = SLB_VSID_USER | mmu_psize_defs[(psize)].sllp; \ 126 126 } while (0) 127 - #else /* CONFIG_PPC_STD_MMU_64 */ 127 + #else /* !CONFIG_PPC_BOOK3S_64 */ 128 128 #ifdef CONFIG_PPC_64K_PAGES 129 129 #define get_slice_psize(mm, addr) MMU_PAGE_64K 130 130 #else /* CONFIG_PPC_64K_PAGES */ 131 131 #define get_slice_psize(mm, addr) MMU_PAGE_4K 132 132 #endif /* !CONFIG_PPC_64K_PAGES */ 133 133 #define slice_set_user_psize(mm, psize) do { BUG(); } while(0) 134 - #endif /* !CONFIG_PPC_STD_MMU_64 */ 134 + #endif /* CONFIG_PPC_BOOK3S_64 */ 135 135 136 136 #define slice_set_range_psize(mm, start, len, psize) \ 137 137 slice_set_user_psize((mm), (psize))
+1 -1
arch/powerpc/include/asm/pgtable-be-types.h
··· 76 76 * With hash config 64k pages additionally define a bigger "real PTE" type that 77 77 * gathers the "second half" part of the PTE for pseudo 64k pages 78 78 */ 79 - #if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC_STD_MMU_64) 79 + #if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC_BOOK3S_64) 80 80 typedef struct { pte_t pte; unsigned long hidx; } real_pte_t; 81 81 #else 82 82 typedef struct { pte_t pte; } real_pte_t;
+2 -2
arch/powerpc/include/asm/pgtable-types.h
··· 49 49 * With hash config 64k pages additionally define a bigger "real PTE" type that 50 50 * gathers the "second half" part of the PTE for pseudo 64k pages 51 51 */ 52 - #if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC_STD_MMU_64) 52 + #if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC_BOOK3S_64) 53 53 typedef struct { pte_t pte; unsigned long hidx; } real_pte_t; 54 54 #else 55 55 typedef struct { pte_t pte; } real_pte_t; 56 56 #endif 57 57 58 - #ifdef CONFIG_PPC_STD_MMU_64 58 + #ifdef CONFIG_PPC_BOOK3S_64 59 59 #include <asm/cmpxchg.h> 60 60 61 61 static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new)
+1 -1
arch/powerpc/include/asm/tlbflush.h
··· 77 77 flush_tlb_mm(mm); 78 78 } 79 79 80 - #elif defined(CONFIG_PPC_STD_MMU_64) 80 + #elif defined(CONFIG_PPC_BOOK3S_64) 81 81 #include <asm/book3s/64/tlbflush.h> 82 82 #else 83 83 #error Unsupported MMU type
+2 -2
arch/powerpc/kernel/asm-offsets.c
··· 208 208 OFFSET(TCD_ESEL_FIRST, tlb_core_data, esel_first); 209 209 #endif /* CONFIG_PPC_BOOK3E */ 210 210 211 - #ifdef CONFIG_PPC_STD_MMU_64 211 + #ifdef CONFIG_PPC_BOOK3S_64 212 212 OFFSET(PACASLBCACHE, paca_struct, slb_cache); 213 213 OFFSET(PACASLBCACHEPTR, paca_struct, slb_cache_ptr); 214 214 OFFSET(PACAVMALLOCSLLP, paca_struct, vmalloc_sllp); ··· 230 230 OFFSET(LPPACA_DTLIDX, lppaca, dtl_idx); 231 231 OFFSET(LPPACA_YIELDCOUNT, lppaca, yield_count); 232 232 OFFSET(PACA_DTL_RIDX, paca_struct, dtl_ridx); 233 - #endif /* CONFIG_PPC_STD_MMU_64 */ 233 + #endif /* CONFIG_PPC_BOOK3S_64 */ 234 234 OFFSET(PACAEMERGSP, paca_struct, emergency_sp); 235 235 #ifdef CONFIG_PPC_BOOK3S_64 236 236 OFFSET(PACAMCEMERGSP, paca_struct, mc_emergency_sp);
+2 -2
arch/powerpc/kernel/entry_64.S
··· 539 539 std r6,PACACURRENT(r13) /* Set new 'current' */ 540 540 541 541 ld r8,KSP(r4) /* new stack pointer */ 542 - #ifdef CONFIG_PPC_STD_MMU_64 542 + #ifdef CONFIG_PPC_BOOK3S_64 543 543 BEGIN_MMU_FTR_SECTION 544 544 b 2f 545 545 END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX) ··· 588 588 slbmte r7,r0 589 589 isync 590 590 2: 591 - #endif /* CONFIG_PPC_STD_MMU_64 */ 591 + #endif /* CONFIG_PPC_BOOK3S_64 */ 592 592 593 593 CURRENT_THREAD_INFO(r7, r8) /* base of new stack */ 594 594 /* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
+4 -4
arch/powerpc/kernel/exceptions-64s.S
··· 607 607 cmpdi cr5,r11,MSR_RI 608 608 609 609 crset 4*cr0+eq 610 - #ifdef CONFIG_PPC_STD_MMU_64 610 + #ifdef CONFIG_PPC_BOOK3S_64 611 611 BEGIN_MMU_FTR_SECTION 612 612 bl slb_allocate 613 613 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX) ··· 1497 1497 */ 1498 1498 .balign IFETCH_ALIGN_BYTES 1499 1499 do_hash_page: 1500 - #ifdef CONFIG_PPC_STD_MMU_64 1500 + #ifdef CONFIG_PPC_BOOK3S_64 1501 1501 lis r0,DSISR_BAD_FAULT_64S@h 1502 1502 ori r0,r0,DSISR_BAD_FAULT_64S@l 1503 1503 and. r0,r4,r0 /* weird error? */ ··· 1528 1528 1529 1529 /* Reload DSISR into r4 for the DABR check below */ 1530 1530 ld r4,_DSISR(r1) 1531 - #endif /* CONFIG_PPC_STD_MMU_64 */ 1531 + #endif /* CONFIG_PPC_BOOK3S_64 */ 1532 1532 1533 1533 /* Here we have a page fault that hash_page can't handle. */ 1534 1534 handle_page_fault: ··· 1557 1557 12: b ret_from_except_lite 1558 1558 1559 1559 1560 - #ifdef CONFIG_PPC_STD_MMU_64 1560 + #ifdef CONFIG_PPC_BOOK3S_64 1561 1561 /* We have a page fault that hash_page could handle but HV refused 1562 1562 * the PTE insertion 1563 1563 */
+2 -2
arch/powerpc/kernel/machine_kexec_64.c
··· 360 360 /* NOTREACHED */ 361 361 } 362 362 363 - #ifdef CONFIG_PPC_STD_MMU_64 363 + #ifdef CONFIG_PPC_BOOK3S_64 364 364 /* Values we need to export to the second kernel via the device tree. */ 365 365 static unsigned long htab_base; 366 366 static unsigned long htab_size; ··· 402 402 return 0; 403 403 } 404 404 late_initcall(export_htab_values); 405 - #endif /* CONFIG_PPC_STD_MMU_64 */ 405 + #endif /* CONFIG_PPC_BOOK3S_64 */
+2 -2
arch/powerpc/kernel/mce_power.c
··· 168 168 169 169 170 170 /* flush SLBs and reload */ 171 - #ifdef CONFIG_PPC_STD_MMU_64 171 + #ifdef CONFIG_PPC_BOOK3S_64 172 172 static void flush_and_reload_slb(void) 173 173 { 174 174 struct slb_shadow *slb; ··· 215 215 216 216 static int mce_flush(int what) 217 217 { 218 - #ifdef CONFIG_PPC_STD_MMU_64 218 + #ifdef CONFIG_PPC_BOOK3S_64 219 219 if (what == MCE_FLUSH_SLB) { 220 220 flush_and_reload_slb(); 221 221 return 1;
+6 -6
arch/powerpc/kernel/paca.c
··· 90 90 91 91 #endif /* CONFIG_PPC_BOOK3S */ 92 92 93 - #ifdef CONFIG_PPC_STD_MMU_64 93 + #ifdef CONFIG_PPC_BOOK3S_64 94 94 95 95 /* 96 96 * 3 persistent SLBs are registered here. The buffer will be zero ··· 135 135 return s; 136 136 } 137 137 138 - #else /* CONFIG_PPC_STD_MMU_64 */ 138 + #else /* !CONFIG_PPC_BOOK3S_64 */ 139 139 140 140 static void __init allocate_slb_shadows(int nr_cpus, int limit) { } 141 141 142 - #endif /* CONFIG_PPC_STD_MMU_64 */ 142 + #endif /* CONFIG_PPC_BOOK3S_64 */ 143 143 144 144 /* The Paca is an array with one entry per processor. Each contains an 145 145 * lppaca, which contains the information shared between the ··· 170 170 new_paca->kexec_state = KEXEC_STATE_NONE; 171 171 new_paca->__current = &init_task; 172 172 new_paca->data_offset = 0xfeeeeeeeeeeeeeeeULL; 173 - #ifdef CONFIG_PPC_STD_MMU_64 173 + #ifdef CONFIG_PPC_BOOK3S_64 174 174 new_paca->slb_shadow_ptr = init_slb_shadow(cpu); 175 - #endif /* CONFIG_PPC_STD_MMU_64 */ 175 + #endif 176 176 177 177 #ifdef CONFIG_PPC_BOOK3E 178 178 /* For now -- if we have threads this will be adjusted later */ ··· 271 271 get_paca()->mm_ctx_user_psize = context->user_psize; 272 272 get_paca()->mm_ctx_sllp = context->sllp; 273 273 #endif 274 - #else /* CONFIG_PPC_BOOK3S */ 274 + #else /* !CONFIG_PPC_BOOK3S */ 275 275 return; 276 276 #endif 277 277 }
+2 -2
arch/powerpc/kernel/pci_64.c
··· 90 90 * to do an appropriate TLB flush here too 91 91 */ 92 92 if (bus->self) { 93 - #ifdef CONFIG_PPC_STD_MMU_64 93 + #ifdef CONFIG_PPC_BOOK3S_64 94 94 struct resource *res = bus->resource[0]; 95 95 #endif 96 96 97 97 pr_debug("IO unmapping for PCI-PCI bridge %s\n", 98 98 pci_name(bus->self)); 99 99 100 - #ifdef CONFIG_PPC_STD_MMU_64 100 + #ifdef CONFIG_PPC_BOOK3S_64 101 101 __flush_hash_table_range(&init_mm, res->start + _IO_BASE, 102 102 res->end + _IO_BASE + 1); 103 103 #endif
+6 -6
arch/powerpc/kernel/process.c
··· 1164 1164 } 1165 1165 #endif /* CONFIG_PPC64 */ 1166 1166 1167 - #ifdef CONFIG_PPC_STD_MMU_64 1167 + #ifdef CONFIG_PPC_BOOK3S_64 1168 1168 batch = this_cpu_ptr(&ppc64_tlb_batch); 1169 1169 if (batch->active) { 1170 1170 current_thread_info()->local_flags |= _TLF_LAZY_MMU; ··· 1172 1172 __flush_tlb_pending(batch); 1173 1173 batch->active = 0; 1174 1174 } 1175 - #endif /* CONFIG_PPC_STD_MMU_64 */ 1175 + #endif /* CONFIG_PPC_BOOK3S_64 */ 1176 1176 1177 1177 #ifdef CONFIG_PPC_ADV_DEBUG_REGS 1178 1178 switch_booke_debug_regs(&new->thread.debug); ··· 1218 1218 1219 1219 last = _switch(old_thread, new_thread); 1220 1220 1221 - #ifdef CONFIG_PPC_STD_MMU_64 1221 + #ifdef CONFIG_PPC_BOOK3S_64 1222 1222 if (current_thread_info()->local_flags & _TLF_LAZY_MMU) { 1223 1223 current_thread_info()->local_flags &= ~_TLF_LAZY_MMU; 1224 1224 batch = this_cpu_ptr(&ppc64_tlb_batch); ··· 1247 1247 : : "r"(dummy_copy_buffer), "r"(0)); 1248 1248 } 1249 1249 } 1250 - #endif /* CONFIG_PPC_STD_MMU_64 */ 1250 + #endif /* CONFIG_PPC_BOOK3S_64 */ 1251 1251 1252 1252 return last; 1253 1253 } ··· 1476 1476 1477 1477 static void setup_ksp_vsid(struct task_struct *p, unsigned long sp) 1478 1478 { 1479 - #ifdef CONFIG_PPC_STD_MMU_64 1479 + #ifdef CONFIG_PPC_BOOK3S_64 1480 1480 unsigned long sp_vsid; 1481 1481 unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp; 1482 1482 ··· 2056 2056 unsigned long base = mm->brk; 2057 2057 unsigned long ret; 2058 2058 2059 - #ifdef CONFIG_PPC_STD_MMU_64 2059 + #ifdef CONFIG_PPC_BOOK3S_64 2060 2060 /* 2061 2061 * If we are using 1TB segments and we are allowed to randomise 2062 2062 * the heap, we can put it above 1TB so it is backed by a 1TB
+1 -1
arch/powerpc/kernel/prom.c
··· 229 229 ibm_pa_features, ARRAY_SIZE(ibm_pa_features)); 230 230 } 231 231 232 - #ifdef CONFIG_PPC_STD_MMU_64 232 + #ifdef CONFIG_PPC_BOOK3S_64 233 233 static void __init init_mmu_slb_size(unsigned long node) 234 234 { 235 235 const __be32 *slb_size_ptr;
+2 -2
arch/powerpc/kernel/setup-common.c
··· 773 773 static __init void print_system_info(void) 774 774 { 775 775 pr_info("-----------------------------------------------------\n"); 776 - #ifdef CONFIG_PPC_STD_MMU_64 776 + #ifdef CONFIG_PPC_BOOK3S_64 777 777 pr_info("ppc64_pft_size = 0x%llx\n", ppc64_pft_size); 778 778 #endif 779 779 #ifdef CONFIG_PPC_STD_MMU_32 ··· 800 800 pr_info("firmware_features = 0x%016lx\n", powerpc_firmware_features); 801 801 #endif 802 802 803 - #ifdef CONFIG_PPC_STD_MMU_64 803 + #ifdef CONFIG_PPC_BOOK3S_64 804 804 if (htab_address) 805 805 pr_info("htab_address = 0x%p\n", htab_address); 806 806 if (htab_hash_mask)
+3 -3
arch/powerpc/mm/Makefile
··· 14 14 obj-$(CONFIG_PPC_BOOK3E) += tlb_low_$(BITS)e.o 15 15 hash64-$(CONFIG_PPC_NATIVE) := hash_native_64.o 16 16 obj-$(CONFIG_PPC_BOOK3E_64) += pgtable-book3e.o 17 - obj-$(CONFIG_PPC_STD_MMU_64) += pgtable-hash64.o hash_utils_64.o slb_low.o slb.o $(hash64-y) mmu_context_book3s64.o pgtable-book3s64.o 17 + obj-$(CONFIG_PPC_BOOK3S_64) += pgtable-hash64.o hash_utils_64.o slb_low.o slb.o $(hash64-y) mmu_context_book3s64.o pgtable-book3s64.o 18 18 obj-$(CONFIG_PPC_RADIX_MMU) += pgtable-radix.o tlb-radix.o 19 19 obj-$(CONFIG_PPC_STD_MMU_32) += ppc_mmu_32.o hash_low_32.o mmu_context_hash32.o 20 20 obj-$(CONFIG_PPC_STD_MMU) += tlb_hash$(BITS).o 21 - ifeq ($(CONFIG_PPC_STD_MMU_64),y) 21 + ifeq ($(CONFIG_PPC_BOOK3S_64),y) 22 22 obj-$(CONFIG_PPC_4K_PAGES) += hash64_4k.o 23 23 obj-$(CONFIG_PPC_64K_PAGES) += hash64_64k.o 24 24 endif ··· 31 31 obj-$(CONFIG_PPC_MM_SLICES) += slice.o 32 32 obj-y += hugetlbpage.o 33 33 ifeq ($(CONFIG_HUGETLB_PAGE),y) 34 - obj-$(CONFIG_PPC_STD_MMU_64) += hugetlbpage-hash64.o 34 + obj-$(CONFIG_PPC_BOOK3S_64) += hugetlbpage-hash64.o 35 35 obj-$(CONFIG_PPC_RADIX_MMU) += hugetlbpage-radix.o 36 36 obj-$(CONFIG_PPC_BOOK3E_MMU) += hugetlbpage-book3e.o 37 37 endif
+1 -1
arch/powerpc/mm/dump_hashpagetable.c
··· 500 500 address_markers[6].start_address = PHB_IO_END; 501 501 address_markers[7].start_address = IOREMAP_BASE; 502 502 address_markers[8].start_address = IOREMAP_END; 503 - #ifdef CONFIG_PPC_STD_MMU_64 503 + #ifdef CONFIG_PPC_BOOK3S_64 504 504 address_markers[9].start_address = H_VMEMMAP_BASE; 505 505 #else 506 506 address_markers[9].start_address = VMEMMAP_BASE;
+5 -5
arch/powerpc/mm/dump_linuxpagetables.c
··· 112 112 113 113 static const struct flag_info flag_array[] = { 114 114 { 115 - #ifdef CONFIG_PPC_STD_MMU_64 115 + #ifdef CONFIG_PPC_BOOK3S_64 116 116 .mask = _PAGE_PRIVILEGED, 117 117 .val = 0, 118 118 #else ··· 147 147 .set = "present", 148 148 .clear = " ", 149 149 }, { 150 - #ifdef CONFIG_PPC_STD_MMU_64 150 + #ifdef CONFIG_PPC_BOOK3S_64 151 151 .mask = H_PAGE_HASHPTE, 152 152 .val = H_PAGE_HASHPTE, 153 153 #else ··· 157 157 .set = "hpte", 158 158 .clear = " ", 159 159 }, { 160 - #ifndef CONFIG_PPC_STD_MMU_64 160 + #ifndef CONFIG_PPC_BOOK3S_64 161 161 .mask = _PAGE_GUARDED, 162 162 .val = _PAGE_GUARDED, 163 163 .set = "guarded", ··· 174 174 .set = "accessed", 175 175 .clear = " ", 176 176 }, { 177 - #ifndef CONFIG_PPC_STD_MMU_64 177 + #ifndef CONFIG_PPC_BOOK3S_64 178 178 .mask = _PAGE_WRITETHRU, 179 179 .val = _PAGE_WRITETHRU, 180 180 .set = "write through", ··· 450 450 address_markers[i++].start_address = PHB_IO_END; 451 451 address_markers[i++].start_address = IOREMAP_BASE; 452 452 address_markers[i++].start_address = IOREMAP_END; 453 - #ifdef CONFIG_PPC_STD_MMU_64 453 + #ifdef CONFIG_PPC_BOOK3S_64 454 454 address_markers[i++].start_address = H_VMEMMAP_BASE; 455 455 #else 456 456 address_markers[i++].start_address = VMEMMAP_BASE;
+4 -4
arch/powerpc/mm/init_64.c
··· 68 68 69 69 #include "mmu_decl.h" 70 70 71 - #ifdef CONFIG_PPC_STD_MMU_64 71 + #ifdef CONFIG_PPC_BOOK3S_64 72 72 #if H_PGTABLE_RANGE > USER_VSID_RANGE 73 73 #warning Limited user VSID range means pagetable space is wasted 74 74 #endif 75 - #endif /* CONFIG_PPC_STD_MMU_64 */ 75 + #endif /* CONFIG_PPC_BOOK3S_64 */ 76 76 77 77 phys_addr_t memstart_addr = ~0; 78 78 EXPORT_SYMBOL_GPL(memstart_addr); ··· 367 367 368 368 #endif /* CONFIG_SPARSEMEM_VMEMMAP */ 369 369 370 - #ifdef CONFIG_PPC_STD_MMU_64 370 + #ifdef CONFIG_PPC_BOOK3S_64 371 371 static bool disable_radix; 372 372 static int __init parse_disable_radix(char *p) 373 373 { ··· 444 444 else 445 445 hash__early_init_devtree(); 446 446 } 447 - #endif /* CONFIG_PPC_STD_MMU_64 */ 447 + #endif /* CONFIG_PPC_BOOK3S_64 */
+1 -1
arch/powerpc/mm/pgtable_64.c
··· 57 57 58 58 #include "mmu_decl.h" 59 59 60 - #ifdef CONFIG_PPC_STD_MMU_64 60 + #ifdef CONFIG_PPC_BOOK3S_64 61 61 #if TASK_SIZE_USER64 > (1UL << (ESID_BITS + SID_SHIFT)) 62 62 #error TASK_SIZE_USER64 exceeds user VSID range 63 63 #endif
+1 -5
arch/powerpc/platforms/Kconfig.cputype
··· 294 294 def_bool y 295 295 depends on PPC_STD_MMU && PPC32 296 296 297 - config PPC_STD_MMU_64 298 - def_bool y 299 - depends on PPC_STD_MMU && PPC64 300 - 301 297 config PPC_RADIX_MMU 302 298 bool "Radix MMU Support" 303 299 depends on PPC_BOOK3S_64 ··· 319 323 320 324 config PPC_MM_SLICES 321 325 bool 322 - default y if PPC_STD_MMU_64 326 + default y if PPC_BOOK3S_64 323 327 default n 324 328 325 329 config PPC_HAVE_PMU_SUPPORT
+4 -4
arch/powerpc/platforms/pseries/lpar.c
··· 93 93 return; 94 94 } 95 95 96 - #ifdef CONFIG_PPC_STD_MMU_64 96 + #ifdef CONFIG_PPC_BOOK3S_64 97 97 /* 98 98 * PAPR says this feature is SLB-Buffer but firmware never 99 99 * reports that. All SPLPAR support SLB shadow buffer. ··· 106 106 "cpu %d (hw %d) of area %lx failed with %ld\n", 107 107 cpu, hwcpu, addr, ret); 108 108 } 109 - #endif /* CONFIG_PPC_STD_MMU_64 */ 109 + #endif /* CONFIG_PPC_BOOK3S_64 */ 110 110 111 111 /* 112 112 * Register dispatch trace log, if one has been allocated. ··· 129 129 } 130 130 } 131 131 132 - #ifdef CONFIG_PPC_STD_MMU_64 132 + #ifdef CONFIG_PPC_BOOK3S_64 133 133 134 134 static long pSeries_lpar_hpte_insert(unsigned long hpte_group, 135 135 unsigned long vpn, unsigned long pa, ··· 824 824 EXPORT_SYMBOL(arch_free_page); 825 825 826 826 #endif /* CONFIG_PPC_SMLPAR */ 827 - #endif /* CONFIG_PPC_STD_MMU_64 */ 827 + #endif /* CONFIG_PPC_BOOK3S_64 */ 828 828 829 829 #ifdef CONFIG_TRACEPOINTS 830 830 #ifdef HAVE_JUMP_LABEL
+1 -1
arch/powerpc/platforms/pseries/lparcfg.c
··· 485 485 seq_printf(m, "shared_processor_mode=%d\n", 486 486 lppaca_shared_proc(get_lppaca())); 487 487 488 - #ifdef CONFIG_PPC_STD_MMU_64 488 + #ifdef CONFIG_PPC_BOOK3S_64 489 489 seq_printf(m, "slb_size=%d\n", mmu_slb_size); 490 490 #endif 491 491 parse_em_data(m);
+3 -3
arch/powerpc/xmon/xmon.c
··· 2312 2312 static void dump_one_paca(int cpu) 2313 2313 { 2314 2314 struct paca_struct *p; 2315 - #ifdef CONFIG_PPC_STD_MMU_64 2315 + #ifdef CONFIG_PPC_BOOK3S_64 2316 2316 int i = 0; 2317 2317 #endif 2318 2318 ··· 2353 2353 DUMP(p, hw_cpu_id, "x"); 2354 2354 DUMP(p, cpu_start, "x"); 2355 2355 DUMP(p, kexec_state, "x"); 2356 - #ifdef CONFIG_PPC_STD_MMU_64 2356 + #ifdef CONFIG_PPC_BOOK3S_64 2357 2357 for (i = 0; i < SLB_NUM_BOLTED; i++) { 2358 2358 u64 esid, vsid; 2359 2359 ··· 3263 3263 printf("%s", after); 3264 3264 } 3265 3265 3266 - #ifdef CONFIG_PPC_STD_MMU_64 3266 + #ifdef CONFIG_PPC_BOOK3S_64 3267 3267 void dump_segments(void) 3268 3268 { 3269 3269 int i;