Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc/slb: Define an enum for the bolted indexes

This patch defines an enum for the three bolted SLB indexes we use.
Switch the functions that take the indexes as an argument to use the
enum.

Signed-off-by: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>

authored by

Anshuman Khandual and committed by
Michael Ellerman
1d15010c 787b393c

+26 -21
+26 -21
arch/powerpc/mm/slb.c
··· 25 25 #include <asm/udbg.h> 26 26 #include <asm/code-patching.h> 27 27 28 + enum slb_index { 29 + LINEAR_INDEX = 0, /* Kernel linear map (0xc000000000000000) */ 30 + VMALLOC_INDEX = 1, /* Kernel virtual map (0xd000000000000000) */ 31 + KSTACK_INDEX = 2, /* Kernel stack map */ 32 + }; 28 33 29 34 extern void slb_allocate_realmode(unsigned long ea); 30 35 extern void slb_allocate_user(unsigned long ea); ··· 46 41 (((ssize) == MMU_SEGSIZE_256M)? ESID_MASK: ESID_MASK_1T) 47 42 48 43 static inline unsigned long mk_esid_data(unsigned long ea, int ssize, 49 - unsigned long entry) 44 + enum slb_index index) 50 45 { 51 - return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | entry; 46 + return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | index; 52 47 } 53 48 54 49 static inline unsigned long mk_vsid_data(unsigned long ea, int ssize, ··· 60 55 61 56 static inline void slb_shadow_update(unsigned long ea, int ssize, 62 57 unsigned long flags, 63 - unsigned long entry) 58 + enum slb_index index) 64 59 { 65 60 /* 66 61 * Clear the ESID first so the entry is not valid while we are 67 62 * updating it. No write barriers are needed here, provided 68 63 * we only update the current CPU's SLB shadow buffer. 
69 64 */ 70 - get_slb_shadow()->save_area[entry].esid = 0; 71 - get_slb_shadow()->save_area[entry].vsid = 65 + get_slb_shadow()->save_area[index].esid = 0; 66 + get_slb_shadow()->save_area[index].vsid = 72 67 cpu_to_be64(mk_vsid_data(ea, ssize, flags)); 73 - get_slb_shadow()->save_area[entry].esid = 74 - cpu_to_be64(mk_esid_data(ea, ssize, entry)); 68 + get_slb_shadow()->save_area[index].esid = 69 + cpu_to_be64(mk_esid_data(ea, ssize, index)); 75 70 } 76 71 77 - static inline void slb_shadow_clear(unsigned long entry) 72 + static inline void slb_shadow_clear(enum slb_index index) 78 73 { 79 - get_slb_shadow()->save_area[entry].esid = 0; 74 + get_slb_shadow()->save_area[index].esid = 0; 80 75 } 81 76 82 77 static inline void create_shadowed_slbe(unsigned long ea, int ssize, 83 78 unsigned long flags, 84 - unsigned long entry) 79 + enum slb_index index) 85 80 { 86 81 /* 87 82 * Updating the shadow buffer before writing the SLB ensures 88 83 * we don't get a stale entry here if we get preempted by PHYP 89 84 * between these two statements. 
90 85 */ 91 - slb_shadow_update(ea, ssize, flags, entry); 86 + slb_shadow_update(ea, ssize, flags, index); 92 87 93 88 asm volatile("slbmte %0,%1" : 94 89 : "r" (mk_vsid_data(ea, ssize, flags)), 95 - "r" (mk_esid_data(ea, ssize, entry)) 90 + "r" (mk_esid_data(ea, ssize, index)) 96 91 : "memory" ); 97 92 } 98 93 ··· 108 103 lflags = SLB_VSID_KERNEL | linear_llp; 109 104 vflags = SLB_VSID_KERNEL | vmalloc_llp; 110 105 111 - ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, 2); 106 + ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, KSTACK_INDEX); 112 107 if ((ksp_esid_data & ~0xfffffffUL) <= PAGE_OFFSET) { 113 108 ksp_esid_data &= ~SLB_ESID_V; 114 109 ksp_vsid_data = 0; 115 - slb_shadow_clear(2); 110 + slb_shadow_clear(KSTACK_INDEX); 116 111 } else { 117 112 /* Update stack entry; others don't change */ 118 - slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, 2); 113 + slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, KSTACK_INDEX); 119 114 ksp_vsid_data = 120 - be64_to_cpu(get_slb_shadow()->save_area[2].vsid); 115 + be64_to_cpu(get_slb_shadow()->save_area[KSTACK_INDEX].vsid); 121 116 } 122 117 123 118 /* We need to do this all in asm, so we're sure we don't touch ··· 156 151 unsigned long vflags; 157 152 158 153 vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp; 159 - slb_shadow_update(VMALLOC_START, mmu_kernel_ssize, vflags, 1); 154 + slb_shadow_update(VMALLOC_START, mmu_kernel_ssize, vflags, VMALLOC_INDEX); 160 155 slb_flush_and_rebolt(); 161 156 } 162 157 ··· 331 326 asm volatile("isync":::"memory"); 332 327 asm volatile("slbmte %0,%0"::"r" (0) : "memory"); 333 328 asm volatile("isync; slbia; isync":::"memory"); 334 - create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, 0); 335 - create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, 1); 329 + create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, LINEAR_INDEX); 330 + create_shadowed_slbe(VMALLOC_START, 
mmu_kernel_ssize, vflags, VMALLOC_INDEX); 336 331 337 332 /* For the boot cpu, we're running on the stack in init_thread_union, 338 333 * which is in the first segment of the linear mapping, and also 339 334 * get_paca()->kstack hasn't been initialized yet. 340 335 * For secondary cpus, we need to bolt the kernel stack entry now. 341 336 */ 342 - slb_shadow_clear(2); 337 + slb_shadow_clear(KSTACK_INDEX); 343 338 if (raw_smp_processor_id() != boot_cpuid && 344 339 (get_paca()->kstack & slb_esid_mask(mmu_kernel_ssize)) > PAGE_OFFSET) 345 340 create_shadowed_slbe(get_paca()->kstack, 346 - mmu_kernel_ssize, lflags, 2); 341 + mmu_kernel_ssize, lflags, KSTACK_INDEX); 347 342 348 343 asm volatile("isync":::"memory"); 349 344 }