Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc/vdso: Replace vdso_base by vdso

All architectures other than s390 use a void pointer named 'vdso'
to reference the VDSO mapping.

In a following patch, the VDSO data page will be put in front of the
VDSO text; vdso_base will then no longer point to the VDSO text.

To avoid confusion between vdso_base and the VDSO text, rename
vdso_base to vdso and make it a void __user *.

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/8e6cefe474aa4ceba028abb729485cd46c140990.1601197618.git.christophe.leroy@csgroup.eu

authored by

Christophe Leroy and committed by
Michael Ellerman
c102f076 526a9c4a

+27 -25
+1 -1
arch/powerpc/include/asm/book3s/32/mmu-hash.h
··· 90 90 91 91 typedef struct { 92 92 unsigned long id; 93 - unsigned long vdso_base; 93 + void __user *vdso; 94 94 } mm_context_t; 95 95 96 96 void update_bats(void);
+1 -1
arch/powerpc/include/asm/book3s/64/mmu.h
··· 111 111 112 112 struct hash_mm_context *hash_context; 113 113 114 - unsigned long vdso_base; 114 + void __user *vdso; 115 115 /* 116 116 * pagetable fragment support 117 117 */
+1 -1
arch/powerpc/include/asm/elf.h
··· 169 169 NEW_AUX_ENT(AT_DCACHEBSIZE, dcache_bsize); \ 170 170 NEW_AUX_ENT(AT_ICACHEBSIZE, icache_bsize); \ 171 171 NEW_AUX_ENT(AT_UCACHEBSIZE, ucache_bsize); \ 172 - VDSO_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso_base); \ 172 + VDSO_AUX_ENT(AT_SYSINFO_EHDR, (unsigned long)current->mm->context.vdso);\ 173 173 ARCH_DLINFO_CACHE_GEOMETRY; \ 174 174 } while (0) 175 175
+4 -2
arch/powerpc/include/asm/mmu_context.h
··· 262 262 static inline void arch_unmap(struct mm_struct *mm, 263 263 unsigned long start, unsigned long end) 264 264 { 265 - if (start <= mm->context.vdso_base && mm->context.vdso_base < end) 266 - mm->context.vdso_base = 0; 265 + unsigned long vdso_base = (unsigned long)mm->context.vdso; 266 + 267 + if (start <= vdso_base && vdso_base < end) 268 + mm->context.vdso = NULL; 267 269 } 268 270 269 271 #ifdef CONFIG_PPC_MEM_KEYS
+1 -1
arch/powerpc/include/asm/nohash/32/mmu-40x.h
··· 57 57 typedef struct { 58 58 unsigned int id; 59 59 unsigned int active; 60 - unsigned long vdso_base; 60 + void __user *vdso; 61 61 } mm_context_t; 62 62 63 63 #endif /* !__ASSEMBLY__ */
+1 -1
arch/powerpc/include/asm/nohash/32/mmu-44x.h
··· 108 108 typedef struct { 109 109 unsigned int id; 110 110 unsigned int active; 111 - unsigned long vdso_base; 111 + void __user *vdso; 112 112 } mm_context_t; 113 113 114 114 /* patch sites */
+1 -1
arch/powerpc/include/asm/nohash/32/mmu-8xx.h
··· 181 181 typedef struct { 182 182 unsigned int id; 183 183 unsigned int active; 184 - unsigned long vdso_base; 184 + void __user *vdso; 185 185 void *pte_frag; 186 186 } mm_context_t; 187 187
+1 -1
arch/powerpc/include/asm/nohash/mmu-book3e.h
··· 238 238 typedef struct { 239 239 unsigned int id; 240 240 unsigned int active; 241 - unsigned long vdso_base; 241 + void __user *vdso; 242 242 } mm_context_t; 243 243 244 244 /* Page size definitions, common between 32 and 64-bit
+4 -4
arch/powerpc/kernel/signal_32.c
··· 801 801 } 802 802 803 803 /* Save user registers on the stack */ 804 - if (vdso32_rt_sigtramp && tsk->mm->context.vdso_base) { 805 - tramp = tsk->mm->context.vdso_base + vdso32_rt_sigtramp; 804 + if (vdso32_rt_sigtramp && tsk->mm->context.vdso) { 805 + tramp = (unsigned long)tsk->mm->context.vdso + vdso32_rt_sigtramp; 806 806 } else { 807 807 tramp = (unsigned long)mctx->mc_pad; 808 808 /* Set up the sigreturn trampoline: li r0,sigret; sc */ ··· 901 901 else 902 902 unsafe_save_user_regs(regs, mctx, tm_mctx, 1, failed); 903 903 904 - if (vdso32_sigtramp && tsk->mm->context.vdso_base) { 905 - tramp = tsk->mm->context.vdso_base + vdso32_sigtramp; 904 + if (vdso32_sigtramp && tsk->mm->context.vdso) { 905 + tramp = (unsigned long)tsk->mm->context.vdso + vdso32_sigtramp; 906 906 } else { 907 907 tramp = (unsigned long)mctx->mc_pad; 908 908 /* Set up the sigreturn trampoline: li r0,sigret; sc */
+2 -2
arch/powerpc/kernel/signal_64.c
··· 854 854 tsk->thread.fp_state.fpscr = 0; 855 855 856 856 /* Set up to return from userspace. */ 857 - if (vdso64_rt_sigtramp && tsk->mm->context.vdso_base) { 858 - regs->nip = tsk->mm->context.vdso_base + vdso64_rt_sigtramp; 857 + if (vdso64_rt_sigtramp && tsk->mm->context.vdso) { 858 + regs->nip = (unsigned long)tsk->mm->context.vdso + vdso64_rt_sigtramp; 859 859 } else { 860 860 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]); 861 861 if (err)
+4 -4
arch/powerpc/kernel/vdso.c
··· 123 123 if (new_size != text_size + PAGE_SIZE) 124 124 return -EINVAL; 125 125 126 - current->mm->context.vdso_base = new_vma->vm_start; 126 + current->mm->context.vdso = (void __user *)new_vma->vm_start; 127 127 128 128 return 0; 129 129 } ··· 198 198 * install_special_mapping or the perf counter mmap tracking code 199 199 * will fail to recognise it as a vDSO. 200 200 */ 201 - current->mm->context.vdso_base = vdso_base; 201 + mm->context.vdso = (void __user *)vdso_base; 202 202 203 203 /* 204 204 * our vma flags don't have VM_WRITE so by default, the process isn't ··· 221 221 struct mm_struct *mm = current->mm; 222 222 int rc; 223 223 224 - mm->context.vdso_base = 0; 224 + mm->context.vdso = NULL; 225 225 226 226 if (!vdso_ready) 227 227 return 0; ··· 231 231 232 232 rc = __arch_setup_additional_pages(bprm, uses_interp); 233 233 if (rc) 234 - mm->context.vdso_base = 0; 234 + mm->context.vdso = NULL; 235 235 236 236 mmap_write_unlock(mm); 237 237 return rc;
+4 -4
arch/powerpc/perf/callchain_32.c
··· 59 59 { 60 60 if (nip == fp + offsetof(struct signal_frame_32, mctx.mc_pad)) 61 61 return 1; 62 - if (vdso32_sigtramp && current->mm->context.vdso_base && 63 - nip == current->mm->context.vdso_base + vdso32_sigtramp) 62 + if (vdso32_sigtramp && current->mm->context.vdso && 63 + nip == (unsigned long)current->mm->context.vdso + vdso32_sigtramp) 64 64 return 1; 65 65 return 0; 66 66 } ··· 70 70 if (nip == fp + offsetof(struct rt_signal_frame_32, 71 71 uc.uc_mcontext.mc_pad)) 72 72 return 1; 73 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base && 74 - nip == current->mm->context.vdso_base + vdso32_rt_sigtramp) 73 + if (vdso32_rt_sigtramp && current->mm->context.vdso && 74 + nip == (unsigned long)current->mm->context.vdso + vdso32_rt_sigtramp) 75 75 return 1; 76 76 return 0; 77 77 }
+2 -2
arch/powerpc/perf/callchain_64.c
··· 68 68 { 69 69 if (nip == fp + offsetof(struct signal_frame_64, tramp)) 70 70 return 1; 71 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base && 72 - nip == current->mm->context.vdso_base + vdso64_rt_sigtramp) 71 + if (vdso64_rt_sigtramp && current->mm->context.vdso && 72 + nip == (unsigned long)current->mm->context.vdso + vdso64_rt_sigtramp) 73 73 return 1; 74 74 return 0; 75 75 }