Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

vDSO for sparc

Following patch is based on work done by Nick Alcock on 64-bit vDSO for sparc
in Oracle linux. I have extended it to include support for 32-bit vDSO for sparc
on 64-bit kernel.

vDSO for sparc is based on the X86 implementation. This patch
provides vDSO support for both 64-bit and 32-bit programs on 64-bit kernel.
vDSO will be disabled on 32-bit linux kernel on sparc.

*) vclock_gettime.c contains all the vdso functions. Since the data page is
mapped before the vdso code page, the pointer to the data page is obtained by
subtracting an offset from an address in the vdso code page. The return
address stored in %i7 is used for this purpose.
*) During compilation, both 32-bit and 64-bit vdso images are compiled and are
converted into raw bytes by vdso2c program to be ready for mapping into the
process. 32-bit images are compiled only if CONFIG_COMPAT is enabled. vdso2c
generates two files, vdso-image-64.c and vdso-image-32.c, which contain the
respective vDSO images as C structures.
*) During vdso initialization, required number of vdso pages are allocated and
raw bytes are copied into the pages.
*) During every exec, these pages are mapped into the process through
arch_setup_additional_pages and the location of mapping is passed on to the
process through aux vector AT_SYSINFO_EHDR which is used by glibc.
*) A new update_vsyscall routine for sparc is added to keep the data page in
vdso updated.
*) As vDSO cannot contain dynamically relocatable references, a new version of
cpu_relax is added for the use of vDSO.

This change also requires a corresponding change in glibc to make use of the
vDSO. For testing, programs that want to try the vDSO can be compiled against
the generated vdso(64/32).so in the source tree.

Testing:

========
[root@localhost ~]# cat vdso_test.c
int main() {
struct timespec tv_start, tv_end;
struct timeval tv_tmp;
int i;
int count = 1 * 1000 * 10000;
long long diff;

clock_gettime(0, &tv_start);
for (i = 0; i < count; i++)
gettimeofday(&tv_tmp, NULL);
clock_gettime(0, &tv_end);
diff = (long long)(tv_end.tv_sec -
tv_start.tv_sec)*(1*1000*1000*1000);
diff += (tv_end.tv_nsec - tv_start.tv_nsec);
printf("Start sec: %d\n", tv_start.tv_sec);
printf("End sec : %d\n", tv_end.tv_sec);
printf("%d cycles in %lld ns = %f ns/cycle\n", count, diff,
(double)diff / (double)count);
return 0;
}

[root@localhost ~]# cc vdso_test.c -o t32_without_fix -m32 -lrt
[root@localhost ~]# ./t32_without_fix
Start sec: 1502396130
End sec : 1502396140
10000000 cycles in 9565148528 ns = 956.514853 ns/cycle
[root@localhost ~]# cc vdso_test.c -o t32_with_fix -m32 ./vdso32.so.dbg
[root@localhost ~]# ./t32_with_fix
Start sec: 1502396168
End sec : 1502396169
10000000 cycles in 798141262 ns = 79.814126 ns/cycle
[root@localhost ~]# cc vdso_test.c -o t64_without_fix -m64 -lrt
[root@localhost ~]# ./t64_without_fix
Start sec: 1502396208
End sec : 1502396218
10000000 cycles in 9846091800 ns = 984.609180 ns/cycle
[root@localhost ~]# cc vdso_test.c -o t64_with_fix -m64 ./vdso64.so.dbg
[root@localhost ~]# ./t64_with_fix
Start sec: 1502396257
End sec : 1502396257
10000000 cycles in 380984048 ns = 38.098405 ns/cycle

V1 to V2 Changes:
=================
Added hot patching code to switch the read stick instruction to read
tick instruction based on the hardware.

V2 to V3 Changes:
=================
Merged latest changes from sparc-next and moved the initialization
of clocksource_tick.archdata.vclock_mode to time_init_early. Disabled
queued spinlock and rwlock configuration when simulating 32-bit config
to compile 32-bit VDSO.

V3 to V4 Changes:
=================
Hardcoded the page size as 8192 in linker script for both 64-bit and
32-bit binaries. Removed unused variables in vdso2c.h. Added -mv8plus flag to
Makefile to prevent the generation of relocation entries for __lshrdi3 in 32-bit
vdso binary.

Signed-off-by: Nick Alcock <nick.alcock@oracle.com>
Signed-off-by: Nagarathnam Muthusamy <nagarathnam.muthusamy@oracle.com>
Reviewed-by: Shannon Nelson <shannon.nelson@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Nagarathnam Muthusamy and committed by
David S. Miller
9a08862a 23198ddf

+1494 -2
+1
arch/sparc/Kbuild
··· 7 7 obj-y += math-emu/ 8 8 obj-y += net/ 9 9 obj-y += crypto/ 10 + obj-$(CONFIG_SPARC64) += vdso/
+2
arch/sparc/Kconfig
··· 85 85 select HAVE_REGS_AND_STACK_ACCESS_API 86 86 select ARCH_USE_QUEUED_RWLOCKS 87 87 select ARCH_USE_QUEUED_SPINLOCKS 88 + select GENERIC_TIME_VSYSCALL 89 + select ARCH_CLOCKSOURCE_DATA 88 90 89 91 config ARCH_DEFCONFIG 90 92 string
+4
arch/sparc/Makefile
··· 80 80 archclean: 81 81 $(Q)$(MAKE) $(clean)=$(boot) 82 82 83 + PHONY += vdso_install 84 + vdso_install: 85 + $(Q)$(MAKE) $(build)=arch/sparc/vdso $@ 86 + 83 87 # This is the image used for packaging 84 88 KBUILD_IMAGE := $(boot)/zImage 85 89
+17
arch/sparc/include/asm/clocksource.h
··· 1 + /* 2 + * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved. 3 + */ 4 + 5 + #ifndef _ASM_SPARC_CLOCKSOURCE_H 6 + #define _ASM_SPARC_CLOCKSOURCE_H 7 + 8 + /* VDSO clocksources */ 9 + #define VCLOCK_NONE 0 /* Nothing userspace can do. */ 10 + #define VCLOCK_TICK 1 /* Use %tick. */ 11 + #define VCLOCK_STICK 2 /* Use %stick. */ 12 + 13 + struct arch_clocksource_data { 14 + int vclock_mode; 15 + }; 16 + 17 + #endif /* _ASM_SPARC_CLOCKSOURCE_H */
+14
arch/sparc/include/asm/elf_64.h
··· 210 210 (current->personality & (~PER_MASK))); \ 211 211 } while (0) 212 212 213 + extern unsigned int vdso_enabled; 214 + 215 + #define ARCH_DLINFO \ 216 + do { \ 217 + if (vdso_enabled) \ 218 + NEW_AUX_ENT(AT_SYSINFO_EHDR, \ 219 + (unsigned long)current->mm->context.vdso); \ 220 + } while (0) 221 + 222 + struct linux_binprm; 223 + 224 + #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 225 + extern int arch_setup_additional_pages(struct linux_binprm *bprm, 226 + int uses_interp); 213 227 #endif /* !(__ASM_SPARC64_ELF_H) */
+1
arch/sparc/include/asm/mmu_64.h
··· 96 96 unsigned long thp_pte_count; 97 97 struct tsb_config tsb_block[MM_NUM_TSBS]; 98 98 struct hv_tsb_descr tsb_descr[MM_NUM_TSBS]; 99 + void *vdso; 99 100 } mm_context_t; 100 101 101 102 #endif /* !__ASSEMBLY__ */
+8
arch/sparc/include/asm/processor_64.h
··· 199 199 * To make a long story short, we are trying to yield the current cpu 200 200 * strand during busy loops. 201 201 */ 202 + #ifdef BUILD_VDSO 203 + #define cpu_relax() asm volatile("\n99:\n\t" \ 204 + "rd %%ccr, %%g0\n\t" \ 205 + "rd %%ccr, %%g0\n\t" \ 206 + "rd %%ccr, %%g0\n\t" \ 207 + ::: "memory") 208 + #else /* ! BUILD_VDSO */ 202 209 #define cpu_relax() asm volatile("\n99:\n\t" \ 203 210 "rd %%ccr, %%g0\n\t" \ 204 211 "rd %%ccr, %%g0\n\t" \ ··· 217 210 "nop\n\t" \ 218 211 ".previous" \ 219 212 ::: "memory") 213 + #endif 220 214 221 215 /* Prefetch support. This is tuned for UltraSPARC-III and later. 222 216 * UltraSPARC-I will treat these as nops, and UltraSPARC-II has
+24
arch/sparc/include/asm/vdso.h
··· 1 + /* 2 + * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved. 3 + */ 4 + 5 + #ifndef _ASM_SPARC_VDSO_H 6 + #define _ASM_SPARC_VDSO_H 7 + 8 + struct vdso_image { 9 + void *data; 10 + unsigned long size; /* Always a multiple of PAGE_SIZE */ 11 + long sym_vvar_start; /* Negative offset to the vvar area */ 12 + long sym_vread_tick; /* Start of vread_tick section */ 13 + long sym_vread_tick_patch_start; /* Start of tick read */ 14 + long sym_vread_tick_patch_end; /* End of tick read */ 15 + }; 16 + 17 + #ifdef CONFIG_SPARC64 18 + extern const struct vdso_image vdso_image_64_builtin; 19 + #endif 20 + #ifdef CONFIG_COMPAT 21 + extern const struct vdso_image vdso_image_32_builtin; 22 + #endif 23 + 24 + #endif /* _ASM_SPARC_VDSO_H */
+74
arch/sparc/include/asm/vvar.h
··· 1 + /* 2 + * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved. 3 + */ 4 + 5 + #ifndef _ASM_SPARC_VVAR_DATA_H 6 + #define _ASM_SPARC_VVAR_DATA_H 7 + 8 + #include <asm/clocksource.h> 9 + #include <linux/seqlock.h> 10 + #include <linux/time.h> 11 + #include <linux/types.h> 12 + 13 + struct vvar_data { 14 + unsigned int seq; 15 + 16 + int vclock_mode; 17 + struct { /* extract of a clocksource struct */ 18 + u64 cycle_last; 19 + u64 mask; 20 + int mult; 21 + int shift; 22 + } clock; 23 + /* open coded 'struct timespec' */ 24 + u64 wall_time_sec; 25 + u64 wall_time_snsec; 26 + u64 monotonic_time_snsec; 27 + u64 monotonic_time_sec; 28 + u64 monotonic_time_coarse_sec; 29 + u64 monotonic_time_coarse_nsec; 30 + u64 wall_time_coarse_sec; 31 + u64 wall_time_coarse_nsec; 32 + 33 + int tz_minuteswest; 34 + int tz_dsttime; 35 + }; 36 + 37 + extern struct vvar_data *vvar_data; 38 + extern int vdso_fix_stick; 39 + 40 + static inline unsigned int vvar_read_begin(const struct vvar_data *s) 41 + { 42 + unsigned int ret; 43 + 44 + repeat: 45 + ret = READ_ONCE(s->seq); 46 + if (unlikely(ret & 1)) { 47 + cpu_relax(); 48 + goto repeat; 49 + } 50 + smp_rmb(); /* Finish all reads before we return seq */ 51 + return ret; 52 + } 53 + 54 + static inline int vvar_read_retry(const struct vvar_data *s, 55 + unsigned int start) 56 + { 57 + smp_rmb(); /* Finish all reads before checking the value of seq */ 58 + return unlikely(s->seq != start); 59 + } 60 + 61 + static inline void vvar_write_begin(struct vvar_data *s) 62 + { 63 + ++s->seq; 64 + smp_wmb(); /* Makes sure that increment of seq is reflected */ 65 + } 66 + 67 + static inline void vvar_write_end(struct vvar_data *s) 68 + { 69 + smp_wmb(); /* Makes the value of seq current before we increment */ 70 + ++s->seq; 71 + } 72 + 73 + 74 + #endif /* _ASM_SPARC_VVAR_DATA_H */
+4
arch/sparc/include/uapi/asm/auxvec.h
··· 1 1 #ifndef __ASMSPARC_AUXVEC_H 2 2 #define __ASMSPARC_AUXVEC_H 3 3 4 + #define AT_SYSINFO_EHDR 33 5 + 6 + #define AT_VECTOR_SIZE_ARCH 1 7 + 4 8 #endif /* !(__ASMSPARC_AUXVEC_H) */
+1
arch/sparc/kernel/Makefile
··· 42 42 obj-y += time_$(BITS).o 43 43 obj-$(CONFIG_SPARC32) += windows.o 44 44 obj-y += cpu.o 45 + obj-$(CONFIG_SPARC64) += vdso.o 45 46 obj-$(CONFIG_SPARC32) += devices.o 46 47 obj-y += ptrace_$(BITS).o 47 48 obj-y += unaligned_$(BITS).o
+9 -2
arch/sparc/kernel/time_64.c
··· 52 52 53 53 DEFINE_SPINLOCK(rtc_lock); 54 54 55 + unsigned int __read_mostly vdso_fix_stick; 56 + 55 57 #ifdef CONFIG_SMP 56 58 unsigned long profile_pc(struct pt_regs *regs) 57 59 { ··· 831 829 void __init time_init_early(void) 832 830 { 833 831 if (tlb_type == spitfire) { 834 - if (is_hummingbird()) 832 + if (is_hummingbird()) { 835 833 init_tick_ops(&hbtick_operations); 836 - else 834 + clocksource_tick.archdata.vclock_mode = VCLOCK_NONE; 835 + } else { 837 836 init_tick_ops(&tick_operations); 837 + clocksource_tick.archdata.vclock_mode = VCLOCK_TICK; 838 + vdso_fix_stick = 1; 839 + } 838 840 } else { 839 841 init_tick_ops(&stick_operations); 842 + clocksource_tick.archdata.vclock_mode = VCLOCK_STICK; 840 843 } 841 844 } 842 845
+70
arch/sparc/kernel/vdso.c
··· 1 + /* 2 + * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE 3 + * Copyright 2003 Andi Kleen, SuSE Labs. 4 + * 5 + * Thanks to hpa@transmeta.com for some useful hint. 6 + * Special thanks to Ingo Molnar for his early experience with 7 + * a different vsyscall implementation for Linux/IA32 and for the name. 8 + */ 9 + 10 + #include <linux/seqlock.h> 11 + #include <linux/time.h> 12 + #include <linux/timekeeper_internal.h> 13 + 14 + #include <asm/vvar.h> 15 + 16 + void update_vsyscall_tz(void) 17 + { 18 + if (unlikely(vvar_data == NULL)) 19 + return; 20 + 21 + vvar_data->tz_minuteswest = sys_tz.tz_minuteswest; 22 + vvar_data->tz_dsttime = sys_tz.tz_dsttime; 23 + } 24 + 25 + void update_vsyscall(struct timekeeper *tk) 26 + { 27 + struct vvar_data *vdata = vvar_data; 28 + 29 + if (unlikely(vdata == NULL)) 30 + return; 31 + 32 + vvar_write_begin(vdata); 33 + vdata->vclock_mode = tk->tkr_mono.clock->archdata.vclock_mode; 34 + vdata->clock.cycle_last = tk->tkr_mono.cycle_last; 35 + vdata->clock.mask = tk->tkr_mono.mask; 36 + vdata->clock.mult = tk->tkr_mono.mult; 37 + vdata->clock.shift = tk->tkr_mono.shift; 38 + 39 + vdata->wall_time_sec = tk->xtime_sec; 40 + vdata->wall_time_snsec = tk->tkr_mono.xtime_nsec; 41 + 42 + vdata->monotonic_time_sec = tk->xtime_sec + 43 + tk->wall_to_monotonic.tv_sec; 44 + vdata->monotonic_time_snsec = tk->tkr_mono.xtime_nsec + 45 + (tk->wall_to_monotonic.tv_nsec << 46 + tk->tkr_mono.shift); 47 + 48 + while (vdata->monotonic_time_snsec >= 49 + (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) { 50 + vdata->monotonic_time_snsec -= 51 + ((u64)NSEC_PER_SEC) << tk->tkr_mono.shift; 52 + vdata->monotonic_time_sec++; 53 + } 54 + 55 + vdata->wall_time_coarse_sec = tk->xtime_sec; 56 + vdata->wall_time_coarse_nsec = 57 + (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift); 58 + 59 + vdata->monotonic_time_coarse_sec = 60 + vdata->wall_time_coarse_sec + tk->wall_to_monotonic.tv_sec; 61 + vdata->monotonic_time_coarse_nsec = 62 + 
vdata->wall_time_coarse_nsec + tk->wall_to_monotonic.tv_nsec; 63 + 64 + while (vdata->monotonic_time_coarse_nsec >= NSEC_PER_SEC) { 65 + vdata->monotonic_time_coarse_nsec -= NSEC_PER_SEC; 66 + vdata->monotonic_time_coarse_sec++; 67 + } 68 + 69 + vvar_write_end(vdata); 70 + }
+3
arch/sparc/vdso/.gitignore
··· 1 + vdso.lds 2 + vdso-image-*.c 3 + vdso2c
+149
arch/sparc/vdso/Makefile
··· 1 + # 2 + # Building vDSO images for sparc. 3 + # 4 + 5 + KBUILD_CFLAGS += $(DISABLE_LTO) 6 + 7 + VDSO64-$(CONFIG_SPARC64) := y 8 + VDSOCOMPAT-$(CONFIG_COMPAT) := y 9 + 10 + # files to link into the vdso 11 + vobjs-y := vdso-note.o vclock_gettime.o 12 + 13 + # files to link into kernel 14 + obj-y += vma.o 15 + 16 + # vDSO images to build 17 + vdso_img-$(VDSO64-y) += 64 18 + vdso_img-$(VDSOCOMPAT-y) += 32 19 + 20 + vobjs := $(foreach F,$(vobjs-y),$(obj)/$F) 21 + 22 + $(obj)/vdso.o: $(obj)/vdso.so 23 + 24 + targets += vdso.lds $(vobjs-y) 25 + 26 + # Build the vDSO image C files and link them in. 27 + vdso_img_objs := $(vdso_img-y:%=vdso-image-%.o) 28 + vdso_img_cfiles := $(vdso_img-y:%=vdso-image-%.c) 29 + vdso_img_sodbg := $(vdso_img-y:%=vdso%.so.dbg) 30 + obj-y += $(vdso_img_objs) 31 + targets += $(vdso_img_cfiles) 32 + targets += $(vdso_img_sodbg) 33 + .SECONDARY: $(vdso_img-y:%=$(obj)/vdso-image-%.c) \ 34 + $(vdso_img-y:%=$(obj)/vdso%.so) 35 + 36 + export CPPFLAGS_vdso.lds += -P -C 37 + 38 + VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \ 39 + -Wl,--no-undefined \ 40 + -Wl,-z,max-page-size=8192 -Wl,-z,common-page-size=8192 \ 41 + $(DISABLE_LTO) 42 + 43 + $(obj)/vdso64.so.dbg: $(src)/vdso.lds $(vobjs) FORCE 44 + $(call if_changed,vdso) 45 + 46 + HOST_EXTRACFLAGS += -I$(srctree)/tools/include 47 + hostprogs-y += vdso2c 48 + 49 + quiet_cmd_vdso2c = VDSO2C $@ 50 + define cmd_vdso2c 51 + $(obj)/vdso2c $< $(<:%.dbg=%) $@ 52 + endef 53 + 54 + $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE 55 + $(call if_changed,vdso2c) 56 + 57 + # 58 + # Don't omit frame pointers for ease of userspace debugging, but do 59 + # optimize sibling calls. 
60 + # 61 + CFL := $(PROFILING) -mcmodel=medlow -fPIC -O2 -fasynchronous-unwind-tables \ 62 + -m64 -ffixed-g2 -ffixed-g3 -fcall-used-g4 -fcall-used-g5 -ffixed-g6 \ 63 + -ffixed-g7 $(filter -g%,$(KBUILD_CFLAGS)) \ 64 + $(call cc-option, -fno-stack-protector) -fno-omit-frame-pointer \ 65 + -foptimize-sibling-calls -DBUILD_VDSO 66 + 67 + $(vobjs): KBUILD_CFLAGS += $(CFL) 68 + 69 + # 70 + # vDSO code runs in userspace and -pg doesn't help with profiling anyway. 71 + # 72 + CFLAGS_REMOVE_vdso-note.o = -pg 73 + CFLAGS_REMOVE_vclock_gettime.o = -pg 74 + 75 + $(obj)/%.so: OBJCOPYFLAGS := -S 76 + $(obj)/%.so: $(obj)/%.so.dbg 77 + $(call if_changed,objcopy) 78 + 79 + CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds) 80 + VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-m,elf32_sparc,-soname=linux-gate.so.1 81 + 82 + #This makes sure the $(obj) subdirectory exists even though vdso32/ 83 + #is not a kbuild sub-make subdirectory 84 + override obj-dirs = $(dir $(obj)) $(obj)/vdso32/ 85 + 86 + targets += vdso32/vdso32.lds 87 + targets += vdso32/vdso-note.o 88 + targets += vdso32/vclock_gettime.o 89 + 90 + KBUILD_AFLAGS_32 := $(filter-out -m64,$(KBUILD_AFLAGS)) -DBUILD_VDSO 91 + $(obj)/vdso32.so.dbg: KBUILD_AFLAGS = $(KBUILD_AFLAGS_32) 92 + $(obj)/vdso32.so.dbg: asflags-$(CONFIG_SPARC64) += -m32 93 + 94 + KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS)) 95 + KBUILD_CFLAGS_32 := $(filter-out -mcmodel=medlow,$(KBUILD_CFLAGS_32)) 96 + KBUILD_CFLAGS_32 := $(filter-out -fno-pic,$(KBUILD_CFLAGS_32)) 97 + KBUILD_CFLAGS_32 := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS_32)) 98 + KBUILD_CFLAGS_32 += -m32 -msoft-float -fpic -mno-app-regs -ffixed-g7 99 + KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector) 100 + KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls) 101 + KBUILD_CFLAGS_32 += -fno-omit-frame-pointer 102 + KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING 103 + KBUILD_CFLAGS_32 += -mv8plus 104 + $(obj)/vdso32.so.dbg: KBUILD_CFLAGS = $(KBUILD_CFLAGS_32) 105 + 106 + 
$(obj)/vdso32.so.dbg: FORCE \ 107 + $(obj)/vdso32/vdso32.lds \ 108 + $(obj)/vdso32/vclock_gettime.o \ 109 + $(obj)/vdso32/vdso-note.o 110 + $(call if_changed,vdso) 111 + 112 + # 113 + # The DSO images are built using a special linker script. 114 + # 115 + quiet_cmd_vdso = VDSO $@ 116 + cmd_vdso = $(CC) -nostdlib -o $@ \ 117 + $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \ 118 + -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) 119 + 120 + VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \ 121 + $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic 122 + GCOV_PROFILE := n 123 + 124 + # 125 + # Install the unstripped copies of vdso*.so. If our toolchain supports 126 + # build-id, install .build-id links as well. 127 + # 128 + quiet_cmd_vdso_install = INSTALL $(@:install_%=%) 129 + define cmd_vdso_install 130 + cp $< "$(MODLIB)/vdso/$(@:install_%=%)"; \ 131 + if readelf -n $< |grep -q 'Build ID'; then \ 132 + buildid=`readelf -n $< |grep 'Build ID' |sed -e 's/^.*Build ID: \(.*\)$$/\1/'`; \ 133 + first=`echo $$buildid | cut -b-2`; \ 134 + last=`echo $$buildid | cut -b3-`; \ 135 + mkdir -p "$(MODLIB)/vdso/.build-id/$$first"; \ 136 + ln -sf "../../$(@:install_%=%)" "$(MODLIB)/vdso/.build-id/$$first/$$last.debug"; \ 137 + fi 138 + endef 139 + 140 + vdso_img_insttargets := $(vdso_img_sodbg:%.dbg=install_%) 141 + 142 + $(MODLIB)/vdso: FORCE 143 + @mkdir -p $(MODLIB)/vdso 144 + 145 + $(vdso_img_insttargets): install_%: $(obj)/%.dbg $(MODLIB)/vdso FORCE 146 + $(call cmd,vdso_install) 147 + 148 + PHONY += vdso_install $(vdso_img_insttargets) 149 + vdso_install: $(vdso_img_insttargets) FORCE
+264
arch/sparc/vdso/vclock_gettime.c
··· 1 + /* 2 + * Copyright 2006 Andi Kleen, SUSE Labs. 3 + * Subject to the GNU Public License, v.2 4 + * 5 + * Fast user context implementation of clock_gettime, gettimeofday, and time. 6 + * 7 + * The code should have no internal unresolved relocations. 8 + * Check with readelf after changing. 9 + * Also alternative() doesn't work. 10 + */ 11 + /* 12 + * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved. 13 + */ 14 + 15 + /* Disable profiling for userspace code: */ 16 + #ifndef DISABLE_BRANCH_PROFILING 17 + #define DISABLE_BRANCH_PROFILING 18 + #endif 19 + 20 + #include <linux/kernel.h> 21 + #include <linux/time.h> 22 + #include <linux/string.h> 23 + #include <asm/io.h> 24 + #include <asm/unistd.h> 25 + #include <asm/timex.h> 26 + #include <asm/clocksource.h> 27 + #include <asm/vvar.h> 28 + 29 + #undef TICK_PRIV_BIT 30 + #ifdef CONFIG_SPARC64 31 + #define TICK_PRIV_BIT (1UL << 63) 32 + #else 33 + #define TICK_PRIV_BIT (1ULL << 63) 34 + #endif 35 + 36 + #define SYSCALL_STRING \ 37 + "ta 0x6d;" \ 38 + "sub %%g0, %%o0, %%o0;" \ 39 + 40 + #define SYSCALL_CLOBBERS \ 41 + "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", \ 42 + "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", \ 43 + "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", \ 44 + "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31", \ 45 + "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46", \ 46 + "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62", \ 47 + "cc", "memory" 48 + 49 + /* 50 + * Compute the vvar page's address in the process address space, and return it 51 + * as a pointer to the vvar_data. 52 + */ 53 + static notrace noinline struct vvar_data * 54 + get_vvar_data(void) 55 + { 56 + unsigned long ret; 57 + 58 + /* 59 + * vdso data page is the first vDSO page so grab the return address 60 + * and move up a page to get to the data page. 
61 + */ 62 + ret = (unsigned long)__builtin_return_address(0); 63 + ret &= ~(8192 - 1); 64 + ret -= 8192; 65 + 66 + return (struct vvar_data *) ret; 67 + } 68 + 69 + static notrace long 70 + vdso_fallback_gettime(long clock, struct timespec *ts) 71 + { 72 + register long num __asm__("g1") = __NR_clock_gettime; 73 + register long o0 __asm__("o0") = clock; 74 + register long o1 __asm__("o1") = (long) ts; 75 + 76 + __asm__ __volatile__(SYSCALL_STRING : "=r" (o0) : "r" (num), 77 + "0" (o0), "r" (o1) : SYSCALL_CLOBBERS); 78 + return o0; 79 + } 80 + 81 + static notrace __always_inline long 82 + vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz) 83 + { 84 + register long num __asm__("g1") = __NR_gettimeofday; 85 + register long o0 __asm__("o0") = (long) tv; 86 + register long o1 __asm__("o1") = (long) tz; 87 + 88 + __asm__ __volatile__(SYSCALL_STRING : "=r" (o0) : "r" (num), 89 + "0" (o0), "r" (o1) : SYSCALL_CLOBBERS); 90 + return o0; 91 + } 92 + 93 + #ifdef CONFIG_SPARC64 94 + static notrace noinline u64 95 + vread_tick(void) { 96 + u64 ret; 97 + 98 + __asm__ __volatile__("rd %%asr24, %0 \n" 99 + ".section .vread_tick_patch, \"ax\" \n" 100 + "rd %%tick, %0 \n" 101 + ".previous \n" 102 + : "=&r" (ret)); 103 + return ret & ~TICK_PRIV_BIT; 104 + } 105 + #else 106 + static notrace noinline u64 107 + vread_tick(void) 108 + { 109 + unsigned int lo, hi; 110 + 111 + __asm__ __volatile__("rd %%asr24, %%g1\n\t" 112 + "srlx %%g1, 32, %1\n\t" 113 + "srl %%g1, 0, %0\n" 114 + ".section .vread_tick_patch, \"ax\" \n" 115 + "rd %%tick, %%g1\n" 116 + ".previous \n" 117 + : "=&r" (lo), "=&r" (hi) 118 + : 119 + : "g1"); 120 + return lo | ((u64)hi << 32); 121 + } 122 + #endif 123 + 124 + static notrace inline u64 125 + vgetsns(struct vvar_data *vvar) 126 + { 127 + u64 v; 128 + u64 cycles; 129 + 130 + cycles = vread_tick(); 131 + v = (cycles - vvar->clock.cycle_last) & vvar->clock.mask; 132 + return v * vvar->clock.mult; 133 + } 134 + 135 + static notrace noinline int 136 + 
do_realtime(struct vvar_data *vvar, struct timespec *ts) 137 + { 138 + unsigned long seq; 139 + u64 ns; 140 + 141 + ts->tv_nsec = 0; 142 + do { 143 + seq = vvar_read_begin(vvar); 144 + ts->tv_sec = vvar->wall_time_sec; 145 + ns = vvar->wall_time_snsec; 146 + ns += vgetsns(vvar); 147 + ns >>= vvar->clock.shift; 148 + } while (unlikely(vvar_read_retry(vvar, seq))); 149 + 150 + timespec_add_ns(ts, ns); 151 + 152 + return 0; 153 + } 154 + 155 + static notrace noinline int 156 + do_monotonic(struct vvar_data *vvar, struct timespec *ts) 157 + { 158 + unsigned long seq; 159 + u64 ns; 160 + 161 + ts->tv_nsec = 0; 162 + do { 163 + seq = vvar_read_begin(vvar); 164 + ts->tv_sec = vvar->monotonic_time_sec; 165 + ns = vvar->monotonic_time_snsec; 166 + ns += vgetsns(vvar); 167 + ns >>= vvar->clock.shift; 168 + } while (unlikely(vvar_read_retry(vvar, seq))); 169 + 170 + timespec_add_ns(ts, ns); 171 + 172 + return 0; 173 + } 174 + 175 + static notrace noinline int 176 + do_realtime_coarse(struct vvar_data *vvar, struct timespec *ts) 177 + { 178 + unsigned long seq; 179 + 180 + do { 181 + seq = vvar_read_begin(vvar); 182 + ts->tv_sec = vvar->wall_time_coarse_sec; 183 + ts->tv_nsec = vvar->wall_time_coarse_nsec; 184 + } while (unlikely(vvar_read_retry(vvar, seq))); 185 + return 0; 186 + } 187 + 188 + static notrace noinline int 189 + do_monotonic_coarse(struct vvar_data *vvar, struct timespec *ts) 190 + { 191 + unsigned long seq; 192 + 193 + do { 194 + seq = vvar_read_begin(vvar); 195 + ts->tv_sec = vvar->monotonic_time_coarse_sec; 196 + ts->tv_nsec = vvar->monotonic_time_coarse_nsec; 197 + } while (unlikely(vvar_read_retry(vvar, seq))); 198 + 199 + return 0; 200 + } 201 + 202 + notrace int 203 + __vdso_clock_gettime(clockid_t clock, struct timespec *ts) 204 + { 205 + struct vvar_data *vvd = get_vvar_data(); 206 + 207 + switch (clock) { 208 + case CLOCK_REALTIME: 209 + if (unlikely(vvd->vclock_mode == VCLOCK_NONE)) 210 + break; 211 + return do_realtime(vvd, ts); 212 + case 
CLOCK_MONOTONIC: 213 + if (unlikely(vvd->vclock_mode == VCLOCK_NONE)) 214 + break; 215 + return do_monotonic(vvd, ts); 216 + case CLOCK_REALTIME_COARSE: 217 + return do_realtime_coarse(vvd, ts); 218 + case CLOCK_MONOTONIC_COARSE: 219 + return do_monotonic_coarse(vvd, ts); 220 + } 221 + /* 222 + * Unknown clock ID ? Fall back to the syscall. 223 + */ 224 + return vdso_fallback_gettime(clock, ts); 225 + } 226 + int 227 + clock_gettime(clockid_t, struct timespec *) 228 + __attribute__((weak, alias("__vdso_clock_gettime"))); 229 + 230 + notrace int 231 + __vdso_gettimeofday(struct timeval *tv, struct timezone *tz) 232 + { 233 + struct vvar_data *vvd = get_vvar_data(); 234 + 235 + if (likely(vvd->vclock_mode != VCLOCK_NONE)) { 236 + if (likely(tv != NULL)) { 237 + union tstv_t { 238 + struct timespec ts; 239 + struct timeval tv; 240 + } *tstv = (union tstv_t *) tv; 241 + do_realtime(vvd, &tstv->ts); 242 + /* 243 + * Assign before dividing to ensure that the division is 244 + * done in the type of tv_usec, not tv_nsec. 245 + * 246 + * There cannot be > 1 billion usec in a second: 247 + * do_realtime() has already distributed such overflow 248 + * into tv_sec. So we can assign it to an int safely. 249 + */ 250 + tstv->tv.tv_usec = tstv->ts.tv_nsec; 251 + tstv->tv.tv_usec /= 1000; 252 + } 253 + if (unlikely(tz != NULL)) { 254 + /* Avoid memcpy. Some old compilers fail to inline it */ 255 + tz->tz_minuteswest = vvd->tz_minuteswest; 256 + tz->tz_dsttime = vvd->tz_dsttime; 257 + } 258 + return 0; 259 + } 260 + return vdso_fallback_gettimeofday(tv, tz); 261 + } 262 + int 263 + gettimeofday(struct timeval *, struct timezone *) 264 + __attribute__((weak, alias("__vdso_gettimeofday")));
+104
arch/sparc/vdso/vdso-layout.lds.S
··· 1 + /* 2 + * Linker script for vDSO. This is an ELF shared object prelinked to 3 + * its virtual address, and with only one read-only segment. 4 + * This script controls its layout. 5 + */ 6 + 7 + #if defined(BUILD_VDSO64) 8 + # define SHDR_SIZE 64 9 + #elif defined(BUILD_VDSO32) 10 + # define SHDR_SIZE 40 11 + #else 12 + # error unknown VDSO target 13 + #endif 14 + 15 + #define NUM_FAKE_SHDRS 7 16 + 17 + SECTIONS 18 + { 19 + /* 20 + * User/kernel shared data is before the vDSO. This may be a little 21 + * uglier than putting it after the vDSO, but it avoids issues with 22 + * non-allocatable things that dangle past the end of the PT_LOAD 23 + * segment. Page size is 8192 for both 64-bit and 32-bit vdso binaries 24 + */ 25 + 26 + vvar_start = . -8192; 27 + vvar_data = vvar_start; 28 + 29 + . = SIZEOF_HEADERS; 30 + 31 + .hash : { *(.hash) } :text 32 + .gnu.hash : { *(.gnu.hash) } 33 + .dynsym : { *(.dynsym) } 34 + .dynstr : { *(.dynstr) } 35 + .gnu.version : { *(.gnu.version) } 36 + .gnu.version_d : { *(.gnu.version_d) } 37 + .gnu.version_r : { *(.gnu.version_r) } 38 + 39 + .dynamic : { *(.dynamic) } :text :dynamic 40 + 41 + .rodata : { 42 + *(.rodata*) 43 + *(.data*) 44 + *(.sdata*) 45 + *(.got.plt) *(.got) 46 + *(.gnu.linkonce.d.*) 47 + *(.bss*) 48 + *(.dynbss*) 49 + *(.gnu.linkonce.b.*) 50 + 51 + /* 52 + * Ideally this would live in a C file: kept in here for 53 + * compatibility with x86-64. 54 + */ 55 + VDSO_FAKE_SECTION_TABLE_START = .; 56 + . = . + NUM_FAKE_SHDRS * SHDR_SIZE; 57 + VDSO_FAKE_SECTION_TABLE_END = .; 58 + } :text 59 + 60 + .fake_shstrtab : { *(.fake_shstrtab) } :text 61 + 62 + 63 + .note : { *(.note.*) } :text :note 64 + 65 + .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr 66 + .eh_frame : { KEEP (*(.eh_frame)) } :text 67 + 68 + 69 + /* 70 + * Text is well-separated from actual data: there's plenty of 71 + * stuff that isn't used at runtime in between. 
72 + */ 73 + 74 + .text : { *(.text*) } :text =0x90909090, 75 + 76 + .vread_tick_patch : { 77 + vread_tick_patch_start = .; 78 + *(.vread_tick_patch) 79 + vread_tick_patch_end = .; 80 + } 81 + 82 + /DISCARD/ : { 83 + *(.discard) 84 + *(.discard.*) 85 + *(__bug_table) 86 + } 87 + } 88 + 89 + /* 90 + * Very old versions of ld do not recognize this name token; use the constant. 91 + */ 92 + #define PT_GNU_EH_FRAME 0x6474e550 93 + 94 + /* 95 + * We must supply the ELF program headers explicitly to get just one 96 + * PT_LOAD segment, and set the flags explicitly to make segments read-only. 97 + */ 98 + PHDRS 99 + { 100 + text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */ 101 + dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ 102 + note PT_NOTE FLAGS(4); /* PF_R */ 103 + eh_frame_hdr PT_GNU_EH_FRAME; 104 + }
+12
arch/sparc/vdso/vdso-note.S
··· 1 + /* 2 + * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text. 3 + * Here we can supply some information useful to userland. 4 + */ 5 + 6 + #include <linux/uts.h> 7 + #include <linux/version.h> 8 + #include <linux/elfnote.h> 9 + 10 + ELFNOTE_START(Linux, 0, "a") 11 + .long LINUX_VERSION_CODE 12 + ELFNOTE_END
+25
arch/sparc/vdso/vdso.lds.S
··· 1 + /* 2 + * Linker script for 64-bit vDSO. 3 + * We #include the file to define the layout details. 4 + * 5 + * This file defines the version script giving the user-exported symbols in 6 + * the DSO. 7 + */ 8 + 9 + #define BUILD_VDSO64 10 + 11 + #include "vdso-layout.lds.S" 12 + 13 + /* 14 + * This controls what userland symbols we export from the vDSO. 15 + */ 16 + VERSION { 17 + LINUX_2.6 { 18 + global: 19 + clock_gettime; 20 + __vdso_clock_gettime; 21 + gettimeofday; 22 + __vdso_gettimeofday; 23 + local: *; 24 + }; 25 + }
+234
arch/sparc/vdso/vdso2c.c
··· 1 + /* 2 + * vdso2c - A vdso image preparation tool 3 + * Copyright (c) 2014 Andy Lutomirski and others 4 + * Licensed under the GPL v2 5 + * 6 + * vdso2c requires stripped and unstripped input. It would be trivial 7 + * to fully strip the input in here, but, for reasons described below, 8 + * we need to write a section table. Doing this is more or less 9 + * equivalent to dropping all non-allocatable sections, but it's 10 + * easier to let objcopy handle that instead of doing it ourselves. 11 + * If we ever need to do something fancier than what objcopy provides, 12 + * it would be straightforward to add here. 13 + * 14 + * We keep a section table for a few reasons: 15 + * 16 + * Binutils has issues debugging the vDSO: it reads the section table to 17 + * find SHT_NOTE; it won't look at PT_NOTE for the in-memory vDSO, which 18 + * would break build-id if we removed the section table. Binutils 19 + * also requires that shstrndx != 0. See: 20 + * https://sourceware.org/bugzilla/show_bug.cgi?id=17064 21 + * 22 + * elfutils might not look for PT_NOTE if there is a section table at 23 + * all. I don't know whether this matters for any practical purpose. 24 + * 25 + * For simplicity, rather than hacking up a partial section table, we 26 + * just write a mostly complete one. We omit non-dynamic symbols, 27 + * though, since they're rather large. 28 + * 29 + * Once binutils gets fixed, we might be able to drop this for all but 30 + * the 64-bit vdso, since build-id only works in kernel RPMs, and 31 + * systems that update to new enough kernel RPMs will likely update 32 + * binutils in sync. build-id has never worked for home-built kernel 33 + * RPMs without manual symlinking, and I suspect that no one ever does 34 + * that. 35 + */ 36 + 37 + /* 38 + * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved. 
39 + */ 40 + 41 + #include <inttypes.h> 42 + #include <stdint.h> 43 + #include <unistd.h> 44 + #include <stdarg.h> 45 + #include <stdlib.h> 46 + #include <stdio.h> 47 + #include <string.h> 48 + #include <fcntl.h> 49 + #include <err.h> 50 + 51 + #include <sys/mman.h> 52 + #include <sys/types.h> 53 + #include <tools/be_byteshift.h> 54 + 55 + #include <linux/elf.h> 56 + #include <linux/types.h> 57 + #include <linux/kernel.h> 58 + 59 + const char *outfilename; 60 + 61 + /* Symbols that we need in vdso2c. */ 62 + enum { 63 + sym_vvar_start, 64 + sym_VDSO_FAKE_SECTION_TABLE_START, 65 + sym_VDSO_FAKE_SECTION_TABLE_END, 66 + sym_vread_tick, 67 + sym_vread_tick_patch_start, 68 + sym_vread_tick_patch_end 69 + }; 70 + 71 + struct vdso_sym { 72 + const char *name; 73 + int export; 74 + }; 75 + 76 + struct vdso_sym required_syms[] = { 77 + [sym_vvar_start] = {"vvar_start", 1}, 78 + [sym_VDSO_FAKE_SECTION_TABLE_START] = { 79 + "VDSO_FAKE_SECTION_TABLE_START", 0 80 + }, 81 + [sym_VDSO_FAKE_SECTION_TABLE_END] = { 82 + "VDSO_FAKE_SECTION_TABLE_END", 0 83 + }, 84 + [sym_vread_tick] = {"vread_tick", 1}, 85 + [sym_vread_tick_patch_start] = {"vread_tick_patch_start", 1}, 86 + [sym_vread_tick_patch_end] = {"vread_tick_patch_end", 1} 87 + }; 88 + 89 + __attribute__((format(printf, 1, 2))) __attribute__((noreturn)) 90 + static void fail(const char *format, ...) 
91 + { 92 + va_list ap; 93 + 94 + va_start(ap, format); 95 + fprintf(stderr, "Error: "); 96 + vfprintf(stderr, format, ap); 97 + if (outfilename) 98 + unlink(outfilename); 99 + exit(1); 100 + va_end(ap); 101 + } 102 + 103 + /* 104 + * Evil macros for big-endian reads and writes 105 + */ 106 + #define GBE(x, bits, ifnot) \ 107 + __builtin_choose_expr( \ 108 + (sizeof(*(x)) == bits/8), \ 109 + (__typeof__(*(x)))get_unaligned_be##bits(x), ifnot) 110 + 111 + #define LAST_GBE(x) \ 112 + __builtin_choose_expr(sizeof(*(x)) == 1, *(x), (void)(0)) 113 + 114 + #define GET_BE(x) \ 115 + GBE(x, 64, GBE(x, 32, GBE(x, 16, LAST_GBE(x)))) 116 + 117 + #define PBE(x, val, bits, ifnot) \ 118 + __builtin_choose_expr( \ 119 + (sizeof(*(x)) == bits/8), \ 120 + put_unaligned_be##bits((val), (x)), ifnot) 121 + 122 + #define LAST_PBE(x, val) \ 123 + __builtin_choose_expr(sizeof(*(x)) == 1, *(x) = (val), (void)(0)) 124 + 125 + #define PUT_BE(x, val) \ 126 + PBE(x, val, 64, PBE(x, val, 32, PBE(x, val, 16, LAST_PBE(x, val)))) 127 + 128 + #define NSYMS ARRAY_SIZE(required_syms) 129 + 130 + #define BITSFUNC3(name, bits, suffix) name##bits##suffix 131 + #define BITSFUNC2(name, bits, suffix) BITSFUNC3(name, bits, suffix) 132 + #define BITSFUNC(name) BITSFUNC2(name, ELF_BITS, ) 133 + 134 + #define INT_BITS BITSFUNC2(int, ELF_BITS, _t) 135 + 136 + #define ELF_BITS_XFORM2(bits, x) Elf##bits##_##x 137 + #define ELF_BITS_XFORM(bits, x) ELF_BITS_XFORM2(bits, x) 138 + #define ELF(x) ELF_BITS_XFORM(ELF_BITS, x) 139 + 140 + #define ELF_BITS 64 141 + #include "vdso2c.h" 142 + #undef ELF_BITS 143 + 144 + #define ELF_BITS 32 145 + #include "vdso2c.h" 146 + #undef ELF_BITS 147 + 148 + static void go(void *raw_addr, size_t raw_len, 149 + void *stripped_addr, size_t stripped_len, 150 + FILE *outfile, const char *name) 151 + { 152 + Elf64_Ehdr *hdr = (Elf64_Ehdr *)raw_addr; 153 + 154 + if (hdr->e_ident[EI_CLASS] == ELFCLASS64) { 155 + go64(raw_addr, raw_len, stripped_addr, stripped_len, 156 + outfile, name); 157 
+ } else if (hdr->e_ident[EI_CLASS] == ELFCLASS32) { 158 + go32(raw_addr, raw_len, stripped_addr, stripped_len, 159 + outfile, name); 160 + } else { 161 + fail("unknown ELF class\n"); 162 + } 163 + } 164 + 165 + static void map_input(const char *name, void **addr, size_t *len, int prot) 166 + { 167 + off_t tmp_len; 168 + 169 + int fd = open(name, O_RDONLY); 170 + 171 + if (fd == -1) 172 + err(1, "%s", name); 173 + 174 + tmp_len = lseek(fd, 0, SEEK_END); 175 + if (tmp_len == (off_t)-1) 176 + err(1, "lseek"); 177 + *len = (size_t)tmp_len; 178 + 179 + *addr = mmap(NULL, tmp_len, prot, MAP_PRIVATE, fd, 0); 180 + if (*addr == MAP_FAILED) 181 + err(1, "mmap"); 182 + 183 + close(fd); 184 + } 185 + 186 + int main(int argc, char **argv) 187 + { 188 + size_t raw_len, stripped_len; 189 + void *raw_addr, *stripped_addr; 190 + FILE *outfile; 191 + char *name, *tmp; 192 + int namelen; 193 + 194 + if (argc != 4) { 195 + printf("Usage: vdso2c RAW_INPUT STRIPPED_INPUT OUTPUT\n"); 196 + return 1; 197 + } 198 + 199 + /* 200 + * Figure out the struct name. If we're writing to a .so file, 201 + * generate raw output insted. 202 + */ 203 + name = strdup(argv[3]); 204 + namelen = strlen(name); 205 + if (namelen >= 3 && !strcmp(name + namelen - 3, ".so")) { 206 + name = NULL; 207 + } else { 208 + tmp = strrchr(name, '/'); 209 + if (tmp) 210 + name = tmp + 1; 211 + tmp = strchr(name, '.'); 212 + if (tmp) 213 + *tmp = '\0'; 214 + for (tmp = name; *tmp; tmp++) 215 + if (*tmp == '-') 216 + *tmp = '_'; 217 + } 218 + 219 + map_input(argv[1], &raw_addr, &raw_len, PROT_READ); 220 + map_input(argv[2], &stripped_addr, &stripped_len, PROT_READ); 221 + 222 + outfilename = argv[3]; 223 + outfile = fopen(outfilename, "w"); 224 + if (!outfile) 225 + err(1, "%s", argv[2]); 226 + 227 + go(raw_addr, raw_len, stripped_addr, stripped_len, outfile, name); 228 + 229 + munmap(raw_addr, raw_len); 230 + munmap(stripped_addr, stripped_len); 231 + fclose(outfile); 232 + 233 + return 0; 234 + }
+143
arch/sparc/vdso/vdso2c.h
··· 1 + /* 2 + * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved. 3 + */ 4 + 5 + /* 6 + * This file is included up to twice from vdso2c.c. It generates code for 7 + * 32-bit and 64-bit vDSOs. We will eventually need both for 64-bit builds, 8 + * since 32-bit vDSOs will then be built for 32-bit userspace. 9 + */ 10 + 11 + static void BITSFUNC(go)(void *raw_addr, size_t raw_len, 12 + void *stripped_addr, size_t stripped_len, 13 + FILE *outfile, const char *name) 14 + { 15 + int found_load = 0; 16 + unsigned long load_size = -1; /* Work around bogus warning */ 17 + unsigned long mapping_size; 18 + int i; 19 + unsigned long j; 20 + 21 + ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr; 22 + ELF(Ehdr) *hdr = (ELF(Ehdr) *)raw_addr; 23 + ELF(Dyn) *dyn = 0, *dyn_end = 0; 24 + INT_BITS syms[NSYMS] = {}; 25 + 26 + ELF(Phdr) *pt = (ELF(Phdr) *)(raw_addr + GET_BE(&hdr->e_phoff)); 27 + 28 + /* Walk the segment table. */ 29 + for (i = 0; i < GET_BE(&hdr->e_phnum); i++) { 30 + if (GET_BE(&pt[i].p_type) == PT_LOAD) { 31 + if (found_load) 32 + fail("multiple PT_LOAD segs\n"); 33 + 34 + if (GET_BE(&pt[i].p_offset) != 0 || 35 + GET_BE(&pt[i].p_vaddr) != 0) 36 + fail("PT_LOAD in wrong place\n"); 37 + 38 + if (GET_BE(&pt[i].p_memsz) != GET_BE(&pt[i].p_filesz)) 39 + fail("cannot handle memsz != filesz\n"); 40 + 41 + load_size = GET_BE(&pt[i].p_memsz); 42 + found_load = 1; 43 + } else if (GET_BE(&pt[i].p_type) == PT_DYNAMIC) { 44 + dyn = raw_addr + GET_BE(&pt[i].p_offset); 45 + dyn_end = raw_addr + GET_BE(&pt[i].p_offset) + 46 + GET_BE(&pt[i].p_memsz); 47 + } 48 + } 49 + if (!found_load) 50 + fail("no PT_LOAD seg\n"); 51 + 52 + if (stripped_len < load_size) 53 + fail("stripped input is too short\n"); 54 + 55 + /* Walk the dynamic table */ 56 + for (i = 0; dyn + i < dyn_end && 57 + GET_BE(&dyn[i].d_tag) != DT_NULL; i++) { 58 + typeof(dyn[i].d_tag) tag = GET_BE(&dyn[i].d_tag); 59 + typeof(dyn[i].d_un.d_val) val = GET_BE(&dyn[i].d_un.d_val); 60 + 61 + if ((tag == DT_RELSZ || 
tag == DT_RELASZ) && (val != 0)) 62 + fail("vdso image contains dynamic relocations\n"); 63 + } 64 + 65 + /* Walk the section table */ 66 + for (i = 0; i < GET_BE(&hdr->e_shnum); i++) { 67 + ELF(Shdr) *sh = raw_addr + GET_BE(&hdr->e_shoff) + 68 + GET_BE(&hdr->e_shentsize) * i; 69 + if (GET_BE(&sh->sh_type) == SHT_SYMTAB) 70 + symtab_hdr = sh; 71 + } 72 + 73 + if (!symtab_hdr) 74 + fail("no symbol table\n"); 75 + 76 + strtab_hdr = raw_addr + GET_BE(&hdr->e_shoff) + 77 + GET_BE(&hdr->e_shentsize) * GET_BE(&symtab_hdr->sh_link); 78 + 79 + /* Walk the symbol table */ 80 + for (i = 0; 81 + i < GET_BE(&symtab_hdr->sh_size) / GET_BE(&symtab_hdr->sh_entsize); 82 + i++) { 83 + int k; 84 + 85 + ELF(Sym) *sym = raw_addr + GET_BE(&symtab_hdr->sh_offset) + 86 + GET_BE(&symtab_hdr->sh_entsize) * i; 87 + const char *name = raw_addr + GET_BE(&strtab_hdr->sh_offset) + 88 + GET_BE(&sym->st_name); 89 + 90 + for (k = 0; k < NSYMS; k++) { 91 + if (!strcmp(name, required_syms[k].name)) { 92 + if (syms[k]) { 93 + fail("duplicate symbol %s\n", 94 + required_syms[k].name); 95 + } 96 + 97 + /* 98 + * Careful: we use negative addresses, but 99 + * st_value is unsigned, so we rely 100 + * on syms[k] being a signed type of the 101 + * correct width. 102 + */ 103 + syms[k] = GET_BE(&sym->st_value); 104 + } 105 + } 106 + } 107 + 108 + /* Validate mapping addresses. 
*/ 109 + if (syms[sym_vvar_start] % 8192) 110 + fail("vvar_begin must be a multiple of 8192\n"); 111 + 112 + if (!name) { 113 + fwrite(stripped_addr, stripped_len, 1, outfile); 114 + return; 115 + } 116 + 117 + mapping_size = (stripped_len + 8191) / 8192 * 8192; 118 + 119 + fprintf(outfile, "/* AUTOMATICALLY GENERATED -- DO NOT EDIT */\n\n"); 120 + fprintf(outfile, "#include <linux/cache.h>\n"); 121 + fprintf(outfile, "#include <asm/vdso.h>\n"); 122 + fprintf(outfile, "\n"); 123 + fprintf(outfile, 124 + "static unsigned char raw_data[%lu] __ro_after_init __aligned(8192)= {", 125 + mapping_size); 126 + for (j = 0; j < stripped_len; j++) { 127 + if (j % 10 == 0) 128 + fprintf(outfile, "\n\t"); 129 + fprintf(outfile, "0x%02X, ", 130 + (int)((unsigned char *)stripped_addr)[j]); 131 + } 132 + fprintf(outfile, "\n};\n\n"); 133 + 134 + fprintf(outfile, "const struct vdso_image %s_builtin = {\n", name); 135 + fprintf(outfile, "\t.data = raw_data,\n"); 136 + fprintf(outfile, "\t.size = %lu,\n", mapping_size); 137 + for (i = 0; i < NSYMS; i++) { 138 + if (required_syms[i].export && syms[i]) 139 + fprintf(outfile, "\t.sym_%s = %" PRIi64 ",\n", 140 + required_syms[i].name, (int64_t)syms[i]); 141 + } 142 + fprintf(outfile, "};\n"); 143 + }
+1
arch/sparc/vdso/vdso32/.gitignore
··· 1 + vdso32.lds
+26
arch/sparc/vdso/vdso32/vclock_gettime.c
··· 1 + /* 2 + * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved. 3 + */ 4 + 5 + #define BUILD_VDSO32 6 + 7 + #ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE 8 + #undef CONFIG_OPTIMIZE_INLINING 9 + #endif 10 + 11 + #ifdef CONFIG_SPARC64 12 + 13 + /* 14 + * in case of a 32 bit VDSO for a 64 bit kernel fake a 32 bit kernel 15 + * configuration 16 + */ 17 + #undef CONFIG_64BIT 18 + #undef CONFIG_SPARC64 19 + #define BUILD_VDSO32_64 20 + #define CONFIG_32BIT 21 + #undef CONFIG_QUEUED_RWLOCKS 22 + #undef CONFIG_QUEUED_SPINLOCKS 23 + 24 + #endif 25 + 26 + #include "../vclock_gettime.c"
+12
arch/sparc/vdso/vdso32/vdso-note.S
··· 1 + /* 2 + * This supplies .note.* sections to go into the PT_NOTE inside the vDSO 3 + * text. Here we can supply some information useful to userland. 4 + */ 5 + 6 + #include <linux/uts.h> 7 + #include <linux/version.h> 8 + #include <linux/elfnote.h> 9 + 10 + ELFNOTE_START(Linux, 0, "a") 11 + .long LINUX_VERSION_CODE 12 + ELFNOTE_END
+24
arch/sparc/vdso/vdso32/vdso32.lds.S
··· 1 + /* 2 + * Linker script for sparc32 vDSO 3 + * We #include the file to define the layout details. 4 + * 5 + * This file defines the version script giving the user-exported symbols in 6 + * the DSO. 7 + */ 8 + 9 + #define BUILD_VDSO32 10 + #include "../vdso-layout.lds.S" 11 + 12 + /* 13 + * This controls what userland symbols we export from the vDSO. 14 + */ 15 + VERSION { 16 + LINUX_2.6 { 17 + global: 18 + clock_gettime; 19 + __vdso_clock_gettime; 20 + gettimeofday; 21 + __vdso_gettimeofday; 22 + local: *; 23 + }; 24 + }
+268
arch/sparc/vdso/vma.c
··· 1 + /* 2 + * Set up the VMAs to tell the VM about the vDSO. 3 + * Copyright 2007 Andi Kleen, SUSE Labs. 4 + * Subject to the GPL, v.2 5 + */ 6 + 7 + /* 8 + * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved. 9 + */ 10 + 11 + #include <linux/mm.h> 12 + #include <linux/err.h> 13 + #include <linux/sched.h> 14 + #include <linux/slab.h> 15 + #include <linux/init.h> 16 + #include <linux/linkage.h> 17 + #include <linux/random.h> 18 + #include <linux/elf.h> 19 + #include <asm/vdso.h> 20 + #include <asm/vvar.h> 21 + #include <asm/page.h> 22 + 23 + unsigned int __read_mostly vdso_enabled = 1; 24 + 25 + static struct vm_special_mapping vvar_mapping = { 26 + .name = "[vvar]" 27 + }; 28 + 29 + #ifdef CONFIG_SPARC64 30 + static struct vm_special_mapping vdso_mapping64 = { 31 + .name = "[vdso]" 32 + }; 33 + #endif 34 + 35 + #ifdef CONFIG_COMPAT 36 + static struct vm_special_mapping vdso_mapping32 = { 37 + .name = "[vdso]" 38 + }; 39 + #endif 40 + 41 + struct vvar_data *vvar_data; 42 + 43 + #define SAVE_INSTR_SIZE 4 44 + 45 + /* 46 + * Allocate pages for the vdso and vvar, and copy in the vdso text from the 47 + * kernel image. 48 + */ 49 + int __init init_vdso_image(const struct vdso_image *image, 50 + struct vm_special_mapping *vdso_mapping) 51 + { 52 + int i; 53 + struct page *dp, **dpp = NULL; 54 + int dnpages = 0; 55 + struct page *cp, **cpp = NULL; 56 + int cnpages = (image->size) / PAGE_SIZE; 57 + 58 + /* 59 + * First, the vdso text. This is initialied data, an integral number of 60 + * pages long. 61 + */ 62 + if (WARN_ON(image->size % PAGE_SIZE != 0)) 63 + goto oom; 64 + 65 + cpp = kcalloc(cnpages, sizeof(struct page *), GFP_KERNEL); 66 + vdso_mapping->pages = cpp; 67 + 68 + if (!cpp) 69 + goto oom; 70 + 71 + if (vdso_fix_stick) { 72 + /* 73 + * If the system uses %tick instead of %stick, patch the VDSO 74 + * with instruction reading %tick instead of %stick. 
75 + */ 76 + unsigned int j, k = SAVE_INSTR_SIZE; 77 + unsigned char *data = image->data; 78 + 79 + for (j = image->sym_vread_tick_patch_start; 80 + j < image->sym_vread_tick_patch_end; j++) { 81 + 82 + data[image->sym_vread_tick + k] = data[j]; 83 + k++; 84 + } 85 + } 86 + 87 + for (i = 0; i < cnpages; i++) { 88 + cp = alloc_page(GFP_KERNEL); 89 + if (!cp) 90 + goto oom; 91 + cpp[i] = cp; 92 + copy_page(page_address(cp), image->data + i * PAGE_SIZE); 93 + } 94 + 95 + /* 96 + * Now the vvar page. This is uninitialized data. 97 + */ 98 + 99 + if (vvar_data == NULL) { 100 + dnpages = (sizeof(struct vvar_data) / PAGE_SIZE) + 1; 101 + if (WARN_ON(dnpages != 1)) 102 + goto oom; 103 + dpp = kcalloc(dnpages, sizeof(struct page *), GFP_KERNEL); 104 + vvar_mapping.pages = dpp; 105 + 106 + if (!dpp) 107 + goto oom; 108 + 109 + dp = alloc_page(GFP_KERNEL); 110 + if (!dp) 111 + goto oom; 112 + 113 + dpp[0] = dp; 114 + vvar_data = page_address(dp); 115 + memset(vvar_data, 0, PAGE_SIZE); 116 + 117 + vvar_data->seq = 0; 118 + } 119 + 120 + return 0; 121 + oom: 122 + if (cpp != NULL) { 123 + for (i = 0; i < cnpages; i++) { 124 + if (cpp[i] != NULL) 125 + __free_page(cpp[i]); 126 + } 127 + kfree(cpp); 128 + vdso_mapping->pages = NULL; 129 + } 130 + 131 + if (dpp != NULL) { 132 + for (i = 0; i < dnpages; i++) { 133 + if (dpp[i] != NULL) 134 + __free_page(dpp[i]); 135 + } 136 + kfree(dpp); 137 + vvar_mapping.pages = NULL; 138 + } 139 + 140 + pr_warn("Cannot allocate vdso\n"); 141 + vdso_enabled = 0; 142 + return -ENOMEM; 143 + } 144 + 145 + static int __init init_vdso(void) 146 + { 147 + int err = 0; 148 + #ifdef CONFIG_SPARC64 149 + err = init_vdso_image(&vdso_image_64_builtin, &vdso_mapping64); 150 + if (err) 151 + return err; 152 + #endif 153 + 154 + #ifdef CONFIG_COMPAT 155 + err = init_vdso_image(&vdso_image_32_builtin, &vdso_mapping32); 156 + #endif 157 + return err; 158 + 159 + } 160 + subsys_initcall(init_vdso); 161 + 162 + struct linux_binprm; 163 + 164 + /* Shuffle the vdso 
up a bit, randomly. */ 165 + static unsigned long vdso_addr(unsigned long start, unsigned int len) 166 + { 167 + unsigned int offset; 168 + 169 + /* This loses some more bits than a modulo, but is cheaper */ 170 + offset = get_random_int() & (PTRS_PER_PTE - 1); 171 + return start + (offset << PAGE_SHIFT); 172 + } 173 + 174 + static int map_vdso(const struct vdso_image *image, 175 + struct vm_special_mapping *vdso_mapping) 176 + { 177 + struct mm_struct *mm = current->mm; 178 + struct vm_area_struct *vma; 179 + unsigned long text_start, addr = 0; 180 + int ret = 0; 181 + 182 + down_write(&mm->mmap_sem); 183 + 184 + /* 185 + * First, get an unmapped region: then randomize it, and make sure that 186 + * region is free. 187 + */ 188 + if (current->flags & PF_RANDOMIZE) { 189 + addr = get_unmapped_area(NULL, 0, 190 + image->size - image->sym_vvar_start, 191 + 0, 0); 192 + if (IS_ERR_VALUE(addr)) { 193 + ret = addr; 194 + goto up_fail; 195 + } 196 + addr = vdso_addr(addr, image->size - image->sym_vvar_start); 197 + } 198 + addr = get_unmapped_area(NULL, addr, 199 + image->size - image->sym_vvar_start, 0, 0); 200 + if (IS_ERR_VALUE(addr)) { 201 + ret = addr; 202 + goto up_fail; 203 + } 204 + 205 + text_start = addr - image->sym_vvar_start; 206 + current->mm->context.vdso = (void __user *)text_start; 207 + 208 + /* 209 + * MAYWRITE to allow gdb to COW and set breakpoints 210 + */ 211 + vma = _install_special_mapping(mm, 212 + text_start, 213 + image->size, 214 + VM_READ|VM_EXEC| 215 + VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, 216 + vdso_mapping); 217 + 218 + if (IS_ERR(vma)) { 219 + ret = PTR_ERR(vma); 220 + goto up_fail; 221 + } 222 + 223 + vma = _install_special_mapping(mm, 224 + addr, 225 + -image->sym_vvar_start, 226 + VM_READ|VM_MAYREAD, 227 + &vvar_mapping); 228 + 229 + if (IS_ERR(vma)) { 230 + ret = PTR_ERR(vma); 231 + do_munmap(mm, text_start, image->size, NULL); 232 + } 233 + 234 + up_fail: 235 + if (ret) 236 + current->mm->context.vdso = NULL; 237 + 238 + 
up_write(&mm->mmap_sem); 239 + return ret; 240 + } 241 + 242 + int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) 243 + { 244 + 245 + if (!vdso_enabled) 246 + return 0; 247 + 248 + #if defined CONFIG_COMPAT 249 + if (!(is_32bit_task())) 250 + return map_vdso(&vdso_image_64_builtin, &vdso_mapping64); 251 + else 252 + return map_vdso(&vdso_image_32_builtin, &vdso_mapping32); 253 + #else 254 + return map_vdso(&vdso_image_64_builtin, &vdso_mapping64); 255 + #endif 256 + 257 + } 258 + 259 + static __init int vdso_setup(char *s) 260 + { 261 + int err; 262 + unsigned long val; 263 + 264 + err = kstrtoul(s, 10, &val); 265 + vdso_enabled = val; 266 + return err; 267 + } 268 + __setup("vdso=", vdso_setup);