Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'timers-vdso-2026-04-12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull vdso updates from Thomas Gleixner:

- Make the handling of compat functions consistent and more robust

- Rework the underlying data store so that it is dynamically allocated,
which allows the conversion of the last holdout SPARC64 to the
generic VDSO implementation

- Rework the SPARC64 VDSO to utilize the generic implementation

- Mop up the leftovers of the non-generic VDSO support in the core
code

- Expand the VDSO selftests and make them more robust

- Allow time namespaces to be enabled independently of the generic VDSO
support, which was not possible before due to SPARC64 not using it

- Various cleanups and improvements in the related code

* tag 'timers-vdso-2026-04-12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (51 commits)
timens: Use task_lock guard in timens_get*()
timens: Use mutex guard in proc_timens_set_offset()
timens: Simplify some calls to put_time_ns()
timens: Add a __free() wrapper for put_time_ns()
timens: Remove dependency on the vDSO
vdso/timens: Move functions to new file
selftests: vDSO: vdso_test_correctness: Add a test for time()
selftests: vDSO: vdso_test_correctness: Use facilities from parse_vdso.c
selftests: vDSO: vdso_test_correctness: Handle different tv_usec types
selftests: vDSO: vdso_test_correctness: Drop SYS_getcpu fallbacks
selftests: vDSO: vdso_test_gettimeofday: Remove nolibc checks
Revert "selftests: vDSO: parse_vdso: Use UAPI headers instead of libc headers"
random: vDSO: Remove ifdeffery
random: vDSO: Trim vDSO includes
vdso/datapage: Trim down unnecessary includes
vdso/datapage: Remove inclusion of gettimeofday.h
vdso/helpers: Explicitly include vdso/processor.h
vdso/gettimeofday: Add explicit includes
random: vDSO: Add explicit includes
MIPS: vdso: Explicitly include asm/vdso/vdso.h
...

+848 -1310
+2
MAINTAINERS
··· 10796 10796 T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/vdso 10797 10797 F: include/asm-generic/vdso/vsyscall.h 10798 10798 F: include/vdso/ 10799 + F: kernel/time/namespace_vdso.c 10799 10800 F: kernel/time/vsyscall.c 10800 10801 F: lib/vdso/ 10801 10802 F: tools/testing/selftests/vDSO/ ··· 21043 21042 F: kernel/time/itimer.c 21044 21043 F: kernel/time/posix-* 21045 21044 F: kernel/time/namespace.c 21045 + F: kernel/time/namespace_vdso.c 21046 21046 21047 21047 POWER MANAGEMENT CORE 21048 21048 M: "Rafael J. Wysocki" <rafael@kernel.org>
+2
arch/arm/include/asm/vdso/gettimeofday.h
··· 11 11 #include <asm/errno.h> 12 12 #include <asm/unistd.h> 13 13 #include <asm/vdso/cp15.h> 14 + #include <vdso/clocksource.h> 15 + #include <vdso/time32.h> 14 16 #include <uapi/linux/time.h> 15 17 16 18 #define VDSO_HAS_CLOCK_GETRES 1
+3
arch/arm64/include/asm/vdso/compat_gettimeofday.h
··· 7 7 8 8 #ifndef __ASSEMBLER__ 9 9 10 + #include <vdso/clocksource.h> 11 + #include <vdso/time32.h> 12 + 10 13 #include <asm/barrier.h> 11 14 #include <asm/unistd_compat_32.h> 12 15 #include <asm/errno.h>
+2
arch/arm64/include/asm/vdso/gettimeofday.h
··· 9 9 10 10 #ifndef __ASSEMBLER__ 11 11 12 + #include <vdso/clocksource.h> 13 + 12 14 #include <asm/alternative.h> 13 15 #include <asm/arch_timer.h> 14 16 #include <asm/barrier.h>
+1
arch/loongarch/kernel/process.c
··· 52 52 #include <asm/switch_to.h> 53 53 #include <asm/unwind.h> 54 54 #include <asm/vdso.h> 55 + #include <asm/vdso/vdso.h> 55 56 56 57 #ifdef CONFIG_STACKPROTECTOR 57 58 #include <linux/stackprotector.h>
+1
arch/loongarch/kernel/vdso.c
··· 18 18 19 19 #include <asm/page.h> 20 20 #include <asm/vdso.h> 21 + #include <asm/vdso/vdso.h> 21 22 #include <vdso/helpers.h> 22 23 #include <vdso/vsyscall.h> 23 24 #include <vdso/datapage.h>
+5
arch/mips/include/asm/vdso/vdso.h
··· 4 4 * Author: Alex Smith <alex.smith@imgtec.com> 5 5 */ 6 6 7 + #ifndef __ASM_VDSO_VDSO_H 8 + #define __ASM_VDSO_VDSO_H 9 + 7 10 #include <asm/sgidefs.h> 8 11 #include <vdso/page.h> 9 12 ··· 73 70 #endif /* CONFIG_CLKSRC_MIPS_GIC */ 74 71 75 72 #endif /* __ASSEMBLER__ */ 73 + 74 + #endif /* __ASM_VDSO_VDSO_H */
+1
arch/mips/kernel/vdso.c
··· 21 21 #include <asm/mips-cps.h> 22 22 #include <asm/page.h> 23 23 #include <asm/vdso.h> 24 + #include <asm/vdso/vdso.h> 24 25 #include <vdso/helpers.h> 25 26 #include <vdso/vsyscall.h> 26 27
+1
arch/powerpc/include/asm/vdso/gettimeofday.h
··· 8 8 #include <asm/barrier.h> 9 9 #include <asm/unistd.h> 10 10 #include <uapi/linux/time.h> 11 + #include <vdso/time32.h> 11 12 12 13 #define VDSO_HAS_CLOCK_GETRES 1 13 14
+3
arch/powerpc/include/asm/vdso/processor.h
··· 4 4 5 5 #ifndef __ASSEMBLER__ 6 6 7 + #include <asm/cputable.h> 8 + #include <asm/feature-fixups.h> 9 + 7 10 /* Macros for adjusting thread priority (hardware multi-threading) */ 8 11 #ifdef CONFIG_PPC64 9 12 #define HMT_very_low() asm volatile("or 31, 31, 31 # very low priority")
+1 -2
arch/powerpc/kernel/compat_audit.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 - #undef __powerpc64__ 3 2 #include <linux/audit_arch.h> 4 - #include <asm/unistd.h> 3 + #include <asm/unistd_32.h> 5 4 6 5 #include "audit_32.h" 7 6
+1 -2
arch/s390/Makefile
··· 12 12 KBUILD_LDFLAGS := -m elf64_s390 13 13 KBUILD_AFLAGS_MODULE += -fPIC 14 14 KBUILD_CFLAGS_MODULE += -fPIC 15 - KBUILD_AFLAGS += -m64 16 - KBUILD_CFLAGS += -m64 15 + KBUILD_CPPFLAGS += -m64 17 16 KBUILD_CFLAGS += -fPIC 18 17 LDFLAGS_vmlinux := $(call ld-option,-no-pie) 19 18 extra_tools := relocs
+2 -1
arch/sparc/Kconfig
··· 104 104 select ARCH_USE_QUEUED_RWLOCKS 105 105 select ARCH_USE_QUEUED_SPINLOCKS 106 106 select GENERIC_TIME_VSYSCALL 107 - select ARCH_CLOCKSOURCE_DATA 108 107 select ARCH_HAS_PTE_SPECIAL 109 108 select PCI_DOMAINS if PCI 110 109 select ARCH_HAS_GIGANTIC_PAGE ··· 114 115 select ARCH_SUPPORTS_SCHED_SMT if SMP 115 116 select ARCH_SUPPORTS_SCHED_MC if SMP 116 117 select ARCH_HAS_LAZY_MMU_MODE 118 + select HAVE_GENERIC_VDSO 119 + select GENERIC_GETTIMEOFDAY 117 120 118 121 config ARCH_PROC_KCORE_TEXT 119 122 def_bool y
-9
arch/sparc/include/asm/clocksource.h
··· 5 5 #ifndef _ASM_SPARC_CLOCKSOURCE_H 6 6 #define _ASM_SPARC_CLOCKSOURCE_H 7 7 8 - /* VDSO clocksources */ 9 - #define VCLOCK_NONE 0 /* Nothing userspace can do. */ 10 - #define VCLOCK_TICK 1 /* Use %tick. */ 11 - #define VCLOCK_STICK 2 /* Use %stick. */ 12 - 13 - struct arch_clocksource_data { 14 - int vclock_mode; 15 - }; 16 - 17 8 #endif /* _ASM_SPARC_CLOCKSOURCE_H */
+3
arch/sparc/include/asm/processor.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 */ 2 2 #ifndef ___ASM_SPARC_PROCESSOR_H 3 3 #define ___ASM_SPARC_PROCESSOR_H 4 + 5 + #include <asm/vdso/processor.h> 6 + 4 7 #if defined(__sparc__) && defined(__arch64__) 5 8 #include <asm/processor_64.h> 6 9 #else
-2
arch/sparc/include/asm/processor_32.h
··· 91 91 extern struct task_struct *last_task_used_math; 92 92 int do_mathemu(struct pt_regs *regs, struct task_struct *fpt); 93 93 94 - #define cpu_relax() barrier() 95 - 96 94 extern void (*sparc_idle)(void); 97 95 98 96 #endif
-25
arch/sparc/include/asm/processor_64.h
··· 182 182 #define KSTK_EIP(tsk) (task_pt_regs(tsk)->tpc) 183 183 #define KSTK_ESP(tsk) (task_pt_regs(tsk)->u_regs[UREG_FP]) 184 184 185 - /* Please see the commentary in asm/backoff.h for a description of 186 - * what these instructions are doing and how they have been chosen. 187 - * To make a long story short, we are trying to yield the current cpu 188 - * strand during busy loops. 189 - */ 190 - #ifdef BUILD_VDSO 191 - #define cpu_relax() asm volatile("\n99:\n\t" \ 192 - "rd %%ccr, %%g0\n\t" \ 193 - "rd %%ccr, %%g0\n\t" \ 194 - "rd %%ccr, %%g0\n\t" \ 195 - ::: "memory") 196 - #else /* ! BUILD_VDSO */ 197 - #define cpu_relax() asm volatile("\n99:\n\t" \ 198 - "rd %%ccr, %%g0\n\t" \ 199 - "rd %%ccr, %%g0\n\t" \ 200 - "rd %%ccr, %%g0\n\t" \ 201 - ".section .pause_3insn_patch,\"ax\"\n\t"\ 202 - ".word 99b\n\t" \ 203 - "wr %%g0, 128, %%asr27\n\t" \ 204 - "nop\n\t" \ 205 - "nop\n\t" \ 206 - ".previous" \ 207 - ::: "memory") 208 - #endif 209 - 210 185 /* Prefetch support. This is tuned for UltraSPARC-III and later. 211 186 * UltraSPARC-I will treat these as nops, and UltraSPARC-II has 212 187 * a shallower prefetch queue than later chips.
-2
arch/sparc/include/asm/vdso.h
··· 8 8 struct vdso_image { 9 9 void *data; 10 10 unsigned long size; /* Always a multiple of PAGE_SIZE */ 11 - 12 - long sym_vvar_start; /* Negative offset to the vvar area */ 13 11 }; 14 12 15 13 #ifdef CONFIG_SPARC64
+10
arch/sparc/include/asm/vdso/clocksource.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef __ASM_VDSO_CLOCKSOURCE_H 3 + #define __ASM_VDSO_CLOCKSOURCE_H 4 + 5 + /* VDSO clocksources */ 6 + #define VDSO_ARCH_CLOCKMODES \ 7 + VDSO_CLOCKMODE_TICK, \ 8 + VDSO_CLOCKMODE_STICK 9 + 10 + #endif /* __ASM_VDSO_CLOCKSOURCE_H */
+184
arch/sparc/include/asm/vdso/gettimeofday.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright 2006 Andi Kleen, SUSE Labs. 4 + */ 5 + 6 + #ifndef _ASM_SPARC_VDSO_GETTIMEOFDAY_H 7 + #define _ASM_SPARC_VDSO_GETTIMEOFDAY_H 8 + 9 + #include <uapi/linux/time.h> 10 + #include <uapi/linux/unistd.h> 11 + 12 + #include <vdso/align.h> 13 + #include <vdso/clocksource.h> 14 + #include <vdso/datapage.h> 15 + #include <vdso/page.h> 16 + 17 + #include <linux/types.h> 18 + 19 + #ifdef CONFIG_SPARC64 20 + static __always_inline u64 vread_tick(void) 21 + { 22 + u64 ret; 23 + 24 + __asm__ __volatile__("rd %%tick, %0" : "=r" (ret)); 25 + return ret; 26 + } 27 + 28 + static __always_inline u64 vread_tick_stick(void) 29 + { 30 + u64 ret; 31 + 32 + __asm__ __volatile__("rd %%asr24, %0" : "=r" (ret)); 33 + return ret; 34 + } 35 + #else 36 + static __always_inline u64 vdso_shift_ns(u64 val, u32 amt) 37 + { 38 + u64 ret; 39 + 40 + __asm__ __volatile__("sllx %H1, 32, %%g1\n\t" 41 + "srl %L1, 0, %L1\n\t" 42 + "or %%g1, %L1, %%g1\n\t" 43 + "srlx %%g1, %2, %L0\n\t" 44 + "srlx %L0, 32, %H0" 45 + : "=r" (ret) 46 + : "r" (val), "r" (amt) 47 + : "g1"); 48 + return ret; 49 + } 50 + #define vdso_shift_ns vdso_shift_ns 51 + 52 + static __always_inline u64 vread_tick(void) 53 + { 54 + register unsigned long long ret asm("o4"); 55 + 56 + __asm__ __volatile__("rd %%tick, %L0\n\t" 57 + "srlx %L0, 32, %H0" 58 + : "=r" (ret)); 59 + return ret; 60 + } 61 + 62 + static __always_inline u64 vread_tick_stick(void) 63 + { 64 + register unsigned long long ret asm("o4"); 65 + 66 + __asm__ __volatile__("rd %%asr24, %L0\n\t" 67 + "srlx %L0, 32, %H0" 68 + : "=r" (ret)); 69 + return ret; 70 + } 71 + #endif 72 + 73 + static __always_inline u64 __arch_get_hw_counter(s32 clock_mode, const struct vdso_time_data *vd) 74 + { 75 + if (likely(clock_mode == VDSO_CLOCKMODE_STICK)) 76 + return vread_tick_stick(); 77 + else 78 + return vread_tick(); 79 + } 80 + 81 + #ifdef CONFIG_SPARC64 82 + #define SYSCALL_STRING \ 83 + "ta 0x6d;" \ 84 + "bcs,a 1f;" \ 
85 + " sub %%g0, %%o0, %%o0;" \ 86 + "1:" 87 + #else 88 + #define SYSCALL_STRING \ 89 + "ta 0x10;" \ 90 + "bcs,a 1f;" \ 91 + " sub %%g0, %%o0, %%o0;" \ 92 + "1:" 93 + #endif 94 + 95 + #define SYSCALL_CLOBBERS \ 96 + "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", \ 97 + "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", \ 98 + "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", \ 99 + "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31", \ 100 + "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46", \ 101 + "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62", \ 102 + "cc", "memory" 103 + 104 + #ifdef CONFIG_SPARC64 105 + 106 + static __always_inline 107 + long clock_gettime_fallback(clockid_t clock, struct __kernel_timespec *ts) 108 + { 109 + register long num __asm__("g1") = __NR_clock_gettime; 110 + register long o0 __asm__("o0") = clock; 111 + register long o1 __asm__("o1") = (long) ts; 112 + 113 + __asm__ __volatile__(SYSCALL_STRING : "=r" (o0) : "r" (num), 114 + "0" (o0), "r" (o1) : SYSCALL_CLOBBERS); 115 + return o0; 116 + } 117 + 118 + #else /* !CONFIG_SPARC64 */ 119 + 120 + static __always_inline 121 + long clock_gettime_fallback(clockid_t clock, struct __kernel_timespec *ts) 122 + { 123 + register long num __asm__("g1") = __NR_clock_gettime64; 124 + register long o0 __asm__("o0") = clock; 125 + register long o1 __asm__("o1") = (long) ts; 126 + 127 + __asm__ __volatile__(SYSCALL_STRING : "=r" (o0) : "r" (num), 128 + "0" (o0), "r" (o1) : SYSCALL_CLOBBERS); 129 + return o0; 130 + } 131 + 132 + static __always_inline 133 + long clock_gettime32_fallback(clockid_t clock, struct old_timespec32 *ts) 134 + { 135 + register long num __asm__("g1") = __NR_clock_gettime; 136 + register long o0 __asm__("o0") = clock; 137 + register long o1 __asm__("o1") = (long) ts; 138 + 139 + __asm__ __volatile__(SYSCALL_STRING : "=r" (o0) : "r" (num), 140 + "0" (o0), "r" (o1) : SYSCALL_CLOBBERS); 141 + return o0; 142 + } 143 + 144 + #endif /* CONFIG_SPARC64 */ 145 + 146 
+ static __always_inline 147 + long gettimeofday_fallback(struct __kernel_old_timeval *tv, struct timezone *tz) 148 + { 149 + register long num __asm__("g1") = __NR_gettimeofday; 150 + register long o0 __asm__("o0") = (long) tv; 151 + register long o1 __asm__("o1") = (long) tz; 152 + 153 + __asm__ __volatile__(SYSCALL_STRING : "=r" (o0) : "r" (num), 154 + "0" (o0), "r" (o1) : SYSCALL_CLOBBERS); 155 + return o0; 156 + } 157 + 158 + static __always_inline const struct vdso_time_data *__arch_get_vdso_u_time_data(void) 159 + { 160 + unsigned long ret; 161 + 162 + /* 163 + * SPARC does not support native PC-relative code relocations. 164 + * Calculate the address manually, works for 32 and 64 bit code. 165 + */ 166 + __asm__ __volatile__( 167 + "1:\n" 168 + "call 3f\n" // Jump over the embedded data and set up %o7 169 + "nop\n" // Delay slot 170 + "2:\n" 171 + ".word vdso_u_time_data - .\n" // Embedded offset to external symbol 172 + "3:\n" 173 + "add %%o7, 2b - 1b, %%o7\n" // Point %o7 to the embedded offset 174 + "ldsw [%%o7], %0\n" // Load the offset 175 + "add %0, %%o7, %0\n" // Calculate the absolute address 176 + : "=r" (ret) 177 + : 178 + : "o7"); 179 + 180 + return (const struct vdso_time_data *)ret; 181 + } 182 + #define __arch_get_vdso_u_time_data __arch_get_vdso_u_time_data 183 + 184 + #endif /* _ASM_SPARC_VDSO_GETTIMEOFDAY_H */
+41
arch/sparc/include/asm/vdso/processor.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + 3 + #ifndef _ASM_SPARC_VDSO_PROCESSOR_H 4 + #define _ASM_SPARC_VDSO_PROCESSOR_H 5 + 6 + #include <linux/compiler.h> 7 + 8 + #if defined(__arch64__) 9 + 10 + /* Please see the commentary in asm/backoff.h for a description of 11 + * what these instructions are doing and how they have been chosen. 12 + * To make a long story short, we are trying to yield the current cpu 13 + * strand during busy loops. 14 + */ 15 + #ifdef BUILD_VDSO 16 + #define cpu_relax() asm volatile("\n99:\n\t" \ 17 + "rd %%ccr, %%g0\n\t" \ 18 + "rd %%ccr, %%g0\n\t" \ 19 + "rd %%ccr, %%g0\n\t" \ 20 + ::: "memory") 21 + #else /* ! BUILD_VDSO */ 22 + #define cpu_relax() asm volatile("\n99:\n\t" \ 23 + "rd %%ccr, %%g0\n\t" \ 24 + "rd %%ccr, %%g0\n\t" \ 25 + "rd %%ccr, %%g0\n\t" \ 26 + ".section .pause_3insn_patch,\"ax\"\n\t"\ 27 + ".word 99b\n\t" \ 28 + "wr %%g0, 128, %%asr27\n\t" \ 29 + "nop\n\t" \ 30 + "nop\n\t" \ 31 + ".previous" \ 32 + ::: "memory") 33 + #endif /* BUILD_VDSO */ 34 + 35 + #else /* ! __arch64__ */ 36 + 37 + #define cpu_relax() barrier() 38 + 39 + #endif /* __arch64__ */ 40 + 41 + #endif /* _ASM_SPARC_VDSO_PROCESSOR_H */
+10
arch/sparc/include/asm/vdso/vsyscall.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + 3 + #ifndef _ASM_SPARC_VDSO_VSYSCALL_H 4 + #define _ASM_SPARC_VDSO_VSYSCALL_H 5 + 6 + #define __VDSO_PAGES 4 7 + 8 + #include <asm-generic/vdso/vsyscall.h> 9 + 10 + #endif /* _ASM_SPARC_VDSO_VSYSCALL_H */
-75
arch/sparc/include/asm/vvar.h
··· 1 - /* 2 - * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved. 3 - */ 4 - 5 - #ifndef _ASM_SPARC_VVAR_DATA_H 6 - #define _ASM_SPARC_VVAR_DATA_H 7 - 8 - #include <asm/clocksource.h> 9 - #include <asm/processor.h> 10 - #include <asm/barrier.h> 11 - #include <linux/time.h> 12 - #include <linux/types.h> 13 - 14 - struct vvar_data { 15 - unsigned int seq; 16 - 17 - int vclock_mode; 18 - struct { /* extract of a clocksource struct */ 19 - u64 cycle_last; 20 - u64 mask; 21 - int mult; 22 - int shift; 23 - } clock; 24 - /* open coded 'struct timespec' */ 25 - u64 wall_time_sec; 26 - u64 wall_time_snsec; 27 - u64 monotonic_time_snsec; 28 - u64 monotonic_time_sec; 29 - u64 monotonic_time_coarse_sec; 30 - u64 monotonic_time_coarse_nsec; 31 - u64 wall_time_coarse_sec; 32 - u64 wall_time_coarse_nsec; 33 - 34 - int tz_minuteswest; 35 - int tz_dsttime; 36 - }; 37 - 38 - extern struct vvar_data *vvar_data; 39 - extern int vdso_fix_stick; 40 - 41 - static inline unsigned int vvar_read_begin(const struct vvar_data *s) 42 - { 43 - unsigned int ret; 44 - 45 - repeat: 46 - ret = READ_ONCE(s->seq); 47 - if (unlikely(ret & 1)) { 48 - cpu_relax(); 49 - goto repeat; 50 - } 51 - smp_rmb(); /* Finish all reads before we return seq */ 52 - return ret; 53 - } 54 - 55 - static inline int vvar_read_retry(const struct vvar_data *s, 56 - unsigned int start) 57 - { 58 - smp_rmb(); /* Finish all reads before checking the value of seq */ 59 - return unlikely(s->seq != start); 60 - } 61 - 62 - static inline void vvar_write_begin(struct vvar_data *s) 63 - { 64 - ++s->seq; 65 - smp_wmb(); /* Makes sure that increment of seq is reflected */ 66 - } 67 - 68 - static inline void vvar_write_end(struct vvar_data *s) 69 - { 70 - smp_wmb(); /* Makes the value of seq current before we increment */ 71 - ++s->seq; 72 - } 73 - 74 - 75 - #endif /* _ASM_SPARC_VVAR_DATA_H */
-1
arch/sparc/kernel/Makefile
··· 41 41 obj-y += time_$(BITS).o 42 42 obj-$(CONFIG_SPARC32) += windows.o 43 43 obj-y += cpu.o 44 - obj-$(CONFIG_SPARC64) += vdso.o 45 44 obj-$(CONFIG_SPARC32) += devices.o 46 45 obj-y += ptrace_$(BITS).o 47 46 obj-y += unaligned_$(BITS).o
+3 -3
arch/sparc/kernel/time_64.c
··· 838 838 if (tlb_type == spitfire) { 839 839 if (is_hummingbird()) { 840 840 init_tick_ops(&hbtick_operations); 841 - clocksource_tick.archdata.vclock_mode = VCLOCK_NONE; 841 + clocksource_tick.vdso_clock_mode = VDSO_CLOCKMODE_NONE; 842 842 } else { 843 843 init_tick_ops(&tick_operations); 844 - clocksource_tick.archdata.vclock_mode = VCLOCK_TICK; 844 + clocksource_tick.vdso_clock_mode = VDSO_CLOCKMODE_TICK; 845 845 } 846 846 } else { 847 847 init_tick_ops(&stick_operations); 848 - clocksource_tick.archdata.vclock_mode = VCLOCK_STICK; 848 + clocksource_tick.vdso_clock_mode = VDSO_CLOCKMODE_STICK; 849 849 } 850 850 } 851 851
-69
arch/sparc/kernel/vdso.c
··· 1 - /* 2 - * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE 3 - * Copyright 2003 Andi Kleen, SuSE Labs. 4 - * 5 - * Thanks to hpa@transmeta.com for some useful hint. 6 - * Special thanks to Ingo Molnar for his early experience with 7 - * a different vsyscall implementation for Linux/IA32 and for the name. 8 - */ 9 - 10 - #include <linux/time.h> 11 - #include <linux/timekeeper_internal.h> 12 - 13 - #include <asm/vvar.h> 14 - 15 - void update_vsyscall_tz(void) 16 - { 17 - if (unlikely(vvar_data == NULL)) 18 - return; 19 - 20 - vvar_data->tz_minuteswest = sys_tz.tz_minuteswest; 21 - vvar_data->tz_dsttime = sys_tz.tz_dsttime; 22 - } 23 - 24 - void update_vsyscall(struct timekeeper *tk) 25 - { 26 - struct vvar_data *vdata = vvar_data; 27 - 28 - if (unlikely(vdata == NULL)) 29 - return; 30 - 31 - vvar_write_begin(vdata); 32 - vdata->vclock_mode = tk->tkr_mono.clock->archdata.vclock_mode; 33 - vdata->clock.cycle_last = tk->tkr_mono.cycle_last; 34 - vdata->clock.mask = tk->tkr_mono.mask; 35 - vdata->clock.mult = tk->tkr_mono.mult; 36 - vdata->clock.shift = tk->tkr_mono.shift; 37 - 38 - vdata->wall_time_sec = tk->xtime_sec; 39 - vdata->wall_time_snsec = tk->tkr_mono.xtime_nsec; 40 - 41 - vdata->monotonic_time_sec = tk->xtime_sec + 42 - tk->wall_to_monotonic.tv_sec; 43 - vdata->monotonic_time_snsec = tk->tkr_mono.xtime_nsec + 44 - (tk->wall_to_monotonic.tv_nsec << 45 - tk->tkr_mono.shift); 46 - 47 - while (vdata->monotonic_time_snsec >= 48 - (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) { 49 - vdata->monotonic_time_snsec -= 50 - ((u64)NSEC_PER_SEC) << tk->tkr_mono.shift; 51 - vdata->monotonic_time_sec++; 52 - } 53 - 54 - vdata->wall_time_coarse_sec = tk->xtime_sec; 55 - vdata->wall_time_coarse_nsec = 56 - (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift); 57 - 58 - vdata->monotonic_time_coarse_sec = 59 - vdata->wall_time_coarse_sec + tk->wall_to_monotonic.tv_sec; 60 - vdata->monotonic_time_coarse_nsec = 61 - vdata->wall_time_coarse_nsec + 
tk->wall_to_monotonic.tv_nsec; 62 - 63 - while (vdata->monotonic_time_coarse_nsec >= NSEC_PER_SEC) { 64 - vdata->monotonic_time_coarse_nsec -= NSEC_PER_SEC; 65 - vdata->monotonic_time_coarse_sec++; 66 - } 67 - 68 - vvar_write_end(vdata); 69 - }
+9 -2
arch/sparc/vdso/Makefile
··· 3 3 # Building vDSO images for sparc. 4 4 # 5 5 6 + # Include the generic Makefile to check the built vDSO: 7 + include $(srctree)/lib/vdso/Makefile.include 8 + 6 9 # files to link into the vdso 7 10 vobjs-y := vdso-note.o vclock_gettime.o 8 11 ··· 93 90 KBUILD_CFLAGS_32 += -mv8plus 94 91 $(obj)/vdso32.so.dbg: KBUILD_CFLAGS = $(KBUILD_CFLAGS_32) 95 92 93 + CHECKFLAGS_32 := $(filter-out -m64 -D__sparc_v9__ -D__arch64__, $(CHECKFLAGS)) -m32 94 + $(obj)/vdso32.so.dbg: CHECKFLAGS = $(CHECKFLAGS_32) 95 + 96 96 $(obj)/vdso32.so.dbg: FORCE \ 97 97 $(obj)/vdso32/vdso32.lds \ 98 98 $(obj)/vdso32/vclock_gettime.o \ ··· 108 102 quiet_cmd_vdso = VDSO $@ 109 103 cmd_vdso = $(LD) -nostdlib -o $@ \ 110 104 $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \ 111 - -T $(filter %.lds,$^) $(filter %.o,$^) 105 + -T $(filter %.lds,$^) $(filter %.o,$^); \ 106 + $(cmd_vdso_check) 112 107 113 - VDSO_LDFLAGS = -shared --hash-style=both --build-id=sha1 -Bsymbolic --no-undefined 108 + VDSO_LDFLAGS = -shared --hash-style=both --build-id=sha1 -Bsymbolic --no-undefined -z noexecstack
+42 -376
arch/sparc/vdso/vclock_gettime.c
··· 12 12 * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved. 13 13 */ 14 14 15 - #include <linux/kernel.h> 16 - #include <linux/time.h> 17 - #include <linux/string.h> 18 - #include <asm/io.h> 19 - #include <asm/unistd.h> 20 - #include <asm/timex.h> 21 - #include <asm/clocksource.h> 22 - #include <asm/vvar.h> 15 + #include <linux/compiler.h> 16 + #include <linux/types.h> 23 17 24 - #ifdef CONFIG_SPARC64 25 - #define SYSCALL_STRING \ 26 - "ta 0x6d;" \ 27 - "bcs,a 1f;" \ 28 - " sub %%g0, %%o0, %%o0;" \ 29 - "1:" 18 + #include <vdso/gettime.h> 19 + 20 + #include <asm/vdso/gettimeofday.h> 21 + 22 + #include "../../../../lib/vdso/gettimeofday.c" 23 + 24 + int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz) 25 + { 26 + return __cvdso_gettimeofday(tv, tz); 27 + } 28 + 29 + int gettimeofday(struct __kernel_old_timeval *, struct timezone *) 30 + __weak __alias(__vdso_gettimeofday); 31 + 32 + #if defined(CONFIG_SPARC64) 33 + int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts) 34 + { 35 + return __cvdso_clock_gettime(clock, ts); 36 + } 37 + 38 + int clock_gettime(clockid_t, struct __kernel_timespec *) 39 + __weak __alias(__vdso_clock_gettime); 40 + 30 41 #else 31 - #define SYSCALL_STRING \ 32 - "ta 0x10;" \ 33 - "bcs,a 1f;" \ 34 - " sub %%g0, %%o0, %%o0;" \ 35 - "1:" 42 + 43 + int __vdso_clock_gettime(clockid_t clock, struct old_timespec32 *ts) 44 + { 45 + return __cvdso_clock_gettime32(clock, ts); 46 + } 47 + 48 + int clock_gettime(clockid_t, struct old_timespec32 *) 49 + __weak __alias(__vdso_clock_gettime); 50 + 51 + int __vdso_clock_gettime64(clockid_t clock, struct __kernel_timespec *ts) 52 + { 53 + return __cvdso_clock_gettime(clock, ts); 54 + } 55 + 56 + int clock_gettime64(clockid_t, struct __kernel_timespec *) 57 + __weak __alias(__vdso_clock_gettime64); 58 + 36 59 #endif 37 - 38 - #define SYSCALL_CLOBBERS \ 39 - "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", \ 40 - "f8", "f9", "f10", "f11", "f12", 
"f13", "f14", "f15", \ 41 - "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", \ 42 - "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31", \ 43 - "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46", \ 44 - "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62", \ 45 - "cc", "memory" 46 - 47 - /* 48 - * Compute the vvar page's address in the process address space, and return it 49 - * as a pointer to the vvar_data. 50 - */ 51 - notrace static __always_inline struct vvar_data *get_vvar_data(void) 52 - { 53 - unsigned long ret; 54 - 55 - /* 56 - * vdso data page is the first vDSO page so grab the PC 57 - * and move up a page to get to the data page. 58 - */ 59 - __asm__("rd %%pc, %0" : "=r" (ret)); 60 - ret &= ~(8192 - 1); 61 - ret -= 8192; 62 - 63 - return (struct vvar_data *) ret; 64 - } 65 - 66 - notrace static long vdso_fallback_gettime(long clock, struct __kernel_old_timespec *ts) 67 - { 68 - register long num __asm__("g1") = __NR_clock_gettime; 69 - register long o0 __asm__("o0") = clock; 70 - register long o1 __asm__("o1") = (long) ts; 71 - 72 - __asm__ __volatile__(SYSCALL_STRING : "=r" (o0) : "r" (num), 73 - "0" (o0), "r" (o1) : SYSCALL_CLOBBERS); 74 - return o0; 75 - } 76 - 77 - notrace static long vdso_fallback_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz) 78 - { 79 - register long num __asm__("g1") = __NR_gettimeofday; 80 - register long o0 __asm__("o0") = (long) tv; 81 - register long o1 __asm__("o1") = (long) tz; 82 - 83 - __asm__ __volatile__(SYSCALL_STRING : "=r" (o0) : "r" (num), 84 - "0" (o0), "r" (o1) : SYSCALL_CLOBBERS); 85 - return o0; 86 - } 87 - 88 - #ifdef CONFIG_SPARC64 89 - notrace static __always_inline u64 __shr64(u64 val, int amt) 90 - { 91 - return val >> amt; 92 - } 93 - 94 - notrace static __always_inline u64 vread_tick(void) 95 - { 96 - u64 ret; 97 - 98 - __asm__ __volatile__("rd %%tick, %0" : "=r" (ret)); 99 - return ret; 100 - } 101 - 102 - notrace static __always_inline u64 vread_tick_stick(void) 103 
- { 104 - u64 ret; 105 - 106 - __asm__ __volatile__("rd %%asr24, %0" : "=r" (ret)); 107 - return ret; 108 - } 109 - #else 110 - notrace static __always_inline u64 __shr64(u64 val, int amt) 111 - { 112 - u64 ret; 113 - 114 - __asm__ __volatile__("sllx %H1, 32, %%g1\n\t" 115 - "srl %L1, 0, %L1\n\t" 116 - "or %%g1, %L1, %%g1\n\t" 117 - "srlx %%g1, %2, %L0\n\t" 118 - "srlx %L0, 32, %H0" 119 - : "=r" (ret) 120 - : "r" (val), "r" (amt) 121 - : "g1"); 122 - return ret; 123 - } 124 - 125 - notrace static __always_inline u64 vread_tick(void) 126 - { 127 - register unsigned long long ret asm("o4"); 128 - 129 - __asm__ __volatile__("rd %%tick, %L0\n\t" 130 - "srlx %L0, 32, %H0" 131 - : "=r" (ret)); 132 - return ret; 133 - } 134 - 135 - notrace static __always_inline u64 vread_tick_stick(void) 136 - { 137 - register unsigned long long ret asm("o4"); 138 - 139 - __asm__ __volatile__("rd %%asr24, %L0\n\t" 140 - "srlx %L0, 32, %H0" 141 - : "=r" (ret)); 142 - return ret; 143 - } 144 - #endif 145 - 146 - notrace static __always_inline u64 vgetsns(struct vvar_data *vvar) 147 - { 148 - u64 v; 149 - u64 cycles; 150 - 151 - cycles = vread_tick(); 152 - v = (cycles - vvar->clock.cycle_last) & vvar->clock.mask; 153 - return v * vvar->clock.mult; 154 - } 155 - 156 - notrace static __always_inline u64 vgetsns_stick(struct vvar_data *vvar) 157 - { 158 - u64 v; 159 - u64 cycles; 160 - 161 - cycles = vread_tick_stick(); 162 - v = (cycles - vvar->clock.cycle_last) & vvar->clock.mask; 163 - return v * vvar->clock.mult; 164 - } 165 - 166 - notrace static __always_inline int do_realtime(struct vvar_data *vvar, 167 - struct __kernel_old_timespec *ts) 168 - { 169 - unsigned long seq; 170 - u64 ns; 171 - 172 - do { 173 - seq = vvar_read_begin(vvar); 174 - ts->tv_sec = vvar->wall_time_sec; 175 - ns = vvar->wall_time_snsec; 176 - ns += vgetsns(vvar); 177 - ns = __shr64(ns, vvar->clock.shift); 178 - } while (unlikely(vvar_read_retry(vvar, seq))); 179 - 180 - ts->tv_sec += __iter_div_u64_rem(ns, 
NSEC_PER_SEC, &ns); 181 - ts->tv_nsec = ns; 182 - 183 - return 0; 184 - } 185 - 186 - notrace static __always_inline int do_realtime_stick(struct vvar_data *vvar, 187 - struct __kernel_old_timespec *ts) 188 - { 189 - unsigned long seq; 190 - u64 ns; 191 - 192 - do { 193 - seq = vvar_read_begin(vvar); 194 - ts->tv_sec = vvar->wall_time_sec; 195 - ns = vvar->wall_time_snsec; 196 - ns += vgetsns_stick(vvar); 197 - ns = __shr64(ns, vvar->clock.shift); 198 - } while (unlikely(vvar_read_retry(vvar, seq))); 199 - 200 - ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns); 201 - ts->tv_nsec = ns; 202 - 203 - return 0; 204 - } 205 - 206 - notrace static __always_inline int do_monotonic(struct vvar_data *vvar, 207 - struct __kernel_old_timespec *ts) 208 - { 209 - unsigned long seq; 210 - u64 ns; 211 - 212 - do { 213 - seq = vvar_read_begin(vvar); 214 - ts->tv_sec = vvar->monotonic_time_sec; 215 - ns = vvar->monotonic_time_snsec; 216 - ns += vgetsns(vvar); 217 - ns = __shr64(ns, vvar->clock.shift); 218 - } while (unlikely(vvar_read_retry(vvar, seq))); 219 - 220 - ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns); 221 - ts->tv_nsec = ns; 222 - 223 - return 0; 224 - } 225 - 226 - notrace static __always_inline int do_monotonic_stick(struct vvar_data *vvar, 227 - struct __kernel_old_timespec *ts) 228 - { 229 - unsigned long seq; 230 - u64 ns; 231 - 232 - do { 233 - seq = vvar_read_begin(vvar); 234 - ts->tv_sec = vvar->monotonic_time_sec; 235 - ns = vvar->monotonic_time_snsec; 236 - ns += vgetsns_stick(vvar); 237 - ns = __shr64(ns, vvar->clock.shift); 238 - } while (unlikely(vvar_read_retry(vvar, seq))); 239 - 240 - ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns); 241 - ts->tv_nsec = ns; 242 - 243 - return 0; 244 - } 245 - 246 - notrace static int do_realtime_coarse(struct vvar_data *vvar, 247 - struct __kernel_old_timespec *ts) 248 - { 249 - unsigned long seq; 250 - 251 - do { 252 - seq = vvar_read_begin(vvar); 253 - ts->tv_sec = vvar->wall_time_coarse_sec; 254 
- ts->tv_nsec = vvar->wall_time_coarse_nsec; 255 - } while (unlikely(vvar_read_retry(vvar, seq))); 256 - return 0; 257 - } 258 - 259 - notrace static int do_monotonic_coarse(struct vvar_data *vvar, 260 - struct __kernel_old_timespec *ts) 261 - { 262 - unsigned long seq; 263 - 264 - do { 265 - seq = vvar_read_begin(vvar); 266 - ts->tv_sec = vvar->monotonic_time_coarse_sec; 267 - ts->tv_nsec = vvar->monotonic_time_coarse_nsec; 268 - } while (unlikely(vvar_read_retry(vvar, seq))); 269 - 270 - return 0; 271 - } 272 - 273 - notrace int 274 - __vdso_clock_gettime(clockid_t clock, struct __kernel_old_timespec *ts) 275 - { 276 - struct vvar_data *vvd = get_vvar_data(); 277 - 278 - switch (clock) { 279 - case CLOCK_REALTIME: 280 - if (unlikely(vvd->vclock_mode == VCLOCK_NONE)) 281 - break; 282 - return do_realtime(vvd, ts); 283 - case CLOCK_MONOTONIC: 284 - if (unlikely(vvd->vclock_mode == VCLOCK_NONE)) 285 - break; 286 - return do_monotonic(vvd, ts); 287 - case CLOCK_REALTIME_COARSE: 288 - return do_realtime_coarse(vvd, ts); 289 - case CLOCK_MONOTONIC_COARSE: 290 - return do_monotonic_coarse(vvd, ts); 291 - } 292 - /* 293 - * Unknown clock ID ? Fall back to the syscall. 
294 - */ 295 - return vdso_fallback_gettime(clock, ts); 296 - } 297 - int 298 - clock_gettime(clockid_t, struct __kernel_old_timespec *) 299 - __attribute__((weak, alias("__vdso_clock_gettime"))); 300 - 301 - notrace int 302 - __vdso_clock_gettime_stick(clockid_t clock, struct __kernel_old_timespec *ts) 303 - { 304 - struct vvar_data *vvd = get_vvar_data(); 305 - 306 - switch (clock) { 307 - case CLOCK_REALTIME: 308 - if (unlikely(vvd->vclock_mode == VCLOCK_NONE)) 309 - break; 310 - return do_realtime_stick(vvd, ts); 311 - case CLOCK_MONOTONIC: 312 - if (unlikely(vvd->vclock_mode == VCLOCK_NONE)) 313 - break; 314 - return do_monotonic_stick(vvd, ts); 315 - case CLOCK_REALTIME_COARSE: 316 - return do_realtime_coarse(vvd, ts); 317 - case CLOCK_MONOTONIC_COARSE: 318 - return do_monotonic_coarse(vvd, ts); 319 - } 320 - /* 321 - * Unknown clock ID ? Fall back to the syscall. 322 - */ 323 - return vdso_fallback_gettime(clock, ts); 324 - } 325 - 326 - notrace int 327 - __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz) 328 - { 329 - struct vvar_data *vvd = get_vvar_data(); 330 - 331 - if (likely(vvd->vclock_mode != VCLOCK_NONE)) { 332 - if (likely(tv != NULL)) { 333 - union tstv_t { 334 - struct __kernel_old_timespec ts; 335 - struct __kernel_old_timeval tv; 336 - } *tstv = (union tstv_t *) tv; 337 - do_realtime(vvd, &tstv->ts); 338 - /* 339 - * Assign before dividing to ensure that the division is 340 - * done in the type of tv_usec, not tv_nsec. 341 - * 342 - * There cannot be > 1 billion usec in a second: 343 - * do_realtime() has already distributed such overflow 344 - * into tv_sec. So we can assign it to an int safely. 345 - */ 346 - tstv->tv.tv_usec = tstv->ts.tv_nsec; 347 - tstv->tv.tv_usec /= 1000; 348 - } 349 - if (unlikely(tz != NULL)) { 350 - /* Avoid memcpy. 
Some old compilers fail to inline it */ 351 - tz->tz_minuteswest = vvd->tz_minuteswest; 352 - tz->tz_dsttime = vvd->tz_dsttime; 353 - } 354 - return 0; 355 - } 356 - return vdso_fallback_gettimeofday(tv, tz); 357 - } 358 - int 359 - gettimeofday(struct __kernel_old_timeval *, struct timezone *) 360 - __attribute__((weak, alias("__vdso_gettimeofday"))); 361 - 362 - notrace int 363 - __vdso_gettimeofday_stick(struct __kernel_old_timeval *tv, struct timezone *tz) 364 - { 365 - struct vvar_data *vvd = get_vvar_data(); 366 - 367 - if (likely(vvd->vclock_mode != VCLOCK_NONE)) { 368 - if (likely(tv != NULL)) { 369 - union tstv_t { 370 - struct __kernel_old_timespec ts; 371 - struct __kernel_old_timeval tv; 372 - } *tstv = (union tstv_t *) tv; 373 - do_realtime_stick(vvd, &tstv->ts); 374 - /* 375 - * Assign before dividing to ensure that the division is 376 - * done in the type of tv_usec, not tv_nsec. 377 - * 378 - * There cannot be > 1 billion usec in a second: 379 - * do_realtime() has already distributed such overflow 380 - * into tv_sec. So we can assign it to an int safely. 381 - */ 382 - tstv->tv.tv_usec = tstv->ts.tv_nsec; 383 - tstv->tv.tv_usec /= 1000; 384 - } 385 - if (unlikely(tz != NULL)) { 386 - /* Avoid memcpy. Some old compilers fail to inline it */ 387 - tz->tz_minuteswest = vvd->tz_minuteswest; 388 - tz->tz_dsttime = vvd->tz_dsttime; 389 - } 390 - return 0; 391 - } 392 - return vdso_fallback_gettimeofday(tv, tz); 393 - }
+4 -22
arch/sparc/vdso/vdso-layout.lds.S
··· 4 4 * This script controls its layout. 5 5 */ 6 6 7 - #if defined(BUILD_VDSO64) 8 - # define SHDR_SIZE 64 9 - #elif defined(BUILD_VDSO32) 10 - # define SHDR_SIZE 40 11 - #else 12 - # error unknown VDSO target 13 - #endif 14 - 15 - #define NUM_FAKE_SHDRS 7 7 + #include <vdso/datapage.h> 8 + #include <vdso/page.h> 9 + #include <asm/vdso/vsyscall.h> 16 10 17 11 SECTIONS 18 12 { ··· 17 23 * segment. Page size is 8192 for both 64-bit and 32-bit vdso binaries 18 24 */ 19 25 20 - vvar_start = . -8192; 21 - vvar_data = vvar_start; 26 + VDSO_VVAR_SYMS 22 27 23 28 . = SIZEOF_HEADERS; 24 29 ··· 40 47 *(.bss*) 41 48 *(.dynbss*) 42 49 *(.gnu.linkonce.b.*) 43 - 44 - /* 45 - * Ideally this would live in a C file: kept in here for 46 - * compatibility with x86-64. 47 - */ 48 - VDSO_FAKE_SECTION_TABLE_START = .; 49 - . = . + NUM_FAKE_SHDRS * SHDR_SIZE; 50 - VDSO_FAKE_SECTION_TABLE_END = .; 51 50 } :text 52 - 53 - .fake_shstrtab : { *(.fake_shstrtab) } :text 54 - 55 51 56 52 .note : { *(.note.*) } :text :note 57 53
-2
arch/sparc/vdso/vdso.lds.S
··· 18 18 global: 19 19 clock_gettime; 20 20 __vdso_clock_gettime; 21 - __vdso_clock_gettime_stick; 22 21 gettimeofday; 23 22 __vdso_gettimeofday; 24 - __vdso_gettimeofday_stick; 25 23 local: *; 26 24 }; 27 25 }
-24
arch/sparc/vdso/vdso2c.c
··· 58 58 59 59 const char *outfilename; 60 60 61 - /* Symbols that we need in vdso2c. */ 62 - enum { 63 - sym_vvar_start, 64 - sym_VDSO_FAKE_SECTION_TABLE_START, 65 - sym_VDSO_FAKE_SECTION_TABLE_END, 66 - }; 67 - 68 - struct vdso_sym { 69 - const char *name; 70 - int export; 71 - }; 72 - 73 - struct vdso_sym required_syms[] = { 74 - [sym_vvar_start] = {"vvar_start", 1}, 75 - [sym_VDSO_FAKE_SECTION_TABLE_START] = { 76 - "VDSO_FAKE_SECTION_TABLE_START", 0 77 - }, 78 - [sym_VDSO_FAKE_SECTION_TABLE_END] = { 79 - "VDSO_FAKE_SECTION_TABLE_END", 0 80 - }, 81 - }; 82 - 83 61 __attribute__((format(printf, 1, 2))) __attribute__((noreturn)) 84 62 static void fail(const char *format, ...) 85 63 { ··· 96 118 97 119 #define PUT_BE(x, val) \ 98 120 PBE(x, val, 64, PBE(x, val, 32, PBE(x, val, 16, LAST_PBE(x, val)))) 99 - 100 - #define NSYMS ARRAY_SIZE(required_syms) 101 121 102 122 #define BITSFUNC3(name, bits, suffix) name##bits##suffix 103 123 #define BITSFUNC2(name, bits, suffix) BITSFUNC3(name, bits, suffix)
+1 -44
arch/sparc/vdso/vdso2c.h
··· 17 17 unsigned long mapping_size; 18 18 int i; 19 19 unsigned long j; 20 - ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr; 20 + ELF(Shdr) *symtab_hdr = NULL; 21 21 ELF(Ehdr) *hdr = (ELF(Ehdr) *)raw_addr; 22 22 ELF(Dyn) *dyn = 0, *dyn_end = 0; 23 - INT_BITS syms[NSYMS] = {}; 24 - 25 23 ELF(Phdr) *pt = (ELF(Phdr) *)(raw_addr + GET_BE(&hdr->e_phoff)); 26 24 27 25 /* Walk the segment table. */ ··· 70 72 if (!symtab_hdr) 71 73 fail("no symbol table\n"); 72 74 73 - strtab_hdr = raw_addr + GET_BE(&hdr->e_shoff) + 74 - GET_BE(&hdr->e_shentsize) * GET_BE(&symtab_hdr->sh_link); 75 - 76 - /* Walk the symbol table */ 77 - for (i = 0; 78 - i < GET_BE(&symtab_hdr->sh_size) / GET_BE(&symtab_hdr->sh_entsize); 79 - i++) { 80 - int k; 81 - 82 - ELF(Sym) *sym = raw_addr + GET_BE(&symtab_hdr->sh_offset) + 83 - GET_BE(&symtab_hdr->sh_entsize) * i; 84 - const char *name = raw_addr + GET_BE(&strtab_hdr->sh_offset) + 85 - GET_BE(&sym->st_name); 86 - 87 - for (k = 0; k < NSYMS; k++) { 88 - if (!strcmp(name, required_syms[k].name)) { 89 - if (syms[k]) { 90 - fail("duplicate symbol %s\n", 91 - required_syms[k].name); 92 - } 93 - 94 - /* 95 - * Careful: we use negative addresses, but 96 - * st_value is unsigned, so we rely 97 - * on syms[k] being a signed type of the 98 - * correct width. 99 - */ 100 - syms[k] = GET_BE(&sym->st_value); 101 - } 102 - } 103 - } 104 - 105 - /* Validate mapping addresses. 
*/ 106 - if (syms[sym_vvar_start] % 8192) 107 - fail("vvar_begin must be a multiple of 8192\n"); 108 - 109 75 if (!name) { 110 76 fwrite(stripped_addr, stripped_len, 1, outfile); 111 77 return; ··· 95 133 fprintf(outfile, "const struct vdso_image %s_builtin = {\n", name); 96 134 fprintf(outfile, "\t.data = raw_data,\n"); 97 135 fprintf(outfile, "\t.size = %lu,\n", mapping_size); 98 - for (i = 0; i < NSYMS; i++) { 99 - if (required_syms[i].export && syms[i]) 100 - fprintf(outfile, "\t.sym_%s = %" PRIi64 ",\n", 101 - required_syms[i].name, (int64_t)syms[i]); 102 - } 103 136 fprintf(outfile, "};\n"); 104 137 }
+2 -2
arch/sparc/vdso/vdso32/vdso32.lds.S
··· 17 17 global: 18 18 clock_gettime; 19 19 __vdso_clock_gettime; 20 - __vdso_clock_gettime_stick; 20 + clock_gettime64; 21 + __vdso_clock_gettime64; 21 22 gettimeofday; 22 23 __vdso_gettimeofday; 23 - __vdso_gettimeofday_stick; 24 24 local: *; 25 25 }; 26 26 }
+14 -260
arch/sparc/vdso/vma.c
··· 16 16 #include <linux/linkage.h> 17 17 #include <linux/random.h> 18 18 #include <linux/elf.h> 19 + #include <linux/vdso_datastore.h> 19 20 #include <asm/cacheflush.h> 20 21 #include <asm/spitfire.h> 21 22 #include <asm/vdso.h> 22 - #include <asm/vvar.h> 23 23 #include <asm/page.h> 24 24 25 - unsigned int __read_mostly vdso_enabled = 1; 25 + #include <vdso/datapage.h> 26 + #include <asm/vdso/vsyscall.h> 26 27 27 - static struct vm_special_mapping vvar_mapping = { 28 - .name = "[vvar]" 29 - }; 28 + unsigned int __read_mostly vdso_enabled = 1; 30 29 31 30 #ifdef CONFIG_SPARC64 32 31 static struct vm_special_mapping vdso_mapping64 = { ··· 39 40 }; 40 41 #endif 41 42 42 - struct vvar_data *vvar_data; 43 - 44 - struct vdso_elfinfo32 { 45 - Elf32_Ehdr *hdr; 46 - Elf32_Sym *dynsym; 47 - unsigned long dynsymsize; 48 - const char *dynstr; 49 - unsigned long text; 50 - }; 51 - 52 - struct vdso_elfinfo64 { 53 - Elf64_Ehdr *hdr; 54 - Elf64_Sym *dynsym; 55 - unsigned long dynsymsize; 56 - const char *dynstr; 57 - unsigned long text; 58 - }; 59 - 60 - struct vdso_elfinfo { 61 - union { 62 - struct vdso_elfinfo32 elf32; 63 - struct vdso_elfinfo64 elf64; 64 - } u; 65 - }; 66 - 67 - static void *one_section64(struct vdso_elfinfo64 *e, const char *name, 68 - unsigned long *size) 69 - { 70 - const char *snames; 71 - Elf64_Shdr *shdrs; 72 - unsigned int i; 73 - 74 - shdrs = (void *)e->hdr + e->hdr->e_shoff; 75 - snames = (void *)e->hdr + shdrs[e->hdr->e_shstrndx].sh_offset; 76 - for (i = 1; i < e->hdr->e_shnum; i++) { 77 - if (!strcmp(snames+shdrs[i].sh_name, name)) { 78 - if (size) 79 - *size = shdrs[i].sh_size; 80 - return (void *)e->hdr + shdrs[i].sh_offset; 81 - } 82 - } 83 - return NULL; 84 - } 85 - 86 - static int find_sections64(const struct vdso_image *image, struct vdso_elfinfo *_e) 87 - { 88 - struct vdso_elfinfo64 *e = &_e->u.elf64; 89 - 90 - e->hdr = image->data; 91 - e->dynsym = one_section64(e, ".dynsym", &e->dynsymsize); 92 - e->dynstr = one_section64(e, ".dynstr", 
NULL); 93 - 94 - if (!e->dynsym || !e->dynstr) { 95 - pr_err("VDSO64: Missing symbol sections.\n"); 96 - return -ENODEV; 97 - } 98 - return 0; 99 - } 100 - 101 - static Elf64_Sym *find_sym64(const struct vdso_elfinfo64 *e, const char *name) 102 - { 103 - unsigned int i; 104 - 105 - for (i = 0; i < (e->dynsymsize / sizeof(Elf64_Sym)); i++) { 106 - Elf64_Sym *s = &e->dynsym[i]; 107 - if (s->st_name == 0) 108 - continue; 109 - if (!strcmp(e->dynstr + s->st_name, name)) 110 - return s; 111 - } 112 - return NULL; 113 - } 114 - 115 - static int patchsym64(struct vdso_elfinfo *_e, const char *orig, 116 - const char *new) 117 - { 118 - struct vdso_elfinfo64 *e = &_e->u.elf64; 119 - Elf64_Sym *osym = find_sym64(e, orig); 120 - Elf64_Sym *nsym = find_sym64(e, new); 121 - 122 - if (!nsym || !osym) { 123 - pr_err("VDSO64: Missing symbols.\n"); 124 - return -ENODEV; 125 - } 126 - osym->st_value = nsym->st_value; 127 - osym->st_size = nsym->st_size; 128 - osym->st_info = nsym->st_info; 129 - osym->st_other = nsym->st_other; 130 - osym->st_shndx = nsym->st_shndx; 131 - 132 - return 0; 133 - } 134 - 135 - static void *one_section32(struct vdso_elfinfo32 *e, const char *name, 136 - unsigned long *size) 137 - { 138 - const char *snames; 139 - Elf32_Shdr *shdrs; 140 - unsigned int i; 141 - 142 - shdrs = (void *)e->hdr + e->hdr->e_shoff; 143 - snames = (void *)e->hdr + shdrs[e->hdr->e_shstrndx].sh_offset; 144 - for (i = 1; i < e->hdr->e_shnum; i++) { 145 - if (!strcmp(snames+shdrs[i].sh_name, name)) { 146 - if (size) 147 - *size = shdrs[i].sh_size; 148 - return (void *)e->hdr + shdrs[i].sh_offset; 149 - } 150 - } 151 - return NULL; 152 - } 153 - 154 - static int find_sections32(const struct vdso_image *image, struct vdso_elfinfo *_e) 155 - { 156 - struct vdso_elfinfo32 *e = &_e->u.elf32; 157 - 158 - e->hdr = image->data; 159 - e->dynsym = one_section32(e, ".dynsym", &e->dynsymsize); 160 - e->dynstr = one_section32(e, ".dynstr", NULL); 161 - 162 - if (!e->dynsym || !e->dynstr) { 163 - 
pr_err("VDSO32: Missing symbol sections.\n"); 164 - return -ENODEV; 165 - } 166 - return 0; 167 - } 168 - 169 - static Elf32_Sym *find_sym32(const struct vdso_elfinfo32 *e, const char *name) 170 - { 171 - unsigned int i; 172 - 173 - for (i = 0; i < (e->dynsymsize / sizeof(Elf32_Sym)); i++) { 174 - Elf32_Sym *s = &e->dynsym[i]; 175 - if (s->st_name == 0) 176 - continue; 177 - if (!strcmp(e->dynstr + s->st_name, name)) 178 - return s; 179 - } 180 - return NULL; 181 - } 182 - 183 - static int patchsym32(struct vdso_elfinfo *_e, const char *orig, 184 - const char *new) 185 - { 186 - struct vdso_elfinfo32 *e = &_e->u.elf32; 187 - Elf32_Sym *osym = find_sym32(e, orig); 188 - Elf32_Sym *nsym = find_sym32(e, new); 189 - 190 - if (!nsym || !osym) { 191 - pr_err("VDSO32: Missing symbols.\n"); 192 - return -ENODEV; 193 - } 194 - osym->st_value = nsym->st_value; 195 - osym->st_size = nsym->st_size; 196 - osym->st_info = nsym->st_info; 197 - osym->st_other = nsym->st_other; 198 - osym->st_shndx = nsym->st_shndx; 199 - 200 - return 0; 201 - } 202 - 203 - static int find_sections(const struct vdso_image *image, struct vdso_elfinfo *e, 204 - bool elf64) 205 - { 206 - if (elf64) 207 - return find_sections64(image, e); 208 - else 209 - return find_sections32(image, e); 210 - } 211 - 212 - static int patch_one_symbol(struct vdso_elfinfo *e, const char *orig, 213 - const char *new_target, bool elf64) 214 - { 215 - if (elf64) 216 - return patchsym64(e, orig, new_target); 217 - else 218 - return patchsym32(e, orig, new_target); 219 - } 220 - 221 - static int stick_patch(const struct vdso_image *image, struct vdso_elfinfo *e, bool elf64) 222 - { 223 - int err; 224 - 225 - err = find_sections(image, e, elf64); 226 - if (err) 227 - return err; 228 - 229 - err = patch_one_symbol(e, 230 - "__vdso_gettimeofday", 231 - "__vdso_gettimeofday_stick", elf64); 232 - if (err) 233 - return err; 234 - 235 - return patch_one_symbol(e, 236 - "__vdso_clock_gettime", 237 - "__vdso_clock_gettime_stick", 
elf64); 238 - return 0; 239 - } 240 - 241 43 /* 242 - * Allocate pages for the vdso and vvar, and copy in the vdso text from the 44 + * Allocate pages for the vdso and copy in the vdso text from the 243 45 * kernel image. 244 46 */ 245 47 static int __init init_vdso_image(const struct vdso_image *image, ··· 48 248 bool elf64) 49 249 { 50 250 int cnpages = (image->size) / PAGE_SIZE; 51 - struct page *dp, **dpp = NULL; 52 251 struct page *cp, **cpp = NULL; 53 - struct vdso_elfinfo ei; 54 - int i, dnpages = 0; 55 - 56 - if (tlb_type != spitfire) { 57 - int err = stick_patch(image, &ei, elf64); 58 - if (err) 59 - return err; 60 - } 252 + int i; 61 253 62 254 /* 63 255 * First, the vdso text. This is initialied data, an integral number of ··· 72 280 copy_page(page_address(cp), image->data + i * PAGE_SIZE); 73 281 } 74 282 75 - /* 76 - * Now the vvar page. This is uninitialized data. 77 - */ 78 - 79 - if (vvar_data == NULL) { 80 - dnpages = (sizeof(struct vvar_data) / PAGE_SIZE) + 1; 81 - if (WARN_ON(dnpages != 1)) 82 - goto oom; 83 - dpp = kzalloc_objs(struct page *, dnpages); 84 - vvar_mapping.pages = dpp; 85 - 86 - if (!dpp) 87 - goto oom; 88 - 89 - dp = alloc_page(GFP_KERNEL); 90 - if (!dp) 91 - goto oom; 92 - 93 - dpp[0] = dp; 94 - vvar_data = page_address(dp); 95 - memset(vvar_data, 0, PAGE_SIZE); 96 - 97 - vvar_data->seq = 0; 98 - } 99 - 100 283 return 0; 101 284 oom: 102 285 if (cpp != NULL) { ··· 81 314 } 82 315 kfree(cpp); 83 316 vdso_mapping->pages = NULL; 84 - } 85 - 86 - if (dpp != NULL) { 87 - for (i = 0; i < dnpages; i++) { 88 - if (dpp[i] != NULL) 89 - __free_page(dpp[i]); 90 - } 91 - kfree(dpp); 92 - vvar_mapping.pages = NULL; 93 317 } 94 318 95 319 pr_warn("Cannot allocate vdso\n"); ··· 117 359 return start + (offset << PAGE_SHIFT); 118 360 } 119 361 362 + static_assert(VDSO_NR_PAGES == __VDSO_PAGES); 363 + 120 364 static int map_vdso(const struct vdso_image *image, 121 365 struct vm_special_mapping *vdso_mapping) 122 366 { 367 + const size_t area_size 
= image->size + VDSO_NR_PAGES * PAGE_SIZE; 123 368 struct mm_struct *mm = current->mm; 124 369 struct vm_area_struct *vma; 125 370 unsigned long text_start, addr = 0; ··· 135 374 * region is free. 136 375 */ 137 376 if (current->flags & PF_RANDOMIZE) { 138 - addr = get_unmapped_area(NULL, 0, 139 - image->size - image->sym_vvar_start, 140 - 0, 0); 377 + addr = get_unmapped_area(NULL, 0, area_size, 0, 0); 141 378 if (IS_ERR_VALUE(addr)) { 142 379 ret = addr; 143 380 goto up_fail; 144 381 } 145 - addr = vdso_addr(addr, image->size - image->sym_vvar_start); 382 + addr = vdso_addr(addr, area_size); 146 383 } 147 - addr = get_unmapped_area(NULL, addr, 148 - image->size - image->sym_vvar_start, 0, 0); 384 + addr = get_unmapped_area(NULL, addr, area_size, 0, 0); 149 385 if (IS_ERR_VALUE(addr)) { 150 386 ret = addr; 151 387 goto up_fail; 152 388 } 153 389 154 - text_start = addr - image->sym_vvar_start; 390 + text_start = addr + VDSO_NR_PAGES * PAGE_SIZE; 155 391 current->mm->context.vdso = (void __user *)text_start; 156 392 157 393 /* ··· 166 408 goto up_fail; 167 409 } 168 410 169 - vma = _install_special_mapping(mm, 170 - addr, 171 - -image->sym_vvar_start, 172 - VM_READ|VM_MAYREAD, 173 - &vvar_mapping); 411 + vma = vdso_install_vvar_mapping(mm, addr); 174 412 175 413 if (IS_ERR(vma)) { 176 414 ret = PTR_ERR(vma);
+4
arch/x86/entry/vdso/vdso32/Makefile
··· 15 15 flags-$(CONFIG_X86_64) += -include $(src)/fake_32bit_build.h 16 16 flags-remove-y := -m64 17 17 18 + # Checker flags 19 + CHECKFLAGS := $(subst -m64,-m32,$(CHECKFLAGS)) 20 + CHECKFLAGS := $(subst -D__x86_64__,-D__i386__,$(CHECKFLAGS)) 21 + 18 22 # The location of this include matters! 19 23 include $(src)/../common/Makefile.include 20 24
+6 -10
drivers/char/random.c
··· 56 56 #include <linux/sched/isolation.h> 57 57 #include <crypto/chacha.h> 58 58 #include <crypto/blake2s.h> 59 - #ifdef CONFIG_VDSO_GETRANDOM 60 - #include <vdso/getrandom.h> 61 59 #include <vdso/datapage.h> 62 - #include <vdso/vsyscall.h> 63 - #endif 64 60 #include <asm/archrandom.h> 65 61 #include <asm/processor.h> 66 62 #include <asm/irq.h> ··· 265 269 if (next_gen == ULONG_MAX) 266 270 ++next_gen; 267 271 WRITE_ONCE(base_crng.generation, next_gen); 268 - #ifdef CONFIG_VDSO_GETRANDOM 272 + 269 273 /* base_crng.generation's invalid value is ULONG_MAX, while 270 274 * vdso_k_rng_data->generation's invalid value is 0, so add one to the 271 275 * former to arrive at the latter. Use smp_store_release so that this ··· 279 283 * because the vDSO side only checks whether the value changed, without 280 284 * actually using or interpreting the value. 281 285 */ 282 - smp_store_release((unsigned long *)&vdso_k_rng_data->generation, next_gen + 1); 283 - #endif 286 + if (IS_ENABLED(CONFIG_VDSO_GETRANDOM)) 287 + smp_store_release((unsigned long *)&vdso_k_rng_data->generation, next_gen + 1); 288 + 284 289 if (!static_branch_likely(&crng_is_ready)) 285 290 crng_init = CRNG_READY; 286 291 spin_unlock_irqrestore(&base_crng.lock, flags); ··· 731 734 if (system_dfl_wq) 732 735 queue_work(system_dfl_wq, &set_ready); 733 736 atomic_notifier_call_chain(&random_ready_notifier, 0, NULL); 734 - #ifdef CONFIG_VDSO_GETRANDOM 735 - WRITE_ONCE(vdso_k_rng_data->is_ready, true); 736 - #endif 737 + if (IS_ENABLED(CONFIG_VDSO_GETRANDOM)) 738 + WRITE_ONCE(vdso_k_rng_data->is_ready, true); 737 739 wake_up_interruptible(&crng_init_wait); 738 740 kill_fasync(&fasync, SIGIO, POLL_IN); 739 741 pr_notice("crng init done\n");
+9
include/asm-generic/bitsperlong.h
··· 19 19 #error Inconsistent word size. Check asm/bitsperlong.h 20 20 #endif 21 21 22 + #if __CHAR_BIT__ * __SIZEOF_LONG__ != __BITS_PER_LONG 23 + #error Inconsistent word size. Check asm/bitsperlong.h 24 + #endif 25 + 26 + #ifndef __ASSEMBLER__ 27 + _Static_assert(sizeof(long) * 8 == __BITS_PER_LONG, 28 + "Inconsistent word size. Check asm/bitsperlong.h"); 29 + #endif 30 + 22 31 #ifndef BITS_PER_LONG_LONG 23 32 #define BITS_PER_LONG_LONG 64 24 33 #endif
+1 -5
include/linux/clocksource.h
··· 25 25 struct clocksource; 26 26 struct module; 27 27 28 - #if defined(CONFIG_ARCH_CLOCKSOURCE_DATA) || \ 29 - defined(CONFIG_GENERIC_GETTIMEOFDAY) 28 + #if defined(CONFIG_GENERIC_GETTIMEOFDAY) 30 29 #include <asm/clocksource.h> 31 30 #endif 32 31 ··· 102 103 u32 shift; 103 104 u64 max_idle_ns; 104 105 u32 maxadj; 105 - #ifdef CONFIG_ARCH_CLOCKSOURCE_DATA 106 - struct arch_clocksource_data archdata; 107 - #endif 108 106 u64 max_cycles; 109 107 u64 max_raw_delta; 110 108 const char *name;
+19 -20
include/linux/time_namespace.h
··· 8 8 #include <linux/ns_common.h> 9 9 #include <linux/err.h> 10 10 #include <linux/time64.h> 11 + #include <linux/cleanup.h> 11 12 12 13 struct user_namespace; 13 14 extern struct user_namespace init_user_ns; ··· 26 25 struct ucounts *ucounts; 27 26 struct ns_common ns; 28 27 struct timens_offsets offsets; 28 + #ifdef CONFIG_TIME_NS_VDSO 29 29 struct page *vvar_page; 30 + #endif 30 31 /* If set prevents changing offsets after any task joined namespace. */ 31 32 bool frozen_offsets; 32 33 } __randomize_layout; ··· 41 38 return container_of(ns, struct time_namespace, ns); 42 39 } 43 40 void __init time_ns_init(void); 44 - extern int vdso_join_timens(struct task_struct *task, 45 - struct time_namespace *ns); 46 - extern void timens_commit(struct task_struct *tsk, struct time_namespace *ns); 47 41 48 42 static inline struct time_namespace *get_time_ns(struct time_namespace *ns) 49 43 { ··· 53 53 struct time_namespace *old_ns); 54 54 void free_time_ns(struct time_namespace *ns); 55 55 void timens_on_fork(struct nsproxy *nsproxy, struct task_struct *tsk); 56 - struct page *find_timens_vvar_page(struct vm_area_struct *vma); 57 56 58 57 static inline void put_time_ns(struct time_namespace *ns) 59 58 { ··· 116 117 { 117 118 } 118 119 119 - static inline int vdso_join_timens(struct task_struct *task, 120 - struct time_namespace *ns) 121 - { 122 - return 0; 123 - } 124 - 125 - static inline void timens_commit(struct task_struct *tsk, 126 - struct time_namespace *ns) 127 - { 128 - } 129 - 130 120 static inline struct time_namespace *get_time_ns(struct time_namespace *ns) 131 121 { 132 122 return NULL; ··· 142 154 return; 143 155 } 144 156 145 - static inline struct page *find_timens_vvar_page(struct vm_area_struct *vma) 146 - { 147 - return NULL; 148 - } 149 - 150 157 static inline void timens_add_monotonic(struct timespec64 *ts) { } 151 158 static inline void timens_add_boottime(struct timespec64 *ts) { } 152 159 ··· 157 174 return tim; 158 175 } 159 176 #endif 177 + 178 + 
#ifdef CONFIG_TIME_NS_VDSO 179 + extern void timens_commit(struct task_struct *tsk, struct time_namespace *ns); 180 + struct page *find_timens_vvar_page(struct vm_area_struct *vma); 181 + #else /* !CONFIG_TIME_NS_VDSO */ 182 + static inline void timens_commit(struct task_struct *tsk, struct time_namespace *ns) 183 + { 184 + } 185 + 186 + static inline struct page *find_timens_vvar_page(struct vm_area_struct *vma) 187 + { 188 + return NULL; 189 + } 190 + #endif /* CONFIG_TIME_NS_VDSO */ 191 + 192 + DEFINE_FREE(time_ns, struct time_namespace *, if (_T) put_time_ns(_T)) 160 193 161 194 #endif /* _LINUX_TIMENS_H */
+6
include/linux/vdso_datastore.h
··· 2 2 #ifndef _LINUX_VDSO_DATASTORE_H 3 3 #define _LINUX_VDSO_DATASTORE_H 4 4 5 + #ifdef CONFIG_HAVE_GENERIC_VDSO 5 6 #include <linux/mm_types.h> 6 7 7 8 extern const struct vm_special_mapping vdso_vvar_mapping; 8 9 struct vm_area_struct *vdso_install_vvar_mapping(struct mm_struct *mm, unsigned long addr); 10 + 11 + void __init vdso_setup_data_pages(void); 12 + #else /* !CONFIG_HAVE_GENERIC_VDSO */ 13 + static inline void vdso_setup_data_pages(void) { } 14 + #endif /* CONFIG_HAVE_GENERIC_VDSO */ 9 15 10 16 #endif /* _LINUX_VDSO_DATASTORE_H */
+4 -23
include/vdso/datapage.h
··· 4 4 5 5 #ifndef __ASSEMBLY__ 6 6 7 - #include <linux/compiler.h> 7 + #include <linux/types.h> 8 + 8 9 #include <uapi/linux/bits.h> 9 10 #include <uapi/linux/time.h> 10 - #include <uapi/linux/types.h> 11 - #include <uapi/asm-generic/errno-base.h> 12 11 13 12 #include <vdso/align.h> 14 13 #include <vdso/bits.h> 15 14 #include <vdso/cache.h> 16 - #include <vdso/clocksource.h> 17 - #include <vdso/ktime.h> 18 - #include <vdso/limits.h> 19 - #include <vdso/math64.h> 20 15 #include <vdso/page.h> 21 - #include <vdso/processor.h> 22 16 #include <vdso/time.h> 23 - #include <vdso/time32.h> 24 - #include <vdso/time64.h> 25 17 26 18 #ifdef CONFIG_ARCH_HAS_VDSO_TIME_DATA 27 19 #include <asm/vdso/time_data.h> ··· 72 80 * @mask: clocksource mask 73 81 * @mult: clocksource multiplier 74 82 * @shift: clocksource shift 75 - * @basetime[clock_id]: basetime per clock_id 76 - * @offset[clock_id]: time namespace offset per clock_id 83 + * @basetime: basetime per clock_id 84 + * @offset: time namespace offset per clock_id 77 85 * 78 86 * See also struct vdso_time_data for basic access and ordering information as 79 87 * struct vdso_clock is used there. ··· 175 183 VDSO_ARCH_PAGES_END = VDSO_ARCH_PAGES_START + VDSO_ARCH_DATA_PAGES - 1, 176 184 VDSO_NR_PAGES 177 185 }; 178 - 179 - /* 180 - * The generic vDSO implementation requires that gettimeofday.h 181 - * provides: 182 - * - __arch_get_hw_counter(): to get the hw counter based on the 183 - * clock_mode. 184 - * - gettimeofday_fallback(): fallback for gettimeofday. 185 - * - clock_gettime_fallback(): fallback for clock_gettime. 186 - * - clock_getres_fallback(): fallback for clock_getres. 187 - */ 188 - #include <asm/vdso/gettimeofday.h> 189 186 190 187 #else /* !__ASSEMBLY__ */ 191 188
+30 -1
include/vdso/helpers.h
··· 6 6 7 7 #include <asm/barrier.h> 8 8 #include <vdso/datapage.h> 9 + #include <vdso/processor.h> 10 + #include <vdso/clocksource.h> 11 + 12 + static __always_inline bool vdso_is_timens_clock(const struct vdso_clock *vc) 13 + { 14 + return IS_ENABLED(CONFIG_TIME_NS) && vc->clock_mode == VDSO_CLOCKMODE_TIMENS; 15 + } 9 16 10 17 static __always_inline u32 vdso_read_begin(const struct vdso_clock *vc) 11 18 { ··· 25 18 return seq; 26 19 } 27 20 21 + /* 22 + * Variant of vdso_read_begin() to handle VDSO_CLOCKMODE_TIMENS. 23 + * 24 + * Time namespace enabled tasks have a special VVAR page installed which has 25 + * vc->seq set to 1 and vc->clock_mode set to VDSO_CLOCKMODE_TIMENS. For non 26 + * time namespace affected tasks this does not affect performance because if 27 + * vc->seq is odd, i.e. a concurrent update is in progress the extra check for 28 + * vc->clock_mode is just a few extra instructions while spin waiting for 29 + * vc->seq to become even again. 30 + */ 31 + static __always_inline bool vdso_read_begin_timens(const struct vdso_clock *vc, u32 *seq) 32 + { 33 + while (unlikely((*seq = READ_ONCE(vc->seq)) & 1)) { 34 + if (vdso_is_timens_clock(vc)) 35 + return true; 36 + cpu_relax(); 37 + } 38 + smp_rmb(); 39 + 40 + return false; 41 + } 42 + 28 43 static __always_inline u32 vdso_read_retry(const struct vdso_clock *vc, 29 44 u32 start) 30 45 { ··· 54 25 55 26 smp_rmb(); 56 27 seq = READ_ONCE(vc->seq); 57 - return seq != start; 28 + return unlikely(seq != start); 58 29 } 59 30 60 31 static __always_inline void vdso_write_seq_begin(struct vdso_clock *vc)
+3 -1
init/Kconfig
··· 1400 1400 1401 1401 config TIME_NS 1402 1402 bool "TIME namespace" 1403 - depends on GENERIC_GETTIMEOFDAY 1404 1403 default y 1405 1404 help 1406 1405 In this namespace boottime and monotonic clocks can be set. 1407 1406 The time will keep going with the same pace. 1407 + 1408 + config TIME_NS_VDSO 1409 + def_bool TIME_NS && GENERIC_GETTIMEOFDAY 1408 1410 1409 1411 config IPC_NS 1410 1412 bool "IPC namespace"
+2
init/main.c
··· 106 106 #include <linux/ptdump.h> 107 107 #include <linux/time_namespace.h> 108 108 #include <linux/unaligned.h> 109 + #include <linux/vdso_datastore.h> 109 110 #include <net/net_namespace.h> 110 111 111 112 #include <asm/io.h> ··· 1128 1127 srcu_init(); 1129 1128 hrtimers_init(); 1130 1129 softirq_init(); 1130 + vdso_setup_data_pages(); 1131 1131 timekeeping_init(); 1132 1132 time_init(); 1133 1133
-4
kernel/time/Kconfig
··· 9 9 config CLOCKSOURCE_WATCHDOG 10 10 bool 11 11 12 - # Architecture has extra clocksource data 13 - config ARCH_CLOCKSOURCE_DATA 14 - bool 15 - 16 12 # Architecture has extra clocksource init called from registration 17 13 config ARCH_CLOCKSOURCE_INIT 18 14 bool
+1
kernel/time/Makefile
··· 30 30 obj-$(CONFIG_DEBUG_FS) += timekeeping_debug.o 31 31 obj-$(CONFIG_TEST_UDELAY) += test_udelay.o 32 32 obj-$(CONFIG_TIME_NS) += namespace.o 33 + obj-$(CONFIG_TIME_NS_VDSO) += namespace_vdso.o 33 34 obj-$(CONFIG_TEST_CLOCKSOURCE_WATCHDOG) += clocksource-wdtest.o 34 35 obj-$(CONFIG_TIME_KUNIT_TEST) += time_test.o
+36 -167
kernel/time/namespace.c
··· 18 18 #include <linux/cred.h> 19 19 #include <linux/err.h> 20 20 #include <linux/mm.h> 21 + #include <linux/cleanup.h> 21 22 22 - #include <vdso/datapage.h> 23 + #include "namespace_internal.h" 23 24 24 25 ktime_t do_timens_ktime_to_host(clockid_t clockid, ktime_t tim, 25 26 struct timens_offsets *ns_offsets) ··· 94 93 if (!ns) 95 94 goto fail_dec; 96 95 97 - ns->vvar_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); 98 - if (!ns->vvar_page) 96 + err = timens_vdso_alloc_vvar_page(ns); 97 + if (err) 99 98 goto fail_free; 100 99 101 100 err = ns_common_init(ns); ··· 110 109 return ns; 111 110 112 111 fail_free_page: 113 - __free_page(ns->vvar_page); 112 + timens_vdso_free_vvar_page(ns); 114 113 fail_free: 115 114 kfree(ns); 116 115 fail_dec: ··· 139 138 return clone_time_ns(user_ns, old_ns); 140 139 } 141 140 142 - static struct timens_offset offset_from_ts(struct timespec64 off) 143 - { 144 - struct timens_offset ret; 145 - 146 - ret.sec = off.tv_sec; 147 - ret.nsec = off.tv_nsec; 148 - 149 - return ret; 150 - } 151 - 152 - /* 153 - * A time namespace VVAR page has the same layout as the VVAR page which 154 - * contains the system wide VDSO data. 155 - * 156 - * For a normal task the VVAR pages are installed in the normal ordering: 157 - * VVAR 158 - * PVCLOCK 159 - * HVCLOCK 160 - * TIMENS <- Not really required 161 - * 162 - * Now for a timens task the pages are installed in the following order: 163 - * TIMENS 164 - * PVCLOCK 165 - * HVCLOCK 166 - * VVAR 167 - * 168 - * The check for vdso_clock->clock_mode is in the unlikely path of 169 - * the seq begin magic. So for the non-timens case most of the time 170 - * 'seq' is even, so the branch is not taken. 171 - * 172 - * If 'seq' is odd, i.e. a concurrent update is in progress, the extra check 173 - * for vdso_clock->clock_mode is a non-issue. The task is spin waiting for the 174 - * update to finish and for 'seq' to become even anyway. 
175 - * 176 - * Timens page has vdso_clock->clock_mode set to VDSO_CLOCKMODE_TIMENS which 177 - * enforces the time namespace handling path. 178 - */ 179 - static void timens_setup_vdso_clock_data(struct vdso_clock *vc, 180 - struct time_namespace *ns) 181 - { 182 - struct timens_offset *offset = vc->offset; 183 - struct timens_offset monotonic = offset_from_ts(ns->offsets.monotonic); 184 - struct timens_offset boottime = offset_from_ts(ns->offsets.boottime); 185 - 186 - vc->seq = 1; 187 - vc->clock_mode = VDSO_CLOCKMODE_TIMENS; 188 - offset[CLOCK_MONOTONIC] = monotonic; 189 - offset[CLOCK_MONOTONIC_RAW] = monotonic; 190 - offset[CLOCK_MONOTONIC_COARSE] = monotonic; 191 - offset[CLOCK_BOOTTIME] = boottime; 192 - offset[CLOCK_BOOTTIME_ALARM] = boottime; 193 - } 194 - 195 - struct page *find_timens_vvar_page(struct vm_area_struct *vma) 196 - { 197 - if (likely(vma->vm_mm == current->mm)) 198 - return current->nsproxy->time_ns->vvar_page; 199 - 200 - /* 201 - * VM_PFNMAP | VM_IO protect .fault() handler from being called 202 - * through interfaces like /proc/$pid/mem or 203 - * process_vm_{readv,writev}() as long as there's no .access() 204 - * in special_mapping_vmops(). 205 - * For more details check_vma_flags() and __access_remote_vm() 206 - */ 207 - 208 - WARN(1, "vvar_page accessed remotely"); 209 - 210 - return NULL; 211 - } 212 - 213 - /* 214 - * Protects possibly multiple offsets writers racing each other 215 - * and tasks entering the namespace. 216 - */ 217 - static DEFINE_MUTEX(offset_lock); 218 - 219 - static void timens_set_vvar_page(struct task_struct *task, 220 - struct time_namespace *ns) 221 - { 222 - struct vdso_time_data *vdata; 223 - struct vdso_clock *vc; 224 - unsigned int i; 225 - 226 - if (ns == &init_time_ns) 227 - return; 228 - 229 - /* Fast-path, taken by every task in namespace except the first. 
*/ 230 - if (likely(ns->frozen_offsets)) 231 - return; 232 - 233 - mutex_lock(&offset_lock); 234 - /* Nothing to-do: vvar_page has been already initialized. */ 235 - if (ns->frozen_offsets) 236 - goto out; 237 - 238 - ns->frozen_offsets = true; 239 - vdata = page_address(ns->vvar_page); 240 - vc = vdata->clock_data; 241 - 242 - for (i = 0; i < CS_BASES; i++) 243 - timens_setup_vdso_clock_data(&vc[i], ns); 244 - 245 - if (IS_ENABLED(CONFIG_POSIX_AUX_CLOCKS)) { 246 - for (i = 0; i < ARRAY_SIZE(vdata->aux_clock_data); i++) 247 - timens_setup_vdso_clock_data(&vdata->aux_clock_data[i], ns); 248 - } 249 - 250 - out: 251 - mutex_unlock(&offset_lock); 252 - } 141 + DEFINE_MUTEX(timens_offset_lock); 253 142 254 143 void free_time_ns(struct time_namespace *ns) 255 144 { ··· 147 256 dec_time_namespaces(ns->ucounts); 148 257 put_user_ns(ns->user_ns); 149 258 ns_common_free(ns); 150 - __free_page(ns->vvar_page); 259 + timens_vdso_free_vvar_page(ns); 151 260 /* Concurrent nstree traversal depends on a grace period. */ 152 261 kfree_rcu(ns, ns.ns_rcu); 153 262 } 154 263 155 264 static struct ns_common *timens_get(struct task_struct *task) 156 265 { 157 - struct time_namespace *ns = NULL; 266 + struct time_namespace *ns; 158 267 struct nsproxy *nsproxy; 159 268 160 - task_lock(task); 269 + guard(task_lock)(task); 161 270 nsproxy = task->nsproxy; 162 - if (nsproxy) { 163 - ns = nsproxy->time_ns; 164 - get_time_ns(ns); 165 - } 166 - task_unlock(task); 271 + if (!nsproxy) 272 + return NULL; 167 273 168 - return ns ? 
&ns->ns : NULL; 274 + ns = nsproxy->time_ns; 275 + get_time_ns(ns); 276 + return &ns->ns; 169 277 } 170 278 171 279 static struct ns_common *timens_for_children_get(struct task_struct *task) 172 280 { 173 - struct time_namespace *ns = NULL; 281 + struct time_namespace *ns; 174 282 struct nsproxy *nsproxy; 175 283 176 - task_lock(task); 284 + guard(task_lock)(task); 177 285 nsproxy = task->nsproxy; 178 - if (nsproxy) { 179 - ns = nsproxy->time_ns_for_children; 180 - get_time_ns(ns); 181 - } 182 - task_unlock(task); 286 + if (!nsproxy) 287 + return NULL; 183 288 184 - return ns ? &ns->ns : NULL; 289 + ns = nsproxy->time_ns_for_children; 290 + get_time_ns(ns); 291 + return &ns->ns; 185 292 } 186 293 187 294 static void timens_put(struct ns_common *ns) 188 295 { 189 296 put_time_ns(to_time_ns(ns)); 190 - } 191 - 192 - void timens_commit(struct task_struct *tsk, struct time_namespace *ns) 193 - { 194 - timens_set_vvar_page(tsk, ns); 195 - vdso_join_timens(tsk, ns); 196 297 } 197 298 198 299 static int timens_install(struct nsset *nsset, struct ns_common *new) ··· 250 367 251 368 void proc_timens_show_offsets(struct task_struct *p, struct seq_file *m) 252 369 { 253 - struct ns_common *ns; 254 - struct time_namespace *time_ns; 370 + struct time_namespace *time_ns __free(time_ns) = NULL; 371 + struct ns_common *ns = timens_for_children_get(p); 255 372 256 - ns = timens_for_children_get(p); 257 373 if (!ns) 258 374 return; 375 + 259 376 time_ns = to_time_ns(ns); 260 377 261 378 show_offset(m, CLOCK_MONOTONIC, &time_ns->offsets.monotonic); 262 379 show_offset(m, CLOCK_BOOTTIME, &time_ns->offsets.boottime); 263 - put_time_ns(time_ns); 264 380 } 265 381 266 382 int proc_timens_set_offset(struct file *file, struct task_struct *p, 267 383 struct proc_timens_offset *offsets, int noffsets) 268 384 { 269 - struct ns_common *ns; 270 - struct time_namespace *time_ns; 385 + struct time_namespace *time_ns __free(time_ns) = NULL; 386 + struct ns_common *ns = timens_for_children_get(p); 
271 387 struct timespec64 tp; 272 - int i, err; 388 + int i; 273 389 274 - ns = timens_for_children_get(p); 275 390 if (!ns) 276 391 return -ESRCH; 392 + 277 393 time_ns = to_time_ns(ns); 278 394 279 - if (!file_ns_capable(file, time_ns->user_ns, CAP_SYS_TIME)) { 280 - put_time_ns(time_ns); 395 + if (!file_ns_capable(file, time_ns->user_ns, CAP_SYS_TIME)) 281 396 return -EPERM; 282 - } 283 397 284 398 for (i = 0; i < noffsets; i++) { 285 399 struct proc_timens_offset *off = &offsets[i]; ··· 289 409 ktime_get_boottime_ts64(&tp); 290 410 break; 291 411 default: 292 - err = -EINVAL; 293 - goto out; 412 + return -EINVAL; 294 413 } 295 - 296 - err = -ERANGE; 297 414 298 415 if (off->val.tv_sec > KTIME_SEC_MAX || 299 416 off->val.tv_sec < -KTIME_SEC_MAX) 300 - goto out; 417 + return -ERANGE; 301 418 302 419 tp = timespec64_add(tp, off->val); 303 420 /* ··· 302 425 * still unreachable. 303 426 */ 304 427 if (tp.tv_sec < 0 || tp.tv_sec > KTIME_SEC_MAX / 2) 305 - goto out; 428 + return -ERANGE; 306 429 } 307 430 308 - mutex_lock(&offset_lock); 309 - if (time_ns->frozen_offsets) { 310 - err = -EACCES; 311 - goto out_unlock; 312 - } 431 + guard(mutex)(&timens_offset_lock); 432 + if (time_ns->frozen_offsets) 433 + return -EACCES; 313 434 314 - err = 0; 315 435 /* Don't report errors after this line */ 316 436 for (i = 0; i < noffsets; i++) { 317 437 struct proc_timens_offset *off = &offsets[i]; ··· 326 452 *offset = off->val; 327 453 } 328 454 329 - out_unlock: 330 - mutex_unlock(&offset_lock); 331 - out: 332 - put_time_ns(time_ns); 333 - 334 - return err; 455 + return 0; 335 456 } 336 457 337 458 const struct proc_ns_operations timens_operations = {
+28
kernel/time/namespace_internal.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef _TIME_NAMESPACE_INTERNAL_H 3 + #define _TIME_NAMESPACE_INTERNAL_H 4 + 5 + #include <linux/mutex.h> 6 + 7 + struct time_namespace; 8 + 9 + /* 10 + * Protects possibly multiple offsets writers racing each other 11 + * and tasks entering the namespace. 12 + */ 13 + extern struct mutex timens_offset_lock; 14 + 15 + #ifdef CONFIG_TIME_NS_VDSO 16 + int timens_vdso_alloc_vvar_page(struct time_namespace *ns); 17 + void timens_vdso_free_vvar_page(struct time_namespace *ns); 18 + #else /* !CONFIG_TIME_NS_VDSO */ 19 + static inline int timens_vdso_alloc_vvar_page(struct time_namespace *ns) 20 + { 21 + return 0; 22 + } 23 + static inline void timens_vdso_free_vvar_page(struct time_namespace *ns) 24 + { 25 + } 26 + #endif /* CONFIG_TIME_NS_VDSO */ 27 + 28 + #endif /* _TIME_NAMESPACE_INTERNAL_H */
+160
kernel/time/namespace_vdso.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Author: Andrei Vagin <avagin@openvz.org> 4 + * Author: Dmitry Safonov <dima@arista.com> 5 + */ 6 + 7 + #include <linux/cleanup.h> 8 + #include <linux/mm.h> 9 + #include <linux/time_namespace.h> 10 + #include <linux/time.h> 11 + #include <linux/vdso_datastore.h> 12 + 13 + #include <vdso/clocksource.h> 14 + #include <vdso/datapage.h> 15 + 16 + #include "namespace_internal.h" 17 + 18 + static struct timens_offset offset_from_ts(struct timespec64 off) 19 + { 20 + struct timens_offset ret; 21 + 22 + ret.sec = off.tv_sec; 23 + ret.nsec = off.tv_nsec; 24 + 25 + return ret; 26 + } 27 + 28 + /* 29 + * A time namespace VVAR page has the same layout as the VVAR page which 30 + * contains the system wide VDSO data. 31 + * 32 + * For a normal task the VVAR pages are installed in the normal ordering: 33 + * VVAR 34 + * PVCLOCK 35 + * HVCLOCK 36 + * TIMENS <- Not really required 37 + * 38 + * Now for a timens task the pages are installed in the following order: 39 + * TIMENS 40 + * PVCLOCK 41 + * HVCLOCK 42 + * VVAR 43 + * 44 + * The check for vdso_clock->clock_mode is in the unlikely path of 45 + * the seq begin magic. So for the non-timens case most of the time 46 + * 'seq' is even, so the branch is not taken. 47 + * 48 + * If 'seq' is odd, i.e. a concurrent update is in progress, the extra check 49 + * for vdso_clock->clock_mode is a non-issue. The task is spin waiting for the 50 + * update to finish and for 'seq' to become even anyway. 51 + * 52 + * Timens page has vdso_clock->clock_mode set to VDSO_CLOCKMODE_TIMENS which 53 + * enforces the time namespace handling path. 
54 + */ 55 + static void timens_setup_vdso_clock_data(struct vdso_clock *vc, 56 + struct time_namespace *ns) 57 + { 58 + struct timens_offset *offset = vc->offset; 59 + struct timens_offset monotonic = offset_from_ts(ns->offsets.monotonic); 60 + struct timens_offset boottime = offset_from_ts(ns->offsets.boottime); 61 + 62 + vc->seq = 1; 63 + vc->clock_mode = VDSO_CLOCKMODE_TIMENS; 64 + offset[CLOCK_MONOTONIC] = monotonic; 65 + offset[CLOCK_MONOTONIC_RAW] = monotonic; 66 + offset[CLOCK_MONOTONIC_COARSE] = monotonic; 67 + offset[CLOCK_BOOTTIME] = boottime; 68 + offset[CLOCK_BOOTTIME_ALARM] = boottime; 69 + } 70 + 71 + struct page *find_timens_vvar_page(struct vm_area_struct *vma) 72 + { 73 + if (likely(vma->vm_mm == current->mm)) 74 + return current->nsproxy->time_ns->vvar_page; 75 + 76 + /* 77 + * VM_PFNMAP | VM_IO protect .fault() handler from being called 78 + * through interfaces like /proc/$pid/mem or 79 + * process_vm_{readv,writev}() as long as there's no .access() 80 + * in special_mapping_vmops(). 81 + * For more details check_vma_flags() and __access_remote_vm() 82 + */ 83 + 84 + WARN(1, "vvar_page accessed remotely"); 85 + 86 + return NULL; 87 + } 88 + 89 + static void timens_set_vvar_page(struct task_struct *task, 90 + struct time_namespace *ns) 91 + { 92 + struct vdso_time_data *vdata; 93 + struct vdso_clock *vc; 94 + unsigned int i; 95 + 96 + if (ns == &init_time_ns) 97 + return; 98 + 99 + /* Fast-path, taken by every task in namespace except the first. */ 100 + if (likely(ns->frozen_offsets)) 101 + return; 102 + 103 + guard(mutex)(&timens_offset_lock); 104 + /* Nothing to-do: vvar_page has been already initialized. 
*/ 105 + if (ns->frozen_offsets) 106 + return; 107 + 108 + ns->frozen_offsets = true; 109 + vdata = page_address(ns->vvar_page); 110 + vc = vdata->clock_data; 111 + 112 + for (i = 0; i < CS_BASES; i++) 113 + timens_setup_vdso_clock_data(&vc[i], ns); 114 + 115 + if (IS_ENABLED(CONFIG_POSIX_AUX_CLOCKS)) { 116 + for (i = 0; i < ARRAY_SIZE(vdata->aux_clock_data); i++) 117 + timens_setup_vdso_clock_data(&vdata->aux_clock_data[i], ns); 118 + } 119 + } 120 + 121 + /* 122 + * The vvar page layout depends on whether a task belongs to the root or 123 + * non-root time namespace. Whenever a task changes its namespace, the VVAR 124 + * page tables are cleared and then they will be re-faulted with a 125 + * corresponding layout. 126 + * See also the comment near timens_setup_vdso_clock_data() for details. 127 + */ 128 + static int vdso_join_timens(struct task_struct *task, struct time_namespace *ns) 129 + { 130 + struct mm_struct *mm = task->mm; 131 + struct vm_area_struct *vma; 132 + VMA_ITERATOR(vmi, mm, 0); 133 + 134 + guard(mmap_read_lock)(mm); 135 + for_each_vma(vmi, vma) { 136 + if (vma_is_special_mapping(vma, &vdso_vvar_mapping)) 137 + zap_vma_pages(vma); 138 + } 139 + return 0; 140 + } 141 + 142 + void timens_commit(struct task_struct *tsk, struct time_namespace *ns) 143 + { 144 + timens_set_vvar_page(tsk, ns); 145 + vdso_join_timens(tsk, ns); 146 + } 147 + 148 + int timens_vdso_alloc_vvar_page(struct time_namespace *ns) 149 + { 150 + ns->vvar_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); 151 + if (!ns->vvar_page) 152 + return -ENOMEM; 153 + 154 + return 0; 155 + } 156 + 157 + void timens_vdso_free_vvar_page(struct time_namespace *ns) 158 + { 159 + __free_page(ns->vvar_page); 160 + }
+63 -59
lib/vdso/datastore.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 2 3 - #include <linux/linkage.h> 4 - #include <linux/mmap_lock.h> 3 + #include <linux/gfp.h> 4 + #include <linux/init.h> 5 5 #include <linux/mm.h> 6 6 #include <linux/time_namespace.h> 7 7 #include <linux/types.h> 8 8 #include <linux/vdso_datastore.h> 9 9 #include <vdso/datapage.h> 10 10 11 - /* 12 - * The vDSO data page. 13 - */ 11 + static u8 vdso_initdata[VDSO_NR_PAGES * PAGE_SIZE] __aligned(PAGE_SIZE) __initdata = {}; 12 + 14 13 #ifdef CONFIG_GENERIC_GETTIMEOFDAY 15 - static union { 16 - struct vdso_time_data data; 17 - u8 page[PAGE_SIZE]; 18 - } vdso_time_data_store __page_aligned_data; 19 - struct vdso_time_data *vdso_k_time_data = &vdso_time_data_store.data; 20 - static_assert(sizeof(vdso_time_data_store) == PAGE_SIZE); 14 + struct vdso_time_data *vdso_k_time_data __refdata = 15 + (void *)&vdso_initdata[VDSO_TIME_PAGE_OFFSET * PAGE_SIZE]; 16 + 17 + static_assert(sizeof(struct vdso_time_data) <= PAGE_SIZE); 21 18 #endif /* CONFIG_GENERIC_GETTIMEOFDAY */ 22 19 23 20 #ifdef CONFIG_VDSO_GETRANDOM 24 - static union { 25 - struct vdso_rng_data data; 26 - u8 page[PAGE_SIZE]; 27 - } vdso_rng_data_store __page_aligned_data; 28 - struct vdso_rng_data *vdso_k_rng_data = &vdso_rng_data_store.data; 29 - static_assert(sizeof(vdso_rng_data_store) == PAGE_SIZE); 21 + struct vdso_rng_data *vdso_k_rng_data __refdata = 22 + (void *)&vdso_initdata[VDSO_RNG_PAGE_OFFSET * PAGE_SIZE]; 23 + 24 + static_assert(sizeof(struct vdso_rng_data) <= PAGE_SIZE); 30 25 #endif /* CONFIG_VDSO_GETRANDOM */ 31 26 32 27 #ifdef CONFIG_ARCH_HAS_VDSO_ARCH_DATA 33 - static union { 34 - struct vdso_arch_data data; 35 - u8 page[VDSO_ARCH_DATA_SIZE]; 36 - } vdso_arch_data_store __page_aligned_data; 37 - struct vdso_arch_data *vdso_k_arch_data = &vdso_arch_data_store.data; 28 + struct vdso_arch_data *vdso_k_arch_data __refdata = 29 + (void *)&vdso_initdata[VDSO_ARCH_PAGES_START * PAGE_SIZE]; 38 30 #endif /* CONFIG_ARCH_HAS_VDSO_ARCH_DATA */ 31 + 32 + void 
__init vdso_setup_data_pages(void) 33 + { 34 + unsigned int order = get_order(VDSO_NR_PAGES * PAGE_SIZE); 35 + struct page *pages; 36 + 37 + /* 38 + * Allocate the data pages dynamically. SPARC does not support mapping 39 + * static pages to be mapped into userspace. 40 + * It is also a requirement for mlockall() support. 41 + * 42 + * Do not use folios. In time namespaces the pages are mapped in a different order 43 + * to userspace, which is not handled by the folio optimizations in finish_fault(). 44 + */ 45 + pages = alloc_pages(GFP_KERNEL, order); 46 + if (!pages) 47 + panic("Unable to allocate VDSO storage pages"); 48 + 49 + /* The pages are mapped one-by-one into userspace and each one needs to be refcounted. */ 50 + split_page(pages, order); 51 + 52 + /* Move the data already written by other subsystems to the new pages */ 53 + memcpy(page_address(pages), vdso_initdata, VDSO_NR_PAGES * PAGE_SIZE); 54 + 55 + if (IS_ENABLED(CONFIG_GENERIC_GETTIMEOFDAY)) 56 + vdso_k_time_data = page_address(pages + VDSO_TIME_PAGE_OFFSET); 57 + 58 + if (IS_ENABLED(CONFIG_VDSO_GETRANDOM)) 59 + vdso_k_rng_data = page_address(pages + VDSO_RNG_PAGE_OFFSET); 60 + 61 + if (IS_ENABLED(CONFIG_ARCH_HAS_VDSO_ARCH_DATA)) 62 + vdso_k_arch_data = page_address(pages + VDSO_ARCH_PAGES_START); 63 + } 39 64 40 65 static vm_fault_t vvar_fault(const struct vm_special_mapping *sm, 41 66 struct vm_area_struct *vma, struct vm_fault *vmf) 42 67 { 43 - struct page *timens_page = find_timens_vvar_page(vma); 44 - unsigned long addr, pfn; 45 - vm_fault_t err; 68 + struct page *page, *timens_page; 69 + 70 + timens_page = find_timens_vvar_page(vma); 46 71 47 72 switch (vmf->pgoff) { 48 73 case VDSO_TIME_PAGE_OFFSET: 49 74 if (!IS_ENABLED(CONFIG_GENERIC_GETTIMEOFDAY)) 50 75 return VM_FAULT_SIGBUS; 51 - pfn = __phys_to_pfn(__pa_symbol(vdso_k_time_data)); 76 + page = virt_to_page(vdso_k_time_data); 52 77 if (timens_page) { 53 78 /* 54 79 * Fault in VVAR page too, since it will be accessed 55 80 * to get clock 
data anyway. 56 81 */ 82 + unsigned long addr; 83 + vm_fault_t err; 84 + 57 85 addr = vmf->address + VDSO_TIMENS_PAGE_OFFSET * PAGE_SIZE; 58 - err = vmf_insert_pfn(vma, addr, pfn); 86 + err = vmf_insert_page(vma, addr, page); 59 87 if (unlikely(err & VM_FAULT_ERROR)) 60 88 return err; 61 - pfn = page_to_pfn(timens_page); 89 + page = timens_page; 62 90 } 63 91 break; 64 92 case VDSO_TIMENS_PAGE_OFFSET: ··· 99 71 */ 100 72 if (!IS_ENABLED(CONFIG_TIME_NS) || !timens_page) 101 73 return VM_FAULT_SIGBUS; 102 - pfn = __phys_to_pfn(__pa_symbol(vdso_k_time_data)); 74 + page = virt_to_page(vdso_k_time_data); 103 75 break; 104 76 case VDSO_RNG_PAGE_OFFSET: 105 77 if (!IS_ENABLED(CONFIG_VDSO_GETRANDOM)) 106 78 return VM_FAULT_SIGBUS; 107 - pfn = __phys_to_pfn(__pa_symbol(vdso_k_rng_data)); 79 + page = virt_to_page(vdso_k_rng_data); 108 80 break; 109 81 case VDSO_ARCH_PAGES_START ... VDSO_ARCH_PAGES_END: 110 82 if (!IS_ENABLED(CONFIG_ARCH_HAS_VDSO_ARCH_DATA)) 111 83 return VM_FAULT_SIGBUS; 112 - pfn = __phys_to_pfn(__pa_symbol(vdso_k_arch_data)) + 113 - vmf->pgoff - VDSO_ARCH_PAGES_START; 84 + page = virt_to_page(vdso_k_arch_data) + vmf->pgoff - VDSO_ARCH_PAGES_START; 114 85 break; 115 86 default: 116 87 return VM_FAULT_SIGBUS; 117 88 } 118 89 119 - return vmf_insert_pfn(vma, vmf->address, pfn); 90 + get_page(page); 91 + vmf->page = page; 92 + return 0; 120 93 } 121 94 122 95 const struct vm_special_mapping vdso_vvar_mapping = { ··· 129 100 { 130 101 return _install_special_mapping(mm, addr, VDSO_NR_PAGES * PAGE_SIZE, 131 102 VM_READ | VM_MAYREAD | VM_IO | VM_DONTDUMP | 132 - VM_PFNMAP | VM_SEALED_SYSMAP, 103 + VM_MIXEDMAP | VM_SEALED_SYSMAP, 133 104 &vdso_vvar_mapping); 134 105 } 135 - 136 - #ifdef CONFIG_TIME_NS 137 - /* 138 - * The vvar page layout depends on whether a task belongs to the root or 139 - * non-root time namespace. 
Whenever a task changes its namespace, the VVAR 140 - * page tables are cleared and then they will be re-faulted with a 141 - * corresponding layout. 142 - * See also the comment near timens_setup_vdso_clock_data() for details. 143 - */ 144 - int vdso_join_timens(struct task_struct *task, struct time_namespace *ns) 145 - { 146 - struct mm_struct *mm = task->mm; 147 - struct vm_area_struct *vma; 148 - VMA_ITERATOR(vmi, mm, 0); 149 - 150 - mmap_read_lock(mm); 151 - for_each_vma(vmi, vma) { 152 - if (vma_is_special_mapping(vma, &vdso_vvar_mapping)) 153 - zap_vma_pages(vma); 154 - } 155 - mmap_read_unlock(mm); 156 - 157 - return 0; 158 - } 159 - #endif
+3
lib/vdso/getrandom.c
··· 7 7 #include <linux/minmax.h> 8 8 #include <vdso/datapage.h> 9 9 #include <vdso/getrandom.h> 10 + #include <vdso/limits.h> 10 11 #include <vdso/unaligned.h> 12 + #include <asm/barrier.h> 11 13 #include <asm/vdso/getrandom.h> 14 + #include <uapi/linux/errno.h> 12 15 #include <uapi/linux/mman.h> 13 16 #include <uapi/linux/random.h> 14 17
+40 -59
lib/vdso/gettimeofday.c
··· 3 3 * Generic userspace implementations of gettimeofday() and similar. 4 4 */ 5 5 #include <vdso/auxclock.h> 6 + #include <vdso/clocksource.h> 6 7 #include <vdso/datapage.h> 7 8 #include <vdso/helpers.h> 9 + #include <vdso/ktime.h> 10 + #include <vdso/limits.h> 11 + #include <vdso/math64.h> 12 + #include <vdso/time32.h> 13 + #include <vdso/time64.h> 14 + 15 + /* 16 + * The generic vDSO implementation requires that gettimeofday.h 17 + * provides: 18 + * - __arch_get_hw_counter(): to get the hw counter based on the 19 + * clock_mode. 20 + * - gettimeofday_fallback(): fallback for gettimeofday. 21 + * - clock_gettime_fallback(): fallback for clock_gettime. 22 + * - clock_getres_fallback(): fallback for clock_getres. 23 + */ 24 + #include <asm/vdso/gettimeofday.h> 8 25 9 26 /* Bring in default accessors */ 10 27 #include <vdso/vsyscall.h> ··· 152 135 153 136 if (!vdso_get_timestamp(vd, vc, clk, &sec, &ns)) 154 137 return false; 155 - } while (unlikely(vdso_read_retry(vc, seq))); 138 + } while (vdso_read_retry(vc, seq)); 156 139 157 140 /* Add the namespace offset */ 158 141 sec += offs->sec; ··· 175 158 return false; 176 159 177 160 do { 178 - /* 179 - * Open coded function vdso_read_begin() to handle 180 - * VDSO_CLOCKMODE_TIMENS. Time namespace enabled tasks have a 181 - * special VVAR page installed which has vc->seq set to 1 and 182 - * vc->clock_mode set to VDSO_CLOCKMODE_TIMENS. For non time 183 - * namespace affected tasks this does not affect performance 184 - * because if vc->seq is odd, i.e. a concurrent update is in 185 - * progress the extra check for vc->clock_mode is just a few 186 - * extra instructions while spin waiting for vc->seq to become 187 - * even again. 
188 - */ 189 - while (unlikely((seq = READ_ONCE(vc->seq)) & 1)) { 190 - if (IS_ENABLED(CONFIG_TIME_NS) && 191 - vc->clock_mode == VDSO_CLOCKMODE_TIMENS) 192 - return do_hres_timens(vd, vc, clk, ts); 193 - cpu_relax(); 194 - } 195 - smp_rmb(); 161 + if (vdso_read_begin_timens(vc, &seq)) 162 + return do_hres_timens(vd, vc, clk, ts); 196 163 197 164 if (!vdso_get_timestamp(vd, vc, clk, &sec, &ns)) 198 165 return false; 199 - } while (unlikely(vdso_read_retry(vc, seq))); 166 + } while (vdso_read_retry(vc, seq)); 200 167 201 168 vdso_set_timespec(ts, sec, ns); 202 169 ··· 205 204 seq = vdso_read_begin(vc); 206 205 sec = vdso_ts->sec; 207 206 nsec = vdso_ts->nsec; 208 - } while (unlikely(vdso_read_retry(vc, seq))); 207 + } while (vdso_read_retry(vc, seq)); 209 208 210 209 /* Add the namespace offset */ 211 210 sec += offs->sec; ··· 224 223 u32 seq; 225 224 226 225 do { 227 - /* 228 - * Open coded function vdso_read_begin() to handle 229 - * VDSO_CLOCK_TIMENS. See comment in do_hres(). 230 - */ 231 - while ((seq = READ_ONCE(vc->seq)) & 1) { 232 - if (IS_ENABLED(CONFIG_TIME_NS) && 233 - vc->clock_mode == VDSO_CLOCKMODE_TIMENS) 234 - return do_coarse_timens(vd, vc, clk, ts); 235 - cpu_relax(); 236 - } 237 - smp_rmb(); 226 + if (vdso_read_begin_timens(vc, &seq)) 227 + return do_coarse_timens(vd, vc, clk, ts); 238 228 239 229 ts->tv_sec = vdso_ts->sec; 240 230 ts->tv_nsec = vdso_ts->nsec; 241 - } while (unlikely(vdso_read_retry(vc, seq))); 231 + } while (vdso_read_retry(vc, seq)); 242 232 243 233 return true; 244 234 } ··· 248 256 vc = &vd->aux_clock_data[idx]; 249 257 250 258 do { 251 - /* 252 - * Open coded function vdso_read_begin() to handle 253 - * VDSO_CLOCK_TIMENS. See comment in do_hres(). 
254 - */ 255 - while ((seq = READ_ONCE(vc->seq)) & 1) { 256 - if (IS_ENABLED(CONFIG_TIME_NS) && vc->clock_mode == VDSO_CLOCKMODE_TIMENS) { 257 - vd = __arch_get_vdso_u_timens_data(vd); 258 - vc = &vd->aux_clock_data[idx]; 259 - /* Re-read from the real time data page */ 260 - continue; 261 - } 262 - cpu_relax(); 259 + if (vdso_read_begin_timens(vc, &seq)) { 260 + vd = __arch_get_vdso_u_timens_data(vd); 261 + vc = &vd->aux_clock_data[idx]; 262 + /* Re-read from the real time data page */ 263 + continue; 263 264 } 264 - smp_rmb(); 265 265 266 266 /* Auxclock disabled? */ 267 267 if (vc->clock_mode == VDSO_CLOCKMODE_NONE) ··· 261 277 262 278 if (!vdso_get_timestamp(vd, vc, VDSO_BASE_AUX, &sec, &ns)) 263 279 return false; 264 - } while (unlikely(vdso_read_retry(vc, seq))); 280 + } while (vdso_read_retry(vc, seq)); 265 281 266 282 vdso_set_timespec(ts, sec, ns); 267 283 ··· 297 313 return do_hres(vd, vc, clock, ts); 298 314 } 299 315 300 - static __maybe_unused int 316 + static int 301 317 __cvdso_clock_gettime_data(const struct vdso_time_data *vd, clockid_t clock, 302 318 struct __kernel_timespec *ts) 303 319 { ··· 317 333 } 318 334 319 335 #ifdef BUILD_VDSO32 320 - static __maybe_unused int 336 + static int 321 337 __cvdso_clock_gettime32_data(const struct vdso_time_data *vd, clockid_t clock, 322 338 struct old_timespec32 *res) 323 339 { ··· 343 359 } 344 360 #endif /* BUILD_VDSO32 */ 345 361 346 - static __maybe_unused int 362 + static int 347 363 __cvdso_gettimeofday_data(const struct vdso_time_data *vd, 348 364 struct __kernel_old_timeval *tv, struct timezone *tz) 349 365 { ··· 360 376 } 361 377 362 378 if (unlikely(tz != NULL)) { 363 - if (IS_ENABLED(CONFIG_TIME_NS) && 364 - vc->clock_mode == VDSO_CLOCKMODE_TIMENS) 379 + if (vdso_is_timens_clock(vc)) 365 380 vd = __arch_get_vdso_u_timens_data(vd); 366 381 367 382 tz->tz_minuteswest = vd[CS_HRES_COARSE].tz_minuteswest; ··· 377 394 } 378 395 379 396 #ifdef VDSO_HAS_TIME 380 - static __maybe_unused 
__kernel_old_time_t 397 + static __kernel_old_time_t 381 398 __cvdso_time_data(const struct vdso_time_data *vd, __kernel_old_time_t *time) 382 399 { 383 400 const struct vdso_clock *vc = vd->clock_data; 384 401 __kernel_old_time_t t; 385 402 386 - if (IS_ENABLED(CONFIG_TIME_NS) && 387 - vc->clock_mode == VDSO_CLOCKMODE_TIMENS) { 403 + if (vdso_is_timens_clock(vc)) { 388 404 vd = __arch_get_vdso_u_timens_data(vd); 389 405 vc = vd->clock_data; 390 406 } ··· 414 432 if (!vdso_clockid_valid(clock)) 415 433 return false; 416 434 417 - if (IS_ENABLED(CONFIG_TIME_NS) && 418 - vc->clock_mode == VDSO_CLOCKMODE_TIMENS) 435 + if (vdso_is_timens_clock(vc)) 419 436 vd = __arch_get_vdso_u_timens_data(vd); 420 437 421 438 /* ··· 445 464 return true; 446 465 } 447 466 448 - static __maybe_unused 467 + static 449 468 int __cvdso_clock_getres_data(const struct vdso_time_data *vd, clockid_t clock, 450 469 struct __kernel_timespec *res) 451 470 { ··· 465 484 } 466 485 467 486 #ifdef BUILD_VDSO32 468 - static __maybe_unused int 487 + static int 469 488 __cvdso_clock_getres_time32_data(const struct vdso_time_data *vd, clockid_t clock, 470 489 struct old_timespec32 *res) 471 490 {
+1 -5
tools/testing/selftests/vDSO/Makefile
··· 19 19 20 20 include ../lib.mk 21 21 22 - CFLAGS += $(TOOLS_INCLUDES) 23 - 24 22 CFLAGS_NOLIBC := -nostdlib -nostdinc -ffreestanding -fno-asynchronous-unwind-tables \ 25 23 -fno-stack-protector -include $(top_srcdir)/tools/include/nolibc/nolibc.h \ 26 24 -I$(top_srcdir)/tools/include/nolibc/ $(KHDR_INCLUDES) ··· 26 28 $(OUTPUT)/vdso_test_gettimeofday: parse_vdso.c vdso_test_gettimeofday.c 27 29 $(OUTPUT)/vdso_test_getcpu: parse_vdso.c vdso_test_getcpu.c 28 30 $(OUTPUT)/vdso_test_abi: parse_vdso.c vdso_test_abi.c 31 + $(OUTPUT)/vdso_test_correctness: parse_vdso.c vdso_test_correctness.c 29 32 30 33 $(OUTPUT)/vdso_standalone_test_x86: vdso_standalone_test_x86.c parse_vdso.c | headers 31 34 $(OUTPUT)/vdso_standalone_test_x86: CFLAGS:=$(CFLAGS_NOLIBC) $(CFLAGS) 32 - 33 - $(OUTPUT)/vdso_test_correctness: vdso_test_correctness.c 34 - $(OUTPUT)/vdso_test_correctness: LDFLAGS += -ldl 35 35 36 36 $(OUTPUT)/vdso_test_getrandom: parse_vdso.c 37 37 $(OUTPUT)/vdso_test_getrandom: CFLAGS += -isystem $(top_srcdir)/tools/include \
+1 -2
tools/testing/selftests/vDSO/parse_vdso.c
··· 19 19 #include <stdint.h> 20 20 #include <string.h> 21 21 #include <limits.h> 22 - #include <linux/auxvec.h> 23 - #include <linux/elf.h> 22 + #include <elf.h> 24 23 25 24 #include "parse_vdso.h" 26 25
+83 -29
tools/testing/selftests/vDSO/vdso_test_correctness.c
··· 11 11 #include <time.h> 12 12 #include <stdlib.h> 13 13 #include <unistd.h> 14 + #include <sys/auxv.h> 14 15 #include <sys/syscall.h> 15 - #include <dlfcn.h> 16 16 #include <string.h> 17 17 #include <errno.h> 18 18 #include <sched.h> 19 19 #include <stdbool.h> 20 20 #include <limits.h> 21 21 22 + #include "parse_vdso.h" 22 23 #include "vdso_config.h" 23 24 #include "vdso_call.h" 24 25 #include "kselftest.h" 25 26 27 + static const char *version; 26 28 static const char **name; 27 - 28 - #ifndef SYS_getcpu 29 - # ifdef __x86_64__ 30 - # define SYS_getcpu 309 31 - # else 32 - # define SYS_getcpu 318 33 - # endif 34 - #endif 35 29 36 30 #ifndef __NR_clock_gettime64 37 31 #define __NR_clock_gettime64 403 ··· 54 60 typedef long (*vgtod_t)(struct timeval *tv, struct timezone *tz); 55 61 56 62 vgtod_t vdso_gettimeofday; 63 + 64 + typedef time_t (*vtime_t)(__kernel_time_t *tloc); 65 + 66 + vtime_t vdso_time; 57 67 58 68 typedef long (*getcpu_t)(unsigned *, unsigned *, void *); 59 69 ··· 108 110 109 111 static void fill_function_pointers(void) 110 112 { 111 - void *vdso = dlopen("linux-vdso.so.1", 112 - RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD); 113 - if (!vdso) 114 - vdso = dlopen("linux-gate.so.1", 115 - RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD); 116 - if (!vdso) 117 - vdso = dlopen("linux-vdso32.so.1", 118 - RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD); 119 - if (!vdso) 120 - vdso = dlopen("linux-vdso64.so.1", 121 - RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD); 122 - if (!vdso) { 113 + unsigned long sysinfo_ehdr = getauxval(AT_SYSINFO_EHDR); 114 + 115 + if (!sysinfo_ehdr) { 123 116 printf("[WARN]\tfailed to find vDSO\n"); 124 117 return; 125 118 } 126 119 127 - vdso_getcpu = (getcpu_t)dlsym(vdso, name[4]); 120 + vdso_init_from_sysinfo_ehdr(sysinfo_ehdr); 121 + 122 + vdso_getcpu = (getcpu_t)vdso_sym(version, name[4]); 128 123 if (!vdso_getcpu) 129 124 printf("Warning: failed to find getcpu in vDSO\n"); 130 125 131 126 vgetcpu = (getcpu_t) vsyscall_getcpu(); 132 127 133 - vdso_clock_gettime 
= (vgettime_t)dlsym(vdso, name[1]); 128 + vdso_clock_gettime = (vgettime_t)vdso_sym(version, name[1]); 134 129 if (!vdso_clock_gettime) 135 130 printf("Warning: failed to find clock_gettime in vDSO\n"); 136 131 137 132 #if defined(VDSO_32BIT) 138 - vdso_clock_gettime64 = (vgettime64_t)dlsym(vdso, name[5]); 133 + vdso_clock_gettime64 = (vgettime64_t)vdso_sym(version, name[5]); 139 134 if (!vdso_clock_gettime64) 140 135 printf("Warning: failed to find clock_gettime64 in vDSO\n"); 141 136 #endif 142 137 143 - vdso_gettimeofday = (vgtod_t)dlsym(vdso, name[0]); 138 + vdso_gettimeofday = (vgtod_t)vdso_sym(version, name[0]); 144 139 if (!vdso_gettimeofday) 145 140 printf("Warning: failed to find gettimeofday in vDSO\n"); 141 + 142 + vdso_time = (vtime_t)vdso_sym(version, name[2]); 143 + if (!vdso_time) 144 + printf("Warning: failed to find time in vDSO\n"); 146 145 147 146 } 148 147 ··· 162 167 static inline int sys_gettimeofday(struct timeval *tv, struct timezone *tz) 163 168 { 164 169 return syscall(__NR_gettimeofday, tv, tz); 170 + } 171 + 172 + static inline __kernel_old_time_t sys_time(__kernel_old_time_t *tloc) 173 + { 174 + #ifdef __NR_time 175 + return syscall(__NR_time, tloc); 176 + #else 177 + errno = ENOSYS; 178 + return -1; 179 + #endif 165 180 } 166 181 167 182 static void test_getcpu(void) ··· 417 412 return; 418 413 } 419 414 420 - printf("\t%llu.%06ld %llu.%06ld %llu.%06ld\n", 421 - (unsigned long long)start.tv_sec, start.tv_usec, 422 - (unsigned long long)vdso.tv_sec, vdso.tv_usec, 423 - (unsigned long long)end.tv_sec, end.tv_usec); 415 + printf("\t%llu.%06lld %llu.%06lld %llu.%06lld\n", 416 + (unsigned long long)start.tv_sec, (long long)start.tv_usec, 417 + (unsigned long long)vdso.tv_sec, (long long)vdso.tv_usec, 418 + (unsigned long long)end.tv_sec, (long long)end.tv_usec); 424 419 425 420 if (!tv_leq(&start, &vdso) || !tv_leq(&vdso, &end)) { 426 421 printf("[FAIL]\tTimes are out of sequence\n"); ··· 440 435 VDSO_CALL(vdso_gettimeofday, 2, &vdso, 
NULL); 441 436 } 442 437 438 + static void test_time(void) 439 + { 440 + __kernel_old_time_t start, end, vdso_ret, vdso_param; 441 + 442 + if (!vdso_time) 443 + return; 444 + 445 + printf("[RUN]\tTesting time...\n"); 446 + 447 + if (sys_time(&start) < 0) { 448 + if (errno == -ENOSYS) { 449 + printf("[SKIP]\tNo time() support\n"); 450 + } else { 451 + printf("[FAIL]\tsys_time failed (%d)\n", errno); 452 + nerrs++; 453 + } 454 + return; 455 + } 456 + 457 + vdso_ret = VDSO_CALL(vdso_time, 1, &vdso_param); 458 + end = sys_time(NULL); 459 + 460 + if (vdso_ret < 0 || end < 0) { 461 + printf("[FAIL]\tvDSO returned %d, syscall errno=%d\n", 462 + (int)vdso_ret, errno); 463 + nerrs++; 464 + return; 465 + } 466 + 467 + printf("\t%lld %lld %lld\n", 468 + (long long)start, 469 + (long long)vdso_ret, 470 + (long long)end); 471 + 472 + if (vdso_ret != vdso_param) { 473 + printf("[FAIL]\tinconsistent return values: %lld %lld\n", 474 + (long long)vdso_ret, (long long)vdso_param); 475 + nerrs++; 476 + return; 477 + } 478 + 479 + if (!(start <= vdso_ret) || !(vdso_ret <= end)) { 480 + printf("[FAIL]\tTimes are out of sequence\n"); 481 + nerrs++; 482 + } 483 + } 484 + 443 485 int main(int argc, char **argv) 444 486 { 487 + version = versions[VDSO_VERSION]; 445 488 name = (const char **)&names[VDSO_NAMES]; 446 489 447 490 fill_function_pointers(); ··· 497 444 test_clock_gettime(); 498 445 test_clock_gettime64(); 499 446 test_gettimeofday(); 447 + test_time(); 500 448 501 449 /* 502 450 * Test getcpu() last so that, if something goes wrong setting affinity,
-2
tools/testing/selftests/vDSO/vdso_test_gettimeofday.c
··· 11 11 */ 12 12 13 13 #include <stdio.h> 14 - #ifndef NOLIBC 15 14 #include <sys/auxv.h> 16 15 #include <sys/time.h> 17 - #endif 18 16 19 17 #include "kselftest.h" 20 18 #include "parse_vdso.h"