Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arm64: VDSO support

This patch adds VDSO support for 64-bit applications. The VDSO code is
currently used for sys_rt_sigreturn() and optimised gettimeofday()
(using the user-accessible generic counter).

Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Tony Lindgren <tony@atomide.com>
Acked-by: Nicolas Pitre <nico@linaro.org>
Acked-by: Olof Johansson <olof@lixom.net>
Acked-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>

authored by

Will Deacon and committed by
Catalin Marinas
9031fefd 7992d60d

+865
+41
arch/arm64/include/asm/vdso.h
/*
 * Copyright (C) 2012 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_VDSO_H
#define __ASM_VDSO_H

#ifdef __KERNEL__

/*
 * Default link address for the vDSO.
 * Since we randomise the VDSO mapping, there's little point in trying
 * to prelink this.
 */
#define VDSO_LBASE	0x0

#ifndef __ASSEMBLY__

/* Build-generated vdso_offset_* constants (see gen_vdso_offsets.sh). */
#include <generated/vdso-offsets.h>

/*
 * Resolve a vDSO symbol to its runtime address: take the link-time
 * offset generated into vdso-offsets.h and rebase it onto the address
 * at which the vDSO was actually mapped into the process.
 */
#define VDSO_SYMBOL(base, name)						   \
({									   \
	(void *)(vdso_offset_##name - VDSO_LBASE + (unsigned long)(base)); \
})

#endif /* !__ASSEMBLY__ */

#endif /* __KERNEL__ */

#endif /* __ASM_VDSO_H */
+43
arch/arm64/include/asm/vdso_datapage.h
/*
 * Copyright (C) 2012 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_VDSO_DATAPAGE_H
#define __ASM_VDSO_DATAPAGE_H

#ifdef __KERNEL__

#ifndef __ASSEMBLY__

/*
 * Layout of the vDSO data page shared with userspace.  This is an ABI:
 * the assembly in gettimeofday.S addresses these fields via offsets
 * generated by asm-offsets, so fields must not be reordered or resized.
 *
 * NOTE(review): relies on __u64/__u32 being in scope from the includer
 * (presumably via <linux/types.h>) — confirm all include sites.
 */
struct vdso_data {
	__u64 cs_cycle_last;	/* Timebase at clocksource init */
	__u64 xtime_clock_sec;	/* Kernel time */
	__u64 xtime_clock_nsec;
	__u64 xtime_coarse_sec;	/* Coarse time */
	__u64 xtime_coarse_nsec;
	__u64 wtm_clock_sec;	/* Wall to monotonic time */
	__u64 wtm_clock_nsec;
	__u32 tb_seq_count;	/* Timebase sequence counter; odd => update in progress */
	__u32 cs_mult;		/* Clocksource multiplier */
	__u32 cs_shift;		/* Clocksource shift */
	__u32 tz_minuteswest;	/* Whacky timezone stuff */
	__u32 tz_dsttime;
	__u32 use_syscall;	/* Non-zero => fall back to the syscall */
};

#endif /* !__ASSEMBLY__ */

#endif /* __KERNEL__ */

#endif /* __ASM_VDSO_DATAPAGE_H */
+261
arch/arm64/kernel/vdso.c
··· 1 + /* 2 + * VDSO implementation for AArch64 and vector page setup for AArch32. 3 + * 4 + * Copyright (C) 2012 ARM Limited 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License version 2 as 8 + * published by the Free Software Foundation. 9 + * 10 + * This program is distributed in the hope that it will be useful, 11 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 + * GNU General Public License for more details. 14 + * 15 + * You should have received a copy of the GNU General Public License 16 + * along with this program. If not, see <http://www.gnu.org/licenses/>. 17 + * 18 + * Author: Will Deacon <will.deacon@arm.com> 19 + */ 20 + 21 + #include <linux/kernel.h> 22 + #include <linux/clocksource.h> 23 + #include <linux/elf.h> 24 + #include <linux/err.h> 25 + #include <linux/errno.h> 26 + #include <linux/gfp.h> 27 + #include <linux/mm.h> 28 + #include <linux/sched.h> 29 + #include <linux/signal.h> 30 + #include <linux/slab.h> 31 + #include <linux/vmalloc.h> 32 + 33 + #include <asm/cacheflush.h> 34 + #include <asm/signal32.h> 35 + #include <asm/vdso.h> 36 + #include <asm/vdso_datapage.h> 37 + 38 + extern char vdso_start, vdso_end; 39 + static unsigned long vdso_pages; 40 + static struct page **vdso_pagelist; 41 + 42 + /* 43 + * The vDSO data page. 44 + */ 45 + static union { 46 + struct vdso_data data; 47 + u8 page[PAGE_SIZE]; 48 + } vdso_data_store __page_aligned_data; 49 + struct vdso_data *vdso_data = &vdso_data_store.data; 50 + 51 + #ifdef CONFIG_COMPAT 52 + /* 53 + * Create and map the vectors page for AArch32 tasks. 
54 + */ 55 + static struct page *vectors_page[1]; 56 + 57 + static int alloc_vectors_page(void) 58 + { 59 + extern char __kuser_helper_start[], __kuser_helper_end[]; 60 + int kuser_sz = __kuser_helper_end - __kuser_helper_start; 61 + unsigned long vpage; 62 + 63 + vpage = get_zeroed_page(GFP_ATOMIC); 64 + 65 + if (!vpage) 66 + return -ENOMEM; 67 + 68 + /* kuser helpers */ 69 + memcpy((void *)vpage + 0x1000 - kuser_sz, __kuser_helper_start, 70 + kuser_sz); 71 + 72 + /* sigreturn code */ 73 + memcpy((void *)vpage + AARCH32_KERN_SIGRET_CODE_OFFSET, 74 + aarch32_sigret_code, sizeof(aarch32_sigret_code)); 75 + 76 + flush_icache_range(vpage, vpage + PAGE_SIZE); 77 + vectors_page[0] = virt_to_page(vpage); 78 + 79 + return 0; 80 + } 81 + arch_initcall(alloc_vectors_page); 82 + 83 + int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp) 84 + { 85 + struct mm_struct *mm = current->mm; 86 + unsigned long addr = AARCH32_VECTORS_BASE; 87 + int ret; 88 + 89 + down_write(&mm->mmap_sem); 90 + current->mm->context.vdso = (void *)addr; 91 + 92 + /* Map vectors page at the high address. */ 93 + ret = install_special_mapping(mm, addr, PAGE_SIZE, 94 + VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC, 95 + vectors_page); 96 + 97 + up_write(&mm->mmap_sem); 98 + 99 + return ret; 100 + } 101 + #endif /* CONFIG_COMPAT */ 102 + 103 + static int __init vdso_init(void) 104 + { 105 + struct page *pg; 106 + char *vbase; 107 + int i, ret = 0; 108 + 109 + vdso_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT; 110 + pr_info("vdso: %ld pages (%ld code, %ld data) at base %p\n", 111 + vdso_pages + 1, vdso_pages, 1L, &vdso_start); 112 + 113 + /* Allocate the vDSO pagelist, plus a page for the data. */ 114 + vdso_pagelist = kzalloc(sizeof(struct page *) * (vdso_pages + 1), 115 + GFP_KERNEL); 116 + if (vdso_pagelist == NULL) { 117 + pr_err("Failed to allocate vDSO pagelist!\n"); 118 + return -ENOMEM; 119 + } 120 + 121 + /* Grab the vDSO code pages. 
*/ 122 + for (i = 0; i < vdso_pages; i++) { 123 + pg = virt_to_page(&vdso_start + i*PAGE_SIZE); 124 + ClearPageReserved(pg); 125 + get_page(pg); 126 + vdso_pagelist[i] = pg; 127 + } 128 + 129 + /* Sanity check the shared object header. */ 130 + vbase = vmap(vdso_pagelist, 1, 0, PAGE_KERNEL); 131 + if (vbase == NULL) { 132 + pr_err("Failed to map vDSO pagelist!\n"); 133 + return -ENOMEM; 134 + } else if (memcmp(vbase, "\177ELF", 4)) { 135 + pr_err("vDSO is not a valid ELF object!\n"); 136 + ret = -EINVAL; 137 + goto unmap; 138 + } 139 + 140 + /* Grab the vDSO data page. */ 141 + pg = virt_to_page(vdso_data); 142 + get_page(pg); 143 + vdso_pagelist[i] = pg; 144 + 145 + unmap: 146 + vunmap(vbase); 147 + return ret; 148 + } 149 + arch_initcall(vdso_init); 150 + 151 + int arch_setup_additional_pages(struct linux_binprm *bprm, 152 + int uses_interp) 153 + { 154 + struct mm_struct *mm = current->mm; 155 + unsigned long vdso_base, vdso_mapping_len; 156 + int ret; 157 + 158 + /* Be sure to map the data page */ 159 + vdso_mapping_len = (vdso_pages + 1) << PAGE_SHIFT; 160 + 161 + down_write(&mm->mmap_sem); 162 + vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0); 163 + if (IS_ERR_VALUE(vdso_base)) { 164 + ret = vdso_base; 165 + goto up_fail; 166 + } 167 + mm->context.vdso = (void *)vdso_base; 168 + 169 + ret = install_special_mapping(mm, vdso_base, vdso_mapping_len, 170 + VM_READ|VM_EXEC| 171 + VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, 172 + vdso_pagelist); 173 + if (ret) { 174 + mm->context.vdso = NULL; 175 + goto up_fail; 176 + } 177 + 178 + up_fail: 179 + up_write(&mm->mmap_sem); 180 + 181 + return ret; 182 + } 183 + 184 + const char *arch_vma_name(struct vm_area_struct *vma) 185 + { 186 + /* 187 + * We can re-use the vdso pointer in mm_context_t for identifying 188 + * the vectors page for compat applications. The vDSO will always 189 + * sit above TASK_UNMAPPED_BASE and so we don't need to worry about 190 + * it conflicting with the vectors base. 
191 + */ 192 + if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) { 193 + #ifdef CONFIG_COMPAT 194 + if (vma->vm_start == AARCH32_VECTORS_BASE) 195 + return "[vectors]"; 196 + #endif 197 + return "[vdso]"; 198 + } 199 + 200 + return NULL; 201 + } 202 + 203 + /* 204 + * We define AT_SYSINFO_EHDR, so we need these function stubs to keep 205 + * Linux happy. 206 + */ 207 + int in_gate_area_no_mm(unsigned long addr) 208 + { 209 + return 0; 210 + } 211 + 212 + int in_gate_area(struct mm_struct *mm, unsigned long addr) 213 + { 214 + return 0; 215 + } 216 + 217 + struct vm_area_struct *get_gate_vma(struct mm_struct *mm) 218 + { 219 + return NULL; 220 + } 221 + 222 + /* 223 + * Update the vDSO data page to keep in sync with kernel timekeeping. 224 + */ 225 + void update_vsyscall(struct timespec *ts, struct timespec *wtm, 226 + struct clocksource *clock, u32 mult) 227 + { 228 + struct timespec xtime_coarse; 229 + u32 use_syscall = strcmp(clock->name, "arch_sys_counter"); 230 + 231 + ++vdso_data->tb_seq_count; 232 + smp_wmb(); 233 + 234 + xtime_coarse = __current_kernel_time(); 235 + vdso_data->use_syscall = use_syscall; 236 + vdso_data->xtime_coarse_sec = xtime_coarse.tv_sec; 237 + vdso_data->xtime_coarse_nsec = xtime_coarse.tv_nsec; 238 + 239 + if (!use_syscall) { 240 + vdso_data->cs_cycle_last = clock->cycle_last; 241 + vdso_data->xtime_clock_sec = ts->tv_sec; 242 + vdso_data->xtime_clock_nsec = ts->tv_nsec; 243 + vdso_data->cs_mult = mult; 244 + vdso_data->cs_shift = clock->shift; 245 + vdso_data->wtm_clock_sec = wtm->tv_sec; 246 + vdso_data->wtm_clock_nsec = wtm->tv_nsec; 247 + } 248 + 249 + smp_wmb(); 250 + ++vdso_data->tb_seq_count; 251 + } 252 + 253 + void update_vsyscall_tz(void) 254 + { 255 + ++vdso_data->tb_seq_count; 256 + smp_wmb(); 257 + vdso_data->tz_minuteswest = sys_tz.tz_minuteswest; 258 + vdso_data->tz_dsttime = sys_tz.tz_dsttime; 259 + smp_wmb(); 260 + ++vdso_data->tb_seq_count; 261 + }
+2
arch/arm64/kernel/vdso/.gitignore
··· 1 + vdso.lds 2 + vdso-offsets.h
+63
arch/arm64/kernel/vdso/Makefile
#
# Building a vDSO image for AArch64.
#
# Author: Will Deacon <will.deacon@arm.com>
# Heavily based on the vDSO Makefiles for other archs.
#

obj-vdso := gettimeofday.o note.o sigreturn.o

# Build rules
targets := $(obj-vdso) vdso.so vdso.so.dbg
obj-vdso := $(addprefix $(obj)/, $(obj-vdso))

# The vDSO is a freestanding shared object: no libc, no builtins.
ccflags-y := -shared -fno-common -fno-builtin
ccflags-y += -nostdlib -Wl,-soname=linux-vdso.so.1 \
		$(call cc-ldoption, -Wl$(comma)--hash-style=sysv)

# vdso.o embeds the stripped vdso.so into the kernel image (.incbin).
obj-y += vdso.o
extra-y += vdso.lds vdso-offsets.h
CPPFLAGS_vdso.lds += -P -C -U$(ARCH)

# Force dependency (incbin is bad)
$(obj)/vdso.o : $(obj)/vdso.so

# Link rule for the .so file, .lds has to be first
$(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso)
	$(call if_changed,vdsold)

# Strip rule for the .so file
$(obj)/%.so: OBJCOPYFLAGS := -S
$(obj)/%.so: $(obj)/%.so.dbg FORCE
	$(call if_changed,objcopy)

# Generate VDSO offsets using helper script
gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh
quiet_cmd_vdsosym = VDSOSYM $@
define cmd_vdsosym
	$(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@ && \
	cp $@ include/generated/
endef

$(obj)/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE
	$(call if_changed,vdsosym)

# Assembly rules for the .S files
$(obj-vdso): %.o: %.S
	$(call if_changed_dep,vdsoas)

# Actual build commands
quiet_cmd_vdsold = VDSOL $@
      cmd_vdsold = $(CC) $(c_flags) -Wl,-T $^ -o $@
quiet_cmd_vdsoas = VDSOA $@
      cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $<

# Install commands for the unstripped file
quiet_cmd_vdso_install = INSTALL $@
      cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@

vdso.so: $(obj)/vdso.so.dbg
	@mkdir -p $(MODLIB)/vdso
	$(call cmd,vdso_install)

vdso_install: vdso.so
+15
arch/arm64/kernel/vdso/gen_vdso_offsets.sh
··· 1 + #!/bin/sh 2 + 3 + # 4 + # Match symbols in the DSO that look like VDSO_*; produce a header file 5 + # of constant offsets into the shared object. 6 + # 7 + # Doing this inside the Makefile will break the $(filter-out) function, 8 + # causing Kbuild to rebuild the vdso-offsets header file every time. 9 + # 10 + # Author: Will Deacon <will.deacon@arm.com 11 + # 12 + 13 + LC_ALL=C 14 + sed -n -e 's/^00*/0/' -e \ 15 + 's/^\([0-9a-fA-F]*\) . VDSO_\([a-zA-Z0-9_]*\)$/\#define vdso_offset_\2\t0x\1/p'
+242
arch/arm64/kernel/vdso/gettimeofday.S
/*
 * Userspace implementations of gettimeofday() and friends.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>

/* NSEC_PER_SEC (1000000000) split into two 16-bit movk halves. */
#define NSEC_PER_SEC_LO16	0xca00
#define NSEC_PER_SEC_HI16	0x3b9a

vdso_data	.req	x6
use_syscall	.req	w7
seqcnt		.req	w8

	/*
	 * Reader side of the vDSO seqlock: spin while the sequence count
	 * is odd (update in progress), then load use_syscall after a
	 * load-acquire barrier.
	 */
	.macro	seqcnt_acquire
9999:	ldr	seqcnt, [vdso_data, #VDSO_TB_SEQ_COUNT]
	tbnz	seqcnt, #0, 9999b
	dmb	ishld
	ldr	use_syscall, [vdso_data, #VDSO_USE_SYSCALL]
	.endm

	/* Re-read the sequence count after the data reads (barrier first). */
	.macro	seqcnt_read, cnt
	dmb	ishld
	ldr	\cnt, [vdso_data, #VDSO_TB_SEQ_COUNT]
	.endm

	/* Retry at \fail if the count changed since seqcnt_acquire. */
	.macro	seqcnt_check, cnt, fail
	cmp	\cnt, seqcnt
	b.ne	\fail
	.endm

	.text

/* int __kernel_gettimeofday(struct timeval *tv, struct timezone *tz); */
ENTRY(__kernel_gettimeofday)
	.cfi_startproc
	mov	x2, x30
	.cfi_register x30, x2

	/* Acquire the sequence counter and get the timespec. */
	adr	vdso_data, _vdso_data
1:	seqcnt_acquire
	cbnz	use_syscall, 4f

	/* If tv is NULL, skip to the timezone code. */
	cbz	x0, 2f
	bl	__do_get_tspec
	seqcnt_check w13, 1b

	/* Convert ns to us. */
	mov	x11, #1000
	udiv	x10, x10, x11
	stp	x9, x10, [x0, #TVAL_TV_SEC]
2:
	/* If tz is NULL, return 0. */
	cbz	x1, 3f
	ldp	w4, w5, [vdso_data, #VDSO_TZ_MINWEST]
	seqcnt_read w13
	seqcnt_check w13, 1b
	stp	w4, w5, [x1, #TZ_MINWEST]
3:
	mov	x0, xzr
	ret	x2
4:
	/* Syscall fallback. */
	mov	x8, #__NR_gettimeofday
	svc	#0
	ret	x2
	.cfi_endproc
ENDPROC(__kernel_gettimeofday)

/* int __kernel_clock_gettime(clockid_t clock_id, struct timespec *tp); */
ENTRY(__kernel_clock_gettime)
	.cfi_startproc
	cmp	w0, #CLOCK_REALTIME
	ccmp	w0, #CLOCK_MONOTONIC, #0x4, ne
	b.ne	2f

	mov	x2, x30
	.cfi_register x30, x2

	/* Get kernel timespec. */
	adr	vdso_data, _vdso_data
1:	seqcnt_acquire
	cbnz	use_syscall, 7f

	bl	__do_get_tspec
	seqcnt_check w13, 1b

	cmp	w0, #CLOCK_MONOTONIC
	b.ne	6f

	/* Get wtm timespec. */
	ldp	x14, x15, [vdso_data, #VDSO_WTM_CLK_SEC]

	/* Check the sequence counter. */
	seqcnt_read w13
	seqcnt_check w13, 1b
	b	4f
2:
	cmp	w0, #CLOCK_REALTIME_COARSE
	ccmp	w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne
	b.ne	8f

	/* Get coarse timespec. */
	adr	vdso_data, _vdso_data
3:	seqcnt_acquire
	ldp	x9, x10, [vdso_data, #VDSO_XTIME_CRS_SEC]

	cmp	w0, #CLOCK_MONOTONIC_COARSE
	b.ne	6f

	/* Get wtm timespec. */
	ldp	x14, x15, [vdso_data, #VDSO_WTM_CLK_SEC]

	/* Check the sequence counter. */
	seqcnt_read w13
	seqcnt_check w13, 3b
4:
	/* Add on wtm timespec. */
	add	x9, x9, x14
	add	x10, x10, x15

	/* Normalise the new timespec: fold tv_nsec into [0, 1e9). */
	mov	x14, #NSEC_PER_SEC_LO16
	movk	x14, #NSEC_PER_SEC_HI16, lsl #16
	cmp	x10, x14
	b.lt	5f
	sub	x10, x10, x14
	add	x9, x9, #1
5:
	cmp	x10, #0
	b.ge	6f
	add	x10, x10, x14
	sub	x9, x9, #1

6:	/* Store to the user timespec. */
	stp	x9, x10, [x1, #TSPEC_TV_SEC]
	mov	x0, xzr
	ret	x2
7:
	mov	x30, x2
8:	/* Syscall fallback. */
	mov	x8, #__NR_clock_gettime
	svc	#0
	ret
	.cfi_endproc
ENDPROC(__kernel_clock_gettime)

/* int __kernel_clock_getres(clockid_t clock_id, struct timespec *res); */
ENTRY(__kernel_clock_getres)
	.cfi_startproc
	cbz	w1, 3f

	cmp	w0, #CLOCK_REALTIME
	ccmp	w0, #CLOCK_MONOTONIC, #0x4, ne
	b.ne	1f

	ldr	x2, 5f
	b	2f
1:
	cmp	w0, #CLOCK_REALTIME_COARSE
	ccmp	w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne
	b.ne	4f
	ldr	x2, 6f
2:
	stp	xzr, x2, [x1]

3:	/* res == NULL. */
	mov	w0, wzr
	ret

4:	/* Syscall fallback. */
	mov	x8, #__NR_clock_getres
	svc	#0
	ret
5:
	.quad	CLOCK_REALTIME_RES
6:
	.quad	CLOCK_COARSE_RES
	.cfi_endproc
ENDPROC(__kernel_clock_getres)

/*
 * Read the current time from the architected counter.
 * Expects vdso_data to be initialised.
 * Clobbers the temporary registers (x9 - x15).
 * Returns:
 *  - (x9, x10) = (ts->tv_sec, ts->tv_nsec)
 *  - (x11, x12) = (xtime->tv_sec, xtime->tv_nsec)
 *  - w13 = vDSO sequence counter
 */
ENTRY(__do_get_tspec)
	.cfi_startproc

	/* Read from the vDSO data page. */
	ldr	x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
	ldp	x11, x12, [vdso_data, #VDSO_XTIME_CLK_SEC]
	ldp	w14, w15, [vdso_data, #VDSO_CS_MULT]
	seqcnt_read w13

	/*
	 * Read the physical counter (isb serialises against the mrs).
	 * NOTE(review): this reads cntpct_el0; some platforms only expose
	 * the virtual counter (cntvct_el0) to EL0 — confirm.
	 */
	isb
	mrs	x9, cntpct_el0

	/* Calculate cycle delta and convert to ns. */
	sub	x10, x9, x10
	/* We can only guarantee 56 bits of precision. */
	movn	x9, #0xff0, lsl #48
	and	x10, x9, x10
	mul	x10, x10, x14
	lsr	x10, x10, x15

	/* Use the kernel time to calculate the new timespec. */
	add	x10, x12, x10
	mov	x14, #NSEC_PER_SEC_LO16
	movk	x14, #NSEC_PER_SEC_HI16, lsl #16
	udiv	x15, x10, x14
	add	x9, x15, x11
	mul	x14, x14, x15
	sub	x10, x10, x14

	ret
	.cfi_endproc
ENDPROC(__do_get_tspec)
+28
arch/arm64/kernel/vdso/note.S
/*
 * Copyright (C) 2012 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
 * Here we can supply some information useful to userland.
 */

#include <linux/uts.h>
#include <linux/version.h>
#include <linux/elfnote.h>

/* Emit the running kernel's version code as a Linux ELF note. */
ELFNOTE_START(Linux, 0, "a")
	.long LINUX_VERSION_CODE
ELFNOTE_END
+37
arch/arm64/kernel/vdso/sigreturn.S
/*
 * Sigreturn trampoline for returning from a signal when the SA_RESTORER
 * flag is not set.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/linkage.h>
#include <asm/unistd.h>

	.text

	/*
	 * NOTE(review): the nop before the entry point appears deliberate
	 * (presumably so unwinders that look at the instruction preceding
	 * the return address land inside this object) — confirm before
	 * removing.  The CFI directives describe the signal frame laid out
	 * by the kernel (x29/x30 saved at offsets 0 and 8 from the CFA).
	 */
	nop
ENTRY(__kernel_rt_sigreturn)
	.cfi_startproc
	.cfi_signal_frame
	.cfi_def_cfa	x29, 0
	.cfi_offset	x29, 0 * 8
	.cfi_offset	x30, 1 * 8
	mov	x8, #__NR_rt_sigreturn
	svc	#0
	.cfi_endproc
ENDPROC(__kernel_rt_sigreturn)
+33
arch/arm64/kernel/vdso/vdso.S
/*
 * Copyright (C) 2012 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/const.h>
#include <asm/page.h>

	__PAGE_ALIGNED_DATA

	/*
	 * Embed the stripped vdso.so image into the kernel, page-aligned
	 * at both ends so vdso.c can hand out whole pages.  vdso_start
	 * and vdso_end bracket the image for vdso_init().
	 */
	.globl vdso_start, vdso_end
	.balign PAGE_SIZE
vdso_start:
	.incbin "arch/arm64/kernel/vdso/vdso.so"
	.balign PAGE_SIZE
vdso_end:

	.previous
+100
arch/arm64/kernel/vdso/vdso.lds.S
/*
 * GNU linker script for the VDSO library.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Will Deacon <will.deacon@arm.com>
 * Heavily based on the vDSO linker scripts for other archs.
 */

#include <linux/const.h>
#include <asm/page.h>
#include <asm/vdso.h>

OUTPUT_FORMAT("elf64-littleaarch64", "elf64-bigaarch64", "elf64-littleaarch64")
OUTPUT_ARCH(aarch64)

SECTIONS
{
	. = VDSO_LBASE + SIZEOF_HEADERS;

	.hash		: { *(.hash) }			:text
	.gnu.hash	: { *(.gnu.hash) }
	.dynsym		: { *(.dynsym) }
	.dynstr		: { *(.dynstr) }
	.gnu.version	: { *(.gnu.version) }
	.gnu.version_d	: { *(.gnu.version_d) }
	.gnu.version_r	: { *(.gnu.version_r) }

	.note		: { *(.note.*) }		:text	:note

	. = ALIGN(16);

	/* Fill gaps in .text with 0xd503201f (AArch64 NOP). */
	.text		: { *(.text*) }			:text	=0xd503201f
	PROVIDE (__etext = .);
	PROVIDE (_etext = .);
	PROVIDE (etext = .);

	.eh_frame_hdr	: { *(.eh_frame_hdr) }		:text	:eh_frame_hdr
	.eh_frame	: { KEEP (*(.eh_frame)) }	:text

	.dynamic	: { *(.dynamic) }		:text	:dynamic

	.rodata		: { *(.rodata*) }		:text

	_end = .;
	PROVIDE(end = .);

	/*
	 * _vdso_data marks the page after the image; the kernel maps the
	 * vdso_data page there (see arch_setup_additional_pages()), and
	 * the assembly reads it via `adr vdso_data, _vdso_data`.
	 */
	. = ALIGN(PAGE_SIZE);
	PROVIDE(_vdso_data = .);

	/DISCARD/	: {
		*(.note.GNU-stack)
		*(.data .data.* .gnu.linkonce.d.* .sdata*)
		*(.bss .sbss .dynbss .dynsbss)
	}
}

/*
 * We must supply the ELF program headers explicitly to get just one
 * PT_LOAD segment, and set the flags explicitly to make segments read-only.
 */
PHDRS
{
	text		PT_LOAD		FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */
	dynamic		PT_DYNAMIC	FLAGS(4);		/* PF_R */
	note		PT_NOTE		FLAGS(4);		/* PF_R */
	eh_frame_hdr	PT_GNU_EH_FRAME;
}

/*
 * This controls what symbols we export from the DSO.
 */
VERSION
{
	LINUX_2.6.39 {
	global:
		__kernel_rt_sigreturn;
		__kernel_gettimeofday;
		__kernel_clock_gettime;
		__kernel_clock_getres;
	local: *;
	};
}

/*
 * Make the sigreturn code visible to the kernel.
 */
VDSO_sigtramp		= __kernel_rt_sigreturn;