Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

LoongArch: vDSO: Switch to generic storage implementation

The generic storage implementation provides the same features as the
custom one. However it can be shared between architectures, making
maintenance easier.

Co-developed-by: Nam Cao <namcao@linutronix.de>
Signed-off-by: Nam Cao <namcao@linutronix.de>
Signed-off-by: Thomas Weißschuh <thomas.weissschuh@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/all/20250204-vdso-store-rng-v3-10-13a4669dfc8c@linutronix.de

Authored by Thomas Weißschuh and committed by Thomas Gleixner
d2862bb9 46fe55b2

+39 -177
+2
arch/loongarch/Kconfig
··· 30 30 select ARCH_HAS_SET_MEMORY 31 31 select ARCH_HAS_SET_DIRECT_MAP 32 32 select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST 33 + select ARCH_HAS_VDSO_ARCH_DATA 33 34 select ARCH_INLINE_READ_LOCK if !PREEMPTION 34 35 select ARCH_INLINE_READ_LOCK_BH if !PREEMPTION 35 36 select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPTION ··· 107 106 select GENERIC_SCHED_CLOCK 108 107 select GENERIC_SMP_IDLE_THREAD 109 108 select GENERIC_TIME_VSYSCALL 109 + select GENERIC_VDSO_DATA_STORE 110 110 select GENERIC_VDSO_TIME_NS 111 111 select GPIOLIB 112 112 select HAS_IOPORT
-1
arch/loongarch/include/asm/vdso.h
··· 31 31 unsigned long size; 32 32 unsigned long offset_sigreturn; 33 33 struct vm_special_mapping code_mapping; 34 - struct vm_special_mapping data_mapping; 35 34 }; 36 35 37 36 extern struct loongarch_vdso_info vdso_info;
+25
arch/loongarch/include/asm/vdso/arch_data.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-or-later */ 2 + /* 3 + * Author: Huacai Chen <chenhuacai@loongson.cn> 4 + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited 5 + */ 6 + 7 + #ifndef _VDSO_ARCH_DATA_H 8 + #define _VDSO_ARCH_DATA_H 9 + 10 + #ifndef __ASSEMBLY__ 11 + 12 + #include <asm/asm.h> 13 + #include <asm/vdso.h> 14 + 15 + struct vdso_pcpu_data { 16 + u32 node; 17 + } ____cacheline_aligned_in_smp; 18 + 19 + struct vdso_arch_data { 20 + struct vdso_pcpu_data pdata[NR_CPUS]; 21 + }; 22 + 23 + #endif /* __ASSEMBLY__ */ 24 + 25 + #endif
-5
arch/loongarch/include/asm/vdso/getrandom.h
··· 28 28 return ret; 29 29 } 30 30 31 - static __always_inline const struct vdso_rng_data *__arch_get_vdso_rng_data(void) 32 - { 33 - return &_loongarch_data.rng_data; 34 - } 35 - 36 31 #endif /* !__ASSEMBLY__ */ 37 32 38 33 #endif /* __ASM_VDSO_GETRANDOM_H */
+1 -13
arch/loongarch/include/asm/vdso/gettimeofday.h
··· 72 72 } 73 73 74 74 static __always_inline u64 __arch_get_hw_counter(s32 clock_mode, 75 - const struct vdso_data *vd) 75 + const struct vdso_time_data *vd) 76 76 { 77 77 uint64_t count; 78 78 ··· 89 89 } 90 90 #define __arch_vdso_hres_capable loongarch_vdso_hres_capable 91 91 92 - static __always_inline const struct vdso_data *__arch_get_vdso_data(void) 93 - { 94 - return _vdso_data; 95 - } 96 - 97 - #ifdef CONFIG_TIME_NS 98 - static __always_inline 99 - const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd) 100 - { 101 - return _timens_data; 102 - } 103 - #endif 104 92 #endif /* !__ASSEMBLY__ */ 105 93 106 94 #endif /* __ASM_VDSO_GETTIMEOFDAY_H */
+2 -36
arch/loongarch/include/asm/vdso/vdso.h
··· 12 12 #include <asm/asm.h> 13 13 #include <asm/page.h> 14 14 #include <asm/vdso.h> 15 + #include <vdso/datapage.h> 15 16 16 - struct vdso_pcpu_data { 17 - u32 node; 18 - } ____cacheline_aligned_in_smp; 19 - 20 - struct loongarch_vdso_data { 21 - struct vdso_pcpu_data pdata[NR_CPUS]; 22 - struct vdso_rng_data rng_data; 23 - }; 24 - 25 - /* 26 - * The layout of vvar: 27 - * 28 - * high 29 - * +---------------------+--------------------------+ 30 - * | loongarch vdso data | LOONGARCH_VDSO_DATA_SIZE | 31 - * +---------------------+--------------------------+ 32 - * | time-ns vdso data | PAGE_SIZE | 33 - * +---------------------+--------------------------+ 34 - * | generic vdso data | PAGE_SIZE | 35 - * +---------------------+--------------------------+ 36 - * low 37 - */ 38 - #define LOONGARCH_VDSO_DATA_SIZE PAGE_ALIGN(sizeof(struct loongarch_vdso_data)) 39 - #define LOONGARCH_VDSO_DATA_PAGES (LOONGARCH_VDSO_DATA_SIZE >> PAGE_SHIFT) 40 - 41 - enum vvar_pages { 42 - VVAR_GENERIC_PAGE_OFFSET, 43 - VVAR_TIMENS_PAGE_OFFSET, 44 - VVAR_LOONGARCH_PAGES_START, 45 - VVAR_LOONGARCH_PAGES_END = VVAR_LOONGARCH_PAGES_START + LOONGARCH_VDSO_DATA_PAGES - 1, 46 - VVAR_NR_PAGES, 47 - }; 48 - 49 - #define VVAR_SIZE (VVAR_NR_PAGES << PAGE_SHIFT) 50 - 51 - extern struct loongarch_vdso_data _loongarch_data __attribute__((visibility("hidden"))); 17 + #define VVAR_SIZE (VDSO_NR_PAGES << PAGE_SHIFT) 52 18 53 19 #endif /* __ASSEMBLY__ */ 54 20
-17
arch/loongarch/include/asm/vdso/vsyscall.h
··· 6 6 7 7 #include <vdso/datapage.h> 8 8 9 - extern struct vdso_data *vdso_data; 10 - extern struct vdso_rng_data *vdso_rng_data; 11 - 12 - static __always_inline 13 - struct vdso_data *__loongarch_get_k_vdso_data(void) 14 - { 15 - return vdso_data; 16 - } 17 - #define __arch_get_k_vdso_data __loongarch_get_k_vdso_data 18 - 19 - static __always_inline 20 - struct vdso_rng_data *__loongarch_get_k_vdso_rng_data(void) 21 - { 22 - return vdso_rng_data; 23 - } 24 - #define __arch_get_k_vdso_rng_data __loongarch_get_k_vdso_rng_data 25 - 26 9 /* The asm-generic header needs to be included after the definitions above */ 27 10 #include <asm-generic/vdso/vsyscall.h> 28 11
+1 -1
arch/loongarch/kernel/asm-offsets.c
··· 315 315 { 316 316 COMMENT("LoongArch vDSO offsets."); 317 317 318 - DEFINE(__VVAR_PAGES, VVAR_NR_PAGES); 318 + DEFINE(__VDSO_PAGES, VDSO_NR_PAGES); 319 319 BLANK(); 320 320 }
+3 -89
arch/loongarch/kernel/vdso.c
··· 14 14 #include <linux/random.h> 15 15 #include <linux/sched.h> 16 16 #include <linux/slab.h> 17 - #include <linux/time_namespace.h> 17 + #include <linux/vdso_datastore.h> 18 18 19 19 #include <asm/page.h> 20 20 #include <asm/vdso.h> ··· 25 25 26 26 extern char vdso_start[], vdso_end[]; 27 27 28 - /* Kernel-provided data used by the VDSO. */ 29 - static union vdso_data_store generic_vdso_data __page_aligned_data; 30 - 31 - static union { 32 - u8 page[LOONGARCH_VDSO_DATA_SIZE]; 33 - struct loongarch_vdso_data vdata; 34 - } loongarch_vdso_data __page_aligned_data; 35 - 36 - struct vdso_data *vdso_data = generic_vdso_data.data; 37 - struct vdso_pcpu_data *vdso_pdata = loongarch_vdso_data.vdata.pdata; 38 - struct vdso_rng_data *vdso_rng_data = &loongarch_vdso_data.vdata.rng_data; 39 - 40 28 static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma) 41 29 { 42 30 current->mm->context.vdso = (void *)(new_vma->vm_start); ··· 32 44 return 0; 33 45 } 34 46 35 - static vm_fault_t vvar_fault(const struct vm_special_mapping *sm, 36 - struct vm_area_struct *vma, struct vm_fault *vmf) 37 - { 38 - unsigned long pfn; 39 - struct page *timens_page = find_timens_vvar_page(vma); 40 - 41 - switch (vmf->pgoff) { 42 - case VVAR_GENERIC_PAGE_OFFSET: 43 - if (!timens_page) 44 - pfn = sym_to_pfn(vdso_data); 45 - else 46 - pfn = page_to_pfn(timens_page); 47 - break; 48 - #ifdef CONFIG_TIME_NS 49 - case VVAR_TIMENS_PAGE_OFFSET: 50 - /* 51 - * If a task belongs to a time namespace then a namespace specific 52 - * VVAR is mapped with the VVAR_GENERIC_PAGE_OFFSET and the real 53 - * VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET offset. 54 - * See also the comment near timens_setup_vdso_data(). 55 - */ 56 - if (!timens_page) 57 - return VM_FAULT_SIGBUS; 58 - else 59 - pfn = sym_to_pfn(vdso_data); 60 - break; 61 - #endif /* CONFIG_TIME_NS */ 62 - case VVAR_LOONGARCH_PAGES_START ... VVAR_LOONGARCH_PAGES_END: 63 - pfn = sym_to_pfn(&loongarch_vdso_data) + vmf->pgoff - VVAR_LOONGARCH_PAGES_START; 64 - break; 65 - default: 66 - return VM_FAULT_SIGBUS; 67 - } 68 - 69 - return vmf_insert_pfn(vma, vmf->address, pfn); 70 - } 71 - 72 47 struct loongarch_vdso_info vdso_info = { 73 48 .vdso = vdso_start, 74 49 .code_mapping = { 75 50 .name = "[vdso]", 76 51 .mremap = vdso_mremap, 77 - }, 78 - .data_mapping = { 79 - .name = "[vvar]", 80 - .fault = vvar_fault, 81 52 }, 82 53 .offset_sigreturn = vdso_offset_sigreturn, 83 54 }; ··· 48 101 BUG_ON(!PAGE_ALIGNED(vdso_info.vdso)); 49 102 50 103 for_each_possible_cpu(cpu) 51 - vdso_pdata[cpu].node = cpu_to_node(cpu); 104 + vdso_k_arch_data->pdata[cpu].node = cpu_to_node(cpu); 52 105 53 106 vdso_info.size = PAGE_ALIGN(vdso_end - vdso_start); 54 107 vdso_info.code_mapping.pages = ··· 61 114 return 0; 62 115 } 63 116 subsys_initcall(init_vdso); 64 - 65 - #ifdef CONFIG_TIME_NS 66 - struct vdso_data *arch_get_vdso_data(void *vvar_page) 67 - { 68 - return (struct vdso_data *)(vvar_page); 69 - } 70 - 71 - /* 72 - * The vvar mapping contains data for a specific time namespace, so when a 73 - * task changes namespace we must unmap its vvar data for the old namespace. 74 - * Subsequent faults will map in data for the new namespace. 75 - * 76 - * For more details see timens_setup_vdso_data(). 77 - */ 78 - int vdso_join_timens(struct task_struct *task, struct time_namespace *ns) 79 - { 80 - struct mm_struct *mm = task->mm; 81 - struct vm_area_struct *vma; 82 - 83 - VMA_ITERATOR(vmi, mm, 0); 84 - 85 - mmap_read_lock(mm); 86 - for_each_vma(vmi, vma) { 87 - if (vma_is_special_mapping(vma, &vdso_info.data_mapping)) 88 - zap_vma_pages(vma); 89 - } 90 - mmap_read_unlock(mm); 91 - 92 - return 0; 93 - } 94 - #endif 95 117 96 118 static unsigned long vdso_base(void) 97 119 { ··· 97 181 goto out; 98 182 } 99 183 100 - vma = _install_special_mapping(mm, data_addr, VVAR_SIZE, 101 - VM_READ | VM_MAYREAD | VM_PFNMAP, 102 - &info->data_mapping); 184 + vma = vdso_install_vvar_mapping(mm, data_addr); 103 185 if (IS_ERR(vma)) { 104 186 ret = PTR_ERR(vma); 105 187 goto out;
+3 -5
arch/loongarch/vdso/vdso.lds.S
··· 5 5 */ 6 6 #include <asm/page.h> 7 7 #include <generated/asm-offsets.h> 8 + #include <vdso/datapage.h> 8 9 9 10 OUTPUT_FORMAT("elf64-loongarch", "elf64-loongarch", "elf64-loongarch") 10 11 ··· 13 12 14 13 SECTIONS 15 14 { 16 - PROVIDE(_vdso_data = . - __VVAR_PAGES * PAGE_SIZE); 17 - #ifdef CONFIG_TIME_NS 18 - PROVIDE(_timens_data = _vdso_data + PAGE_SIZE); 19 - #endif 20 - PROVIDE(_loongarch_data = _vdso_data + 2 * PAGE_SIZE); 15 + VDSO_VVAR_SYMS 16 + 21 17 . = SIZEOF_HEADERS; 22 18 23 19 .hash : { *(.hash) } :text
+2 -10
arch/loongarch/vdso/vgetcpu.c
··· 19 19 return cpu_id; 20 20 } 21 21 22 - static __always_inline const struct vdso_pcpu_data *get_pcpu_data(void) 23 - { 24 - return _loongarch_data.pdata; 25 - } 26 - 27 22 extern 28 23 int __vdso_getcpu(unsigned int *cpu, unsigned int *node, struct getcpu_cache *unused); 29 24 int __vdso_getcpu(unsigned int *cpu, unsigned int *node, struct getcpu_cache *unused) 30 25 { 31 26 int cpu_id; 32 - const struct vdso_pcpu_data *data; 33 27 34 28 cpu_id = read_cpu_id(); 35 29 36 30 if (cpu) 37 31 *cpu = cpu_id; 38 32 39 - if (node) { 40 - data = get_pcpu_data(); 41 - *node = data[cpu_id].node; 42 - } 33 + if (node) 34 + *node = vdso_u_arch_data.pdata[cpu_id].node; 43 35 44 36 return 0; 45 37 }