Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sparc: Improve VDSO instruction patching.

The current VDSO patch mechanism has several problems:

1) It assumes how gcc will emit a function, with a register
window, an initial save instruction and then immediately
the %tick read when compiling vread_tick().

There are no such guarantees; code generation could change
at any time, gcc could put a nop between the save and
the %tick read, etc.

So this is extremely fragile and would fail some day.

2) It prevents us from properly inlining vread_tick() into its callers,
and thus from getting the best possible code sequences.

So fix this to patch properly, with location based annotations.

We have to be careful because we cannot do it the way we do
patches elsewhere in the kernel. Those use a sequence like:

1:
insn
.section .whatever_patch, "ax"
.word 1b
replacement_insn
.previous

This is a dynamic shared object, so that .word cannot be resolved at
build time, and thus cannot be used to execute the patches when the
kernel initializes the images.

Even trying to use label difference expressions doesn't work in the
above kind of scheme:

1:
insn
.section .whatever_patch, "ax"
.word . - 1b
replacement_insn
.previous

The assembler complains that it cannot resolve that computation.
The issue is that this is contained in an executable section.

Borrow the sequence used by x86 alternatives, which is:

1:
insn
.pushsection .whatever_patch, "a"
.word . - 1b, . - 1f
.popsection
.pushsection .whatever_patch_replacements, "ax"
1:
replacement_insn
.popsection

This works, allows us to inline vread_tick() as much as we like, and
can be used for arbitrary kinds of VDSO patching in the future.

Also, reverse the condition for patching. Most systems are %stick
based, so if we only patch on %tick systems the patching code will
get little or no testing.

Signed-off-by: David S. Miller <davem@davemloft.net>

+67 -51
+3 -3
arch/sparc/include/asm/vdso.h
··· 8 8 struct vdso_image { 9 9 void *data; 10 10 unsigned long size; /* Always a multiple of PAGE_SIZE */ 11 + 12 + unsigned long tick_patch, tick_patch_len; 13 + 11 14 long sym_vvar_start; /* Negative offset to the vvar area */ 12 - long sym_vread_tick; /* Start of vread_tick section */ 13 - long sym_vread_tick_patch_start; /* Start of tick read */ 14 - long sym_vread_tick_patch_end; /* End of tick read */ 15 15 }; 16 16 17 17 #ifdef CONFIG_SPARC64
-3
arch/sparc/kernel/time_64.c
··· 53 53 54 54 DEFINE_SPINLOCK(rtc_lock); 55 55 56 - unsigned int __read_mostly vdso_fix_stick; 57 - 58 56 #ifdef CONFIG_SMP 59 57 unsigned long profile_pc(struct pt_regs *regs) 60 58 { ··· 836 838 } else { 837 839 init_tick_ops(&tick_operations); 838 840 clocksource_tick.archdata.vclock_mode = VCLOCK_TICK; 839 - vdso_fix_stick = 1; 840 841 } 841 842 } else { 842 843 init_tick_ops(&stick_operations);
+23 -16
arch/sparc/vdso/vclock_gettime.c
··· 105 105 vread_tick(void) { 106 106 u64 ret; 107 107 108 - __asm__ __volatile__("rd %%asr24, %0 \n" 109 - ".section .vread_tick_patch, \"ax\" \n" 110 - "rd %%tick, %0 \n" 111 - ".previous \n" 112 - : "=&r" (ret)); 108 + __asm__ __volatile__("1:\n\t" 109 + "rd %%tick, %0\n\t" 110 + ".pushsection .tick_patch, \"a\"\n\t" 111 + ".word 1b - ., 1f - .\n\t" 112 + ".popsection\n\t" 113 + ".pushsection .tick_patch_replacement, \"ax\"\n\t" 114 + "1:\n\t" 115 + "rd %%asr24, %0\n\t" 116 + ".popsection\n" 117 + : "=r" (ret)); 113 118 return ret & ~TICK_PRIV_BIT; 114 119 } 115 120 #else 116 121 static notrace noinline u64 117 122 vread_tick(void) 118 123 { 119 - unsigned int lo, hi; 124 + register unsigned long long ret asm("o4"); 120 125 121 - __asm__ __volatile__("rd %%asr24, %%g1\n\t" 122 - "srlx %%g1, 32, %1\n\t" 123 - "srl %%g1, 0, %0\n" 124 - ".section .vread_tick_patch, \"ax\" \n" 125 - "rd %%tick, %%g1\n" 126 - ".previous \n" 127 - : "=&r" (lo), "=&r" (hi) 128 - : 129 - : "g1"); 130 - return lo | ((u64)hi << 32); 126 + __asm__ __volatile__("1:\n\t" 127 + "rd %%tick, %L0\n\t" 128 + "srlx %L0, 32, %H0\n\t" 129 + ".pushsection .tick_patch, \"a\"\n\t" 130 + ".word 1b - ., 1f - .\n\t" 131 + ".popsection\n\t" 132 + ".pushsection .tick_patch_replacement, \"ax\"\n\t" 133 + "1:\n\t" 134 + "rd %%asr24, %L0\n\t" 135 + ".popsection\n" 136 + : "=r" (ret)); 137 + return ret; 131 138 } 132 139 #endif 133 140
+2 -5
arch/sparc/vdso/vdso-layout.lds.S
··· 73 73 74 74 .text : { *(.text*) } :text =0x90909090, 75 75 76 - .vread_tick_patch : { 77 - vread_tick_patch_start = .; 78 - *(.vread_tick_patch) 79 - vread_tick_patch_end = .; 80 - } 76 + .tick_patch : { *(.tick_patch) } :text 77 + .tick_patch_insns : { *(.tick_patch_insns) } :text 81 78 82 79 /DISCARD/ : { 83 80 *(.discard)
-6
arch/sparc/vdso/vdso2c.c
··· 63 63 sym_vvar_start, 64 64 sym_VDSO_FAKE_SECTION_TABLE_START, 65 65 sym_VDSO_FAKE_SECTION_TABLE_END, 66 - sym_vread_tick, 67 - sym_vread_tick_patch_start, 68 - sym_vread_tick_patch_end 69 66 }; 70 67 71 68 struct vdso_sym { ··· 78 81 [sym_VDSO_FAKE_SECTION_TABLE_END] = { 79 82 "VDSO_FAKE_SECTION_TABLE_END", 0 80 83 }, 81 - [sym_vread_tick] = {"vread_tick", 1}, 82 - [sym_vread_tick_patch_start] = {"vread_tick_patch_start", 1}, 83 - [sym_vread_tick_patch_end] = {"vread_tick_patch_end", 1} 84 84 }; 85 85 86 86 __attribute__((format(printf, 1, 2))) __attribute__((noreturn))
+16 -2
arch/sparc/vdso/vdso2c.h
··· 17 17 unsigned long mapping_size; 18 18 int i; 19 19 unsigned long j; 20 - 21 - ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr; 20 + ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr, 21 + *patch_sec = NULL; 22 22 ELF(Ehdr) *hdr = (ELF(Ehdr) *)raw_addr; 23 23 ELF(Dyn) *dyn = 0, *dyn_end = 0; 24 + const char *secstrings; 24 25 INT_BITS syms[NSYMS] = {}; 25 26 26 27 ELF(Phdr) *pt = (ELF(Phdr) *)(raw_addr + GET_BE(&hdr->e_phoff)); ··· 64 63 } 65 64 66 65 /* Walk the section table */ 66 + secstrings_hdr = raw_addr + GET_BE(&hdr->e_shoff) + 67 + GET_BE(&hdr->e_shentsize)*GET_BE(&hdr->e_shstrndx); 68 + secstrings = raw_addr + GET_BE(&secstrings_hdr->sh_offset); 67 69 for (i = 0; i < GET_BE(&hdr->e_shnum); i++) { 68 70 ELF(Shdr) *sh = raw_addr + GET_BE(&hdr->e_shoff) + 69 71 GET_BE(&hdr->e_shentsize) * i; 70 72 if (GET_BE(&sh->sh_type) == SHT_SYMTAB) 71 73 symtab_hdr = sh; 74 + 75 + if (!strcmp(secstrings + GET_BE(&sh->sh_name), 76 + ".tick_patch")) 77 + patch_sec = sh; 72 78 } 73 79 74 80 if (!symtab_hdr) ··· 142 134 fprintf(outfile, "const struct vdso_image %s_builtin = {\n", name); 143 135 fprintf(outfile, "\t.data = raw_data,\n"); 144 136 fprintf(outfile, "\t.size = %lu,\n", mapping_size); 137 + if (patch_sec) { 138 + fprintf(outfile, "\t.tick_patch = %lu,\n", 139 + (unsigned long)GET_BE(&patch_sec->sh_offset)); 140 + fprintf(outfile, "\t.tick_patch_len = %lu,\n", 141 + (unsigned long)GET_BE(&patch_sec->sh_size)); 142 + } 145 143 for (i = 0; i < NSYMS; i++) { 146 144 if (required_syms[i].export && syms[i]) 147 145 fprintf(outfile, "\t.sym_%s = %" PRIi64 ",\n",
+23 -16
arch/sparc/vdso/vma.c
··· 16 16 #include <linux/linkage.h> 17 17 #include <linux/random.h> 18 18 #include <linux/elf.h> 19 + #include <asm/cacheflush.h> 20 + #include <asm/spitfire.h> 19 21 #include <asm/vdso.h> 20 22 #include <asm/vvar.h> 21 23 #include <asm/page.h> ··· 42 40 43 41 struct vvar_data *vvar_data; 44 42 45 - #define SAVE_INSTR_SIZE 4 43 + struct tick_patch_entry { 44 + s32 orig, repl; 45 + }; 46 + 47 + static void stick_patch(const struct vdso_image *image) 48 + { 49 + struct tick_patch_entry *p, *p_end; 50 + 51 + p = image->data + image->tick_patch; 52 + p_end = (void *)p + image->tick_patch_len; 53 + while (p < p_end) { 54 + u32 *instr = (void *)&p->orig + p->orig; 55 + u32 *repl = (void *)&p->repl + p->repl; 56 + 57 + *instr = *repl; 58 + flushi(instr); 59 + p++; 60 + } 61 + } 46 62 47 63 /* 48 64 * Allocate pages for the vdso and vvar, and copy in the vdso text from the ··· 88 68 if (!cpp) 89 69 goto oom; 90 70 91 - if (vdso_fix_stick) { 92 - /* 93 - * If the system uses %tick instead of %stick, patch the VDSO 94 - * with instruction reading %tick instead of %stick. 95 - */ 96 - unsigned int j, k = SAVE_INSTR_SIZE; 97 - unsigned char *data = image->data; 98 - 99 - for (j = image->sym_vread_tick_patch_start; 100 - j < image->sym_vread_tick_patch_end; j++) { 101 - 102 - data[image->sym_vread_tick + k] = data[j]; 103 - k++; 104 - } 105 - } 71 + if (tlb_type != spitfire) 72 + stick_patch(image); 106 73 107 74 for (i = 0; i < cnpages; i++) { 108 75 cp = alloc_page(GFP_KERNEL);