Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[POWERPC] Support feature fixups in vdso's

This patch reworks the feature fixup mechanism so vdso's can be fixed up.
The main issue was that the construct:

.long label (or .llong on 64 bits)

will not work in the case of a shared library like the vdso. It will
generate an empty placeholder in the fixup table along with a reloc,
which is not something we can deal with in the vdso.

The idea here (thanks Alan Modra!) is to instead use something like:

1:
.long label - 1b

That is, the feature fixup tables no longer contain addresses of bits of
code to patch, but offsets of such code from the fixup table entry
itself. That is properly resolved by ld when building the .so's. I've
modified the fixup mechanism generically to use that method for the rest
of the kernel as well.

Another trick is that the 32 bits vDSO included in the 64 bits kernel
need to have a table in the 64 bits format. However, gas does not
support 32 bits code with a statement of the form:

.llong label - 1b (Or even just .llong label)

That is, it cannot emit the right fixup/relocation for the linker to use
to assign a 32 bits address to an .llong field. Thus, in the specific
case of the 32 bits vdso built as part of the 64 bits kernel, we are
using a modified macro that generates:

.long 0xffffffff
.llong label - 1b

Note that this assumes that the value is negative, which is enforced by
the .lds (those offsets are always negative as the .text is always
before the fixup table and gas doesn't support emitting the reloc the
other way around).

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>

authored by

Benjamin Herrenschmidt and committed by
Paul Mackerras
0909c8c2 7aeb7324

+141 -49
+5 -6
arch/powerpc/kernel/cputable.c
··· 1202 1202 return NULL; 1203 1203 } 1204 1204 1205 - void do_feature_fixups(unsigned long offset, unsigned long value, 1206 - void *fixup_start, void *fixup_end) 1205 + void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end) 1207 1206 { 1208 1207 struct fixup_entry { 1209 1208 unsigned long mask; 1210 1209 unsigned long value; 1211 - unsigned int *start; 1212 - unsigned int *end; 1210 + long start_off; 1211 + long end_off; 1213 1212 } *fcur, *fend; 1214 1213 1215 1214 fcur = fixup_start; ··· 1223 1224 /* These PTRRELOCs will disappear once the new scheme for 1224 1225 * modules and vdso is implemented 1225 1226 */ 1226 - pstart = PTRRELOC(fcur->start); 1227 - pend = PTRRELOC(fcur->end); 1227 + pstart = ((unsigned int *)fcur) + (fcur->start_off / 4); 1228 + pend = ((unsigned int *)fcur) + (fcur->end_off / 4); 1228 1229 1229 1230 for (p = pstart; p < pend; p++) { 1230 1231 *p = 0x60000000u;
+1 -1
arch/powerpc/kernel/setup_32.c
··· 103 103 */ 104 104 spec = identify_cpu(offset); 105 105 106 - do_feature_fixups(offset, spec->cpu_features, 106 + do_feature_fixups(spec->cpu_features, 107 107 PTRRELOC(&__start___ftr_fixup), 108 108 PTRRELOC(&__stop___ftr_fixup)); 109 109
+2 -2
arch/powerpc/kernel/setup_64.c
··· 354 354 /* Apply the CPUs-specific and firmware specific fixups to kernel 355 355 * text (nop out sections not relevant to this CPU or this firmware) 356 356 */ 357 - do_feature_fixups(0, cur_cpu_spec->cpu_features, 357 + do_feature_fixups(cur_cpu_spec->cpu_features, 358 358 &__start___ftr_fixup, &__stop___ftr_fixup); 359 - do_feature_fixups(0, powerpc_firmware_features, 359 + do_feature_fixups(powerpc_firmware_features, 360 360 &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup); 361 361 362 362 /*
+43
arch/powerpc/kernel/vdso.c
··· 36 36 #include <asm/vdso.h> 37 37 #include <asm/vdso_datapage.h> 38 38 39 + #include "setup.h" 40 + 39 41 #undef DEBUG 40 42 41 43 #ifdef DEBUG ··· 588 586 return 0; 589 587 } 590 588 589 + 590 + static __init int vdso_fixup_features(struct lib32_elfinfo *v32, 591 + struct lib64_elfinfo *v64) 592 + { 593 + void *start32; 594 + unsigned long size32; 595 + 596 + #ifdef CONFIG_PPC64 597 + void *start64; 598 + unsigned long size64; 599 + 600 + start64 = find_section64(v64->hdr, "__ftr_fixup", &size64); 601 + if (start64) 602 + do_feature_fixups(cur_cpu_spec->cpu_features, 603 + start64, start64 + size64); 604 + 605 + start64 = find_section64(v64->hdr, "__fw_ftr_fixup", &size64); 606 + if (start64) 607 + do_feature_fixups(powerpc_firmware_features, 608 + start64, start64 + size64); 609 + #endif /* CONFIG_PPC64 */ 610 + 611 + start32 = find_section32(v32->hdr, "__ftr_fixup", &size32); 612 + if (start32) 613 + do_feature_fixups(cur_cpu_spec->cpu_features, 614 + start32, start32 + size32); 615 + 616 + #ifdef CONFIG_PPC64 617 + start32 = find_section32(v32->hdr, "__fw_ftr_fixup", &size32); 618 + if (start32) 619 + do_feature_fixups(powerpc_firmware_features, 620 + start32, start32 + size32); 621 + #endif /* CONFIG_PPC64 */ 622 + 623 + return 0; 624 + } 625 + 591 626 static __init int vdso_fixup_alt_funcs(struct lib32_elfinfo *v32, 592 627 struct lib64_elfinfo *v64) 593 628 { ··· 671 632 return -1; 672 633 673 634 if (vdso_fixup_datapage(&v32, &v64)) 635 + return -1; 636 + 637 + if (vdso_fixup_features(&v32, &v64)) 674 638 return -1; 675 639 676 640 if (vdso_fixup_alt_funcs(&v32, &v64)) ··· 756 714 * Setup the syscall map in the vDOS 757 715 */ 758 716 vdso_setup_syscall_map(); 717 + 759 718 /* 760 719 * Initialize the vDSO images in memory, that is do necessary 761 720 * fixups of vDSO symbols, locate trampolines, etc...
+12
arch/powerpc/kernel/vdso32/vdso32.lds.S
··· 32 32 PROVIDE (_etext = .); 33 33 PROVIDE (etext = .); 34 34 35 + . = ALIGN(8); 36 + __ftr_fixup : { 37 + *(__ftr_fixup) 38 + } 39 + 40 + #ifdef CONFIG_PPC64 41 + . = ALIGN(8); 42 + __fw_ftr_fixup : { 43 + *(__fw_ftr_fixup) 44 + } 45 + #endif 46 + 35 47 /* Other stuff is appended to the text segment: */ 36 48 .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } 37 49 .rodata1 : { *(.rodata1) }
+10
arch/powerpc/kernel/vdso64/vdso64.lds.S
··· 31 31 PROVIDE (_etext = .); 32 32 PROVIDE (etext = .); 33 33 34 + . = ALIGN(8); 35 + __ftr_fixup : { 36 + *(__ftr_fixup) 37 + } 38 + 39 + . = ALIGN(8); 40 + __fw_ftr_fixup : { 41 + *(__fw_ftr_fixup) 42 + } 43 + 34 44 /* Other stuff is appended to the text segment: */ 35 45 .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } 36 46 .rodata1 : { *(.rodata1) }
+1 -1
arch/ppc/kernel/setup.c
··· 314 314 * that depend on which cpu we have. 315 315 */ 316 316 spec = identify_cpu(offset); 317 - do_feature_fixups(offset, spec->cpu_features, 317 + do_feature_fixups(spec->cpu_features, 318 318 PTRRELOC(&__start___ftr_fixup), 319 319 PTRRELOC(&__stop___ftr_fixup)); 320 320
+52
include/asm-powerpc/asm-compat.h
··· 14 14 # define ASM_CONST(x) __ASM_CONST(x) 15 15 #endif 16 16 17 + 18 + /* 19 + * Feature section common macros 20 + * 21 + * Note that the entries now contain offsets between the table entry 22 + * and the code rather than absolute code pointers in order to be 23 + * useable with the vdso shared library. There is also an assumption 24 + * that values will be negative, that is, the fixup table has to be 25 + * located after the code it fixes up. 26 + */ 27 + #ifdef CONFIG_PPC64 28 + #ifdef __powerpc64__ 29 + /* 64 bits kernel, 64 bits code */ 30 + #define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect) \ 31 + 99: \ 32 + .section sect,"a"; \ 33 + .align 3; \ 34 + 98: \ 35 + .llong msk; \ 36 + .llong val; \ 37 + .llong label##b-98b; \ 38 + .llong 99b-98b; \ 39 + .previous 40 + #else /* __powerpc64__ */ 41 + /* 64 bits kernel, 32 bits code (ie. vdso32) */ 42 + #define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect) \ 43 + 99: \ 44 + .section sect,"a"; \ 45 + .align 3; \ 46 + 98: \ 47 + .llong msk; \ 48 + .llong val; \ 49 + .long 0xffffffff; \ 50 + .long label##b-98b; \ 51 + .long 0xffffffff; \ 52 + .long 99b-98b; \ 53 + .previous 54 + #endif /* !__powerpc64__ */ 55 + #else /* CONFIG_PPC64 */ 56 + /* 32 bits kernel, 32 bits code */ 57 + #define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect) \ 58 + 99: \ 59 + .section sect,"a"; \ 60 + .align 2; \ 61 + 98: \ 62 + .long msk; \ 63 + .long val; \ 64 + .long label##b-98b; \ 65 + .long 99b-98b; \ 66 + .previous 67 + #endif /* !CONFIG_PPC64 */ 68 + 17 69 #ifdef __powerpc64__ 18 70 19 71 /* operations for longs and pointers */
+5 -26
include/asm-powerpc/cputable.h
··· 92 92 extern unsigned int __start___ftr_fixup, __stop___ftr_fixup; 93 93 94 94 extern struct cpu_spec *identify_cpu(unsigned long offset); 95 - extern void do_feature_fixups(unsigned long offset, unsigned long value, 96 - void *fixup_start, void *fixup_end); 95 + extern void do_feature_fixups(unsigned long value, void *fixup_start, 96 + void *fixup_end); 97 97 98 98 #endif /* __ASSEMBLY__ */ 99 99 ··· 435 435 #ifdef __ASSEMBLY__ 436 436 437 437 #define BEGIN_FTR_SECTION_NESTED(label) label: 438 - #define BEGIN_FTR_SECTION BEGIN_FTR_SECTION_NESTED(98) 439 - 440 - #ifndef __powerpc64__ 438 + #define BEGIN_FTR_SECTION BEGIN_FTR_SECTION_NESTED(97) 441 439 #define END_FTR_SECTION_NESTED(msk, val, label) \ 442 - 99: \ 443 - .section __ftr_fixup,"a"; \ 444 - .align 2; \ 445 - .long msk; \ 446 - .long val; \ 447 - .long label##b; \ 448 - .long 99b; \ 449 - .previous 450 - #else /* __powerpc64__ */ 451 - #define END_FTR_SECTION_NESTED(msk, val, label) \ 452 - 99: \ 453 - .section __ftr_fixup,"a"; \ 454 - .align 3; \ 455 - .llong msk; \ 456 - .llong val; \ 457 - .llong label##b; \ 458 - .llong 99b; \ 459 - .previous 460 - #endif /* __powerpc64__ */ 461 - 440 + MAKE_FTR_SECTION_ENTRY(msk, val, label, __ftr_fixup) 462 441 #define END_FTR_SECTION(msk, val) \ 463 - END_FTR_SECTION_NESTED(msk, val, 98) 442 + END_FTR_SECTION_NESTED(msk, val, 97) 464 443 465 444 #define END_FTR_SECTION_IFSET(msk) END_FTR_SECTION((msk), (msk)) 466 445 #define END_FTR_SECTION_IFCLR(msk) END_FTR_SECTION((msk), 0)
+5 -10
include/asm-powerpc/firmware.h
··· 100 100 101 101 #else /* __ASSEMBLY__ */ 102 102 103 - #define BEGIN_FW_FTR_SECTION 96: 104 - 103 + #define BEGIN_FW_FTR_SECTION_NESTED(label) label: 104 + #define BEGIN_FW_FTR_SECTION BEGIN_FW_FTR_SECTION_NESTED(97) 105 + #define END_FW_FTR_SECTION_NESTED(msk, val, label) \ 106 + MAKE_FTR_SECTION_ENTRY(msk, val, label, __fw_ftr_fixup) 105 107 #define END_FW_FTR_SECTION(msk, val) \ 106 - 97: \ 107 - .section __fw_ftr_fixup,"a"; \ 108 - .align 3; \ 109 - .llong msk; \ 110 - .llong val; \ 111 - .llong 96b; \ 112 - .llong 97b; \ 113 - .previous 108 + END_FW_FTR_SECTION_NESTED(msk, val, 97) 114 109 115 110 #define END_FW_FTR_SECTION_IFSET(msk) END_FW_FTR_SECTION((msk), (msk)) 116 111 #define END_FW_FTR_SECTION_IFCLR(msk) END_FW_FTR_SECTION((msk), 0)
+5 -3
include/asm-powerpc/timex.h
··· 30 30 ret = 0; 31 31 32 32 __asm__ __volatile__( 33 - "98: mftb %0\n" 33 + "97: mftb %0\n" 34 34 "99:\n" 35 35 ".section __ftr_fixup,\"a\"\n" 36 + ".align 2\n" 37 + "98:\n" 36 38 " .long %1\n" 37 39 " .long 0\n" 38 - " .long 98b\n" 39 - " .long 99b\n" 40 + " .long 97b-98b\n" 41 + " .long 99b-98b\n" 40 42 ".previous" 41 43 : "=r" (ret) : "i" (CPU_FTR_601)); 42 44 #endif