Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc: Fixup lwsync at runtime

To allow for a single kernel image on e500 v1/v2/mc we need to fixup lwsync
at runtime. On e500v1/v2 lwsync causes an illop so we need to patch up
the code. We default to 'sync' since that is always safe and if the cpu
is capable we will replace 'sync' with 'lwsync'.

We introduce CPU_FTR_LWSYNC as a way to determine at runtime if this is
needed. This flag could be moved elsewhere since we don't really use it
for the normal CPU_FTR purpose.

Finally we only store the relative offset in the fixup section to keep it
as small as possible rather than using a full fixup_entry.

Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>

authored by

Kumar Gala and committed by
Paul Mackerras
2d1b2027 5888da18

+133 -28
+6
arch/powerpc/kernel/module.c
··· 86 86 (void *)sect->sh_addr + sect->sh_size); 87 87 #endif 88 88 89 + sect = find_section(hdr, sechdrs, "__lwsync_fixup"); 90 + if (sect != NULL) 91 + do_lwsync_fixups(cur_cpu_spec->cpu_features, 92 + (void *)sect->sh_addr, 93 + (void *)sect->sh_addr + sect->sh_size); 94 + 89 95 return 0; 90 96 } 91 97
+4
arch/powerpc/kernel/setup_32.c
··· 101 101 PTRRELOC(&__start___ftr_fixup), 102 102 PTRRELOC(&__stop___ftr_fixup)); 103 103 104 + do_lwsync_fixups(spec->cpu_features, 105 + PTRRELOC(&__start___lwsync_fixup), 106 + PTRRELOC(&__stop___lwsync_fixup)); 107 + 104 108 return KERNELBASE + offset; 105 109 } 106 110
+2
arch/powerpc/kernel/setup_64.c
··· 363 363 &__start___ftr_fixup, &__stop___ftr_fixup); 364 364 do_feature_fixups(powerpc_firmware_features, 365 365 &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup); 366 + do_lwsync_fixups(cur_cpu_spec->cpu_features, 367 + &__start___lwsync_fixup, &__stop___lwsync_fixup); 366 368 367 369 /* 368 370 * Unflatten the device-tree passed by prom_init or kexec
+10
arch/powerpc/kernel/vdso.c
··· 571 571 if (start64) 572 572 do_feature_fixups(powerpc_firmware_features, 573 573 start64, start64 + size64); 574 + 575 + start64 = find_section64(v64->hdr, "__lwsync_fixup", &size64); 576 + if (start64) 577 + do_lwsync_fixups(cur_cpu_spec->cpu_features, 578 + start64, start64 + size64); 574 579 #endif /* CONFIG_PPC64 */ 575 580 576 581 start32 = find_section32(v32->hdr, "__ftr_fixup", &size32); ··· 589 584 do_feature_fixups(powerpc_firmware_features, 590 585 start32, start32 + size32); 591 586 #endif /* CONFIG_PPC64 */ 587 + 588 + start32 = find_section32(v32->hdr, "__lwsync_fixup", &size32); 589 + if (start32) 590 + do_lwsync_fixups(cur_cpu_spec->cpu_features, 591 + start32, start32 + size32); 592 592 593 593 return 0; 594 594 }
+3
arch/powerpc/kernel/vdso32/vdso32.lds.S
··· 33 33 . = ALIGN(8); 34 34 __ftr_fixup : { *(__ftr_fixup) } 35 35 36 + . = ALIGN(8); 37 + __lwsync_fixup : { *(__lwsync_fixup) } 38 + 36 39 #ifdef CONFIG_PPC64 37 40 . = ALIGN(8); 38 41 __fw_ftr_fixup : { *(__fw_ftr_fixup) }
+3
arch/powerpc/kernel/vdso64/vdso64.lds.S
··· 35 35 __ftr_fixup : { *(__ftr_fixup) } 36 36 37 37 . = ALIGN(8); 38 + __lwsync_fixup : { *(__lwsync_fixup) } 39 + 40 + . = ALIGN(8); 38 41 __fw_ftr_fixup : { *(__fw_ftr_fixup) } 39 42 40 43 /*
+6
arch/powerpc/kernel/vmlinux.lds.S
··· 127 127 *(__ftr_fixup) 128 128 __stop___ftr_fixup = .; 129 129 } 130 + . = ALIGN(8); 131 + __lwsync_fixup : AT(ADDR(__lwsync_fixup) - LOAD_OFFSET) { 132 + __start___lwsync_fixup = .; 133 + *(__lwsync_fixup) 134 + __stop___lwsync_fixup = .; 135 + } 130 136 #ifdef CONFIG_PPC64 131 137 . = ALIGN(8); 132 138 __fw_ftr_fixup : AT(ADDR(__fw_ftr_fixup) - LOAD_OFFSET) {
+15
arch/powerpc/lib/feature-fixups-test.S
··· 10 10 11 11 #include <asm/feature-fixups.h> 12 12 #include <asm/ppc_asm.h> 13 + #include <asm/synch.h> 13 14 14 15 .text 15 16 ··· 726 725 MAKE_MACRO_TEST(FW_FTR); 727 726 MAKE_MACRO_TEST_EXPECTED(FW_FTR); 728 727 #endif 728 + 729 + globl(lwsync_fixup_test) 730 + 1: or 1,1,1 731 + LWSYNC 732 + globl(end_lwsync_fixup_test) 733 + 734 + globl(lwsync_fixup_test_expected_LWSYNC) 735 + 1: or 1,1,1 736 + lwsync 737 + 738 + globl(lwsync_fixup_test_expected_SYNC) 739 + 1: or 1,1,1 740 + sync 741 +
+36
arch/powerpc/lib/feature-fixups.c
··· 110 110 } 111 111 } 112 112 113 + void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end) 114 + { 115 + unsigned int *start, *end, *dest; 116 + 117 + if (!(value & CPU_FTR_LWSYNC)) 118 + return ; 119 + 120 + start = fixup_start; 121 + end = fixup_end; 122 + 123 + for (; start < end; start++) { 124 + dest = (void *)start + *start; 125 + patch_instruction(dest, PPC_LWSYNC_INSTR); 126 + } 127 + } 128 + 113 129 #ifdef CONFIG_FTR_FIXUP_SELFTEST 114 130 115 131 #define check(x) \ ··· 311 295 #endif 312 296 } 313 297 298 + static void test_lwsync_macros(void) 299 + { 300 + extern void lwsync_fixup_test; 301 + extern void end_lwsync_fixup_test; 302 + extern void lwsync_fixup_test_expected_LWSYNC; 303 + extern void lwsync_fixup_test_expected_SYNC; 304 + unsigned long size = &end_lwsync_fixup_test - 305 + &lwsync_fixup_test; 306 + 307 + /* The fixups have already been done for us during boot */ 308 + if (cur_cpu_spec->cpu_features & CPU_FTR_LWSYNC) { 309 + check(memcmp(&lwsync_fixup_test, 310 + &lwsync_fixup_test_expected_LWSYNC, size) == 0); 311 + } else { 312 + check(memcmp(&lwsync_fixup_test, 313 + &lwsync_fixup_test_expected_SYNC, size) == 0); 314 + } 315 + } 316 + 314 317 static int __init test_feature_fixups(void) 315 318 { 316 319 printk(KERN_DEBUG "Running feature fixup self-tests ...\n"); ··· 342 307 test_alternative_case_with_external_branch(); 343 308 test_cpu_macros(); 344 309 test_fw_macros(); 310 + test_lwsync_macros(); 345 311 346 312 return 0; 347 313 }
+2 -1
include/asm-powerpc/code-patching.h
··· 12 12 13 13 #include <asm/types.h> 14 14 15 - #define PPC_NOP_INSTR 0x60000000 15 + #define PPC_NOP_INSTR 0x60000000 16 + #define PPC_LWSYNC_INSTR 0x7c2004ac 16 17 17 18 /* Flags for create_branch: 18 19 * "b" == create_branch(addr, target, 0);
+11 -10
include/asm-powerpc/cputable.h
··· 156 156 #define CPU_FTR_UNIFIED_ID_CACHE ASM_CONST(0x0000000001000000) 157 157 #define CPU_FTR_SPE ASM_CONST(0x0000000002000000) 158 158 #define CPU_FTR_NEED_PAIRED_STWCX ASM_CONST(0x0000000004000000) 159 + #define CPU_FTR_LWSYNC ASM_CONST(0x0000000008000000) 159 160 160 161 /* 161 162 * Add the 64-bit processor unique features in the top half of the word; ··· 370 369 CPU_FTR_NODSISRALIGN) 371 370 #define CPU_FTRS_E500MC (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \ 372 371 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_BIG_PHYS | CPU_FTR_NODSISRALIGN | \ 373 - CPU_FTR_L2CSR) 372 + CPU_FTR_L2CSR | CPU_FTR_LWSYNC) 374 373 #define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN) 375 374 376 375 /* 64-bit CPUs */ 377 - #define CPU_FTRS_POWER3 (CPU_FTR_USE_TB | \ 376 + #define CPU_FTRS_POWER3 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ 378 377 CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | CPU_FTR_PPC_LE) 379 - #define CPU_FTRS_RS64 (CPU_FTR_USE_TB | \ 378 + #define CPU_FTRS_RS64 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ 380 379 CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | \ 381 380 CPU_FTR_MMCRA | CPU_FTR_CTRL) 382 - #define CPU_FTRS_POWER4 (CPU_FTR_USE_TB | \ 381 + #define CPU_FTRS_POWER4 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ 383 382 CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ 384 383 CPU_FTR_MMCRA) 385 - #define CPU_FTRS_PPC970 (CPU_FTR_USE_TB | \ 384 + #define CPU_FTRS_PPC970 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ 386 385 CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ 387 386 CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA) 388 - #define CPU_FTRS_POWER5 (CPU_FTR_USE_TB | \ 387 + #define CPU_FTRS_POWER5 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ 389 388 CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ 390 389 CPU_FTR_MMCRA | CPU_FTR_SMT | \ 391 390 CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \ 392 391 CPU_FTR_PURR) 393 - #define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | \ 392 + #define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ 394 393 
CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ 395 394 CPU_FTR_MMCRA | CPU_FTR_SMT | \ 396 395 CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \ 397 396 CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ 398 397 CPU_FTR_DSCR) 399 - #define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | \ 398 + #define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ 400 399 CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ 401 400 CPU_FTR_MMCRA | CPU_FTR_SMT | \ 402 401 CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \ 403 402 CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ 404 403 CPU_FTR_DSCR) 405 - #define CPU_FTRS_CELL (CPU_FTR_USE_TB | \ 404 + #define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ 406 405 CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ 407 406 CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \ 408 407 CPU_FTR_PAUSE_ZERO | CPU_FTR_CI_LARGE_PAGE | CPU_FTR_CELL_TB_BUG) 409 - #define CPU_FTRS_PA6T (CPU_FTR_USE_TB | \ 408 + #define CPU_FTRS_PA6T (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ 410 409 CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | \ 411 410 CPU_FTR_ALTIVEC_COMP | CPU_FTR_CI_LARGE_PAGE | \ 412 411 CPU_FTR_PURR | CPU_FTR_REAL_LE | CPU_FTR_NO_SLBIE_B)
+10
include/asm-powerpc/feature-fixups.h
··· 113 113 114 114 #endif /* __ASSEMBLY__ */ 115 115 116 + /* LWSYNC feature sections */ 117 + #define START_LWSYNC_SECTION(label) label##1: 118 + #define MAKE_LWSYNC_SECTION_ENTRY(label, sect) \ 119 + label##2: \ 120 + .pushsection sect,"a"; \ 121 + .align 2; \ 122 + label##3: \ 123 + .long label##1b-label##3b; \ 124 + .popsection; 125 + 116 126 #endif /* __ASM_POWERPC_FEATURE_FIXUPS_H */
+25 -17
include/asm-powerpc/synch.h
··· 3 3 #ifdef __KERNEL__ 4 4 5 5 #include <linux/stringify.h> 6 + #include <asm/feature-fixups.h> 6 7 7 - #if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC) 8 - #define __SUBARCH_HAS_LWSYNC 9 - #endif 10 - 11 - #ifdef __SUBARCH_HAS_LWSYNC 12 - # define LWSYNC lwsync 13 - #else 14 - # define LWSYNC sync 15 - #endif 16 - 17 - #ifdef CONFIG_SMP 18 - #define ISYNC_ON_SMP "\n\tisync\n" 19 - #define LWSYNC_ON_SMP __stringify(LWSYNC) "\n" 20 - #else 21 - #define ISYNC_ON_SMP 22 - #define LWSYNC_ON_SMP 23 - #endif 8 + #ifndef __ASSEMBLY__ 9 + extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup; 10 + extern void do_lwsync_fixups(unsigned long value, void *fixup_start, 11 + void *fixup_end); 24 12 25 13 static inline void eieio(void) 26 14 { ··· 19 31 { 20 32 __asm__ __volatile__ ("isync" : : : "memory"); 21 33 } 34 + #endif /* __ASSEMBLY__ */ 35 + 36 + #if defined(__powerpc64__) 37 + # define LWSYNC lwsync 38 + #elif defined(CONFIG_E500) 39 + # define LWSYNC \ 40 + START_LWSYNC_SECTION(96); \ 41 + sync; \ 42 + MAKE_LWSYNC_SECTION_ENTRY(96, __lwsync_fixup); 43 + #else 44 + # define LWSYNC sync 45 + #endif 46 + 47 + #ifdef CONFIG_SMP 48 + #define ISYNC_ON_SMP "\n\tisync\n" 49 + #define LWSYNC_ON_SMP stringify_in_c(LWSYNC) "\n" 50 + #else 51 + #define ISYNC_ON_SMP 52 + #define LWSYNC_ON_SMP 53 + #endif 22 54 23 55 #endif /* __KERNEL__ */ 24 56 #endif /* _ASM_POWERPC_SYNCH_H */