Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ARM: kernel: use relative references for UP/SMP alternatives

Currently, the .alt.smp.init section contains the virtual addresses
of the patch sites. Since patching may occur both before and after
switching into virtual mode, this requires manual adjustment of the
address when applying the UP alternative.

Let's simplify this by using relative offsets in the table entries:
this allows us to simply add each entry's address to its contents,
regardless of whether we are running in virtual mode or not.

Reviewed-by: Nicolas Pitre <nico@fluxnic.net>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>

+8 -8
+2 -2
arch/arm/include/asm/assembler.h
··· 259 259 */ 260 260 #define ALT_UP(instr...) \ 261 261 .pushsection ".alt.smp.init", "a" ;\ 262 - .long 9998b ;\ 262 + .long 9998b - . ;\ 263 263 9997: instr ;\ 264 264 .if . - 9997b == 2 ;\ 265 265 nop ;\ ··· 270 270 .popsection 271 271 #define ALT_UP_B(label) \ 272 272 .pushsection ".alt.smp.init", "a" ;\ 273 - .long 9998b ;\ 273 + .long 9998b - . ;\ 274 274 W(b) . + (label - 9998b) ;\ 275 275 .popsection 276 276 #else
+1 -1
arch/arm/include/asm/processor.h
··· 96 96 #define __ALT_SMP_ASM(smp, up) \ 97 97 "9998: " smp "\n" \ 98 98 " .pushsection \".alt.smp.init\", \"a\"\n" \ 99 - " .long 9998b\n" \ 99 + " .long 9998b - .\n" \ 100 100 " " up "\n" \ 101 101 " .popsection\n" 102 102 #else
+5 -5
arch/arm/kernel/head.S
··· 546 546 __do_fixup_smp_on_up: 547 547 cmp r4, r5 548 548 reths lr 549 - ldmia r4!, {r0, r6} 550 - ARM( str r6, [r0, r3] ) 551 - THUMB( add r0, r0, r3 ) 549 + ldmia r4, {r0, r6} 550 + ARM( str r6, [r0, r4] ) 551 + THUMB( add r0, r0, r4 ) 552 + add r4, r4, #8 552 553 #ifdef __ARMEB__ 553 554 THUMB( mov r6, r6, ror #16 ) @ Convert word order for big-endian. 554 555 #endif 555 556 THUMB( strh r6, [r0], #2 ) @ For Thumb-2, store as two halfwords 556 - THUMB( mov r6, r6, lsr #16 ) @ to be robust against misaligned r3. 557 + THUMB( mov r6, r6, lsr #16 ) @ to be robust against misaligned r0. 557 558 THUMB( strh r6, [r0] ) 558 559 b __do_fixup_smp_on_up 559 560 ENDPROC(__do_fixup_smp_on_up) ··· 563 562 stmfd sp!, {r4 - r6, lr} 564 563 mov r4, r0 565 564 add r5, r0, r1 566 - mov r3, #0 567 565 bl __do_fixup_smp_on_up 568 566 ldmfd sp!, {r4 - r6, pc} 569 567 ENDPROC(fixup_smp)