Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

MIPS: microMIPS: Disable LL/SC and fix linker bug.

Partially revert commit e0c14a260d66ba35935600d6435940a566fe806b
and turn off LL/SC when building a pure microMIPS kernel. This is
a temporary fix until the cmpxchg assembly macro functions are
re-written to not use the HI/LO registers in address calculations.

Also add .insn in selected user access functions which would
otherwise produce ISA mode jump incompatibilities. This is also a
temporary fix.

Signed-off-by: Steven J. Hill <Steven.Hill@imgtec.com>

Authored by Steven J. Hill; committed by Ralf Baechle.
1658f914 01be057b

+16 -2
+4
arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h
··· 28 28 /* #define cpu_has_prefetch ? */ 29 29 #define cpu_has_mcheck 1 30 30 /* #define cpu_has_ejtag ? */ 31 + #ifdef CONFIG_CPU_MICROMIPS 32 + #define cpu_has_llsc 0 33 + #else 31 34 #define cpu_has_llsc 1 35 + #endif 32 36 /* #define cpu_has_vtag_icache ? */ 33 37 /* #define cpu_has_dc_aliases ? */ 34 38 /* #define cpu_has_ic_fills_f_dc ? */
+12 -2
arch/mips/include/asm/uaccess.h
··· 261 261 __asm__ __volatile__( \ 262 262 "1: " insn " %1, %3 \n" \ 263 263 "2: \n" \ 264 + " .insn \n" \ 264 265 " .section .fixup,\"ax\" \n" \ 265 266 "3: li %0, %4 \n" \ 266 267 " j 2b \n" \ ··· 288 287 __asm__ __volatile__( \ 289 288 "1: lw %1, (%3) \n" \ 290 289 "2: lw %D1, 4(%3) \n" \ 291 - "3: .section .fixup,\"ax\" \n" \ 290 + "3: \n" \ 291 + " .insn \n" \ 292 + " .section .fixup,\"ax\" \n" \ 292 293 "4: li %0, %4 \n" \ 293 294 " move %1, $0 \n" \ 294 295 " move %D1, $0 \n" \ ··· 358 355 __asm__ __volatile__( \ 359 356 "1: " insn " %z2, %3 # __put_user_asm\n" \ 360 357 "2: \n" \ 358 + " .insn \n" \ 361 359 " .section .fixup,\"ax\" \n" \ 362 360 "3: li %0, %4 \n" \ 363 361 " j 2b \n" \ ··· 377 373 "1: sw %2, (%3) # __put_user_asm_ll32 \n" \ 378 374 "2: sw %D2, 4(%3) \n" \ 379 375 "3: \n" \ 376 + " .insn \n" \ 380 377 " .section .fixup,\"ax\" \n" \ 381 378 "4: li %0, %4 \n" \ 382 379 " j 3b \n" \ ··· 529 524 __asm__ __volatile__( \ 530 525 "1: " insn " %1, %3 \n" \ 531 526 "2: \n" \ 527 + " .insn \n" \ 532 528 " .section .fixup,\"ax\" \n" \ 533 529 "3: li %0, %4 \n" \ 534 530 " j 2b \n" \ ··· 555 549 "1: ulw %1, (%3) \n" \ 556 550 "2: ulw %D1, 4(%3) \n" \ 557 551 " move %0, $0 \n" \ 558 - "3: .section .fixup,\"ax\" \n" \ 552 + "3: \n" \ 553 + " .insn \n" \ 554 + " .section .fixup,\"ax\" \n" \ 559 555 "4: li %0, %4 \n" \ 560 556 " move %1, $0 \n" \ 561 557 " move %D1, $0 \n" \ ··· 624 616 __asm__ __volatile__( \ 625 617 "1: " insn " %z2, %3 # __put_user_unaligned_asm\n" \ 626 618 "2: \n" \ 619 + " .insn \n" \ 627 620 " .section .fixup,\"ax\" \n" \ 628 621 "3: li %0, %4 \n" \ 629 622 " j 2b \n" \ ··· 643 634 "1: sw %2, (%3) # __put_user_unaligned_asm_ll32 \n" \ 644 635 "2: sw %D2, 4(%3) \n" \ 645 636 "3: \n" \ 637 + " .insn \n" \ 646 638 " .section .fixup,\"ax\" \n" \ 647 639 "4: li %0, %4 \n" \ 648 640 " j 3b \n" \