Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

MIPS: Fix gigaton of warnings building with microMIPS.

With binutils 2.24 the attempt to switch from microMIPS mode to MIPS III
mode through .set mips3 results in *lots* of warnings like

{standard input}: Assembler messages:
{standard input}:397: Warning: the 64-bit MIPS architecture does not support the `smartmips' extension

during a kernel build. Fixed by using .set arch=r4000 instead.

This breaks support for building the kernel with binutils 2.13, which
was supported for 32-bit kernels only anyway, and 2.14, which was a bad
vintage for MIPS anyway.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>

+84 -84
+3 -3
arch/mips/alchemy/common/sleeper.S
··· 95 95 96 96 /* cache following instructions, as memory gets put to sleep */ 97 97 la t0, 1f 98 - .set mips3 98 + .set arch=r4000 99 99 cache 0x14, 0(t0) 100 100 cache 0x14, 32(t0) 101 101 cache 0x14, 64(t0) ··· 121 121 122 122 /* cache following instructions, as memory gets put to sleep */ 123 123 la t0, 1f 124 - .set mips3 124 + .set arch=r4000 125 125 cache 0x14, 0(t0) 126 126 cache 0x14, 32(t0) 127 127 cache 0x14, 64(t0) ··· 163 163 la t1, 4f 164 164 subu t2, t1, t0 165 165 166 - .set mips3 166 + .set arch=r4000 167 167 168 168 1: cache 0x14, 0(t0) 169 169 subu t2, t2, 32
+2 -2
arch/mips/include/asm/asm.h
··· 146 146 147 147 #define PREF(hint,addr) \ 148 148 .set push; \ 149 - .set mips4; \ 149 + .set arch=r5000; \ 150 150 pref hint, addr; \ 151 151 .set pop 152 152 ··· 159 159 160 160 #define PREFX(hint,addr) \ 161 161 .set push; \ 162 - .set mips4; \ 162 + .set arch=r5000; \ 163 163 prefx hint, addr; \ 164 164 .set pop 165 165
+20 -20
arch/mips/include/asm/atomic.h
··· 53 53 int temp; 54 54 55 55 __asm__ __volatile__( 56 - " .set mips3 \n" 56 + " .set arch=r4000 \n" 57 57 "1: ll %0, %1 # atomic_add \n" 58 58 " addu %0, %2 \n" 59 59 " sc %0, %1 \n" ··· 66 66 67 67 do { 68 68 __asm__ __volatile__( 69 - " .set mips3 \n" 69 + " .set arch=r4000 \n" 70 70 " ll %0, %1 # atomic_add \n" 71 71 " addu %0, %2 \n" 72 72 " sc %0, %1 \n" ··· 96 96 int temp; 97 97 98 98 __asm__ __volatile__( 99 - " .set mips3 \n" 99 + " .set arch=r4000 \n" 100 100 "1: ll %0, %1 # atomic_sub \n" 101 101 " subu %0, %2 \n" 102 102 " sc %0, %1 \n" ··· 109 109 110 110 do { 111 111 __asm__ __volatile__( 112 - " .set mips3 \n" 112 + " .set arch=r4000 \n" 113 113 " ll %0, %1 # atomic_sub \n" 114 114 " subu %0, %2 \n" 115 115 " sc %0, %1 \n" ··· 139 139 int temp; 140 140 141 141 __asm__ __volatile__( 142 - " .set mips3 \n" 142 + " .set arch=r4000 \n" 143 143 "1: ll %1, %2 # atomic_add_return \n" 144 144 " addu %0, %1, %3 \n" 145 145 " sc %0, %2 \n" ··· 153 153 154 154 do { 155 155 __asm__ __volatile__( 156 - " .set mips3 \n" 156 + " .set arch=r4000 \n" 157 157 " ll %1, %2 # atomic_add_return \n" 158 158 " addu %0, %1, %3 \n" 159 159 " sc %0, %2 \n" ··· 188 188 int temp; 189 189 190 190 __asm__ __volatile__( 191 - " .set mips3 \n" 191 + " .set arch=r4000 \n" 192 192 "1: ll %1, %2 # atomic_sub_return \n" 193 193 " subu %0, %1, %3 \n" 194 194 " sc %0, %2 \n" ··· 205 205 206 206 do { 207 207 __asm__ __volatile__( 208 - " .set mips3 \n" 208 + " .set arch=r4000 \n" 209 209 " ll %1, %2 # atomic_sub_return \n" 210 210 " subu %0, %1, %3 \n" 211 211 " sc %0, %2 \n" ··· 248 248 int temp; 249 249 250 250 __asm__ __volatile__( 251 - " .set mips3 \n" 251 + " .set arch=r4000 \n" 252 252 "1: ll %1, %2 # atomic_sub_if_positive\n" 253 253 " subu %0, %1, %3 \n" 254 254 " bltz %0, 1f \n" ··· 266 266 int temp; 267 267 268 268 __asm__ __volatile__( 269 - " .set mips3 \n" 269 + " .set arch=r4000 \n" 270 270 "1: ll %1, %2 # atomic_sub_if_positive\n" 271 271 " subu %0, %1, %3 \n" 272 272 " 
bltz %0, 1f \n" ··· 420 420 long temp; 421 421 422 422 __asm__ __volatile__( 423 - " .set mips3 \n" 423 + " .set arch=r4000 \n" 424 424 "1: lld %0, %1 # atomic64_add \n" 425 425 " daddu %0, %2 \n" 426 426 " scd %0, %1 \n" ··· 433 433 434 434 do { 435 435 __asm__ __volatile__( 436 - " .set mips3 \n" 436 + " .set arch=r4000 \n" 437 437 " lld %0, %1 # atomic64_add \n" 438 438 " daddu %0, %2 \n" 439 439 " scd %0, %1 \n" ··· 463 463 long temp; 464 464 465 465 __asm__ __volatile__( 466 - " .set mips3 \n" 466 + " .set arch=r4000 \n" 467 467 "1: lld %0, %1 # atomic64_sub \n" 468 468 " dsubu %0, %2 \n" 469 469 " scd %0, %1 \n" ··· 476 476 477 477 do { 478 478 __asm__ __volatile__( 479 - " .set mips3 \n" 479 + " .set arch=r4000 \n" 480 480 " lld %0, %1 # atomic64_sub \n" 481 481 " dsubu %0, %2 \n" 482 482 " scd %0, %1 \n" ··· 506 506 long temp; 507 507 508 508 __asm__ __volatile__( 509 - " .set mips3 \n" 509 + " .set arch=r4000 \n" 510 510 "1: lld %1, %2 # atomic64_add_return \n" 511 511 " daddu %0, %1, %3 \n" 512 512 " scd %0, %2 \n" ··· 520 520 521 521 do { 522 522 __asm__ __volatile__( 523 - " .set mips3 \n" 523 + " .set arch=r4000 \n" 524 524 " lld %1, %2 # atomic64_add_return \n" 525 525 " daddu %0, %1, %3 \n" 526 526 " scd %0, %2 \n" ··· 556 556 long temp; 557 557 558 558 __asm__ __volatile__( 559 - " .set mips3 \n" 559 + " .set arch=r4000 \n" 560 560 "1: lld %1, %2 # atomic64_sub_return \n" 561 561 " dsubu %0, %1, %3 \n" 562 562 " scd %0, %2 \n" ··· 571 571 572 572 do { 573 573 __asm__ __volatile__( 574 - " .set mips3 \n" 574 + " .set arch=r4000 \n" 575 575 " lld %1, %2 # atomic64_sub_return \n" 576 576 " dsubu %0, %1, %3 \n" 577 577 " scd %0, %2 \n" ··· 615 615 long temp; 616 616 617 617 __asm__ __volatile__( 618 - " .set mips3 \n" 618 + " .set arch=r4000 \n" 619 619 "1: lld %1, %2 # atomic64_sub_if_positive\n" 620 620 " dsubu %0, %1, %3 \n" 621 621 " bltz %0, 1f \n" ··· 633 633 long temp; 634 634 635 635 __asm__ __volatile__( 636 - " .set mips3 \n" 636 + " .set 
arch=r4000 \n" 637 637 "1: lld %1, %2 # atomic64_sub_if_positive\n" 638 638 " dsubu %0, %1, %3 \n" 639 639 " bltz %0, 1f \n"
+14 -14
arch/mips/include/asm/bitops.h
··· 79 79 80 80 if (kernel_uses_llsc && R10000_LLSC_WAR) { 81 81 __asm__ __volatile__( 82 - " .set mips3 \n" 82 + " .set arch=r4000 \n" 83 83 "1: " __LL "%0, %1 # set_bit \n" 84 84 " or %0, %2 \n" 85 85 " " __SC "%0, %1 \n" ··· 101 101 } else if (kernel_uses_llsc) { 102 102 do { 103 103 __asm__ __volatile__( 104 - " .set mips3 \n" 104 + " .set arch=r4000 \n" 105 105 " " __LL "%0, %1 # set_bit \n" 106 106 " or %0, %2 \n" 107 107 " " __SC "%0, %1 \n" ··· 131 131 132 132 if (kernel_uses_llsc && R10000_LLSC_WAR) { 133 133 __asm__ __volatile__( 134 - " .set mips3 \n" 134 + " .set arch=r4000 \n" 135 135 "1: " __LL "%0, %1 # clear_bit \n" 136 136 " and %0, %2 \n" 137 137 " " __SC "%0, %1 \n" ··· 153 153 } else if (kernel_uses_llsc) { 154 154 do { 155 155 __asm__ __volatile__( 156 - " .set mips3 \n" 156 + " .set arch=r4000 \n" 157 157 " " __LL "%0, %1 # clear_bit \n" 158 158 " and %0, %2 \n" 159 159 " " __SC "%0, %1 \n" ··· 197 197 unsigned long temp; 198 198 199 199 __asm__ __volatile__( 200 - " .set mips3 \n" 200 + " .set arch=r4000 \n" 201 201 "1: " __LL "%0, %1 # change_bit \n" 202 202 " xor %0, %2 \n" 203 203 " " __SC "%0, %1 \n" ··· 211 211 212 212 do { 213 213 __asm__ __volatile__( 214 - " .set mips3 \n" 214 + " .set arch=r4000 \n" 215 215 " " __LL "%0, %1 # change_bit \n" 216 216 " xor %0, %2 \n" 217 217 " " __SC "%0, %1 \n" ··· 244 244 unsigned long temp; 245 245 246 246 __asm__ __volatile__( 247 - " .set mips3 \n" 247 + " .set arch=r4000 \n" 248 248 "1: " __LL "%0, %1 # test_and_set_bit \n" 249 249 " or %2, %0, %3 \n" 250 250 " " __SC "%2, %1 \n" ··· 260 260 261 261 do { 262 262 __asm__ __volatile__( 263 - " .set mips3 \n" 263 + " .set arch=r4000 \n" 264 264 " " __LL "%0, %1 # test_and_set_bit \n" 265 265 " or %2, %0, %3 \n" 266 266 " " __SC "%2, %1 \n" ··· 298 298 unsigned long temp; 299 299 300 300 __asm__ __volatile__( 301 - " .set mips3 \n" 301 + " .set arch=r4000 \n" 302 302 "1: " __LL "%0, %1 # test_and_set_bit \n" 303 303 " or %2, %0, %3 \n" 304 304 " " 
__SC "%2, %1 \n" ··· 314 314 315 315 do { 316 316 __asm__ __volatile__( 317 - " .set mips3 \n" 317 + " .set arch=r4000 \n" 318 318 " " __LL "%0, %1 # test_and_set_bit \n" 319 319 " or %2, %0, %3 \n" 320 320 " " __SC "%2, %1 \n" ··· 353 353 unsigned long temp; 354 354 355 355 __asm__ __volatile__( 356 - " .set mips3 \n" 356 + " .set arch=r4000 \n" 357 357 "1: " __LL "%0, %1 # test_and_clear_bit \n" 358 358 " or %2, %0, %3 \n" 359 359 " xor %2, %3 \n" ··· 386 386 387 387 do { 388 388 __asm__ __volatile__( 389 - " .set mips3 \n" 389 + " .set arch=r4000 \n" 390 390 " " __LL "%0, %1 # test_and_clear_bit \n" 391 391 " or %2, %0, %3 \n" 392 392 " xor %2, %3 \n" ··· 427 427 unsigned long temp; 428 428 429 429 __asm__ __volatile__( 430 - " .set mips3 \n" 430 + " .set arch=r4000 \n" 431 431 "1: " __LL "%0, %1 # test_and_change_bit \n" 432 432 " xor %2, %0, %3 \n" 433 433 " " __SC "%2, %1 \n" ··· 443 443 444 444 do { 445 445 __asm__ __volatile__( 446 - " .set mips3 \n" 446 + " .set arch=r4000 \n" 447 447 " " __LL "%0, %1 # test_and_change_bit \n" 448 448 " xor %2, %0, %3 \n" 449 449 " " __SC "\t%2, %1 \n"
+10 -10
arch/mips/include/asm/cmpxchg.h
··· 22 22 unsigned long dummy; 23 23 24 24 __asm__ __volatile__( 25 - " .set mips3 \n" 25 + " .set arch=r4000 \n" 26 26 "1: ll %0, %3 # xchg_u32 \n" 27 27 " .set mips0 \n" 28 28 " move %2, %z4 \n" 29 - " .set mips3 \n" 29 + " .set arch=r4000 \n" 30 30 " sc %2, %1 \n" 31 31 " beqzl %2, 1b \n" 32 32 " .set mips0 \n" ··· 38 38 39 39 do { 40 40 __asm__ __volatile__( 41 - " .set mips3 \n" 41 + " .set arch=r4000 \n" 42 42 " ll %0, %3 # xchg_u32 \n" 43 43 " .set mips0 \n" 44 44 " move %2, %z4 \n" 45 - " .set mips3 \n" 45 + " .set arch=r4000 \n" 46 46 " sc %2, %1 \n" 47 47 " .set mips0 \n" 48 48 : "=&r" (retval), "=m" (*m), "=&r" (dummy) ··· 74 74 unsigned long dummy; 75 75 76 76 __asm__ __volatile__( 77 - " .set mips3 \n" 77 + " .set arch=r4000 \n" 78 78 "1: lld %0, %3 # xchg_u64 \n" 79 79 " move %2, %z4 \n" 80 80 " scd %2, %1 \n" ··· 88 88 89 89 do { 90 90 __asm__ __volatile__( 91 - " .set mips3 \n" 91 + " .set arch=r4000 \n" 92 92 " lld %0, %3 # xchg_u64 \n" 93 93 " move %2, %z4 \n" 94 94 " scd %2, %1 \n" ··· 145 145 __asm__ __volatile__( \ 146 146 " .set push \n" \ 147 147 " .set noat \n" \ 148 - " .set mips3 \n" \ 148 + " .set arch=r4000 \n" \ 149 149 "1: " ld " %0, %2 # __cmpxchg_asm \n" \ 150 150 " bne %0, %z3, 2f \n" \ 151 151 " .set mips0 \n" \ 152 152 " move $1, %z4 \n" \ 153 - " .set mips3 \n" \ 153 + " .set arch=r4000 \n" \ 154 154 " " st " $1, %1 \n" \ 155 155 " beqzl $1, 1b \n" \ 156 156 "2: \n" \ ··· 162 162 __asm__ __volatile__( \ 163 163 " .set push \n" \ 164 164 " .set noat \n" \ 165 - " .set mips3 \n" \ 165 + " .set arch=r4000 \n" \ 166 166 "1: " ld " %0, %2 # __cmpxchg_asm \n" \ 167 167 " bne %0, %z3, 2f \n" \ 168 168 " .set mips0 \n" \ 169 169 " move $1, %z4 \n" \ 170 - " .set mips3 \n" \ 170 + " .set arch=r4000 \n" \ 171 171 " " st " $1, %1 \n" \ 172 172 " beqz $1, 1b \n" \ 173 173 " .set pop \n" \
+8 -8
arch/mips/include/asm/futex.h
··· 23 23 __asm__ __volatile__( \ 24 24 " .set push \n" \ 25 25 " .set noat \n" \ 26 - " .set mips3 \n" \ 26 + " .set arch=r4000 \n" \ 27 27 "1: ll %1, %4 # __futex_atomic_op \n" \ 28 28 " .set mips0 \n" \ 29 29 " " insn " \n" \ 30 - " .set mips3 \n" \ 30 + " .set arch=r4000 \n" \ 31 31 "2: sc $1, %2 \n" \ 32 32 " beqzl $1, 1b \n" \ 33 33 __WEAK_LLSC_MB \ ··· 49 49 __asm__ __volatile__( \ 50 50 " .set push \n" \ 51 51 " .set noat \n" \ 52 - " .set mips3 \n" \ 52 + " .set arch=r4000 \n" \ 53 53 "1: "user_ll("%1", "%4")" # __futex_atomic_op\n" \ 54 54 " .set mips0 \n" \ 55 55 " " insn " \n" \ 56 - " .set mips3 \n" \ 56 + " .set arch=r4000 \n" \ 57 57 "2: "user_sc("$1", "%2")" \n" \ 58 58 " beqz $1, 1b \n" \ 59 59 __WEAK_LLSC_MB \ ··· 147 147 "# futex_atomic_cmpxchg_inatomic \n" 148 148 " .set push \n" 149 149 " .set noat \n" 150 - " .set mips3 \n" 150 + " .set arch=r4000 \n" 151 151 "1: ll %1, %3 \n" 152 152 " bne %1, %z4, 3f \n" 153 153 " .set mips0 \n" 154 154 " move $1, %z5 \n" 155 - " .set mips3 \n" 155 + " .set arch=r4000 \n" 156 156 "2: sc $1, %2 \n" 157 157 " beqzl $1, 1b \n" 158 158 __WEAK_LLSC_MB ··· 174 174 "# futex_atomic_cmpxchg_inatomic \n" 175 175 " .set push \n" 176 176 " .set noat \n" 177 - " .set mips3 \n" 177 + " .set arch=r4000 \n" 178 178 "1: "user_ll("%1", "%3")" \n" 179 179 " bne %1, %z4, 3f \n" 180 180 " .set mips0 \n" 181 181 " move $1, %z5 \n" 182 - " .set mips3 \n" 182 + " .set arch=r4000 \n" 183 183 "2: "user_sc("$1", "%2")" \n" 184 184 " beqz $1, 1b \n" 185 185 __WEAK_LLSC_MB
+2 -2
arch/mips/include/asm/io.h
··· 331 331 if (irq) \ 332 332 local_irq_save(__flags); \ 333 333 __asm__ __volatile__( \ 334 - ".set mips3" "\t\t# __writeq""\n\t" \ 334 + ".set arch=r4000" "\t\t# __writeq""\n\t" \ 335 335 "dsll32 %L0, %L0, 0" "\n\t" \ 336 336 "dsrl32 %L0, %L0, 0" "\n\t" \ 337 337 "dsll32 %M0, %M0, 0" "\n\t" \ ··· 361 361 if (irq) \ 362 362 local_irq_save(__flags); \ 363 363 __asm__ __volatile__( \ 364 - ".set mips3" "\t\t# __readq" "\n\t" \ 364 + ".set arch=r4000" "\t\t# __readq" "\n\t" \ 365 365 "ld %L0, %1" "\n\t" \ 366 366 "dsra32 %M0, %L0, 0" "\n\t" \ 367 367 "sll %L0, %L0, 0" "\n\t" \
+4 -4
arch/mips/include/asm/local.h
··· 33 33 unsigned long temp; 34 34 35 35 __asm__ __volatile__( 36 - " .set mips3 \n" 36 + " .set arch=r4000 \n" 37 37 "1:" __LL "%1, %2 # local_add_return \n" 38 38 " addu %0, %1, %3 \n" 39 39 __SC "%0, %2 \n" ··· 47 47 unsigned long temp; 48 48 49 49 __asm__ __volatile__( 50 - " .set mips3 \n" 50 + " .set arch=r4000 \n" 51 51 "1:" __LL "%1, %2 # local_add_return \n" 52 52 " addu %0, %1, %3 \n" 53 53 __SC "%0, %2 \n" ··· 78 78 unsigned long temp; 79 79 80 80 __asm__ __volatile__( 81 - " .set mips3 \n" 81 + " .set arch=r4000 \n" 82 82 "1:" __LL "%1, %2 # local_sub_return \n" 83 83 " subu %0, %1, %3 \n" 84 84 __SC "%0, %2 \n" ··· 92 92 unsigned long temp; 93 93 94 94 __asm__ __volatile__( 95 - " .set mips3 \n" 95 + " .set arch=r4000 \n" 96 96 "1:" __LL "%1, %2 # local_sub_return \n" 97 97 " subu %0, %1, %3 \n" 98 98 __SC "%0, %2 \n"
+6 -6
arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h
··· 76 76 77 77 __asm__ __volatile__( 78 78 " .set push \n" 79 - " .set mips3 \n" 79 + " .set arch=r4000 \n" 80 80 "1: ll %0, %1 # set_value_reg32 \n" 81 81 " and %0, %2 \n" 82 82 " or %0, %3 \n" ··· 98 98 99 99 __asm__ __volatile__( 100 100 " .set push \n" 101 - " .set mips3 \n" 101 + " .set arch=r4000 \n" 102 102 "1: ll %0, %1 # set_reg32 \n" 103 103 " or %0, %2 \n" 104 104 " sc %0, %1 \n" ··· 119 119 120 120 __asm__ __volatile__( 121 121 " .set push \n" 122 - " .set mips3 \n" 122 + " .set arch=r4000 \n" 123 123 "1: ll %0, %1 # clear_reg32 \n" 124 124 " and %0, %2 \n" 125 125 " sc %0, %1 \n" ··· 140 140 141 141 __asm__ __volatile__( 142 142 " .set push \n" 143 - " .set mips3 \n" 143 + " .set arch=r4000 \n" 144 144 "1: ll %0, %1 # toggle_reg32 \n" 145 145 " xor %0, %2 \n" 146 146 " sc %0, %1 \n" ··· 216 216 #define custom_read_reg32(address, tmp) \ 217 217 __asm__ __volatile__( \ 218 218 " .set push \n" \ 219 - " .set mips3 \n" \ 219 + " .set arch=r4000 \n" \ 220 220 "1: ll %0, %1 #custom_read_reg32 \n" \ 221 221 " .set pop \n" \ 222 222 : "=r" (tmp), "=m" (*address) \ ··· 225 225 #define custom_write_reg32(address, tmp) \ 226 226 __asm__ __volatile__( \ 227 227 " .set push \n" \ 228 - " .set mips3 \n" \ 228 + " .set arch=r4000 \n" \ 229 229 " sc %0, %1 #custom_write_reg32 \n" \ 230 230 " "__beqz"%0, 1b \n" \ 231 231 " nop \n" \
+2 -2
arch/mips/include/asm/r4kcache.h
··· 36 36 __asm__ __volatile__( \ 37 37 " .set push \n" \ 38 38 " .set noreorder \n" \ 39 - " .set mips3\n\t \n" \ 39 + " .set arch=r4000 \n" \ 40 40 " cache %0, %1 \n" \ 41 41 " .set pop \n" \ 42 42 : \ ··· 204 204 __asm__ __volatile__( \ 205 205 " .set push \n" \ 206 206 " .set noreorder \n" \ 207 - " .set mips3 \n" \ 207 + " .set arch=r4000 \n" \ 208 208 "1: cache %0, (%1) \n" \ 209 209 "2: .set pop \n" \ 210 210 " .section __ex_table,\"a\" \n" \
+1 -1
arch/mips/include/asm/stackframe.h
··· 435 435 436 436 .macro RESTORE_SP_AND_RET 437 437 LONG_L sp, PT_R29(sp) 438 - .set mips3 438 + .set arch=r4000 439 439 eret 440 440 .set mips0 441 441 .endm
+1 -1
arch/mips/kernel/bmips_vec.S
··· 122 122 jr k0 123 123 124 124 RESTORE_ALL 125 - .set mips3 125 + .set arch=r4000 126 126 eret 127 127 128 128 /***********************************************************************
+3 -3
arch/mips/kernel/genex.S
··· 67 67 */ 68 68 NESTED(except_vec3_r4000, 0, sp) 69 69 .set push 70 - .set mips3 70 + .set arch=r4000 71 71 .set noat 72 72 mfc0 k1, CP0_CAUSE 73 73 li k0, 31<<2 ··· 139 139 nop 140 140 nop 141 141 #endif 142 - .set mips3 142 + .set arch=r4000 143 143 wait 144 144 /* end of rollback region (the region size must be power of two) */ 145 145 1: ··· 577 577 ori k1, _THREAD_MASK 578 578 xori k1, _THREAD_MASK 579 579 LONG_L v1, TI_TP_VALUE(k1) 580 - .set mips3 580 + .set arch=r4000 581 581 eret 582 582 .set mips0 583 583 #endif
+3 -3
arch/mips/kernel/idle.c
··· 64 64 if (!need_resched()) 65 65 __asm__( 66 66 " .set push \n" 67 - " .set mips3 \n" 67 + " .set arch=r4000 \n" 68 68 " wait \n" 69 69 " .set pop \n"); 70 70 local_irq_enable(); ··· 82 82 if (!need_resched()) 83 83 __asm__( 84 84 " .set push \n" 85 - " .set mips3 \n" 85 + " .set arch=r4000 \n" 86 86 " .set noat \n" 87 87 " mfc0 $1, $12 \n" 88 88 " sync \n" ··· 103 103 unsigned long c0status = read_c0_status() | 1; /* irqs on */ 104 104 105 105 __asm__( 106 - " .set mips3 \n" 106 + " .set arch=r4000 \n" 107 107 " cache 0x14, 0(%0) \n" 108 108 " cache 0x14, 32(%0) \n" 109 109 " sync \n"
+1 -1
arch/mips/kernel/r4k_fpu.S
··· 31 31 .endm 32 32 33 33 .set noreorder 34 - .set mips3 34 + .set arch=r4000 35 35 36 36 LEAF(_save_fp_context) 37 37 cfc1 t1, fcr31
+1 -1
arch/mips/kernel/r4k_switch.S
··· 294 294 1: .set pop 295 295 #endif /* CONFIG_CPU_MIPS32_R2 */ 296 296 #else 297 - .set mips3 297 + .set arch=r4000 298 298 dmtc1 t1, $f0 299 299 dmtc1 t1, $f2 300 300 dmtc1 t1, $f4
+2 -2
arch/mips/kernel/syscall.c
··· 110 110 111 111 if (cpu_has_llsc && R10000_LLSC_WAR) { 112 112 __asm__ __volatile__ ( 113 - " .set mips3 \n" 113 + " .set arch=r4000 \n" 114 114 " li %[err], 0 \n" 115 115 "1: ll %[old], (%[addr]) \n" 116 116 " move %[tmp], %[new] \n" ··· 135 135 : "memory"); 136 136 } else if (cpu_has_llsc) { 137 137 __asm__ __volatile__ ( 138 - " .set mips3 \n" 138 + " .set arch=r4000 \n" 139 139 " li %[err], 0 \n" 140 140 "1: ll %[old], (%[addr]) \n" 141 141 " move %[tmp], %[new] \n"
+1 -1
arch/mips/pmcs-msp71xx/msp_setup.c
··· 49 49 /* Cache the reset code of this function */ 50 50 __asm__ __volatile__ ( 51 51 " .set push \n" 52 - " .set mips3 \n" 52 + " .set arch=r4000 \n" 53 53 " la %0,startpoint \n" 54 54 " la %1,endpoint \n" 55 55 " .set pop \n"