powerpc: Fix invalid use of register expressions

binutils >= 2.26 now warns about misuse of register expressions in
assembler operands that are actually literals, for example:

arch/powerpc/kernel/entry_64.S:535: Warning: invalid register expression

In practice these are almost all uses of r0 that should just be a
literal 0.

Signed-off-by: Andreas Schwab <schwab@linux-m68k.org>
[mpe: Mention r0 is almost always the culprit, fold in purgatory change]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
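
The mechanics, for readers hitting this warning: in load/store-style instructions the RA field has (RA|0) semantics, so the value 0 there means "base address 0" rather than "the contents of GPR0". Writing r0 (which ppc_asm.h defines as the register expression %r0) assembles to the same bits, but binutils >= 2.26 flags the register expression in what is really a literal slot. A minimal standalone reproducer, a sketch only (file name and register choices are illustrative, assuming a powerpc cross-binutils is installed):

	# warn-demo.S -- hypothetical, not part of this commit.
	# Assemble with e.g.: powerpc64-linux-gnu-as warn-demo.S -o /dev/null
	.text
	dcbf	%r0,%r3		# gas >= 2.26: "Warning: invalid register expression"
	dcbf	0,%r3		# fixed: identical encoding, RA=0 selects a zero base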

Authored by Andreas Schwab, committed by Michael Ellerman
8a583c0a 21a0e8c1

7 files changed, 77 insertions(+), 77 deletions(-)
arch/powerpc/include/asm/ppc_asm.h (+1 -1)
@@ -439,7 +439,7 @@
 	.machine push ;			\
 	.machine "power4" ;		\
 	lis	scratch,0x60000000@h;	\
-	dcbt	r0,scratch,0b01010;	\
+	dcbt	0,scratch,0b01010;	\
 	.machine pop

 /*

arch/powerpc/kernel/swsusp_asm64.S (+1 -1)
@@ -179,7 +179,7 @@
 	sld	r3, r3, r0
 	li	r0, 0
 1:
-	dcbf	r0,r3
+	dcbf	0,r3
 	addi	r3,r3,0x20
 	bdnz	1b


arch/powerpc/lib/copypage_power7.S (+7 -7)
@@ -45,13 +45,13 @@
 	.machine push
 	.machine "power4"
 	/* setup read stream 0 */
-	dcbt	r0,r4,0b01000	/* addr from */
-	dcbt	r0,r7,0b01010	/* length and depth from */
+	dcbt	0,r4,0b01000	/* addr from */
+	dcbt	0,r7,0b01010	/* length and depth from */
 	/* setup write stream 1 */
-	dcbtst	r0,r9,0b01000	/* addr to */
-	dcbtst	r0,r10,0b01010	/* length and depth to */
+	dcbtst	0,r9,0b01000	/* addr to */
+	dcbtst	0,r10,0b01010	/* length and depth to */
 	eieio
-	dcbt	r0,r8,0b01010	/* all streams GO */
+	dcbt	0,r8,0b01010	/* all streams GO */
 	.machine pop

 #ifdef CONFIG_ALTIVEC
@@ -83,7 +83,7 @@
 	li	r12,112

 	.align	5
-1:	lvx	v7,r0,r4
+1:	lvx	v7,0,r4
 	lvx	v6,r4,r6
 	lvx	v5,r4,r7
 	lvx	v4,r4,r8
@@ -92,7 +92,7 @@
 	lvx	v1,r4,r11
 	lvx	v0,r4,r12
 	addi	r4,r4,128
-	stvx	v7,r0,r3
+	stvx	v7,0,r3
 	stvx	v6,r3,r6
 	stvx	v5,r3,r7
 	stvx	v4,r3,r8
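
In the three-operand dcbt/dcbtst form above, the last operand is the TH hint field: 0b01000 describes a data stream's starting address, and 0b01010 supplies stream control (the "length and depth" and "all streams GO" words in the comments). RA is the operand with (RA|0) semantics, so replacing r0 with 0 changes nothing but the warning. A standalone sketch of the same touch-stream setup, with illustrative register choices and assuming a powerpc cross-assembler:

	# stream-demo.S -- hypothetical; mirrors the kernel pattern above.
	# Assemble with e.g.: powerpc64-linux-gnu-as stream-demo.S -o /dev/null
	.text
	.machine "power4"	# same machine the kernel selects for this operand order
	dcbt	0,%r4,0b01000	# describe read stream: start address in r4
	dcbt	0,%r7,0b01010	# stream control: length/depth word in r7
	eieio			# order the stream programming
	dcbt	0,%r8,0b01010	# control word in r8 sets the GO bit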

arch/powerpc/lib/copyuser_power7.S (+33 -33)
@@ -315,13 +315,13 @@
 	.machine push
 	.machine "power4"
 	/* setup read stream 0 */
-	dcbt	r0,r6,0b01000	/* addr from */
-	dcbt	r0,r7,0b01010	/* length and depth from */
+	dcbt	0,r6,0b01000	/* addr from */
+	dcbt	0,r7,0b01010	/* length and depth from */
 	/* setup write stream 1 */
-	dcbtst	r0,r9,0b01000	/* addr to */
-	dcbtst	r0,r10,0b01010	/* length and depth to */
+	dcbtst	0,r9,0b01000	/* addr to */
+	dcbtst	0,r10,0b01010	/* length and depth to */
 	eieio
-	dcbt	r0,r8,0b01010	/* all streams GO */
+	dcbt	0,r8,0b01010	/* all streams GO */
 	.machine pop

 	beq	cr1,.Lunwind_stack_nonvmx_copy
@@ -376,26 +376,26 @@
 	li	r11,48

 	bf	cr7*4+3,5f
-err3;	lvx	v1,r0,r4
+err3;	lvx	v1,0,r4
 	addi	r4,r4,16
-err3;	stvx	v1,r0,r3
+err3;	stvx	v1,0,r3
 	addi	r3,r3,16

 5:	bf	cr7*4+2,6f
-err3;	lvx	v1,r0,r4
+err3;	lvx	v1,0,r4
 err3;	lvx	v0,r4,r9
 	addi	r4,r4,32
-err3;	stvx	v1,r0,r3
+err3;	stvx	v1,0,r3
 err3;	stvx	v0,r3,r9
 	addi	r3,r3,32

 6:	bf	cr7*4+1,7f
-err3;	lvx	v3,r0,r4
+err3;	lvx	v3,0,r4
 err3;	lvx	v2,r4,r9
 err3;	lvx	v1,r4,r10
 err3;	lvx	v0,r4,r11
 	addi	r4,r4,64
-err3;	stvx	v3,r0,r3
+err3;	stvx	v3,0,r3
 err3;	stvx	v2,r3,r9
 err3;	stvx	v1,r3,r10
 err3;	stvx	v0,r3,r11
@@ -421,7 +421,7 @@
 	 */
 	.align	5
 8:
-err4;	lvx	v7,r0,r4
+err4;	lvx	v7,0,r4
 err4;	lvx	v6,r4,r9
 err4;	lvx	v5,r4,r10
 err4;	lvx	v4,r4,r11
@@ -430,7 +430,7 @@
 err4;	lvx	v1,r4,r15
 err4;	lvx	v0,r4,r16
 	addi	r4,r4,128
-err4;	stvx	v7,r0,r3
+err4;	stvx	v7,0,r3
 err4;	stvx	v6,r3,r9
 err4;	stvx	v5,r3,r10
 err4;	stvx	v4,r3,r11
@@ -451,29 +451,29 @@
 	mtocrf	0x01,r6

 	bf	cr7*4+1,9f
-err3;	lvx	v3,r0,r4
+err3;	lvx	v3,0,r4
 err3;	lvx	v2,r4,r9
 err3;	lvx	v1,r4,r10
 err3;	lvx	v0,r4,r11
 	addi	r4,r4,64
-err3;	stvx	v3,r0,r3
+err3;	stvx	v3,0,r3
 err3;	stvx	v2,r3,r9
 err3;	stvx	v1,r3,r10
 err3;	stvx	v0,r3,r11
 	addi	r3,r3,64

 9:	bf	cr7*4+2,10f
-err3;	lvx	v1,r0,r4
+err3;	lvx	v1,0,r4
 err3;	lvx	v0,r4,r9
 	addi	r4,r4,32
-err3;	stvx	v1,r0,r3
+err3;	stvx	v1,0,r3
 err3;	stvx	v0,r3,r9
 	addi	r3,r3,32

 10:	bf	cr7*4+3,11f
-err3;	lvx	v1,r0,r4
+err3;	lvx	v1,0,r4
 	addi	r4,r4,16
-err3;	stvx	v1,r0,r3
+err3;	stvx	v1,0,r3
 	addi	r3,r3,16

 	/* Up to 15B to go */
@@ -553,25 +553,25 @@
 	addi	r4,r4,16

 	bf	cr7*4+3,5f
-err3;	lvx	v1,r0,r4
+err3;	lvx	v1,0,r4
 	VPERM(v8,v0,v1,v16)
 	addi	r4,r4,16
-err3;	stvx	v8,r0,r3
+err3;	stvx	v8,0,r3
 	addi	r3,r3,16
 	vor	v0,v1,v1

 5:	bf	cr7*4+2,6f
-err3;	lvx	v1,r0,r4
+err3;	lvx	v1,0,r4
 	VPERM(v8,v0,v1,v16)
 err3;	lvx	v0,r4,r9
 	VPERM(v9,v1,v0,v16)
 	addi	r4,r4,32
-err3;	stvx	v8,r0,r3
+err3;	stvx	v8,0,r3
 err3;	stvx	v9,r3,r9
 	addi	r3,r3,32

 6:	bf	cr7*4+1,7f
-err3;	lvx	v3,r0,r4
+err3;	lvx	v3,0,r4
 	VPERM(v8,v0,v3,v16)
 err3;	lvx	v2,r4,r9
 	VPERM(v9,v3,v2,v16)
@@ -580,7 +580,7 @@
 err3;	lvx	v0,r4,r11
 	VPERM(v11,v1,v0,v16)
 	addi	r4,r4,64
-err3;	stvx	v8,r0,r3
+err3;	stvx	v8,0,r3
 err3;	stvx	v9,r3,r9
 err3;	stvx	v10,r3,r10
 err3;	stvx	v11,r3,r11
@@ -606,7 +606,7 @@
 	 */
 	.align	5
 8:
-err4;	lvx	v7,r0,r4
+err4;	lvx	v7,0,r4
 	VPERM(v8,v0,v7,v16)
 err4;	lvx	v6,r4,r9
 	VPERM(v9,v7,v6,v16)
@@ -623,7 +623,7 @@
 err4;	lvx	v0,r4,r16
 	VPERM(v15,v1,v0,v16)
 	addi	r4,r4,128
-err4;	stvx	v8,r0,r3
+err4;	stvx	v8,0,r3
 err4;	stvx	v9,r3,r9
 err4;	stvx	v10,r3,r10
 err4;	stvx	v11,r3,r11
@@ -644,7 +644,7 @@
 	mtocrf	0x01,r6

 	bf	cr7*4+1,9f
-err3;	lvx	v3,r0,r4
+err3;	lvx	v3,0,r4
 	VPERM(v8,v0,v3,v16)
 err3;	lvx	v2,r4,r9
 	VPERM(v9,v3,v2,v16)
@@ -653,27 +653,27 @@
 err3;	lvx	v0,r4,r11
 	VPERM(v11,v1,v0,v16)
 	addi	r4,r4,64
-err3;	stvx	v8,r0,r3
+err3;	stvx	v8,0,r3
 err3;	stvx	v9,r3,r9
 err3;	stvx	v10,r3,r10
 err3;	stvx	v11,r3,r11
 	addi	r3,r3,64

 9:	bf	cr7*4+2,10f
-err3;	lvx	v1,r0,r4
+err3;	lvx	v1,0,r4
 	VPERM(v8,v0,v1,v16)
 err3;	lvx	v0,r4,r9
 	VPERM(v9,v1,v0,v16)
 	addi	r4,r4,32
-err3;	stvx	v8,r0,r3
+err3;	stvx	v8,0,r3
 err3;	stvx	v9,r3,r9
 	addi	r3,r3,32

 10:	bf	cr7*4+3,11f
-err3;	lvx	v1,r0,r4
+err3;	lvx	v1,0,r4
 	VPERM(v8,v0,v1,v16)
 	addi	r4,r4,16
-err3;	stvx	v8,r0,r3
+err3;	stvx	v8,0,r3
 	addi	r3,r3,16

 	/* Up to 15B to go */

arch/powerpc/lib/memcpy_power7.S (+33 -33)
@@ -261,12 +261,12 @@

 	.machine push
 	.machine "power4"
-	dcbt	r0,r6,0b01000
-	dcbt	r0,r7,0b01010
-	dcbtst	r0,r9,0b01000
-	dcbtst	r0,r10,0b01010
+	dcbt	0,r6,0b01000
+	dcbt	0,r7,0b01010
+	dcbtst	0,r9,0b01000
+	dcbtst	0,r10,0b01010
 	eieio
-	dcbt	r0,r8,0b01010	/* GO */
+	dcbt	0,r8,0b01010	/* GO */
 	.machine pop

 	beq	cr1,.Lunwind_stack_nonvmx_copy
@@ -321,26 +321,26 @@
 	li	r11,48

 	bf	cr7*4+3,5f
-	lvx	v1,r0,r4
+	lvx	v1,0,r4
 	addi	r4,r4,16
-	stvx	v1,r0,r3
+	stvx	v1,0,r3
 	addi	r3,r3,16

 5:	bf	cr7*4+2,6f
-	lvx	v1,r0,r4
+	lvx	v1,0,r4
 	lvx	v0,r4,r9
 	addi	r4,r4,32
-	stvx	v1,r0,r3
+	stvx	v1,0,r3
 	stvx	v0,r3,r9
 	addi	r3,r3,32

 6:	bf	cr7*4+1,7f
-	lvx	v3,r0,r4
+	lvx	v3,0,r4
 	lvx	v2,r4,r9
 	lvx	v1,r4,r10
 	lvx	v0,r4,r11
 	addi	r4,r4,64
-	stvx	v3,r0,r3
+	stvx	v3,0,r3
 	stvx	v2,r3,r9
 	stvx	v1,r3,r10
 	stvx	v0,r3,r11
@@ -366,7 +366,7 @@
 	 */
 	.align	5
 8:
-	lvx	v7,r0,r4
+	lvx	v7,0,r4
 	lvx	v6,r4,r9
 	lvx	v5,r4,r10
 	lvx	v4,r4,r11
@@ -375,7 +375,7 @@
 	lvx	v1,r4,r15
 	lvx	v0,r4,r16
 	addi	r4,r4,128
-	stvx	v7,r0,r3
+	stvx	v7,0,r3
 	stvx	v6,r3,r9
 	stvx	v5,r3,r10
 	stvx	v4,r3,r11
@@ -396,29 +396,29 @@
 	mtocrf	0x01,r6

 	bf	cr7*4+1,9f
-	lvx	v3,r0,r4
+	lvx	v3,0,r4
 	lvx	v2,r4,r9
 	lvx	v1,r4,r10
 	lvx	v0,r4,r11
 	addi	r4,r4,64
-	stvx	v3,r0,r3
+	stvx	v3,0,r3
 	stvx	v2,r3,r9
 	stvx	v1,r3,r10
 	stvx	v0,r3,r11
 	addi	r3,r3,64

 9:	bf	cr7*4+2,10f
-	lvx	v1,r0,r4
+	lvx	v1,0,r4
 	lvx	v0,r4,r9
 	addi	r4,r4,32
-	stvx	v1,r0,r3
+	stvx	v1,0,r3
 	stvx	v0,r3,r9
 	addi	r3,r3,32

 10:	bf	cr7*4+3,11f
-	lvx	v1,r0,r4
+	lvx	v1,0,r4
 	addi	r4,r4,16
-	stvx	v1,r0,r3
+	stvx	v1,0,r3
 	addi	r3,r3,16

 	/* Up to 15B to go */
@@ -499,25 +499,25 @@
 	addi	r4,r4,16

 	bf	cr7*4+3,5f
-	lvx	v1,r0,r4
+	lvx	v1,0,r4
 	VPERM(v8,v0,v1,v16)
 	addi	r4,r4,16
-	stvx	v8,r0,r3
+	stvx	v8,0,r3
 	addi	r3,r3,16
 	vor	v0,v1,v1

 5:	bf	cr7*4+2,6f
-	lvx	v1,r0,r4
+	lvx	v1,0,r4
 	VPERM(v8,v0,v1,v16)
 	lvx	v0,r4,r9
 	VPERM(v9,v1,v0,v16)
 	addi	r4,r4,32
-	stvx	v8,r0,r3
+	stvx	v8,0,r3
 	stvx	v9,r3,r9
 	addi	r3,r3,32

 6:	bf	cr7*4+1,7f
-	lvx	v3,r0,r4
+	lvx	v3,0,r4
 	VPERM(v8,v0,v3,v16)
 	lvx	v2,r4,r9
 	VPERM(v9,v3,v2,v16)
@@ -526,7 +526,7 @@
 	lvx	v0,r4,r11
 	VPERM(v11,v1,v0,v16)
 	addi	r4,r4,64
-	stvx	v8,r0,r3
+	stvx	v8,0,r3
 	stvx	v9,r3,r9
 	stvx	v10,r3,r10
 	stvx	v11,r3,r11
@@ -552,7 +552,7 @@
 	 */
 	.align	5
 8:
-	lvx	v7,r0,r4
+	lvx	v7,0,r4
 	VPERM(v8,v0,v7,v16)
 	lvx	v6,r4,r9
 	VPERM(v9,v7,v6,v16)
@@ -569,7 +569,7 @@
 	lvx	v0,r4,r16
 	VPERM(v15,v1,v0,v16)
 	addi	r4,r4,128
-	stvx	v8,r0,r3
+	stvx	v8,0,r3
 	stvx	v9,r3,r9
 	stvx	v10,r3,r10
 	stvx	v11,r3,r11
@@ -590,7 +590,7 @@
 	mtocrf	0x01,r6

 	bf	cr7*4+1,9f
-	lvx	v3,r0,r4
+	lvx	v3,0,r4
 	VPERM(v8,v0,v3,v16)
 	lvx	v2,r4,r9
 	VPERM(v9,v3,v2,v16)
@@ -599,27 +599,27 @@
 	lvx	v0,r4,r11
 	VPERM(v11,v1,v0,v16)
 	addi	r4,r4,64
-	stvx	v8,r0,r3
+	stvx	v8,0,r3
 	stvx	v9,r3,r9
 	stvx	v10,r3,r10
 	stvx	v11,r3,r11
 	addi	r3,r3,64

 9:	bf	cr7*4+2,10f
-	lvx	v1,r0,r4
+	lvx	v1,0,r4
 	VPERM(v8,v0,v1,v16)
 	lvx	v0,r4,r9
 	VPERM(v9,v1,v0,v16)
 	addi	r4,r4,32
-	stvx	v8,r0,r3
+	stvx	v8,0,r3
 	stvx	v9,r3,r9
 	addi	r3,r3,32

 10:	bf	cr7*4+3,11f
-	lvx	v1,r0,r4
+	lvx	v1,0,r4
 	VPERM(v8,v0,v1,v16)
 	addi	r4,r4,16
-	stvx	v8,r0,r3
+	stvx	v8,0,r3
 	addi	r3,r3,16

 	/* Up to 15B to go */

arch/powerpc/lib/string_64.S (+1 -1)
@@ -184,7 +184,7 @@
 	mtctr	r6
 	mr	r8,r3
 14:
-err1;	dcbz	r0,r3
+err1;	dcbz	0,r3
 	add	r3,r3,r9
 	bdnz	14b


arch/powerpc/purgatory/trampoline.S (+1 -1)
@@ -67,7 +67,7 @@
 	mr	%r16,%r3	/* save dt address in reg16 */
 	li	%r4,20
 	LWZX_BE	%r6,%r3,%r4	/* fetch __be32 version number at byte 20 */
-	cmpwi	%r0,%r6,2	/* v2 or later? */
+	cmpwi	%cr0,%r6,2	/* v2 or later? */
 	blt	1f
 	li	%r4,28
 	STWX_BE	%r17,%r3,%r4	/* Store my cpu as __be32 at byte 28 */
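
The purgatory change is the one fix here that is not an r0-for-0 substitution: in the three-operand form, cmpwi's first operand names a condition-register field, so %r0 put a GPR name in a CR-field slot. It happened to assemble to the intended cr0 (both encode as 0), which is why the code worked; %cr0 says what is meant and silences the warning. A minimal sketch, assuming a powerpc cross-assembler:

	# cmpwi-demo.S -- hypothetical illustration.
	.text
	cmpwi	%r0,%r6,2	# old: GPR name where a CR field belongs; newer gas warns
	cmpwi	%cr0,%r6,2	# fixed: compare %r6 with 2, result in cr0
	cmpwi	%r6,2		# equivalent two-operand form; cr0 is implicit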