Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

[MIPS] Eliminate local symbols from the symbol table.

These symbols appear in oprofile output, stacktraces, and similar listings,
where they only make the output harder to read. Many identical symbol names
such as "both_aligned" were also used in multiple source files, making it
impossible to tell which file was actually meant. So let's get rid of them
by prefixing the labels with ".L", which the assembler treats as local and
omits from the object's symbol table.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
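
For readers unfamiliar with the mechanism: under the GNU assembler, a symbol
whose name starts with ".L" is assembler-local and is never emitted into the
object file's symbol table, whereas an ordinary label becomes a local
(STB_LOCAL) symbol that nm, oprofile, and the stack unwinder will all
display. A minimal sketch of the difference (the function and label names
here are made up for illustration, not taken from the patch):

#include <asm/regdef.h>		/* symbolic register names: v0, a0, ... */

	.text
	.globl	demo_count
demo_count:
	move	v0, zero
loop_visible:		# ordinary label: emitted as a local symbol and
			# visible in nm/oprofile/stacktrace output
.Lloop_hidden:		# ".L" label: assembler-local, never emitted
	addiu	v0, v0, 1
	bne	v0, a0, .Lloop_hidden
	jr	ra

Assembling this and running nm on the object lists demo_count and
loop_visible but shows no trace of .Lloop_hidden, which is exactly the
effect the renames below rely on.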

+282 -281
+107 -107
arch/mips/lib/csum_partial.S
··· 96 96 move t7, zero 97 97 98 98 sltiu t8, a1, 0x8 99 - bnez t8, small_csumcpy /* < 8 bytes to copy */ 99 + bnez t8, .Lsmall_csumcpy /* < 8 bytes to copy */ 100 100 move t2, a1 101 101 102 102 andi t7, src, 0x1 /* odd buffer? */ 103 103 104 - hword_align: 105 - beqz t7, word_align 104 + .Lhword_align: 105 + beqz t7, .Lword_align 106 106 andi t8, src, 0x2 107 107 108 108 lbu t0, (src) ··· 114 114 PTR_ADDU src, src, 0x1 115 115 andi t8, src, 0x2 116 116 117 - word_align: 118 - beqz t8, dword_align 117 + .Lword_align: 118 + beqz t8, .Ldword_align 119 119 sltiu t8, a1, 56 120 120 121 121 lhu t0, (src) ··· 124 124 sltiu t8, a1, 56 125 125 PTR_ADDU src, src, 0x2 126 126 127 - dword_align: 128 - bnez t8, do_end_words 127 + .Ldword_align: 128 + bnez t8, .Ldo_end_words 129 129 move t8, a1 130 130 131 131 andi t8, src, 0x4 132 - beqz t8, qword_align 132 + beqz t8, .Lqword_align 133 133 andi t8, src, 0x8 134 134 135 135 lw t0, 0x00(src) ··· 138 138 PTR_ADDU src, src, 0x4 139 139 andi t8, src, 0x8 140 140 141 - qword_align: 142 - beqz t8, oword_align 141 + .Lqword_align: 142 + beqz t8, .Loword_align 143 143 andi t8, src, 0x10 144 144 145 145 #ifdef USE_DOUBLE ··· 156 156 PTR_ADDU src, src, 0x8 157 157 andi t8, src, 0x10 158 158 159 - oword_align: 160 - beqz t8, begin_movement 159 + .Loword_align: 160 + beqz t8, .Lbegin_movement 161 161 LONG_SRL t8, a1, 0x7 162 162 163 163 #ifdef USE_DOUBLE ··· 172 172 PTR_ADDU src, src, 0x10 173 173 LONG_SRL t8, a1, 0x7 174 174 175 - begin_movement: 175 + .Lbegin_movement: 176 176 beqz t8, 1f 177 177 andi t2, a1, 0x40 178 178 179 - move_128bytes: 179 + .Lmove_128bytes: 180 180 CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4) 181 181 CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4) 182 182 CSUM_BIGCHUNK(src, 0x40, sum, t0, t1, t3, t4) ··· 184 184 LONG_SUBU t8, t8, 0x01 185 185 .set reorder /* DADDI_WAR */ 186 186 PTR_ADDU src, src, 0x80 187 - bnez t8, move_128bytes 187 + bnez t8, .Lmove_128bytes 188 188 .set noreorder 189 189 190 190 1: 191 191 beqz t2, 1f 192 192 andi t2, a1, 0x20 193 193 194 - move_64bytes: 194 + .Lmove_64bytes: 195 195 CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4) 196 196 CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4) 197 197 PTR_ADDU src, src, 0x40 198 198 199 199 1: 200 - beqz t2, do_end_words 200 + beqz t2, .Ldo_end_words 201 201 andi t8, a1, 0x1c 202 202 203 - move_32bytes: 203 + .Lmove_32bytes: 204 204 CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4) 205 205 andi t8, a1, 0x1c 206 206 PTR_ADDU src, src, 0x20 207 207 208 - do_end_words: 209 - beqz t8, small_csumcpy 208 + .Ldo_end_words: 209 + beqz t8, .Lsmall_csumcpy 210 210 andi t2, a1, 0x3 211 211 LONG_SRL t8, t8, 0x2 212 212 213 - end_words: 213 + .Lend_words: 214 214 lw t0, (src) 215 215 LONG_SUBU t8, t8, 0x1 216 216 ADDC(sum, t0) 217 217 .set reorder /* DADDI_WAR */ 218 218 PTR_ADDU src, src, 0x4 219 - bnez t8, end_words 219 + bnez t8, .Lend_words 220 220 .set noreorder 221 221 222 222 /* unknown src alignment and < 8 bytes to go */ 223 - small_csumcpy: 223 + .Lsmall_csumcpy: 224 224 move a1, t2 225 225 226 226 andi t0, a1, 4 ··· 413 413 */ 414 414 sltu t2, len, NBYTES 415 415 and t1, dst, ADDRMASK 416 - bnez t2, copy_bytes_checklen 416 + bnez t2, .Lcopy_bytes_checklen 417 417 and t0, src, ADDRMASK 418 418 andi odd, dst, 0x1 /* odd buffer? 
*/ 419 - bnez t1, dst_unaligned 419 + bnez t1, .Ldst_unaligned 420 420 nop 421 - bnez t0, src_unaligned_dst_aligned 421 + bnez t0, .Lsrc_unaligned_dst_aligned 422 422 /* 423 423 * use delay slot for fall-through 424 424 * src and dst are aligned; need to compute rem 425 425 */ 426 - both_aligned: 426 + .Lboth_aligned: 427 427 SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter 428 - beqz t0, cleanup_both_aligned # len < 8*NBYTES 428 + beqz t0, .Lcleanup_both_aligned # len < 8*NBYTES 429 429 nop 430 430 SUB len, 8*NBYTES # subtract here for bgez loop 431 431 .align 4 432 432 1: 433 - EXC( LOAD t0, UNIT(0)(src), l_exc) 434 - EXC( LOAD t1, UNIT(1)(src), l_exc_copy) 435 - EXC( LOAD t2, UNIT(2)(src), l_exc_copy) 436 - EXC( LOAD t3, UNIT(3)(src), l_exc_copy) 437 - EXC( LOAD t4, UNIT(4)(src), l_exc_copy) 438 - EXC( LOAD t5, UNIT(5)(src), l_exc_copy) 439 - EXC( LOAD t6, UNIT(6)(src), l_exc_copy) 440 - EXC( LOAD t7, UNIT(7)(src), l_exc_copy) 433 + EXC( LOAD t0, UNIT(0)(src), .Ll_exc) 434 + EXC( LOAD t1, UNIT(1)(src), .Ll_exc_copy) 435 + EXC( LOAD t2, UNIT(2)(src), .Ll_exc_copy) 436 + EXC( LOAD t3, UNIT(3)(src), .Ll_exc_copy) 437 + EXC( LOAD t4, UNIT(4)(src), .Ll_exc_copy) 438 + EXC( LOAD t5, UNIT(5)(src), .Ll_exc_copy) 439 + EXC( LOAD t6, UNIT(6)(src), .Ll_exc_copy) 440 + EXC( LOAD t7, UNIT(7)(src), .Ll_exc_copy) 441 441 SUB len, len, 8*NBYTES 442 442 ADD src, src, 8*NBYTES 443 - EXC( STORE t0, UNIT(0)(dst), s_exc) 443 + EXC( STORE t0, UNIT(0)(dst), .Ls_exc) 444 444 ADDC(sum, t0) 445 - EXC( STORE t1, UNIT(1)(dst), s_exc) 445 + EXC( STORE t1, UNIT(1)(dst), .Ls_exc) 446 446 ADDC(sum, t1) 447 - EXC( STORE t2, UNIT(2)(dst), s_exc) 447 + EXC( STORE t2, UNIT(2)(dst), .Ls_exc) 448 448 ADDC(sum, t2) 449 - EXC( STORE t3, UNIT(3)(dst), s_exc) 449 + EXC( STORE t3, UNIT(3)(dst), .Ls_exc) 450 450 ADDC(sum, t3) 451 - EXC( STORE t4, UNIT(4)(dst), s_exc) 451 + EXC( STORE t4, UNIT(4)(dst), .Ls_exc) 452 452 ADDC(sum, t4) 453 - EXC( STORE t5, UNIT(5)(dst), s_exc) 453 + EXC( STORE t5, UNIT(5)(dst), .Ls_exc) 454 454 ADDC(sum, t5) 455 - EXC( STORE t6, UNIT(6)(dst), s_exc) 455 + EXC( STORE t6, UNIT(6)(dst), .Ls_exc) 456 456 ADDC(sum, t6) 457 - EXC( STORE t7, UNIT(7)(dst), s_exc) 457 + EXC( STORE t7, UNIT(7)(dst), .Ls_exc) 458 458 ADDC(sum, t7) 459 459 .set reorder /* DADDI_WAR */ 460 460 ADD dst, dst, 8*NBYTES ··· 465 465 /* 466 466 * len == the number of bytes left to copy < 8*NBYTES 467 467 */ 468 - cleanup_both_aligned: 468 + .Lcleanup_both_aligned: 469 469 #define rem t7 470 - beqz len, done 470 + beqz len, .Ldone 471 471 sltu t0, len, 4*NBYTES 472 - bnez t0, less_than_4units 472 + bnez t0, .Lless_than_4units 473 473 and rem, len, (NBYTES-1) # rem = len % NBYTES 474 474 /* 475 475 * len >= 4*NBYTES 476 476 */ 477 - EXC( LOAD t0, UNIT(0)(src), l_exc) 478 - EXC( LOAD t1, UNIT(1)(src), l_exc_copy) 479 - EXC( LOAD t2, UNIT(2)(src), l_exc_copy) 480 - EXC( LOAD t3, UNIT(3)(src), l_exc_copy) 477 + EXC( LOAD t0, UNIT(0)(src), .Ll_exc) 478 + EXC( LOAD t1, UNIT(1)(src), .Ll_exc_copy) 479 + EXC( LOAD t2, UNIT(2)(src), .Ll_exc_copy) 480 + EXC( LOAD t3, UNIT(3)(src), .Ll_exc_copy) 481 481 SUB len, len, 4*NBYTES 482 482 ADD src, src, 4*NBYTES 483 - EXC( STORE t0, UNIT(0)(dst), s_exc) 483 + EXC( STORE t0, UNIT(0)(dst), .Ls_exc) 484 484 ADDC(sum, t0) 485 - EXC( STORE t1, UNIT(1)(dst), s_exc) 485 + EXC( STORE t1, UNIT(1)(dst), .Ls_exc) 486 486 ADDC(sum, t1) 487 - EXC( STORE t2, UNIT(2)(dst), s_exc) 487 + EXC( STORE t2, UNIT(2)(dst), .Ls_exc) 488 488 ADDC(sum, t2) 489 - EXC( STORE t3, UNIT(3)(dst), s_exc) 489 + EXC( STORE t3, 
UNIT(3)(dst), .Ls_exc) 490 490 ADDC(sum, t3) 491 491 .set reorder /* DADDI_WAR */ 492 492 ADD dst, dst, 4*NBYTES 493 - beqz len, done 493 + beqz len, .Ldone 494 494 .set noreorder 495 - less_than_4units: 495 + .Lless_than_4units: 496 496 /* 497 497 * rem = len % NBYTES 498 498 */ 499 - beq rem, len, copy_bytes 499 + beq rem, len, .Lcopy_bytes 500 500 nop 501 501 1: 502 - EXC( LOAD t0, 0(src), l_exc) 502 + EXC( LOAD t0, 0(src), .Ll_exc) 503 503 ADD src, src, NBYTES 504 504 SUB len, len, NBYTES 505 - EXC( STORE t0, 0(dst), s_exc) 505 + EXC( STORE t0, 0(dst), .Ls_exc) 506 506 ADDC(sum, t0) 507 507 .set reorder /* DADDI_WAR */ 508 508 ADD dst, dst, NBYTES ··· 521 521 * more instruction-level parallelism. 522 522 */ 523 523 #define bits t2 524 - beqz len, done 524 + beqz len, .Ldone 525 525 ADD t1, dst, len # t1 is just past last byte of dst 526 526 li bits, 8*NBYTES 527 527 SLL rem, len, 3 # rem = number of bits to keep 528 - EXC( LOAD t0, 0(src), l_exc) 528 + EXC( LOAD t0, 0(src), .Ll_exc) 529 529 SUB bits, bits, rem # bits = number of bits to discard 530 530 SHIFT_DISCARD t0, t0, bits 531 - EXC( STREST t0, -1(t1), s_exc) 531 + EXC( STREST t0, -1(t1), .Ls_exc) 532 532 SHIFT_DISCARD_REVERT t0, t0, bits 533 533 .set reorder 534 534 ADDC(sum, t0) 535 - b done 535 + b .Ldone 536 536 .set noreorder 537 - dst_unaligned: 537 + .Ldst_unaligned: 538 538 /* 539 539 * dst is unaligned 540 540 * t0 = src & ADDRMASK ··· 545 545 * Set match = (src and dst have same alignment) 546 546 */ 547 547 #define match rem 548 - EXC( LDFIRST t3, FIRST(0)(src), l_exc) 548 + EXC( LDFIRST t3, FIRST(0)(src), .Ll_exc) 549 549 ADD t2, zero, NBYTES 550 - EXC( LDREST t3, REST(0)(src), l_exc_copy) 550 + EXC( LDREST t3, REST(0)(src), .Ll_exc_copy) 551 551 SUB t2, t2, t1 # t2 = number of bytes copied 552 552 xor match, t0, t1 553 - EXC( STFIRST t3, FIRST(0)(dst), s_exc) 553 + EXC( STFIRST t3, FIRST(0)(dst), .Ls_exc) 554 554 SLL t4, t1, 3 # t4 = number of bits to discard 555 555 SHIFT_DISCARD t3, t3, t4 556 556 /* no SHIFT_DISCARD_REVERT to handle odd buffer properly */ 557 557 ADDC(sum, t3) 558 - beq len, t2, done 558 + beq len, t2, .Ldone 559 559 SUB len, len, t2 560 560 ADD dst, dst, t2 561 - beqz match, both_aligned 561 + beqz match, .Lboth_aligned 562 562 ADD src, src, t2 563 563 564 - src_unaligned_dst_aligned: 564 + .Lsrc_unaligned_dst_aligned: 565 565 SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter 566 - beqz t0, cleanup_src_unaligned 566 + beqz t0, .Lcleanup_src_unaligned 567 567 and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES 568 568 1: 569 569 /* ··· 572 572 * It's OK to load FIRST(N+1) before REST(N) because the two addresses 573 573 * are to the same unit (unless src is aligned, but it's not). 
574 574 */ 575 - EXC( LDFIRST t0, FIRST(0)(src), l_exc) 576 - EXC( LDFIRST t1, FIRST(1)(src), l_exc_copy) 575 + EXC( LDFIRST t0, FIRST(0)(src), .Ll_exc) 576 + EXC( LDFIRST t1, FIRST(1)(src), .Ll_exc_copy) 577 577 SUB len, len, 4*NBYTES 578 - EXC( LDREST t0, REST(0)(src), l_exc_copy) 579 - EXC( LDREST t1, REST(1)(src), l_exc_copy) 580 - EXC( LDFIRST t2, FIRST(2)(src), l_exc_copy) 581 - EXC( LDFIRST t3, FIRST(3)(src), l_exc_copy) 582 - EXC( LDREST t2, REST(2)(src), l_exc_copy) 583 - EXC( LDREST t3, REST(3)(src), l_exc_copy) 578 + EXC( LDREST t0, REST(0)(src), .Ll_exc_copy) 579 + EXC( LDREST t1, REST(1)(src), .Ll_exc_copy) 580 + EXC( LDFIRST t2, FIRST(2)(src), .Ll_exc_copy) 581 + EXC( LDFIRST t3, FIRST(3)(src), .Ll_exc_copy) 582 + EXC( LDREST t2, REST(2)(src), .Ll_exc_copy) 583 + EXC( LDREST t3, REST(3)(src), .Ll_exc_copy) 584 584 ADD src, src, 4*NBYTES 585 585 #ifdef CONFIG_CPU_SB1 586 586 nop # improves slotting 587 587 #endif 588 - EXC( STORE t0, UNIT(0)(dst), s_exc) 588 + EXC( STORE t0, UNIT(0)(dst), .Ls_exc) 589 589 ADDC(sum, t0) 590 - EXC( STORE t1, UNIT(1)(dst), s_exc) 590 + EXC( STORE t1, UNIT(1)(dst), .Ls_exc) 591 591 ADDC(sum, t1) 592 - EXC( STORE t2, UNIT(2)(dst), s_exc) 592 + EXC( STORE t2, UNIT(2)(dst), .Ls_exc) 593 593 ADDC(sum, t2) 594 - EXC( STORE t3, UNIT(3)(dst), s_exc) 594 + EXC( STORE t3, UNIT(3)(dst), .Ls_exc) 595 595 ADDC(sum, t3) 596 596 .set reorder /* DADDI_WAR */ 597 597 ADD dst, dst, 4*NBYTES 598 598 bne len, rem, 1b 599 599 .set noreorder 600 600 601 - cleanup_src_unaligned: 602 - beqz len, done 601 + .Lcleanup_src_unaligned: 602 + beqz len, .Ldone 603 603 and rem, len, NBYTES-1 # rem = len % NBYTES 604 - beq rem, len, copy_bytes 604 + beq rem, len, .Lcopy_bytes 605 605 nop 606 606 1: 607 - EXC( LDFIRST t0, FIRST(0)(src), l_exc) 608 - EXC( LDREST t0, REST(0)(src), l_exc_copy) 607 + EXC( LDFIRST t0, FIRST(0)(src), .Ll_exc) 608 + EXC( LDREST t0, REST(0)(src), .Ll_exc_copy) 609 609 ADD src, src, NBYTES 610 610 SUB len, len, NBYTES 611 - EXC( STORE t0, 0(dst), s_exc) 611 + EXC( STORE t0, 0(dst), .Ls_exc) 612 612 ADDC(sum, t0) 613 613 .set reorder /* DADDI_WAR */ 614 614 ADD dst, dst, NBYTES 615 615 bne len, rem, 1b 616 616 .set noreorder 617 617 618 - copy_bytes_checklen: 619 - beqz len, done 618 + .Lcopy_bytes_checklen: 619 + beqz len, .Ldone 620 620 nop 621 - copy_bytes: 621 + .Lcopy_bytes: 622 622 /* 0 < len < NBYTES */ 623 623 #ifdef CONFIG_CPU_LITTLE_ENDIAN 624 624 #define SHIFT_START 0 ··· 629 629 #endif 630 630 move t2, zero # partial word 631 631 li t3, SHIFT_START # shift 632 - /* use l_exc_copy here to return correct sum on fault */ 632 + /* use .Ll_exc_copy here to return correct sum on fault */ 633 633 #define COPY_BYTE(N) \ 634 - EXC( lbu t0, N(src), l_exc_copy); \ 634 + EXC( lbu t0, N(src), .Ll_exc_copy); \ 635 635 SUB len, len, 1; \ 636 - EXC( sb t0, N(dst), s_exc); \ 636 + EXC( sb t0, N(dst), .Ls_exc); \ 637 637 SLLV t0, t0, t3; \ 638 638 addu t3, SHIFT_INC; \ 639 - beqz len, copy_bytes_done; \ 639 + beqz len, .Lcopy_bytes_done; \ 640 640 or t2, t0 641 641 642 642 COPY_BYTE(0) ··· 647 647 COPY_BYTE(4) 648 648 COPY_BYTE(5) 649 649 #endif 650 - EXC( lbu t0, NBYTES-2(src), l_exc_copy) 650 + EXC( lbu t0, NBYTES-2(src), .Ll_exc_copy) 651 651 SUB len, len, 1 652 - EXC( sb t0, NBYTES-2(dst), s_exc) 652 + EXC( sb t0, NBYTES-2(dst), .Ls_exc) 653 653 SLLV t0, t0, t3 654 654 or t2, t0 655 - copy_bytes_done: 655 + .Lcopy_bytes_done: 656 656 ADDC(sum, t2) 657 - done: 657 + .Ldone: 658 658 /* fold checksum */ 659 659 .set push 660 660 .set noat ··· 685 685 jr ra 686 
686 .set noreorder 687 687 688 - l_exc_copy: 688 + .Ll_exc_copy: 689 689 /* 690 690 * Copy bytes from src until faulting load address (or until a 691 691 * lb faults) ··· 700 700 li t2, SHIFT_START 701 701 LOAD t0, THREAD_BUADDR(t0) 702 702 1: 703 - EXC( lbu t1, 0(src), l_exc) 703 + EXC( lbu t1, 0(src), .Ll_exc) 704 704 ADD src, src, 1 705 705 sb t1, 0(dst) # can't fault -- we're copy_from_user 706 706 SLLV t1, t1, t2 ··· 710 710 ADD dst, dst, 1 711 711 bne src, t0, 1b 712 712 .set noreorder 713 - l_exc: 713 + .Ll_exc: 714 714 LOAD t0, TI_TASK($28) 715 715 nop 716 716 LOAD t0, THREAD_BUADDR(t0) # t0 is just past last good address ··· 729 729 */ 730 730 .set reorder /* DADDI_WAR */ 731 731 SUB src, len, 1 732 - beqz len, done 732 + beqz len, .Ldone 733 733 .set noreorder 734 734 1: sb zero, 0(dst) 735 735 ADD dst, dst, 1 ··· 744 744 SUB src, src, v1 745 745 #endif 746 746 li v1, -EFAULT 747 - b done 747 + b .Ldone 748 748 sw v1, (errptr) 749 749 750 - s_exc: 750 + .Ls_exc: 751 751 li v0, -1 /* invalid checksum */ 752 752 li v1, -EFAULT 753 753 jr ra
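
A note on the EXC() annotations that dominate the hunks above: each use
emits the access instruction together with an __ex_table entry pairing that
instruction's address with a fixup label, so a fault during the user access
makes the kernel's exception handler branch to the named label instead of
oopsing. The shape is roughly the following (a hedged sketch of the pattern;
the actual macro in these files may differ in detail):

#define EXC(insn, reg, addr, handler)			\
9:	insn	reg, addr;				\
	.section __ex_table,"a";			\
	PTR	9b, handler;				\
	.previous

Since the handler reference is resolved within the same object file, a
".L"-prefixed name works in the __ex_table entry exactly as an ordinary
label did; the only change is that the name no longer appears in the symbol
table. That is why l_exc, l_exc_copy, s_exc and friends can be renamed
wholesale.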
+58 -58
arch/mips/lib/memcpy-inatomic.S
··· 209 209 and t1, dst, ADDRMASK 210 210 PREF( 0, 1*32(src) ) 211 211 PREF( 1, 1*32(dst) ) 212 - bnez t2, copy_bytes_checklen 212 + bnez t2, .Lcopy_bytes_checklen 213 213 and t0, src, ADDRMASK 214 214 PREF( 0, 2*32(src) ) 215 215 PREF( 1, 2*32(dst) ) 216 - bnez t1, dst_unaligned 216 + bnez t1, .Ldst_unaligned 217 217 nop 218 - bnez t0, src_unaligned_dst_aligned 218 + bnez t0, .Lsrc_unaligned_dst_aligned 219 219 /* 220 220 * use delay slot for fall-through 221 221 * src and dst are aligned; need to compute rem 222 222 */ 223 - both_aligned: 224 - SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter 225 - beqz t0, cleanup_both_aligned # len < 8*NBYTES 226 - and rem, len, (8*NBYTES-1) # rem = len % (8*NBYTES) 223 + .Lboth_aligned: 224 + SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter 225 + beqz t0, .Lcleanup_both_aligned # len < 8*NBYTES 226 + and rem, len, (8*NBYTES-1) # rem = len % (8*NBYTES) 227 227 PREF( 0, 3*32(src) ) 228 228 PREF( 1, 3*32(dst) ) 229 229 .align 4 230 230 1: 231 - EXC( LOAD t0, UNIT(0)(src), l_exc) 232 - EXC( LOAD t1, UNIT(1)(src), l_exc_copy) 233 - EXC( LOAD t2, UNIT(2)(src), l_exc_copy) 234 - EXC( LOAD t3, UNIT(3)(src), l_exc_copy) 231 + EXC( LOAD t0, UNIT(0)(src), .Ll_exc) 232 + EXC( LOAD t1, UNIT(1)(src), .Ll_exc_copy) 233 + EXC( LOAD t2, UNIT(2)(src), .Ll_exc_copy) 234 + EXC( LOAD t3, UNIT(3)(src), .Ll_exc_copy) 235 235 SUB len, len, 8*NBYTES 236 - EXC( LOAD t4, UNIT(4)(src), l_exc_copy) 237 - EXC( LOAD t7, UNIT(5)(src), l_exc_copy) 236 + EXC( LOAD t4, UNIT(4)(src), .Ll_exc_copy) 237 + EXC( LOAD t7, UNIT(5)(src), .Ll_exc_copy) 238 238 STORE t0, UNIT(0)(dst) 239 239 STORE t1, UNIT(1)(dst) 240 - EXC( LOAD t0, UNIT(6)(src), l_exc_copy) 241 - EXC( LOAD t1, UNIT(7)(src), l_exc_copy) 240 + EXC( LOAD t0, UNIT(6)(src), .Ll_exc_copy) 241 + EXC( LOAD t1, UNIT(7)(src), .Ll_exc_copy) 242 242 ADD src, src, 8*NBYTES 243 243 ADD dst, dst, 8*NBYTES 244 244 STORE t2, UNIT(-6)(dst) ··· 255 255 /* 256 256 * len == rem == the number of bytes left to copy < 8*NBYTES 257 257 */ 258 - cleanup_both_aligned: 259 - beqz len, done 258 + .Lcleanup_both_aligned: 259 + beqz len, .Ldone 260 260 sltu t0, len, 4*NBYTES 261 - bnez t0, less_than_4units 261 + bnez t0, .Lless_than_4units 262 262 and rem, len, (NBYTES-1) # rem = len % NBYTES 263 263 /* 264 264 * len >= 4*NBYTES 265 265 */ 266 - EXC( LOAD t0, UNIT(0)(src), l_exc) 267 - EXC( LOAD t1, UNIT(1)(src), l_exc_copy) 268 - EXC( LOAD t2, UNIT(2)(src), l_exc_copy) 269 - EXC( LOAD t3, UNIT(3)(src), l_exc_copy) 266 + EXC( LOAD t0, UNIT(0)(src), .Ll_exc) 267 + EXC( LOAD t1, UNIT(1)(src), .Ll_exc_copy) 268 + EXC( LOAD t2, UNIT(2)(src), .Ll_exc_copy) 269 + EXC( LOAD t3, UNIT(3)(src), .Ll_exc_copy) 270 270 SUB len, len, 4*NBYTES 271 271 ADD src, src, 4*NBYTES 272 272 STORE t0, UNIT(0)(dst) ··· 275 275 STORE t3, UNIT(3)(dst) 276 276 .set reorder /* DADDI_WAR */ 277 277 ADD dst, dst, 4*NBYTES 278 - beqz len, done 278 + beqz len, .Ldone 279 279 .set noreorder 280 - less_than_4units: 280 + .Lless_than_4units: 281 281 /* 282 282 * rem = len % NBYTES 283 283 */ 284 - beq rem, len, copy_bytes 284 + beq rem, len, .Lcopy_bytes 285 285 nop 286 286 1: 287 - EXC( LOAD t0, 0(src), l_exc) 287 + EXC( LOAD t0, 0(src), .Ll_exc) 288 288 ADD src, src, NBYTES 289 289 SUB len, len, NBYTES 290 290 STORE t0, 0(dst) ··· 305 305 * more instruction-level parallelism. 
306 306 */ 307 307 #define bits t2 308 - beqz len, done 308 + beqz len, .Ldone 309 309 ADD t1, dst, len # t1 is just past last byte of dst 310 310 li bits, 8*NBYTES 311 311 SLL rem, len, 3 # rem = number of bits to keep 312 - EXC( LOAD t0, 0(src), l_exc) 312 + EXC( LOAD t0, 0(src), .Ll_exc) 313 313 SUB bits, bits, rem # bits = number of bits to discard 314 314 SHIFT_DISCARD t0, t0, bits 315 315 STREST t0, -1(t1) 316 316 jr ra 317 317 move len, zero 318 - dst_unaligned: 318 + .Ldst_unaligned: 319 319 /* 320 320 * dst is unaligned 321 321 * t0 = src & ADDRMASK ··· 326 326 * Set match = (src and dst have same alignment) 327 327 */ 328 328 #define match rem 329 - EXC( LDFIRST t3, FIRST(0)(src), l_exc) 329 + EXC( LDFIRST t3, FIRST(0)(src), .Ll_exc) 330 330 ADD t2, zero, NBYTES 331 - EXC( LDREST t3, REST(0)(src), l_exc_copy) 331 + EXC( LDREST t3, REST(0)(src), .Ll_exc_copy) 332 332 SUB t2, t2, t1 # t2 = number of bytes copied 333 333 xor match, t0, t1 334 334 STFIRST t3, FIRST(0)(dst) 335 - beq len, t2, done 335 + beq len, t2, .Ldone 336 336 SUB len, len, t2 337 337 ADD dst, dst, t2 338 - beqz match, both_aligned 338 + beqz match, .Lboth_aligned 339 339 ADD src, src, t2 340 340 341 - src_unaligned_dst_aligned: 341 + .Lsrc_unaligned_dst_aligned: 342 342 SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter 343 343 PREF( 0, 3*32(src) ) 344 - beqz t0, cleanup_src_unaligned 344 + beqz t0, .Lcleanup_src_unaligned 345 345 and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES 346 346 PREF( 1, 3*32(dst) ) 347 347 1: ··· 351 351 * It's OK to load FIRST(N+1) before REST(N) because the two addresses 352 352 * are to the same unit (unless src is aligned, but it's not). 353 353 */ 354 - EXC( LDFIRST t0, FIRST(0)(src), l_exc) 355 - EXC( LDFIRST t1, FIRST(1)(src), l_exc_copy) 354 + EXC( LDFIRST t0, FIRST(0)(src), .Ll_exc) 355 + EXC( LDFIRST t1, FIRST(1)(src), .Ll_exc_copy) 356 356 SUB len, len, 4*NBYTES 357 - EXC( LDREST t0, REST(0)(src), l_exc_copy) 358 - EXC( LDREST t1, REST(1)(src), l_exc_copy) 359 - EXC( LDFIRST t2, FIRST(2)(src), l_exc_copy) 360 - EXC( LDFIRST t3, FIRST(3)(src), l_exc_copy) 361 - EXC( LDREST t2, REST(2)(src), l_exc_copy) 362 - EXC( LDREST t3, REST(3)(src), l_exc_copy) 357 + EXC( LDREST t0, REST(0)(src), .Ll_exc_copy) 358 + EXC( LDREST t1, REST(1)(src), .Ll_exc_copy) 359 + EXC( LDFIRST t2, FIRST(2)(src), .Ll_exc_copy) 360 + EXC( LDFIRST t3, FIRST(3)(src), .Ll_exc_copy) 361 + EXC( LDREST t2, REST(2)(src), .Ll_exc_copy) 362 + EXC( LDREST t3, REST(3)(src), .Ll_exc_copy) 363 363 PREF( 0, 9*32(src) ) # 0 is PREF_LOAD (not streamed) 364 364 ADD src, src, 4*NBYTES 365 365 #ifdef CONFIG_CPU_SB1 ··· 375 375 bne len, rem, 1b 376 376 .set noreorder 377 377 378 - cleanup_src_unaligned: 379 - beqz len, done 378 + .Lcleanup_src_unaligned: 379 + beqz len, .Ldone 380 380 and rem, len, NBYTES-1 # rem = len % NBYTES 381 - beq rem, len, copy_bytes 381 + beq rem, len, .Lcopy_bytes 382 382 nop 383 383 1: 384 - EXC( LDFIRST t0, FIRST(0)(src), l_exc) 385 - EXC( LDREST t0, REST(0)(src), l_exc_copy) 384 + EXC( LDFIRST t0, FIRST(0)(src), .Ll_exc) 385 + EXC( LDREST t0, REST(0)(src), .Ll_exc_copy) 386 386 ADD src, src, NBYTES 387 387 SUB len, len, NBYTES 388 388 STORE t0, 0(dst) ··· 391 391 bne len, rem, 1b 392 392 .set noreorder 393 393 394 - copy_bytes_checklen: 395 - beqz len, done 394 + .Lcopy_bytes_checklen: 395 + beqz len, .Ldone 396 396 nop 397 - copy_bytes: 397 + .Lcopy_bytes: 398 398 /* 0 < len < NBYTES */ 399 399 #define COPY_BYTE(N) \ 400 - EXC( lb t0, N(src), l_exc); \ 400 + EXC( lb t0, N(src), .Ll_exc); \ 401 401 
SUB len, len, 1; \ 402 - beqz len, done; \ 402 + beqz len, .Ldone; \ 403 403 sb t0, N(dst) 404 404 405 405 COPY_BYTE(0) ··· 410 410 COPY_BYTE(4) 411 411 COPY_BYTE(5) 412 412 #endif 413 - EXC( lb t0, NBYTES-2(src), l_exc) 413 + EXC( lb t0, NBYTES-2(src), .Ll_exc) 414 414 SUB len, len, 1 415 415 jr ra 416 416 sb t0, NBYTES-2(dst) 417 - done: 417 + .Ldone: 418 418 jr ra 419 419 nop 420 420 END(__copy_user_inatomic) 421 421 422 - l_exc_copy: 422 + .Ll_exc_copy: 423 423 /* 424 424 * Copy bytes from src until faulting load address (or until a 425 425 * lb faults) ··· 434 434 nop 435 435 LOAD t0, THREAD_BUADDR(t0) 436 436 1: 437 - EXC( lb t1, 0(src), l_exc) 437 + EXC( lb t1, 0(src), .Ll_exc) 438 438 ADD src, src, 1 439 439 sb t1, 0(dst) # can't fault -- we're copy_from_user 440 440 .set reorder /* DADDI_WAR */ 441 441 ADD dst, dst, 1 442 442 bne src, t0, 1b 443 443 .set noreorder 444 - l_exc: 444 + .Ll_exc: 445 445 LOAD t0, TI_TASK($28) 446 446 nop 447 447 LOAD t0, THREAD_BUADDR(t0) # t0 is just past last good address
+91 -91
arch/mips/lib/memcpy.S
··· 191 191 .align 5 192 192 LEAF(memcpy) /* a0=dst a1=src a2=len */ 193 193 move v0, dst /* return value */ 194 - __memcpy: 194 + .L__memcpy: 195 195 FEXPORT(__copy_user) 196 196 /* 197 197 * Note: dst & src may be unaligned, len may be 0 ··· 213 213 and t1, dst, ADDRMASK 214 214 PREF( 0, 1*32(src) ) 215 215 PREF( 1, 1*32(dst) ) 216 - bnez t2, copy_bytes_checklen 216 + bnez t2, .Lcopy_bytes_checklen 217 217 and t0, src, ADDRMASK 218 218 PREF( 0, 2*32(src) ) 219 219 PREF( 1, 2*32(dst) ) 220 - bnez t1, dst_unaligned 220 + bnez t1, .Ldst_unaligned 221 221 nop 222 - bnez t0, src_unaligned_dst_aligned 222 + bnez t0, .Lsrc_unaligned_dst_aligned 223 223 /* 224 224 * use delay slot for fall-through 225 225 * src and dst are aligned; need to compute rem 226 226 */ 227 - both_aligned: 227 + .Lboth_aligned: 228 228 SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter 229 - beqz t0, cleanup_both_aligned # len < 8*NBYTES 229 + beqz t0, .Lcleanup_both_aligned # len < 8*NBYTES 230 230 and rem, len, (8*NBYTES-1) # rem = len % (8*NBYTES) 231 231 PREF( 0, 3*32(src) ) 232 232 PREF( 1, 3*32(dst) ) 233 233 .align 4 234 234 1: 235 235 R10KCBARRIER(0(ra)) 236 - EXC( LOAD t0, UNIT(0)(src), l_exc) 237 - EXC( LOAD t1, UNIT(1)(src), l_exc_copy) 238 - EXC( LOAD t2, UNIT(2)(src), l_exc_copy) 239 - EXC( LOAD t3, UNIT(3)(src), l_exc_copy) 236 + EXC( LOAD t0, UNIT(0)(src), .Ll_exc) 237 + EXC( LOAD t1, UNIT(1)(src), .Ll_exc_copy) 238 + EXC( LOAD t2, UNIT(2)(src), .Ll_exc_copy) 239 + EXC( LOAD t3, UNIT(3)(src), .Ll_exc_copy) 240 240 SUB len, len, 8*NBYTES 241 - EXC( LOAD t4, UNIT(4)(src), l_exc_copy) 242 - EXC( LOAD t7, UNIT(5)(src), l_exc_copy) 243 - EXC( STORE t0, UNIT(0)(dst), s_exc_p8u) 244 - EXC( STORE t1, UNIT(1)(dst), s_exc_p7u) 245 - EXC( LOAD t0, UNIT(6)(src), l_exc_copy) 246 - EXC( LOAD t1, UNIT(7)(src), l_exc_copy) 241 + EXC( LOAD t4, UNIT(4)(src), .Ll_exc_copy) 242 + EXC( LOAD t7, UNIT(5)(src), .Ll_exc_copy) 243 + EXC( STORE t0, UNIT(0)(dst), .Ls_exc_p8u) 244 + EXC( STORE t1, UNIT(1)(dst), .Ls_exc_p7u) 245 + EXC( LOAD t0, UNIT(6)(src), .Ll_exc_copy) 246 + EXC( LOAD t1, UNIT(7)(src), .Ll_exc_copy) 247 247 ADD src, src, 8*NBYTES 248 248 ADD dst, dst, 8*NBYTES 249 - EXC( STORE t2, UNIT(-6)(dst), s_exc_p6u) 250 - EXC( STORE t3, UNIT(-5)(dst), s_exc_p5u) 251 - EXC( STORE t4, UNIT(-4)(dst), s_exc_p4u) 252 - EXC( STORE t7, UNIT(-3)(dst), s_exc_p3u) 253 - EXC( STORE t0, UNIT(-2)(dst), s_exc_p2u) 254 - EXC( STORE t1, UNIT(-1)(dst), s_exc_p1u) 249 + EXC( STORE t2, UNIT(-6)(dst), .Ls_exc_p6u) 250 + EXC( STORE t3, UNIT(-5)(dst), .Ls_exc_p5u) 251 + EXC( STORE t4, UNIT(-4)(dst), .Ls_exc_p4u) 252 + EXC( STORE t7, UNIT(-3)(dst), .Ls_exc_p3u) 253 + EXC( STORE t0, UNIT(-2)(dst), .Ls_exc_p2u) 254 + EXC( STORE t1, UNIT(-1)(dst), .Ls_exc_p1u) 255 255 PREF( 0, 8*32(src) ) 256 256 PREF( 1, 8*32(dst) ) 257 257 bne len, rem, 1b ··· 260 260 /* 261 261 * len == rem == the number of bytes left to copy < 8*NBYTES 262 262 */ 263 - cleanup_both_aligned: 264 - beqz len, done 263 + .Lcleanup_both_aligned: 264 + beqz len, .Ldone 265 265 sltu t0, len, 4*NBYTES 266 - bnez t0, less_than_4units 266 + bnez t0, .Lless_than_4units 267 267 and rem, len, (NBYTES-1) # rem = len % NBYTES 268 268 /* 269 269 * len >= 4*NBYTES 270 270 */ 271 - EXC( LOAD t0, UNIT(0)(src), l_exc) 272 - EXC( LOAD t1, UNIT(1)(src), l_exc_copy) 273 - EXC( LOAD t2, UNIT(2)(src), l_exc_copy) 274 - EXC( LOAD t3, UNIT(3)(src), l_exc_copy) 271 + EXC( LOAD t0, UNIT(0)(src), .Ll_exc) 272 + EXC( LOAD t1, UNIT(1)(src), .Ll_exc_copy) 273 + EXC( LOAD t2, UNIT(2)(src), .Ll_exc_copy) 274 + EXC( 
LOAD t3, UNIT(3)(src), .Ll_exc_copy) 275 275 SUB len, len, 4*NBYTES 276 276 ADD src, src, 4*NBYTES 277 277 R10KCBARRIER(0(ra)) 278 - EXC( STORE t0, UNIT(0)(dst), s_exc_p4u) 279 - EXC( STORE t1, UNIT(1)(dst), s_exc_p3u) 280 - EXC( STORE t2, UNIT(2)(dst), s_exc_p2u) 281 - EXC( STORE t3, UNIT(3)(dst), s_exc_p1u) 278 + EXC( STORE t0, UNIT(0)(dst), .Ls_exc_p4u) 279 + EXC( STORE t1, UNIT(1)(dst), .Ls_exc_p3u) 280 + EXC( STORE t2, UNIT(2)(dst), .Ls_exc_p2u) 281 + EXC( STORE t3, UNIT(3)(dst), .Ls_exc_p1u) 282 282 .set reorder /* DADDI_WAR */ 283 283 ADD dst, dst, 4*NBYTES 284 - beqz len, done 284 + beqz len, .Ldone 285 285 .set noreorder 286 - less_than_4units: 286 + .Lless_than_4units: 287 287 /* 288 288 * rem = len % NBYTES 289 289 */ 290 - beq rem, len, copy_bytes 290 + beq rem, len, .Lcopy_bytes 291 291 nop 292 292 1: 293 293 R10KCBARRIER(0(ra)) 294 - EXC( LOAD t0, 0(src), l_exc) 294 + EXC( LOAD t0, 0(src), .Ll_exc) 295 295 ADD src, src, NBYTES 296 296 SUB len, len, NBYTES 297 - EXC( STORE t0, 0(dst), s_exc_p1u) 297 + EXC( STORE t0, 0(dst), .Ls_exc_p1u) 298 298 .set reorder /* DADDI_WAR */ 299 299 ADD dst, dst, NBYTES 300 300 bne rem, len, 1b ··· 312 312 * more instruction-level parallelism. 313 313 */ 314 314 #define bits t2 315 - beqz len, done 315 + beqz len, .Ldone 316 316 ADD t1, dst, len # t1 is just past last byte of dst 317 317 li bits, 8*NBYTES 318 318 SLL rem, len, 3 # rem = number of bits to keep 319 - EXC( LOAD t0, 0(src), l_exc) 319 + EXC( LOAD t0, 0(src), .Ll_exc) 320 320 SUB bits, bits, rem # bits = number of bits to discard 321 321 SHIFT_DISCARD t0, t0, bits 322 - EXC( STREST t0, -1(t1), s_exc) 322 + EXC( STREST t0, -1(t1), .Ls_exc) 323 323 jr ra 324 324 move len, zero 325 - dst_unaligned: 325 + .Ldst_unaligned: 326 326 /* 327 327 * dst is unaligned 328 328 * t0 = src & ADDRMASK ··· 333 333 * Set match = (src and dst have same alignment) 334 334 */ 335 335 #define match rem 336 - EXC( LDFIRST t3, FIRST(0)(src), l_exc) 336 + EXC( LDFIRST t3, FIRST(0)(src), .Ll_exc) 337 337 ADD t2, zero, NBYTES 338 - EXC( LDREST t3, REST(0)(src), l_exc_copy) 338 + EXC( LDREST t3, REST(0)(src), .Ll_exc_copy) 339 339 SUB t2, t2, t1 # t2 = number of bytes copied 340 340 xor match, t0, t1 341 341 R10KCBARRIER(0(ra)) 342 - EXC( STFIRST t3, FIRST(0)(dst), s_exc) 343 - beq len, t2, done 342 + EXC( STFIRST t3, FIRST(0)(dst), .Ls_exc) 343 + beq len, t2, .Ldone 344 344 SUB len, len, t2 345 345 ADD dst, dst, t2 346 - beqz match, both_aligned 346 + beqz match, .Lboth_aligned 347 347 ADD src, src, t2 348 348 349 - src_unaligned_dst_aligned: 349 + .Lsrc_unaligned_dst_aligned: 350 350 SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter 351 351 PREF( 0, 3*32(src) ) 352 - beqz t0, cleanup_src_unaligned 352 + beqz t0, .Lcleanup_src_unaligned 353 353 and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES 354 354 PREF( 1, 3*32(dst) ) 355 355 1: ··· 360 360 * are to the same unit (unless src is aligned, but it's not). 
361 361 */ 362 362 R10KCBARRIER(0(ra)) 363 - EXC( LDFIRST t0, FIRST(0)(src), l_exc) 364 - EXC( LDFIRST t1, FIRST(1)(src), l_exc_copy) 363 + EXC( LDFIRST t0, FIRST(0)(src), .Ll_exc) 364 + EXC( LDFIRST t1, FIRST(1)(src), .Ll_exc_copy) 365 365 SUB len, len, 4*NBYTES 366 - EXC( LDREST t0, REST(0)(src), l_exc_copy) 367 - EXC( LDREST t1, REST(1)(src), l_exc_copy) 368 - EXC( LDFIRST t2, FIRST(2)(src), l_exc_copy) 369 - EXC( LDFIRST t3, FIRST(3)(src), l_exc_copy) 370 - EXC( LDREST t2, REST(2)(src), l_exc_copy) 371 - EXC( LDREST t3, REST(3)(src), l_exc_copy) 366 + EXC( LDREST t0, REST(0)(src), .Ll_exc_copy) 367 + EXC( LDREST t1, REST(1)(src), .Ll_exc_copy) 368 + EXC( LDFIRST t2, FIRST(2)(src), .Ll_exc_copy) 369 + EXC( LDFIRST t3, FIRST(3)(src), .Ll_exc_copy) 370 + EXC( LDREST t2, REST(2)(src), .Ll_exc_copy) 371 + EXC( LDREST t3, REST(3)(src), .Ll_exc_copy) 372 372 PREF( 0, 9*32(src) ) # 0 is PREF_LOAD (not streamed) 373 373 ADD src, src, 4*NBYTES 374 374 #ifdef CONFIG_CPU_SB1 375 375 nop # improves slotting 376 376 #endif 377 - EXC( STORE t0, UNIT(0)(dst), s_exc_p4u) 378 - EXC( STORE t1, UNIT(1)(dst), s_exc_p3u) 379 - EXC( STORE t2, UNIT(2)(dst), s_exc_p2u) 380 - EXC( STORE t3, UNIT(3)(dst), s_exc_p1u) 377 + EXC( STORE t0, UNIT(0)(dst), .Ls_exc_p4u) 378 + EXC( STORE t1, UNIT(1)(dst), .Ls_exc_p3u) 379 + EXC( STORE t2, UNIT(2)(dst), .Ls_exc_p2u) 380 + EXC( STORE t3, UNIT(3)(dst), .Ls_exc_p1u) 381 381 PREF( 1, 9*32(dst) ) # 1 is PREF_STORE (not streamed) 382 382 .set reorder /* DADDI_WAR */ 383 383 ADD dst, dst, 4*NBYTES 384 384 bne len, rem, 1b 385 385 .set noreorder 386 386 387 - cleanup_src_unaligned: 388 - beqz len, done 387 + .Lcleanup_src_unaligned: 388 + beqz len, .Ldone 389 389 and rem, len, NBYTES-1 # rem = len % NBYTES 390 - beq rem, len, copy_bytes 390 + beq rem, len, .Lcopy_bytes 391 391 nop 392 392 1: 393 393 R10KCBARRIER(0(ra)) 394 - EXC( LDFIRST t0, FIRST(0)(src), l_exc) 395 - EXC( LDREST t0, REST(0)(src), l_exc_copy) 394 + EXC( LDFIRST t0, FIRST(0)(src), .Ll_exc) 395 + EXC( LDREST t0, REST(0)(src), .Ll_exc_copy) 396 396 ADD src, src, NBYTES 397 397 SUB len, len, NBYTES 398 - EXC( STORE t0, 0(dst), s_exc_p1u) 398 + EXC( STORE t0, 0(dst), .Ls_exc_p1u) 399 399 .set reorder /* DADDI_WAR */ 400 400 ADD dst, dst, NBYTES 401 401 bne len, rem, 1b 402 402 .set noreorder 403 403 404 - copy_bytes_checklen: 405 - beqz len, done 404 + .Lcopy_bytes_checklen: 405 + beqz len, .Ldone 406 406 nop 407 - copy_bytes: 407 + .Lcopy_bytes: 408 408 /* 0 < len < NBYTES */ 409 409 R10KCBARRIER(0(ra)) 410 410 #define COPY_BYTE(N) \ 411 - EXC( lb t0, N(src), l_exc); \ 411 + EXC( lb t0, N(src), .Ll_exc); \ 412 412 SUB len, len, 1; \ 413 - beqz len, done; \ 414 - EXC( sb t0, N(dst), s_exc_p1) 413 + beqz len, .Ldone; \ 414 + EXC( sb t0, N(dst), .Ls_exc_p1) 415 415 416 416 COPY_BYTE(0) 417 417 COPY_BYTE(1) ··· 421 421 COPY_BYTE(4) 422 422 COPY_BYTE(5) 423 423 #endif 424 - EXC( lb t0, NBYTES-2(src), l_exc) 424 + EXC( lb t0, NBYTES-2(src), .Ll_exc) 425 425 SUB len, len, 1 426 426 jr ra 427 - EXC( sb t0, NBYTES-2(dst), s_exc_p1) 428 - done: 427 + EXC( sb t0, NBYTES-2(dst), .Ls_exc_p1) 428 + .Ldone: 429 429 jr ra 430 430 nop 431 431 END(memcpy) 432 432 433 - l_exc_copy: 433 + .Ll_exc_copy: 434 434 /* 435 435 * Copy bytes from src until faulting load address (or until a 436 436 * lb faults) ··· 445 445 nop 446 446 LOAD t0, THREAD_BUADDR(t0) 447 447 1: 448 - EXC( lb t1, 0(src), l_exc) 448 + EXC( lb t1, 0(src), .Ll_exc) 449 449 ADD src, src, 1 450 450 sb t1, 0(dst) # can't fault -- we're copy_from_user 451 451 .set reorder 
/* DADDI_WAR */ 452 452 ADD dst, dst, 1 453 453 bne src, t0, 1b 454 454 .set noreorder 455 - l_exc: 455 + .Ll_exc: 456 456 LOAD t0, TI_TASK($28) 457 457 nop 458 458 LOAD t0, THREAD_BUADDR(t0) # t0 is just past last good address ··· 471 471 */ 472 472 .set reorder /* DADDI_WAR */ 473 473 SUB src, len, 1 474 - beqz len, done 474 + beqz len, .Ldone 475 475 .set noreorder 476 476 1: sb zero, 0(dst) 477 477 ADD dst, dst, 1 ··· 492 492 493 493 #define SEXC(n) \ 494 494 .set reorder; /* DADDI_WAR */ \ 495 - s_exc_p ## n ## u: \ 495 + .Ls_exc_p ## n ## u: \ 496 496 ADD len, len, n*NBYTES; \ 497 497 jr ra; \ 498 498 .set noreorder ··· 506 506 SEXC(2) 507 507 SEXC(1) 508 508 509 - s_exc_p1: 509 + .Ls_exc_p1: 510 510 .set reorder /* DADDI_WAR */ 511 511 ADD len, len, 1 512 512 jr ra 513 513 .set noreorder 514 - s_exc: 514 + .Ls_exc: 515 515 jr ra 516 516 nop 517 517 ··· 522 522 sltu t0, a1, t0 # dst + len <= src -> memcpy 523 523 sltu t1, a0, t1 # dst >= src + len -> memcpy 524 524 and t0, t1 525 - beqz t0, __memcpy 525 + beqz t0, .L__memcpy 526 526 move v0, a0 /* return value */ 527 - beqz a2, r_out 527 + beqz a2, .Lr_out 528 528 END(memmove) 529 529 530 530 /* fall through to __rmemcpy */ 531 531 LEAF(__rmemcpy) /* a0=dst a1=src a2=len */ 532 532 sltu t0, a1, a0 533 - beqz t0, r_end_bytes_up # src >= dst 533 + beqz t0, .Lr_end_bytes_up # src >= dst 534 534 nop 535 535 ADD a0, a2 # dst = dst + len 536 536 ADD a1, a2 # src = src + len 537 537 538 - r_end_bytes: 538 + .Lr_end_bytes: 539 539 R10KCBARRIER(0(ra)) 540 540 lb t0, -1(a1) 541 541 SUB a2, a2, 0x1 ··· 543 543 SUB a1, a1, 0x1 544 544 .set reorder /* DADDI_WAR */ 545 545 SUB a0, a0, 0x1 546 - bnez a2, r_end_bytes 546 + bnez a2, .Lr_end_bytes 547 547 .set noreorder 548 548 549 - r_out: 549 + .Lr_out: 550 550 jr ra 551 551 move a2, zero 552 552 553 - r_end_bytes_up: 553 + .Lr_end_bytes_up: 554 554 R10KCBARRIER(0(ra)) 555 555 lb t0, (a1) 556 556 SUB a2, a2, 0x1 ··· 558 558 ADD a1, a1, 0x1 559 559 .set reorder /* DADDI_WAR */ 560 560 ADD a0, a0, 0x1 561 - bnez a2, r_end_bytes_up 561 + bnez a2, .Lr_end_bytes_up 562 562 .set noreorder 563 563 564 564 jr ra
+14 -14
arch/mips/lib/memset.S
··· 72 72 73 73 FEXPORT(__bzero) 74 74 sltiu t0, a2, LONGSIZE /* very small region? */ 75 - bnez t0, small_memset 75 + bnez t0, .Lsmall_memset 76 76 andi t0, a0, LONGMASK /* aligned? */ 77 77 78 78 #ifndef CONFIG_CPU_DADDI_WORKAROUNDS ··· 88 88 89 89 R10KCBARRIER(0(ra)) 90 90 #ifdef __MIPSEB__ 91 - EX(LONG_S_L, a1, (a0), first_fixup) /* make word/dword aligned */ 91 + EX(LONG_S_L, a1, (a0), .Lfirst_fixup) /* make word/dword aligned */ 92 92 #endif 93 93 #ifdef __MIPSEL__ 94 - EX(LONG_S_R, a1, (a0), first_fixup) /* make word/dword aligned */ 94 + EX(LONG_S_R, a1, (a0), .Lfirst_fixup) /* make word/dword aligned */ 95 95 #endif 96 96 PTR_SUBU a0, t0 /* long align ptr */ 97 97 PTR_ADDU a2, t0 /* correct size */ 98 98 99 99 1: ori t1, a2, 0x3f /* # of full blocks */ 100 100 xori t1, 0x3f 101 - beqz t1, memset_partial /* no block to fill */ 101 + beqz t1, .Lmemset_partial /* no block to fill */ 102 102 andi t0, a2, 0x40-LONGSIZE 103 103 104 104 PTR_ADDU t1, a0 /* end address */ 105 105 .set reorder 106 106 1: PTR_ADDIU a0, 64 107 107 R10KCBARRIER(0(ra)) 108 - f_fill64 a0, -64, a1, fwd_fixup 108 + f_fill64 a0, -64, a1, .Lfwd_fixup 109 109 bne t1, a0, 1b 110 110 .set noreorder 111 111 112 - memset_partial: 112 + .Lmemset_partial: 113 113 R10KCBARRIER(0(ra)) 114 114 PTR_LA t1, 2f /* where to start */ 115 115 #if LONGSIZE == 4 ··· 126 126 .set push 127 127 .set noreorder 128 128 .set nomacro 129 - f_fill64 a0, -64, a1, partial_fixup /* ... but first do longs ... */ 129 + f_fill64 a0, -64, a1, .Lpartial_fixup /* ... but first do longs ... */ 130 130 2: .set pop 131 131 andi a2, LONGMASK /* At most one long to go */ 132 132 ··· 134 134 PTR_ADDU a0, a2 /* What's left */ 135 135 R10KCBARRIER(0(ra)) 136 136 #ifdef __MIPSEB__ 137 - EX(LONG_S_R, a1, -1(a0), last_fixup) 137 + EX(LONG_S_R, a1, -1(a0), .Llast_fixup) 138 138 #endif 139 139 #ifdef __MIPSEL__ 140 - EX(LONG_S_L, a1, -1(a0), last_fixup) 140 + EX(LONG_S_L, a1, -1(a0), .Llast_fixup) 141 141 #endif 142 142 1: jr ra 143 143 move a2, zero 144 144 145 - small_memset: 145 + .Lsmall_memset: 146 146 beqz a2, 2f 147 147 PTR_ADDU t1, a0, a2 148 148 ··· 155 155 move a2, zero 156 156 END(memset) 157 157 158 - first_fixup: 158 + .Lfirst_fixup: 159 159 jr ra 160 160 nop 161 161 162 - fwd_fixup: 162 + .Lfwd_fixup: 163 163 PTR_L t0, TI_TASK($28) 164 164 LONG_L t0, THREAD_BUADDR(t0) 165 165 andi a2, 0x3f ··· 167 167 jr ra 168 168 LONG_SUBU a2, t0 169 169 170 - partial_fixup: 170 + .Lpartial_fixup: 171 171 PTR_L t0, TI_TASK($28) 172 172 LONG_L t0, THREAD_BUADDR(t0) 173 173 andi a2, LONGMASK ··· 175 175 jr ra 176 176 LONG_SUBU a2, t0 177 177 178 - last_fixup: 178 + .Llast_fixup: 179 179 jr ra 180 180 andi v1, a2, LONGMASK
+3 -3
arch/mips/lib/strlen_user.S
··· 24 24 LEAF(__strlen_user_asm) 25 25 LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok? 26 26 and v0, a0 27 - bnez v0, fault 27 + bnez v0, .Lfault 28 28 29 29 FEXPORT(__strlen_user_nocheck_asm) 30 30 move v0, a0 31 - 1: EX(lb, t0, (v0), fault) 31 + 1: EX(lb, t0, (v0), .Lfault) 32 32 PTR_ADDIU v0, 1 33 33 bnez t0, 1b 34 34 PTR_SUBU v0, a0 35 35 jr ra 36 36 END(__strlen_user_asm) 37 37 38 - fault: move v0, zero 38 + .Lfault: move v0, zero 39 39 jr ra
+5 -5
arch/mips/lib/strncpy_user.S
··· 30 30 LEAF(__strncpy_from_user_asm) 31 31 LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok? 32 32 and v0, a1 33 - bnez v0, fault 33 + bnez v0, .Lfault 34 34 35 35 FEXPORT(__strncpy_from_user_nocheck_asm) 36 36 move v0, zero 37 37 move v1, a1 38 38 .set noreorder 39 - 1: EX(lbu, t0, (v1), fault) 39 + 1: EX(lbu, t0, (v1), .Lfault) 40 40 PTR_ADDIU v1, 1 41 41 R10KCBARRIER(0(ra)) 42 42 beqz t0, 2f ··· 47 47 bne v0, a2, 1b 48 48 2: PTR_ADDU t0, a1, v0 49 49 xor t0, a1 50 - bltz t0, fault 50 + bltz t0, .Lfault 51 51 jr ra # return n 52 52 END(__strncpy_from_user_asm) 53 53 54 - fault: li v0, -EFAULT 54 + .Lfault: li v0, -EFAULT 55 55 jr ra 56 56 57 57 .section __ex_table,"a" 58 - PTR 1b, fault 58 + PTR 1b, .Lfault 59 59 .previous
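
The strncpy_user.S hunk above shows the one place where the handler label is
referenced from a hand-written __ex_table entry (PTR 1b, .Lfault) rather
than through a macro. The rename is still safe there: the assembler resolves
the .L symbol to a section-relative offset inside the same translation unit,
so the table entry is unaffected; only the symbol-table entry disappears. A
reduced, hypothetical form of that pattern:

1:	EX(lbu, t0, (v1), .Lfault)	# annotated user-space load

.Lfault:
	li	v0, -EFAULT
	jr	ra

	.section __ex_table,"a"
	PTR	1b, .Lfault		# resolved at assembly time to
					# section + offset; no symbol
					# table entry required
	.previous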
+4 -3
arch/mips/lib/strnlen_user.S
··· 28 28 LEAF(__strnlen_user_asm) 29 29 LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok? 30 30 and v0, a0 31 - bnez v0, fault 31 + bnez v0, .Lfault 32 32 33 33 FEXPORT(__strnlen_user_nocheck_asm) 34 34 move v0, a0 35 35 PTR_ADDU a1, a0 # stop pointer 36 36 1: beq v0, a1, 1f # limit reached? 37 - EX(lb, t0, (v0), fault) 37 + EX(lb, t0, (v0), .Lfault) 38 38 PTR_ADDU v0, 1 39 39 bnez t0, 1b 40 40 1: PTR_SUBU v0, a0 41 41 jr ra 42 42 END(__strnlen_user_asm) 43 43 44 - fault: move v0, zero 44 + .Lfault: 45 + move v0, zero 45 46 jr ra