Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

[ARM] 3152/1: make various assembly local labels actually local (the rest)

Patch from Nicolas Pitre

For assembly labels to actually be local they must start with ".L" and
not only "."; otherwise they remain visible in the final link, clutter
kallsyms needlessly, and can make for unclear symbolic backtraces. This
patch simply inserts an "L" where appropriate. The code itself is
unchanged.

Signed-off-by: Nicolas Pitre <nico@cam.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
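
For illustration only (not part of the patch; the symbol names below are
made up): with the GNU assembler, a label that begins with ".L" is an
assembler-local label and is never emitted into the object file's symbol
table, whereas a label that merely begins with "." is an ordinary symbol
that stays in the symbol table and therefore in kallsyms. A minimal sketch
of the difference:

	.text
	.globl	example			@ hypothetical entry point
example:
	cmp	r0, #0
	beq	.Lskip			@ ".L" prefix: stays assembler-local
	add	r0, r0, #1
.Lskip:
	mov	pc, lr

.old_style:				@ "." alone: still a real symbol that
	mov	pc, lr			@ would end up in kallsyms/backtraces

Running nm on the assembled object would list example and .old_style but
not .Lskip, which is exactly the clutter this patch removes from the files
below.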

Authored by Nicolas Pitre, committed by Russell King (8adbb371 a9c4814d)

Diffstat: 4 files changed, +61 -59
arch/arm/lib/csumpartial.S (+14 -14)
···
 td2		.req	r5		@ save before use
 td3		.req	lr

-.zero:		mov	r0, sum
+.Lzero:	mov	r0, sum
 		add	sp, sp, #4
 		ldr	pc, [sp], #4

···
  * Handle 0 to 7 bytes, with any alignment of source and
  * destination pointers.  Note that when we get here, C = 0
  */
-.less8:	teq	len, #0			@ check for zero count
-		beq	.zero
+.Lless8:	teq	len, #0			@ check for zero count
+		beq	.Lzero

 /* we must have at least one byte. */
 		tst	buf, #1			@ odd address?
···
 		subne	len, len, #1
 		adcnes	sum, sum, td0, put_byte_1

-.less4:	tst	len, #6
-		beq	.less8_byte
+.Lless4:	tst	len, #6
+		beq	.Lless8_byte

 /* we are now half-word aligned */

-.less8_wordlp:
+.Lless8_wordlp:
 #if __LINUX_ARM_ARCH__ >= 4
 		ldrh	td0, [buf], #2
 		sub	len, len, #2
···
 #endif
 		adcs	sum, sum, td0
 		tst	len, #6
-		bne	.less8_wordlp
+		bne	.Lless8_wordlp

-.less8_byte:	tst	len, #1			@ odd number of bytes
+.Lless8_byte:	tst	len, #1			@ odd number of bytes
 		ldrneb	td0, [buf], #1		@ include last byte
 		adcnes	sum, sum, td0, put_byte_0 @ update checksum

-.done:		adc	r0, sum, #0		@ collect up the last carry
+.Ldone:	adc	r0, sum, #0		@ collect up the last carry
 		ldr	td0, [sp], #4
 		tst	td0, #1			@ check buffer alignment
 		movne	r0, r0, ror #8		@ rotate checksum by 8 bits
 		ldr	pc, [sp], #4		@ return

-.not_aligned:	tst	buf, #1			@ odd address
+.Lnot_aligned:	tst	buf, #1			@ odd address
 		ldrneb	td0, [buf], #1		@ make even
 		subne	len, len, #1
 		adcnes	sum, sum, td0, put_byte_1 @ update checksum
···
 ENTRY(csum_partial)
 		stmfd	sp!, {buf, lr}
 		cmp	len, #8			@ Ensure that we have at least
-		blo	.less8			@ 8 bytes to copy.
+		blo	.Lless8			@ 8 bytes to copy.

 		tst	buf, #1
 		movne	sum, sum, ror #8

 		adds	sum, sum, #0		@ C = 0
 		tst	buf, #3			@ Test destination alignment
-		blne	.not_aligned		@ aligh destination, return here
+		blne	.Lnot_aligned		@ align destination, return here

 1:		bics	ip, len, #31
 		beq	3f
···
 		ldmfd	sp!, {r4 - r5}

 3:		tst	len, #0x1c		@ should not change C
-		beq	.less4
+		beq	.Lless4

 4:		ldr	td0, [buf], #4
 		sub	len, len, #4
 		adcs	sum, sum, td0
 		tst	len, #0x1c
 		bne	4b
-		b	.less4
+		b	.Lless4
arch/arm/lib/csumpartialcopygeneric.S (+36 -34)
···
 len		.req	r2
 sum		.req	r3

-.zero:		mov	r0, sum
+.Lzero:	mov	r0, sum
 		load_regs	ea

 /*
···
  * the length.  Note that the source pointer hasn't been
  * aligned yet.
  */
-.dst_unaligned:	tst	dst, #1
-		beq	.dst_16bit
+.Ldst_unaligned:
+		tst	dst, #1
+		beq	.Ldst_16bit

 		load1b	ip
 		sub	len, len, #1
···
 		tst	dst, #2
 		moveq	pc, lr			@ dst is now 32bit aligned

-.dst_16bit:	load2b	r8, ip
+.Ldst_16bit:	load2b	r8, ip
 		sub	len, len, #2
 		adcs	sum, sum, r8, put_byte_0
 		strb	r8, [dst], #1
···
  * Handle 0 to 7 bytes, with any alignment of source and
  * destination pointers.  Note that when we get here, C = 0
  */
-.less8:	teq	len, #0			@ check for zero count
-		beq	.zero
+.Lless8:	teq	len, #0			@ check for zero count
+		beq	.Lzero

 /* we must have at least one byte. */
 		tst	dst, #1			@ dst 16-bit aligned
-		beq	.less8_aligned
+		beq	.Lless8_aligned

 /* Align dst */
 		load1b	ip
···
 		adcs	sum, sum, ip, put_byte_1 @ update checksum
 		strb	ip, [dst], #1
 		tst	len, #6
-		beq	.less8_byteonly
+		beq	.Lless8_byteonly

 1:		load2b	r8, ip
 		sub	len, len, #2
···
 		strb	r8, [dst], #1
 		adcs	sum, sum, ip, put_byte_1
 		strb	ip, [dst], #1
-.less8_aligned:	tst	len, #6
+.Lless8_aligned:
+		tst	len, #6
 		bne	1b
-.less8_byteonly:
+.Lless8_byteonly:
 		tst	len, #1
-		beq	.done
+		beq	.Ldone
 		load1b	r8
 		adcs	sum, sum, r8, put_byte_0 @ update checksum
 		strb	r8, [dst], #1
-		b	.done
+		b	.Ldone

 FN_ENTRY
 		mov	ip, sp
···
 		sub	fp, ip, #4

 		cmp	len, #8			@ Ensure that we have at least
-		blo	.less8			@ 8 bytes to copy.
+		blo	.Lless8			@ 8 bytes to copy.

 		adds	sum, sum, #0		@ C = 0
 		tst	dst, #3			@ Test destination alignment
-		blne	.dst_unaligned		@ align destination, return here
+		blne	.Ldst_unaligned		@ align destination, return here

 /*
  * Ok, the dst pointer is now 32bit aligned, and we know
···
  */

 		tst	src, #3			@ Test source alignment
-		bne	.src_not_aligned
+		bne	.Lsrc_not_aligned

 /* Routine for src & dst aligned */

···
 		adcs	sum, sum, r4

 4:		ands	len, len, #3
-		beq	.done
+		beq	.Ldone
 		load1l	r4
 		tst	len, #2
 		mov	r5, r4, get_byte_0
-		beq	.exit
+		beq	.Lexit
 		adcs	sum, sum, r4, push #16
 		strb	r5, [dst], #1
 		mov	r5, r4, get_byte_1
 		strb	r5, [dst], #1
 		mov	r5, r4, get_byte_2
-.exit:		tst	len, #1
+.Lexit:	tst	len, #1
 		strneb	r5, [dst], #1
 		andne	r5, r5, #255
 		adcnes	sum, sum, r5, put_byte_0
···
  * the inefficient byte manipulations in the
  * architecture independent code.
  */
-.done:		adc	r0, sum, #0
+.Ldone:	adc	r0, sum, #0
 		ldr	sum, [sp, #0]		@ dst
 		tst	sum, #1
 		movne	r0, r0, ror #8
 		load_regs	ea

-.src_not_aligned:
+.Lsrc_not_aligned:
 		adc	sum, sum, #0		@ include C from dst alignment
 		and	ip, src, #3
 		bic	src, src, #3
 		load1l	r5
 		cmp	ip, #2
-		beq	.src2_aligned
-		bhi	.src3_aligned
+		beq	.Lsrc2_aligned
+		bhi	.Lsrc3_aligned
 		mov	r4, r5, pull #8		@ C = 0
 		bics	ip, len, #15
 		beq	2f
···
 		adcs	sum, sum, r4
 		mov	r4, r5, pull #8
 4:		ands	len, len, #3
-		beq	.done
+		beq	.Ldone
 		mov	r5, r4, get_byte_0
 		tst	len, #2
-		beq	.exit
+		beq	.Lexit
 		adcs	sum, sum, r4, push #16
 		strb	r5, [dst], #1
 		mov	r5, r4, get_byte_1
 		strb	r5, [dst], #1
 		mov	r5, r4, get_byte_2
-		b	.exit
+		b	.Lexit

-.src2_aligned:	mov	r4, r5, pull #16
+.Lsrc2_aligned:	mov	r4, r5, pull #16
 		adds	sum, sum, #0
 		bics	ip, len, #15
 		beq	2f
···
 		adcs	sum, sum, r4
 		mov	r4, r5, pull #16
 4:		ands	len, len, #3
-		beq	.done
+		beq	.Ldone
 		mov	r5, r4, get_byte_0
 		tst	len, #2
-		beq	.exit
+		beq	.Lexit
 		adcs	sum, sum, r4
 		strb	r5, [dst], #1
 		mov	r5, r4, get_byte_1
 		strb	r5, [dst], #1
 		tst	len, #1
-		beq	.done
+		beq	.Ldone
 		load1b	r5
-		b	.exit
+		b	.Lexit

-.src3_aligned:	mov	r4, r5, pull #24
+.Lsrc3_aligned:	mov	r4, r5, pull #24
 		adds	sum, sum, #0
 		bics	ip, len, #15
 		beq	2f
···
 		adcs	sum, sum, r4
 		mov	r4, r5, pull #24
 4:		ands	len, len, #3
-		beq	.done
+		beq	.Ldone
 		mov	r5, r4, get_byte_0
 		tst	len, #2
-		beq	.exit
+		beq	.Lexit
 		strb	r5, [dst], #1
 		adcs	sum, sum, r4
 		load1l	r4
···
 		strb	r5, [dst], #1
 		adcs	sum, sum, r4, push #24
 		mov	r5, r4, get_byte_1
-		b	.exit
+		b	.Lexit
arch/arm/lib/delay.S (+2 -2)
···
 #include <asm/assembler.h>
 		.text

-LC0:		.word	loops_per_jiffy
+.LC0:		.word	loops_per_jiffy

 /*
  * 0 <= r0 <= 2000
···
 		orr	r2, r2, #0x00db
 		mul	r0, r2, r0
 ENTRY(__const_udelay)			@ 0 <= r0 <= 0x01ffffff
-		ldr	r2, LC0
+		ldr	r2, .LC0
 		ldr	r2, [r2]		@ max = 0x0fffffff
 		mov	r0, r0, lsr #11		@ max = 0x00003fff
 		mov	r2, r2, lsr #11		@ max = 0x0003ffff
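
The delay.S hunk is the simplest instance of the pattern: LC0 is just a
literal-pool word holding the address of loops_per_jiffy, loaded
PC-relative by __const_udelay, so nothing outside the file ever needs to
see it as a symbol. A rough sketch of the same construct, with hypothetical
names (not from the kernel):

	.text
	.globl	read_counter		@ hypothetical routine
read_counter:
	ldr	r2, .Lcounter_addr	@ pc-relative load of the pool word
	ldr	r2, [r2]		@ fetch the variable's current value
	mov	r0, r2
	mov	pc, lr

.Lcounter_addr:
	.word	some_counter		@ hypothetical external variable

Because .Lcounter_addr starts with ".L", it never appears in the symbol
table, just as .LC0 no longer does after this patch.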
arch/arm/lib/findbit.S (+9 -9)
···
 		mov	r2, #0
 1:		ldrb	r3, [r0, r2, lsr #3]
 		eors	r3, r3, #0xff		@ invert bits
-		bne	.found			@ any now set - found zero bit
+		bne	.L_found		@ any now set - found zero bit
 		add	r2, r2, #8		@ next bit pointer
 2:		cmp	r2, r1			@ any more?
 		blo	1b
···
 		ldrb	r3, [r0, r2, lsr #3]
 		eor	r3, r3, #0xff		@ now looking for a 1 bit
 		movs	r3, r3, lsr ip		@ shift off unused bits
-		bne	.found
+		bne	.L_found
 		orr	r2, r2, #7		@ if zero, then no bits here
 		add	r2, r2, #1		@ align bit pointer
 		b	2b			@ loop for next bit
···
 		mov	r2, #0
 1:		ldrb	r3, [r0, r2, lsr #3]
 		movs	r3, r3
-		bne	.found			@ any now set - found zero bit
+		bne	.L_found		@ any now set - found zero bit
 		add	r2, r2, #8		@ next bit pointer
 2:		cmp	r2, r1			@ any more?
 		blo	1b
···
 		beq	1b			@ If new byte, goto old routine
 		ldrb	r3, [r0, r2, lsr #3]
 		movs	r3, r3, lsr ip		@ shift off unused bits
-		bne	.found
+		bne	.L_found
 		orr	r2, r2, #7		@ if zero, then no bits here
 		add	r2, r2, #1		@ align bit pointer
 		b	2b			@ loop for next bit
···
 1:		eor	r3, r2, #0x18		@ big endian byte ordering
 		ldrb	r3, [r0, r3, lsr #3]
 		eors	r3, r3, #0xff		@ invert bits
-		bne	.found			@ any now set - found zero bit
+		bne	.L_found		@ any now set - found zero bit
 		add	r2, r2, #8		@ next bit pointer
 2:		cmp	r2, r1			@ any more?
 		blo	1b
···
 		ldrb	r3, [r0, r3, lsr #3]
 		eor	r3, r3, #0xff		@ now looking for a 1 bit
 		movs	r3, r3, lsr ip		@ shift off unused bits
-		bne	.found
+		bne	.L_found
 		orr	r2, r2, #7		@ if zero, then no bits here
 		add	r2, r2, #1		@ align bit pointer
 		b	2b			@ loop for next bit
···
 1:		eor	r3, r2, #0x18		@ big endian byte ordering
 		ldrb	r3, [r0, r3, lsr #3]
 		movs	r3, r3
-		bne	.found			@ any now set - found zero bit
+		bne	.L_found		@ any now set - found zero bit
 		add	r2, r2, #8		@ next bit pointer
 2:		cmp	r2, r1			@ any more?
 		blo	1b
···
 		eor	r3, r2, #0x18		@ big endian byte ordering
 		ldrb	r3, [r0, r3, lsr #3]
 		movs	r3, r3, lsr ip		@ shift off unused bits
-		bne	.found
+		bne	.L_found
 		orr	r2, r2, #7		@ if zero, then no bits here
 		add	r2, r2, #1		@ align bit pointer
 		b	2b			@ loop for next bit
···
 /*
  * One or more bits in the LSB of r3 are assumed to be set.
  */
-.found:
+.L_found:
 #if __LINUX_ARM_ARCH__ >= 5
 		rsb	r1, r3, #0
 		and	r3, r3, r1