Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ARM: 8844/1: use unified assembler in assembly files

Use unified assembler syntax (UAL) in assembly files. Divided
syntax is considered deprecated. This will also allow building
the kernel using LLVM's integrated assembler.

Signed-off-by: Stefan Agner <stefan@agner.ch>
Acked-by: Nicolas Pitre <nico@linaro.org>
Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>

Authored by Stefan Agner and committed by Russell King
e44fc388 c001899a

+124 -124
+1 -1
arch/arm/boot/bootp/init.S
··· 44 44 */ 45 45 movne r10, #0 @ terminator 46 46 movne r4, #2 @ Size of this entry (2 words) 47 - stmneia r9, {r4, r5, r10} @ Size, ATAG_CORE, terminator 47 + stmiane r9, {r4, r5, r10} @ Size, ATAG_CORE, terminator 48 48 49 49 /* 50 50 * find the end of the tag list, and then add an INITRD tag on the end.
+2 -2
arch/arm/boot/compressed/ll_char_wr.S
··· 75 75 tst r1, #7 @ avoid using r7 directly after 76 76 str r7, [r0, -r5]! 77 77 subne r1, r1, #1 78 - ldrneb r7, [r6, r1] 78 + ldrbne r7, [r6, r1] 79 79 bne Lrow4bpplp 80 80 ldmfd sp!, {r4 - r7, pc} 81 81 ··· 103 103 sub r0, r0, r5 @ avoid ip 104 104 stmia r0, {r4, ip} 105 105 subne r1, r1, #1 106 - ldrneb r7, [r6, r1] 106 + ldrbne r7, [r6, r1] 107 107 bne Lrow8bpplp 108 108 ldmfd sp!, {r4 - r7, pc} 109 109
+5 -5
arch/arm/include/asm/hardware/entry-macro-iomd.S
··· 16 16 ldr \tmp, =irq_prio_h 17 17 teq \irqstat, #0 18 18 #ifdef IOMD_BASE 19 - ldreqb \irqstat, [\base, #IOMD_DMAREQ] @ get dma 19 + ldrbeq \irqstat, [\base, #IOMD_DMAREQ] @ get dma 20 20 addeq \tmp, \tmp, #256 @ irq_prio_h table size 21 21 teqeq \irqstat, #0 22 22 bne 2406f 23 23 #endif 24 - ldreqb \irqstat, [\base, #IOMD_IRQREQA] @ get low priority 24 + ldrbeq \irqstat, [\base, #IOMD_IRQREQA] @ get low priority 25 25 addeq \tmp, \tmp, #256 @ irq_prio_d table size 26 26 teqeq \irqstat, #0 27 27 #ifdef IOMD_IRQREQC 28 - ldreqb \irqstat, [\base, #IOMD_IRQREQC] 28 + ldrbeq \irqstat, [\base, #IOMD_IRQREQC] 29 29 addeq \tmp, \tmp, #256 @ irq_prio_l table size 30 30 teqeq \irqstat, #0 31 31 #endif 32 32 #ifdef IOMD_IRQREQD 33 - ldreqb \irqstat, [\base, #IOMD_IRQREQD] 33 + ldrbeq \irqstat, [\base, #IOMD_IRQREQD] 34 34 addeq \tmp, \tmp, #256 @ irq_prio_lc table size 35 35 teqeq \irqstat, #0 36 36 #endif 37 - 2406: ldrneb \irqnr, [\tmp, \irqstat] @ get IRQ number 37 + 2406: ldrbne \irqnr, [\tmp, \irqstat] @ get IRQ number 38 38 .endm 39 39 40 40 /*
+1 -1
arch/arm/include/debug/tegra.S
··· 173 173 174 174 .macro senduart, rd, rx 175 175 cmp \rx, #0 176 - strneb \rd, [\rx, #UART_TX << UART_SHIFT] 176 + strbne \rd, [\rx, #UART_TX << UART_SHIFT] 177 177 1001: 178 178 .endm 179 179
+1 -1
arch/arm/kernel/debug.S
··· 86 86 ENTRY(printascii) 87 87 addruart_current r3, r1, r2 88 88 1: teq r0, #0 89 - ldrneb r1, [r0], #1 89 + ldrbne r1, [r0], #1 90 90 teqne r1, #0 91 91 reteq lr 92 92 2: teq r1, #'\n'
+6 -6
arch/arm/kernel/entry-armv.S
··· 636 636 @ Test if we need to give access to iWMMXt coprocessors 637 637 ldr r5, [r10, #TI_FLAGS] 638 638 rsbs r7, r8, #(1 << 8) @ CP 0 or 1 only 639 - movcss r7, r5, lsr #(TIF_USING_IWMMXT + 1) 639 + movscs r7, r5, lsr #(TIF_USING_IWMMXT + 1) 640 640 bcs iwmmxt_task_enable 641 641 #endif 642 642 ARM( add pc, pc, r8, lsr #6 ) ··· 872 872 smp_dmb arm 873 873 1: ldrexd r0, r1, [r2] @ load current val 874 874 eors r3, r0, r4 @ compare with oldval (1) 875 - eoreqs r3, r1, r5 @ compare with oldval (2) 875 + eorseq r3, r1, r5 @ compare with oldval (2) 876 876 strexdeq r3, r6, r7, [r2] @ store newval if eq 877 877 teqeq r3, #1 @ success? 878 878 beq 1b @ if no then retry ··· 896 896 ldmia r1, {r6, lr} @ load new val 897 897 1: ldmia r2, {r0, r1} @ load current val 898 898 eors r3, r0, r4 @ compare with oldval (1) 899 - eoreqs r3, r1, r5 @ compare with oldval (2) 900 - 2: stmeqia r2, {r6, lr} @ store newval if eq 899 + eorseq r3, r1, r5 @ compare with oldval (2) 900 + 2: stmiaeq r2, {r6, lr} @ store newval if eq 901 901 rsbs r0, r3, #0 @ set return val and C flag 902 902 ldmfd sp!, {r4, r5, r6, pc} 903 903 ··· 911 911 mov r7, #0xffff0fff 912 912 sub r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64))) 913 913 subs r8, r4, r7 914 - rsbcss r8, r8, #(2b - 1b) 914 + rsbscs r8, r8, #(2b - 1b) 915 915 strcs r7, [sp, #S_PC] 916 916 #if __LINUX_ARM_ARCH__ < 6 917 917 bcc kuser_cmpxchg32_fixup ··· 969 969 mov r7, #0xffff0fff 970 970 sub r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg))) 971 971 subs r8, r4, r7 972 - rsbcss r8, r8, #(2b - 1b) 972 + rsbscs r8, r8, #(2b - 1b) 973 973 strcs r7, [sp, #S_PC] 974 974 ret lr 975 975 .previous
+1 -1
arch/arm/kernel/entry-common.S
··· 373 373 movhs scno, #0 374 374 csdb 375 375 #endif 376 - stmloia sp, {r5, r6} @ shuffle args 376 + stmialo sp, {r5, r6} @ shuffle args 377 377 movlo r0, r1 378 378 movlo r1, r2 379 379 movlo r2, r3
+4 -4
arch/arm/kernel/entry-header.S
··· 388 388 badr lr, \ret @ return address 389 389 .if \reload 390 390 add r1, sp, #S_R0 + S_OFF @ pointer to regs 391 - ldmccia r1, {r0 - r6} @ reload r0-r6 392 - stmccia sp, {r4, r5} @ update stack arguments 391 + ldmiacc r1, {r0 - r6} @ reload r0-r6 392 + stmiacc sp, {r4, r5} @ update stack arguments 393 393 .endif 394 394 ldrcc pc, [\table, \tmp, lsl #2] @ call sys_* routine 395 395 #else ··· 397 397 badr lr, \ret @ return address 398 398 .if \reload 399 399 add r1, sp, #S_R0 + S_OFF @ pointer to regs 400 - ldmccia r1, {r0 - r6} @ reload r0-r6 401 - stmccia sp, {r4, r5} @ update stack arguments 400 + ldmiacc r1, {r0 - r6} @ reload r0-r6 401 + stmiacc sp, {r4, r5} @ update stack arguments 402 402 .endif 403 403 ldrcc pc, [\table, \nr, lsl #2] @ call sys_* routine 404 404 #endif
+1 -1
arch/arm/lib/clear_user.S
··· 44 44 strusr r2, r0, 1, ne, rept=2 45 45 tst r1, #1 @ x1 x0 x1 x0 x1 x0 x1 46 46 it ne @ explicit IT needed for the label 47 - USER( strnebt r2, [r0]) 47 + USER( strbtne r2, [r0]) 48 48 mov r0, #0 49 49 ldmfd sp!, {r1, pc} 50 50 UNWIND(.fnend)
+2 -2
arch/arm/lib/copy_page.S
··· 39 39 .endr 40 40 subs r2, r2, #1 @ 1 41 41 stmia r0!, {r3, r4, ip, lr} @ 4 42 - ldmgtia r1!, {r3, r4, ip, lr} @ 4 42 + ldmiagt r1!, {r3, r4, ip, lr} @ 4 43 43 bgt 1b @ 1 44 - PLD( ldmeqia r1!, {r3, r4, ip, lr} ) 44 + PLD( ldmiaeq r1!, {r3, r4, ip, lr} ) 45 45 PLD( beq 2b ) 46 46 ldmfd sp!, {r4, pc} @ 3 47 47 ENDPROC(copy_page)
+2 -2
arch/arm/lib/copy_template.S
··· 99 99 100 100 CALGN( ands ip, r0, #31 ) 101 101 CALGN( rsb r3, ip, #32 ) 102 - CALGN( sbcnes r4, r3, r2 ) @ C is always set here 102 + CALGN( sbcsne r4, r3, r2 ) @ C is always set here 103 103 CALGN( bcs 2f ) 104 104 CALGN( adr r4, 6f ) 105 105 CALGN( subs r2, r2, r3 ) @ C gets set ··· 204 204 205 205 CALGN( ands ip, r0, #31 ) 206 206 CALGN( rsb ip, ip, #32 ) 207 - CALGN( sbcnes r4, ip, r2 ) @ C is always set here 207 + CALGN( sbcsne r4, ip, r2 ) @ C is always set here 208 208 CALGN( subcc r2, r2, ip ) 209 209 CALGN( bcc 15f ) 210 210
+10 -10
arch/arm/lib/csumpartial.S
··· 40 40 /* we must have at least one byte. */ 41 41 tst buf, #1 @ odd address? 42 42 movne sum, sum, ror #8 43 - ldrneb td0, [buf], #1 43 + ldrbne td0, [buf], #1 44 44 subne len, len, #1 45 - adcnes sum, sum, td0, put_byte_1 45 + adcsne sum, sum, td0, put_byte_1 46 46 47 47 .Lless4: tst len, #6 48 48 beq .Lless8_byte ··· 68 68 bne .Lless8_wordlp 69 69 70 70 .Lless8_byte: tst len, #1 @ odd number of bytes 71 - ldrneb td0, [buf], #1 @ include last byte 72 - adcnes sum, sum, td0, put_byte_0 @ update checksum 71 + ldrbne td0, [buf], #1 @ include last byte 72 + adcsne sum, sum, td0, put_byte_0 @ update checksum 73 73 74 74 .Ldone: adc r0, sum, #0 @ collect up the last carry 75 75 ldr td0, [sp], #4 ··· 78 78 ldr pc, [sp], #4 @ return 79 79 80 80 .Lnot_aligned: tst buf, #1 @ odd address 81 - ldrneb td0, [buf], #1 @ make even 81 + ldrbne td0, [buf], #1 @ make even 82 82 subne len, len, #1 83 - adcnes sum, sum, td0, put_byte_1 @ update checksum 83 + adcsne sum, sum, td0, put_byte_1 @ update checksum 84 84 85 85 tst buf, #2 @ 32-bit aligned? 86 86 #if __LINUX_ARM_ARCH__ >= 4 87 - ldrneh td0, [buf], #2 @ make 32-bit aligned 87 + ldrhne td0, [buf], #2 @ make 32-bit aligned 88 88 subne len, len, #2 89 89 #else 90 - ldrneb td0, [buf], #1 91 - ldrneb ip, [buf], #1 90 + ldrbne td0, [buf], #1 91 + ldrbne ip, [buf], #1 92 92 subne len, len, #2 93 93 #ifndef __ARMEB__ 94 94 orrne td0, td0, ip, lsl #8 ··· 96 96 orrne td0, ip, td0, lsl #8 97 97 #endif 98 98 #endif 99 - adcnes sum, sum, td0 @ update checksum 99 + adcsne sum, sum, td0 @ update checksum 100 100 ret lr 101 101 102 102 ENTRY(csum_partial)
+2 -2
arch/arm/lib/csumpartialcopygeneric.S
··· 148 148 strb r5, [dst], #1 149 149 mov r5, r4, get_byte_2 150 150 .Lexit: tst len, #1 151 - strneb r5, [dst], #1 151 + strbne r5, [dst], #1 152 152 andne r5, r5, #255 153 - adcnes sum, sum, r5, put_byte_0 153 + adcsne sum, sum, r5, put_byte_0 154 154 155 155 /* 156 156 * If the dst pointer was not 16-bit aligned, we
+1 -1
arch/arm/lib/csumpartialcopyuser.S
··· 95 95 add r2, r2, r1 96 96 mov r0, #0 @ zero the buffer 97 97 9002: teq r2, r1 98 - strneb r0, [r1], #1 98 + strbne r0, [r1], #1 99 99 bne 9002b 100 100 load_regs 101 101 .popsection
+2 -2
arch/arm/lib/div64.S
··· 88 88 @ Break out early if dividend reaches 0. 89 89 2: cmp xh, yl 90 90 orrcs yh, yh, ip 91 - subcss xh, xh, yl 92 - movnes ip, ip, lsr #1 91 + subscs xh, xh, yl 92 + movsne ip, ip, lsr #1 93 93 mov yl, yl, lsr #1 94 94 bne 2b 95 95
+5 -5
arch/arm/lib/floppydma.S
··· 14 14 .global floppy_fiqin_end 15 15 ENTRY(floppy_fiqin_start) 16 16 subs r9, r9, #1 17 - ldrgtb r12, [r11, #-4] 18 - ldrleb r12, [r11], #0 17 + ldrbgt r12, [r11, #-4] 18 + ldrble r12, [r11], #0 19 19 strb r12, [r10], #1 20 20 subs pc, lr, #4 21 21 floppy_fiqin_end: ··· 23 23 .global floppy_fiqout_end 24 24 ENTRY(floppy_fiqout_start) 25 25 subs r9, r9, #1 26 - ldrgeb r12, [r10], #1 26 + ldrbge r12, [r10], #1 27 27 movlt r12, #0 28 - strleb r12, [r11], #0 29 - subles pc, lr, #4 28 + strble r12, [r11], #0 29 + subsle pc, lr, #4 30 30 strb r12, [r11, #-4] 31 31 subs pc, lr, #4 32 32 floppy_fiqout_end:
+10 -10
arch/arm/lib/io-readsb.S
··· 16 16 cmp ip, #2 17 17 ldrb r3, [r0] 18 18 strb r3, [r1], #1 19 - ldrgeb r3, [r0] 20 - strgeb r3, [r1], #1 21 - ldrgtb r3, [r0] 22 - strgtb r3, [r1], #1 19 + ldrbge r3, [r0] 20 + strbge r3, [r1], #1 21 + ldrbgt r3, [r0] 22 + strbgt r3, [r1], #1 23 23 subs r2, r2, ip 24 24 bne .Linsb_aligned 25 25 ··· 72 72 bpl .Linsb_16_lp 73 73 74 74 tst r2, #15 75 - ldmeqfd sp!, {r4 - r6, pc} 75 + ldmfdeq sp!, {r4 - r6, pc} 76 76 77 77 .Linsb_no_16: tst r2, #8 78 78 beq .Linsb_no_8 ··· 109 109 str r3, [r1], #4 110 110 111 111 .Linsb_no_4: ands r2, r2, #3 112 - ldmeqfd sp!, {r4 - r6, pc} 112 + ldmfdeq sp!, {r4 - r6, pc} 113 113 114 114 cmp r2, #2 115 115 ldrb r3, [r0] 116 116 strb r3, [r1], #1 117 - ldrgeb r3, [r0] 118 - strgeb r3, [r1], #1 119 - ldrgtb r3, [r0] 120 - strgtb r3, [r1] 117 + ldrbge r3, [r0] 118 + strbge r3, [r1], #1 119 + ldrbgt r3, [r0] 120 + strbgt r3, [r1] 121 121 122 122 ldmfd sp!, {r4 - r6, pc} 123 123 ENDPROC(__raw_readsb)
+1 -1
arch/arm/lib/io-readsl.S
··· 30 30 2: movs r2, r2, lsl #31 31 31 ldrcs r3, [r0, #0] 32 32 ldrcs ip, [r0, #0] 33 - stmcsia r1!, {r3, ip} 33 + stmiacs r1!, {r3, ip} 34 34 ldrne r3, [r0, #0] 35 35 strne r3, [r1, #0] 36 36 ret lr
+3 -3
arch/arm/lib/io-readsw-armv3.S
··· 68 68 bpl .Linsw_8_lp 69 69 70 70 tst r2, #7 71 - ldmeqfd sp!, {r4, r5, r6, pc} 71 + ldmfdeq sp!, {r4, r5, r6, pc} 72 72 73 73 .Lno_insw_8: tst r2, #4 74 74 beq .Lno_insw_4 ··· 97 97 98 98 .Lno_insw_2: tst r2, #1 99 99 ldrne r3, [r0] 100 - strneb r3, [r1], #1 100 + strbne r3, [r1], #1 101 101 movne r3, r3, lsr #8 102 - strneb r3, [r1] 102 + strbne r3, [r1] 103 103 104 104 ldmfd sp!, {r4, r5, r6, pc} 105 105
+6 -6
arch/arm/lib/io-readsw-armv4.S
··· 76 76 pack r3, r3, ip 77 77 str r3, [r1], #4 78 78 79 - .Lno_insw_2: ldrneh r3, [r0] 80 - strneh r3, [r1] 79 + .Lno_insw_2: ldrhne r3, [r0] 80 + strhne r3, [r1] 81 81 82 82 ldmfd sp!, {r4, r5, pc} 83 83 ··· 94 94 #endif 95 95 96 96 .Linsw_noalign: stmfd sp!, {r4, lr} 97 - ldrccb ip, [r1, #-1]! 97 + ldrbcc ip, [r1, #-1]! 98 98 bcc 1f 99 99 100 100 ldrh ip, [r0] ··· 121 121 122 122 3: tst r2, #1 123 123 strb ip, [r1], #1 124 - ldrneh ip, [r0] 124 + ldrhne ip, [r0] 125 125 _BE_ONLY_( movne ip, ip, ror #8 ) 126 - strneb ip, [r1], #1 126 + strbne ip, [r1], #1 127 127 _LE_ONLY_( movne ip, ip, lsr #8 ) 128 128 _BE_ONLY_( movne ip, ip, lsr #24 ) 129 - strneb ip, [r1] 129 + strbne ip, [r1] 130 130 ldmfd sp!, {r4, pc} 131 131 ENDPROC(__raw_readsw)
+10 -10
arch/arm/lib/io-writesb.S
··· 36 36 cmp ip, #2 37 37 ldrb r3, [r1], #1 38 38 strb r3, [r0] 39 - ldrgeb r3, [r1], #1 40 - strgeb r3, [r0] 41 - ldrgtb r3, [r1], #1 42 - strgtb r3, [r0] 39 + ldrbge r3, [r1], #1 40 + strbge r3, [r0] 41 + ldrbgt r3, [r1], #1 42 + strbgt r3, [r0] 43 43 subs r2, r2, ip 44 44 bne .Loutsb_aligned 45 45 ··· 64 64 bpl .Loutsb_16_lp 65 65 66 66 tst r2, #15 67 - ldmeqfd sp!, {r4, r5, pc} 67 + ldmfdeq sp!, {r4, r5, pc} 68 68 69 69 .Loutsb_no_16: tst r2, #8 70 70 beq .Loutsb_no_8 ··· 80 80 outword r3 81 81 82 82 .Loutsb_no_4: ands r2, r2, #3 83 - ldmeqfd sp!, {r4, r5, pc} 83 + ldmfdeq sp!, {r4, r5, pc} 84 84 85 85 cmp r2, #2 86 86 ldrb r3, [r1], #1 87 87 strb r3, [r0] 88 - ldrgeb r3, [r1], #1 89 - strgeb r3, [r0] 90 - ldrgtb r3, [r1] 91 - strgtb r3, [r0] 88 + ldrbge r3, [r1], #1 89 + strbge r3, [r0] 90 + ldrbgt r3, [r1] 91 + strbgt r3, [r0] 92 92 93 93 ldmfd sp!, {r4, r5, pc} 94 94 ENDPROC(__raw_writesb)
+1 -1
arch/arm/lib/io-writesl.S
··· 28 28 bpl 1b 29 29 ldmfd sp!, {r4, lr} 30 30 2: movs r2, r2, lsl #31 31 - ldmcsia r1!, {r3, ip} 31 + ldmiacs r1!, {r3, ip} 32 32 strcs r3, [r0, #0] 33 33 ldrne r3, [r1, #0] 34 34 strcs ip, [r0, #0]
+1 -1
arch/arm/lib/io-writesw-armv3.S
··· 79 79 bpl .Loutsw_8_lp 80 80 81 81 tst r2, #7 82 - ldmeqfd sp!, {r4, r5, r6, pc} 82 + ldmfdeq sp!, {r4, r5, r6, pc} 83 83 84 84 .Lno_outsw_8: tst r2, #4 85 85 beq .Lno_outsw_4
+3 -3
arch/arm/lib/io-writesw-armv4.S
··· 61 61 ldr r3, [r1], #4 62 62 outword r3 63 63 64 - .Lno_outsw_2: ldrneh r3, [r1] 65 - strneh r3, [r0] 64 + .Lno_outsw_2: ldrhne r3, [r1] 65 + strhne r3, [r0] 66 66 67 67 ldmfd sp!, {r4, r5, pc} 68 68 ··· 95 95 96 96 tst r2, #1 97 97 3: movne ip, r3, lsr #8 98 - strneh ip, [r0] 98 + strhne ip, [r0] 99 99 ret lr 100 100 ENDPROC(__raw_writesw)
+2 -2
arch/arm/lib/lib1funcs.S
··· 96 96 subhs \dividend, \dividend, \divisor, lsr #3 97 97 orrhs \result, \result, \curbit, lsr #3 98 98 cmp \dividend, #0 @ Early termination? 99 - movnes \curbit, \curbit, lsr #4 @ No, any more bits to do? 99 + movsne \curbit, \curbit, lsr #4 @ No, any more bits to do? 100 100 movne \divisor, \divisor, lsr #4 101 101 bne 1b 102 102 ··· 182 182 subhs \dividend, \dividend, \divisor, lsr #3 183 183 cmp \dividend, #1 184 184 mov \divisor, \divisor, lsr #4 185 - subges \order, \order, #4 185 + subsge \order, \order, #4 186 186 bge 1b 187 187 188 188 tst \order, #3
+12 -12
arch/arm/lib/memmove.S
··· 59 59 blt 5f 60 60 61 61 CALGN( ands ip, r0, #31 ) 62 - CALGN( sbcnes r4, ip, r2 ) @ C is always set here 62 + CALGN( sbcsne r4, ip, r2 ) @ C is always set here 63 63 CALGN( bcs 2f ) 64 64 CALGN( adr r4, 6f ) 65 65 CALGN( subs r2, r2, ip ) @ C is set here ··· 114 114 UNWIND( .save {r0, r4, lr} ) @ still in first stmfd block 115 115 116 116 8: movs r2, r2, lsl #31 117 - ldrneb r3, [r1, #-1]! 118 - ldrcsb r4, [r1, #-1]! 119 - ldrcsb ip, [r1, #-1] 120 - strneb r3, [r0, #-1]! 121 - strcsb r4, [r0, #-1]! 122 - strcsb ip, [r0, #-1] 117 + ldrbne r3, [r1, #-1]! 118 + ldrbcs r4, [r1, #-1]! 119 + ldrbcs ip, [r1, #-1] 120 + strbne r3, [r0, #-1]! 121 + strbcs r4, [r0, #-1]! 122 + strbcs ip, [r0, #-1] 123 123 ldmfd sp!, {r0, r4, pc} 124 124 125 125 9: cmp ip, #2 126 - ldrgtb r3, [r1, #-1]! 127 - ldrgeb r4, [r1, #-1]! 126 + ldrbgt r3, [r1, #-1]! 127 + ldrbge r4, [r1, #-1]! 128 128 ldrb lr, [r1, #-1]! 129 - strgtb r3, [r0, #-1]! 130 - strgeb r4, [r0, #-1]! 129 + strbgt r3, [r0, #-1]! 130 + strbge r4, [r0, #-1]! 131 131 subs r2, r2, ip 132 132 strb lr, [r0, #-1]! 133 133 blt 8b ··· 150 150 blt 14f 151 151 152 152 CALGN( ands ip, r0, #31 ) 153 - CALGN( sbcnes r4, ip, r2 ) @ C is always set here 153 + CALGN( sbcsne r4, ip, r2 ) @ C is always set here 154 154 CALGN( subcc r2, r2, ip ) 155 155 CALGN( bcc 15f ) 156 156
+21 -21
arch/arm/lib/memset.S
··· 44 44 mov lr, r3 45 45 46 46 2: subs r2, r2, #64 47 - stmgeia ip!, {r1, r3, r8, lr} @ 64 bytes at a time. 48 - stmgeia ip!, {r1, r3, r8, lr} 49 - stmgeia ip!, {r1, r3, r8, lr} 50 - stmgeia ip!, {r1, r3, r8, lr} 47 + stmiage ip!, {r1, r3, r8, lr} @ 64 bytes at a time. 48 + stmiage ip!, {r1, r3, r8, lr} 49 + stmiage ip!, {r1, r3, r8, lr} 50 + stmiage ip!, {r1, r3, r8, lr} 51 51 bgt 2b 52 - ldmeqfd sp!, {r8, pc} @ Now <64 bytes to go. 52 + ldmfdeq sp!, {r8, pc} @ Now <64 bytes to go. 53 53 /* 54 54 * No need to correct the count; we're only testing bits from now on 55 55 */ 56 56 tst r2, #32 57 - stmneia ip!, {r1, r3, r8, lr} 58 - stmneia ip!, {r1, r3, r8, lr} 57 + stmiane ip!, {r1, r3, r8, lr} 58 + stmiane ip!, {r1, r3, r8, lr} 59 59 tst r2, #16 60 - stmneia ip!, {r1, r3, r8, lr} 60 + stmiane ip!, {r1, r3, r8, lr} 61 61 ldmfd sp!, {r8, lr} 62 62 UNWIND( .fnend ) 63 63 ··· 87 87 rsb r8, r8, #32 88 88 sub r2, r2, r8 89 89 movs r8, r8, lsl #(32 - 4) 90 - stmcsia ip!, {r4, r5, r6, r7} 91 - stmmiia ip!, {r4, r5} 90 + stmiacs ip!, {r4, r5, r6, r7} 91 + stmiami ip!, {r4, r5} 92 92 tst r8, #(1 << 30) 93 93 mov r8, r1 94 94 strne r1, [ip], #4 95 95 96 96 3: subs r2, r2, #64 97 - stmgeia ip!, {r1, r3-r8, lr} 98 - stmgeia ip!, {r1, r3-r8, lr} 97 + stmiage ip!, {r1, r3-r8, lr} 98 + stmiage ip!, {r1, r3-r8, lr} 99 99 bgt 3b 100 - ldmeqfd sp!, {r4-r8, pc} 100 + ldmfdeq sp!, {r4-r8, pc} 101 101 102 102 tst r2, #32 103 - stmneia ip!, {r1, r3-r8, lr} 103 + stmiane ip!, {r1, r3-r8, lr} 104 104 tst r2, #16 105 - stmneia ip!, {r4-r7} 105 + stmiane ip!, {r4-r7} 106 106 ldmfd sp!, {r4-r8, lr} 107 107 UNWIND( .fnend ) 108 108 ··· 110 110 111 111 UNWIND( .fnstart ) 112 112 4: tst r2, #8 113 - stmneia ip!, {r1, r3} 113 + stmiane ip!, {r1, r3} 114 114 tst r2, #4 115 115 strne r1, [ip], #4 116 116 /* ··· 118 118 * may have an unaligned pointer as well. 
119 119 */ 120 120 5: tst r2, #2 121 - strneb r1, [ip], #1 122 - strneb r1, [ip], #1 121 + strbne r1, [ip], #1 122 + strbne r1, [ip], #1 123 123 tst r2, #1 124 - strneb r1, [ip], #1 124 + strbne r1, [ip], #1 125 125 ret lr 126 126 127 127 6: subs r2, r2, #4 @ 1 do we have enough 128 128 blt 5b @ 1 bytes to align with? 129 129 cmp r3, #2 @ 1 130 - strltb r1, [ip], #1 @ 1 131 - strleb r1, [ip], #1 @ 1 130 + strblt r1, [ip], #1 @ 1 131 + strble r1, [ip], #1 @ 1 132 132 strb r1, [ip], #1 @ 1 133 133 add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3)) 134 134 b 1b
+1 -1
arch/arm/mach-ks8695/include/mach/entry-macro.S
··· 42 42 moveq \irqstat, \irqstat, lsr #2 43 43 addeq \irqnr, \irqnr, #2 44 44 tst \irqstat, #0x01 45 - addeqs \irqnr, \irqnr, #1 45 + addseq \irqnr, \irqnr, #1 46 46 1001: 47 47 .endm
+1 -1
arch/arm/mach-tegra/reset-handler.S
··· 172 172 mov32 r5, TEGRA_IRAM_BASE + TEGRA_IRAM_RESET_HANDLER_OFFSET 173 173 mov r0, #CPU_NOT_RESETTABLE 174 174 cmp r10, #0 175 - strneb r0, [r5, #__tegra20_cpu1_resettable_status_offset] 175 + strbne r0, [r5, #__tegra20_cpu1_resettable_status_offset] 176 176 1: 177 177 #endif 178 178
+4 -4
arch/arm/mm/cache-v6.S
··· 215 215 #endif 216 216 tst r1, #D_CACHE_LINE_SIZE - 1 217 217 #ifdef CONFIG_DMA_CACHE_RWFO 218 - ldrneb r2, [r1, #-1] @ read for ownership 219 - strneb r2, [r1, #-1] @ write for ownership 218 + ldrbne r2, [r1, #-1] @ read for ownership 219 + strbne r2, [r1, #-1] @ write for ownership 220 220 #endif 221 221 bic r1, r1, #D_CACHE_LINE_SIZE - 1 222 222 #ifdef HARVARD_CACHE ··· 284 284 add r0, r0, #D_CACHE_LINE_SIZE 285 285 cmp r0, r1 286 286 #ifdef CONFIG_DMA_CACHE_RWFO 287 - ldrlob r2, [r0] @ read for ownership 288 - strlob r2, [r0] @ write for ownership 287 + ldrblo r2, [r0] @ read for ownership 288 + strblo r2, [r0] @ write for ownership 289 289 #endif 290 290 blo 1b 291 291 mov r0, #0
+2 -2
arch/arm/mm/proc-v7m.S
··· 152 152 153 153 @ Configure caches (if implemented) 154 154 teq r8, #0 155 - stmneia sp, {r0-r6, lr} @ v7m_invalidate_l1 touches r0-r6 155 + stmiane sp, {r0-r6, lr} @ v7m_invalidate_l1 touches r0-r6 156 156 blne v7m_invalidate_l1 157 157 teq r8, #0 @ re-evalutae condition 158 - ldmneia sp, {r0-r6, lr} 158 + ldmiane sp, {r0-r6, lr} 159 159 160 160 @ Configure the System Control Register to ensure 8-byte stack alignment 161 161 @ Note the STKALIGN bit is either RW or RAO.