
ARM: convert all "mov.* pc, reg" to "bx reg" for ARMv6+

The "bx" instruction, available on ARMv6 and greater, can be used to
return from function calls. Recent CPUs perform better when the
"bx lr" instruction is used rather than the "mov pc, lr" instruction,
and the ARM architecture manual strongly recommends this sequence
(section A.4.1.1).
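
For illustration only (a sketch, not part of the patch), the two
return idioms are:

    mov pc, lr      @ classic return, works on all ARM architectures
    bx  lr          @ preferred on ARMv6+: recent cores recognise it
                    @ as a function return and predict it accordingly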

We provide a new macro, "ret", together with variants for each
condition code, which resolves to the appropriate instruction for the
target architecture.
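
As a sketch of the intended usage (illustrative; the real definition
is in asm/assembler.h below):

    ret   lr        @ "bx lr" on ARMv6+, "mov pc, lr" before ARMv6
    reteq lr        @ conditional variant: "bxeq lr" on ARMv6+
    ret   r9        @ non-lr registers stay "mov pc, r9" for now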

Rather than doing this piecemeal, and missing some instances, convert
all the "mov pc" instances to use the new macro, with the exception of
the "movs" instruction (an exception return, which also restores the
SPSR and so has no "bx" equivalent) and the kprobes code. This allows
us to detect the "mov pc, lr" case and fix it up - and also gives us
the possibility of deploying this for other registers depending on the
CPU selection.
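
For reference, the excepted exception-return form looks like this
(quoted from entry-header.S below); it must keep the "mov" encoding
because it also transfers the saved program status:

    movs pc, lr     @ return & move spsr_svc into cpsr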

Reported-by: Will Deacon <will.deacon@arm.com>
Tested-by: Stephen Warren <swarren@nvidia.com> # Tegra Jetson TK1
Tested-by: Robert Jarzmik <robert.jarzmik@free.fr> # mioa701_bootresume.S
Tested-by: Andrew Lunn <andrew@lunn.ch> # Kirkwood
Tested-by: Shawn Guo <shawn.guo@freescale.com>
Tested-by: Tony Lindgren <tony@atomide.com> # OMAPs
Tested-by: Gregory CLEMENT <gregory.clement@free-electrons.com> # Armada XP, 375, 385
Acked-by: Sekhar Nori <nsekhar@ti.com> # DaVinci
Acked-by: Christoffer Dall <christoffer.dall@linaro.org> # kvm/hyp
Acked-by: Haojian Zhuang <haojian.zhuang@gmail.com> # PXA3xx
Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com> # Xen
Tested-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de> # ARMv7M
Tested-by: Simon Horman <horms+renesas@verge.net.au> # Shmobile
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

+644 -607
+2 -1
arch/arm/crypto/aes-armv4.S
··· 35 35 @ that is being targetted. 36 36 37 37 #include <linux/linkage.h> 38 + #include <asm/assembler.h> 38 39 39 40 .text 40 41 ··· 649 648 650 649 .Ldone: mov r0,#0 651 650 ldmia sp!,{r4-r12,lr} 652 - .Labrt: mov pc,lr 651 + .Labrt: ret lr 653 652 ENDPROC(private_AES_set_encrypt_key) 654 653 655 654 .align 5
+21
arch/arm/include/asm/assembler.h
··· 427 427 #endif 428 428 .endm 429 429 430 + .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo 431 + .macro ret\c, reg 432 + #if __LINUX_ARM_ARCH__ < 6 433 + mov\c pc, \reg 434 + #else 435 + .ifeqs "\reg", "lr" 436 + bx\c \reg 437 + .else 438 + mov\c pc, \reg 439 + .endif 440 + #endif 441 + .endm 442 + .endr 443 + 444 + .macro ret.w, reg 445 + ret \reg 446 + #ifdef CONFIG_THUMB2_KERNEL 447 + nop 448 + #endif 449 + .endm 450 + 430 451 #endif /* __ASM_ASSEMBLER_H__ */
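
A note on the "ret.w" variant above (an observation on the patch, not
part of it): it replaces the old "movw_pc" macro used for the
coprocessor dispatch table in entry-armv.S, where every entry must be
exactly one word wide. On Thumb-2 kernels a plain "ret lr" may
assemble to a 16-bit "bx lr", so the "nop" pads the entry back to 32
bits, matching the wide branches around it:

    ret.w lr        @ CP#0: "bx lr", plus "nop" on CONFIG_THUMB2_KERNEL
    W(b)  do_fpe    @ CP#1: explicitly wide branch, same entry size
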
+1 -1
arch/arm/include/asm/entry-macro-multi.S
··· 35 35 \symbol_name: 36 36 mov r8, lr 37 37 arch_irq_handler_default 38 - mov pc, r8 38 + ret r8 39 39 .endm
+5 -5
arch/arm/kernel/debug.S
··· 90 90 ldrneb r1, [r0], #1 91 91 teqne r1, #0 92 92 bne 1b 93 - mov pc, lr 93 + ret lr 94 94 ENDPROC(printascii) 95 95 96 96 ENTRY(printch) ··· 105 105 addruart r2, r3, ip 106 106 str r2, [r0] 107 107 str r3, [r1] 108 - mov pc, lr 108 + ret lr 109 109 ENDPROC(debug_ll_addr) 110 110 #endif 111 111 ··· 116 116 mov r0, #0x04 @ SYS_WRITE0 117 117 ARM( svc #0x123456 ) 118 118 THUMB( svc #0xab ) 119 - mov pc, lr 119 + ret lr 120 120 ENDPROC(printascii) 121 121 122 122 ENTRY(printch) ··· 125 125 mov r0, #0x03 @ SYS_WRITEC 126 126 ARM( svc #0x123456 ) 127 127 THUMB( svc #0xab ) 128 - mov pc, lr 128 + ret lr 129 129 ENDPROC(printch) 130 130 131 131 ENTRY(debug_ll_addr) 132 132 mov r2, #0 133 133 str r2, [r0] 134 134 str r2, [r1] 135 - mov pc, lr 135 + ret lr 136 136 ENDPROC(debug_ll_addr) 137 137 138 138 #endif
+21 -21
arch/arm/kernel/entry-armv.S
··· 224 224 1: bl preempt_schedule_irq @ irq en/disable is done inside 225 225 ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS 226 226 tst r0, #_TIF_NEED_RESCHED 227 - moveq pc, r8 @ go again 227 + reteq r8 @ go again 228 228 b 1b 229 229 #endif 230 230 ··· 490 490 .pushsection .fixup, "ax" 491 491 .align 2 492 492 4: str r4, [sp, #S_PC] @ retry current instruction 493 - mov pc, r9 493 + ret r9 494 494 .popsection 495 495 .pushsection __ex_table,"a" 496 496 .long 1b, 4b ··· 552 552 #endif 553 553 tst r0, #0x08000000 @ only CDP/CPRT/LDC/STC have bit 27 554 554 tstne r0, #0x04000000 @ bit 26 set on both ARM and Thumb-2 555 - moveq pc, lr 555 + reteq lr 556 556 and r8, r0, #0x00000f00 @ mask out CP number 557 557 THUMB( lsr r8, r8, #8 ) 558 558 mov r7, #1 ··· 571 571 THUMB( add pc, r8 ) 572 572 nop 573 573 574 - movw_pc lr @ CP#0 574 + ret.w lr @ CP#0 575 575 W(b) do_fpe @ CP#1 (FPE) 576 576 W(b) do_fpe @ CP#2 (FPE) 577 - movw_pc lr @ CP#3 577 + ret.w lr @ CP#3 578 578 #ifdef CONFIG_CRUNCH 579 579 b crunch_task_enable @ CP#4 (MaverickCrunch) 580 580 b crunch_task_enable @ CP#5 (MaverickCrunch) 581 581 b crunch_task_enable @ CP#6 (MaverickCrunch) 582 582 #else 583 - movw_pc lr @ CP#4 584 - movw_pc lr @ CP#5 585 - movw_pc lr @ CP#6 583 + ret.w lr @ CP#4 584 + ret.w lr @ CP#5 585 + ret.w lr @ CP#6 586 586 #endif 587 - movw_pc lr @ CP#7 588 - movw_pc lr @ CP#8 589 - movw_pc lr @ CP#9 587 + ret.w lr @ CP#7 588 + ret.w lr @ CP#8 589 + ret.w lr @ CP#9 590 590 #ifdef CONFIG_VFP 591 591 W(b) do_vfp @ CP#10 (VFP) 592 592 W(b) do_vfp @ CP#11 (VFP) 593 593 #else 594 - movw_pc lr @ CP#10 (VFP) 595 - movw_pc lr @ CP#11 (VFP) 594 + ret.w lr @ CP#10 (VFP) 595 + ret.w lr @ CP#11 (VFP) 596 596 #endif 597 - movw_pc lr @ CP#12 598 - movw_pc lr @ CP#13 599 - movw_pc lr @ CP#14 (Debug) 600 - movw_pc lr @ CP#15 (Control) 597 + ret.w lr @ CP#12 598 + ret.w lr @ CP#13 599 + ret.w lr @ CP#14 (Debug) 600 + ret.w lr @ CP#15 (Control) 601 601 602 602 #ifdef NEED_CPU_ARCHITECTURE 603 603 .align 2 ··· 649 649 .popsection 650 650 651 651 ENTRY(no_fp) 652 - mov pc, lr 652 + ret lr 653 653 ENDPROC(no_fp) 654 654 655 655 __und_usr_fault_32: ··· 745 745 #ifdef CONFIG_ARM_THUMB 746 746 bx \reg 747 747 #else 748 - mov pc, \reg 748 + ret \reg 749 749 #endif 750 750 .endm 751 751 ··· 837 837 #if __LINUX_ARM_ARCH__ < 6 838 838 bcc kuser_cmpxchg32_fixup 839 839 #endif 840 - mov pc, lr 840 + ret lr 841 841 .previous 842 842 843 843 #else ··· 905 905 subs r8, r4, r7 906 906 rsbcss r8, r8, #(2b - 1b) 907 907 strcs r7, [sp, #S_PC] 908 - mov pc, lr 908 + ret lr 909 909 .previous 910 910 911 911 #else
+7 -6
arch/arm/kernel/entry-common.S
··· 8 8 * published by the Free Software Foundation. 9 9 */ 10 10 11 + #include <asm/assembler.h> 11 12 #include <asm/unistd.h> 12 13 #include <asm/ftrace.h> 13 14 #include <asm/unwind.h> ··· 89 88 cmp r5, #0 90 89 movne r0, r4 91 90 adrne lr, BSYM(1f) 92 - movne pc, r5 91 + retne r5 93 92 1: get_thread_info tsk 94 93 b ret_slow_syscall 95 94 ENDPROC(ret_from_fork) ··· 291 290 292 291 .macro mcount_exit 293 292 ldmia sp!, {r0-r3, ip, lr} 294 - mov pc, ip 293 + ret ip 295 294 .endm 296 295 297 296 ENTRY(__gnu_mcount_nc) ··· 299 298 #ifdef CONFIG_DYNAMIC_FTRACE 300 299 mov ip, lr 301 300 ldmia sp!, {lr} 302 - mov pc, ip 301 + ret ip 303 302 #else 304 303 __mcount 305 304 #endif ··· 334 333 bl ftrace_return_to_handler 335 334 mov lr, r0 @ r0 has real ret addr 336 335 ldmia sp!, {r0-r3} 337 - mov pc, lr 336 + ret lr 338 337 #endif 339 338 340 339 ENTRY(ftrace_stub) 341 340 .Lftrace_stub: 342 - mov pc, lr 341 + ret lr 343 342 ENDPROC(ftrace_stub) 344 343 345 344 #endif /* CONFIG_FUNCTION_TRACER */ ··· 562 561 streq r5, [sp, #4] 563 562 beq sys_mmap_pgoff 564 563 mov r0, #-EINVAL 565 - mov pc, lr 564 + ret lr 566 565 #else 567 566 str r5, [sp, #4] 568 567 b sys_mmap_pgoff
-14
arch/arm/kernel/entry-header.S
··· 240 240 movs pc, lr @ return & move spsr_svc into cpsr 241 241 .endm 242 242 243 - @ 244 - @ 32-bit wide "mov pc, reg" 245 - @ 246 - .macro movw_pc, reg 247 - mov pc, \reg 248 - .endm 249 243 #else /* CONFIG_THUMB2_KERNEL */ 250 244 .macro svc_exit, rpsr, irq = 0 251 245 .if \irq != 0 ··· 298 304 movs pc, lr @ return & move spsr_svc into cpsr 299 305 .endm 300 306 #endif /* ifdef CONFIG_CPU_V7M / else */ 301 - 302 - @ 303 - @ 32-bit wide "mov pc, reg" 304 - @ 305 - .macro movw_pc, reg 306 - mov pc, \reg 307 - nop 308 - .endm 309 307 #endif /* !CONFIG_THUMB2_KERNEL */ 310 308 311 309 /*
+2 -2
arch/arm/kernel/fiqasm.S
··· 32 32 ldr lr, [r0] 33 33 msr cpsr_c, r1 @ return to SVC mode 34 34 mov r0, r0 @ avoid hazard prior to ARMv4 35 - mov pc, lr 35 + ret lr 36 36 ENDPROC(__set_fiq_regs) 37 37 38 38 ENTRY(__get_fiq_regs) ··· 45 45 str lr, [r0] 46 46 msr cpsr_c, r1 @ return to SVC mode 47 47 mov r0, r0 @ avoid hazard prior to ARMv4 48 - mov pc, lr 48 + ret lr 49 49 ENDPROC(__get_fiq_regs)
+4 -3
arch/arm/kernel/head-common.S
··· 10 10 * published by the Free Software Foundation. 11 11 * 12 12 */ 13 + #include <asm/assembler.h> 13 14 14 15 #define ATAG_CORE 0x54410001 15 16 #define ATAG_CORE_SIZE ((2*4 + 3*4) >> 2) ··· 62 61 cmp r5, r6 63 62 bne 1f 64 63 65 - 2: mov pc, lr @ atag/dtb pointer is ok 64 + 2: ret lr @ atag/dtb pointer is ok 66 65 67 66 1: mov r2, #0 68 - mov pc, lr 67 + ret lr 69 68 ENDPROC(__vet_atags) 70 69 71 70 /* ··· 163 162 cmp r5, r6 164 163 blo 1b 165 164 mov r5, #0 @ unknown processor 166 - 2: mov pc, lr 165 + 2: ret lr 167 166 ENDPROC(__lookup_processor_type) 168 167 169 168 /*
+4 -4
arch/arm/kernel/head-nommu.S
··· 82 82 adr lr, BSYM(1f) @ return (PIC) address 83 83 ARM( add pc, r10, #PROCINFO_INITFUNC ) 84 84 THUMB( add r12, r10, #PROCINFO_INITFUNC ) 85 - THUMB( mov pc, r12 ) 85 + THUMB( ret r12 ) 86 86 1: b __after_proc_init 87 87 ENDPROC(stext) 88 88 ··· 119 119 mov r13, r12 @ __secondary_switched address 120 120 ARM( add pc, r10, #PROCINFO_INITFUNC ) 121 121 THUMB( add r12, r10, #PROCINFO_INITFUNC ) 122 - THUMB( mov pc, r12 ) 122 + THUMB( ret r12 ) 123 123 ENDPROC(secondary_startup) 124 124 125 125 ENTRY(__secondary_switched) ··· 164 164 #endif 165 165 mcr p15, 0, r0, c1, c0, 0 @ write control reg 166 166 #endif /* CONFIG_CPU_CP15 */ 167 - mov pc, r13 167 + ret r13 168 168 ENDPROC(__after_proc_init) 169 169 .ltorg 170 170 ··· 254 254 orr r0, r0, #CR_M @ Set SCTRL.M (MPU on) 255 255 mcr p15, 0, r0, c1, c0, 0 @ Enable MPU 256 256 isb 257 - mov pc,lr 257 + ret lr 258 258 ENDPROC(__setup_mpu) 259 259 #endif 260 260 #include "head-common.S"
+9 -9
arch/arm/kernel/head.S
··· 140 140 mov r8, r4 @ set TTBR1 to swapper_pg_dir 141 141 ARM( add pc, r10, #PROCINFO_INITFUNC ) 142 142 THUMB( add r12, r10, #PROCINFO_INITFUNC ) 143 - THUMB( mov pc, r12 ) 143 + THUMB( ret r12 ) 144 144 1: b __enable_mmu 145 145 ENDPROC(stext) 146 146 .ltorg ··· 335 335 sub r4, r4, #0x1000 @ point to the PGD table 336 336 mov r4, r4, lsr #ARCH_PGD_SHIFT 337 337 #endif 338 - mov pc, lr 338 + ret lr 339 339 ENDPROC(__create_page_tables) 340 340 .ltorg 341 341 .align ··· 383 383 ARM( add pc, r10, #PROCINFO_INITFUNC ) @ initialise processor 384 384 @ (return control reg) 385 385 THUMB( add r12, r10, #PROCINFO_INITFUNC ) 386 - THUMB( mov pc, r12 ) 386 + THUMB( ret r12 ) 387 387 ENDPROC(secondary_startup) 388 388 389 389 /* ··· 468 468 instr_sync 469 469 mov r3, r3 470 470 mov r3, r13 471 - mov pc, r3 471 + ret r3 472 472 __turn_mmu_on_end: 473 473 ENDPROC(__turn_mmu_on) 474 474 .popsection ··· 487 487 orr r4, r4, #0x0000b000 488 488 orr r4, r4, #0x00000020 @ val 0x4100b020 489 489 teq r3, r4 @ ARM 11MPCore? 490 - moveq pc, lr @ yes, assume SMP 490 + reteq lr @ yes, assume SMP 491 491 492 492 mrc p15, 0, r0, c0, c0, 5 @ read MPIDR 493 493 and r0, r0, #0xc0000000 @ multiprocessing extensions and ··· 500 500 orr r4, r4, #0x0000c000 501 501 orr r4, r4, #0x00000090 502 502 teq r3, r4 @ Check for ARM Cortex-A9 503 - movne pc, lr @ Not ARM Cortex-A9, 503 + retne lr @ Not ARM Cortex-A9, 504 504 505 505 @ If a future SoC *does* use 0x0 as the PERIPH_BASE, then the 506 506 @ below address check will need to be #ifdef'd or equivalent ··· 512 512 ARM_BE8(rev r0, r0) @ byteswap if big endian 513 513 and r0, r0, #0x3 @ number of CPUs 514 514 teq r0, #0x0 @ is 1? 515 - movne pc, lr 515 + retne lr 516 516 517 517 __fixup_smp_on_up: 518 518 adr r0, 1f ··· 539 539 .text 540 540 __do_fixup_smp_on_up: 541 541 cmp r4, r5 542 - movhs pc, lr 542 + reths lr 543 543 ldmia r4!, {r0, r6} 544 544 ARM( str r6, [r0, r3] ) 545 545 THUMB( add r0, r0, r3 ) ··· 672 672 2: cmp r4, r5 673 673 ldrcc r7, [r4], #4 @ use branch for delay slot 674 674 bcc 1b 675 - mov pc, lr 675 + ret lr 676 676 #endif 677 677 ENDPROC(__fixup_a_pv_table) 678 678
+3 -3
arch/arm/kernel/hyp-stub.S
··· 99 99 * immediately. 100 100 */ 101 101 compare_cpu_mode_with_primary r4, r5, r6, r7 102 - movne pc, lr 102 + retne lr 103 103 104 104 /* 105 105 * Once we have given up on one CPU, we do not try to install the ··· 111 111 */ 112 112 113 113 cmp r4, #HYP_MODE 114 - movne pc, lr @ give up if the CPU is not in HYP mode 114 + retne lr @ give up if the CPU is not in HYP mode 115 115 116 116 /* 117 117 * Configure HSCTLR to set correct exception endianness/instruction set ··· 201 201 @ fall through 202 202 ENTRY(__hyp_set_vectors) 203 203 __HVC(0) 204 - mov pc, lr 204 + ret lr 205 205 ENDPROC(__hyp_set_vectors) 206 206 207 207 #ifndef ZIMAGE
+5 -5
arch/arm/kernel/iwmmxt.S
··· 179 179 get_thread_info r10 180 180 #endif 181 181 4: dec_preempt_count r10, r3 182 - mov pc, lr 182 + ret lr 183 183 184 184 /* 185 185 * Back up Concan regs to save area and disable access to them ··· 265 265 mov r3, lr @ preserve return address 266 266 bl concan_dump 267 267 msr cpsr_c, ip @ restore interrupt mode 268 - mov pc, r3 268 + ret r3 269 269 270 270 /* 271 271 * Restore Concan state from given memory address ··· 301 301 mov r3, lr @ preserve return address 302 302 bl concan_load 303 303 msr cpsr_c, ip @ restore interrupt mode 304 - mov pc, r3 304 + ret r3 305 305 306 306 /* 307 307 * Concan handling on task switch ··· 323 323 add r3, r0, #TI_IWMMXT_STATE @ get next task Concan save area 324 324 ldr r2, [r2] @ get current Concan owner 325 325 teq r2, r3 @ next task owns it? 326 - movne pc, lr @ no: leave Concan disabled 326 + retne lr @ no: leave Concan disabled 327 327 328 328 1: @ flip Concan access 329 329 XSC(eor r1, r1, #0x3) ··· 350 350 eors r0, r0, r1 @ if equal... 351 351 streq r0, [r3] @ then clear ownership 352 352 msr cpsr_c, r2 @ restore interrupts 353 - mov pc, lr 353 + ret lr 354 354 355 355 .data 356 356 concan_owner:
+2 -1
arch/arm/kernel/relocate_kernel.S
··· 3 3 */ 4 4 5 5 #include <linux/linkage.h> 6 + #include <asm/assembler.h> 6 7 #include <asm/kexec.h> 7 8 8 9 .align 3 /* not needed for this code, but keeps fncpy() happy */ ··· 60 59 mov r0,#0 61 60 ldr r1,kexec_mach_type 62 61 ldr r2,kexec_boot_atags 63 - ARM( mov pc, lr ) 62 + ARM( ret lr ) 64 63 THUMB( bx lr ) 65 64 66 65 .align
+1 -1
arch/arm/kernel/sleep.S
··· 107 107 instr_sync 108 108 mov r0, r0 109 109 mov r0, r0 110 - mov pc, r3 @ jump to virtual address 110 + ret r3 @ jump to virtual address 111 111 ENDPROC(cpu_resume_mmu) 112 112 .popsection 113 113 cpu_resume_after_mmu:
+2 -1
arch/arm/kvm/init.S
··· 17 17 */ 18 18 19 19 #include <linux/linkage.h> 20 + #include <asm/assembler.h> 20 21 #include <asm/unified.h> 21 22 #include <asm/asm-offsets.h> 22 23 #include <asm/kvm_asm.h> ··· 135 134 ldr r0, =TRAMPOLINE_VA 136 135 adr r1, target 137 136 bfi r0, r1, #0, #PAGE_SHIFT 138 - mov pc, r0 137 + ret r0 139 138 140 139 target: @ We're now in the trampoline code, switch page tables 141 140 mcrr p15, 4, r2, r3, c2
+2 -1
arch/arm/lib/ashldi3.S
··· 27 27 28 28 29 29 #include <linux/linkage.h> 30 + #include <asm/assembler.h> 30 31 31 32 #ifdef __ARMEB__ 32 33 #define al r1 ··· 48 47 THUMB( lsrmi r3, al, ip ) 49 48 THUMB( orrmi ah, ah, r3 ) 50 49 mov al, al, lsl r2 51 - mov pc, lr 50 + ret lr 52 51 53 52 ENDPROC(__ashldi3) 54 53 ENDPROC(__aeabi_llsl)
+2 -1
arch/arm/lib/ashrdi3.S
··· 27 27 28 28 29 29 #include <linux/linkage.h> 30 + #include <asm/assembler.h> 30 31 31 32 #ifdef __ARMEB__ 32 33 #define al r1 ··· 48 47 THUMB( lslmi r3, ah, ip ) 49 48 THUMB( orrmi al, al, r3 ) 50 49 mov ah, ah, asr r2 51 - mov pc, lr 50 + ret lr 52 51 53 52 ENDPROC(__ashrdi3) 54 53 ENDPROC(__aeabi_lasr)
+1 -1
arch/arm/lib/backtrace.S
··· 25 25 ENTRY(c_backtrace) 26 26 27 27 #if !defined(CONFIG_FRAME_POINTER) || !defined(CONFIG_PRINTK) 28 - mov pc, lr 28 + ret lr 29 29 ENDPROC(c_backtrace) 30 30 #else 31 31 stmfd sp!, {r4 - r8, lr} @ Save an extra register so we have a location...
+3 -2
arch/arm/lib/bitops.h
··· 1 + #include <asm/assembler.h> 1 2 #include <asm/unwind.h> 2 3 3 4 #if __LINUX_ARM_ARCH__ >= 6 ··· 71 70 \instr r2, r2, r3 72 71 str r2, [r1, r0, lsl #2] 73 72 restore_irqs ip 74 - mov pc, lr 73 + ret lr 75 74 UNWIND( .fnend ) 76 75 ENDPROC(\name ) 77 76 .endm ··· 99 98 \store r2, [r1] 100 99 moveq r0, #0 101 100 restore_irqs ip 102 - mov pc, lr 101 + ret lr 103 102 UNWIND( .fnend ) 104 103 ENDPROC(\name ) 105 104 .endm
+3 -2
arch/arm/lib/bswapsdi2.S
··· 1 1 #include <linux/linkage.h> 2 + #include <asm/assembler.h> 2 3 3 4 #if __LINUX_ARM_ARCH__ >= 6 4 5 ENTRY(__bswapsi2) ··· 19 18 mov r3, r3, lsr #8 20 19 bic r3, r3, #0xff00 21 20 eor r0, r3, r0, ror #8 22 - mov pc, lr 21 + ret lr 23 22 ENDPROC(__bswapsi2) 24 23 25 24 ENTRY(__bswapdi2) ··· 32 31 bic r1, r1, #0xff00 33 32 eor r1, r1, r0, ror #8 34 33 eor r0, r3, ip, ror #8 35 - mov pc, lr 34 + ret lr 36 35 ENDPROC(__bswapdi2) 37 36 #endif
+2 -2
arch/arm/lib/call_with_stack.S
··· 36 36 mov r0, r1 37 37 38 38 adr lr, BSYM(1f) 39 - mov pc, r2 39 + ret r2 40 40 41 41 1: ldr lr, [sp] 42 42 ldr sp, [sp, #4] 43 - mov pc, lr 43 + ret lr 44 44 ENDPROC(call_with_stack)
+1 -1
arch/arm/lib/csumpartial.S
··· 97 97 #endif 98 98 #endif 99 99 adcnes sum, sum, td0 @ update checksum 100 - mov pc, lr 100 + ret lr 101 101 102 102 ENTRY(csum_partial) 103 103 stmfd sp!, {buf, lr}
+3 -2
arch/arm/lib/csumpartialcopygeneric.S
··· 7 7 * it under the terms of the GNU General Public License version 2 as 8 8 * published by the Free Software Foundation. 9 9 */ 10 + #include <asm/assembler.h> 10 11 11 12 /* 12 13 * unsigned int ··· 41 40 adcs sum, sum, ip, put_byte_1 @ update checksum 42 41 strb ip, [dst], #1 43 42 tst dst, #2 44 - moveq pc, lr @ dst is now 32bit aligned 43 + reteq lr @ dst is now 32bit aligned 45 44 46 45 .Ldst_16bit: load2b r8, ip 47 46 sub len, len, #2 ··· 49 48 strb r8, [dst], #1 50 49 adcs sum, sum, ip, put_byte_1 51 50 strb ip, [dst], #1 52 - mov pc, lr @ dst is now 32bit aligned 51 + ret lr @ dst is now 32bit aligned 53 52 54 53 /* 55 54 * Handle 0 to 7 bytes, with any alignment of source and
+9 -9
arch/arm/lib/delay-loop.S
··· 35 35 mul r0, r2, r0 @ max = 2^32-1 36 36 add r0, r0, r1, lsr #32-6 37 37 movs r0, r0, lsr #6 38 - moveq pc, lr 38 + reteq lr 39 39 40 40 /* 41 41 * loops = r0 * HZ * loops_per_jiffy / 1000000 ··· 46 46 ENTRY(__loop_delay) 47 47 subs r0, r0, #1 48 48 #if 0 49 - movls pc, lr 49 + retls lr 50 50 subs r0, r0, #1 51 - movls pc, lr 51 + retls lr 52 52 subs r0, r0, #1 53 - movls pc, lr 53 + retls lr 54 54 subs r0, r0, #1 55 - movls pc, lr 55 + retls lr 56 56 subs r0, r0, #1 57 - movls pc, lr 57 + retls lr 58 58 subs r0, r0, #1 59 - movls pc, lr 59 + retls lr 60 60 subs r0, r0, #1 61 - movls pc, lr 61 + retls lr 62 62 subs r0, r0, #1 63 63 #endif 64 64 bhi __loop_delay 65 - mov pc, lr 65 + ret lr 66 66 ENDPROC(__loop_udelay) 67 67 ENDPROC(__loop_const_udelay) 68 68 ENDPROC(__loop_delay)
+7 -6
arch/arm/lib/div64.S
··· 13 13 */ 14 14 15 15 #include <linux/linkage.h> 16 + #include <asm/assembler.h> 16 17 #include <asm/unwind.h> 17 18 18 19 #ifdef __ARMEB__ ··· 98 97 mov yl, #0 99 98 cmpeq xl, r4 100 99 movlo xh, xl 101 - movlo pc, lr 100 + retlo lr 102 101 103 102 @ The division loop for lower bit positions. 104 103 @ Here we shift remainer bits leftwards rather than moving the ··· 112 111 subcs xh, xh, r4 113 112 movs ip, ip, lsr #1 114 113 bne 4b 115 - mov pc, lr 114 + ret lr 116 115 117 116 @ The top part of remainder became zero. If carry is set 118 117 @ (the 33th bit) this is a false positive so resume the loop. 119 118 @ Otherwise, if lower part is also null then we are done. 120 119 6: bcs 5b 121 120 cmp xl, #0 122 - moveq pc, lr 121 + reteq lr 123 122 124 123 @ We still have remainer bits in the low part. Bring them up. 125 124 ··· 145 144 movs ip, ip, lsr #1 146 145 mov xh, #1 147 146 bne 4b 148 - mov pc, lr 147 + ret lr 149 148 150 149 8: @ Division by a power of 2: determine what that divisor order is 151 150 @ then simply shift values around ··· 185 184 THUMB( orr yl, yl, xh ) 186 185 mov xh, xl, lsl ip 187 186 mov xh, xh, lsr ip 188 - mov pc, lr 187 + ret lr 189 188 190 189 @ eq -> division by 1: obvious enough... 191 190 9: moveq yl, xl 192 191 moveq yh, xh 193 192 moveq xh, #0 194 - moveq pc, lr 193 + reteq lr 195 194 UNWIND(.fnend) 196 195 197 196 UNWIND(.fnstart)
+5 -5
arch/arm/lib/findbit.S
··· 35 35 2: cmp r2, r1 @ any more? 36 36 blo 1b 37 37 3: mov r0, r1 @ no free bits 38 - mov pc, lr 38 + ret lr 39 39 ENDPROC(_find_first_zero_bit_le) 40 40 41 41 /* ··· 76 76 2: cmp r2, r1 @ any more? 77 77 blo 1b 78 78 3: mov r0, r1 @ no free bits 79 - mov pc, lr 79 + ret lr 80 80 ENDPROC(_find_first_bit_le) 81 81 82 82 /* ··· 114 114 2: cmp r2, r1 @ any more? 115 115 blo 1b 116 116 3: mov r0, r1 @ no free bits 117 - mov pc, lr 117 + ret lr 118 118 ENDPROC(_find_first_zero_bit_be) 119 119 120 120 ENTRY(_find_next_zero_bit_be) ··· 148 148 2: cmp r2, r1 @ any more? 149 149 blo 1b 150 150 3: mov r0, r1 @ no free bits 151 - mov pc, lr 151 + ret lr 152 152 ENDPROC(_find_first_bit_be) 153 153 154 154 ENTRY(_find_next_bit_be) ··· 192 192 #endif 193 193 cmp r1, r0 @ Clamp to maxbit 194 194 movlo r0, r1 195 - mov pc, lr 195 + ret lr 196 196
+4 -4
arch/arm/lib/getuser.S
··· 36 36 check_uaccess r0, 1, r1, r2, __get_user_bad 37 37 1: TUSER(ldrb) r2, [r0] 38 38 mov r0, #0 39 - mov pc, lr 39 + ret lr 40 40 ENDPROC(__get_user_1) 41 41 42 42 ENTRY(__get_user_2) ··· 56 56 orr r2, rb, r2, lsl #8 57 57 #endif 58 58 mov r0, #0 59 - mov pc, lr 59 + ret lr 60 60 ENDPROC(__get_user_2) 61 61 62 62 ENTRY(__get_user_4) 63 63 check_uaccess r0, 4, r1, r2, __get_user_bad 64 64 4: TUSER(ldr) r2, [r0] 65 65 mov r0, #0 66 - mov pc, lr 66 + ret lr 67 67 ENDPROC(__get_user_4) 68 68 69 69 __get_user_bad: 70 70 mov r2, #0 71 71 mov r0, #-EFAULT 72 - mov pc, lr 72 + ret lr 73 73 ENDPROC(__get_user_bad) 74 74 75 75 .pushsection __ex_table, "a"
+1 -1
arch/arm/lib/io-readsb.S
··· 25 25 26 26 ENTRY(__raw_readsb) 27 27 teq r2, #0 @ do we have to check for the zero len? 28 - moveq pc, lr 28 + reteq lr 29 29 ands ip, r1, #3 30 30 bne .Linsb_align 31 31
+3 -3
arch/arm/lib/io-readsl.S
··· 12 12 13 13 ENTRY(__raw_readsl) 14 14 teq r2, #0 @ do we have to check for the zero len? 15 - moveq pc, lr 15 + reteq lr 16 16 ands ip, r1, #3 17 17 bne 3f 18 18 ··· 33 33 stmcsia r1!, {r3, ip} 34 34 ldrne r3, [r0, #0] 35 35 strne r3, [r1, #0] 36 - mov pc, lr 36 + ret lr 37 37 38 38 3: ldr r3, [r0] 39 39 cmp ip, #2 ··· 75 75 strb r3, [r1, #1] 76 76 8: mov r3, ip, get_byte_0 77 77 strb r3, [r1, #0] 78 - mov pc, lr 78 + ret lr 79 79 ENDPROC(__raw_readsl)
+2 -2
arch/arm/lib/io-readsw-armv3.S
··· 27 27 strb r3, [r1], #1 28 28 29 29 subs r2, r2, #1 30 - moveq pc, lr 30 + reteq lr 31 31 32 32 ENTRY(__raw_readsw) 33 33 teq r2, #0 @ do we have to check for the zero len? 34 - moveq pc, lr 34 + reteq lr 35 35 tst r1, #3 36 36 bne .Linsw_align 37 37
+1 -1
arch/arm/lib/io-readsw-armv4.S
··· 26 26 27 27 ENTRY(__raw_readsw) 28 28 teq r2, #0 29 - moveq pc, lr 29 + reteq lr 30 30 tst r1, #3 31 31 bne .Linsw_align 32 32
+1 -1
arch/arm/lib/io-writesb.S
··· 45 45 46 46 ENTRY(__raw_writesb) 47 47 teq r2, #0 @ do we have to check for the zero len? 48 - moveq pc, lr 48 + reteq lr 49 49 ands ip, r1, #3 50 50 bne .Loutsb_align 51 51
+5 -5
arch/arm/lib/io-writesl.S
··· 12 12 13 13 ENTRY(__raw_writesl) 14 14 teq r2, #0 @ do we have to check for the zero len? 15 - moveq pc, lr 15 + reteq lr 16 16 ands ip, r1, #3 17 17 bne 3f 18 18 ··· 33 33 ldrne r3, [r1, #0] 34 34 strcs ip, [r0, #0] 35 35 strne r3, [r0, #0] 36 - mov pc, lr 36 + ret lr 37 37 38 38 3: bic r1, r1, #3 39 39 ldr r3, [r1], #4 ··· 47 47 orr ip, ip, r3, lspush #16 48 48 str ip, [r0] 49 49 bne 4b 50 - mov pc, lr 50 + ret lr 51 51 52 52 5: mov ip, r3, lspull #8 53 53 ldr r3, [r1], #4 ··· 55 55 orr ip, ip, r3, lspush #24 56 56 str ip, [r0] 57 57 bne 5b 58 - mov pc, lr 58 + ret lr 59 59 60 60 6: mov ip, r3, lspull #24 61 61 ldr r3, [r1], #4 ··· 63 63 orr ip, ip, r3, lspush #8 64 64 str ip, [r0] 65 65 bne 6b 66 - mov pc, lr 66 + ret lr 67 67 ENDPROC(__raw_writesl)
+2 -2
arch/arm/lib/io-writesw-armv3.S
··· 28 28 orr r3, r3, r3, lsl #16 29 29 str r3, [r0] 30 30 subs r2, r2, #1 31 - moveq pc, lr 31 + reteq lr 32 32 33 33 ENTRY(__raw_writesw) 34 34 teq r2, #0 @ do we have to check for the zero len? 35 - moveq pc, lr 35 + reteq lr 36 36 tst r1, #3 37 37 bne .Loutsw_align 38 38
+2 -2
arch/arm/lib/io-writesw-armv4.S
··· 31 31 32 32 ENTRY(__raw_writesw) 33 33 teq r2, #0 34 - moveq pc, lr 34 + reteq lr 35 35 ands r3, r1, #3 36 36 bne .Loutsw_align 37 37 ··· 96 96 tst r2, #1 97 97 3: movne ip, r3, lsr #8 98 98 strneh ip, [r0] 99 - mov pc, lr 99 + ret lr 100 100 ENDPROC(__raw_writesw)
+13 -13
arch/arm/lib/lib1funcs.S
··· 210 210 UNWIND(.fnstart) 211 211 212 212 subs r2, r1, #1 213 - moveq pc, lr 213 + reteq lr 214 214 bcc Ldiv0 215 215 cmp r0, r1 216 216 bls 11f ··· 220 220 ARM_DIV_BODY r0, r1, r2, r3 221 221 222 222 mov r0, r2 223 - mov pc, lr 223 + ret lr 224 224 225 225 11: moveq r0, #1 226 226 movne r0, #0 227 - mov pc, lr 227 + ret lr 228 228 229 229 12: ARM_DIV2_ORDER r1, r2 230 230 231 231 mov r0, r0, lsr r2 232 - mov pc, lr 232 + ret lr 233 233 234 234 UNWIND(.fnend) 235 235 ENDPROC(__udivsi3) ··· 244 244 moveq r0, #0 245 245 tsthi r1, r2 @ see if divisor is power of 2 246 246 andeq r0, r0, r2 247 - movls pc, lr 247 + retls lr 248 248 249 249 ARM_MOD_BODY r0, r1, r2, r3 250 250 251 - mov pc, lr 251 + ret lr 252 252 253 253 UNWIND(.fnend) 254 254 ENDPROC(__umodsi3) ··· 274 274 275 275 cmp ip, #0 276 276 rsbmi r0, r0, #0 277 - mov pc, lr 277 + ret lr 278 278 279 279 10: teq ip, r0 @ same sign ? 280 280 rsbmi r0, r0, #0 281 - mov pc, lr 281 + ret lr 282 282 283 283 11: movlo r0, #0 284 284 moveq r0, ip, asr #31 285 285 orreq r0, r0, #1 286 - mov pc, lr 286 + ret lr 287 287 288 288 12: ARM_DIV2_ORDER r1, r2 289 289 290 290 cmp ip, #0 291 291 mov r0, r3, lsr r2 292 292 rsbmi r0, r0, #0 293 - mov pc, lr 293 + ret lr 294 294 295 295 UNWIND(.fnend) 296 296 ENDPROC(__divsi3) ··· 315 315 316 316 10: cmp ip, #0 317 317 rsbmi r0, r0, #0 318 - mov pc, lr 318 + ret lr 319 319 320 320 UNWIND(.fnend) 321 321 ENDPROC(__modsi3) ··· 331 331 ldmfd sp!, {r1, r2, ip, lr} 332 332 mul r3, r0, r2 333 333 sub r1, r1, r3 334 - mov pc, lr 334 + ret lr 335 335 336 336 UNWIND(.fnend) 337 337 ENDPROC(__aeabi_uidivmod) ··· 344 344 ldmfd sp!, {r1, r2, ip, lr} 345 345 mul r3, r0, r2 346 346 sub r1, r1, r3 347 - mov pc, lr 347 + ret lr 348 348 349 349 UNWIND(.fnend) 350 350 ENDPROC(__aeabi_idivmod)
+2 -1
arch/arm/lib/lshrdi3.S
··· 27 27 28 28 29 29 #include <linux/linkage.h> 30 + #include <asm/assembler.h> 30 31 31 32 #ifdef __ARMEB__ 32 33 #define al r1 ··· 48 47 THUMB( lslmi r3, ah, ip ) 49 48 THUMB( orrmi al, al, r3 ) 50 49 mov ah, ah, lsr r2 51 - mov pc, lr 50 + ret lr 52 51 53 52 ENDPROC(__lshrdi3) 54 53 ENDPROC(__aeabi_llsr)
+1 -1
arch/arm/lib/memchr.S
··· 22 22 bne 1b 23 23 sub r0, r0, #1 24 24 2: movne r0, #0 25 - mov pc, lr 25 + ret lr 26 26 ENDPROC(memchr)
+1 -1
arch/arm/lib/memset.S
··· 110 110 strneb r1, [ip], #1 111 111 tst r2, #1 112 112 strneb r1, [ip], #1 113 - mov pc, lr 113 + ret lr 114 114 115 115 6: subs r2, r2, #4 @ 1 do we have enough 116 116 blt 5b @ 1 bytes to align with?
+1 -1
arch/arm/lib/memzero.S
··· 121 121 strneb r2, [r0], #1 @ 1 122 122 tst r1, #1 @ 1 a byte left over 123 123 strneb r2, [r0], #1 @ 1 124 - mov pc, lr @ 1 124 + ret lr @ 1 125 125 ENDPROC(__memzero)
+2 -1
arch/arm/lib/muldi3.S
··· 11 11 */ 12 12 13 13 #include <linux/linkage.h> 14 + #include <asm/assembler.h> 14 15 15 16 #ifdef __ARMEB__ 16 17 #define xh r0 ··· 42 41 adc xh, xh, yh, lsr #16 43 42 adds xl, xl, ip, lsl #16 44 43 adc xh, xh, ip, lsr #16 45 - mov pc, lr 44 + ret lr 46 45 47 46 ENDPROC(__muldi3) 48 47 ENDPROC(__aeabi_lmul)
+5 -5
arch/arm/lib/putuser.S
··· 36 36 check_uaccess r0, 1, r1, ip, __put_user_bad 37 37 1: TUSER(strb) r2, [r0] 38 38 mov r0, #0 39 - mov pc, lr 39 + ret lr 40 40 ENDPROC(__put_user_1) 41 41 42 42 ENTRY(__put_user_2) ··· 60 60 #endif 61 61 #endif /* CONFIG_THUMB2_KERNEL */ 62 62 mov r0, #0 63 - mov pc, lr 63 + ret lr 64 64 ENDPROC(__put_user_2) 65 65 66 66 ENTRY(__put_user_4) 67 67 check_uaccess r0, 4, r1, ip, __put_user_bad 68 68 4: TUSER(str) r2, [r0] 69 69 mov r0, #0 70 - mov pc, lr 70 + ret lr 71 71 ENDPROC(__put_user_4) 72 72 73 73 ENTRY(__put_user_8) ··· 80 80 6: TUSER(str) r3, [r0] 81 81 #endif 82 82 mov r0, #0 83 - mov pc, lr 83 + ret lr 84 84 ENDPROC(__put_user_8) 85 85 86 86 __put_user_bad: 87 87 mov r0, #-EFAULT 88 - mov pc, lr 88 + ret lr 89 89 ENDPROC(__put_user_bad) 90 90 91 91 .pushsection __ex_table, "a"
+1 -1
arch/arm/lib/strchr.S
··· 23 23 teq r2, r1 24 24 movne r0, #0 25 25 subeq r0, r0, #1 26 - mov pc, lr 26 + ret lr 27 27 ENDPROC(strchr)
+1 -1
arch/arm/lib/strrchr.S
··· 22 22 teq r2, #0 23 23 bne 1b 24 24 mov r0, r3 25 - mov pc, lr 25 + ret lr 26 26 ENDPROC(strrchr)
+3 -2
arch/arm/lib/ucmpdi2.S
··· 11 11 */ 12 12 13 13 #include <linux/linkage.h> 14 + #include <asm/assembler.h> 14 15 15 16 #ifdef __ARMEB__ 16 17 #define xh r0 ··· 32 31 movlo r0, #0 33 32 moveq r0, #1 34 33 movhi r0, #2 35 - mov pc, lr 34 + ret lr 36 35 37 36 ENDPROC(__ucmpdi2) 38 37 ··· 45 44 movlo r0, #-1 46 45 moveq r0, #0 47 46 movhi r0, #1 48 - mov pc, lr 47 + ret lr 49 48 50 49 ENDPROC(__aeabi_ulcmp) 51 50
+1 -1
arch/arm/mach-davinci/sleep.S
··· 213 213 cmp ip, r0 214 214 bne ddr2clk_stop_done 215 215 216 - mov pc, lr 216 + ret lr 217 217 ENDPROC(davinci_ddr_psc_config) 218 218 219 219 CACHE_FLUSH:
+3 -3
arch/arm/mach-ep93xx/crunch-bits.S
··· 198 198 get_thread_info r10 199 199 #endif 200 200 2: dec_preempt_count r10, r3 201 - mov pc, lr 201 + ret lr 202 202 203 203 /* 204 204 * Back up crunch regs to save area and disable access to them ··· 277 277 mov r3, lr @ preserve return address 278 278 bl crunch_save 279 279 msr cpsr_c, ip @ restore interrupt mode 280 - mov pc, r3 280 + ret r3 281 281 282 282 /* 283 283 * Restore crunch state from given memory address ··· 310 310 mov r3, lr @ preserve return address 311 311 bl crunch_load 312 312 msr cpsr_c, ip @ restore interrupt mode 313 - mov pc, r3 313 + ret r3
+3 -2
arch/arm/mach-imx/suspend-imx6.S
··· 10 10 */ 11 11 12 12 #include <linux/linkage.h> 13 + #include <asm/assembler.h> 13 14 #include <asm/asm-offsets.h> 14 15 #include <asm/hardware/cache-l2x0.h> 15 16 #include "hardware.h" ··· 302 301 resume_mmdc 303 302 304 303 /* return to suspend finish */ 305 - mov pc, lr 304 + ret lr 306 305 307 306 resume: 308 307 /* invalidate L1 I-cache first */ ··· 326 325 mov r5, #0x1 327 326 resume_mmdc 328 327 329 - mov pc, lr 328 + ret lr 330 329 ENDPROC(imx6_suspend) 331 330 332 331 /*
+5 -5
arch/arm/mach-mvebu/coherency_ll.S
··· 46 46 ldr r1, =coherency_base 47 47 ldr r1, [r1] 48 48 2: 49 - mov pc, lr 49 + ret lr 50 50 ENDPROC(ll_get_coherency_base) 51 51 52 52 /* ··· 63 63 mov r2, #(1 << 24) 64 64 lsl r3, r2, r3 65 65 ARM_BE8(rev r3, r3) 66 - mov pc, lr 66 + ret lr 67 67 ENDPROC(ll_get_coherency_cpumask) 68 68 69 69 /* ··· 94 94 strex r1, r2, [r0] 95 95 cmp r1, #0 96 96 bne 1b 97 - mov pc, lr 97 + ret lr 98 98 ENDPROC(ll_add_cpu_to_smp_group) 99 99 100 100 ENTRY(ll_enable_coherency) ··· 118 118 bne 1b 119 119 dsb 120 120 mov r0, #0 121 - mov pc, lr 121 + ret lr 122 122 ENDPROC(ll_enable_coherency) 123 123 124 124 ENTRY(ll_disable_coherency) ··· 141 141 cmp r1, #0 142 142 bne 1b 143 143 dsb 144 - mov pc, lr 144 + ret lr 145 145 ENDPROC(ll_disable_coherency) 146 146 147 147 .align 2
+2 -1
arch/arm/mach-mvebu/headsmp-a9.S
··· 14 14 15 15 #include <linux/linkage.h> 16 16 #include <linux/init.h> 17 + #include <asm/assembler.h> 17 18 18 19 __CPUINIT 19 20 #define CPU_RESUME_ADDR_REG 0xf10182d4 ··· 25 24 armada_375_smp_cpu1_enable_code_start: 26 25 ldr r0, [pc, #4] 27 26 ldr r1, [r0] 28 - mov pc, r1 27 + ret r1 29 28 .word CPU_RESUME_ADDR_REG 30 29 armada_375_smp_cpu1_enable_code_end: 31 30
+2 -1
arch/arm/mach-omap2/sleep44xx.S
··· 10 10 */ 11 11 12 12 #include <linux/linkage.h> 13 + #include <asm/assembler.h> 13 14 #include <asm/smp_scu.h> 14 15 #include <asm/memory.h> 15 16 #include <asm/hardware/cache-l2x0.h> ··· 335 334 336 335 #ifndef CONFIG_OMAP4_ERRATA_I688 337 336 ENTRY(omap_bus_sync) 338 - mov pc, lr 337 + ret lr 339 338 ENDPROC(omap_bus_sync) 340 339 #endif 341 340
+3 -3
arch/arm/mach-omap2/sram242x.S
··· 101 101 i_dll_delay: 102 102 subs r4, r4, #0x1 103 103 bne i_dll_delay 104 - mov pc, lr 104 + ret lr 105 105 106 106 /* 107 107 * shift up or down voltage, use R9 as input to tell level. ··· 125 125 ldr r7, [r3] @ get timer value 126 126 cmp r5, r7 @ time up? 127 127 bhi volt_delay @ not yet->branch 128 - mov pc, lr @ back to caller. 128 + ret lr @ back to caller. 129 129 130 130 omap242x_sdi_cm_clksel2_pll: 131 131 .word OMAP2420_CM_REGADDR(PLL_MOD, CM_CLKSEL2) ··· 220 220 ldr r7, [r10] @ get timer value 221 221 cmp r8, r7 @ time up? 222 222 bhi volt_delay_c @ not yet->branch 223 - mov pc, lr @ back to caller 223 + ret lr @ back to caller 224 224 225 225 omap242x_srs_cm_clksel2_pll: 226 226 .word OMAP2420_CM_REGADDR(PLL_MOD, CM_CLKSEL2)
+3 -3
arch/arm/mach-omap2/sram243x.S
··· 101 101 i_dll_delay: 102 102 subs r4, r4, #0x1 103 103 bne i_dll_delay 104 - mov pc, lr 104 + ret lr 105 105 106 106 /* 107 107 * shift up or down voltage, use R9 as input to tell level. ··· 125 125 ldr r7, [r3] @ get timer value 126 126 cmp r5, r7 @ time up? 127 127 bhi volt_delay @ not yet->branch 128 - mov pc, lr @ back to caller. 128 + ret lr @ back to caller. 129 129 130 130 omap243x_sdi_cm_clksel2_pll: 131 131 .word OMAP2430_CM_REGADDR(PLL_MOD, CM_CLKSEL2) ··· 220 220 ldr r7, [r10] @ get timer value 221 221 cmp r8, r7 @ time up? 222 222 bhi volt_delay_c @ not yet->branch 223 - mov pc, lr @ back to caller 223 + ret lr @ back to caller 224 224 225 225 omap243x_srs_cm_clksel2_pll: 226 226 .word OMAP2430_CM_REGADDR(PLL_MOD, CM_CLKSEL2)
+1 -1
arch/arm/mach-pxa/mioa701_bootresume.S
··· 29 29 str r1, [r0] @ Early disable resume for next boot 30 30 ldr r0, mioa701_jumpaddr @ (Murphy's Law) 31 31 ldr r0, [r0] 32 - mov pc, r0 32 + ret r0 33 33 2: 34 34 35 35 ENTRY(mioa701_bootstrap_lg)
+2 -2
arch/arm/mach-pxa/standby.S
··· 29 29 .align 5 30 30 1: mcr p14, 0, r2, c7, c0, 0 @ put the system into Standby 31 31 str r1, [r0] @ make sure PSSR_PH/STS are clear 32 - mov pc, lr 32 + ret lr 33 33 34 34 #endif 35 35 ··· 108 108 bic r0, r0, #0x20000000 109 109 str r0, [r1, #PXA3_DMCIER] 110 110 111 - mov pc, lr 111 + ret lr 112 112 ENTRY(pm_enter_standby_end) 113 113 114 114 #endif
+1 -1
arch/arm/mach-s3c24xx/sleep-s3c2410.S
··· 66 66 streq r8, [r5] @ SDRAM power-down config 67 67 streq r9, [r6] @ CPU sleep 68 68 1: beq 1b 69 - mov pc, r14 69 + ret lr
+1 -1
arch/arm/mach-s3c24xx/sleep-s3c2412.S
··· 65 65 strne r9, [r3] 66 66 bne s3c2412_sleep_enter1 67 67 68 - mov pc, r14 68 + ret lr
+2 -1
arch/arm/mach-shmobile/headsmp.S
··· 12 12 */ 13 13 #include <linux/linkage.h> 14 14 #include <linux/init.h> 15 + #include <asm/assembler.h> 15 16 #include <asm/memory.h> 16 17 17 18 ENTRY(shmobile_invalidate_start) ··· 76 75 77 76 shmobile_smp_boot_found: 78 77 ldr r0, [r7, r1, lsl #2] 79 - mov pc, r9 78 + ret r9 80 79 ENDPROC(shmobile_smp_boot) 81 80 82 81 ENTRY(shmobile_smp_sleep)
+12 -12
arch/arm/mach-tegra/sleep-tegra20.S
··· 78 78 /* Put this CPU down */ 79 79 cpu_id r0 80 80 bl tegra20_cpu_shutdown 81 - mov pc, lr @ should never get here 81 + ret lr @ should never get here 82 82 ENDPROC(tegra20_hotplug_shutdown) 83 83 84 84 /* ··· 96 96 */ 97 97 ENTRY(tegra20_cpu_shutdown) 98 98 cmp r0, #0 99 - moveq pc, lr @ must not be called for CPU 0 99 + reteq lr @ must not be called for CPU 0 100 100 mov32 r1, TEGRA_PMC_VIRT + PMC_SCRATCH41 101 101 mov r12, #CPU_RESETTABLE 102 102 str r12, [r1] ··· 117 117 cpu_id r3 118 118 cmp r3, r0 119 119 beq . 120 - mov pc, lr 120 + ret lr 121 121 ENDPROC(tegra20_cpu_shutdown) 122 122 #endif 123 123 ··· 164 164 cmpeq r12, r0 @ !turn == cpu? 165 165 beq 1b @ while !turn == cpu && flag[!cpu] == 1 166 166 167 - mov pc, lr @ locked 167 + ret lr @ locked 168 168 ENDPROC(tegra_pen_lock) 169 169 170 170 ENTRY(tegra_pen_unlock) ··· 176 176 addne r2, r3, #PMC_SCRATCH39 177 177 mov r12, #0 178 178 str r12, [r2] 179 - mov pc, lr 179 + ret lr 180 180 ENDPROC(tegra_pen_unlock) 181 181 182 182 /* ··· 189 189 mov32 r1, TEGRA_PMC_VIRT + PMC_SCRATCH41 190 190 mov r12, #CPU_NOT_RESETTABLE 191 191 str r12, [r1] 192 - mov pc, lr 192 + ret lr 193 193 ENDPROC(tegra20_cpu_clear_resettable) 194 194 195 195 /* ··· 202 202 mov32 r1, TEGRA_PMC_VIRT + PMC_SCRATCH41 203 203 mov r12, #CPU_RESETTABLE_SOON 204 204 str r12, [r1] 205 - mov pc, lr 205 + ret lr 206 206 ENDPROC(tegra20_cpu_set_resettable_soon) 207 207 208 208 /* ··· 217 217 cmp r12, #CPU_RESETTABLE_SOON 218 218 moveq r0, #1 219 219 movne r0, #0 220 - mov pc, lr 220 + ret lr 221 221 ENDPROC(tegra20_cpu_is_resettable_soon) 222 222 223 223 /* ··· 239 239 mov32 r1, TEGRA_IRAM_LPx_RESUME_AREA 240 240 add r0, r0, r1 241 241 242 - mov pc, r3 242 + ret r3 243 243 ENDPROC(tegra20_sleep_core_finish) 244 244 245 245 /* ··· 402 402 403 403 mov32 r0, TEGRA_PMC_BASE 404 404 ldr r0, [r0, #PMC_SCRATCH41] 405 - mov pc, r0 @ jump to tegra_resume 405 + ret r0 @ jump to tegra_resume 406 406 ENDPROC(tegra20_lp1_reset) 407 407 408 408 /* ··· 455 455 mov r0, #0 /* brust policy = 32KHz */ 456 456 str r0, [r5, #CLK_RESET_SCLK_BURST] 457 457 458 - mov pc, lr 458 + ret lr 459 459 460 460 /* 461 461 * tegra20_enter_sleep ··· 535 535 adr r2, tegra20_sclk_save 536 536 str r0, [r2] 537 537 dsb 538 - mov pc, lr 538 + ret lr 539 539 540 540 tegra20_sdram_pad_address: 541 541 .word TEGRA_APB_MISC_BASE + APB_MISC_XM2CFGCPADCTRL
+7 -7
arch/arm/mach-tegra/sleep-tegra30.S
··· 142 142 /* Powergate this CPU */ 143 143 mov r0, #TEGRA30_POWER_HOTPLUG_SHUTDOWN 144 144 bl tegra30_cpu_shutdown 145 - mov pc, lr @ should never get here 145 + ret lr @ should never get here 146 146 ENDPROC(tegra30_hotplug_shutdown) 147 147 148 148 /* ··· 161 161 bne _no_cpu0_chk @ It's not Tegra30 162 162 163 163 cmp r3, #0 164 - moveq pc, lr @ Must never be called for CPU 0 164 + reteq lr @ Must never be called for CPU 0 165 165 _no_cpu0_chk: 166 166 167 167 ldr r12, =TEGRA_FLOW_CTRL_VIRT ··· 266 266 mov32 r1, TEGRA_IRAM_LPx_RESUME_AREA 267 267 add r0, r0, r1 268 268 269 - mov pc, r3 269 + ret r3 270 270 ENDPROC(tegra30_sleep_core_finish) 271 271 272 272 /* ··· 285 285 mov r0, #0 @ power mode flags (!hotplug) 286 286 bl tegra30_cpu_shutdown 287 287 mov r0, #1 @ never return here 288 - mov pc, r7 288 + ret r7 289 289 ENDPROC(tegra30_sleep_cpu_secondary_finish) 290 290 291 291 /* ··· 529 529 530 530 mov32 r0, TEGRA_PMC_BASE 531 531 ldr r0, [r0, #PMC_SCRATCH41] 532 - mov pc, r0 @ jump to tegra_resume 532 + ret r0 @ jump to tegra_resume 533 533 ENDPROC(tegra30_lp1_reset) 534 534 535 535 .align L1_CACHE_SHIFT ··· 659 659 mov r0, #0 /* brust policy = 32KHz */ 660 660 str r0, [r5, #CLK_RESET_SCLK_BURST] 661 661 662 - mov pc, lr 662 + ret lr 663 663 664 664 /* 665 665 * tegra30_enter_sleep ··· 819 819 820 820 dsb 821 821 822 - mov pc, lr 822 + ret lr 823 823 824 824 .ltorg 825 825 /* dummy symbol for end of IRAM */
+4 -4
arch/arm/mach-tegra/sleep.S
··· 87 87 mcrne p15, 0x1, r0, c9, c0, 2 88 88 _exit_init_l2_a15: 89 89 90 - mov pc, lr 90 + ret lr 91 91 ENDPROC(tegra_init_l2_for_a15) 92 92 93 93 /* ··· 111 111 add r3, r3, r0 112 112 mov r0, r1 113 113 114 - mov pc, r3 114 + ret r3 115 115 ENDPROC(tegra_sleep_cpu_finish) 116 116 117 117 /* ··· 139 139 moveq r3, #0 140 140 streq r3, [r2, #L2X0_CTRL] 141 141 #endif 142 - mov pc, r0 142 + ret r0 143 143 ENDPROC(tegra_shut_off_mmu) 144 144 .popsection 145 145 ··· 156 156 str r0, [r5, #CLK_RESET_CCLK_BURST] 157 157 mov r0, #0 158 158 str r0, [r5, #CLK_RESET_CCLK_DIVIDER] 159 - mov pc, lr 159 + ret lr 160 160 ENDPROC(tegra_switch_cpu_to_pllp) 161 161 #endif
+10 -9
arch/arm/mm/cache-fa.S
··· 15 15 */ 16 16 #include <linux/linkage.h> 17 17 #include <linux/init.h> 18 + #include <asm/assembler.h> 18 19 #include <asm/memory.h> 19 20 #include <asm/page.h> 20 21 ··· 46 45 ENTRY(fa_flush_icache_all) 47 46 mov r0, #0 48 47 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 49 - mov pc, lr 48 + ret lr 50 49 ENDPROC(fa_flush_icache_all) 51 50 52 51 /* ··· 72 71 mcrne p15, 0, ip, c7, c5, 6 @ invalidate BTB 73 72 mcrne p15, 0, ip, c7, c10, 4 @ drain write buffer 74 73 mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush 75 - mov pc, lr 74 + ret lr 76 75 77 76 /* 78 77 * flush_user_cache_range(start, end, flags) ··· 100 99 mcrne p15, 0, ip, c7, c5, 6 @ invalidate BTB 101 100 mcrne p15, 0, ip, c7, c10, 4 @ data write barrier 102 101 mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush 103 - mov pc, lr 102 + ret lr 104 103 105 104 /* 106 105 * coherent_kern_range(start, end) ··· 136 135 mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB 137 136 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 138 137 mcr p15, 0, r0, c7, c5, 4 @ prefetch flush 139 - mov pc, lr 138 + ret lr 140 139 141 140 /* 142 141 * flush_kern_dcache_area(void *addr, size_t size) ··· 156 155 mov r0, #0 157 156 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 158 157 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 159 - mov pc, lr 158 + ret lr 160 159 161 160 /* 162 161 * dma_inv_range(start, end) ··· 182 181 blo 1b 183 182 mov r0, #0 184 183 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 185 - mov pc, lr 184 + ret lr 186 185 187 186 /* 188 187 * dma_clean_range(start, end) ··· 200 199 blo 1b 201 200 mov r0, #0 202 201 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 203 - mov pc, lr 202 + ret lr 204 203 205 204 /* 206 205 * dma_flush_range(start,end) ··· 215 214 blo 1b 216 215 mov r0, #0 217 216 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 218 - mov pc, lr 217 + ret lr 219 218 220 219 /* 221 220 * dma_map_area(start, size, dir) ··· 238 237 * - dir - DMA direction 239 238 */ 240 239 ENTRY(fa_dma_unmap_area) 241 - mov pc, lr 240 + ret lr 242 241 ENDPROC(fa_dma_unmap_area) 243 242 244 243 .globl fa_flush_kern_cache_louis
+3 -2
arch/arm/mm/cache-nop.S
··· 5 5 */ 6 6 #include <linux/linkage.h> 7 7 #include <linux/init.h> 8 + #include <asm/assembler.h> 8 9 9 10 #include "proc-macros.S" 10 11 11 12 ENTRY(nop_flush_icache_all) 12 - mov pc, lr 13 + ret lr 13 14 ENDPROC(nop_flush_icache_all) 14 15 15 16 .globl nop_flush_kern_cache_all ··· 30 29 31 30 ENTRY(nop_coherent_user_range) 32 31 mov r0, 0 33 - mov pc, lr 32 + ret lr 34 33 ENDPROC(nop_coherent_user_range) 35 34 36 35 .globl nop_flush_kern_dcache_area
+7 -6
arch/arm/mm/cache-v4.S
··· 9 9 */ 10 10 #include <linux/linkage.h> 11 11 #include <linux/init.h> 12 + #include <asm/assembler.h> 12 13 #include <asm/page.h> 13 14 #include "proc-macros.S" 14 15 ··· 19 18 * Unconditionally clean and invalidate the entire icache. 20 19 */ 21 20 ENTRY(v4_flush_icache_all) 22 - mov pc, lr 21 + ret lr 23 22 ENDPROC(v4_flush_icache_all) 24 23 25 24 /* ··· 41 40 #ifdef CONFIG_CPU_CP15 42 41 mov r0, #0 43 42 mcr p15, 0, r0, c7, c7, 0 @ flush ID cache 44 - mov pc, lr 43 + ret lr 45 44 #else 46 45 /* FALLTHROUGH */ 47 46 #endif ··· 60 59 #ifdef CONFIG_CPU_CP15 61 60 mov ip, #0 62 61 mcr p15, 0, ip, c7, c7, 0 @ flush ID cache 63 - mov pc, lr 62 + ret lr 64 63 #else 65 64 /* FALLTHROUGH */ 66 65 #endif ··· 90 89 */ 91 90 ENTRY(v4_coherent_user_range) 92 91 mov r0, #0 93 - mov pc, lr 92 + ret lr 94 93 95 94 /* 96 95 * flush_kern_dcache_area(void *addr, size_t size) ··· 117 116 mov r0, #0 118 117 mcr p15, 0, r0, c7, c7, 0 @ flush ID cache 119 118 #endif 120 - mov pc, lr 119 + ret lr 121 120 122 121 /* 123 122 * dma_unmap_area(start, size, dir) ··· 137 136 * - dir - DMA direction 138 137 */ 139 138 ENTRY(v4_dma_map_area) 140 - mov pc, lr 139 + ret lr 141 140 ENDPROC(v4_dma_unmap_area) 142 141 ENDPROC(v4_dma_map_area) 143 142
+8 -7
arch/arm/mm/cache-v4wb.S
··· 9 9 */ 10 10 #include <linux/linkage.h> 11 11 #include <linux/init.h> 12 + #include <asm/assembler.h> 12 13 #include <asm/memory.h> 13 14 #include <asm/page.h> 14 15 #include "proc-macros.S" ··· 59 58 ENTRY(v4wb_flush_icache_all) 60 59 mov r0, #0 61 60 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 62 - mov pc, lr 61 + ret lr 63 62 ENDPROC(v4wb_flush_icache_all) 64 63 65 64 /* ··· 95 94 blo 1b 96 95 #endif 97 96 mcr p15, 0, ip, c7, c10, 4 @ drain write buffer 98 - mov pc, lr 97 + ret lr 99 98 100 99 /* 101 100 * flush_user_cache_range(start, end, flags) ··· 123 122 blo 1b 124 123 tst r2, #VM_EXEC 125 124 mcrne p15, 0, ip, c7, c10, 4 @ drain write buffer 126 - mov pc, lr 125 + ret lr 127 126 128 127 /* 129 128 * flush_kern_dcache_area(void *addr, size_t size) ··· 171 170 mov r0, #0 172 171 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 173 172 mcr p15, 0, r0, c7, c10, 4 @ drain WB 174 - mov pc, lr 173 + ret lr 175 174 176 175 177 176 /* ··· 196 195 cmp r0, r1 197 196 blo 1b 198 197 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 199 - mov pc, lr 198 + ret lr 200 199 201 200 /* 202 201 * dma_clean_range(start, end) ··· 213 212 cmp r0, r1 214 213 blo 1b 215 214 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 216 - mov pc, lr 215 + ret lr 217 216 218 217 /* 219 218 * dma_flush_range(start, end) ··· 249 248 * - dir - DMA direction 250 249 */ 251 250 ENTRY(v4wb_dma_unmap_area) 252 - mov pc, lr 251 + ret lr 253 252 ENDPROC(v4wb_dma_unmap_area) 254 253 255 254 .globl v4wb_flush_kern_cache_louis
+7 -6
arch/arm/mm/cache-v4wt.S
··· 13 13 */ 14 14 #include <linux/linkage.h> 15 15 #include <linux/init.h> 16 + #include <asm/assembler.h> 16 17 #include <asm/page.h> 17 18 #include "proc-macros.S" 18 19 ··· 49 48 ENTRY(v4wt_flush_icache_all) 50 49 mov r0, #0 51 50 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 52 - mov pc, lr 51 + ret lr 53 52 ENDPROC(v4wt_flush_icache_all) 54 53 55 54 /* ··· 72 71 tst r2, #VM_EXEC 73 72 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 74 73 mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache 75 - mov pc, lr 74 + ret lr 76 75 77 76 /* 78 77 * flush_user_cache_range(start, end, flags) ··· 95 94 add r0, r0, #CACHE_DLINESIZE 96 95 cmp r0, r1 97 96 blo 1b 98 - mov pc, lr 97 + ret lr 99 98 100 99 /* 101 100 * coherent_kern_range(start, end) ··· 127 126 cmp r0, r1 128 127 blo 1b 129 128 mov r0, #0 130 - mov pc, lr 129 + ret lr 131 130 132 131 /* 133 132 * flush_kern_dcache_area(void *addr, size_t size) ··· 161 160 add r0, r0, #CACHE_DLINESIZE 162 161 cmp r0, r1 163 162 blo 1b 164 - mov pc, lr 163 + ret lr 165 164 166 165 /* 167 166 * dma_flush_range(start, end) ··· 193 192 * - dir - DMA direction 194 193 */ 195 194 ENTRY(v4wt_dma_map_area) 196 - mov pc, lr 195 + ret lr 197 196 ENDPROC(v4wt_dma_unmap_area) 198 197 ENDPROC(v4wt_dma_map_area) 199 198
+10 -10
arch/arm/mm/cache-v6.S
··· 51 51 #else 52 52 mcr p15, 0, r0, c7, c5, 0 @ invalidate I-cache 53 53 #endif 54 - mov pc, lr 54 + ret lr 55 55 ENDPROC(v6_flush_icache_all) 56 56 57 57 /* ··· 73 73 #else 74 74 mcr p15, 0, r0, c7, c15, 0 @ Cache clean+invalidate 75 75 #endif 76 - mov pc, lr 76 + ret lr 77 77 78 78 /* 79 79 * v6_flush_cache_all() ··· 98 98 * - we have a VIPT cache. 99 99 */ 100 100 ENTRY(v6_flush_user_cache_range) 101 - mov pc, lr 101 + ret lr 102 102 103 103 /* 104 104 * v6_coherent_kern_range(start,end) ··· 150 150 #else 151 151 mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB 152 152 #endif 153 - mov pc, lr 153 + ret lr 154 154 155 155 /* 156 156 * Fault handling for the cache operation above. If the virtual address in r0 ··· 158 158 */ 159 159 9001: 160 160 mov r0, #-EFAULT 161 - mov pc, lr 161 + ret lr 162 162 UNWIND(.fnend ) 163 163 ENDPROC(v6_coherent_user_range) 164 164 ENDPROC(v6_coherent_kern_range) ··· 188 188 mov r0, #0 189 189 mcr p15, 0, r0, c7, c10, 4 190 190 #endif 191 - mov pc, lr 191 + ret lr 192 192 193 193 194 194 /* ··· 239 239 blo 1b 240 240 mov r0, #0 241 241 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 242 - mov pc, lr 242 + ret lr 243 243 244 244 /* 245 245 * v6_dma_clean_range(start,end) ··· 262 262 blo 1b 263 263 mov r0, #0 264 264 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 265 - mov pc, lr 265 + ret lr 266 266 267 267 /* 268 268 * v6_dma_flush_range(start,end) ··· 290 290 blo 1b 291 291 mov r0, #0 292 292 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 293 - mov pc, lr 293 + ret lr 294 294 295 295 /* 296 296 * dma_map_area(start, size, dir) ··· 323 323 teq r2, #DMA_TO_DEVICE 324 324 bne v6_dma_inv_range 325 325 #endif 326 - mov pc, lr 326 + ret lr 327 327 ENDPROC(v6_dma_unmap_area) 328 328 329 329 .globl v6_flush_kern_cache_louis
+15 -15
arch/arm/mm/cache-v7.S
··· 61 61 bgt 1b 62 62 dsb st 63 63 isb 64 - mov pc, lr 64 + ret lr 65 65 ENDPROC(v7_invalidate_l1) 66 66 67 67 /* ··· 76 76 mov r0, #0 77 77 ALT_SMP(mcr p15, 0, r0, c7, c1, 0) @ invalidate I-cache inner shareable 78 78 ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate 79 - mov pc, lr 79 + ret lr 80 80 ENDPROC(v7_flush_icache_all) 81 81 82 82 /* ··· 94 94 ALT_UP(ands r3, r0, #(7 << 27)) @ extract LoUU from clidr 95 95 #ifdef CONFIG_ARM_ERRATA_643719 96 96 ALT_SMP(mrceq p15, 0, r2, c0, c0, 0) @ read main ID register 97 - ALT_UP(moveq pc, lr) @ LoUU is zero, so nothing to do 97 + ALT_UP(reteq lr) @ LoUU is zero, so nothing to do 98 98 ldreq r1, =0x410fc090 @ ID of ARM Cortex A9 r0p? 99 99 biceq r2, r2, #0x0000000f @ clear minor revision number 100 100 teqeq r2, r1 @ test for errata affected core and if so... ··· 102 102 #endif 103 103 ALT_SMP(mov r3, r3, lsr #20) @ r3 = LoUIS * 2 104 104 ALT_UP(mov r3, r3, lsr #26) @ r3 = LoUU * 2 105 - moveq pc, lr @ return if level == 0 105 + reteq lr @ return if level == 0 106 106 mov r10, #0 @ r10 (starting level) = 0 107 107 b flush_levels @ start flushing cache levels 108 108 ENDPROC(v7_flush_dcache_louis) ··· 168 168 mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr 169 169 dsb st 170 170 isb 171 - mov pc, lr 171 + ret lr 172 172 ENDPROC(v7_flush_dcache_all) 173 173 174 174 /* ··· 191 191 ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate 192 192 ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} ) 193 193 THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} ) 194 - mov pc, lr 194 + ret lr 195 195 ENDPROC(v7_flush_kern_cache_all) 196 196 197 197 /* ··· 209 209 ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate 210 210 ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} ) 211 211 THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} ) 212 - mov pc, lr 212 + ret lr 213 213 ENDPROC(v7_flush_kern_cache_louis) 214 214 215 215 /* ··· 235 235 * - we have a VIPT cache. 236 236 */ 237 237 ENTRY(v7_flush_user_cache_range) 238 - mov pc, lr 238 + ret lr 239 239 ENDPROC(v7_flush_user_cache_all) 240 240 ENDPROC(v7_flush_user_cache_range) 241 241 ··· 296 296 ALT_UP(mcr p15, 0, r0, c7, c5, 6) @ invalidate BTB 297 297 dsb ishst 298 298 isb 299 - mov pc, lr 299 + ret lr 300 300 301 301 /* 302 302 * Fault handling for the cache operation above. If the virtual address in r0 ··· 307 307 dsb 308 308 #endif 309 309 mov r0, #-EFAULT 310 - mov pc, lr 310 + ret lr 311 311 UNWIND(.fnend ) 312 312 ENDPROC(v7_coherent_kern_range) 313 313 ENDPROC(v7_coherent_user_range) ··· 336 336 cmp r0, r1 337 337 blo 1b 338 338 dsb st 339 - mov pc, lr 339 + ret lr 340 340 ENDPROC(v7_flush_kern_dcache_area) 341 341 342 342 /* ··· 369 369 cmp r0, r1 370 370 blo 1b 371 371 dsb st 372 - mov pc, lr 372 + ret lr 373 373 ENDPROC(v7_dma_inv_range) 374 374 375 375 /* ··· 391 391 cmp r0, r1 392 392 blo 1b 393 393 dsb st 394 - mov pc, lr 394 + ret lr 395 395 ENDPROC(v7_dma_clean_range) 396 396 397 397 /* ··· 413 413 cmp r0, r1 414 414 blo 1b 415 415 dsb st 416 - mov pc, lr 416 + ret lr 417 417 ENDPROC(v7_dma_flush_range) 418 418 419 419 /* ··· 439 439 add r1, r1, r0 440 440 teq r2, #DMA_TO_DEVICE 441 441 bne v7_dma_inv_range 442 - mov pc, lr 442 + ret lr 443 443 ENDPROC(v7_dma_unmap_area) 444 444 445 445 __INITDATA
+4 -3
arch/arm/mm/l2c-l2x0-resume.S
··· 6 6 * This code can only be used to if you are running in the secure world. 7 7 */ 8 8 #include <linux/linkage.h> 9 + #include <asm/assembler.h> 9 10 #include <asm/hardware/cache-l2x0.h> 10 11 11 12 .text ··· 28 27 29 28 @ Check that the address has been initialised 30 29 teq r1, #0 31 - moveq pc, lr 30 + reteq lr 32 31 33 32 @ The prefetch and power control registers are revision dependent 34 33 @ and can be written whether or not the L2 cache is enabled ··· 42 41 @ Don't setup the L2 cache if it is already enabled 43 42 ldr r0, [r1, #L2X0_CTRL] 44 43 tst r0, #L2X0_CTRL_EN 45 - movne pc, lr 44 + retne lr 46 45 47 46 str r3, [r1, #L310_TAG_LATENCY_CTRL] 48 47 str r4, [r1, #L310_DATA_LATENCY_CTRL] ··· 52 51 str r2, [r1, #L2X0_AUX_CTRL] 53 52 mov r9, #L2X0_CTRL_EN 54 53 str r9, [r1, #L2X0_CTRL] 55 - mov pc, lr 54 + ret lr 56 55 ENDPROC(l2c310_early_resume) 57 56 58 57 .align
+17 -17
arch/arm/mm/proc-arm1020.S
··· 73 73 * cpu_arm1020_proc_init() 74 74 */ 75 75 ENTRY(cpu_arm1020_proc_init) 76 - mov pc, lr 76 + ret lr 77 77 78 78 /* 79 79 * cpu_arm1020_proc_fin() ··· 83 83 bic r0, r0, #0x1000 @ ...i............ 84 84 bic r0, r0, #0x000e @ ............wca. 85 85 mcr p15, 0, r0, c1, c0, 0 @ disable caches 86 - mov pc, lr 86 + ret lr 87 87 88 88 /* 89 89 * cpu_arm1020_reset(loc) ··· 107 107 bic ip, ip, #0x000f @ ............wcam 108 108 bic ip, ip, #0x1100 @ ...i...s........ 109 109 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 110 - mov pc, r0 110 + ret r0 111 111 ENDPROC(cpu_arm1020_reset) 112 112 .popsection 113 113 ··· 117 117 .align 5 118 118 ENTRY(cpu_arm1020_do_idle) 119 119 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt 120 - mov pc, lr 120 + ret lr 121 121 122 122 /* ================================= CACHE ================================ */ 123 123 ··· 133 133 mov r0, #0 134 134 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 135 135 #endif 136 - mov pc, lr 136 + ret lr 137 137 ENDPROC(arm1020_flush_icache_all) 138 138 139 139 /* ··· 169 169 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 170 170 #endif 171 171 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 172 - mov pc, lr 172 + ret lr 173 173 174 174 /* 175 175 * flush_user_cache_range(start, end, flags) ··· 200 200 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 201 201 #endif 202 202 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 203 - mov pc, lr 203 + ret lr 204 204 205 205 /* 206 206 * coherent_kern_range(start, end) ··· 242 242 blo 1b 243 243 mcr p15, 0, ip, c7, c10, 4 @ drain WB 244 244 mov r0, #0 245 - mov pc, lr 245 + ret lr 246 246 247 247 /* 248 248 * flush_kern_dcache_area(void *addr, size_t size) ··· 264 264 blo 1b 265 265 #endif 266 266 mcr p15, 0, ip, c7, c10, 4 @ drain WB 267 - mov pc, lr 267 + ret lr 268 268 269 269 /* 270 270 * dma_inv_range(start, end) ··· 297 297 blo 1b 298 298 #endif 299 299 mcr p15, 0, ip, c7, c10, 4 @ drain WB 300 - mov pc, lr 300 + ret lr 301 301 302 302 /* 303 303 * dma_clean_range(start, end) ··· 320 320 blo 1b 321 321 #endif 322 322 mcr p15, 0, ip, c7, c10, 4 @ drain WB 323 - mov pc, lr 323 + ret lr 324 324 325 325 /* 326 326 * dma_flush_range(start, end) ··· 342 342 blo 1b 343 343 #endif 344 344 mcr p15, 0, ip, c7, c10, 4 @ drain WB 345 - mov pc, lr 345 + ret lr 346 346 347 347 /* 348 348 * dma_map_area(start, size, dir) ··· 365 365 * - dir - DMA direction 366 366 */ 367 367 ENTRY(arm1020_dma_unmap_area) 368 - mov pc, lr 368 + ret lr 369 369 ENDPROC(arm1020_dma_unmap_area) 370 370 371 371 .globl arm1020_flush_kern_cache_louis ··· 384 384 subs r1, r1, #CACHE_DLINESIZE 385 385 bhi 1b 386 386 #endif 387 - mov pc, lr 387 + ret lr 388 388 389 389 /* =============================== PageTable ============================== */ 390 390 ··· 423 423 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer 424 424 mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs 425 425 #endif /* CONFIG_MMU */ 426 - mov pc, lr 426 + ret lr 427 427 428 428 /* 429 429 * cpu_arm1020_set_pte(ptep, pte) ··· 441 441 #endif 442 442 mcr p15, 0, r0, c7, c10, 4 @ drain WB 443 443 #endif /* CONFIG_MMU */ 444 - mov pc, lr 444 + ret lr 445 445 446 446 .type __arm1020_setup, #function 447 447 __arm1020_setup: ··· 460 460 #ifdef CONFIG_CPU_CACHE_ROUND_ROBIN 461 461 orr r0, r0, #0x4000 @ .R.. .... .... .... 462 462 #endif 463 - mov pc, lr 463 + ret lr 464 464 .size __arm1020_setup, . - __arm1020_setup 465 465 466 466 /*
+17 -17
arch/arm/mm/proc-arm1020e.S
··· 73 73 * cpu_arm1020e_proc_init() 74 74 */ 75 75 ENTRY(cpu_arm1020e_proc_init) 76 - mov pc, lr 76 + ret lr 77 77 78 78 /* 79 79 * cpu_arm1020e_proc_fin() ··· 83 83 bic r0, r0, #0x1000 @ ...i............ 84 84 bic r0, r0, #0x000e @ ............wca. 85 85 mcr p15, 0, r0, c1, c0, 0 @ disable caches 86 - mov pc, lr 86 + ret lr 87 87 88 88 /* 89 89 * cpu_arm1020e_reset(loc) ··· 107 107 bic ip, ip, #0x000f @ ............wcam 108 108 bic ip, ip, #0x1100 @ ...i...s........ 109 109 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 110 - mov pc, r0 110 + ret r0 111 111 ENDPROC(cpu_arm1020e_reset) 112 112 .popsection 113 113 ··· 117 117 .align 5 118 118 ENTRY(cpu_arm1020e_do_idle) 119 119 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt 120 - mov pc, lr 120 + ret lr 121 121 122 122 /* ================================= CACHE ================================ */ 123 123 ··· 133 133 mov r0, #0 134 134 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 135 135 #endif 136 - mov pc, lr 136 + ret lr 137 137 ENDPROC(arm1020e_flush_icache_all) 138 138 139 139 /* ··· 168 168 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 169 169 #endif 170 170 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 171 - mov pc, lr 171 + ret lr 172 172 173 173 /* 174 174 * flush_user_cache_range(start, end, flags) ··· 197 197 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 198 198 #endif 199 199 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 200 - mov pc, lr 200 + ret lr 201 201 202 202 /* 203 203 * coherent_kern_range(start, end) ··· 236 236 blo 1b 237 237 mcr p15, 0, ip, c7, c10, 4 @ drain WB 238 238 mov r0, #0 239 - mov pc, lr 239 + ret lr 240 240 241 241 /* 242 242 * flush_kern_dcache_area(void *addr, size_t size) ··· 257 257 blo 1b 258 258 #endif 259 259 mcr p15, 0, ip, c7, c10, 4 @ drain WB 260 - mov pc, lr 260 + ret lr 261 261 262 262 /* 263 263 * dma_inv_range(start, end) ··· 286 286 blo 1b 287 287 #endif 288 288 mcr p15, 0, ip, c7, c10, 4 @ drain WB 289 - mov pc, lr 289 + ret lr 290 290 291 291 /* 292 292 * dma_clean_range(start, end) ··· 308 308 blo 1b 309 309 #endif 310 310 mcr p15, 0, ip, c7, c10, 4 @ drain WB 311 - mov pc, lr 311 + ret lr 312 312 313 313 /* 314 314 * dma_flush_range(start, end) ··· 328 328 blo 1b 329 329 #endif 330 330 mcr p15, 0, ip, c7, c10, 4 @ drain WB 331 - mov pc, lr 331 + ret lr 332 332 333 333 /* 334 334 * dma_map_area(start, size, dir) ··· 351 351 * - dir - DMA direction 352 352 */ 353 353 ENTRY(arm1020e_dma_unmap_area) 354 - mov pc, lr 354 + ret lr 355 355 ENDPROC(arm1020e_dma_unmap_area) 356 356 357 357 .globl arm1020e_flush_kern_cache_louis ··· 369 369 subs r1, r1, #CACHE_DLINESIZE 370 370 bhi 1b 371 371 #endif 372 - mov pc, lr 372 + ret lr 373 373 374 374 /* =============================== PageTable ============================== */ 375 375 ··· 407 407 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer 408 408 mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs 409 409 #endif 410 - mov pc, lr 410 + ret lr 411 411 412 412 /* 413 413 * cpu_arm1020e_set_pte(ptep, pte) ··· 423 423 mcr p15, 0, r0, c7, c10, 1 @ clean D entry 424 424 #endif 425 425 #endif /* CONFIG_MMU */ 426 - mov pc, lr 426 + ret lr 427 427 428 428 .type __arm1020e_setup, #function 429 429 __arm1020e_setup: ··· 441 441 #ifdef CONFIG_CPU_CACHE_ROUND_ROBIN 442 442 orr r0, r0, #0x4000 @ .R.. .... .... .... 443 443 #endif 444 - mov pc, lr 444 + ret lr 445 445 .size __arm1020e_setup, . - __arm1020e_setup 446 446 447 447 /*
+17 -17
arch/arm/mm/proc-arm1022.S
··· 62 62 * cpu_arm1022_proc_init() 63 63 */ 64 64 ENTRY(cpu_arm1022_proc_init) 65 - mov pc, lr 65 + ret lr 66 66 67 67 /* 68 68 * cpu_arm1022_proc_fin() ··· 72 72 bic r0, r0, #0x1000 @ ...i............ 73 73 bic r0, r0, #0x000e @ ............wca. 74 74 mcr p15, 0, r0, c1, c0, 0 @ disable caches 75 - mov pc, lr 75 + ret lr 76 76 77 77 /* 78 78 * cpu_arm1022_reset(loc) ··· 96 96 bic ip, ip, #0x000f @ ............wcam 97 97 bic ip, ip, #0x1100 @ ...i...s........ 98 98 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 99 - mov pc, r0 99 + ret r0 100 100 ENDPROC(cpu_arm1022_reset) 101 101 .popsection 102 102 ··· 106 106 .align 5 107 107 ENTRY(cpu_arm1022_do_idle) 108 108 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt 109 - mov pc, lr 109 + ret lr 110 110 111 111 /* ================================= CACHE ================================ */ 112 112 ··· 122 122 mov r0, #0 123 123 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 124 124 #endif 125 - mov pc, lr 125 + ret lr 126 126 ENDPROC(arm1022_flush_icache_all) 127 127 128 128 /* ··· 156 156 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 157 157 #endif 158 158 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 159 - mov pc, lr 159 + ret lr 160 160 161 161 /* 162 162 * flush_user_cache_range(start, end, flags) ··· 185 185 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 186 186 #endif 187 187 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 188 - mov pc, lr 188 + ret lr 189 189 190 190 /* 191 191 * coherent_kern_range(start, end) ··· 225 225 blo 1b 226 226 mcr p15, 0, ip, c7, c10, 4 @ drain WB 227 227 mov r0, #0 228 - mov pc, lr 228 + ret lr 229 229 230 230 /* 231 231 * flush_kern_dcache_area(void *addr, size_t size) ··· 246 246 blo 1b 247 247 #endif 248 248 mcr p15, 0, ip, c7, c10, 4 @ drain WB 249 - mov pc, lr 249 + ret lr 250 250 251 251 /* 252 252 * dma_inv_range(start, end) ··· 275 275 blo 1b 276 276 #endif 277 277 mcr p15, 0, ip, c7, c10, 4 @ drain WB 278 - mov pc, lr 278 + ret lr 279 279 280 280 /* 281 281 * dma_clean_range(start, end) ··· 297 297 blo 1b 298 298 #endif 299 299 mcr p15, 0, ip, c7, c10, 4 @ drain WB 300 - mov pc, lr 300 + ret lr 301 301 302 302 /* 303 303 * dma_flush_range(start, end) ··· 317 317 blo 1b 318 318 #endif 319 319 mcr p15, 0, ip, c7, c10, 4 @ drain WB 320 - mov pc, lr 320 + ret lr 321 321 322 322 /* 323 323 * dma_map_area(start, size, dir) ··· 340 340 * - dir - DMA direction 341 341 */ 342 342 ENTRY(arm1022_dma_unmap_area) 343 - mov pc, lr 343 + ret lr 344 344 ENDPROC(arm1022_dma_unmap_area) 345 345 346 346 .globl arm1022_flush_kern_cache_louis ··· 358 358 subs r1, r1, #CACHE_DLINESIZE 359 359 bhi 1b 360 360 #endif 361 - mov pc, lr 361 + ret lr 362 362 363 363 /* =============================== PageTable ============================== */ 364 364 ··· 389 389 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer 390 390 mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs 391 391 #endif 392 - mov pc, lr 392 + ret lr 393 393 394 394 /* 395 395 * cpu_arm1022_set_pte_ext(ptep, pte, ext) ··· 405 405 mcr p15, 0, r0, c7, c10, 1 @ clean D entry 406 406 #endif 407 407 #endif /* CONFIG_MMU */ 408 - mov pc, lr 408 + ret lr 409 409 410 410 .type __arm1022_setup, #function 411 411 __arm1022_setup: ··· 423 423 #ifdef CONFIG_CPU_CACHE_ROUND_ROBIN 424 424 orr r0, r0, #0x4000 @ .R.............. 425 425 #endif 426 - mov pc, lr 426 + ret lr 427 427 .size __arm1022_setup, . - __arm1022_setup 428 428 429 429 /*
+17 -17
arch/arm/mm/proc-arm1026.S
··· 62 62 * cpu_arm1026_proc_init() 63 63 */ 64 64 ENTRY(cpu_arm1026_proc_init) 65 - mov pc, lr 65 + ret lr 66 66 67 67 /* 68 68 * cpu_arm1026_proc_fin() ··· 72 72 bic r0, r0, #0x1000 @ ...i............ 73 73 bic r0, r0, #0x000e @ ............wca. 74 74 mcr p15, 0, r0, c1, c0, 0 @ disable caches 75 - mov pc, lr 75 + ret lr 76 76 77 77 /* 78 78 * cpu_arm1026_reset(loc) ··· 96 96 bic ip, ip, #0x000f @ ............wcam 97 97 bic ip, ip, #0x1100 @ ...i...s........ 98 98 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 99 - mov pc, r0 99 + ret r0 100 100 ENDPROC(cpu_arm1026_reset) 101 101 .popsection 102 102 ··· 106 106 .align 5 107 107 ENTRY(cpu_arm1026_do_idle) 108 108 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt 109 - mov pc, lr 109 + ret lr 110 110 111 111 /* ================================= CACHE ================================ */ 112 112 ··· 122 122 mov r0, #0 123 123 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 124 124 #endif 125 - mov pc, lr 125 + ret lr 126 126 ENDPROC(arm1026_flush_icache_all) 127 127 128 128 /* ··· 151 151 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 152 152 #endif 153 153 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 154 - mov pc, lr 154 + ret lr 155 155 156 156 /* 157 157 * flush_user_cache_range(start, end, flags) ··· 180 180 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 181 181 #endif 182 182 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 183 - mov pc, lr 183 + ret lr 184 184 185 185 /* 186 186 * coherent_kern_range(start, end) ··· 219 219 blo 1b 220 220 mcr p15, 0, ip, c7, c10, 4 @ drain WB 221 221 mov r0, #0 222 - mov pc, lr 222 + ret lr 223 223 224 224 /* 225 225 * flush_kern_dcache_area(void *addr, size_t size) ··· 240 240 blo 1b 241 241 #endif 242 242 mcr p15, 0, ip, c7, c10, 4 @ drain WB 243 - mov pc, lr 243 + ret lr 244 244 245 245 /* 246 246 * dma_inv_range(start, end) ··· 269 269 blo 1b 270 270 #endif 271 271 mcr p15, 0, ip, c7, c10, 4 @ drain WB 272 - mov pc, lr 272 + ret lr 273 273 274 274 /* 275 275 * dma_clean_range(start, end) ··· 291 291 blo 1b 292 292 #endif 293 293 mcr p15, 0, ip, c7, c10, 4 @ drain WB 294 - mov pc, lr 294 + ret lr 295 295 296 296 /* 297 297 * dma_flush_range(start, end) ··· 311 311 blo 1b 312 312 #endif 313 313 mcr p15, 0, ip, c7, c10, 4 @ drain WB 314 - mov pc, lr 314 + ret lr 315 315 316 316 /* 317 317 * dma_map_area(start, size, dir) ··· 334 334 * - dir - DMA direction 335 335 */ 336 336 ENTRY(arm1026_dma_unmap_area) 337 - mov pc, lr 337 + ret lr 338 338 ENDPROC(arm1026_dma_unmap_area) 339 339 340 340 .globl arm1026_flush_kern_cache_louis ··· 352 352 subs r1, r1, #CACHE_DLINESIZE 353 353 bhi 1b 354 354 #endif 355 - mov pc, lr 355 + ret lr 356 356 357 357 /* =============================== PageTable ============================== */ 358 358 ··· 378 378 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer 379 379 mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs 380 380 #endif 381 - mov pc, lr 381 + ret lr 382 382 383 383 /* 384 384 * cpu_arm1026_set_pte_ext(ptep, pte, ext) ··· 394 394 mcr p15, 0, r0, c7, c10, 1 @ clean D entry 395 395 #endif 396 396 #endif /* CONFIG_MMU */ 397 - mov pc, lr 397 + ret lr 398 398 399 399 .type __arm1026_setup, #function 400 400 __arm1026_setup: ··· 417 417 #ifdef CONFIG_CPU_CACHE_ROUND_ROBIN 418 418 orr r0, r0, #0x4000 @ .R.. .... .... .... 419 419 #endif 420 - mov pc, lr 420 + ret lr 421 421 .size __arm1026_setup, . - __arm1026_setup 422 422 423 423 /*
+8 -8
arch/arm/mm/proc-arm720.S
··· 51 51 */ 52 52 ENTRY(cpu_arm720_dcache_clean_area) 53 53 ENTRY(cpu_arm720_proc_init) 54 - mov pc, lr 54 + ret lr 55 55 56 56 ENTRY(cpu_arm720_proc_fin) 57 57 mrc p15, 0, r0, c1, c0, 0 58 58 bic r0, r0, #0x1000 @ ...i............ 59 59 bic r0, r0, #0x000e @ ............wca. 60 60 mcr p15, 0, r0, c1, c0, 0 @ disable caches 61 - mov pc, lr 61 + ret lr 62 62 63 63 /* 64 64 * Function: arm720_proc_do_idle(void) ··· 66 66 * Purpose : put the processor in proper idle mode 67 67 */ 68 68 ENTRY(cpu_arm720_do_idle) 69 - mov pc, lr 69 + ret lr 70 70 71 71 /* 72 72 * Function: arm720_switch_mm(unsigned long pgd_phys) ··· 81 81 mcr p15, 0, r0, c2, c0, 0 @ update page table ptr 82 82 mcr p15, 0, r1, c8, c7, 0 @ flush TLB (v4) 83 83 #endif 84 - mov pc, lr 84 + ret lr 85 85 86 86 /* 87 87 * Function: arm720_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext) ··· 94 94 #ifdef CONFIG_MMU 95 95 armv3_set_pte_ext wc_disable=0 96 96 #endif 97 - mov pc, lr 97 + ret lr 98 98 99 99 /* 100 100 * Function: arm720_reset ··· 112 112 bic ip, ip, #0x000f @ ............wcam 113 113 bic ip, ip, #0x2100 @ ..v....s........ 114 114 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 115 - mov pc, r0 115 + ret r0 116 116 ENDPROC(cpu_arm720_reset) 117 117 .popsection 118 118 ··· 128 128 bic r0, r0, r5 129 129 ldr r5, arm710_cr1_set 130 130 orr r0, r0, r5 131 - mov pc, lr @ __ret (head.S) 131 + ret lr @ __ret (head.S) 132 132 .size __arm710_setup, . - __arm710_setup 133 133 134 134 /* ··· 156 156 mrc p15, 0, r0, c1, c0 @ get control register 157 157 bic r0, r0, r5 158 158 orr r0, r0, r6 159 - mov pc, lr @ __ret (head.S) 159 + ret lr @ __ret (head.S) 160 160 .size __arm720_setup, . - __arm720_setup 161 161 162 162 /*
+4 -4
arch/arm/mm/proc-arm740.S
··· 32 32 ENTRY(cpu_arm740_do_idle) 33 33 ENTRY(cpu_arm740_dcache_clean_area) 34 34 ENTRY(cpu_arm740_switch_mm) 35 - mov pc, lr 35 + ret lr 36 36 37 37 /* 38 38 * cpu_arm740_proc_fin() ··· 42 42 bic r0, r0, #0x3f000000 @ bank/f/lock/s 43 43 bic r0, r0, #0x0000000c @ w-buffer/cache 44 44 mcr p15, 0, r0, c1, c0, 0 @ disable caches 45 - mov pc, lr 45 + ret lr 46 46 47 47 /* 48 48 * cpu_arm740_reset(loc) ··· 56 56 mrc p15, 0, ip, c1, c0, 0 @ get ctrl register 57 57 bic ip, ip, #0x0000000c @ ............wc.. 58 58 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 59 - mov pc, r0 59 + ret r0 60 60 ENDPROC(cpu_arm740_reset) 61 61 .popsection 62 62 ··· 115 115 @ need some benchmark 116 116 orr r0, r0, #0x0000000d @ MPU/Cache/WB 117 117 118 - mov pc, lr 118 + ret lr 119 119 120 120 .size __arm740_setup, . - __arm740_setup 121 121
+4 -4
arch/arm/mm/proc-arm7tdmi.S
··· 32 32 ENTRY(cpu_arm7tdmi_do_idle) 33 33 ENTRY(cpu_arm7tdmi_dcache_clean_area) 34 34 ENTRY(cpu_arm7tdmi_switch_mm) 35 - mov pc, lr 35 + ret lr 36 36 37 37 /* 38 38 * cpu_arm7tdmi_proc_fin() 39 39 */ 40 40 ENTRY(cpu_arm7tdmi_proc_fin) 41 - mov pc, lr 41 + ret lr 42 42 43 43 /* 44 44 * Function: cpu_arm7tdmi_reset(loc) ··· 47 47 */ 48 48 .pushsection .idmap.text, "ax" 49 49 ENTRY(cpu_arm7tdmi_reset) 50 - mov pc, r0 50 + ret r0 51 51 ENDPROC(cpu_arm7tdmi_reset) 52 52 .popsection 53 53 54 54 .type __arm7tdmi_setup, #function 55 55 __arm7tdmi_setup: 56 - mov pc, lr 56 + ret lr 57 57 .size __arm7tdmi_setup, . - __arm7tdmi_setup 58 58 59 59 __INITDATA
+17 -17
arch/arm/mm/proc-arm920.S
··· 63 63 * cpu_arm920_proc_init() 64 64 */ 65 65 ENTRY(cpu_arm920_proc_init) 66 - mov pc, lr 66 + ret lr 67 67 68 68 /* 69 69 * cpu_arm920_proc_fin() ··· 73 73 bic r0, r0, #0x1000 @ ...i............ 74 74 bic r0, r0, #0x000e @ ............wca. 75 75 mcr p15, 0, r0, c1, c0, 0 @ disable caches 76 - mov pc, lr 76 + ret lr 77 77 78 78 /* 79 79 * cpu_arm920_reset(loc) ··· 97 97 bic ip, ip, #0x000f @ ............wcam 98 98 bic ip, ip, #0x1100 @ ...i...s........ 99 99 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 100 - mov pc, r0 100 + ret r0 101 101 ENDPROC(cpu_arm920_reset) 102 102 .popsection 103 103 ··· 107 107 .align 5 108 108 ENTRY(cpu_arm920_do_idle) 109 109 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt 110 - mov pc, lr 110 + ret lr 111 111 112 112 113 113 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH ··· 120 120 ENTRY(arm920_flush_icache_all) 121 121 mov r0, #0 122 122 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 123 - mov pc, lr 123 + ret lr 124 124 ENDPROC(arm920_flush_icache_all) 125 125 126 126 /* ··· 151 151 tst r2, #VM_EXEC 152 152 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 153 153 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 154 - mov pc, lr 154 + ret lr 155 155 156 156 /* 157 157 * flush_user_cache_range(start, end, flags) ··· 177 177 blo 1b 178 178 tst r2, #VM_EXEC 179 179 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 180 - mov pc, lr 180 + ret lr 181 181 182 182 /* 183 183 * coherent_kern_range(start, end) ··· 211 211 blo 1b 212 212 mcr p15, 0, r0, c7, c10, 4 @ drain WB 213 213 mov r0, #0 214 - mov pc, lr 214 + ret lr 215 215 216 216 /* 217 217 * flush_kern_dcache_area(void *addr, size_t size) ··· 231 231 mov r0, #0 232 232 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 233 233 mcr p15, 0, r0, c7, c10, 4 @ drain WB 234 - mov pc, lr 234 + ret lr 235 235 236 236 /* 237 237 * dma_inv_range(start, end) ··· 257 257 cmp r0, r1 258 258 blo 1b 259 259 mcr p15, 0, r0, c7, c10, 4 @ drain WB 260 - mov pc, lr 260 + ret lr 261 261 262 262 /* 263 263 * dma_clean_range(start, end) ··· 276 276 cmp r0, r1 277 277 blo 1b 278 278 mcr p15, 0, r0, c7, c10, 4 @ drain WB 279 - mov pc, lr 279 + ret lr 280 280 281 281 /* 282 282 * dma_flush_range(start, end) ··· 293 293 cmp r0, r1 294 294 blo 1b 295 295 mcr p15, 0, r0, c7, c10, 4 @ drain WB 296 - mov pc, lr 296 + ret lr 297 297 298 298 /* 299 299 * dma_map_area(start, size, dir) ··· 316 316 * - dir - DMA direction 317 317 */ 318 318 ENTRY(arm920_dma_unmap_area) 319 - mov pc, lr 319 + ret lr 320 320 ENDPROC(arm920_dma_unmap_area) 321 321 322 322 .globl arm920_flush_kern_cache_louis ··· 332 332 add r0, r0, #CACHE_DLINESIZE 333 333 subs r1, r1, #CACHE_DLINESIZE 334 334 bhi 1b 335 - mov pc, lr 335 + ret lr 336 336 337 337 /* =============================== PageTable ============================== */ 338 338 ··· 367 367 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer 368 368 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs 369 369 #endif 370 - mov pc, lr 370 + ret lr 371 371 372 372 /* 373 373 * cpu_arm920_set_pte(ptep, pte, ext) ··· 382 382 mcr p15, 0, r0, c7, c10, 1 @ clean D entry 383 383 mcr p15, 0, r0, c7, c10, 4 @ drain WB 384 384 #endif 385 - mov pc, lr 385 + ret lr 386 386 387 387 /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ 388 388 .globl cpu_arm920_suspend_size ··· 423 423 mrc p15, 0, r0, c1, c0 @ get control register v4 424 424 bic r0, r0, r5 425 425 orr r0, r0, r6 426 - mov pc, lr 426 + ret lr 427 427 .size __arm920_setup, . - __arm920_setup 428 428 429 429 /*
+17 -17
arch/arm/mm/proc-arm922.S
··· 65 65 * cpu_arm922_proc_init() 66 66 */ 67 67 ENTRY(cpu_arm922_proc_init) 68 - mov pc, lr 68 + ret lr 69 69 70 70 /* 71 71 * cpu_arm922_proc_fin() ··· 75 75 bic r0, r0, #0x1000 @ ...i............ 76 76 bic r0, r0, #0x000e @ ............wca. 77 77 mcr p15, 0, r0, c1, c0, 0 @ disable caches 78 - mov pc, lr 78 + ret lr 79 79 80 80 /* 81 81 * cpu_arm922_reset(loc) ··· 99 99 bic ip, ip, #0x000f @ ............wcam 100 100 bic ip, ip, #0x1100 @ ...i...s........ 101 101 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 102 - mov pc, r0 102 + ret r0 103 103 ENDPROC(cpu_arm922_reset) 104 104 .popsection 105 105 ··· 109 109 .align 5 110 110 ENTRY(cpu_arm922_do_idle) 111 111 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt 112 - mov pc, lr 112 + ret lr 113 113 114 114 115 115 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH ··· 122 122 ENTRY(arm922_flush_icache_all) 123 123 mov r0, #0 124 124 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 125 - mov pc, lr 125 + ret lr 126 126 ENDPROC(arm922_flush_icache_all) 127 127 128 128 /* ··· 153 153 tst r2, #VM_EXEC 154 154 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 155 155 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 156 - mov pc, lr 156 + ret lr 157 157 158 158 /* 159 159 * flush_user_cache_range(start, end, flags) ··· 179 179 blo 1b 180 180 tst r2, #VM_EXEC 181 181 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 182 - mov pc, lr 182 + ret lr 183 183 184 184 /* 185 185 * coherent_kern_range(start, end) ··· 213 213 blo 1b 214 214 mcr p15, 0, r0, c7, c10, 4 @ drain WB 215 215 mov r0, #0 216 - mov pc, lr 216 + ret lr 217 217 218 218 /* 219 219 * flush_kern_dcache_area(void *addr, size_t size) ··· 233 233 mov r0, #0 234 234 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 235 235 mcr p15, 0, r0, c7, c10, 4 @ drain WB 236 - mov pc, lr 236 + ret lr 237 237 238 238 /* 239 239 * dma_inv_range(start, end) ··· 259 259 cmp r0, r1 260 260 blo 1b 261 261 mcr p15, 0, r0, c7, c10, 4 @ drain WB 262 - mov pc, lr 262 + ret lr 263 263 264 264 /* 265 265 * dma_clean_range(start, end) ··· 278 278 cmp r0, r1 279 279 blo 1b 280 280 mcr p15, 0, r0, c7, c10, 4 @ drain WB 281 - mov pc, lr 281 + ret lr 282 282 283 283 /* 284 284 * dma_flush_range(start, end) ··· 295 295 cmp r0, r1 296 296 blo 1b 297 297 mcr p15, 0, r0, c7, c10, 4 @ drain WB 298 - mov pc, lr 298 + ret lr 299 299 300 300 /* 301 301 * dma_map_area(start, size, dir) ··· 318 318 * - dir - DMA direction 319 319 */ 320 320 ENTRY(arm922_dma_unmap_area) 321 - mov pc, lr 321 + ret lr 322 322 ENDPROC(arm922_dma_unmap_area) 323 323 324 324 .globl arm922_flush_kern_cache_louis ··· 336 336 subs r1, r1, #CACHE_DLINESIZE 337 337 bhi 1b 338 338 #endif 339 - mov pc, lr 339 + ret lr 340 340 341 341 /* =============================== PageTable ============================== */ 342 342 ··· 371 371 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer 372 372 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs 373 373 #endif 374 - mov pc, lr 374 + ret lr 375 375 376 376 /* 377 377 * cpu_arm922_set_pte_ext(ptep, pte, ext) ··· 386 386 mcr p15, 0, r0, c7, c10, 1 @ clean D entry 387 387 mcr p15, 0, r0, c7, c10, 4 @ drain WB 388 388 #endif /* CONFIG_MMU */ 389 - mov pc, lr 389 + ret lr 390 390 391 391 .type __arm922_setup, #function 392 392 __arm922_setup: ··· 401 401 mrc p15, 0, r0, c1, c0 @ get control register v4 402 402 bic r0, r0, r5 403 403 orr r0, r0, r6 404 - mov pc, lr 404 + ret lr 405 405 .size __arm922_setup, . - __arm922_setup 406 406 407 407 /*
+17 -17
arch/arm/mm/proc-arm925.S
··· 86 86 * cpu_arm925_proc_init() 87 87 */ 88 88 ENTRY(cpu_arm925_proc_init) 89 - mov pc, lr 89 + ret lr 90 90 91 91 /* 92 92 * cpu_arm925_proc_fin() ··· 96 96 bic r0, r0, #0x1000 @ ...i............ 97 97 bic r0, r0, #0x000e @ ............wca. 98 98 mcr p15, 0, r0, c1, c0, 0 @ disable caches 99 - mov pc, lr 99 + ret lr 100 100 101 101 /* 102 102 * cpu_arm925_reset(loc) ··· 129 129 bic ip, ip, #0x000f @ ............wcam 130 130 bic ip, ip, #0x1100 @ ...i...s........ 131 131 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 132 - mov pc, r0 132 + ret r0 133 133 134 134 /* 135 135 * cpu_arm925_do_idle() ··· 145 145 mcr p15, 0, r2, c1, c0, 0 @ Disable I cache 146 146 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt 147 147 mcr p15, 0, r1, c1, c0, 0 @ Restore ICache enable 148 - mov pc, lr 148 + ret lr 149 149 150 150 /* 151 151 * flush_icache_all() ··· 155 155 ENTRY(arm925_flush_icache_all) 156 156 mov r0, #0 157 157 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 158 - mov pc, lr 158 + ret lr 159 159 ENDPROC(arm925_flush_icache_all) 160 160 161 161 /* ··· 188 188 tst r2, #VM_EXEC 189 189 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 190 190 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 191 - mov pc, lr 191 + ret lr 192 192 193 193 /* 194 194 * flush_user_cache_range(start, end, flags) ··· 225 225 blo 1b 226 226 tst r2, #VM_EXEC 227 227 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 228 - mov pc, lr 228 + ret lr 229 229 230 230 /* 231 231 * coherent_kern_range(start, end) ··· 259 259 blo 1b 260 260 mcr p15, 0, r0, c7, c10, 4 @ drain WB 261 261 mov r0, #0 262 - mov pc, lr 262 + ret lr 263 263 264 264 /* 265 265 * flush_kern_dcache_area(void *addr, size_t size) ··· 279 279 mov r0, #0 280 280 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 281 281 mcr p15, 0, r0, c7, c10, 4 @ drain WB 282 - mov pc, lr 282 + ret lr 283 283 284 284 /* 285 285 * dma_inv_range(start, end) ··· 307 307 cmp r0, r1 308 308 blo 1b 309 309 mcr p15, 0, r0, c7, c10, 4 @ drain WB 310 - mov pc, lr 310 + ret lr 311 311 312 312 /* 313 313 * dma_clean_range(start, end) ··· 328 328 blo 1b 329 329 #endif 330 330 mcr p15, 0, r0, c7, c10, 4 @ drain WB 331 - mov pc, lr 331 + ret lr 332 332 333 333 /* 334 334 * dma_flush_range(start, end) ··· 350 350 cmp r0, r1 351 351 blo 1b 352 352 mcr p15, 0, r0, c7, c10, 4 @ drain WB 353 - mov pc, lr 353 + ret lr 354 354 355 355 /* 356 356 * dma_map_area(start, size, dir) ··· 373 373 * - dir - DMA direction 374 374 */ 375 375 ENTRY(arm925_dma_unmap_area) 376 - mov pc, lr 376 + ret lr 377 377 ENDPROC(arm925_dma_unmap_area) 378 378 379 379 .globl arm925_flush_kern_cache_louis ··· 390 390 bhi 1b 391 391 #endif 392 392 mcr p15, 0, r0, c7, c10, 4 @ drain WB 393 - mov pc, lr 393 + ret lr 394 394 395 395 /* =============================== PageTable ============================== */ 396 396 ··· 419 419 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer 420 420 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs 421 421 #endif 422 - mov pc, lr 422 + ret lr 423 423 424 424 /* 425 425 * cpu_arm925_set_pte_ext(ptep, pte, ext) ··· 436 436 #endif 437 437 mcr p15, 0, r0, c7, c10, 4 @ drain WB 438 438 #endif /* CONFIG_MMU */ 439 - mov pc, lr 439 + ret lr 440 440 441 441 .type __arm925_setup, #function 442 442 __arm925_setup: ··· 469 469 #ifdef CONFIG_CPU_CACHE_ROUND_ROBIN 470 470 orr r0, r0, #0x4000 @ .1.. .... .... .... 471 471 #endif 472 - mov pc, lr 472 + ret lr 473 473 .size __arm925_setup, . - __arm925_setup 474 474 475 475 /*
+17 -17
arch/arm/mm/proc-arm926.S
··· 55 55 * cpu_arm926_proc_init() 56 56 */ 57 57 ENTRY(cpu_arm926_proc_init) 58 - mov pc, lr 58 + ret lr 59 59 60 60 /* 61 61 * cpu_arm926_proc_fin() ··· 65 65 bic r0, r0, #0x1000 @ ...i............ 66 66 bic r0, r0, #0x000e @ ............wca. 67 67 mcr p15, 0, r0, c1, c0, 0 @ disable caches 68 - mov pc, lr 68 + ret lr 69 69 70 70 /* 71 71 * cpu_arm926_reset(loc) ··· 89 89 bic ip, ip, #0x000f @ ............wcam 90 90 bic ip, ip, #0x1100 @ ...i...s........ 91 91 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 92 - mov pc, r0 92 + ret r0 93 93 ENDPROC(cpu_arm926_reset) 94 94 .popsection 95 95 ··· 111 111 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt 112 112 mcr p15, 0, r1, c1, c0, 0 @ Restore ICache enable 113 113 msr cpsr_c, r3 @ Restore FIQ state 114 - mov pc, lr 114 + ret lr 115 115 116 116 /* 117 117 * flush_icache_all() ··· 121 121 ENTRY(arm926_flush_icache_all) 122 122 mov r0, #0 123 123 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 124 - mov pc, lr 124 + ret lr 125 125 ENDPROC(arm926_flush_icache_all) 126 126 127 127 /* ··· 151 151 tst r2, #VM_EXEC 152 152 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 153 153 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 154 - mov pc, lr 154 + ret lr 155 155 156 156 /* 157 157 * flush_user_cache_range(start, end, flags) ··· 188 188 blo 1b 189 189 tst r2, #VM_EXEC 190 190 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 191 - mov pc, lr 191 + ret lr 192 192 193 193 /* 194 194 * coherent_kern_range(start, end) ··· 222 222 blo 1b 223 223 mcr p15, 0, r0, c7, c10, 4 @ drain WB 224 224 mov r0, #0 225 - mov pc, lr 225 + ret lr 226 226 227 227 /* 228 228 * flush_kern_dcache_area(void *addr, size_t size) ··· 242 242 mov r0, #0 243 243 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 244 244 mcr p15, 0, r0, c7, c10, 4 @ drain WB 245 - mov pc, lr 245 + ret lr 246 246 247 247 /* 248 248 * dma_inv_range(start, end) ··· 270 270 cmp r0, r1 271 271 blo 1b 272 272 mcr p15, 0, r0, c7, c10, 4 @ drain WB 273 - mov pc, lr 273 + ret lr 274 274 275 275 /* 276 276 * dma_clean_range(start, end) ··· 291 291 blo 1b 292 292 #endif 293 293 mcr p15, 0, r0, c7, c10, 4 @ drain WB 294 - mov pc, lr 294 + ret lr 295 295 296 296 /* 297 297 * dma_flush_range(start, end) ··· 313 313 cmp r0, r1 314 314 blo 1b 315 315 mcr p15, 0, r0, c7, c10, 4 @ drain WB 316 - mov pc, lr 316 + ret lr 317 317 318 318 /* 319 319 * dma_map_area(start, size, dir) ··· 336 336 * - dir - DMA direction 337 337 */ 338 338 ENTRY(arm926_dma_unmap_area) 339 - mov pc, lr 339 + ret lr 340 340 ENDPROC(arm926_dma_unmap_area) 341 341 342 342 .globl arm926_flush_kern_cache_louis ··· 353 353 bhi 1b 354 354 #endif 355 355 mcr p15, 0, r0, c7, c10, 4 @ drain WB 356 - mov pc, lr 356 + ret lr 357 357 358 358 /* =============================== PageTable ============================== */ 359 359 ··· 380 380 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer 381 381 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs 382 382 #endif 383 - mov pc, lr 383 + ret lr 384 384 385 385 /* 386 386 * cpu_arm926_set_pte_ext(ptep, pte, ext) ··· 397 397 #endif 398 398 mcr p15, 0, r0, c7, c10, 4 @ drain WB 399 399 #endif 400 - mov pc, lr 400 + ret lr 401 401 402 402 /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ 403 403 .globl cpu_arm926_suspend_size ··· 448 448 #ifdef CONFIG_CPU_CACHE_ROUND_ROBIN 449 449 orr r0, r0, #0x4000 @ .1.. .... .... .... 450 450 #endif 451 - mov pc, lr 451 + ret lr 452 452 .size __arm926_setup, . - __arm926_setup 453 453 454 454 /*
+12 -12
arch/arm/mm/proc-arm940.S
··· 31 31 */ 32 32 ENTRY(cpu_arm940_proc_init) 33 33 ENTRY(cpu_arm940_switch_mm) 34 - mov pc, lr 34 + ret lr 35 35 36 36 /* 37 37 * cpu_arm940_proc_fin() ··· 41 41 bic r0, r0, #0x00001000 @ i-cache 42 42 bic r0, r0, #0x00000004 @ d-cache 43 43 mcr p15, 0, r0, c1, c0, 0 @ disable caches 44 - mov pc, lr 44 + ret lr 45 45 46 46 /* 47 47 * cpu_arm940_reset(loc) ··· 58 58 bic ip, ip, #0x00000005 @ .............c.p 59 59 bic ip, ip, #0x00001000 @ i-cache 60 60 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 61 - mov pc, r0 61 + ret r0 62 62 ENDPROC(cpu_arm940_reset) 63 63 .popsection 64 64 ··· 68 68 .align 5 69 69 ENTRY(cpu_arm940_do_idle) 70 70 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt 71 - mov pc, lr 71 + ret lr 72 72 73 73 /* 74 74 * flush_icache_all() ··· 78 78 ENTRY(arm940_flush_icache_all) 79 79 mov r0, #0 80 80 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 81 - mov pc, lr 81 + ret lr 82 82 ENDPROC(arm940_flush_icache_all) 83 83 84 84 /* ··· 122 122 tst r2, #VM_EXEC 123 123 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 124 124 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 125 - mov pc, lr 125 + ret lr 126 126 127 127 /* 128 128 * coherent_kern_range(start, end) ··· 170 170 bcs 1b @ segments 7 to 0 171 171 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 172 172 mcr p15, 0, r0, c7, c10, 4 @ drain WB 173 - mov pc, lr 173 + ret lr 174 174 175 175 /* 176 176 * dma_inv_range(start, end) ··· 191 191 subs r1, r1, #1 << 4 192 192 bcs 1b @ segments 7 to 0 193 193 mcr p15, 0, ip, c7, c10, 4 @ drain WB 194 - mov pc, lr 194 + ret lr 195 195 196 196 /* 197 197 * dma_clean_range(start, end) ··· 215 215 bcs 1b @ segments 7 to 0 216 216 #endif 217 217 mcr p15, 0, ip, c7, c10, 4 @ drain WB 218 - mov pc, lr 218 + ret lr 219 219 220 220 /* 221 221 * dma_flush_range(start, end) ··· 241 241 subs r1, r1, #1 << 4 242 242 bcs 1b @ segments 7 to 0 243 243 mcr p15, 0, ip, c7, c10, 4 @ drain WB 244 - mov pc, lr 244 + ret lr 245 245 246 246 /* 247 247 * dma_map_area(start, size, dir) ··· 264 264 * - dir - DMA direction 265 265 */ 266 266 ENTRY(arm940_dma_unmap_area) 267 - mov pc, lr 267 + ret lr 268 268 ENDPROC(arm940_dma_unmap_area) 269 269 270 270 .globl arm940_flush_kern_cache_louis ··· 337 337 orr r0, r0, #0x00001000 @ I-cache 338 338 orr r0, r0, #0x00000005 @ MPU/D-cache 339 339 340 - mov pc, lr 340 + ret lr 341 341 342 342 .size __arm940_setup, . - __arm940_setup 343 343
+15 -15
arch/arm/mm/proc-arm946.S
··· 38 38 */ 39 39 ENTRY(cpu_arm946_proc_init) 40 40 ENTRY(cpu_arm946_switch_mm) 41 - mov pc, lr 41 + ret lr 42 42 43 43 /* 44 44 * cpu_arm946_proc_fin() ··· 48 48 bic r0, r0, #0x00001000 @ i-cache 49 49 bic r0, r0, #0x00000004 @ d-cache 50 50 mcr p15, 0, r0, c1, c0, 0 @ disable caches 51 - mov pc, lr 51 + ret lr 52 52 53 53 /* 54 54 * cpu_arm946_reset(loc) ··· 65 65 bic ip, ip, #0x00000005 @ .............c.p 66 66 bic ip, ip, #0x00001000 @ i-cache 67 67 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 68 - mov pc, r0 68 + ret r0 69 69 ENDPROC(cpu_arm946_reset) 70 70 .popsection 71 71 ··· 75 75 .align 5 76 76 ENTRY(cpu_arm946_do_idle) 77 77 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt 78 - mov pc, lr 78 + ret lr 79 79 80 80 /* 81 81 * flush_icache_all() ··· 85 85 ENTRY(arm946_flush_icache_all) 86 86 mov r0, #0 87 87 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 88 - mov pc, lr 88 + ret lr 89 89 ENDPROC(arm946_flush_icache_all) 90 90 91 91 /* ··· 117 117 tst r2, #VM_EXEC 118 118 mcrne p15, 0, ip, c7, c5, 0 @ flush I cache 119 119 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 120 - mov pc, lr 120 + ret lr 121 121 122 122 /* 123 123 * flush_user_cache_range(start, end, flags) ··· 156 156 blo 1b 157 157 tst r2, #VM_EXEC 158 158 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 159 - mov pc, lr 159 + ret lr 160 160 161 161 /* 162 162 * coherent_kern_range(start, end) ··· 191 191 blo 1b 192 192 mcr p15, 0, r0, c7, c10, 4 @ drain WB 193 193 mov r0, #0 194 - mov pc, lr 194 + ret lr 195 195 196 196 /* 197 197 * flush_kern_dcache_area(void *addr, size_t size) ··· 212 212 mov r0, #0 213 213 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 214 214 mcr p15, 0, r0, c7, c10, 4 @ drain WB 215 - mov pc, lr 215 + ret lr 216 216 217 217 /* 218 218 * dma_inv_range(start, end) ··· 239 239 cmp r0, r1 240 240 blo 1b 241 241 mcr p15, 0, r0, c7, c10, 4 @ drain WB 242 - mov pc, lr 242 + ret lr 243 243 244 244 /* 245 245 * dma_clean_range(start, end) ··· 260 260 blo 1b 261 261 #endif 262 262 mcr p15, 0, r0, c7, c10, 4 @ drain WB 263 - mov pc, lr 263 + ret lr 264 264 265 265 /* 266 266 * dma_flush_range(start, end) ··· 284 284 cmp r0, r1 285 285 blo 1b 286 286 mcr p15, 0, r0, c7, c10, 4 @ drain WB 287 - mov pc, lr 287 + ret lr 288 288 289 289 /* 290 290 * dma_map_area(start, size, dir) ··· 307 307 * - dir - DMA direction 308 308 */ 309 309 ENTRY(arm946_dma_unmap_area) 310 - mov pc, lr 310 + ret lr 311 311 ENDPROC(arm946_dma_unmap_area) 312 312 313 313 .globl arm946_flush_kern_cache_louis ··· 324 324 bhi 1b 325 325 #endif 326 326 mcr p15, 0, r0, c7, c10, 4 @ drain WB 327 - mov pc, lr 327 + ret lr 328 328 329 329 .type __arm946_setup, #function 330 330 __arm946_setup: ··· 392 392 #ifdef CONFIG_CPU_CACHE_ROUND_ROBIN 393 393 orr r0, r0, #0x00004000 @ .1.. .... .... .... 394 394 #endif 395 - mov pc, lr 395 + ret lr 396 396 397 397 .size __arm946_setup, . - __arm946_setup 398 398
+4 -4
arch/arm/mm/proc-arm9tdmi.S
··· 32 32 ENTRY(cpu_arm9tdmi_do_idle) 33 33 ENTRY(cpu_arm9tdmi_dcache_clean_area) 34 34 ENTRY(cpu_arm9tdmi_switch_mm) 35 - mov pc, lr 35 + ret lr 36 36 37 37 /* 38 38 * cpu_arm9tdmi_proc_fin() 39 39 */ 40 40 ENTRY(cpu_arm9tdmi_proc_fin) 41 - mov pc, lr 41 + ret lr 42 42 43 43 /* 44 44 * Function: cpu_arm9tdmi_reset(loc) ··· 47 47 */ 48 48 .pushsection .idmap.text, "ax" 49 49 ENTRY(cpu_arm9tdmi_reset) 50 - mov pc, r0 50 + ret r0 51 51 ENDPROC(cpu_arm9tdmi_reset) 52 52 .popsection 53 53 54 54 .type __arm9tdmi_setup, #function 55 55 __arm9tdmi_setup: 56 - mov pc, lr 56 + ret lr 57 57 .size __arm9tdmi_setup, . - __arm9tdmi_setup 58 58 59 59 __INITDATA
+8 -8
arch/arm/mm/proc-fa526.S
··· 32 32 * cpu_fa526_proc_init() 33 33 */ 34 34 ENTRY(cpu_fa526_proc_init) 35 - mov pc, lr 35 + ret lr 36 36 37 37 /* 38 38 * cpu_fa526_proc_fin() ··· 44 44 mcr p15, 0, r0, c1, c0, 0 @ disable caches 45 45 nop 46 46 nop 47 - mov pc, lr 47 + ret lr 48 48 49 49 /* 50 50 * cpu_fa526_reset(loc) ··· 72 72 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 73 73 nop 74 74 nop 75 - mov pc, r0 75 + ret r0 76 76 ENDPROC(cpu_fa526_reset) 77 77 .popsection 78 78 ··· 81 81 */ 82 82 .align 4 83 83 ENTRY(cpu_fa526_do_idle) 84 - mov pc, lr 84 + ret lr 85 85 86 86 87 87 ENTRY(cpu_fa526_dcache_clean_area) ··· 90 90 subs r1, r1, #CACHE_DLINESIZE 91 91 bhi 1b 92 92 mcr p15, 0, r0, c7, c10, 4 @ drain WB 93 - mov pc, lr 93 + ret lr 94 94 95 95 /* =============================== PageTable ============================== */ 96 96 ··· 117 117 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer 118 118 mcr p15, 0, ip, c8, c7, 0 @ invalidate UTLB 119 119 #endif 120 - mov pc, lr 120 + ret lr 121 121 122 122 /* 123 123 * cpu_fa526_set_pte_ext(ptep, pte, ext) ··· 133 133 mov r0, #0 134 134 mcr p15, 0, r0, c7, c10, 4 @ drain WB 135 135 #endif 136 - mov pc, lr 136 + ret lr 137 137 138 138 .type __fa526_setup, #function 139 139 __fa526_setup: ··· 162 162 bic r0, r0, r5 163 163 ldr r5, fa526_cr1_set 164 164 orr r0, r0, r5 165 - mov pc, lr 165 + ret lr 166 166 .size __fa526_setup, . - __fa526_setup 167 167 168 168 /*
+22 -22
arch/arm/mm/proc-feroceon.S
··· 69 69 movne r2, r2, lsr #2 @ turned into # of sets 70 70 sub r2, r2, #(1 << 5) 71 71 stmia r1, {r2, r3} 72 - mov pc, lr 72 + ret lr 73 73 74 74 /* 75 75 * cpu_feroceon_proc_fin() ··· 86 86 bic r0, r0, #0x1000 @ ...i............ 87 87 bic r0, r0, #0x000e @ ............wca. 88 88 mcr p15, 0, r0, c1, c0, 0 @ disable caches 89 - mov pc, lr 89 + ret lr 90 90 91 91 /* 92 92 * cpu_feroceon_reset(loc) ··· 110 110 bic ip, ip, #0x000f @ ............wcam 111 111 bic ip, ip, #0x1100 @ ...i...s........ 112 112 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 113 - mov pc, r0 113 + ret r0 114 114 ENDPROC(cpu_feroceon_reset) 115 115 .popsection 116 116 ··· 124 124 mov r0, #0 125 125 mcr p15, 0, r0, c7, c10, 4 @ Drain write buffer 126 126 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt 127 - mov pc, lr 127 + ret lr 128 128 129 129 /* 130 130 * flush_icache_all() ··· 134 134 ENTRY(feroceon_flush_icache_all) 135 135 mov r0, #0 136 136 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 137 - mov pc, lr 137 + ret lr 138 138 ENDPROC(feroceon_flush_icache_all) 139 139 140 140 /* ··· 169 169 mov ip, #0 170 170 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 171 171 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 172 - mov pc, lr 172 + ret lr 173 173 174 174 /* 175 175 * flush_user_cache_range(start, end, flags) ··· 198 198 tst r2, #VM_EXEC 199 199 mov ip, #0 200 200 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 201 - mov pc, lr 201 + ret lr 202 202 203 203 /* 204 204 * coherent_kern_range(start, end) ··· 233 233 blo 1b 234 234 mcr p15, 0, r0, c7, c10, 4 @ drain WB 235 235 mov r0, #0 236 - mov pc, lr 236 + ret lr 237 237 238 238 /* 239 239 * flush_kern_dcache_area(void *addr, size_t size) ··· 254 254 mov r0, #0 255 255 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 256 256 mcr p15, 0, r0, c7, c10, 4 @ drain WB 257 - mov pc, lr 257 + ret lr 258 258 259 259 .align 5 260 260 ENTRY(feroceon_range_flush_kern_dcache_area) ··· 268 268 mov r0, #0 269 269 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 270 270 mcr p15, 0, r0, c7, c10, 4 @ drain WB 271 - mov pc, lr 271 + ret lr 272 272 273 273 /* 274 274 * dma_inv_range(start, end) ··· 295 295 cmp r0, r1 296 296 blo 1b 297 297 mcr p15, 0, r0, c7, c10, 4 @ drain WB 298 - mov pc, lr 298 + ret lr 299 299 300 300 .align 5 301 301 feroceon_range_dma_inv_range: ··· 311 311 mcr p15, 5, r0, c15, c14, 0 @ D inv range start 312 312 mcr p15, 5, r1, c15, c14, 1 @ D inv range top 313 313 msr cpsr_c, r2 @ restore interrupts 314 - mov pc, lr 314 + ret lr 315 315 316 316 /* 317 317 * dma_clean_range(start, end) ··· 331 331 cmp r0, r1 332 332 blo 1b 333 333 mcr p15, 0, r0, c7, c10, 4 @ drain WB 334 - mov pc, lr 334 + ret lr 335 335 336 336 .align 5 337 337 feroceon_range_dma_clean_range: ··· 344 344 mcr p15, 5, r1, c15, c13, 1 @ D clean range top 345 345 msr cpsr_c, r2 @ restore interrupts 346 346 mcr p15, 0, r0, c7, c10, 4 @ drain WB 347 - mov pc, lr 347 + ret lr 348 348 349 349 /* 350 350 * dma_flush_range(start, end) ··· 362 362 cmp r0, r1 363 363 blo 1b 364 364 mcr p15, 0, r0, c7, c10, 4 @ drain WB 365 - mov pc, lr 365 + ret lr 366 366 367 367 .align 5 368 368 ENTRY(feroceon_range_dma_flush_range) ··· 375 375 mcr p15, 5, r1, c15, c15, 1 @ D clean/inv range top 376 376 msr cpsr_c, r2 @ restore interrupts 377 377 mcr p15, 0, r0, c7, c10, 4 @ drain WB 378 - mov pc, lr 378 + ret lr 379 379 380 380 /* 381 381 * dma_map_area(start, size, dir) ··· 412 412 * - dir - DMA direction 413 413 */ 414 414 ENTRY(feroceon_dma_unmap_area) 415 - mov pc, lr 415 + ret lr 416 416 ENDPROC(feroceon_dma_unmap_area) 417 417 418 418 .globl feroceon_flush_kern_cache_louis ··· 461 461 bhi 1b 462 462 #endif 463 463 mcr p15, 0, r0, c7, c10, 4 @ drain WB 464 - mov pc, lr 464 + ret lr 465 465 466 466 /* =============================== PageTable ============================== */ 467 467 ··· 490 490 491 491 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer 492 492 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs 493 - mov pc, r2 493 + ret r2 494 494 #else 495 - mov pc, lr 495 + ret lr 496 496 #endif 497 497 498 498 /* ··· 512 512 #endif 513 513 mcr p15, 0, r0, c7, c10, 4 @ drain WB 514 514 #endif 515 - mov pc, lr 515 + ret lr 516 516 517 517 /* Suspend/resume support: taken from arch/arm/mm/proc-arm926.S */ 518 518 .globl cpu_feroceon_suspend_size ··· 554 554 mrc p15, 0, r0, c1, c0 @ get control register v4 555 555 bic r0, r0, r5 556 556 orr r0, r0, r6 557 - mov pc, lr 557 + ret lr 558 558 .size __feroceon_setup, . - __feroceon_setup 559 559 560 560 /*
+17 -17
arch/arm/mm/proc-mohawk.S
··· 45 45 * cpu_mohawk_proc_init() 46 46 */ 47 47 ENTRY(cpu_mohawk_proc_init) 48 - mov pc, lr 48 + ret lr 49 49 50 50 /* 51 51 * cpu_mohawk_proc_fin() ··· 55 55 bic r0, r0, #0x1800 @ ...iz........... 56 56 bic r0, r0, #0x0006 @ .............ca. 57 57 mcr p15, 0, r0, c1, c0, 0 @ disable caches 58 - mov pc, lr 58 + ret lr 59 59 60 60 /* 61 61 * cpu_mohawk_reset(loc) ··· 79 79 bic ip, ip, #0x0007 @ .............cam 80 80 bic ip, ip, #0x1100 @ ...i...s........ 81 81 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 82 - mov pc, r0 82 + ret r0 83 83 ENDPROC(cpu_mohawk_reset) 84 84 .popsection 85 85 ··· 93 93 mov r0, #0 94 94 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 95 95 mcr p15, 0, r0, c7, c0, 4 @ wait for interrupt 96 - mov pc, lr 96 + ret lr 97 97 98 98 /* 99 99 * flush_icache_all() ··· 103 103 ENTRY(mohawk_flush_icache_all) 104 104 mov r0, #0 105 105 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 106 - mov pc, lr 106 + ret lr 107 107 ENDPROC(mohawk_flush_icache_all) 108 108 109 109 /* ··· 128 128 tst r2, #VM_EXEC 129 129 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 130 130 mcrne p15, 0, ip, c7, c10, 0 @ drain write buffer 131 - mov pc, lr 131 + ret lr 132 132 133 133 /* 134 134 * flush_user_cache_range(start, end, flags) ··· 158 158 blo 1b 159 159 tst r2, #VM_EXEC 160 160 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 161 - mov pc, lr 161 + ret lr 162 162 163 163 /* 164 164 * coherent_kern_range(start, end) ··· 194 194 blo 1b 195 195 mcr p15, 0, r0, c7, c10, 4 @ drain WB 196 196 mov r0, #0 197 - mov pc, lr 197 + ret lr 198 198 199 199 /* 200 200 * flush_kern_dcache_area(void *addr, size_t size) ··· 214 214 mov r0, #0 215 215 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 216 216 mcr p15, 0, r0, c7, c10, 4 @ drain WB 217 - mov pc, lr 217 + ret lr 218 218 219 219 /* 220 220 * dma_inv_range(start, end) ··· 240 240 cmp r0, r1 241 241 blo 1b 242 242 mcr p15, 0, r0, c7, c10, 4 @ drain WB 243 - mov pc, lr 243 + ret lr 244 244 245 245 /* 246 246 * dma_clean_range(start, end) ··· 259 259 cmp r0, r1 260 260 blo 1b 261 261 mcr p15, 0, r0, c7, c10, 4 @ drain WB 262 - mov pc, lr 262 + ret lr 263 263 264 264 /* 265 265 * dma_flush_range(start, end) ··· 277 277 cmp r0, r1 278 278 blo 1b 279 279 mcr p15, 0, r0, c7, c10, 4 @ drain WB 280 - mov pc, lr 280 + ret lr 281 281 282 282 /* 283 283 * dma_map_area(start, size, dir) ··· 300 300 * - dir - DMA direction 301 301 */ 302 302 ENTRY(mohawk_dma_unmap_area) 303 - mov pc, lr 303 + ret lr 304 304 ENDPROC(mohawk_dma_unmap_area) 305 305 306 306 .globl mohawk_flush_kern_cache_louis ··· 315 315 subs r1, r1, #CACHE_DLINESIZE 316 316 bhi 1b 317 317 mcr p15, 0, r0, c7, c10, 4 @ drain WB 318 - mov pc, lr 318 + ret lr 319 319 320 320 /* 321 321 * cpu_mohawk_switch_mm(pgd) ··· 333 333 orr r0, r0, #0x18 @ cache the page table in L2 334 334 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer 335 335 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs 336 - mov pc, lr 336 + ret lr 337 337 338 338 /* 339 339 * cpu_mohawk_set_pte_ext(ptep, pte, ext) ··· 346 346 mov r0, r0 347 347 mcr p15, 0, r0, c7, c10, 1 @ clean D entry 348 348 mcr p15, 0, r0, c7, c10, 4 @ drain WB 349 - mov pc, lr 349 + ret lr 350 350 351 351 .globl cpu_mohawk_suspend_size 352 352 .equ cpu_mohawk_suspend_size, 4 * 6 ··· 400 400 mrc p15, 0, r0, c1, c0 @ get control register 401 401 bic r0, r0, r5 402 402 orr r0, r0, r6 403 - mov pc, lr 403 + ret lr 404 404 405 405 .size __mohawk_setup, . - __mohawk_setup 406 406
+8 -8
arch/arm/mm/proc-sa110.S
··· 38 38 ENTRY(cpu_sa110_proc_init) 39 39 mov r0, #0 40 40 mcr p15, 0, r0, c15, c1, 2 @ Enable clock switching 41 - mov pc, lr 41 + ret lr 42 42 43 43 /* 44 44 * cpu_sa110_proc_fin() ··· 50 50 bic r0, r0, #0x1000 @ ...i............ 51 51 bic r0, r0, #0x000e @ ............wca. 52 52 mcr p15, 0, r0, c1, c0, 0 @ disable caches 53 - mov pc, lr 53 + ret lr 54 54 55 55 /* 56 56 * cpu_sa110_reset(loc) ··· 74 74 bic ip, ip, #0x000f @ ............wcam 75 75 bic ip, ip, #0x1100 @ ...i...s........ 76 76 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 77 - mov pc, r0 77 + ret r0 78 78 ENDPROC(cpu_sa110_reset) 79 79 .popsection 80 80 ··· 103 103 mov r0, r0 @ safety 104 104 mov r0, r0 @ safety 105 105 mcr p15, 0, r0, c15, c1, 2 @ enable clock switching 106 - mov pc, lr 106 + ret lr 107 107 108 108 /* ================================= CACHE ================================ */ 109 109 ··· 121 121 add r0, r0, #DCACHELINESIZE 122 122 subs r1, r1, #DCACHELINESIZE 123 123 bhi 1b 124 - mov pc, lr 124 + ret lr 125 125 126 126 /* =============================== PageTable ============================== */ 127 127 ··· 141 141 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs 142 142 ldr pc, [sp], #4 143 143 #else 144 - mov pc, lr 144 + ret lr 145 145 #endif 146 146 147 147 /* ··· 157 157 mcr p15, 0, r0, c7, c10, 1 @ clean D entry 158 158 mcr p15, 0, r0, c7, c10, 4 @ drain WB 159 159 #endif 160 - mov pc, lr 160 + ret lr 161 161 162 162 .type __sa110_setup, #function 163 163 __sa110_setup: ··· 173 173 mrc p15, 0, r0, c1, c0 @ get control register v4 174 174 bic r0, r0, r5 175 175 orr r0, r0, r6 176 - mov pc, lr 176 + ret lr 177 177 .size __sa110_setup, . - __sa110_setup 178 178 179 179 /*
+8 -8
arch/arm/mm/proc-sa1100.S
··· 43 43 mov r0, #0 44 44 mcr p15, 0, r0, c15, c1, 2 @ Enable clock switching 45 45 mcr p15, 0, r0, c9, c0, 5 @ Allow read-buffer operations from userland 46 - mov pc, lr 46 + ret lr 47 47 48 48 /* 49 49 * cpu_sa1100_proc_fin() ··· 58 58 bic r0, r0, #0x1000 @ ...i............ 59 59 bic r0, r0, #0x000e @ ............wca. 60 60 mcr p15, 0, r0, c1, c0, 0 @ disable caches 61 - mov pc, lr 61 + ret lr 62 62 63 63 /* 64 64 * cpu_sa1100_reset(loc) ··· 82 82 bic ip, ip, #0x000f @ ............wcam 83 83 bic ip, ip, #0x1100 @ ...i...s........ 84 84 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 85 - mov pc, r0 85 + ret r0 86 86 ENDPROC(cpu_sa1100_reset) 87 87 .popsection 88 88 ··· 113 113 mcr p15, 0, r0, c15, c8, 2 @ wait for interrupt 114 114 mov r0, r0 @ safety 115 115 mcr p15, 0, r0, c15, c1, 2 @ enable clock switching 116 - mov pc, lr 116 + ret lr 117 117 118 118 /* ================================= CACHE ================================ */ 119 119 ··· 131 131 add r0, r0, #DCACHELINESIZE 132 132 subs r1, r1, #DCACHELINESIZE 133 133 bhi 1b 134 - mov pc, lr 134 + ret lr 135 135 136 136 /* =============================== PageTable ============================== */ 137 137 ··· 152 152 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs 153 153 ldr pc, [sp], #4 154 154 #else 155 - mov pc, lr 155 + ret lr 156 156 #endif 157 157 158 158 /* ··· 168 168 mcr p15, 0, r0, c7, c10, 1 @ clean D entry 169 169 mcr p15, 0, r0, c7, c10, 4 @ drain WB 170 170 #endif 171 - mov pc, lr 171 + ret lr 172 172 173 173 .globl cpu_sa1100_suspend_size 174 174 .equ cpu_sa1100_suspend_size, 4 * 3 ··· 211 211 mrc p15, 0, r0, c1, c0 @ get control register v4 212 212 bic r0, r0, r5 213 213 orr r0, r0, r6 214 - mov pc, lr 214 + ret lr 215 215 .size __sa1100_setup, . - __sa1100_setup 216 216 217 217 /*
+8 -8
arch/arm/mm/proc-v6.S
··· 36 36 #define PMD_FLAGS_SMP PMD_SECT_WBWA|PMD_SECT_S 37 37 38 38 ENTRY(cpu_v6_proc_init) 39 - mov pc, lr 39 + ret lr 40 40 41 41 ENTRY(cpu_v6_proc_fin) 42 42 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 43 43 bic r0, r0, #0x1000 @ ...i............ 44 44 bic r0, r0, #0x0006 @ .............ca. 45 45 mcr p15, 0, r0, c1, c0, 0 @ disable caches 46 - mov pc, lr 46 + ret lr 47 47 48 48 /* 49 49 * cpu_v6_reset(loc) ··· 62 62 mcr p15, 0, r1, c1, c0, 0 @ disable MMU 63 63 mov r1, #0 64 64 mcr p15, 0, r1, c7, c5, 4 @ ISB 65 - mov pc, r0 65 + ret r0 66 66 ENDPROC(cpu_v6_reset) 67 67 .popsection 68 68 ··· 77 77 mov r1, #0 78 78 mcr p15, 0, r1, c7, c10, 4 @ DWB - WFI may enter a low-power mode 79 79 mcr p15, 0, r1, c7, c0, 4 @ wait for interrupt 80 - mov pc, lr 80 + ret lr 81 81 82 82 ENTRY(cpu_v6_dcache_clean_area) 83 83 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 84 84 add r0, r0, #D_CACHE_LINE_SIZE 85 85 subs r1, r1, #D_CACHE_LINE_SIZE 86 86 bhi 1b 87 - mov pc, lr 87 + ret lr 88 88 89 89 /* 90 90 * cpu_v6_switch_mm(pgd_phys, tsk) ··· 113 113 #endif 114 114 mcr p15, 0, r1, c13, c0, 1 @ set context ID 115 115 #endif 116 - mov pc, lr 116 + ret lr 117 117 118 118 /* 119 119 * cpu_v6_set_pte_ext(ptep, pte, ext) ··· 131 131 #ifdef CONFIG_MMU 132 132 armv6_set_pte_ext cpu_v6 133 133 #endif 134 - mov pc, lr 134 + ret lr 135 135 136 136 /* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */ 137 137 .globl cpu_v6_suspend_size ··· 241 241 mcreq p15, 0, r5, c1, c0, 1 @ write aux control reg 242 242 orreq r0, r0, #(1 << 21) @ low interrupt latency configuration 243 243 #endif 244 - mov pc, lr @ return to head.S:__ret 244 + ret lr @ return to head.S:__ret 245 245 246 246 /* 247 247 * V X F I D LR
+2 -2
arch/arm/mm/proc-v7-2level.S
··· 59 59 mcr p15, 0, r0, c2, c0, 0 @ set TTB 0 60 60 isb 61 61 #endif 62 - mov pc, lr 62 + bx lr 63 63 ENDPROC(cpu_v7_switch_mm) 64 64 65 65 /* ··· 106 106 ALT_SMP(W(nop)) 107 107 ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte 108 108 #endif 109 - mov pc, lr 109 + bx lr 110 110 ENDPROC(cpu_v7_set_pte_ext) 111 111 112 112 /*
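Note: unlike the files above, proc-v7-2level.S returns with a literal "bx lr" rather than the "ret lr" macro used elsewhere in this patch. This code is only ever assembled for ARMv7 cores, where "bx" is unconditionally available, so the instruction can be spelled out directly; the macro indirection is presumably only needed in files that may also be built for pre-ARMv6 CPUs.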
+3 -2
arch/arm/mm/proc-v7-3level.S
··· 19 19 * along with this program; if not, write to the Free Software 20 20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 21 21 */ 22 + #include <asm/assembler.h> 22 23 23 24 #define TTB_IRGN_NC (0 << 8) 24 25 #define TTB_IRGN_WBWA (1 << 8) ··· 62 61 mcrr p15, 0, rpgdl, rpgdh, c2 @ set TTB 0 63 62 isb 64 63 #endif 65 - mov pc, lr 64 + ret lr 66 65 ENDPROC(cpu_v7_switch_mm) 67 66 68 67 #ifdef __ARMEB__ ··· 93 92 ALT_SMP(W(nop)) 94 93 ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte 95 94 #endif 96 - mov pc, lr 95 + ret lr 97 96 ENDPROC(cpu_v7_set_pte_ext) 98 97 99 98 /*
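Note: proc-v7-3level.S also gains a "#include <asm/assembler.h>" line, as do several of the TLB flush files below (tlb-fa.S, tlb-v4.S, tlb-v4wb.S, tlb-v4wbi.S, tlb-v6.S) and nwfpe/entry.S: the "ret" macro and its condition-code variants are provided by that header, so any file converted to use them has to pull it in if it did not do so already.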
+7 -7
arch/arm/mm/proc-v7.S
··· 26 26 #endif 27 27 28 28 ENTRY(cpu_v7_proc_init) 29 - mov pc, lr 29 + ret lr 30 30 ENDPROC(cpu_v7_proc_init) 31 31 32 32 ENTRY(cpu_v7_proc_fin) ··· 34 34 bic r0, r0, #0x1000 @ ...i............ 35 35 bic r0, r0, #0x0006 @ .............ca. 36 36 mcr p15, 0, r0, c1, c0, 0 @ disable caches 37 - mov pc, lr 37 + ret lr 38 38 ENDPROC(cpu_v7_proc_fin) 39 39 40 40 /* ··· 71 71 ENTRY(cpu_v7_do_idle) 72 72 dsb @ WFI may enter a low-power mode 73 73 wfi 74 - mov pc, lr 74 + ret lr 75 75 ENDPROC(cpu_v7_do_idle) 76 76 77 77 ENTRY(cpu_v7_dcache_clean_area) 78 78 ALT_SMP(W(nop)) @ MP extensions imply L1 PTW 79 79 ALT_UP_B(1f) 80 - mov pc, lr 80 + ret lr 81 81 1: dcache_line_size r2, r3 82 82 2: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 83 83 add r0, r0, r2 84 84 subs r1, r1, r2 85 85 bhi 2b 86 86 dsb ishst 87 - mov pc, lr 87 + ret lr 88 88 ENDPROC(cpu_v7_dcache_clean_area) 89 89 90 90 string cpu_v7_name, "ARMv7 Processor" ··· 163 163 dsb @ WFI may enter a low-power mode 164 164 wfi 165 165 dsb @barrier 166 - mov pc, lr 166 + ret lr 167 167 ENDPROC(cpu_pj4b_do_idle) 168 168 #else 169 169 globl_equ cpu_pj4b_do_idle, cpu_v7_do_idle ··· 407 407 bic r0, r0, r5 @ clear bits them 408 408 orr r0, r0, r6 @ set them 409 409 THUMB( orr r0, r0, #1 << 30 ) @ Thumb exceptions 410 - mov pc, lr @ return to head.S:__ret 410 + ret lr @ return to head.S:__ret 411 411 ENDPROC(__v7_setup) 412 412 413 413 .align 2
+9 -9
arch/arm/mm/proc-v7m.S
··· 16 16 #include "proc-macros.S" 17 17 18 18 ENTRY(cpu_v7m_proc_init) 19 - mov pc, lr 19 + ret lr 20 20 ENDPROC(cpu_v7m_proc_init) 21 21 22 22 ENTRY(cpu_v7m_proc_fin) 23 - mov pc, lr 23 + ret lr 24 24 ENDPROC(cpu_v7m_proc_fin) 25 25 26 26 /* ··· 34 34 */ 35 35 .align 5 36 36 ENTRY(cpu_v7m_reset) 37 - mov pc, r0 37 + ret r0 38 38 ENDPROC(cpu_v7m_reset) 39 39 40 40 /* ··· 46 46 */ 47 47 ENTRY(cpu_v7m_do_idle) 48 48 wfi 49 - mov pc, lr 49 + ret lr 50 50 ENDPROC(cpu_v7m_do_idle) 51 51 52 52 ENTRY(cpu_v7m_dcache_clean_area) 53 - mov pc, lr 53 + ret lr 54 54 ENDPROC(cpu_v7m_dcache_clean_area) 55 55 56 56 /* 57 57 * There is no MMU, so here is nothing to do. 58 58 */ 59 59 ENTRY(cpu_v7m_switch_mm) 60 - mov pc, lr 60 + ret lr 61 61 ENDPROC(cpu_v7m_switch_mm) 62 62 63 63 .globl cpu_v7m_suspend_size ··· 65 65 66 66 #ifdef CONFIG_ARM_CPU_SUSPEND 67 67 ENTRY(cpu_v7m_do_suspend) 68 - mov pc, lr 68 + ret lr 69 69 ENDPROC(cpu_v7m_do_suspend) 70 70 71 71 ENTRY(cpu_v7m_do_resume) 72 - mov pc, lr 72 + ret lr 73 73 ENDPROC(cpu_v7m_do_resume) 74 74 #endif 75 75 ··· 120 120 ldr r12, [r0, V7M_SCB_CCR] @ system control register 121 121 orr r12, #V7M_SCB_CCR_STKALIGN 122 122 str r12, [r0, V7M_SCB_CCR] 123 - mov pc, lr 123 + ret lr 124 124 ENDPROC(__v7m_setup) 125 125 126 126 .align 2
+16 -16
arch/arm/mm/proc-xsc3.S
··· 83 83 * Nothing too exciting at the moment 84 84 */ 85 85 ENTRY(cpu_xsc3_proc_init) 86 - mov pc, lr 86 + ret lr 87 87 88 88 /* 89 89 * cpu_xsc3_proc_fin() ··· 93 93 bic r0, r0, #0x1800 @ ...IZ........... 94 94 bic r0, r0, #0x0006 @ .............CA. 95 95 mcr p15, 0, r0, c1, c0, 0 @ disable caches 96 - mov pc, lr 96 + ret lr 97 97 98 98 /* 99 99 * cpu_xsc3_reset(loc) ··· 119 119 @ CAUTION: MMU turned off from this point. We count on the pipeline 120 120 @ already containing those two last instructions to survive. 121 121 mcr p15, 0, ip, c8, c7, 0 @ invalidate I and D TLBs 122 - mov pc, r0 122 + ret r0 123 123 ENDPROC(cpu_xsc3_reset) 124 124 .popsection 125 125 ··· 138 138 ENTRY(cpu_xsc3_do_idle) 139 139 mov r0, #1 140 140 mcr p14, 0, r0, c7, c0, 0 @ go to idle 141 - mov pc, lr 141 + ret lr 142 142 143 143 /* ================================= CACHE ================================ */ 144 144 ··· 150 150 ENTRY(xsc3_flush_icache_all) 151 151 mov r0, #0 152 152 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 153 - mov pc, lr 153 + ret lr 154 154 ENDPROC(xsc3_flush_icache_all) 155 155 156 156 /* ··· 176 176 mcrne p15, 0, ip, c7, c5, 0 @ invalidate L1 I cache and BTB 177 177 mcrne p15, 0, ip, c7, c10, 4 @ data write barrier 178 178 mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush 179 - mov pc, lr 179 + ret lr 180 180 181 181 /* 182 182 * flush_user_cache_range(start, end, vm_flags) ··· 205 205 mcrne p15, 0, ip, c7, c5, 6 @ invalidate BTB 206 206 mcrne p15, 0, ip, c7, c10, 4 @ data write barrier 207 207 mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush 208 - mov pc, lr 208 + ret lr 209 209 210 210 /* 211 211 * coherent_kern_range(start, end) ··· 232 232 mcr p15, 0, r0, c7, c5, 0 @ invalidate L1 I cache and BTB 233 233 mcr p15, 0, r0, c7, c10, 4 @ data write barrier 234 234 mcr p15, 0, r0, c7, c5, 4 @ prefetch flush 235 - mov pc, lr 235 + ret lr 236 236 237 237 /* 238 238 * flush_kern_dcache_area(void *addr, size_t size) ··· 253 253 mcr p15, 0, r0, c7, c5, 0 @ invalidate L1 I cache and BTB 254 254 mcr p15, 0, r0, c7, c10, 4 @ data write barrier 255 255 mcr p15, 0, r0, c7, c5, 4 @ prefetch flush 256 - mov pc, lr 256 + ret lr 257 257 258 258 /* 259 259 * dma_inv_range(start, end) ··· 277 277 cmp r0, r1 278 278 blo 1b 279 279 mcr p15, 0, r0, c7, c10, 4 @ data write barrier 280 - mov pc, lr 280 + ret lr 281 281 282 282 /* 283 283 * dma_clean_range(start, end) ··· 294 294 cmp r0, r1 295 295 blo 1b 296 296 mcr p15, 0, r0, c7, c10, 4 @ data write barrier 297 - mov pc, lr 297 + ret lr 298 298 299 299 /* 300 300 * dma_flush_range(start, end) ··· 311 311 cmp r0, r1 312 312 blo 1b 313 313 mcr p15, 0, r0, c7, c10, 4 @ data write barrier 314 - mov pc, lr 314 + ret lr 315 315 316 316 /* 317 317 * dma_map_area(start, size, dir) ··· 334 334 * - dir - DMA direction 335 335 */ 336 336 ENTRY(xsc3_dma_unmap_area) 337 - mov pc, lr 337 + ret lr 338 338 ENDPROC(xsc3_dma_unmap_area) 339 339 340 340 .globl xsc3_flush_kern_cache_louis ··· 348 348 add r0, r0, #CACHELINESIZE 349 349 subs r1, r1, #CACHELINESIZE 350 350 bhi 1b 351 - mov pc, lr 351 + ret lr 352 352 353 353 /* =============================== PageTable ============================== */ 354 354 ··· 406 406 orr r2, r2, ip 407 407 408 408 xscale_set_pte_ext_epilogue 409 - mov pc, lr 409 + ret lr 410 410 411 411 .ltorg 412 412 .align ··· 478 478 bic r0, r0, r5 @ ..V. ..R. .... ..A. 479 479 orr r0, r0, r6 @ ..VI Z..S .... .C.M (mmu) 480 480 @ ...I Z..S .... .... (uc) 481 - mov pc, lr 481 + ret lr 482 482 483 483 .size __xsc3_setup, . - __xsc3_setup 484 484
+17 -17
arch/arm/mm/proc-xscale.S
··· 118 118 mrc p15, 0, r1, c1, c0, 1 119 119 bic r1, r1, #1 120 120 mcr p15, 0, r1, c1, c0, 1 121 - mov pc, lr 121 + ret lr 122 122 123 123 /* 124 124 * cpu_xscale_proc_fin() ··· 128 128 bic r0, r0, #0x1800 @ ...IZ........... 129 129 bic r0, r0, #0x0006 @ .............CA. 130 130 mcr p15, 0, r0, c1, c0, 0 @ disable caches 131 - mov pc, lr 131 + ret lr 132 132 133 133 /* 134 134 * cpu_xscale_reset(loc) ··· 160 160 @ CAUTION: MMU turned off from this point. We count on the pipeline 161 161 @ already containing those two last instructions to survive. 162 162 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs 163 - mov pc, r0 163 + ret r0 164 164 ENDPROC(cpu_xscale_reset) 165 165 .popsection 166 166 ··· 179 179 ENTRY(cpu_xscale_do_idle) 180 180 mov r0, #1 181 181 mcr p14, 0, r0, c7, c0, 0 @ Go to IDLE 182 - mov pc, lr 182 + ret lr 183 183 184 184 /* ================================= CACHE ================================ */ 185 185 ··· 191 191 ENTRY(xscale_flush_icache_all) 192 192 mov r0, #0 193 193 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 194 - mov pc, lr 194 + ret lr 195 195 ENDPROC(xscale_flush_icache_all) 196 196 197 197 /* ··· 216 216 tst r2, #VM_EXEC 217 217 mcrne p15, 0, ip, c7, c5, 0 @ Invalidate I cache & BTB 218 218 mcrne p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer 219 - mov pc, lr 219 + ret lr 220 220 221 221 /* 222 222 * flush_user_cache_range(start, end, vm_flags) ··· 245 245 tst r2, #VM_EXEC 246 246 mcrne p15, 0, ip, c7, c5, 6 @ Invalidate BTB 247 247 mcrne p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer 248 - mov pc, lr 248 + ret lr 249 249 250 250 /* 251 251 * coherent_kern_range(start, end) ··· 269 269 mov r0, #0 270 270 mcr p15, 0, r0, c7, c5, 0 @ Invalidate I cache & BTB 271 271 mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer 272 - mov pc, lr 272 + ret lr 273 273 274 274 /* 275 275 * coherent_user_range(start, end) ··· 291 291 mov r0, #0 292 292 mcr p15, 0, r0, c7, c5, 6 @ Invalidate BTB 293 293 mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer 294 - mov pc, lr 294 + ret lr 295 295 296 296 /* 297 297 * flush_kern_dcache_area(void *addr, size_t size) ··· 312 312 mov r0, #0 313 313 mcr p15, 0, r0, c7, c5, 0 @ Invalidate I cache & BTB 314 314 mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer 315 - mov pc, lr 315 + ret lr 316 316 317 317 /* 318 318 * dma_inv_range(start, end) ··· 336 336 cmp r0, r1 337 337 blo 1b 338 338 mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer 339 - mov pc, lr 339 + ret lr 340 340 341 341 /* 342 342 * dma_clean_range(start, end) ··· 353 353 cmp r0, r1 354 354 blo 1b 355 355 mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer 356 - mov pc, lr 356 + ret lr 357 357 358 358 /* 359 359 * dma_flush_range(start, end) ··· 371 371 cmp r0, r1 372 372 blo 1b 373 373 mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer 374 - mov pc, lr 374 + ret lr 375 375 376 376 /* 377 377 * dma_map_area(start, size, dir) ··· 407 407 * - dir - DMA direction 408 408 */ 409 409 ENTRY(xscale_dma_unmap_area) 410 - mov pc, lr 410 + ret lr 411 411 ENDPROC(xscale_dma_unmap_area) 412 412 413 413 .globl xscale_flush_kern_cache_louis ··· 458 458 add r0, r0, #CACHELINESIZE 459 459 subs r1, r1, #CACHELINESIZE 460 460 bhi 1b 461 - mov pc, lr 461 + ret lr 462 462 463 463 /* =============================== PageTable ============================== */ 464 464 ··· 521 521 orr r2, r2, ip 522 522 523 523 xscale_set_pte_ext_epilogue 524 - mov pc, lr 524 + ret lr 525 525 526 526 .ltorg 527 527 .align ··· 572 572 mrc p15, 0, r0, c1, c0, 0 @ get control register 573 573 bic r0, r0, r5 574 574 orr r0, r0, r6 575 - mov pc, lr 575 + ret lr 576 576 .size __xscale_setup, . - __xscale_setup 577 577 578 578 /*
+4 -3
arch/arm/mm/tlb-fa.S
··· 18 18 */ 19 19 #include <linux/linkage.h> 20 20 #include <linux/init.h> 21 + #include <asm/assembler.h> 21 22 #include <asm/asm-offsets.h> 22 23 #include <asm/tlbflush.h> 23 24 #include "proc-macros.S" ··· 38 37 vma_vm_mm ip, r2 39 38 act_mm r3 @ get current->active_mm 40 39 eors r3, ip, r3 @ == mm ? 41 - movne pc, lr @ no, we dont do anything 40 + retne lr @ no, we dont do anything 42 41 mov r3, #0 43 42 mcr p15, 0, r3, c7, c10, 4 @ drain WB 44 43 bic r0, r0, #0x0ff ··· 48 47 cmp r0, r1 49 48 blo 1b 50 49 mcr p15, 0, r3, c7, c10, 4 @ data write barrier 51 - mov pc, lr 50 + ret lr 52 51 53 52 54 53 ENTRY(fa_flush_kern_tlb_range) ··· 62 61 blo 1b 63 62 mcr p15, 0, r3, c7, c10, 4 @ data write barrier 64 63 mcr p15, 0, r3, c7, c5, 4 @ prefetch flush (isb) 65 - mov pc, lr 64 + ret lr 66 65 67 66 __INITDATA 68 67
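The TLB paths above also show the conditional form of the conversion: the "movne pc, lr" early-return after the "eors" mm check becomes "retne lr". As a minimal illustrative sketch (a hypothetical helper written for this note, not code from the patch), the converted early-return idiom looks like this:

#include <linux/linkage.h>
#include <asm/assembler.h>

ENTRY(example_flush_helper)
	teq	r0, #0			@ anything to do?
	reteq	lr			@ no: conditional return, was "moveq pc, lr"
	mcr	p15, 0, r0, c8, c7, 1	@ invalidate single TLB entry (example op)
	ret	lr			@ done, was "mov pc, lr"
ENDPROC(example_flush_helper)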
+3 -2
arch/arm/mm/tlb-v4.S
··· 14 14 */ 15 15 #include <linux/linkage.h> 16 16 #include <linux/init.h> 17 + #include <asm/assembler.h> 17 18 #include <asm/asm-offsets.h> 18 19 #include <asm/tlbflush.h> 19 20 #include "proc-macros.S" ··· 34 33 vma_vm_mm ip, r2 35 34 act_mm r3 @ get current->active_mm 36 35 eors r3, ip, r3 @ == mm ? 37 - movne pc, lr @ no, we dont do anything 36 + retne lr @ no, we dont do anything 38 37 .v4_flush_kern_tlb_range: 39 38 bic r0, r0, #0x0ff 40 39 bic r0, r0, #0xf00 ··· 42 41 add r0, r0, #PAGE_SZ 43 42 cmp r0, r1 44 43 blo 1b 45 - mov pc, lr 44 + ret lr 46 45 47 46 /* 48 47 * v4_flush_kern_tlb_range(start, end)
+4 -3
arch/arm/mm/tlb-v4wb.S
··· 14 14 */ 15 15 #include <linux/linkage.h> 16 16 #include <linux/init.h> 17 + #include <asm/assembler.h> 17 18 #include <asm/asm-offsets.h> 18 19 #include <asm/tlbflush.h> 19 20 #include "proc-macros.S" ··· 34 33 vma_vm_mm ip, r2 35 34 act_mm r3 @ get current->active_mm 36 35 eors r3, ip, r3 @ == mm ? 37 - movne pc, lr @ no, we dont do anything 36 + retne lr @ no, we dont do anything 38 37 vma_vm_flags r2, r2 39 38 mcr p15, 0, r3, c7, c10, 4 @ drain WB 40 39 tst r2, #VM_EXEC ··· 45 44 add r0, r0, #PAGE_SZ 46 45 cmp r0, r1 47 46 blo 1b 48 - mov pc, lr 47 + ret lr 49 48 50 49 /* 51 50 * v4_flush_kern_tlb_range(start, end) ··· 66 65 add r0, r0, #PAGE_SZ 67 66 cmp r0, r1 68 67 blo 1b 69 - mov pc, lr 68 + ret lr 70 69 71 70 __INITDATA 72 71
+4 -3
arch/arm/mm/tlb-v4wbi.S
··· 14 14 */ 15 15 #include <linux/linkage.h> 16 16 #include <linux/init.h> 17 + #include <asm/assembler.h> 17 18 #include <asm/asm-offsets.h> 18 19 #include <asm/tlbflush.h> 19 20 #include "proc-macros.S" ··· 33 32 vma_vm_mm ip, r2 34 33 act_mm r3 @ get current->active_mm 35 34 eors r3, ip, r3 @ == mm ? 36 - movne pc, lr @ no, we dont do anything 35 + retne lr @ no, we dont do anything 37 36 mov r3, #0 38 37 mcr p15, 0, r3, c7, c10, 4 @ drain WB 39 38 vma_vm_flags r2, r2 ··· 45 44 add r0, r0, #PAGE_SZ 46 45 cmp r0, r1 47 46 blo 1b 48 - mov pc, lr 47 + ret lr 49 48 50 49 ENTRY(v4wbi_flush_kern_tlb_range) 51 50 mov r3, #0 ··· 57 56 add r0, r0, #PAGE_SZ 58 57 cmp r0, r1 59 58 blo 1b 60 - mov pc, lr 59 + ret lr 61 60 62 61 __INITDATA 63 62
+3 -2
arch/arm/mm/tlb-v6.S
··· 13 13 #include <linux/init.h> 14 14 #include <linux/linkage.h> 15 15 #include <asm/asm-offsets.h> 16 + #include <asm/assembler.h> 16 17 #include <asm/page.h> 17 18 #include <asm/tlbflush.h> 18 19 #include "proc-macros.S" ··· 56 55 cmp r0, r1 57 56 blo 1b 58 57 mcr p15, 0, ip, c7, c10, 4 @ data synchronization barrier 59 - mov pc, lr 58 + ret lr 60 59 61 60 /* 62 61 * v6wbi_flush_kern_tlb_range(start,end) ··· 85 84 blo 1b 86 85 mcr p15, 0, r2, c7, c10, 4 @ data synchronization barrier 87 86 mcr p15, 0, r2, c7, c5, 4 @ prefetch flush (isb) 88 - mov pc, lr 87 + ret lr 89 88 90 89 __INIT 91 90
+2 -2
arch/arm/mm/tlb-v7.S
··· 57 57 cmp r0, r1 58 58 blo 1b 59 59 dsb ish 60 - mov pc, lr 60 + ret lr 61 61 ENDPROC(v7wbi_flush_user_tlb_range) 62 62 63 63 /* ··· 86 86 blo 1b 87 87 dsb ish 88 88 isb 89 - mov pc, lr 89 + ret lr 90 90 ENDPROC(v7wbi_flush_kern_tlb_range) 91 91 92 92 __INIT
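On v7 the detail worth keeping in view is the barrier pairing just before each return: "dsb ish" ensures the TLB maintenance has completed across the inner-shareable domain, and the kernel-range path adds "isb" so later instruction fetches observe the new mappings. A sketch of the tail sequence (flush_tail is an illustrative label):

	@ Sketch only: the v7 TLB-flush tail shown above.
	flush_tail:
		dsb	ish		@ complete the TLB maintenance
		isb			@ resynchronise the instruction stream
		ret	lr		@ then return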
+4 -4
arch/arm/nwfpe/entry.S
··· 19 19 along with this program; if not, write to the Free Software 20 20 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 21 21 */ 22 - 22 + #include <asm/assembler.h> 23 23 #include <asm/opcodes.h> 24 24 25 25 /* This is the kernel's entry point into the floating point emulator. ··· 92 92 mov r0, r6 @ prepare for EmulateAll() 93 93 bl EmulateAll @ emulate the instruction 94 94 cmp r0, #0 @ was emulation successful 95 - moveq pc, r4 @ no, return failure 95 + reteq r4 @ no, return failure 96 96 97 97 next: 98 98 .Lx1: ldrt r6, [r5], #4 @ get the next instruction and ··· 102 102 teq r2, #0x0C000000 103 103 teqne r2, #0x0D000000 104 104 teqne r2, #0x0E000000 105 - movne pc, r9 @ return ok if not a fp insn 105 + retne r9 @ return ok if not a fp insn 106 106 107 107 str r5, [sp, #S_PC] @ update PC copy in regs 108 108 ··· 115 115 @ plain LDR instruction. Weird, but it seems harmless. 116 116 .pushsection .fixup,"ax" 117 117 .align 2 118 - .Lfix: mov pc, r9 @ let the user eat segfaults 118 + .Lfix: ret r9 @ let the user eat segfaults 119 119 .popsection 120 120 121 121 .pushsection __ex_table,"a"
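The nwfpe hunk also converts a return that lives in a fixup section: .Lx1 fetches the next instruction with ldrt, and if that user-space access faults, the __ex_table entry redirects execution to .Lfix, which now ends in "ret r9". A sketch of the idiom with illustrative names (load_word and bad_access are not the kernel's):

	@ Sketch only: the .fixup/__ex_table idiom touched above.
	load_word:
	1:	ldrt	r0, [r1], #4		@ user-space load, may fault
		ret	lr			@ normal path

		.pushsection .fixup, "ax"
		.align	2
	bad_access:
		mov	r0, #0			@ faulting path: report failure
		ret	lr
		.popsection

		.pushsection __ex_table, "a"
		.align	3
		.long	1b, bad_access		@ faulting PC -> fixup handler
		.popsection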
+2 -2
arch/arm/vfp/entry.S
··· 34 34 35 35 ENTRY(vfp_null_entry) 36 36 dec_preempt_count_ti r10, r4 37 - mov pc, lr 37 + ret lr 38 38 ENDPROC(vfp_null_entry) 39 39 40 40 .align 2 ··· 49 49 dec_preempt_count_ti r10, r4 50 50 ldr r0, VFP_arch_address 51 51 str r0, [r0] @ set to non-zero value 52 - mov pc, r9 @ we have handled the fault 52 + ret r9 @ we have handled the fault 53 53 ENDPROC(vfp_testing_entry) 54 54 55 55 .align 2
+13 -13
arch/arm/vfp/vfphw.S
··· 183 183 @ always subtract 4 from the following 184 184 @ instruction address. 185 185 dec_preempt_count_ti r10, r4 186 - mov pc, r9 @ we think we have handled things 186 + ret r9 @ we think we have handled things 187 187 188 188 189 189 look_for_VFP_exceptions: ··· 202 202 203 203 DBGSTR "not VFP" 204 204 dec_preempt_count_ti r10, r4 205 - mov pc, lr 205 + ret lr 206 206 207 207 process_exception: 208 208 DBGSTR "bounce" ··· 234 234 VFPFMRX r12, FPINST2 @ FPINST2 if needed (and present) 235 235 1: 236 236 stmia r0, {r1, r2, r3, r12} @ save FPEXC, FPSCR, FPINST, FPINST2 237 - mov pc, lr 237 + ret lr 238 238 ENDPROC(vfp_save_state) 239 239 240 240 .align ··· 245 245 #ifdef CONFIG_THUMB2_KERNEL 246 246 adr \tmp, 1f 247 247 add \tmp, \tmp, \base, lsl \shift 248 - mov pc, \tmp 248 + ret \tmp 249 249 #else 250 250 add pc, pc, \base, lsl \shift 251 251 mov r0, r0 ··· 257 257 tbl_branch r0, r3, #3 258 258 .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 259 259 1: mrc p10, 0, r0, c\dr, c0, 0 @ fmrs r0, s0 260 - mov pc, lr 260 + ret lr 261 261 .org 1b + 8 262 262 1: mrc p10, 0, r0, c\dr, c0, 4 @ fmrs r0, s1 263 - mov pc, lr 263 + ret lr 264 264 .org 1b + 8 265 265 .endr 266 266 ENDPROC(vfp_get_float) ··· 269 269 tbl_branch r1, r3, #3 270 270 .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 271 271 1: mcr p10, 0, r0, c\dr, c0, 0 @ fmsr r0, s0 272 - mov pc, lr 272 + ret lr 273 273 .org 1b + 8 274 274 1: mcr p10, 0, r0, c\dr, c0, 4 @ fmsr r0, s1 275 - mov pc, lr 275 + ret lr 276 276 .org 1b + 8 277 277 .endr 278 278 ENDPROC(vfp_put_float) ··· 281 281 tbl_branch r0, r3, #3 282 282 .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 283 283 1: fmrrd r0, r1, d\dr 284 - mov pc, lr 284 + ret lr 285 285 .org 1b + 8 286 286 .endr 287 287 #ifdef CONFIG_VFPv3 288 288 @ d16 - d31 registers 289 289 .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 290 290 1: mrrc p11, 3, r0, r1, c\dr @ fmrrd r0, r1, d\dr 291 - mov pc, lr 291 + ret lr 292 292 .org 1b + 8 293 293 .endr 294 294 #endif ··· 296 296 @ virtual register 16 (or 32 if VFPv3) for compare with zero 297 297 mov r0, #0 298 298 mov r1, #0 299 - mov pc, lr 299 + ret lr 300 300 ENDPROC(vfp_get_double) 301 301 302 302 ENTRY(vfp_put_double) 303 303 tbl_branch r2, r3, #3 304 304 .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 305 305 1: fmdrr d\dr, r0, r1 306 - mov pc, lr 306 + ret lr 307 307 .org 1b + 8 308 308 .endr 309 309 #ifdef CONFIG_VFPv3 310 310 @ d16 - d31 registers 311 311 .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 312 312 1: mcrr p11, 3, r0, r1, c\dr @ fmdrr r0, r1, d\dr 313 - mov pc, lr 313 + ret lr 314 314 .org 1b + 8 315 315 .endr 316 316 #endif
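vfphw.S is the one place where the conversion interacts with code layout: tbl_branch computes a branch into a table whose entries are padded to a fixed stride by ".org 1b + 8", so each mrc/return pair must occupy exactly eight bytes, i.e. two 4-byte ARM instructions. "ret lr" assembles to a single instruction, just as "mov pc, lr" did, so the stride survives the conversion. A sketch of the mechanism with an illustrative routine name (get_sreg):

	@ Sketch only: fixed-stride dispatch as tbl_branch builds it in ARM mode.
	get_sreg:
		add	pc, pc, r0, lsl #3	@ pc reads 8 bytes ahead, so
		mov	r0, r0			@ index 0 lands on the entry below
	1:	mrc	p10, 0, r0, c0, c0, 0	@ entry 0: fmrs r0, s0
		ret	lr			@ one instruction, stride intact
		.org	1b + 8			@ pad each entry to 8 bytes
	1:	mrc	p10, 0, r0, c0, c0, 4	@ entry 1: fmrs r0, s1
		ret	lr
		.org	1b + 8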
+3 -3
arch/arm/xen/hypercall.S
··· 58 58 ENTRY(HYPERVISOR_##hypercall) \ 59 59 mov r12, #__HYPERVISOR_##hypercall; \ 60 60 __HVC(XEN_IMM); \ 61 - mov pc, lr; \ 61 + ret lr; \ 62 62 ENDPROC(HYPERVISOR_##hypercall) 63 63 64 64 #define HYPERCALL0 HYPERCALL_SIMPLE ··· 74 74 mov r12, #__HYPERVISOR_##hypercall; \ 75 75 __HVC(XEN_IMM); \ 76 76 ldm sp!, {r4} \ 77 - mov pc, lr \ 77 + ret lr \ 78 78 ENDPROC(HYPERVISOR_##hypercall) 79 79 80 80 .text ··· 101 101 ldr r4, [sp, #4] 102 102 __HVC(XEN_IMM) 103 103 ldm sp!, {r4} 104 - mov pc, lr 104 + ret lr 105 105 ENDPROC(privcmd_call);
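Because every Xen stub is generated from these macros, the conversion lands once here and covers the whole hypercall table. Roughly, a zero-to-three-argument stub such as HYPERVISOR_sched_op expands to the following (a sketch of the post-preprocessing shape, not verbatim output):

	@ Sketch only: one HYPERCALL_SIMPLE stub after macro expansion.
	ENTRY(HYPERVISOR_sched_op)
		mov	r12, #__HYPERVISOR_sched_op	@ hypercall number in r12
		__HVC(XEN_IMM)				@ trap to the hypervisor
		ret	lr				@ result returns in r0
	ENDPROC(HYPERVISOR_sched_op)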