Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

[ARM] 5227/1: Add the ENDPROC declarations to the .S files

This declaration specifies the "function" type and size for various
assembly functions, mainly needed for generating the correct branch
instructions in Thumb-2.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
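
A note on what these macros do: roughly, ENTRY() exports and aligns the symbol, while ENDPROC() marks it as a function and records its size. A minimal sketch, assuming the <linux/linkage.h> macro bodies of this era (exact details may differ):

	ENTRY(func)		@ roughly: .globl func; ALIGN; func:
		mov	r0, #0
		mov	pc, lr
	ENDPROC(func)		@ roughly: .type func, %function
				@          .size func, . - func

The .type directive is what Thumb-2 needs: the linker only tracks the ARM/Thumb state of symbols typed as functions, so it can fix up a BL into a BLX (or insert an interworking veneer) when caller and callee are in different instruction sets.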

Authored by Catalin Marinas, committed by Russell King
93ed3970 8d5796d2

+201 -50
+1
arch/arm/boot/compressed/head.S
···
 	add r1, r1, #1048576
 	str r1, [r0]
 	mov pc, lr
+ENDPROC(__setup_mmu)
 
 __armv4_mmu_cache_on:
 	mov r12, lr
+5
arch/arm/kernel/debug.S
···
 ENTRY(printhex8)
 	mov r1, #8
 	b printhex
+ENDPROC(printhex8)
 
 ENTRY(printhex4)
 	mov r1, #4
 	b printhex
+ENDPROC(printhex4)
 
 ENTRY(printhex2)
 	mov r1, #2
···
 	bne 1b
 	mov r0, r2
 	b printascii
+ENDPROC(printhex2)
 
 	.ltorg
 
···
 	teqne r1, #0
 	bne 1b
 	mov pc, lr
+ENDPROC(printascii)
 
 ENTRY(printch)
 	addruart r3
 	mov r1, r0
 	mov r0, #0
 	b 1b
+ENDPROC(printch)
 
 hexbuf: .space 16
+16
arch/arm/kernel/entry-armv.S
···
 __pabt_invalid:
 	inv_entry BAD_PREFETCH
 	b common_invalid
+ENDPROC(__pabt_invalid)
 
 __dabt_invalid:
 	inv_entry BAD_DATA
 	b common_invalid
+ENDPROC(__dabt_invalid)
 
 __irq_invalid:
 	inv_entry BAD_IRQ
 	b common_invalid
+ENDPROC(__irq_invalid)
 
 __und_invalid:
 	inv_entry BAD_UNDEFINSTR
···
 
 	mov r0, sp
 	b bad_mode
+ENDPROC(__und_invalid)
 
 /*
  * SVC mode handlers
···
 	ldr r0, [sp, #S_PSR]
 	msr spsr_cxsf, r0
 	ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
+ENDPROC(__dabt_svc)
 
 	.align 5
 __irq_svc:
···
 	bleq trace_hardirqs_on
 #endif
 	ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
+ENDPROC(__irq_svc)
 
 	.ltorg
 
···
 	ldr lr, [sp, #S_PSR] @ Get SVC cpsr
 	msr spsr_cxsf, lr
 	ldmia sp, {r0 - pc}^ @ Restore SVC registers
+ENDPROC(__und_svc)
 
 	.align 5
 __pabt_svc:
···
 	ldr r0, [sp, #S_PSR]
 	msr spsr_cxsf, r0
 	ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
+ENDPROC(__pabt_svc)
 
 	.align 5
 .LCcralign:
···
 	mov r2, sp
 	adr lr, ret_from_exception
 	b do_DataAbort
+ENDPROC(__dabt_usr)
 
 	.align 5
 __irq_usr:
···
 
 	mov why, #0
 	b ret_to_user
+ENDPROC(__irq_usr)
 
 	.ltorg
 
···
 #else
 	b __und_usr_unknown
 #endif
+ENDPROC(__und_usr)
 
 	@
 	@ fallthrough to call_fpe
···
 	mov r0, sp
 	adr lr, ret_from_exception
 	b do_undefinstr
+ENDPROC(__und_usr_unknown)
 
 	.align 5
 __pabt_usr:
···
 	get_thread_info tsk
 	mov why, #0
 	b ret_to_user
+ENDPROC(__pabt_usr)
+ENDPROC(ret_from_exception)
 
 /*
  * Register switch for ARMv3 and ARMv4 processors
···
 	bl atomic_notifier_call_chain
 	mov r0, r5
 	ldmia r4, {r4 - sl, fp, sp, pc} @ Load all regs saved previously
+ENDPROC(__switch_to)
 
 	__INIT
 
···
 	mov r0, sp
 	ldr lr, [pc, lr, lsl #2]
 	movs pc, lr @ branch to handler in SVC mode
+ENDPROC(vector_\name)
 	.endm
 
 	.globl __stubs_start
+23 -2
arch/arm/kernel/entry-common.S
···
 	mov r0, r0
 	add sp, sp, #S_FRAME_SIZE - S_PC
 	movs pc, lr @ return & move spsr_svc into cpsr
+ENDPROC(ret_to_user)
 
 /*
  * This is how we return from a fork.
···
 	mov r0, #1 @ trace exit [IP = 1]
 	bl syscall_trace
 	b ret_slow_syscall
-
+ENDPROC(ret_from_fork)
 
 	.equ NR_syscalls,0
 #define CALL(x) .equ NR_syscalls,NR_syscalls+1
···
 	eor r0, scno, #__NR_SYSCALL_BASE @ put OS number back
 	bcs arm_syscall
 	b sys_ni_syscall @ not private func
+ENDPROC(vector_swi)
 
 /*
  * This is the really slow path. We're going to be doing
···
 */
 @ r0 = syscall number
 @ r8 = syscall table
-	.type sys_syscall, #function
 sys_syscall:
 	bic scno, r0, #__NR_OABI_SYSCALL_BASE
 	cmp scno, #__NR_syscall - __NR_SYSCALL_BASE
···
 	movlo r3, r4
 	ldrlo pc, [tbl, scno, lsl #2]
 	b sys_ni_syscall
+ENDPROC(sys_syscall)
 
 sys_fork_wrapper:
 	add r0, sp, #S_OFF
 	b sys_fork
+ENDPROC(sys_fork_wrapper)
 
 sys_vfork_wrapper:
 	add r0, sp, #S_OFF
 	b sys_vfork
+ENDPROC(sys_vfork_wrapper)
 
 sys_execve_wrapper:
 	add r3, sp, #S_OFF
 	b sys_execve
+ENDPROC(sys_execve_wrapper)
 
 sys_clone_wrapper:
 	add ip, sp, #S_OFF
 	str ip, [sp, #4]
 	b sys_clone
+ENDPROC(sys_clone_wrapper)
 
 sys_sigsuspend_wrapper:
 	add r3, sp, #S_OFF
 	b sys_sigsuspend
+ENDPROC(sys_sigsuspend_wrapper)
 
 sys_rt_sigsuspend_wrapper:
 	add r2, sp, #S_OFF
 	b sys_rt_sigsuspend
+ENDPROC(sys_rt_sigsuspend_wrapper)
 
 sys_sigreturn_wrapper:
 	add r0, sp, #S_OFF
 	b sys_sigreturn
+ENDPROC(sys_sigreturn_wrapper)
 
 sys_rt_sigreturn_wrapper:
 	add r0, sp, #S_OFF
 	b sys_rt_sigreturn
+ENDPROC(sys_rt_sigreturn_wrapper)
 
 sys_sigaltstack_wrapper:
 	ldr r2, [sp, #S_OFF + S_SP]
 	b do_sigaltstack
+ENDPROC(sys_sigaltstack_wrapper)
 
 sys_statfs64_wrapper:
 	teq r1, #88
 	moveq r1, #84
 	b sys_statfs64
+ENDPROC(sys_statfs64_wrapper)
 
 sys_fstatfs64_wrapper:
 	teq r1, #88
 	moveq r1, #84
 	b sys_fstatfs64
+ENDPROC(sys_fstatfs64_wrapper)
 
 /*
  * Note: off_4k (r5) is always units of 4K. If we can't do the requested
···
 	str r5, [sp, #4]
 	b do_mmap2
 #endif
+ENDPROC(sys_mmap2)
 
 ENTRY(pabort_ifar)
 	mrc p15, 0, r0, cr6, cr0, 2
 ENTRY(pabort_noifar)
 	mov pc, lr
+ENDPROC(pabort_ifar)
+ENDPROC(pabort_noifar)
 
 #ifdef CONFIG_OABI_COMPAT
 
···
 sys_oabi_pread64:
 	stmia sp, {r3, r4}
 	b sys_pread64
+ENDPROC(sys_oabi_pread64)
 
 sys_oabi_pwrite64:
 	stmia sp, {r3, r4}
 	b sys_pwrite64
+ENDPROC(sys_oabi_pwrite64)
 
 sys_oabi_truncate64:
 	mov r3, r2
 	mov r2, r1
 	b sys_truncate64
+ENDPROC(sys_oabi_truncate64)
 
 sys_oabi_ftruncate64:
 	mov r3, r2
 	mov r2, r1
 	b sys_ftruncate64
+ENDPROC(sys_oabi_ftruncate64)
 
 sys_oabi_readahead:
 	str r3, [sp]
 	mov r3, r2
 	mov r2, r1
 	b sys_readahead
+ENDPROC(sys_oabi_readahead)
 
 /*
  * Let's declare a second syscall table for old ABI binaries
+10 -9
arch/arm/kernel/head-common.S
···
  * r2 = atags pointer
  * r9 = processor ID
  */
-	.type __mmap_switched, %function
 __mmap_switched:
 	adr r3, __switch_data + 4
 
···
 	bic r4, r0, #CR_A @ Clear 'A' bit
 	stmia r7, {r0, r4} @ Save control register values
 	b start_kernel
+ENDPROC(__mmap_switched)
 
 /*
  * Exception handling. Something went wrong and we can't proceed. We
···
  * and hope for the best (useful if bootloader fails to pass a proper
  * machine ID for example).
  */
-
-	.type __error_p, %function
 __error_p:
 #ifdef CONFIG_DEBUG_LL
 	adr r0, str_p1
···
 str_p2: .asciz ").\n"
 	.align
 #endif
+ENDPROC(__error_p)
 
-	.type __error_a, %function
 __error_a:
 #ifdef CONFIG_DEBUG_LL
 	mov r4, r1 @ preserve machine ID
···
 	adr r0, str_a3
 	bl printascii
 	b __error
+ENDPROC(__error_a)
+
 str_a1: .asciz "\nError: unrecognized/unsupported machine ID (r1 = 0x"
 str_a2: .asciz ").\n\nAvailable machine support:\n\nID (hex)\tNAME\n"
 str_a3: .asciz "\nPlease check your kernel config and/or bootloader.\n"
 	.align
 #endif
 
-	.type __error, %function
 __error:
 #ifdef CONFIG_ARCH_RPC
 /*
···
 #endif
 1:	mov r0, r0
 	b 1b
+ENDPROC(__error)
 
 
 /*
···
  * r5 = proc_info pointer in physical address space
  * r9 = cpuid (preserved)
  */
-	.type __lookup_processor_type, %function
 __lookup_processor_type:
 	adr r3, 3f
 	ldmda r3, {r5 - r7}
···
 	blo 1b
 	mov r5, #0 @ unknown processor
 2:	mov pc, lr
+ENDPROC(__lookup_processor_type)
 
 /*
  * This provides a C-API version of the above function.
···
 	bl __lookup_processor_type
 	mov r0, r5
 	ldmfd sp!, {r4 - r7, r9, pc}
+ENDPROC(lookup_processor_type)
 
 /*
  * Look in <asm/procinfo.h> and arch/arm/kernel/arch.[ch] for
···
  * r3, r4, r6 corrupted
  * r5 = mach_info pointer in physical address space
  */
-	.type __lookup_machine_type, %function
 __lookup_machine_type:
 	adr r3, 3b
 	ldmia r3, {r4, r5, r6}
···
 	blo 1b
 	mov r5, #0 @ unknown machine
 2:	mov pc, lr
+ENDPROC(__lookup_machine_type)
 
 /*
  * This provides a C-API version of the above function.
···
 	bl __lookup_machine_type
 	mov r0, r5
 	ldmfd sp!, {r4 - r6, pc}
+ENDPROC(lookup_machine_type)
 
 /* Determine validity of the r2 atags pointer. The heuristic requires
  * that the pointer be aligned, in the first 16k of physical RAM and
···
  * r2 either valid atags pointer, or zero
  * r5, r6 corrupted
  */
-
-	.type __vet_atags, %function
 __vet_atags:
 	tst r2, #0x3 @ aligned?
 	bne 1f
···
 
 1:	mov r2, #0
 	mov pc, lr
+ENDPROC(__vet_atags)
+2 -2
arch/arm/kernel/head-nommu.S
···
  *
  */
 	.section ".text.head", "ax"
-	.type stext, %function
 ENTRY(stext)
 	msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE @ ensure svc mode
 						      @ and irqs disabled
···
 				  @ the initialization is done
 	adr lr, __after_proc_init @ return (PIC) address
 	add pc, r10, #PROCINFO_INITFUNC
+ENDPROC(stext)
 
 /*
  * Set the Control Register and Read the process ID.
  */
-	.type __after_proc_init, %function
 __after_proc_init:
 #ifdef CONFIG_CPU_CP15
 	mrc p15, 0, r0, c1, c0, 0 @ read control reg
···
 
 	mov pc, r13 @ clear the BSS and jump
 		    @ to start_kernel
+ENDPROC(__after_proc_init)
 	.ltorg
 
 #include "head-common.S"
+6 -6
arch/arm/kernel/head.S
···
  * circumstances, zImage) is for.
  */
 	.section ".text.head", "ax"
-	.type stext, %function
 ENTRY(stext)
 	msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE @ ensure svc mode
 						      @ and irqs disabled
···
 			     @ mmu has been enabled
 	adr lr, __enable_mmu @ return (PIC) address
 	add pc, r10, #PROCINFO_INITFUNC
+ENDPROC(stext)
 
 #if defined(CONFIG_SMP)
-	.type secondary_startup, #function
 ENTRY(secondary_startup)
 	/*
 	 * Common entry point for secondary CPUs.
···
 	adr lr, __enable_mmu @ return address
 	add pc, r10, #PROCINFO_INITFUNC @ initialise processor
 					@ (return control reg)
+ENDPROC(secondary_startup)
 
 	/*
 	 * r6 = &secondary_data
···
 	ldr sp, [r7, #4] @ get secondary_data.stack
 	mov fp, #0
 	b secondary_start_kernel
+ENDPROC(__secondary_switched)
 
 	.type __secondary_data, %object
 __secondary_data:
···
  * this is just loading the page table pointer and domain access
  * registers.
  */
-	.type __enable_mmu, %function
 __enable_mmu:
 #ifdef CONFIG_ALIGNMENT_TRAP
 	orr r0, r0, #CR_A
···
 	mcr p15, 0, r5, c3, c0, 0 @ load domain access register
 	mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
 	b __turn_mmu_on
+ENDPROC(__enable_mmu)
 
 /*
  * Enable the MMU. This completely changes the structure of the visible
···
  * other registers depend on the function called upon completion
  */
 	.align 5
-	.type __turn_mmu_on, %function
 __turn_mmu_on:
 	mov r0, r0
 	mcr p15, 0, r0, c1, c0, 0 @ write control reg
···
 	mov r3, r3
 	mov r3, r3
 	mov pc, r13
-
+ENDPROC(__turn_mmu_on)
 
 
 /*
···
  * r0, r3, r6, r7 corrupted
  * r4 = physical page table address
  */
-	.type __create_page_tables, %function
 __create_page_tables:
 	pgtbl r4 @ page table address
 
···
 #endif
 #endif
 	mov pc, lr
+ENDPROC(__create_page_tables)
 	.ltorg
 
 #include "head-common.S"
+2
arch/arm/lib/ashldi3.S
···
 	mov al, al, lsl r2
 	mov pc, lr
 
+ENDPROC(__ashldi3)
+ENDPROC(__aeabi_llsl)
+2
arch/arm/lib/ashrdi3.S
···
 	mov ah, ah, asr r2
 	mov pc, lr
 
+ENDPROC(__ashrdi3)
+ENDPROC(__aeabi_lasr)
+4
arch/arm/lib/backtrace.S
···
 
 #if !defined(CONFIG_FRAME_POINTER) || !defined(CONFIG_PRINTK)
 	mov pc, lr
+ENDPROC(__backtrace)
+ENDPROC(c_backtrace)
 #else
 	stmfd sp!, {r4 - r8, lr} @ Save an extra register so we have a location...
 	movs frame, r0 @ if frame pointer is zero
···
 	mov r1, frame
 	bl printk
 no_frame:	ldmfd sp!, {r4 - r8, pc}
+ENDPROC(__backtrace)
+ENDPROC(c_backtrace)
 
 	.section __ex_table,"a"
 	.align 3
+2
arch/arm/lib/changebit.S
···
 	eor r0, r0, #0x18 @ big endian byte ordering
 ENTRY(_change_bit_le)
 	bitop eor
+ENDPROC(_change_bit_be)
+ENDPROC(_change_bit_le)
+1
arch/arm/lib/clear_user.S
···
 USER( strnebt r2, [r0], #1)
 	mov r0, #0
 	ldmfd sp!, {r1, pc}
+ENDPROC(__clear_user)
 
 	.section .fixup,"ax"
 	.align 0
+2
arch/arm/lib/clearbit.S
···
 	eor r0, r0, #0x18 @ big endian byte ordering
 ENTRY(_clear_bit_le)
 	bitop bic
+ENDPROC(_clear_bit_be)
+ENDPROC(_clear_bit_le)
+2
arch/arm/lib/copy_from_user.S
···
 
 #include "copy_template.S"
 
+ENDPROC(__copy_from_user)
+
 	.section .fixup,"ax"
 	.align 0
 copy_abort_preamble
+1
arch/arm/lib/copy_page.S
···
 PLD( ldmeqia r1!, {r3, r4, ip, lr} )
 PLD( beq 2b )
 	ldmfd sp!, {r4, pc} @ 3
+ENDPROC(copy_page)
+2
arch/arm/lib/copy_to_user.S
···
 
 #include "copy_template.S"
 
+ENDPROC(__copy_to_user)
+
 	.section .fixup,"ax"
 	.align 0
 copy_abort_preamble
+1
arch/arm/lib/csumipv6.S
···
 	adcs r0, r0, r2
 	adcs r0, r0, #0
 	ldmfd sp!, {pc}
+ENDPROC(__csum_ipv6_magic)
 
+1
arch/arm/lib/csumpartial.S
···
 	tst len, #0x1c
 	bne 4b
 	b .Lless4
+ENDPROC(csum_partial)
+1
arch/arm/lib/csumpartialcopy.S
···
 	.endm
 
 #define FN_ENTRY ENTRY(csum_partial_copy_nocheck)
+#define FN_EXIT ENDPROC(csum_partial_copy_nocheck)
 
 #include "csumpartialcopygeneric.S"
+1
arch/arm/lib/csumpartialcopygeneric.S
···
 	adcs sum, sum, r4, push #24
 	mov r5, r4, get_byte_1
 	b .Lexit
+FN_EXIT
+1
arch/arm/lib/csumpartialcopyuser.S
···
  */
 
 #define FN_ENTRY ENTRY(csum_partial_copy_from_user)
+#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
 
 #include "csumpartialcopygeneric.S"
 
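A side note on the three csum files above: csumpartialcopygeneric.S is a shared function body that each wrapper parameterises by defining FN_ENTRY, and now FN_EXIT, before including it. A sketch of the pattern as implied by these hunks (illustrative only):

	/* wrapper file */
	#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
	#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
	#include "csumpartialcopygeneric.S"

	/* csumpartialcopygeneric.S */
	FN_ENTRY			@ expands to ENTRY(<wrapper name>)
	@ ... shared checksum-and-copy body ...
	FN_EXIT				@ expands to ENDPROC(<wrapper name>)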
+3
arch/arm/lib/delay.S
···
 #endif
 	bhi __delay
 	mov pc, lr
+ENDPROC(__udelay)
+ENDPROC(__const_udelay)
+ENDPROC(__delay)
+1
arch/arm/lib/div64.S
···
 	mov xh, #0
 	ldr pc, [sp], #8
 
+ENDPROC(__do_div64)
+8
arch/arm/lib/findbit.S
···
 	blo 1b
 3:	mov r0, r1 @ no free bits
 	mov pc, lr
+ENDPROC(_find_first_zero_bit_le)
 
 /*
  * Purpose : Find next 'zero' bit
···
 	orr r2, r2, #7 @ if zero, then no bits here
 	add r2, r2, #1 @ align bit pointer
 	b 2b @ loop for next bit
+ENDPROC(_find_next_zero_bit_le)
 
 /*
  * Purpose : Find a 'one' bit
···
 	blo 1b
 3:	mov r0, r1 @ no free bits
 	mov pc, lr
+ENDPROC(_find_first_bit_le)
 
 /*
  * Purpose : Find next 'one' bit
···
 	orr r2, r2, #7 @ if zero, then no bits here
 	add r2, r2, #1 @ align bit pointer
 	b 2b @ loop for next bit
+ENDPROC(_find_next_bit_le)
 
 #ifdef __ARMEB__
 
···
 	blo 1b
 3:	mov r0, r1 @ no free bits
 	mov pc, lr
+ENDPROC(_find_first_zero_bit_be)
 
 ENTRY(_find_next_zero_bit_be)
 	teq r1, #0
···
 	orr r2, r2, #7 @ if zero, then no bits here
 	add r2, r2, #1 @ align bit pointer
 	b 2b @ loop for next bit
+ENDPROC(_find_next_zero_bit_be)
 
 ENTRY(_find_first_bit_be)
 	teq r1, #0
···
 	blo 1b
 3:	mov r0, r1 @ no free bits
 	mov pc, lr
+ENDPROC(_find_first_bit_be)
 
 ENTRY(_find_next_bit_be)
 	teq r1, #0
···
 	orr r2, r2, #7 @ if zero, then no bits here
 	add r2, r2, #1 @ align bit pointer
 	b 2b @ loop for next bit
+ENDPROC(_find_next_bit_be)
 
 #endif
 
+8 -6
arch/arm/lib/getuser.S
···
  * Note that ADDR_LIMIT is either 0 or 0xc0000000.
  * Note also that it is intended that __get_user_bad is not global.
  */
+#include <linux/linkage.h>
 #include <asm/errno.h>
 
-	.global __get_user_1
-__get_user_1:
+ENTRY(__get_user_1)
 1:	ldrbt r2, [r0]
 	mov r0, #0
 	mov pc, lr
+ENDPROC(__get_user_1)
 
-	.global __get_user_2
-__get_user_2:
+ENTRY(__get_user_2)
 2:	ldrbt r2, [r0], #1
 3:	ldrbt r3, [r0]
 #ifndef __ARMEB__
···
 #endif
 	mov r0, #0
 	mov pc, lr
+ENDPROC(__get_user_2)
 
-	.global __get_user_4
-__get_user_4:
+ENTRY(__get_user_4)
 4:	ldrt r2, [r0]
 	mov r0, #0
 	mov pc, lr
+ENDPROC(__get_user_4)
 
 __get_user_bad:
 	mov r2, #0
 	mov r0, #-EFAULT
 	mov pc, lr
+ENDPROC(__get_user_bad)
 
 	.section __ex_table, "a"
 	.long 1b, __get_user_bad
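Note that getuser.S (and putuser.S below) also converts open-coded global labels to ENTRY(), which is why <linux/linkage.h> is now included. A rough before/after equivalence (macro details may vary):

	@ before: exported label only, no type/size metadata
		.global	__get_user_1
	__get_user_1:

	@ after: same symbol, plus alignment and function metadata
	ENTRY(__get_user_1)
	@ ... body ...
	ENDPROC(__get_user_1)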
+1
arch/arm/lib/io-readsb.S
···
 	strgtb r3, [r1]
 
 	ldmfd sp!, {r4 - r6, pc}
+ENDPROC(__raw_readsb)
+1
arch/arm/lib/io-readsl.S
···
 8:	mov r3, ip, get_byte_0
 	strb r3, [r1, #0]
 	mov pc, lr
+ENDPROC(__raw_readsl)
+1
arch/arm/lib/io-readsw-armv4.S
···
 _BE_ONLY_( movne ip, ip, lsr #24 )
 	strneb ip, [r1]
 	ldmfd sp!, {r4, pc}
+ENDPROC(__raw_readsw)
+1
arch/arm/lib/io-writesb.S
···
 	strgtb r3, [r0]
 
 	ldmfd sp!, {r4, r5, pc}
+ENDPROC(__raw_writesb)
+1
arch/arm/lib/io-writesl.S
···
 	str ip, [r0]
 	bne 6b
 	mov pc, lr
+ENDPROC(__raw_writesl)
+1
arch/arm/lib/io-writesw-armv4.S
···
 3:	movne ip, r3, lsr #8
 	strneh ip, [r0]
 	mov pc, lr
+ENDPROC(__raw_writesw)
+11
arch/arm/lib/lib1funcs.S
···
 	mov r0, r0, lsr r2
 	mov pc, lr
 
+ENDPROC(__udivsi3)
+ENDPROC(__aeabi_uidiv)
 
 ENTRY(__umodsi3)
 
···
 
 	mov pc, lr
 
+ENDPROC(__umodsi3)
 
 ENTRY(__divsi3)
 ENTRY(__aeabi_idiv)
···
 	rsbmi r0, r0, #0
 	mov pc, lr
 
+ENDPROC(__divsi3)
+ENDPROC(__aeabi_idiv)
 
 ENTRY(__modsi3)
 
···
 	rsbmi r0, r0, #0
 	mov pc, lr
 
+ENDPROC(__modsi3)
+
 #ifdef CONFIG_AEABI
 
 ENTRY(__aeabi_uidivmod)
···
 	sub r1, r1, r3
 	mov pc, lr
 
+ENDPROC(__aeabi_uidivmod)
+
 ENTRY(__aeabi_idivmod)
 
 	stmfd sp!, {r0, r1, ip, lr}
···
 	mul r3, r0, r2
 	sub r1, r1, r3
 	mov pc, lr
+
+ENDPROC(__aeabi_idivmod)
 
 #endif
 
+2
arch/arm/lib/lshrdi3.S
···
 	mov ah, ah, lsr r2
 	mov pc, lr
 
+ENDPROC(__lshrdi3)
+ENDPROC(__aeabi_llsr)
+1
arch/arm/lib/memchr.S
···
 	sub r0, r0, #1
 2:	movne r0, #0
 	mov pc, lr
+ENDPROC(memchr)
+1
arch/arm/lib/memcpy.S
···
 
 #include "copy_template.S"
 
+ENDPROC(memcpy)
+1
arch/arm/lib/memmove.S
···
 
 18:	backward_copy_shift push=24 pull=8
 
+ENDPROC(memmove)
+1
arch/arm/lib/memset.S
···
 	tst r2, #1
 	strneb r1, [r0], #1
 	mov pc, lr
+ENDPROC(memset)
+1
arch/arm/lib/memzero.S
···
 	tst r1, #1 @ 1 a byte left over
 	strneb r2, [r0], #1 @ 1
 	mov pc, lr @ 1
+ENDPROC(__memzero)
+2
arch/arm/lib/muldi3.S
···
 	adc xh, xh, ip, lsr #16
 	mov pc, lr
 
+ENDPROC(__muldi3)
+ENDPROC(__aeabi_lmul)
+10 -8
arch/arm/lib/putuser.S
···
  * Note that ADDR_LIMIT is either 0 or 0xc0000000
  * Note also that it is intended that __put_user_bad is not global.
  */
+#include <linux/linkage.h>
 #include <asm/errno.h>
 
-	.global __put_user_1
-__put_user_1:
+ENTRY(__put_user_1)
 1:	strbt r2, [r0]
 	mov r0, #0
 	mov pc, lr
+ENDPROC(__put_user_1)
 
-	.global __put_user_2
-__put_user_2:
+ENTRY(__put_user_2)
 	mov ip, r2, lsr #8
 #ifndef __ARMEB__
 2:	strbt r2, [r0], #1
···
 #endif
 	mov r0, #0
 	mov pc, lr
+ENDPROC(__put_user_2)
 
-	.global __put_user_4
-__put_user_4:
+ENTRY(__put_user_4)
 4:	strt r2, [r0]
 	mov r0, #0
 	mov pc, lr
+ENDPROC(__put_user_4)
 
-	.global __put_user_8
-__put_user_8:
+ENTRY(__put_user_8)
 5:	strt r2, [r0], #4
 6:	strt r3, [r0]
 	mov r0, #0
 	mov pc, lr
+ENDPROC(__put_user_8)
 
 __put_user_bad:
 	mov r0, #-EFAULT
 	mov pc, lr
+ENDPROC(__put_user_bad)
 
 	.section __ex_table, "a"
 	.long 1b, __put_user_bad
+2
arch/arm/lib/setbit.S
···
 	eor r0, r0, #0x18 @ big endian byte ordering
 ENTRY(_set_bit_le)
 	bitop orr
+ENDPROC(_set_bit_be)
+ENDPROC(_set_bit_le)
+3
arch/arm/lib/sha1.S
···
 
 	ldmfd sp!, {r4 - r8, pc}
 
+ENDPROC(sha_transform)
+
 .L_sha_K:
 	.word 0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6
 
···
 	stmia r0, {r1, r2, r3, ip, lr}
 	ldr pc, [sp], #4
 
+ENDPROC(sha_init)
+1
arch/arm/lib/strchr.S
···
 	movne r0, #0
 	subeq r0, r0, #1
 	mov pc, lr
+ENDPROC(strchr)
+1
arch/arm/lib/strncpy_from_user.S
···
 	sub r1, r1, #1 @ take NUL character out of count
 2:	sub r0, r1, ip
 	mov pc, lr
+ENDPROC(__strncpy_from_user)
 
 	.section .fixup,"ax"
 	.align 0
+1
arch/arm/lib/strnlen_user.S
···
 	add r0, r0, #1
 2:	sub r0, r0, r2
 	mov pc, lr
+ENDPROC(__strnlen_user)
 
 	.section .fixup,"ax"
 	.align 0
+1
arch/arm/lib/strrchr.S
···
 	bne 1b
 	mov r0, r3
 	mov pc, lr
+ENDPROC(strrchr)
+2
arch/arm/lib/testchangebit.S
···
 	eor r0, r0, #0x18 @ big endian byte ordering
 ENTRY(_test_and_change_bit_le)
 	testop eor, strb
+ENDPROC(_test_and_change_bit_be)
+ENDPROC(_test_and_change_bit_le)
+2
arch/arm/lib/testclearbit.S
···
 	eor r0, r0, #0x18 @ big endian byte ordering
 ENTRY(_test_and_clear_bit_le)
 	testop bicne, strneb
+ENDPROC(_test_and_clear_bit_be)
+ENDPROC(_test_and_clear_bit_le)
+2
arch/arm/lib/testsetbit.S
···
 	eor r0, r0, #0x18 @ big endian byte ordering
 ENTRY(_test_and_set_bit_le)
 	testop orreq, streqb
+ENDPROC(_test_and_set_bit_be)
+ENDPROC(_test_and_set_bit_le)
+2
arch/arm/lib/uaccess.S
···
 	ldrgtb r3, [r1], #0
 USER( strgtbt r3, [r0], #1) @ May fault
 	b .Lc2u_finished
+ENDPROC(__copy_to_user)
 
 	.section .fixup,"ax"
 	.align 0
···
 USER( ldrgtbt r3, [r1], #1) @ May fault
 	strgtb r3, [r0], #1
 	b .Lcfu_finished
+ENDPROC(__copy_from_user)
 
 	.section .fixup,"ax"
 	.align 0
+4
arch/arm/lib/ucmpdi2.S
···
 	movhi r0, #2
 	mov pc, lr
 
+ENDPROC(__ucmpdi2)
+
 #ifdef CONFIG_AEABI
 
 ENTRY(__aeabi_ulcmp)
···
 	moveq r0, #0
 	movhi r0, #1
 	mov pc, lr
+
+ENDPROC(__aeabi_ulcmp)
 
 #endif
 
+1
arch/arm/mm/abort-ev7.S
···
  * New designs should not need to patch up faults.
  */
 	mov pc, lr
+ENDPROC(v7_early_abort)
+1
arch/arm/mm/abort-nommu.S
···
 	mov r0, #0 @ clear r0, r1 (no FSR/FAR)
 	mov r1, #0
 	mov pc, lr
+ENDPROC(nommu_early_abort)
+10
arch/arm/mm/cache-v7.S
···
 	mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
 	isb
 	mov pc, lr
+ENDPROC(v7_flush_dcache_all)
 
 /*
  * v7_flush_cache_all()
···
 	mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate
 	ldmfd sp!, {r4-r5, r7, r9-r11, lr}
 	mov pc, lr
+ENDPROC(v7_flush_kern_cache_all)
 
 /*
  * v7_flush_cache_all()
···
  */
 ENTRY(v7_flush_user_cache_range)
 	mov pc, lr
+ENDPROC(v7_flush_user_cache_all)
+ENDPROC(v7_flush_user_cache_range)
 
 /*
  * v7_coherent_kern_range(start,end)
···
 	dsb
 	isb
 	mov pc, lr
+ENDPROC(v7_coherent_kern_range)
+ENDPROC(v7_coherent_user_range)
 
 /*
  * v7_flush_kern_dcache_page(kaddr)
···
 	blo 1b
 	dsb
 	mov pc, lr
+ENDPROC(v7_flush_kern_dcache_page)
 
 /*
  * v7_dma_inv_range(start,end)
···
 	blo 1b
 	dsb
 	mov pc, lr
+ENDPROC(v7_dma_inv_range)
 
 /*
  * v7_dma_clean_range(start,end)
···
 	blo 1b
 	dsb
 	mov pc, lr
+ENDPROC(v7_dma_clean_range)
 
 /*
  * v7_dma_flush_range(start,end)
···
 	blo 1b
 	dsb
 	mov pc, lr
+ENDPROC(v7_dma_flush_range)
 
 	__INITDATA
 
+8
arch/arm/mm/proc-v7.S
···
 
 ENTRY(cpu_v7_proc_init)
 	mov pc, lr
+ENDPROC(cpu_v7_proc_init)
 
 ENTRY(cpu_v7_proc_fin)
 	mov pc, lr
+ENDPROC(cpu_v7_proc_fin)
 
 /*
  * cpu_v7_reset(loc)
···
 	.align 5
 ENTRY(cpu_v7_reset)
 	mov pc, r0
+ENDPROC(cpu_v7_reset)
 
 /*
  * cpu_v7_do_idle()
···
 ENTRY(cpu_v7_do_idle)
 	.long 0xe320f003 @ ARM V7 WFI instruction
 	mov pc, lr
+ENDPROC(cpu_v7_do_idle)
 
 ENTRY(cpu_v7_dcache_clean_area)
 #ifndef TLB_CAN_READ_FROM_L1_CACHE
···
 	dsb
 #endif
 	mov pc, lr
+ENDPROC(cpu_v7_dcache_clean_area)
 
 /*
  * cpu_v7_switch_mm(pgd_phys, tsk)
···
 	isb
 #endif
 	mov pc, lr
+ENDPROC(cpu_v7_switch_mm)
 
 /*
  * cpu_v7_set_pte_ext(ptep, pte)
···
 	mcr p15, 0, r0, c7, c10, 1 @ flush_pte
 #endif
 	mov pc, lr
+ENDPROC(cpu_v7_set_pte_ext)
 
 cpu_v7_name:
 	.ascii "ARMv7 Processor"
···
 	bic r0, r0, r5 @ clear bits them
 	orr r0, r0, r6 @ set them
 	mov pc, lr @ return to head.S:__ret
+ENDPROC(__v7_setup)
 
 /*
  * V X F I D LR
+2
arch/arm/mm/tlb-v7.S
···
 	mcr p15, 0, ip, c7, c5, 6 @ flush BTAC/BTB
 	dsb
 	mov pc, lr
+ENDPROC(v7wbi_flush_user_tlb_range)
 
 /*
  * v7wbi_flush_kern_tlb_range(start,end)
···
 	dsb
 	isb
 	mov pc, lr
+ENDPROC(v7wbi_flush_kern_tlb_range)
 
 	.section ".text.init", #alloc, #execinstr
 
+4 -4
arch/arm/vfp/entry.S
···
 #include <asm/assembler.h>
 #include <asm/vfpmacros.h>
 
-	.globl do_vfp
-do_vfp:
+ENTRY(do_vfp)
 	enable_irq
 	ldr r4, .LCvfp
 	ldr r11, [r10, #TI_CPU] @ CPU number
 	add r10, r10, #TI_VFPSTATE @ r10 = workspace
 	ldr pc, [r4] @ call VFP entry point
+ENDPROC(do_vfp)
 
 ENTRY(vfp_null_entry)
 	mov pc, lr
···
 @ failure to the VFP initialisation code.
 
 	__INIT
-	.globl vfp_testing_entry
-vfp_testing_entry:
+ENTRY(vfp_testing_entry)
 	ldr r0, VFP_arch_address
 	str r5, [r0] @ known non-zero value
 	mov pc, r9 @ we have handled the fault
+ENDPROC(vfp_testing_entry)
 
 VFP_arch_address:
 	.word VFP_arch
+12 -13
arch/arm/vfp/vfphw.S
···
 @ r11 = CPU number
 @ lr = failure return
 
-	.globl vfp_support_entry
-vfp_support_entry:
+ENTRY(vfp_support_entry)
 	DBGSTR3 "instr %08x pc %08x state %p", r0, r2, r10
 
 	VFPFMRX r1, FPEXC @ Is the VFP enabled?
···
 				@ code will raise an exception if
 				@ required. If not, the user code will
 				@ retry the faulted instruction
+ENDPROC(vfp_support_entry)
 
 #ifdef CONFIG_SMP
-	.globl vfp_save_state
-	.type vfp_save_state, %function
-vfp_save_state:
+ENTRY(vfp_save_state)
 	@ Save the current VFP state
 	@ r0 - save location
 	@ r1 - FPEXC
···
 	VFPFMRX r12, FPINST2, NE @ FPINST2 if needed (and present)
 	stmia r0, {r1, r2, r3, r12} @ save FPEXC, FPSCR, FPINST, FPINST2
 	mov pc, lr
+ENDPROC(vfp_save_state)
 #endif
 
 last_VFP_context_address:
 	.word last_VFP_context
 
-	.globl vfp_get_float
-vfp_get_float:
+ENTRY(vfp_get_float)
 	add pc, pc, r0, lsl #3
 	mov r0, r0
 	.irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
···
 	mrc p10, 0, r0, c\dr, c0, 4 @ fmrs r0, s1
 	mov pc, lr
 	.endr
+ENDPROC(vfp_get_float)
 
-	.globl vfp_put_float
-vfp_put_float:
+ENTRY(vfp_put_float)
 	add pc, pc, r1, lsl #3
 	mov r0, r0
 	.irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
···
 	mcr p10, 0, r0, c\dr, c0, 4 @ fmsr r0, s1
 	mov pc, lr
 	.endr
+ENDPROC(vfp_put_float)
 
-	.globl vfp_get_double
-vfp_get_double:
+ENTRY(vfp_get_double)
 	add pc, pc, r0, lsl #3
 	mov r0, r0
 	.irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
···
 	mov r0, #0
 	mov r1, #0
 	mov pc, lr
+ENDPROC(vfp_get_double)
 
-	.globl vfp_put_double
-vfp_put_double:
+ENTRY(vfp_put_double)
 	add pc, pc, r2, lsl #3
 	mov r0, r0
 	.irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
···
 	mov pc, lr
 	.endr
 #endif
+ENDPROC(vfp_put_double)