Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

xtensa: clean up functions in assembly code

Use ENTRY and ENDPROC throughout arch/xtensa/lib assembly sources.
Introduce asm/linkage.h and define the xtensa-specific __ALIGN and __ALIGN_STR macros there.

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>

+36 -36
+9
arch/xtensa/include/asm/linkage.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + 3 + #ifndef __ASM_LINKAGE_H 4 + #define __ASM_LINKAGE_H 5 + 6 + #define __ALIGN .align 4 7 + #define __ALIGN_STR ".align 4" 8 + 9 + #endif
+10 -20
arch/xtensa/lib/memcopy.S
··· 9 9 * Copyright (C) 2002 - 2012 Tensilica Inc. 10 10 */ 11 11 12 + #include <linux/linkage.h> 12 13 #include <variant/core.h> 13 14 #include <asm/asmmacro.h> 14 15 ··· 109 108 addi a5, a5, 2 110 109 j .Ldstaligned # dst is now aligned, return to main algorithm 111 110 112 - .align 4 113 - .global memcpy 114 - .type memcpy,@function 115 - memcpy: 111 + ENTRY(memcpy) 116 112 117 113 entry sp, 16 # minimal stack frame 118 114 # a2/ dst, a3/ src, a4/ len ··· 271 273 s8i a6, a5, 0 272 274 retw 273 275 276 + ENDPROC(memcpy) 274 277 275 278 /* 276 279 * void bcopy(const void *src, void *dest, size_t n); 277 280 */ 278 - .align 4 279 - .global bcopy 280 - .type bcopy,@function 281 - bcopy: 281 + 282 + ENTRY(bcopy) 283 + 282 284 entry sp, 16 # minimal stack frame 283 285 # a2=src, a3=dst, a4=len 284 286 mov a5, a3 285 287 mov a3, a2 286 288 mov a2, a5 287 289 j .Lmovecommon # go to common code for memmove+bcopy 290 + 291 + ENDPROC(bcopy) 288 292 289 293 /* 290 294 * void *memmove(void *dst, const void *src, size_t len); ··· 376 376 j .Lbackdstaligned # dst is now aligned, 377 377 # return to main algorithm 378 378 379 - .align 4 380 - .global memmove 381 - .type memmove,@function 382 - memmove: 379 + ENTRY(memmove) 383 380 384 381 entry sp, 16 # minimal stack frame 385 382 # a2/ dst, a3/ src, a4/ len ··· 548 551 s8i a6, a5, 0 549 552 retw 550 553 551 - 552 - /* 553 - * Local Variables: 554 - * mode:fundamental 555 - * comment-start: "# " 556 - * comment-start-skip: "# *" 557 - * End: 558 - */ 554 + ENDPROC(memmove)
+4 -4
arch/xtensa/lib/memset.S
··· 11 11 * Copyright (C) 2002 Tensilica Inc. 12 12 */ 13 13 14 + #include <linux/linkage.h> 14 15 #include <variant/core.h> 15 16 #include <asm/asmmacro.h> 16 17 ··· 31 30 */ 32 31 33 32 .text 34 - .align 4 35 - .global memset 36 - .type memset,@function 37 - memset: 33 + ENTRY(memset) 34 + 38 35 entry sp, 16 # minimal stack frame 39 36 # a2/ dst, a3/ c, a4/ length 40 37 extui a3, a3, 0, 8 # mask to just 8 bits ··· 140 141 .Lbytesetdone: 141 142 retw 142 143 144 + ENDPROC(memset) 143 145 144 146 .section .fixup, "ax" 145 147 .align 4
+4 -4
arch/xtensa/lib/strncpy_user.S
··· 12 12 */ 13 13 14 14 #include <linux/errno.h> 15 + #include <linux/linkage.h> 15 16 #include <variant/core.h> 16 17 #include <asm/asmmacro.h> 17 18 ··· 48 47 # a12/ tmp 49 48 50 49 .text 51 - .align 4 52 - .global __strncpy_user 53 - .type __strncpy_user,@function 54 - __strncpy_user: 50 + ENTRY(__strncpy_user) 51 + 55 52 entry sp, 16 # minimal stack frame 56 53 # a2/ dst, a3/ src, a4/ len 57 54 mov a11, a2 # leave dst in return value register ··· 201 202 sub a2, a11, a2 # compute strlen 202 203 retw 203 204 205 + ENDPROC(__strncpy_user) 204 206 205 207 .section .fixup, "ax" 206 208 .align 4
+5 -4
arch/xtensa/lib/strnlen_user.S
··· 11 11 * Copyright (C) 2002 Tensilica Inc. 12 12 */ 13 13 14 + #include <linux/linkage.h> 14 15 #include <variant/core.h> 15 16 #include <asm/asmmacro.h> 16 17 ··· 43 42 # a10/ tmp 44 43 45 44 .text 46 - .align 4 47 - .global __strnlen_user 48 - .type __strnlen_user,@function 49 - __strnlen_user: 45 + ENTRY(__strnlen_user) 46 + 50 47 entry sp, 16 # minimal stack frame 51 48 # a2/ s, a3/ len 52 49 addi a4, a2, -4 # because we overincrement at the end; ··· 131 132 addi a4, a4, 3+1 # point just beyond zero byte 132 133 sub a2, a4, a2 # subtract to get length 133 134 retw 135 + 136 + ENDPROC(__strnlen_user) 134 137 135 138 .section .fixup, "ax" 136 139 .align 4
+4 -4
arch/xtensa/lib/usercopy.S
··· 53 53 * a11/ original length 54 54 */ 55 55 56 + #include <linux/linkage.h> 56 57 #include <variant/core.h> 57 58 #include <asm/asmmacro.h> 58 59 59 60 .text 60 - .align 4 61 - .global __xtensa_copy_user 62 - .type __xtensa_copy_user,@function 63 - __xtensa_copy_user: 61 + ENTRY(__xtensa_copy_user) 62 + 64 63 entry sp, 16 # minimal stack frame 65 64 # a2/ dst, a3/ src, a4/ len 66 65 mov a5, a2 # copy dst so that a2 is return value ··· 266 267 movi a2, 0 # return success for len bytes copied 267 268 retw 268 269 270 + ENDPROC(__xtensa_copy_user) 269 271 270 272 .section .fixup, "ax" 271 273 .align 4