Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/asm: Introduce push/pop macros which generate CFI_REL_OFFSET and CFI_RESTORE

Sequences:

pushl_cfi %reg
CFI_REL_OFFSET reg, 0

and:

popl_cfi %reg
CFI_RESTORE reg

happen quite often. This patch adds macros which generate them.

No assembly changes (verified with objdump -dr vmlinux.o).

Signed-off-by: Denys Vlasenko <dvlasenk@redhat.com>
Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Cc: Alexei Starovoitov <ast@plumgrid.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Will Drewry <wad@chromium.org>
Link: http://lkml.kernel.org/r/1421017655-25561-1-git-send-email-dvlasenk@redhat.com
Link: http://lkml.kernel.org/r/2202eb90f175cf45d1b2d1c64dbb5676a8ad07ad.1424989793.git.luto@amacapital.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Authored by Denys Vlasenko and committed by Ingo Molnar.
49db46a6 69e8544c

+141 -196
+14 -28
arch/x86/include/asm/calling.h
··· 210 210 */ 211 211 212 212 .macro SAVE_ALL 213 - pushl_cfi %eax 214 - CFI_REL_OFFSET eax, 0 215 - pushl_cfi %ebp 216 - CFI_REL_OFFSET ebp, 0 217 - pushl_cfi %edi 218 - CFI_REL_OFFSET edi, 0 219 - pushl_cfi %esi 220 - CFI_REL_OFFSET esi, 0 221 - pushl_cfi %edx 222 - CFI_REL_OFFSET edx, 0 223 - pushl_cfi %ecx 224 - CFI_REL_OFFSET ecx, 0 225 - pushl_cfi %ebx 226 - CFI_REL_OFFSET ebx, 0 213 + pushl_cfi_reg eax 214 + pushl_cfi_reg ebp 215 + pushl_cfi_reg edi 216 + pushl_cfi_reg esi 217 + pushl_cfi_reg edx 218 + pushl_cfi_reg ecx 219 + pushl_cfi_reg ebx 227 220 .endm 228 221 229 222 .macro RESTORE_ALL 230 - popl_cfi %ebx 231 - CFI_RESTORE ebx 232 - popl_cfi %ecx 233 - CFI_RESTORE ecx 234 - popl_cfi %edx 235 - CFI_RESTORE edx 236 - popl_cfi %esi 237 - CFI_RESTORE esi 238 - popl_cfi %edi 239 - CFI_RESTORE edi 240 - popl_cfi %ebp 241 - CFI_RESTORE ebp 242 - popl_cfi %eax 243 - CFI_RESTORE eax 223 + popl_cfi_reg ebx 224 + popl_cfi_reg ecx 225 + popl_cfi_reg edx 226 + popl_cfi_reg esi 227 + popl_cfi_reg edi 228 + popl_cfi_reg ebp 229 + popl_cfi_reg eax 244 230 .endm 245 231 246 232 #endif /* CONFIG_X86_64 */
+24
arch/x86/include/asm/dwarf2.h
··· 86 86 CFI_ADJUST_CFA_OFFSET 8 87 87 .endm 88 88 89 + .macro pushq_cfi_reg reg 90 + pushq %\reg 91 + CFI_ADJUST_CFA_OFFSET 8 92 + CFI_REL_OFFSET \reg, 0 93 + .endm 94 + 89 95 .macro popq_cfi reg 90 96 popq \reg 91 97 CFI_ADJUST_CFA_OFFSET -8 98 + .endm 99 + 100 + .macro popq_cfi_reg reg 101 + popq %\reg 102 + CFI_ADJUST_CFA_OFFSET -8 103 + CFI_RESTORE \reg 92 104 .endm 93 105 94 106 .macro pushfq_cfi ··· 128 116 CFI_ADJUST_CFA_OFFSET 4 129 117 .endm 130 118 119 + .macro pushl_cfi_reg reg 120 + pushl %\reg 121 + CFI_ADJUST_CFA_OFFSET 4 122 + CFI_REL_OFFSET \reg, 0 123 + .endm 124 + 131 125 .macro popl_cfi reg 132 126 popl \reg 133 127 CFI_ADJUST_CFA_OFFSET -4 128 + .endm 129 + 130 + .macro popl_cfi_reg reg 131 + popl %\reg 132 + CFI_ADJUST_CFA_OFFSET -4 133 + CFI_RESTORE \reg 134 134 .endm 135 135 136 136 .macro pushfl_cfi
+7 -14
arch/x86/kernel/entry_32.S
··· 1234 1234 /*CFI_REL_OFFSET es, 0*/ 1235 1235 pushl_cfi %ds 1236 1236 /*CFI_REL_OFFSET ds, 0*/ 1237 - pushl_cfi %eax 1238 - CFI_REL_OFFSET eax, 0 1239 - pushl_cfi %ebp 1240 - CFI_REL_OFFSET ebp, 0 1241 - pushl_cfi %edi 1242 - CFI_REL_OFFSET edi, 0 1243 - pushl_cfi %esi 1244 - CFI_REL_OFFSET esi, 0 1245 - pushl_cfi %edx 1246 - CFI_REL_OFFSET edx, 0 1247 - pushl_cfi %ecx 1248 - CFI_REL_OFFSET ecx, 0 1249 - pushl_cfi %ebx 1250 - CFI_REL_OFFSET ebx, 0 1237 + pushl_cfi_reg eax 1238 + pushl_cfi_reg ebp 1239 + pushl_cfi_reg edi 1240 + pushl_cfi_reg esi 1241 + pushl_cfi_reg edx 1242 + pushl_cfi_reg ecx 1243 + pushl_cfi_reg ebx 1251 1244 cld 1252 1245 movl $(__KERNEL_PERCPU), %ecx 1253 1246 movl %ecx, %fs
+20 -30
arch/x86/lib/atomic64_cx8_32.S
··· 13 13 #include <asm/alternative-asm.h> 14 14 #include <asm/dwarf2.h> 15 15 16 - .macro SAVE reg 17 - pushl_cfi %\reg 18 - CFI_REL_OFFSET \reg, 0 19 - .endm 20 - 21 - .macro RESTORE reg 22 - popl_cfi %\reg 23 - CFI_RESTORE \reg 24 - .endm 25 - 26 16 .macro read64 reg 27 17 movl %ebx, %eax 28 18 movl %ecx, %edx ··· 57 67 .macro addsub_return func ins insc 58 68 ENTRY(atomic64_\func\()_return_cx8) 59 69 CFI_STARTPROC 60 - SAVE ebp 61 - SAVE ebx 62 - SAVE esi 63 - SAVE edi 70 + pushl_cfi_reg ebp 71 + pushl_cfi_reg ebx 72 + pushl_cfi_reg esi 73 + pushl_cfi_reg edi 64 74 65 75 movl %eax, %esi 66 76 movl %edx, %edi ··· 79 89 10: 80 90 movl %ebx, %eax 81 91 movl %ecx, %edx 82 - RESTORE edi 83 - RESTORE esi 84 - RESTORE ebx 85 - RESTORE ebp 92 + popl_cfi_reg edi 93 + popl_cfi_reg esi 94 + popl_cfi_reg ebx 95 + popl_cfi_reg ebp 86 96 ret 87 97 CFI_ENDPROC 88 98 ENDPROC(atomic64_\func\()_return_cx8) ··· 94 104 .macro incdec_return func ins insc 95 105 ENTRY(atomic64_\func\()_return_cx8) 96 106 CFI_STARTPROC 97 - SAVE ebx 107 + pushl_cfi_reg ebx 98 108 99 109 read64 %esi 100 110 1: ··· 109 119 10: 110 120 movl %ebx, %eax 111 121 movl %ecx, %edx 112 - RESTORE ebx 122 + popl_cfi_reg ebx 113 123 ret 114 124 CFI_ENDPROC 115 125 ENDPROC(atomic64_\func\()_return_cx8) ··· 120 130 121 131 ENTRY(atomic64_dec_if_positive_cx8) 122 132 CFI_STARTPROC 123 - SAVE ebx 133 + pushl_cfi_reg ebx 124 134 125 135 read64 %esi 126 136 1: ··· 136 146 2: 137 147 movl %ebx, %eax 138 148 movl %ecx, %edx 139 - RESTORE ebx 149 + popl_cfi_reg ebx 140 150 ret 141 151 CFI_ENDPROC 142 152 ENDPROC(atomic64_dec_if_positive_cx8) 143 153 144 154 ENTRY(atomic64_add_unless_cx8) 145 155 CFI_STARTPROC 146 - SAVE ebp 147 - SAVE ebx 156 + pushl_cfi_reg ebp 157 + pushl_cfi_reg ebx 148 158 /* these just push these two parameters on the stack */ 149 - SAVE edi 150 - SAVE ecx 159 + pushl_cfi_reg edi 160 + pushl_cfi_reg ecx 151 161 152 162 movl %eax, %ebp 153 163 movl %edx, %edi ··· 169 179 3: 170 180 addl $8, %esp 171 181 CFI_ADJUST_CFA_OFFSET -8 172 - RESTORE ebx 173 - RESTORE ebp 182 + popl_cfi_reg ebx 183 + popl_cfi_reg ebp 174 184 ret 175 185 4: 176 186 cmpl %edx, 4(%esp) ··· 182 192 183 193 ENTRY(atomic64_inc_not_zero_cx8) 184 194 CFI_STARTPROC 185 - SAVE ebx 195 + pushl_cfi_reg ebx 186 196 187 197 read64 %esi 188 198 1: ··· 199 209 200 210 movl $1, %eax 201 211 3: 202 - RESTORE ebx 212 + popl_cfi_reg ebx 203 213 ret 204 214 CFI_ENDPROC 205 215 ENDPROC(atomic64_inc_not_zero_cx8)
+20 -40
arch/x86/lib/checksum_32.S
··· 51 51 */ 52 52 ENTRY(csum_partial) 53 53 CFI_STARTPROC 54 - pushl_cfi %esi 55 - CFI_REL_OFFSET esi, 0 56 - pushl_cfi %ebx 57 - CFI_REL_OFFSET ebx, 0 54 + pushl_cfi_reg esi 55 + pushl_cfi_reg ebx 58 56 movl 20(%esp),%eax # Function arg: unsigned int sum 59 57 movl 16(%esp),%ecx # Function arg: int len 60 58 movl 12(%esp),%esi # Function arg: unsigned char *buff ··· 129 131 jz 8f 130 132 roll $8, %eax 131 133 8: 132 - popl_cfi %ebx 133 - CFI_RESTORE ebx 134 - popl_cfi %esi 135 - CFI_RESTORE esi 134 + popl_cfi_reg ebx 135 + popl_cfi_reg esi 136 136 ret 137 137 CFI_ENDPROC 138 138 ENDPROC(csum_partial) ··· 141 145 142 146 ENTRY(csum_partial) 143 147 CFI_STARTPROC 144 - pushl_cfi %esi 145 - CFI_REL_OFFSET esi, 0 146 - pushl_cfi %ebx 147 - CFI_REL_OFFSET ebx, 0 148 + pushl_cfi_reg esi 149 + pushl_cfi_reg ebx 148 150 movl 20(%esp),%eax # Function arg: unsigned int sum 149 151 movl 16(%esp),%ecx # Function arg: int len 150 152 movl 12(%esp),%esi # Function arg: const unsigned char *buf ··· 249 255 jz 90f 250 256 roll $8, %eax 251 257 90: 252 - popl_cfi %ebx 253 - CFI_RESTORE ebx 254 - popl_cfi %esi 255 - CFI_RESTORE esi 258 + popl_cfi_reg ebx 259 + popl_cfi_reg esi 256 260 ret 257 261 CFI_ENDPROC 258 262 ENDPROC(csum_partial) ··· 290 298 CFI_STARTPROC 291 299 subl $4,%esp 292 300 CFI_ADJUST_CFA_OFFSET 4 293 - pushl_cfi %edi 294 - CFI_REL_OFFSET edi, 0 295 - pushl_cfi %esi 296 - CFI_REL_OFFSET esi, 0 297 - pushl_cfi %ebx 298 - CFI_REL_OFFSET ebx, 0 301 + pushl_cfi_reg edi 302 + pushl_cfi_reg esi 303 + pushl_cfi_reg ebx 299 304 movl ARGBASE+16(%esp),%eax # sum 300 305 movl ARGBASE+12(%esp),%ecx # len 301 306 movl ARGBASE+4(%esp),%esi # src ··· 401 412 402 413 .previous 403 414 404 - popl_cfi %ebx 405 - CFI_RESTORE ebx 406 - popl_cfi %esi 407 - CFI_RESTORE esi 408 - popl_cfi %edi 409 - CFI_RESTORE edi 415 + popl_cfi_reg ebx 416 + popl_cfi_reg esi 417 + popl_cfi_reg edi 410 418 popl_cfi %ecx # equivalent to addl $4,%esp 411 419 ret 412 420 CFI_ENDPROC ··· 427 441 428 442 ENTRY(csum_partial_copy_generic) 429 443 CFI_STARTPROC 430 - pushl_cfi %ebx 431 - CFI_REL_OFFSET ebx, 0 432 - pushl_cfi %edi 433 - CFI_REL_OFFSET edi, 0 434 - pushl_cfi %esi 435 - CFI_REL_OFFSET esi, 0 444 + pushl_cfi_reg ebx 445 + pushl_cfi_reg edi 446 + pushl_cfi_reg esi 436 447 movl ARGBASE+4(%esp),%esi #src 437 448 movl ARGBASE+8(%esp),%edi #dst 438 449 movl ARGBASE+12(%esp),%ecx #len ··· 489 506 jmp 7b 490 507 .previous 491 508 492 - popl_cfi %esi 493 - CFI_RESTORE esi 494 - popl_cfi %edi 495 - CFI_RESTORE edi 496 - popl_cfi %ebx 497 - CFI_RESTORE ebx 509 + popl_cfi_reg esi 510 + popl_cfi_reg edi 511 + popl_cfi_reg ebx 498 512 ret 499 513 CFI_ENDPROC 500 514 ENDPROC(csum_partial_copy_generic)
+12 -12
arch/x86/lib/msr-reg.S
··· 14 14 .macro op_safe_regs op 15 15 ENTRY(\op\()_safe_regs) 16 16 CFI_STARTPROC 17 - pushq_cfi %rbx 18 - pushq_cfi %rbp 17 + pushq_cfi_reg rbx 18 + pushq_cfi_reg rbp 19 19 movq %rdi, %r10 /* Save pointer */ 20 20 xorl %r11d, %r11d /* Return value */ 21 21 movl (%rdi), %eax ··· 35 35 movl %ebp, 20(%r10) 36 36 movl %esi, 24(%r10) 37 37 movl %edi, 28(%r10) 38 - popq_cfi %rbp 39 - popq_cfi %rbx 38 + popq_cfi_reg rbp 39 + popq_cfi_reg rbx 40 40 ret 41 41 3: 42 42 CFI_RESTORE_STATE ··· 53 53 .macro op_safe_regs op 54 54 ENTRY(\op\()_safe_regs) 55 55 CFI_STARTPROC 56 - pushl_cfi %ebx 57 - pushl_cfi %ebp 58 - pushl_cfi %esi 59 - pushl_cfi %edi 56 + pushl_cfi_reg ebx 57 + pushl_cfi_reg ebp 58 + pushl_cfi_reg esi 59 + pushl_cfi_reg edi 60 60 pushl_cfi $0 /* Return value */ 61 61 pushl_cfi %eax 62 62 movl 4(%eax), %ecx ··· 80 80 movl %esi, 24(%eax) 81 81 movl %edi, 28(%eax) 82 82 popl_cfi %eax 83 - popl_cfi %edi 84 - popl_cfi %esi 85 - popl_cfi %ebp 86 - popl_cfi %ebx 83 + popl_cfi_reg edi 84 + popl_cfi_reg esi 85 + popl_cfi_reg ebp 86 + popl_cfi_reg ebx 87 87 ret 88 88 3: 89 89 CFI_RESTORE_STATE
+20 -24
arch/x86/lib/rwsem.S
··· 34 34 */ 35 35 36 36 #define save_common_regs \ 37 - pushl_cfi %ecx; CFI_REL_OFFSET ecx, 0 37 + pushl_cfi_reg ecx 38 38 39 39 #define restore_common_regs \ 40 - popl_cfi %ecx; CFI_RESTORE ecx 40 + popl_cfi_reg ecx 41 41 42 42 /* Avoid uglifying the argument copying x86-64 needs to do. */ 43 43 .macro movq src, dst ··· 64 64 */ 65 65 66 66 #define save_common_regs \ 67 - pushq_cfi %rdi; CFI_REL_OFFSET rdi, 0; \ 68 - pushq_cfi %rsi; CFI_REL_OFFSET rsi, 0; \ 69 - pushq_cfi %rcx; CFI_REL_OFFSET rcx, 0; \ 70 - pushq_cfi %r8; CFI_REL_OFFSET r8, 0; \ 71 - pushq_cfi %r9; CFI_REL_OFFSET r9, 0; \ 72 - pushq_cfi %r10; CFI_REL_OFFSET r10, 0; \ 73 - pushq_cfi %r11; CFI_REL_OFFSET r11, 0 67 + pushq_cfi_reg rdi; \ 68 + pushq_cfi_reg rsi; \ 69 + pushq_cfi_reg rcx; \ 70 + pushq_cfi_reg r8; \ 71 + pushq_cfi_reg r9; \ 72 + pushq_cfi_reg r10; \ 73 + pushq_cfi_reg r11 74 74 75 75 #define restore_common_regs \ 76 - popq_cfi %r11; CFI_RESTORE r11; \ 77 - popq_cfi %r10; CFI_RESTORE r10; \ 78 - popq_cfi %r9; CFI_RESTORE r9; \ 79 - popq_cfi %r8; CFI_RESTORE r8; \ 80 - popq_cfi %rcx; CFI_RESTORE rcx; \ 81 - popq_cfi %rsi; CFI_RESTORE rsi; \ 82 - popq_cfi %rdi; CFI_RESTORE rdi 76 + popq_cfi_reg r11; \ 77 + popq_cfi_reg r10; \ 78 + popq_cfi_reg r9; \ 79 + popq_cfi_reg r8; \ 80 + popq_cfi_reg rcx; \ 81 + popq_cfi_reg rsi; \ 82 + popq_cfi_reg rdi 83 83 84 84 #endif 85 85 ··· 87 87 ENTRY(call_rwsem_down_read_failed) 88 88 CFI_STARTPROC 89 89 save_common_regs 90 - __ASM_SIZE(push,_cfi) %__ASM_REG(dx) 91 - CFI_REL_OFFSET __ASM_REG(dx), 0 90 + __ASM_SIZE(push,_cfi_reg) __ASM_REG(dx) 92 91 movq %rax,%rdi 93 92 call rwsem_down_read_failed 94 - __ASM_SIZE(pop,_cfi) %__ASM_REG(dx) 95 - CFI_RESTORE __ASM_REG(dx) 93 + __ASM_SIZE(pop,_cfi_reg) __ASM_REG(dx) 96 94 restore_common_regs 97 95 ret 98 96 CFI_ENDPROC ··· 122 124 ENTRY(call_rwsem_downgrade_wake) 123 125 CFI_STARTPROC 124 126 save_common_regs 125 - __ASM_SIZE(push,_cfi) %__ASM_REG(dx) 126 - CFI_REL_OFFSET __ASM_REG(dx), 0 127 + __ASM_SIZE(push,_cfi_reg) __ASM_REG(dx) 127 128 movq %rax,%rdi 128 129 call rwsem_downgrade_wake 129 - __ASM_SIZE(pop,_cfi) %__ASM_REG(dx) 130 - CFI_RESTORE __ASM_REG(dx) 130 + __ASM_SIZE(pop,_cfi_reg) __ASM_REG(dx) 131 131 restore_common_regs 132 132 ret 133 133 CFI_ENDPROC
+6 -12
arch/x86/lib/thunk_32.S
··· 13 13 .globl \name 14 14 \name: 15 15 CFI_STARTPROC 16 - pushl_cfi %eax 17 - CFI_REL_OFFSET eax, 0 18 - pushl_cfi %ecx 19 - CFI_REL_OFFSET ecx, 0 20 - pushl_cfi %edx 21 - CFI_REL_OFFSET edx, 0 16 + pushl_cfi_reg eax 17 + pushl_cfi_reg ecx 18 + pushl_cfi_reg edx 22 19 23 20 .if \put_ret_addr_in_eax 24 21 /* Place EIP in the arg1 */ ··· 23 26 .endif 24 27 25 28 call \func 26 - popl_cfi %edx 27 - CFI_RESTORE edx 28 - popl_cfi %ecx 29 - CFI_RESTORE ecx 30 - popl_cfi %eax 31 - CFI_RESTORE eax 29 + popl_cfi_reg edx 30 + popl_cfi_reg ecx 31 + popl_cfi_reg eax 32 32 ret 33 33 CFI_ENDPROC 34 34 _ASM_NOKPROBE(\name)
+18 -36
arch/x86/lib/thunk_64.S
··· 17 17 CFI_STARTPROC 18 18 19 19 /* this one pushes 9 elems, the next one would be %rIP */ 20 - pushq_cfi %rdi 21 - CFI_REL_OFFSET rdi, 0 22 - pushq_cfi %rsi 23 - CFI_REL_OFFSET rsi, 0 24 - pushq_cfi %rdx 25 - CFI_REL_OFFSET rdx, 0 26 - pushq_cfi %rcx 27 - CFI_REL_OFFSET rcx, 0 28 - pushq_cfi %rax 29 - CFI_REL_OFFSET rax, 0 30 - pushq_cfi %r8 31 - CFI_REL_OFFSET r8, 0 32 - pushq_cfi %r9 33 - CFI_REL_OFFSET r9, 0 34 - pushq_cfi %r10 35 - CFI_REL_OFFSET r10, 0 36 - pushq_cfi %r11 37 - CFI_REL_OFFSET r11, 0 20 + pushq_cfi_reg rdi 21 + pushq_cfi_reg rsi 22 + pushq_cfi_reg rdx 23 + pushq_cfi_reg rcx 24 + pushq_cfi_reg rax 25 + pushq_cfi_reg r8 26 + pushq_cfi_reg r9 27 + pushq_cfi_reg r10 28 + pushq_cfi_reg r11 38 29 39 30 .if \put_ret_addr_in_rdi 40 31 /* 9*8(%rsp) is return addr on stack */ ··· 60 69 CFI_STARTPROC 61 70 CFI_ADJUST_CFA_OFFSET 9*8 62 71 restore: 63 - popq_cfi %r11 64 - CFI_RESTORE r11 65 - popq_cfi %r10 66 - CFI_RESTORE r10 67 - popq_cfi %r9 68 - CFI_RESTORE r9 69 - popq_cfi %r8 70 - CFI_RESTORE r8 71 - popq_cfi %rax 72 - CFI_RESTORE rax 73 - popq_cfi %rcx 74 - CFI_RESTORE rcx 75 - popq_cfi %rdx 76 - CFI_RESTORE rdx 77 - popq_cfi %rsi 78 - CFI_RESTORE rsi 79 - popq_cfi %rdi 80 - CFI_RESTORE rdi 72 + popq_cfi_reg r11 73 + popq_cfi_reg r10 74 + popq_cfi_reg r9 75 + popq_cfi_reg r8 76 + popq_cfi_reg rax 77 + popq_cfi_reg rcx 78 + popq_cfi_reg rdx 79 + popq_cfi_reg rsi 80 + popq_cfi_reg rdi 81 81 ret 82 82 CFI_ENDPROC 83 83 _ASM_NOKPROBE(restore)