Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/debug: Remove perpetually broken, unmaintainable dwarf annotations

So the dwarf2 annotations in low level assembly code have
become an increasing hindrance: unreadable, messy macros
mixed into some of the most security sensitive code paths
of the Linux kernel.

These debug info annotations don't even buy the upstream
kernel anything: dwarf driven stack unwinding has caused
problems in the past so it's out of tree, and the upstream
kernel only uses the much more robust frame-pointer-based
stack unwinding method.

In addition to that there's a steady, slow bitrot going
on with these annotations, requiring frequent fixups.
There's no tooling and no functionality upstream that
keeps it correct.

So burn down the sick forest, allowing new, healthier growth:

27 files changed, 350 insertions(+), 1101 deletions(-)

Someone who has the willingness and time to do this
properly can attempt to reintroduce dwarf debuginfo in x86
assembly code plus dwarf unwinding from first principles,
with the following conditions:

- it should be maximally readable, and maximally low-key to
'ordinary' code reading and maintenance.

- find a build time method to insert dwarf annotations
automatically in the most common cases, for pop/push
instructions that manipulate the stack pointer. This could
be done for example via a preprocessing step that just
looks for common patterns - plus special annotations for
the few cases where we want to depart from the default.
We have hundreds of CFI annotations, so automating most of
that makes sense.

- it should come with build tooling checks that ensure that
CFI annotations are sensible. We've seen such efforts from
the framepointer side, and there's no reason it couldn't be
done on the dwarf side.

Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Frédéric Weisbecker <fweisbec@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Jan Beulich <JBeulich@suse.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>

+350 -1101
+2 -8
arch/x86/Makefile
··· 149 149 sp-$(CONFIG_X86_32) := esp 150 150 sp-$(CONFIG_X86_64) := rsp 151 151 152 - # do binutils support CFI? 153 - cfi := $(call as-instr,.cfi_startproc\n.cfi_rel_offset $(sp-y)$(comma)0\n.cfi_endproc,-DCONFIG_AS_CFI=1) 154 - # is .cfi_signal_frame supported too? 155 - cfi-sigframe := $(call as-instr,.cfi_startproc\n.cfi_signal_frame\n.cfi_endproc,-DCONFIG_AS_CFI_SIGNAL_FRAME=1) 156 - cfi-sections := $(call as-instr,.cfi_sections .debug_frame,-DCONFIG_AS_CFI_SECTIONS=1) 157 - 158 152 # does binutils support specific instructions? 159 153 asinstr := $(call as-instr,fxsaveq (%rax),-DCONFIG_AS_FXSAVEQ=1) 160 154 asinstr += $(call as-instr,pshufb %xmm0$(comma)%xmm0,-DCONFIG_AS_SSSE3=1) ··· 156 162 avx_instr := $(call as-instr,vxorps %ymm0$(comma)%ymm1$(comma)%ymm2,-DCONFIG_AS_AVX=1) 157 163 avx2_instr :=$(call as-instr,vpbroadcastb %xmm0$(comma)%ymm1,-DCONFIG_AS_AVX2=1) 158 164 159 - KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) 160 - KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) 165 + KBUILD_AFLAGS += $(asinstr) $(avx_instr) $(avx2_instr) 166 + KBUILD_CFLAGS += $(asinstr) $(avx_instr) $(avx2_instr) 161 167 162 168 LDFLAGS := -m elf_$(UTS_MACHINE) 163 169
+32 -101
arch/x86/ia32/ia32entry.S
··· 4 4 * Copyright 2000-2002 Andi Kleen, SuSE Labs. 5 5 */ 6 6 7 - #include <asm/dwarf2.h> 8 7 #include <asm/calling.h> 9 8 #include <asm/asm-offsets.h> 10 9 #include <asm/current.h> ··· 59 60 movl %eax,%eax /* zero extension */ 60 61 .endm 61 62 62 - .macro CFI_STARTPROC32 simple 63 - CFI_STARTPROC \simple 64 - CFI_UNDEFINED r8 65 - CFI_UNDEFINED r9 66 - CFI_UNDEFINED r10 67 - CFI_UNDEFINED r11 68 - CFI_UNDEFINED r12 69 - CFI_UNDEFINED r13 70 - CFI_UNDEFINED r14 71 - CFI_UNDEFINED r15 72 - .endm 73 63 74 64 #ifdef CONFIG_PARAVIRT 75 65 ENTRY(native_usergs_sysret32) ··· 90 102 * with the int 0x80 path. 91 103 */ 92 104 ENTRY(ia32_sysenter_target) 93 - CFI_STARTPROC32 simple 94 - CFI_SIGNAL_FRAME 95 - CFI_DEF_CFA rsp,0 96 - CFI_REGISTER rsp,rbp 97 - 98 105 /* 99 106 * Interrupts are off on entry. 100 107 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON, ··· 104 121 movl %eax, %eax 105 122 106 123 movl ASM_THREAD_INFO(TI_sysenter_return, %rsp, 0), %r10d 107 - CFI_REGISTER rip,r10 108 124 109 125 /* Construct struct pt_regs on stack */ 110 - pushq_cfi $__USER32_DS /* pt_regs->ss */ 111 - pushq_cfi %rbp /* pt_regs->sp */ 112 - CFI_REL_OFFSET rsp,0 113 - pushfq_cfi /* pt_regs->flags */ 114 - pushq_cfi $__USER32_CS /* pt_regs->cs */ 115 - pushq_cfi %r10 /* pt_regs->ip = thread_info->sysenter_return */ 116 - CFI_REL_OFFSET rip,0 117 - pushq_cfi_reg rax /* pt_regs->orig_ax */ 118 - pushq_cfi_reg rdi /* pt_regs->di */ 119 - pushq_cfi_reg rsi /* pt_regs->si */ 120 - pushq_cfi_reg rdx /* pt_regs->dx */ 121 - pushq_cfi_reg rcx /* pt_regs->cx */ 122 - pushq_cfi $-ENOSYS /* pt_regs->ax */ 126 + pushq $__USER32_DS /* pt_regs->ss */ 127 + pushq %rbp /* pt_regs->sp */ 128 + pushfq /* pt_regs->flags */ 129 + pushq $__USER32_CS /* pt_regs->cs */ 130 + pushq %r10 /* pt_regs->ip = thread_info->sysenter_return */ 131 + pushq %rax /* pt_regs->orig_ax */ 132 + pushq %rdi /* pt_regs->di */ 133 + pushq %rsi /* pt_regs->si */ 134 + pushq %rdx /* pt_regs->dx */ 135 + pushq 
%rcx /* pt_regs->cx */ 136 + pushq $-ENOSYS /* pt_regs->ax */ 123 137 cld 124 138 sub $(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */ 125 - CFI_ADJUST_CFA_OFFSET 10*8 126 139 127 140 /* 128 141 * no need to do an access_ok check here because rbp has been ··· 140 161 141 162 orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS) 142 163 testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS) 143 - CFI_REMEMBER_STATE 144 164 jnz sysenter_tracesys 165 + 145 166 sysenter_do_call: 146 167 /* 32bit syscall -> 64bit C ABI argument conversion */ 147 168 movl %edi,%r8d /* arg5 */ ··· 172 193 */ 173 194 andl $~TS_COMPAT,ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS) 174 195 movl RIP(%rsp),%ecx /* User %eip */ 175 - CFI_REGISTER rip,rcx 176 196 RESTORE_RSI_RDI 177 197 xorl %edx,%edx /* avoid info leaks */ 178 198 xorq %r8,%r8 179 199 xorq %r9,%r9 180 200 xorq %r10,%r10 181 201 movl EFLAGS(%rsp),%r11d /* User eflags */ 182 - /*CFI_RESTORE rflags*/ 183 202 TRACE_IRQS_ON 184 203 185 204 /* ··· 207 230 * kernel data. 208 231 */ 209 232 USERGS_SYSRET32 210 - 211 - CFI_RESTORE_STATE 212 233 213 234 #ifdef CONFIG_AUDITSYSCALL 214 235 .macro auditsys_entry_common ··· 257 282 #endif 258 283 259 284 sysenter_fix_flags: 260 - pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED) 261 - popfq_cfi 285 + pushq $(X86_EFLAGS_IF|X86_EFLAGS_FIXED) 286 + popfq 262 287 jmp sysenter_flags_fixed 263 288 264 289 sysenter_tracesys: ··· 273 298 LOAD_ARGS32 /* reload args from stack in case ptrace changed it */ 274 299 RESTORE_EXTRA_REGS 275 300 jmp sysenter_do_call 276 - CFI_ENDPROC 277 301 ENDPROC(ia32_sysenter_target) 278 302 279 303 /* ··· 306 332 * with the int 0x80 path. 307 333 */ 308 334 ENTRY(ia32_cstar_target) 309 - CFI_STARTPROC32 simple 310 - CFI_SIGNAL_FRAME 311 - CFI_DEF_CFA rsp,0 312 - CFI_REGISTER rip,rcx 313 - /*CFI_REGISTER rflags,r11*/ 314 - 315 335 /* 316 336 * Interrupts are off on entry. 
317 337 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON, ··· 313 345 */ 314 346 SWAPGS_UNSAFE_STACK 315 347 movl %esp,%r8d 316 - CFI_REGISTER rsp,r8 317 348 movq PER_CPU_VAR(cpu_current_top_of_stack),%rsp 318 349 ENABLE_INTERRUPTS(CLBR_NONE) 319 350 ··· 320 353 movl %eax,%eax 321 354 322 355 /* Construct struct pt_regs on stack */ 323 - pushq_cfi $__USER32_DS /* pt_regs->ss */ 324 - pushq_cfi %r8 /* pt_regs->sp */ 325 - CFI_REL_OFFSET rsp,0 326 - pushq_cfi %r11 /* pt_regs->flags */ 327 - pushq_cfi $__USER32_CS /* pt_regs->cs */ 328 - pushq_cfi %rcx /* pt_regs->ip */ 329 - CFI_REL_OFFSET rip,0 330 - pushq_cfi_reg rax /* pt_regs->orig_ax */ 331 - pushq_cfi_reg rdi /* pt_regs->di */ 332 - pushq_cfi_reg rsi /* pt_regs->si */ 333 - pushq_cfi_reg rdx /* pt_regs->dx */ 334 - pushq_cfi_reg rbp /* pt_regs->cx */ 356 + pushq $__USER32_DS /* pt_regs->ss */ 357 + pushq %r8 /* pt_regs->sp */ 358 + pushq %r11 /* pt_regs->flags */ 359 + pushq $__USER32_CS /* pt_regs->cs */ 360 + pushq %rcx /* pt_regs->ip */ 361 + pushq %rax /* pt_regs->orig_ax */ 362 + pushq %rdi /* pt_regs->di */ 363 + pushq %rsi /* pt_regs->si */ 364 + pushq %rdx /* pt_regs->dx */ 365 + pushq %rbp /* pt_regs->cx */ 335 366 movl %ebp,%ecx 336 - pushq_cfi $-ENOSYS /* pt_regs->ax */ 367 + pushq $-ENOSYS /* pt_regs->ax */ 337 368 sub $(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */ 338 - CFI_ADJUST_CFA_OFFSET 10*8 339 369 340 370 /* 341 371 * no need to do an access_ok check here because r8 has been ··· 344 380 ASM_CLAC 345 381 orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS) 346 382 testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS) 347 - CFI_REMEMBER_STATE 348 383 jnz cstar_tracesys 384 + 349 385 cstar_do_call: 350 386 /* 32bit syscall -> 64bit C ABI argument conversion */ 351 387 movl %edi,%r8d /* arg5 */ ··· 367 403 andl $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS) 368 404 RESTORE_RSI_RDI_RDX 369 405 movl RIP(%rsp),%ecx 370 - 
CFI_REGISTER rip,rcx 371 406 movl EFLAGS(%rsp),%r11d 372 - /*CFI_REGISTER rflags,r11*/ 373 407 xorq %r10,%r10 374 408 xorq %r9,%r9 375 409 xorq %r8,%r8 376 410 TRACE_IRQS_ON 377 411 movl RSP(%rsp),%esp 378 - CFI_RESTORE rsp 379 412 /* 380 413 * 64bit->32bit SYSRET restores eip from ecx, 381 414 * eflags from r11 (but RF and VM bits are forced to 0), ··· 391 430 392 431 #ifdef CONFIG_AUDITSYSCALL 393 432 cstar_auditsys: 394 - CFI_RESTORE_STATE 395 433 movl %r9d,R9(%rsp) /* register to be clobbered by call */ 396 434 auditsys_entry_common 397 435 movl R9(%rsp),%r9d /* reload 6th syscall arg */ ··· 420 460 ASM_CLAC 421 461 movq $-EFAULT,%rax 422 462 jmp ia32_sysret 423 - CFI_ENDPROC 424 463 425 464 /* 426 465 * Emulated IA32 system calls via int 0x80. ··· 443 484 */ 444 485 445 486 ENTRY(ia32_syscall) 446 - CFI_STARTPROC32 simple 447 - CFI_SIGNAL_FRAME 448 - CFI_DEF_CFA rsp,5*8 449 - /*CFI_REL_OFFSET ss,4*8 */ 450 - CFI_REL_OFFSET rsp,3*8 451 - /*CFI_REL_OFFSET rflags,2*8 */ 452 - /*CFI_REL_OFFSET cs,1*8 */ 453 - CFI_REL_OFFSET rip,0*8 454 - 455 487 /* 456 488 * Interrupts are off on entry. 
457 489 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON, ··· 456 506 movl %eax,%eax 457 507 458 508 /* Construct struct pt_regs on stack (iret frame is already on stack) */ 459 - pushq_cfi_reg rax /* pt_regs->orig_ax */ 460 - pushq_cfi_reg rdi /* pt_regs->di */ 461 - pushq_cfi_reg rsi /* pt_regs->si */ 462 - pushq_cfi_reg rdx /* pt_regs->dx */ 463 - pushq_cfi_reg rcx /* pt_regs->cx */ 464 - pushq_cfi $-ENOSYS /* pt_regs->ax */ 509 + pushq %rax /* pt_regs->orig_ax */ 510 + pushq %rdi /* pt_regs->di */ 511 + pushq %rsi /* pt_regs->si */ 512 + pushq %rdx /* pt_regs->dx */ 513 + pushq %rcx /* pt_regs->cx */ 514 + pushq $-ENOSYS /* pt_regs->ax */ 465 515 cld 466 516 sub $(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */ 467 - CFI_ADJUST_CFA_OFFSET 10*8 468 517 469 518 orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS) 470 519 testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS) ··· 493 544 LOAD_ARGS32 /* reload args from stack in case ptrace changed it */ 494 545 RESTORE_EXTRA_REGS 495 546 jmp ia32_do_call 496 - CFI_ENDPROC 497 547 END(ia32_syscall) 498 548 499 549 .macro PTREGSCALL label, func ··· 501 553 leaq \func(%rip),%rax 502 554 jmp ia32_ptregs_common 503 555 .endm 504 - 505 - CFI_STARTPROC32 506 556 507 557 PTREGSCALL stub32_rt_sigreturn, sys32_rt_sigreturn 508 558 PTREGSCALL stub32_sigreturn, sys32_sigreturn ··· 515 569 516 570 ALIGN 517 571 ia32_ptregs_common: 518 - CFI_ENDPROC 519 - CFI_STARTPROC32 simple 520 - CFI_SIGNAL_FRAME 521 - CFI_DEF_CFA rsp,SIZEOF_PTREGS 522 - CFI_REL_OFFSET rax,RAX 523 - CFI_REL_OFFSET rcx,RCX 524 - CFI_REL_OFFSET rdx,RDX 525 - CFI_REL_OFFSET rsi,RSI 526 - CFI_REL_OFFSET rdi,RDI 527 - CFI_REL_OFFSET rip,RIP 528 - /* CFI_REL_OFFSET cs,CS*/ 529 - /* CFI_REL_OFFSET rflags,EFLAGS*/ 530 - CFI_REL_OFFSET rsp,RSP 531 - /* CFI_REL_OFFSET ss,SS*/ 532 572 SAVE_EXTRA_REGS 8 533 573 call *%rax 534 574 RESTORE_EXTRA_REGS 8 535 575 ret 536 - CFI_ENDPROC 537 576 END(ia32_ptregs_common)
+45 -49
arch/x86/include/asm/calling.h
··· 46 46 47 47 */ 48 48 49 - #include <asm/dwarf2.h> 50 - 51 49 #ifdef CONFIG_X86_64 52 50 53 51 /* ··· 90 92 91 93 .macro ALLOC_PT_GPREGS_ON_STACK addskip=0 92 94 subq $15*8+\addskip, %rsp 93 - CFI_ADJUST_CFA_OFFSET 15*8+\addskip 94 95 .endm 95 96 96 97 .macro SAVE_C_REGS_HELPER offset=0 rax=1 rcx=1 r8910=1 r11=1 97 98 .if \r11 98 - movq_cfi r11, 6*8+\offset 99 + movq %r11, 6*8+\offset(%rsp) 99 100 .endif 100 101 .if \r8910 101 - movq_cfi r10, 7*8+\offset 102 - movq_cfi r9, 8*8+\offset 103 - movq_cfi r8, 9*8+\offset 102 + movq %r10, 7*8+\offset(%rsp) 103 + movq %r9, 8*8+\offset(%rsp) 104 + movq %r8, 9*8+\offset(%rsp) 104 105 .endif 105 106 .if \rax 106 - movq_cfi rax, 10*8+\offset 107 + movq %rax, 10*8+\offset(%rsp) 107 108 .endif 108 109 .if \rcx 109 - movq_cfi rcx, 11*8+\offset 110 + movq %rcx, 11*8+\offset(%rsp) 110 111 .endif 111 - movq_cfi rdx, 12*8+\offset 112 - movq_cfi rsi, 13*8+\offset 113 - movq_cfi rdi, 14*8+\offset 112 + movq %rdx, 12*8+\offset(%rsp) 113 + movq %rsi, 13*8+\offset(%rsp) 114 + movq %rdi, 14*8+\offset(%rsp) 114 115 .endm 115 116 .macro SAVE_C_REGS offset=0 116 117 SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1 ··· 128 131 .endm 129 132 130 133 .macro SAVE_EXTRA_REGS offset=0 131 - movq_cfi r15, 0*8+\offset 132 - movq_cfi r14, 1*8+\offset 133 - movq_cfi r13, 2*8+\offset 134 - movq_cfi r12, 3*8+\offset 135 - movq_cfi rbp, 4*8+\offset 136 - movq_cfi rbx, 5*8+\offset 134 + movq %r15, 0*8+\offset(%rsp) 135 + movq %r14, 1*8+\offset(%rsp) 136 + movq %r13, 2*8+\offset(%rsp) 137 + movq %r12, 3*8+\offset(%rsp) 138 + movq %rbp, 4*8+\offset(%rsp) 139 + movq %rbx, 5*8+\offset(%rsp) 137 140 .endm 138 141 .macro SAVE_EXTRA_REGS_RBP offset=0 139 - movq_cfi rbp, 4*8+\offset 142 + movq %rbp, 4*8+\offset(%rsp) 140 143 .endm 141 144 142 145 .macro RESTORE_EXTRA_REGS offset=0 143 - movq_cfi_restore 0*8+\offset, r15 144 - movq_cfi_restore 1*8+\offset, r14 145 - movq_cfi_restore 2*8+\offset, r13 146 - movq_cfi_restore 3*8+\offset, r12 147 - movq_cfi_restore 
4*8+\offset, rbp 148 - movq_cfi_restore 5*8+\offset, rbx 146 + movq 0*8+\offset(%rsp), %r15 147 + movq 1*8+\offset(%rsp), %r14 148 + movq 2*8+\offset(%rsp), %r13 149 + movq 3*8+\offset(%rsp), %r12 150 + movq 4*8+\offset(%rsp), %rbp 151 + movq 5*8+\offset(%rsp), %rbx 149 152 .endm 150 153 151 154 .macro ZERO_EXTRA_REGS ··· 159 162 160 163 .macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1 161 164 .if \rstor_r11 162 - movq_cfi_restore 6*8, r11 165 + movq 6*8(%rsp), %r11 163 166 .endif 164 167 .if \rstor_r8910 165 - movq_cfi_restore 7*8, r10 166 - movq_cfi_restore 8*8, r9 167 - movq_cfi_restore 9*8, r8 168 + movq 7*8(%rsp), %r10 169 + movq 8*8(%rsp), %r9 170 + movq 9*8(%rsp), %r8 168 171 .endif 169 172 .if \rstor_rax 170 - movq_cfi_restore 10*8, rax 173 + movq 10*8(%rsp), %rax 171 174 .endif 172 175 .if \rstor_rcx 173 - movq_cfi_restore 11*8, rcx 176 + movq 11*8(%rsp), %rcx 174 177 .endif 175 178 .if \rstor_rdx 176 - movq_cfi_restore 12*8, rdx 179 + movq 12*8(%rsp), %rdx 177 180 .endif 178 - movq_cfi_restore 13*8, rsi 179 - movq_cfi_restore 14*8, rdi 181 + movq 13*8(%rsp), %rsi 182 + movq 14*8(%rsp), %rdi 180 183 .endm 181 184 .macro RESTORE_C_REGS 182 185 RESTORE_C_REGS_HELPER 1,1,1,1,1 ··· 202 205 203 206 .macro REMOVE_PT_GPREGS_FROM_STACK addskip=0 204 207 addq $15*8+\addskip, %rsp 205 - CFI_ADJUST_CFA_OFFSET -(15*8+\addskip) 206 208 .endm 207 209 208 210 .macro icebp ··· 220 224 */ 221 225 222 226 .macro SAVE_ALL 223 - pushl_cfi_reg eax 224 - pushl_cfi_reg ebp 225 - pushl_cfi_reg edi 226 - pushl_cfi_reg esi 227 - pushl_cfi_reg edx 228 - pushl_cfi_reg ecx 229 - pushl_cfi_reg ebx 227 + pushl %eax 228 + pushl %ebp 229 + pushl %edi 230 + pushl %esi 231 + pushl %edx 232 + pushl %ecx 233 + pushl %ebx 230 234 .endm 231 235 232 236 .macro RESTORE_ALL 233 - popl_cfi_reg ebx 234 - popl_cfi_reg ecx 235 - popl_cfi_reg edx 236 - popl_cfi_reg esi 237 - popl_cfi_reg edi 238 - popl_cfi_reg ebp 239 - popl_cfi_reg eax 237 + popl %ebx 238 
+ popl %ecx 239 + popl %edx 240 + popl %esi 241 + popl %edi 242 + popl %ebp 243 + popl %eax 240 244 .endm 241 245 242 246 #endif /* CONFIG_X86_64 */
-170
arch/x86/include/asm/dwarf2.h
··· 1 - #ifndef _ASM_X86_DWARF2_H 2 - #define _ASM_X86_DWARF2_H 3 - 4 - #ifndef __ASSEMBLY__ 5 - #warning "asm/dwarf2.h should be only included in pure assembly files" 6 - #endif 7 - 8 - /* 9 - * Macros for dwarf2 CFI unwind table entries. 10 - * See "as.info" for details on these pseudo ops. Unfortunately 11 - * they are only supported in very new binutils, so define them 12 - * away for older version. 13 - */ 14 - 15 - #ifdef CONFIG_AS_CFI 16 - 17 - #define CFI_STARTPROC .cfi_startproc 18 - #define CFI_ENDPROC .cfi_endproc 19 - #define CFI_DEF_CFA .cfi_def_cfa 20 - #define CFI_DEF_CFA_REGISTER .cfi_def_cfa_register 21 - #define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset 22 - #define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset 23 - #define CFI_OFFSET .cfi_offset 24 - #define CFI_REL_OFFSET .cfi_rel_offset 25 - #define CFI_REGISTER .cfi_register 26 - #define CFI_RESTORE .cfi_restore 27 - #define CFI_REMEMBER_STATE .cfi_remember_state 28 - #define CFI_RESTORE_STATE .cfi_restore_state 29 - #define CFI_UNDEFINED .cfi_undefined 30 - #define CFI_ESCAPE .cfi_escape 31 - 32 - #ifdef CONFIG_AS_CFI_SIGNAL_FRAME 33 - #define CFI_SIGNAL_FRAME .cfi_signal_frame 34 - #else 35 - #define CFI_SIGNAL_FRAME 36 - #endif 37 - 38 - #if defined(CONFIG_AS_CFI_SECTIONS) && defined(__ASSEMBLY__) 39 - /* 40 - * Emit CFI data in .debug_frame sections, not .eh_frame sections. 41 - * The latter we currently just discard since we don't do DWARF 42 - * unwinding at runtime. So only the offline DWARF information is 43 - * useful to anyone. Note we should not use this directive if this 44 - * file is used in the vDSO assembly, or if vmlinux.lds.S gets 45 - * changed so it doesn't discard .eh_frame. 46 - */ 47 - .cfi_sections .debug_frame 48 - #endif 49 - 50 - #else 51 - 52 - /* 53 - * Due to the structure of pre-exisiting code, don't use assembler line 54 - * comment character # to ignore the arguments. Instead, use a dummy macro. 
55 - */ 56 - .macro cfi_ignore a=0, b=0, c=0, d=0 57 - .endm 58 - 59 - #define CFI_STARTPROC cfi_ignore 60 - #define CFI_ENDPROC cfi_ignore 61 - #define CFI_DEF_CFA cfi_ignore 62 - #define CFI_DEF_CFA_REGISTER cfi_ignore 63 - #define CFI_DEF_CFA_OFFSET cfi_ignore 64 - #define CFI_ADJUST_CFA_OFFSET cfi_ignore 65 - #define CFI_OFFSET cfi_ignore 66 - #define CFI_REL_OFFSET cfi_ignore 67 - #define CFI_REGISTER cfi_ignore 68 - #define CFI_RESTORE cfi_ignore 69 - #define CFI_REMEMBER_STATE cfi_ignore 70 - #define CFI_RESTORE_STATE cfi_ignore 71 - #define CFI_UNDEFINED cfi_ignore 72 - #define CFI_ESCAPE cfi_ignore 73 - #define CFI_SIGNAL_FRAME cfi_ignore 74 - 75 - #endif 76 - 77 - /* 78 - * An attempt to make CFI annotations more or less 79 - * correct and shorter. It is implied that you know 80 - * what you're doing if you use them. 81 - */ 82 - #ifdef __ASSEMBLY__ 83 - #ifdef CONFIG_X86_64 84 - .macro pushq_cfi reg 85 - pushq \reg 86 - CFI_ADJUST_CFA_OFFSET 8 87 - .endm 88 - 89 - .macro pushq_cfi_reg reg 90 - pushq %\reg 91 - CFI_ADJUST_CFA_OFFSET 8 92 - CFI_REL_OFFSET \reg, 0 93 - .endm 94 - 95 - .macro popq_cfi reg 96 - popq \reg 97 - CFI_ADJUST_CFA_OFFSET -8 98 - .endm 99 - 100 - .macro popq_cfi_reg reg 101 - popq %\reg 102 - CFI_ADJUST_CFA_OFFSET -8 103 - CFI_RESTORE \reg 104 - .endm 105 - 106 - .macro pushfq_cfi 107 - pushfq 108 - CFI_ADJUST_CFA_OFFSET 8 109 - .endm 110 - 111 - .macro popfq_cfi 112 - popfq 113 - CFI_ADJUST_CFA_OFFSET -8 114 - .endm 115 - 116 - .macro movq_cfi reg offset=0 117 - movq %\reg, \offset(%rsp) 118 - CFI_REL_OFFSET \reg, \offset 119 - .endm 120 - 121 - .macro movq_cfi_restore offset reg 122 - movq \offset(%rsp), %\reg 123 - CFI_RESTORE \reg 124 - .endm 125 - #else /*!CONFIG_X86_64*/ 126 - .macro pushl_cfi reg 127 - pushl \reg 128 - CFI_ADJUST_CFA_OFFSET 4 129 - .endm 130 - 131 - .macro pushl_cfi_reg reg 132 - pushl %\reg 133 - CFI_ADJUST_CFA_OFFSET 4 134 - CFI_REL_OFFSET \reg, 0 135 - .endm 136 - 137 - .macro popl_cfi reg 138 - popl \reg 
139 - CFI_ADJUST_CFA_OFFSET -4 140 - .endm 141 - 142 - .macro popl_cfi_reg reg 143 - popl %\reg 144 - CFI_ADJUST_CFA_OFFSET -4 145 - CFI_RESTORE \reg 146 - .endm 147 - 148 - .macro pushfl_cfi 149 - pushfl 150 - CFI_ADJUST_CFA_OFFSET 4 151 - .endm 152 - 153 - .macro popfl_cfi 154 - popfl 155 - CFI_ADJUST_CFA_OFFSET -4 156 - .endm 157 - 158 - .macro movl_cfi reg offset=0 159 - movl %\reg, \offset(%esp) 160 - CFI_REL_OFFSET \reg, \offset 161 - .endm 162 - 163 - .macro movl_cfi_restore offset reg 164 - movl \offset(%esp), %\reg 165 - CFI_RESTORE \reg 166 - .endm 167 - #endif /*!CONFIG_X86_64*/ 168 - #endif /*__ASSEMBLY__*/ 169 - 170 - #endif /* _ASM_X86_DWARF2_H */
+2 -5
arch/x86/include/asm/frame.h
··· 1 1 #ifdef __ASSEMBLY__ 2 2 3 3 #include <asm/asm.h> 4 - #include <asm/dwarf2.h> 5 4 6 5 /* The annotation hides the frame from the unwinder and makes it look 7 6 like a ordinary ebp save/restore. This avoids some special cases for 8 7 frame pointer later */ 9 8 #ifdef CONFIG_FRAME_POINTER 10 9 .macro FRAME 11 - __ASM_SIZE(push,_cfi) %__ASM_REG(bp) 12 - CFI_REL_OFFSET __ASM_REG(bp), 0 10 + __ASM_SIZE(push,) %__ASM_REG(bp) 13 11 __ASM_SIZE(mov) %__ASM_REG(sp), %__ASM_REG(bp) 14 12 .endm 15 13 .macro ENDFRAME 16 - __ASM_SIZE(pop,_cfi) %__ASM_REG(bp) 17 - CFI_RESTORE __ASM_REG(bp) 14 + __ASM_SIZE(pop,) %__ASM_REG(bp) 18 15 .endm 19 16 #else 20 17 .macro FRAME
+108 -260
arch/x86/kernel/entry_32.S
··· 50 50 #include <asm/smp.h> 51 51 #include <asm/page_types.h> 52 52 #include <asm/percpu.h> 53 - #include <asm/dwarf2.h> 54 53 #include <asm/processor-flags.h> 55 54 #include <asm/ftrace.h> 56 55 #include <asm/irq_vectors.h> ··· 112 113 113 114 /* unfortunately push/pop can't be no-op */ 114 115 .macro PUSH_GS 115 - pushl_cfi $0 116 + pushl $0 116 117 .endm 117 118 .macro POP_GS pop=0 118 119 addl $(4 + \pop), %esp 119 - CFI_ADJUST_CFA_OFFSET -(4 + \pop) 120 120 .endm 121 121 .macro POP_GS_EX 122 122 .endm ··· 135 137 #else /* CONFIG_X86_32_LAZY_GS */ 136 138 137 139 .macro PUSH_GS 138 - pushl_cfi %gs 139 - /*CFI_REL_OFFSET gs, 0*/ 140 + pushl %gs 140 141 .endm 141 142 142 143 .macro POP_GS pop=0 143 - 98: popl_cfi %gs 144 - /*CFI_RESTORE gs*/ 144 + 98: popl %gs 145 145 .if \pop <> 0 146 146 add $\pop, %esp 147 - CFI_ADJUST_CFA_OFFSET -\pop 148 147 .endif 149 148 .endm 150 149 .macro POP_GS_EX ··· 165 170 166 171 .macro GS_TO_REG reg 167 172 movl %gs, \reg 168 - /*CFI_REGISTER gs, \reg*/ 169 173 .endm 170 174 .macro REG_TO_PTGS reg 171 175 movl \reg, PT_GS(%esp) 172 - /*CFI_REL_OFFSET gs, PT_GS*/ 173 176 .endm 174 177 .macro SET_KERNEL_GS reg 175 178 movl $(__KERNEL_STACK_CANARY), \reg ··· 179 186 .macro SAVE_ALL 180 187 cld 181 188 PUSH_GS 182 - pushl_cfi %fs 183 - /*CFI_REL_OFFSET fs, 0;*/ 184 - pushl_cfi %es 185 - /*CFI_REL_OFFSET es, 0;*/ 186 - pushl_cfi %ds 187 - /*CFI_REL_OFFSET ds, 0;*/ 188 - pushl_cfi %eax 189 - CFI_REL_OFFSET eax, 0 190 - pushl_cfi %ebp 191 - CFI_REL_OFFSET ebp, 0 192 - pushl_cfi %edi 193 - CFI_REL_OFFSET edi, 0 194 - pushl_cfi %esi 195 - CFI_REL_OFFSET esi, 0 196 - pushl_cfi %edx 197 - CFI_REL_OFFSET edx, 0 198 - pushl_cfi %ecx 199 - CFI_REL_OFFSET ecx, 0 200 - pushl_cfi %ebx 201 - CFI_REL_OFFSET ebx, 0 189 + pushl %fs 190 + pushl %es 191 + pushl %ds 192 + pushl %eax 193 + pushl %ebp 194 + pushl %edi 195 + pushl %esi 196 + pushl %edx 197 + pushl %ecx 198 + pushl %ebx 202 199 movl $(__USER_DS), %edx 203 200 movl %edx, %ds 204 201 movl 
%edx, %es ··· 198 215 .endm 199 216 200 217 .macro RESTORE_INT_REGS 201 - popl_cfi %ebx 202 - CFI_RESTORE ebx 203 - popl_cfi %ecx 204 - CFI_RESTORE ecx 205 - popl_cfi %edx 206 - CFI_RESTORE edx 207 - popl_cfi %esi 208 - CFI_RESTORE esi 209 - popl_cfi %edi 210 - CFI_RESTORE edi 211 - popl_cfi %ebp 212 - CFI_RESTORE ebp 213 - popl_cfi %eax 214 - CFI_RESTORE eax 218 + popl %ebx 219 + popl %ecx 220 + popl %edx 221 + popl %esi 222 + popl %edi 223 + popl %ebp 224 + popl %eax 215 225 .endm 216 226 217 227 .macro RESTORE_REGS pop=0 218 228 RESTORE_INT_REGS 219 - 1: popl_cfi %ds 220 - /*CFI_RESTORE ds;*/ 221 - 2: popl_cfi %es 222 - /*CFI_RESTORE es;*/ 223 - 3: popl_cfi %fs 224 - /*CFI_RESTORE fs;*/ 229 + 1: popl %ds 230 + 2: popl %es 231 + 3: popl %fs 225 232 POP_GS \pop 226 233 .pushsection .fixup, "ax" 227 234 4: movl $0, (%esp) ··· 227 254 POP_GS_EX 228 255 .endm 229 256 230 - .macro RING0_INT_FRAME 231 - CFI_STARTPROC simple 232 - CFI_SIGNAL_FRAME 233 - CFI_DEF_CFA esp, 3*4 234 - /*CFI_OFFSET cs, -2*4;*/ 235 - CFI_OFFSET eip, -3*4 236 - .endm 237 - 238 - .macro RING0_EC_FRAME 239 - CFI_STARTPROC simple 240 - CFI_SIGNAL_FRAME 241 - CFI_DEF_CFA esp, 4*4 242 - /*CFI_OFFSET cs, -2*4;*/ 243 - CFI_OFFSET eip, -3*4 244 - .endm 245 - 246 - .macro RING0_PTREGS_FRAME 247 - CFI_STARTPROC simple 248 - CFI_SIGNAL_FRAME 249 - CFI_DEF_CFA esp, PT_OLDESP-PT_EBX 250 - /*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/ 251 - CFI_OFFSET eip, PT_EIP-PT_OLDESP 252 - /*CFI_OFFSET es, PT_ES-PT_OLDESP;*/ 253 - /*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/ 254 - CFI_OFFSET eax, PT_EAX-PT_OLDESP 255 - CFI_OFFSET ebp, PT_EBP-PT_OLDESP 256 - CFI_OFFSET edi, PT_EDI-PT_OLDESP 257 - CFI_OFFSET esi, PT_ESI-PT_OLDESP 258 - CFI_OFFSET edx, PT_EDX-PT_OLDESP 259 - CFI_OFFSET ecx, PT_ECX-PT_OLDESP 260 - CFI_OFFSET ebx, PT_EBX-PT_OLDESP 261 - .endm 262 - 263 257 ENTRY(ret_from_fork) 264 - CFI_STARTPROC 265 - pushl_cfi %eax 258 + pushl %eax 266 259 call schedule_tail 267 260 GET_THREAD_INFO(%ebp) 268 - popl_cfi %eax 269 - pushl_cfi 
$0x0202 # Reset kernel eflags 270 - popfl_cfi 261 + popl %eax 262 + pushl $0x0202 # Reset kernel eflags 263 + popfl 271 264 jmp syscall_exit 272 - CFI_ENDPROC 273 265 END(ret_from_fork) 274 266 275 267 ENTRY(ret_from_kernel_thread) 276 - CFI_STARTPROC 277 - pushl_cfi %eax 268 + pushl %eax 278 269 call schedule_tail 279 270 GET_THREAD_INFO(%ebp) 280 - popl_cfi %eax 281 - pushl_cfi $0x0202 # Reset kernel eflags 282 - popfl_cfi 271 + popl %eax 272 + pushl $0x0202 # Reset kernel eflags 273 + popfl 283 274 movl PT_EBP(%esp),%eax 284 275 call *PT_EBX(%esp) 285 276 movl $0,PT_EAX(%esp) 286 277 jmp syscall_exit 287 - CFI_ENDPROC 288 278 ENDPROC(ret_from_kernel_thread) 289 279 290 280 /* ··· 259 323 260 324 # userspace resumption stub bypassing syscall exit tracing 261 325 ALIGN 262 - RING0_PTREGS_FRAME 263 326 ret_from_exception: 264 327 preempt_stop(CLBR_ANY) 265 328 ret_from_intr: ··· 302 367 jmp need_resched 303 368 END(resume_kernel) 304 369 #endif 305 - CFI_ENDPROC 306 370 307 371 /* SYSENTER_RETURN points to after the "sysenter" instruction in 308 372 the vsyscall page. See vsyscall-sysentry.S, which defines the symbol. */ 309 373 310 374 # sysenter call handler stub 311 375 ENTRY(ia32_sysenter_target) 312 - CFI_STARTPROC simple 313 - CFI_SIGNAL_FRAME 314 - CFI_DEF_CFA esp, 0 315 - CFI_REGISTER esp, ebp 316 376 movl TSS_sysenter_sp0(%esp),%esp 317 377 sysenter_past_esp: 318 378 /* ··· 315 385 * enough kernel state to call TRACE_IRQS_OFF can be called - but 316 386 * we immediately enable interrupts at that point anyway. 317 387 */ 318 - pushl_cfi $__USER_DS 319 - /*CFI_REL_OFFSET ss, 0*/ 320 - pushl_cfi %ebp 321 - CFI_REL_OFFSET esp, 0 322 - pushfl_cfi 388 + pushl $__USER_DS 389 + pushl %ebp 390 + pushfl 323 391 orl $X86_EFLAGS_IF, (%esp) 324 - pushl_cfi $__USER_CS 325 - /*CFI_REL_OFFSET cs, 0*/ 392 + pushl $__USER_CS 326 393 /* 327 394 * Push current_thread_info()->sysenter_return to the stack. 
328 395 * A tiny bit of offset fixup is necessary: TI_sysenter_return ··· 328 401 * TOP_OF_KERNEL_STACK_PADDING takes us to the top of the stack; 329 402 * and THREAD_SIZE takes us to the bottom. 330 403 */ 331 - pushl_cfi ((TI_sysenter_return) - THREAD_SIZE + TOP_OF_KERNEL_STACK_PADDING + 4*4)(%esp) 332 - CFI_REL_OFFSET eip, 0 404 + pushl ((TI_sysenter_return) - THREAD_SIZE + TOP_OF_KERNEL_STACK_PADDING + 4*4)(%esp) 333 405 334 - pushl_cfi %eax 406 + pushl %eax 335 407 SAVE_ALL 336 408 ENABLE_INTERRUPTS(CLBR_NONE) 337 409 ··· 379 453 /* movl PT_EAX(%esp), %eax already set, syscall number: 1st arg to audit */ 380 454 movl PT_EBX(%esp), %edx /* ebx/a0: 2nd arg to audit */ 381 455 /* movl PT_ECX(%esp), %ecx already set, a1: 3nd arg to audit */ 382 - pushl_cfi PT_ESI(%esp) /* a3: 5th arg */ 383 - pushl_cfi PT_EDX+4(%esp) /* a2: 4th arg */ 456 + pushl PT_ESI(%esp) /* a3: 5th arg */ 457 + pushl PT_EDX+4(%esp) /* a2: 4th arg */ 384 458 call __audit_syscall_entry 385 - popl_cfi %ecx /* get that remapped edx off the stack */ 386 - popl_cfi %ecx /* get that remapped esi off the stack */ 459 + popl %ecx /* get that remapped edx off the stack */ 460 + popl %ecx /* get that remapped esi off the stack */ 387 461 movl PT_EAX(%esp),%eax /* reload syscall number */ 388 462 jmp sysenter_do_call 389 463 ··· 406 480 jmp sysenter_exit 407 481 #endif 408 482 409 - CFI_ENDPROC 410 483 .pushsection .fixup,"ax" 411 484 2: movl $0,PT_FS(%esp) 412 485 jmp 1b ··· 416 491 417 492 # system call handler stub 418 493 ENTRY(system_call) 419 - RING0_INT_FRAME # can't unwind into user space anyway 420 494 ASM_CLAC 421 - pushl_cfi %eax # save orig_eax 495 + pushl %eax # save orig_eax 422 496 SAVE_ALL 423 497 GET_THREAD_INFO(%ebp) 424 498 # system call tracing in operation / emulation ··· 451 527 movb PT_CS(%esp), %al 452 528 andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax 453 529 cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax 454 - CFI_REMEMBER_STATE 455 530 je ldt_ss # 
returning to user-space with LDT SS 456 531 #endif 457 532 restore_nocheck: ··· 466 543 _ASM_EXTABLE(irq_return,iret_exc) 467 544 468 545 #ifdef CONFIG_X86_ESPFIX32 469 - CFI_RESTORE_STATE 470 546 ldt_ss: 471 547 #ifdef CONFIG_PARAVIRT 472 548 /* ··· 499 577 shr $16, %edx 500 578 mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */ 501 579 mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */ 502 - pushl_cfi $__ESPFIX_SS 503 - pushl_cfi %eax /* new kernel esp */ 580 + pushl $__ESPFIX_SS 581 + pushl %eax /* new kernel esp */ 504 582 /* Disable interrupts, but do not irqtrace this section: we 505 583 * will soon execute iret and the tracer was already set to 506 584 * the irqstate after the iret */ 507 585 DISABLE_INTERRUPTS(CLBR_EAX) 508 586 lss (%esp), %esp /* switch to espfix segment */ 509 - CFI_ADJUST_CFA_OFFSET -8 510 587 jmp restore_nocheck 511 588 #endif 512 - CFI_ENDPROC 513 589 ENDPROC(system_call) 514 590 515 591 # perform work that needs to be done immediately before resumption 516 592 ALIGN 517 - RING0_PTREGS_FRAME # can't unwind into user space anyway 518 593 work_pending: 519 594 testb $_TIF_NEED_RESCHED, %cl 520 595 jz work_notifysig ··· 553 634 #ifdef CONFIG_VM86 554 635 ALIGN 555 636 work_notifysig_v86: 556 - pushl_cfi %ecx # save ti_flags for do_notify_resume 637 + pushl %ecx # save ti_flags for do_notify_resume 557 638 call save_v86_state # %eax contains pt_regs pointer 558 - popl_cfi %ecx 639 + popl %ecx 559 640 movl %eax, %esp 560 641 jmp 1b 561 642 #endif ··· 585 666 call syscall_trace_leave 586 667 jmp resume_userspace 587 668 END(syscall_exit_work) 588 - CFI_ENDPROC 589 669 590 - RING0_INT_FRAME # can't unwind into user space anyway 591 670 syscall_fault: 592 671 ASM_CLAC 593 672 GET_THREAD_INFO(%ebp) ··· 602 685 movl $-ENOSYS,%eax 603 686 jmp sysenter_after_call 604 687 END(sysenter_badsys) 605 - CFI_ENDPROC 606 688 607 689 .macro FIXUP_ESPFIX_STACK 608 690 /* ··· 617 701 mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */ 618 702 shl $16, %eax 619 703 addl %esp, 
%eax /* the adjusted stack pointer */ 620 - pushl_cfi $__KERNEL_DS 621 - pushl_cfi %eax 704 + pushl $__KERNEL_DS 705 + pushl %eax 622 706 lss (%esp), %esp /* switch to the normal stack segment */ 623 - CFI_ADJUST_CFA_OFFSET -8 624 707 #endif 625 708 .endm 626 709 .macro UNWIND_ESPFIX_STACK ··· 643 728 */ 644 729 .align 8 645 730 ENTRY(irq_entries_start) 646 - RING0_INT_FRAME 647 731 vector=FIRST_EXTERNAL_VECTOR 648 732 .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR) 649 - pushl_cfi $(~vector+0x80) /* Note: always in signed byte range */ 733 + pushl $(~vector+0x80) /* Note: always in signed byte range */ 650 734 vector=vector+1 651 735 jmp common_interrupt 652 - CFI_ADJUST_CFA_OFFSET -4 653 736 .align 8 654 737 .endr 655 738 END(irq_entries_start) ··· 666 753 call do_IRQ 667 754 jmp ret_from_intr 668 755 ENDPROC(common_interrupt) 669 - CFI_ENDPROC 670 756 671 757 #define BUILD_INTERRUPT3(name, nr, fn) \ 672 758 ENTRY(name) \ 673 - RING0_INT_FRAME; \ 674 759 ASM_CLAC; \ 675 - pushl_cfi $~(nr); \ 760 + pushl $~(nr); \ 676 761 SAVE_ALL; \ 677 762 TRACE_IRQS_OFF \ 678 763 movl %esp,%eax; \ 679 764 call fn; \ 680 765 jmp ret_from_intr; \ 681 - CFI_ENDPROC; \ 682 766 ENDPROC(name) 683 767 684 768 ··· 694 784 #include <asm/entry_arch.h> 695 785 696 786 ENTRY(coprocessor_error) 697 - RING0_INT_FRAME 698 787 ASM_CLAC 699 - pushl_cfi $0 700 - pushl_cfi $do_coprocessor_error 788 + pushl $0 789 + pushl $do_coprocessor_error 701 790 jmp error_code 702 - CFI_ENDPROC 703 791 END(coprocessor_error) 704 792 705 793 ENTRY(simd_coprocessor_error) 706 - RING0_INT_FRAME 707 794 ASM_CLAC 708 - pushl_cfi $0 795 + pushl $0 709 796 #ifdef CONFIG_X86_INVD_BUG 710 797 /* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */ 711 - ALTERNATIVE "pushl_cfi $do_general_protection", \ 798 + ALTERNATIVE "pushl $do_general_protection", \ 712 799 "pushl $do_simd_coprocessor_error", \ 713 800 X86_FEATURE_XMM 714 801 #else 715 - pushl_cfi $do_simd_coprocessor_error 802 + pushl 
$do_simd_coprocessor_error 716 803 #endif 717 804 jmp error_code 718 - CFI_ENDPROC 719 805 END(simd_coprocessor_error) 720 806 721 807 ENTRY(device_not_available) 722 - RING0_INT_FRAME 723 808 ASM_CLAC 724 - pushl_cfi $-1 # mark this as an int 725 - pushl_cfi $do_device_not_available 809 + pushl $-1 # mark this as an int 810 + pushl $do_device_not_available 726 811 jmp error_code 727 - CFI_ENDPROC 728 812 END(device_not_available) 729 813 730 814 #ifdef CONFIG_PARAVIRT ··· 734 830 #endif 735 831 736 832 ENTRY(overflow) 737 - RING0_INT_FRAME 738 833 ASM_CLAC 739 - pushl_cfi $0 740 - pushl_cfi $do_overflow 834 + pushl $0 835 + pushl $do_overflow 741 836 jmp error_code 742 - CFI_ENDPROC 743 837 END(overflow) 744 838 745 839 ENTRY(bounds) 746 - RING0_INT_FRAME 747 840 ASM_CLAC 748 - pushl_cfi $0 749 - pushl_cfi $do_bounds 841 + pushl $0 842 + pushl $do_bounds 750 843 jmp error_code 751 - CFI_ENDPROC 752 844 END(bounds) 753 845 754 846 ENTRY(invalid_op) 755 - RING0_INT_FRAME 756 847 ASM_CLAC 757 - pushl_cfi $0 758 - pushl_cfi $do_invalid_op 848 + pushl $0 849 + pushl $do_invalid_op 759 850 jmp error_code 760 - CFI_ENDPROC 761 851 END(invalid_op) 762 852 763 853 ENTRY(coprocessor_segment_overrun) 764 - RING0_INT_FRAME 765 854 ASM_CLAC 766 - pushl_cfi $0 767 - pushl_cfi $do_coprocessor_segment_overrun 855 + pushl $0 856 + pushl $do_coprocessor_segment_overrun 768 857 jmp error_code 769 - CFI_ENDPROC 770 858 END(coprocessor_segment_overrun) 771 859 772 860 ENTRY(invalid_TSS) 773 - RING0_EC_FRAME 774 861 ASM_CLAC 775 - pushl_cfi $do_invalid_TSS 862 + pushl $do_invalid_TSS 776 863 jmp error_code 777 - CFI_ENDPROC 778 864 END(invalid_TSS) 779 865 780 866 ENTRY(segment_not_present) 781 - RING0_EC_FRAME 782 867 ASM_CLAC 783 - pushl_cfi $do_segment_not_present 868 + pushl $do_segment_not_present 784 869 jmp error_code 785 - CFI_ENDPROC 786 870 END(segment_not_present) 787 871 788 872 ENTRY(stack_segment) 789 - RING0_EC_FRAME 790 873 ASM_CLAC 791 - pushl_cfi $do_stack_segment 874 
+ pushl $do_stack_segment 792 875 jmp error_code 793 - CFI_ENDPROC 794 876 END(stack_segment) 795 877 796 878 ENTRY(alignment_check) 797 - RING0_EC_FRAME 798 879 ASM_CLAC 799 - pushl_cfi $do_alignment_check 880 + pushl $do_alignment_check 800 881 jmp error_code 801 - CFI_ENDPROC 802 882 END(alignment_check) 803 883 804 884 ENTRY(divide_error) 805 - RING0_INT_FRAME 806 885 ASM_CLAC 807 - pushl_cfi $0 # no error code 808 - pushl_cfi $do_divide_error 886 + pushl $0 # no error code 887 + pushl $do_divide_error 809 888 jmp error_code 810 - CFI_ENDPROC 811 889 END(divide_error) 812 890 813 891 #ifdef CONFIG_X86_MCE 814 892 ENTRY(machine_check) 815 - RING0_INT_FRAME 816 893 ASM_CLAC 817 - pushl_cfi $0 818 - pushl_cfi machine_check_vector 894 + pushl $0 895 + pushl machine_check_vector 819 896 jmp error_code 820 - CFI_ENDPROC 821 897 END(machine_check) 822 898 #endif 823 899 824 900 ENTRY(spurious_interrupt_bug) 825 - RING0_INT_FRAME 826 901 ASM_CLAC 827 - pushl_cfi $0 828 - pushl_cfi $do_spurious_interrupt_bug 902 + pushl $0 903 + pushl $do_spurious_interrupt_bug 829 904 jmp error_code 830 - CFI_ENDPROC 831 905 END(spurious_interrupt_bug) 832 906 833 907 #ifdef CONFIG_XEN 834 908 /* Xen doesn't set %esp to be precisely what the normal sysenter 835 909 entrypoint expects, so fix it up before using the normal path. */ 836 910 ENTRY(xen_sysenter_target) 837 - RING0_INT_FRAME 838 911 addl $5*4, %esp /* remove xen-provided frame */ 839 - CFI_ADJUST_CFA_OFFSET -5*4 840 912 jmp sysenter_past_esp 841 - CFI_ENDPROC 842 913 843 914 ENTRY(xen_hypervisor_callback) 844 - CFI_STARTPROC 845 - pushl_cfi $-1 /* orig_ax = -1 => not a system call */ 915 + pushl $-1 /* orig_ax = -1 => not a system call */ 846 916 SAVE_ALL 847 917 TRACE_IRQS_OFF 848 918 ··· 840 962 call xen_maybe_preempt_hcall 841 963 #endif 842 964 jmp ret_from_intr 843 - CFI_ENDPROC 844 965 ENDPROC(xen_hypervisor_callback) 845 966 846 967 # Hypervisor uses this for application faults while it executes. 
··· 853 976 # to pop the stack frame we end up in an infinite loop of failsafe callbacks. 854 977 # We distinguish between categories by maintaining a status value in EAX. 855 978 ENTRY(xen_failsafe_callback) 856 - CFI_STARTPROC 857 - pushl_cfi %eax 979 + pushl %eax 858 980 movl $1,%eax 859 981 1: mov 4(%esp),%ds 860 982 2: mov 8(%esp),%es ··· 862 986 /* EAX == 0 => Category 1 (Bad segment) 863 987 EAX != 0 => Category 2 (Bad IRET) */ 864 988 testl %eax,%eax 865 - popl_cfi %eax 989 + popl %eax 866 990 lea 16(%esp),%esp 867 - CFI_ADJUST_CFA_OFFSET -16 868 991 jz 5f 869 992 jmp iret_exc 870 - 5: pushl_cfi $-1 /* orig_ax = -1 => not a system call */ 993 + 5: pushl $-1 /* orig_ax = -1 => not a system call */ 871 994 SAVE_ALL 872 995 jmp ret_from_exception 873 - CFI_ENDPROC 874 996 875 997 .section .fixup,"ax" 876 998 6: xorl %eax,%eax ··· 1069 1195 1070 1196 #ifdef CONFIG_TRACING 1071 1197 ENTRY(trace_page_fault) 1072 - RING0_EC_FRAME 1073 1198 ASM_CLAC 1074 - pushl_cfi $trace_do_page_fault 1199 + pushl $trace_do_page_fault 1075 1200 jmp error_code 1076 - CFI_ENDPROC 1077 1201 END(trace_page_fault) 1078 1202 #endif 1079 1203 1080 1204 ENTRY(page_fault) 1081 - RING0_EC_FRAME 1082 1205 ASM_CLAC 1083 - pushl_cfi $do_page_fault 1206 + pushl $do_page_fault 1084 1207 ALIGN 1085 1208 error_code: 1086 1209 /* the function address is in %gs's slot on the stack */ 1087 - pushl_cfi %fs 1088 - /*CFI_REL_OFFSET fs, 0*/ 1089 - pushl_cfi %es 1090 - /*CFI_REL_OFFSET es, 0*/ 1091 - pushl_cfi %ds 1092 - /*CFI_REL_OFFSET ds, 0*/ 1093 - pushl_cfi_reg eax 1094 - pushl_cfi_reg ebp 1095 - pushl_cfi_reg edi 1096 - pushl_cfi_reg esi 1097 - pushl_cfi_reg edx 1098 - pushl_cfi_reg ecx 1099 - pushl_cfi_reg ebx 1210 + pushl %fs 1211 + pushl %es 1212 + pushl %ds 1213 + pushl %eax 1214 + pushl %ebp 1215 + pushl %edi 1216 + pushl %esi 1217 + pushl %edx 1218 + pushl %ecx 1219 + pushl %ebx 1100 1220 cld 1101 1221 movl $(__KERNEL_PERCPU), %ecx 1102 1222 movl %ecx, %fs ··· 1108 1240 movl %esp,%eax # 
pt_regs pointer 1109 1241 call *%edi 1110 1242 jmp ret_from_exception 1111 - CFI_ENDPROC 1112 1243 END(page_fault) 1113 1244 1114 1245 /* ··· 1128 1261 jne \ok 1129 1262 \label: 1130 1263 movl TSS_sysenter_sp0 + \offset(%esp), %esp 1131 - CFI_DEF_CFA esp, 0 1132 - CFI_UNDEFINED eip 1133 - pushfl_cfi 1134 - pushl_cfi $__KERNEL_CS 1135 - pushl_cfi $sysenter_past_esp 1136 - CFI_REL_OFFSET eip, 0 1264 + pushfl 1265 + pushl $__KERNEL_CS 1266 + pushl $sysenter_past_esp 1137 1267 .endm 1138 1268 1139 1269 ENTRY(debug) 1140 - RING0_INT_FRAME 1141 1270 ASM_CLAC 1142 1271 cmpl $ia32_sysenter_target,(%esp) 1143 1272 jne debug_stack_correct 1144 1273 FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn 1145 1274 debug_stack_correct: 1146 - pushl_cfi $-1 # mark this as an int 1275 + pushl $-1 # mark this as an int 1147 1276 SAVE_ALL 1148 1277 TRACE_IRQS_OFF 1149 1278 xorl %edx,%edx # error code 0 1150 1279 movl %esp,%eax # pt_regs pointer 1151 1280 call do_debug 1152 1281 jmp ret_from_exception 1153 - CFI_ENDPROC 1154 1282 END(debug) 1155 1283 1156 1284 /* ··· 1157 1295 * fault happened on the sysenter path. 1158 1296 */ 1159 1297 ENTRY(nmi) 1160 - RING0_INT_FRAME 1161 1298 ASM_CLAC 1162 1299 #ifdef CONFIG_X86_ESPFIX32 1163 - pushl_cfi %eax 1300 + pushl %eax 1164 1301 movl %ss, %eax 1165 1302 cmpw $__ESPFIX_SS, %ax 1166 - popl_cfi %eax 1303 + popl %eax 1167 1304 je nmi_espfix_stack 1168 1305 #endif 1169 1306 cmpl $ia32_sysenter_target,(%esp) 1170 1307 je nmi_stack_fixup 1171 - pushl_cfi %eax 1308 + pushl %eax 1172 1309 movl %esp,%eax 1173 1310 /* Do not access memory above the end of our stack page, 1174 1311 * it might not exist. 
1175 1312 */ 1176 1313 andl $(THREAD_SIZE-1),%eax 1177 1314 cmpl $(THREAD_SIZE-20),%eax 1178 - popl_cfi %eax 1315 + popl %eax 1179 1316 jae nmi_stack_correct 1180 1317 cmpl $ia32_sysenter_target,12(%esp) 1181 1318 je nmi_debug_stack_check 1182 1319 nmi_stack_correct: 1183 - /* We have a RING0_INT_FRAME here */ 1184 - pushl_cfi %eax 1320 + pushl %eax 1185 1321 SAVE_ALL 1186 1322 xorl %edx,%edx # zero error code 1187 1323 movl %esp,%eax # pt_regs pointer 1188 1324 call do_nmi 1189 1325 jmp restore_all_notrace 1190 - CFI_ENDPROC 1191 1326 1192 1327 nmi_stack_fixup: 1193 - RING0_INT_FRAME 1194 1328 FIX_STACK 12, nmi_stack_correct, 1 1195 1329 jmp nmi_stack_correct 1196 1330 1197 1331 nmi_debug_stack_check: 1198 - /* We have a RING0_INT_FRAME here */ 1199 1332 cmpw $__KERNEL_CS,16(%esp) 1200 1333 jne nmi_stack_correct 1201 1334 cmpl $debug,(%esp) ··· 1202 1345 1203 1346 #ifdef CONFIG_X86_ESPFIX32 1204 1347 nmi_espfix_stack: 1205 - /* We have a RING0_INT_FRAME here. 1206 - * 1348 + /* 1207 1349 * create the pointer to lss back 1208 1350 */ 1209 - pushl_cfi %ss 1210 - pushl_cfi %esp 1351 + pushl %ss 1352 + pushl %esp 1211 1353 addl $4, (%esp) 1212 1354 /* copy the iret frame of 12 bytes */ 1213 1355 .rept 3 1214 - pushl_cfi 16(%esp) 1356 + pushl 16(%esp) 1215 1357 .endr 1216 - pushl_cfi %eax 1358 + pushl %eax 1217 1359 SAVE_ALL 1218 1360 FIXUP_ESPFIX_STACK # %eax == %esp 1219 1361 xorl %edx,%edx # zero error code 1220 1362 call do_nmi 1221 1363 RESTORE_REGS 1222 1364 lss 12+4(%esp), %esp # back to espfix stack 1223 - CFI_ADJUST_CFA_OFFSET -24 1224 1365 jmp irq_return 1225 1366 #endif 1226 - CFI_ENDPROC 1227 1367 END(nmi) 1228 1368 1229 1369 ENTRY(int3) 1230 - RING0_INT_FRAME 1231 1370 ASM_CLAC 1232 - pushl_cfi $-1 # mark this as an int 1371 + pushl $-1 # mark this as an int 1233 1372 SAVE_ALL 1234 1373 TRACE_IRQS_OFF 1235 1374 xorl %edx,%edx # zero error code 1236 1375 movl %esp,%eax # pt_regs pointer 1237 1376 call do_int3 1238 1377 jmp ret_from_exception 1239 - 
CFI_ENDPROC 1240 1378 END(int3) 1241 1379 1242 1380 ENTRY(general_protection) 1243 - RING0_EC_FRAME 1244 - pushl_cfi $do_general_protection 1381 + pushl $do_general_protection 1245 1382 jmp error_code 1246 - CFI_ENDPROC 1247 1383 END(general_protection) 1248 1384 1249 1385 #ifdef CONFIG_KVM_GUEST 1250 1386 ENTRY(async_page_fault) 1251 - RING0_EC_FRAME 1252 1387 ASM_CLAC 1253 - pushl_cfi $do_async_page_fault 1388 + pushl $do_async_page_fault 1254 1389 jmp error_code 1255 - CFI_ENDPROC 1256 1390 END(async_page_fault) 1257 1391 #endif 1258 1392
+48 -240
arch/x86/kernel/entry_64.S
··· 19 19 * at the top of the kernel process stack. 20 20 * 21 21 * Some macro usage: 22 - * - CFI macros are used to generate dwarf2 unwind information for better 23 - * backtraces. They don't change any code. 24 22 * - ENTRY/END Define functions in the symbol table. 25 23 * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging. 26 24 * - idtentry - Define exception entry points. ··· 28 30 #include <asm/segment.h> 29 31 #include <asm/cache.h> 30 32 #include <asm/errno.h> 31 - #include <asm/dwarf2.h> 32 33 #include <asm/calling.h> 33 34 #include <asm/asm-offsets.h> 34 35 #include <asm/msr.h> ··· 110 113 #endif 111 114 112 115 /* 113 - * empty frame 114 - */ 115 - .macro EMPTY_FRAME start=1 offset=0 116 - .if \start 117 - CFI_STARTPROC simple 118 - CFI_SIGNAL_FRAME 119 - CFI_DEF_CFA rsp,8+\offset 120 - .else 121 - CFI_DEF_CFA_OFFSET 8+\offset 122 - .endif 123 - .endm 124 - 125 - /* 126 - * initial frame state for interrupts (and exceptions without error code) 127 - */ 128 - .macro INTR_FRAME start=1 offset=0 129 - EMPTY_FRAME \start, 5*8+\offset 130 - /*CFI_REL_OFFSET ss, 4*8+\offset*/ 131 - CFI_REL_OFFSET rsp, 3*8+\offset 132 - /*CFI_REL_OFFSET rflags, 2*8+\offset*/ 133 - /*CFI_REL_OFFSET cs, 1*8+\offset*/ 134 - CFI_REL_OFFSET rip, 0*8+\offset 135 - .endm 136 - 137 - /* 138 - * initial frame state for exceptions with error code (and interrupts 139 - * with vector already pushed) 140 - */ 141 - .macro XCPT_FRAME start=1 offset=0 142 - INTR_FRAME \start, 1*8+\offset 143 - .endm 144 - 145 - /* 146 - * frame that enables passing a complete pt_regs to a C function. 
147 - */ 148 - .macro DEFAULT_FRAME start=1 offset=0 149 - XCPT_FRAME \start, ORIG_RAX+\offset 150 - CFI_REL_OFFSET rdi, RDI+\offset 151 - CFI_REL_OFFSET rsi, RSI+\offset 152 - CFI_REL_OFFSET rdx, RDX+\offset 153 - CFI_REL_OFFSET rcx, RCX+\offset 154 - CFI_REL_OFFSET rax, RAX+\offset 155 - CFI_REL_OFFSET r8, R8+\offset 156 - CFI_REL_OFFSET r9, R9+\offset 157 - CFI_REL_OFFSET r10, R10+\offset 158 - CFI_REL_OFFSET r11, R11+\offset 159 - CFI_REL_OFFSET rbx, RBX+\offset 160 - CFI_REL_OFFSET rbp, RBP+\offset 161 - CFI_REL_OFFSET r12, R12+\offset 162 - CFI_REL_OFFSET r13, R13+\offset 163 - CFI_REL_OFFSET r14, R14+\offset 164 - CFI_REL_OFFSET r15, R15+\offset 165 - .endm 166 - 167 - /* 168 116 * 64bit SYSCALL instruction entry. Up to 6 arguments in registers. 169 117 * 170 118 * 64bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11, ··· 138 196 */ 139 197 140 198 ENTRY(system_call) 141 - CFI_STARTPROC simple 142 - CFI_SIGNAL_FRAME 143 - CFI_DEF_CFA rsp,0 144 - CFI_REGISTER rip,rcx 145 - /*CFI_REGISTER rflags,r11*/ 146 - 147 199 /* 148 200 * Interrupts are off on entry. 149 201 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON, ··· 155 219 movq PER_CPU_VAR(cpu_current_top_of_stack),%rsp 156 220 157 221 /* Construct struct pt_regs on stack */ 158 - pushq_cfi $__USER_DS /* pt_regs->ss */ 159 - pushq_cfi PER_CPU_VAR(rsp_scratch) /* pt_regs->sp */ 222 + pushq $__USER_DS /* pt_regs->ss */ 223 + pushq PER_CPU_VAR(rsp_scratch) /* pt_regs->sp */ 160 224 /* 161 225 * Re-enable interrupts. 
162 226 * We use 'rsp_scratch' as a scratch space, hence irq-off block above ··· 165 229 * with using rsp_scratch: 166 230 */ 167 231 ENABLE_INTERRUPTS(CLBR_NONE) 168 - pushq_cfi %r11 /* pt_regs->flags */ 169 - pushq_cfi $__USER_CS /* pt_regs->cs */ 170 - pushq_cfi %rcx /* pt_regs->ip */ 171 - CFI_REL_OFFSET rip,0 172 - pushq_cfi_reg rax /* pt_regs->orig_ax */ 173 - pushq_cfi_reg rdi /* pt_regs->di */ 174 - pushq_cfi_reg rsi /* pt_regs->si */ 175 - pushq_cfi_reg rdx /* pt_regs->dx */ 176 - pushq_cfi_reg rcx /* pt_regs->cx */ 177 - pushq_cfi $-ENOSYS /* pt_regs->ax */ 178 - pushq_cfi_reg r8 /* pt_regs->r8 */ 179 - pushq_cfi_reg r9 /* pt_regs->r9 */ 180 - pushq_cfi_reg r10 /* pt_regs->r10 */ 181 - pushq_cfi_reg r11 /* pt_regs->r11 */ 232 + pushq %r11 /* pt_regs->flags */ 233 + pushq $__USER_CS /* pt_regs->cs */ 234 + pushq %rcx /* pt_regs->ip */ 235 + pushq %rax /* pt_regs->orig_ax */ 236 + pushq %rdi /* pt_regs->di */ 237 + pushq %rsi /* pt_regs->si */ 238 + pushq %rdx /* pt_regs->dx */ 239 + pushq %rcx /* pt_regs->cx */ 240 + pushq $-ENOSYS /* pt_regs->ax */ 241 + pushq %r8 /* pt_regs->r8 */ 242 + pushq %r9 /* pt_regs->r9 */ 243 + pushq %r10 /* pt_regs->r10 */ 244 + pushq %r11 /* pt_regs->r11 */ 182 245 sub $(6*8),%rsp /* pt_regs->bp,bx,r12-15 not saved */ 183 - CFI_ADJUST_CFA_OFFSET 6*8 184 246 185 247 testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS) 186 248 jnz tracesys ··· 216 282 testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS) 217 283 jnz int_ret_from_sys_call_irqs_off /* Go to the slow path */ 218 284 219 - CFI_REMEMBER_STATE 220 - 221 285 RESTORE_C_REGS_EXCEPT_RCX_R11 222 286 movq RIP(%rsp),%rcx 223 - CFI_REGISTER rip,rcx 224 287 movq EFLAGS(%rsp),%r11 225 - /*CFI_REGISTER rflags,r11*/ 226 288 movq RSP(%rsp),%rsp 227 289 /* 228 290 * 64bit SYSRET restores rip from rcx, ··· 236 306 * done.) 
237 307 */ 238 308 USERGS_SYSRET64 239 - 240 - CFI_RESTORE_STATE 241 309 242 310 /* Do syscall entry tracing */ 243 311 tracesys: ··· 302 374 jnc int_very_careful 303 375 TRACE_IRQS_ON 304 376 ENABLE_INTERRUPTS(CLBR_NONE) 305 - pushq_cfi %rdi 377 + pushq %rdi 306 378 SCHEDULE_USER 307 - popq_cfi %rdi 379 + popq %rdi 308 380 DISABLE_INTERRUPTS(CLBR_NONE) 309 381 TRACE_IRQS_OFF 310 382 jmp int_with_check ··· 317 389 /* Check for syscall exit trace */ 318 390 testl $_TIF_WORK_SYSCALL_EXIT,%edx 319 391 jz int_signal 320 - pushq_cfi %rdi 392 + pushq %rdi 321 393 leaq 8(%rsp),%rdi # &ptregs -> arg1 322 394 call syscall_trace_leave 323 - popq_cfi %rdi 395 + popq %rdi 324 396 andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi 325 397 jmp int_restore_rest 326 398 ··· 403 475 * perf profiles. Nothing jumps here. 404 476 */ 405 477 syscall_return_via_sysret: 406 - CFI_REMEMBER_STATE 407 478 /* rcx and r11 are already restored (see code above) */ 408 479 RESTORE_C_REGS_EXCEPT_RCX_R11 409 480 movq RSP(%rsp),%rsp 410 481 USERGS_SYSRET64 411 - CFI_RESTORE_STATE 412 482 413 483 opportunistic_sysret_failed: 414 484 SWAPGS 415 485 jmp restore_c_regs_and_iret 416 - CFI_ENDPROC 417 486 END(system_call) 418 487 419 488 420 489 .macro FORK_LIKE func 421 490 ENTRY(stub_\func) 422 - CFI_STARTPROC 423 - DEFAULT_FRAME 0, 8 /* offset 8: return address */ 424 491 SAVE_EXTRA_REGS 8 425 492 jmp sys_\func 426 - CFI_ENDPROC 427 493 END(stub_\func) 428 494 .endm 429 495 ··· 426 504 FORK_LIKE vfork 427 505 428 506 ENTRY(stub_execve) 429 - CFI_STARTPROC 430 - DEFAULT_FRAME 0, 8 431 507 call sys_execve 432 508 return_from_execve: 433 509 testl %eax, %eax ··· 435 515 1: 436 516 /* must use IRET code path (pt_regs->cs may have changed) */ 437 517 addq $8, %rsp 438 - CFI_ADJUST_CFA_OFFSET -8 439 518 ZERO_EXTRA_REGS 440 519 movq %rax,RAX(%rsp) 441 520 jmp int_ret_from_sys_call 442 - CFI_ENDPROC 443 521 END(stub_execve) 444 522 /* 445 523 * Remaining execve stubs are only 7 bytes long. 
··· 445 527 */ 446 528 .align 8 447 529 GLOBAL(stub_execveat) 448 - CFI_STARTPROC 449 - DEFAULT_FRAME 0, 8 450 530 call sys_execveat 451 531 jmp return_from_execve 452 - CFI_ENDPROC 453 532 END(stub_execveat) 454 533 455 534 #if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION) 456 535 .align 8 457 536 GLOBAL(stub_x32_execve) 458 537 GLOBAL(stub32_execve) 459 - CFI_STARTPROC 460 - DEFAULT_FRAME 0, 8 461 538 call compat_sys_execve 462 539 jmp return_from_execve 463 - CFI_ENDPROC 464 540 END(stub32_execve) 465 541 END(stub_x32_execve) 466 542 .align 8 467 543 GLOBAL(stub_x32_execveat) 468 544 GLOBAL(stub32_execveat) 469 - CFI_STARTPROC 470 - DEFAULT_FRAME 0, 8 471 545 call compat_sys_execveat 472 546 jmp return_from_execve 473 - CFI_ENDPROC 474 547 END(stub32_execveat) 475 548 END(stub_x32_execveat) 476 549 #endif ··· 471 562 * This cannot be done with SYSRET, so use the IRET return path instead. 472 563 */ 473 564 ENTRY(stub_rt_sigreturn) 474 - CFI_STARTPROC 475 - DEFAULT_FRAME 0, 8 476 565 /* 477 566 * SAVE_EXTRA_REGS result is not normally needed: 478 567 * sigreturn overwrites all pt_regs->GPREGS. 
··· 482 575 call sys_rt_sigreturn 483 576 return_from_stub: 484 577 addq $8, %rsp 485 - CFI_ADJUST_CFA_OFFSET -8 486 578 RESTORE_EXTRA_REGS 487 579 movq %rax,RAX(%rsp) 488 580 jmp int_ret_from_sys_call 489 - CFI_ENDPROC 490 581 END(stub_rt_sigreturn) 491 582 492 583 #ifdef CONFIG_X86_X32_ABI 493 584 ENTRY(stub_x32_rt_sigreturn) 494 - CFI_STARTPROC 495 - DEFAULT_FRAME 0, 8 496 585 SAVE_EXTRA_REGS 8 497 586 call sys32_x32_rt_sigreturn 498 587 jmp return_from_stub 499 - CFI_ENDPROC 500 588 END(stub_x32_rt_sigreturn) 501 589 #endif 502 590 ··· 501 599 * rdi: prev task we switched from 502 600 */ 503 601 ENTRY(ret_from_fork) 504 - DEFAULT_FRAME 505 602 506 603 LOCK ; btr $TIF_FORK,TI_flags(%r8) 507 604 508 - pushq_cfi $0x0002 509 - popfq_cfi # reset kernel eflags 605 + pushq $0x0002 606 + popfq # reset kernel eflags 510 607 511 608 call schedule_tail # rdi: 'prev' task parameter 512 609 ··· 529 628 movl $0, RAX(%rsp) 530 629 RESTORE_EXTRA_REGS 531 630 jmp int_ret_from_sys_call 532 - CFI_ENDPROC 533 631 END(ret_from_fork) 534 632 535 633 /* ··· 537 637 */ 538 638 .align 8 539 639 ENTRY(irq_entries_start) 540 - INTR_FRAME 541 640 vector=FIRST_EXTERNAL_VECTOR 542 641 .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR) 543 - pushq_cfi $(~vector+0x80) /* Note: always in signed byte range */ 642 + pushq $(~vector+0x80) /* Note: always in signed byte range */ 544 643 vector=vector+1 545 644 jmp common_interrupt 546 - CFI_ADJUST_CFA_OFFSET -8 547 645 .align 8 548 646 .endr 549 - CFI_ENDPROC 550 647 END(irq_entries_start) 551 648 552 649 /* ··· 585 688 movq %rsp, %rsi 586 689 incl PER_CPU_VAR(irq_count) 587 690 cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp 588 - CFI_DEF_CFA_REGISTER rsi 589 691 pushq %rsi 590 - /* 591 - * For debugger: 592 - * "CFA (Current Frame Address) is the value on stack + offset" 593 - */ 594 - CFI_ESCAPE 0x0f /* DW_CFA_def_cfa_expression */, 6, \ 595 - 0x77 /* DW_OP_breg7 (rsp) */, 0, \ 596 - 0x06 /* DW_OP_deref */, \ 597 - 0x08 /* DW_OP_const1u */, 
SIZEOF_PTREGS-RBP, \ 598 - 0x22 /* DW_OP_plus */ 599 692 /* We entered an interrupt context - irqs are off: */ 600 693 TRACE_IRQS_OFF 601 694 ··· 598 711 */ 599 712 .p2align CONFIG_X86_L1_CACHE_SHIFT 600 713 common_interrupt: 601 - XCPT_FRAME 602 714 ASM_CLAC 603 715 addq $-0x80,(%rsp) /* Adjust vector to [-256,-1] range */ 604 716 interrupt do_IRQ ··· 609 723 610 724 /* Restore saved previous stack */ 611 725 popq %rsi 612 - CFI_DEF_CFA rsi,SIZEOF_PTREGS-RBP /* reg/off reset after def_cfa_expr */ 613 726 /* return code expects complete pt_regs - adjust rsp accordingly: */ 614 727 leaq -RBP(%rsi),%rsp 615 - CFI_DEF_CFA_REGISTER rsp 616 - CFI_ADJUST_CFA_OFFSET RBP 617 728 618 729 testb $3, CS(%rsp) 619 730 jz retint_kernel ··· 626 743 LOCKDEP_SYS_EXIT_IRQ 627 744 movl TI_flags(%rcx),%edx 628 745 andl %edi,%edx 629 - CFI_REMEMBER_STATE 630 746 jnz retint_careful 631 747 632 748 retint_swapgs: /* return to user-space */ ··· 689 807 690 808 #ifdef CONFIG_X86_ESPFIX64 691 809 native_irq_return_ldt: 692 - pushq_cfi %rax 693 - pushq_cfi %rdi 810 + pushq %rax 811 + pushq %rdi 694 812 SWAPGS 695 813 movq PER_CPU_VAR(espfix_waddr),%rdi 696 814 movq %rax,(0*8)(%rdi) /* RAX */ ··· 705 823 movq (5*8)(%rsp),%rax /* RSP */ 706 824 movq %rax,(4*8)(%rdi) 707 825 andl $0xffff0000,%eax 708 - popq_cfi %rdi 826 + popq %rdi 709 827 orq PER_CPU_VAR(espfix_stack),%rax 710 828 SWAPGS 711 829 movq %rax,%rsp 712 - popq_cfi %rax 830 + popq %rax 713 831 jmp native_irq_return_iret 714 832 #endif 715 833 716 834 /* edi: workmask, edx: work */ 717 835 retint_careful: 718 - CFI_RESTORE_STATE 719 836 bt $TIF_NEED_RESCHED,%edx 720 837 jnc retint_signal 721 838 TRACE_IRQS_ON 722 839 ENABLE_INTERRUPTS(CLBR_NONE) 723 - pushq_cfi %rdi 840 + pushq %rdi 724 841 SCHEDULE_USER 725 - popq_cfi %rdi 842 + popq %rdi 726 843 GET_THREAD_INFO(%rcx) 727 844 DISABLE_INTERRUPTS(CLBR_NONE) 728 845 TRACE_IRQS_OFF ··· 743 862 GET_THREAD_INFO(%rcx) 744 863 jmp retint_with_reschedule 745 864 746 - CFI_ENDPROC 747 865 
END(common_interrupt) 748 866 749 867 /* ··· 750 870 */ 751 871 .macro apicinterrupt3 num sym do_sym 752 872 ENTRY(\sym) 753 - INTR_FRAME 754 873 ASM_CLAC 755 - pushq_cfi $~(\num) 874 + pushq $~(\num) 756 875 .Lcommon_\sym: 757 876 interrupt \do_sym 758 877 jmp ret_from_intr 759 - CFI_ENDPROC 760 878 END(\sym) 761 879 .endm 762 880 ··· 837 959 .error "using shift_ist requires paranoid=1" 838 960 .endif 839 961 840 - .if \has_error_code 841 - XCPT_FRAME 842 - .else 843 - INTR_FRAME 844 - .endif 845 - 846 962 ASM_CLAC 847 963 PARAVIRT_ADJUST_EXCEPTION_FRAME 848 964 849 965 .ifeq \has_error_code 850 - pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */ 966 + pushq $-1 /* ORIG_RAX: no syscall to restart */ 851 967 .endif 852 968 853 969 ALLOC_PT_GPREGS_ON_STACK 854 970 855 971 .if \paranoid 856 972 .if \paranoid == 1 857 - CFI_REMEMBER_STATE 858 973 testb $3, CS(%rsp) /* If coming from userspace, switch */ 859 974 jnz 1f /* stacks. */ 860 975 .endif ··· 856 985 call error_entry 857 986 .endif 858 987 /* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */ 859 - 860 - DEFAULT_FRAME 0 861 988 862 989 .if \paranoid 863 990 .if \shift_ist != -1 ··· 892 1023 .endif 893 1024 894 1025 .if \paranoid == 1 895 - CFI_RESTORE_STATE 896 1026 /* 897 1027 * Paranoid entry from userspace. Switch stacks and treat it 898 1028 * as a normal entry. 
This means that paranoid handlers ··· 900 1032 1: 901 1033 call error_entry 902 1034 903 - DEFAULT_FRAME 0 904 1035 905 1036 movq %rsp,%rdi /* pt_regs pointer */ 906 1037 call sync_regs ··· 918 1051 919 1052 jmp error_exit /* %ebx: no swapgs flag */ 920 1053 .endif 921 - 922 - CFI_ENDPROC 923 1054 END(\sym) 924 1055 .endm 925 1056 ··· 950 1085 /* Reload gs selector with exception handling */ 951 1086 /* edi: new selector */ 952 1087 ENTRY(native_load_gs_index) 953 - CFI_STARTPROC 954 - pushfq_cfi 1088 + pushfq 955 1089 DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI) 956 1090 SWAPGS 957 1091 gs_change: 958 1092 movl %edi,%gs 959 1093 2: mfence /* workaround */ 960 1094 SWAPGS 961 - popfq_cfi 1095 + popfq 962 1096 ret 963 - CFI_ENDPROC 964 1097 END(native_load_gs_index) 965 1098 966 1099 _ASM_EXTABLE(gs_change,bad_gs) ··· 973 1110 974 1111 /* Call softirq on interrupt stack. Interrupts are off. */ 975 1112 ENTRY(do_softirq_own_stack) 976 - CFI_STARTPROC 977 - pushq_cfi %rbp 978 - CFI_REL_OFFSET rbp,0 1113 + pushq %rbp 979 1114 mov %rsp,%rbp 980 - CFI_DEF_CFA_REGISTER rbp 981 1115 incl PER_CPU_VAR(irq_count) 982 1116 cmove PER_CPU_VAR(irq_stack_ptr),%rsp 983 1117 push %rbp # backlink for old unwinder 984 1118 call __do_softirq 985 1119 leaveq 986 - CFI_RESTORE rbp 987 - CFI_DEF_CFA_REGISTER rsp 988 - CFI_ADJUST_CFA_OFFSET -8 989 1120 decl PER_CPU_VAR(irq_count) 990 1121 ret 991 - CFI_ENDPROC 992 1122 END(do_softirq_own_stack) 993 1123 994 1124 #ifdef CONFIG_XEN ··· 1001 1145 * activation and restart the handler using the previous one. 
1002 1146 */ 1003 1147 ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs) 1004 - CFI_STARTPROC 1005 1148 /* 1006 1149 * Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will 1007 1150 * see the correct pointer to the pt_regs 1008 1151 */ 1009 1152 movq %rdi, %rsp # we don't return, adjust the stack frame 1010 - CFI_ENDPROC 1011 - DEFAULT_FRAME 1012 1153 11: incl PER_CPU_VAR(irq_count) 1013 1154 movq %rsp,%rbp 1014 - CFI_DEF_CFA_REGISTER rbp 1015 1155 cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp 1016 1156 pushq %rbp # backlink for old unwinder 1017 1157 call xen_evtchn_do_upcall 1018 1158 popq %rsp 1019 - CFI_DEF_CFA_REGISTER rsp 1020 1159 decl PER_CPU_VAR(irq_count) 1021 1160 #ifndef CONFIG_PREEMPT 1022 1161 call xen_maybe_preempt_hcall 1023 1162 #endif 1024 1163 jmp error_exit 1025 - CFI_ENDPROC 1026 1164 END(xen_do_hypervisor_callback) 1027 1165 1028 1166 /* ··· 1033 1183 * with its current contents: any discrepancy means we in category 1. 1034 1184 */ 1035 1185 ENTRY(xen_failsafe_callback) 1036 - INTR_FRAME 1 (6*8) 1037 - /*CFI_REL_OFFSET gs,GS*/ 1038 - /*CFI_REL_OFFSET fs,FS*/ 1039 - /*CFI_REL_OFFSET es,ES*/ 1040 - /*CFI_REL_OFFSET ds,DS*/ 1041 - CFI_REL_OFFSET r11,8 1042 - CFI_REL_OFFSET rcx,0 1043 1186 movl %ds,%ecx 1044 1187 cmpw %cx,0x10(%rsp) 1045 - CFI_REMEMBER_STATE 1046 1188 jne 1f 1047 1189 movl %es,%ecx 1048 1190 cmpw %cx,0x18(%rsp) ··· 1047 1205 jne 1f 1048 1206 /* All segments match their saved values => Category 2 (Bad IRET). */ 1049 1207 movq (%rsp),%rcx 1050 - CFI_RESTORE rcx 1051 1208 movq 8(%rsp),%r11 1052 - CFI_RESTORE r11 1053 1209 addq $0x30,%rsp 1054 - CFI_ADJUST_CFA_OFFSET -0x30 1055 - pushq_cfi $0 /* RIP */ 1056 - pushq_cfi %r11 1057 - pushq_cfi %rcx 1210 + pushq $0 /* RIP */ 1211 + pushq %r11 1212 + pushq %rcx 1058 1213 jmp general_protection 1059 - CFI_RESTORE_STATE 1060 1214 1: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. 
*/ 1061 1215 movq (%rsp),%rcx 1062 - CFI_RESTORE rcx 1063 1216 movq 8(%rsp),%r11 1064 - CFI_RESTORE r11 1065 1217 addq $0x30,%rsp 1066 - CFI_ADJUST_CFA_OFFSET -0x30 1067 - pushq_cfi $-1 /* orig_ax = -1 => not a system call */ 1218 + pushq $-1 /* orig_ax = -1 => not a system call */ 1068 1219 ALLOC_PT_GPREGS_ON_STACK 1069 1220 SAVE_C_REGS 1070 1221 SAVE_EXTRA_REGS 1071 1222 jmp error_exit 1072 - CFI_ENDPROC 1073 1223 END(xen_failsafe_callback) 1074 1224 1075 1225 apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \ ··· 1097 1263 * Return: ebx=0: need swapgs on exit, ebx=1: otherwise 1098 1264 */ 1099 1265 ENTRY(paranoid_entry) 1100 - XCPT_FRAME 1 15*8 1101 1266 cld 1102 1267 SAVE_C_REGS 8 1103 1268 SAVE_EXTRA_REGS 8 ··· 1108 1275 SWAPGS 1109 1276 xorl %ebx,%ebx 1110 1277 1: ret 1111 - CFI_ENDPROC 1112 1278 END(paranoid_entry) 1113 1279 1114 1280 /* ··· 1122 1290 */ 1123 1291 /* On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) */ 1124 1292 ENTRY(paranoid_exit) 1125 - DEFAULT_FRAME 1126 1293 DISABLE_INTERRUPTS(CLBR_NONE) 1127 1294 TRACE_IRQS_OFF_DEBUG 1128 1295 testl %ebx,%ebx /* swapgs needed? */ ··· 1136 1305 RESTORE_C_REGS 1137 1306 REMOVE_PT_GPREGS_FROM_STACK 8 1138 1307 INTERRUPT_RETURN 1139 - CFI_ENDPROC 1140 1308 END(paranoid_exit) 1141 1309 1142 1310 /* ··· 1143 1313 * Return: ebx=0: need swapgs on exit, ebx=1: otherwise 1144 1314 */ 1145 1315 ENTRY(error_entry) 1146 - XCPT_FRAME 1 15*8 1147 1316 cld 1148 1317 SAVE_C_REGS 8 1149 1318 SAVE_EXTRA_REGS 8 ··· 1162 1333 * for these here too. 
1163 1334 */ 1164 1335 error_kernelspace: 1165 - CFI_REL_OFFSET rcx, RCX+8 1166 1336 incl %ebx 1167 1337 leaq native_irq_return_iret(%rip),%rcx 1168 1338 cmpq %rcx,RIP+8(%rsp) ··· 1185 1357 mov %rax,%rsp 1186 1358 decl %ebx /* Return to usergs */ 1187 1359 jmp error_sti 1188 - CFI_ENDPROC 1189 1360 END(error_entry) 1190 1361 1191 1362 1192 1363 /* On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) */ 1193 1364 ENTRY(error_exit) 1194 - DEFAULT_FRAME 1195 1365 movl %ebx,%eax 1196 1366 RESTORE_EXTRA_REGS 1197 1367 DISABLE_INTERRUPTS(CLBR_NONE) ··· 1203 1377 andl %edi,%edx 1204 1378 jnz retint_careful 1205 1379 jmp retint_swapgs 1206 - CFI_ENDPROC 1207 1380 END(error_exit) 1208 1381 1209 1382 /* Runs on exception stack */ 1210 1383 ENTRY(nmi) 1211 - INTR_FRAME 1212 1384 PARAVIRT_ADJUST_EXCEPTION_FRAME 1213 1385 /* 1214 1386 * We allow breakpoints in NMIs. If a breakpoint occurs, then ··· 1241 1417 */ 1242 1418 1243 1419 /* Use %rdx as our temp variable throughout */ 1244 - pushq_cfi %rdx 1245 - CFI_REL_OFFSET rdx, 0 1420 + pushq %rdx 1246 1421 1247 1422 /* 1248 1423 * If %cs was not the kernel segment, then the NMI triggered in user ··· 1275 1452 jb first_nmi 1276 1453 /* Ah, it is within the NMI stack, treat it as nested */ 1277 1454 1278 - CFI_REMEMBER_STATE 1279 - 1280 1455 nested_nmi: 1281 1456 /* 1282 1457 * Do nothing if we interrupted the fixup in repeat_nmi. 
··· 1292 1471 /* Set up the interrupted NMIs stack to jump to repeat_nmi */ 1293 1472 leaq -1*8(%rsp), %rdx 1294 1473 movq %rdx, %rsp 1295 - CFI_ADJUST_CFA_OFFSET 1*8 1296 1474 leaq -10*8(%rsp), %rdx 1297 - pushq_cfi $__KERNEL_DS 1298 - pushq_cfi %rdx 1299 - pushfq_cfi 1300 - pushq_cfi $__KERNEL_CS 1301 - pushq_cfi $repeat_nmi 1475 + pushq $__KERNEL_DS 1476 + pushq %rdx 1477 + pushfq 1478 + pushq $__KERNEL_CS 1479 + pushq $repeat_nmi 1302 1480 1303 1481 /* Put stack back */ 1304 1482 addq $(6*8), %rsp 1305 - CFI_ADJUST_CFA_OFFSET -6*8 1306 1483 1307 1484 nested_nmi_out: 1308 - popq_cfi %rdx 1309 - CFI_RESTORE rdx 1485 + popq %rdx 1310 1486 1311 1487 /* No need to check faults here */ 1312 1488 INTERRUPT_RETURN 1313 1489 1314 - CFI_RESTORE_STATE 1315 1490 first_nmi: 1316 1491 /* 1317 1492 * Because nested NMIs will use the pushed location that we ··· 1346 1529 */ 1347 1530 /* Do not pop rdx, nested NMIs will corrupt that part of the stack */ 1348 1531 movq (%rsp), %rdx 1349 - CFI_RESTORE rdx 1350 1532 1351 1533 /* Set the NMI executing variable on the stack. */ 1352 - pushq_cfi $1 1534 + pushq $1 1353 1535 1354 1536 /* 1355 1537 * Leave room for the "copied" frame 1356 1538 */ 1357 1539 subq $(5*8), %rsp 1358 - CFI_ADJUST_CFA_OFFSET 5*8 1359 1540 1360 1541 /* Copy the stack frame to the Saved frame */ 1361 1542 .rept 5 1362 - pushq_cfi 11*8(%rsp) 1543 + pushq 11*8(%rsp) 1363 1544 .endr 1364 - CFI_DEF_CFA_OFFSET 5*8 1365 1545 1366 1546 /* Everything up to here is safe from nested NMIs */ 1367 1547 ··· 1381 1567 1382 1568 /* Make another copy, this one may be modified by nested NMIs */ 1383 1569 addq $(10*8), %rsp 1384 - CFI_ADJUST_CFA_OFFSET -10*8 1385 1570 .rept 5 1386 - pushq_cfi -6*8(%rsp) 1571 + pushq -6*8(%rsp) 1387 1572 .endr 1388 1573 subq $(5*8), %rsp 1389 - CFI_DEF_CFA_OFFSET 5*8 1390 1574 end_repeat_nmi: 1391 1575 1392 1576 /* ··· 1392 1580 * NMI if the first NMI took an exception and reset our iret stack 1393 1581 * so that we repeat another NMI. 
1394 1582 */ 1395 - pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */ 1583 + pushq $-1 /* ORIG_RAX: no syscall to restart */ 1396 1584 ALLOC_PT_GPREGS_ON_STACK 1397 1585 1398 1586 /* ··· 1403 1591 * exceptions might do. 1404 1592 */ 1405 1593 call paranoid_entry 1406 - DEFAULT_FRAME 0 1407 1594 1408 1595 /* 1409 1596 * Save off the CR2 register. If we take a page fault in the NMI then ··· 1439 1628 /* Clear the NMI executing stack variable */ 1440 1629 movq $0, 5*8(%rsp) 1441 1630 jmp irq_return 1442 - CFI_ENDPROC 1443 1631 END(nmi) 1444 1632 1445 1633 ENTRY(ignore_sysret) 1446 - CFI_STARTPROC 1447 1634 mov $-ENOSYS,%eax 1448 1635 sysret 1449 - CFI_ENDPROC 1450 1636 END(ignore_sysret) 1451 1637
+2 -5
arch/x86/lib/atomic64_386_32.S
··· 11 11 12 12 #include <linux/linkage.h> 13 13 #include <asm/alternative-asm.h> 14 - #include <asm/dwarf2.h> 15 14 16 15 /* if you want SMP support, implement these with real spinlocks */ 17 16 .macro LOCK reg 18 - pushfl_cfi 17 + pushfl 19 18 cli 20 19 .endm 21 20 22 21 .macro UNLOCK reg 23 - popfl_cfi 22 + popfl 24 23 .endm 25 24 26 25 #define BEGIN(op) \ 27 26 .macro endp; \ 28 - CFI_ENDPROC; \ 29 27 ENDPROC(atomic64_##op##_386); \ 30 28 .purgem endp; \ 31 29 .endm; \ 32 30 ENTRY(atomic64_##op##_386); \ 33 - CFI_STARTPROC; \ 34 31 LOCK v; 35 32 36 33 #define ENDP endp
+20 -41
arch/x86/lib/atomic64_cx8_32.S
··· 11 11 12 12 #include <linux/linkage.h> 13 13 #include <asm/alternative-asm.h> 14 - #include <asm/dwarf2.h> 15 14 16 15 .macro read64 reg 17 16 movl %ebx, %eax ··· 21 22 .endm 22 23 23 24 ENTRY(atomic64_read_cx8) 24 - CFI_STARTPROC 25 - 26 25 read64 %ecx 27 26 ret 28 - CFI_ENDPROC 29 27 ENDPROC(atomic64_read_cx8) 30 28 31 29 ENTRY(atomic64_set_cx8) 32 - CFI_STARTPROC 33 - 34 30 1: 35 31 /* we don't need LOCK_PREFIX since aligned 64-bit writes 36 32 * are atomic on 586 and newer */ ··· 33 39 jne 1b 34 40 35 41 ret 36 - CFI_ENDPROC 37 42 ENDPROC(atomic64_set_cx8) 38 43 39 44 ENTRY(atomic64_xchg_cx8) 40 - CFI_STARTPROC 41 - 42 45 1: 43 46 LOCK_PREFIX 44 47 cmpxchg8b (%esi) 45 48 jne 1b 46 49 47 50 ret 48 - CFI_ENDPROC 49 51 ENDPROC(atomic64_xchg_cx8) 50 52 51 53 .macro addsub_return func ins insc 52 54 ENTRY(atomic64_\func\()_return_cx8) 53 - CFI_STARTPROC 54 - pushl_cfi_reg ebp 55 - pushl_cfi_reg ebx 56 - pushl_cfi_reg esi 57 - pushl_cfi_reg edi 55 + pushl %ebp 56 + pushl %ebx 57 + pushl %esi 58 + pushl %edi 58 59 59 60 movl %eax, %esi 60 61 movl %edx, %edi ··· 68 79 10: 69 80 movl %ebx, %eax 70 81 movl %ecx, %edx 71 - popl_cfi_reg edi 72 - popl_cfi_reg esi 73 - popl_cfi_reg ebx 74 - popl_cfi_reg ebp 82 + popl %edi 83 + popl %esi 84 + popl %ebx 85 + popl %ebp 75 86 ret 76 - CFI_ENDPROC 77 87 ENDPROC(atomic64_\func\()_return_cx8) 78 88 .endm 79 89 ··· 81 93 82 94 .macro incdec_return func ins insc 83 95 ENTRY(atomic64_\func\()_return_cx8) 84 - CFI_STARTPROC 85 - pushl_cfi_reg ebx 96 + pushl %ebx 86 97 87 98 read64 %esi 88 99 1: ··· 96 109 10: 97 110 movl %ebx, %eax 98 111 movl %ecx, %edx 99 - popl_cfi_reg ebx 112 + popl %ebx 100 113 ret 101 - CFI_ENDPROC 102 114 ENDPROC(atomic64_\func\()_return_cx8) 103 115 .endm 104 116 ··· 105 119 incdec_return dec sub sbb 106 120 107 121 ENTRY(atomic64_dec_if_positive_cx8) 108 - CFI_STARTPROC 109 - pushl_cfi_reg ebx 122 + pushl %ebx 110 123 111 124 read64 %esi 112 125 1: ··· 121 136 2: 122 137 movl %ebx, %eax 123 138 movl %ecx, 
%edx 124 - popl_cfi_reg ebx 139 + popl %ebx 125 140 ret 126 - CFI_ENDPROC 127 141 ENDPROC(atomic64_dec_if_positive_cx8) 128 142 129 143 ENTRY(atomic64_add_unless_cx8) 130 - CFI_STARTPROC 131 - pushl_cfi_reg ebp 132 - pushl_cfi_reg ebx 144 + pushl %ebp 145 + pushl %ebx 133 146 /* these just push these two parameters on the stack */ 134 - pushl_cfi_reg edi 135 - pushl_cfi_reg ecx 147 + pushl %edi 148 + pushl %ecx 136 149 137 150 movl %eax, %ebp 138 151 movl %edx, %edi ··· 151 168 movl $1, %eax 152 169 3: 153 170 addl $8, %esp 154 - CFI_ADJUST_CFA_OFFSET -8 155 - popl_cfi_reg ebx 156 - popl_cfi_reg ebp 171 + popl %ebx 172 + popl %ebp 157 173 ret 158 174 4: 159 175 cmpl %edx, 4(%esp) 160 176 jne 2b 161 177 xorl %eax, %eax 162 178 jmp 3b 163 - CFI_ENDPROC 164 179 ENDPROC(atomic64_add_unless_cx8) 165 180 166 181 ENTRY(atomic64_inc_not_zero_cx8) 167 - CFI_STARTPROC 168 - pushl_cfi_reg ebx 182 + pushl %ebx 169 183 170 184 read64 %esi 171 185 1: ··· 179 199 180 200 movl $1, %eax 181 201 3: 182 - popl_cfi_reg ebx 202 + popl %ebx 183 203 ret 184 - CFI_ENDPROC 185 204 ENDPROC(atomic64_inc_not_zero_cx8)
+21 -31
arch/x86/lib/checksum_32.S
··· 26 26 */ 27 27 28 28 #include <linux/linkage.h> 29 - #include <asm/dwarf2.h> 30 29 #include <asm/errno.h> 31 30 #include <asm/asm.h> 32 31 ··· 49 50 * alignment for the unrolled loop. 50 51 */ 51 52 ENTRY(csum_partial) 52 - CFI_STARTPROC 53 - pushl_cfi_reg esi 54 - pushl_cfi_reg ebx 53 + pushl %esi 54 + pushl %ebx 55 55 movl 20(%esp),%eax # Function arg: unsigned int sum 56 56 movl 16(%esp),%ecx # Function arg: int len 57 57 movl 12(%esp),%esi # Function arg: unsigned char *buff ··· 127 129 jz 8f 128 130 roll $8, %eax 129 131 8: 130 - popl_cfi_reg ebx 131 - popl_cfi_reg esi 132 + popl %ebx 133 + popl %esi 132 134 ret 133 - CFI_ENDPROC 134 135 ENDPROC(csum_partial) 135 136 136 137 #else ··· 137 140 /* Version for PentiumII/PPro */ 138 141 139 142 ENTRY(csum_partial) 140 - CFI_STARTPROC 141 - pushl_cfi_reg esi 142 - pushl_cfi_reg ebx 143 + pushl %esi 144 + pushl %ebx 143 145 movl 20(%esp),%eax # Function arg: unsigned int sum 144 146 movl 16(%esp),%ecx # Function arg: int len 145 147 movl 12(%esp),%esi # Function arg: const unsigned char *buf ··· 245 249 jz 90f 246 250 roll $8, %eax 247 251 90: 248 - popl_cfi_reg ebx 249 - popl_cfi_reg esi 252 + popl %ebx 253 + popl %esi 250 254 ret 251 - CFI_ENDPROC 252 255 ENDPROC(csum_partial) 253 256 254 257 #endif ··· 282 287 #define FP 12 283 288 284 289 ENTRY(csum_partial_copy_generic) 285 - CFI_STARTPROC 286 290 subl $4,%esp 287 - CFI_ADJUST_CFA_OFFSET 4 288 - pushl_cfi_reg edi 289 - pushl_cfi_reg esi 290 - pushl_cfi_reg ebx 291 + pushl %edi 292 + pushl %esi 293 + pushl %ebx 291 294 movl ARGBASE+16(%esp),%eax # sum 292 295 movl ARGBASE+12(%esp),%ecx # len 293 296 movl ARGBASE+4(%esp),%esi # src ··· 394 401 395 402 .previous 396 403 397 - popl_cfi_reg ebx 398 - popl_cfi_reg esi 399 - popl_cfi_reg edi 400 - popl_cfi %ecx # equivalent to addl $4,%esp 404 + popl %ebx 405 + popl %esi 406 + popl %edi 407 + popl %ecx # equivalent to addl $4,%esp 401 408 ret 402 - CFI_ENDPROC 403 409 ENDPROC(csum_partial_copy_generic) 404 410 405 
411 #else ··· 418 426 #define ARGBASE 12 419 427 420 428 ENTRY(csum_partial_copy_generic) 421 - CFI_STARTPROC 422 - pushl_cfi_reg ebx 423 - pushl_cfi_reg edi 424 - pushl_cfi_reg esi 429 + pushl %ebx 430 + pushl %edi 431 + pushl %esi 425 432 movl ARGBASE+4(%esp),%esi #src 426 433 movl ARGBASE+8(%esp),%edi #dst 427 434 movl ARGBASE+12(%esp),%ecx #len ··· 480 489 jmp 7b 481 490 .previous 482 491 483 - popl_cfi_reg esi 484 - popl_cfi_reg edi 485 - popl_cfi_reg ebx 492 + popl %esi 493 + popl %edi 494 + popl %ebx 486 495 ret 487 - CFI_ENDPROC 488 496 ENDPROC(csum_partial_copy_generic) 489 497 490 498 #undef ROUND
-7
arch/x86/lib/clear_page_64.S
··· 1 1 #include <linux/linkage.h> 2 - #include <asm/dwarf2.h> 3 2 #include <asm/cpufeature.h> 4 3 #include <asm/alternative-asm.h> 5 4 ··· 14 15 * %rdi - page 15 16 */ 16 17 ENTRY(clear_page) 17 - CFI_STARTPROC 18 18 19 19 ALTERNATIVE_2 "jmp clear_page_orig", "", X86_FEATURE_REP_GOOD, \ 20 20 "jmp clear_page_c_e", X86_FEATURE_ERMS ··· 22 24 xorl %eax,%eax 23 25 rep stosq 24 26 ret 25 - CFI_ENDPROC 26 27 ENDPROC(clear_page) 27 28 28 29 ENTRY(clear_page_orig) 29 - CFI_STARTPROC 30 30 31 31 xorl %eax,%eax 32 32 movl $4096/64,%ecx ··· 44 48 jnz .Lloop 45 49 nop 46 50 ret 47 - CFI_ENDPROC 48 51 ENDPROC(clear_page_orig) 49 52 50 53 ENTRY(clear_page_c_e) 51 - CFI_STARTPROC 52 54 movl $4096,%ecx 53 55 xorl %eax,%eax 54 56 rep stosb 55 57 ret 56 - CFI_ENDPROC 57 58 ENDPROC(clear_page_c_e)
+3 -9
arch/x86/lib/cmpxchg16b_emu.S
··· 6 6 * 7 7 */ 8 8 #include <linux/linkage.h> 9 - #include <asm/dwarf2.h> 10 9 #include <asm/percpu.h> 11 10 12 11 .text ··· 20 21 * %al : Operation successful 21 22 */ 22 23 ENTRY(this_cpu_cmpxchg16b_emu) 23 - CFI_STARTPROC 24 24 25 25 # 26 26 # Emulate 'cmpxchg16b %gs:(%rsi)' except we return the result in %al not ··· 30 32 # *atomic* on a single cpu (as provided by the this_cpu_xx class of 31 33 # macros). 32 34 # 33 - pushfq_cfi 35 + pushfq 34 36 cli 35 37 36 38 cmpq PER_CPU_VAR((%rsi)), %rax ··· 41 43 movq %rbx, PER_CPU_VAR((%rsi)) 42 44 movq %rcx, PER_CPU_VAR(8(%rsi)) 43 45 44 - CFI_REMEMBER_STATE 45 - popfq_cfi 46 + popfq 46 47 mov $1, %al 47 48 ret 48 49 49 - CFI_RESTORE_STATE 50 50 .Lnot_same: 51 - popfq_cfi 51 + popfq 52 52 xor %al,%al 53 53 ret 54 - 55 - CFI_ENDPROC 56 54 57 55 ENDPROC(this_cpu_cmpxchg16b_emu)
+3 -8
arch/x86/lib/cmpxchg8b_emu.S
··· 7 7 */ 8 8 9 9 #include <linux/linkage.h> 10 - #include <asm/dwarf2.h> 11 10 12 11 .text 13 12 ··· 19 20 * %ecx : high 32 bits of new value 20 21 */ 21 22 ENTRY(cmpxchg8b_emu) 22 - CFI_STARTPROC 23 23 24 24 # 25 25 # Emulate 'cmpxchg8b (%esi)' on UP except we don't 26 26 # set the whole ZF thing (caller will just compare 27 27 # eax:edx with the expected value) 28 28 # 29 - pushfl_cfi 29 + pushfl 30 30 cli 31 31 32 32 cmpl (%esi), %eax ··· 36 38 movl %ebx, (%esi) 37 39 movl %ecx, 4(%esi) 38 40 39 - CFI_REMEMBER_STATE 40 - popfl_cfi 41 + popfl 41 42 ret 42 43 43 - CFI_RESTORE_STATE 44 44 .Lnot_same: 45 45 movl (%esi), %eax 46 46 .Lhalf_same: 47 47 movl 4(%esi), %edx 48 48 49 - popfl_cfi 49 + popfl 50 50 ret 51 51 52 - CFI_ENDPROC 53 52 ENDPROC(cmpxchg8b_emu)
-11
arch/x86/lib/copy_page_64.S
··· 1 1 /* Written 2003 by Andi Kleen, based on a kernel by Evandro Menezes */ 2 2 3 3 #include <linux/linkage.h> 4 - #include <asm/dwarf2.h> 5 4 #include <asm/cpufeature.h> 6 5 #include <asm/alternative-asm.h> 7 6 ··· 12 13 */ 13 14 ALIGN 14 15 ENTRY(copy_page) 15 - CFI_STARTPROC 16 16 ALTERNATIVE "jmp copy_page_regs", "", X86_FEATURE_REP_GOOD 17 17 movl $4096/8, %ecx 18 18 rep movsq 19 19 ret 20 - CFI_ENDPROC 21 20 ENDPROC(copy_page) 22 21 23 22 ENTRY(copy_page_regs) 24 - CFI_STARTPROC 25 23 subq $2*8, %rsp 26 - CFI_ADJUST_CFA_OFFSET 2*8 27 24 movq %rbx, (%rsp) 28 - CFI_REL_OFFSET rbx, 0 29 25 movq %r12, 1*8(%rsp) 30 - CFI_REL_OFFSET r12, 1*8 31 26 32 27 movl $(4096/64)-5, %ecx 33 28 .p2align 4 ··· 80 87 jnz .Loop2 81 88 82 89 movq (%rsp), %rbx 83 - CFI_RESTORE rbx 84 90 movq 1*8(%rsp), %r12 85 - CFI_RESTORE r12 86 91 addq $2*8, %rsp 87 - CFI_ADJUST_CFA_OFFSET -2*8 88 92 ret 89 - CFI_ENDPROC 90 93 ENDPROC(copy_page_regs)
-15
arch/x86/lib/copy_user_64.S
··· 7 7 */ 8 8 9 9 #include <linux/linkage.h> 10 - #include <asm/dwarf2.h> 11 10 #include <asm/current.h> 12 11 #include <asm/asm-offsets.h> 13 12 #include <asm/thread_info.h> ··· 17 18 18 19 /* Standard copy_to_user with segment limit checking */ 19 20 ENTRY(_copy_to_user) 20 - CFI_STARTPROC 21 21 GET_THREAD_INFO(%rax) 22 22 movq %rdi,%rcx 23 23 addq %rdx,%rcx ··· 28 30 X86_FEATURE_REP_GOOD, \ 29 31 "jmp copy_user_enhanced_fast_string", \ 30 32 X86_FEATURE_ERMS 31 - CFI_ENDPROC 32 33 ENDPROC(_copy_to_user) 33 34 34 35 /* Standard copy_from_user with segment limit checking */ 35 36 ENTRY(_copy_from_user) 36 - CFI_STARTPROC 37 37 GET_THREAD_INFO(%rax) 38 38 movq %rsi,%rcx 39 39 addq %rdx,%rcx ··· 43 47 X86_FEATURE_REP_GOOD, \ 44 48 "jmp copy_user_enhanced_fast_string", \ 45 49 X86_FEATURE_ERMS 46 - CFI_ENDPROC 47 50 ENDPROC(_copy_from_user) 48 51 49 52 .section .fixup,"ax" 50 53 /* must zero dest */ 51 54 ENTRY(bad_from_user) 52 55 bad_from_user: 53 - CFI_STARTPROC 54 56 movl %edx,%ecx 55 57 xorl %eax,%eax 56 58 rep ··· 56 62 bad_to_user: 57 63 movl %edx,%eax 58 64 ret 59 - CFI_ENDPROC 60 65 ENDPROC(bad_from_user) 61 66 .previous 62 67 ··· 73 80 * eax uncopied bytes or 0 if successful. 74 81 */ 75 82 ENTRY(copy_user_generic_unrolled) 76 - CFI_STARTPROC 77 83 ASM_STAC 78 84 cmpl $8,%edx 79 85 jb 20f /* less then 8 bytes, go to byte copy loop */ ··· 154 162 _ASM_EXTABLE(19b,40b) 155 163 _ASM_EXTABLE(21b,50b) 156 164 _ASM_EXTABLE(22b,50b) 157 - CFI_ENDPROC 158 165 ENDPROC(copy_user_generic_unrolled) 159 166 160 167 /* Some CPUs run faster using the string copy instructions. ··· 175 184 * eax uncopied bytes or 0 if successful. 
176 185 */ 177 186 ENTRY(copy_user_generic_string) 178 - CFI_STARTPROC 179 187 ASM_STAC 180 188 cmpl $8,%edx 181 189 jb 2f /* less than 8 bytes, go to byte copy loop */ ··· 199 209 200 210 _ASM_EXTABLE(1b,11b) 201 211 _ASM_EXTABLE(3b,12b) 202 - CFI_ENDPROC 203 212 ENDPROC(copy_user_generic_string) 204 213 205 214 /* ··· 214 225 * eax uncopied bytes or 0 if successful. 215 226 */ 216 227 ENTRY(copy_user_enhanced_fast_string) 217 - CFI_STARTPROC 218 228 ASM_STAC 219 229 movl %edx,%ecx 220 230 1: rep ··· 228 240 .previous 229 241 230 242 _ASM_EXTABLE(1b,12b) 231 - CFI_ENDPROC 232 243 ENDPROC(copy_user_enhanced_fast_string) 233 244 234 245 /* ··· 235 248 * This will force destination/source out of cache for more performance. 236 249 */ 237 250 ENTRY(__copy_user_nocache) 238 - CFI_STARTPROC 239 251 ASM_STAC 240 252 cmpl $8,%edx 241 253 jb 20f /* less then 8 bytes, go to byte copy loop */ ··· 318 332 _ASM_EXTABLE(19b,40b) 319 333 _ASM_EXTABLE(21b,50b) 320 334 _ASM_EXTABLE(22b,50b) 321 - CFI_ENDPROC 322 335 ENDPROC(__copy_user_nocache)
-17
arch/x86/lib/csum-copy_64.S
··· 6 6 * for more details. No warranty for anything given at all. 7 7 */ 8 8 #include <linux/linkage.h> 9 - #include <asm/dwarf2.h> 10 9 #include <asm/errno.h> 11 10 #include <asm/asm.h> 12 11 ··· 46 47 47 48 48 49 ENTRY(csum_partial_copy_generic) 49 - CFI_STARTPROC 50 50 cmpl $3*64, %edx 51 51 jle .Lignore 52 52 53 53 .Lignore: 54 54 subq $7*8, %rsp 55 - CFI_ADJUST_CFA_OFFSET 7*8 56 55 movq %rbx, 2*8(%rsp) 57 - CFI_REL_OFFSET rbx, 2*8 58 56 movq %r12, 3*8(%rsp) 59 - CFI_REL_OFFSET r12, 3*8 60 57 movq %r14, 4*8(%rsp) 61 - CFI_REL_OFFSET r14, 4*8 62 58 movq %r13, 5*8(%rsp) 63 - CFI_REL_OFFSET r13, 5*8 64 59 movq %rbp, 6*8(%rsp) 65 - CFI_REL_OFFSET rbp, 6*8 66 60 67 61 movq %r8, (%rsp) 68 62 movq %r9, 1*8(%rsp) ··· 198 206 addl %ebx, %eax 199 207 adcl %r9d, %eax /* carry */ 200 208 201 - CFI_REMEMBER_STATE 202 209 .Lende: 203 210 movq 2*8(%rsp), %rbx 204 - CFI_RESTORE rbx 205 211 movq 3*8(%rsp), %r12 206 - CFI_RESTORE r12 207 212 movq 4*8(%rsp), %r14 208 - CFI_RESTORE r14 209 213 movq 5*8(%rsp), %r13 210 - CFI_RESTORE r13 211 214 movq 6*8(%rsp), %rbp 212 - CFI_RESTORE rbp 213 215 addq $7*8, %rsp 214 - CFI_ADJUST_CFA_OFFSET -7*8 215 216 ret 216 - CFI_RESTORE_STATE 217 217 218 218 /* Exception handlers. Very simple, zeroing is done in the wrappers */ 219 219 .Lbad_source: ··· 221 237 jz .Lende 222 238 movl $-EFAULT, (%rax) 223 239 jmp .Lende 224 - CFI_ENDPROC 225 240 ENDPROC(csum_partial_copy_generic)
-13
arch/x86/lib/getuser.S
··· 26 26 */ 27 27 28 28 #include <linux/linkage.h> 29 - #include <asm/dwarf2.h> 30 29 #include <asm/page_types.h> 31 30 #include <asm/errno.h> 32 31 #include <asm/asm-offsets.h> ··· 35 36 36 37 .text 37 38 ENTRY(__get_user_1) 38 - CFI_STARTPROC 39 39 GET_THREAD_INFO(%_ASM_DX) 40 40 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX 41 41 jae bad_get_user ··· 43 45 xor %eax,%eax 44 46 ASM_CLAC 45 47 ret 46 - CFI_ENDPROC 47 48 ENDPROC(__get_user_1) 48 49 49 50 ENTRY(__get_user_2) 50 - CFI_STARTPROC 51 51 add $1,%_ASM_AX 52 52 jc bad_get_user 53 53 GET_THREAD_INFO(%_ASM_DX) ··· 56 60 xor %eax,%eax 57 61 ASM_CLAC 58 62 ret 59 - CFI_ENDPROC 60 63 ENDPROC(__get_user_2) 61 64 62 65 ENTRY(__get_user_4) 63 - CFI_STARTPROC 64 66 add $3,%_ASM_AX 65 67 jc bad_get_user 66 68 GET_THREAD_INFO(%_ASM_DX) ··· 69 75 xor %eax,%eax 70 76 ASM_CLAC 71 77 ret 72 - CFI_ENDPROC 73 78 ENDPROC(__get_user_4) 74 79 75 80 ENTRY(__get_user_8) 76 - CFI_STARTPROC 77 81 #ifdef CONFIG_X86_64 78 82 add $7,%_ASM_AX 79 83 jc bad_get_user ··· 96 104 ASM_CLAC 97 105 ret 98 106 #endif 99 - CFI_ENDPROC 100 107 ENDPROC(__get_user_8) 101 108 102 109 103 110 bad_get_user: 104 - CFI_STARTPROC 105 111 xor %edx,%edx 106 112 mov $(-EFAULT),%_ASM_AX 107 113 ASM_CLAC 108 114 ret 109 - CFI_ENDPROC 110 115 END(bad_get_user) 111 116 112 117 #ifdef CONFIG_X86_32 113 118 bad_get_user_8: 114 - CFI_STARTPROC 115 119 xor %edx,%edx 116 120 xor %ecx,%ecx 117 121 mov $(-EFAULT),%_ASM_AX 118 122 ASM_CLAC 119 123 ret 120 - CFI_ENDPROC 121 124 END(bad_get_user_8) 122 125 #endif 123 126
-3
arch/x86/lib/iomap_copy_64.S
··· 16 16 */ 17 17 18 18 #include <linux/linkage.h> 19 - #include <asm/dwarf2.h> 20 19 21 20 /* 22 21 * override generic version in lib/iomap_copy.c 23 22 */ 24 23 ENTRY(__iowrite32_copy) 25 - CFI_STARTPROC 26 24 movl %edx,%ecx 27 25 rep movsd 28 26 ret 29 - CFI_ENDPROC 30 27 ENDPROC(__iowrite32_copy)
-3
arch/x86/lib/memcpy_64.S
··· 2 2 3 3 #include <linux/linkage.h> 4 4 #include <asm/cpufeature.h> 5 - #include <asm/dwarf2.h> 6 5 #include <asm/alternative-asm.h> 7 6 8 7 /* ··· 52 53 ENDPROC(memcpy_erms) 53 54 54 55 ENTRY(memcpy_orig) 55 - CFI_STARTPROC 56 56 movq %rdi, %rax 57 57 58 58 cmpq $0x20, %rdx ··· 176 178 177 179 .Lend: 178 180 retq 179 - CFI_ENDPROC 180 181 ENDPROC(memcpy_orig)
-3
arch/x86/lib/memmove_64.S
··· 6 6 * - Copyright 2011 Fenghua Yu <fenghua.yu@intel.com> 7 7 */ 8 8 #include <linux/linkage.h> 9 - #include <asm/dwarf2.h> 10 9 #include <asm/cpufeature.h> 11 10 #include <asm/alternative-asm.h> 12 11 ··· 26 27 27 28 ENTRY(memmove) 28 29 ENTRY(__memmove) 29 - CFI_STARTPROC 30 30 31 31 /* Handle more 32 bytes in loop */ 32 32 mov %rdi, %rax ··· 205 207 movb %r11b, (%rdi) 206 208 13: 207 209 retq 208 - CFI_ENDPROC 209 210 ENDPROC(__memmove) 210 211 ENDPROC(memmove)
-5
arch/x86/lib/memset_64.S
··· 1 1 /* Copyright 2002 Andi Kleen, SuSE Labs */ 2 2 3 3 #include <linux/linkage.h> 4 - #include <asm/dwarf2.h> 5 4 #include <asm/cpufeature.h> 6 5 #include <asm/alternative-asm.h> 7 6 ··· 65 66 ENDPROC(memset_erms) 66 67 67 68 ENTRY(memset_orig) 68 - CFI_STARTPROC 69 69 movq %rdi,%r10 70 70 71 71 /* expand byte value */ ··· 76 78 movl %edi,%r9d 77 79 andl $7,%r9d 78 80 jnz .Lbad_alignment 79 - CFI_REMEMBER_STATE 80 81 .Lafter_bad_alignment: 81 82 82 83 movq %rdx,%rcx ··· 125 128 movq %r10,%rax 126 129 ret 127 130 128 - CFI_RESTORE_STATE 129 131 .Lbad_alignment: 130 132 cmpq $7,%rdx 131 133 jbe .Lhandle_7 ··· 135 139 subq %r8,%rdx 136 140 jmp .Lafter_bad_alignment 137 141 .Lfinal: 138 - CFI_ENDPROC 139 142 ENDPROC(memset_orig)
+17 -27
arch/x86/lib/msr-reg.S
··· 1 1 #include <linux/linkage.h> 2 2 #include <linux/errno.h> 3 - #include <asm/dwarf2.h> 4 3 #include <asm/asm.h> 5 4 #include <asm/msr.h> 6 5 ··· 12 13 */ 13 14 .macro op_safe_regs op 14 15 ENTRY(\op\()_safe_regs) 15 - CFI_STARTPROC 16 - pushq_cfi_reg rbx 17 - pushq_cfi_reg rbp 16 + pushq %rbx 17 + pushq %rbp 18 18 movq %rdi, %r10 /* Save pointer */ 19 19 xorl %r11d, %r11d /* Return value */ 20 20 movl (%rdi), %eax ··· 23 25 movl 20(%rdi), %ebp 24 26 movl 24(%rdi), %esi 25 27 movl 28(%rdi), %edi 26 - CFI_REMEMBER_STATE 27 28 1: \op 28 29 2: movl %eax, (%r10) 29 30 movl %r11d, %eax /* Return value */ ··· 32 35 movl %ebp, 20(%r10) 33 36 movl %esi, 24(%r10) 34 37 movl %edi, 28(%r10) 35 - popq_cfi_reg rbp 36 - popq_cfi_reg rbx 38 + popq %rbp 39 + popq %rbx 37 40 ret 38 41 3: 39 - CFI_RESTORE_STATE 40 42 movl $-EIO, %r11d 41 43 jmp 2b 42 44 43 45 _ASM_EXTABLE(1b, 3b) 44 - CFI_ENDPROC 45 46 ENDPROC(\op\()_safe_regs) 46 47 .endm 47 48 ··· 47 52 48 53 .macro op_safe_regs op 49 54 ENTRY(\op\()_safe_regs) 50 - CFI_STARTPROC 51 - pushl_cfi_reg ebx 52 - pushl_cfi_reg ebp 53 - pushl_cfi_reg esi 54 - pushl_cfi_reg edi 55 - pushl_cfi $0 /* Return value */ 56 - pushl_cfi %eax 55 + pushl %ebx 56 + pushl %ebp 57 + pushl %esi 58 + pushl %edi 59 + pushl $0 /* Return value */ 60 + pushl %eax 57 61 movl 4(%eax), %ecx 58 62 movl 8(%eax), %edx 59 63 movl 12(%eax), %ebx ··· 60 66 movl 24(%eax), %esi 61 67 movl 28(%eax), %edi 62 68 movl (%eax), %eax 63 - CFI_REMEMBER_STATE 64 69 1: \op 65 - 2: pushl_cfi %eax 70 + 2: pushl %eax 66 71 movl 4(%esp), %eax 67 - popl_cfi (%eax) 72 + popl (%eax) 68 73 addl $4, %esp 69 - CFI_ADJUST_CFA_OFFSET -4 70 74 movl %ecx, 4(%eax) 71 75 movl %edx, 8(%eax) 72 76 movl %ebx, 12(%eax) 73 77 movl %ebp, 20(%eax) 74 78 movl %esi, 24(%eax) 75 79 movl %edi, 28(%eax) 76 - popl_cfi %eax 77 - popl_cfi_reg edi 78 - popl_cfi_reg esi 79 - popl_cfi_reg ebp 80 - popl_cfi_reg ebx 80 + popl %eax 81 + popl %edi 82 + popl %esi 83 + popl %ebp 84 + popl %ebx 81 85 ret 82 86 3: 
83 - CFI_RESTORE_STATE 84 87 movl $-EIO, 4(%esp) 85 88 jmp 2b 86 89 87 90 _ASM_EXTABLE(1b, 3b) 88 - CFI_ENDPROC 89 91 ENDPROC(\op\()_safe_regs) 90 92 .endm 91 93
+2 -6
arch/x86/lib/putuser.S
··· 11 11 * return value. 12 12 */ 13 13 #include <linux/linkage.h> 14 - #include <asm/dwarf2.h> 15 14 #include <asm/thread_info.h> 16 15 #include <asm/errno.h> 17 16 #include <asm/asm.h> ··· 29 30 * as they get called from within inline assembly. 30 31 */ 31 32 32 - #define ENTER CFI_STARTPROC ; \ 33 - GET_THREAD_INFO(%_ASM_BX) 33 + #define ENTER GET_THREAD_INFO(%_ASM_BX) 34 34 #define EXIT ASM_CLAC ; \ 35 - ret ; \ 36 - CFI_ENDPROC 35 + ret 37 36 38 37 .text 39 38 ENTRY(__put_user_1) ··· 84 87 ENDPROC(__put_user_8) 85 88 86 89 bad_put_user: 87 - CFI_STARTPROC 88 90 movl $-EFAULT,%eax 89 91 EXIT 90 92 END(bad_put_user)
+20 -29
arch/x86/lib/rwsem.S
··· 15 15 16 16 #include <linux/linkage.h> 17 17 #include <asm/alternative-asm.h> 18 - #include <asm/dwarf2.h> 19 18 20 19 #define __ASM_HALF_REG(reg) __ASM_SEL(reg, e##reg) 21 20 #define __ASM_HALF_SIZE(inst) __ASM_SEL(inst##w, inst##l) ··· 33 34 */ 34 35 35 36 #define save_common_regs \ 36 - pushl_cfi_reg ecx 37 + pushl %ecx 37 38 38 39 #define restore_common_regs \ 39 - popl_cfi_reg ecx 40 + popl %ecx 40 41 41 42 /* Avoid uglifying the argument copying x86-64 needs to do. */ 42 43 .macro movq src, dst ··· 63 64 */ 64 65 65 66 #define save_common_regs \ 66 - pushq_cfi_reg rdi; \ 67 - pushq_cfi_reg rsi; \ 68 - pushq_cfi_reg rcx; \ 69 - pushq_cfi_reg r8; \ 70 - pushq_cfi_reg r9; \ 71 - pushq_cfi_reg r10; \ 72 - pushq_cfi_reg r11 67 + pushq %rdi; \ 68 + pushq %rsi; \ 69 + pushq %rcx; \ 70 + pushq %r8; \ 71 + pushq %r9; \ 72 + pushq %r10; \ 73 + pushq %r11 73 74 74 75 #define restore_common_regs \ 75 - popq_cfi_reg r11; \ 76 - popq_cfi_reg r10; \ 77 - popq_cfi_reg r9; \ 78 - popq_cfi_reg r8; \ 79 - popq_cfi_reg rcx; \ 80 - popq_cfi_reg rsi; \ 81 - popq_cfi_reg rdi 76 + popq %r11; \ 77 + popq %r10; \ 78 + popq %r9; \ 79 + popq %r8; \ 80 + popq %rcx; \ 81 + popq %rsi; \ 82 + popq %rdi 82 83 83 84 #endif 84 85 85 86 /* Fix up special calling conventions */ 86 87 ENTRY(call_rwsem_down_read_failed) 87 - CFI_STARTPROC 88 88 save_common_regs 89 - __ASM_SIZE(push,_cfi_reg) __ASM_REG(dx) 89 + __ASM_SIZE(push,) %__ASM_REG(dx) 90 90 movq %rax,%rdi 91 91 call rwsem_down_read_failed 92 - __ASM_SIZE(pop,_cfi_reg) __ASM_REG(dx) 92 + __ASM_SIZE(pop,) %__ASM_REG(dx) 93 93 restore_common_regs 94 94 ret 95 - CFI_ENDPROC 96 95 ENDPROC(call_rwsem_down_read_failed) 97 96 98 97 ENTRY(call_rwsem_down_write_failed) 99 - CFI_STARTPROC 100 98 save_common_regs 101 99 movq %rax,%rdi 102 100 call rwsem_down_write_failed 103 101 restore_common_regs 104 102 ret 105 - CFI_ENDPROC 106 103 ENDPROC(call_rwsem_down_write_failed) 107 104 108 105 ENTRY(call_rwsem_wake) 109 - CFI_STARTPROC 110 106 /* do 
nothing if still outstanding active readers */ 111 107 __ASM_HALF_SIZE(dec) %__ASM_HALF_REG(dx) 112 108 jnz 1f ··· 110 116 call rwsem_wake 111 117 restore_common_regs 112 118 1: ret 113 - CFI_ENDPROC 114 119 ENDPROC(call_rwsem_wake) 115 120 116 121 ENTRY(call_rwsem_downgrade_wake) 117 - CFI_STARTPROC 118 122 save_common_regs 119 - __ASM_SIZE(push,_cfi_reg) __ASM_REG(dx) 123 + __ASM_SIZE(push,) %__ASM_REG(dx) 120 124 movq %rax,%rdi 121 125 call rwsem_downgrade_wake 122 - __ASM_SIZE(pop,_cfi_reg) __ASM_REG(dx) 126 + __ASM_SIZE(pop,) %__ASM_REG(dx) 123 127 restore_common_regs 124 128 ret 125 - CFI_ENDPROC 126 129 ENDPROC(call_rwsem_downgrade_wake)
+6 -9
arch/x86/lib/thunk_32.S
··· 6 6 */ 7 7 #include <linux/linkage.h> 8 8 #include <asm/asm.h> 9 - #include <asm/dwarf2.h> 10 9 11 10 /* put return address in eax (arg1) */ 12 11 .macro THUNK name, func, put_ret_addr_in_eax=0 13 12 .globl \name 14 13 \name: 15 - CFI_STARTPROC 16 - pushl_cfi_reg eax 17 - pushl_cfi_reg ecx 18 - pushl_cfi_reg edx 14 + pushl %eax 15 + pushl %ecx 16 + pushl %edx 19 17 20 18 .if \put_ret_addr_in_eax 21 19 /* Place EIP in the arg1 */ ··· 21 23 .endif 22 24 23 25 call \func 24 - popl_cfi_reg edx 25 - popl_cfi_reg ecx 26 - popl_cfi_reg eax 26 + popl %edx 27 + popl %ecx 28 + popl %eax 27 29 ret 28 - CFI_ENDPROC 29 30 _ASM_NOKPROBE(\name) 30 31 .endm 31 32
+19 -25
arch/x86/lib/thunk_64.S
··· 6 6 * Subject to the GNU public license, v.2. No warranty of any kind. 7 7 */ 8 8 #include <linux/linkage.h> 9 - #include <asm/dwarf2.h> 10 9 #include <asm/calling.h> 11 10 #include <asm/asm.h> 12 11 ··· 13 14 .macro THUNK name, func, put_ret_addr_in_rdi=0 14 15 .globl \name 15 16 \name: 16 - CFI_STARTPROC 17 17 18 18 /* this one pushes 9 elems, the next one would be %rIP */ 19 - pushq_cfi_reg rdi 20 - pushq_cfi_reg rsi 21 - pushq_cfi_reg rdx 22 - pushq_cfi_reg rcx 23 - pushq_cfi_reg rax 24 - pushq_cfi_reg r8 25 - pushq_cfi_reg r9 26 - pushq_cfi_reg r10 27 - pushq_cfi_reg r11 19 + pushq %rdi 20 + pushq %rsi 21 + pushq %rdx 22 + pushq %rcx 23 + pushq %rax 24 + pushq %r8 25 + pushq %r9 26 + pushq %r10 27 + pushq %r11 28 28 29 29 .if \put_ret_addr_in_rdi 30 30 /* 9*8(%rsp) is return addr on stack */ 31 - movq_cfi_restore 9*8, rdi 31 + movq 9*8(%rsp), %rdi 32 32 .endif 33 33 34 34 call \func 35 35 jmp restore 36 - CFI_ENDPROC 37 36 _ASM_NOKPROBE(\name) 38 37 .endm 39 38 ··· 54 57 #if defined(CONFIG_TRACE_IRQFLAGS) \ 55 58 || defined(CONFIG_DEBUG_LOCK_ALLOC) \ 56 59 || defined(CONFIG_PREEMPT) 57 - CFI_STARTPROC 58 - CFI_ADJUST_CFA_OFFSET 9*8 59 60 restore: 60 - popq_cfi_reg r11 61 - popq_cfi_reg r10 62 - popq_cfi_reg r9 63 - popq_cfi_reg r8 64 - popq_cfi_reg rax 65 - popq_cfi_reg rcx 66 - popq_cfi_reg rdx 67 - popq_cfi_reg rsi 68 - popq_cfi_reg rdi 61 + popq %r11 62 + popq %r10 63 + popq %r9 64 + popq %r8 65 + popq %rax 66 + popq %rcx 67 + popq %rdx 68 + popq %rsi 69 + popq %rdi 69 70 ret 70 - CFI_ENDPROC 71 71 _ASM_NOKPROBE(restore) 72 72 #endif
-1
arch/x86/net/bpf_jit.S
··· 8 8 * of the License. 9 9 */ 10 10 #include <linux/linkage.h> 11 - #include <asm/dwarf2.h> 12 11 13 12 /* 14 13 * Calling convention :