Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/asm/entry/64: Save all regs on interrupt entry

To prepare for the big rewrite of the error and interrupt exit
paths, we will need pt_regs completely filled in.

It's already completely filled in when error_exit runs, so rearrange
interrupt handling to match it. This will slow down interrupt
handling very slightly (eight instructions), but the
simplification it enables will be more than worth it.

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Denys Vlasenko <vda.linux@googlemail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: paulmck@linux.vnet.ibm.com
Link: http://lkml.kernel.org/r/d8a766a7f558b30e6e01352854628a2d9943460c.1435952415.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Authored by Andy Lutomirski, committed by Ingo Molnar
commit ff467594 (parent 29ea1b25)

+9 -23
-3
arch/x86/entry/calling.h
@@ -135,9 +135,6 @@
 	movq %rbp, 4*8+\offset(%rsp)
 	movq %rbx, 5*8+\offset(%rsp)
 	.endm
-	.macro SAVE_EXTRA_REGS_RBP offset=0
-	movq %rbp, 4*8+\offset(%rsp)
-	.endm
 
 	.macro RESTORE_EXTRA_REGS offset=0
 	movq 0*8+\offset(%rsp), %r15
+9 -20
arch/x86/entry/entry_64.S
@@ -502,21 +502,13 @@
 /* 0(%rsp): ~(interrupt number) */
 .macro interrupt func
 	cld
-	/*
-	 * Since nothing in interrupt handling code touches r12...r15 members
-	 * of "struct pt_regs", and since interrupts can nest, we can save
-	 * four stack slots and simultaneously provide
-	 * an unwind-friendly stack layout by saving "truncated" pt_regs
-	 * exactly up to rbp slot, without these members.
-	 */
-	ALLOC_PT_GPREGS_ON_STACK -RBP
-	SAVE_C_REGS -RBP
-	/* this goes to 0(%rsp) for unwinder, not for saving the value: */
-	SAVE_EXTRA_REGS_RBP -RBP
+	ALLOC_PT_GPREGS_ON_STACK
+	SAVE_C_REGS
+	SAVE_EXTRA_REGS
 
-	leaq -RBP(%rsp), %rdi	/* arg1 for \func (pointer to pt_regs) */
+	movq %rsp,%rdi	/* arg1 for \func (pointer to pt_regs) */
 
-	testb $3, CS-RBP(%rsp)
+	testb $3, CS(%rsp)
 	jz 1f
 	SWAPGS
 1:
@@ -545,9 +553,7 @@
 	decl PER_CPU_VAR(irq_count)
 
 	/* Restore saved previous stack */
-	popq %rsi
-	/* return code expects complete pt_regs - adjust rsp accordingly: */
-	leaq -RBP(%rsi), %rsp
+	popq %rsp
 
 	testb $3, CS(%rsp)
 	jz retint_kernel
@@ -570,7 +580,7 @@
 	TRACE_IRQS_IRETQ
 
 	SWAPGS
-	jmp restore_c_regs_and_iret
+	jmp restore_regs_and_iret
 
 /* Returning to kernel space */
 retint_kernel:
@@ -594,6 +604,8 @@
  * At this label, code paths which return to kernel and to user,
  * which come from interrupts/exception and from syscalls, merge.
  */
+restore_regs_and_iret:
+	RESTORE_EXTRA_REGS
 restore_c_regs_and_iret:
 	RESTORE_C_REGS
 	REMOVE_PT_GPREGS_FROM_STACK 8
@@ -666,12 +674,10 @@
 	jz retint_swapgs
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_NONE)
-	SAVE_EXTRA_REGS
 	movq $-1, ORIG_RAX(%rsp)
 	xorl %esi, %esi		/* oldset */
 	movq %rsp, %rdi		/* &pt_regs */
 	call do_notify_resume
-	RESTORE_EXTRA_REGS
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
 	GET_THREAD_INFO(%rcx)
@@ -1150,7 +1160,6 @@
  */
 ENTRY(error_exit)
 	movl %ebx, %eax
-	RESTORE_EXTRA_REGS
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
 	testl %eax, %eax