1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * linux/arch/x86_64/entry.S
4 *
5 * Copyright (C) 1991, 1992 Linus Torvalds
6 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
7 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
8 *
9 * entry.S contains the system-call and fault low-level handling routines.
10 *
11 * Some of this is documented in Documentation/x86/entry_64.rst
12 *
13 * A note on terminology:
14 * - iret frame: Architecture defined interrupt frame from SS to RIP
15 * at the top of the kernel process stack.
16 *
17 * Some macro usage:
 18	 * - SYM_FUNC_START/END: Define functions in the symbol table.
19 * - idtentry: Define exception entry points.
20 */
21#include <linux/linkage.h>
22#include <asm/segment.h>
23#include <asm/cache.h>
24#include <asm/errno.h>
25#include <asm/asm-offsets.h>
26#include <asm/msr.h>
27#include <asm/unistd.h>
28#include <asm/thread_info.h>
29#include <asm/hw_irq.h>
30#include <asm/page_types.h>
31#include <asm/irqflags.h>
32#include <asm/paravirt.h>
33#include <asm/percpu.h>
34#include <asm/asm.h>
35#include <asm/smap.h>
36#include <asm/pgtable_types.h>
37#include <asm/export.h>
38#include <asm/frame.h>
39#include <asm/trapnr.h>
40#include <asm/nospec-branch.h>
41#include <asm/fsgsbase.h>
42#include <linux/err.h>
43
44#include "calling.h"
45
46.code64
47.section .entry.text, "ax"
48
49/*
50 * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
51 *
52 * This is the only entry point used for 64-bit system calls. The
53 * hardware interface is reasonably well designed and the register to
54 * argument mapping Linux uses fits well with the registers that are
55 * available when SYSCALL is used.
56 *
57 * SYSCALL instructions can be found inlined in libc implementations as
58 * well as some other programs and libraries. There are also a handful
59 * of SYSCALL instructions in the vDSO used, for example, as a
60 * clock_gettimeofday fallback.
61 *
62 * 64-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
63 * then loads new ss, cs, and rip from previously programmed MSRs.
64 * rflags gets masked by a value from another MSR (so CLD and CLAC
65 * are not needed). SYSCALL does not save anything on the stack
66 * and does not change rsp.
67 *
68 * Registers on entry:
69 * rax system call number
70 * rcx return address
71 * r11 saved rflags (note: r11 is callee-clobbered register in C ABI)
72 * rdi arg0
73 * rsi arg1
74 * rdx arg2
75 * r10 arg3 (needs to be moved to rcx to conform to C ABI)
76 * r8 arg4
77 * r9 arg5
78 * (note: r12-r15, rbp, rbx are callee-preserved in C ABI)
79 *
80 * Only called from user space.
81 *
 82	 * When the user can change pt_regs->foo, always force IRET. That is because
 83	 * it deals with non-canonical addresses better. SYSRET has trouble
84 * with them due to bugs in both AMD and Intel CPUs.
85 */
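/*
 * Purely illustrative, not part of the kernel: a minimal user-space sketch
 * of the convention above ("buf" and "len" are placeholder symbols), roughly
 * what a libc write(1, buf, len) wrapper boils down to:
 *
 *	movq	$1, %rax		# __NR_write on x86-64
 *	movq	$1, %rdi		# arg0: fd
 *	leaq	buf(%rip), %rsi		# arg1: buffer
 *	movq	$len, %rdx		# arg2: count
 *	syscall				# rcx/r11 are clobbered by the CPU
 *	# result (or -errno) comes back in %rax
 */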
86
87SYM_CODE_START(entry_SYSCALL_64)
88 UNWIND_HINT_EMPTY
89 ENDBR
90
91 swapgs
92 /* tss.sp2 is scratch space. */
93 movq %rsp, PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
94 SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
95 movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
96
97SYM_INNER_LABEL(entry_SYSCALL_64_safe_stack, SYM_L_GLOBAL)
98 ANNOTATE_NOENDBR
99
100 /* Construct struct pt_regs on stack */
101 pushq $__USER_DS /* pt_regs->ss */
102 pushq PER_CPU_VAR(cpu_tss_rw + TSS_sp2) /* pt_regs->sp */
103 pushq %r11 /* pt_regs->flags */
104 pushq $__USER_CS /* pt_regs->cs */
105 pushq %rcx /* pt_regs->ip */
106SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL)
107 pushq %rax /* pt_regs->orig_ax */
108
109 PUSH_AND_CLEAR_REGS rax=$-ENOSYS
110
111 /* IRQs are off. */
112 movq %rsp, %rdi
 113	/* Sign-extend the lower 32 bits, as syscall numbers are treated as int */
114 movslq %eax, %rsi
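	/*
	 * Illustrative: a bogus nr of 0xffffffff in %eax is passed on as -1,
	 * not 4294967295, so no syscall is dispatched and the user sees the
	 * -ENOSYS preloaded into the saved RAX above.
	 */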
115 call do_syscall_64 /* returns with IRQs disabled */
116
117 /*
118 * Try to use SYSRET instead of IRET if we're returning to
119 * a completely clean 64-bit userspace context. If we're not,
120 * go to the slow exit path.
121 * In the Xen PV case we must use iret anyway.
122 */
123
124 ALTERNATIVE "", "jmp swapgs_restore_regs_and_return_to_usermode", \
125 X86_FEATURE_XENPV
126
127 movq RCX(%rsp), %rcx
128 movq RIP(%rsp), %r11
129
130 cmpq %rcx, %r11 /* SYSRET requires RCX == RIP */
131 jne swapgs_restore_regs_and_return_to_usermode
132
133 /*
134 * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP
135 * in kernel space. This essentially lets the user take over
136 * the kernel, since userspace controls RSP.
137 *
138 * If width of "canonical tail" ever becomes variable, this will need
139 * to be updated to remain correct on both old and new CPUs.
140 *
141 * Change top bits to match most significant bit (47th or 56th bit
142 * depending on paging mode) in the address.
143 */
144#ifdef CONFIG_X86_5LEVEL
145 ALTERNATIVE "shl $(64 - 48), %rcx; sar $(64 - 48), %rcx", \
146 "shl $(64 - 57), %rcx; sar $(64 - 57), %rcx", X86_FEATURE_LA57
147#else
148 shl $(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
149 sar $(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
150#endif
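	/*
	 * Worked example (4-level paging, __VIRTUAL_MASK_SHIFT == 47):
	 * "shl $16; sar $16" copies bit 47 into bits 63:48, so a canonical
	 * RCX such as 0x00007fffffffe000 is left unchanged, while e.g.
	 * 0x00008abcdef01000 becomes 0xffff8abcdef01000, the compare below
	 * fails, and we take the IRET path instead of SYSRET.
	 */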
151
152 /* If this changed %rcx, it was not canonical */
153 cmpq %rcx, %r11
154 jne swapgs_restore_regs_and_return_to_usermode
155
156 cmpq $__USER_CS, CS(%rsp) /* CS must match SYSRET */
157 jne swapgs_restore_regs_and_return_to_usermode
158
159 movq R11(%rsp), %r11
160 cmpq %r11, EFLAGS(%rsp) /* R11 == RFLAGS */
161 jne swapgs_restore_regs_and_return_to_usermode
162
163 /*
164 * SYSCALL clears RF when it saves RFLAGS in R11 and SYSRET cannot
165 * restore RF properly. If the slowpath sets it for whatever reason, we
166 * need to restore it correctly.
167 *
168 * SYSRET can restore TF, but unlike IRET, restoring TF results in a
169 * trap from userspace immediately after SYSRET. This would cause an
170 * infinite loop whenever #DB happens with register state that satisfies
171 * the opportunistic SYSRET conditions. For example, single-stepping
172 * this user code:
173 *
174 * movq $stuck_here, %rcx
175 * pushfq
176 * popq %r11
177 * stuck_here:
178 *
179 * would never get past 'stuck_here'.
180 */
181 testq $(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11
182 jnz swapgs_restore_regs_and_return_to_usermode
183
184 /* nothing to check for RSP */
185
186 cmpq $__USER_DS, SS(%rsp) /* SS must match SYSRET */
187 jne swapgs_restore_regs_and_return_to_usermode
188
189 /*
190 * We win! This label is here just for ease of understanding
191 * perf profiles. Nothing jumps here.
192 */
193syscall_return_via_sysret:
194 /* rcx and r11 are already restored (see code above) */
195 POP_REGS pop_rdi=0 skip_r11rcx=1
196
197 /*
198 * Now all regs are restored except RSP and RDI.
199 * Save old stack pointer and switch to trampoline stack.
200 */
201 movq %rsp, %rdi
202 movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
203 UNWIND_HINT_EMPTY
204
205 pushq RSP-RDI(%rdi) /* RSP */
206 pushq (%rdi) /* RDI */
207
208 /*
209 * We are on the trampoline stack. All regs except RDI are live.
210 * We can do future final exit work right here.
211 */
212 STACKLEAK_ERASE_NOCLOBBER
213
214 SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
215
216 popq %rdi
217 popq %rsp
218 swapgs
219 sysretq
220SYM_CODE_END(entry_SYSCALL_64)
221
222/*
223 * %rdi: prev task
224 * %rsi: next task
225 */
226.pushsection .text, "ax"
227SYM_FUNC_START(__switch_to_asm)
228 /*
229 * Save callee-saved registers
230 * This must match the order in inactive_task_frame
231 */
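	/*
	 * After the six pushes below the old task's stack holds, from the
	 * new %rsp upwards: r15, r14, r13, r12, rbx, rbp and finally the
	 * return address of this call -- the layout the comment above
	 * refers to.
	 */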
232 pushq %rbp
233 pushq %rbx
234 pushq %r12
235 pushq %r13
236 pushq %r14
237 pushq %r15
238
239 /* switch stack */
240 movq %rsp, TASK_threadsp(%rdi)
241 movq TASK_threadsp(%rsi), %rsp
242
243#ifdef CONFIG_STACKPROTECTOR
244 movq TASK_stack_canary(%rsi), %rbx
245 movq %rbx, PER_CPU_VAR(fixed_percpu_data) + stack_canary_offset
246#endif
247
248#ifdef CONFIG_RETPOLINE
249 /*
250 * When switching from a shallower to a deeper call stack
251 * the RSB may either underflow or use entries populated
252 * with userspace addresses. On CPUs where those concerns
253 * exist, overwrite the RSB with entries which capture
254 * speculative execution to prevent attack.
255 */
256 FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
257#endif
258
259 /* restore callee-saved registers */
260 popq %r15
261 popq %r14
262 popq %r13
263 popq %r12
264 popq %rbx
265 popq %rbp
266
267 jmp __switch_to
268SYM_FUNC_END(__switch_to_asm)
269.popsection
270
271/*
272 * A newly forked process directly context switches into this address.
273 *
274 * rax: prev task we switched from
275 * rbx: kernel thread func (NULL for user thread)
276 * r12: kernel thread arg
277 */
278.pushsection .text, "ax"
279SYM_CODE_START(ret_from_fork)
280 UNWIND_HINT_EMPTY
281 ANNOTATE_NOENDBR // copy_thread
282 movq %rax, %rdi
283 call schedule_tail /* rdi: 'prev' task parameter */
284
285 testq %rbx, %rbx /* from kernel_thread? */
286 jnz 1f /* kernel threads are uncommon */
287
2882:
289 UNWIND_HINT_REGS
290 movq %rsp, %rdi
291 call syscall_exit_to_user_mode /* returns with IRQs disabled */
292 jmp swapgs_restore_regs_and_return_to_usermode
293
2941:
295 /* kernel thread */
296 UNWIND_HINT_EMPTY
297 movq %r12, %rdi
298 CALL_NOSPEC rbx
299 /*
300 * A kernel thread is allowed to return here after successfully
301 * calling kernel_execve(). Exit to userspace to complete the execve()
302 * syscall.
303 */
304 movq $0, RAX(%rsp)
305 jmp 2b
306SYM_CODE_END(ret_from_fork)
307.popsection
308
309.macro DEBUG_ENTRY_ASSERT_IRQS_OFF
310#ifdef CONFIG_DEBUG_ENTRY
311 pushq %rax
312 SAVE_FLAGS
313 testl $X86_EFLAGS_IF, %eax
314 jz .Lokay_\@
315 ud2
316.Lokay_\@:
317 popq %rax
318#endif
319.endm
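/* Used by error_return below to assert that IRQs are still off on exit. */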
320
321/**
322 * idtentry_body - Macro to emit code calling the C function
323 * @cfunc: C function to be called
324 * @has_error_code: Hardware pushed error code on stack
325 */
326.macro idtentry_body cfunc has_error_code:req
327
328 call error_entry
329 UNWIND_HINT_REGS
330
331 movq %rsp, %rdi /* pt_regs pointer into 1st argument*/
332
333 .if \has_error_code == 1
334 movq ORIG_RAX(%rsp), %rsi /* get error code into 2nd argument*/
335 movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */
336 .endif
337
338 call \cfunc
339
340 /* For some configurations \cfunc ends up being a noreturn. */
341 REACHABLE
342
343 jmp error_return
344.endm
345
346/**
347 * idtentry - Macro to generate entry stubs for simple IDT entries
348 * @vector: Vector number
349 * @asmsym: ASM symbol for the entry point
350 * @cfunc: C function to be called
351 * @has_error_code: Hardware pushed error code on stack
352 *
 353	 * The macro emits code to set up the kernel context for straightforward
354 * and simple IDT entries. No IST stack, no paranoid entry checks.
355 */
356.macro idtentry vector asmsym cfunc has_error_code:req
357SYM_CODE_START(\asmsym)
358 UNWIND_HINT_IRET_REGS offset=\has_error_code*8
359 ENDBR
360 ASM_CLAC
361
362 .if \has_error_code == 0
363 pushq $-1 /* ORIG_RAX: no syscall to restart */
364 .endif
365
366 .if \vector == X86_TRAP_BP
367 /*
368 * If coming from kernel space, create a 6-word gap to allow the
369 * int3 handler to emulate a call instruction.
370 */
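	/*
	 * The six pushes below duplicate the existing six-word frame
	 * (ORIG_RAX plus the five hardware IRET words) 48 bytes lower,
	 * so the handler can "push" an emulated return address into the
	 * gap without corrupting the real frame.
	 */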
371 testb $3, CS-ORIG_RAX(%rsp)
372 jnz .Lfrom_usermode_no_gap_\@
373 .rept 6
374 pushq 5*8(%rsp)
375 .endr
376 UNWIND_HINT_IRET_REGS offset=8
377.Lfrom_usermode_no_gap_\@:
378 .endif
379
380 idtentry_body \cfunc \has_error_code
381
382_ASM_NOKPROBE(\asmsym)
383SYM_CODE_END(\asmsym)
384.endm
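/*
 * Illustrative use, assuming the wrappers in <asm/idtentry.h> (included
 * further down): the assembly side of DECLARE_IDTENTRY() expands roughly to
 *
 *	idtentry vector asm_##func func has_error_code=0
 *
 * e.g. emitting asm_exc_divide_error as the IDT stub that ends up calling
 * exc_divide_error().
 */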
385
386/*
387 * Interrupt entry/exit.
388 *
 389	 * The interrupt stubs push (vector) onto the stack, which is the error_code
390 * position of idtentry exceptions, and jump to one of the two idtentry points
391 * (common/spurious).
392 *
 393	 * common_interrupt is a hotpath; align it to a cache line
394 */
395.macro idtentry_irq vector cfunc
396 .p2align CONFIG_X86_L1_CACHE_SHIFT
397 idtentry \vector asm_\cfunc \cfunc has_error_code=1
398.endm
399
400/*
401 * System vectors which invoke their handlers directly and are not
402 * going through the regular common device interrupt handling code.
403 */
404.macro idtentry_sysvec vector cfunc
405 idtentry \vector asm_\cfunc \cfunc has_error_code=0
406.endm
407
408/**
409 * idtentry_mce_db - Macro to generate entry stubs for #MC and #DB
410 * @vector: Vector number
411 * @asmsym: ASM symbol for the entry point
412 * @cfunc: C function to be called
413 *
414 * The macro emits code to set up the kernel context for #MC and #DB
415 *
416 * If the entry comes from user space it uses the normal entry path
417 * including the return to user space work and preemption checks on
418 * exit.
419 *
 420	 * If it hits in kernel mode then it needs to go through the paranoid
421 * entry as the exception can hit any random state. No preemption
422 * check on exit to keep the paranoid path simple.
423 */
424.macro idtentry_mce_db vector asmsym cfunc
425SYM_CODE_START(\asmsym)
426 UNWIND_HINT_IRET_REGS
427 ENDBR
428 ASM_CLAC
429
430 pushq $-1 /* ORIG_RAX: no syscall to restart */
431
432 /*
433 * If the entry is from userspace, switch stacks and treat it as
434 * a normal entry.
435 */
436 testb $3, CS-ORIG_RAX(%rsp)
437 jnz .Lfrom_usermode_switch_stack_\@
438
439 /* paranoid_entry returns GS information for paranoid_exit in EBX. */
440 call paranoid_entry
441
442 UNWIND_HINT_REGS
443
444 movq %rsp, %rdi /* pt_regs pointer */
445
446 call \cfunc
447
448 jmp paranoid_exit
449
450 /* Switch to the regular task stack and use the noist entry point */
451.Lfrom_usermode_switch_stack_\@:
452 idtentry_body noist_\cfunc, has_error_code=0
453
454_ASM_NOKPROBE(\asmsym)
455SYM_CODE_END(\asmsym)
456.endm
457
458#ifdef CONFIG_AMD_MEM_ENCRYPT
459/**
460 * idtentry_vc - Macro to generate entry stub for #VC
461 * @vector: Vector number
462 * @asmsym: ASM symbol for the entry point
463 * @cfunc: C function to be called
464 *
465 * The macro emits code to set up the kernel context for #VC. The #VC handler
466 * runs on an IST stack and needs to be able to cause nested #VC exceptions.
467 *
468 * To make this work the #VC entry code tries its best to pretend it doesn't use
469 * an IST stack by switching to the task stack if coming from user-space (which
470 * includes early SYSCALL entry path) or back to the stack in the IRET frame if
471 * entered from kernel-mode.
472 *
473 * If entered from kernel-mode the return stack is validated first, and if it is
474 * not safe to use (e.g. because it points to the entry stack) the #VC handler
475 * will switch to a fall-back stack (VC2) and call a special handler function.
476 *
477 * The macro is only used for one vector, but it is planned to be extended in
478 * the future for the #HV exception.
479 */
480.macro idtentry_vc vector asmsym cfunc
481SYM_CODE_START(\asmsym)
482 UNWIND_HINT_IRET_REGS
483 ENDBR
484 ASM_CLAC
485
486 /*
487 * If the entry is from userspace, switch stacks and treat it as
488 * a normal entry.
489 */
490 testb $3, CS-ORIG_RAX(%rsp)
491 jnz .Lfrom_usermode_switch_stack_\@
492
493 /*
494 * paranoid_entry returns SWAPGS flag for paranoid_exit in EBX.
495 * EBX == 0 -> SWAPGS, EBX == 1 -> no SWAPGS
496 */
497 call paranoid_entry
498
499 UNWIND_HINT_REGS
500
501 /*
502 * Switch off the IST stack to make it free for nested exceptions. The
503 * vc_switch_off_ist() function will switch back to the interrupted
504 * stack if it is safe to do so. If not it switches to the VC fall-back
505 * stack.
506 */
507 movq %rsp, %rdi /* pt_regs pointer */
508 call vc_switch_off_ist
509 movq %rax, %rsp /* Switch to new stack */
510
511 UNWIND_HINT_REGS
512
513 /* Update pt_regs */
514 movq ORIG_RAX(%rsp), %rsi /* get error code into 2nd argument*/
515 movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */
516
517 movq %rsp, %rdi /* pt_regs pointer */
518
519 call kernel_\cfunc
520
521 /*
522 * No need to switch back to the IST stack. The current stack is either
523 * identical to the stack in the IRET frame or the VC fall-back stack,
524 * so it is definitely mapped even with PTI enabled.
525 */
526 jmp paranoid_exit
527
528 /* Switch to the regular task stack */
529.Lfrom_usermode_switch_stack_\@:
530 idtentry_body user_\cfunc, has_error_code=1
531
532_ASM_NOKPROBE(\asmsym)
533SYM_CODE_END(\asmsym)
534.endm
535#endif
536
537/*
 538	 * Double fault entry. Straight paranoid. No checks on which context
 539	 * this comes from, because for the espfix-induced #DF such checks would
 540	 * do the wrong thing.
541 */
542.macro idtentry_df vector asmsym cfunc
543SYM_CODE_START(\asmsym)
544 UNWIND_HINT_IRET_REGS offset=8
545 ENDBR
546 ASM_CLAC
547
548 /* paranoid_entry returns GS information for paranoid_exit in EBX. */
549 call paranoid_entry
550 UNWIND_HINT_REGS
551
552 movq %rsp, %rdi /* pt_regs pointer into first argument */
553 movq ORIG_RAX(%rsp), %rsi /* get error code into 2nd argument*/
554 movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */
555 call \cfunc
556
557 /* For some configurations \cfunc ends up being a noreturn. */
558 REACHABLE
559
560 jmp paranoid_exit
561
562_ASM_NOKPROBE(\asmsym)
563SYM_CODE_END(\asmsym)
564.endm
565
566/*
567 * Include the defines which emit the idt entries which are shared
 568	 * between 32 and 64 bit and emit the __irqentry_text_* markers
569 * so the stacktrace boundary checks work.
570 */
571 .align 16
572 .globl __irqentry_text_start
573__irqentry_text_start:
574
575#include <asm/idtentry.h>
576
577 .align 16
578 .globl __irqentry_text_end
579__irqentry_text_end:
580 ANNOTATE_NOENDBR
581
582SYM_CODE_START_LOCAL(common_interrupt_return)
583SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
584#ifdef CONFIG_DEBUG_ENTRY
585 /* Assert that pt_regs indicates user mode. */
586 testb $3, CS(%rsp)
587 jnz 1f
588 ud2
5891:
590#endif
591#ifdef CONFIG_XEN_PV
592 ALTERNATIVE "", "jmp xenpv_restore_regs_and_return_to_usermode", X86_FEATURE_XENPV
593#endif
594
595 POP_REGS pop_rdi=0
596
597 /*
598 * The stack is now user RDI, orig_ax, RIP, CS, EFLAGS, RSP, SS.
599 * Save old stack pointer and switch to trampoline stack.
600 */
601 movq %rsp, %rdi
602 movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
603 UNWIND_HINT_EMPTY
604
605 /* Copy the IRET frame to the trampoline stack. */
606 pushq 6*8(%rdi) /* SS */
607 pushq 5*8(%rdi) /* RSP */
608 pushq 4*8(%rdi) /* EFLAGS */
609 pushq 3*8(%rdi) /* CS */
610 pushq 2*8(%rdi) /* RIP */
611
612 /* Push user RDI on the trampoline stack. */
613 pushq (%rdi)
614
615 /*
616 * We are on the trampoline stack. All regs except RDI are live.
617 * We can do future final exit work right here.
618 */
619 STACKLEAK_ERASE_NOCLOBBER
620
621 SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
622
623 /* Restore RDI. */
624 popq %rdi
625 swapgs
626 jmp .Lnative_iret
627
628
629SYM_INNER_LABEL(restore_regs_and_return_to_kernel, SYM_L_GLOBAL)
630#ifdef CONFIG_DEBUG_ENTRY
631 /* Assert that pt_regs indicates kernel mode. */
632 testb $3, CS(%rsp)
633 jz 1f
634 ud2
6351:
636#endif
637 POP_REGS
638 addq $8, %rsp /* skip regs->orig_ax */
639 /*
 640	 * ARCH_HAS_MEMBARRIER_SYNC_CORE relies on IRET core serialization
 641	 * when returning from an IPI handler.
642 */
643#ifdef CONFIG_XEN_PV
644SYM_INNER_LABEL(early_xen_iret_patch, SYM_L_GLOBAL)
645 ANNOTATE_NOENDBR
646 .byte 0xe9
647 .long .Lnative_iret - (. + 4)
648#endif
649
650.Lnative_iret:
651 UNWIND_HINT_IRET_REGS
652 /*
653 * Are we returning to a stack segment from the LDT? Note: in
654 * 64-bit mode SS:RSP on the exception stack is always valid.
655 */
656#ifdef CONFIG_X86_ESPFIX64
657 testb $4, (SS-RIP)(%rsp)
658 jnz native_irq_return_ldt
659#endif
660
661SYM_INNER_LABEL(native_irq_return_iret, SYM_L_GLOBAL)
662 ANNOTATE_NOENDBR // exc_double_fault
663 /*
664 * This may fault. Non-paranoid faults on return to userspace are
665 * handled by fixup_bad_iret. These include #SS, #GP, and #NP.
666 * Double-faults due to espfix64 are handled in exc_double_fault.
667 * Other faults here are fatal.
668 */
669 iretq
670
671#ifdef CONFIG_X86_ESPFIX64
672native_irq_return_ldt:
673 /*
674 * We are running with user GSBASE. All GPRs contain their user
675 * values. We have a percpu ESPFIX stack that is eight slots
676 * long (see ESPFIX_STACK_SIZE). espfix_waddr points to the bottom
677 * of the ESPFIX stack.
678 *
679 * We clobber RAX and RDI in this code. We stash RDI on the
680 * normal stack and RAX on the ESPFIX stack.
681 *
682 * The ESPFIX stack layout we set up looks like this:
683 *
684 * --- top of ESPFIX stack ---
685 * SS
686 * RSP
687 * RFLAGS
688 * CS
689 * RIP <-- RSP points here when we're done
690 * RAX <-- espfix_waddr points here
691 * --- bottom of ESPFIX stack ---
692 */
693
694 pushq %rdi /* Stash user RDI */
695 swapgs /* to kernel GS */
696 SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi /* to kernel CR3 */
697
698 movq PER_CPU_VAR(espfix_waddr), %rdi
699 movq %rax, (0*8)(%rdi) /* user RAX */
700 movq (1*8)(%rsp), %rax /* user RIP */
701 movq %rax, (1*8)(%rdi)
702 movq (2*8)(%rsp), %rax /* user CS */
703 movq %rax, (2*8)(%rdi)
704 movq (3*8)(%rsp), %rax /* user RFLAGS */
705 movq %rax, (3*8)(%rdi)
706 movq (5*8)(%rsp), %rax /* user SS */
707 movq %rax, (5*8)(%rdi)
708 movq (4*8)(%rsp), %rax /* user RSP */
709 movq %rax, (4*8)(%rdi)
710 /* Now RAX == RSP. */
711
712 andl $0xffff0000, %eax /* RAX = (RSP & 0xffff0000) */
713
714 /*
715 * espfix_stack[31:16] == 0. The page tables are set up such that
716 * (espfix_stack | (X & 0xffff0000)) points to a read-only alias of
717 * espfix_waddr for any X. That is, there are 65536 RO aliases of
718 * the same page. Set up RSP so that RSP[31:16] contains the
719 * respective 16 bits of the /userspace/ RSP and RSP nonetheless
720 * still points to an RO alias of the ESPFIX stack.
721 */
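	/*
	 * Illustrative values: if the user RSP ended in 0x12345678, the
	 * andl above leaves RAX == 0x12340000 and the orq below yields
	 * espfix_stack | 0x12340000 -- an alias whose bits [31:16] match
	 * the user RSP but which still points at the RO copy of the frame
	 * written above.
	 */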
722 orq PER_CPU_VAR(espfix_stack), %rax
723
724 SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
725 swapgs /* to user GS */
726 popq %rdi /* Restore user RDI */
727
728 movq %rax, %rsp
729 UNWIND_HINT_IRET_REGS offset=8
730
731 /*
732 * At this point, we cannot write to the stack any more, but we can
733 * still read.
734 */
735 popq %rax /* Restore user RAX */
736
737 /*
738 * RSP now points to an ordinary IRET frame, except that the page
739 * is read-only and RSP[31:16] are preloaded with the userspace
740 * values. We can now IRET back to userspace.
741 */
742 jmp native_irq_return_iret
743#endif
744SYM_CODE_END(common_interrupt_return)
745_ASM_NOKPROBE(common_interrupt_return)
746
747/*
748 * Reload gs selector with exception handling
749 * edi: new selector
750 *
751 * Is in entry.text as it shouldn't be instrumented.
752 */
753SYM_FUNC_START(asm_load_gs_index)
754 FRAME_BEGIN
755 swapgs
756.Lgs_change:
757 ANNOTATE_NOENDBR // error_entry
758 movl %edi, %gs
7592: ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
760 swapgs
761 FRAME_END
762 RET
763
764 /* running with kernelgs */
765.Lbad_gs:
766 swapgs /* switch back to user gs */
767.macro ZAP_GS
768 /* This can't be a string because the preprocessor needs to see it. */
769 movl $__USER_DS, %eax
770 movl %eax, %gs
771.endm
772 ALTERNATIVE "", "ZAP_GS", X86_BUG_NULL_SEG
773 xorl %eax, %eax
774 movl %eax, %gs
775 jmp 2b
776
777 _ASM_EXTABLE(.Lgs_change, .Lbad_gs)
778
779SYM_FUNC_END(asm_load_gs_index)
780EXPORT_SYMBOL(asm_load_gs_index)
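/*
 * Note: the usual caller is expected to be the C helper native_load_gs_index(),
 * which disables interrupts around this call so that nothing runs between the
 * two swapgs instructions above with the user GS base active.
 */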
781
782#ifdef CONFIG_XEN_PV
783/*
784 * A note on the "critical region" in our callback handler.
785 * We want to avoid stacking callback handlers due to events occurring
786 * during handling of the last event. To do this, we keep events disabled
787 * until we've done all processing. HOWEVER, we must enable events before
788 * popping the stack frame (can't be done atomically) and so it would still
789 * be possible to get enough handler activations to overflow the stack.
790 * Although unlikely, bugs of that kind are hard to track down, so we'd
791 * like to avoid the possibility.
792 * So, on entry to the handler we detect whether we interrupted an
793 * existing activation in its critical region -- if so, we pop the current
794 * activation and restart the handler using the previous one.
795 *
 796	 * C calling convention: exc_xen_hypervisor_callback(struct pt_regs *)
797 */
798SYM_CODE_START_LOCAL(exc_xen_hypervisor_callback)
799
800/*
 801	 * Since we don't modify %rdi, xen_pv_evtchn_do_upcall(struct pt_regs *)
 802	 * will see the correct pointer to the pt_regs
803 */
804 UNWIND_HINT_FUNC
805 movq %rdi, %rsp /* we don't return, adjust the stack frame */
806 UNWIND_HINT_REGS
807
808 call xen_pv_evtchn_do_upcall
809
810 jmp error_return
811SYM_CODE_END(exc_xen_hypervisor_callback)
812
813/*
814 * Hypervisor uses this for application faults while it executes.
815 * We get here for two reasons:
816 * 1. Fault while reloading DS, ES, FS or GS
817 * 2. Fault while executing IRET
818 * Category 1 we do not need to fix up as Xen has already reloaded all segment
819 * registers that could be reloaded and zeroed the others.
820 * Category 2 we fix up by killing the current process. We cannot use the
821 * normal Linux return path in this case because if we use the IRET hypercall
822 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
823 * We distinguish between categories by comparing each saved segment register
 824	 * with its current contents: any discrepancy means we are in category 1.
825 */
826SYM_CODE_START(xen_failsafe_callback)
827 UNWIND_HINT_EMPTY
828 ENDBR
829 movl %ds, %ecx
830 cmpw %cx, 0x10(%rsp)
831 jne 1f
832 movl %es, %ecx
833 cmpw %cx, 0x18(%rsp)
834 jne 1f
835 movl %fs, %ecx
836 cmpw %cx, 0x20(%rsp)
837 jne 1f
838 movl %gs, %ecx
839 cmpw %cx, 0x28(%rsp)
840 jne 1f
841 /* All segments match their saved values => Category 2 (Bad IRET). */
842 movq (%rsp), %rcx
843 movq 8(%rsp), %r11
844 addq $0x30, %rsp
845 pushq $0 /* RIP */
846 UNWIND_HINT_IRET_REGS offset=8
847 jmp asm_exc_general_protection
8481: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
849 movq (%rsp), %rcx
850 movq 8(%rsp), %r11
851 addq $0x30, %rsp
852 UNWIND_HINT_IRET_REGS
853 pushq $-1 /* orig_ax = -1 => not a system call */
854 PUSH_AND_CLEAR_REGS
855 ENCODE_FRAME_POINTER
856 jmp error_return
857SYM_CODE_END(xen_failsafe_callback)
858#endif /* CONFIG_XEN_PV */
859
860/*
861 * Save all registers in pt_regs. Return GSBASE related information
862 * in EBX depending on the availability of the FSGSBASE instructions:
863 *
864 * FSGSBASE R/EBX
865 * N 0 -> SWAPGS on exit
866 * 1 -> no SWAPGS on exit
867 *
868 * Y GSBASE value at entry, must be restored in paranoid_exit
869 */
870SYM_CODE_START_LOCAL(paranoid_entry)
871 UNWIND_HINT_FUNC
872 cld
873 PUSH_AND_CLEAR_REGS save_ret=1
874 ENCODE_FRAME_POINTER 8
875
876 /*
877 * Always stash CR3 in %r14. This value will be restored,
878 * verbatim, at exit. Needed if paranoid_entry interrupted
879 * another entry that already switched to the user CR3 value
880 * but has not yet returned to userspace.
881 *
882 * This is also why CS (stashed in the "iret frame" by the
883 * hardware at entry) can not be used: this may be a return
884 * to kernel code, but with a user CR3 value.
885 *
886 * Switching CR3 does not depend on kernel GSBASE so it can
887 * be done before switching to the kernel GSBASE. This is
888 * required for FSGSBASE because the kernel GSBASE has to
889 * be retrieved from a kernel internal table.
890 */
891 SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
892
893 /*
894 * Handling GSBASE depends on the availability of FSGSBASE.
895 *
896 * Without FSGSBASE the kernel enforces that negative GSBASE
897 * values indicate kernel GSBASE. With FSGSBASE no assumptions
898 * can be made about the GSBASE value when entering from user
899 * space.
900 */
901 ALTERNATIVE "jmp .Lparanoid_entry_checkgs", "", X86_FEATURE_FSGSBASE
902
903 /*
904 * Read the current GSBASE and store it in %rbx unconditionally,
 905	 * retrieve and set the current CPU's kernel GSBASE. The stored value
906 * has to be restored in paranoid_exit unconditionally.
907 *
908 * The unconditional write to GS base below ensures that no subsequent
909 * loads based on a mispredicted GS base can happen, therefore no LFENCE
910 * is needed here.
911 */
912 SAVE_AND_SET_GSBASE scratch_reg=%rax save_reg=%rbx
913 RET
914
915.Lparanoid_entry_checkgs:
916 /* EBX = 1 -> kernel GSBASE active, no restore required */
917 movl $1, %ebx
918
919 /*
920 * The kernel-enforced convention is a negative GSBASE indicates
921 * a kernel value. No SWAPGS needed on entry and exit.
922 */
923 movl $MSR_GS_BASE, %ecx
924 rdmsr
925 testl %edx, %edx
926 js .Lparanoid_kernel_gsbase
927
928 /* EBX = 0 -> SWAPGS required on exit */
929 xorl %ebx, %ebx
930 swapgs
931.Lparanoid_kernel_gsbase:
932
933 FENCE_SWAPGS_KERNEL_ENTRY
934 RET
935SYM_CODE_END(paranoid_entry)
936
937/*
938 * "Paranoid" exit path from exception stack. This is invoked
939 * only on return from non-NMI IST interrupts that came
940 * from kernel space.
941 *
942 * We may be returning to very strange contexts (e.g. very early
943 * in syscall entry), so checking for preemption here would
944 * be complicated. Fortunately, there's no good reason to try
945 * to handle preemption here.
946 *
947 * R/EBX contains the GSBASE related information depending on the
948 * availability of the FSGSBASE instructions:
949 *
950 * FSGSBASE R/EBX
951 * N 0 -> SWAPGS on exit
952 * 1 -> no SWAPGS on exit
953 *
954 * Y User space GSBASE, must be restored unconditionally
955 */
956SYM_CODE_START_LOCAL(paranoid_exit)
957 UNWIND_HINT_REGS
958 /*
959 * The order of operations is important. RESTORE_CR3 requires
960 * kernel GSBASE.
961 *
 962	 * NB to anyone tempted to optimize this code: this code does
963 * not execute at all for exceptions from user mode. Those
964 * exceptions go through error_exit instead.
965 */
966 RESTORE_CR3 scratch_reg=%rax save_reg=%r14
967
968 /* Handle the three GSBASE cases */
969 ALTERNATIVE "jmp .Lparanoid_exit_checkgs", "", X86_FEATURE_FSGSBASE
970
971 /* With FSGSBASE enabled, unconditionally restore GSBASE */
972 wrgsbase %rbx
973 jmp restore_regs_and_return_to_kernel
974
975.Lparanoid_exit_checkgs:
976 /* On non-FSGSBASE systems, conditionally do SWAPGS */
977 testl %ebx, %ebx
978 jnz restore_regs_and_return_to_kernel
979
980 /* We are returning to a context with user GSBASE */
981 swapgs
982 jmp restore_regs_and_return_to_kernel
983SYM_CODE_END(paranoid_exit)
984
985/*
986 * Save all registers in pt_regs, and switch GS if needed.
987 */
988SYM_CODE_START_LOCAL(error_entry)
989 UNWIND_HINT_FUNC
990 cld
991 PUSH_AND_CLEAR_REGS save_ret=1
992 ENCODE_FRAME_POINTER 8
993 testb $3, CS+8(%rsp)
994 jz .Lerror_kernelspace
995
996 /*
997 * We entered from user mode or we're pretending to have entered
998 * from user mode due to an IRET fault.
999 */
1000 SWAPGS
1001 FENCE_SWAPGS_USER_ENTRY
1002 /* We have user CR3. Change to kernel CR3. */
1003 SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
1004
1005.Lerror_entry_from_usermode_after_swapgs:
1006 /* Put us onto the real thread stack. */
 1007	popq	%r12				/* save return addr in %r12 */
1008 movq %rsp, %rdi /* arg0 = pt_regs pointer */
1009 call sync_regs
1010 movq %rax, %rsp /* switch stack */
1011 ENCODE_FRAME_POINTER
1012 pushq %r12
1013 RET
1014
1015 /*
1016 * There are two places in the kernel that can potentially fault with
1017 * usergs. Handle them here. B stepping K8s sometimes report a
1018 * truncated RIP for IRET exceptions returning to compat mode. Check
1019 * for these here too.
1020 */
1021.Lerror_kernelspace:
1022 leaq native_irq_return_iret(%rip), %rcx
1023 cmpq %rcx, RIP+8(%rsp)
1024 je .Lerror_bad_iret
1025 movl %ecx, %eax /* zero extend */
1026 cmpq %rax, RIP+8(%rsp)
1027 je .Lbstep_iret
1028 cmpq $.Lgs_change, RIP+8(%rsp)
1029 jne .Lerror_entry_done_lfence
1030
1031 /*
1032 * hack: .Lgs_change can fail with user gsbase. If this happens, fix up
1033 * gsbase and proceed. We'll fix up the exception and land in
1034 * .Lgs_change's error handler with kernel gsbase.
1035 */
1036 SWAPGS
1037
1038 /*
1039 * Issue an LFENCE to prevent GS speculation, regardless of whether it is a
1040 * kernel or user gsbase.
1041 */
1042.Lerror_entry_done_lfence:
1043 FENCE_SWAPGS_KERNEL_ENTRY
1044 RET
1045
1046.Lbstep_iret:
1047 /* Fix truncated RIP */
1048 movq %rcx, RIP+8(%rsp)
1049 /* fall through */
1050
1051.Lerror_bad_iret:
1052 /*
1053 * We came from an IRET to user mode, so we have user
1054 * gsbase and CR3. Switch to kernel gsbase and CR3:
1055 */
1056 SWAPGS
1057 FENCE_SWAPGS_USER_ENTRY
1058 SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
1059
1060 /*
1061 * Pretend that the exception came from user mode: set up pt_regs
1062 * as if we faulted immediately after IRET.
1063 */
1064 mov %rsp, %rdi
1065 call fixup_bad_iret
1066 mov %rax, %rsp
1067 jmp .Lerror_entry_from_usermode_after_swapgs
1068SYM_CODE_END(error_entry)
1069
1070SYM_CODE_START_LOCAL(error_return)
1071 UNWIND_HINT_REGS
1072 DEBUG_ENTRY_ASSERT_IRQS_OFF
1073 testb $3, CS(%rsp)
1074 jz restore_regs_and_return_to_kernel
1075 jmp swapgs_restore_regs_and_return_to_usermode
1076SYM_CODE_END(error_return)
1077
1078/*
1079 * Runs on exception stack. Xen PV does not go through this path at all,
1080 * so we can use real assembly here.
1081 *
1082 * Registers:
1083 * %r14: Used to save/restore the CR3 of the interrupted context
1084 * when PAGE_TABLE_ISOLATION is in use. Do not clobber.
1085 */
1086SYM_CODE_START(asm_exc_nmi)
1087 UNWIND_HINT_IRET_REGS
1088 ENDBR
1089
1090 /*
1091 * We allow breakpoints in NMIs. If a breakpoint occurs, then
1092 * the iretq it performs will take us out of NMI context.
1093 * This means that we can have nested NMIs where the next
1094 * NMI is using the top of the stack of the previous NMI. We
1095 * can't let it execute because the nested NMI will corrupt the
1096 * stack of the previous NMI. NMI handlers are not re-entrant
1097 * anyway.
1098 *
1099 * To handle this case we do the following:
 1100	 *  Check a special location on the stack that contains
1101 * a variable that is set when NMIs are executing.
1102 * The interrupted task's stack is also checked to see if it
1103 * is an NMI stack.
1104 * If the variable is not set and the stack is not the NMI
1105 * stack then:
1106 * o Set the special variable on the stack
1107 * o Copy the interrupt frame into an "outermost" location on the
1108 * stack
1109 * o Copy the interrupt frame into an "iret" location on the stack
1110 * o Continue processing the NMI
1111 * If the variable is set or the previous stack is the NMI stack:
1112 * o Modify the "iret" location to jump to the repeat_nmi
1113 * o return back to the first NMI
1114 *
 1115	 * Now on exit of the first NMI, we first clear the stack variable.
1116 * The NMI stack will tell any nested NMIs at that point that it is
1117 * nested. Then we pop the stack normally with iret, and if there was
1118 * a nested NMI that updated the copy interrupt stack frame, a
1119 * jump will be made to the repeat_nmi code that will handle the second
1120 * NMI.
1121 *
1122 * However, espfix prevents us from directly returning to userspace
1123 * with a single IRET instruction. Similarly, IRET to user mode
1124 * can fault. We therefore handle NMIs from user space like
1125 * other IST entries.
1126 */
1127
1128 ASM_CLAC
1129
1130 /* Use %rdx as our temp variable throughout */
1131 pushq %rdx
1132
1133 testb $3, CS-RIP+8(%rsp)
1134 jz .Lnmi_from_kernel
1135
1136 /*
1137 * NMI from user mode. We need to run on the thread stack, but we
1138 * can't go through the normal entry paths: NMIs are masked, and
1139 * we don't want to enable interrupts, because then we'll end
1140 * up in an awkward situation in which IRQs are on but NMIs
1141 * are off.
1142 *
1143 * We also must not push anything to the stack before switching
1144 * stacks lest we corrupt the "NMI executing" variable.
1145 */
1146
1147 swapgs
1148 cld
1149 FENCE_SWAPGS_USER_ENTRY
1150 SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx
1151 movq %rsp, %rdx
1152 movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
1153 UNWIND_HINT_IRET_REGS base=%rdx offset=8
1154 pushq 5*8(%rdx) /* pt_regs->ss */
1155 pushq 4*8(%rdx) /* pt_regs->rsp */
1156 pushq 3*8(%rdx) /* pt_regs->flags */
1157 pushq 2*8(%rdx) /* pt_regs->cs */
1158 pushq 1*8(%rdx) /* pt_regs->rip */
1159 UNWIND_HINT_IRET_REGS
1160 pushq $-1 /* pt_regs->orig_ax */
1161 PUSH_AND_CLEAR_REGS rdx=(%rdx)
1162 ENCODE_FRAME_POINTER
1163
1164 /*
1165 * At this point we no longer need to worry about stack damage
1166 * due to nesting -- we're on the normal thread stack and we're
1167 * done with the NMI stack.
1168 */
1169
1170 movq %rsp, %rdi
1171 movq $-1, %rsi
1172 call exc_nmi
1173
1174 /*
1175 * Return back to user mode. We must *not* do the normal exit
1176 * work, because we don't want to enable interrupts.
1177 */
1178 jmp swapgs_restore_regs_and_return_to_usermode
1179
1180.Lnmi_from_kernel:
1181 /*
1182 * Here's what our stack frame will look like:
1183 * +---------------------------------------------------------+
1184 * | original SS |
1185 * | original Return RSP |
1186 * | original RFLAGS |
1187 * | original CS |
1188 * | original RIP |
1189 * +---------------------------------------------------------+
1190 * | temp storage for rdx |
1191 * +---------------------------------------------------------+
1192 * | "NMI executing" variable |
1193 * +---------------------------------------------------------+
1194 * | iret SS } Copied from "outermost" frame |
1195 * | iret Return RSP } on each loop iteration; overwritten |
1196 * | iret RFLAGS } by a nested NMI to force another |
1197 * | iret CS } iteration if needed. |
1198 * | iret RIP } |
1199 * +---------------------------------------------------------+
1200 * | outermost SS } initialized in first_nmi; |
1201 * | outermost Return RSP } will not be changed before |
1202 * | outermost RFLAGS } NMI processing is done. |
1203 * | outermost CS } Copied to "iret" frame on each |
1204 * | outermost RIP } iteration. |
1205 * +---------------------------------------------------------+
1206 * | pt_regs |
1207 * +---------------------------------------------------------+
1208 *
1209 * The "original" frame is used by hardware. Before re-enabling
1210 * NMIs, we need to be done with it, and we need to leave enough
1211 * space for the asm code here.
1212 *
1213 * We return by executing IRET while RSP points to the "iret" frame.
1214 * That will either return for real or it will loop back into NMI
1215 * processing.
1216 *
1217 * The "outermost" frame is copied to the "iret" frame on each
1218 * iteration of the loop, so each iteration starts with the "iret"
1219 * frame pointing to the final return target.
1220 */
1221
1222 /*
1223 * Determine whether we're a nested NMI.
1224 *
1225 * If we interrupted kernel code between repeat_nmi and
1226 * end_repeat_nmi, then we are a nested NMI. We must not
1227 * modify the "iret" frame because it's being written by
1228 * the outer NMI. That's okay; the outer NMI handler is
 1229	 * about to call exc_nmi() anyway, so we can just
1230 * resume the outer NMI.
1231 */
1232
1233 movq $repeat_nmi, %rdx
1234 cmpq 8(%rsp), %rdx
1235 ja 1f
1236 movq $end_repeat_nmi, %rdx
1237 cmpq 8(%rsp), %rdx
1238 ja nested_nmi_out
12391:
1240
1241 /*
1242 * Now check "NMI executing". If it's set, then we're nested.
1243 * This will not detect if we interrupted an outer NMI just
1244 * before IRET.
1245 */
1246 cmpl $1, -8(%rsp)
1247 je nested_nmi
1248
1249 /*
1250 * Now test if the previous stack was an NMI stack. This covers
1251 * the case where we interrupt an outer NMI after it clears
1252 * "NMI executing" but before IRET. We need to be careful, though:
1253 * there is one case in which RSP could point to the NMI stack
1254 * despite there being no NMI active: naughty userspace controls
1255 * RSP at the very beginning of the SYSCALL targets. We can
1256 * pull a fast one on naughty userspace, though: we program
1257 * SYSCALL to mask DF, so userspace cannot cause DF to be set
1258 * if it controls the kernel's RSP. We set DF before we clear
1259 * "NMI executing".
1260 */
1261 lea 6*8(%rsp), %rdx
1262 /* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */
1263 cmpq %rdx, 4*8(%rsp)
1264 /* If the stack pointer is above the NMI stack, this is a normal NMI */
1265 ja first_nmi
1266
1267 subq $EXCEPTION_STKSZ, %rdx
1268 cmpq %rdx, 4*8(%rsp)
1269 /* If it is below the NMI stack, it is a normal NMI */
1270 jb first_nmi
1271
1272 /* Ah, it is within the NMI stack. */
1273
1274 testb $(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp)
1275 jz first_nmi /* RSP was user controlled. */
1276
1277 /* This is a nested NMI. */
1278
1279nested_nmi:
1280 /*
1281 * Modify the "iret" frame to point to repeat_nmi, forcing another
1282 * iteration of NMI handling.
1283 */
1284 subq $8, %rsp
1285 leaq -10*8(%rsp), %rdx
1286 pushq $__KERNEL_DS
1287 pushq %rdx
1288 pushfq
1289 pushq $__KERNEL_CS
1290 pushq $repeat_nmi
1291
1292 /* Put stack back */
1293 addq $(6*8), %rsp
1294
1295nested_nmi_out:
1296 popq %rdx
1297
1298 /* We are returning to kernel mode, so this cannot result in a fault. */
1299 iretq
1300
1301first_nmi:
1302 /* Restore rdx. */
1303 movq (%rsp), %rdx
1304
1305 /* Make room for "NMI executing". */
1306 pushq $0
1307
1308 /* Leave room for the "iret" frame */
1309 subq $(5*8), %rsp
1310
1311 /* Copy the "original" frame to the "outermost" frame */
1312 .rept 5
1313 pushq 11*8(%rsp)
1314 .endr
1315 UNWIND_HINT_IRET_REGS
1316
1317 /* Everything up to here is safe from nested NMIs */
1318
1319#ifdef CONFIG_DEBUG_ENTRY
1320 /*
1321 * For ease of testing, unmask NMIs right away. Disabled by
1322 * default because IRET is very expensive.
1323 */
1324 pushq $0 /* SS */
1325 pushq %rsp /* RSP (minus 8 because of the previous push) */
1326 addq $8, (%rsp) /* Fix up RSP */
1327 pushfq /* RFLAGS */
1328 pushq $__KERNEL_CS /* CS */
1329 pushq $1f /* RIP */
1330 iretq /* continues at repeat_nmi below */
1331 UNWIND_HINT_IRET_REGS
13321:
1333#endif
1334
1335repeat_nmi:
1336 ANNOTATE_NOENDBR // this code
1337 /*
1338 * If there was a nested NMI, the first NMI's iret will return
1339 * here. But NMIs are still enabled and we can take another
1340 * nested NMI. The nested NMI checks the interrupted RIP to see
1341 * if it is between repeat_nmi and end_repeat_nmi, and if so
1342 * it will just return, as we are about to repeat an NMI anyway.
1343 * This makes it safe to copy to the stack frame that a nested
1344 * NMI will update.
1345 *
1346 * RSP is pointing to "outermost RIP". gsbase is unknown, but, if
1347 * we're repeating an NMI, gsbase has the same value that it had on
1348 * the first iteration. paranoid_entry will load the kernel
1349 * gsbase if needed before we call exc_nmi(). "NMI executing"
1350 * is zero.
1351 */
1352 movq $1, 10*8(%rsp) /* Set "NMI executing". */
1353
1354 /*
1355 * Copy the "outermost" frame to the "iret" frame. NMIs that nest
1356 * here must not modify the "iret" frame while we're writing to
1357 * it or it will end up containing garbage.
1358 */
1359 addq $(10*8), %rsp
1360 .rept 5
1361 pushq -6*8(%rsp)
1362 .endr
1363 subq $(5*8), %rsp
1364end_repeat_nmi:
1365 ANNOTATE_NOENDBR // this code
1366
1367 /*
1368 * Everything below this point can be preempted by a nested NMI.
1369 * If this happens, then the inner NMI will change the "iret"
1370 * frame to point back to repeat_nmi.
1371 */
1372 pushq $-1 /* ORIG_RAX: no syscall to restart */
1373
1374 /*
1375 * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit
 1376	 * as we should not be calling schedule in NMI context,
 1377	 * even with normal interrupts enabled. An NMI should not be
1378 * setting NEED_RESCHED or anything that normal interrupts and
1379 * exceptions might do.
1380 */
1381 call paranoid_entry
1382 UNWIND_HINT_REGS
1383
1384 movq %rsp, %rdi
1385 movq $-1, %rsi
1386 call exc_nmi
1387
1388 /* Always restore stashed CR3 value (see paranoid_entry) */
1389 RESTORE_CR3 scratch_reg=%r15 save_reg=%r14
1390
1391 /*
1392 * The above invocation of paranoid_entry stored the GSBASE
1393 * related information in R/EBX depending on the availability
1394 * of FSGSBASE.
1395 *
1396 * If FSGSBASE is enabled, restore the saved GSBASE value
1397 * unconditionally, otherwise take the conditional SWAPGS path.
1398 */
1399 ALTERNATIVE "jmp nmi_no_fsgsbase", "", X86_FEATURE_FSGSBASE
1400
1401 wrgsbase %rbx
1402 jmp nmi_restore
1403
1404nmi_no_fsgsbase:
1405 /* EBX == 0 -> invoke SWAPGS */
1406 testl %ebx, %ebx
1407 jnz nmi_restore
1408
1409nmi_swapgs:
1410 swapgs
1411
1412nmi_restore:
1413 POP_REGS
1414
1415 /*
 1416	 * Skip orig_ax and the "outermost" frame to point RSP at the "iret"
 1417	 * frame.
1418 */
1419 addq $6*8, %rsp
1420
1421 /*
1422 * Clear "NMI executing". Set DF first so that we can easily
1423 * distinguish the remaining code between here and IRET from
1424 * the SYSCALL entry and exit paths.
1425 *
1426 * We arguably should just inspect RIP instead, but I (Andy) wrote
1427 * this code when I had the misapprehension that Xen PV supported
1428 * NMIs, and Xen PV would break that approach.
1429 */
1430 std
1431 movq $0, 5*8(%rsp) /* clear "NMI executing" */
1432
1433 /*
1434 * iretq reads the "iret" frame and exits the NMI stack in a
1435 * single instruction. We are returning to kernel mode, so this
1436 * cannot result in a fault. Similarly, we don't need to worry
1437 * about espfix64 on the way back to kernel mode.
1438 */
1439 iretq
1440SYM_CODE_END(asm_exc_nmi)
1441
1442#ifndef CONFIG_IA32_EMULATION
1443/*
1444 * This handles SYSCALL from 32-bit code. There is no way to program
1445 * MSRs to fully disable 32-bit SYSCALL.
1446 */
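/*
 * (When CONFIG_IA32_EMULATION is off, syscall_init() presumably points the
 * 32-bit SYSCALL MSR (CSTAR) here, so a 32-bit SYSCALL simply returns
 * -ENOSYS via SYSRETL.)
 */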
1447SYM_CODE_START(ignore_sysret)
1448 UNWIND_HINT_EMPTY
1449 ENDBR
1450 mov $-ENOSYS, %eax
1451 sysretl
1452SYM_CODE_END(ignore_sysret)
1453#endif
1454
1455.pushsection .text, "ax"
1456SYM_CODE_START(rewind_stack_and_make_dead)
1457 UNWIND_HINT_FUNC
1458 /* Prevent any naive code from trying to unwind to our caller. */
1459 xorl %ebp, %ebp
1460
1461 movq PER_CPU_VAR(cpu_current_top_of_stack), %rax
1462 leaq -PTREGS_SIZE(%rax), %rsp
1463 UNWIND_HINT_REGS
1464
1465 call make_task_dead
1466SYM_CODE_END(rewind_stack_and_make_dead)
1467.popsection