// SPDX-License-Identifier: GPL-2.0-only
/*
 * common.c - C code for kernel entry and exit
 * Copyright (c) 2015 Andrew Lutomirski
 *
 * Based on asm and ptrace code by many authors. The code here originated
 * in ptrace.c and signal.c.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>
#include <linux/export.h>
#include <linux/context_tracking.h>
#include <linux/user-return-notifier.h>
#include <linux/nospec.h>
#include <linux/uprobes.h>
#include <linux/livepatch.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>

#include <asm/desc.h>
#include <asm/traps.h>
#include <asm/vdso.h>
#include <asm/cpufeature.h>
#include <asm/fpu/api.h>
#include <asm/nospec-branch.h>
#include <asm/io_bitmap.h>

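/*
 * Defining CREATE_TRACE_POINTS before including a trace header makes this
 * translation unit emit the actual definitions of the sys_enter/sys_exit
 * tracepoints; every other includer of <trace/events/syscalls.h> sees only
 * the declarations.
 */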
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

#ifdef CONFIG_CONTEXT_TRACKING
/* Called on entry from user mode with IRQs off. */
__visible inline void enter_from_user_mode(void)
{
	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit_irqoff();
}
#else
static inline void enter_from_user_mode(void) {}
#endif

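/*
 * Argument registers differ by ABI: x86-64 passes syscall arguments in
 * rdi, rsi, rdx, r10, r8, r9 (r10 stands in for rcx, which the SYSCALL
 * instruction clobbers with the return address), while the i386 ABI uses
 * ebx, ecx, edx, esi, edi, ebp. audit_syscall_entry() records only the
 * first four.
 */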
static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
{
#ifdef CONFIG_X86_64
	if (arch == AUDIT_ARCH_X86_64) {
		audit_syscall_entry(regs->orig_ax, regs->di,
				    regs->si, regs->dx, regs->r10);
	} else
#endif
	{
		audit_syscall_entry(regs->orig_ax, regs->bx,
				    regs->cx, regs->dx, regs->si);
	}
}

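/*
 * For reference, struct seccomp_data is UAPI (include/uapi/linux/seccomp.h):
 *
 *	struct seccomp_data {
 *		int nr;
 *		__u32 arch;
 *		__u64 instruction_pointer;
 *		__u64 args[6];
 *	};
 *
 * The seccomp block below fills it from pt_regs so that filters see a
 * stable, ABI-tagged view of the syscall regardless of entry path.
 */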
/*
 * Returns the syscall nr to run (which should match regs->orig_ax) or -1
 * to skip the syscall.
 */
static long syscall_trace_enter(struct pt_regs *regs)
{
	u32 arch = in_ia32_syscall() ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64;

	struct thread_info *ti = current_thread_info();
	unsigned long ret = 0;
	u32 work;

	if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
		BUG_ON(regs != task_pt_regs(current));

	work = READ_ONCE(ti->flags);

	if (work & (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU)) {
		ret = tracehook_report_syscall_entry(regs);
		if (ret || (work & _TIF_SYSCALL_EMU))
			return -1L;
	}

#ifdef CONFIG_SECCOMP
	/*
	 * Do seccomp after ptrace, to catch any tracer changes.
	 */
	if (work & _TIF_SECCOMP) {
		struct seccomp_data sd;

		sd.arch = arch;
		sd.nr = regs->orig_ax;
		sd.instruction_pointer = regs->ip;
#ifdef CONFIG_X86_64
		if (arch == AUDIT_ARCH_X86_64) {
			sd.args[0] = regs->di;
			sd.args[1] = regs->si;
			sd.args[2] = regs->dx;
			sd.args[3] = regs->r10;
			sd.args[4] = regs->r8;
			sd.args[5] = regs->r9;
		} else
#endif
		{
			sd.args[0] = regs->bx;
			sd.args[1] = regs->cx;
			sd.args[2] = regs->dx;
			sd.args[3] = regs->si;
			sd.args[4] = regs->di;
			sd.args[5] = regs->bp;
		}

		ret = __secure_computing(&sd);
		if (ret == -1)
			return ret;
	}
#endif

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->orig_ax);

	do_audit_syscall_entry(regs, arch);

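	/*
	 * GNU "?:" shorthand: evaluates to ret when ret is nonzero,
	 * otherwise to regs->orig_ax. Every early-exit path above has
	 * already returned, so ret is 0 here and this normally hands back
	 * the (possibly ptrace-modified) syscall number.
	 */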
	return ret ?: regs->orig_ax;
}

#define EXIT_TO_USERMODE_LOOP_FLAGS				\
	(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |	\
	 _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY | _TIF_PATCH_PENDING)
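/*
 * Of these, _TIF_USER_RETURN_NOTIFY is serviced by
 * fire_user_return_notifiers(); KVM uses it to lazily restore user-return
 * MSRs. _TIF_PATCH_PENDING means a livepatch transition is pending and
 * klp_update_patch_state() must migrate this task to the new patch state.
 */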

static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
{
	/*
	 * In order to return to user mode, we need to have IRQs off with
	 * none of EXIT_TO_USERMODE_LOOP_FLAGS set. Several of these flags
	 * can be set at any time on preemptible kernels if we have IRQs on,
	 * so we need to loop. Disabling preemption wouldn't help: doing the
	 * work to clear some of the flags can sleep.
	 */
	while (true) {
		/* We have work to do. */
		local_irq_enable();

		if (cached_flags & _TIF_NEED_RESCHED)
			schedule();

		if (cached_flags & _TIF_UPROBE)
			uprobe_notify_resume(regs);

		if (cached_flags & _TIF_PATCH_PENDING)
			klp_update_patch_state(current);

		/* deal with pending signal delivery */
		if (cached_flags & _TIF_SIGPENDING)
			do_signal(regs);

		if (cached_flags & _TIF_NOTIFY_RESUME) {
			clear_thread_flag(TIF_NOTIFY_RESUME);
			tracehook_notify_resume(regs);
			rseq_handle_notify_resume(NULL, regs);
		}

		if (cached_flags & _TIF_USER_RETURN_NOTIFY)
			fire_user_return_notifiers();

		/* Disable IRQs and retry */
		local_irq_disable();

		cached_flags = READ_ONCE(current_thread_info()->flags);

		if (!(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
			break;
	}
}

/* Called with IRQs disabled. */
__visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	u32 cached_flags;

	addr_limit_user_check();

	lockdep_assert_irqs_disabled();
	lockdep_sys_exit();

	cached_flags = READ_ONCE(ti->flags);

	if (unlikely(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
		exit_to_usermode_loop(regs, cached_flags);

	/* Reload ti->flags; we may have rescheduled above. */
	cached_flags = READ_ONCE(ti->flags);

	if (unlikely(cached_flags & _TIF_IO_BITMAP))
		tss_update_io_bitmap();

	fpregs_assert_state_consistent();
	if (unlikely(cached_flags & _TIF_NEED_FPU_LOAD))
		switch_fpu_return();

#ifdef CONFIG_COMPAT
	/*
	 * Compat syscalls set TS_COMPAT. Make sure we clear it before
	 * returning to user mode. We need to clear it *after* signal
	 * handling, because syscall restart has a fixup for compat
	 * syscalls. The fixup is exercised by the ptrace_syscall_32
	 * selftest.
	 *
	 * We also need to clear TS_I386_REGS_POKED: the 32-bit tracer
	 * special case only applies after poking regs and before the
	 * very next return to user mode.
	 */
	ti->status &= ~(TS_COMPAT|TS_I386_REGS_POKED);
#endif

	user_enter_irqoff();

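	/*
	 * Flush CPU-internal buffers (via VERW) on the way out to user
	 * mode as a Microarchitectural Data Sampling (MDS) mitigation;
	 * this is a no-op unless the mds_user_clear static key is enabled.
	 */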
	mds_user_clear_cpu_buffers();
}

#define SYSCALL_EXIT_WORK_FLAGS				\
	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT |	\
	 _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)

static void syscall_slow_exit_work(struct pt_regs *regs, u32 cached_flags)
{
	bool step;

	audit_syscall_exit(regs);

	if (cached_flags & _TIF_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, regs->ax);

	/*
	 * If TIF_SYSCALL_EMU is set, we only get here because of
	 * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
	 * We already reported this syscall instruction in
	 * syscall_trace_enter().
	 */
	step = unlikely(
		(cached_flags & (_TIF_SINGLESTEP | _TIF_SYSCALL_EMU))
		== _TIF_SINGLESTEP);
	if (step || cached_flags & _TIF_SYSCALL_TRACE)
		tracehook_report_syscall_exit(regs, step);
}

/*
 * Called with IRQs on and fully valid regs. Returns with IRQs off in a
 * state such that we can immediately switch to user mode.
 */
__visible inline void syscall_return_slowpath(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	u32 cached_flags = READ_ONCE(ti->flags);

	CT_WARN_ON(ct_state() != CONTEXT_KERNEL);

	if (IS_ENABLED(CONFIG_PROVE_LOCKING) &&
	    WARN(irqs_disabled(), "syscall %ld left IRQs disabled", regs->orig_ax))
		local_irq_enable();

	rseq_syscall(regs);

	/*
	 * First do one-time work. If these work items are enabled, we
	 * want to run them exactly once per syscall exit with IRQs on.
	 */
	if (unlikely(cached_flags & SYSCALL_EXIT_WORK_FLAGS))
		syscall_slow_exit_work(regs, cached_flags);

	local_irq_disable();
	prepare_exit_to_usermode(regs);
}

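/*
 * 64-bit syscall entry, called from entry_SYSCALL_64 with the syscall
 * number from rax (also saved as regs->orig_ax). array_index_nospec()
 * clamps the table index under speculation (Spectre v1). x32 syscalls
 * carry __X32_SYSCALL_BIT (0x40000000) in the number and dispatch
 * through their own table.
 */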
#ifdef CONFIG_X86_64
__visible void do_syscall_64(unsigned long nr, struct pt_regs *regs)
{
	struct thread_info *ti;

	enter_from_user_mode();
	local_irq_enable();
	ti = current_thread_info();
	if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY)
		nr = syscall_trace_enter(regs);

	if (likely(nr < NR_syscalls)) {
		nr = array_index_nospec(nr, NR_syscalls);
		regs->ax = sys_call_table[nr](regs);
#ifdef CONFIG_X86_X32_ABI
	} else if (likely((nr & __X32_SYSCALL_BIT) &&
			  (nr & ~__X32_SYSCALL_BIT) < X32_NR_syscalls)) {
		nr = array_index_nospec(nr & ~__X32_SYSCALL_BIT,
					X32_NR_syscalls);
		regs->ax = x32_sys_call_table[nr](regs);
#endif
	}

	syscall_return_slowpath(regs);
}
#endif

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
/*
 * Does a 32-bit syscall. Called with IRQs on in CONTEXT_KERNEL. Does
 * all entry and exit work and returns with IRQs off. This function is
 * extremely hot in workloads that use it, and it's usually called from
 * do_fast_syscall_32, so forcibly inline it to improve performance.
 */
static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	unsigned int nr = (unsigned int)regs->orig_ax;

#ifdef CONFIG_IA32_EMULATION
	ti->status |= TS_COMPAT;
#endif

	if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY) {
		/*
		 * Subtlety here: if ptrace pokes something larger than
		 * 2^32-1 into orig_ax, this truncates it. This may or
		 * may not be necessary, but it matches the old asm
		 * behavior.
		 */
		nr = syscall_trace_enter(regs);
	}

	if (likely(nr < IA32_NR_syscalls)) {
		nr = array_index_nospec(nr, IA32_NR_syscalls);
#ifdef CONFIG_IA32_EMULATION
		regs->ax = ia32_sys_call_table[nr](regs);
#else
		/*
		 * It's possible that a 32-bit syscall implementation
		 * takes a 64-bit parameter but nonetheless assumes that
		 * the high bits are zero. Make sure we zero-extend all
		 * of the args.
		 */
		regs->ax = ia32_sys_call_table[nr](
			(unsigned int)regs->bx, (unsigned int)regs->cx,
			(unsigned int)regs->dx, (unsigned int)regs->si,
			(unsigned int)regs->di, (unsigned int)regs->bp);
#endif /* CONFIG_IA32_EMULATION */
	}

	syscall_return_slowpath(regs);
}

/* Handles int $0x80 */
__visible void do_int80_syscall_32(struct pt_regs *regs)
{
	enter_from_user_mode();
	local_irq_enable();
	do_syscall_32_irqs_on(regs);
}

/* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */
__visible long do_fast_syscall_32(struct pt_regs *regs)
{
	/*
	 * Called using the internal vDSO SYSENTER/SYSCALL32 calling
	 * convention. Adjust regs so it looks like we entered using int80.
	 */

	unsigned long landing_pad = (unsigned long)current->mm->context.vdso +
		vdso_image_32.sym_int80_landing_pad;

	/*
	 * SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward
	 * so that 'regs->ip -= 2' lands back on an int $0x80 instruction.
	 * Fix it up.
	 */
	regs->ip = landing_pad;

	enter_from_user_mode();

	local_irq_enable();

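	/*
	 * The vDSO's __kernel_vsyscall pushes the real EBP (the sixth
	 * i386 syscall argument) onto the user stack before entering the
	 * kernel, so it sits at the top of the user stack, i.e. at
	 * (u32)regs->sp.
	 */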
	/* Fetch EBP from where the vDSO stashed it. */
	if (
#ifdef CONFIG_X86_64
		/*
		 * Micro-optimization: the pointer we're following is explicitly
		 * 32 bits, so it can't be out of range.
		 */
		__get_user(*(u32 *)&regs->bp,
			   (u32 __user __force *)(unsigned long)(u32)regs->sp)
#else
		get_user(*(u32 *)&regs->bp,
			 (u32 __user __force *)(unsigned long)(u32)regs->sp)
#endif
		) {

		/* User code screwed up. */
		local_irq_disable();
		regs->ax = -EFAULT;
		prepare_exit_to_usermode(regs);
		return 0;	/* Keep it simple: use IRET. */
	}

	/* Now this is just like a normal syscall. */
	do_syscall_32_irqs_on(regs);

#ifdef CONFIG_X86_64
	/*
	 * Opportunistic SYSRETL: if possible, try to return using SYSRETL.
	 * SYSRETL is available on all 64-bit CPUs, so we don't need to
	 * bother with SYSEXIT.
	 *
	 * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
	 * because the ECX fixup above will ensure that this is essentially
	 * never the case.
	 */
	return regs->cs == __USER32_CS && regs->ss == __USER_DS &&
		regs->ip == landing_pad &&
		(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF)) == 0;
#else
	/*
	 * Opportunistic SYSEXIT: if possible, try to return using SYSEXIT.
	 *
	 * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
	 * because the ECX fixup above will ensure that this is essentially
	 * never the case.
	 *
	 * We don't allow syscalls at all from VM86 mode, but we still
	 * need to check VM, because we might be returning from sys_vm86.
	 */
	return static_cpu_has(X86_FEATURE_SEP) &&
		regs->cs == __USER_CS && regs->ss == __USER_DS &&
		regs->ip == landing_pad &&
		(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF | X86_EFLAGS_VM)) == 0;
#endif
}
#endif
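
/*
 * Illustrative only, not part of the kernel: a minimal 32-bit userspace
 * sketch (build with -m32) that reaches do_int80_syscall_32() through the
 * legacy gate. eax carries the syscall number (__NR_write == 4 on i386)
 * and ebx/ecx/edx the first three arguments; the result comes back in
 * eax, as -errno on failure.
 *
 *	static long int80_write(int fd, const void *buf, unsigned long len)
 *	{
 *		long ret;
 *		asm volatile ("int $0x80"
 *			      : "=a" (ret)
 *			      : "a" (4), "b" (fd), "c" (buf), "d" (len)
 *			      : "memory");
 *		return ret;
 *	}
 */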