x86/entry/compat: Keep TS_COMPAT set during signal delivery
arch/x86/entry/common.c
/*
 * common.c - C code for kernel entry and exit
 * Copyright (c) 2015 Andrew Lutomirski
 * GPL v2
 *
 * Based on asm and ptrace code by many authors. The code here originated
 * in ptrace.c and signal.c.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>
#include <linux/export.h>
#include <linux/context_tracking.h>
#include <linux/user-return-notifier.h>
#include <linux/uprobes.h>

#include <asm/desc.h>
#include <asm/traps.h>
#include <asm/vdso.h>
#include <asm/uaccess.h>
#include <asm/cpufeature.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

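/*
 * pt_regs sits at the top of the kernel stack (just below
 * TOP_OF_KERNEL_STACK_PADDING) and thread_info at the bottom, so both
 * can be recovered from the same regs pointer.
 */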
static struct thread_info *pt_regs_to_thread_info(struct pt_regs *regs)
{
        unsigned long top_of_stack =
                (unsigned long)(regs + 1) + TOP_OF_KERNEL_STACK_PADDING;
        return (struct thread_info *)(top_of_stack - THREAD_SIZE);
}

#ifdef CONFIG_CONTEXT_TRACKING
/* Called on entry from user mode with IRQs off. */
__visible void enter_from_user_mode(void)
{
        CT_WARN_ON(ct_state() != CONTEXT_USER);
        user_exit();
}
#endif

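/*
 * audit_syscall_entry() takes the first four syscall arguments, which
 * live in different registers depending on the syscall ABI: rdi, rsi,
 * rdx, r10 for 64-bit syscalls vs. ebx, ecx, edx, esi for 32-bit ones.
 */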
static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
{
#ifdef CONFIG_X86_64
        if (arch == AUDIT_ARCH_X86_64) {
                audit_syscall_entry(regs->orig_ax, regs->di,
                                    regs->si, regs->dx, regs->r10);
        } else
#endif
        {
                audit_syscall_entry(regs->orig_ax, regs->bx,
                                    regs->cx, regs->dx, regs->si);
        }
}

/*
 * We can return 0 to resume the syscall or anything else to go to phase
 * 2. If we resume the syscall, we need to put something appropriate in
 * regs->orig_ax.
 *
 * NB: We don't have full pt_regs here, but regs->orig_ax and regs->ax
 * are fully functional.
 *
 * For phase 2's benefit, our return value is:
 * 0:               resume the syscall
 * 1:               go to phase 2; no seccomp phase 2 needed
 * anything else:   go to phase 2; pass return value to seccomp
 */
unsigned long syscall_trace_enter_phase1(struct pt_regs *regs, u32 arch)
{
        struct thread_info *ti = pt_regs_to_thread_info(regs);
        unsigned long ret = 0;
        u32 work;

        if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
                BUG_ON(regs != task_pt_regs(current));

        work = ACCESS_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;

#ifdef CONFIG_CONTEXT_TRACKING
        /*
         * If TIF_NOHZ is set, we are required to call user_exit() before
         * doing anything that could touch RCU.
         */
        if (work & _TIF_NOHZ) {
                enter_from_user_mode();
                work &= ~_TIF_NOHZ;
        }
#endif

#ifdef CONFIG_SECCOMP
        /*
         * Do seccomp first -- it should minimize exposure of other
         * code, and keeping seccomp fast is probably more valuable
         * than the rest of this.
         */
        if (work & _TIF_SECCOMP) {
                struct seccomp_data sd;

                sd.arch = arch;
                sd.nr = regs->orig_ax;
                sd.instruction_pointer = regs->ip;
#ifdef CONFIG_X86_64
                if (arch == AUDIT_ARCH_X86_64) {
                        sd.args[0] = regs->di;
                        sd.args[1] = regs->si;
                        sd.args[2] = regs->dx;
                        sd.args[3] = regs->r10;
                        sd.args[4] = regs->r8;
                        sd.args[5] = regs->r9;
                } else
#endif
                {
                        sd.args[0] = regs->bx;
                        sd.args[1] = regs->cx;
                        sd.args[2] = regs->dx;
                        sd.args[3] = regs->si;
                        sd.args[4] = regs->di;
                        sd.args[5] = regs->bp;
                }

                BUILD_BUG_ON(SECCOMP_PHASE1_OK != 0);
                BUILD_BUG_ON(SECCOMP_PHASE1_SKIP != 1);

                ret = seccomp_phase1(&sd);
                if (ret == SECCOMP_PHASE1_SKIP) {
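                        /*
                         * Skip the syscall: -1 is never a valid syscall
                         * number, so the dispatch check in the callers
                         * fails and we go straight to the exit work.
                         */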
                        regs->orig_ax = -1;
                        ret = 0;
                } else if (ret != SECCOMP_PHASE1_OK) {
                        return ret;  /* Go directly to phase 2 */
                }

                work &= ~_TIF_SECCOMP;
        }
#endif

        /* Do our best to finish without phase 2. */
        if (work == 0)
                return ret;  /* seccomp and/or nohz only (ret == 0 here) */

#ifdef CONFIG_AUDITSYSCALL
        if (work == _TIF_SYSCALL_AUDIT) {
                /*
                 * If there is no more work to be done except auditing,
                 * then audit in phase 1. Phase 2 always audits, so, if
                 * we audit here, then we can't go on to phase 2.
                 */
                do_audit_syscall_entry(regs, arch);
                return 0;
        }
#endif

        return 1;  /* Something is enabled that we can't handle in phase 1 */
}

/* Returns the syscall nr to run (which should match regs->orig_ax). */
long syscall_trace_enter_phase2(struct pt_regs *regs, u32 arch,
                                unsigned long phase1_result)
{
        struct thread_info *ti = pt_regs_to_thread_info(regs);
        long ret = 0;
        u32 work = ACCESS_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;

        if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
                BUG_ON(regs != task_pt_regs(current));

        /*
         * If we stepped into a sysenter/syscall insn, it trapped in
         * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
         * If user-mode had set TF itself, then it's still clear from
         * do_debug() and we need to set it again to restore the user
         * state. If we entered on the slow path, TF was already set.
         */
        if (work & _TIF_SINGLESTEP)
                regs->flags |= X86_EFLAGS_TF;

#ifdef CONFIG_SECCOMP
        /*
         * Call seccomp_phase2 before running the other hooks so that
         * they can see any changes made by a seccomp tracer.
         */
        if (phase1_result > 1 && seccomp_phase2(phase1_result)) {
                /* seccomp failures shouldn't expose any additional code. */
                return -1;
        }
#endif

        if (unlikely(work & _TIF_SYSCALL_EMU))
                ret = -1L;

        if ((ret || test_thread_flag(TIF_SYSCALL_TRACE)) &&
            tracehook_report_syscall_entry(regs))
                ret = -1L;

        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_enter(regs, regs->orig_ax);

        do_audit_syscall_entry(regs, arch);

        return ret ?: regs->orig_ax;
}

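/*
 * Combined entry hook for the C syscall paths below: run phase 1 and,
 * only if phase 1 asks for it, phase 2.
 */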
long syscall_trace_enter(struct pt_regs *regs)
{
        u32 arch = is_ia32_task() ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64;
        unsigned long phase1_result = syscall_trace_enter_phase1(regs, arch);

        if (phase1_result == 0)
                return regs->orig_ax;
        else
                return syscall_trace_enter_phase2(regs, arch, phase1_result);
}

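/* Flags that must be handled, then re-checked, before returning to user mode. */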
#define EXIT_TO_USERMODE_LOOP_FLAGS                             \
        (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |  \
         _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY)

static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
{
        /*
         * In order to return to user mode, we need to have IRQs off with
         * none of _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_USER_RETURN_NOTIFY,
         * _TIF_UPROBE, or _TIF_NEED_RESCHED set. Several of these flags
         * can be set at any time on preemptable kernels if we have IRQs on,
         * so we need to loop. Disabling preemption wouldn't help: doing the
         * work to clear some of the flags can sleep.
         */
        while (true) {
                /* We have work to do. */
                local_irq_enable();

                if (cached_flags & _TIF_NEED_RESCHED)
                        schedule();

                if (cached_flags & _TIF_UPROBE)
                        uprobe_notify_resume(regs);

                /* deal with pending signal delivery */
                if (cached_flags & _TIF_SIGPENDING)
                        do_signal(regs);

                if (cached_flags & _TIF_NOTIFY_RESUME) {
                        clear_thread_flag(TIF_NOTIFY_RESUME);
                        tracehook_notify_resume(regs);
                }

                if (cached_flags & _TIF_USER_RETURN_NOTIFY)
                        fire_user_return_notifiers();

                /* Disable IRQs and retry */
                local_irq_disable();

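                /* IRQs are off again; re-check for work flags set in the meantime. */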
                cached_flags = READ_ONCE(pt_regs_to_thread_info(regs)->flags);

                if (!(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
                        break;
        }
}

/* Called with IRQs disabled. */
__visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
{
        struct thread_info *ti = pt_regs_to_thread_info(regs);
        u32 cached_flags;

        if (IS_ENABLED(CONFIG_PROVE_LOCKING) && WARN_ON(!irqs_disabled()))
                local_irq_disable();

        lockdep_sys_exit();

        cached_flags = READ_ONCE(ti->flags);

        if (unlikely(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
                exit_to_usermode_loop(regs, cached_flags);

#ifdef CONFIG_COMPAT
        /*
         * Compat syscalls set TS_COMPAT. Make sure we clear it before
         * returning to user mode. We need to clear it *after* signal
         * handling, because syscall restart has a fixup for compat
         * syscalls. The fixup is exercised by the ptrace_syscall_32
         * selftest.
         */
        ti->status &= ~TS_COMPAT;
#endif

        user_enter();
}

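/* One-time work for syscall exit; see syscall_return_slowpath(). */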
#define SYSCALL_EXIT_WORK_FLAGS                         \
        (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT |     \
         _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)

static void syscall_slow_exit_work(struct pt_regs *regs, u32 cached_flags)
{
        bool step;

        audit_syscall_exit(regs);

        if (cached_flags & _TIF_SYSCALL_TRACEPOINT)
                trace_sys_exit(regs, regs->ax);

        /*
         * If TIF_SYSCALL_EMU is set, we only get here because of
         * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
         * We already reported this syscall instruction in
         * syscall_trace_enter().
         */
        step = unlikely(
                (cached_flags & (_TIF_SINGLESTEP | _TIF_SYSCALL_EMU))
                == _TIF_SINGLESTEP);
        if (step || cached_flags & _TIF_SYSCALL_TRACE)
                tracehook_report_syscall_exit(regs, step);
}

/*
 * Called with IRQs on and fully valid regs. Returns with IRQs off in a
 * state such that we can immediately switch to user mode.
 */
__visible inline void syscall_return_slowpath(struct pt_regs *regs)
{
        struct thread_info *ti = pt_regs_to_thread_info(regs);
        u32 cached_flags = READ_ONCE(ti->flags);

        CT_WARN_ON(ct_state() != CONTEXT_KERNEL);

        if (IS_ENABLED(CONFIG_PROVE_LOCKING) &&
            WARN(irqs_disabled(), "syscall %ld left IRQs disabled", regs->orig_ax))
                local_irq_enable();

        /*
         * First do one-time work. If these work items are enabled, we
         * want to run them exactly once per syscall exit with IRQs on.
         */
        if (unlikely(cached_flags & SYSCALL_EXIT_WORK_FLAGS))
                syscall_slow_exit_work(regs, cached_flags);

        local_irq_disable();
        prepare_exit_to_usermode(regs);
}

#ifdef CONFIG_X86_64
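/* Handles the 64-bit SYSCALL path; the asm entry code calls here with IRQs off. */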
__visible void do_syscall_64(struct pt_regs *regs)
{
        struct thread_info *ti = pt_regs_to_thread_info(regs);
        unsigned long nr = regs->orig_ax;

        local_irq_enable();

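        /* Only take the tracing/seccomp slow path if some work bit is set. */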
        if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY)
                nr = syscall_trace_enter(regs);

        /*
         * NB: Native and x32 syscalls are dispatched from the same
         * table. The only functional difference is the x32 bit in
         * regs->orig_ax, which changes the behavior of some syscalls.
         */
        if (likely((nr & __SYSCALL_MASK) < NR_syscalls)) {
                regs->ax = sys_call_table[nr & __SYSCALL_MASK](
                        regs->di, regs->si, regs->dx,
                        regs->r10, regs->r8, regs->r9);
        }

        syscall_return_slowpath(regs);
}
#endif

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
/*
 * Does a 32-bit syscall. Called with IRQs on and does all entry and
 * exit work and returns with IRQs off. This function is extremely hot
 * in workloads that use it, and it's usually called from
 * do_fast_syscall_32, so forcibly inline it to improve performance.
 */
#ifdef CONFIG_X86_32
/* 32-bit kernels use a trap gate for INT80, and the asm code calls here. */
__visible
#else
/* 64-bit kernels use do_syscall_32_irqs_off() instead. */
static
#endif
__always_inline void do_syscall_32_irqs_on(struct pt_regs *regs)
{
        struct thread_info *ti = pt_regs_to_thread_info(regs);
        unsigned int nr = (unsigned int)regs->orig_ax;

#ifdef CONFIG_IA32_EMULATION
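        /*
         * Mark the task as being in a compat syscall so that, e.g., the
         * signal code can apply the compat syscall-restart fixup;
         * prepare_exit_to_usermode() clears the flag again on the way out.
         */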
        ti->status |= TS_COMPAT;
#endif

        if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY) {
                /*
                 * Subtlety here: if ptrace pokes something larger than
                 * 2^32-1 into orig_ax, this truncates it. This may or
                 * may not be necessary, but it matches the old asm
                 * behavior.
                 */
                nr = syscall_trace_enter(regs);
        }

        if (likely(nr < IA32_NR_syscalls)) {
                /*
                 * It's possible that a 32-bit syscall implementation
                 * takes a 64-bit parameter but nonetheless assumes that
                 * the high bits are zero. Make sure we zero-extend all
                 * of the args.
                 */
                regs->ax = ia32_sys_call_table[nr](
                        (unsigned int)regs->bx, (unsigned int)regs->cx,
                        (unsigned int)regs->dx, (unsigned int)regs->si,
                        (unsigned int)regs->di, (unsigned int)regs->bp);
        }

        syscall_return_slowpath(regs);
}

#ifdef CONFIG_X86_64
/* Handles INT80 on 64-bit kernels */
__visible void do_syscall_32_irqs_off(struct pt_regs *regs)
{
        local_irq_enable();
        do_syscall_32_irqs_on(regs);
}
#endif

/* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */
__visible long do_fast_syscall_32(struct pt_regs *regs)
{
        /*
         * Called using the internal vDSO SYSENTER/SYSCALL32 calling
         * convention. Adjust regs so it looks like we entered using int80.
         */

        unsigned long landing_pad = (unsigned long)current->mm->context.vdso +
                vdso_image_32.sym_int80_landing_pad;

        /*
         * SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward
         * so that 'regs->ip -= 2' lands back on an int $0x80 instruction.
         * Fix it up.
         */
        regs->ip = landing_pad;

        /*
         * Fetch EBP from where the vDSO stashed it.
         *
         * WARNING: We are in CONTEXT_USER and RCU isn't paying attention!
         */
        local_irq_enable();
        if (
#ifdef CONFIG_X86_64
                /*
                 * Micro-optimization: the pointer we're following is explicitly
                 * 32 bits, so it can't be out of range.
                 */
                __get_user(*(u32 *)&regs->bp,
                           (u32 __user __force *)(unsigned long)(u32)regs->sp)
#else
                get_user(*(u32 *)&regs->bp,
                         (u32 __user __force *)(unsigned long)(u32)regs->sp)
#endif
            ) {

                /* User code screwed up. */
                local_irq_disable();
                regs->ax = -EFAULT;
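                /*
                 * We never called enter_from_user_mode() on this path,
                 * but prepare_exit_to_usermode() ends with user_enter(),
                 * so balance the context-tracking state first.
                 */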
#ifdef CONFIG_CONTEXT_TRACKING
                enter_from_user_mode();
#endif
                prepare_exit_to_usermode(regs);
                return 0;       /* Keep it simple: use IRET. */
        }

        /* Now this is just like a normal syscall. */
        do_syscall_32_irqs_on(regs);

#ifdef CONFIG_X86_64
        /*
         * Opportunistic SYSRETL: if possible, try to return using SYSRETL.
         * SYSRETL is available on all 64-bit CPUs, so we don't need to
         * bother with SYSEXIT.
         *
         * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
         * because the ECX fixup above will ensure that this is essentially
         * never the case.
         */
        return regs->cs == __USER32_CS && regs->ss == __USER_DS &&
                regs->ip == landing_pad &&
                (regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF)) == 0;
#else
        /*
         * Opportunistic SYSEXIT: if possible, try to return using SYSEXIT.
         *
         * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
         * because the ECX fixup above will ensure that this is essentially
         * never the case.
         *
         * We don't allow syscalls at all from VM86 mode, but we still
         * need to check VM, because we might be returning from sys_vm86.
         */
        return static_cpu_has(X86_FEATURE_SEP) &&
                regs->cs == __USER_CS && regs->ss == __USER_DS &&
                regs->ip == landing_pad &&
                (regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF | X86_EFLAGS_VM)) == 0;
#endif
}
#endif