/*
 * common.c - C code for kernel entry and exit
 * Copyright (c) 2015 Andrew Lutomirski
 * GPL v2
 *
 * Based on asm and ptrace code by many authors.  The code here originated
 * in ptrace.c and signal.c.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>
#include <linux/export.h>
#include <linux/context_tracking.h>
#include <linux/user-return-notifier.h>
#include <linux/uprobes.h>

#include <asm/desc.h>
#include <asm/traps.h>
#include <asm/vdso.h>
#include <asm/uaccess.h>
#include <asm/cpufeature.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

static struct thread_info *pt_regs_to_thread_info(struct pt_regs *regs)
{
	unsigned long top_of_stack =
		(unsigned long)(regs + 1) + TOP_OF_KERNEL_STACK_PADDING;
	return (struct thread_info *)(top_of_stack - THREAD_SIZE);
}
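
/*
 * Sketch of the stack layout this arithmetic assumes (diagram added for
 * illustration; the padding is TOP_OF_KERNEL_STACK_PADDING, which is
 * zero on some configs):
 *
 *	top of stack ->	+------------------------------+
 *			| TOP_OF_KERNEL_STACK_PADDING  |
 *			+------------------------------+
 *			| struct pt_regs               | <- regs; regs + 1
 *			+------------------------------+     is its top
 *			| rest of the kernel stack     |
 *			+------------------------------+
 *			| struct thread_info           | <- top - THREAD_SIZE
 *			+------------------------------+
 *
 * (regs + 1) points just past the saved registers; adding the padding
 * gives the top of the THREAD_SIZE-aligned stack area, and thread_info
 * lives at its bottom on kernels of this era.
 */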

#ifdef CONFIG_CONTEXT_TRACKING
/* Called on entry from user mode with IRQs off. */
__visible void enter_from_user_mode(void)
{
	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit();
}
#else
static inline void enter_from_user_mode(void) {}
#endif

static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
{
#ifdef CONFIG_X86_64
	if (arch == AUDIT_ARCH_X86_64) {
		audit_syscall_entry(regs->orig_ax, regs->di,
				    regs->si, regs->dx, regs->r10);
	} else
#endif
	{
		audit_syscall_entry(regs->orig_ax, regs->bx,
				    regs->cx, regs->dx, regs->si);
	}
}

/*
 * We can return 0 to resume the syscall or anything else to go to phase
 * 2.  If we resume the syscall, we need to put something appropriate in
 * regs->orig_ax.
 *
 * NB: We don't have full pt_regs here, but regs->orig_ax and regs->ax
 * are fully functional.
 *
 * For phase 2's benefit, our return value is:
 *	0:		resume the syscall
 *	1:		go to phase 2; no seccomp phase 2 needed
 *	anything else:	go to phase 2; pass return value to seccomp
 */
unsigned long syscall_trace_enter_phase1(struct pt_regs *regs, u32 arch)
{
	struct thread_info *ti = pt_regs_to_thread_info(regs);
	unsigned long ret = 0;
	u32 work;

	if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
		BUG_ON(regs != task_pt_regs(current));

	work = ACCESS_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;

#ifdef CONFIG_SECCOMP
	/*
	 * Do seccomp first -- it should minimize exposure of other
	 * code, and keeping seccomp fast is probably more valuable
	 * than the rest of this.
	 */
	if (work & _TIF_SECCOMP) {
		struct seccomp_data sd;

		sd.arch = arch;
		sd.nr = regs->orig_ax;
		sd.instruction_pointer = regs->ip;
#ifdef CONFIG_X86_64
		if (arch == AUDIT_ARCH_X86_64) {
			sd.args[0] = regs->di;
			sd.args[1] = regs->si;
			sd.args[2] = regs->dx;
			sd.args[3] = regs->r10;
			sd.args[4] = regs->r8;
			sd.args[5] = regs->r9;
		} else
#endif
		{
			sd.args[0] = regs->bx;
			sd.args[1] = regs->cx;
			sd.args[2] = regs->dx;
			sd.args[3] = regs->si;
			sd.args[4] = regs->di;
			sd.args[5] = regs->bp;
		}

		BUILD_BUG_ON(SECCOMP_PHASE1_OK != 0);
		BUILD_BUG_ON(SECCOMP_PHASE1_SKIP != 1);

		ret = seccomp_phase1(&sd);
		if (ret == SECCOMP_PHASE1_SKIP) {
			regs->orig_ax = -1;
			ret = 0;
		} else if (ret != SECCOMP_PHASE1_OK) {
			return ret;  /* Go directly to phase 2 */
		}

		work &= ~_TIF_SECCOMP;
	}
#endif

	/* Do our best to finish without phase 2. */
	if (work == 0)
		return ret;  /* seccomp and/or nohz only (ret == 0 here) */

#ifdef CONFIG_AUDITSYSCALL
	if (work == _TIF_SYSCALL_AUDIT) {
		/*
		 * If there is no more work to be done except auditing,
		 * then audit in phase 1.  Phase 2 always audits, so, if
		 * we audit here, then we can't go on to phase 2.
		 */
		do_audit_syscall_entry(regs, arch);
		return 0;
	}
#endif

	return 1;  /* Something is enabled that we can't handle in phase 1 */
}
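
/*
 * Example outcomes of phase 1 (a reading of the logic above, not new
 * behavior):
 *	- no entry-work flags set:	returns 0; the syscall runs normally
 *	- seccomp alone, filter allows:	returns 0 after seccomp_phase1()
 *	- audit alone:			audits here and returns 0
 *	- ptrace, tracepoints, emu:	returns 1 (or a seccomp phase-1
 *	  value) and the caller must run syscall_trace_enter_phase2()
 */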

/* Returns the syscall nr to run (which should match regs->orig_ax). */
long syscall_trace_enter_phase2(struct pt_regs *regs, u32 arch,
				unsigned long phase1_result)
{
	struct thread_info *ti = pt_regs_to_thread_info(regs);
	long ret = 0;
	u32 work = ACCESS_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;

	if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
		BUG_ON(regs != task_pt_regs(current));

#ifdef CONFIG_SECCOMP
	/*
	 * Call seccomp_phase2 before running the other hooks so that
	 * they can see any changes made by a seccomp tracer.
	 */
	if (phase1_result > 1 && seccomp_phase2(phase1_result)) {
		/* seccomp failures shouldn't expose any additional code. */
		return -1;
	}
#endif

	if (unlikely(work & _TIF_SYSCALL_EMU))
		ret = -1L;

	if ((ret || test_thread_flag(TIF_SYSCALL_TRACE)) &&
	    tracehook_report_syscall_entry(regs))
		ret = -1L;

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->orig_ax);

	do_audit_syscall_entry(regs, arch);

	return ret ?: regs->orig_ax;
}

long syscall_trace_enter(struct pt_regs *regs)
{
	u32 arch = in_ia32_syscall() ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64;
	unsigned long phase1_result = syscall_trace_enter_phase1(regs, arch);

	if (phase1_result == 0)
		return regs->orig_ax;
	else
		return syscall_trace_enter_phase2(regs, arch, phase1_result);
}

#define EXIT_TO_USERMODE_LOOP_FLAGS				\
	(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |	\
	 _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY)

static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
{
	/*
	 * In order to return to user mode, we need to have IRQs off with
	 * none of _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_USER_RETURN_NOTIFY,
	 * _TIF_UPROBE, or _TIF_NEED_RESCHED set.  Several of these flags
	 * can be set at any time on preemptable kernels if we have IRQs on,
	 * so we need to loop.  Disabling preemption wouldn't help: doing the
	 * work to clear some of the flags can sleep.
	 */
	while (true) {
		/* We have work to do. */
		local_irq_enable();

		if (cached_flags & _TIF_NEED_RESCHED)
			schedule();

		if (cached_flags & _TIF_UPROBE)
			uprobe_notify_resume(regs);

		/* deal with pending signal delivery */
		if (cached_flags & _TIF_SIGPENDING)
			do_signal(regs);

		if (cached_flags & _TIF_NOTIFY_RESUME) {
			clear_thread_flag(TIF_NOTIFY_RESUME);
			tracehook_notify_resume(regs);
		}

		if (cached_flags & _TIF_USER_RETURN_NOTIFY)
			fire_user_return_notifiers();

		/* Disable IRQs and retry */
		local_irq_disable();

		cached_flags = READ_ONCE(pt_regs_to_thread_info(regs)->flags);

		if (!(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
			break;
	}
}
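
/*
 * Illustration of the race the loop closes, derived from the comment
 * above (the delivery detail is our reading, not stated in this file):
 * another CPU can set TIF_SIGPENDING right after do_signal() returns,
 * while IRQs are still on here.  The re-read of ti->flags with IRQs
 * disabled catches that late arrival.  Once the flags are seen clean
 * with IRQs off, a later setter has to interrupt us (e.g. via IPI),
 * which can only be taken after the return to user mode, at which point
 * the kernel is simply re-entered and the flag handled then.
 */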

/* Called with IRQs disabled. */
__visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
{
	struct thread_info *ti = pt_regs_to_thread_info(regs);
	u32 cached_flags;

	if (IS_ENABLED(CONFIG_PROVE_LOCKING) && WARN_ON(!irqs_disabled()))
		local_irq_disable();

	lockdep_sys_exit();

	cached_flags = READ_ONCE(ti->flags);

	if (unlikely(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
		exit_to_usermode_loop(regs, cached_flags);

#ifdef CONFIG_COMPAT
	/*
	 * Compat syscalls set TS_COMPAT.  Make sure we clear it before
	 * returning to user mode.  We need to clear it *after* signal
	 * handling, because syscall restart has a fixup for compat
	 * syscalls.  The fixup is exercised by the ptrace_syscall_32
	 * selftest.
	 */
	ti->status &= ~TS_COMPAT;
#endif

	user_enter();
}

#define SYSCALL_EXIT_WORK_FLAGS				\
	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT |	\
	 _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)

static void syscall_slow_exit_work(struct pt_regs *regs, u32 cached_flags)
{
	bool step;

	audit_syscall_exit(regs);

	if (cached_flags & _TIF_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, regs->ax);

	/*
	 * If TIF_SYSCALL_EMU is set, we only get here because of
	 * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
	 * We already reported this syscall instruction in
	 * syscall_trace_enter().
	 */
	step = unlikely(
		(cached_flags & (_TIF_SINGLESTEP | _TIF_SYSCALL_EMU))
		== _TIF_SINGLESTEP);
	if (step || cached_flags & _TIF_SYSCALL_TRACE)
		tracehook_report_syscall_exit(regs, step);
}
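
/*
 * Truth table for the 'step' computation above (derived from the
 * expression, not new behavior):
 *
 *	_TIF_SINGLESTEP	_TIF_SYSCALL_EMU	step
 *		0		0		false
 *		0		1		false
 *		1		0		true  (PTRACE_SINGLESTEP)
 *		1		1		false (PTRACE_SYSEMU_SINGLESTEP)
 */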

/*
 * Called with IRQs on and fully valid regs.  Returns with IRQs off in a
 * state such that we can immediately switch to user mode.
 */
__visible inline void syscall_return_slowpath(struct pt_regs *regs)
{
	struct thread_info *ti = pt_regs_to_thread_info(regs);
	u32 cached_flags = READ_ONCE(ti->flags);

	CT_WARN_ON(ct_state() != CONTEXT_KERNEL);

	if (IS_ENABLED(CONFIG_PROVE_LOCKING) &&
	    WARN(irqs_disabled(), "syscall %ld left IRQs disabled", regs->orig_ax))
		local_irq_enable();

	/*
	 * First do one-time work.  If these work items are enabled, we
	 * want to run them exactly once per syscall exit with IRQs on.
	 */
	if (unlikely(cached_flags & SYSCALL_EXIT_WORK_FLAGS))
		syscall_slow_exit_work(regs, cached_flags);

	local_irq_disable();
	prepare_exit_to_usermode(regs);
}

#ifdef CONFIG_X86_64
__visible void do_syscall_64(struct pt_regs *regs)
{
	struct thread_info *ti = pt_regs_to_thread_info(regs);
	unsigned long nr = regs->orig_ax;

	enter_from_user_mode();
	local_irq_enable();

	if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY)
		nr = syscall_trace_enter(regs);

	/*
	 * NB: Native and x32 syscalls are dispatched from the same
	 * table.  The only functional difference is the x32 bit in
	 * regs->orig_ax, which changes the behavior of some syscalls.
	 */
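	/*
	 * Assumed detail from asm/unistd.h, not this file: with
	 * CONFIG_X86_X32_ABI, __SYSCALL_MASK is ~__X32_SYSCALL_BIT, so the
	 * masking below strips the x32 marker bit before indexing the
	 * table; without x32 support the mask is ~0 and changes nothing.
	 */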
	if (likely((nr & __SYSCALL_MASK) < NR_syscalls)) {
		regs->ax = sys_call_table[nr & __SYSCALL_MASK](
			regs->di, regs->si, regs->dx,
			regs->r10, regs->r8, regs->r9);
	}

	syscall_return_slowpath(regs);
}
#endif

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
/*
 * Does a 32-bit syscall.  Called with IRQs on in CONTEXT_KERNEL.  Does
 * all entry and exit work and returns with IRQs off.  This function is
 * extremely hot in workloads that use it, and it's usually called from
 * do_fast_syscall_32, so forcibly inline it to improve performance.
 */
static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs)
{
	struct thread_info *ti = pt_regs_to_thread_info(regs);
	unsigned int nr = (unsigned int)regs->orig_ax;

#ifdef CONFIG_IA32_EMULATION
	ti->status |= TS_COMPAT;
#endif

	if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY) {
		/*
		 * Subtlety here: if ptrace pokes something larger than
		 * 2^32-1 into orig_ax, this truncates it.  This may or
		 * may not be necessary, but it matches the old asm
		 * behavior.
		 */
		nr = syscall_trace_enter(regs);
	}

	if (likely(nr < IA32_NR_syscalls)) {
		/*
		 * It's possible that a 32-bit syscall implementation
		 * takes a 64-bit parameter but nonetheless assumes that
		 * the high bits are zero.  Make sure we zero-extend all
		 * of the args.
		 */
		regs->ax = ia32_sys_call_table[nr](
			(unsigned int)regs->bx, (unsigned int)regs->cx,
			(unsigned int)regs->dx, (unsigned int)regs->si,
			(unsigned int)regs->di, (unsigned int)regs->bp);
	}

	syscall_return_slowpath(regs);
}

/* Handles int $0x80 */
__visible void do_int80_syscall_32(struct pt_regs *regs)
{
	enter_from_user_mode();
	local_irq_enable();
	do_syscall_32_irqs_on(regs);
}

/* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */
__visible long do_fast_syscall_32(struct pt_regs *regs)
{
	/*
	 * Called using the internal vDSO SYSENTER/SYSCALL32 calling
	 * convention.  Adjust regs so it looks like we entered using int80.
	 */

	unsigned long landing_pad = (unsigned long)current->mm->context.vdso +
		vdso_image_32.sym_int80_landing_pad;

	/*
	 * SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward
	 * so that 'regs->ip -= 2' lands back on an int $0x80 instruction.
	 * Fix it up.
	 */
	regs->ip = landing_pad;

	enter_from_user_mode();

	local_irq_enable();

	/* Fetch EBP from where the vDSO stashed it. */
	if (
#ifdef CONFIG_X86_64
		/*
		 * Micro-optimization: the pointer we're following is explicitly
		 * 32 bits, so it can't be out of range.
		 */
		__get_user(*(u32 *)&regs->bp,
			   (u32 __user __force *)(unsigned long)(u32)regs->sp)
#else
		get_user(*(u32 *)&regs->bp,
			 (u32 __user __force *)(unsigned long)(u32)regs->sp)
#endif
		) {

		/* User code screwed up. */
		local_irq_disable();
		regs->ax = -EFAULT;
		prepare_exit_to_usermode(regs);
		return 0;	/* Keep it simple: use IRET. */
	}

	/* Now this is just like a normal syscall. */
	do_syscall_32_irqs_on(regs);

#ifdef CONFIG_X86_64
	/*
	 * Opportunistic SYSRETL: if possible, try to return using SYSRETL.
	 * SYSRETL is available on all 64-bit CPUs, so we don't need to
	 * bother with SYSEXIT.
	 *
	 * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
	 * because the ECX fixup above will ensure that this is essentially
	 * never the case.
	 */
	return regs->cs == __USER32_CS && regs->ss == __USER_DS &&
		regs->ip == landing_pad &&
		(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF)) == 0;
#else
	/*
	 * Opportunistic SYSEXIT: if possible, try to return using SYSEXIT.
	 *
	 * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
	 * because the ECX fixup above will ensure that this is essentially
	 * never the case.
	 *
	 * We don't allow syscalls at all from VM86 mode, but we still
	 * need to check VM, because we might be returning from sys_vm86.
	 */
	return static_cpu_has(X86_FEATURE_SEP) &&
		regs->cs == __USER_CS && regs->ss == __USER_DS &&
		regs->ip == landing_pad &&
		(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF | X86_EFLAGS_VM)) == 0;
#endif
}
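
/*
 * Note on the callers (an assumption about the asm side, which is not
 * visible in this file): the SYSENTER/SYSCALL32 entry stubs in the
 * entry_*.S files are expected to test this return value and use
 * SYSEXIT/SYSRETL when it is 1, falling back to the slower IRET path
 * when it is 0.
 */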
#endif