Merge branch 'tracing/core' into tracing/hw-breakpoints
deliverable/linux.git: arch/x86/kernel/ptrace.c
/* By Ross Biro 1/23/92 */
/*
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * BTS tracing
 *	Markus Metzger <markus.t.metzger@intel.com>, Dec 2007
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>
#include <linux/workqueue.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/debugreg.h>
#include <asm/ldt.h>
#include <asm/desc.h>
#include <asm/prctl.h>
#include <asm/proto.h>
#include <asm/ds.h>
#include <asm/hw_breakpoint.h>

#include "tls.h"

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

enum x86_regset {
	REGSET_GENERAL,
	REGSET_FP,
	REGSET_XFP,
	REGSET_IOPERM64 = REGSET_XFP,
	REGSET_TLS,
	REGSET_IOPERM32,
};

/*
 * This does not yet catch signals sent when the child dies;
 * that is handled in exit.c and in signal.c.
 */

/*
 * Determines which flags the user has access to [1 = access, 0 = no access].
 */
#define FLAG_MASK_32		((unsigned long)			\
				 (X86_EFLAGS_CF | X86_EFLAGS_PF |	\
				  X86_EFLAGS_AF | X86_EFLAGS_ZF |	\
				  X86_EFLAGS_SF | X86_EFLAGS_TF |	\
				  X86_EFLAGS_DF | X86_EFLAGS_OF |	\
				  X86_EFLAGS_RF | X86_EFLAGS_AC))

/*
 * Determines whether a value may be installed in a segment register.
 */
static inline bool invalid_selector(u16 value)
{
	return unlikely(value != 0 && (value & SEGMENT_RPL_MASK) != USER_RPL);
}
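
/*
 * Illustrative examples (not part of the original source): with
 * USER_RPL == 3 and SEGMENT_RPL_MASK == 3, invalid_selector() accepts
 * the null selector 0 and any selector whose RPL bits are 3, e.g.
 * 0x2b or 0x33, while a selector such as 0x10 (RPL 0) is rejected so
 * a traced task can never be handed a kernel-privilege selector.
 */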

#ifdef CONFIG_X86_32

#define FLAG_MASK		FLAG_MASK_32

static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
	return &regs->bx + (regno >> 2);
}

static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int retval;
	if (offset != offsetof(struct user_regs_struct, gs))
		retval = *pt_regs_access(task_pt_regs(task), offset);
	else {
		if (task == current)
			retval = get_user_gs(task_pt_regs(task));
		else
			retval = task_user_gs(task);
	}
	return retval;
}

static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	/*
	 * For %cs and %ss we cannot permit a null selector.
	 * We can permit a bogus selector as long as it has USER_RPL.
	 * Null selectors are fine for other segment registers, but
	 * we will never get back to user mode with invalid %cs or %ss
	 * and will take the trap in iret instead.  Much code relies
	 * on user_mode() to distinguish a user trap frame (which can
	 * safely use invalid selectors) from a kernel trap frame.
	 */
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ss):
		if (unlikely(value == 0))
			return -EIO;

	default:
		*pt_regs_access(task_pt_regs(task), offset) = value;
		break;

	case offsetof(struct user_regs_struct, gs):
		if (task == current)
			set_user_gs(task_pt_regs(task), value);
		else
			task_user_gs(task) = value;
	}

	return 0;
}

#else /* CONFIG_X86_64 */

#define FLAG_MASK		(FLAG_MASK_32 | X86_EFLAGS_NT)

static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long offset)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, r15) != 0);
	return &regs->r15 + (offset / sizeof(regs->r15));
}

static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int seg;

	switch (offset) {
	case offsetof(struct user_regs_struct, fs):
		if (task == current) {
			/* Older gas can't assemble movq %?s,%r?? */
			asm("movl %%fs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.fsindex;
	case offsetof(struct user_regs_struct, gs):
		if (task == current) {
			asm("movl %%gs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.gsindex;
	case offsetof(struct user_regs_struct, ds):
		if (task == current) {
			asm("movl %%ds,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.ds;
	case offsetof(struct user_regs_struct, es):
		if (task == current) {
			asm("movl %%es,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.es;

	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ss):
		break;
	}
	return *pt_regs_access(task_pt_regs(task), offset);
}

static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	switch (offset) {
	case offsetof(struct user_regs_struct, fs):
		/*
		 * If this is setting fs as for normal 64-bit use but
		 * setting fs_base has implicitly changed it, leave it.
		 */
		if ((value == FS_TLS_SEL && task->thread.fsindex == 0 &&
		     task->thread.fs != 0) ||
		    (value == 0 && task->thread.fsindex == FS_TLS_SEL &&
		     task->thread.fs == 0))
			break;
		task->thread.fsindex = value;
		if (task == current)
			loadsegment(fs, task->thread.fsindex);
		break;
	case offsetof(struct user_regs_struct, gs):
		/*
		 * If this is setting gs as for normal 64-bit use but
		 * setting gs_base has implicitly changed it, leave it.
		 */
		if ((value == GS_TLS_SEL && task->thread.gsindex == 0 &&
		     task->thread.gs != 0) ||
		    (value == 0 && task->thread.gsindex == GS_TLS_SEL &&
		     task->thread.gs == 0))
			break;
		task->thread.gsindex = value;
		if (task == current)
			load_gs_index(task->thread.gsindex);
		break;
	case offsetof(struct user_regs_struct, ds):
		task->thread.ds = value;
		if (task == current)
			loadsegment(ds, task->thread.ds);
		break;
	case offsetof(struct user_regs_struct, es):
		task->thread.es = value;
		if (task == current)
			loadsegment(es, task->thread.es);
		break;

		/*
		 * Can't actually change these in 64-bit mode.
		 */
	case offsetof(struct user_regs_struct, cs):
		if (unlikely(value == 0))
			return -EIO;
#ifdef CONFIG_IA32_EMULATION
		if (test_tsk_thread_flag(task, TIF_IA32))
			task_pt_regs(task)->cs = value;
#endif
		break;
	case offsetof(struct user_regs_struct, ss):
		if (unlikely(value == 0))
			return -EIO;
#ifdef CONFIG_IA32_EMULATION
		if (test_tsk_thread_flag(task, TIF_IA32))
			task_pt_regs(task)->ss = value;
#endif
		break;
	}

	return 0;
}

#endif	/* CONFIG_X86_32 */

static unsigned long get_flags(struct task_struct *task)
{
	unsigned long retval = task_pt_regs(task)->flags;

	/*
	 * If the debugger set TF, hide it from the readout.
	 */
	if (test_tsk_thread_flag(task, TIF_FORCED_TF))
		retval &= ~X86_EFLAGS_TF;

	return retval;
}

static int set_flags(struct task_struct *task, unsigned long value)
{
	struct pt_regs *regs = task_pt_regs(task);

	/*
	 * If the user value contains TF, mark that
	 * it was not "us" (the debugger) that set it.
	 * If not, make sure it stays set if we had set it ourselves.
	 */
	if (value & X86_EFLAGS_TF)
		clear_tsk_thread_flag(task, TIF_FORCED_TF);
	else if (test_tsk_thread_flag(task, TIF_FORCED_TF))
		value |= X86_EFLAGS_TF;

	regs->flags = (regs->flags & ~FLAG_MASK) | (value & FLAG_MASK);

	return 0;
}
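
/*
 * Illustrative example (not from the original source): if a debugger
 * that is single-stepping a task (TIF_FORCED_TF set) writes an eflags
 * value without X86_EFLAGS_TF, set_flags() silently re-adds TF so the
 * stepping keeps working, while get_flags() hides that same TF bit
 * from the debugger's reads.  Only bits in FLAG_MASK are ever written.
 */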

static int putreg(struct task_struct *child,
		  unsigned long offset, unsigned long value)
{
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ds):
	case offsetof(struct user_regs_struct, es):
	case offsetof(struct user_regs_struct, fs):
	case offsetof(struct user_regs_struct, gs):
	case offsetof(struct user_regs_struct, ss):
		return set_segment_reg(child, offset, value);

	case offsetof(struct user_regs_struct, flags):
		return set_flags(child, value);

#ifdef CONFIG_X86_64
	/*
	 * Orig_ax is really just a flag with small positive and
	 * negative values, so make sure to always sign-extend it
	 * from 32 bits so that it works correctly regardless of
	 * whether we come from a 32-bit environment or not.
	 */
	case offsetof(struct user_regs_struct, orig_ax):
		value = (long) (s32) value;
		break;

	case offsetof(struct user_regs_struct, fs_base):
		if (value >= TASK_SIZE_OF(child))
			return -EIO;
		/*
		 * When changing the segment base, use do_arch_prctl
		 * to set either thread.fs or thread.fsindex and the
		 * corresponding GDT slot.
		 */
		if (child->thread.fs != value)
			return do_arch_prctl(child, ARCH_SET_FS, value);
		return 0;
	case offsetof(struct user_regs_struct, gs_base):
		/*
		 * Exactly the same here as the %fs handling above.
		 */
		if (value >= TASK_SIZE_OF(child))
			return -EIO;
		if (child->thread.gs != value)
			return do_arch_prctl(child, ARCH_SET_GS, value);
		return 0;
#endif
	}

	*pt_regs_access(task_pt_regs(child), offset) = value;
	return 0;
}

static unsigned long getreg(struct task_struct *task, unsigned long offset)
{
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ds):
	case offsetof(struct user_regs_struct, es):
	case offsetof(struct user_regs_struct, fs):
	case offsetof(struct user_regs_struct, gs):
	case offsetof(struct user_regs_struct, ss):
		return get_segment_reg(task, offset);

	case offsetof(struct user_regs_struct, flags):
		return get_flags(task);

#ifdef CONFIG_X86_64
	case offsetof(struct user_regs_struct, fs_base): {
		/*
		 * do_arch_prctl may have used a GDT slot instead of
		 * the MSR.  To userland, it appears the same either
		 * way, except the %fs segment selector might not be 0.
		 */
		unsigned int seg = task->thread.fsindex;
		if (task->thread.fs != 0)
			return task->thread.fs;
		if (task == current)
			asm("movl %%fs,%0" : "=r" (seg));
		if (seg != FS_TLS_SEL)
			return 0;
		return get_desc_base(&task->thread.tls_array[FS_TLS]);
	}
	case offsetof(struct user_regs_struct, gs_base): {
		/*
		 * Exactly the same here as the %fs handling above.
		 */
		unsigned int seg = task->thread.gsindex;
		if (task->thread.gs != 0)
			return task->thread.gs;
		if (task == current)
			asm("movl %%gs,%0" : "=r" (seg));
		if (seg != GS_TLS_SEL)
			return 0;
		return get_desc_base(&task->thread.tls_array[GS_TLS]);
	}
#endif
	}

	return *pt_regs_access(task_pt_regs(task), offset);
}

static int genregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	if (kbuf) {
		unsigned long *k = kbuf;
		while (count > 0) {
			*k++ = getreg(target, pos);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		unsigned long __user *u = ubuf;
		while (count > 0) {
			if (__put_user(getreg(target, pos), u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	return 0;
}

static int genregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	int ret = 0;
	if (kbuf) {
		const unsigned long *k = kbuf;
		while (count > 0 && !ret) {
			ret = putreg(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const unsigned long __user *u = ubuf;
		while (count > 0 && !ret) {
			unsigned long word;
			ret = __get_user(word, u++);
			if (ret)
				break;
			ret = putreg(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return ret;
}
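
/*
 * Illustrative note (not from the original source): pos is a byte
 * offset into struct user_regs_struct and advances one word at a
 * time, so a full PTRACE_GETREGS read is equivalent to
 *
 *	for (pos = 0; pos < sizeof(struct user_regs_struct);
 *	     pos += sizeof(long))
 *		*k++ = getreg(target, pos);
 */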

/*
 * Decode the length and type bits for a particular breakpoint as
 * stored in debug register 7.  Return the "enabled" status.
 */
static int decode_dr7(unsigned long dr7, int bpnum, unsigned *len,
		      unsigned *type)
{
	int bp_info = dr7 >> (DR_CONTROL_SHIFT + bpnum * DR_CONTROL_SIZE);

	*len = (bp_info & 0xc) | 0x40;
	*type = (bp_info & 0x3) | 0x80;
	return (dr7 >> (bpnum * DR_ENABLE_SIZE)) & 0x3;
}
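
/*
 * Worked example (illustrative, assuming DR_CONTROL_SHIFT == 16,
 * DR_CONTROL_SIZE == 4 and DR_ENABLE_SIZE == 2 as on x86): for
 * dr7 == 0x000d0001 and bpnum == 0, bp_info is 0xd, so *len becomes
 * 0x4c (a 4-byte breakpoint), *type becomes 0x81 (a write breakpoint)
 * and the return value is 1 (locally enabled).
 */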

static void ptrace_triggered(struct hw_breakpoint *bp, struct pt_regs *regs)
{
	struct thread_struct *thread = &(current->thread);
	int i;

	/*
	 * Store in the virtual DR6 register the fact that the breakpoint
	 * was hit so the thread's debugger will see it.
	 */
	for (i = 0; i < hbp_kernel_pos; i++)
		/*
		 * We check bp->info.address against the address stored
		 * in the thread's hbp structure, and not debugreg[i],
		 * to ensure that the corresponding bit for 'i' in the
		 * DR7 register is enabled.
		 */
		if (bp->info.address == thread->hbp[i]->info.address)
			break;

	thread->debugreg6 |= (DR_TRAP0 << i);
}

/*
 * Handle ptrace writes to debug register 7.
 */
static int ptrace_write_dr7(struct task_struct *tsk, unsigned long data)
{
	struct thread_struct *thread = &(tsk->thread);
	unsigned long old_dr7 = thread->debugreg7;
	int i, orig_ret = 0, rc = 0;
	int enabled, second_pass = 0;
	unsigned len, type;
	struct hw_breakpoint *bp;

	data &= ~DR_CONTROL_RESERVED;
restore:
	/*
	 * Loop through all the hardware breakpoints, making the
	 * appropriate changes to each.
	 */
	for (i = 0; i < HBP_NUM; i++) {
		enabled = decode_dr7(data, i, &len, &type);
		bp = thread->hbp[i];

		if (!enabled) {
			if (bp) {
				/*
				 * Don't unregister the breakpoints right away,
				 * unless all register_user_hw_breakpoint()
				 * requests have succeeded.  This prevents
				 * any window of opportunity for debug
				 * register grabbing by other users.
				 */
				if (!second_pass)
					continue;
				unregister_user_hw_breakpoint(tsk, bp);
				kfree(bp);
			}
			continue;
		}
		if (!bp) {
			rc = -ENOMEM;
			bp = kzalloc(sizeof(struct hw_breakpoint), GFP_KERNEL);
			if (bp) {
				bp->info.address = thread->debugreg[i];
				bp->triggered = ptrace_triggered;
				bp->info.len = len;
				bp->info.type = type;
				rc = register_user_hw_breakpoint(tsk, bp);
				if (rc)
					kfree(bp);
			}
		} else
			rc = modify_user_hw_breakpoint(tsk, bp);
		if (rc)
			break;
	}
	/*
	 * Make a second pass to free the remaining unused breakpoints
	 * or to restore the original breakpoints if an error occurred.
	 */
	if (!second_pass) {
		second_pass = 1;
		if (rc < 0) {
			orig_ret = rc;
			data = old_dr7;
		}
		goto restore;
	}
	return ((orig_ret < 0) ? orig_ret : rc);
}
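
/*
 * Usage sketch (illustrative, not part of the original source): a
 * debugger arms breakpoint 0 by first poking the address and then
 * writing DR7, which lands here via ptrace_set_debugreg():
 *
 *	ptrace(PTRACE_POKEUSER, pid,
 *	       offsetof(struct user, u_debugreg[0]), addr);
 *	ptrace(PTRACE_POKEUSER, pid,
 *	       offsetof(struct user, u_debugreg[7]), 0x000d0001);
 */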

/*
 * Handle PTRACE_PEEKUSR calls for the debug register area.
 */
static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n)
{
	struct thread_struct *thread = &(tsk->thread);
	unsigned long val = 0;

	if (n < HBP_NUM)
		val = thread->debugreg[n];
	else if (n == 6)
		val = thread->debugreg6;
	else if (n == 7)
		val = thread->debugreg7;
	return val;
}

/*
 * Handle PTRACE_POKEUSR calls for the debug register area.
 */
int ptrace_set_debugreg(struct task_struct *tsk, int n, unsigned long val)
{
	struct thread_struct *thread = &(tsk->thread);
	int rc = 0;

	/* There are no DR4 or DR5 registers */
	if (n == 4 || n == 5)
		return -EIO;

	if (n == 6) {
		tsk->thread.debugreg6 = val;
		goto ret_path;
	}
	if (n < HBP_NUM) {
		if (thread->hbp[n]) {
			if (arch_check_va_in_userspace(val,
					thread->hbp[n]->info.len) == 0) {
				rc = -EIO;
				goto ret_path;
			}
			thread->hbp[n]->info.address = val;
		}
		thread->debugreg[n] = val;
	}
	/* All that's left is DR7 */
	if (n == 7)
		rc = ptrace_write_dr7(tsk, val);

ret_path:
	return rc;
}
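
/*
 * Illustrative note (not from the original source): the address for
 * breakpoint n should be poked into u_debugreg[n] before the enable
 * bits are written to u_debugreg[7], since ptrace_write_dr7() reads
 * thread->debugreg[n] when it registers the breakpoint.  If slot n is
 * already registered, the new address is first validated against the
 * breakpoint's length with arch_check_va_in_userspace().
 */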

/*
 * These access the current or another (stopped) task's io permission
 * bitmap for debugging or core dump.
 */
static int ioperm_active(struct task_struct *target,
			 const struct user_regset *regset)
{
	return target->thread.io_bitmap_max / regset->size;
}

static int ioperm_get(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      void *kbuf, void __user *ubuf)
{
	if (!target->thread.io_bitmap_ptr)
		return -ENXIO;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   target->thread.io_bitmap_ptr,
				   0, IO_BITMAP_BYTES);
}
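
/*
 * Illustrative example (not from the original source): with an
 * io_bitmap_max of 128 bytes and the 64-bit regset's 8-byte slot
 * size, ioperm_active() reports 16 active slots, so a core dump
 * records only the first 128 bytes of the bitmap rather than all
 * IO_BITMAP_BYTES of it.
 */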

#ifdef CONFIG_X86_PTRACE_BTS
/*
 * A branch trace store context.
 *
 * Contexts may only be installed by ptrace_bts_config() and only for
 * ptraced tasks.
 *
 * Contexts are destroyed when the tracee is detached from the tracer.
 * The actual destruction work requires interrupts enabled, so the
 * work is deferred and will be scheduled during __ptrace_unlink().
 *
 * Contexts hold an additional task_struct reference on the traced
 * task, as well as a reference on the tracer's mm.
 *
 * Ptrace already holds a task_struct reference for the duration of
 * ptrace operations, but since destruction is deferred, it may be
 * executed after both tracer and tracee have exited.
 */
struct bts_context {
	/* The branch trace handle. */
	struct bts_tracer	*tracer;

	/* The buffer used to store the branch trace and its size. */
	void			*buffer;
	unsigned int		size;

	/* The mm that paid for the above buffer. */
	struct mm_struct	*mm;

	/* The task this context belongs to. */
	struct task_struct	*task;

	/* The signal to send on a bts buffer overflow. */
	unsigned int		bts_ovfl_signal;

	/* The work struct to destroy a context. */
	struct work_struct	work;
};

static int alloc_bts_buffer(struct bts_context *context, unsigned int size)
{
	void *buffer = NULL;
	int err = -ENOMEM;

	err = account_locked_memory(current->mm, current->signal->rlim, size);
	if (err < 0)
		return err;

	buffer = kzalloc(size, GFP_KERNEL);
	if (!buffer)
		goto out_refund;

	context->buffer = buffer;
	context->size = size;
	context->mm = get_task_mm(current);

	return 0;

out_refund:
	refund_locked_memory(current->mm, size);
	return err;
}

static inline void free_bts_buffer(struct bts_context *context)
{
	if (!context->buffer)
		return;

	kfree(context->buffer);
	context->buffer = NULL;

	refund_locked_memory(context->mm, context->size);
	context->size = 0;

	mmput(context->mm);
	context->mm = NULL;
}

static void free_bts_context_work(struct work_struct *w)
{
	struct bts_context *context;

	context = container_of(w, struct bts_context, work);

	ds_release_bts(context->tracer);
	put_task_struct(context->task);
	free_bts_buffer(context);
	kfree(context);
}

static inline void free_bts_context(struct bts_context *context)
{
	INIT_WORK(&context->work, free_bts_context_work);
	schedule_work(&context->work);
}

static inline struct bts_context *alloc_bts_context(struct task_struct *task)
{
	struct bts_context *context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (context) {
		context->task = task;
		task->bts = context;

		get_task_struct(task);
	}

	return context;
}

static int ptrace_bts_read_record(struct task_struct *child, size_t index,
				  struct bts_struct __user *out)
{
	struct bts_context *context;
	const struct bts_trace *trace;
	struct bts_struct bts;
	const unsigned char *at;
	int error;

	context = child->bts;
	if (!context)
		return -ESRCH;

	trace = ds_read_bts(context->tracer);
	if (!trace)
		return -ESRCH;

	at = trace->ds.top - ((index + 1) * trace->ds.size);
	if ((void *)at < trace->ds.begin)
		at += (trace->ds.n * trace->ds.size);

	if (!trace->read)
		return -EOPNOTSUPP;

	error = trace->read(context->tracer, at, &bts);
	if (error < 0)
		return error;

	if (copy_to_user(out, &bts, sizeof(bts)))
		return -EFAULT;

	return sizeof(bts);
}
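
/*
 * Illustrative note (not from the original source): the BTS area is a
 * circular buffer, so index 0 names the most recent record.  With top
 * just past slot 9 of an n-slot buffer, index 2 reads slot 7, while
 * index 12 wraps backwards past begin and reads slot n - 3.
 */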

static int ptrace_bts_drain(struct task_struct *child,
			    long size,
			    struct bts_struct __user *out)
{
	struct bts_context *context;
	const struct bts_trace *trace;
	const unsigned char *at;
	int error, drained = 0;

	context = child->bts;
	if (!context)
		return -ESRCH;

	trace = ds_read_bts(context->tracer);
	if (!trace)
		return -ESRCH;

	if (!trace->read)
		return -EOPNOTSUPP;

	if (size < (trace->ds.top - trace->ds.begin))
		return -EIO;

	for (at = trace->ds.begin; (void *)at < trace->ds.top;
	     out++, drained++, at += trace->ds.size) {
		struct bts_struct bts;

		error = trace->read(context->tracer, at, &bts);
		if (error < 0)
			return error;

		if (copy_to_user(out, &bts, sizeof(bts)))
			return -EFAULT;
	}

	memset(trace->ds.begin, 0, trace->ds.n * trace->ds.size);

	error = ds_reset_bts(context->tracer);
	if (error < 0)
		return error;

	return drained;
}

static int ptrace_bts_config(struct task_struct *child,
			     long cfg_size,
			     const struct ptrace_bts_config __user *ucfg)
{
	struct bts_context *context;
	struct ptrace_bts_config cfg;
	unsigned int flags = 0;

	if (cfg_size < sizeof(cfg))
		return -EIO;

	if (copy_from_user(&cfg, ucfg, sizeof(cfg)))
		return -EFAULT;

	context = child->bts;
	if (!context)
		context = alloc_bts_context(child);
	if (!context)
		return -ENOMEM;

	if (cfg.flags & PTRACE_BTS_O_SIGNAL) {
		if (!cfg.signal)
			return -EINVAL;

		/* Overflow signals are not yet supported. */
		return -EOPNOTSUPP;
		context->bts_ovfl_signal = cfg.signal;
	}

	ds_release_bts(context->tracer);
	context->tracer = NULL;

	if ((cfg.flags & PTRACE_BTS_O_ALLOC) && (cfg.size != context->size)) {
		int err;

		free_bts_buffer(context);
		if (!cfg.size)
			return 0;

		err = alloc_bts_buffer(context, cfg.size);
		if (err < 0)
			return err;
	}

	if (cfg.flags & PTRACE_BTS_O_TRACE)
		flags |= BTS_USER;

	if (cfg.flags & PTRACE_BTS_O_SCHED)
		flags |= BTS_TIMESTAMPS;

	context->tracer =
		ds_request_bts_task(child, context->buffer, context->size,
				    NULL, (size_t)-1, flags);
	if (unlikely(IS_ERR(context->tracer))) {
		int error = PTR_ERR(context->tracer);

		free_bts_buffer(context);
		context->tracer = NULL;
		return error;
	}

	return sizeof(cfg);
}
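
/*
 * Usage sketch (illustrative, not part of the original source): a
 * tracer typically allocates a buffer and enables branch tracing in
 * one call:
 *
 *	struct ptrace_bts_config cfg = {
 *		.size	= 4096,
 *		.flags	= PTRACE_BTS_O_ALLOC | PTRACE_BTS_O_TRACE,
 *	};
 *	ptrace(PTRACE_BTS_CONFIG, pid, &cfg, sizeof(cfg));
 *
 * arch_ptrace() passes sizeof(cfg) in as cfg_size ('data') and the
 * config pointer as 'addr'.
 */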

static int ptrace_bts_status(struct task_struct *child,
			     long cfg_size,
			     struct ptrace_bts_config __user *ucfg)
{
	struct bts_context *context;
	const struct bts_trace *trace;
	struct ptrace_bts_config cfg;

	context = child->bts;
	if (!context)
		return -ESRCH;

	if (cfg_size < sizeof(cfg))
		return -EIO;

	trace = ds_read_bts(context->tracer);
	if (!trace)
		return -ESRCH;

	memset(&cfg, 0, sizeof(cfg));
	cfg.size	= trace->ds.end - trace->ds.begin;
	cfg.signal	= context->bts_ovfl_signal;
	cfg.bts_size	= sizeof(struct bts_struct);

	if (cfg.signal)
		cfg.flags |= PTRACE_BTS_O_SIGNAL;

	if (trace->ds.flags & BTS_USER)
		cfg.flags |= PTRACE_BTS_O_TRACE;

	if (trace->ds.flags & BTS_TIMESTAMPS)
		cfg.flags |= PTRACE_BTS_O_SCHED;

	if (copy_to_user(ucfg, &cfg, sizeof(cfg)))
		return -EFAULT;

	return sizeof(cfg);
}

static int ptrace_bts_clear(struct task_struct *child)
{
	struct bts_context *context;
	const struct bts_trace *trace;

	context = child->bts;
	if (!context)
		return -ESRCH;

	trace = ds_read_bts(context->tracer);
	if (!trace)
		return -ESRCH;

	memset(trace->ds.begin, 0, trace->ds.n * trace->ds.size);

	return ds_reset_bts(context->tracer);
}

static int ptrace_bts_size(struct task_struct *child)
{
	struct bts_context *context;
	const struct bts_trace *trace;

	context = child->bts;
	if (!context)
		return -ESRCH;

	trace = ds_read_bts(context->tracer);
	if (!trace)
		return -ESRCH;

	return (trace->ds.top - trace->ds.begin) / trace->ds.size;
}

/*
 * Called from __ptrace_unlink() after the child has been moved back
 * to its original parent.
 */
void ptrace_bts_untrace(struct task_struct *child)
{
	if (unlikely(child->bts)) {
		free_bts_context(child->bts);
		child->bts = NULL;
	}
}
#endif /* CONFIG_X86_PTRACE_BTS */

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure the single step bit is not set.
 */
void ptrace_disable(struct task_struct *child)
{
	user_disable_single_step(child);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif
}

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
static const struct user_regset_view user_x86_32_view; /* Initialized below. */
#endif

long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
	int ret;
	unsigned long __user *datap = (unsigned long __user *)data;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;

		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr < 0 ||
		    addr >= sizeof(struct user))
			break;

		tmp = 0;  /* Default return condition */
		if (addr < sizeof(struct user_regs_struct))
			tmp = getreg(child, addr);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			tmp = ptrace_get_debugreg(child, addr / sizeof(data));
		}
		ret = put_user(tmp, datap);
		break;
	}

	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr < 0 ||
		    addr >= sizeof(struct user))
			break;

		if (addr < sizeof(struct user_regs_struct))
			ret = putreg(child, addr, data);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			ret = ptrace_set_debugreg(child,
						  addr / sizeof(data), data);
		}
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_GENERAL,
					     0, sizeof(struct user_regs_struct),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_FP,
					   0, sizeof(struct user_i387_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_FP,
					     0, sizeof(struct user_i387_struct),
					     datap);

#ifdef CONFIG_X86_32
	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_XFP,
					   0, sizeof(struct user_fxsr_struct),
					   datap) ? -EIO : 0;

	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_XFP,
					     0, sizeof(struct user_fxsr_struct),
					     datap) ? -EIO : 0;
#endif

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	case PTRACE_GET_THREAD_AREA:
		if (addr < 0)
			return -EIO;
		ret = do_get_thread_area(child, addr,
					 (struct user_desc __user *) data);
		break;

	case PTRACE_SET_THREAD_AREA:
		if (addr < 0)
			return -EIO;
		ret = do_set_thread_area(child, addr,
					 (struct user_desc __user *) data, 0);
		break;
#endif

#ifdef CONFIG_X86_64
	/*
	 * Normal 64-bit interface to access TLS data.  Works just like
	 * arch_prctl, except that the arguments are reversed.
	 */
	case PTRACE_ARCH_PRCTL:
		ret = do_arch_prctl(child, data, addr);
		break;
#endif

	/*
	 * These bits need more cooking - not enabled yet:
	 */
#ifdef CONFIG_X86_PTRACE_BTS
	case PTRACE_BTS_CONFIG:
		ret = ptrace_bts_config
			(child, data, (struct ptrace_bts_config __user *)addr);
		break;

	case PTRACE_BTS_STATUS:
		ret = ptrace_bts_status
			(child, data, (struct ptrace_bts_config __user *)addr);
		break;

	case PTRACE_BTS_SIZE:
		ret = ptrace_bts_size(child);
		break;

	case PTRACE_BTS_GET:
		ret = ptrace_bts_read_record
			(child, data, (struct bts_struct __user *) addr);
		break;

	case PTRACE_BTS_CLEAR:
		ret = ptrace_bts_clear(child);
		break;

	case PTRACE_BTS_DRAIN:
		ret = ptrace_bts_drain
			(child, data, (struct bts_struct __user *) addr);
		break;
#endif /* CONFIG_X86_PTRACE_BTS */

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
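
/*
 * Worked example (illustrative, not part of the original source): on
 * x86-64, reading DR6 via PTRACE_PEEKUSR uses
 * addr = offsetof(struct user, u_debugreg[6]).  The offset is 8-byte
 * aligned and below sizeof(struct user), so after the u_debugreg[0]
 * base is subtracted, addr / sizeof(data) == 6 and
 * ptrace_get_debugreg() returns thread->debugreg6.
 */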

#ifdef CONFIG_IA32_EMULATION

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <asm/ia32.h>
#include <asm/user32.h>

#define R32(l,q)							\
	case offsetof(struct user32, regs.l):				\
		regs->q = value; break

#define SEG32(rs)							\
	case offsetof(struct user32, regs.rs):				\
		return set_segment_reg(child,				\
				       offsetof(struct user_regs_struct, rs), \
				       value);				\
		break
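
/*
 * Expansion example (illustrative): R32(ebx, bx) in the switch below
 * becomes
 *
 *	case offsetof(struct user32, regs.ebx):
 *		regs->bx = value; break;
 *
 * i.e. it maps a 32-bit user32 register offset onto the corresponding
 * native pt_regs field.
 */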

static int putreg32(struct task_struct *child, unsigned regno, u32 value)
{
	struct pt_regs *regs = task_pt_regs(child);

	switch (regno) {

	SEG32(cs);
	SEG32(ds);
	SEG32(es);
	SEG32(fs);
	SEG32(gs);
	SEG32(ss);

	R32(ebx, bx);
	R32(ecx, cx);
	R32(edx, dx);
	R32(edi, di);
	R32(esi, si);
	R32(ebp, bp);
	R32(eax, ax);
	R32(eip, ip);
	R32(esp, sp);

	case offsetof(struct user32, regs.orig_eax):
		/*
		 * Sign-extend the value so that orig_eax = -1
		 * causes (long)orig_ax < 0 tests to fire correctly.
		 */
		regs->orig_ax = (long) (s32) value;
		break;

	case offsetof(struct user32, regs.eflags):
		return set_flags(child, value);

	case offsetof(struct user32, u_debugreg[0]) ...
		offsetof(struct user32, u_debugreg[7]):
		regno -= offsetof(struct user32, u_debugreg[0]);
		return ptrace_set_debugreg(child, regno / 4, value);

	default:
		if (regno > sizeof(struct user32) || (regno & 3))
			return -EIO;

		/*
		 * Other dummy fields in the virtual user structure
		 * are ignored
		 */
		break;
	}
	return 0;
}

#undef R32
#undef SEG32

#define R32(l,q)							\
	case offsetof(struct user32, regs.l):				\
		*val = regs->q; break

#define SEG32(rs)							\
	case offsetof(struct user32, regs.rs):				\
		*val = get_segment_reg(child,				\
				       offsetof(struct user_regs_struct, rs)); \
		break

static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
{
	struct pt_regs *regs = task_pt_regs(child);

	switch (regno) {

	SEG32(ds);
	SEG32(es);
	SEG32(fs);
	SEG32(gs);

	R32(cs, cs);
	R32(ss, ss);
	R32(ebx, bx);
	R32(ecx, cx);
	R32(edx, dx);
	R32(edi, di);
	R32(esi, si);
	R32(ebp, bp);
	R32(eax, ax);
	R32(orig_eax, orig_ax);
	R32(eip, ip);
	R32(esp, sp);

	case offsetof(struct user32, regs.eflags):
		*val = get_flags(child);
		break;

	case offsetof(struct user32, u_debugreg[0]) ...
		offsetof(struct user32, u_debugreg[7]):
		regno -= offsetof(struct user32, u_debugreg[0]);
		*val = ptrace_get_debugreg(child, regno / 4);
		break;

	default:
		if (regno > sizeof(struct user32) || (regno & 3))
			return -EIO;

		/*
		 * Other dummy fields in the virtual user structure
		 * are ignored
		 */
		*val = 0;
		break;
	}
	return 0;
}

#undef R32
#undef SEG32

static int genregs32_get(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 void *kbuf, void __user *ubuf)
{
	if (kbuf) {
		compat_ulong_t *k = kbuf;
		while (count > 0) {
			getreg32(target, pos, k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		compat_ulong_t __user *u = ubuf;
		while (count > 0) {
			compat_ulong_t word;
			getreg32(target, pos, &word);
			if (__put_user(word, u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	return 0;
}

static int genregs32_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	int ret = 0;
	if (kbuf) {
		const compat_ulong_t *k = kbuf;
		while (count > 0 && !ret) {
			ret = putreg32(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const compat_ulong_t __user *u = ubuf;
		while (count > 0 && !ret) {
			compat_ulong_t word;
			ret = __get_user(word, u++);
			if (ret)
				break;
			ret = putreg32(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return ret;
}

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;
	__u32 val;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = getreg32(child, addr, &val);
		if (ret == 0)
			ret = put_user(val, (__u32 __user *)datap);
		break;

	case PTRACE_POKEUSR:
		ret = putreg32(child, addr, data);
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct32),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_GENERAL, 0,
					     sizeof(struct user_regs_struct32),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_FP, 0,
					   sizeof(struct user_i387_ia32_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(
			child, &user_x86_32_view, REGSET_FP,
			0, sizeof(struct user_i387_ia32_struct), datap);

	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_XFP, 0,
					   sizeof(struct user32_fxsr_struct),
					   datap);

	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_XFP, 0,
					     sizeof(struct user32_fxsr_struct),
					     datap);

	case PTRACE_GET_THREAD_AREA:
	case PTRACE_SET_THREAD_AREA:
#ifdef CONFIG_X86_PTRACE_BTS
	case PTRACE_BTS_CONFIG:
	case PTRACE_BTS_STATUS:
	case PTRACE_BTS_SIZE:
	case PTRACE_BTS_GET:
	case PTRACE_BTS_CLEAR:
	case PTRACE_BTS_DRAIN:
#endif /* CONFIG_X86_PTRACE_BTS */
		return arch_ptrace(child, request, addr, data);

	default:
		return compat_ptrace_request(child, request, addr, data);
	}

	return ret;
}

#endif	/* CONFIG_IA32_EMULATION */

#ifdef CONFIG_X86_64

static const struct user_regset x86_64_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_regs_struct) / sizeof(long),
		.size = sizeof(long), .align = sizeof(long),
		.get = genregs_get, .set = genregs_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_i387_struct) / sizeof(long),
		.size = sizeof(long), .align = sizeof(long),
		.active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
	},
	[REGSET_IOPERM64] = {
		.core_note_type = NT_386_IOPERM,
		.n = IO_BITMAP_LONGS,
		.size = sizeof(long), .align = sizeof(long),
		.active = ioperm_active, .get = ioperm_get
	},
};

static const struct user_regset_view user_x86_64_view = {
	.name = "x86_64", .e_machine = EM_X86_64,
	.regsets = x86_64_regsets, .n = ARRAY_SIZE(x86_64_regsets)
};

#else /* CONFIG_X86_32 */

#define user_regs_struct32	user_regs_struct
#define genregs32_get		genregs_get
#define genregs32_set		genregs_set

#define user_i387_ia32_struct	user_i387_struct
#define user32_fxsr_struct	user_fxsr_struct

#endif	/* CONFIG_X86_64 */

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
static const struct user_regset x86_32_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_regs_struct32) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.get = genregs32_get, .set = genregs32_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_i387_ia32_struct) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = fpregs_active, .get = fpregs_get, .set = fpregs_set
	},
	[REGSET_XFP] = {
		.core_note_type = NT_PRXFPREG,
		.n = sizeof(struct user32_fxsr_struct) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_386_TLS,
		.n = GDT_ENTRY_TLS_ENTRIES, .bias = GDT_ENTRY_TLS_MIN,
		.size = sizeof(struct user_desc),
		.align = sizeof(struct user_desc),
		.active = regset_tls_active,
		.get = regset_tls_get, .set = regset_tls_set
	},
	[REGSET_IOPERM32] = {
		.core_note_type = NT_386_IOPERM,
		.n = IO_BITMAP_BYTES / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = ioperm_active, .get = ioperm_get
	},
};

static const struct user_regset_view user_x86_32_view = {
	.name = "i386", .e_machine = EM_386,
	.regsets = x86_32_regsets, .n = ARRAY_SIZE(x86_32_regsets)
};
#endif

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(task, TIF_IA32))
#endif
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
		return &user_x86_32_view;
#endif
#ifdef CONFIG_X86_64
	return &user_x86_64_view;
#endif
}
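
/*
 * Illustrative note (not from the original source): on a 64-bit
 * kernel with IA32 emulation, task_user_regset_view(current) yields
 * the "i386" view for a TIF_IA32 task and the "x86_64" view
 * otherwise, which is why arch_ptrace()'s PTRACE_GETREGS path above
 * picks the regset layout matching the calling tracer's personality.
 */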

void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
		  int error_code, int si_code)
{
	struct siginfo info;

	tsk->thread.trap_no = 1;
	tsk->thread.error_code = error_code;

	memset(&info, 0, sizeof(info));
	info.si_signo = SIGTRAP;
	info.si_code = si_code;

	/* User-mode ip? */
	info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;

	/* Send us the fake SIGTRAP */
	force_sig_info(SIGTRAP, &info, tsk);
}


#ifdef CONFIG_X86_32
# define IS_IA32	1
#elif defined CONFIG_IA32_EMULATION
# define IS_IA32	is_compat_task()
#else
# define IS_IA32	0
#endif

/*
 * We must return the syscall number to actually look up in the table.
 * This can be -1L to skip running any syscall at all.
 */
asmregparm long syscall_trace_enter(struct pt_regs *regs)
{
	long ret = 0;

	/*
	 * If we stepped into a sysenter/syscall insn, it trapped in
	 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
	 * If user-mode had set TF itself, then it's still clear from
	 * do_debug() and we need to set it again to restore the user
	 * state.  If we entered on the slow path, TF was already set.
	 */
	if (test_thread_flag(TIF_SINGLESTEP))
		regs->flags |= X86_EFLAGS_TF;

	/* do the secure computing check first */
	secure_computing(regs->orig_ax);

	if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
		ret = -1L;

	if ((ret || test_thread_flag(TIF_SYSCALL_TRACE)) &&
	    tracehook_report_syscall_entry(regs))
		ret = -1L;

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->orig_ax);

	if (unlikely(current->audit_context)) {
		if (IS_IA32)
			audit_syscall_entry(AUDIT_ARCH_I386,
					    regs->orig_ax,
					    regs->bx, regs->cx,
					    regs->dx, regs->si);
#ifdef CONFIG_X86_64
		else
			audit_syscall_entry(AUDIT_ARCH_X86_64,
					    regs->orig_ax,
					    regs->di, regs->si,
					    regs->dx, regs->r10);
#endif
	}

	return ret ?: regs->orig_ax;
}
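
/*
 * Illustrative note (not from the original source): the tail
 * "return ret ?: regs->orig_ax;" means a zero ret falls through to
 * the real syscall number, while PTRACE_SYSEMU (TIF_SYSCALL_EMU) or a
 * tracer that aborts the entry report yields -1L, which the syscall
 * path treats as "run no syscall at all".
 */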

asmregparm void syscall_trace_leave(struct pt_regs *regs)
{
	if (unlikely(current->audit_context))
		audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->ax);

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, 0);

	/*
	 * If TIF_SYSCALL_EMU is set, we only get here because of
	 * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
	 * We already reported this syscall instruction in
	 * syscall_trace_enter(), so don't do any more now.
	 */
	if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
		return;

	/*
	 * If we are single-stepping, synthesize a trap to follow the
	 * system call instruction.
	 */
	if (test_thread_flag(TIF_SINGLESTEP) &&
	    tracehook_consider_fatal_signal(current, SIGTRAP))
		send_sigtrap(current, regs, 0, TRAP_BRKPT);
}