[PATCH] Uml support: add PTRACE_SYSEMU_SINGLESTEP option to i386
arch/i386/kernel/ptrace.c
/* ptrace.c */
/* By Ross Biro 1/23/92 */
/*
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/debugreg.h>
#include <asm/ldt.h>
#include <asm/desc.h>

/*
 * does not yet catch signals sent when the child dies.
 * in exit.c or in signal.c.
 */

/* determines which flags the user has access to. */
/* 1 = access 0 = no access */
#define FLAG_MASK 0x00044dd5

/* sets the trap flag. */
#define TRAP_FLAG 0x100

/*
 * Offset of eflags on child stack..
 */
#define EFL_OFFSET ((EFL-2)*4-sizeof(struct pt_regs))

static inline struct pt_regs *get_child_regs(struct task_struct *task)
{
	void *stack_top = (void *)task->thread.esp0;
	return stack_top - sizeof(struct pt_regs);
}

/*
 * this routine will get a word off of the process's privileged stack.
 * the offset is how far from the base addr as stored in the TSS.
 * this routine assumes that all the privileged stacks are in our
 * data space.
 */
static inline int get_stack_long(struct task_struct *task, int offset)
{
	unsigned char *stack;

	stack = (unsigned char *)task->thread.esp0;
	stack += offset;
	return (*((int *)stack));
}

/*
 * this routine will put a word on the process's privileged stack.
 * the offset is how far from the base addr as stored in the TSS.
 * this routine assumes that all the privileged stacks are in our
 * data space.
 */
static inline int put_stack_long(struct task_struct *task, int offset,
	unsigned long data)
{
	unsigned char * stack;

	stack = (unsigned char *) task->thread.esp0;
	stack += offset;
	*(unsigned long *) stack = data;
	return 0;
}

static int putreg(struct task_struct *child,
	unsigned long regno, unsigned long value)
{
	switch (regno >> 2) {
	case FS:
		if (value && (value & 3) != 3)
			return -EIO;
		child->thread.fs = value;
		return 0;
	case GS:
		if (value && (value & 3) != 3)
			return -EIO;
		child->thread.gs = value;
		return 0;
	case DS:
	case ES:
		if (value && (value & 3) != 3)
			return -EIO;
		value &= 0xffff;
		break;
	case SS:
	case CS:
		if ((value & 3) != 3)
			return -EIO;
		value &= 0xffff;
		break;
	case EFL:
		value &= FLAG_MASK;
		value |= get_stack_long(child, EFL_OFFSET) & ~FLAG_MASK;
		break;
	}
	if (regno > GS*4)
		regno -= 2*4;
	put_stack_long(child, regno - sizeof(struct pt_regs), value);
	return 0;
}

static unsigned long getreg(struct task_struct *child,
	unsigned long regno)
{
	unsigned long retval = ~0UL;

	switch (regno >> 2) {
	case FS:
		retval = child->thread.fs;
		break;
	case GS:
		retval = child->thread.gs;
		break;
	case DS:
	case ES:
	case SS:
	case CS:
		retval = 0xffff;
		/* fall through */
	default:
		if (regno > GS*4)
			regno -= 2*4;
		regno = regno - sizeof(struct pt_regs);
		retval &= get_stack_long(child, regno);
	}
	return retval;
}

#define LDT_SEGMENT 4

static unsigned long convert_eip_to_linear(struct task_struct *child, struct pt_regs *regs)
{
	unsigned long addr, seg;

	addr = regs->eip;
	seg = regs->xcs & 0xffff;
	if (regs->eflags & VM_MASK) {
		addr = (addr & 0xffff) + (seg << 4);
		return addr;
	}

	/*
	 * We'll assume that the code segments in the GDT
	 * are all zero-based. That is largely true: the
	 * TLS segments are used for data, and the PNPBIOS
	 * and APM bios ones we just ignore here.
	 */
	if (seg & LDT_SEGMENT) {
		u32 *desc;
		unsigned long base;

		down(&child->mm->context.sem);
		desc = child->mm->context.ldt + (seg & ~7);
		base = (desc[0] >> 16) | ((desc[1] & 0xff) << 16) | (desc[1] & 0xff000000);

		/* 16-bit code segment? */
		if (!((desc[1] >> 22) & 1))
			addr &= 0xffff;
		addr += base;
		up(&child->mm->context.sem);
	}
	return addr;
}

static inline int is_at_popf(struct task_struct *child, struct pt_regs *regs)
{
	int i, copied;
	unsigned char opcode[16];
	unsigned long addr = convert_eip_to_linear(child, regs);

	copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
	for (i = 0; i < copied; i++) {
		switch (opcode[i]) {
		/* popf */
		case 0x9d:
			return 1;
		/* opcode and address size prefixes */
		case 0x66: case 0x67:
			continue;
		/* irrelevant prefixes (segment overrides and repeats) */
		case 0x26: case 0x2e:
		case 0x36: case 0x3e:
		case 0x64: case 0x65:
		case 0xf0: case 0xf2: case 0xf3:
			continue;

		/*
		 * pushf: NOTE! We should probably not let
		 * the user see the TF bit being set. But
		 * it's more pain than it's worth to avoid
		 * it, and a debugger could emulate this
		 * all in user space if it _really_ cares.
		 */
		case 0x9c:
		default:
			return 0;
		}
	}
	return 0;
}

static void set_singlestep(struct task_struct *child)
{
	struct pt_regs *regs = get_child_regs(child);

	/*
	 * Always set TIF_SINGLESTEP - this guarantees that
	 * we single-step system calls etc.. This will also
	 * cause us to set TF when returning to user mode.
	 */
	set_tsk_thread_flag(child, TIF_SINGLESTEP);

	/*
	 * If TF was already set, don't do anything else
	 */
	if (regs->eflags & TRAP_FLAG)
		return;

	/* Set TF on the kernel stack.. */
	regs->eflags |= TRAP_FLAG;

	/*
	 * ..but if TF is changed by the instruction we will trace,
	 * don't mark it as being "us" that set it, so that we
	 * won't clear it by hand later.
	 */
	if (is_at_popf(child, regs))
		return;

	child->ptrace |= PT_DTRACE;
}

static void clear_singlestep(struct task_struct *child)
{
	/* Always clear TIF_SINGLESTEP... */
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);

	/* But touch TF only if it was set by us.. */
	if (child->ptrace & PT_DTRACE) {
		struct pt_regs *regs = get_child_regs(child);
		regs->eflags &= ~TRAP_FLAG;
		child->ptrace &= ~PT_DTRACE;
	}
}

/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure the single step bit is not set.
 */
void ptrace_disable(struct task_struct *child)
{
	clear_singlestep(child);
}

/*
 * Perform get_thread_area on behalf of the traced child.
 */
static int
ptrace_get_thread_area(struct task_struct *child,
		       int idx, struct user_desc __user *user_desc)
{
	struct user_desc info;
	struct desc_struct *desc;

/*
 * Get the current Thread-Local Storage area:
 */

#define GET_BASE(desc) ( \
	(((desc)->a >> 16) & 0x0000ffff) | \
	(((desc)->b << 16) & 0x00ff0000) | \
	( (desc)->b & 0xff000000) )

#define GET_LIMIT(desc) ( \
	((desc)->a & 0x0ffff) | \
	((desc)->b & 0xf0000) )

#define GET_32BIT(desc)		(((desc)->b >> 22) & 1)
#define GET_CONTENTS(desc)	(((desc)->b >> 10) & 3)
#define GET_WRITABLE(desc)	(((desc)->b >> 9) & 1)
#define GET_LIMIT_PAGES(desc)	(((desc)->b >> 23) & 1)
#define GET_PRESENT(desc)	(((desc)->b >> 15) & 1)
#define GET_USEABLE(desc)	(((desc)->b >> 20) & 1)

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	desc = child->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;

	info.entry_number = idx;
	info.base_addr = GET_BASE(desc);
	info.limit = GET_LIMIT(desc);
	info.seg_32bit = GET_32BIT(desc);
	info.contents = GET_CONTENTS(desc);
	info.read_exec_only = !GET_WRITABLE(desc);
	info.limit_in_pages = GET_LIMIT_PAGES(desc);
	info.seg_not_present = !GET_PRESENT(desc);
	info.useable = GET_USEABLE(desc);

	if (copy_to_user(user_desc, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}

/*
 * Perform set_thread_area on behalf of the traced child.
 */
static int
ptrace_set_thread_area(struct task_struct *child,
		       int idx, struct user_desc __user *user_desc)
{
	struct user_desc info;
	struct desc_struct *desc;

	if (copy_from_user(&info, user_desc, sizeof(info)))
		return -EFAULT;

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	desc = child->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
	if (LDT_empty(&info)) {
		desc->a = 0;
		desc->b = 0;
	} else {
		desc->a = LDT_entry_a(&info);
		desc->b = LDT_entry_b(&info);
	}

	return 0;
}

asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
{
	struct task_struct *child;
	struct user * dummy = NULL;
	int i, ret;
	unsigned long __user *datap = (unsigned long __user *)data;

	lock_kernel();
	ret = -EPERM;
	if (request == PTRACE_TRACEME) {
		/* are we already being traced? */
		if (current->ptrace & PT_PTRACED)
			goto out;
		ret = security_ptrace(current->parent, current);
		if (ret)
			goto out;
		/* set the ptrace bit in the process flags. */
		current->ptrace |= PT_PTRACED;
		ret = 0;
		goto out;
	}
	ret = -ESRCH;
	read_lock(&tasklist_lock);
	child = find_task_by_pid(pid);
	if (child)
		get_task_struct(child);
	read_unlock(&tasklist_lock);
	if (!child)
		goto out;

	ret = -EPERM;
	if (pid == 1)		/* you may not mess with init */
		goto out_tsk;

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
		goto out_tsk;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (ret < 0)
		goto out_tsk;

	switch (request) {
	/* when I and D space are separate, these will need to be fixed. */
	case PTRACE_PEEKTEXT: /* read word at location addr. */
	case PTRACE_PEEKDATA: {
		unsigned long tmp;
		int copied;

		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
		ret = -EIO;
		if (copied != sizeof(tmp))
			break;
		ret = put_user(tmp, datap);
		break;
	}

	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;

		ret = -EIO;
		if ((addr & 3) || addr < 0 ||
		    addr > sizeof(struct user) - 3)
			break;

		tmp = 0;  /* Default return condition */
		if(addr < FRAME_SIZE*sizeof(long))
			tmp = getreg(child, addr);
		if(addr >= (long) &dummy->u_debugreg[0] &&
		   addr <= (long) &dummy->u_debugreg[7]){
			addr -= (long) &dummy->u_debugreg[0];
			addr = addr >> 2;
			tmp = child->thread.debugreg[addr];
		}
		ret = put_user(tmp, datap);
		break;
	}

	/* when I and D space are separate, this will have to be fixed. */
	case PTRACE_POKETEXT: /* write the word at location addr. */
	case PTRACE_POKEDATA:
		ret = 0;
		if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
			break;
		ret = -EIO;
		break;

	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
		ret = -EIO;
		if ((addr & 3) || addr < 0 ||
		    addr > sizeof(struct user) - 3)
			break;

		if (addr < FRAME_SIZE*sizeof(long)) {
			ret = putreg(child, addr, data);
			break;
		}
		/* We need to be very careful here.  We implicitly
		   want to modify a portion of the task_struct, and we
		   have to be selective about what portions we allow someone
		   to modify. */

		ret = -EIO;
		if(addr >= (long) &dummy->u_debugreg[0] &&
		   addr <= (long) &dummy->u_debugreg[7]){

			if(addr == (long) &dummy->u_debugreg[4]) break;
			if(addr == (long) &dummy->u_debugreg[5]) break;
			if(addr < (long) &dummy->u_debugreg[4] &&
			   ((unsigned long) data) >= TASK_SIZE-3) break;

			/* Sanity-check data. Take one half-byte at once with
			 * check = (val >> (16 + 4*i)) & 0xf. It contains the
			 * R/Wi and LENi bits; bits 0 and 1 are R/Wi, and bits
			 * 2 and 3 are LENi. Given a list of invalid values,
			 * we do mask |= 1 << invalid_value, so that
			 * (mask >> check) & 1 is a correct test for invalid
			 * values.
			 *
			 * R/Wi contains the type of the breakpoint /
			 * watchpoint, LENi contains the length of the watched
			 * data in the watchpoint case.
			 *
			 * The invalid values are:
			 * - LENi == 0x10 (undefined), so mask |= 0x0f00.
			 * - R/Wi == 0x10 (break on I/O reads or writes), so
			 *   mask |= 0x4444.
			 * - R/Wi == 0x00 && LENi != 0x00, so we have mask |=
			 *   0x1110.
			 *
			 * Finally, mask = 0x0f00 | 0x4444 | 0x1110 == 0x5f54.
			 *
			 * See the Intel Manual "System Programming Guide",
			 * 15.2.4
			 *
			 * Note that LENi == 0x10 is defined on x86_64 in long
			 * mode (i.e. even for 32-bit userspace software, but
			 * 64-bit kernel), so the x86_64 mask value is 0x5454.
			 * See the AMD manual no. 24593 (AMD64 System
			 * Programming)*/

			if(addr == (long) &dummy->u_debugreg[7]) {
				data &= ~DR_CONTROL_RESERVED;
				for(i=0; i<4; i++)
					if ((0x5f54 >> ((data >> (16 + 4*i)) & 0xf)) & 1)
						goto out_tsk;
			}
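			/*
			 * Worked example of the mask test above (illustrative
			 * values, not from the original comment): for a
			 * nibble check == 0x6 (R/Wi == 10b, an I/O
			 * breakpoint, LENi == 01b), (0x5f54 >> 6) & 1 == 1,
			 * so the write is rejected; for check == 0x5
			 * (R/Wi == 01b, a two-byte data-write watchpoint),
			 * bit 5 of 0x5f54 is clear and the value is accepted.
			 */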

			addr -= (long) &dummy->u_debugreg;
			addr = addr >> 2;
			child->thread.debugreg[addr] = data;
			ret = 0;
		}
		break;

	case PTRACE_SYSEMU: /* continue and stop at next syscall, which will not be executed */
	case PTRACE_SYSCALL:	/* continue and stop at next (return from) syscall */
	case PTRACE_CONT:	/* restart after signal. */
		ret = -EIO;
		if (!valid_signal(data))
			break;
		if (request == PTRACE_SYSEMU) {
			set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		} else if (request == PTRACE_SYSCALL) {
			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
			clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
		} else {
			clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		}
		child->exit_code = data;
		/* make sure the single step bit is not set. */
		clear_singlestep(child);
		wake_up_process(child);
		ret = 0;
		break;

/*
 * make the child exit. Best I can do is send it a sigkill.
 * perhaps it should be put in the status that it wants to
 * exit.
 */
	case PTRACE_KILL:
		ret = 0;
		if (child->exit_state == EXIT_ZOMBIE)	/* already dead */
			break;
		child->exit_code = SIGKILL;
		/* make sure the single step bit is not set. */
		clear_singlestep(child);
		wake_up_process(child);
		break;

	case PTRACE_SYSEMU_SINGLESTEP: /* Same as SYSEMU, but singlestep if not syscall */
	case PTRACE_SINGLESTEP:	/* set the trap flag. */
		ret = -EIO;
		if (!valid_signal(data))
			break;

		if (request == PTRACE_SYSEMU_SINGLESTEP)
			set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
		else
			clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);

		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		set_singlestep(child);
		child->exit_code = data;
		/* give it a chance to run. */
		wake_up_process(child);
		ret = 0;
		break;

	case PTRACE_DETACH:
		/* detach a process that was attached. */
		ret = ptrace_detach(child, data);
		break;

	case PTRACE_GETREGS: { /* Get all gp regs from the child. */
		if (!access_ok(VERIFY_WRITE, datap, FRAME_SIZE*sizeof(long))) {
			ret = -EIO;
			break;
		}
		for ( i = 0; i < FRAME_SIZE*sizeof(long); i += sizeof(long) ) {
			__put_user(getreg(child, i), datap);
			datap++;
		}
		ret = 0;
		break;
	}

	case PTRACE_SETREGS: { /* Set all gp regs in the child. */
		unsigned long tmp;
		if (!access_ok(VERIFY_READ, datap, FRAME_SIZE*sizeof(long))) {
			ret = -EIO;
			break;
		}
		for ( i = 0; i < FRAME_SIZE*sizeof(long); i += sizeof(long) ) {
			__get_user(tmp, datap);
			putreg(child, i, tmp);
			datap++;
		}
		ret = 0;
		break;
	}

	case PTRACE_GETFPREGS: { /* Get the child FPU state. */
		if (!access_ok(VERIFY_WRITE, datap,
			       sizeof(struct user_i387_struct))) {
			ret = -EIO;
			break;
		}
		ret = 0;
		if (!tsk_used_math(child))
			init_fpu(child);
		get_fpregs((struct user_i387_struct __user *)data, child);
		break;
	}

	case PTRACE_SETFPREGS: { /* Set the child FPU state. */
		if (!access_ok(VERIFY_READ, datap,
			       sizeof(struct user_i387_struct))) {
			ret = -EIO;
			break;
		}
		set_stopped_child_used_math(child);
		set_fpregs(child, (struct user_i387_struct __user *)data);
		ret = 0;
		break;
	}

	case PTRACE_GETFPXREGS: { /* Get the child extended FPU state. */
		if (!access_ok(VERIFY_WRITE, datap,
			       sizeof(struct user_fxsr_struct))) {
			ret = -EIO;
			break;
		}
		if (!tsk_used_math(child))
			init_fpu(child);
		ret = get_fpxregs((struct user_fxsr_struct __user *)data, child);
		break;
	}

	case PTRACE_SETFPXREGS: { /* Set the child extended FPU state. */
		if (!access_ok(VERIFY_READ, datap,
			       sizeof(struct user_fxsr_struct))) {
			ret = -EIO;
			break;
		}
		set_stopped_child_used_math(child);
		ret = set_fpxregs(child, (struct user_fxsr_struct __user *)data);
		break;
	}

	case PTRACE_GET_THREAD_AREA:
		ret = ptrace_get_thread_area(child, addr,
					     (struct user_desc __user *) data);
		break;

	case PTRACE_SET_THREAD_AREA:
		ret = ptrace_set_thread_area(child, addr,
					     (struct user_desc __user *) data);
		break;

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}
out_tsk:
	put_task_struct(child);
out:
	unlock_kernel();
	return ret;
}

void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code)
{
	struct siginfo info;

	tsk->thread.trap_no = 1;
	tsk->thread.error_code = error_code;

	memset(&info, 0, sizeof(info));
	info.si_signo = SIGTRAP;
	info.si_code = TRAP_BRKPT;

	/* User-mode eip? */
	info.si_addr = user_mode_vm(regs) ? (void __user *) regs->eip : NULL;

	/* Send us the fakey SIGTRAP */
	force_sig_info(SIGTRAP, &info, tsk);
}

/* notification of system call entry/exit
 * - triggered by current->work.syscall_trace
 */
__attribute__((regparm(3)))
int do_syscall_trace(struct pt_regs *regs, int entryexit)
{
	int is_sysemu = test_thread_flag(TIF_SYSCALL_EMU), ret = 0;
	/* With TIF_SYSCALL_EMU set we want to ignore TIF_SINGLESTEP */
	int is_singlestep = !is_sysemu && test_thread_flag(TIF_SINGLESTEP);

	/* do the secure computing check first */
	secure_computing(regs->orig_eax);

	if (unlikely(current->audit_context) && entryexit)
		audit_syscall_exit(current, AUDITSC_RESULT(regs->eax), regs->eax);

	if (!(current->ptrace & PT_PTRACED))
		goto out;

	/* If a process stops on the 1st tracepoint with SYSCALL_TRACE
	 * and then is resumed with SYSEMU_SINGLESTEP, it will come in
	 * here. We have to check this and return */
	if (is_sysemu && entryexit)
		return 0;

	/* Fake a debug trap */
	if (is_singlestep)
		send_sigtrap(current, regs, 0);

	if (!test_thread_flag(TIF_SYSCALL_TRACE) && !is_sysemu)
		goto out;

	/* the 0x80 provides a way for the tracing parent to distinguish
	   between a syscall stop and SIGTRAP delivery */
	/* Note that the debugger could change the result of test_thread_flag!*/
	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));

	/*
	 * this isn't the same as continuing with a signal, but it will do
	 * for normal use.  strace only continues with a signal if the
	 * stopping signal is not SIGTRAP.  -brl
	 */
	if (current->exit_code) {
		send_sig(current->exit_code, current, 1);
		current->exit_code = 0;
	}
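	/*
	 * For PTRACE_SYSEMU, the non-zero return value produced below is
	 * what lets the caller (presumably the syscall-entry path in
	 * entry.S) know that the system call itself must be skipped;
	 * orig_eax is also invalidated so the syscall is not restarted.
	 */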
	ret = is_sysemu;
out:
	if (unlikely(current->audit_context) && !entryexit)
		audit_syscall_entry(current, AUDIT_ARCH_I386, regs->orig_eax,
				    regs->ebx, regs->ecx, regs->edx, regs->esi);
	if (ret == 0)
		return 0;

	regs->orig_eax = -1; /* force skip of syscall restarting */
	if (unlikely(current->audit_context))
		audit_syscall_exit(current, AUDITSC_RESULT(regs->eax), regs->eax);
	return 1;
}
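
The file above only contains the kernel side of PTRACE_SYSEMU and the new PTRACE_SYSEMU_SINGLESTEP request. The sketch below is not part of ptrace.c; it is a minimal illustration of how a UML-style tracer might drive these requests from userspace on an i386 host. The request values (31 and 32) and the fallback #defines are assumptions in case the libc headers predate them, and the "emulation" is faked by writing an arbitrary return value into eax.

/* sysemu_demo.c - minimal PTRACE_SYSEMU / PTRACE_SYSEMU_SINGLESTEP sketch,
 * assuming an i386 host; request numbers defined locally if missing. */
#include <stdio.h>
#include <signal.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/user.h>
#include <sys/wait.h>

#ifndef PTRACE_SYSEMU
#define PTRACE_SYSEMU		 31	/* assumed i386 value */
#endif
#ifndef PTRACE_SYSEMU_SINGLESTEP
#define PTRACE_SYSEMU_SINGLESTEP 32	/* request added by this patch */
#endif

int main(void)
{
	int i;
	pid_t pid = fork();

	if (pid == 0) {
		/* Child: ask to be traced, stop, then issue syscalls forever. */
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);
		for (;;)
			getpid();
	}

	waitpid(pid, NULL, 0);		/* wait for the initial SIGSTOP */

	for (i = 0; i < 5; i++) {
		struct user_regs_struct regs;

		/* Resume: the child stops at the next syscall entry and the
		 * syscall itself is not executed by the kernel. */
		if (ptrace(PTRACE_SYSEMU, pid, NULL, NULL) == -1) {
			perror("PTRACE_SYSEMU");
			break;
		}
		waitpid(pid, NULL, 0);

		ptrace(PTRACE_GETREGS, pid, NULL, &regs);
		printf("intercepted syscall %ld\n", (long)regs.orig_eax);

		/* "Emulate" the syscall by faking its return value in eax. */
		regs.eax = 12345;
		ptrace(PTRACE_SETREGS, pid, NULL, &regs);

		/* SYSEMU_SINGLESTEP behaves like SYSEMU, but if the next
		 * event is an ordinary instruction rather than a syscall,
		 * the child stops after single-stepping it. */
		ptrace(PTRACE_SYSEMU_SINGLESTEP, pid, NULL, NULL);
		waitpid(pid, NULL, 0);
	}

	kill(pid, SIGKILL);
	return 0;
}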