x86: x86 user_regset general regs
arch/x86/kernel/ptrace.c
1 /* By Ross Biro 1/23/92 */
2 /*
3 * Pentium III FXSR, SSE support
4 * Gareth Hughes <gareth@valinux.com>, May 2000
5 *
6 * BTS tracing
7 * Markus Metzger <markus.t.metzger@intel.com>, Dec 2007
8 */
9
10 #include <linux/kernel.h>
11 #include <linux/sched.h>
12 #include <linux/mm.h>
13 #include <linux/smp.h>
14 #include <linux/errno.h>
15 #include <linux/ptrace.h>
16 #include <linux/regset.h>
17 #include <linux/user.h>
18 #include <linux/security.h>
19 #include <linux/audit.h>
20 #include <linux/seccomp.h>
21 #include <linux/signal.h>
22
23 #include <asm/uaccess.h>
24 #include <asm/pgtable.h>
25 #include <asm/system.h>
26 #include <asm/processor.h>
27 #include <asm/i387.h>
28 #include <asm/debugreg.h>
29 #include <asm/ldt.h>
30 #include <asm/desc.h>
31 #include <asm/prctl.h>
32 #include <asm/proto.h>
33 #include <asm/ds.h>
34
35
36 /*
37 * This does not yet catch signals sent when the child dies
38 * in exit.c or in signal.c.
39 */
40
41 /*
42 * Determines which flags the user has access to [1 = access, 0 = no access].
43 */
44 #define FLAG_MASK_32 ((unsigned long) \
45 (X86_EFLAGS_CF | X86_EFLAGS_PF | \
46 X86_EFLAGS_AF | X86_EFLAGS_ZF | \
47 X86_EFLAGS_SF | X86_EFLAGS_TF | \
48 X86_EFLAGS_DF | X86_EFLAGS_OF | \
49 X86_EFLAGS_RF | X86_EFLAGS_AC))
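/*
 * Note, for example, that X86_EFLAGS_IF and X86_EFLAGS_IOPL are
 * deliberately absent, so a debugger writing flags through ptrace
 * cannot disable interrupts or raise the I/O privilege level of the
 * tracee; set_flags() below silently drops such bits:
 *
 *	set_flags(task, X86_EFLAGS_IF | X86_EFLAGS_CF)
 *
 * updates only the CF bit of regs->flags.
 */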
50
51 /*
52 * Determines whether a value may be installed in a segment register.
53 */
54 static inline bool invalid_selector(u16 value)
55 {
56 return unlikely(value != 0 && (value & SEGMENT_RPL_MASK) != USER_RPL);
57 }
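/*
 * For example, with SEGMENT_RPL_MASK == 3 and USER_RPL == 3:
 *
 *	invalid_selector(0x33) == false	(user selector, RPL 3)
 *	invalid_selector(0x10) == true	(kernel selector, RPL 0)
 *	invalid_selector(0)    == false	(clearing a register is allowed)
 */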
58
59 #ifdef CONFIG_X86_32
60
61 #define FLAG_MASK FLAG_MASK_32
62
63 static long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
64 {
65 BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
66 regno >>= 2;
67 if (regno > FS)
68 --regno;
69 return &regs->bx + regno;
70 }
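/*
 * The 32-bit struct pt_regs has no slot for %gs, so user_regs_struct
 * offsets past FS must be pulled back by one word: e.g. orig_eax is
 * word 11 of user_regs_struct but word 10 of pt_regs. %gs itself
 * never reaches this helper; the segment helpers below special-case
 * it via thread.gs.
 */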
71
72 static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
73 {
74 /*
75 * Returning the value truncates it to 16 bits.
76 */
77 unsigned int retval;
78 if (offset != offsetof(struct user_regs_struct, gs))
79 retval = *pt_regs_access(task_pt_regs(task), offset);
80 else {
81 retval = task->thread.gs;
82 if (task == current)
83 savesegment(gs, retval);
84 }
85 return retval;
86 }
87
88 static int set_segment_reg(struct task_struct *task,
89 unsigned long offset, u16 value)
90 {
91 /*
92 * The value argument was already truncated to 16 bits.
93 */
94 if (invalid_selector(value))
95 return -EIO;
96
97 if (offset != offsetof(struct user_regs_struct, gs))
98 *pt_regs_access(task_pt_regs(task), offset) = value;
99 else {
100 task->thread.gs = value;
101 if (task == current)
102 /*
103 * The user-mode %gs is not affected by
104 * kernel entry, so we must update the CPU.
105 */
106 loadsegment(gs, value);
107 }
108
109 return 0;
110 }
111
112 static unsigned long debugreg_addr_limit(struct task_struct *task)
113 {
114 return TASK_SIZE - 3;
115 }
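/*
 * TASK_SIZE - 3 leaves headroom for the widest 32-bit watchpoint:
 * ptrace_set_debugreg() requires addr < TASK_SIZE - 3, so even a
 * 4-byte watchpoint's last byte (addr + 3) still lies below
 * TASK_SIZE.
 */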
116
117 #else /* CONFIG_X86_64 */
118
119 #define FLAG_MASK (FLAG_MASK_32 | X86_EFLAGS_NT)
120
121 static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long offset)
122 {
123 BUILD_BUG_ON(offsetof(struct pt_regs, r15) != 0);
124 return &regs->r15 + (offset / sizeof(regs->r15));
125 }
126
127 static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
128 {
129 /*
130 * Returning the value truncates it to 16 bits.
131 */
132 unsigned int seg;
133
134 switch (offset) {
135 case offsetof(struct user_regs_struct, fs):
136 if (task == current) {
137 /* Older gas can't assemble movq %?s,%r?? */
138 asm("movl %%fs,%0" : "=r" (seg));
139 return seg;
140 }
141 return task->thread.fsindex;
142 case offsetof(struct user_regs_struct, gs):
143 if (task == current) {
144 asm("movl %%gs,%0" : "=r" (seg));
145 return seg;
146 }
147 return task->thread.gsindex;
148 case offsetof(struct user_regs_struct, ds):
149 if (task == current) {
150 asm("movl %%ds,%0" : "=r" (seg));
151 return seg;
152 }
153 return task->thread.ds;
154 case offsetof(struct user_regs_struct, es):
155 if (task == current) {
156 asm("movl %%es,%0" : "=r" (seg));
157 return seg;
158 }
159 return task->thread.es;
160
161 case offsetof(struct user_regs_struct, cs):
162 case offsetof(struct user_regs_struct, ss):
163 break;
164 }
165 return *pt_regs_access(task_pt_regs(task), offset);
166 }
167
168 static int set_segment_reg(struct task_struct *task,
169 unsigned long offset, u16 value)
170 {
171 /*
172 * The value argument was already truncated to 16 bits.
173 */
174 if (invalid_selector(value))
175 return -EIO;
176
177 switch (offset) {
178 case offsetof(struct user_regs_struct,fs):
179 /*
180 * If this is setting fs as for normal 64-bit use but
181 * setting fs_base has implicitly changed it, leave it.
182 */
183 if ((value == FS_TLS_SEL && task->thread.fsindex == 0 &&
184 task->thread.fs != 0) ||
185 (value == 0 && task->thread.fsindex == FS_TLS_SEL &&
186 task->thread.fs == 0))
187 break;
188 task->thread.fsindex = value;
189 if (task == current)
190 loadsegment(fs, task->thread.fsindex);
191 break;
192 case offsetof(struct user_regs_struct,gs):
193 /*
194 * If this is setting gs as for normal 64-bit use but
195 * setting gs_base has implicitly changed it, leave it.
196 */
197 if ((value == GS_TLS_SEL && task->thread.gsindex == 0 &&
198 task->thread.gs != 0) ||
199 (value == 0 && task->thread.gsindex == GS_TLS_SEL &&
200 task->thread.gs == 0))
201 break;
202 task->thread.gsindex = value;
203 if (task == current)
204 load_gs_index(task->thread.gsindex);
205 break;
206 case offsetof(struct user_regs_struct,ds):
207 task->thread.ds = value;
208 if (task == current)
209 loadsegment(ds, task->thread.ds);
210 break;
211 case offsetof(struct user_regs_struct,es):
212 task->thread.es = value;
213 if (task == current)
214 loadsegment(es, task->thread.es);
215 break;
216
217 /*
218 * Can't actually change these in 64-bit mode.
219 */
220 case offsetof(struct user_regs_struct,cs):
221 #ifdef CONFIG_IA32_EMULATION
222 if (test_tsk_thread_flag(task, TIF_IA32))
223 task_pt_regs(task)->cs = value;
224 #endif
225 break;
226 case offsetof(struct user_regs_struct,ss):
227 #ifdef CONFIG_IA32_EMULATION
228 if (test_tsk_thread_flag(task, TIF_IA32))
229 task_pt_regs(task)->ss = value;
230 #endif
231 break;
232 }
233
234 return 0;
235 }
236
237 static unsigned long debugreg_addr_limit(struct task_struct *task)
238 {
239 #ifdef CONFIG_IA32_EMULATION
240 if (test_tsk_thread_flag(task, TIF_IA32))
241 return IA32_PAGE_OFFSET - 3;
242 #endif
243 return TASK_SIZE64 - 7;
244 }
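/*
 * As on 32-bit, the limits leave headroom for the widest watchpoint
 * each mode allows: 4 bytes for an IA32 task (IA32_PAGE_OFFSET - 3)
 * and 8 bytes in long mode, where LENi == 0b10 is defined
 * (TASK_SIZE64 - 7).
 */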
245
246 #endif /* CONFIG_X86_32 */
247
248 static unsigned long get_flags(struct task_struct *task)
249 {
250 unsigned long retval = task_pt_regs(task)->flags;
251
252 /*
253 * If the debugger set TF, hide it from the readout.
254 */
255 if (test_tsk_thread_flag(task, TIF_FORCED_TF))
256 retval &= ~X86_EFLAGS_TF;
257
258 return retval;
259 }
260
261 static int set_flags(struct task_struct *task, unsigned long value)
262 {
263 struct pt_regs *regs = task_pt_regs(task);
264
265 /*
266 * If the user value contains TF, mark that
267 * it was not "us" (the debugger) that set it.
268 * If not, make sure it stays set if we had.
269 */
270 if (value & X86_EFLAGS_TF)
271 clear_tsk_thread_flag(task, TIF_FORCED_TF);
272 else if (test_tsk_thread_flag(task, TIF_FORCED_TF))
273 value |= X86_EFLAGS_TF;
274
275 regs->flags = (regs->flags & ~FLAG_MASK) | (value & FLAG_MASK);
276
277 return 0;
278 }
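/*
 * Worked example of the TF bookkeeping: user_enable_single_step()
 * sets X86_EFLAGS_TF together with TIF_FORCED_TF, so get_flags()
 * hides that TF from the debugger. If the debugger itself writes TF
 * through set_flags(), TIF_FORCED_TF is cleared and the bit becomes
 * visible; writing flags without TF while TIF_FORCED_TF is set keeps
 * TF in regs->flags so the pending single-step still triggers.
 */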
279
280 static int putreg(struct task_struct *child,
281 unsigned long offset, unsigned long value)
282 {
283 switch (offset) {
284 case offsetof(struct user_regs_struct, cs):
285 case offsetof(struct user_regs_struct, ds):
286 case offsetof(struct user_regs_struct, es):
287 case offsetof(struct user_regs_struct, fs):
288 case offsetof(struct user_regs_struct, gs):
289 case offsetof(struct user_regs_struct, ss):
290 return set_segment_reg(child, offset, value);
291
292 case offsetof(struct user_regs_struct, flags):
293 return set_flags(child, value);
294
295 #ifdef CONFIG_X86_64
296 case offsetof(struct user_regs_struct,fs_base):
297 if (value >= TASK_SIZE_OF(child))
298 return -EIO;
299 /*
300 * When changing the segment base, use do_arch_prctl
301 * to set either thread.fs or thread.fsindex and the
302 * corresponding GDT slot.
303 */
304 if (child->thread.fs != value)
305 return do_arch_prctl(child, ARCH_SET_FS, value);
306 return 0;
307 case offsetof(struct user_regs_struct,gs_base):
308 /*
309 * Exactly the same here as the %fs handling above.
310 */
311 if (value >= TASK_SIZE_OF(child))
312 return -EIO;
313 if (child->thread.gs != value)
314 return do_arch_prctl(child, ARCH_SET_GS, value);
315 return 0;
316 #endif
317 }
318
319 *pt_regs_access(task_pt_regs(child), offset) = value;
320 return 0;
321 }
322
323 static unsigned long getreg(struct task_struct *task, unsigned long offset)
324 {
325 switch (offset) {
326 case offsetof(struct user_regs_struct, cs):
327 case offsetof(struct user_regs_struct, ds):
328 case offsetof(struct user_regs_struct, es):
329 case offsetof(struct user_regs_struct, fs):
330 case offsetof(struct user_regs_struct, gs):
331 case offsetof(struct user_regs_struct, ss):
332 return get_segment_reg(task, offset);
333
334 case offsetof(struct user_regs_struct, flags):
335 return get_flags(task);
336
337 #ifdef CONFIG_X86_64
338 case offsetof(struct user_regs_struct, fs_base): {
339 /*
340 * do_arch_prctl may have used a GDT slot instead of
341 * the MSR. To userland, it appears the same either
342 * way, except the %fs segment selector might not be 0.
343 */
344 unsigned int seg = task->thread.fsindex;
345 if (task->thread.fs != 0)
346 return task->thread.fs;
347 if (task == current)
348 asm("movl %%fs,%0" : "=r" (seg));
349 if (seg != FS_TLS_SEL)
350 return 0;
351 return get_desc_base(&task->thread.tls_array[FS_TLS]);
352 }
353 case offsetof(struct user_regs_struct, gs_base): {
354 /*
355 * Exactly the same here as the %fs handling above.
356 */
357 unsigned int seg = task->thread.gsindex;
358 if (task->thread.gs != 0)
359 return task->thread.gs;
360 if (task == current)
361 asm("movl %%gs,%0" : "=r" (seg));
362 if (seg != GS_TLS_SEL)
363 return 0;
364 return get_desc_base(&task->thread.tls_array[GS_TLS]);
365 }
366 #endif
367 }
368
369 return *pt_regs_access(task_pt_regs(task), offset);
370 }
371
372 static int genregs_get(struct task_struct *target,
373 const struct user_regset *regset,
374 unsigned int pos, unsigned int count,
375 void *kbuf, void __user *ubuf)
376 {
377 if (kbuf) {
378 unsigned long *k = kbuf;
379 while (count > 0) {
380 *k++ = getreg(target, pos);
381 count -= sizeof(*k);
382 pos += sizeof(*k);
383 }
384 } else {
385 unsigned long __user *u = ubuf;
386 while (count > 0) {
387 if (__put_user(getreg(target, pos), u++))
388 return -EFAULT;
389 count -= sizeof(*u);
390 pos += sizeof(*u);
391 }
392 }
393
394 return 0;
395 }
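/*
 * A sketch of how the regset core drives this: copying out the whole
 * general-register set amounts to
 *
 *	genregs_get(task, regset, 0, sizeof(struct user_regs_struct),
 *		    kbuf, NULL);
 *
 * with pos advancing one word at a time, so segment and flags reads
 * go through the same getreg() paths as PTRACE_PEEKUSR.
 */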
396
397 static int genregs_set(struct task_struct *target,
398 const struct user_regset *regset,
399 unsigned int pos, unsigned int count,
400 const void *kbuf, const void __user *ubuf)
401 {
402 int ret = 0;
403 if (kbuf) {
404 const unsigned long *k = kbuf;
405 while (count > 0 && !ret) {
406 ret = putreg(target, pos, *k++);
407 count -= sizeof(*k);
408 pos += sizeof(*k);
409 }
410 } else {
411 const unsigned long __user *u = ubuf;
412 while (count > 0 && !ret) {
413 unsigned long word;
414 ret = __get_user(word, u++);
415 if (ret)
416 break;
417 ret = putreg(target, pos, word);
418 count -= sizeof(*u);
419 pos += sizeof(*u);
420 }
421 }
422 return ret;
423 }
424
425 /*
426 * This function is trivial and will be inlined by the compiler.
427 * Having it separates the implementation details of debug
428 * registers from the interface details of ptrace.
429 */
430 static unsigned long ptrace_get_debugreg(struct task_struct *child, int n)
431 {
432 switch (n) {
433 case 0: return child->thread.debugreg0;
434 case 1: return child->thread.debugreg1;
435 case 2: return child->thread.debugreg2;
436 case 3: return child->thread.debugreg3;
437 case 6: return child->thread.debugreg6;
438 case 7: return child->thread.debugreg7;
439 }
440 return 0;
441 }
442
443 static int ptrace_set_debugreg(struct task_struct *child,
444 int n, unsigned long data)
445 {
446 int i;
447
448 if (unlikely(n == 4 || n == 5))
449 return -EIO;
450
451 if (n < 4 && unlikely(data >= debugreg_addr_limit(child)))
452 return -EIO;
453
454 switch (n) {
455 case 0: child->thread.debugreg0 = data; break;
456 case 1: child->thread.debugreg1 = data; break;
457 case 2: child->thread.debugreg2 = data; break;
458 case 3: child->thread.debugreg3 = data; break;
459
460 case 6:
461 if ((data & ~0xffffffffUL) != 0)
462 return -EIO;
463 child->thread.debugreg6 = data;
464 break;
465
466 case 7:
467 /*
468 * Sanity-check data. Take one half-byte (nibble) at a time with
469 * check = (val >> (16 + 4*i)) & 0xf. It contains the
470 * R/Wi and LENi bits; bits 0 and 1 are R/Wi, and bits
471 * 2 and 3 are LENi. Given a list of invalid values,
472 * we do mask |= 1 << invalid_value, so that
473 * (mask >> check) & 1 is a correct test for invalid
474 * values.
475 *
476 * R/Wi contains the type of the breakpoint /
477 * watchpoint, LENi contains the length of the watched
478 * data in the watchpoint case.
479 *
480 * The invalid values are:
481 * - LENi == 0b10 (undefined), so mask |= 0x0f00. [32-bit]
482 * - R/Wi == 0b10 (break on I/O reads or writes), so
483 * mask |= 0x4444.
484 * - R/Wi == 0b00 && LENi != 0b00, so we have mask |=
485 * 0x1110.
486 *
487 * Finally, mask = 0x0f00 | 0x4444 | 0x1110 == 0x5f54.
488 *
489 * See the Intel Manual "System Programming Guide",
490 * 15.2.4
491 *
492 * Note that LENi == 0b10 is defined on x86_64 in long
493 * mode (i.e. even for 32-bit userspace software, but a
494 * 64-bit kernel), so the x86_64 mask value is 0x5554.
495 * See the AMD manual no. 24593 (AMD64 System Programming)
496 */
497 #ifdef CONFIG_X86_32
498 #define DR7_MASK 0x5f54
499 #else
500 #define DR7_MASK 0x5554
501 #endif
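/*
 * Worked example: a 4-byte write watchpoint encodes R/Wi == 0b01,
 * LENi == 0b11, i.e. nibble 0b1101 == 13; bit 13 is clear in both
 * masks, so it is accepted. An I/O breakpoint (R/Wi == 0b10,
 * LENi == 0b00, nibble 2) hits bit 2 of the mask and is rejected.
 */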
502 data &= ~DR_CONTROL_RESERVED;
503 for (i = 0; i < 4; i++)
504 if ((DR7_MASK >> ((data >> (16 + 4*i)) & 0xf)) & 1)
505 return -EIO;
506 child->thread.debugreg7 = data;
507 if (data)
508 set_tsk_thread_flag(child, TIF_DEBUG);
509 else
510 clear_tsk_thread_flag(child, TIF_DEBUG);
511 break;
512 }
513
514 return 0;
515 }
516
517 static int ptrace_bts_get_size(struct task_struct *child)
518 {
519 if (!child->thread.ds_area_msr)
520 return -ENXIO;
521
522 return ds_get_bts_index((void *)child->thread.ds_area_msr);
523 }
524
525 static int ptrace_bts_read_record(struct task_struct *child,
526 long index,
527 struct bts_struct __user *out)
528 {
529 struct bts_struct ret;
530 int retval;
531 int bts_end;
532 int bts_index;
533
534 if (!child->thread.ds_area_msr)
535 return -ENXIO;
536
537 if (index < 0)
538 return -EINVAL;
539
540 bts_end = ds_get_bts_end((void *)child->thread.ds_area_msr);
541 if (bts_end <= index)
542 return -EINVAL;
543
544 /* translate the ptrace bts index into the ds bts index */
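/*
 * E.g. with bts_end == 4 and three records written so far (ds
 * index == 3), ptrace index 0, the most recent record, maps to ds
 * index 2, and ptrace index 2 maps to ds index 0.
 */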
545 bts_index = ds_get_bts_index((void *)child->thread.ds_area_msr);
546 bts_index -= (index + 1);
547 if (bts_index < 0)
548 bts_index += bts_end;
549
550 retval = ds_read_bts((void *)child->thread.ds_area_msr,
551 bts_index, &ret);
552 if (retval)
553 return retval;
554
555 if (copy_to_user(out, &ret, sizeof(ret)))
556 return -EFAULT;
557
558 return sizeof(ret);
559 }
560
561 static int ptrace_bts_write_record(struct task_struct *child,
562 const struct bts_struct *in)
563 {
564 int retval;
565
566 if (!child->thread.ds_area_msr)
567 return -ENXIO;
568
569 retval = ds_write_bts((void *)child->thread.ds_area_msr, in);
570 if (retval)
571 return retval;
572
573 return sizeof(*in);
574 }
575
576 static int ptrace_bts_clear(struct task_struct *child)
577 {
578 if (!child->thread.ds_area_msr)
579 return -ENXIO;
580
581 return ds_clear((void *)child->thread.ds_area_msr);
582 }
583
584 static int ptrace_bts_drain(struct task_struct *child,
585 struct bts_struct __user *out)
586 {
587 int end, i;
588 void *ds = (void *)child->thread.ds_area_msr;
589
590 if (!ds)
591 return -ENXIO;
592
593 end = ds_get_bts_index(ds);
594 if (end <= 0)
595 return end;
596
597 for (i = 0; i < end; i++, out++) {
598 struct bts_struct ret;
599 int retval;
600
601 retval = ds_read_bts(ds, i, &ret);
602 if (retval < 0)
603 return retval;
604
605 if (copy_to_user(out, &ret, sizeof(ret)))
606 return -EFAULT;
607 }
608
609 ds_clear(ds);
610
611 return i;
612 }
613
614 static int ptrace_bts_config(struct task_struct *child,
615 const struct ptrace_bts_config __user *ucfg)
616 {
617 struct ptrace_bts_config cfg;
618 unsigned long debugctl_mask;
619 int bts_size, ret;
620 void *ds;
621
622 if (copy_from_user(&cfg, ucfg, sizeof(cfg)))
623 return -EFAULT;
624
625 bts_size = 0;
626 ds = (void *)child->thread.ds_area_msr;
627 if (ds) {
628 bts_size = ds_get_bts_size(ds);
629 if (bts_size < 0)
630 return bts_size;
631 }
632
633 if (bts_size != cfg.size) {
634 ret = ds_free((void **)&child->thread.ds_area_msr);
635 if (ret < 0)
636 return ret;
637
638 if (cfg.size > 0)
639 ret = ds_allocate((void **)&child->thread.ds_area_msr,
640 cfg.size);
641 ds = (void *)child->thread.ds_area_msr;
642 if (ds)
643 set_tsk_thread_flag(child, TIF_DS_AREA_MSR);
644 else
645 clear_tsk_thread_flag(child, TIF_DS_AREA_MSR);
646
647 if (ret < 0)
648 return ret;
649
650 bts_size = ds_get_bts_size(ds);
651 if (bts_size <= 0)
652 return bts_size;
653 }
654
655 if (ds) {
656 if (cfg.flags & PTRACE_BTS_O_SIGNAL) {
657 ret = ds_set_overflow(ds, DS_O_SIGNAL);
658 } else {
659 ret = ds_set_overflow(ds, DS_O_WRAP);
660 }
661 if (ret < 0)
662 return ret;
663 }
664
665 debugctl_mask = ds_debugctl_mask();
666 if (ds && (cfg.flags & PTRACE_BTS_O_TRACE)) {
667 child->thread.debugctlmsr |= debugctl_mask;
668 set_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
669 } else {
670 /* There is no way for us to check whether we 'own' the
671 * respective bits in the DEBUGCTL MSR that we are about
672 * to clear. */
673 child->thread.debugctlmsr &= ~debugctl_mask;
674
675 if (!child->thread.debugctlmsr)
676 clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
677 }
678
679 if (ds && (cfg.flags & PTRACE_BTS_O_SCHED))
680 set_tsk_thread_flag(child, TIF_BTS_TRACE_TS);
681 else
682 clear_tsk_thread_flag(child, TIF_BTS_TRACE_TS);
683
684 return 0;
685 }
686
687 static int ptrace_bts_status(struct task_struct *child,
688 struct ptrace_bts_config __user *ucfg)
689 {
690 void *ds = (void *)child->thread.ds_area_msr;
691 struct ptrace_bts_config cfg;
692
693 memset(&cfg, 0, sizeof(cfg));
694
695 if (ds) {
696 cfg.size = ds_get_bts_size(ds);
697
698 if (ds_get_overflow(ds) == DS_O_SIGNAL)
699 cfg.flags |= PTRACE_BTS_O_SIGNAL;
700
701 if (test_tsk_thread_flag(child, TIF_DEBUGCTLMSR) &&
702 child->thread.debugctlmsr & ds_debugctl_mask())
703 cfg.flags |= PTRACE_BTS_O_TRACE;
704
705 if (test_tsk_thread_flag(child, TIF_BTS_TRACE_TS))
706 cfg.flags |= PTRACE_BTS_O_SCHED;
707 }
708
709 if (copy_to_user(ucfg, &cfg, sizeof(cfg)))
710 return -EFAULT;
711
712 return sizeof(cfg);
713 }
714
715 void ptrace_bts_take_timestamp(struct task_struct *tsk,
716 enum bts_qualifier qualifier)
717 {
718 struct bts_struct rec = {
719 .qualifier = qualifier,
720 .variant.jiffies = jiffies
721 };
722
723 ptrace_bts_write_record(tsk, &rec);
724 }
725
726 /*
727 * Called by kernel/ptrace.c when detaching..
728 *
729 * Make sure the single step bit is not set.
730 */
731 void ptrace_disable(struct task_struct *child)
732 {
733 user_disable_single_step(child);
734 #ifdef TIF_SYSCALL_EMU
735 clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
736 #endif
737 ptrace_bts_config(child, /* options = */ 0);
738 if (child->thread.ds_area_msr) {
739 ds_free((void **)&child->thread.ds_area_msr);
740 clear_tsk_thread_flag(child, TIF_DS_AREA_MSR);
741 }
742 }
743
744 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
745 {
746 int i, ret;
747 unsigned long __user *datap = (unsigned long __user *)data;
748
749 switch (request) {
750 /* when I and D space are separate, these will need to be fixed. */
751 case PTRACE_PEEKTEXT: /* read word at location addr. */
752 case PTRACE_PEEKDATA:
753 ret = generic_ptrace_peekdata(child, addr, data);
754 break;
755
756 /* read the word at location addr in the USER area. */
757 case PTRACE_PEEKUSR: {
758 unsigned long tmp;
759
760 ret = -EIO;
761 if ((addr & (sizeof(data) - 1)) || addr < 0 ||
762 addr >= sizeof(struct user))
763 break;
764
765 tmp = 0; /* Default return condition */
766 if (addr < sizeof(struct user_regs_struct))
767 tmp = getreg(child, addr);
768 else if (addr >= offsetof(struct user, u_debugreg[0]) &&
769 addr <= offsetof(struct user, u_debugreg[7])) {
770 addr -= offsetof(struct user, u_debugreg[0]);
771 tmp = ptrace_get_debugreg(child, addr / sizeof(data));
772 }
773 ret = put_user(tmp, datap);
774 break;
775 }
776
777 /* when I and D space are separate, this will have to be fixed. */
778 case PTRACE_POKETEXT: /* write the word at location addr. */
779 case PTRACE_POKEDATA:
780 ret = generic_ptrace_pokedata(child, addr, data);
781 break;
782
783 case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
784 ret = -EIO;
785 if ((addr & (sizeof(data) - 1)) || addr < 0 ||
786 addr >= sizeof(struct user))
787 break;
788
789 if (addr < sizeof(struct user_regs_struct))
790 ret = putreg(child, addr, data);
791 else if (addr >= offsetof(struct user, u_debugreg[0]) &&
792 addr <= offsetof(struct user, u_debugreg[7])) {
793 addr -= offsetof(struct user, u_debugreg[0]);
794 ret = ptrace_set_debugreg(child,
795 addr / sizeof(data), data);
796 }
797 break;
798
799 case PTRACE_GETREGS: { /* Get all gp regs from the child. */
800 if (!access_ok(VERIFY_WRITE, datap, sizeof(struct user_regs_struct))) {
801 ret = -EIO;
802 break;
803 }
804 for (i = 0; i < sizeof(struct user_regs_struct); i += sizeof(long)) {
805 __put_user(getreg(child, i), datap);
806 datap++;
807 }
808 ret = 0;
809 break;
810 }
811
812 case PTRACE_SETREGS: { /* Set all gp regs in the child. */
813 unsigned long tmp;
814 if (!access_ok(VERIFY_READ, datap, sizeof(struct user_regs_struct))) {
815 ret = -EIO;
816 break;
817 }
818 for (i = 0; i < sizeof(struct user_regs_struct); i += sizeof(long)) {
819 __get_user(tmp, datap);
820 putreg(child, i, tmp);
821 datap++;
822 }
823 ret = 0;
824 break;
825 }
826
827 case PTRACE_GETFPREGS: { /* Get the child FPU state. */
828 if (!access_ok(VERIFY_WRITE, datap,
829 sizeof(struct user_i387_struct))) {
830 ret = -EIO;
831 break;
832 }
833 ret = 0;
834 if (!tsk_used_math(child))
835 init_fpu(child);
836 get_fpregs((struct user_i387_struct __user *)data, child);
837 break;
838 }
839
840 case PTRACE_SETFPREGS: { /* Set the child FPU state. */
841 if (!access_ok(VERIFY_READ, datap,
842 sizeof(struct user_i387_struct))) {
843 ret = -EIO;
844 break;
845 }
846 set_stopped_child_used_math(child);
847 set_fpregs(child, (struct user_i387_struct __user *)data);
848 ret = 0;
849 break;
850 }
851
852 #ifdef CONFIG_X86_32
853 case PTRACE_GETFPXREGS: { /* Get the child extended FPU state. */
854 if (!access_ok(VERIFY_WRITE, datap,
855 sizeof(struct user_fxsr_struct))) {
856 ret = -EIO;
857 break;
858 }
859 if (!tsk_used_math(child))
860 init_fpu(child);
861 ret = get_fpxregs((struct user_fxsr_struct __user *)data, child);
862 break;
863 }
864
865 case PTRACE_SETFPXREGS: { /* Set the child extended FPU state. */
866 if (!access_ok(VERIFY_READ, datap,
867 sizeof(struct user_fxsr_struct))) {
868 ret = -EIO;
869 break;
870 }
871 set_stopped_child_used_math(child);
872 ret = set_fpxregs(child, (struct user_fxsr_struct __user *)data);
873 break;
874 }
875 #endif
876
877 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
878 case PTRACE_GET_THREAD_AREA:
879 if (addr < 0)
880 return -EIO;
881 ret = do_get_thread_area(child, addr,
882 (struct user_desc __user *) data);
883 break;
884
885 case PTRACE_SET_THREAD_AREA:
886 if (addr < 0)
887 return -EIO;
888 ret = do_set_thread_area(child, addr,
889 (struct user_desc __user *) data, 0);
890 break;
891 #endif
892
893 #ifdef CONFIG_X86_64
894 /* normal 64bit interface to access TLS data.
895 Works just like arch_prctl, except that the arguments
896 are reversed. */
897 case PTRACE_ARCH_PRCTL:
898 ret = do_arch_prctl(child, data, addr);
899 break;
900 #endif
901
902 case PTRACE_BTS_CONFIG:
903 ret = ptrace_bts_config
904 (child, (struct ptrace_bts_config __user *)addr);
905 break;
906
907 case PTRACE_BTS_STATUS:
908 ret = ptrace_bts_status
909 (child, (struct ptrace_bts_config __user *)addr);
910 break;
911
912 case PTRACE_BTS_SIZE:
913 ret = ptrace_bts_get_size(child);
914 break;
915
916 case PTRACE_BTS_GET:
917 ret = ptrace_bts_read_record
918 (child, data, (struct bts_struct __user *) addr);
919 break;
920
921 case PTRACE_BTS_CLEAR:
922 ret = ptrace_bts_clear(child);
923 break;
924
925 case PTRACE_BTS_DRAIN:
926 ret = ptrace_bts_drain
927 (child, (struct bts_struct __user *) addr);
928 break;
929
930 default:
931 ret = ptrace_request(child, request, addr, data);
932 break;
933 }
934
935 return ret;
936 }
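/*
 * A minimal user-space sketch of the PTRACE_PEEKUSR path above
 * (32-bit, with the glibc layout from <sys/user.h> assumed):
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/user.h>
 *	#include <stddef.h>
 *
 *	errno = 0;
 *	long ip  = ptrace(PTRACE_PEEKUSER, pid,
 *			  offsetof(struct user, regs.eip), NULL);
 *	long dr7 = ptrace(PTRACE_PEEKUSER, pid,
 *			  offsetof(struct user, u_debugreg[7]), NULL);
 *
 * Both offsets are word-aligned and below sizeof(struct user), so
 * they pass the checks above and route to getreg() and
 * ptrace_get_debugreg() respectively.
 */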
937
938 #ifdef CONFIG_IA32_EMULATION
939
940 #include <linux/compat.h>
941 #include <linux/syscalls.h>
942 #include <asm/ia32.h>
943 #include <asm/user32.h>
944
945 #define R32(l,q) \
946 case offsetof(struct user32, regs.l): \
947 regs->q = value; break
948
949 #define SEG32(rs) \
950 case offsetof(struct user32, regs.rs): \
951 return set_segment_reg(child, \
952 offsetof(struct user_regs_struct, rs), \
953 value); \
954 break
955
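/*
 * For instance, R32(ebx, bx); expands to
 *
 *	case offsetof(struct user32, regs.ebx):
 *		regs->bx = value; break;
 *
 * mapping each 32-bit user-visible register onto its 64-bit pt_regs
 * slot.
 */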
956 static int putreg32(struct task_struct *child, unsigned regno, u32 value)
957 {
958 struct pt_regs *regs = task_pt_regs(child);
959
960 switch (regno) {
961
962 SEG32(cs);
963 SEG32(ds);
964 SEG32(es);
965 SEG32(fs);
966 SEG32(gs);
967 SEG32(ss);
968
969 R32(ebx, bx);
970 R32(ecx, cx);
971 R32(edx, dx);
972 R32(edi, di);
973 R32(esi, si);
974 R32(ebp, bp);
975 R32(eax, ax);
976 R32(orig_eax, orig_ax);
977 R32(eip, ip);
978 R32(esp, sp);
979
980 case offsetof(struct user32, regs.eflags):
981 return set_flags(child, value);
982
983 case offsetof(struct user32, u_debugreg[0]) ...
984 offsetof(struct user32, u_debugreg[7]):
985 regno -= offsetof(struct user32, u_debugreg[0]);
986 return ptrace_set_debugreg(child, regno / 4, value);
987
988 default:
989 if (regno > sizeof(struct user32) || (regno & 3))
990 return -EIO;
991
992 /*
993 * Other dummy fields in the virtual user structure
994 * are ignored
995 */
996 break;
997 }
998 return 0;
999 }
1000
1001 #undef R32
1002 #undef SEG32
1003
1004 #define R32(l,q) \
1005 case offsetof(struct user32, regs.l): \
1006 *val = regs->q; break
1007
1008 #define SEG32(rs) \
1009 case offsetof(struct user32, regs.rs): \
1010 *val = get_segment_reg(child, \
1011 offsetof(struct user_regs_struct, rs)); \
1012 break
1013
1014 static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
1015 {
1016 struct pt_regs *regs = task_pt_regs(child);
1017
1018 switch (regno) {
1019
1020 SEG32(ds);
1021 SEG32(es);
1022 SEG32(fs);
1023 SEG32(gs);
1024
1025 R32(cs, cs);
1026 R32(ss, ss);
1027 R32(ebx, bx);
1028 R32(ecx, cx);
1029 R32(edx, dx);
1030 R32(edi, di);
1031 R32(esi, si);
1032 R32(ebp, bp);
1033 R32(eax, ax);
1034 R32(orig_eax, orig_ax);
1035 R32(eip, ip);
1036 R32(esp, sp);
1037
1038 case offsetof(struct user32, regs.eflags):
1039 *val = get_flags(child);
1040 break;
1041
1042 case offsetof(struct user32, u_debugreg[0]) ...
1043 offsetof(struct user32, u_debugreg[7]):
1044 regno -= offsetof(struct user32, u_debugreg[0]);
1045 *val = ptrace_get_debugreg(child, regno / 4);
1046 break;
1047
1048 default:
1049 if (regno > sizeof(struct user32) || (regno & 3))
1050 return -EIO;
1051
1052 /*
1053 * Other dummy fields in the virtual user structure
1054 * are ignored
1055 */
1056 *val = 0;
1057 break;
1058 }
1059 return 0;
1060 }
1061
1062 #undef R32
1063 #undef SEG32
1064
1065 static int genregs32_get(struct task_struct *target,
1066 const struct user_regset *regset,
1067 unsigned int pos, unsigned int count,
1068 void *kbuf, void __user *ubuf)
1069 {
1070 if (kbuf) {
1071 compat_ulong_t *k = kbuf;
1072 while (count > 0) {
1073 getreg32(target, pos, k++);
1074 count -= sizeof(*k);
1075 pos += sizeof(*k);
1076 }
1077 } else {
1078 compat_ulong_t __user *u = ubuf;
1079 while (count > 0) {
1080 compat_ulong_t word;
1081 getreg32(target, pos, &word);
1082 if (__put_user(word, u++))
1083 return -EFAULT;
1084 count -= sizeof(*u);
1085 pos += sizeof(*u);
1086 }
1087 }
1088
1089 return 0;
1090 }
1091
1092 static int genregs32_set(struct task_struct *target,
1093 const struct user_regset *regset,
1094 unsigned int pos, unsigned int count,
1095 const void *kbuf, const void __user *ubuf)
1096 {
1097 int ret = 0;
1098 if (kbuf) {
1099 const compat_ulong_t *k = kbuf;
1100 while (count > 0 && !ret) {
1101 ret = putreg(target, pos, *k++);
1102 count -= sizeof(*k);
1103 pos += sizeof(*k);
1104 }
1105 } else {
1106 const compat_ulong_t __user *u = ubuf;
1107 while (count > 0 && !ret) {
1108 compat_ulong_t word;
1109 ret = __get_user(word, u++);
1110 if (ret)
1111 break;
1112 ret = putreg(target, pos, word);
1113 count -= sizeof(*u);
1114 pos += sizeof(*u);
1115 }
1116 }
1117 return ret;
1118 }
1119
1120 static long ptrace32_siginfo(unsigned request, u32 pid, u32 addr, u32 data)
1121 {
1122 siginfo_t __user *si = compat_alloc_user_space(sizeof(siginfo_t));
1123 compat_siginfo_t __user *si32 = compat_ptr(data);
1124 siginfo_t ssi;
1125 int ret;
1126
1127 if (request == PTRACE_SETSIGINFO) {
1128 memset(&ssi, 0, sizeof(siginfo_t));
1129 ret = copy_siginfo_from_user32(&ssi, si32);
1130 if (ret)
1131 return ret;
1132 if (copy_to_user(si, &ssi, sizeof(siginfo_t)))
1133 return -EFAULT;
1134 }
1135 ret = sys_ptrace(request, pid, addr, (unsigned long)si);
1136 if (ret)
1137 return ret;
1138 if (request == PTRACE_GETSIGINFO) {
1139 if (copy_from_user(&ssi, si, sizeof(siginfo_t)))
1140 return -EFAULT;
1141 ret = copy_siginfo_to_user32(si32, &ssi);
1142 }
1143 return ret;
1144 }
1145
1146 asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
1147 {
1148 struct task_struct *child;
1149 struct pt_regs *childregs;
1150 void __user *datap = compat_ptr(data);
1151 int ret;
1152 __u32 val;
1153
1154 switch (request) {
1155 case PTRACE_TRACEME:
1156 case PTRACE_ATTACH:
1157 case PTRACE_KILL:
1158 case PTRACE_CONT:
1159 case PTRACE_SINGLESTEP:
1160 case PTRACE_SINGLEBLOCK:
1161 case PTRACE_DETACH:
1162 case PTRACE_SYSCALL:
1163 case PTRACE_OLDSETOPTIONS:
1164 case PTRACE_SETOPTIONS:
1165 case PTRACE_SET_THREAD_AREA:
1166 case PTRACE_GET_THREAD_AREA:
1167 case PTRACE_BTS_CONFIG:
1168 case PTRACE_BTS_STATUS:
1169 case PTRACE_BTS_SIZE:
1170 case PTRACE_BTS_GET:
1171 case PTRACE_BTS_CLEAR:
1172 case PTRACE_BTS_DRAIN:
1173 return sys_ptrace(request, pid, addr, data);
1174
1175 default:
1176 return -EINVAL;
1177
1178 case PTRACE_PEEKTEXT:
1179 case PTRACE_PEEKDATA:
1180 case PTRACE_POKEDATA:
1181 case PTRACE_POKETEXT:
1182 case PTRACE_POKEUSR:
1183 case PTRACE_PEEKUSR:
1184 case PTRACE_GETREGS:
1185 case PTRACE_SETREGS:
1186 case PTRACE_SETFPREGS:
1187 case PTRACE_GETFPREGS:
1188 case PTRACE_SETFPXREGS:
1189 case PTRACE_GETFPXREGS:
1190 case PTRACE_GETEVENTMSG:
1191 break;
1192
1193 case PTRACE_SETSIGINFO:
1194 case PTRACE_GETSIGINFO:
1195 return ptrace32_siginfo(request, pid, addr, data);
1196 }
1197
1198 child = ptrace_get_task_struct(pid);
1199 if (IS_ERR(child))
1200 return PTR_ERR(child);
1201
1202 ret = ptrace_check_attach(child, request == PTRACE_KILL);
1203 if (ret < 0)
1204 goto out;
1205
1206 childregs = task_pt_regs(child);
1207
1208 switch (request) {
1209 case PTRACE_PEEKDATA:
1210 case PTRACE_PEEKTEXT:
1211 ret = 0;
1212 if (access_process_vm(child, addr, &val, sizeof(u32), 0) !=
1213 sizeof(u32))
1214 ret = -EIO;
1215 else
1216 ret = put_user(val, (unsigned int __user *)datap);
1217 break;
1218
1219 case PTRACE_POKEDATA:
1220 case PTRACE_POKETEXT:
1221 ret = 0;
1222 if (access_process_vm(child, addr, &data, sizeof(u32), 1) !=
1223 sizeof(u32))
1224 ret = -EIO;
1225 break;
1226
1227 case PTRACE_PEEKUSR:
1228 ret = getreg32(child, addr, &val);
1229 if (ret == 0)
1230 ret = put_user(val, (__u32 __user *)datap);
1231 break;
1232
1233 case PTRACE_POKEUSR:
1234 ret = putreg32(child, addr, data);
1235 break;
1236
1237 case PTRACE_GETREGS: { /* Get all gp regs from the child. */
1238 int i;
1239
1240 if (!access_ok(VERIFY_WRITE, datap, sizeof(struct user_regs_struct32))) {
1241 ret = -EIO;
1242 break;
1243 }
1244 ret = 0;
1245 for (i = 0; i < sizeof(struct user_regs_struct32); i += sizeof(__u32)) {
1246 getreg32(child, i, &val);
1247 ret |= __put_user(val, (u32 __user *)datap);
1248 datap += sizeof(u32);
1249 }
1250 break;
1251 }
1252
1253 case PTRACE_SETREGS: { /* Set all gp regs in the child. */
1254 unsigned long tmp;
1255 int i;
1256
1257 if (!access_ok(VERIFY_READ, datap, sizeof(struct user_regs_struct32))) {
1258 ret = -EIO;
1259 break;
1260 }
1261 ret = 0;
1262 for (i = 0; i < sizeof(struct user_regs_struct32); i += sizeof(u32)) {
1263 ret |= __get_user(tmp, (u32 __user *)datap);
1264 putreg32(child, i, tmp);
1265 datap += sizeof(u32);
1266 }
1267 break;
1268 }
1269
1270 case PTRACE_GETFPREGS:
1271 ret = -EIO;
1272 if (!access_ok(VERIFY_READ, compat_ptr(data),
1273 sizeof(struct user_i387_struct)))
1274 break;
1275 save_i387_ia32(child, datap, childregs, 1);
1276 ret = 0;
1277 break;
1278
1279 case PTRACE_SETFPREGS:
1280 ret = -EIO;
1281 if (!access_ok(VERIFY_WRITE, datap,
1282 sizeof(struct user_i387_struct)))
1283 break;
1284 ret = 0;
1285 /* don't check EFAULT to be bug-to-bug compatible with i386 */
1286 restore_i387_ia32(child, datap, 1);
1287 break;
1288
1289 case PTRACE_GETFPXREGS: {
1290 struct user32_fxsr_struct __user *u = datap;
1291
1292 init_fpu(child);
1293 ret = -EIO;
1294 if (!access_ok(VERIFY_WRITE, u, sizeof(*u)))
1295 break;
1296 ret = -EFAULT;
1297 if (__copy_to_user(u, &child->thread.i387.fxsave, sizeof(*u)))
1298 break;
1299 ret = __put_user(childregs->cs, &u->fcs);
1300 ret |= __put_user(child->thread.ds, &u->fos);
1301 break;
1302 }
1303 case PTRACE_SETFPXREGS: {
1304 struct user32_fxsr_struct __user *u = datap;
1305
1306 unlazy_fpu(child);
1307 ret = -EIO;
1308 if (!access_ok(VERIFY_READ, u, sizeof(*u)))
1309 break;
1310 /*
1311 * No EFAULT check, to stay bug-to-bug compatible with i386,
1312 * but consume the return value to silence the warning.
1313 */
1314 if (__copy_from_user(&child->thread.i387.fxsave, u, sizeof(*u)))
1315 ;
1316 set_stopped_child_used_math(child);
1317 child->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask;
1318 ret = 0;
1319 break;
1320 }
1321
1322 case PTRACE_GETEVENTMSG:
1323 ret = put_user(child->ptrace_message,
1324 (unsigned int __user *)compat_ptr(data));
1325 break;
1326
1327 default:
1328 BUG();
1329 }
1330
1331 out:
1332 put_task_struct(child);
1333 return ret;
1334 }
1335
1336 #endif /* CONFIG_IA32_EMULATION */
1337
1338 #ifdef CONFIG_X86_32
1339
1340 void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code)
1341 {
1342 struct siginfo info;
1343
1344 tsk->thread.trap_no = 1;
1345 tsk->thread.error_code = error_code;
1346
1347 memset(&info, 0, sizeof(info));
1348 info.si_signo = SIGTRAP;
1349 info.si_code = TRAP_BRKPT;
1350
1351 /* User-mode ip? */
1352 info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
1353
1354 /* Send us the fake SIGTRAP */
1355 force_sig_info(SIGTRAP, &info, tsk);
1356 }
1357
1358 /* notification of system call entry/exit
1359 * - triggered by current->work.syscall_trace
1360 */
1361 __attribute__((regparm(3)))
1362 int do_syscall_trace(struct pt_regs *regs, int entryexit)
1363 {
1364 int is_sysemu = test_thread_flag(TIF_SYSCALL_EMU);
1365 /*
1366 * With TIF_SYSCALL_EMU set we want to ignore TIF_SINGLESTEP for syscall
1367 * interception
1368 */
1369 int is_singlestep = !is_sysemu && test_thread_flag(TIF_SINGLESTEP);
1370 int ret = 0;
1371
1372 /* do the secure computing check first */
1373 if (!entryexit)
1374 secure_computing(regs->orig_ax);
1375
1376 if (unlikely(current->audit_context)) {
1377 if (entryexit)
1378 audit_syscall_exit(AUDITSC_RESULT(regs->ax),
1379 regs->ax);
1380 /* Debug traps, when using PTRACE_SINGLESTEP, must be sent only
1381 * on the syscall exit path. Normally, when TIF_SYSCALL_AUDIT is
1382 * not used, entry.S will call us only on syscall exit, not
1383 * entry; so when TIF_SYSCALL_AUDIT is used we must avoid
1384 * calling send_sigtrap() on syscall entry.
1385 *
1386 * Note that when PTRACE_SYSEMU_SINGLESTEP is used,
1387 * is_singlestep is false, despite its name, so we will still do
1388 * the correct thing.
1389 */
1390 else if (is_singlestep)
1391 goto out;
1392 }
1393
1394 if (!(current->ptrace & PT_PTRACED))
1395 goto out;
1396
1397 /* If a process stops on the 1st tracepoint with SYSCALL_TRACE
1398 * and then is resumed with SYSEMU_SINGLESTEP, it will come in
1399 * here. We have to check for this case and return early. */
1400 if (is_sysemu && entryexit)
1401 return 0;
1402
1403 /* Fake a debug trap */
1404 if (is_singlestep)
1405 send_sigtrap(current, regs, 0);
1406
1407 if (!test_thread_flag(TIF_SYSCALL_TRACE) && !is_sysemu)
1408 goto out;
1409
1410 /* the 0x80 provides a way for the tracing parent to distinguish
1411 between a syscall stop and SIGTRAP delivery */
1412 /* Note that the debugger could change the result of test_thread_flag! */
1413 ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ? 0x80:0));
1414
1415 /*
1416 * this isn't the same as continuing with a signal, but it will do
1417 * for normal use. strace only continues with a signal if the
1418 * stopping signal is not SIGTRAP. -brl
1419 */
1420 if (current->exit_code) {
1421 send_sig(current->exit_code, current, 1);
1422 current->exit_code = 0;
1423 }
1424 ret = is_sysemu;
1425 out:
1426 if (unlikely(current->audit_context) && !entryexit)
1427 audit_syscall_entry(AUDIT_ARCH_I386, regs->orig_ax,
1428 regs->bx, regs->cx, regs->dx, regs->si);
1429 if (ret == 0)
1430 return 0;
1431
1432 regs->orig_ax = -1; /* force skip of syscall restarting */
1433 if (unlikely(current->audit_context))
1434 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
1435 return 1;
1436 }
1437
1438 #else /* CONFIG_X86_64 */
1439
1440 static void syscall_trace(struct pt_regs *regs)
1441 {
1442
1443 #if 0
1444 printk("trace %s ip %lx sp %lx ax %d origrax %d caller %lx tiflags %x ptrace %x\n",
1445 current->comm,
1446 regs->ip, regs->sp, regs->ax, regs->orig_ax, __builtin_return_address(0),
1447 current_thread_info()->flags, current->ptrace);
1448 #endif
1449
1450 ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
1451 ? 0x80 : 0));
1452 /*
1453 * this isn't the same as continuing with a signal, but it will do
1454 * for normal use. strace only continues with a signal if the
1455 * stopping signal is not SIGTRAP. -brl
1456 */
1457 if (current->exit_code) {
1458 send_sig(current->exit_code, current, 1);
1459 current->exit_code = 0;
1460 }
1461 }
1462
1463 asmlinkage void syscall_trace_enter(struct pt_regs *regs)
1464 {
1465 /* do the secure computing check first */
1466 secure_computing(regs->orig_ax);
1467
1468 if (test_thread_flag(TIF_SYSCALL_TRACE)
1469 && (current->ptrace & PT_PTRACED))
1470 syscall_trace(regs);
1471
1472 if (unlikely(current->audit_context)) {
1473 if (test_thread_flag(TIF_IA32)) {
1474 audit_syscall_entry(AUDIT_ARCH_I386,
1475 regs->orig_ax,
1476 regs->bx, regs->cx,
1477 regs->dx, regs->si);
1478 } else {
1479 audit_syscall_entry(AUDIT_ARCH_X86_64,
1480 regs->orig_ax,
1481 regs->di, regs->si,
1482 regs->dx, regs->r10);
1483 }
1484 }
1485 }
1486
1487 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
1488 {
1489 if (unlikely(current->audit_context))
1490 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
1491
1492 if ((test_thread_flag(TIF_SYSCALL_TRACE)
1493 || test_thread_flag(TIF_SINGLESTEP))
1494 && (current->ptrace & PT_PTRACED))
1495 syscall_trace(regs);
1496 }
1497
1498 #endif /* CONFIG_X86_32 */