/* arch/x86/kernel/ptrace.c */
/* By Ross Biro 1/23/92 */
/*
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * BTS tracing
 *	Markus Metzger <markus.t.metzger@intel.com>, Dec 2007
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/debugreg.h>
#include <asm/ldt.h>
#include <asm/desc.h>
#include <asm/prctl.h>
#include <asm/proto.h>
#include <asm/ds.h>

#include "tls.h"

enum x86_regset {
	REGSET_GENERAL,
	REGSET_FP,
	REGSET_XFP,
	REGSET_IOPERM64 = REGSET_XFP,
	REGSET_TLS,
	REGSET_IOPERM32,
};

/*
 * This does not yet catch signals sent when the child dies;
 * that is handled in exit.c or in signal.c.
 */

/*
 * Determines which flags the user has access to [1 = access, 0 = no access].
 */
#define FLAG_MASK_32		((unsigned long)			\
				 (X86_EFLAGS_CF | X86_EFLAGS_PF |	\
				  X86_EFLAGS_AF | X86_EFLAGS_ZF |	\
				  X86_EFLAGS_SF | X86_EFLAGS_TF |	\
				  X86_EFLAGS_DF | X86_EFLAGS_OF |	\
				  X86_EFLAGS_RF | X86_EFLAGS_AC))

/*
 * Determines whether a value may be installed in a segment register.
 */
static inline bool invalid_selector(u16 value)
{
	return unlikely(value != 0 && (value & SEGMENT_RPL_MASK) != USER_RPL);
}
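
/*
 * For example, a null selector (0) and a user selector such as 0x2b
 * (GDT index 5, RPL 3) are accepted, while a kernel selector such as
 * 0x10 (RPL 0) is rejected.
 */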

#ifdef CONFIG_X86_32

#define FLAG_MASK		FLAG_MASK_32

static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
	return &regs->bx + (regno >> 2);
}
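
/*
 * For example, offsetof(struct user_regs_struct, ax) is 24 on 32-bit,
 * so regno 24 yields &regs->bx + 6, i.e. &regs->ax; this relies on
 * pt_regs and user_regs_struct keeping the registers in the same order.
 */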

static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int retval;
	if (offset != offsetof(struct user_regs_struct, gs))
		retval = *pt_regs_access(task_pt_regs(task), offset);
	else {
		if (task == current)
			retval = get_user_gs(task_pt_regs(task));
		else
			retval = task_user_gs(task);
	}
	return retval;
}

static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	/*
	 * For %cs and %ss we cannot permit a null selector.
	 * We can permit a bogus selector as long as it has USER_RPL.
	 * Null selectors are fine for other segment registers, but
	 * we will never get back to user mode with invalid %cs or %ss
	 * and will take the trap in iret instead.  Much code relies
	 * on user_mode() to distinguish a user trap frame (which can
	 * safely use invalid selectors) from a kernel trap frame.
	 */
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ss):
		if (unlikely(value == 0))
			return -EIO;
		/* A non-null %cs or %ss falls through to the default store. */

	default:
		*pt_regs_access(task_pt_regs(task), offset) = value;
		break;

	case offsetof(struct user_regs_struct, gs):
		if (task == current)
			set_user_gs(task_pt_regs(task), value);
		else
			task_user_gs(task) = value;
	}

	return 0;
}

static unsigned long debugreg_addr_limit(struct task_struct *task)
{
	return TASK_SIZE - 3;
}

#else  /* CONFIG_X86_64 */

#define FLAG_MASK		(FLAG_MASK_32 | X86_EFLAGS_NT)

static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long offset)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, r15) != 0);
	return &regs->r15 + (offset / sizeof(regs->r15));
}

static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
	/*
	 * Returning the value truncates it to 16 bits.
	 */
	unsigned int seg;

	switch (offset) {
	case offsetof(struct user_regs_struct, fs):
		if (task == current) {
			/* Older gas can't assemble movq %?s,%r?? */
			asm("movl %%fs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.fsindex;
	case offsetof(struct user_regs_struct, gs):
		if (task == current) {
			asm("movl %%gs,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.gsindex;
	case offsetof(struct user_regs_struct, ds):
		if (task == current) {
			asm("movl %%ds,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.ds;
	case offsetof(struct user_regs_struct, es):
		if (task == current) {
			asm("movl %%es,%0" : "=r" (seg));
			return seg;
		}
		return task->thread.es;

	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ss):
		break;
	}
	return *pt_regs_access(task_pt_regs(task), offset);
}

static int set_segment_reg(struct task_struct *task,
			   unsigned long offset, u16 value)
{
	/*
	 * The value argument was already truncated to 16 bits.
	 */
	if (invalid_selector(value))
		return -EIO;

	switch (offset) {
	case offsetof(struct user_regs_struct, fs):
		/*
		 * If this is setting fs as for normal 64-bit use but
		 * setting fs_base has implicitly changed it, leave it.
		 */
		if ((value == FS_TLS_SEL && task->thread.fsindex == 0 &&
		     task->thread.fs != 0) ||
		    (value == 0 && task->thread.fsindex == FS_TLS_SEL &&
		     task->thread.fs == 0))
			break;
		task->thread.fsindex = value;
		if (task == current)
			loadsegment(fs, task->thread.fsindex);
		break;
	case offsetof(struct user_regs_struct, gs):
		/*
		 * If this is setting gs as for normal 64-bit use but
		 * setting gs_base has implicitly changed it, leave it.
		 */
		if ((value == GS_TLS_SEL && task->thread.gsindex == 0 &&
		     task->thread.gs != 0) ||
		    (value == 0 && task->thread.gsindex == GS_TLS_SEL &&
		     task->thread.gs == 0))
			break;
		task->thread.gsindex = value;
		if (task == current)
			load_gs_index(task->thread.gsindex);
		break;
	case offsetof(struct user_regs_struct, ds):
		task->thread.ds = value;
		if (task == current)
			loadsegment(ds, task->thread.ds);
		break;
	case offsetof(struct user_regs_struct, es):
		task->thread.es = value;
		if (task == current)
			loadsegment(es, task->thread.es);
		break;

		/*
		 * Can't actually change these in 64-bit mode.
		 */
	case offsetof(struct user_regs_struct, cs):
		if (unlikely(value == 0))
			return -EIO;
#ifdef CONFIG_IA32_EMULATION
		if (test_tsk_thread_flag(task, TIF_IA32))
			task_pt_regs(task)->cs = value;
#endif
		break;
	case offsetof(struct user_regs_struct, ss):
		if (unlikely(value == 0))
			return -EIO;
#ifdef CONFIG_IA32_EMULATION
		if (test_tsk_thread_flag(task, TIF_IA32))
			task_pt_regs(task)->ss = value;
#endif
		break;
	}

	return 0;
}

static unsigned long debugreg_addr_limit(struct task_struct *task)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(task, TIF_IA32))
		return IA32_PAGE_OFFSET - 3;
#endif
	return TASK_SIZE64 - 7;
}

#endif	/* CONFIG_X86_32 */

static unsigned long get_flags(struct task_struct *task)
{
	unsigned long retval = task_pt_regs(task)->flags;

	/*
	 * If the debugger set TF, hide it from the readout.
	 */
	if (test_tsk_thread_flag(task, TIF_FORCED_TF))
		retval &= ~X86_EFLAGS_TF;

	return retval;
}

static int set_flags(struct task_struct *task, unsigned long value)
{
	struct pt_regs *regs = task_pt_regs(task);

	/*
	 * If the user value contains TF, mark that
	 * it was not "us" (the debugger) that set it.
	 * If not, make sure it stays set if we (the debugger)
	 * had set it.
	 */
	if (value & X86_EFLAGS_TF)
		clear_tsk_thread_flag(task, TIF_FORCED_TF);
	else if (test_tsk_thread_flag(task, TIF_FORCED_TF))
		value |= X86_EFLAGS_TF;

	regs->flags = (regs->flags & ~FLAG_MASK) | (value & FLAG_MASK);

	return 0;
}
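
/*
 * For example, when the debugger writes back a flags image without
 * X86_EFLAGS_TF while single-stepping is in force (TIF_FORCED_TF set),
 * TF is quietly re-added so the pending step still traps, and
 * get_flags() above hides that forced TF from the readout.
 */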

static int putreg(struct task_struct *child,
		  unsigned long offset, unsigned long value)
{
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ds):
	case offsetof(struct user_regs_struct, es):
	case offsetof(struct user_regs_struct, fs):
	case offsetof(struct user_regs_struct, gs):
	case offsetof(struct user_regs_struct, ss):
		return set_segment_reg(child, offset, value);

	case offsetof(struct user_regs_struct, flags):
		return set_flags(child, value);

#ifdef CONFIG_X86_64
	/*
	 * Orig_ax is really just a flag with small positive and
	 * negative values, so make sure to always sign-extend it
	 * from 32 bits so that it works correctly regardless of
	 * whether we come from a 32-bit environment or not.
	 */
	case offsetof(struct user_regs_struct, orig_ax):
		value = (long) (s32) value;
		break;

	case offsetof(struct user_regs_struct, fs_base):
		if (value >= TASK_SIZE_OF(child))
			return -EIO;
		/*
		 * When changing the segment base, use do_arch_prctl
		 * to set either thread.fs or thread.fsindex and the
		 * corresponding GDT slot.
		 */
		if (child->thread.fs != value)
			return do_arch_prctl(child, ARCH_SET_FS, value);
		return 0;
	case offsetof(struct user_regs_struct, gs_base):
		/*
		 * Exactly the same here as the %fs handling above.
		 */
		if (value >= TASK_SIZE_OF(child))
			return -EIO;
		if (child->thread.gs != value)
			return do_arch_prctl(child, ARCH_SET_GS, value);
		return 0;
#endif
	}

	*pt_regs_access(task_pt_regs(child), offset) = value;
	return 0;
}

static unsigned long getreg(struct task_struct *task, unsigned long offset)
{
	switch (offset) {
	case offsetof(struct user_regs_struct, cs):
	case offsetof(struct user_regs_struct, ds):
	case offsetof(struct user_regs_struct, es):
	case offsetof(struct user_regs_struct, fs):
	case offsetof(struct user_regs_struct, gs):
	case offsetof(struct user_regs_struct, ss):
		return get_segment_reg(task, offset);

	case offsetof(struct user_regs_struct, flags):
		return get_flags(task);

#ifdef CONFIG_X86_64
	case offsetof(struct user_regs_struct, fs_base): {
		/*
		 * do_arch_prctl may have used a GDT slot instead of
		 * the MSR.  To userland, it appears the same either
		 * way, except the %fs segment selector might not be 0.
		 */
		unsigned int seg = task->thread.fsindex;
		if (task->thread.fs != 0)
			return task->thread.fs;
		if (task == current)
			asm("movl %%fs,%0" : "=r" (seg));
		if (seg != FS_TLS_SEL)
			return 0;
		return get_desc_base(&task->thread.tls_array[FS_TLS]);
	}
	case offsetof(struct user_regs_struct, gs_base): {
		/*
		 * Exactly the same here as the %fs handling above.
		 */
		unsigned int seg = task->thread.gsindex;
		if (task->thread.gs != 0)
			return task->thread.gs;
		if (task == current)
			asm("movl %%gs,%0" : "=r" (seg));
		if (seg != GS_TLS_SEL)
			return 0;
		return get_desc_base(&task->thread.tls_array[GS_TLS]);
	}
#endif
	}

	return *pt_regs_access(task_pt_regs(task), offset);
}

static int genregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	if (kbuf) {
		unsigned long *k = kbuf;
		while (count > 0) {
			*k++ = getreg(target, pos);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		unsigned long __user *u = ubuf;
		while (count > 0) {
			if (__put_user(getreg(target, pos), u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	return 0;
}

static int genregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	int ret = 0;
	if (kbuf) {
		const unsigned long *k = kbuf;
		while (count > 0 && !ret) {
			ret = putreg(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const unsigned long __user *u = ubuf;
		while (count > 0 && !ret) {
			unsigned long word;
			ret = __get_user(word, u++);
			if (ret)
				break;
			ret = putreg(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return ret;
}

/*
 * This function is trivial and will be inlined by the compiler.
 * Having it separates the implementation details of debug
 * registers from the interface details of ptrace.
 */
static unsigned long ptrace_get_debugreg(struct task_struct *child, int n)
{
	switch (n) {
	case 0:		return child->thread.debugreg0;
	case 1:		return child->thread.debugreg1;
	case 2:		return child->thread.debugreg2;
	case 3:		return child->thread.debugreg3;
	case 6:		return child->thread.debugreg6;
	case 7:		return child->thread.debugreg7;
	}
	return 0;
}

static int ptrace_set_debugreg(struct task_struct *child,
			       int n, unsigned long data)
{
	int i;

	if (unlikely(n == 4 || n == 5))
		return -EIO;

	if (n < 4 && unlikely(data >= debugreg_addr_limit(child)))
		return -EIO;

	switch (n) {
	case 0:		child->thread.debugreg0 = data; break;
	case 1:		child->thread.debugreg1 = data; break;
	case 2:		child->thread.debugreg2 = data; break;
	case 3:		child->thread.debugreg3 = data; break;

	case 6:
		if ((data & ~0xffffffffUL) != 0)
			return -EIO;
		child->thread.debugreg6 = data;
		break;

	case 7:
		/*
		 * Sanity-check data.  Take one half-byte at once with
		 * check = (val >> (16 + 4*i)) & 0xf.  It contains the
		 * R/Wi and LENi bits; bits 0 and 1 are R/Wi, and bits
		 * 2 and 3 are LENi.  Given a list of invalid values,
		 * we do mask |= 1 << invalid_value, so that
		 * (mask >> check) & 1 is a correct test for invalid
		 * values.
		 *
		 * R/Wi contains the type of the breakpoint /
		 * watchpoint, LENi contains the length of the watched
		 * data in the watchpoint case.
		 *
		 * The invalid values are:
		 * - LENi == 10 (binary; undefined), so mask |= 0x0f00.	[32-bit]
		 * - R/Wi == 10 (binary; break on I/O reads or writes),
		 *   so mask |= 0x4444.
		 * - R/Wi == 00 (binary) && LENi != 00, so we have
		 *   mask |= 0x1110.
		 *
		 * Finally, mask = 0x0f00 | 0x4444 | 0x1110 == 0x5f54.
		 *
		 * See the Intel Manual "System Programming Guide",
		 * 15.2.4.
		 *
		 * Note that LENi == 10 is defined on x86_64 in long
		 * mode (i.e. even for 32-bit userspace software, but
		 * 64-bit kernel), so the x86_64 mask value is 0x5554
		 * (0x5f54 without the 0x0f00 term).
		 * See the AMD manual no. 24593 (AMD64 System Programming).
		 */
#ifdef CONFIG_X86_32
#define DR7_MASK	0x5f54
#else
#define DR7_MASK	0x5554
#endif
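		/*
		 * For example: check == 0xd (R/W = 01, break on data
		 * write, with LEN = 11, a 4-byte watch) gives
		 * (0x5f54 >> 0xd) & 1 == 0 and is accepted, while
		 * check == 0x2 (R/W = 10, an I/O breakpoint) gives
		 * (0x5f54 >> 0x2) & 1 == 1 and is rejected.
		 */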
		data &= ~DR_CONTROL_RESERVED;
		for (i = 0; i < 4; i++)
			if ((DR7_MASK >> ((data >> (16 + 4*i)) & 0xf)) & 1)
				return -EIO;
		child->thread.debugreg7 = data;
		if (data)
			set_tsk_thread_flag(child, TIF_DEBUG);
		else
			clear_tsk_thread_flag(child, TIF_DEBUG);
		break;
	}

	return 0;
}
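
/*
 * Minimal userspace sketch (an illustration, not part of this file):
 * a debugger could arm a 4-byte write watchpoint through the checks
 * above by poking u_debugreg[0] and u_debugreg[7]; "pid" and "addr"
 * are assumed to be supplied by the caller, and PTRACE_POKEUSER is
 * the glibc name for PTRACE_POKEUSR.
 *
 *	ptrace(PTRACE_POKEUSER, pid,
 *	       offsetof(struct user, u_debugreg[0]), addr);
 *	ptrace(PTRACE_POKEUSER, pid,
 *	       offsetof(struct user, u_debugreg[7]),
 *	       (1UL << 0) |	// L0: DR0 enabled locally
 *	       (1UL << 16) |	// R/W0 = 01: break on data write
 *	       (3UL << 18));	// LEN0 = 11: 4-byte wide watch
 */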

/*
 * These access the current or another (stopped) task's io permission
 * bitmap for debugging or core dump.
 */
static int ioperm_active(struct task_struct *target,
			 const struct user_regset *regset)
{
	return target->thread.io_bitmap_max / regset->size;
}

static int ioperm_get(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      void *kbuf, void __user *ubuf)
{
	if (!target->thread.io_bitmap_ptr)
		return -ENXIO;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   target->thread.io_bitmap_ptr,
				   0, IO_BITMAP_BYTES);
}

#ifdef CONFIG_X86_PTRACE_BTS
static int ptrace_bts_read_record(struct task_struct *child, size_t index,
				  struct bts_struct __user *out)
{
	const struct bts_trace *trace;
	struct bts_struct bts;
	const unsigned char *at;
	int error;

	trace = ds_read_bts(child->bts);
	if (!trace)
		return -EPERM;

	at = trace->ds.top - ((index + 1) * trace->ds.size);
	if ((void *)at < trace->ds.begin)
		at += (trace->ds.n * trace->ds.size);

	if (!trace->read)
		return -EOPNOTSUPP;

	error = trace->read(child->bts, at, &bts);
	if (error < 0)
		return error;

	if (copy_to_user(out, &bts, sizeof(bts)))
		return -EFAULT;

	return sizeof(bts);
}
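
/*
 * Index 0 above is the most recently written record, just below the
 * current top of the ring; e.g. with n == 4 slots and top at
 * begin + 3 * size, index 0 reads the slot at begin + 2 * size and
 * index 3 wraps around to the slot at begin + 3 * size.
 */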

static int ptrace_bts_drain(struct task_struct *child,
			    long size,
			    struct bts_struct __user *out)
{
	const struct bts_trace *trace;
	const unsigned char *at;
	int error, drained = 0;

	trace = ds_read_bts(child->bts);
	if (!trace)
		return -EPERM;

	if (!trace->read)
		return -EOPNOTSUPP;

	if (size < (trace->ds.top - trace->ds.begin))
		return -EIO;

	for (at = trace->ds.begin; (void *)at < trace->ds.top;
	     out++, drained++, at += trace->ds.size) {
		struct bts_struct bts;
		int error;

		error = trace->read(child->bts, at, &bts);
		if (error < 0)
			return error;

		if (copy_to_user(out, &bts, sizeof(bts)))
			return -EFAULT;
	}

	memset(trace->ds.begin, 0, trace->ds.n * trace->ds.size);

	error = ds_reset_bts(child->bts);
	if (error < 0)
		return error;

	return drained;
}

static int ptrace_bts_allocate_buffer(struct task_struct *child, size_t size)
{
	child->bts_buffer = alloc_locked_buffer(size);
	if (!child->bts_buffer)
		return -ENOMEM;

	child->bts_size = size;

	return 0;
}

static void ptrace_bts_free_buffer(struct task_struct *child)
{
	free_locked_buffer(child->bts_buffer, child->bts_size);
	child->bts_buffer = NULL;
	child->bts_size = 0;
}

static int ptrace_bts_config(struct task_struct *child,
			     long cfg_size,
			     const struct ptrace_bts_config __user *ucfg)
{
	struct ptrace_bts_config cfg;
	unsigned int flags = 0;

	if (cfg_size < sizeof(cfg))
		return -EIO;

	if (copy_from_user(&cfg, ucfg, sizeof(cfg)))
		return -EFAULT;

	if (child->bts) {
		ds_release_bts(child->bts);
		child->bts = NULL;
	}

	if (cfg.flags & PTRACE_BTS_O_SIGNAL) {
		if (!cfg.signal)
			return -EINVAL;

		/*
		 * Overflow signal delivery is not yet supported; the
		 * early return leaves the assignment below unreachable.
		 */
		return -EOPNOTSUPP;

		child->thread.bts_ovfl_signal = cfg.signal;
	}

	if ((cfg.flags & PTRACE_BTS_O_ALLOC) &&
	    (cfg.size != child->bts_size)) {
		int error;

		ptrace_bts_free_buffer(child);

		error = ptrace_bts_allocate_buffer(child, cfg.size);
		if (error < 0)
			return error;
	}

	if (cfg.flags & PTRACE_BTS_O_TRACE)
		flags |= BTS_USER;

	if (cfg.flags & PTRACE_BTS_O_SCHED)
		flags |= BTS_TIMESTAMPS;

	child->bts = ds_request_bts(child, child->bts_buffer, child->bts_size,
				    /* ovfl = */ NULL, /* th = */ (size_t)-1,
				    flags);
	if (IS_ERR(child->bts)) {
		int error = PTR_ERR(child->bts);

		ptrace_bts_free_buffer(child);
		child->bts = NULL;

		return error;
	}

	return sizeof(cfg);
}

static int ptrace_bts_status(struct task_struct *child,
			     long cfg_size,
			     struct ptrace_bts_config __user *ucfg)
{
	const struct bts_trace *trace;
	struct ptrace_bts_config cfg;

	if (cfg_size < sizeof(cfg))
		return -EIO;

	trace = ds_read_bts(child->bts);
	if (!trace)
		return -EPERM;

	memset(&cfg, 0, sizeof(cfg));
	cfg.size = trace->ds.end - trace->ds.begin;
	cfg.signal = child->thread.bts_ovfl_signal;
	cfg.bts_size = sizeof(struct bts_struct);

	if (cfg.signal)
		cfg.flags |= PTRACE_BTS_O_SIGNAL;

	if (trace->ds.flags & BTS_USER)
		cfg.flags |= PTRACE_BTS_O_TRACE;

	if (trace->ds.flags & BTS_TIMESTAMPS)
		cfg.flags |= PTRACE_BTS_O_SCHED;

	if (copy_to_user(ucfg, &cfg, sizeof(cfg)))
		return -EFAULT;

	return sizeof(cfg);
}

static int ptrace_bts_clear(struct task_struct *child)
{
	const struct bts_trace *trace;

	trace = ds_read_bts(child->bts);
	if (!trace)
		return -EPERM;

	memset(trace->ds.begin, 0, trace->ds.n * trace->ds.size);

	return ds_reset_bts(child->bts);
}

static int ptrace_bts_size(struct task_struct *child)
{
	const struct bts_trace *trace;

	trace = ds_read_bts(child->bts);
	if (!trace)
		return -EPERM;

	return (trace->ds.top - trace->ds.begin) / trace->ds.size;
}

static void ptrace_bts_fork(struct task_struct *tsk)
{
	tsk->bts = NULL;
	tsk->bts_buffer = NULL;
	tsk->bts_size = 0;
	tsk->thread.bts_ovfl_signal = 0;
}

static void ptrace_bts_untrace(struct task_struct *child)
{
	if (unlikely(child->bts)) {
		ds_release_bts(child->bts);
		child->bts = NULL;

		/*
		 * We cannot update total_vm and locked_vm since the
		 * child's mm is already gone.  But we can reclaim the
		 * memory.
		 */
		kfree(child->bts_buffer);
		child->bts_buffer = NULL;
		child->bts_size = 0;
	}
}

static void ptrace_bts_detach(struct task_struct *child)
{
	/*
	 * Ptrace_detach() races with ptrace_untrace() in case
	 * the child dies and is reaped by another thread.
	 *
	 * We only do the memory accounting at this point and
	 * leave the buffer deallocation and the bts tracer
	 * release to ptrace_bts_untrace() which will be called
	 * later on with tasklist_lock held.
	 */
	release_locked_buffer(child->bts_buffer, child->bts_size);
}
#else
static inline void ptrace_bts_fork(struct task_struct *tsk) {}
static inline void ptrace_bts_detach(struct task_struct *child) {}
static inline void ptrace_bts_untrace(struct task_struct *child) {}
#endif /* CONFIG_X86_PTRACE_BTS */

void x86_ptrace_fork(struct task_struct *child, unsigned long clone_flags)
{
	ptrace_bts_fork(child);
}

void x86_ptrace_untrace(struct task_struct *child)
{
	ptrace_bts_untrace(child);
}

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure the single step bit is not set.
 */
void ptrace_disable(struct task_struct *child)
{
	user_disable_single_step(child);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif
	ptrace_bts_detach(child);
}

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
static const struct user_regset_view user_x86_32_view; /* Initialized below. */
#endif

long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
	int ret;
	unsigned long __user *datap = (unsigned long __user *)data;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;

		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr < 0 ||
		    addr >= sizeof(struct user))
			break;

		tmp = 0;  /* Default return condition */
		if (addr < sizeof(struct user_regs_struct))
			tmp = getreg(child, addr);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			tmp = ptrace_get_debugreg(child, addr / sizeof(data));
		}
		ret = put_user(tmp, datap);
		break;
	}

	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
		ret = -EIO;
		if ((addr & (sizeof(data) - 1)) || addr < 0 ||
		    addr >= sizeof(struct user))
			break;

		if (addr < sizeof(struct user_regs_struct))
			ret = putreg(child, addr, data);
		else if (addr >= offsetof(struct user, u_debugreg[0]) &&
			 addr <= offsetof(struct user, u_debugreg[7])) {
			addr -= offsetof(struct user, u_debugreg[0]);
			ret = ptrace_set_debugreg(child,
						  addr / sizeof(data), data);
		}
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_GENERAL,
					     0, sizeof(struct user_regs_struct),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_FP,
					   0, sizeof(struct user_i387_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_FP,
					     0, sizeof(struct user_i387_struct),
					     datap);

#ifdef CONFIG_X86_32
	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_XFP,
					   0, sizeof(struct user_fxsr_struct),
					   datap) ? -EIO : 0;

	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_XFP,
					     0, sizeof(struct user_fxsr_struct),
					     datap) ? -EIO : 0;
#endif

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	case PTRACE_GET_THREAD_AREA:
		if (addr < 0)
			return -EIO;
		ret = do_get_thread_area(child, addr,
					 (struct user_desc __user *) data);
		break;

	case PTRACE_SET_THREAD_AREA:
		if (addr < 0)
			return -EIO;
		ret = do_set_thread_area(child, addr,
					 (struct user_desc __user *) data, 0);
		break;
#endif

#ifdef CONFIG_X86_64
	/*
	 * Normal 64-bit interface to access TLS data.
	 * Works just like arch_prctl, except that the arguments
	 * are reversed.
	 */
	case PTRACE_ARCH_PRCTL:
		ret = do_arch_prctl(child, data, addr);
		break;
#endif

	/*
	 * These bits need more cooking - not enabled yet:
	 */
#ifdef CONFIG_X86_PTRACE_BTS
	case PTRACE_BTS_CONFIG:
		ret = ptrace_bts_config
			(child, data, (struct ptrace_bts_config __user *)addr);
		break;

	case PTRACE_BTS_STATUS:
		ret = ptrace_bts_status
			(child, data, (struct ptrace_bts_config __user *)addr);
		break;

	case PTRACE_BTS_SIZE:
		ret = ptrace_bts_size(child);
		break;

	case PTRACE_BTS_GET:
		ret = ptrace_bts_read_record
			(child, data, (struct bts_struct __user *) addr);
		break;

	case PTRACE_BTS_CLEAR:
		ret = ptrace_bts_clear(child);
		break;

	case PTRACE_BTS_DRAIN:
		ret = ptrace_bts_drain
			(child, data, (struct bts_struct __user *) addr);
		break;
#endif /* CONFIG_X86_PTRACE_BTS */

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
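
/*
 * Minimal userspace sketch (an illustration, not part of this file):
 * reading a stopped tracee's general registers through the
 * PTRACE_GETREGS path above; "pid" is assumed to name a traced,
 * stopped child.
 *
 *	struct user_regs_struct regs;
 *
 *	if (ptrace(PTRACE_GETREGS, pid, NULL, &regs) == 0)
 *		use(regs);	// e.g. regs.rip on 64-bit, regs.eip on 32-bit
 */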

#ifdef CONFIG_IA32_EMULATION

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <asm/ia32.h>
#include <asm/user32.h>

#define R32(l,q)							\
	case offsetof(struct user32, regs.l):				\
		regs->q = value; break

#define SEG32(rs)							\
	case offsetof(struct user32, regs.rs):				\
		return set_segment_reg(child,				\
				       offsetof(struct user_regs_struct, rs), \
				       value);				\
		break

static int putreg32(struct task_struct *child, unsigned regno, u32 value)
{
	struct pt_regs *regs = task_pt_regs(child);

	switch (regno) {

	SEG32(cs);
	SEG32(ds);
	SEG32(es);
	SEG32(fs);
	SEG32(gs);
	SEG32(ss);

	R32(ebx, bx);
	R32(ecx, cx);
	R32(edx, dx);
	R32(edi, di);
	R32(esi, si);
	R32(ebp, bp);
	R32(eax, ax);
	R32(eip, ip);
	R32(esp, sp);

	case offsetof(struct user32, regs.orig_eax):
		/*
		 * Sign-extend the value so that orig_eax = -1
		 * causes (long)orig_ax < 0 tests to fire correctly.
		 */
		regs->orig_ax = (long) (s32) value;
		break;

	case offsetof(struct user32, regs.eflags):
		return set_flags(child, value);

	case offsetof(struct user32, u_debugreg[0]) ...
	     offsetof(struct user32, u_debugreg[7]):
		regno -= offsetof(struct user32, u_debugreg[0]);
		return ptrace_set_debugreg(child, regno / 4, value);

	default:
		if (regno > sizeof(struct user32) || (regno & 3))
			return -EIO;

		/*
		 * Other dummy fields in the virtual user structure
		 * are ignored.
		 */
		break;
	}
	return 0;
}

#undef R32
#undef SEG32

#define R32(l,q)							\
	case offsetof(struct user32, regs.l):				\
		*val = regs->q; break

#define SEG32(rs)							\
	case offsetof(struct user32, regs.rs):				\
		*val = get_segment_reg(child,				\
				       offsetof(struct user_regs_struct, rs)); \
		break

static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
{
	struct pt_regs *regs = task_pt_regs(child);

	switch (regno) {

	SEG32(ds);
	SEG32(es);
	SEG32(fs);
	SEG32(gs);

	R32(cs, cs);
	R32(ss, ss);
	R32(ebx, bx);
	R32(ecx, cx);
	R32(edx, dx);
	R32(edi, di);
	R32(esi, si);
	R32(ebp, bp);
	R32(eax, ax);
	R32(orig_eax, orig_ax);
	R32(eip, ip);
	R32(esp, sp);

	case offsetof(struct user32, regs.eflags):
		*val = get_flags(child);
		break;

	case offsetof(struct user32, u_debugreg[0]) ...
	     offsetof(struct user32, u_debugreg[7]):
		regno -= offsetof(struct user32, u_debugreg[0]);
		*val = ptrace_get_debugreg(child, regno / 4);
		break;

	default:
		if (regno > sizeof(struct user32) || (regno & 3))
			return -EIO;

		/*
		 * Other dummy fields in the virtual user structure
		 * are ignored.
		 */
		*val = 0;
		break;
	}
	return 0;
}

#undef R32
#undef SEG32

static int genregs32_get(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 void *kbuf, void __user *ubuf)
{
	if (kbuf) {
		compat_ulong_t *k = kbuf;
		while (count > 0) {
			getreg32(target, pos, k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		compat_ulong_t __user *u = ubuf;
		while (count > 0) {
			compat_ulong_t word;
			getreg32(target, pos, &word);
			if (__put_user(word, u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	return 0;
}

static int genregs32_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	int ret = 0;
	if (kbuf) {
		const compat_ulong_t *k = kbuf;
		while (count > 0 && !ret) {
			ret = putreg32(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const compat_ulong_t __user *u = ubuf;
		while (count > 0 && !ret) {
			compat_ulong_t word;
			ret = __get_user(word, u++);
			if (ret)
				break;
			ret = putreg32(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return ret;
}

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;
	__u32 val;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = getreg32(child, addr, &val);
		if (ret == 0)
			ret = put_user(val, (__u32 __user *)datap);
		break;

	case PTRACE_POKEUSR:
		ret = putreg32(child, addr, data);
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct32),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_GENERAL, 0,
					     sizeof(struct user_regs_struct32),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_FP, 0,
					   sizeof(struct user_i387_ia32_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(
			child, &user_x86_32_view, REGSET_FP,
			0, sizeof(struct user_i387_ia32_struct), datap);

	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
		return copy_regset_to_user(child, &user_x86_32_view,
					   REGSET_XFP, 0,
					   sizeof(struct user32_fxsr_struct),
					   datap);

	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
		return copy_regset_from_user(child, &user_x86_32_view,
					     REGSET_XFP, 0,
					     sizeof(struct user32_fxsr_struct),
					     datap);

	case PTRACE_GET_THREAD_AREA:
	case PTRACE_SET_THREAD_AREA:
#ifdef CONFIG_X86_PTRACE_BTS
	case PTRACE_BTS_CONFIG:
	case PTRACE_BTS_STATUS:
	case PTRACE_BTS_SIZE:
	case PTRACE_BTS_GET:
	case PTRACE_BTS_CLEAR:
	case PTRACE_BTS_DRAIN:
#endif /* CONFIG_X86_PTRACE_BTS */
		return arch_ptrace(child, request, addr, data);

	default:
		return compat_ptrace_request(child, request, addr, data);
	}

	return ret;
}

#endif	/* CONFIG_IA32_EMULATION */

#ifdef CONFIG_X86_64

static const struct user_regset x86_64_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_regs_struct) / sizeof(long),
		.size = sizeof(long), .align = sizeof(long),
		.get = genregs_get, .set = genregs_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_i387_struct) / sizeof(long),
		.size = sizeof(long), .align = sizeof(long),
		.active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
	},
	[REGSET_IOPERM64] = {
		.core_note_type = NT_386_IOPERM,
		.n = IO_BITMAP_LONGS,
		.size = sizeof(long), .align = sizeof(long),
		.active = ioperm_active, .get = ioperm_get
	},
};

static const struct user_regset_view user_x86_64_view = {
	.name = "x86_64", .e_machine = EM_X86_64,
	.regsets = x86_64_regsets, .n = ARRAY_SIZE(x86_64_regsets)
};

#else  /* CONFIG_X86_32 */

#define user_regs_struct32	user_regs_struct
#define genregs32_get		genregs_get
#define genregs32_set		genregs_set

#define user_i387_ia32_struct	user_i387_struct
#define user32_fxsr_struct	user_fxsr_struct

#endif	/* CONFIG_X86_64 */

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
static const struct user_regset x86_32_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_regs_struct32) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.get = genregs32_get, .set = genregs32_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_i387_ia32_struct) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = fpregs_active, .get = fpregs_get, .set = fpregs_set
	},
	[REGSET_XFP] = {
		.core_note_type = NT_PRXFPREG,
		.n = sizeof(struct user32_fxsr_struct) / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_386_TLS,
		.n = GDT_ENTRY_TLS_ENTRIES, .bias = GDT_ENTRY_TLS_MIN,
		.size = sizeof(struct user_desc),
		.align = sizeof(struct user_desc),
		.active = regset_tls_active,
		.get = regset_tls_get, .set = regset_tls_set
	},
	[REGSET_IOPERM32] = {
		.core_note_type = NT_386_IOPERM,
		.n = IO_BITMAP_BYTES / sizeof(u32),
		.size = sizeof(u32), .align = sizeof(u32),
		.active = ioperm_active, .get = ioperm_get
	},
};

static const struct user_regset_view user_x86_32_view = {
	.name = "i386", .e_machine = EM_386,
	.regsets = x86_32_regsets, .n = ARRAY_SIZE(x86_32_regsets)
};
#endif

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(task, TIF_IA32))
#endif
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
		return &user_x86_32_view;
#endif
#ifdef CONFIG_X86_64
	return &user_x86_64_view;
#endif
}

void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
		  int error_code, int si_code)
{
	struct siginfo info;

	tsk->thread.trap_no = 1;
	tsk->thread.error_code = error_code;

	memset(&info, 0, sizeof(info));
	info.si_signo = SIGTRAP;
	info.si_code = si_code;

	/* User-mode ip? */
	info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;

	/* Send us the fake SIGTRAP */
	force_sig_info(SIGTRAP, &info, tsk);
}

#ifdef CONFIG_X86_32
# define IS_IA32	1
#elif defined CONFIG_IA32_EMULATION
# define IS_IA32	test_thread_flag(TIF_IA32)
#else
# define IS_IA32	0
#endif

/*
 * We must return the syscall number to actually look up in the table.
 * This can be -1L to skip running any syscall at all.
 */
asmregparm long syscall_trace_enter(struct pt_regs *regs)
{
	long ret = 0;

	/*
	 * If we stepped into a sysenter/syscall insn, it trapped in
	 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
	 * If user-mode had set TF itself, then it's still clear from
	 * do_debug() and we need to set it again to restore the user
	 * state.  If we entered on the slow path, TF was already set.
	 */
	if (test_thread_flag(TIF_SINGLESTEP))
		regs->flags |= X86_EFLAGS_TF;

	/* do the secure computing check first */
	secure_computing(regs->orig_ax);

	if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
		ret = -1L;

	if ((ret || test_thread_flag(TIF_SYSCALL_TRACE)) &&
	    tracehook_report_syscall_entry(regs))
		ret = -1L;

	if (unlikely(current->audit_context)) {
		if (IS_IA32)
			audit_syscall_entry(AUDIT_ARCH_I386,
					    regs->orig_ax,
					    regs->bx, regs->cx,
					    regs->dx, regs->si);
#ifdef CONFIG_X86_64
		else
			audit_syscall_entry(AUDIT_ARCH_X86_64,
					    regs->orig_ax,
					    regs->di, regs->si,
					    regs->dx, regs->r10);
#endif
	}

	return ret ?: regs->orig_ax;
}

asmregparm void syscall_trace_leave(struct pt_regs *regs)
{
	if (unlikely(current->audit_context))
		audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, 0);

	/*
	 * If TIF_SYSCALL_EMU is set, we only get here because of
	 * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
	 * We already reported this syscall instruction in
	 * syscall_trace_enter(), so don't do any more now.
	 */
	if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
		return;

	/*
	 * If we are single-stepping, synthesize a trap to follow the
	 * system call instruction.
	 */
	if (test_thread_flag(TIF_SINGLESTEP) &&
	    tracehook_consider_fatal_signal(current, SIGTRAP, SIG_DFL))
		send_sigtrap(current, regs, 0, TRAP_BRKPT);
}