arch/powerpc/kernel/ptrace.c
1 /*
2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *
5 * Derived from "arch/m68k/kernel/ptrace.c"
6 * Copyright (C) 1994 by Hamish Macdonald
7 * Taken from linux/kernel/ptrace.c and modified for M680x0.
8 * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
9 *
10 * Modified by Cort Dougan (cort@hq.fsmlabs.com)
11 * and Paul Mackerras (paulus@samba.org).
12 *
13 * This file is subject to the terms and conditions of the GNU General
14 * Public License. See the file README.legal in the main directory of
15 * this archive for more details.
16 */
17
18 #include <linux/kernel.h>
19 #include <linux/sched.h>
20 #include <linux/mm.h>
21 #include <linux/smp.h>
22 #include <linux/errno.h>
23 #include <linux/ptrace.h>
24 #include <linux/regset.h>
25 #include <linux/elf.h>
26 #include <linux/user.h>
27 #include <linux/security.h>
28 #include <linux/signal.h>
29 #include <linux/seccomp.h>
30 #include <linux/audit.h>
31 #ifdef CONFIG_PPC32
32 #include <linux/module.h>
33 #endif
34
35 #include <asm/uaccess.h>
36 #include <asm/page.h>
37 #include <asm/pgtable.h>
38 #include <asm/system.h>
39
40 /*
41 * This does not yet catch signals sent when the child dies;
42 * that would have to be done in exit.c or in signal.c.
43 */
44
45 /*
46 * Set of msr bits that gdb can change on behalf of a process.
47 */
48 #if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
49 #define MSR_DEBUGCHANGE 0
50 #else
51 #define MSR_DEBUGCHANGE (MSR_SE | MSR_BE)
52 #endif
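/*
 * Editor's note: on "classic" (non-4xx, non-Book E) processors, MSR_SE
 * (single-step trace) and MSR_BE (branch trace) are the only MSR bits a
 * debugger is allowed to flip.  The 4xx and Book E families do their
 * stepping through the debug facility instead (DBCR0 plus MSR_DE, see
 * user_enable_single_step() below), so no MSR bits are writable there.
 */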
53
54 /*
55 * Max register writeable via put_reg
56 */
57 #ifdef CONFIG_PPC32
58 #define PT_MAX_PUT_REG PT_MQ
59 #else
60 #define PT_MAX_PUT_REG PT_CCR
61 #endif
62
63 static unsigned long get_user_msr(struct task_struct *task)
64 {
65 return task->thread.regs->msr | task->thread.fpexc_mode;
66 }
67
68 static int set_user_msr(struct task_struct *task, unsigned long msr)
69 {
70 task->thread.regs->msr &= ~MSR_DEBUGCHANGE;
71 task->thread.regs->msr |= msr & MSR_DEBUGCHANGE;
72 return 0;
73 }
74
75 /*
76 * We prevent mucking around with the reserved bits of the trap word,
77 * which are used internally by the kernel.
78 */
79 static int set_user_trap(struct task_struct *task, unsigned long trap)
80 {
81 task->thread.regs->trap = trap & 0xfff0;
82 return 0;
83 }
84
85 /*
86 * Get contents of register REGNO in task TASK.
87 */
88 unsigned long ptrace_get_reg(struct task_struct *task, int regno)
89 {
90 if (task->thread.regs == NULL)
91 return -EIO;
92
93 if (regno == PT_MSR)
94 return get_user_msr(task);
95
96 if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long)))
97 return ((unsigned long *)task->thread.regs)[regno];
98
99 return -EIO;
100 }
101
102 /*
103 * Write contents of register REGNO in task TASK.
104 */
105 int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data)
106 {
107 if (task->thread.regs == NULL)
108 return -EIO;
109
110 if (regno == PT_MSR)
111 return set_user_msr(task, data);
112 if (regno == PT_TRAP)
113 return set_user_trap(task, data);
114
115 if (regno <= PT_MAX_PUT_REG) {
116 ((unsigned long *)task->thread.regs)[regno] = data;
117 return 0;
118 }
119 return -EIO;
120 }
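/*
 * Editor's note (illustrative sketch, not part of the original file): a
 * tracer typically reaches ptrace_get_reg()/ptrace_put_reg() through
 * PTRACE_PEEKUSR/PTRACE_POKEUSR, with the register offset passed in
 * 'addr' (see arch_ptrace() below).  A minimal user-space fragment,
 * assuming 'pid' is an attached, stopped child and PT_R1 comes from
 * <asm/ptrace.h>:
 *
 *	#include <errno.h>
 *	#include <sys/ptrace.h>
 *	#include <asm/ptrace.h>
 *
 *	errno = 0;
 *	long r1 = ptrace(PTRACE_PEEKUSER, pid, PT_R1 * sizeof(long), 0);
 *	if (r1 == -1 && errno)
 *		perror("PTRACE_PEEKUSER");
 *	else
 *		ptrace(PTRACE_POKEUSER, pid, PT_R1 * sizeof(long), r1);
 *
 * (glibc spells these requests PTRACE_PEEKUSER/PTRACE_POKEUSER.)
 */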
121
122 static int gpr_get(struct task_struct *target, const struct user_regset *regset,
123 unsigned int pos, unsigned int count,
124 void *kbuf, void __user *ubuf)
125 {
126 int ret;
127
128 if (target->thread.regs == NULL)
129 return -EIO;
130
131 CHECK_FULL_REGS(target->thread.regs);
132
133 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
134 target->thread.regs,
135 0, offsetof(struct pt_regs, msr));
136 if (!ret) {
137 unsigned long msr = get_user_msr(target);
138 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
139 offsetof(struct pt_regs, msr),
140 offsetof(struct pt_regs, msr) +
141 sizeof(msr));
142 }
143
144 BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
145 offsetof(struct pt_regs, msr) + sizeof(long));
146
147 if (!ret)
148 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
149 &target->thread.regs->orig_gpr3,
150 offsetof(struct pt_regs, orig_gpr3),
151 sizeof(struct pt_regs));
152 if (!ret)
153 ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
154 sizeof(struct pt_regs), -1);
155
156 return ret;
157 }
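/*
 * Editor's note: user_regset_copyout()/user_regset_copyin() transfer only
 * the part of [start_pos, end_pos) that overlaps the caller's remaining
 * [pos, pos + count) window, advancing pos/kbuf/ubuf and shrinking count
 * as they go; an end_pos of -1 means "to the end of the regset".  That is
 * what lets gpr_get() above and gpr_set() below walk pt_regs in chunks,
 * special-casing the MSR and trap words in between.
 */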
158
159 static int gpr_set(struct task_struct *target, const struct user_regset *regset,
160 unsigned int pos, unsigned int count,
161 const void *kbuf, const void __user *ubuf)
162 {
163 unsigned long reg;
164 int ret;
165
166 if (target->thread.regs == NULL)
167 return -EIO;
168
169 CHECK_FULL_REGS(target->thread.regs);
170
171 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
172 target->thread.regs,
173 0, PT_MSR * sizeof(reg));
174
175 if (!ret && count > 0) {
176 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
177 PT_MSR * sizeof(reg),
178 (PT_MSR + 1) * sizeof(reg));
179 if (!ret)
180 ret = set_user_msr(target, reg);
181 }
182
183 BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
184 offsetof(struct pt_regs, msr) + sizeof(long));
185
186 if (!ret)
187 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
188 &target->thread.regs->orig_gpr3,
189 PT_ORIG_R3 * sizeof(reg),
190 (PT_MAX_PUT_REG + 1) * sizeof(reg));
191
192 if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
193 ret = user_regset_copyin_ignore(
194 &pos, &count, &kbuf, &ubuf,
195 (PT_MAX_PUT_REG + 1) * sizeof(reg),
196 PT_TRAP * sizeof(reg));
197
198 if (!ret && count > 0) {
199 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
200 PT_TRAP * sizeof(reg),
201 (PT_TRAP + 1) * sizeof(reg));
202 if (!ret)
203 ret = set_user_trap(target, reg);
204 }
205
206 if (!ret)
207 ret = user_regset_copyin_ignore(
208 &pos, &count, &kbuf, &ubuf,
209 (PT_TRAP + 1) * sizeof(reg), -1);
210
211 return ret;
212 }
213
214 static int fpr_get(struct task_struct *target, const struct user_regset *regset,
215 unsigned int pos, unsigned int count,
216 void *kbuf, void __user *ubuf)
217 {
218 #ifdef CONFIG_VSX
219 double buf[33];
220 int i;
221 #endif
222 flush_fp_to_thread(target);
223
224 #ifdef CONFIG_VSX
225 /* copy to local buffer then write that out */
226 for (i = 0; i < 32 ; i++)
227 buf[i] = target->thread.TS_FPR(i);
228 memcpy(&buf[32], &target->thread.fpscr, sizeof(double));
229 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
230
231 #else
232 BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) !=
233 offsetof(struct thread_struct, TS_FPR(32)));
234
235 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
236 &target->thread.fpr, 0, -1);
237 #endif
238 }
239
240 static int fpr_set(struct task_struct *target, const struct user_regset *regset,
241 unsigned int pos, unsigned int count,
242 const void *kbuf, const void __user *ubuf)
243 {
244 #ifdef CONFIG_VSX
245 double buf[33];
246 int i;
247 #endif
248 flush_fp_to_thread(target);
249
250 #ifdef CONFIG_VSX
251 /* copy to local buffer then write that out */
252 i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
253 if (i)
254 return i;
255 for (i = 0; i < 32 ; i++)
256 target->thread.TS_FPR(i) = buf[i];
257 memcpy(&target->thread.fpscr, &buf[32], sizeof(double));
258 return 0;
259 #else
260 BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) !=
261 offsetof(struct thread_struct, TS_FPR(32)));
262
263 return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
264 &target->thread.fpr, 0, -1);
265 #endif
266 }
267
268 #ifdef CONFIG_ALTIVEC
269 /*
270 * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
271 * The transfer totals 34 quadwords. Quadwords 0-31 contain the
272 * corresponding vector registers. Quadword 32 contains the vscr as the
273 * last word (offset 12) within that quadword. Quadword 33 contains the
274 * vrsave as the first word (offset 0) within the quadword.
275 *
276 * This definition of the VMX state is compatible with the current PPC32
277 * ptrace interface. This allows signal handling and ptrace to use the
278 * same structures. This also simplifies the implementation of a bi-arch
279 * (combined 32- and 64-bit) gdb.
280 */
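/*
 * Editor's note: for illustration, the layout described above amounts to
 * something like the following (the struct name is hypothetical, not
 * part of any kernel or user header):
 *
 *	struct ppc_vmx_regs {
 *		vector128 vr[32];	// quadwords 0-31
 *		vector128 vscr_q;	// vscr in the last 32-bit word
 *		vector128 vrsave_q;	// vrsave in the first 32-bit word
 *	};
 *
 * which is what .n = 34, .size = sizeof(vector128) describes in the
 * regset tables further down.  Note that the older PTRACE_GETVRREGS /
 * PTRACE_SETVRREGS requests in arch_ptrace() transfer only the first
 * 32-bit word of the vrsave quadword (33 * sizeof(vector128) + 4 bytes).
 */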
281
282 static int vr_active(struct task_struct *target,
283 const struct user_regset *regset)
284 {
285 flush_altivec_to_thread(target);
286 return target->thread.used_vr ? regset->n : 0;
287 }
288
289 static int vr_get(struct task_struct *target, const struct user_regset *regset,
290 unsigned int pos, unsigned int count,
291 void *kbuf, void __user *ubuf)
292 {
293 int ret;
294
295 flush_altivec_to_thread(target);
296
297 BUILD_BUG_ON(offsetof(struct thread_struct, vscr) !=
298 offsetof(struct thread_struct, vr[32]));
299
300 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
301 &target->thread.vr, 0,
302 33 * sizeof(vector128));
303 if (!ret) {
304 /*
305 * Copy out only the low-order word of vrsave.
306 */
307 union {
308 elf_vrreg_t reg;
309 u32 word;
310 } vrsave;
311 memset(&vrsave, 0, sizeof(vrsave));
312 vrsave.word = target->thread.vrsave;
313 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
314 33 * sizeof(vector128), -1);
315 }
316
317 return ret;
318 }
319
320 static int vr_set(struct task_struct *target, const struct user_regset *regset,
321 unsigned int pos, unsigned int count,
322 const void *kbuf, const void __user *ubuf)
323 {
324 int ret;
325
326 flush_altivec_to_thread(target);
327
328 BUILD_BUG_ON(offsetof(struct thread_struct, vscr) !=
329 offsetof(struct thread_struct, vr[32]));
330
331 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
332 &target->thread.vr, 0, 33 * sizeof(vector128));
333 if (!ret && count > 0) {
334 /*
335 * We use only the first word of vrsave.
336 */
337 union {
338 elf_vrreg_t reg;
339 u32 word;
340 } vrsave;
341 memset(&vrsave, 0, sizeof(vrsave));
342 vrsave.word = target->thread.vrsave;
343 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
344 33 * sizeof(vector128), -1);
345 if (!ret)
346 target->thread.vrsave = vrsave.word;
347 }
348
349 return ret;
350 }
351 #endif /* CONFIG_ALTIVEC */
352
353 #ifdef CONFIG_VSX
354 /*
355 * Currently, to set and get all the VSX state, you need to call the
356 * FP and VMX calls as well. This only gets/sets the lower 32
357 * 128-bit VSX registers.
358 */
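/*
 * Editor's note: each full VSX register is 128 bits wide, and VSR0-VSR31
 * share their upper 64 bits with FPR0-FPR31, so the FPR regset above
 * already carries that half.  The helpers below therefore move only the
 * low 64 bits, which live next to each FPR slot and are addressed as
 * fpr[i][TS_VSRLOWOFFSET]; hence .n = 32, .size = sizeof(double) in the
 * regset table further down.
 */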
359
360 static int vsr_active(struct task_struct *target,
361 const struct user_regset *regset)
362 {
363 flush_vsx_to_thread(target);
364 return target->thread.used_vsr ? regset->n : 0;
365 }
366
367 static int vsr_get(struct task_struct *target, const struct user_regset *regset,
368 unsigned int pos, unsigned int count,
369 void *kbuf, void __user *ubuf)
370 {
371 double buf[32];
372 int ret, i;
373
374 flush_vsx_to_thread(target);
375
376 for (i = 0; i < 32 ; i++)
377 buf[i] = target->thread.fpr[i][TS_VSRLOWOFFSET];
378 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
379 buf, 0, 32 * sizeof(double));
380
381 return ret;
382 }
383
384 static int vsr_set(struct task_struct *target, const struct user_regset *regset,
385 unsigned int pos, unsigned int count,
386 const void *kbuf, const void __user *ubuf)
387 {
388 double buf[32];
389 int ret, i;
390
391 flush_vsx_to_thread(target);
392
393 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
394 buf, 0, 32 * sizeof(double));
395 for (i = 0; i < 32 ; i++)
396 target->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i];
397
398
399 return ret;
400 }
401 #endif /* CONFIG_VSX */
402
403 #ifdef CONFIG_SPE
404
405 /*
406 * For get_evrregs/set_evrregs functions 'data' has the following layout:
407 *
408 * struct {
409 * u32 evr[32];
410 * u64 acc;
411 * u32 spefscr;
412 * }
413 */
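/*
 * Editor's note: that layout is 32 + 2 + 1 = 35 32-bit words (the 64-bit
 * accumulator counts as two), which is where .n = 35 and
 * .size = sizeof(u32) in the regset tables below come from.
 */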
414
415 static int evr_active(struct task_struct *target,
416 const struct user_regset *regset)
417 {
418 flush_spe_to_thread(target);
419 return target->thread.used_spe ? regset->n : 0;
420 }
421
422 static int evr_get(struct task_struct *target, const struct user_regset *regset,
423 unsigned int pos, unsigned int count,
424 void *kbuf, void __user *ubuf)
425 {
426 int ret;
427
428 flush_spe_to_thread(target);
429
430 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
431 &target->thread.evr,
432 0, sizeof(target->thread.evr));
433
434 BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
435 offsetof(struct thread_struct, spefscr));
436
437 if (!ret)
438 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
439 &target->thread.acc,
440 sizeof(target->thread.evr), -1);
441
442 return ret;
443 }
444
445 static int evr_set(struct task_struct *target, const struct user_regset *regset,
446 unsigned int pos, unsigned int count,
447 const void *kbuf, const void __user *ubuf)
448 {
449 int ret;
450
451 flush_spe_to_thread(target);
452
453 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
454 &target->thread.evr,
455 0, sizeof(target->thread.evr));
456
457 BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
458 offsetof(struct thread_struct, spefscr));
459
460 if (!ret)
461 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
462 &target->thread.acc,
463 sizeof(target->thread.evr), -1);
464
465 return ret;
466 }
467 #endif /* CONFIG_SPE */
468
469
470 /*
471 * These are our native regset flavors.
472 */
473 enum powerpc_regset {
474 REGSET_GPR,
475 REGSET_FPR,
476 #ifdef CONFIG_ALTIVEC
477 REGSET_VMX,
478 #endif
479 #ifdef CONFIG_VSX
480 REGSET_VSX,
481 #endif
482 #ifdef CONFIG_SPE
483 REGSET_SPE,
484 #endif
485 };
486
487 static const struct user_regset native_regsets[] = {
488 [REGSET_GPR] = {
489 .core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
490 .size = sizeof(long), .align = sizeof(long),
491 .get = gpr_get, .set = gpr_set
492 },
493 [REGSET_FPR] = {
494 .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
495 .size = sizeof(double), .align = sizeof(double),
496 .get = fpr_get, .set = fpr_set
497 },
498 #ifdef CONFIG_ALTIVEC
499 [REGSET_VMX] = {
500 .core_note_type = NT_PPC_VMX, .n = 34,
501 .size = sizeof(vector128), .align = sizeof(vector128),
502 .active = vr_active, .get = vr_get, .set = vr_set
503 },
504 #endif
505 #ifdef CONFIG_VSX
506 [REGSET_VSX] = {
507 .core_note_type = NT_PPC_VSX, .n = 32,
508 .size = sizeof(double), .align = sizeof(double),
509 .active = vsr_active, .get = vsr_get, .set = vsr_set
510 },
511 #endif
512 #ifdef CONFIG_SPE
513 [REGSET_SPE] = {
514 .core_note_type = NT_PPC_SPE, .n = 35,
515 .size = sizeof(u32), .align = sizeof(u32),
516 .active = evr_active, .get = evr_get, .set = evr_set
517 },
518 #endif
519 };
520
521 static const struct user_regset_view user_ppc_native_view = {
522 .name = UTS_MACHINE, .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
523 .regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
524 };
525
526 #ifdef CONFIG_PPC64
527 #include <linux/compat.h>
528
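/*
 * Editor's note: gpr32_get()/gpr32_set() below present the 64-bit pt_regs
 * to a 32-bit tracer: values are truncated to compat_ulong_t on the way
 * out and zero-extended on the way in, while MSR and trap accesses are
 * still funnelled through get_user_msr(), set_user_msr() and
 * set_user_trap() so the same bits are protected as in the native path.
 */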
529 static int gpr32_get(struct task_struct *target,
530 const struct user_regset *regset,
531 unsigned int pos, unsigned int count,
532 void *kbuf, void __user *ubuf)
533 {
534 const unsigned long *regs = &target->thread.regs->gpr[0];
535 compat_ulong_t *k = kbuf;
536 compat_ulong_t __user *u = ubuf;
537 compat_ulong_t reg;
538
539 if (target->thread.regs == NULL)
540 return -EIO;
541
542 CHECK_FULL_REGS(target->thread.regs);
543
544 pos /= sizeof(reg);
545 count /= sizeof(reg);
546
547 if (kbuf)
548 for (; count > 0 && pos < PT_MSR; --count)
549 *k++ = regs[pos++];
550 else
551 for (; count > 0 && pos < PT_MSR; --count)
552 if (__put_user((compat_ulong_t) regs[pos++], u++))
553 return -EFAULT;
554
555 if (count > 0 && pos == PT_MSR) {
556 reg = get_user_msr(target);
557 if (kbuf)
558 *k++ = reg;
559 else if (__put_user(reg, u++))
560 return -EFAULT;
561 ++pos;
562 --count;
563 }
564
565 if (kbuf)
566 for (; count > 0 && pos < PT_REGS_COUNT; --count)
567 *k++ = regs[pos++];
568 else
569 for (; count > 0 && pos < PT_REGS_COUNT; --count)
570 if (__put_user((compat_ulong_t) regs[pos++], u++))
571 return -EFAULT;
572
573 kbuf = k;
574 ubuf = u;
575 pos *= sizeof(reg);
576 count *= sizeof(reg);
577 return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
578 PT_REGS_COUNT * sizeof(reg), -1);
579 }
580
581 static int gpr32_set(struct task_struct *target,
582 const struct user_regset *regset,
583 unsigned int pos, unsigned int count,
584 const void *kbuf, const void __user *ubuf)
585 {
586 unsigned long *regs = &target->thread.regs->gpr[0];
587 const compat_ulong_t *k = kbuf;
588 const compat_ulong_t __user *u = ubuf;
589 compat_ulong_t reg;
590
591 if (target->thread.regs == NULL)
592 return -EIO;
593
594 CHECK_FULL_REGS(target->thread.regs);
595
596 pos /= sizeof(reg);
597 count /= sizeof(reg);
598
599 if (kbuf)
600 for (; count > 0 && pos < PT_MSR; --count)
601 regs[pos++] = *k++;
602 else
603 for (; count > 0 && pos < PT_MSR; --count) {
604 if (__get_user(reg, u++))
605 return -EFAULT;
606 regs[pos++] = reg;
607 }
608
609
610 if (count > 0 && pos == PT_MSR) {
611 if (kbuf)
612 reg = *k++;
613 else if (__get_user(reg, u++))
614 return -EFAULT;
615 set_user_msr(target, reg);
616 ++pos;
617 --count;
618 }
619
620 if (kbuf) {
621 for (; count > 0 && pos <= PT_MAX_PUT_REG; --count)
622 regs[pos++] = *k++;
623 for (; count > 0 && pos < PT_TRAP; --count, ++pos)
624 ++k;
625 } else {
626 for (; count > 0 && pos <= PT_MAX_PUT_REG; --count) {
627 if (__get_user(reg, u++))
628 return -EFAULT;
629 regs[pos++] = reg;
630 }
631 for (; count > 0 && pos < PT_TRAP; --count, ++pos)
632 if (__get_user(reg, u++))
633 return -EFAULT;
634 }
635
636 if (count > 0 && pos == PT_TRAP) {
637 if (kbuf)
638 reg = *k++;
639 else if (__get_user(reg, u++))
640 return -EFAULT;
641 set_user_trap(target, reg);
642 ++pos;
643 --count;
644 }
645
646 kbuf = k;
647 ubuf = u;
648 pos *= sizeof(reg);
649 count *= sizeof(reg);
650 return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
651 (PT_TRAP + 1) * sizeof(reg), -1);
652 }
653
654 /*
655 * These are the regset flavors matching the CONFIG_PPC32 native set.
656 */
657 static const struct user_regset compat_regsets[] = {
658 [REGSET_GPR] = {
659 .core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
660 .size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
661 .get = gpr32_get, .set = gpr32_set
662 },
663 [REGSET_FPR] = {
664 .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
665 .size = sizeof(double), .align = sizeof(double),
666 .get = fpr_get, .set = fpr_set
667 },
668 #ifdef CONFIG_ALTIVEC
669 [REGSET_VMX] = {
670 .core_note_type = NT_PPC_VMX, .n = 34,
671 .size = sizeof(vector128), .align = sizeof(vector128),
672 .active = vr_active, .get = vr_get, .set = vr_set
673 },
674 #endif
675 #ifdef CONFIG_SPE
676 [REGSET_SPE] = {
677 .core_note_type = NT_PPC_SPE, .n = 35,
678 .size = sizeof(u32), .align = sizeof(u32),
679 .active = evr_active, .get = evr_get, .set = evr_set
680 },
681 #endif
682 };
683
684 static const struct user_regset_view user_ppc_compat_view = {
685 .name = "ppc", .e_machine = EM_PPC, .ei_osabi = ELF_OSABI,
686 .regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
687 };
688 #endif /* CONFIG_PPC64 */
689
690 const struct user_regset_view *task_user_regset_view(struct task_struct *task)
691 {
692 #ifdef CONFIG_PPC64
693 if (test_tsk_thread_flag(task, TIF_32BIT))
694 return &user_ppc_compat_view;
695 #endif
696 return &user_ppc_native_view;
697 }
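/*
 * Editor's note: this hook is how generic code (the ELF core-dump writer,
 * for instance) picks a register layout, so a 32-bit task on a 64-bit
 * kernel is described with the compat view above instead of the native
 * one.
 */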
698
699
700 void user_enable_single_step(struct task_struct *task)
701 {
702 struct pt_regs *regs = task->thread.regs;
703
704 if (regs != NULL) {
705 #if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
706 task->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC;
707 regs->msr |= MSR_DE;
708 #else
709 regs->msr |= MSR_SE;
710 #endif
711 }
712 set_tsk_thread_flag(task, TIF_SINGLESTEP);
713 }
714
715 void user_disable_single_step(struct task_struct *task)
716 {
717 struct pt_regs *regs = task->thread.regs;
718
719
720 #if defined(CONFIG_44x) || defined(CONFIG_BOOKE)
721 /* If DAC then do not single step, skip */
722 if (task->thread.dabr)
723 return;
724 #endif
725
726 if (regs != NULL) {
727 #if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
728 task->thread.dbcr0 &= ~(DBCR0_IC | DBCR0_IDM);
729 regs->msr &= ~MSR_DE;
730 #else
731 regs->msr &= ~MSR_SE;
732 #endif
733 }
734 clear_tsk_thread_flag(task, TIF_SINGLESTEP);
735 }
736
737 int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
738 unsigned long data)
739 {
740 /* For ppc64 we support one DABR and no IABRs at the moment.
741 * For embedded processors we support one DAC and no IACs at the
742 * moment.
743 */
744 if (addr > 0)
745 return -EINVAL;
746
747 if ((data & ~0x7UL) >= TASK_SIZE)
748 return -EIO;
749
750 #ifdef CONFIG_PPC64
751
752 /* For processors using DABR (i.e. 970), the bottom 3 bits are flags.
753 * It was assumed, on previous implementations, that 3 bits were
754 * passed together with the data address, fitting the design of the
755 * DABR register, as follows:
756 *
757 * bit 0: Read flag
758 * bit 1: Write flag
759 * bit 2: Breakpoint translation
760 *
761 * Thus, we use them here as so.
762 */
763
764 /* Ensure breakpoint translation bit is set */
765 if (data && !(data & DABR_TRANSLATION))
766 return -EIO;
767
768 /* Move contents to the DABR register */
769 task->thread.dabr = data;
770
771 #endif
772 #if defined(CONFIG_44x) || defined(CONFIG_BOOKE)
773
774 /* As described above, it was assumed 3 bits were passed with the data
775 * address, but we will assume only the mode bits will be passed
776 * as to not cause alignment restrictions for DAC-based processors.
777 */
778
779 /* DAC's hold the whole address without any mode flags */
780 task->thread.dabr = data & ~0x3UL;
781
782 if (task->thread.dabr == 0) {
783 task->thread.dbcr0 &= ~(DBSR_DAC1R | DBSR_DAC1W | DBCR0_IDM);
784 task->thread.regs->msr &= ~MSR_DE;
785 return 0;
786 }
787
788 /* Read or Write bits must be set */
789
790 if (!(data & 0x3UL))
791 return -EINVAL;
792
793 /* Set the Internal Debugging flag (IDM bit 1) for the DBCR0
794 register */
795 task->thread.dbcr0 = DBCR0_IDM;
796
797 /* Check for write and read flags and set DBCR0
798 accordingly */
799 if (data & 0x1UL)
800 task->thread.dbcr0 |= DBSR_DAC1R;
801 if (data & 0x2UL)
802 task->thread.dbcr0 |= DBSR_DAC1W;
803
804 task->thread.regs->msr |= MSR_DE;
805 #endif
806 return 0;
807 }
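/*
 * Editor's note (illustrative sketch, not part of the original file): a
 * tracer could arm a data breakpoint on a DABR-based (server) CPU with
 * the fragment below, where 'watch_addr' is a hypothetical 8-byte-aligned
 * address in the child and the low-order bits follow the flag layout
 * described above (bit 0 read, bit 1 write, bit 2 translation):
 *
 *	unsigned long dabr = watch_addr | 0x7UL;
 *	if (ptrace(PTRACE_SET_DEBUGREG, pid, 0, dabr) == -1)
 *		perror("PTRACE_SET_DEBUGREG");
 *
 * Clearing it again is just ptrace(PTRACE_SET_DEBUGREG, pid, 0, 0).
 */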
808
809 /*
810 * Called by kernel/ptrace.c when detaching..
811 *
812 * Make sure single step bits etc are not set.
813 */
814 void ptrace_disable(struct task_struct *child)
815 {
816 /* make sure the single step bit is not set. */
817 user_disable_single_step(child);
818 }
819
820 /*
821 * Here are the old "legacy" powerpc specific getregs/setregs ptrace calls,
822 * we mark them as obsolete now, they will be removed in a future version
823 */
824 static long arch_ptrace_old(struct task_struct *child, long request, long addr,
825 long data)
826 {
827 switch (request) {
828 case PPC_PTRACE_GETREGS: /* Get GPRs 0 - 31. */
829 return copy_regset_to_user(child, &user_ppc_native_view,
830 REGSET_GPR, 0, 32 * sizeof(long),
831 (void __user *) data);
832
833 case PPC_PTRACE_SETREGS: /* Set GPRs 0 - 31. */
834 return copy_regset_from_user(child, &user_ppc_native_view,
835 REGSET_GPR, 0, 32 * sizeof(long),
836 (const void __user *) data);
837
838 case PPC_PTRACE_GETFPREGS: /* Get FPRs 0 - 31. */
839 return copy_regset_to_user(child, &user_ppc_native_view,
840 REGSET_FPR, 0, 32 * sizeof(double),
841 (void __user *) data);
842
843 case PPC_PTRACE_SETFPREGS: /* Set FPRs 0 - 31. */
844 return copy_regset_from_user(child, &user_ppc_native_view,
845 REGSET_FPR, 0, 32 * sizeof(double),
846 (const void __user *) data);
847 }
848
849 return -EPERM;
850 }
851
852 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
853 {
854 int ret = -EPERM;
855
856 switch (request) {
857 /* read the word at location addr in the USER area. */
858 case PTRACE_PEEKUSR: {
859 unsigned long index, tmp;
860
861 ret = -EIO;
862 /* convert to index and check */
863 #ifdef CONFIG_PPC32
864 index = (unsigned long) addr >> 2;
865 if ((addr & 3) || (index > PT_FPSCR)
866 || (child->thread.regs == NULL))
867 #else
868 index = (unsigned long) addr >> 3;
869 if ((addr & 7) || (index > PT_FPSCR))
870 #endif
871 break;
872
873 CHECK_FULL_REGS(child->thread.regs);
874 if (index < PT_FPR0) {
875 tmp = ptrace_get_reg(child, (int) index);
876 } else {
877 flush_fp_to_thread(child);
878 tmp = ((unsigned long *)child->thread.fpr)
879 [TS_FPRWIDTH * (index - PT_FPR0)];
880 }
881 ret = put_user(tmp,(unsigned long __user *) data);
882 break;
883 }
884
885 /* write the word at location addr in the USER area */
886 case PTRACE_POKEUSR: {
887 unsigned long index;
888
889 ret = -EIO;
890 /* convert to index and check */
891 #ifdef CONFIG_PPC32
892 index = (unsigned long) addr >> 2;
893 if ((addr & 3) || (index > PT_FPSCR)
894 || (child->thread.regs == NULL))
895 #else
896 index = (unsigned long) addr >> 3;
897 if ((addr & 7) || (index > PT_FPSCR))
898 #endif
899 break;
900
901 CHECK_FULL_REGS(child->thread.regs);
902 if (index < PT_FPR0) {
903 ret = ptrace_put_reg(child, index, data);
904 } else {
905 flush_fp_to_thread(child);
906 ((unsigned long *)child->thread.fpr)
907 [TS_FPRWIDTH * (index - PT_FPR0)] = data;
908 ret = 0;
909 }
910 break;
911 }
912
913 case PTRACE_GET_DEBUGREG: {
914 ret = -EINVAL;
915 /* We only support one DABR and no IABRs at the moment */
916 if (addr > 0)
917 break;
918 ret = put_user(child->thread.dabr,
919 (unsigned long __user *)data);
920 break;
921 }
922
923 case PTRACE_SET_DEBUGREG:
924 ret = ptrace_set_debugreg(child, addr, data);
925 break;
926
927 #ifdef CONFIG_PPC64
928 case PTRACE_GETREGS64:
929 #endif
930 case PTRACE_GETREGS: /* Get all pt_regs from the child. */
931 return copy_regset_to_user(child, &user_ppc_native_view,
932 REGSET_GPR,
933 0, sizeof(struct pt_regs),
934 (void __user *) data);
935
936 #ifdef CONFIG_PPC64
937 case PTRACE_SETREGS64:
938 #endif
939 case PTRACE_SETREGS: /* Set all gp regs in the child. */
940 return copy_regset_from_user(child, &user_ppc_native_view,
941 REGSET_GPR,
942 0, sizeof(struct pt_regs),
943 (const void __user *) data);
944
945 case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */
946 return copy_regset_to_user(child, &user_ppc_native_view,
947 REGSET_FPR,
948 0, sizeof(elf_fpregset_t),
949 (void __user *) data);
950
951 case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */
952 return copy_regset_from_user(child, &user_ppc_native_view,
953 REGSET_FPR,
954 0, sizeof(elf_fpregset_t),
955 (const void __user *) data);
956
957 #ifdef CONFIG_ALTIVEC
958 case PTRACE_GETVRREGS:
959 return copy_regset_to_user(child, &user_ppc_native_view,
960 REGSET_VMX,
961 0, (33 * sizeof(vector128) +
962 sizeof(u32)),
963 (void __user *) data);
964
965 case PTRACE_SETVRREGS:
966 return copy_regset_from_user(child, &user_ppc_native_view,
967 REGSET_VMX,
968 0, (33 * sizeof(vector128) +
969 sizeof(u32)),
970 (const void __user *) data);
971 #endif
972 #ifdef CONFIG_VSX
973 case PTRACE_GETVSRREGS:
974 return copy_regset_to_user(child, &user_ppc_native_view,
975 REGSET_VSX,
976 0, (32 * sizeof(vector128) +
977 sizeof(u32)),
978 (void __user *) data);
979
980 case PTRACE_SETVSRREGS:
981 return copy_regset_from_user(child, &user_ppc_native_view,
982 REGSET_VSX,
983 0, (32 * sizeof(vector128) +
984 sizeof(u32)),
985 (const void __user *) data);
986 #endif
987 #ifdef CONFIG_SPE
988 case PTRACE_GETEVRREGS:
989 /* Get the child spe register state. */
990 return copy_regset_to_user(child, &user_ppc_native_view,
991 REGSET_SPE, 0, 35 * sizeof(u32),
992 (void __user *) data);
993
994 case PTRACE_SETEVRREGS:
995 /* Set the child spe register state. */
996 return copy_regset_from_user(child, &user_ppc_native_view,
997 REGSET_SPE, 0, 35 * sizeof(u32),
998 (const void __user *) data);
999 #endif
1000
1001 /* Old reverse args ptrace calls */
1002 case PPC_PTRACE_GETREGS: /* Get GPRs 0 - 31. */
1003 case PPC_PTRACE_SETREGS: /* Set GPRs 0 - 31. */
1004 case PPC_PTRACE_GETFPREGS: /* Get FPRs 0 - 31. */
1005 case PPC_PTRACE_SETFPREGS: /* Set FPRs 0 - 31. */
1006 ret = arch_ptrace_old(child, request, addr, data);
1007 break;
1008
1009 default:
1010 ret = ptrace_request(child, request, addr, data);
1011 break;
1012 }
1013 return ret;
1014 }
1015
1016 static void do_syscall_trace(void)
1017 {
1018 /* the 0x80 provides a way for the tracing parent to distinguish
1019 between a syscall stop and SIGTRAP delivery */
1020 ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
1021 ? 0x80 : 0));
1022
1023 /*
1024 * this isn't the same as continuing with a signal, but it will do
1025 * for normal use. strace only continues with a signal if the
1026 * stopping signal is not SIGTRAP. -brl
1027 */
1028 if (current->exit_code) {
1029 send_sig(current->exit_code, current, 1);
1030 current->exit_code = 0;
1031 }
1032 }
1033
1034 void do_syscall_trace_enter(struct pt_regs *regs)
1035 {
1036 secure_computing(regs->gpr[0]);
1037
1038 if (test_thread_flag(TIF_SYSCALL_TRACE)
1039 && (current->ptrace & PT_PTRACED))
1040 do_syscall_trace();
1041
1042 if (unlikely(current->audit_context)) {
1043 #ifdef CONFIG_PPC64
1044 if (!test_thread_flag(TIF_32BIT))
1045 audit_syscall_entry(AUDIT_ARCH_PPC64,
1046 regs->gpr[0],
1047 regs->gpr[3], regs->gpr[4],
1048 regs->gpr[5], regs->gpr[6]);
1049 else
1050 #endif
1051 audit_syscall_entry(AUDIT_ARCH_PPC,
1052 regs->gpr[0],
1053 regs->gpr[3] & 0xffffffff,
1054 regs->gpr[4] & 0xffffffff,
1055 regs->gpr[5] & 0xffffffff,
1056 regs->gpr[6] & 0xffffffff);
1057 }
1058 }
1059
1060 void do_syscall_trace_leave(struct pt_regs *regs)
1061 {
1062 if (unlikely(current->audit_context))
1063 audit_syscall_exit((regs->ccr & 0x10000000) ? AUDITSC_FAILURE : AUDITSC_SUCCESS,
1064 regs->result);
1065
1066 if ((test_thread_flag(TIF_SYSCALL_TRACE)
1067 || test_thread_flag(TIF_SINGLESTEP))
1068 && (current->ptrace & PT_PTRACED))
1069 do_syscall_trace();
1070 }