arch/powerpc/kernel/ptrace.c
1 /*
2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *
5 * Derived from "arch/m68k/kernel/ptrace.c"
6 * Copyright (C) 1994 by Hamish Macdonald
7 * Taken from linux/kernel/ptrace.c and modified for M680x0.
8 * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
9 *
10 * Modified by Cort Dougan (cort@hq.fsmlabs.com)
11 * and Paul Mackerras (paulus@samba.org).
12 *
13 * This file is subject to the terms and conditions of the GNU General
14 * Public License. See the file README.legal in the main directory of
15 * this archive for more details.
16 */
17
18 #include <linux/kernel.h>
19 #include <linux/sched.h>
20 #include <linux/mm.h>
21 #include <linux/smp.h>
22 #include <linux/errno.h>
23 #include <linux/ptrace.h>
24 #include <linux/regset.h>
25 #include <linux/tracehook.h>
26 #include <linux/elf.h>
27 #include <linux/user.h>
28 #include <linux/security.h>
29 #include <linux/signal.h>
30 #include <linux/seccomp.h>
31 #include <linux/audit.h>
32 #include <trace/syscall.h>
33 #include <linux/hw_breakpoint.h>
34 #include <linux/perf_event.h>
35 #include <linux/context_tracking.h>
36
37 #include <asm/uaccess.h>
38 #include <asm/page.h>
39 #include <asm/pgtable.h>
40 #include <asm/switch_to.h>
41
42 #define CREATE_TRACE_POINTS
43 #include <trace/events/syscalls.h>
44
45 /*
46 * The parameter save area on the stack is used to store arguments being passed
47 * to the callee function and is located at a fixed offset from the stack pointer.
48 */
49 #ifdef CONFIG_PPC32
50 #define PARAMETER_SAVE_AREA_OFFSET 24 /* bytes */
51 #else /* CONFIG_PPC32 */
52 #define PARAMETER_SAVE_AREA_OFFSET 48 /* bytes */
53 #endif
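
/*
 * Purely illustrative sketch (hypothetical, not used by this file): with
 * the offsets above, an argument that the caller spilled to the parameter
 * save area could be located relative to the stack pointer (r1) roughly as
 *
 *	addr = regs->gpr[1] + PARAMETER_SAVE_AREA_OFFSET +
 *	       n * sizeof(unsigned long);
 *
 * where 'n' is a hypothetical argument index and the exact layout depends
 * on the ABI in use.
 */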
54
55 struct pt_regs_offset {
56 const char *name;
57 int offset;
58 };
59
60 #define STR(s) #s /* convert to string */
61 #define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
62 #define GPR_OFFSET_NAME(num) \
63 {.name = STR(r##num), .offset = offsetof(struct pt_regs, gpr[num])}, \
64 {.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
65 #define REG_OFFSET_END {.name = NULL, .offset = 0}
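
/*
 * For illustration, GPR_OFFSET_NAME(3) expands to the two table entries
 *
 *	{.name = "r3",   .offset = offsetof(struct pt_regs, gpr[3])},
 *	{.name = "gpr3", .offset = offsetof(struct pt_regs, gpr[3])}
 *
 * so each GPR can be looked up under either spelling.
 */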
66
67 static const struct pt_regs_offset regoffset_table[] = {
68 GPR_OFFSET_NAME(0),
69 GPR_OFFSET_NAME(1),
70 GPR_OFFSET_NAME(2),
71 GPR_OFFSET_NAME(3),
72 GPR_OFFSET_NAME(4),
73 GPR_OFFSET_NAME(5),
74 GPR_OFFSET_NAME(6),
75 GPR_OFFSET_NAME(7),
76 GPR_OFFSET_NAME(8),
77 GPR_OFFSET_NAME(9),
78 GPR_OFFSET_NAME(10),
79 GPR_OFFSET_NAME(11),
80 GPR_OFFSET_NAME(12),
81 GPR_OFFSET_NAME(13),
82 GPR_OFFSET_NAME(14),
83 GPR_OFFSET_NAME(15),
84 GPR_OFFSET_NAME(16),
85 GPR_OFFSET_NAME(17),
86 GPR_OFFSET_NAME(18),
87 GPR_OFFSET_NAME(19),
88 GPR_OFFSET_NAME(20),
89 GPR_OFFSET_NAME(21),
90 GPR_OFFSET_NAME(22),
91 GPR_OFFSET_NAME(23),
92 GPR_OFFSET_NAME(24),
93 GPR_OFFSET_NAME(25),
94 GPR_OFFSET_NAME(26),
95 GPR_OFFSET_NAME(27),
96 GPR_OFFSET_NAME(28),
97 GPR_OFFSET_NAME(29),
98 GPR_OFFSET_NAME(30),
99 GPR_OFFSET_NAME(31),
100 REG_OFFSET_NAME(nip),
101 REG_OFFSET_NAME(msr),
102 REG_OFFSET_NAME(ctr),
103 REG_OFFSET_NAME(link),
104 REG_OFFSET_NAME(xer),
105 REG_OFFSET_NAME(ccr),
106 #ifdef CONFIG_PPC64
107 REG_OFFSET_NAME(softe),
108 #else
109 REG_OFFSET_NAME(mq),
110 #endif
111 REG_OFFSET_NAME(trap),
112 REG_OFFSET_NAME(dar),
113 REG_OFFSET_NAME(dsisr),
114 REG_OFFSET_END,
115 };
116
117 /**
118 * regs_query_register_offset() - query register offset from its name
119 * @name: the name of a register
120 *
121 * regs_query_register_offset() returns the offset of a register in struct
122 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
123 */
124 int regs_query_register_offset(const char *name)
125 {
126 const struct pt_regs_offset *roff;
127 for (roff = regoffset_table; roff->name != NULL; roff++)
128 if (!strcmp(roff->name, name))
129 return roff->offset;
130 return -EINVAL;
131 }
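
/*
 * Illustrative use only (caller-provided 'regs' and 'val' are assumed):
 * a probe handler could resolve a register by name and then read it
 * straight out of a pt_regs:
 *
 *	int off = regs_query_register_offset("r3");
 *	if (off >= 0)
 *		val = *(unsigned long *)((char *)regs + off);
 */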
132
133 /**
134 * regs_query_register_name() - query register name from its offset
135 * @offset: the offset of a register in struct pt_regs.
136 *
137 * regs_query_register_name() returns the name of a register from its
138 * offset in struct pt_regs. If the @offset is invalid, this returns NULL.
139 */
140 const char *regs_query_register_name(unsigned int offset)
141 {
142 const struct pt_regs_offset *roff;
143 for (roff = regoffset_table; roff->name != NULL; roff++)
144 if (roff->offset == offset)
145 return roff->name;
146 return NULL;
147 }
148
149 /*
150 * This does not yet catch signals sent when the child dies;
151 * that is handled in exit.c or in signal.c.
152 */
153
154 /*
155 * Set of msr bits that gdb can change on behalf of a process.
156 */
157 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
158 #define MSR_DEBUGCHANGE 0
159 #else
160 #define MSR_DEBUGCHANGE (MSR_SE | MSR_BE)
161 #endif
162
163 /*
164 * Max register writeable via put_reg
165 */
166 #ifdef CONFIG_PPC32
167 #define PT_MAX_PUT_REG PT_MQ
168 #else
169 #define PT_MAX_PUT_REG PT_CCR
170 #endif
171
172 static unsigned long get_user_msr(struct task_struct *task)
173 {
174 return task->thread.regs->msr | task->thread.fpexc_mode;
175 }
176
177 static int set_user_msr(struct task_struct *task, unsigned long msr)
178 {
179 task->thread.regs->msr &= ~MSR_DEBUGCHANGE;
180 task->thread.regs->msr |= msr & MSR_DEBUGCHANGE;
181 return 0;
182 }
183
184 #ifdef CONFIG_PPC64
185 static int get_user_dscr(struct task_struct *task, unsigned long *data)
186 {
187 *data = task->thread.dscr;
188 return 0;
189 }
190
191 static int set_user_dscr(struct task_struct *task, unsigned long dscr)
192 {
193 task->thread.dscr = dscr;
194 task->thread.dscr_inherit = 1;
195 return 0;
196 }
197 #else
198 static int get_user_dscr(struct task_struct *task, unsigned long *data)
199 {
200 return -EIO;
201 }
202
203 static int set_user_dscr(struct task_struct *task, unsigned long dscr)
204 {
205 return -EIO;
206 }
207 #endif
208
209 /*
210 * We prevent mucking around with the reserved bits of the trap word,
211 * which are used internally by the kernel.
212 */
213 static int set_user_trap(struct task_struct *task, unsigned long trap)
214 {
215 task->thread.regs->trap = trap & 0xfff0;
216 return 0;
217 }
218
219 /*
220 * Get contents of register REGNO in task TASK.
221 */
222 int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data)
223 {
224 if ((task->thread.regs == NULL) || !data)
225 return -EIO;
226
227 if (regno == PT_MSR) {
228 *data = get_user_msr(task);
229 return 0;
230 }
231
232 if (regno == PT_DSCR)
233 return get_user_dscr(task, data);
234
235 if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long))) {
236 *data = ((unsigned long *)task->thread.regs)[regno];
237 return 0;
238 }
239
240 return -EIO;
241 }
242
243 /*
244 * Write contents of register REGNO in task TASK.
245 */
246 int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data)
247 {
248 if (task->thread.regs == NULL)
249 return -EIO;
250
251 if (regno == PT_MSR)
252 return set_user_msr(task, data);
253 if (regno == PT_TRAP)
254 return set_user_trap(task, data);
255 if (regno == PT_DSCR)
256 return set_user_dscr(task, data);
257
258 if (regno <= PT_MAX_PUT_REG) {
259 ((unsigned long *)task->thread.regs)[regno] = data;
260 return 0;
261 }
262 return -EIO;
263 }
264
265 static int gpr_get(struct task_struct *target, const struct user_regset *regset,
266 unsigned int pos, unsigned int count,
267 void *kbuf, void __user *ubuf)
268 {
269 int i, ret;
270
271 if (target->thread.regs == NULL)
272 return -EIO;
273
274 if (!FULL_REGS(target->thread.regs)) {
275 /* We have a partial register set. Fill 14-31 with bogus values */
276 for (i = 14; i < 32; i++)
277 target->thread.regs->gpr[i] = NV_REG_POISON;
278 }
279
280 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
281 target->thread.regs,
282 0, offsetof(struct pt_regs, msr));
283 if (!ret) {
284 unsigned long msr = get_user_msr(target);
285 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
286 offsetof(struct pt_regs, msr),
287 offsetof(struct pt_regs, msr) +
288 sizeof(msr));
289 }
290
291 BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
292 offsetof(struct pt_regs, msr) + sizeof(long));
293
294 if (!ret)
295 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
296 &target->thread.regs->orig_gpr3,
297 offsetof(struct pt_regs, orig_gpr3),
298 sizeof(struct pt_regs));
299 if (!ret)
300 ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
301 sizeof(struct pt_regs), -1);
302
303 return ret;
304 }
305
306 static int gpr_set(struct task_struct *target, const struct user_regset *regset,
307 unsigned int pos, unsigned int count,
308 const void *kbuf, const void __user *ubuf)
309 {
310 unsigned long reg;
311 int ret;
312
313 if (target->thread.regs == NULL)
314 return -EIO;
315
316 CHECK_FULL_REGS(target->thread.regs);
317
318 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
319 target->thread.regs,
320 0, PT_MSR * sizeof(reg));
321
322 if (!ret && count > 0) {
323 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
324 PT_MSR * sizeof(reg),
325 (PT_MSR + 1) * sizeof(reg));
326 if (!ret)
327 ret = set_user_msr(target, reg);
328 }
329
330 BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
331 offsetof(struct pt_regs, msr) + sizeof(long));
332
333 if (!ret)
334 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
335 &target->thread.regs->orig_gpr3,
336 PT_ORIG_R3 * sizeof(reg),
337 (PT_MAX_PUT_REG + 1) * sizeof(reg));
338
339 if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
340 ret = user_regset_copyin_ignore(
341 &pos, &count, &kbuf, &ubuf,
342 (PT_MAX_PUT_REG + 1) * sizeof(reg),
343 PT_TRAP * sizeof(reg));
344
345 if (!ret && count > 0) {
346 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
347 PT_TRAP * sizeof(reg),
348 (PT_TRAP + 1) * sizeof(reg));
349 if (!ret)
350 ret = set_user_trap(target, reg);
351 }
352
353 if (!ret)
354 ret = user_regset_copyin_ignore(
355 &pos, &count, &kbuf, &ubuf,
356 (PT_TRAP + 1) * sizeof(reg), -1);
357
358 return ret;
359 }
360
361 static int fpr_get(struct task_struct *target, const struct user_regset *regset,
362 unsigned int pos, unsigned int count,
363 void *kbuf, void __user *ubuf)
364 {
365 #ifdef CONFIG_VSX
366 u64 buf[33];
367 int i;
368 #endif
369 flush_fp_to_thread(target);
370
371 #ifdef CONFIG_VSX
372 /* copy to local buffer then write that out */
373 for (i = 0; i < 32 ; i++)
374 buf[i] = target->thread.TS_FPR(i);
375 buf[32] = target->thread.fp_state.fpscr;
376 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
377
378 #else
379 BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
380 offsetof(struct thread_fp_state, fpr[32]));
381
382 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
383 &target->thread.fp_state, 0, -1);
384 #endif
385 }
386
387 static int fpr_set(struct task_struct *target, const struct user_regset *regset,
388 unsigned int pos, unsigned int count,
389 const void *kbuf, const void __user *ubuf)
390 {
391 #ifdef CONFIG_VSX
392 u64 buf[33];
393 int i;
394 #endif
395 flush_fp_to_thread(target);
396
397 #ifdef CONFIG_VSX
398 /* copy to local buffer then write that out */
399 i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
400 if (i)
401 return i;
402 for (i = 0; i < 32 ; i++)
403 target->thread.TS_FPR(i) = buf[i];
404 target->thread.fp_state.fpscr = buf[32];
405 return 0;
406 #else
407 BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
408 offsetof(struct thread_fp_state, fpr[32]));
409
410 return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
411 &target->thread.fp_state, 0, -1);
412 #endif
413 }
414
415 #ifdef CONFIG_ALTIVEC
416 /*
417 * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
418 * The transfer totals 34 quadwords. Quadwords 0-31 contain the
419 * corresponding vector registers. Quadword 32 contains the vscr as the
420 * last word (offset 12) within that quadword. Quadword 33 contains the
421 * vrsave as the first word (offset 0) within the quadword.
422 *
423 * This definition of the VMX state is compatible with the current PPC32
424 * ptrace interface. This allows signal handling and ptrace to use the
425 * same structures. This also simplifies the implementation of a bi-arch
426 * (combined 32- and 64-bit) gdb.
427 */
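
/*
 * For illustration, the 34-quadword transfer described above corresponds
 * to a hypothetical user-side layout along the lines of
 *
 *	struct {
 *		vector128 vr[32];	// quadwords 0-31
 *		vector128 vscr_q;	// vscr in the last word (offset 12)
 *		vector128 vrsave_q;	// vrsave in the first word (offset 0)
 *	};
 *
 * No such struct is defined here; it merely mirrors what vr_get() and
 * vr_set() below copy in and out.
 */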
428
429 static int vr_active(struct task_struct *target,
430 const struct user_regset *regset)
431 {
432 flush_altivec_to_thread(target);
433 return target->thread.used_vr ? regset->n : 0;
434 }
435
436 static int vr_get(struct task_struct *target, const struct user_regset *regset,
437 unsigned int pos, unsigned int count,
438 void *kbuf, void __user *ubuf)
439 {
440 int ret;
441
442 flush_altivec_to_thread(target);
443
444 BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
445 offsetof(struct thread_vr_state, vr[32]));
446
447 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
448 &target->thread.vr_state, 0,
449 33 * sizeof(vector128));
450 if (!ret) {
451 /*
452 * Copy out only the low-order word of vrsave.
453 */
454 union {
455 elf_vrreg_t reg;
456 u32 word;
457 } vrsave;
458 memset(&vrsave, 0, sizeof(vrsave));
459 vrsave.word = target->thread.vrsave;
460 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
461 33 * sizeof(vector128), -1);
462 }
463
464 return ret;
465 }
466
467 static int vr_set(struct task_struct *target, const struct user_regset *regset,
468 unsigned int pos, unsigned int count,
469 const void *kbuf, const void __user *ubuf)
470 {
471 int ret;
472
473 flush_altivec_to_thread(target);
474
475 BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
476 offsetof(struct thread_vr_state, vr[32]));
477
478 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
479 &target->thread.vr_state, 0,
480 33 * sizeof(vector128));
481 if (!ret && count > 0) {
482 /*
483 * We use only the first word of vrsave.
484 */
485 union {
486 elf_vrreg_t reg;
487 u32 word;
488 } vrsave;
489 memset(&vrsave, 0, sizeof(vrsave));
490 vrsave.word = target->thread.vrsave;
491 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
492 33 * sizeof(vector128), -1);
493 if (!ret)
494 target->thread.vrsave = vrsave.word;
495 }
496
497 return ret;
498 }
499 #endif /* CONFIG_ALTIVEC */
500
501 #ifdef CONFIG_VSX
502 /*
503 * Currently, to set and get all the VSX state, you need to call the
504 * FP and VMX calls as well. This only gets/sets the low-order half of
505 * each of the first 32 128-bit VSX registers.
506 */
507
508 static int vsr_active(struct task_struct *target,
509 const struct user_regset *regset)
510 {
511 flush_vsx_to_thread(target);
512 return target->thread.used_vsr ? regset->n : 0;
513 }
514
515 static int vsr_get(struct task_struct *target, const struct user_regset *regset,
516 unsigned int pos, unsigned int count,
517 void *kbuf, void __user *ubuf)
518 {
519 u64 buf[32];
520 int ret, i;
521
522 flush_vsx_to_thread(target);
523
524 for (i = 0; i < 32 ; i++)
525 buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
526 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
527 buf, 0, 32 * sizeof(double));
528
529 return ret;
530 }
531
532 static int vsr_set(struct task_struct *target, const struct user_regset *regset,
533 unsigned int pos, unsigned int count,
534 const void *kbuf, const void __user *ubuf)
535 {
536 u64 buf[32];
537 int ret, i;
538
539 flush_vsx_to_thread(target);
540
541 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
542 buf, 0, 32 * sizeof(double));
543 for (i = 0; i < 32 ; i++)
544 target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
545
546
547 return ret;
548 }
549 #endif /* CONFIG_VSX */
550
551 #ifdef CONFIG_SPE
552
553 /*
554 * For get_evrregs/set_evrregs functions 'data' has the following layout:
555 *
556 * struct {
557 * u32 evr[32];
558 * u64 acc;
559 * u32 spefscr;
560 * }
561 */
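
/*
 * A hedged userspace sketch of reading this layout (the struct name and
 * variables here are hypothetical, and no padding is assumed):
 *
 *	struct spe_regs { u32 evr[32]; u64 acc; u32 spefscr; } sr;
 *	ptrace(PTRACE_GETEVRREGS, pid, 0, &sr);
 *
 * where 'pid' is an already-attached tracee.
 */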
562
563 static int evr_active(struct task_struct *target,
564 const struct user_regset *regset)
565 {
566 flush_spe_to_thread(target);
567 return target->thread.used_spe ? regset->n : 0;
568 }
569
570 static int evr_get(struct task_struct *target, const struct user_regset *regset,
571 unsigned int pos, unsigned int count,
572 void *kbuf, void __user *ubuf)
573 {
574 int ret;
575
576 flush_spe_to_thread(target);
577
578 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
579 &target->thread.evr,
580 0, sizeof(target->thread.evr));
581
582 BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
583 offsetof(struct thread_struct, spefscr));
584
585 if (!ret)
586 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
587 &target->thread.acc,
588 sizeof(target->thread.evr), -1);
589
590 return ret;
591 }
592
593 static int evr_set(struct task_struct *target, const struct user_regset *regset,
594 unsigned int pos, unsigned int count,
595 const void *kbuf, const void __user *ubuf)
596 {
597 int ret;
598
599 flush_spe_to_thread(target);
600
601 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
602 &target->thread.evr,
603 0, sizeof(target->thread.evr));
604
605 BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
606 offsetof(struct thread_struct, spefscr));
607
608 if (!ret)
609 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
610 &target->thread.acc,
611 sizeof(target->thread.evr), -1);
612
613 return ret;
614 }
615 #endif /* CONFIG_SPE */
616
617
618 /*
619 * These are our native regset flavors.
620 */
621 enum powerpc_regset {
622 REGSET_GPR,
623 REGSET_FPR,
624 #ifdef CONFIG_ALTIVEC
625 REGSET_VMX,
626 #endif
627 #ifdef CONFIG_VSX
628 REGSET_VSX,
629 #endif
630 #ifdef CONFIG_SPE
631 REGSET_SPE,
632 #endif
633 };
634
635 static const struct user_regset native_regsets[] = {
636 [REGSET_GPR] = {
637 .core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
638 .size = sizeof(long), .align = sizeof(long),
639 .get = gpr_get, .set = gpr_set
640 },
641 [REGSET_FPR] = {
642 .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
643 .size = sizeof(double), .align = sizeof(double),
644 .get = fpr_get, .set = fpr_set
645 },
646 #ifdef CONFIG_ALTIVEC
647 [REGSET_VMX] = {
648 .core_note_type = NT_PPC_VMX, .n = 34,
649 .size = sizeof(vector128), .align = sizeof(vector128),
650 .active = vr_active, .get = vr_get, .set = vr_set
651 },
652 #endif
653 #ifdef CONFIG_VSX
654 [REGSET_VSX] = {
655 .core_note_type = NT_PPC_VSX, .n = 32,
656 .size = sizeof(double), .align = sizeof(double),
657 .active = vsr_active, .get = vsr_get, .set = vsr_set
658 },
659 #endif
660 #ifdef CONFIG_SPE
661 [REGSET_SPE] = {
662 .core_note_type = NT_PPC_SPE, .n = 35,
663 .size = sizeof(u32), .align = sizeof(u32),
664 .active = evr_active, .get = evr_get, .set = evr_set
665 },
666 #endif
667 };
668
669 static const struct user_regset_view user_ppc_native_view = {
670 .name = UTS_MACHINE, .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
671 .regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
672 };
673
674 #ifdef CONFIG_PPC64
675 #include <linux/compat.h>
676
677 static int gpr32_get(struct task_struct *target,
678 const struct user_regset *regset,
679 unsigned int pos, unsigned int count,
680 void *kbuf, void __user *ubuf)
681 {
682 const unsigned long *regs = &target->thread.regs->gpr[0];
683 compat_ulong_t *k = kbuf;
684 compat_ulong_t __user *u = ubuf;
685 compat_ulong_t reg;
686 int i;
687
688 if (target->thread.regs == NULL)
689 return -EIO;
690
691 if (!FULL_REGS(target->thread.regs)) {
692 /* We have a partial register set. Fill 14-31 with bogus values */
693 for (i = 14; i < 32; i++)
694 target->thread.regs->gpr[i] = NV_REG_POISON;
695 }
696
697 pos /= sizeof(reg);
698 count /= sizeof(reg);
699
700 if (kbuf)
701 for (; count > 0 && pos < PT_MSR; --count)
702 *k++ = regs[pos++];
703 else
704 for (; count > 0 && pos < PT_MSR; --count)
705 if (__put_user((compat_ulong_t) regs[pos++], u++))
706 return -EFAULT;
707
708 if (count > 0 && pos == PT_MSR) {
709 reg = get_user_msr(target);
710 if (kbuf)
711 *k++ = reg;
712 else if (__put_user(reg, u++))
713 return -EFAULT;
714 ++pos;
715 --count;
716 }
717
718 if (kbuf)
719 for (; count > 0 && pos < PT_REGS_COUNT; --count)
720 *k++ = regs[pos++];
721 else
722 for (; count > 0 && pos < PT_REGS_COUNT; --count)
723 if (__put_user((compat_ulong_t) regs[pos++], u++))
724 return -EFAULT;
725
726 kbuf = k;
727 ubuf = u;
728 pos *= sizeof(reg);
729 count *= sizeof(reg);
730 return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
731 PT_REGS_COUNT * sizeof(reg), -1);
732 }
733
734 static int gpr32_set(struct task_struct *target,
735 const struct user_regset *regset,
736 unsigned int pos, unsigned int count,
737 const void *kbuf, const void __user *ubuf)
738 {
739 unsigned long *regs = &target->thread.regs->gpr[0];
740 const compat_ulong_t *k = kbuf;
741 const compat_ulong_t __user *u = ubuf;
742 compat_ulong_t reg;
743
744 if (target->thread.regs == NULL)
745 return -EIO;
746
747 CHECK_FULL_REGS(target->thread.regs);
748
749 pos /= sizeof(reg);
750 count /= sizeof(reg);
751
752 if (kbuf)
753 for (; count > 0 && pos < PT_MSR; --count)
754 regs[pos++] = *k++;
755 else
756 for (; count > 0 && pos < PT_MSR; --count) {
757 if (__get_user(reg, u++))
758 return -EFAULT;
759 regs[pos++] = reg;
760 }
761
762
763 if (count > 0 && pos == PT_MSR) {
764 if (kbuf)
765 reg = *k++;
766 else if (__get_user(reg, u++))
767 return -EFAULT;
768 set_user_msr(target, reg);
769 ++pos;
770 --count;
771 }
772
773 if (kbuf) {
774 for (; count > 0 && pos <= PT_MAX_PUT_REG; --count)
775 regs[pos++] = *k++;
776 for (; count > 0 && pos < PT_TRAP; --count, ++pos)
777 ++k;
778 } else {
779 for (; count > 0 && pos <= PT_MAX_PUT_REG; --count) {
780 if (__get_user(reg, u++))
781 return -EFAULT;
782 regs[pos++] = reg;
783 }
784 for (; count > 0 && pos < PT_TRAP; --count, ++pos)
785 if (__get_user(reg, u++))
786 return -EFAULT;
787 }
788
789 if (count > 0 && pos == PT_TRAP) {
790 if (kbuf)
791 reg = *k++;
792 else if (__get_user(reg, u++))
793 return -EFAULT;
794 set_user_trap(target, reg);
795 ++pos;
796 --count;
797 }
798
799 kbuf = k;
800 ubuf = u;
801 pos *= sizeof(reg);
802 count *= sizeof(reg);
803 return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
804 (PT_TRAP + 1) * sizeof(reg), -1);
805 }
806
807 /*
808 * These are the regset flavors matching the CONFIG_PPC32 native set.
809 */
810 static const struct user_regset compat_regsets[] = {
811 [REGSET_GPR] = {
812 .core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
813 .size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
814 .get = gpr32_get, .set = gpr32_set
815 },
816 [REGSET_FPR] = {
817 .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
818 .size = sizeof(double), .align = sizeof(double),
819 .get = fpr_get, .set = fpr_set
820 },
821 #ifdef CONFIG_ALTIVEC
822 [REGSET_VMX] = {
823 .core_note_type = NT_PPC_VMX, .n = 34,
824 .size = sizeof(vector128), .align = sizeof(vector128),
825 .active = vr_active, .get = vr_get, .set = vr_set
826 },
827 #endif
828 #ifdef CONFIG_SPE
829 [REGSET_SPE] = {
830 .core_note_type = NT_PPC_SPE, .n = 35,
831 .size = sizeof(u32), .align = sizeof(u32),
832 .active = evr_active, .get = evr_get, .set = evr_set
833 },
834 #endif
835 };
836
837 static const struct user_regset_view user_ppc_compat_view = {
838 .name = "ppc", .e_machine = EM_PPC, .ei_osabi = ELF_OSABI,
839 .regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
840 };
841 #endif /* CONFIG_PPC64 */
842
843 const struct user_regset_view *task_user_regset_view(struct task_struct *task)
844 {
845 #ifdef CONFIG_PPC64
846 if (test_tsk_thread_flag(task, TIF_32BIT))
847 return &user_ppc_compat_view;
848 #endif
849 return &user_ppc_native_view;
850 }
851
852
853 void user_enable_single_step(struct task_struct *task)
854 {
855 struct pt_regs *regs = task->thread.regs;
856
857 if (regs != NULL) {
858 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
859 task->thread.debug.dbcr0 &= ~DBCR0_BT;
860 task->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
861 regs->msr |= MSR_DE;
862 #else
863 regs->msr &= ~MSR_BE;
864 regs->msr |= MSR_SE;
865 #endif
866 }
867 set_tsk_thread_flag(task, TIF_SINGLESTEP);
868 }
869
870 void user_enable_block_step(struct task_struct *task)
871 {
872 struct pt_regs *regs = task->thread.regs;
873
874 if (regs != NULL) {
875 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
876 task->thread.debug.dbcr0 &= ~DBCR0_IC;
877 task->thread.debug.dbcr0 = DBCR0_IDM | DBCR0_BT;
878 regs->msr |= MSR_DE;
879 #else
880 regs->msr &= ~MSR_SE;
881 regs->msr |= MSR_BE;
882 #endif
883 }
884 set_tsk_thread_flag(task, TIF_SINGLESTEP);
885 }
886
887 void user_disable_single_step(struct task_struct *task)
888 {
889 struct pt_regs *regs = task->thread.regs;
890
891 if (regs != NULL) {
892 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
893 /*
894 * The logic to disable single stepping should be as
895 * simple as turning off the Instruction Complete flag.
896 * And, after doing so, if all debug flags are off, turn
897 * off DBCR0(IDM) and MSR(DE) .... Torez
898 */
899 task->thread.debug.dbcr0 &= ~(DBCR0_IC|DBCR0_BT);
900 /*
901 * Test to see if any of the DBCR_ACTIVE_EVENTS bits are set.
902 */
903 if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
904 task->thread.debug.dbcr1)) {
905 /*
906 * All debug events were off.....
907 */
908 task->thread.debug.dbcr0 &= ~DBCR0_IDM;
909 regs->msr &= ~MSR_DE;
910 }
911 #else
912 regs->msr &= ~(MSR_SE | MSR_BE);
913 #endif
914 }
915 clear_tsk_thread_flag(task, TIF_SINGLESTEP);
916 }
917
918 #ifdef CONFIG_HAVE_HW_BREAKPOINT
919 void ptrace_triggered(struct perf_event *bp,
920 struct perf_sample_data *data, struct pt_regs *regs)
921 {
922 struct perf_event_attr attr;
923
924 /*
925 * Disable the breakpoint request here since ptrace has defined a
926 * one-shot behaviour for breakpoint exceptions in PPC64.
927 * The SIGTRAP signal is generated automatically for us in do_dabr().
928 * We don't have to do anything about that here.
929 */
930 attr = bp->attr;
931 attr.disabled = true;
932 modify_user_hw_breakpoint(bp, &attr);
933 }
934 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
935
936 static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
937 unsigned long data)
938 {
939 #ifdef CONFIG_HAVE_HW_BREAKPOINT
940 int ret;
941 struct thread_struct *thread = &(task->thread);
942 struct perf_event *bp;
943 struct perf_event_attr attr;
944 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
945 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
946 struct arch_hw_breakpoint hw_brk;
947 #endif
948
949 /* For ppc64 we support one DABR and no IABRs at the moment.
950 * For embedded processors we support one DAC and no IACs at the
951 * moment.
952 */
953 if (addr > 0)
954 return -EINVAL;
955
956 /* The bottom 3 bits in dabr are flags */
957 if ((data & ~0x7UL) >= TASK_SIZE)
958 return -EIO;
959
960 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
961 /* For processors using DABR (i.e. 970), the bottom 3 bits are flags.
962 * It was assumed, on previous implementations, that 3 bits were
963 * passed together with the data address, fitting the design of the
964 * DABR register, as follows:
965 *
966 * bit 0: Read flag
967 * bit 1: Write flag
968 * bit 2: Breakpoint translation
969 *
970 * Thus, we use them here as such.
971 */
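
/*
 * Illustrative tracer-side sketch of the encoding above ('addr' and 'pid'
 * are hypothetical): requesting a write watchpoint means passing the
 * translation and write bits in the low-order flags, e.g.
 *
 *	ptrace(PTRACE_SET_DEBUGREG, pid, 0,
 *	       (addr & ~0x7UL) | 0x4 | 0x2);	// translate | write
 */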
972
973 /* Ensure breakpoint translation bit is set */
974 if (data && !(data & HW_BRK_TYPE_TRANSLATE))
975 return -EIO;
976 hw_brk.address = data & (~HW_BRK_TYPE_DABR);
977 hw_brk.type = (data & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL;
978 hw_brk.len = 8;
979 #ifdef CONFIG_HAVE_HW_BREAKPOINT
980 bp = thread->ptrace_bps[0];
981 if ((!data) || !(hw_brk.type & HW_BRK_TYPE_RDWR)) {
982 if (bp) {
983 unregister_hw_breakpoint(bp);
984 thread->ptrace_bps[0] = NULL;
985 }
986 return 0;
987 }
988 if (bp) {
989 attr = bp->attr;
990 attr.bp_addr = hw_brk.address;
991 arch_bp_generic_fields(hw_brk.type, &attr.bp_type);
992
993 /* Enable breakpoint */
994 attr.disabled = false;
995
996 ret = modify_user_hw_breakpoint(bp, &attr);
997 if (ret) {
998 return ret;
999 }
1000 thread->ptrace_bps[0] = bp;
1001 thread->hw_brk = hw_brk;
1002 return 0;
1003 }
1004
1005 /* Create a new breakpoint request if one doesn't exist already */
1006 hw_breakpoint_init(&attr);
1007 attr.bp_addr = hw_brk.address;
1008 arch_bp_generic_fields(hw_brk.type,
1009 &attr.bp_type);
1010
1011 thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
1012 ptrace_triggered, NULL, task);
1013 if (IS_ERR(bp)) {
1014 thread->ptrace_bps[0] = NULL;
1015 return PTR_ERR(bp);
1016 }
1017
1018 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
1019 task->thread.hw_brk = hw_brk;
1020 #else /* CONFIG_PPC_ADV_DEBUG_REGS */
1021 /* As described above, it was assumed 3 bits were passed with the data
1022 * address, but we will assume only the mode bits will be passed
1023 * so as not to cause alignment restrictions for DAC-based processors.
1024 */
1025
1026 /* DAC's hold the whole address without any mode flags */
1027 task->thread.debug.dac1 = data & ~0x3UL;
1028
1029 if (task->thread.debug.dac1 == 0) {
1030 dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
1031 if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
1032 task->thread.debug.dbcr1)) {
1033 task->thread.regs->msr &= ~MSR_DE;
1034 task->thread.debug.dbcr0 &= ~DBCR0_IDM;
1035 }
1036 return 0;
1037 }
1038
1039 /* Read or Write bits must be set */
1040
1041 if (!(data & 0x3UL))
1042 return -EINVAL;
1043
1044 /* Set the Internal Debugging flag (IDM bit 1) for the DBCR0
1045 register */
1046 task->thread.debug.dbcr0 |= DBCR0_IDM;
1047
1048 /* Check for write and read flags and set DBCR0
1049 accordingly */
1050 dbcr_dac(task) &= ~(DBCR_DAC1R|DBCR_DAC1W);
1051 if (data & 0x1UL)
1052 dbcr_dac(task) |= DBCR_DAC1R;
1053 if (data & 0x2UL)
1054 dbcr_dac(task) |= DBCR_DAC1W;
1055 task->thread.regs->msr |= MSR_DE;
1056 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
1057 return 0;
1058 }
1059
1060 /*
1061 * Called by kernel/ptrace.c when detaching..
1062 *
1063 * Make sure single step bits etc are not set.
1064 */
1065 void ptrace_disable(struct task_struct *child)
1066 {
1067 /* make sure the single step bit is not set. */
1068 user_disable_single_step(child);
1069 }
1070
1071 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1072 static long set_instruction_bp(struct task_struct *child,
1073 struct ppc_hw_breakpoint *bp_info)
1074 {
1075 int slot;
1076 int slot1_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC1) != 0);
1077 int slot2_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC2) != 0);
1078 int slot3_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC3) != 0);
1079 int slot4_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC4) != 0);
1080
1081 if (dbcr_iac_range(child) & DBCR_IAC12MODE)
1082 slot2_in_use = 1;
1083 if (dbcr_iac_range(child) & DBCR_IAC34MODE)
1084 slot4_in_use = 1;
1085
1086 if (bp_info->addr >= TASK_SIZE)
1087 return -EIO;
1088
1089 if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) {
1090
1091 /* Make sure range is valid. */
1092 if (bp_info->addr2 >= TASK_SIZE)
1093 return -EIO;
1094
1095 /* We need a pair of IAC registers */
1096 if ((!slot1_in_use) && (!slot2_in_use)) {
1097 slot = 1;
1098 child->thread.debug.iac1 = bp_info->addr;
1099 child->thread.debug.iac2 = bp_info->addr2;
1100 child->thread.debug.dbcr0 |= DBCR0_IAC1;
1101 if (bp_info->addr_mode ==
1102 PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
1103 dbcr_iac_range(child) |= DBCR_IAC12X;
1104 else
1105 dbcr_iac_range(child) |= DBCR_IAC12I;
1106 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
1107 } else if ((!slot3_in_use) && (!slot4_in_use)) {
1108 slot = 3;
1109 child->thread.debug.iac3 = bp_info->addr;
1110 child->thread.debug.iac4 = bp_info->addr2;
1111 child->thread.debug.dbcr0 |= DBCR0_IAC3;
1112 if (bp_info->addr_mode ==
1113 PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
1114 dbcr_iac_range(child) |= DBCR_IAC34X;
1115 else
1116 dbcr_iac_range(child) |= DBCR_IAC34I;
1117 #endif
1118 } else
1119 return -ENOSPC;
1120 } else {
1121 /* We only need one. If possible leave a pair free in
1122 * case a range is needed later
1123 */
1124 if (!slot1_in_use) {
1125 /*
1126 * Don't use iac1 if iac1-iac2 are free and either
1127 * iac3 or iac4 (but not both) are free
1128 */
1129 if (slot2_in_use || (slot3_in_use == slot4_in_use)) {
1130 slot = 1;
1131 child->thread.debug.iac1 = bp_info->addr;
1132 child->thread.debug.dbcr0 |= DBCR0_IAC1;
1133 goto out;
1134 }
1135 }
1136 if (!slot2_in_use) {
1137 slot = 2;
1138 child->thread.debug.iac2 = bp_info->addr;
1139 child->thread.debug.dbcr0 |= DBCR0_IAC2;
1140 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
1141 } else if (!slot3_in_use) {
1142 slot = 3;
1143 child->thread.debug.iac3 = bp_info->addr;
1144 child->thread.debug.dbcr0 |= DBCR0_IAC3;
1145 } else if (!slot4_in_use) {
1146 slot = 4;
1147 child->thread.debug.iac4 = bp_info->addr;
1148 child->thread.debug.dbcr0 |= DBCR0_IAC4;
1149 #endif
1150 } else
1151 return -ENOSPC;
1152 }
1153 out:
1154 child->thread.debug.dbcr0 |= DBCR0_IDM;
1155 child->thread.regs->msr |= MSR_DE;
1156
1157 return slot;
1158 }
1159
1160 static int del_instruction_bp(struct task_struct *child, int slot)
1161 {
1162 switch (slot) {
1163 case 1:
1164 if ((child->thread.debug.dbcr0 & DBCR0_IAC1) == 0)
1165 return -ENOENT;
1166
1167 if (dbcr_iac_range(child) & DBCR_IAC12MODE) {
1168 /* address range - clear slots 1 & 2 */
1169 child->thread.debug.iac2 = 0;
1170 dbcr_iac_range(child) &= ~DBCR_IAC12MODE;
1171 }
1172 child->thread.debug.iac1 = 0;
1173 child->thread.debug.dbcr0 &= ~DBCR0_IAC1;
1174 break;
1175 case 2:
1176 if ((child->thread.debug.dbcr0 & DBCR0_IAC2) == 0)
1177 return -ENOENT;
1178
1179 if (dbcr_iac_range(child) & DBCR_IAC12MODE)
1180 /* used in a range */
1181 return -EINVAL;
1182 child->thread.debug.iac2 = 0;
1183 child->thread.debug.dbcr0 &= ~DBCR0_IAC2;
1184 break;
1185 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
1186 case 3:
1187 if ((child->thread.debug.dbcr0 & DBCR0_IAC3) == 0)
1188 return -ENOENT;
1189
1190 if (dbcr_iac_range(child) & DBCR_IAC34MODE) {
1191 /* address range - clear slots 3 & 4 */
1192 child->thread.debug.iac4 = 0;
1193 dbcr_iac_range(child) &= ~DBCR_IAC34MODE;
1194 }
1195 child->thread.debug.iac3 = 0;
1196 child->thread.debug.dbcr0 &= ~DBCR0_IAC3;
1197 break;
1198 case 4:
1199 if ((child->thread.debug.dbcr0 & DBCR0_IAC4) == 0)
1200 return -ENOENT;
1201
1202 if (dbcr_iac_range(child) & DBCR_IAC34MODE)
1203 /* Used in a range */
1204 return -EINVAL;
1205 child->thread.debug.iac4 = 0;
1206 child->thread.debug.dbcr0 &= ~DBCR0_IAC4;
1207 break;
1208 #endif
1209 default:
1210 return -EINVAL;
1211 }
1212 return 0;
1213 }
1214
1215 static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
1216 {
1217 int byte_enable =
1218 (bp_info->condition_mode >> PPC_BREAKPOINT_CONDITION_BE_SHIFT)
1219 & 0xf;
1220 int condition_mode =
1221 bp_info->condition_mode & PPC_BREAKPOINT_CONDITION_MODE;
1222 int slot;
1223
1224 if (byte_enable && (condition_mode == 0))
1225 return -EINVAL;
1226
1227 if (bp_info->addr >= TASK_SIZE)
1228 return -EIO;
1229
1230 if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0) {
1231 slot = 1;
1232 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
1233 dbcr_dac(child) |= DBCR_DAC1R;
1234 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
1235 dbcr_dac(child) |= DBCR_DAC1W;
1236 child->thread.debug.dac1 = (unsigned long)bp_info->addr;
1237 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
1238 if (byte_enable) {
1239 child->thread.debug.dvc1 =
1240 (unsigned long)bp_info->condition_value;
1241 child->thread.debug.dbcr2 |=
1242 ((byte_enable << DBCR2_DVC1BE_SHIFT) |
1243 (condition_mode << DBCR2_DVC1M_SHIFT));
1244 }
1245 #endif
1246 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
1247 } else if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
1248 /* Both dac1 and dac2 are part of a range */
1249 return -ENOSPC;
1250 #endif
1251 } else if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0) {
1252 slot = 2;
1253 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
1254 dbcr_dac(child) |= DBCR_DAC2R;
1255 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
1256 dbcr_dac(child) |= DBCR_DAC2W;
1257 child->thread.debug.dac2 = (unsigned long)bp_info->addr;
1258 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
1259 if (byte_enable) {
1260 child->thread.debug.dvc2 =
1261 (unsigned long)bp_info->condition_value;
1262 child->thread.debug.dbcr2 |=
1263 ((byte_enable << DBCR2_DVC2BE_SHIFT) |
1264 (condition_mode << DBCR2_DVC2M_SHIFT));
1265 }
1266 #endif
1267 } else
1268 return -ENOSPC;
1269 child->thread.debug.dbcr0 |= DBCR0_IDM;
1270 child->thread.regs->msr |= MSR_DE;
1271
1272 return slot + 4;
1273 }
1274
1275 static int del_dac(struct task_struct *child, int slot)
1276 {
1277 if (slot == 1) {
1278 if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0)
1279 return -ENOENT;
1280
1281 child->thread.debug.dac1 = 0;
1282 dbcr_dac(child) &= ~(DBCR_DAC1R | DBCR_DAC1W);
1283 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
1284 if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
1285 child->thread.debug.dac2 = 0;
1286 child->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
1287 }
1288 child->thread.debug.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE);
1289 #endif
1290 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
1291 child->thread.debug.dvc1 = 0;
1292 #endif
1293 } else if (slot == 2) {
1294 if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0)
1295 return -ENOENT;
1296
1297 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
1298 if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE)
1299 /* Part of a range */
1300 return -EINVAL;
1301 child->thread.debug.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE);
1302 #endif
1303 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
1304 child->thread.debug.dvc2 = 0;
1305 #endif
1306 child->thread.debug.dac2 = 0;
1307 dbcr_dac(child) &= ~(DBCR_DAC2R | DBCR_DAC2W);
1308 } else
1309 return -EINVAL;
1310
1311 return 0;
1312 }
1313 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
1314
1315 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
1316 static int set_dac_range(struct task_struct *child,
1317 struct ppc_hw_breakpoint *bp_info)
1318 {
1319 int mode = bp_info->addr_mode & PPC_BREAKPOINT_MODE_MASK;
1320
1321 /* We don't allow range watchpoints to be used with DVC */
1322 if (bp_info->condition_mode)
1323 return -EINVAL;
1324
1325 /*
1326 * Best effort to verify the address range. The user/supervisor bits
1327 * prevent trapping in kernel space, but let's fail on an obvious bad
1328 * range. The simple test on the mask is not fool-proof, and any
1329 * exclusive range will spill over into kernel space.
1330 */
1331 if (bp_info->addr >= TASK_SIZE)
1332 return -EIO;
1333 if (mode == PPC_BREAKPOINT_MODE_MASK) {
1334 /*
1335 * dac2 is a bitmask. Don't allow a mask that makes a
1336 * kernel space address from a valid dac1 value
1337 */
1338 if (~((unsigned long)bp_info->addr2) >= TASK_SIZE)
1339 return -EIO;
1340 } else {
1341 /*
1342 * For range breakpoints, addr2 must also be a valid address
1343 */
1344 if (bp_info->addr2 >= TASK_SIZE)
1345 return -EIO;
1346 }
1347
1348 if (child->thread.debug.dbcr0 &
1349 (DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W))
1350 return -ENOSPC;
1351
1352 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
1353 child->thread.debug.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM);
1354 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
1355 child->thread.debug.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM);
1356 child->thread.debug.dac1 = bp_info->addr;
1357 child->thread.debug.dac2 = bp_info->addr2;
1358 if (mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
1359 child->thread.debug.dbcr2 |= DBCR2_DAC12M;
1360 else if (mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
1361 child->thread.debug.dbcr2 |= DBCR2_DAC12MX;
1362 else /* PPC_BREAKPOINT_MODE_MASK */
1363 child->thread.debug.dbcr2 |= DBCR2_DAC12MM;
1364 child->thread.regs->msr |= MSR_DE;
1365
1366 return 5;
1367 }
1368 #endif /* CONFIG_PPC_ADV_DEBUG_DAC_RANGE */
1369
1370 static long ppc_set_hwdebug(struct task_struct *child,
1371 struct ppc_hw_breakpoint *bp_info)
1372 {
1373 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1374 int len = 0;
1375 struct thread_struct *thread = &(child->thread);
1376 struct perf_event *bp;
1377 struct perf_event_attr attr;
1378 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
1379 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
1380 struct arch_hw_breakpoint brk;
1381 #endif
1382
1383 if (bp_info->version != 1)
1384 return -ENOTSUPP;
1385 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1386 /*
1387 * Check for invalid flags and combinations
1388 */
1389 if ((bp_info->trigger_type == 0) ||
1390 (bp_info->trigger_type & ~(PPC_BREAKPOINT_TRIGGER_EXECUTE |
1391 PPC_BREAKPOINT_TRIGGER_RW)) ||
1392 (bp_info->addr_mode & ~PPC_BREAKPOINT_MODE_MASK) ||
1393 (bp_info->condition_mode &
1394 ~(PPC_BREAKPOINT_CONDITION_MODE |
1395 PPC_BREAKPOINT_CONDITION_BE_ALL)))
1396 return -EINVAL;
1397 #if CONFIG_PPC_ADV_DEBUG_DVCS == 0
1398 if (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
1399 return -EINVAL;
1400 #endif
1401
1402 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_EXECUTE) {
1403 if ((bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_EXECUTE) ||
1404 (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE))
1405 return -EINVAL;
1406 return set_instruction_bp(child, bp_info);
1407 }
1408 if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
1409 return set_dac(child, bp_info);
1410
1411 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
1412 return set_dac_range(child, bp_info);
1413 #else
1414 return -EINVAL;
1415 #endif
1416 #else /* !CONFIG_PPC_ADV_DEBUG_REGS */
1417 /*
1418 * We only support one data breakpoint
1419 */
1420 if ((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0 ||
1421 (bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0 ||
1422 bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
1423 return -EINVAL;
1424
1425 if ((unsigned long)bp_info->addr >= TASK_SIZE)
1426 return -EIO;
1427
1428 brk.address = bp_info->addr & ~7UL;
1429 brk.type = HW_BRK_TYPE_TRANSLATE;
1430 brk.len = 8;
1431 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
1432 brk.type |= HW_BRK_TYPE_READ;
1433 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
1434 brk.type |= HW_BRK_TYPE_WRITE;
1435 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1436 /*
1437 * Check if the request is for 'range' breakpoints. We can
1438 * support it if range < 8 bytes.
1439 */
1440 if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
1441 len = bp_info->addr2 - bp_info->addr;
1442 else if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
1443 len = 1;
1444 else
1445 return -EINVAL;
1446 bp = thread->ptrace_bps[0];
1447 if (bp)
1448 return -ENOSPC;
1449
1450 /* Create a new breakpoint request if one doesn't exist already */
1451 hw_breakpoint_init(&attr);
1452 attr.bp_addr = (unsigned long)bp_info->addr & ~HW_BREAKPOINT_ALIGN;
1453 attr.bp_len = len;
1454 arch_bp_generic_fields(brk.type, &attr.bp_type);
1455
1456 thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
1457 ptrace_triggered, NULL, child);
1458 if (IS_ERR(bp)) {
1459 thread->ptrace_bps[0] = NULL;
1460 return PTR_ERR(bp);
1461 }
1462
1463 return 1;
1464 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
1465
1466 if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT)
1467 return -EINVAL;
1468
1469 if (child->thread.hw_brk.address)
1470 return -ENOSPC;
1471
1472 child->thread.hw_brk = brk;
1473
1474 return 1;
1475 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
1476 }
1477
1478 static long ppc_del_hwdebug(struct task_struct *child, long data)
1479 {
1480 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1481 int ret = 0;
1482 struct thread_struct *thread = &(child->thread);
1483 struct perf_event *bp;
1484 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
1485 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1486 int rc;
1487
1488 if (data <= 4)
1489 rc = del_instruction_bp(child, (int)data);
1490 else
1491 rc = del_dac(child, (int)data - 4);
1492
1493 if (!rc) {
1494 if (!DBCR_ACTIVE_EVENTS(child->thread.debug.dbcr0,
1495 child->thread.debug.dbcr1)) {
1496 child->thread.debug.dbcr0 &= ~DBCR0_IDM;
1497 child->thread.regs->msr &= ~MSR_DE;
1498 }
1499 }
1500 return rc;
1501 #else
1502 if (data != 1)
1503 return -EINVAL;
1504
1505 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1506 bp = thread->ptrace_bps[0];
1507 if (bp) {
1508 unregister_hw_breakpoint(bp);
1509 thread->ptrace_bps[0] = NULL;
1510 } else
1511 ret = -ENOENT;
1512 return ret;
1513 #else /* CONFIG_HAVE_HW_BREAKPOINT */
1514 if (child->thread.hw_brk.address == 0)
1515 return -ENOENT;
1516
1517 child->thread.hw_brk.address = 0;
1518 child->thread.hw_brk.type = 0;
1519 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
1520
1521 return 0;
1522 #endif
1523 }
1524
1525 long arch_ptrace(struct task_struct *child, long request,
1526 unsigned long addr, unsigned long data)
1527 {
1528 int ret = -EPERM;
1529 void __user *datavp = (void __user *) data;
1530 unsigned long __user *datalp = datavp;
1531
1532 switch (request) {
1533 /* read the word at location addr in the USER area. */
1534 case PTRACE_PEEKUSR: {
1535 unsigned long index, tmp;
1536
1537 ret = -EIO;
1538 /* convert to index and check */
1539 #ifdef CONFIG_PPC32
1540 index = addr >> 2;
1541 if ((addr & 3) || (index > PT_FPSCR)
1542 || (child->thread.regs == NULL))
1543 #else
1544 index = addr >> 3;
1545 if ((addr & 7) || (index > PT_FPSCR))
1546 #endif
1547 break;
1548
1549 CHECK_FULL_REGS(child->thread.regs);
1550 if (index < PT_FPR0) {
1551 ret = ptrace_get_reg(child, (int) index, &tmp);
1552 if (ret)
1553 break;
1554 } else {
1555 unsigned int fpidx = index - PT_FPR0;
1556
1557 flush_fp_to_thread(child);
1558 if (fpidx < (PT_FPSCR - PT_FPR0))
1559 memcpy(&tmp, &child->thread.TS_FPR(fpidx),
1560 sizeof(long));
1561 else
1562 tmp = child->thread.fp_state.fpscr;
1563 }
1564 ret = put_user(tmp, datalp);
1565 break;
1566 }
1567
1568 /* write the word at location addr in the USER area */
1569 case PTRACE_POKEUSR: {
1570 unsigned long index;
1571
1572 ret = -EIO;
1573 /* convert to index and check */
1574 #ifdef CONFIG_PPC32
1575 index = addr >> 2;
1576 if ((addr & 3) || (index > PT_FPSCR)
1577 || (child->thread.regs == NULL))
1578 #else
1579 index = addr >> 3;
1580 if ((addr & 7) || (index > PT_FPSCR))
1581 #endif
1582 break;
1583
1584 CHECK_FULL_REGS(child->thread.regs);
1585 if (index < PT_FPR0) {
1586 ret = ptrace_put_reg(child, index, data);
1587 } else {
1588 unsigned int fpidx = index - PT_FPR0;
1589
1590 flush_fp_to_thread(child);
1591 if (fpidx < (PT_FPSCR - PT_FPR0))
1592 memcpy(&child->thread.TS_FPR(fpidx), &data,
1593 sizeof(long));
1594 else
1595 child->thread.fp_state.fpscr = data;
1596 ret = 0;
1597 }
1598 break;
1599 }
1600
1601 case PPC_PTRACE_GETHWDBGINFO: {
1602 struct ppc_debug_info dbginfo;
1603
1604 dbginfo.version = 1;
1605 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1606 dbginfo.num_instruction_bps = CONFIG_PPC_ADV_DEBUG_IACS;
1607 dbginfo.num_data_bps = CONFIG_PPC_ADV_DEBUG_DACS;
1608 dbginfo.num_condition_regs = CONFIG_PPC_ADV_DEBUG_DVCS;
1609 dbginfo.data_bp_alignment = 4;
1610 dbginfo.sizeof_condition = 4;
1611 dbginfo.features = PPC_DEBUG_FEATURE_INSN_BP_RANGE |
1612 PPC_DEBUG_FEATURE_INSN_BP_MASK;
1613 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
1614 dbginfo.features |=
1615 PPC_DEBUG_FEATURE_DATA_BP_RANGE |
1616 PPC_DEBUG_FEATURE_DATA_BP_MASK;
1617 #endif
1618 #else /* !CONFIG_PPC_ADV_DEBUG_REGS */
1619 dbginfo.num_instruction_bps = 0;
1620 dbginfo.num_data_bps = 1;
1621 dbginfo.num_condition_regs = 0;
1622 #ifdef CONFIG_PPC64
1623 dbginfo.data_bp_alignment = 8;
1624 #else
1625 dbginfo.data_bp_alignment = 4;
1626 #endif
1627 dbginfo.sizeof_condition = 0;
1628 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1629 dbginfo.features = PPC_DEBUG_FEATURE_DATA_BP_RANGE;
1630 if (cpu_has_feature(CPU_FTR_DAWR))
1631 dbginfo.features |= PPC_DEBUG_FEATURE_DATA_BP_DAWR;
1632 #else
1633 dbginfo.features = 0;
1634 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
1635 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
1636
1637 if (!access_ok(VERIFY_WRITE, datavp,
1638 sizeof(struct ppc_debug_info)))
1639 return -EFAULT;
1640 ret = __copy_to_user(datavp, &dbginfo,
1641 sizeof(struct ppc_debug_info)) ?
1642 -EFAULT : 0;
1643 break;
1644 }
1645
1646 case PPC_PTRACE_SETHWDEBUG: {
1647 struct ppc_hw_breakpoint bp_info;
1648
1649 if (!access_ok(VERIFY_READ, datavp,
1650 sizeof(struct ppc_hw_breakpoint)))
1651 return -EFAULT;
1652 ret = __copy_from_user(&bp_info, datavp,
1653 sizeof(struct ppc_hw_breakpoint)) ?
1654 -EFAULT : 0;
1655 if (!ret)
1656 ret = ppc_set_hwdebug(child, &bp_info);
1657 break;
1658 }
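
/*
 * Illustrative userspace sketch (values hypothetical): setting a write
 * watchpoint through this interface might look like
 *
 *	struct ppc_hw_breakpoint bp = {
 *		.version = 1,
 *		.trigger_type = PPC_BREAKPOINT_TRIGGER_WRITE,
 *		.addr_mode = PPC_BREAKPOINT_MODE_EXACT,
 *		.condition_mode = PPC_BREAKPOINT_CONDITION_NONE,
 *		.addr = watch_addr,
 *	};
 *	int slot = ptrace(PPC_PTRACE_SETHWDEBUG, pid, 0, &bp);
 *
 * and the returned slot number is what PPC_PTRACE_DELHWDEBUG below
 * expects in 'data'.
 */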
1659
1660 case PPC_PTRACE_DELHWDEBUG: {
1661 ret = ppc_del_hwdebug(child, data);
1662 break;
1663 }
1664
1665 case PTRACE_GET_DEBUGREG: {
1666 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
1667 unsigned long dabr_fake;
1668 #endif
1669 ret = -EINVAL;
1670 /* We only support one DABR and no IABRs at the moment */
1671 if (addr > 0)
1672 break;
1673 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1674 ret = put_user(child->thread.debug.dac1, datalp);
1675 #else
1676 dabr_fake = ((child->thread.hw_brk.address & (~HW_BRK_TYPE_DABR)) |
1677 (child->thread.hw_brk.type & HW_BRK_TYPE_DABR));
1678 ret = put_user(dabr_fake, datalp);
1679 #endif
1680 break;
1681 }
1682
1683 case PTRACE_SET_DEBUGREG:
1684 ret = ptrace_set_debugreg(child, addr, data);
1685 break;
1686
1687 #ifdef CONFIG_PPC64
1688 case PTRACE_GETREGS64:
1689 #endif
1690 case PTRACE_GETREGS: /* Get all pt_regs from the child. */
1691 return copy_regset_to_user(child, &user_ppc_native_view,
1692 REGSET_GPR,
1693 0, sizeof(struct pt_regs),
1694 datavp);
1695
1696 #ifdef CONFIG_PPC64
1697 case PTRACE_SETREGS64:
1698 #endif
1699 case PTRACE_SETREGS: /* Set all gp regs in the child. */
1700 return copy_regset_from_user(child, &user_ppc_native_view,
1701 REGSET_GPR,
1702 0, sizeof(struct pt_regs),
1703 datavp);
1704
1705 case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */
1706 return copy_regset_to_user(child, &user_ppc_native_view,
1707 REGSET_FPR,
1708 0, sizeof(elf_fpregset_t),
1709 datavp);
1710
1711 case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */
1712 return copy_regset_from_user(child, &user_ppc_native_view,
1713 REGSET_FPR,
1714 0, sizeof(elf_fpregset_t),
1715 datavp);
1716
1717 #ifdef CONFIG_ALTIVEC
1718 case PTRACE_GETVRREGS:
1719 return copy_regset_to_user(child, &user_ppc_native_view,
1720 REGSET_VMX,
1721 0, (33 * sizeof(vector128) +
1722 sizeof(u32)),
1723 datavp);
1724
1725 case PTRACE_SETVRREGS:
1726 return copy_regset_from_user(child, &user_ppc_native_view,
1727 REGSET_VMX,
1728 0, (33 * sizeof(vector128) +
1729 sizeof(u32)),
1730 datavp);
1731 #endif
1732 #ifdef CONFIG_VSX
1733 case PTRACE_GETVSRREGS:
1734 return copy_regset_to_user(child, &user_ppc_native_view,
1735 REGSET_VSX,
1736 0, 32 * sizeof(double),
1737 datavp);
1738
1739 case PTRACE_SETVSRREGS:
1740 return copy_regset_from_user(child, &user_ppc_native_view,
1741 REGSET_VSX,
1742 0, 32 * sizeof(double),
1743 datavp);
1744 #endif
1745 #ifdef CONFIG_SPE
1746 case PTRACE_GETEVRREGS:
1747 /* Get the child spe register state. */
1748 return copy_regset_to_user(child, &user_ppc_native_view,
1749 REGSET_SPE, 0, 35 * sizeof(u32),
1750 datavp);
1751
1752 case PTRACE_SETEVRREGS:
1753 /* Set the child spe register state. */
1754 return copy_regset_from_user(child, &user_ppc_native_view,
1755 REGSET_SPE, 0, 35 * sizeof(u32),
1756 datavp);
1757 #endif
1758
1759 default:
1760 ret = ptrace_request(child, request, addr, data);
1761 break;
1762 }
1763 return ret;
1764 }
1765
1766 #ifdef CONFIG_SECCOMP
1767 static int do_seccomp(struct pt_regs *regs)
1768 {
1769 if (!test_thread_flag(TIF_SECCOMP))
1770 return 0;
1771
1772 /*
1773 * The ABI we present to seccomp tracers is that r3 contains
1774 * the syscall return value and orig_gpr3 contains the first
1775 * syscall parameter. This is different to the ptrace ABI where
1776 * both r3 and orig_gpr3 contain the first syscall parameter.
1777 */
1778 regs->gpr[3] = -ENOSYS;
1779
1780 /*
1781 * We use the __ version here because we have already checked
1782 * TIF_SECCOMP. If this fails, there is nothing left to do, we
1783 * have already loaded -ENOSYS into r3, or seccomp has put
1784 * something else in r3 (via SECCOMP_RET_ERRNO/TRACE).
1785 */
1786 if (__secure_computing())
1787 return -1;
1788
1789 /*
1790 * The syscall was allowed by seccomp, restore the register
1791 * state to what ptrace and audit expect.
1792 * Note that we use orig_gpr3, which means a seccomp tracer can
1793 * modify the first syscall parameter (in orig_gpr3) and also
1794 * allow the syscall to proceed.
1795 */
1796 regs->gpr[3] = regs->orig_gpr3;
1797
1798 return 0;
1799 }
1800 #else
1801 static inline int do_seccomp(struct pt_regs *regs) { return 0; }
1802 #endif /* CONFIG_SECCOMP */
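
/*
 * Hedged tracer-side illustration ('pid' hypothetical): under the ABI
 * described in do_seccomp() above, a tracer stopped at a seccomp trace
 * event reads the first syscall argument from orig_gpr3 rather than r3:
 *
 *	unsigned long arg0 = ptrace(PTRACE_PEEKUSER, pid,
 *				    PT_ORIG_R3 * sizeof(unsigned long), 0);
 */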
1803
1804 /**
1805 * do_syscall_trace_enter() - Do syscall tracing on kernel entry.
1806 * @regs: the pt_regs of the task to trace (current)
1807 *
1808 * Performs various types of tracing on syscall entry. This includes seccomp,
1809 * ptrace, syscall tracepoints and audit.
1810 *
1811 * The pt_regs are potentially visible to userspace via ptrace, so their
1812 * contents are ABI.
1813 *
1814 * One or more of the tracers may modify the contents of pt_regs, in particular
1815 * to modify arguments or even the syscall number itself.
1816 *
1817 * It's also possible that a tracer can choose to reject the system call. In
1818 * that case this function will return an illegal syscall number, and will put
1819 * an appropriate return value in regs->r3.
1820 *
1821 * Return: the (possibly changed) syscall number.
1822 */
1823 long do_syscall_trace_enter(struct pt_regs *regs)
1824 {
1825 bool abort = false;
1826
1827 user_exit();
1828
1829 if (do_seccomp(regs))
1830 return -1;
1831
1832 if (test_thread_flag(TIF_SYSCALL_TRACE)) {
1833 /*
1834 * The tracer may decide to abort the syscall; if so, tracehook
1835 * will return !0. Note that the tracer may also just change
1836 * regs->gpr[0] to an invalid syscall number, that is handled
1837 * below on the exit path.
1838 */
1839 abort = tracehook_report_syscall_entry(regs) != 0;
1840 }
1841
1842 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
1843 trace_sys_enter(regs, regs->gpr[0]);
1844
1845 #ifdef CONFIG_PPC64
1846 if (!is_32bit_task())
1847 audit_syscall_entry(regs->gpr[0], regs->gpr[3], regs->gpr[4],
1848 regs->gpr[5], regs->gpr[6]);
1849 else
1850 #endif
1851 audit_syscall_entry(regs->gpr[0],
1852 regs->gpr[3] & 0xffffffff,
1853 regs->gpr[4] & 0xffffffff,
1854 regs->gpr[5] & 0xffffffff,
1855 regs->gpr[6] & 0xffffffff);
1856
1857 if (abort || regs->gpr[0] >= NR_syscalls) {
1858 /*
1859 * If we are aborting explicitly, or if the syscall number is
1860 * now invalid, set the return value to -ENOSYS.
1861 */
1862 regs->gpr[3] = -ENOSYS;
1863 return -1;
1864 }
1865
1866 /* Return the possibly modified but valid syscall number */
1867 return regs->gpr[0];
1868 }
1869
1870 void do_syscall_trace_leave(struct pt_regs *regs)
1871 {
1872 int step;
1873
1874 audit_syscall_exit(regs);
1875
1876 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
1877 trace_sys_exit(regs, regs->result);
1878
1879 step = test_thread_flag(TIF_SINGLESTEP);
1880 if (step || test_thread_flag(TIF_SYSCALL_TRACE))
1881 tracehook_report_syscall_exit(regs, step);
1882
1883 user_enter();
1884 }