2 * Based on arch/arm/kernel/ptrace.c
5 * edited by Linus Torvalds
6 * ARM modifications Copyright (C) 2000 Russell King
7 * Copyright (C) 2012 ARM Ltd.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program. If not, see <http://www.gnu.org/licenses/>.
22 #include <linux/audit.h>
23 #include <linux/compat.h>
24 #include <linux/kernel.h>
25 #include <linux/sched.h>
27 #include <linux/smp.h>
28 #include <linux/ptrace.h>
29 #include <linux/user.h>
30 #include <linux/seccomp.h>
31 #include <linux/security.h>
32 #include <linux/init.h>
33 #include <linux/signal.h>
34 #include <linux/uaccess.h>
35 #include <linux/perf_event.h>
36 #include <linux/hw_breakpoint.h>
37 #include <linux/regset.h>
38 #include <linux/tracehook.h>
39 #include <linux/elf.h>
41 #include <asm/compat.h>
42 #include <asm/debug-monitors.h>
43 #include <asm/pgtable.h>
44 #include <asm/syscall.h>
45 #include <asm/traps.h>
46 #include <asm/system_misc.h>
48 #define CREATE_TRACE_POINTS
49 #include <trace/events/syscalls.h>
52 * TODO: does not yet catch signals sent when the child dies.
53 * in exit.c or in signal.c.
/*
 * Called by kernel/ptrace.c when detaching..
 */
void ptrace_disable(struct task_struct *child)
{
	/*
	 * This would be better off in core code, but PTRACE_DETACH has
	 * grown its fair share of arch-specific worts and changing it
	 * is likely to cause regressions on obscure architectures.
	 */
	user_disable_single_step(child);
}
69 #ifdef CONFIG_HAVE_HW_BREAKPOINT
71 * Handle hitting a HW-breakpoint.
73 static void ptrace_hbptriggered(struct perf_event
*bp
,
74 struct perf_sample_data
*data
,
77 struct arch_hw_breakpoint
*bkpt
= counter_arch_bp(bp
);
81 .si_code
= TRAP_HWBKPT
,
82 .si_addr
= (void __user
*)(bkpt
->trigger
),
88 if (!is_compat_task())
91 for (i
= 0; i
< ARM_MAX_BRP
; ++i
) {
92 if (current
->thread
.debug
.hbp_break
[i
] == bp
) {
93 info
.si_errno
= (i
<< 1) + 1;
98 for (i
= 0; i
< ARM_MAX_WRP
; ++i
) {
99 if (current
->thread
.debug
.hbp_watch
[i
] == bp
) {
100 info
.si_errno
= -((i
<< 1) + 1);
107 force_sig_info(SIGTRAP
, &info
, current
);
111 * Unregister breakpoints from this task and reset the pointers in
114 void flush_ptrace_hw_breakpoint(struct task_struct
*tsk
)
117 struct thread_struct
*t
= &tsk
->thread
;
119 for (i
= 0; i
< ARM_MAX_BRP
; i
++) {
120 if (t
->debug
.hbp_break
[i
]) {
121 unregister_hw_breakpoint(t
->debug
.hbp_break
[i
]);
122 t
->debug
.hbp_break
[i
] = NULL
;
126 for (i
= 0; i
< ARM_MAX_WRP
; i
++) {
127 if (t
->debug
.hbp_watch
[i
]) {
128 unregister_hw_breakpoint(t
->debug
.hbp_watch
[i
]);
129 t
->debug
.hbp_watch
[i
] = NULL
;
134 void ptrace_hw_copy_thread(struct task_struct
*tsk
)
136 memset(&tsk
->thread
.debug
, 0, sizeof(struct debug_info
));
139 static struct perf_event
*ptrace_hbp_get_event(unsigned int note_type
,
140 struct task_struct
*tsk
,
143 struct perf_event
*bp
= ERR_PTR(-EINVAL
);
146 case NT_ARM_HW_BREAK
:
147 if (idx
< ARM_MAX_BRP
)
148 bp
= tsk
->thread
.debug
.hbp_break
[idx
];
150 case NT_ARM_HW_WATCH
:
151 if (idx
< ARM_MAX_WRP
)
152 bp
= tsk
->thread
.debug
.hbp_watch
[idx
];
159 static int ptrace_hbp_set_event(unsigned int note_type
,
160 struct task_struct
*tsk
,
162 struct perf_event
*bp
)
167 case NT_ARM_HW_BREAK
:
168 if (idx
< ARM_MAX_BRP
) {
169 tsk
->thread
.debug
.hbp_break
[idx
] = bp
;
173 case NT_ARM_HW_WATCH
:
174 if (idx
< ARM_MAX_WRP
) {
175 tsk
->thread
.debug
.hbp_watch
[idx
] = bp
;
184 static struct perf_event
*ptrace_hbp_create(unsigned int note_type
,
185 struct task_struct
*tsk
,
188 struct perf_event
*bp
;
189 struct perf_event_attr attr
;
193 case NT_ARM_HW_BREAK
:
194 type
= HW_BREAKPOINT_X
;
196 case NT_ARM_HW_WATCH
:
197 type
= HW_BREAKPOINT_RW
;
200 return ERR_PTR(-EINVAL
);
203 ptrace_breakpoint_init(&attr
);
206 * Initialise fields to sane defaults
207 * (i.e. values that will pass validation).
210 attr
.bp_len
= HW_BREAKPOINT_LEN_4
;
214 bp
= register_user_hw_breakpoint(&attr
, ptrace_hbptriggered
, NULL
, tsk
);
218 err
= ptrace_hbp_set_event(note_type
, tsk
, idx
, bp
);
225 static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type
,
226 struct arch_hw_breakpoint_ctrl ctrl
,
227 struct perf_event_attr
*attr
)
229 int err
, len
, type
, disabled
= !ctrl
.enabled
;
231 attr
->disabled
= disabled
;
235 err
= arch_bp_generic_fields(ctrl
, &len
, &type
);
240 case NT_ARM_HW_BREAK
:
241 if ((type
& HW_BREAKPOINT_X
) != type
)
244 case NT_ARM_HW_WATCH
:
245 if ((type
& HW_BREAKPOINT_RW
) != type
)
253 attr
->bp_type
= type
;
258 static int ptrace_hbp_get_resource_info(unsigned int note_type
, u32
*info
)
264 case NT_ARM_HW_BREAK
:
265 num
= hw_breakpoint_slots(TYPE_INST
);
267 case NT_ARM_HW_WATCH
:
268 num
= hw_breakpoint_slots(TYPE_DATA
);
274 reg
|= debug_monitors_arch();
282 static int ptrace_hbp_get_ctrl(unsigned int note_type
,
283 struct task_struct
*tsk
,
287 struct perf_event
*bp
= ptrace_hbp_get_event(note_type
, tsk
, idx
);
292 *ctrl
= bp
? encode_ctrl_reg(counter_arch_bp(bp
)->ctrl
) : 0;
296 static int ptrace_hbp_get_addr(unsigned int note_type
,
297 struct task_struct
*tsk
,
301 struct perf_event
*bp
= ptrace_hbp_get_event(note_type
, tsk
, idx
);
306 *addr
= bp
? bp
->attr
.bp_addr
: 0;
/*
 * Fetch the event for slot @idx, creating it on first use.
 */
static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (!bp)
		bp = ptrace_hbp_create(note_type, tsk, idx);

	return bp;
}
322 static int ptrace_hbp_set_ctrl(unsigned int note_type
,
323 struct task_struct
*tsk
,
328 struct perf_event
*bp
;
329 struct perf_event_attr attr
;
330 struct arch_hw_breakpoint_ctrl ctrl
;
332 bp
= ptrace_hbp_get_initialised_bp(note_type
, tsk
, idx
);
339 decode_ctrl_reg(uctrl
, &ctrl
);
340 err
= ptrace_hbp_fill_attr_ctrl(note_type
, ctrl
, &attr
);
344 return modify_user_hw_breakpoint(bp
, &attr
);
347 static int ptrace_hbp_set_addr(unsigned int note_type
,
348 struct task_struct
*tsk
,
353 struct perf_event
*bp
;
354 struct perf_event_attr attr
;
356 bp
= ptrace_hbp_get_initialised_bp(note_type
, tsk
, idx
);
364 err
= modify_user_hw_breakpoint(bp
, &attr
);
368 #define PTRACE_HBP_ADDR_SZ sizeof(u64)
369 #define PTRACE_HBP_CTRL_SZ sizeof(u32)
370 #define PTRACE_HBP_PAD_SZ sizeof(u32)
372 static int hw_break_get(struct task_struct
*target
,
373 const struct user_regset
*regset
,
374 unsigned int pos
, unsigned int count
,
375 void *kbuf
, void __user
*ubuf
)
377 unsigned int note_type
= regset
->core_note_type
;
378 int ret
, idx
= 0, offset
, limit
;
383 ret
= ptrace_hbp_get_resource_info(note_type
, &info
);
387 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, &info
, 0,
393 offset
= offsetof(struct user_hwdebug_state
, pad
);
394 ret
= user_regset_copyout_zero(&pos
, &count
, &kbuf
, &ubuf
, offset
,
395 offset
+ PTRACE_HBP_PAD_SZ
);
399 /* (address, ctrl) registers */
400 offset
= offsetof(struct user_hwdebug_state
, dbg_regs
);
401 limit
= regset
->n
* regset
->size
;
402 while (count
&& offset
< limit
) {
403 ret
= ptrace_hbp_get_addr(note_type
, target
, idx
, &addr
);
406 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, &addr
,
407 offset
, offset
+ PTRACE_HBP_ADDR_SZ
);
410 offset
+= PTRACE_HBP_ADDR_SZ
;
412 ret
= ptrace_hbp_get_ctrl(note_type
, target
, idx
, &ctrl
);
415 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, &ctrl
,
416 offset
, offset
+ PTRACE_HBP_CTRL_SZ
);
419 offset
+= PTRACE_HBP_CTRL_SZ
;
421 ret
= user_regset_copyout_zero(&pos
, &count
, &kbuf
, &ubuf
,
423 offset
+ PTRACE_HBP_PAD_SZ
);
426 offset
+= PTRACE_HBP_PAD_SZ
;
433 static int hw_break_set(struct task_struct
*target
,
434 const struct user_regset
*regset
,
435 unsigned int pos
, unsigned int count
,
436 const void *kbuf
, const void __user
*ubuf
)
438 unsigned int note_type
= regset
->core_note_type
;
439 int ret
, idx
= 0, offset
, limit
;
443 /* Resource info and pad */
444 offset
= offsetof(struct user_hwdebug_state
, dbg_regs
);
445 ret
= user_regset_copyin_ignore(&pos
, &count
, &kbuf
, &ubuf
, 0, offset
);
449 /* (address, ctrl) registers */
450 limit
= regset
->n
* regset
->size
;
451 while (count
&& offset
< limit
) {
452 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, &addr
,
453 offset
, offset
+ PTRACE_HBP_ADDR_SZ
);
456 ret
= ptrace_hbp_set_addr(note_type
, target
, idx
, addr
);
459 offset
+= PTRACE_HBP_ADDR_SZ
;
461 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, &ctrl
,
462 offset
, offset
+ PTRACE_HBP_CTRL_SZ
);
465 ret
= ptrace_hbp_set_ctrl(note_type
, target
, idx
, ctrl
);
468 offset
+= PTRACE_HBP_CTRL_SZ
;
470 ret
= user_regset_copyin_ignore(&pos
, &count
, &kbuf
, &ubuf
,
472 offset
+ PTRACE_HBP_PAD_SZ
);
475 offset
+= PTRACE_HBP_PAD_SZ
;
481 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
483 static int gpr_get(struct task_struct
*target
,
484 const struct user_regset
*regset
,
485 unsigned int pos
, unsigned int count
,
486 void *kbuf
, void __user
*ubuf
)
488 struct user_pt_regs
*uregs
= &task_pt_regs(target
)->user_regs
;
489 return user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, uregs
, 0, -1);
492 static int gpr_set(struct task_struct
*target
, const struct user_regset
*regset
,
493 unsigned int pos
, unsigned int count
,
494 const void *kbuf
, const void __user
*ubuf
)
497 struct user_pt_regs newregs
;
499 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, &newregs
, 0, -1);
503 if (!valid_user_regs(&newregs
, target
))
506 task_pt_regs(target
)->user_regs
= newregs
;
511 * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
513 static int fpr_get(struct task_struct
*target
, const struct user_regset
*regset
,
514 unsigned int pos
, unsigned int count
,
515 void *kbuf
, void __user
*ubuf
)
517 struct user_fpsimd_state
*uregs
;
518 uregs
= &target
->thread
.fpsimd_state
.user_fpsimd
;
519 return user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, uregs
, 0, -1);
522 static int fpr_set(struct task_struct
*target
, const struct user_regset
*regset
,
523 unsigned int pos
, unsigned int count
,
524 const void *kbuf
, const void __user
*ubuf
)
527 struct user_fpsimd_state newstate
;
529 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, &newstate
, 0, -1);
533 target
->thread
.fpsimd_state
.user_fpsimd
= newstate
;
534 fpsimd_flush_task_state(target
);
538 static int tls_get(struct task_struct
*target
, const struct user_regset
*regset
,
539 unsigned int pos
, unsigned int count
,
540 void *kbuf
, void __user
*ubuf
)
542 unsigned long *tls
= &target
->thread
.tp_value
;
543 return user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, tls
, 0, -1);
546 static int tls_set(struct task_struct
*target
, const struct user_regset
*regset
,
547 unsigned int pos
, unsigned int count
,
548 const void *kbuf
, const void __user
*ubuf
)
553 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, &tls
, 0, -1);
557 target
->thread
.tp_value
= tls
;
561 static int system_call_get(struct task_struct
*target
,
562 const struct user_regset
*regset
,
563 unsigned int pos
, unsigned int count
,
564 void *kbuf
, void __user
*ubuf
)
566 int syscallno
= task_pt_regs(target
)->syscallno
;
568 return user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
572 static int system_call_set(struct task_struct
*target
,
573 const struct user_regset
*regset
,
574 unsigned int pos
, unsigned int count
,
575 const void *kbuf
, const void __user
*ubuf
)
579 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, &syscallno
, 0, -1);
583 task_pt_regs(target
)->syscallno
= syscallno
;
/* Indices into aarch64_regsets[] below. */
enum aarch64_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_TLS,
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
	REGSET_SYSTEM_CALL,
};
598 static const struct user_regset aarch64_regsets
[] = {
600 .core_note_type
= NT_PRSTATUS
,
601 .n
= sizeof(struct user_pt_regs
) / sizeof(u64
),
603 .align
= sizeof(u64
),
608 .core_note_type
= NT_PRFPREG
,
609 .n
= sizeof(struct user_fpsimd_state
) / sizeof(u32
),
611 * We pretend we have 32-bit registers because the fpsr and
612 * fpcr are 32-bits wide.
615 .align
= sizeof(u32
),
620 .core_note_type
= NT_ARM_TLS
,
622 .size
= sizeof(void *),
623 .align
= sizeof(void *),
627 #ifdef CONFIG_HAVE_HW_BREAKPOINT
628 [REGSET_HW_BREAK
] = {
629 .core_note_type
= NT_ARM_HW_BREAK
,
630 .n
= sizeof(struct user_hwdebug_state
) / sizeof(u32
),
632 .align
= sizeof(u32
),
636 [REGSET_HW_WATCH
] = {
637 .core_note_type
= NT_ARM_HW_WATCH
,
638 .n
= sizeof(struct user_hwdebug_state
) / sizeof(u32
),
640 .align
= sizeof(u32
),
645 [REGSET_SYSTEM_CALL
] = {
646 .core_note_type
= NT_ARM_SYSTEM_CALL
,
649 .align
= sizeof(int),
650 .get
= system_call_get
,
651 .set
= system_call_set
,
655 static const struct user_regset_view user_aarch64_view
= {
656 .name
= "aarch64", .e_machine
= EM_AARCH64
,
657 .regsets
= aarch64_regsets
, .n
= ARRAY_SIZE(aarch64_regsets
)
661 #include <linux/compat.h>
668 static int compat_gpr_get(struct task_struct
*target
,
669 const struct user_regset
*regset
,
670 unsigned int pos
, unsigned int count
,
671 void *kbuf
, void __user
*ubuf
)
674 unsigned int i
, start
, num_regs
;
676 /* Calculate the number of AArch32 registers contained in count */
677 num_regs
= count
/ regset
->size
;
679 /* Convert pos into an register number */
680 start
= pos
/ regset
->size
;
682 if (start
+ num_regs
> regset
->n
)
685 for (i
= 0; i
< num_regs
; ++i
) {
686 unsigned int idx
= start
+ i
;
691 reg
= task_pt_regs(target
)->pc
;
694 reg
= task_pt_regs(target
)->pstate
;
697 reg
= task_pt_regs(target
)->orig_x0
;
700 reg
= task_pt_regs(target
)->regs
[idx
];
704 memcpy(kbuf
, ®
, sizeof(reg
));
707 ret
= copy_to_user(ubuf
, ®
, sizeof(reg
));
720 static int compat_gpr_set(struct task_struct
*target
,
721 const struct user_regset
*regset
,
722 unsigned int pos
, unsigned int count
,
723 const void *kbuf
, const void __user
*ubuf
)
725 struct pt_regs newregs
;
727 unsigned int i
, start
, num_regs
;
729 /* Calculate the number of AArch32 registers contained in count */
730 num_regs
= count
/ regset
->size
;
732 /* Convert pos into an register number */
733 start
= pos
/ regset
->size
;
735 if (start
+ num_regs
> regset
->n
)
738 newregs
= *task_pt_regs(target
);
740 for (i
= 0; i
< num_regs
; ++i
) {
741 unsigned int idx
= start
+ i
;
745 memcpy(®
, kbuf
, sizeof(reg
));
748 ret
= copy_from_user(®
, ubuf
, sizeof(reg
));
762 newregs
.pstate
= reg
;
765 newregs
.orig_x0
= reg
;
768 newregs
.regs
[idx
] = reg
;
773 if (valid_user_regs(&newregs
.user_regs
, target
))
774 *task_pt_regs(target
) = newregs
;
781 static int compat_vfp_get(struct task_struct
*target
,
782 const struct user_regset
*regset
,
783 unsigned int pos
, unsigned int count
,
784 void *kbuf
, void __user
*ubuf
)
786 struct user_fpsimd_state
*uregs
;
787 compat_ulong_t fpscr
;
790 uregs
= &target
->thread
.fpsimd_state
.user_fpsimd
;
793 * The VFP registers are packed into the fpsimd_state, so they all sit
794 * nicely together for us. We just need to create the fpscr separately.
796 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, uregs
, 0,
797 VFP_STATE_SIZE
- sizeof(compat_ulong_t
));
800 fpscr
= (uregs
->fpsr
& VFP_FPSCR_STAT_MASK
) |
801 (uregs
->fpcr
& VFP_FPSCR_CTRL_MASK
);
802 ret
= put_user(fpscr
, (compat_ulong_t
*)ubuf
);
808 static int compat_vfp_set(struct task_struct
*target
,
809 const struct user_regset
*regset
,
810 unsigned int pos
, unsigned int count
,
811 const void *kbuf
, const void __user
*ubuf
)
813 struct user_fpsimd_state
*uregs
;
814 compat_ulong_t fpscr
;
817 if (pos
+ count
> VFP_STATE_SIZE
)
820 uregs
= &target
->thread
.fpsimd_state
.user_fpsimd
;
822 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, uregs
, 0,
823 VFP_STATE_SIZE
- sizeof(compat_ulong_t
));
826 ret
= get_user(fpscr
, (compat_ulong_t
*)ubuf
);
827 uregs
->fpsr
= fpscr
& VFP_FPSCR_STAT_MASK
;
828 uregs
->fpcr
= fpscr
& VFP_FPSCR_CTRL_MASK
;
831 fpsimd_flush_task_state(target
);
835 static int compat_tls_get(struct task_struct
*target
,
836 const struct user_regset
*regset
, unsigned int pos
,
837 unsigned int count
, void *kbuf
, void __user
*ubuf
)
839 compat_ulong_t tls
= (compat_ulong_t
)target
->thread
.tp_value
;
840 return user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, &tls
, 0, -1);
843 static int compat_tls_set(struct task_struct
*target
,
844 const struct user_regset
*regset
, unsigned int pos
,
845 unsigned int count
, const void *kbuf
,
846 const void __user
*ubuf
)
851 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, &tls
, 0, -1);
855 target
->thread
.tp_value
= tls
;
859 static const struct user_regset aarch32_regsets
[] = {
860 [REGSET_COMPAT_GPR
] = {
861 .core_note_type
= NT_PRSTATUS
,
862 .n
= COMPAT_ELF_NGREG
,
863 .size
= sizeof(compat_elf_greg_t
),
864 .align
= sizeof(compat_elf_greg_t
),
865 .get
= compat_gpr_get
,
866 .set
= compat_gpr_set
868 [REGSET_COMPAT_VFP
] = {
869 .core_note_type
= NT_ARM_VFP
,
870 .n
= VFP_STATE_SIZE
/ sizeof(compat_ulong_t
),
871 .size
= sizeof(compat_ulong_t
),
872 .align
= sizeof(compat_ulong_t
),
873 .get
= compat_vfp_get
,
874 .set
= compat_vfp_set
878 static const struct user_regset_view user_aarch32_view
= {
879 .name
= "aarch32", .e_machine
= EM_ARM
,
880 .regsets
= aarch32_regsets
, .n
= ARRAY_SIZE(aarch32_regsets
)
883 static const struct user_regset aarch32_ptrace_regsets
[] = {
885 .core_note_type
= NT_PRSTATUS
,
886 .n
= COMPAT_ELF_NGREG
,
887 .size
= sizeof(compat_elf_greg_t
),
888 .align
= sizeof(compat_elf_greg_t
),
889 .get
= compat_gpr_get
,
890 .set
= compat_gpr_set
893 .core_note_type
= NT_ARM_VFP
,
894 .n
= VFP_STATE_SIZE
/ sizeof(compat_ulong_t
),
895 .size
= sizeof(compat_ulong_t
),
896 .align
= sizeof(compat_ulong_t
),
897 .get
= compat_vfp_get
,
898 .set
= compat_vfp_set
901 .core_note_type
= NT_ARM_TLS
,
903 .size
= sizeof(compat_ulong_t
),
904 .align
= sizeof(compat_ulong_t
),
905 .get
= compat_tls_get
,
906 .set
= compat_tls_set
,
908 #ifdef CONFIG_HAVE_HW_BREAKPOINT
909 [REGSET_HW_BREAK
] = {
910 .core_note_type
= NT_ARM_HW_BREAK
,
911 .n
= sizeof(struct user_hwdebug_state
) / sizeof(u32
),
913 .align
= sizeof(u32
),
917 [REGSET_HW_WATCH
] = {
918 .core_note_type
= NT_ARM_HW_WATCH
,
919 .n
= sizeof(struct user_hwdebug_state
) / sizeof(u32
),
921 .align
= sizeof(u32
),
926 [REGSET_SYSTEM_CALL
] = {
927 .core_note_type
= NT_ARM_SYSTEM_CALL
,
930 .align
= sizeof(int),
931 .get
= system_call_get
,
932 .set
= system_call_set
,
936 static const struct user_regset_view user_aarch32_ptrace_view
= {
937 .name
= "aarch32", .e_machine
= EM_ARM
,
938 .regsets
= aarch32_ptrace_regsets
, .n
= ARRAY_SIZE(aarch32_ptrace_regsets
)
941 static int compat_ptrace_read_user(struct task_struct
*tsk
, compat_ulong_t off
,
942 compat_ulong_t __user
*ret
)
949 if (off
== COMPAT_PT_TEXT_ADDR
)
950 tmp
= tsk
->mm
->start_code
;
951 else if (off
== COMPAT_PT_DATA_ADDR
)
952 tmp
= tsk
->mm
->start_data
;
953 else if (off
== COMPAT_PT_TEXT_END_ADDR
)
954 tmp
= tsk
->mm
->end_code
;
955 else if (off
< sizeof(compat_elf_gregset_t
))
956 return copy_regset_to_user(tsk
, &user_aarch32_view
,
957 REGSET_COMPAT_GPR
, off
,
958 sizeof(compat_ulong_t
), ret
);
959 else if (off
>= COMPAT_USER_SZ
)
964 return put_user(tmp
, ret
);
967 static int compat_ptrace_write_user(struct task_struct
*tsk
, compat_ulong_t off
,
971 mm_segment_t old_fs
= get_fs();
973 if (off
& 3 || off
>= COMPAT_USER_SZ
)
976 if (off
>= sizeof(compat_elf_gregset_t
))
980 ret
= copy_regset_from_user(tsk
, &user_aarch32_view
,
981 REGSET_COMPAT_GPR
, off
,
982 sizeof(compat_ulong_t
),
989 #ifdef CONFIG_HAVE_HW_BREAKPOINT
992 * Convert a virtual register number into an index for a thread_info
993 * breakpoint array. Breakpoints are identified using positive numbers
994 * whilst watchpoints are negative. The registers are laid out as pairs
995 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
996 * Register 0 is reserved for describing resource information.
998 static int compat_ptrace_hbp_num_to_idx(compat_long_t num
)
1000 return (abs(num
) - 1) >> 1;
1003 static int compat_ptrace_hbp_get_resource_info(u32
*kdata
)
1005 u8 num_brps
, num_wrps
, debug_arch
, wp_len
;
1008 num_brps
= hw_breakpoint_slots(TYPE_INST
);
1009 num_wrps
= hw_breakpoint_slots(TYPE_DATA
);
1011 debug_arch
= debug_monitors_arch();
1025 static int compat_ptrace_hbp_get(unsigned int note_type
,
1026 struct task_struct
*tsk
,
1033 int err
, idx
= compat_ptrace_hbp_num_to_idx(num
);;
1036 err
= ptrace_hbp_get_addr(note_type
, tsk
, idx
, &addr
);
1039 err
= ptrace_hbp_get_ctrl(note_type
, tsk
, idx
, &ctrl
);
1046 static int compat_ptrace_hbp_set(unsigned int note_type
,
1047 struct task_struct
*tsk
,
1054 int err
, idx
= compat_ptrace_hbp_num_to_idx(num
);
1058 err
= ptrace_hbp_set_addr(note_type
, tsk
, idx
, addr
);
1061 err
= ptrace_hbp_set_ctrl(note_type
, tsk
, idx
, ctrl
);
1067 static int compat_ptrace_gethbpregs(struct task_struct
*tsk
, compat_long_t num
,
1068 compat_ulong_t __user
*data
)
1072 mm_segment_t old_fs
= get_fs();
1077 ret
= compat_ptrace_hbp_get(NT_ARM_HW_WATCH
, tsk
, num
, &kdata
);
1079 } else if (num
== 0) {
1080 ret
= compat_ptrace_hbp_get_resource_info(&kdata
);
1083 ret
= compat_ptrace_hbp_get(NT_ARM_HW_BREAK
, tsk
, num
, &kdata
);
1088 ret
= put_user(kdata
, data
);
1093 static int compat_ptrace_sethbpregs(struct task_struct
*tsk
, compat_long_t num
,
1094 compat_ulong_t __user
*data
)
1098 mm_segment_t old_fs
= get_fs();
1103 ret
= get_user(kdata
, data
);
1109 ret
= compat_ptrace_hbp_set(NT_ARM_HW_WATCH
, tsk
, num
, &kdata
);
1111 ret
= compat_ptrace_hbp_set(NT_ARM_HW_BREAK
, tsk
, num
, &kdata
);
1116 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
1118 long compat_arch_ptrace(struct task_struct
*child
, compat_long_t request
,
1119 compat_ulong_t caddr
, compat_ulong_t cdata
)
1121 unsigned long addr
= caddr
;
1122 unsigned long data
= cdata
;
1123 void __user
*datap
= compat_ptr(data
);
1127 case PTRACE_PEEKUSR
:
1128 ret
= compat_ptrace_read_user(child
, addr
, datap
);
1131 case PTRACE_POKEUSR
:
1132 ret
= compat_ptrace_write_user(child
, addr
, data
);
1135 case COMPAT_PTRACE_GETREGS
:
1136 ret
= copy_regset_to_user(child
,
1139 0, sizeof(compat_elf_gregset_t
),
1143 case COMPAT_PTRACE_SETREGS
:
1144 ret
= copy_regset_from_user(child
,
1147 0, sizeof(compat_elf_gregset_t
),
1151 case COMPAT_PTRACE_GET_THREAD_AREA
:
1152 ret
= put_user((compat_ulong_t
)child
->thread
.tp_value
,
1153 (compat_ulong_t __user
*)datap
);
1156 case COMPAT_PTRACE_SET_SYSCALL
:
1157 task_pt_regs(child
)->syscallno
= data
;
1161 case COMPAT_PTRACE_GETVFPREGS
:
1162 ret
= copy_regset_to_user(child
,
1169 case COMPAT_PTRACE_SETVFPREGS
:
1170 ret
= copy_regset_from_user(child
,
1177 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1178 case COMPAT_PTRACE_GETHBPREGS
:
1179 ret
= compat_ptrace_gethbpregs(child
, addr
, datap
);
1182 case COMPAT_PTRACE_SETHBPREGS
:
1183 ret
= compat_ptrace_sethbpregs(child
, addr
, datap
);
1188 ret
= compat_ptrace_request(child
, request
, addr
,
1195 #endif /* CONFIG_COMPAT */
1197 const struct user_regset_view
*task_user_regset_view(struct task_struct
*task
)
1199 #ifdef CONFIG_COMPAT
1201 * Core dumping of 32-bit tasks or compat ptrace requests must use the
1202 * user_aarch32_view compatible with arm32. Native ptrace requests on
1203 * 32-bit children use an extended user_aarch32_ptrace_view to allow
1204 * access to the TLS register.
1206 if (is_compat_task())
1207 return &user_aarch32_view
;
1208 else if (is_compat_thread(task_thread_info(task
)))
1209 return &user_aarch32_ptrace_view
;
1211 return &user_aarch64_view
;
/*
 * Native ptrace entry point: all requests are handled generically.
 */
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	return ptrace_request(child, request, addr, data);
}
/* Direction marker written into the scratch register during tracing. */
enum ptrace_syscall_dir {
	PTRACE_SYSCALL_ENTER = 0,
	PTRACE_SYSCALL_EXIT,
};
1225 static void tracehook_report_syscall(struct pt_regs
*regs
,
1226 enum ptrace_syscall_dir dir
)
1229 unsigned long saved_reg
;
1232 * A scratch register (ip(r12) on AArch32, x7 on AArch64) is
1233 * used to denote syscall entry/exit:
1235 regno
= (is_compat_task() ? 12 : 7);
1236 saved_reg
= regs
->regs
[regno
];
1237 regs
->regs
[regno
] = dir
;
1239 if (dir
== PTRACE_SYSCALL_EXIT
)
1240 tracehook_report_syscall_exit(regs
, 0);
1241 else if (tracehook_report_syscall_entry(regs
))
1242 regs
->syscallno
= ~0UL;
1244 regs
->regs
[regno
] = saved_reg
;
1247 asmlinkage
int syscall_trace_enter(struct pt_regs
*regs
)
1249 /* Do the secure computing check first; failures should be fast. */
1250 if (secure_computing() == -1)
1253 if (test_thread_flag(TIF_SYSCALL_TRACE
))
1254 tracehook_report_syscall(regs
, PTRACE_SYSCALL_ENTER
);
1256 if (test_thread_flag(TIF_SYSCALL_TRACEPOINT
))
1257 trace_sys_enter(regs
, regs
->syscallno
);
1259 audit_syscall_entry(regs
->syscallno
, regs
->orig_x0
, regs
->regs
[1],
1260 regs
->regs
[2], regs
->regs
[3]);
1262 return regs
->syscallno
;
1265 asmlinkage
void syscall_trace_exit(struct pt_regs
*regs
)
1267 audit_syscall_exit(regs
);
1269 if (test_thread_flag(TIF_SYSCALL_TRACEPOINT
))
1270 trace_sys_exit(regs
, regs_return_value(regs
));
1272 if (test_thread_flag(TIF_SYSCALL_TRACE
))
1273 tracehook_report_syscall(regs
, PTRACE_SYSCALL_EXIT
);
1277 * Bits which are always architecturally RES0 per ARM DDI 0487A.h
1278 * Userspace cannot use these until they have an architectural meaning.
1279 * We also reserve IL for the kernel; SS is handled dynamically.
1281 #define SPSR_EL1_AARCH64_RES0_BITS \
1282 (GENMASK_ULL(63,32) | GENMASK_ULL(27, 22) | GENMASK_ULL(20, 10) | \
1284 #define SPSR_EL1_AARCH32_RES0_BITS \
1285 (GENMASK_ULL(63,32) | GENMASK_ULL(24, 22) | GENMASK_ULL(20,20))
1287 static int valid_compat_regs(struct user_pt_regs
*regs
)
1289 regs
->pstate
&= ~SPSR_EL1_AARCH32_RES0_BITS
;
1291 if (!system_supports_mixed_endian_el0()) {
1292 if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN
))
1293 regs
->pstate
|= COMPAT_PSR_E_BIT
;
1295 regs
->pstate
&= ~COMPAT_PSR_E_BIT
;
1298 if (user_mode(regs
) && (regs
->pstate
& PSR_MODE32_BIT
) &&
1299 (regs
->pstate
& COMPAT_PSR_A_BIT
) == 0 &&
1300 (regs
->pstate
& COMPAT_PSR_I_BIT
) == 0 &&
1301 (regs
->pstate
& COMPAT_PSR_F_BIT
) == 0) {
1306 * Force PSR to a valid 32-bit EL0t, preserving the same bits as
1309 regs
->pstate
&= COMPAT_PSR_N_BIT
| COMPAT_PSR_Z_BIT
|
1310 COMPAT_PSR_C_BIT
| COMPAT_PSR_V_BIT
|
1311 COMPAT_PSR_Q_BIT
| COMPAT_PSR_IT_MASK
|
1312 COMPAT_PSR_GE_MASK
| COMPAT_PSR_E_BIT
|
1314 regs
->pstate
|= PSR_MODE32_BIT
;
1319 static int valid_native_regs(struct user_pt_regs
*regs
)
1321 regs
->pstate
&= ~SPSR_EL1_AARCH64_RES0_BITS
;
1323 if (user_mode(regs
) && !(regs
->pstate
& PSR_MODE32_BIT
) &&
1324 (regs
->pstate
& PSR_D_BIT
) == 0 &&
1325 (regs
->pstate
& PSR_A_BIT
) == 0 &&
1326 (regs
->pstate
& PSR_I_BIT
) == 0 &&
1327 (regs
->pstate
& PSR_F_BIT
) == 0) {
1331 /* Force PSR to a valid 64-bit EL0t */
1332 regs
->pstate
&= PSR_N_BIT
| PSR_Z_BIT
| PSR_C_BIT
| PSR_V_BIT
;
1338 * Are the current registers suitable for user mode? (used to maintain
1339 * security in signal handlers)
1341 int valid_user_regs(struct user_pt_regs
*regs
, struct task_struct
*task
)
1343 if (!test_tsk_thread_flag(task
, TIF_SINGLESTEP
))
1344 regs
->pstate
&= ~DBG_SPSR_SS
;
1346 if (is_compat_thread(task_thread_info(task
)))
1347 return valid_compat_regs(regs
);
1349 return valid_native_regs(regs
);