2 * Based on arch/arm/kernel/signal.c
4 * Copyright (C) 1995-2009 Russell King
5 * Copyright (C) 2012 ARM Ltd.
6 * Modified by Will Deacon <will.deacon@arm.com>
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
21 #include <linux/compat.h>
22 #include <linux/signal.h>
23 #include <linux/syscalls.h>
24 #include <linux/ratelimit.h>
26 #include <asm/fpsimd.h>
27 #include <asm/signal32.h>
28 #include <asm/uaccess.h>
29 #include <asm/unistd32.h>
31 struct compat_sigaction
{
32 compat_uptr_t sa_handler
;
33 compat_ulong_t sa_flags
;
34 compat_uptr_t sa_restorer
;
35 compat_sigset_t sa_mask
;
38 struct compat_old_sigaction
{
39 compat_uptr_t sa_handler
;
40 compat_old_sigset_t sa_mask
;
41 compat_ulong_t sa_flags
;
42 compat_uptr_t sa_restorer
;
45 struct compat_sigcontext
{
46 /* We always set these two fields to 0 */
47 compat_ulong_t trap_no
;
48 compat_ulong_t error_code
;
50 compat_ulong_t oldmask
;
51 compat_ulong_t arm_r0
;
52 compat_ulong_t arm_r1
;
53 compat_ulong_t arm_r2
;
54 compat_ulong_t arm_r3
;
55 compat_ulong_t arm_r4
;
56 compat_ulong_t arm_r5
;
57 compat_ulong_t arm_r6
;
58 compat_ulong_t arm_r7
;
59 compat_ulong_t arm_r8
;
60 compat_ulong_t arm_r9
;
61 compat_ulong_t arm_r10
;
62 compat_ulong_t arm_fp
;
63 compat_ulong_t arm_ip
;
64 compat_ulong_t arm_sp
;
65 compat_ulong_t arm_lr
;
66 compat_ulong_t arm_pc
;
67 compat_ulong_t arm_cpsr
;
68 compat_ulong_t fault_address
;
71 struct compat_ucontext
{
72 compat_ulong_t uc_flags
;
73 struct compat_ucontext
*uc_link
;
74 compat_stack_t uc_stack
;
75 struct compat_sigcontext uc_mcontext
;
76 compat_sigset_t uc_sigmask
;
77 int __unused
[32 - (sizeof (compat_sigset_t
) / sizeof (int))];
78 compat_ulong_t uc_regspace
[128] __attribute__((__aligned__(8)));
81 struct compat_vfp_sigframe
{
84 struct compat_user_vfp
{
85 compat_u64 fpregs
[32];
88 struct compat_user_vfp_exc
{
90 compat_ulong_t fpinst
;
91 compat_ulong_t fpinst2
;
93 } __attribute__((__aligned__(8)));
95 #define VFP_MAGIC 0x56465001
96 #define VFP_STORAGE_SIZE sizeof(struct compat_vfp_sigframe)
98 struct compat_aux_sigframe
{
99 struct compat_vfp_sigframe vfp
;
101 /* Something that isn't a valid magic number for any coprocessor. */
102 unsigned long end_magic
;
103 } __attribute__((__aligned__(8)));
105 struct compat_sigframe
{
106 struct compat_ucontext uc
;
107 compat_ulong_t retcode
[2];
110 struct compat_rt_sigframe
{
111 struct compat_siginfo info
;
112 struct compat_sigframe sig
;
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))

/*
 * For ARM syscalls, the syscall number has to be loaded into r7.
 * We do not support an OABI userspace.
 */
#define MOV_R7_NR_SIGRETURN     (0xe3a07000 | __NR_compat_sigreturn)
#define SVC_SYS_SIGRETURN       (0xef000000 | __NR_compat_sigreturn)
#define MOV_R7_NR_RT_SIGRETURN  (0xe3a07000 | __NR_compat_rt_sigreturn)
#define SVC_SYS_RT_SIGRETURN    (0xef000000 | __NR_compat_rt_sigreturn)

/*
 * For Thumb syscalls, we also pass the syscall number via r7.  We therefore
 * need two 16-bit instructions.
 */
#define SVC_THUMB_SIGRETURN     (((0xdf00 | __NR_compat_sigreturn) << 16) | \
                                   0x2700 | __NR_compat_sigreturn)
#define SVC_THUMB_RT_SIGRETURN  (((0xdf00 | __NR_compat_rt_sigreturn) << 16) | \
                                   0x2700 | __NR_compat_rt_sigreturn)
135 const compat_ulong_t aarch32_sigret_code
[6] = {
137 * AArch32 sigreturn code.
138 * We don't construct an OABI SWI - instead we just set the imm24 field
139 * to the EABI syscall number so that we create a sane disassembly.
141 MOV_R7_NR_SIGRETURN
, SVC_SYS_SIGRETURN
, SVC_THUMB_SIGRETURN
,
142 MOV_R7_NR_RT_SIGRETURN
, SVC_SYS_RT_SIGRETURN
, SVC_THUMB_RT_SIGRETURN
,
145 static inline int put_sigset_t(compat_sigset_t __user
*uset
, sigset_t
*set
)
147 compat_sigset_t cset
;
149 cset
.sig
[0] = set
->sig
[0] & 0xffffffffull
;
150 cset
.sig
[1] = set
->sig
[0] >> 32;
152 return copy_to_user(uset
, &cset
, sizeof(*uset
));
155 static inline int get_sigset_t(sigset_t
*set
,
156 const compat_sigset_t __user
*uset
)
160 if (copy_from_user(&s32
, uset
, sizeof(*uset
)))
163 set
->sig
[0] = s32
.sig
[0] | (((long)s32
.sig
[1]) << 32);
167 int copy_siginfo_to_user32(compat_siginfo_t __user
*to
, siginfo_t
*from
)
171 if (!access_ok(VERIFY_WRITE
, to
, sizeof(*to
)))
174 /* If you change siginfo_t structure, please be sure
175 * this code is fixed accordingly.
176 * It should never copy any pad contained in the structure
177 * to avoid security leaks, but must copy the generic
178 * 3 ints plus the relevant union member.
179 * This routine must convert siginfo from 64bit to 32bit as well
182 err
= __put_user(from
->si_signo
, &to
->si_signo
);
183 err
|= __put_user(from
->si_errno
, &to
->si_errno
);
184 err
|= __put_user((short)from
->si_code
, &to
->si_code
);
185 if (from
->si_code
< 0)
186 err
|= __copy_to_user(&to
->_sifields
._pad
, &from
->_sifields
._pad
,
188 else switch (from
->si_code
& __SI_MASK
) {
190 err
|= __put_user(from
->si_pid
, &to
->si_pid
);
191 err
|= __put_user(from
->si_uid
, &to
->si_uid
);
194 err
|= __put_user(from
->si_tid
, &to
->si_tid
);
195 err
|= __put_user(from
->si_overrun
, &to
->si_overrun
);
196 err
|= __put_user((compat_uptr_t
)(unsigned long)from
->si_ptr
,
200 err
|= __put_user(from
->si_band
, &to
->si_band
);
201 err
|= __put_user(from
->si_fd
, &to
->si_fd
);
204 err
|= __put_user((compat_uptr_t
)(unsigned long)from
->si_addr
,
208 * Other callers might not initialize the si_lsb field,
209 * so check explicitely for the right codes here.
211 if (from
->si_code
== BUS_MCEERR_AR
|| from
->si_code
== BUS_MCEERR_AO
)
212 err
|= __put_user(from
->si_addr_lsb
, &to
->si_addr_lsb
);
216 err
|= __put_user(from
->si_pid
, &to
->si_pid
);
217 err
|= __put_user(from
->si_uid
, &to
->si_uid
);
218 err
|= __put_user(from
->si_status
, &to
->si_status
);
219 err
|= __put_user(from
->si_utime
, &to
->si_utime
);
220 err
|= __put_user(from
->si_stime
, &to
->si_stime
);
222 case __SI_RT
: /* This is not generated by the kernel as of now. */
223 case __SI_MESGQ
: /* But this is */
224 err
|= __put_user(from
->si_pid
, &to
->si_pid
);
225 err
|= __put_user(from
->si_uid
, &to
->si_uid
);
226 err
|= __put_user((compat_uptr_t
)(unsigned long)from
->si_ptr
, &to
->si_ptr
);
228 default: /* this is just in case for now ... */
229 err
|= __put_user(from
->si_pid
, &to
->si_pid
);
230 err
|= __put_user(from
->si_uid
, &to
->si_uid
);
236 int copy_siginfo_from_user32(siginfo_t
*to
, compat_siginfo_t __user
*from
)
238 memset(to
, 0, sizeof *to
);
240 if (copy_from_user(to
, from
, __ARCH_SI_PREAMBLE_SIZE
) ||
241 copy_from_user(to
->_sifields
._pad
,
242 from
->_sifields
._pad
, SI_PAD_SIZE
))
249 * VFP save/restore code.
251 static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user
*frame
)
253 struct fpsimd_state
*fpsimd
= ¤t
->thread
.fpsimd_state
;
254 compat_ulong_t magic
= VFP_MAGIC
;
255 compat_ulong_t size
= VFP_STORAGE_SIZE
;
256 compat_ulong_t fpscr
, fpexc
;
260 * Save the hardware registers to the fpsimd_state structure.
261 * Note that this also saves V16-31, which aren't visible
264 fpsimd_save_state(fpsimd
);
266 /* Place structure header on the stack */
267 __put_user_error(magic
, &frame
->magic
, err
);
268 __put_user_error(size
, &frame
->size
, err
);
271 * Now copy the FP registers. Since the registers are packed,
272 * we can copy the prefix we want (V0-V15) as it is.
273 * FIXME: Won't work if big endian.
275 err
|= __copy_to_user(&frame
->ufp
.fpregs
, fpsimd
->vregs
,
276 sizeof(frame
->ufp
.fpregs
));
278 /* Create an AArch32 fpscr from the fpsr and the fpcr. */
279 fpscr
= (fpsimd
->fpsr
& VFP_FPSCR_STAT_MASK
) |
280 (fpsimd
->fpcr
& VFP_FPSCR_CTRL_MASK
);
281 __put_user_error(fpscr
, &frame
->ufp
.fpscr
, err
);
284 * The exception register aren't available so we fake up a
285 * basic FPEXC and zero everything else.
288 __put_user_error(fpexc
, &frame
->ufp_exc
.fpexc
, err
);
289 __put_user_error(0, &frame
->ufp_exc
.fpinst
, err
);
290 __put_user_error(0, &frame
->ufp_exc
.fpinst2
, err
);
292 return err
? -EFAULT
: 0;
295 static int compat_restore_vfp_context(struct compat_vfp_sigframe __user
*frame
)
297 struct fpsimd_state fpsimd
;
298 compat_ulong_t magic
= VFP_MAGIC
;
299 compat_ulong_t size
= VFP_STORAGE_SIZE
;
300 compat_ulong_t fpscr
;
303 __get_user_error(magic
, &frame
->magic
, err
);
304 __get_user_error(size
, &frame
->size
, err
);
308 if (magic
!= VFP_MAGIC
|| size
!= VFP_STORAGE_SIZE
)
312 * Copy the FP registers into the start of the fpsimd_state.
313 * FIXME: Won't work if big endian.
315 err
|= __copy_from_user(fpsimd
.vregs
, frame
->ufp
.fpregs
,
316 sizeof(frame
->ufp
.fpregs
));
318 /* Extract the fpsr and the fpcr from the fpscr */
319 __get_user_error(fpscr
, &frame
->ufp
.fpscr
, err
);
320 fpsimd
.fpsr
= fpscr
& VFP_FPSCR_STAT_MASK
;
321 fpsimd
.fpcr
= fpscr
& VFP_FPSCR_CTRL_MASK
;
324 * We don't need to touch the exception register, so
325 * reload the hardware state.
329 fpsimd_load_state(&fpsimd
);
333 return err
? -EFAULT
: 0;
337 * atomically swap in the new signal mask, and wait for a signal.
339 asmlinkage
int compat_sys_sigsuspend(int restart
, compat_ulong_t oldmask
,
340 compat_old_sigset_t mask
)
344 siginitset(¤t
->blocked
, mask
);
345 return sigsuspend(&blocked
);
348 asmlinkage
int compat_sys_sigaction(int sig
,
349 const struct compat_old_sigaction __user
*act
,
350 struct compat_old_sigaction __user
*oact
)
352 struct k_sigaction new_ka
, old_ka
;
354 compat_old_sigset_t mask
;
355 compat_uptr_t handler
, restorer
;
358 if (!access_ok(VERIFY_READ
, act
, sizeof(*act
)) ||
359 __get_user(handler
, &act
->sa_handler
) ||
360 __get_user(restorer
, &act
->sa_restorer
) ||
361 __get_user(new_ka
.sa
.sa_flags
, &act
->sa_flags
) ||
362 __get_user(mask
, &act
->sa_mask
))
365 new_ka
.sa
.sa_handler
= compat_ptr(handler
);
366 new_ka
.sa
.sa_restorer
= compat_ptr(restorer
);
367 siginitset(&new_ka
.sa
.sa_mask
, mask
);
370 ret
= do_sigaction(sig
, act
? &new_ka
: NULL
, oact
? &old_ka
: NULL
);
373 if (!access_ok(VERIFY_WRITE
, oact
, sizeof(*oact
)) ||
374 __put_user(ptr_to_compat(old_ka
.sa
.sa_handler
),
375 &oact
->sa_handler
) ||
376 __put_user(ptr_to_compat(old_ka
.sa
.sa_restorer
),
377 &oact
->sa_restorer
) ||
378 __put_user(old_ka
.sa
.sa_flags
, &oact
->sa_flags
) ||
379 __put_user(old_ka
.sa
.sa_mask
.sig
[0], &oact
->sa_mask
))
386 asmlinkage
int compat_sys_rt_sigaction(int sig
,
387 const struct compat_sigaction __user
*act
,
388 struct compat_sigaction __user
*oact
,
389 compat_size_t sigsetsize
)
391 struct k_sigaction new_ka
, old_ka
;
394 /* XXX: Don't preclude handling different sized sigset_t's. */
395 if (sigsetsize
!= sizeof(compat_sigset_t
))
399 compat_uptr_t handler
, restorer
;
401 ret
= get_user(handler
, &act
->sa_handler
);
402 new_ka
.sa
.sa_handler
= compat_ptr(handler
);
403 ret
|= get_user(restorer
, &act
->sa_restorer
);
404 new_ka
.sa
.sa_restorer
= compat_ptr(restorer
);
405 ret
|= get_sigset_t(&new_ka
.sa
.sa_mask
, &act
->sa_mask
);
406 ret
|= __get_user(new_ka
.sa
.sa_flags
, &act
->sa_flags
);
411 ret
= do_sigaction(sig
, act
? &new_ka
: NULL
, oact
? &old_ka
: NULL
);
413 ret
= put_user(ptr_to_compat(old_ka
.sa
.sa_handler
), &oact
->sa_handler
);
414 ret
|= put_sigset_t(&oact
->sa_mask
, &old_ka
.sa
.sa_mask
);
415 ret
|= __put_user(old_ka
.sa
.sa_flags
, &oact
->sa_flags
);
420 static int compat_restore_sigframe(struct pt_regs
*regs
,
421 struct compat_sigframe __user
*sf
)
425 struct compat_aux_sigframe __user
*aux
;
427 err
= get_sigset_t(&set
, &sf
->uc
.uc_sigmask
);
429 sigdelsetmask(&set
, ~_BLOCKABLE
);
430 set_current_blocked(&set
);
433 __get_user_error(regs
->regs
[0], &sf
->uc
.uc_mcontext
.arm_r0
, err
);
434 __get_user_error(regs
->regs
[1], &sf
->uc
.uc_mcontext
.arm_r1
, err
);
435 __get_user_error(regs
->regs
[2], &sf
->uc
.uc_mcontext
.arm_r2
, err
);
436 __get_user_error(regs
->regs
[3], &sf
->uc
.uc_mcontext
.arm_r3
, err
);
437 __get_user_error(regs
->regs
[4], &sf
->uc
.uc_mcontext
.arm_r4
, err
);
438 __get_user_error(regs
->regs
[5], &sf
->uc
.uc_mcontext
.arm_r5
, err
);
439 __get_user_error(regs
->regs
[6], &sf
->uc
.uc_mcontext
.arm_r6
, err
);
440 __get_user_error(regs
->regs
[7], &sf
->uc
.uc_mcontext
.arm_r7
, err
);
441 __get_user_error(regs
->regs
[8], &sf
->uc
.uc_mcontext
.arm_r8
, err
);
442 __get_user_error(regs
->regs
[9], &sf
->uc
.uc_mcontext
.arm_r9
, err
);
443 __get_user_error(regs
->regs
[10], &sf
->uc
.uc_mcontext
.arm_r10
, err
);
444 __get_user_error(regs
->regs
[11], &sf
->uc
.uc_mcontext
.arm_fp
, err
);
445 __get_user_error(regs
->regs
[12], &sf
->uc
.uc_mcontext
.arm_ip
, err
);
446 __get_user_error(regs
->compat_sp
, &sf
->uc
.uc_mcontext
.arm_sp
, err
);
447 __get_user_error(regs
->compat_lr
, &sf
->uc
.uc_mcontext
.arm_lr
, err
);
448 __get_user_error(regs
->pc
, &sf
->uc
.uc_mcontext
.arm_pc
, err
);
449 __get_user_error(regs
->pstate
, &sf
->uc
.uc_mcontext
.arm_cpsr
, err
);
452 * Avoid compat_sys_sigreturn() restarting.
454 regs
->syscallno
= ~0UL;
456 err
|= !valid_user_regs(®s
->user_regs
);
458 aux
= (struct compat_aux_sigframe __user
*) sf
->uc
.uc_regspace
;
460 err
|= compat_restore_vfp_context(&aux
->vfp
);
465 asmlinkage
int compat_sys_sigreturn(struct pt_regs
*regs
)
467 struct compat_sigframe __user
*frame
;
469 /* Always make any pending restarted system calls return -EINTR */
470 current_thread_info()->restart_block
.fn
= do_no_restart_syscall
;
473 * Since we stacked the signal on a 64-bit boundary,
474 * then 'sp' should be word aligned here. If it's
475 * not, then the user is trying to mess with us.
477 if (regs
->compat_sp
& 7)
480 frame
= (struct compat_sigframe __user
*)regs
->compat_sp
;
482 if (!access_ok(VERIFY_READ
, frame
, sizeof (*frame
)))
485 if (compat_restore_sigframe(regs
, frame
))
488 return regs
->regs
[0];
491 if (show_unhandled_signals
)
492 pr_info_ratelimited("%s[%d]: bad frame in %s: pc=%08llx sp=%08llx\n",
493 current
->comm
, task_pid_nr(current
), __func__
,
495 force_sig(SIGSEGV
, current
);
499 asmlinkage
int compat_sys_rt_sigreturn(struct pt_regs
*regs
)
501 struct compat_rt_sigframe __user
*frame
;
503 /* Always make any pending restarted system calls return -EINTR */
504 current_thread_info()->restart_block
.fn
= do_no_restart_syscall
;
507 * Since we stacked the signal on a 64-bit boundary,
508 * then 'sp' should be word aligned here. If it's
509 * not, then the user is trying to mess with us.
511 if (regs
->compat_sp
& 7)
514 frame
= (struct compat_rt_sigframe __user
*)regs
->compat_sp
;
516 if (!access_ok(VERIFY_READ
, frame
, sizeof (*frame
)))
519 if (compat_restore_sigframe(regs
, &frame
->sig
))
522 if (compat_restore_altstack(&frame
->sig
.uc
.uc_stack
))
525 return regs
->regs
[0];
528 if (show_unhandled_signals
)
529 pr_info_ratelimited("%s[%d]: bad frame in %s: pc=%08llx sp=%08llx\n",
530 current
->comm
, task_pid_nr(current
), __func__
,
532 force_sig(SIGSEGV
, current
);
536 static void __user
*compat_get_sigframe(struct k_sigaction
*ka
,
537 struct pt_regs
*regs
,
540 compat_ulong_t sp
= regs
->compat_sp
;
544 * This is the X/Open sanctioned signal stack switching.
546 if ((ka
->sa
.sa_flags
& SA_ONSTACK
) && !sas_ss_flags(sp
))
547 sp
= current
->sas_ss_sp
+ current
->sas_ss_size
;
550 * ATPCS B01 mandates 8-byte alignment
552 frame
= compat_ptr((compat_uptr_t
)((sp
- framesize
) & ~7));
555 * Check that we can actually write to the signal frame.
557 if (!access_ok(VERIFY_WRITE
, frame
, framesize
))
563 static void compat_setup_return(struct pt_regs
*regs
, struct k_sigaction
*ka
,
564 compat_ulong_t __user
*rc
, void __user
*frame
,
567 compat_ulong_t handler
= ptr_to_compat(ka
->sa
.sa_handler
);
568 compat_ulong_t retcode
;
569 compat_ulong_t spsr
= regs
->pstate
& ~PSR_f
;
572 /* Check if the handler is written for ARM or Thumb */
576 spsr
|= COMPAT_PSR_T_BIT
;
577 spsr
&= ~COMPAT_PSR_IT_MASK
;
579 spsr
&= ~COMPAT_PSR_T_BIT
;
582 if (ka
->sa
.sa_flags
& SA_RESTORER
) {
583 retcode
= ptr_to_compat(ka
->sa
.sa_restorer
);
585 /* Set up sigreturn pointer */
586 unsigned int idx
= thumb
<< 1;
588 if (ka
->sa
.sa_flags
& SA_SIGINFO
)
591 retcode
= AARCH32_VECTORS_BASE
+
592 AARCH32_KERN_SIGRET_CODE_OFFSET
+
596 regs
->regs
[0] = usig
;
597 regs
->compat_sp
= ptr_to_compat(frame
);
598 regs
->compat_lr
= retcode
;
603 static int compat_setup_sigframe(struct compat_sigframe __user
*sf
,
604 struct pt_regs
*regs
, sigset_t
*set
)
606 struct compat_aux_sigframe __user
*aux
;
609 __put_user_error(regs
->regs
[0], &sf
->uc
.uc_mcontext
.arm_r0
, err
);
610 __put_user_error(regs
->regs
[1], &sf
->uc
.uc_mcontext
.arm_r1
, err
);
611 __put_user_error(regs
->regs
[2], &sf
->uc
.uc_mcontext
.arm_r2
, err
);
612 __put_user_error(regs
->regs
[3], &sf
->uc
.uc_mcontext
.arm_r3
, err
);
613 __put_user_error(regs
->regs
[4], &sf
->uc
.uc_mcontext
.arm_r4
, err
);
614 __put_user_error(regs
->regs
[5], &sf
->uc
.uc_mcontext
.arm_r5
, err
);
615 __put_user_error(regs
->regs
[6], &sf
->uc
.uc_mcontext
.arm_r6
, err
);
616 __put_user_error(regs
->regs
[7], &sf
->uc
.uc_mcontext
.arm_r7
, err
);
617 __put_user_error(regs
->regs
[8], &sf
->uc
.uc_mcontext
.arm_r8
, err
);
618 __put_user_error(regs
->regs
[9], &sf
->uc
.uc_mcontext
.arm_r9
, err
);
619 __put_user_error(regs
->regs
[10], &sf
->uc
.uc_mcontext
.arm_r10
, err
);
620 __put_user_error(regs
->regs
[11], &sf
->uc
.uc_mcontext
.arm_fp
, err
);
621 __put_user_error(regs
->regs
[12], &sf
->uc
.uc_mcontext
.arm_ip
, err
);
622 __put_user_error(regs
->compat_sp
, &sf
->uc
.uc_mcontext
.arm_sp
, err
);
623 __put_user_error(regs
->compat_lr
, &sf
->uc
.uc_mcontext
.arm_lr
, err
);
624 __put_user_error(regs
->pc
, &sf
->uc
.uc_mcontext
.arm_pc
, err
);
625 __put_user_error(regs
->pstate
, &sf
->uc
.uc_mcontext
.arm_cpsr
, err
);
627 __put_user_error((compat_ulong_t
)0, &sf
->uc
.uc_mcontext
.trap_no
, err
);
628 __put_user_error((compat_ulong_t
)0, &sf
->uc
.uc_mcontext
.error_code
, err
);
629 __put_user_error(current
->thread
.fault_address
, &sf
->uc
.uc_mcontext
.fault_address
, err
);
630 __put_user_error(set
->sig
[0], &sf
->uc
.uc_mcontext
.oldmask
, err
);
632 err
|= put_sigset_t(&sf
->uc
.uc_sigmask
, set
);
634 aux
= (struct compat_aux_sigframe __user
*) sf
->uc
.uc_regspace
;
637 err
|= compat_preserve_vfp_context(&aux
->vfp
);
638 __put_user_error(0, &aux
->end_magic
, err
);
644 * 32-bit signal handling routines called from signal.c
646 int compat_setup_rt_frame(int usig
, struct k_sigaction
*ka
, siginfo_t
*info
,
647 sigset_t
*set
, struct pt_regs
*regs
)
649 struct compat_rt_sigframe __user
*frame
;
650 compat_stack_t stack
;
653 frame
= compat_get_sigframe(ka
, regs
, sizeof(*frame
));
658 err
|= copy_siginfo_to_user32(&frame
->info
, info
);
660 __put_user_error(0, &frame
->sig
.uc
.uc_flags
, err
);
661 __put_user_error(NULL
, &frame
->sig
.uc
.uc_link
, err
);
663 err
|= __compat_save_altstack(&frame
->sig
.uc
.uc_stack
, regs
->compat_sp
);
665 err
|= compat_setup_sigframe(&frame
->sig
, regs
, set
);
668 compat_setup_return(regs
, ka
, frame
->sig
.retcode
, frame
, usig
);
669 regs
->regs
[1] = (compat_ulong_t
)(unsigned long)&frame
->info
;
670 regs
->regs
[2] = (compat_ulong_t
)(unsigned long)&frame
->sig
.uc
;
676 int compat_setup_frame(int usig
, struct k_sigaction
*ka
, sigset_t
*set
,
677 struct pt_regs
*regs
)
679 struct compat_sigframe __user
*frame
;
682 frame
= compat_get_sigframe(ka
, regs
, sizeof(*frame
));
687 __put_user_error(0x5ac3c35a, &frame
->uc
.uc_flags
, err
);
689 err
|= compat_setup_sigframe(frame
, regs
, set
);
691 compat_setup_return(regs
, ka
, frame
->retcode
, frame
, usig
);
696 void compat_setup_restart_syscall(struct pt_regs
*regs
)
698 regs
->regs
[7] = __NR_compat_restart_syscall
;