arm64: switch to generic compat rt_sigqueueinfo()
arch/arm64/kernel/signal32.c
/*
 * Based on arch/arm/kernel/signal.c
 *
 * Copyright (C) 1995-2009 Russell King
 * Copyright (C) 2012 ARM Ltd.
 * Modified by Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/compat.h>
#include <linux/signal.h>
#include <linux/syscalls.h>
#include <linux/ratelimit.h>

#include <asm/fpsimd.h>
#include <asm/signal32.h>
#include <asm/uaccess.h>
#include <asm/unistd32.h>

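/*
 * The structures below duplicate the AArch32 user-visible signal frame
 * layouts, expressed with compat types so that their sizes and field
 * offsets match what 32-bit userspace expects.
 */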
struct compat_sigaction {
        compat_uptr_t                   sa_handler;
        compat_ulong_t                  sa_flags;
        compat_uptr_t                   sa_restorer;
        compat_sigset_t                 sa_mask;
};

struct compat_old_sigaction {
        compat_uptr_t                   sa_handler;
        compat_old_sigset_t             sa_mask;
        compat_ulong_t                  sa_flags;
        compat_uptr_t                   sa_restorer;
};

struct compat_sigcontext {
        /* We always set these two fields to 0 */
        compat_ulong_t                  trap_no;
        compat_ulong_t                  error_code;

        compat_ulong_t                  oldmask;
        compat_ulong_t                  arm_r0;
        compat_ulong_t                  arm_r1;
        compat_ulong_t                  arm_r2;
        compat_ulong_t                  arm_r3;
        compat_ulong_t                  arm_r4;
        compat_ulong_t                  arm_r5;
        compat_ulong_t                  arm_r6;
        compat_ulong_t                  arm_r7;
        compat_ulong_t                  arm_r8;
        compat_ulong_t                  arm_r9;
        compat_ulong_t                  arm_r10;
        compat_ulong_t                  arm_fp;
        compat_ulong_t                  arm_ip;
        compat_ulong_t                  arm_sp;
        compat_ulong_t                  arm_lr;
        compat_ulong_t                  arm_pc;
        compat_ulong_t                  arm_cpsr;
        compat_ulong_t                  fault_address;
};

struct compat_ucontext {
        compat_ulong_t                  uc_flags;
        struct compat_ucontext          *uc_link;
        compat_stack_t                  uc_stack;
        struct compat_sigcontext        uc_mcontext;
        compat_sigset_t                 uc_sigmask;
        int             __unused[32 - (sizeof (compat_sigset_t) / sizeof (int))];
        compat_ulong_t  uc_regspace[128] __attribute__((__aligned__(8)));
};

struct compat_vfp_sigframe {
        compat_ulong_t  magic;
        compat_ulong_t  size;
        struct compat_user_vfp {
                compat_u64      fpregs[32];
                compat_ulong_t  fpscr;
        } ufp;
        struct compat_user_vfp_exc {
                compat_ulong_t  fpexc;
                compat_ulong_t  fpinst;
                compat_ulong_t  fpinst2;
        } ufp_exc;
} __attribute__((__aligned__(8)));

#define VFP_MAGIC               0x56465001
#define VFP_STORAGE_SIZE        sizeof(struct compat_vfp_sigframe)

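/*
 * The VFP state is saved as a compat_vfp_sigframe inside the aux frame
 * below, which lives in uc_regspace[] of the compat ucontext (see
 * compat_setup_sigframe() and compat_restore_sigframe()). VFP_MAGIC is
 * "VFP" in ASCII (0x56 0x46 0x50) followed by 0x01; together with the
 * size field it lets the restore path sanity-check the frame.
 */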
struct compat_aux_sigframe {
        struct compat_vfp_sigframe      vfp;

        /* Something that isn't a valid magic number for any coprocessor. */
        unsigned long                   end_magic;
} __attribute__((__aligned__(8)));

struct compat_sigframe {
        struct compat_ucontext  uc;
        compat_ulong_t          retcode[2];
};

struct compat_rt_sigframe {
        struct compat_siginfo   info;
        struct compat_sigframe  sig;
};

#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))

/*
 * For ARM syscalls, the syscall number has to be loaded into r7.
 * We do not support an OABI userspace.
 */
#define MOV_R7_NR_SIGRETURN     (0xe3a07000 | __NR_compat_sigreturn)
#define SVC_SYS_SIGRETURN       (0xef000000 | __NR_compat_sigreturn)
#define MOV_R7_NR_RT_SIGRETURN  (0xe3a07000 | __NR_compat_rt_sigreturn)
#define SVC_SYS_RT_SIGRETURN    (0xef000000 | __NR_compat_rt_sigreturn)

/*
 * For Thumb syscalls, we also pass the syscall number via r7. We therefore
 * need two 16-bit instructions.
 */
#define SVC_THUMB_SIGRETURN     (((0xdf00 | __NR_compat_sigreturn) << 16) | \
                                   0x2700 | __NR_compat_sigreturn)
#define SVC_THUMB_RT_SIGRETURN  (((0xdf00 | __NR_compat_rt_sigreturn) << 16) | \
                                   0x2700 | __NR_compat_rt_sigreturn)

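/*
 * The trampoline words below use the following AArch32 encodings:
 * 0xe3a07000 is "mov r7, #imm", 0xef000000 is "svc #imm24", and each
 * Thumb word packs "movs r7, #imm8" (0x27xx) in the low halfword with
 * "svc #imm8" (0xdfxx) in the high halfword, so the mov is fetched first
 * on the little-endian instruction stream. Slots 0-2 hold the ARM
 * mov/svc pair and the Thumb word for sigreturn; slots 3-5 hold the same
 * sequence for rt_sigreturn. compat_setup_return() indexes into this
 * array when the handler does not provide SA_RESTORER.
 */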
const compat_ulong_t aarch32_sigret_code[6] = {
        /*
         * AArch32 sigreturn code.
         * We don't construct an OABI SWI - instead we just set the imm24 field
         * to the EABI syscall number so that we create a sane disassembly.
         */
        MOV_R7_NR_SIGRETURN,    SVC_SYS_SIGRETURN,    SVC_THUMB_SIGRETURN,
        MOV_R7_NR_RT_SIGRETURN, SVC_SYS_RT_SIGRETURN, SVC_THUMB_RT_SIGRETURN,
};

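/*
 * A compat sigset_t is two 32-bit words, whereas the native 64-bit
 * sigset_t keeps the same bits in a single unsigned long, so the two
 * helpers below split and recombine sig[0] accordingly.
 */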
static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
{
        compat_sigset_t cset;

        cset.sig[0] = set->sig[0] & 0xffffffffull;
        cset.sig[1] = set->sig[0] >> 32;

        return copy_to_user(uset, &cset, sizeof(*uset));
}

static inline int get_sigset_t(sigset_t *set,
                               const compat_sigset_t __user *uset)
{
        compat_sigset_t s32;

        if (copy_from_user(&s32, uset, sizeof(*uset)))
                return -EFAULT;

        set->sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
        return 0;
}

int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
{
        int err;

        if (!access_ok(VERIFY_WRITE, to, sizeof(*to)))
                return -EFAULT;

        /* If you change siginfo_t structure, please be sure
         * this code is fixed accordingly.
         * It should never copy any pad contained in the structure
         * to avoid security leaks, but must copy the generic
         * 3 ints plus the relevant union member.
         * This routine must convert siginfo from 64bit to 32bit as well
         * at the same time.
         */
        err = __put_user(from->si_signo, &to->si_signo);
        err |= __put_user(from->si_errno, &to->si_errno);
        err |= __put_user((short)from->si_code, &to->si_code);
        if (from->si_code < 0)
                err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad,
                                      SI_PAD_SIZE);
        else switch (from->si_code & __SI_MASK) {
        case __SI_KILL:
                err |= __put_user(from->si_pid, &to->si_pid);
                err |= __put_user(from->si_uid, &to->si_uid);
                break;
        case __SI_TIMER:
                err |= __put_user(from->si_tid, &to->si_tid);
                err |= __put_user(from->si_overrun, &to->si_overrun);
                err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr,
                                  &to->si_ptr);
                break;
        case __SI_POLL:
                err |= __put_user(from->si_band, &to->si_band);
                err |= __put_user(from->si_fd, &to->si_fd);
                break;
        case __SI_FAULT:
                err |= __put_user((compat_uptr_t)(unsigned long)from->si_addr,
                                  &to->si_addr);
#ifdef BUS_MCEERR_AO
                /*
                 * Other callers might not initialize the si_lsb field,
                 * so check explicitly for the right codes here.
                 */
                if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
                        err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
#endif
                break;
        case __SI_CHLD:
                err |= __put_user(from->si_pid, &to->si_pid);
                err |= __put_user(from->si_uid, &to->si_uid);
                err |= __put_user(from->si_status, &to->si_status);
                err |= __put_user(from->si_utime, &to->si_utime);
                err |= __put_user(from->si_stime, &to->si_stime);
                break;
        case __SI_RT: /* This is not generated by the kernel as of now. */
        case __SI_MESGQ: /* But this is */
                err |= __put_user(from->si_pid, &to->si_pid);
                err |= __put_user(from->si_uid, &to->si_uid);
                err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr, &to->si_ptr);
                break;
        default: /* this is just in case for now ... */
                err |= __put_user(from->si_pid, &to->si_pid);
                err |= __put_user(from->si_uid, &to->si_uid);
                break;
        }
        return err;
}

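/*
 * Used by generic compat code (e.g. the compat rt_sigqueueinfo() and
 * compat ptrace paths) rather than by anything in this file; the
 * preamble and padded union are copied across verbatim.
 */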
int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
{
        memset(to, 0, sizeof *to);

        if (copy_from_user(to, from, __ARCH_SI_PREAMBLE_SIZE) ||
            copy_from_user(to->_sifields._pad,
                           from->_sifields._pad, SI_PAD_SIZE))
                return -EFAULT;

        return 0;
}

/*
 * VFP save/restore code.
 */
static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame)
{
        struct fpsimd_state *fpsimd = &current->thread.fpsimd_state;
        compat_ulong_t magic = VFP_MAGIC;
        compat_ulong_t size = VFP_STORAGE_SIZE;
        compat_ulong_t fpscr, fpexc;
        int err = 0;

        /*
         * Save the hardware registers to the fpsimd_state structure.
         * Note that this also saves V16-31, which aren't visible
         * in AArch32.
         */
        fpsimd_save_state(fpsimd);

        /* Place structure header on the stack */
        __put_user_error(magic, &frame->magic, err);
        __put_user_error(size, &frame->size, err);

        /*
         * Now copy the FP registers. Since the registers are packed,
         * we can copy the prefix we want (V0-V15) as it is.
         * FIXME: Won't work if big endian.
         */
        err |= __copy_to_user(&frame->ufp.fpregs, fpsimd->vregs,
                              sizeof(frame->ufp.fpregs));

        /* Create an AArch32 fpscr from the fpsr and the fpcr. */
        fpscr = (fpsimd->fpsr & VFP_FPSCR_STAT_MASK) |
                (fpsimd->fpcr & VFP_FPSCR_CTRL_MASK);
        __put_user_error(fpscr, &frame->ufp.fpscr, err);

        /*
         * The exception registers aren't available, so we fake up a
         * basic FPEXC and zero everything else.
         */
        fpexc = (1 << 30);
        __put_user_error(fpexc, &frame->ufp_exc.fpexc, err);
        __put_user_error(0, &frame->ufp_exc.fpinst, err);
        __put_user_error(0, &frame->ufp_exc.fpinst2, err);

        return err ? -EFAULT : 0;
}

static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame)
{
        struct fpsimd_state fpsimd;
        compat_ulong_t magic = VFP_MAGIC;
        compat_ulong_t size = VFP_STORAGE_SIZE;
        compat_ulong_t fpscr;
        int err = 0;

        __get_user_error(magic, &frame->magic, err);
        __get_user_error(size, &frame->size, err);

        if (err)
                return -EFAULT;
        if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
                return -EINVAL;

        /*
         * Copy the FP registers into the start of the fpsimd_state.
         * FIXME: Won't work if big endian.
         */
        err |= __copy_from_user(fpsimd.vregs, frame->ufp.fpregs,
                                sizeof(frame->ufp.fpregs));

        /* Extract the fpsr and the fpcr from the fpscr */
        __get_user_error(fpscr, &frame->ufp.fpscr, err);
        fpsimd.fpsr = fpscr & VFP_FPSCR_STAT_MASK;
        fpsimd.fpcr = fpscr & VFP_FPSCR_CTRL_MASK;

        /*
         * We don't need to touch the exception register, so
         * reload the hardware state.
         */
        if (!err) {
                preempt_disable();
                fpsimd_load_state(&fpsimd);
                preempt_enable();
        }

        return err ? -EFAULT : 0;
}

/*
 * atomically swap in the new signal mask, and wait for a signal.
 */
asmlinkage int compat_sys_sigsuspend(int restart, compat_ulong_t oldmask,
                                     compat_old_sigset_t mask)
{
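        /*
         * The restart and oldmask arguments are unused; they only exist
         * to match the legacy AArch32 sigsuspend() argument layout.
         */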
        sigset_t blocked;

        siginitset(&blocked, mask);
        return sigsuspend(&blocked);
}

asmlinkage int compat_sys_sigaction(int sig,
                                    const struct compat_old_sigaction __user *act,
                                    struct compat_old_sigaction __user *oact)
{
        struct k_sigaction new_ka, old_ka;
        int ret;
        compat_old_sigset_t mask;
        compat_uptr_t handler, restorer;

        if (act) {
                if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
                    __get_user(handler, &act->sa_handler) ||
                    __get_user(restorer, &act->sa_restorer) ||
                    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
                    __get_user(mask, &act->sa_mask))
                        return -EFAULT;

                new_ka.sa.sa_handler = compat_ptr(handler);
                new_ka.sa.sa_restorer = compat_ptr(restorer);
                siginitset(&new_ka.sa.sa_mask, mask);
        }

        ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

        if (!ret && oact) {
                if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
                    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
                               &oact->sa_handler) ||
                    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
                               &oact->sa_restorer) ||
                    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
                    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
                        return -EFAULT;
        }

        return ret;
}

asmlinkage int compat_sys_rt_sigaction(int sig,
                                       const struct compat_sigaction __user *act,
                                       struct compat_sigaction __user *oact,
                                       compat_size_t sigsetsize)
{
        struct k_sigaction new_ka, old_ka;
        int ret;

        /* XXX: Don't preclude handling different sized sigset_t's. */
        if (sigsetsize != sizeof(compat_sigset_t))
                return -EINVAL;

        if (act) {
                compat_uptr_t handler, restorer;

                ret = get_user(handler, &act->sa_handler);
                new_ka.sa.sa_handler = compat_ptr(handler);
                ret |= get_user(restorer, &act->sa_restorer);
                new_ka.sa.sa_restorer = compat_ptr(restorer);
                ret |= get_sigset_t(&new_ka.sa.sa_mask, &act->sa_mask);
                ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
                if (ret)
                        return -EFAULT;
        }

        ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
        if (!ret && oact) {
                ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), &oact->sa_handler);
                ret |= put_sigset_t(&oact->sa_mask, &old_ka.sa.sa_mask);
                ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
        }
        return ret;
}

static int compat_restore_sigframe(struct pt_regs *regs,
                                   struct compat_sigframe __user *sf)
{
        int err;
        sigset_t set;
        struct compat_aux_sigframe __user *aux;

        err = get_sigset_t(&set, &sf->uc.uc_sigmask);
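        /*
         * SIGKILL and SIGSTOP must never end up blocked, so strip them
         * from whatever mask the (possibly tampered-with) frame supplied
         * before installing it.
         */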
        if (err == 0) {
                sigdelsetmask(&set, ~_BLOCKABLE);
                set_current_blocked(&set);
        }

        __get_user_error(regs->regs[0], &sf->uc.uc_mcontext.arm_r0, err);
        __get_user_error(regs->regs[1], &sf->uc.uc_mcontext.arm_r1, err);
        __get_user_error(regs->regs[2], &sf->uc.uc_mcontext.arm_r2, err);
        __get_user_error(regs->regs[3], &sf->uc.uc_mcontext.arm_r3, err);
        __get_user_error(regs->regs[4], &sf->uc.uc_mcontext.arm_r4, err);
        __get_user_error(regs->regs[5], &sf->uc.uc_mcontext.arm_r5, err);
        __get_user_error(regs->regs[6], &sf->uc.uc_mcontext.arm_r6, err);
        __get_user_error(regs->regs[7], &sf->uc.uc_mcontext.arm_r7, err);
        __get_user_error(regs->regs[8], &sf->uc.uc_mcontext.arm_r8, err);
        __get_user_error(regs->regs[9], &sf->uc.uc_mcontext.arm_r9, err);
        __get_user_error(regs->regs[10], &sf->uc.uc_mcontext.arm_r10, err);
        __get_user_error(regs->regs[11], &sf->uc.uc_mcontext.arm_fp, err);
        __get_user_error(regs->regs[12], &sf->uc.uc_mcontext.arm_ip, err);
        __get_user_error(regs->compat_sp, &sf->uc.uc_mcontext.arm_sp, err);
        __get_user_error(regs->compat_lr, &sf->uc.uc_mcontext.arm_lr, err);
        __get_user_error(regs->pc, &sf->uc.uc_mcontext.arm_pc, err);
        __get_user_error(regs->pstate, &sf->uc.uc_mcontext.arm_cpsr, err);

        /*
         * Avoid compat_sys_sigreturn() restarting.
         */
        regs->syscallno = ~0UL;

        err |= !valid_user_regs(&regs->user_regs);

        aux = (struct compat_aux_sigframe __user *) sf->uc.uc_regspace;
        if (err == 0)
                err |= compat_restore_vfp_context(&aux->vfp);

        return err;
}

asmlinkage int compat_sys_sigreturn(struct pt_regs *regs)
{
        struct compat_sigframe __user *frame;

        /* Always make any pending restarted system calls return -EINTR */
        current_thread_info()->restart_block.fn = do_no_restart_syscall;

        /*
         * Since we stacked the signal on a 64-bit boundary, 'sp' should
         * be 64-bit aligned here. If it's not, the user is trying to
         * mess with us.
         */
        if (regs->compat_sp & 7)
                goto badframe;

        frame = (struct compat_sigframe __user *)regs->compat_sp;

        if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
                goto badframe;

        if (compat_restore_sigframe(regs, frame))
                goto badframe;

        return regs->regs[0];

badframe:
        if (show_unhandled_signals)
                pr_info_ratelimited("%s[%d]: bad frame in %s: pc=%08llx sp=%08llx\n",
                                    current->comm, task_pid_nr(current), __func__,
                                    regs->pc, regs->sp);
        force_sig(SIGSEGV, current);
        return 0;
}

asmlinkage int compat_sys_rt_sigreturn(struct pt_regs *regs)
{
        struct compat_rt_sigframe __user *frame;

        /* Always make any pending restarted system calls return -EINTR */
        current_thread_info()->restart_block.fn = do_no_restart_syscall;

        /*
         * Since we stacked the signal on a 64-bit boundary, 'sp' should
         * be 64-bit aligned here. If it's not, the user is trying to
         * mess with us.
         */
        if (regs->compat_sp & 7)
                goto badframe;

        frame = (struct compat_rt_sigframe __user *)regs->compat_sp;

        if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
                goto badframe;

        if (compat_restore_sigframe(regs, &frame->sig))
                goto badframe;

        if (compat_restore_altstack(&frame->sig.uc.uc_stack))
                goto badframe;

        return regs->regs[0];

badframe:
        if (show_unhandled_signals)
                pr_info_ratelimited("%s[%d]: bad frame in %s: pc=%08llx sp=%08llx\n",
                                    current->comm, task_pid_nr(current), __func__,
                                    regs->pc, regs->sp);
        force_sig(SIGSEGV, current);
        return 0;
}

static void __user *compat_get_sigframe(struct k_sigaction *ka,
                                        struct pt_regs *regs,
                                        int framesize)
{
        compat_ulong_t sp = regs->compat_sp;
        void __user *frame;

        /*
         * This is the X/Open sanctioned signal stack switching.
         */
        if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp))
                sp = current->sas_ss_sp + current->sas_ss_size;

        /*
         * ATPCS B01 mandates 8-byte alignment
         */
        frame = compat_ptr((compat_uptr_t)((sp - framesize) & ~7));

        /*
         * Check that we can actually write to the signal frame.
         */
        if (!access_ok(VERIFY_WRITE, frame, framesize))
                frame = NULL;

        return frame;
}

static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka,
                                compat_ulong_t __user *rc, void __user *frame,
                                int usig)
{
        compat_ulong_t handler = ptr_to_compat(ka->sa.sa_handler);
        compat_ulong_t retcode;
        compat_ulong_t spsr = regs->pstate & ~PSR_f;
        int thumb;

        /* Check if the handler is written for ARM or Thumb */
        thumb = handler & 1;

        if (thumb) {
                spsr |= COMPAT_PSR_T_BIT;
                spsr &= ~COMPAT_PSR_IT_MASK;
        } else {
                spsr &= ~COMPAT_PSR_T_BIT;
        }

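        /*
         * Without SA_RESTORER, point the return address at the sigreturn
         * trampolines in the AArch32 vectors page: idx selects the ARM
         * pair (0) or the Thumb word (2) in aarch32_sigret_code[], plus 3
         * for the rt_sigreturn variants, and the trailing "+ thumb" sets
         * the Thumb bit in the return address.
         */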
        if (ka->sa.sa_flags & SA_RESTORER) {
                retcode = ptr_to_compat(ka->sa.sa_restorer);
        } else {
                /* Set up sigreturn pointer */
                unsigned int idx = thumb << 1;

                if (ka->sa.sa_flags & SA_SIGINFO)
                        idx += 3;

                retcode = AARCH32_VECTORS_BASE +
                          AARCH32_KERN_SIGRET_CODE_OFFSET +
                          (idx << 2) + thumb;
        }

        regs->regs[0] = usig;
        regs->compat_sp = ptr_to_compat(frame);
        regs->compat_lr = retcode;
        regs->pc = handler;
        regs->pstate = spsr;
}

static int compat_setup_sigframe(struct compat_sigframe __user *sf,
                                 struct pt_regs *regs, sigset_t *set)
{
        struct compat_aux_sigframe __user *aux;
        int err = 0;

        __put_user_error(regs->regs[0], &sf->uc.uc_mcontext.arm_r0, err);
        __put_user_error(regs->regs[1], &sf->uc.uc_mcontext.arm_r1, err);
        __put_user_error(regs->regs[2], &sf->uc.uc_mcontext.arm_r2, err);
        __put_user_error(regs->regs[3], &sf->uc.uc_mcontext.arm_r3, err);
        __put_user_error(regs->regs[4], &sf->uc.uc_mcontext.arm_r4, err);
        __put_user_error(regs->regs[5], &sf->uc.uc_mcontext.arm_r5, err);
        __put_user_error(regs->regs[6], &sf->uc.uc_mcontext.arm_r6, err);
        __put_user_error(regs->regs[7], &sf->uc.uc_mcontext.arm_r7, err);
        __put_user_error(regs->regs[8], &sf->uc.uc_mcontext.arm_r8, err);
        __put_user_error(regs->regs[9], &sf->uc.uc_mcontext.arm_r9, err);
        __put_user_error(regs->regs[10], &sf->uc.uc_mcontext.arm_r10, err);
        __put_user_error(regs->regs[11], &sf->uc.uc_mcontext.arm_fp, err);
        __put_user_error(regs->regs[12], &sf->uc.uc_mcontext.arm_ip, err);
        __put_user_error(regs->compat_sp, &sf->uc.uc_mcontext.arm_sp, err);
        __put_user_error(regs->compat_lr, &sf->uc.uc_mcontext.arm_lr, err);
        __put_user_error(regs->pc, &sf->uc.uc_mcontext.arm_pc, err);
        __put_user_error(regs->pstate, &sf->uc.uc_mcontext.arm_cpsr, err);

        __put_user_error((compat_ulong_t)0, &sf->uc.uc_mcontext.trap_no, err);
        __put_user_error((compat_ulong_t)0, &sf->uc.uc_mcontext.error_code, err);
        __put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err);
        __put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);

        err |= put_sigset_t(&sf->uc.uc_sigmask, set);

        aux = (struct compat_aux_sigframe __user *) sf->uc.uc_regspace;

        if (err == 0)
                err |= compat_preserve_vfp_context(&aux->vfp);
        __put_user_error(0, &aux->end_magic, err);

        return err;
}

/*
 * 32-bit signal handling routines called from signal.c
 */
int compat_setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
                          sigset_t *set, struct pt_regs *regs)
{
        struct compat_rt_sigframe __user *frame;
        compat_stack_t stack;
        int err = 0;

        frame = compat_get_sigframe(ka, regs, sizeof(*frame));

        if (!frame)
                return 1;

        err |= copy_siginfo_to_user32(&frame->info, info);

        __put_user_error(0, &frame->sig.uc.uc_flags, err);
        __put_user_error(NULL, &frame->sig.uc.uc_link, err);

        err |= __compat_save_altstack(&frame->sig.uc.uc_stack, regs->compat_sp);

        err |= compat_setup_sigframe(&frame->sig, regs, set);

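        /*
         * For a SA_SIGINFO handler, r0-r2 carry the three arguments:
         * the signal number set up by compat_setup_return(), followed by
         * pointers to the compat siginfo and ucontext on the user stack.
         */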
        if (err == 0) {
                compat_setup_return(regs, ka, frame->sig.retcode, frame, usig);
                regs->regs[1] = (compat_ulong_t)(unsigned long)&frame->info;
                regs->regs[2] = (compat_ulong_t)(unsigned long)&frame->sig.uc;
        }

        return err;
}

int compat_setup_frame(int usig, struct k_sigaction *ka, sigset_t *set,
                       struct pt_regs *regs)
{
        struct compat_sigframe __user *frame;
        int err = 0;

        frame = compat_get_sigframe(ka, regs, sizeof(*frame));

        if (!frame)
                return 1;

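        /*
         * Non-RT frames carry the same uc_flags magic that the 32-bit
         * arch/arm signal code uses, rather than a real flags value.
         */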
        __put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err);

        err |= compat_setup_sigframe(frame, regs, set);
        if (err == 0)
                compat_setup_return(regs, ka, frame->retcode, frame, usig);

        return err;
}

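/*
 * In the AArch32 EABI the syscall number lives in r7 (see the trampoline
 * encodings above), so arranging for a syscall restart only requires
 * reloading r7 with __NR_compat_restart_syscall.
 */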
void compat_setup_restart_syscall(struct pt_regs *regs)
{
        regs->regs[7] = __NR_compat_restart_syscall;
}