/*
 * arch/mips/kernel/signal.c
 * (from commit: "MIPS: Use struct mips_abi offsets to save FP context")
 */
1 /*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1991, 1992 Linus Torvalds
7 * Copyright (C) 1994 - 2000 Ralf Baechle
8 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
9 * Copyright (C) 2014, Imagination Technologies Ltd.
10 */
11 #include <linux/cache.h>
12 #include <linux/context_tracking.h>
13 #include <linux/irqflags.h>
14 #include <linux/sched.h>
15 #include <linux/mm.h>
16 #include <linux/personality.h>
17 #include <linux/smp.h>
18 #include <linux/kernel.h>
19 #include <linux/signal.h>
20 #include <linux/errno.h>
21 #include <linux/wait.h>
22 #include <linux/ptrace.h>
23 #include <linux/unistd.h>
24 #include <linux/compiler.h>
25 #include <linux/syscalls.h>
26 #include <linux/uaccess.h>
27 #include <linux/tracehook.h>
28
29 #include <asm/abi.h>
30 #include <asm/asm.h>
31 #include <linux/bitops.h>
32 #include <asm/cacheflush.h>
33 #include <asm/fpu.h>
34 #include <asm/sim.h>
35 #include <asm/ucontext.h>
36 #include <asm/cpu-features.h>
37 #include <asm/war.h>
38 #include <asm/vdso.h>
39 #include <asm/dsp.h>
40 #include <asm/inst.h>
41
42 #include "signal-common.h"
43
44 static int (*save_fp_context)(void __user *sc);
45 static int (*restore_fp_context)(void __user *sc);
46
47 struct sigframe {
48 u32 sf_ass[4]; /* argument save space for o32 */
49 u32 sf_pad[2]; /* Was: signal trampoline */
50 struct sigcontext sf_sc;
51 sigset_t sf_mask;
52 };
53
54 struct rt_sigframe {
55 u32 rs_ass[4]; /* argument save space for o32 */
56 u32 rs_pad[2]; /* Was: signal trampoline */
57 struct siginfo rs_info;
58 struct ucontext rs_uc;
59 };
60
61 /*
62 * Thread saved context copy to/from a signal context presumed to be on the
63 * user stack, and therefore accessed with appropriate macros from uaccess.h.
64 */
65 static int copy_fp_to_sigcontext(void __user *sc)
66 {
67 struct mips_abi *abi = current->thread.abi;
68 uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
69 uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
70 int i;
71 int err = 0;
72
73 for (i = 0; i < NUM_FPU_REGS; i++) {
74 err |=
75 __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
76 &fpregs[i]);
77 }
78 err |= __put_user(current->thread.fpu.fcr31, csr);
79
80 return err;
81 }
82
83 static int copy_fp_from_sigcontext(void __user *sc)
84 {
85 struct mips_abi *abi = current->thread.abi;
86 uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
87 uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
88 int i;
89 int err = 0;
90 u64 fpr_val;
91
92 for (i = 0; i < NUM_FPU_REGS; i++) {
93 err |= __get_user(fpr_val, &fpregs[i]);
94 set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
95 }
96 err |= __get_user(current->thread.fpu.fcr31, csr);
97
98 return err;
99 }
100
101 /*
102 * Wrappers for the assembly _{save,restore}_fp_context functions.
103 */
104 static int save_hw_fp_context(void __user *sc)
105 {
106 struct mips_abi *abi = current->thread.abi;
107 uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
108 uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
109
110 return _save_fp_context(fpregs, csr);
111 }
112
113 static int restore_hw_fp_context(void __user *sc)
114 {
115 struct mips_abi *abi = current->thread.abi;
116 uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
117 uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
118
119 return _restore_fp_context(fpregs, csr);
120 }
121
122 /*
123 * Helper routines
124 */
125 static int protected_save_fp_context(void __user *sc)
126 {
127 struct mips_abi *abi = current->thread.abi;
128 uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
129 uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
130 int err;
131
132 /*
133 * EVA does not have userland equivalents of ldc1 or sdc1, so
134 * save to the kernel FP context & copy that to userland below.
135 */
136 if (config_enabled(CONFIG_EVA))
137 lose_fpu(1);
138
139 while (1) {
140 lock_fpu_owner();
141 if (is_fpu_owner()) {
142 err = save_fp_context(sc);
143 unlock_fpu_owner();
144 } else {
145 unlock_fpu_owner();
146 err = copy_fp_to_sigcontext(sc);
147 }
148 if (likely(!err))
149 break;
150 /* touch the sigcontext and try again */
151 err = __put_user(0, &fpregs[0]) |
152 __put_user(0, &fpregs[31]) |
153 __put_user(0, csr);
154 if (err)
155 break; /* really bad sigcontext */
156 }
157
158 return err;
159 }
160
161 static int protected_restore_fp_context(void __user *sc)
162 {
163 struct mips_abi *abi = current->thread.abi;
164 uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
165 uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
166 int err, tmp __maybe_unused;
167
168 /*
169 * EVA does not have userland equivalents of ldc1 or sdc1, so we
170 * disable the FPU here such that the code below simply copies to
171 * the kernel FP context.
172 */
173 if (config_enabled(CONFIG_EVA))
174 lose_fpu(0);
175
176 while (1) {
177 lock_fpu_owner();
178 if (is_fpu_owner()) {
179 err = restore_fp_context(sc);
180 unlock_fpu_owner();
181 } else {
182 unlock_fpu_owner();
183 err = copy_fp_from_sigcontext(sc);
184 }
185 if (likely(!err))
186 break;
187 /* touch the sigcontext and try again */
188 err = __get_user(tmp, &fpregs[0]) |
189 __get_user(tmp, &fpregs[31]) |
190 __get_user(tmp, csr);
191 if (err)
192 break; /* really bad sigcontext */
193 }
194
195 return err;
196 }
197
198 int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
199 {
200 int err = 0;
201 int i;
202 unsigned int used_math;
203
204 err |= __put_user(regs->cp0_epc, &sc->sc_pc);
205
206 err |= __put_user(0, &sc->sc_regs[0]);
207 for (i = 1; i < 32; i++)
208 err |= __put_user(regs->regs[i], &sc->sc_regs[i]);
209
210 #ifdef CONFIG_CPU_HAS_SMARTMIPS
211 err |= __put_user(regs->acx, &sc->sc_acx);
212 #endif
213 err |= __put_user(regs->hi, &sc->sc_mdhi);
214 err |= __put_user(regs->lo, &sc->sc_mdlo);
215 if (cpu_has_dsp) {
216 err |= __put_user(mfhi1(), &sc->sc_hi1);
217 err |= __put_user(mflo1(), &sc->sc_lo1);
218 err |= __put_user(mfhi2(), &sc->sc_hi2);
219 err |= __put_user(mflo2(), &sc->sc_lo2);
220 err |= __put_user(mfhi3(), &sc->sc_hi3);
221 err |= __put_user(mflo3(), &sc->sc_lo3);
222 err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
223 }
224
225 used_math = !!used_math();
226 err |= __put_user(used_math, &sc->sc_used_math);
227
228 if (used_math) {
229 /*
230 * Save FPU state to signal context. Signal handler
231 * will "inherit" current FPU state.
232 */
233 err |= protected_save_fp_context(sc);
234 }
235 return err;
236 }
237
238 int fpcsr_pending(unsigned int __user *fpcsr)
239 {
240 int err, sig = 0;
241 unsigned int csr, enabled;
242
243 err = __get_user(csr, fpcsr);
244 enabled = FPU_CSR_UNI_X | ((csr & FPU_CSR_ALL_E) << 5);
245 /*
246 * If the signal handler set some FPU exceptions, clear it and
247 * send SIGFPE.
248 */
249 if (csr & enabled) {
250 csr &= ~enabled;
251 err |= __put_user(csr, fpcsr);
252 sig = SIGFPE;
253 }
254 return err ?: sig;
255 }
256
257 static int
258 check_and_restore_fp_context(void __user *sc)
259 {
260 struct mips_abi *abi = current->thread.abi;
261 int err, sig;
262
263 err = sig = fpcsr_pending(sc + abi->off_sc_fpc_csr);
264 if (err > 0)
265 err = 0;
266 err |= protected_restore_fp_context(sc);
267 return err ?: sig;
268 }
269
270 int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
271 {
272 unsigned int used_math;
273 unsigned long treg;
274 int err = 0;
275 int i;
276
277 /* Always make any pending restarted system calls return -EINTR */
278 current->restart_block.fn = do_no_restart_syscall;
279
280 err |= __get_user(regs->cp0_epc, &sc->sc_pc);
281
282 #ifdef CONFIG_CPU_HAS_SMARTMIPS
283 err |= __get_user(regs->acx, &sc->sc_acx);
284 #endif
285 err |= __get_user(regs->hi, &sc->sc_mdhi);
286 err |= __get_user(regs->lo, &sc->sc_mdlo);
287 if (cpu_has_dsp) {
288 err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
289 err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
290 err |= __get_user(treg, &sc->sc_hi2); mthi2(treg);
291 err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg);
292 err |= __get_user(treg, &sc->sc_hi3); mthi3(treg);
293 err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
294 err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
295 }
296
297 for (i = 1; i < 32; i++)
298 err |= __get_user(regs->regs[i], &sc->sc_regs[i]);
299
300 err |= __get_user(used_math, &sc->sc_used_math);
301 conditional_used_math(used_math);
302
303 if (used_math) {
304 /* restore fpu context if we have used it before */
305 if (!err)
306 err = check_and_restore_fp_context(sc);
307 } else {
308 /* signal handler may have used FPU. Give it up. */
309 lose_fpu(0);
310 }
311
312 return err;
313 }
314
315 void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
316 size_t frame_size)
317 {
318 unsigned long sp;
319
320 /* Default to using normal stack */
321 sp = regs->regs[29];
322
323 /*
324 * FPU emulator may have it's own trampoline active just
325 * above the user stack, 16-bytes before the next lowest
326 * 16 byte boundary. Try to avoid trashing it.
327 */
328 sp -= 32;
329
330 sp = sigsp(sp, ksig);
331
332 return (void __user *)((sp - frame_size) & (ICACHE_REFILLS_WORKAROUND_WAR ? ~(cpu_icache_line_size()-1) : ALMASK));
333 }
334
335 /*
336 * Atomically swap in the new signal mask, and wait for a signal.
337 */
338
339 #ifdef CONFIG_TRAD_SIGNALS
340 SYSCALL_DEFINE1(sigsuspend, sigset_t __user *, uset)
341 {
342 return sys_rt_sigsuspend(uset, sizeof(sigset_t));
343 }
344 #endif
345
346 #ifdef CONFIG_TRAD_SIGNALS
347 SYSCALL_DEFINE3(sigaction, int, sig, const struct sigaction __user *, act,
348 struct sigaction __user *, oact)
349 {
350 struct k_sigaction new_ka, old_ka;
351 int ret;
352 int err = 0;
353
354 if (act) {
355 old_sigset_t mask;
356
357 if (!access_ok(VERIFY_READ, act, sizeof(*act)))
358 return -EFAULT;
359 err |= __get_user(new_ka.sa.sa_handler, &act->sa_handler);
360 err |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
361 err |= __get_user(mask, &act->sa_mask.sig[0]);
362 if (err)
363 return -EFAULT;
364
365 siginitset(&new_ka.sa.sa_mask, mask);
366 }
367
368 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
369
370 if (!ret && oact) {
371 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)))
372 return -EFAULT;
373 err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
374 err |= __put_user(old_ka.sa.sa_handler, &oact->sa_handler);
375 err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig);
376 err |= __put_user(0, &oact->sa_mask.sig[1]);
377 err |= __put_user(0, &oact->sa_mask.sig[2]);
378 err |= __put_user(0, &oact->sa_mask.sig[3]);
379 if (err)
380 return -EFAULT;
381 }
382
383 return ret;
384 }
385 #endif
386
387 #ifdef CONFIG_TRAD_SIGNALS
388 asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs)
389 {
390 struct sigframe __user *frame;
391 sigset_t blocked;
392 int sig;
393
394 frame = (struct sigframe __user *) regs.regs[29];
395 if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
396 goto badframe;
397 if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked)))
398 goto badframe;
399
400 set_current_blocked(&blocked);
401
402 sig = restore_sigcontext(&regs, &frame->sf_sc);
403 if (sig < 0)
404 goto badframe;
405 else if (sig)
406 force_sig(sig, current);
407
408 /*
409 * Don't let your children do this ...
410 */
411 __asm__ __volatile__(
412 "move\t$29, %0\n\t"
413 "j\tsyscall_exit"
414 :/* no outputs */
415 :"r" (&regs));
416 /* Unreached */
417
418 badframe:
419 force_sig(SIGSEGV, current);
420 }
421 #endif /* CONFIG_TRAD_SIGNALS */
422
423 asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
424 {
425 struct rt_sigframe __user *frame;
426 sigset_t set;
427 int sig;
428
429 frame = (struct rt_sigframe __user *) regs.regs[29];
430 if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
431 goto badframe;
432 if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set)))
433 goto badframe;
434
435 set_current_blocked(&set);
436
437 sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
438 if (sig < 0)
439 goto badframe;
440 else if (sig)
441 force_sig(sig, current);
442
443 if (restore_altstack(&frame->rs_uc.uc_stack))
444 goto badframe;
445
446 /*
447 * Don't let your children do this ...
448 */
449 __asm__ __volatile__(
450 "move\t$29, %0\n\t"
451 "j\tsyscall_exit"
452 :/* no outputs */
453 :"r" (&regs));
454 /* Unreached */
455
456 badframe:
457 force_sig(SIGSEGV, current);
458 }
459
460 #ifdef CONFIG_TRAD_SIGNALS
461 static int setup_frame(void *sig_return, struct ksignal *ksig,
462 struct pt_regs *regs, sigset_t *set)
463 {
464 struct sigframe __user *frame;
465 int err = 0;
466
467 frame = get_sigframe(ksig, regs, sizeof(*frame));
468 if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
469 return -EFAULT;
470
471 err |= setup_sigcontext(regs, &frame->sf_sc);
472 err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set));
473 if (err)
474 return -EFAULT;
475
476 /*
477 * Arguments to signal handler:
478 *
479 * a0 = signal number
480 * a1 = 0 (should be cause)
481 * a2 = pointer to struct sigcontext
482 *
483 * $25 and c0_epc point to the signal handler, $29 points to the
484 * struct sigframe.
485 */
486 regs->regs[ 4] = ksig->sig;
487 regs->regs[ 5] = 0;
488 regs->regs[ 6] = (unsigned long) &frame->sf_sc;
489 regs->regs[29] = (unsigned long) frame;
490 regs->regs[31] = (unsigned long) sig_return;
491 regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;
492
493 DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
494 current->comm, current->pid,
495 frame, regs->cp0_epc, regs->regs[31]);
496 return 0;
497 }
498 #endif
499
500 static int setup_rt_frame(void *sig_return, struct ksignal *ksig,
501 struct pt_regs *regs, sigset_t *set)
502 {
503 struct rt_sigframe __user *frame;
504 int err = 0;
505
506 frame = get_sigframe(ksig, regs, sizeof(*frame));
507 if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
508 return -EFAULT;
509
510 /* Create siginfo. */
511 err |= copy_siginfo_to_user(&frame->rs_info, &ksig->info);
512
513 /* Create the ucontext. */
514 err |= __put_user(0, &frame->rs_uc.uc_flags);
515 err |= __put_user(NULL, &frame->rs_uc.uc_link);
516 err |= __save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]);
517 err |= setup_sigcontext(regs, &frame->rs_uc.uc_mcontext);
518 err |= __copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set));
519
520 if (err)
521 return -EFAULT;
522
523 /*
524 * Arguments to signal handler:
525 *
526 * a0 = signal number
527 * a1 = 0 (should be cause)
528 * a2 = pointer to ucontext
529 *
530 * $25 and c0_epc point to the signal handler, $29 points to
531 * the struct rt_sigframe.
532 */
533 regs->regs[ 4] = ksig->sig;
534 regs->regs[ 5] = (unsigned long) &frame->rs_info;
535 regs->regs[ 6] = (unsigned long) &frame->rs_uc;
536 regs->regs[29] = (unsigned long) frame;
537 regs->regs[31] = (unsigned long) sig_return;
538 regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;
539
540 DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
541 current->comm, current->pid,
542 frame, regs->cp0_epc, regs->regs[31]);
543
544 return 0;
545 }
546
547 struct mips_abi mips_abi = {
548 #ifdef CONFIG_TRAD_SIGNALS
549 .setup_frame = setup_frame,
550 .signal_return_offset = offsetof(struct mips_vdso, signal_trampoline),
551 #endif
552 .setup_rt_frame = setup_rt_frame,
553 .rt_signal_return_offset =
554 offsetof(struct mips_vdso, rt_signal_trampoline),
555 .restart = __NR_restart_syscall,
556
557 .off_sc_fpregs = offsetof(struct sigcontext, sc_fpregs),
558 .off_sc_fpc_csr = offsetof(struct sigcontext, sc_fpc_csr),
559 .off_sc_used_math = offsetof(struct sigcontext, sc_used_math),
560 };
561
562 static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
563 {
564 sigset_t *oldset = sigmask_to_save();
565 int ret;
566 struct mips_abi *abi = current->thread.abi;
567 #ifdef CONFIG_CPU_MICROMIPS
568 void *vdso;
569 unsigned long tmp = (unsigned long)current->mm->context.vdso;
570
571 set_isa16_mode(tmp);
572 vdso = (void *)tmp;
573 #else
574 void *vdso = current->mm->context.vdso;
575 #endif
576
577 if (regs->regs[0]) {
578 switch(regs->regs[2]) {
579 case ERESTART_RESTARTBLOCK:
580 case ERESTARTNOHAND:
581 regs->regs[2] = EINTR;
582 break;
583 case ERESTARTSYS:
584 if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
585 regs->regs[2] = EINTR;
586 break;
587 }
588 /* fallthrough */
589 case ERESTARTNOINTR:
590 regs->regs[7] = regs->regs[26];
591 regs->regs[2] = regs->regs[0];
592 regs->cp0_epc -= 4;
593 }
594
595 regs->regs[0] = 0; /* Don't deal with this again. */
596 }
597
598 if (sig_uses_siginfo(&ksig->ka))
599 ret = abi->setup_rt_frame(vdso + abi->rt_signal_return_offset,
600 ksig, regs, oldset);
601 else
602 ret = abi->setup_frame(vdso + abi->signal_return_offset, ksig,
603 regs, oldset);
604
605 signal_setup_done(ret, ksig, 0);
606 }
607
608 static void do_signal(struct pt_regs *regs)
609 {
610 struct ksignal ksig;
611
612 if (get_signal(&ksig)) {
613 /* Whee! Actually deliver the signal. */
614 handle_signal(&ksig, regs);
615 return;
616 }
617
618 if (regs->regs[0]) {
619 switch (regs->regs[2]) {
620 case ERESTARTNOHAND:
621 case ERESTARTSYS:
622 case ERESTARTNOINTR:
623 regs->regs[2] = regs->regs[0];
624 regs->regs[7] = regs->regs[26];
625 regs->cp0_epc -= 4;
626 break;
627
628 case ERESTART_RESTARTBLOCK:
629 regs->regs[2] = current->thread.abi->restart;
630 regs->regs[7] = regs->regs[26];
631 regs->cp0_epc -= 4;
632 break;
633 }
634 regs->regs[0] = 0; /* Don't deal with this again. */
635 }
636
637 /*
638 * If there's no signal to deliver, we just put the saved sigmask
639 * back
640 */
641 restore_saved_sigmask();
642 }
643
644 /*
645 * notification of userspace execution resumption
646 * - triggered by the TIF_WORK_MASK flags
647 */
648 asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
649 __u32 thread_info_flags)
650 {
651 local_irq_enable();
652
653 user_exit();
654
655 /* deal with pending signal delivery */
656 if (thread_info_flags & _TIF_SIGPENDING)
657 do_signal(regs);
658
659 if (thread_info_flags & _TIF_NOTIFY_RESUME) {
660 clear_thread_flag(TIF_NOTIFY_RESUME);
661 tracehook_notify_resume(regs);
662 }
663
664 user_enter();
665 }
666
667 #ifdef CONFIG_SMP
668 static int smp_save_fp_context(void __user *sc)
669 {
670 return raw_cpu_has_fpu
671 ? save_hw_fp_context(sc)
672 : copy_fp_to_sigcontext(sc);
673 }
674
675 static int smp_restore_fp_context(void __user *sc)
676 {
677 return raw_cpu_has_fpu
678 ? restore_hw_fp_context(sc)
679 : copy_fp_from_sigcontext(sc);
680 }
681 #endif
682
683 static int signal_setup(void)
684 {
685 #ifdef CONFIG_SMP
686 /* For now just do the cpu_has_fpu check when the functions are invoked */
687 save_fp_context = smp_save_fp_context;
688 restore_fp_context = smp_restore_fp_context;
689 #else
690 if (cpu_has_fpu) {
691 save_fp_context = save_hw_fp_context;
692 restore_fp_context = restore_hw_fp_context;
693 } else {
694 save_fp_context = copy_fp_to_sigcontext;
695 restore_fp_context = copy_fp_from_sigcontext;
696 }
697 #endif /* CONFIG_SMP */
698
699 return 0;
700 }
701
702 arch_initcall(signal_setup);