Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | |
3 | * License. See the file "COPYING" in the main directory of this archive | |
4 | * for more details. | |
5 | * | |
6 | * Copyright (C) 1991, 1992 Linus Torvalds | |
7 | * Copyright (C) 1994 - 2000 Ralf Baechle | |
8 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. | |
ca750649 | 9 | * Copyright (C) 2014, Imagination Technologies Ltd. |
1da177e4 | 10 | */ |
02416dcf | 11 | #include <linux/cache.h> |
c3fc5cd5 | 12 | #include <linux/context_tracking.h> |
1f717929 | 13 | #include <linux/irqflags.h> |
1da177e4 LT |
14 | #include <linux/sched.h> |
15 | #include <linux/mm.h> | |
16 | #include <linux/personality.h> | |
17 | #include <linux/smp.h> | |
1da177e4 LT |
18 | #include <linux/kernel.h> |
19 | #include <linux/signal.h> | |
20 | #include <linux/errno.h> | |
21 | #include <linux/wait.h> | |
22 | #include <linux/ptrace.h> | |
23 | #include <linux/unistd.h> | |
40e084a5 | 24 | #include <linux/uprobes.h> |
1da177e4 | 25 | #include <linux/compiler.h> |
dbda6ac0 | 26 | #include <linux/syscalls.h> |
faea6234 | 27 | #include <linux/uaccess.h> |
733e5e4b | 28 | #include <linux/tracehook.h> |
1da177e4 | 29 | |
e50c0a8f | 30 | #include <asm/abi.h> |
1da177e4 LT |
31 | #include <asm/asm.h> |
32 | #include <linux/bitops.h> | |
33 | #include <asm/cacheflush.h> | |
34 | #include <asm/fpu.h> | |
35 | #include <asm/sim.h> | |
1da177e4 LT |
36 | #include <asm/ucontext.h> |
37 | #include <asm/cpu-features.h> | |
02416dcf | 38 | #include <asm/war.h> |
d814c28c | 39 | #include <asm/vdso.h> |
b81947c6 | 40 | #include <asm/dsp.h> |
01be057b | 41 | #include <asm/inst.h> |
bf82cb30 | 42 | #include <asm/msa.h> |
1da177e4 LT |
43 | |
44 | #include "signal-common.h" | |
45 | ||
/*
 * FP context save/restore implementations, selected once at boot by
 * signal_setup() (see below) according to CONFIG_SMP and cpu_has_fpu.
 */
static int (*save_fp_context)(void __user *sc);
static int (*restore_fp_context)(void __user *sc);
137f6f3e | 48 | |
/*
 * Stack frame pushed for a traditional (non-RT) signal.  From sf_sc
 * onwards the layout matches struct ucontext from its uc_mcontext field
 * onwards (asserted by the BUILD_BUG_ON in signal_setup()), which lets
 * sc_to_extcontext() treat both frame types uniformly.
 */
struct sigframe {
	u32 sf_ass[4];		/* argument save space for o32 */
	u32 sf_pad[2];		/* Was: signal trampoline */

	/* Matches struct ucontext from its uc_mcontext field onwards */
	struct sigcontext sf_sc;
	sigset_t sf_mask;
	unsigned long long sf_extcontext[0];	/* optional extended context records */
};
58 | ||
/* Stack frame pushed for an RT signal: siginfo followed by a full ucontext. */
struct rt_sigframe {
	u32 rs_ass[4];		/* argument save space for o32 */
	u32 rs_pad[2];		/* Was: signal trampoline */
	struct siginfo rs_info;
	struct ucontext rs_uc;
};
65 | ||
/*
 * Thread saved context copy to/from a signal context presumed to be on the
 * user stack, and therefore accessed with appropriate macros from uaccess.h.
 */
static int copy_fp_to_sigcontext(void __user *sc)
{
	struct mips_abi *abi = current->thread.abi;
	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
	int i;
	int err = 0;
	/* With 32-bit FP registers only every second FPR is copied. */
	int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;

	for (i = 0; i < NUM_FPU_REGS; i += inc) {
		err |=
		    __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
			       &fpregs[i]);
	}
	err |= __put_user(current->thread.fpu.fcr31, csr);

	return err;	/* non-zero if any user access faulted */
}
88 | ||
/*
 * Inverse of copy_fp_to_sigcontext(): copy FP state from the user
 * sigcontext into the thread's saved FPU context.
 */
static int copy_fp_from_sigcontext(void __user *sc)
{
	struct mips_abi *abi = current->thread.abi;
	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
	int i;
	int err = 0;
	/* With 32-bit FP registers only every second FPR is copied. */
	int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
	u64 fpr_val;

	for (i = 0; i < NUM_FPU_REGS; i += inc) {
		err |= __get_user(fpr_val, &fpregs[i]);
		set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
	}
	err |= __get_user(current->thread.fpu.fcr31, csr);

	return err;	/* non-zero if any user access faulted */
}
107 | ||
2db9ca0a PB |
108 | /* |
109 | * Wrappers for the assembly _{save,restore}_fp_context functions. | |
110 | */ | |
111 | static int save_hw_fp_context(void __user *sc) | |
112 | { | |
113 | struct mips_abi *abi = current->thread.abi; | |
114 | uint64_t __user *fpregs = sc + abi->off_sc_fpregs; | |
115 | uint32_t __user *csr = sc + abi->off_sc_fpc_csr; | |
116 | ||
117 | return _save_fp_context(fpregs, csr); | |
118 | } | |
119 | ||
120 | static int restore_hw_fp_context(void __user *sc) | |
121 | { | |
122 | struct mips_abi *abi = current->thread.abi; | |
123 | uint64_t __user *fpregs = sc + abi->off_sc_fpregs; | |
124 | uint32_t __user *csr = sc + abi->off_sc_fpc_csr; | |
125 | ||
126 | return _restore_fp_context(fpregs, csr); | |
127 | } | |
128 | ||
/*
 * Extended context handling.
 */

/*
 * Map a sigcontext pointer to the extended context area that follows it.
 * Valid for both frame layouts: signal_setup()'s BUILD_BUG_ON asserts
 * that sigframe and rt_sigframe use the same sigcontext-to-extcontext
 * offset.
 */
static inline void __user *sc_to_extcontext(void __user *sc)
{
	struct ucontext __user *uc;

	/*
	 * We can just pretend the sigcontext is always embedded in a struct
	 * ucontext here, because the offset from sigcontext to extended
	 * context is the same in the struct sigframe case.
	 */
	uc = container_of(sc, struct ucontext, uc_mcontext);
	return &uc->uc_extcontext;
}
145 | ||
/*
 * Save live MSA vector state to buf as an extended context record.
 * Returns the record size in bytes, 0 if there is no live MSA context,
 * or -EFAULT if a user access faulted.
 */
static int save_msa_extcontext(void __user *buf)
{
	struct msa_extcontext __user *msa = buf;
	uint64_t val;
	int i, err;

	if (!thread_msa_context_live())
		return 0;	/* nothing to save */

	/*
	 * Ensure that we can't lose the live MSA context between checking
	 * for it & writing it to memory.
	 */
	preempt_disable();

	if (is_msa_enabled()) {
		/*
		 * There are no EVA versions of the vector register load/store
		 * instructions, so MSA context has to be saved to kernel memory
		 * and then copied to user memory. The save to kernel memory
		 * should already have been done when handling scalar FP
		 * context.
		 */
		BUG_ON(config_enabled(CONFIG_EVA));

		err = __put_user(read_msa_csr(), &msa->csr);
		err |= _save_msa_all_upper(&msa->wr);

		preempt_enable();
	} else {
		/* Context already lives in memory; safe to be preempted. */
		preempt_enable();

		err = __put_user(current->thread.fpu.msacsr, &msa->csr);

		/* Upper 64 bits of each vector register are kept in the FPU struct. */
		for (i = 0; i < NUM_FPU_REGS; i++) {
			val = get_fpr64(&current->thread.fpu.fpr[i], 1);
			err |= __put_user(val, &msa->wr[i]);
		}
	}

	err |= __put_user(MSA_EXTCONTEXT_MAGIC, &msa->ext.magic);
	err |= __put_user(sizeof(*msa), &msa->ext.size);

	return err ? -EFAULT : sizeof(*msa);
}
191 | ||
/*
 * Restore MSA vector state from an extended context record of the given
 * size.  Returns 0, -EINVAL for a bad record size, or a fault error.
 */
static int restore_msa_extcontext(void __user *buf, unsigned int size)
{
	struct msa_extcontext __user *msa = buf;
	unsigned long long val;
	unsigned int csr;
	int i, err;

	if (size != sizeof(*msa))
		return -EINVAL;

	err = get_user(csr, &msa->csr);
	if (err)
		return err;

	preempt_disable();

	if (is_msa_enabled()) {
		/*
		 * There are no EVA versions of the vector register load/store
		 * instructions, so MSA context has to be copied to kernel
		 * memory and later loaded to registers. The same is true of
		 * scalar FP context, so FPU & MSA should have already been
		 * disabled whilst handling scalar FP context.
		 */
		BUG_ON(config_enabled(CONFIG_EVA));

		write_msa_csr(csr);
		err |= _restore_msa_all_upper(&msa->wr);
		preempt_enable();
	} else {
		/* Context is restored to memory, not hardware. */
		preempt_enable();

		current->thread.fpu.msacsr = csr;

		for (i = 0; i < NUM_FPU_REGS; i++) {
			err |= __get_user(val, &msa->wr[i]);
			set_fpr64(&current->thread.fpu.fpr[i], 1, val);
		}
	}

	return err;
}
234 | ||
/*
 * Write all live extended context records to buf, terminated by an
 * END_EXTCONTEXT_MAGIC marker.  Returns the total number of bytes
 * written (0 when there is nothing to save) or a negative error.
 */
static int save_extcontext(void __user *buf)
{
	int sz;

	sz = save_msa_extcontext(buf);
	if (sz < 0)
		return sz;
	buf += sz;

	/* If no context was saved then trivially return */
	if (!sz)
		return 0;

	/* Write the end marker */
	if (__put_user(END_EXTCONTEXT_MAGIC, (u32 *)buf))
		return -EFAULT;

	sz += sizeof(((struct extcontext *)NULL)->magic);
	return sz;
}
255 | ||
/*
 * Walk the extended context records at buf, dispatching each to its
 * restore handler, until the END_EXTCONTEXT_MAGIC marker is reached.
 * Returns 0 on success, -EINVAL for an unknown record, or a fault error.
 */
static int restore_extcontext(void __user *buf)
{
	struct extcontext ext;
	int err;

	while (1) {
		err = __get_user(ext.magic, (unsigned int *)buf);
		if (err)
			return err;

		if (ext.magic == END_EXTCONTEXT_MAGIC)
			return 0;

		err = __get_user(ext.size, (unsigned int *)(buf
			+ offsetof(struct extcontext, size)));
		if (err)
			return err;

		switch (ext.magic) {
		case MSA_EXTCONTEXT_MAGIC:
			err = restore_msa_extcontext(buf, ext.size);
			break;

		default:
			err = -EINVAL;	/* unrecognised record type */
			break;
		}

		if (err)
			return err;

		/* ext.size covers the whole record, header included. */
		buf += ext.size;
	}
}
290 | ||
/*
 * Helper routines
 */

/*
 * Save the task's FP and extended context to the user sigcontext and
 * record in sc_used_math which parts were written.  A faulting user
 * access is retried once after touching the sigcontext; only a
 * persistent fault is returned as an error.
 */
int protected_save_fp_context(void __user *sc)
{
	struct mips_abi *abi = current->thread.abi;
	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
	uint32_t __user *used_math = sc + abi->off_sc_used_math;
	unsigned int used, ext_sz;
	int err;

	used = used_math() ? USED_FP : 0;
	if (!used)
		goto fp_done;

	if (!test_thread_flag(TIF_32BIT_FPREGS))
		used |= USED_FR1;
	if (test_thread_flag(TIF_HYBRID_FPREGS))
		used |= USED_HYBRID_FPRS;

	/*
	 * EVA does not have userland equivalents of ldc1 or sdc1, so
	 * save to the kernel FP context & copy that to userland below.
	 */
	if (config_enabled(CONFIG_EVA))
		lose_fpu(1);

	while (1) {
		lock_fpu_owner();
		if (is_fpu_owner()) {
			/* Context is live in the FPU: save it directly. */
			err = save_fp_context(sc);
			unlock_fpu_owner();
		} else {
			unlock_fpu_owner();
			err = copy_fp_to_sigcontext(sc);
		}
		if (likely(!err))
			break;
		/* touch the sigcontext and try again */
		err = __put_user(0, &fpregs[0]) |
			__put_user(0, &fpregs[31]) |
			__put_user(0, csr);
		if (err)
			return err;	/* really bad sigcontext */
	}

fp_done:
	ext_sz = err = save_extcontext(sc_to_extcontext(sc));
	if (err < 0)
		return err;
	used |= ext_sz ? USED_EXTCONTEXT : 0;

	return __put_user(used, used_math);
}
346 | ||
/*
 * Restore FP and extended context from the user sigcontext, honouring
 * the sc_used_math flags written by protected_save_fp_context().
 * Returns 0, a negative error, or a positive signal number (SIGFPE via
 * fpcsr_pending()) that the caller must raise.
 */
int protected_restore_fp_context(void __user *sc)
{
	struct mips_abi *abi = current->thread.abi;
	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
	uint32_t __user *used_math = sc + abi->off_sc_used_math;
	unsigned int used;
	int err, sig = 0, tmp __maybe_unused;

	err = __get_user(used, used_math);
	conditional_used_math(used & USED_FP);

	/*
	 * The signal handler may have used FPU; give it up if the program
	 * doesn't want it following sigreturn.
	 */
	if (err || !(used & USED_FP))
		lose_fpu(0);
	if (err)
		return err;
	if (!(used & USED_FP))
		goto fp_done;

	err = sig = fpcsr_pending(csr);
	if (err < 0)
		return err;

	/*
	 * EVA does not have userland equivalents of ldc1 or sdc1, so we
	 * disable the FPU here such that the code below simply copies to
	 * the kernel FP context.
	 */
	if (config_enabled(CONFIG_EVA))
		lose_fpu(0);

	while (1) {
		lock_fpu_owner();
		if (is_fpu_owner()) {
			err = restore_fp_context(sc);
			unlock_fpu_owner();
		} else {
			unlock_fpu_owner();
			err = copy_fp_from_sigcontext(sc);
		}
		if (likely(!err))
			break;
		/* touch the sigcontext and try again */
		err = __get_user(tmp, &fpregs[0]) |
			__get_user(tmp, &fpregs[31]) |
			__get_user(tmp, csr);
		if (err)
			break;	/* really bad sigcontext */
	}

fp_done:
	if (used & USED_EXTCONTEXT)
		err |= restore_extcontext(sc_to_extcontext(sc));

	return err ?: sig;	/* 0, error, or a pending SIGFPE */
}
407 | ||
/*
 * Capture the interrupted CPU state (pc, GPRs, hi/lo, optional ACX and
 * DSP state, FP state) into the user-space sigcontext.  Returns non-zero
 * if any user access faulted.
 */
int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
	int err = 0;
	int i;

	err |= __put_user(regs->cp0_epc, &sc->sc_pc);

	err |= __put_user(0, &sc->sc_regs[0]);	/* $zero is hardwired to 0 */
	for (i = 1; i < 32; i++)
		err |= __put_user(regs->regs[i], &sc->sc_regs[i]);

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	err |= __put_user(regs->acx, &sc->sc_acx);
#endif
	err |= __put_user(regs->hi, &sc->sc_mdhi);
	err |= __put_user(regs->lo, &sc->sc_mdlo);
	if (cpu_has_dsp) {
		/* Additional hi/lo pairs and control state of the DSP ASE. */
		err |= __put_user(mfhi1(), &sc->sc_hi1);
		err |= __put_user(mflo1(), &sc->sc_lo1);
		err |= __put_user(mfhi2(), &sc->sc_hi2);
		err |= __put_user(mflo2(), &sc->sc_lo2);
		err |= __put_user(mfhi3(), &sc->sc_hi3);
		err |= __put_user(mflo3(), &sc->sc_lo3);
		err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
	}


	/*
	 * Save FPU state to signal context. Signal handler
	 * will "inherit" current FPU state.
	 */
	err |= protected_save_fp_context(sc);

	return err;
}
443 | ||
/* Upper bound, in bytes, on the extended context for the current task. */
static size_t extcontext_max_size(void)
{
	size_t sz = 0;

	/*
	 * The assumption here is that between this point & the point at which
	 * the extended context is saved the size of the context should only
	 * ever be able to shrink (if the task is preempted), but never grow.
	 * That is, what this function returns is an upper bound on the size of
	 * the extended context for the current task at the current time.
	 */

	if (thread_msa_context_live())
		sz += sizeof(struct msa_extcontext);

	/* If any context is saved then we'll append the end marker */
	if (sz)
		sz += sizeof(((struct extcontext *)NULL)->magic);

	return sz;
}
465 | ||
/*
 * Check a user-supplied FP control/status word for exception cause bits
 * whose enables are set (plus the unimplemented-operation bit, which has
 * no enable); clear any found and report SIGFPE.  Returns a negative
 * error on fault, otherwise 0 or SIGFPE.
 */
int fpcsr_pending(unsigned int __user *fpcsr)
{
	int err, sig = 0;
	unsigned int csr, enabled;

	err = __get_user(csr, fpcsr);
	/* Cause bits sit 5 bits above their corresponding enable bits. */
	enabled = FPU_CSR_UNI_X | ((csr & FPU_CSR_ALL_E) << 5);
	/*
	 * If the signal handler set some FPU exceptions, clear it and
	 * send SIGFPE.
	 */
	if (csr & enabled) {
		csr &= ~enabled;
		err |= __put_user(csr, fpcsr);
		sig = SIGFPE;
	}
	return err ?: sig;
}
484 | ||
/*
 * Reload CPU state from a user sigcontext (inverse of setup_sigcontext()).
 * Returns 0, a negative error, or a positive signal number that the
 * caller must raise (see protected_restore_fp_context()).
 */
int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
	unsigned long treg;
	int err = 0;
	int i;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	err |= __get_user(regs->cp0_epc, &sc->sc_pc);

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	err |= __get_user(regs->acx, &sc->sc_acx);
#endif
	err |= __get_user(regs->hi, &sc->sc_mdhi);
	err |= __get_user(regs->lo, &sc->sc_mdlo);
	if (cpu_has_dsp) {
		/* Write each DSP hi/lo pair and the control mask back to hardware. */
		err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
		err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
		err |= __get_user(treg, &sc->sc_hi2); mthi2(treg);
		err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg);
		err |= __get_user(treg, &sc->sc_hi3); mthi3(treg);
		err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
		err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
	}

	for (i = 1; i < 32; i++)
		err |= __get_user(regs->regs[i], &sc->sc_regs[i]);

	return err ?: protected_restore_fp_context(sc);
}
516 | ||
/*
 * Pick a suitably-aligned location on the user stack for a signal frame
 * of the given size, honouring SA_ONSTACK via sigsp() and leaving room
 * for any extended context.
 */
void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
			  size_t frame_size)
{
	unsigned long sp;

	/* Leave space for potential extended context */
	frame_size += extcontext_max_size();

	/* Default to using normal stack */
	sp = regs->regs[29];

	/*
	 * FPU emulator may have it's own trampoline active just
	 * above the user stack, 16-bytes before the next lowest
	 * 16 byte boundary.  Try to avoid trashing it.
	 */
	sp -= 32;

	sp = sigsp(sp, ksig);

	/* Align to an i-cache line when the refill workaround is active. */
	return (void __user *)((sp - frame_size) & (ICACHE_REFILLS_WORKAROUND_WAR ? ~(cpu_icache_line_size()-1) : ALMASK));
}
539 | ||
/*
 * Atomically swap in the new signal mask, and wait for a signal.
 */

#ifdef CONFIG_TRAD_SIGNALS
SYSCALL_DEFINE1(sigsuspend, sigset_t __user *, uset)
{
	/* The classic call is just rt_sigsuspend with a fixed mask size. */
	return sys_rt_sigsuspend(uset, sizeof(sigset_t));
}
#endif
550 | ||
#ifdef CONFIG_TRAD_SIGNALS
/*
 * Traditional sigaction(2): marshal the old-style struct sigaction in
 * user memory to/from a struct k_sigaction around do_sigaction().
 */
SYSCALL_DEFINE3(sigaction, int, sig, const struct sigaction __user *, act,
	struct sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	int err = 0;

	if (act) {
		old_sigset_t mask;

		if (!access_ok(VERIFY_READ, act, sizeof(*act)))
			return -EFAULT;
		err |= __get_user(new_ka.sa.sa_handler, &act->sa_handler);
		err |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
		err |= __get_user(mask, &act->sa_mask.sig[0]);
		if (err)
			return -EFAULT;

		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)))
			return -EFAULT;
		err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
		err |= __put_user(old_ka.sa.sa_handler, &oact->sa_handler);
		err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig);
		/* The old ABI only carries one word of mask; zero the rest. */
		err |= __put_user(0, &oact->sa_mask.sig[1]);
		err |= __put_user(0, &oact->sa_mask.sig[2]);
		err |= __put_user(0, &oact->sa_mask.sig[3]);
		if (err)
			return -EFAULT;
	}

	return ret;
}
#endif
591 | ||
#ifdef CONFIG_TRAD_SIGNALS
/*
 * sigreturn(2) for traditional signals: restore the blocked mask and CPU
 * context from the sigframe pushed by setup_frame() (found at the user
 * stack pointer), then jump back to user mode via syscall_exit.  Does
 * not return on success; a bad frame raises SIGSEGV.
 */
asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs)
{
	struct sigframe __user *frame;
	sigset_t blocked;
	int sig;

	frame = (struct sigframe __user *) regs.regs[29];
	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked)))
		goto badframe;

	set_current_blocked(&blocked);

	sig = restore_sigcontext(&regs, &frame->sf_sc);
	if (sig < 0)
		goto badframe;
	else if (sig)
		force_sig(sig, current);	/* e.g. pending SIGFPE */

	/*
	 * Don't let your children do this ...
	 */
	__asm__ __volatile__(
		"move\t$29, %0\n\t"
		"j\tsyscall_exit"
		:/* no outputs */
		:"r" (&regs));
	/* Unreached */

badframe:
	force_sig(SIGSEGV, current);
}
#endif /* CONFIG_TRAD_SIGNALS */
1da177e4 | 627 | |
/*
 * rt_sigreturn(2): restore the signal mask, CPU context and alternate
 * signal stack from the rt_sigframe pushed by setup_rt_frame(), then
 * jump back to user mode via syscall_exit.  Does not return on success;
 * a bad frame raises SIGSEGV.
 */
asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
{
	struct rt_sigframe __user *frame;
	sigset_t set;
	int sig;

	frame = (struct rt_sigframe __user *) regs.regs[29];
	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set)))
		goto badframe;

	set_current_blocked(&set);

	sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
	if (sig < 0)
		goto badframe;
	else if (sig)
		force_sig(sig, current);	/* e.g. pending SIGFPE */

	if (restore_altstack(&frame->rs_uc.uc_stack))
		goto badframe;

	/*
	 * Don't let your children do this ...
	 */
	__asm__ __volatile__(
		"move\t$29, %0\n\t"
		"j\tsyscall_exit"
		:/* no outputs */
		:"r" (&regs));
	/* Unreached */

badframe:
	force_sig(SIGSEGV, current);
}
664 | ||
#ifdef CONFIG_TRAD_SIGNALS
/*
 * Push a traditional signal frame on the user stack and point the user
 * registers at the handler.  Returns 0 on success or -EFAULT.
 */
static int setup_frame(void *sig_return, struct ksignal *ksig,
		       struct pt_regs *regs, sigset_t *set)
{
	struct sigframe __user *frame;
	int err = 0;

	frame = get_sigframe(ksig, regs, sizeof(*frame));
	if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
		return -EFAULT;

	err |= setup_sigcontext(regs, &frame->sf_sc);
	err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set));
	if (err)
		return -EFAULT;

	/*
	 * Arguments to signal handler:
	 *
	 *   a0 = signal number
	 *   a1 = 0 (should be cause)
	 *   a2 = pointer to struct sigcontext
	 *
	 * $25 and c0_epc point to the signal handler, $29 points to the
	 * struct sigframe.
	 */
	regs->regs[ 4] = ksig->sig;
	regs->regs[ 5] = 0;
	regs->regs[ 6] = (unsigned long) &frame->sf_sc;
	regs->regs[29] = (unsigned long) frame;
	regs->regs[31] = (unsigned long) sig_return;	/* ra = return trampoline */
	regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;

	DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
	       current->comm, current->pid,
	       frame, regs->cp0_epc, regs->regs[31]);
	return 0;
}
#endif
704 | ||
/*
 * Push an RT signal frame (siginfo + ucontext) on the user stack and
 * point the user registers at the handler.  Returns 0 or -EFAULT.
 */
static int setup_rt_frame(void *sig_return, struct ksignal *ksig,
			  struct pt_regs *regs, sigset_t *set)
{
	struct rt_sigframe __user *frame;
	int err = 0;

	frame = get_sigframe(ksig, regs, sizeof(*frame));
	if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
		return -EFAULT;

	/* Create siginfo.  */
	err |= copy_siginfo_to_user(&frame->rs_info, &ksig->info);

	/* Create the ucontext.	 */
	err |= __put_user(0, &frame->rs_uc.uc_flags);
	err |= __put_user(NULL, &frame->rs_uc.uc_link);
	err |= __save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]);
	err |= setup_sigcontext(regs, &frame->rs_uc.uc_mcontext);
	err |= __copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set));

	if (err)
		return -EFAULT;

	/*
	 * Arguments to signal handler:
	 *
	 *   a0 = signal number
	 *   a1 = 0 (should be cause)
	 *   a2 = pointer to ucontext
	 *
	 * $25 and c0_epc point to the signal handler, $29 points to
	 * the struct rt_sigframe.
	 */
	regs->regs[ 4] = ksig->sig;
	regs->regs[ 5] = (unsigned long) &frame->rs_info;
	regs->regs[ 6] = (unsigned long) &frame->rs_uc;
	regs->regs[29] = (unsigned long) frame;
	regs->regs[31] = (unsigned long) sig_return;	/* ra = return trampoline */
	regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;

	DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
	       current->comm, current->pid,
	       frame, regs->cp0_epc, regs->regs[31]);

	return 0;
}
751 | ||
/*
 * ABI descriptor for native processes: frame setup routines, VDSO
 * trampoline offsets, the restart syscall number, and the sigcontext
 * field offsets used by the protected_{save,restore}_fp_context paths.
 */
struct mips_abi mips_abi = {
#ifdef CONFIG_TRAD_SIGNALS
	.setup_frame = setup_frame,
	.signal_return_offset = offsetof(struct mips_vdso, signal_trampoline),
#endif
	.setup_rt_frame = setup_rt_frame,
	.rt_signal_return_offset =
		offsetof(struct mips_vdso, rt_signal_trampoline),
	.restart = __NR_restart_syscall,

	.off_sc_fpregs = offsetof(struct sigcontext, sc_fpregs),
	.off_sc_fpc_csr = offsetof(struct sigcontext, sc_fpc_csr),
	.off_sc_used_math = offsetof(struct sigcontext, sc_used_math),
};
766 | ||
/*
 * Deliver one signal: fix up syscall restarting, build the appropriate
 * (RT or traditional) signal frame via the thread's ABI, and report the
 * outcome with signal_setup_done().
 */
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	sigset_t *oldset = sigmask_to_save();
	int ret;
	struct mips_abi *abi = current->thread.abi;
#ifdef CONFIG_CPU_MICROMIPS
	void *vdso;
	unsigned long tmp = (unsigned long)current->mm->context.vdso;

	/* Tag the VDSO trampoline address for the 16-bit (microMIPS) ISA mode. */
	set_isa16_mode(tmp);
	vdso = (void *)tmp;
#else
	void *vdso = current->mm->context.vdso;
#endif

	if (regs->regs[0]) {
		/* Interrupted a syscall: decide how it should resume. */
		switch(regs->regs[2]) {
		case ERESTART_RESTARTBLOCK:
		case ERESTARTNOHAND:
			regs->regs[2] = EINTR;
			break;
		case ERESTARTSYS:
			if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
				regs->regs[2] = EINTR;
				break;
			}
		/* fallthrough */
		case ERESTARTNOINTR:
			regs->regs[7] = regs->regs[26];
			regs->regs[2] = regs->regs[0];
			regs->cp0_epc -= 4;	/* re-execute the syscall instruction */
		}

		regs->regs[0] = 0;		/* Don't deal with this again.	*/
	}

	if (sig_uses_siginfo(&ksig->ka))
		ret = abi->setup_rt_frame(vdso + abi->rt_signal_return_offset,
					  ksig, regs, oldset);
	else
		ret = abi->setup_frame(vdso + abi->signal_return_offset, ksig,
				       regs, oldset);

	signal_setup_done(ret, ksig, 0);
}
812 | ||
/*
 * Deliver a pending signal if there is one; otherwise handle syscall
 * restarting and put the saved signal mask back.
 */
static void do_signal(struct pt_regs *regs)
{
	struct ksignal ksig;

	if (get_signal(&ksig)) {
		/* Whee!  Actually deliver the signal.	*/
		handle_signal(&ksig, regs);
		return;
	}

	/* No handler ran: restart the interrupted syscall if required. */
	if (regs->regs[0]) {
		switch (regs->regs[2]) {
		case ERESTARTNOHAND:
		case ERESTARTSYS:
		case ERESTARTNOINTR:
			regs->regs[2] = regs->regs[0];
			regs->regs[7] = regs->regs[26];
			regs->cp0_epc -= 4;
			break;

		case ERESTART_RESTARTBLOCK:
			/* Resume via the ABI's restart_syscall number. */
			regs->regs[2] = current->thread.abi->restart;
			regs->regs[7] = regs->regs[26];
			regs->cp0_epc -= 4;
			break;
		}
		regs->regs[0] = 0;	/* Don't deal with this again.	*/
	}

	/*
	 * If there's no signal to deliver, we just put the saved sigmask
	 * back
	 */
	restore_saved_sigmask();
}
848 | ||
/*
 * notification of userspace execution resumption
 * - triggered by the TIF_WORK_MASK flags
 */
asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
	__u32 thread_info_flags)
{
	local_irq_enable();

	user_exit();	/* context tracking: entering kernel work */

	if (thread_info_flags & _TIF_UPROBE)
		uprobe_notify_resume(regs);

	/* deal with pending signal delivery */
	if (thread_info_flags & _TIF_SIGPENDING)
		do_signal(regs);

	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(regs);
	}

	user_enter();	/* context tracking: back to user mode */
}
#ifdef CONFIG_SMP
/* Choose hardware or software FP context handling per the current CPU. */
static int smp_save_fp_context(void __user *sc)
{
	return raw_cpu_has_fpu
	       ? save_hw_fp_context(sc)
	       : copy_fp_to_sigcontext(sc);
}

static int smp_restore_fp_context(void __user *sc)
{
	return raw_cpu_has_fpu
	       ? restore_hw_fp_context(sc)
	       : copy_fp_from_sigcontext(sc);
}
#endif

/*
 * Boot-time selection of the FP context save/restore implementations,
 * plus a compile-time sanity check of the signal frame layouts.
 */
static int signal_setup(void)
{
	/*
	 * The offset from sigcontext to extended context should be the same
	 * regardless of the type of signal, such that userland can always know
	 * where to look if it wishes to find the extended context structures.
	 */
	BUILD_BUG_ON((offsetof(struct sigframe, sf_extcontext) -
		      offsetof(struct sigframe, sf_sc)) !=
		     (offsetof(struct rt_sigframe, rs_uc.uc_extcontext) -
		      offsetof(struct rt_sigframe, rs_uc.uc_mcontext)));

#ifdef CONFIG_SMP
	/* For now just do the cpu_has_fpu check when the functions are invoked */
	save_fp_context = smp_save_fp_context;
	restore_fp_context = smp_restore_fp_context;
#else
	if (cpu_has_fpu) {
		save_fp_context = save_hw_fp_context;
		restore_fp_context = restore_hw_fp_context;
	} else {
		save_fp_context = copy_fp_to_sigcontext;
		restore_fp_context = copy_fp_from_sigcontext;
	}
#endif /* CONFIG_SMP */

	return 0;
}

arch_initcall(signal_setup);