[PATCH] sys_times: don't take tasklist_lock
kernel/signal.c
1/*
2 * linux/kernel/signal.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 *
6 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
7 *
8 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
9 * Changes to use preallocated sigqueue structures
10 * to allow signals to be sent reliably.
11 */
12
13#include <linux/config.h>
14#include <linux/slab.h>
15#include <linux/module.h>
16#include <linux/smp_lock.h>
17#include <linux/init.h>
18#include <linux/sched.h>
19#include <linux/fs.h>
20#include <linux/tty.h>
21#include <linux/binfmts.h>
22#include <linux/security.h>
23#include <linux/syscalls.h>
24#include <linux/ptrace.h>
25#include <linux/signal.h>
26#include <linux/audit.h>
27#include <linux/capability.h>
28#include <asm/param.h>
29#include <asm/uaccess.h>
30#include <asm/unistd.h>
31#include <asm/siginfo.h>
32
33/*
34 * SLAB caches for signal bits.
35 */
36
37static kmem_cache_t *sigqueue_cachep;
38
39/*
40 * In POSIX a signal is sent either to a specific thread (Linux task)
41 * or to the process as a whole (Linux thread group). How the signal
42 * is sent determines whether it's to one thread or the whole group,
43 * which determines which signal mask(s) are involved in blocking it
44 * from being delivered until later. When the signal is delivered,
45 * either it's caught or ignored by a user handler or it has a default
46 * effect that applies to the whole thread group (POSIX process).
47 *
48 * The possible effects an unblocked signal set to SIG_DFL can have are:
49 * ignore - Nothing Happens
50 * terminate - kill the process, i.e. all threads in the group,
51 * similar to exit_group. The group leader (only) reports
52 * WIFSIGNALED status to its parent.
53 * coredump - write a core dump file describing all threads using
54 * the same mm and then kill all those threads
55 * stop - stop all the threads in the group, i.e. TASK_STOPPED state
56 *
57 * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
58 * Other signals when not blocked and set to SIG_DFL behave as follows.
59 * The job control signals also have other special effects.
60 *
61 * +--------------------+------------------+
62 * | POSIX signal | default action |
63 * +--------------------+------------------+
64 * | SIGHUP | terminate |
65 * | SIGINT | terminate |
66 * | SIGQUIT | coredump |
67 * | SIGILL | coredump |
68 * | SIGTRAP | coredump |
69 * | SIGABRT/SIGIOT | coredump |
70 * | SIGBUS | coredump |
71 * | SIGFPE | coredump |
72 * | SIGKILL | terminate(+) |
73 * | SIGUSR1 | terminate |
74 * | SIGSEGV | coredump |
75 * | SIGUSR2 | terminate |
76 * | SIGPIPE | terminate |
77 * | SIGALRM | terminate |
78 * | SIGTERM | terminate |
79 * | SIGCHLD | ignore |
80 * | SIGCONT | ignore(*) |
81 * | SIGSTOP | stop(*)(+) |
82 * | SIGTSTP | stop(*) |
83 * | SIGTTIN | stop(*) |
84 * | SIGTTOU | stop(*) |
85 * | SIGURG | ignore |
86 * | SIGXCPU | coredump |
87 * | SIGXFSZ | coredump |
88 * | SIGVTALRM | terminate |
89 * | SIGPROF | terminate |
90 * | SIGPOLL/SIGIO | terminate |
91 * | SIGSYS/SIGUNUSED | coredump |
92 * | SIGSTKFLT | terminate |
93 * | SIGWINCH | ignore |
94 * | SIGPWR | terminate |
95 * | SIGRTMIN-SIGRTMAX | terminate |
96 * +--------------------+------------------+
97 * | non-POSIX signal | default action |
98 * +--------------------+------------------+
99 * | SIGEMT | coredump |
100 * +--------------------+------------------+
101 *
102 * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
103 * (*) Special job control effects:
104 * When SIGCONT is sent, it resumes the process (all threads in the group)
105 * from TASK_STOPPED state and also clears any pending/queued stop signals
106 * (any of those marked with "stop(*)"). This happens regardless of blocking,
107 * catching, or ignoring SIGCONT. When any stop signal is sent, it clears
108 * any pending/queued SIGCONT signals; this happens regardless of blocking,
109 * catching, or ignoring the stop signal, though (except for SIGSTOP) the
110 * default action of stopping the process may happen later or never.
111 */
112
113#ifdef SIGEMT
114#define M_SIGEMT M(SIGEMT)
115#else
116#define M_SIGEMT 0
117#endif
118
119#if SIGRTMIN > BITS_PER_LONG
120#define M(sig) (1ULL << ((sig)-1))
121#else
122#define M(sig) (1UL << ((sig)-1))
123#endif
124#define T(sig, mask) (M(sig) & (mask))
125
126#define SIG_KERNEL_ONLY_MASK (\
127 M(SIGKILL) | M(SIGSTOP) )
128
129#define SIG_KERNEL_STOP_MASK (\
130 M(SIGSTOP) | M(SIGTSTP) | M(SIGTTIN) | M(SIGTTOU) )
131
132#define SIG_KERNEL_COREDUMP_MASK (\
133 M(SIGQUIT) | M(SIGILL) | M(SIGTRAP) | M(SIGABRT) | \
134 M(SIGFPE) | M(SIGSEGV) | M(SIGBUS) | M(SIGSYS) | \
135 M(SIGXCPU) | M(SIGXFSZ) | M_SIGEMT )
136
137#define SIG_KERNEL_IGNORE_MASK (\
138 M(SIGCONT) | M(SIGCHLD) | M(SIGWINCH) | M(SIGURG) )
139
140#define sig_kernel_only(sig) \
141 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_ONLY_MASK))
142#define sig_kernel_coredump(sig) \
143 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_COREDUMP_MASK))
144#define sig_kernel_ignore(sig) \
145 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_IGNORE_MASK))
146#define sig_kernel_stop(sig) \
147 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK))
148
149#define sig_needs_tasklist(sig) \
150 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK | M(SIGCONT)))
151
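/*
 * Illustrative sketch (not part of the original file): how the
 * classification macros above encode the default-action table in the
 * comment at the top of this file.  example_classify() is a hypothetical
 * name; kept under "#if 0" because it is an example only.
 */
#if 0
static void example_classify(void)
{
	BUG_ON(!sig_kernel_stop(SIGTSTP));	/* default action: stop */
	BUG_ON(!sig_kernel_coredump(SIGSEGV));	/* default action: coredump */
	BUG_ON(!sig_kernel_ignore(SIGCHLD));	/* default action: ignore */
	BUG_ON(!sig_kernel_only(SIGKILL));	/* cannot be caught or blocked */
	BUG_ON(sig_kernel_stop(SIGRTMIN));	/* real-time signals never match */
	BUG_ON(!sig_needs_tasklist(SIGCONT));	/* stop/cont signals need tasklist */
}
#endif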
152#define sig_user_defined(t, signr) \
153 (((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) && \
154 ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))
155
156#define sig_fatal(t, signr) \
157 (!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
158 (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)
159
160static int sig_ignored(struct task_struct *t, int sig)
161{
162 void __user * handler;
163
164 /*
165 * Tracers always want to know about signals..
166 */
167 if (t->ptrace & PT_PTRACED)
168 return 0;
169
170 /*
171 * Blocked signals are never ignored, since the
172 * signal handler may change by the time it is
173 * unblocked.
174 */
175 if (sigismember(&t->blocked, sig))
176 return 0;
177
178 /* Is it explicitly or implicitly ignored? */
179 handler = t->sighand->action[sig-1].sa.sa_handler;
180 return handler == SIG_IGN ||
181 (handler == SIG_DFL && sig_kernel_ignore(sig));
182}
183
184/*
185 * Re-calculate pending state from the set of locally pending
186 * signals, globally pending signals, and blocked signals.
187 */
188static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
189{
190 unsigned long ready;
191 long i;
192
193 switch (_NSIG_WORDS) {
194 default:
195 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
196 ready |= signal->sig[i] &~ blocked->sig[i];
197 break;
198
199 case 4: ready = signal->sig[3] &~ blocked->sig[3];
200 ready |= signal->sig[2] &~ blocked->sig[2];
201 ready |= signal->sig[1] &~ blocked->sig[1];
202 ready |= signal->sig[0] &~ blocked->sig[0];
203 break;
204
205 case 2: ready = signal->sig[1] &~ blocked->sig[1];
206 ready |= signal->sig[0] &~ blocked->sig[0];
207 break;
208
209 case 1: ready = signal->sig[0] &~ blocked->sig[0];
210 }
211 return ready != 0;
212}
213
214#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
215
216fastcall void recalc_sigpending_tsk(struct task_struct *t)
217{
218 if (t->signal->group_stop_count > 0 ||
219 (freezing(t)) ||
220 PENDING(&t->pending, &t->blocked) ||
221 PENDING(&t->signal->shared_pending, &t->blocked))
222 set_tsk_thread_flag(t, TIF_SIGPENDING);
223 else
224 clear_tsk_thread_flag(t, TIF_SIGPENDING);
225}
226
227void recalc_sigpending(void)
228{
229 recalc_sigpending_tsk(current);
230}
231
232/* Given the mask, find the first available signal that should be serviced. */
233
234static int
235next_signal(struct sigpending *pending, sigset_t *mask)
236{
237 unsigned long i, *s, *m, x;
238 int sig = 0;
239
240 s = pending->signal.sig;
241 m = mask->sig;
242 switch (_NSIG_WORDS) {
243 default:
244 for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
245 if ((x = *s &~ *m) != 0) {
246 sig = ffz(~x) + i*_NSIG_BPW + 1;
247 break;
248 }
249 break;
250
251 case 2: if ((x = s[0] &~ m[0]) != 0)
252 sig = 1;
253 else if ((x = s[1] &~ m[1]) != 0)
254 sig = _NSIG_BPW + 1;
255 else
256 break;
257 sig += ffz(~x);
258 break;
259
260 case 1: if ((x = *s &~ *m) != 0)
261 sig = ffz(~x) + 1;
262 break;
263 }
264
265 return sig;
266}
267
268static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
269 int override_rlimit)
270{
271 struct sigqueue *q = NULL;
272
273 atomic_inc(&t->user->sigpending);
274 if (override_rlimit ||
275 atomic_read(&t->user->sigpending) <=
276 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
277 q = kmem_cache_alloc(sigqueue_cachep, flags);
278 if (unlikely(q == NULL)) {
279 atomic_dec(&t->user->sigpending);
280 } else {
281 INIT_LIST_HEAD(&q->list);
282 q->flags = 0;
283 q->user = get_uid(t->user);
284 }
285 return(q);
286}
287
288static void __sigqueue_free(struct sigqueue *q)
289{
290 if (q->flags & SIGQUEUE_PREALLOC)
291 return;
292 atomic_dec(&q->user->sigpending);
293 free_uid(q->user);
294 kmem_cache_free(sigqueue_cachep, q);
295}
296
297void flush_sigqueue(struct sigpending *queue)
298{
299 struct sigqueue *q;
300
301 sigemptyset(&queue->signal);
302 while (!list_empty(&queue->list)) {
303 q = list_entry(queue->list.next, struct sigqueue , list);
304 list_del_init(&q->list);
305 __sigqueue_free(q);
306 }
307}
308
309/*
310 * Flush all pending signals for a task.
311 */
312void flush_signals(struct task_struct *t)
313{
314 unsigned long flags;
315
316 spin_lock_irqsave(&t->sighand->siglock, flags);
317 clear_tsk_thread_flag(t,TIF_SIGPENDING);
318 flush_sigqueue(&t->pending);
319 flush_sigqueue(&t->signal->shared_pending);
320 spin_unlock_irqrestore(&t->sighand->siglock, flags);
321}
322
323/*
324 * Flush all handlers for a task.
325 */
326
327void
328flush_signal_handlers(struct task_struct *t, int force_default)
329{
330 int i;
331 struct k_sigaction *ka = &t->sighand->action[0];
332 for (i = _NSIG ; i != 0 ; i--) {
333 if (force_default || ka->sa.sa_handler != SIG_IGN)
334 ka->sa.sa_handler = SIG_DFL;
335 ka->sa.sa_flags = 0;
336 sigemptyset(&ka->sa.sa_mask);
337 ka++;
338 }
339}
340
341
342/* Notify the system that a driver wants to block all signals for this
343 * process, and wants to be notified if any signals at all were to be
344 * sent/acted upon. If the notifier routine returns non-zero, then the
345 * signal will be acted upon after all. If the notifier routine returns 0,
346 * then the signal will be blocked. Only one block per process is
347 * allowed. priv is a pointer to private data that the notifier routine
348 * can use to determine if the signal should be blocked or not. */
349
350void
351block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
352{
353 unsigned long flags;
354
355 spin_lock_irqsave(&current->sighand->siglock, flags);
356 current->notifier_mask = mask;
357 current->notifier_data = priv;
358 current->notifier = notifier;
359 spin_unlock_irqrestore(&current->sighand->siglock, flags);
360}
361
362/* Notify the system that blocking has ended. */
363
364void
365unblock_all_signals(void)
366{
367 unsigned long flags;
368
369 spin_lock_irqsave(&current->sighand->siglock, flags);
370 current->notifier = NULL;
371 current->notifier_data = NULL;
372 recalc_sigpending();
373 spin_unlock_irqrestore(&current->sighand->siglock, flags);
374}
375
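/*
 * Illustrative sketch (not part of the original file): how a driver
 * might use the notifier hooks above.  The struct my_dev, my_notifier()
 * and my_dev_do_critical() names are hypothetical; kept under "#if 0"
 * because it is an example only.
 */
#if 0
struct my_dev {
	int critical_done;		/* hypothetical driver state */
};

static int my_notifier(void *priv)
{
	struct my_dev *dev = priv;

	/* Returning 0 keeps the signal blocked; non-zero lets it be acted upon. */
	return dev->critical_done;
}

static void my_dev_do_critical(struct my_dev *dev)
{
	sigset_t mask;

	sigfillset(&mask);
	block_all_signals(my_notifier, dev, &mask);
	/* ... work that must not be disturbed by signal delivery ... */
	dev->critical_done = 1;
	unblock_all_signals();
}
#endif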
376static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
377{
378 struct sigqueue *q, *first = NULL;
379 int still_pending = 0;
380
381 if (unlikely(!sigismember(&list->signal, sig)))
382 return 0;
383
384 /*
385 * Collect the siginfo appropriate to this signal. Check if
386 * there is another siginfo for the same signal.
387 */
388 list_for_each_entry(q, &list->list, list) {
389 if (q->info.si_signo == sig) {
390 if (first) {
391 still_pending = 1;
392 break;
393 }
394 first = q;
395 }
396 }
397 if (first) {
398 list_del_init(&first->list);
399 copy_siginfo(info, &first->info);
400 __sigqueue_free(first);
401 if (!still_pending)
402 sigdelset(&list->signal, sig);
403 } else {
404
405 /* Ok, it wasn't in the queue. This must be
406 a fast-pathed signal or we must have been
407 out of queue space. So zero out the info.
408 */
409 sigdelset(&list->signal, sig);
410 info->si_signo = sig;
411 info->si_errno = 0;
412 info->si_code = 0;
413 info->si_pid = 0;
414 info->si_uid = 0;
415 }
416 return 1;
417}
418
419static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
420 siginfo_t *info)
421{
422 int sig = 0;
423
424 sig = next_signal(pending, mask);
425 if (sig) {
426 if (current->notifier) {
427 if (sigismember(current->notifier_mask, sig)) {
428 if (!(current->notifier)(current->notifier_data)) {
429 clear_thread_flag(TIF_SIGPENDING);
430 return 0;
431 }
432 }
433 }
434
435 if (!collect_signal(sig, pending, info))
436 sig = 0;
437
438 }
439 recalc_sigpending();
440
441 return sig;
442}
443
444/*
445 * Dequeue a signal and return the element to the caller, which is
446 * expected to free it.
447 *
448 * All callers have to hold the siglock.
449 */
450int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
451{
452 int signr = __dequeue_signal(&tsk->pending, mask, info);
453 if (!signr)
454 signr = __dequeue_signal(&tsk->signal->shared_pending,
455 mask, info);
456 if (signr && unlikely(sig_kernel_stop(signr))) {
457 /*
458 * Set a marker that we have dequeued a stop signal. Our
459 * caller might release the siglock and then the pending
460 * stop signal it is about to process is no longer in the
461 * pending bitmasks, but must still be cleared by a SIGCONT
462 * (and overruled by a SIGKILL). So those cases clear this
463 * shared flag after we've set it. Note that this flag may
464 * remain set after the signal we return is ignored or
465 * handled. That doesn't matter because its only purpose
466 * is to alert stop-signal processing code when another
467 * processor has come along and cleared the flag.
468 */
469 if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
470 tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
471 }
472 if ( signr &&
473 ((info->si_code & __SI_MASK) == __SI_TIMER) &&
474 info->si_sys_private){
475 /*
476 * Release the siglock to ensure proper locking order
477 * of timer locks outside of siglocks. Note, we leave
478 * irqs disabled here, since the posix-timers code is
479 * about to disable them again anyway.
480 */
481 spin_unlock(&tsk->sighand->siglock);
482 do_schedule_next_timer(info);
483 spin_lock(&tsk->sighand->siglock);
484 }
485 return signr;
486}
487
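/*
 * Illustrative sketch (not part of the original file): the locking
 * convention for dequeue_signal() -- callers hold the siglock around the
 * call, as the comment above says.  example_dequeue() is a hypothetical
 * name; real callers include the signal-delivery loop later in this file.
 */
#if 0
static int example_dequeue(struct task_struct *tsk, siginfo_t *info)
{
	int signr;

	spin_lock_irq(&tsk->sighand->siglock);
	signr = dequeue_signal(tsk, &tsk->blocked, info);
	spin_unlock_irq(&tsk->sighand->siglock);

	return signr;			/* 0 means nothing was pending */
}
#endif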
488/*
489 * Tell a process that it has a new active signal..
490 *
491 * NOTE! we rely on the previous spin_lock to
492 * lock interrupts for us! We can only be called with
493 * "siglock" held, and the local interrupt must
494 * have been disabled when that got acquired!
495 *
496 * No need to set need_resched since signal event passing
497 * goes through ->blocked
498 */
499void signal_wake_up(struct task_struct *t, int resume)
500{
501 unsigned int mask;
502
503 set_tsk_thread_flag(t, TIF_SIGPENDING);
504
505 /*
506 * For SIGKILL, we want to wake it up in the stopped/traced case.
507 * We don't check t->state here because there is a race with it
508 * executing on another processor and just now entering stopped state.
509 * By using wake_up_state, we ensure the process will wake up and
510 * handle its death signal.
511 */
512 mask = TASK_INTERRUPTIBLE;
513 if (resume)
514 mask |= TASK_STOPPED | TASK_TRACED;
515 if (!wake_up_state(t, mask))
516 kick_process(t);
517}
518
519/*
520 * Remove signals in mask from the pending set and queue.
521 * Returns 1 if any signals were found.
522 *
523 * All callers must be holding the siglock.
524 *
525 * This version takes a sigset mask and looks at all signals,
526 * not just those in the first mask word.
527 */
528static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
529{
530 struct sigqueue *q, *n;
531 sigset_t m;
532
533 sigandsets(&m, mask, &s->signal);
534 if (sigisemptyset(&m))
535 return 0;
536
537 signandsets(&s->signal, &s->signal, mask);
538 list_for_each_entry_safe(q, n, &s->list, list) {
539 if (sigismember(mask, q->info.si_signo)) {
540 list_del_init(&q->list);
541 __sigqueue_free(q);
542 }
543 }
544 return 1;
545}
546/*
547 * Remove signals in mask from the pending set and queue.
548 * Returns 1 if any signals were found.
549 *
550 * All callers must be holding the siglock.
551 */
552static int rm_from_queue(unsigned long mask, struct sigpending *s)
553{
554 struct sigqueue *q, *n;
555
556 if (!sigtestsetmask(&s->signal, mask))
557 return 0;
558
559 sigdelsetmask(&s->signal, mask);
560 list_for_each_entry_safe(q, n, &s->list, list) {
561 if (q->info.si_signo < SIGRTMIN &&
562 (mask & sigmask(q->info.si_signo))) {
563 list_del_init(&q->list);
564 __sigqueue_free(q);
565 }
566 }
567 return 1;
568}
569
570/*
571 * Bad permissions for sending the signal
572 */
573static int check_kill_permission(int sig, struct siginfo *info,
574 struct task_struct *t)
575{
576 int error = -EINVAL;
577 if (!valid_signal(sig))
578 return error;
579 error = -EPERM;
580 if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
581 && ((sig != SIGCONT) ||
582 (current->signal->session != t->signal->session))
583 && (current->euid ^ t->suid) && (current->euid ^ t->uid)
584 && (current->uid ^ t->suid) && (current->uid ^ t->uid)
585 && !capable(CAP_KILL))
586 return error;
587
588 error = security_task_kill(t, info, sig);
589 if (!error)
590 audit_signal_info(sig, t); /* Let audit system see the signal */
591 return error;
592}
593
594/* forward decl */
595static void do_notify_parent_cldstop(struct task_struct *tsk,
596 int to_self,
597 int why);
598
599/*
600 * Handle magic process-wide effects of stop/continue signals.
601 * Unlike the signal actions, these happen immediately at signal-generation
602 * time regardless of blocking, ignoring, or handling. This does the
603 * actual continuing for SIGCONT, but not the actual stopping for stop
604 * signals. The process stop is done as a signal action for SIG_DFL.
605 */
606static void handle_stop_signal(int sig, struct task_struct *p)
607{
608 struct task_struct *t;
609
610 if (p->signal->flags & SIGNAL_GROUP_EXIT)
611 /*
612 * The process is in the middle of dying already.
613 */
614 return;
615
616 if (sig_kernel_stop(sig)) {
617 /*
618 * This is a stop signal. Remove SIGCONT from all queues.
619 */
620 rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
621 t = p;
622 do {
623 rm_from_queue(sigmask(SIGCONT), &t->pending);
624 t = next_thread(t);
625 } while (t != p);
626 } else if (sig == SIGCONT) {
627 /*
628 * Remove all stop signals from all queues,
629 * and wake all threads.
630 */
631 if (unlikely(p->signal->group_stop_count > 0)) {
632 /*
633 * There was a group stop in progress. We'll
634 * pretend it finished before we got here. We are
635 * obliged to report it to the parent: if the
636 * SIGSTOP happened "after" this SIGCONT, then it
637 * would have cleared this pending SIGCONT. If it
638 * happened "before" this SIGCONT, then the parent
639 * got the SIGCHLD about the stop finishing before
640 * the continue happened. We do the notification
641 * now, and it's as if the stop had finished and
642 * the SIGCHLD was pending on entry to this kill.
643 */
644 p->signal->group_stop_count = 0;
645 p->signal->flags = SIGNAL_STOP_CONTINUED;
646 spin_unlock(&p->sighand->siglock);
647 do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_STOPPED);
648 spin_lock(&p->sighand->siglock);
649 }
650 rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
651 t = p;
652 do {
653 unsigned int state;
654 rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
655
656 /*
657 * If there is a handler for SIGCONT, we must make
658 * sure that no thread returns to user mode before
659 * we post the signal, in case it was the only
660 * thread eligible to run the signal handler--then
661 * it must not do anything between resuming and
662 * running the handler. With the TIF_SIGPENDING
663 * flag set, the thread will pause and acquire the
664 * siglock that we hold now and until we've queued
665 * the pending signal.
666 *
667 * Wake up the stopped thread _after_ setting
668 * TIF_SIGPENDING
669 */
670 state = TASK_STOPPED;
671 if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
672 set_tsk_thread_flag(t, TIF_SIGPENDING);
673 state |= TASK_INTERRUPTIBLE;
674 }
675 wake_up_state(t, state);
676
677 t = next_thread(t);
678 } while (t != p);
679
680 if (p->signal->flags & SIGNAL_STOP_STOPPED) {
681 /*
682 * We were in fact stopped, and are now continued.
683 * Notify the parent with CLD_CONTINUED.
684 */
685 p->signal->flags = SIGNAL_STOP_CONTINUED;
686 p->signal->group_exit_code = 0;
687 spin_unlock(&p->sighand->siglock);
688 do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_CONTINUED);
689 spin_lock(&p->sighand->siglock);
690 } else {
691 /*
692 * We are not stopped, but there could be a stop
693 * signal in the middle of being processed after
694 * being removed from the queue. Clear that too.
695 */
696 p->signal->flags = 0;
697 }
698 } else if (sig == SIGKILL) {
699 /*
700 * Make sure that any pending stop signal already dequeued
701 * is undone by the wakeup for SIGKILL.
702 */
703 p->signal->flags = 0;
704 }
705}
706
707static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
708 struct sigpending *signals)
709{
710 struct sigqueue * q = NULL;
711 int ret = 0;
712
713 /*
714 * fast-pathed signals for kernel-internal things like SIGSTOP
715 * or SIGKILL.
716 */
717 if (info == SEND_SIG_FORCED)
718 goto out_set;
719
720 /* Real-time signals must be queued if sent by sigqueue, or
721 some other real-time mechanism. It is implementation
722 defined whether kill() does so. We attempt to do so, on
723 the principle of least surprise, but since kill is not
724 allowed to fail with EAGAIN when low on memory we just
725 make sure at least one signal gets delivered and don't
726 pass on the info struct. */
727
728 q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
729 (is_si_special(info) ||
730 info->si_code >= 0)));
731 if (q) {
732 list_add_tail(&q->list, &signals->list);
733 switch ((unsigned long) info) {
734 case (unsigned long) SEND_SIG_NOINFO:
735 q->info.si_signo = sig;
736 q->info.si_errno = 0;
737 q->info.si_code = SI_USER;
738 q->info.si_pid = current->pid;
739 q->info.si_uid = current->uid;
740 break;
741 case (unsigned long) SEND_SIG_PRIV:
742 q->info.si_signo = sig;
743 q->info.si_errno = 0;
744 q->info.si_code = SI_KERNEL;
745 q->info.si_pid = 0;
746 q->info.si_uid = 0;
747 break;
748 default:
749 copy_siginfo(&q->info, info);
750 break;
751 }
752 } else if (!is_si_special(info)) {
753 if (sig >= SIGRTMIN && info->si_code != SI_USER)
754 /*
755 * Queue overflow, abort. We may abort if the signal was rt
756 * and sent by user using something other than kill().
757 */
758 return -EAGAIN;
759 }
760
761out_set:
762 sigaddset(&signals->signal, sig);
763 return ret;
764}
765
766#define LEGACY_QUEUE(sigptr, sig) \
767 (((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
768
769
770static int
771specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
772{
773 int ret = 0;
774
775 if (!irqs_disabled())
776 BUG();
777 assert_spin_locked(&t->sighand->siglock);
778
779 /* Short-circuit ignored signals. */
780 if (sig_ignored(t, sig))
781 goto out;
782
783 /* Support queueing exactly one non-rt signal, so that we
784 can get more detailed information about the cause of
785 the signal. */
786 if (LEGACY_QUEUE(&t->pending, sig))
787 goto out;
788
789 ret = send_signal(sig, info, t, &t->pending);
790 if (!ret && !sigismember(&t->blocked, sig))
791 signal_wake_up(t, sig == SIGKILL);
792out:
793 return ret;
794}
795
796/*
797 * Force a signal that the process can't ignore: if necessary
798 * we unblock the signal and change any SIG_IGN to SIG_DFL.
799 */
800
801int
802force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
803{
804 unsigned long int flags;
805 int ret;
806
807 spin_lock_irqsave(&t->sighand->siglock, flags);
808 if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
809 t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
810 }
811 if (sigismember(&t->blocked, sig)) {
812 sigdelset(&t->blocked, sig);
813 }
814 recalc_sigpending_tsk(t);
815 ret = specific_send_sig_info(sig, info, t);
816 spin_unlock_irqrestore(&t->sighand->siglock, flags);
817
818 return ret;
819}
820
821void
822force_sig_specific(int sig, struct task_struct *t)
823{
824 force_sig_info(sig, SEND_SIG_FORCED, t);
825}
826
827/*
828 * Test if P wants to take SIG. After we've checked all threads with this,
829 * it's equivalent to finding no threads not blocking SIG. Any threads not
830 * blocking SIG were ruled out because they are not running and already
831 * have pending signals. Such threads will dequeue from the shared queue
832 * as soon as they're available, so putting the signal on the shared queue
833 * will be equivalent to sending it to one such thread.
834 */
835static inline int wants_signal(int sig, struct task_struct *p)
836{
837 if (sigismember(&p->blocked, sig))
838 return 0;
839 if (p->flags & PF_EXITING)
840 return 0;
841 if (sig == SIGKILL)
842 return 1;
843 if (p->state & (TASK_STOPPED | TASK_TRACED))
844 return 0;
845 return task_curr(p) || !signal_pending(p);
846}
847
848static void
849__group_complete_signal(int sig, struct task_struct *p)
850{
851 struct task_struct *t;
852
853 /*
854 * Now find a thread we can wake up to take the signal off the queue.
855 *
856 * If the main thread wants the signal, it gets first crack.
857 * Probably the least surprising to the average bear.
858 */
859 if (wants_signal(sig, p))
860 t = p;
861 else if (thread_group_empty(p))
862 /*
863 * There is just one thread and it does not need to be woken.
864 * It will dequeue unblocked signals before it runs again.
865 */
866 return;
867 else {
868 /*
869 * Otherwise try to find a suitable thread.
870 */
871 t = p->signal->curr_target;
872 if (t == NULL)
873 /* restart balancing at this thread */
874 t = p->signal->curr_target = p;
875 BUG_ON(t->tgid != p->tgid);
876
877 while (!wants_signal(sig, t)) {
878 t = next_thread(t);
879 if (t == p->signal->curr_target)
880 /*
881 * No thread needs to be woken.
882 * Any eligible threads will see
883 * the signal in the queue soon.
884 */
885 return;
886 }
887 p->signal->curr_target = t;
888 }
889
890 /*
891 * Found a killable thread. If the signal will be fatal,
892 * then start taking the whole group down immediately.
893 */
894 if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
895 !sigismember(&t->real_blocked, sig) &&
896 (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
897 /*
898 * This signal will be fatal to the whole group.
899 */
900 if (!sig_kernel_coredump(sig)) {
901 /*
902 * Start a group exit and wake everybody up.
903 * This way we don't have other threads
904 * running and doing things after a slower
905 * thread has the fatal signal pending.
906 */
907 p->signal->flags = SIGNAL_GROUP_EXIT;
908 p->signal->group_exit_code = sig;
909 p->signal->group_stop_count = 0;
910 t = p;
911 do {
912 sigaddset(&t->pending.signal, SIGKILL);
913 signal_wake_up(t, 1);
914 t = next_thread(t);
915 } while (t != p);
916 return;
917 }
918
919 /*
920 * There will be a core dump. We make all threads other
921 * than the chosen one go into a group stop so that nothing
922 * happens until it gets scheduled, takes the signal off
923 * the shared queue, and does the core dump. This is a
924 * little more complicated than strictly necessary, but it
925 * keeps the signal state that winds up in the core dump
926 * unchanged from the death state, e.g. which thread had
927 * the core-dump signal unblocked.
928 */
929 rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
930 rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
931 p->signal->group_stop_count = 0;
932 p->signal->group_exit_task = t;
933 t = p;
934 do {
935 p->signal->group_stop_count++;
936 signal_wake_up(t, 0);
937 t = next_thread(t);
938 } while (t != p);
939 wake_up_process(p->signal->group_exit_task);
940 return;
941 }
942
943 /*
944 * The signal is already in the shared-pending queue.
945 * Tell the chosen thread to wake up and dequeue it.
946 */
947 signal_wake_up(t, sig == SIGKILL);
948 return;
949}
950
951int
952__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
953{
954 int ret = 0;
955
956 assert_spin_locked(&p->sighand->siglock);
957 handle_stop_signal(sig, p);
958
959 /* Short-circuit ignored signals. */
960 if (sig_ignored(p, sig))
961 return ret;
962
963 if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
964 /* This is a non-RT signal and we already have one queued. */
965 return ret;
966
967 /*
968 * Put this signal on the shared-pending queue, or fail with EAGAIN.
969 * We always use the shared queue for process-wide signals,
970 * to avoid several races.
971 */
972 ret = send_signal(sig, info, p, &p->signal->shared_pending);
973 if (unlikely(ret))
974 return ret;
975
976 __group_complete_signal(sig, p);
977 return 0;
978}
979
980/*
981 * Nuke all other threads in the group.
982 */
983void zap_other_threads(struct task_struct *p)
984{
985 struct task_struct *t;
986
987 p->signal->flags = SIGNAL_GROUP_EXIT;
988 p->signal->group_stop_count = 0;
989
990 if (thread_group_empty(p))
991 return;
992
993 for (t = next_thread(p); t != p; t = next_thread(t)) {
994 /*
995 * Don't bother with already dead threads
996 */
997 if (t->exit_state)
998 continue;
999
1000 /*
1001 * We don't want to notify the parent, since we are
1002 * killed as part of a thread group due to another
1003 * thread doing an execve() or similar. So set the
1004 * exit signal to -1 to allow immediate reaping of
1005 * the process. But don't detach the thread group
1006 * leader.
1007 */
1008 if (t != p->group_leader)
1009 t->exit_signal = -1;
1010
1011 /* SIGKILL will be handled before any pending SIGSTOP */
1012 sigaddset(&t->pending.signal, SIGKILL);
1013 signal_wake_up(t, 1);
1014 }
1015}
1016
1017/*
1018 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
1019 */
1020struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
1021{
1022 struct sighand_struct *sighand;
1023
1024 for (;;) {
1025 sighand = rcu_dereference(tsk->sighand);
1026 if (unlikely(sighand == NULL))
1027 break;
1028
1029 spin_lock_irqsave(&sighand->siglock, *flags);
1030 if (likely(sighand == tsk->sighand))
1031 break;
1032 spin_unlock_irqrestore(&sighand->siglock, *flags);
1033 }
1034
1035 return sighand;
1036}
1037
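/*
 * Illustrative sketch (not part of the original file): the usual
 * lock_task_sighand() pattern -- hold rcu_read_lock() (or the tasklist
 * lock) so the sighand cannot be freed while we take its siglock.
 * example_with_sighand() is a hypothetical name; group_send_sig_info()
 * below is a real user.
 */
#if 0
static void example_with_sighand(struct task_struct *p)
{
	unsigned long flags;

	rcu_read_lock();
	if (lock_task_sighand(p, &flags)) {
		/* p->sighand->siglock is held here, with irqs disabled. */
		unlock_task_sighand(p, &flags);
	}
	rcu_read_unlock();
}
#endif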
1038int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1039{
1040 unsigned long flags;
1041 int ret;
1042
1043 ret = check_kill_permission(sig, info, p);
1044
1045 if (!ret && sig) {
1046 ret = -ESRCH;
1047 if (lock_task_sighand(p, &flags)) {
1048 ret = __group_send_sig_info(sig, info, p);
1049 unlock_task_sighand(p, &flags);
1050 }
1051 }
1052
1053 return ret;
1054}
1055
1056/*
1057 * kill_pg_info() sends a signal to a process group: this is what the tty
1058 * control characters do (^C, ^Z etc)
1059 */
1060
1061int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
1062{
1063 struct task_struct *p = NULL;
1064 int retval, success;
1065
1066 if (pgrp <= 0)
1067 return -EINVAL;
1068
1069 success = 0;
1070 retval = -ESRCH;
1071 do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
1072 int err = group_send_sig_info(sig, info, p);
1073 success |= !err;
1074 retval = err;
1075 } while_each_task_pid(pgrp, PIDTYPE_PGID, p);
1076 return success ? 0 : retval;
1077}
1078
1079int
1080kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
1081{
1082 int retval;
1083
1084 read_lock(&tasklist_lock);
1085 retval = __kill_pg_info(sig, info, pgrp);
1086 read_unlock(&tasklist_lock);
1087
1088 return retval;
1089}
1090
1091int
1092kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1093{
1094 int error;
1095 int acquired_tasklist_lock = 0;
1096 struct task_struct *p;
1097
1098 rcu_read_lock();
1099 if (unlikely(sig_needs_tasklist(sig))) {
1100 read_lock(&tasklist_lock);
1101 acquired_tasklist_lock = 1;
1102 }
1103 p = find_task_by_pid(pid);
1104 error = -ESRCH;
1105 if (p)
1106 error = group_send_sig_info(sig, info, p);
1107 if (unlikely(acquired_tasklist_lock))
1108 read_unlock(&tasklist_lock);
1109 rcu_read_unlock();
1110 return error;
1111}
1112
1113/* like kill_proc_info(), but doesn't use uid/euid of "current" */
1114int kill_proc_info_as_uid(int sig, struct siginfo *info, pid_t pid,
1115 uid_t uid, uid_t euid)
1116{
1117 int ret = -EINVAL;
1118 struct task_struct *p;
1119
1120 if (!valid_signal(sig))
1121 return ret;
1122
1123 read_lock(&tasklist_lock);
1124 p = find_task_by_pid(pid);
1125 if (!p) {
1126 ret = -ESRCH;
1127 goto out_unlock;
1128 }
1129 if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
1130 && (euid != p->suid) && (euid != p->uid)
1131 && (uid != p->suid) && (uid != p->uid)) {
1132 ret = -EPERM;
1133 goto out_unlock;
1134 }
1135 if (sig && p->sighand) {
1136 unsigned long flags;
1137 spin_lock_irqsave(&p->sighand->siglock, flags);
1138 ret = __group_send_sig_info(sig, info, p);
1139 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1140 }
1141out_unlock:
1142 read_unlock(&tasklist_lock);
1143 return ret;
1144}
1145EXPORT_SYMBOL_GPL(kill_proc_info_as_uid);
1146
1147/*
1148 * kill_something_info() interprets pid in interesting ways just like kill(2).
1149 *
1150 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1151 * is probably wrong. Should make it like BSD or SYSV.
1152 */
1153
1154static int kill_something_info(int sig, struct siginfo *info, int pid)
1155{
1156 if (!pid) {
1157 return kill_pg_info(sig, info, process_group(current));
1158 } else if (pid == -1) {
1159 int retval = 0, count = 0;
1160 struct task_struct * p;
1161
1162 read_lock(&tasklist_lock);
1163 for_each_process(p) {
1164 if (p->pid > 1 && p->tgid != current->tgid) {
1165 int err = group_send_sig_info(sig, info, p);
1166 ++count;
1167 if (err != -EPERM)
1168 retval = err;
1169 }
1170 }
1171 read_unlock(&tasklist_lock);
1172 return count ? retval : -ESRCH;
1173 } else if (pid < 0) {
1174 return kill_pg_info(sig, info, -pid);
1175 } else {
1176 return kill_proc_info(sig, info, pid);
1177 }
1178}
1179
1180/*
1181 * These are for backward compatibility with the rest of the kernel source.
1182 */
1183
1184/*
1185 * These two are the most common entry points. They send a signal
1186 * just to the specific thread.
1187 */
1188int
1189send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1190{
1191 int ret;
1192 unsigned long flags;
1193
1194 /*
1195 * Make sure legacy kernel users don't send in bad values
1196 * (normal paths check this in check_kill_permission).
1197 */
1198 if (!valid_signal(sig))
1199 return -EINVAL;
1200
1201 /*
1202 * We need the tasklist lock even for the specific
1203 * thread case (when we don't need to follow the group
1204 * lists) in order to avoid races with "p->sighand"
1205 * going away or changing from under us.
1206 */
1207 read_lock(&tasklist_lock);
1208 spin_lock_irqsave(&p->sighand->siglock, flags);
1209 ret = specific_send_sig_info(sig, info, p);
1210 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1211 read_unlock(&tasklist_lock);
1212 return ret;
1213}
1214
1215#define __si_special(priv) \
1216 ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1217
1218int
1219send_sig(int sig, struct task_struct *p, int priv)
1220{
1221 return send_sig_info(sig, __si_special(priv), p);
1222}
1223
1224/*
1225 * This is the entry point for "process-wide" signals.
1226 * They will go to an appropriate thread in the thread group.
1227 */
1228int
1229send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1230{
1231 int ret;
1232 read_lock(&tasklist_lock);
1233 ret = group_send_sig_info(sig, info, p);
1234 read_unlock(&tasklist_lock);
1235 return ret;
1236}
1237
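/*
 * Illustrative sketch (not part of the original file): the difference
 * between the thread-directed and process-wide entry points above.
 * example_send() is a hypothetical name.
 */
#if 0
static void example_send(struct task_struct *thread)
{
	/* Queued on this one thread's private pending set. */
	send_sig_info(SIGUSR1, SEND_SIG_PRIV, thread);

	/* Queued on the shared pending set; any thread in the group may dequeue it. */
	send_group_sig_info(SIGUSR1, SEND_SIG_PRIV, thread);
}
#endif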
1238void
1239force_sig(int sig, struct task_struct *p)
1240{
1241 force_sig_info(sig, SEND_SIG_PRIV, p);
1242}
1243
1244/*
1245 * When things go south during signal handling, we
1246 * will force a SIGSEGV. And if the signal that caused
1247 * the problem was already a SIGSEGV, we'll want to
1248 * make sure we don't even try to deliver the signal..
1249 */
1250int
1251force_sigsegv(int sig, struct task_struct *p)
1252{
1253 if (sig == SIGSEGV) {
1254 unsigned long flags;
1255 spin_lock_irqsave(&p->sighand->siglock, flags);
1256 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1257 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1258 }
1259 force_sig(SIGSEGV, p);
1260 return 0;
1261}
1262
1263int
1264kill_pg(pid_t pgrp, int sig, int priv)
1265{
1266 return kill_pg_info(sig, __si_special(priv), pgrp);
1267}
1268
1269int
1270kill_proc(pid_t pid, int sig, int priv)
1271{
1272 return kill_proc_info(sig, __si_special(priv), pid);
1273}
1274
1275/*
1276 * These functions support sending signals using preallocated sigqueue
1277 * structures. This is needed "because realtime applications cannot
1278 * afford to lose notifications of asynchronous events, like timer
1279 * expirations or I/O completions". In the case of Posix Timers
1280 * we allocate the sigqueue structure from the timer_create. If this
1281 * allocation fails we are able to report the failure to the application
1282 * with an EAGAIN error.
1283 */
1284
1285struct sigqueue *sigqueue_alloc(void)
1286{
1287 struct sigqueue *q;
1288
1289 if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
1290 q->flags |= SIGQUEUE_PREALLOC;
1291 return(q);
1292}
1293
1294void sigqueue_free(struct sigqueue *q)
1295{
1296 unsigned long flags;
1297 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1298 /*
1299 * If the signal is still pending remove it from the
1300 * pending queue.
1301 */
1302 if (unlikely(!list_empty(&q->list))) {
1303 spinlock_t *lock = &current->sighand->siglock;
1304 read_lock(&tasklist_lock);
1305 spin_lock_irqsave(lock, flags);
1306 if (!list_empty(&q->list))
1307 list_del_init(&q->list);
1308 spin_unlock_irqrestore(lock, flags);
1309 read_unlock(&tasklist_lock);
1310 }
1311 q->flags &= ~SIGQUEUE_PREALLOC;
1312 __sigqueue_free(q);
1313}
1314
1315int
1316send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1317{
1318 unsigned long flags;
1319 int ret = 0;
1320 struct sighand_struct *sh;
1321
1322 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1323
1324 /*
1325 * The rcu based delayed sighand destroy makes it possible to
1326 * run this without tasklist lock held. The task struct itself
1327 * cannot go away as create_timer did get_task_struct().
1328 *
1329 * We return -1, when the task is marked exiting, so
1330 * posix_timer_event can redirect it to the group leader
1331 */
1332 rcu_read_lock();
1333
1334 if (unlikely(p->flags & PF_EXITING)) {
1335 ret = -1;
1336 goto out_err;
1337 }
1338
1339retry:
1340 sh = rcu_dereference(p->sighand);
1341
1342 spin_lock_irqsave(&sh->siglock, flags);
1343 if (p->sighand != sh) {
1344 /* We raced with exec() in a multithreaded process... */
1345 spin_unlock_irqrestore(&sh->siglock, flags);
1346 goto retry;
1347 }
1348
1349 /*
1350 * We do the check here again to handle the following scenario:
1351 *
1352 * CPU 0 CPU 1
1353 * send_sigqueue
1354 * check PF_EXITING
1355 * interrupt exit code running
1356 * __exit_signal
1357 * lock sighand->siglock
1358 * unlock sighand->siglock
1359 * lock sh->siglock
1360 * add(tsk->pending) flush_sigqueue(tsk->pending)
1361 *
1362 */
1363
1364 if (unlikely(p->flags & PF_EXITING)) {
1365 ret = -1;
1366 goto out;
1367 }
1368
1369 if (unlikely(!list_empty(&q->list))) {
1370 /*
1371 * If an SI_TIMER entry is already queued, just increment
1372 * the overrun count.
1373 */
1374 if (q->info.si_code != SI_TIMER)
1375 BUG();
1376 q->info.si_overrun++;
1377 goto out;
1378 }
1379 /* Short-circuit ignored signals. */
1380 if (sig_ignored(p, sig)) {
1381 ret = 1;
1382 goto out;
1383 }
1384
1385 list_add_tail(&q->list, &p->pending.list);
1386 sigaddset(&p->pending.signal, sig);
1387 if (!sigismember(&p->blocked, sig))
1388 signal_wake_up(p, sig == SIGKILL);
1389
1390out:
1391 spin_unlock_irqrestore(&sh->siglock, flags);
1392out_err:
1393 rcu_read_unlock();
1394
1395 return ret;
1396}
1397
1398int
1399send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1400{
1401 unsigned long flags;
1402 int ret = 0;
1403
1404 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1405
1406 read_lock(&tasklist_lock);
1407 /* Since it_lock is held, p->sighand cannot be NULL. */
1408 spin_lock_irqsave(&p->sighand->siglock, flags);
1409 handle_stop_signal(sig, p);
1410
1411 /* Short-circuit ignored signals. */
1412 if (sig_ignored(p, sig)) {
1413 ret = 1;
1414 goto out;
1415 }
1416
1417 if (unlikely(!list_empty(&q->list))) {
1418 /*
1419 * If an SI_TIMER entry is already queued, just increment
1420 * the overrun count. Other uses should not try to
1421 * send the signal multiple times.
1422 */
1423 if (q->info.si_code != SI_TIMER)
1424 BUG();
1425 q->info.si_overrun++;
1426 goto out;
1427 }
1428
1429 /*
1430 * Put this signal on the shared-pending queue.
1431 * We always use the shared queue for process-wide signals,
1432 * to avoid several races.
1433 */
1434 list_add_tail(&q->list, &p->signal->shared_pending.list);
1435 sigaddset(&p->signal->shared_pending.signal, sig);
1436
1437 __group_complete_signal(sig, p);
1438out:
1439 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1440 read_unlock(&tasklist_lock);
1441 return ret;
1442}
1443
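/*
 * Illustrative sketch (not part of the original file): the preallocated
 * sigqueue life cycle described above.  The real user is the POSIX timer
 * code; the example_timer_*() names are hypothetical.
 */
#if 0
static struct sigqueue *example_timer_create(void)
{
	/* Allocate up front so expiry can never fail with -EAGAIN. */
	return sigqueue_alloc();	/* may return NULL on memory pressure */
}

static void example_timer_expiry(struct sigqueue *q, struct task_struct *t)
{
	q->info.si_signo = SIGALRM;
	q->info.si_code = SI_TIMER;
	send_sigqueue(SIGALRM, q, t);	/* thread-directed delivery */
}

static void example_timer_delete(struct sigqueue *q)
{
	sigqueue_free(q);		/* also unqueues it if still pending */
}
#endif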
1444/*
1445 * Wake up any threads in the parent blocked in wait* syscalls.
1446 */
1447static inline void __wake_up_parent(struct task_struct *p,
1448 struct task_struct *parent)
1449{
1450 wake_up_interruptible_sync(&parent->signal->wait_chldexit);
1451}
1452
1453/*
1454 * Let a parent know about the death of a child.
1455 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1456 */
1457
1458void do_notify_parent(struct task_struct *tsk, int sig)
1459{
1460 struct siginfo info;
1461 unsigned long flags;
1462 struct sighand_struct *psig;
1463
1464 BUG_ON(sig == -1);
1465
1466 /* do_notify_parent_cldstop should have been called instead. */
1467 BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));
1468
1469 BUG_ON(!tsk->ptrace &&
1470 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1471
1472 info.si_signo = sig;
1473 info.si_errno = 0;
1474 info.si_pid = tsk->pid;
1475 info.si_uid = tsk->uid;
1476
1477 /* FIXME: find out whether or not this is supposed to be c*time. */
1478 info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
1479 tsk->signal->utime));
1480 info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
1481 tsk->signal->stime));
1482
1483 info.si_status = tsk->exit_code & 0x7f;
1484 if (tsk->exit_code & 0x80)
1485 info.si_code = CLD_DUMPED;
1486 else if (tsk->exit_code & 0x7f)
1487 info.si_code = CLD_KILLED;
1488 else {
1489 info.si_code = CLD_EXITED;
1490 info.si_status = tsk->exit_code >> 8;
1491 }
1492
1493 psig = tsk->parent->sighand;
1494 spin_lock_irqsave(&psig->siglock, flags);
1495 if (!tsk->ptrace && sig == SIGCHLD &&
1496 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1497 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1498 /*
1499 * We are exiting and our parent doesn't care. POSIX.1
1500 * defines special semantics for setting SIGCHLD to SIG_IGN
1501 * or setting the SA_NOCLDWAIT flag: we should be reaped
1502 * automatically and not left for our parent's wait4 call.
1503 * Rather than having the parent do it as a magic kind of
1504 * signal handler, we just set this to tell do_exit that we
1505 * can be cleaned up without becoming a zombie. Note that
1506 * we still call __wake_up_parent in this case, because a
1507 * blocked sys_wait4 might now return -ECHILD.
1508 *
1509 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1510 * is implementation-defined: we do (if you don't want
1511 * it, just use SIG_IGN instead).
1512 */
1513 tsk->exit_signal = -1;
1514 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1515 sig = 0;
1516 }
1517 if (valid_signal(sig) && sig > 0)
1518 __group_send_sig_info(sig, &info, tsk->parent);
1519 __wake_up_parent(tsk, tsk->parent);
1520 spin_unlock_irqrestore(&psig->siglock, flags);
1521}
1522
1523static void do_notify_parent_cldstop(struct task_struct *tsk, int to_self, int why)
1524{
1525 struct siginfo info;
1526 unsigned long flags;
1527 struct task_struct *parent;
1528 struct sighand_struct *sighand;
1529
1530 if (to_self)
1531 parent = tsk->parent;
1532 else {
1533 tsk = tsk->group_leader;
1534 parent = tsk->real_parent;
1535 }
1536
1537 info.si_signo = SIGCHLD;
1538 info.si_errno = 0;
1539 info.si_pid = tsk->pid;
1540 info.si_uid = tsk->uid;
1541
1542 /* FIXME: find out whether or not this is supposed to be c*time. */
1543 info.si_utime = cputime_to_jiffies(tsk->utime);
1544 info.si_stime = cputime_to_jiffies(tsk->stime);
1545
1546 info.si_code = why;
1547 switch (why) {
1548 case CLD_CONTINUED:
1549 info.si_status = SIGCONT;
1550 break;
1551 case CLD_STOPPED:
1552 info.si_status = tsk->signal->group_exit_code & 0x7f;
1553 break;
1554 case CLD_TRAPPED:
1555 info.si_status = tsk->exit_code & 0x7f;
1556 break;
1557 default:
1558 BUG();
1559 }
1560
1561 sighand = parent->sighand;
1562 spin_lock_irqsave(&sighand->siglock, flags);
1563 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1564 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1565 __group_send_sig_info(SIGCHLD, &info, parent);
1566 /*
1567 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1568 */
1569 __wake_up_parent(tsk, parent);
1570 spin_unlock_irqrestore(&sighand->siglock, flags);
1571}
1572
1573/*
1574 * This must be called with current->sighand->siglock held.
1575 *
1576 * This should be the path for all ptrace stops.
1577 * We always set current->last_siginfo while stopped here.
1578 * That makes it a way to test a stopped process for
1579 * being ptrace-stopped vs being job-control-stopped.
1580 *
1581 * If we actually decide not to stop at all because the tracer is gone,
1582 * we leave nostop_code in current->exit_code.
1583 */
1584static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
1585{
1586 /*
1587 * If there is a group stop in progress,
1588 * we must participate in the bookkeeping.
1589 */
1590 if (current->signal->group_stop_count > 0)
1591 --current->signal->group_stop_count;
1592
1593 current->last_siginfo = info;
1594 current->exit_code = exit_code;
1595
1596 /* Let the debugger run. */
1597 set_current_state(TASK_TRACED);
1598 spin_unlock_irq(&current->sighand->siglock);
1599 read_lock(&tasklist_lock);
1600 if (likely(current->ptrace & PT_PTRACED) &&
1601 likely(current->parent != current->real_parent ||
1602 !(current->ptrace & PT_ATTACHED)) &&
1603 (likely(current->parent->signal != current->signal) ||
1604 !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) {
1605 do_notify_parent_cldstop(current, 1, CLD_TRAPPED);
1606 read_unlock(&tasklist_lock);
1607 schedule();
1608 } else {
1609 /*
1610 * By the time we got the lock, our tracer went away.
1611 * Don't stop here.
1612 */
1613 read_unlock(&tasklist_lock);
1614 set_current_state(TASK_RUNNING);
1615 current->exit_code = nostop_code;
1616 }
1617
1618 /*
1619 * We are back. Now reacquire the siglock before touching
1620 * last_siginfo, so that we are sure to have synchronized with
1621 * any signal-sending on another CPU that wants to examine it.
1622 */
1623 spin_lock_irq(&current->sighand->siglock);
1624 current->last_siginfo = NULL;
1625
1626 /*
1627 * Queued signals ignored us while we were stopped for tracing.
1628 * So check for any that we should take before resuming user mode.
1629 */
1630 recalc_sigpending();
1631}
1632
1633void ptrace_notify(int exit_code)
1634{
1635 siginfo_t info;
1636
1637 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1638
1639 memset(&info, 0, sizeof info);
1640 info.si_signo = SIGTRAP;
1641 info.si_code = exit_code;
1642 info.si_pid = current->pid;
1643 info.si_uid = current->uid;
1644
1645 /* Let the debugger run. */
1646 spin_lock_irq(&current->sighand->siglock);
1647 ptrace_stop(exit_code, 0, &info);
1648 spin_unlock_irq(&current->sighand->siglock);
1649}
1650
1651static void
1652finish_stop(int stop_count)
1653{
1654 int to_self;
1655
1656 /*
1657 * If there are no other threads in the group, or if there is
1658 * a group stop in progress and we are the last to stop,
1659 * report to the parent. When ptraced, every thread reports itself.
1660 */
1661 if (stop_count < 0 || (current->ptrace & PT_PTRACED))
1662 to_self = 1;
1663 else if (stop_count == 0)
1664 to_self = 0;
1665 else
1666 goto out;
1667
1668 read_lock(&tasklist_lock);
1669 do_notify_parent_cldstop(current, to_self, CLD_STOPPED);
1670 read_unlock(&tasklist_lock);
1671
1672out:
1673 schedule();
1674 /*
1675 * Now we don't run again until continued.
1676 */
1677 current->exit_code = 0;
1678}
1679
1680/*
1681 * This performs the stopping for SIGSTOP and other stop signals.
1682 * We have to stop all threads in the thread group.
1683 * Returns nonzero if we've actually stopped and released the siglock.
1684 * Returns zero if we didn't stop and still hold the siglock.
1685 */
1686static int
1687do_signal_stop(int signr)
1688{
1689 struct signal_struct *sig = current->signal;
1690 struct sighand_struct *sighand = current->sighand;
1691 int stop_count = -1;
1692
1693 if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
1694 return 0;
1695
1696 if (sig->group_stop_count > 0) {
1697 /*
1698 * There is a group stop in progress. We don't need to
1699 * start another one.
1700 */
1701 signr = sig->group_exit_code;
1702 stop_count = --sig->group_stop_count;
1703 current->exit_code = signr;
1704 set_current_state(TASK_STOPPED);
1705 if (stop_count == 0)
1706 sig->flags = SIGNAL_STOP_STOPPED;
1707 spin_unlock_irq(&sighand->siglock);
1708 }
1709 else if (thread_group_empty(current)) {
1710 /*
1711 * Lock must be held through transition to stopped state.
1712 */
1713 current->exit_code = current->signal->group_exit_code = signr;
1714 set_current_state(TASK_STOPPED);
1715 sig->flags = SIGNAL_STOP_STOPPED;
1716 spin_unlock_irq(&sighand->siglock);
1717 }
1718 else {
1719 /*
1720 * There is no group stop already in progress.
1721 * We must initiate one now, but that requires
1722 * dropping siglock to get both the tasklist lock
1723 * and siglock again in the proper order. Note that
1724 * this allows an intervening SIGCONT to be posted.
1725 * We need to check for that and bail out if necessary.
1726 */
1727 struct task_struct *t;
1728
1729 spin_unlock_irq(&sighand->siglock);
1730
1731 /* signals can be posted during this window */
1732
1733 read_lock(&tasklist_lock);
1734 spin_lock_irq(&sighand->siglock);
1735
1736 if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED)) {
1737 /*
1738 * Another stop or continue happened while we
1739 * didn't have the lock. We can just swallow this
1740 * signal now. If we raced with a SIGCONT, that
1741 * should have just cleared it now. If we raced
1742 * with another processor delivering a stop signal,
1743 * then the SIGCONT that wakes us up should clear it.
1744 */
1745 read_unlock(&tasklist_lock);
1746 return 0;
1747 }
1748
1749 if (sig->group_stop_count == 0) {
1750 sig->group_exit_code = signr;
1751 stop_count = 0;
1752 for (t = next_thread(current); t != current;
1753 t = next_thread(t))
1754 /*
1755 * Setting state to TASK_STOPPED for a group
1756 * stop is always done with the siglock held,
1757 * so this check has no races.
1758 */
1759 if (!t->exit_state &&
1760 !(t->state & (TASK_STOPPED|TASK_TRACED))) {
1761 stop_count++;
1762 signal_wake_up(t, 0);
1763 }
1764 sig->group_stop_count = stop_count;
1765 }
1766 else {
1767 /* A race with another thread while unlocked. */
1768 signr = sig->group_exit_code;
1769 stop_count = --sig->group_stop_count;
1770 }
1771
1772 current->exit_code = signr;
1773 set_current_state(TASK_STOPPED);
1774 if (stop_count == 0)
1775 sig->flags = SIGNAL_STOP_STOPPED;
1776
1777 spin_unlock_irq(&sighand->siglock);
1778 read_unlock(&tasklist_lock);
1779 }
1780
1781 finish_stop(stop_count);
1782 return 1;
1783}
1784
1785/*
1786 * Do appropriate magic when group_stop_count > 0.
1787 * We return nonzero if we stopped, after releasing the siglock.
1788 * We return zero if we still hold the siglock and should look
1789 * for another signal without checking group_stop_count again.
1790 */
1791static int handle_group_stop(void)
1792{
1793 int stop_count;
1794
1795 if (current->signal->group_exit_task == current) {
1796 /*
1797 * Group stop is so we can do a core dump,
1798 * We are the initiating thread, so get on with it.
1799 */
1800 current->signal->group_exit_task = NULL;
1801 return 0;
1802 }
1803
1804 if (current->signal->flags & SIGNAL_GROUP_EXIT)
1805 /*
1806 * Group stop is so another thread can do a core dump,
1807 * or else we are racing against a death signal.
1808 * Just punt the stop so we can get the next signal.
1809 */
1810 return 0;
1811
1812 /*
1813 * There is a group stop in progress. We stop
1814 * without any associated signal being in our queue.
1815 */
1816 stop_count = --current->signal->group_stop_count;
1817 if (stop_count == 0)
1818 current->signal->flags = SIGNAL_STOP_STOPPED;
1819 current->exit_code = current->signal->group_exit_code;
1820 set_current_state(TASK_STOPPED);
1821 spin_unlock_irq(&current->sighand->siglock);
1822 finish_stop(stop_count);
1823 return 1;
1824}
1825
1826int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1827 struct pt_regs *regs, void *cookie)
1828{
1829 sigset_t *mask = &current->blocked;
1830 int signr = 0;
1831
1832 try_to_freeze();
1833
1834relock:
1835 spin_lock_irq(&current->sighand->siglock);
1836 for (;;) {
1837 struct k_sigaction *ka;
1838
1839 if (unlikely(current->signal->group_stop_count > 0) &&
1840 handle_group_stop())
1841 goto relock;
1842
1843 signr = dequeue_signal(current, mask, info);
1844
1845 if (!signr)
1846 break; /* will return 0 */
1847
1848 if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
1849 ptrace_signal_deliver(regs, cookie);
1850
1851 /* Let the debugger run. */
1852 ptrace_stop(signr, signr, info);
1853
1854 /* We're back. Did the debugger cancel the sig or group_exit? */
1855 signr = current->exit_code;
1856 if (signr == 0 || current->signal->flags & SIGNAL_GROUP_EXIT)
1857 continue;
1858
1859 current->exit_code = 0;
1860
1861 /* Update the siginfo structure if the signal has
1862 changed. If the debugger wanted something
1863 specific in the siginfo structure then it should
1864 have updated *info via PTRACE_SETSIGINFO. */
1865 if (signr != info->si_signo) {
1866 info->si_signo = signr;
1867 info->si_errno = 0;
1868 info->si_code = SI_USER;
1869 info->si_pid = current->parent->pid;
1870 info->si_uid = current->parent->uid;
1871 }
1872
1873 /* If the (new) signal is now blocked, requeue it. */
1874 if (sigismember(&current->blocked, signr)) {
1875 specific_send_sig_info(signr, info, current);
1876 continue;
1877 }
1878 }
1879
1880 ka = &current->sighand->action[signr-1];
1881 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
1882 continue;
1883 if (ka->sa.sa_handler != SIG_DFL) {
1884 /* Run the handler. */
1885 *return_ka = *ka;
1886
1887 if (ka->sa.sa_flags & SA_ONESHOT)
1888 ka->sa.sa_handler = SIG_DFL;
1889
1890 break; /* will return non-zero "signr" value */
1891 }
1892
1893 /*
1894 * Now we are doing the default action for this signal.
1895 */
1896 if (sig_kernel_ignore(signr)) /* Default is nothing. */
1897 continue;
1898
1899 /* Init gets no signals it doesn't want. */
fef23e7f 1900 if (current == child_reaper)
1901 continue;
1902
1903 if (sig_kernel_stop(signr)) {
1904 /*
1905 * The default action is to stop all threads in
1906 * the thread group. The job control signals
1907 * do nothing in an orphaned pgrp, but SIGSTOP
1908 * always works. Note that siglock needs to be
1909 * dropped during the call to is_orphaned_pgrp()
1910 * because of lock ordering with tasklist_lock.
1911 * This allows an intervening SIGCONT to be posted.
1912 * We need to check for that and bail out if necessary.
1913 */
1914 if (signr != SIGSTOP) {
1915 spin_unlock_irq(&current->sighand->siglock);
1916
1917 /* signals can be posted during this window */
1918
1919 if (is_orphaned_pgrp(process_group(current)))
1920 goto relock;
1921
1922 spin_lock_irq(&current->sighand->siglock);
1923 }
1924
1925 if (likely(do_signal_stop(signr))) {
1926 /* It released the siglock. */
1927 goto relock;
1928 }
1929
1930 /*
1931 * We didn't actually stop, due to a race
1932 * with SIGCONT or something like that.
1933 */
1934 continue;
1935 }
1936
1937 spin_unlock_irq(&current->sighand->siglock);
1938
1939 /*
1940 * Anything else is fatal, maybe with a core dump.
1941 */
1942 current->flags |= PF_SIGNALED;
1943 if (sig_kernel_coredump(signr)) {
1944 /*
1945 * If it was able to dump core, this kills all
1946 * other threads in the group and synchronizes with
1947 * their demise. If we lost the race with another
1948 * thread getting here, it set group_exit_code
1949 * first and our do_group_exit call below will use
1950 * that value and ignore the one we pass it.
1951 */
1952 do_coredump((long)signr, signr, regs);
1953 }
1954
1955 /*
1956 * Death signals, no core dump.
1957 */
1958 do_group_exit(signr);
1959 /* NOTREACHED */
1960 }
1961 spin_unlock_irq(&current->sighand->siglock);
1962 return signr;
1963}
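/*
 * A minimal tracer sketch for the PT_PTRACED branch above, using only the
 * standard ptrace()/waitpid() interfaces (not part of this file).  The
 * traced child stops in ptrace_stop(); the tracer's signal argument to
 * PTRACE_CONT becomes current->exit_code, so passing 0 cancels the signal
 * exactly as the "signr == 0" check above describes.
 */
#if 0	/* illustrative userspace example, not kernel code */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	int status;
	pid_t child = fork();

	if (child == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGUSR1);			/* reported to the tracer first */
		printf("child: tracer cancelled SIGUSR1\n");
		exit(0);
	}

	waitpid(child, &status, 0);
	if (WIFSTOPPED(status) && WSTOPSIG(status) == SIGUSR1)
		printf("tracer: child stopped on SIGUSR1\n");

	ptrace(PTRACE_CONT, child, NULL, (void *)0);	/* 0 = discard signal */
	waitpid(child, &status, 0);
	return 0;
}
#endif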
1964
1965EXPORT_SYMBOL(recalc_sigpending);
1966EXPORT_SYMBOL_GPL(dequeue_signal);
1967EXPORT_SYMBOL(flush_signals);
1968EXPORT_SYMBOL(force_sig);
1969EXPORT_SYMBOL(kill_pg);
1970EXPORT_SYMBOL(kill_proc);
1971EXPORT_SYMBOL(ptrace_notify);
1972EXPORT_SYMBOL(send_sig);
1973EXPORT_SYMBOL(send_sig_info);
1974EXPORT_SYMBOL(sigprocmask);
1975EXPORT_SYMBOL(block_all_signals);
1976EXPORT_SYMBOL(unblock_all_signals);
1977
1978
1979/*
1980 * System call entry points.
1981 */
1982
1983asmlinkage long sys_restart_syscall(void)
1984{
1985 struct restart_block *restart = &current_thread_info()->restart_block;
1986 return restart->fn(restart);
1987}
1988
1989long do_no_restart_syscall(struct restart_block *param)
1990{
1991 return -EINTR;
1992}
1993
1994/*
1995 * We don't need to get the kernel lock - this is all local to this
1996 * particular thread.. (and that's good, because this is _heavily_
1997 * used by various programs)
1998 */
1999
2000/*
2001 * This is also useful for kernel threads that want to temporarily
2002 * (or permanently) block certain signals.
2003 *
2004 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2005 * interface happily blocks "unblockable" signals like SIGKILL
2006 * and friends.
2007 */
2008int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2009{
2010 int error;
2011
2012 spin_lock_irq(&current->sighand->siglock);
2013 if (oldset)
2014 *oldset = current->blocked;
2015
2016 error = 0;
2017 switch (how) {
2018 case SIG_BLOCK:
2019 sigorsets(&current->blocked, &current->blocked, set);
2020 break;
2021 case SIG_UNBLOCK:
2022 signandsets(&current->blocked, &current->blocked, set);
2023 break;
2024 case SIG_SETMASK:
2025 current->blocked = *set;
2026 break;
2027 default:
2028 error = -EINVAL;
2029 }
2030 recalc_sigpending();
2031 spin_unlock_irq(&current->sighand->siglock);
a26fd335 2032
2033 return error;
2034}
2035
2036asmlinkage long
2037sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
2038{
2039 int error = -EINVAL;
2040 sigset_t old_set, new_set;
2041
2042 /* XXX: Don't preclude handling different sized sigset_t's. */
2043 if (sigsetsize != sizeof(sigset_t))
2044 goto out;
2045
2046 if (set) {
2047 error = -EFAULT;
2048 if (copy_from_user(&new_set, set, sizeof(*set)))
2049 goto out;
2050 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2051
2052 error = sigprocmask(how, &new_set, &old_set);
2053 if (error)
2054 goto out;
2055 if (oset)
2056 goto set_old;
2057 } else if (oset) {
2058 spin_lock_irq(&current->sighand->siglock);
2059 old_set = current->blocked;
2060 spin_unlock_irq(&current->sighand->siglock);
2061
2062 set_old:
2063 error = -EFAULT;
2064 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2065 goto out;
2066 }
2067 error = 0;
2068out:
2069 return error;
2070}
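/*
 * Minimal userspace sketch of the wrapper path into sys_rt_sigprocmask()
 * above (not part of this file); SIG_BLOCK ORs the set into the blocked
 * mask and SIG_SETMASK replaces it, exactly as sigprocmask() does above.
 */
#if 0	/* illustrative userspace example, not kernel code */
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t block, old;

	sigemptyset(&block);
	sigaddset(&block, SIGINT);

	/* Add SIGINT to the blocked mask, remembering the previous one. */
	if (sigprocmask(SIG_BLOCK, &block, &old) == 0)
		printf("SIGINT is now blocked\n");

	/* ... section that must not be interrupted by SIGINT ... */

	/* Restore the old mask; a SIGINT queued meanwhile is delivered now. */
	sigprocmask(SIG_SETMASK, &old, NULL);
	return 0;
}
#endif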
2071
2072long do_sigpending(void __user *set, unsigned long sigsetsize)
2073{
2074 long error = -EINVAL;
2075 sigset_t pending;
2076
2077 if (sigsetsize > sizeof(sigset_t))
2078 goto out;
2079
2080 spin_lock_irq(&current->sighand->siglock);
2081 sigorsets(&pending, &current->pending.signal,
2082 &current->signal->shared_pending.signal);
2083 spin_unlock_irq(&current->sighand->siglock);
2084
2085 /* Outside the lock because only this thread touches it. */
2086 sigandsets(&pending, &current->blocked, &pending);
2087
2088 error = -EFAULT;
2089 if (!copy_to_user(set, &pending, sigsetsize))
2090 error = 0;
2091
2092out:
2093 return error;
2094}
2095
2096asmlinkage long
2097sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
2098{
2099 return do_sigpending(set, sigsetsize);
2100}
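/*
 * do_sigpending() above reports signals that are both pending and blocked;
 * a minimal userspace sketch of that behaviour (not part of this file):
 */
#if 0	/* illustrative userspace example, not kernel code */
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t block, pending;

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, NULL);

	raise(SIGUSR1);			/* stays pending while blocked */

	sigpending(&pending);
	if (sigismember(&pending, SIGUSR1))
		printf("SIGUSR1 is pending while blocked\n");
	return 0;
}
#endif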
2101
2102#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2103
2104int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2105{
2106 int err;
2107
2108 if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2109 return -EFAULT;
2110 if (from->si_code < 0)
2111 return __copy_to_user(to, from, sizeof(siginfo_t))
2112 ? -EFAULT : 0;
2113 /*
2114 * If you change siginfo_t structure, please be sure
2115 * this code is fixed accordingly.
2116 * It should never copy any pad contained in the structure
2117 * to avoid security leaks, but must copy the generic
2118 * 3 ints plus the relevant union member.
2119 */
2120 err = __put_user(from->si_signo, &to->si_signo);
2121 err |= __put_user(from->si_errno, &to->si_errno);
2122 err |= __put_user((short)from->si_code, &to->si_code);
2123 switch (from->si_code & __SI_MASK) {
2124 case __SI_KILL:
2125 err |= __put_user(from->si_pid, &to->si_pid);
2126 err |= __put_user(from->si_uid, &to->si_uid);
2127 break;
2128 case __SI_TIMER:
2129 err |= __put_user(from->si_tid, &to->si_tid);
2130 err |= __put_user(from->si_overrun, &to->si_overrun);
2131 err |= __put_user(from->si_ptr, &to->si_ptr);
2132 break;
2133 case __SI_POLL:
2134 err |= __put_user(from->si_band, &to->si_band);
2135 err |= __put_user(from->si_fd, &to->si_fd);
2136 break;
2137 case __SI_FAULT:
2138 err |= __put_user(from->si_addr, &to->si_addr);
2139#ifdef __ARCH_SI_TRAPNO
2140 err |= __put_user(from->si_trapno, &to->si_trapno);
2141#endif
2142 break;
2143 case __SI_CHLD:
2144 err |= __put_user(from->si_pid, &to->si_pid);
2145 err |= __put_user(from->si_uid, &to->si_uid);
2146 err |= __put_user(from->si_status, &to->si_status);
2147 err |= __put_user(from->si_utime, &to->si_utime);
2148 err |= __put_user(from->si_stime, &to->si_stime);
2149 break;
2150 case __SI_RT: /* This is not generated by the kernel as of now. */
2151 case __SI_MESGQ: /* But this is */
2152 err |= __put_user(from->si_pid, &to->si_pid);
2153 err |= __put_user(from->si_uid, &to->si_uid);
2154 err |= __put_user(from->si_ptr, &to->si_ptr);
2155 break;
2156 default: /* this is just in case for now ... */
2157 err |= __put_user(from->si_pid, &to->si_pid);
2158 err |= __put_user(from->si_uid, &to->si_uid);
2159 break;
2160 }
2161 return err;
2162}
2163
2164#endif
2165
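/*
 * The union members copied by copy_siginfo_to_user() above are what an
 * SA_SIGINFO handler sees; a minimal userspace sketch (not part of this
 * file), using the __SI_KILL layout filled in by kill():
 */
#if 0	/* illustrative userspace example, not kernel code */
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void handler(int sig, siginfo_t *si, void *ucontext)
{
	/* printf() is not async-signal-safe; acceptable only for a demo. */
	printf("sig %d from pid %d uid %d, si_code %d\n",
	       sig, (int)si->si_pid, (int)si->si_uid, si->si_code);
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);

	kill(getpid(), SIGUSR1);	/* handler reports si_pid/si_uid */
	return 0;
}
#endif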
2166asmlinkage long
2167sys_rt_sigtimedwait(const sigset_t __user *uthese,
2168 siginfo_t __user *uinfo,
2169 const struct timespec __user *uts,
2170 size_t sigsetsize)
2171{
2172 int ret, sig;
2173 sigset_t these;
2174 struct timespec ts;
2175 siginfo_t info;
2176 long timeout = 0;
2177
2178 /* XXX: Don't preclude handling different sized sigset_t's. */
2179 if (sigsetsize != sizeof(sigset_t))
2180 return -EINVAL;
2181
2182 if (copy_from_user(&these, uthese, sizeof(these)))
2183 return -EFAULT;
2184
2185 /*
2186 * Invert the set of allowed signals to get those we
2187 * want to block.
2188 */
2189 sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2190 signotset(&these);
2191
2192 if (uts) {
2193 if (copy_from_user(&ts, uts, sizeof(ts)))
2194 return -EFAULT;
2195 if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2196 || ts.tv_sec < 0)
2197 return -EINVAL;
2198 }
2199
2200 spin_lock_irq(&current->sighand->siglock);
2201 sig = dequeue_signal(current, &these, &info);
2202 if (!sig) {
2203 timeout = MAX_SCHEDULE_TIMEOUT;
2204 if (uts)
2205 timeout = (timespec_to_jiffies(&ts)
2206 + (ts.tv_sec || ts.tv_nsec));
2207
2208 if (timeout) {
 2209 /* None ready -- temporarily unblock those we're
 2210 * interested in while we are sleeping, so that
 2211 * we'll be awakened when they arrive. */
2212 current->real_blocked = current->blocked;
2213 sigandsets(&current->blocked, &current->blocked, &these);
2214 recalc_sigpending();
2215 spin_unlock_irq(&current->sighand->siglock);
2216
75bcc8c5 2217 timeout = schedule_timeout_interruptible(timeout);
1da177e4 2218
2219 spin_lock_irq(&current->sighand->siglock);
2220 sig = dequeue_signal(current, &these, &info);
2221 current->blocked = current->real_blocked;
2222 siginitset(&current->real_blocked, 0);
2223 recalc_sigpending();
2224 }
2225 }
2226 spin_unlock_irq(&current->sighand->siglock);
2227
2228 if (sig) {
2229 ret = sig;
2230 if (uinfo) {
2231 if (copy_siginfo_to_user(uinfo, &info))
2232 ret = -EFAULT;
2233 }
2234 } else {
2235 ret = -EAGAIN;
2236 if (timeout)
2237 ret = -EINTR;
2238 }
2239
2240 return ret;
2241}
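/*
 * Minimal userspace sketch of synchronous signal handling through
 * sigtimedwait(), the wrapper over sys_rt_sigtimedwait() above (not part
 * of this file).  The waited-for signals must be blocked so that they stay
 * queued instead of being delivered to a handler or default action.
 */
#if 0	/* illustrative userspace example, not kernel code */
#include <signal.h>
#include <stdio.h>
#include <errno.h>

int main(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec timeout;
	int sig;

	timeout.tv_sec = 2;
	timeout.tv_nsec = 0;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);

	raise(SIGUSR1);			/* queued; picked up below */

	sig = sigtimedwait(&set, &info, &timeout);
	if (sig == SIGUSR1)
		printf("got SIGUSR1 from pid %d\n", (int)info.si_pid);
	else if (sig < 0 && errno == EAGAIN)
		printf("timed out\n");
	return 0;
}
#endif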
2242
2243asmlinkage long
2244sys_kill(int pid, int sig)
2245{
2246 struct siginfo info;
2247
2248 info.si_signo = sig;
2249 info.si_errno = 0;
2250 info.si_code = SI_USER;
2251 info.si_pid = current->tgid;
2252 info.si_uid = current->uid;
2253
2254 return kill_something_info(sig, &info, pid);
2255}
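/*
 * The pid argument is interpreted by kill_something_info(); a minimal
 * userspace sketch of the usual cases (not part of this file).  Signal 0
 * performs only the permission/existence check, as noted for do_tkill()
 * below, so the calls here have no side effects.
 */
#if 0	/* illustrative userspace example, not kernel code */
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	pid_t target = getpid();

	if (kill(target, 0) == 0)	/* pid > 0: one thread group */
		printf("process %d exists and may be signalled\n", (int)target);

	kill(0, 0);			/* pid == 0: caller's process group */
	/* kill(-1, sig) broadcasts; kill(-pgrp, sig) hits that group. */
	return 0;
}
#endif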
2256
6dd69f10 2257static int do_tkill(int tgid, int pid, int sig)
1da177e4 2258{
1da177e4 2259 int error;
6dd69f10 2260 struct siginfo info;
2261 struct task_struct *p;
2262
6dd69f10 2263 error = -ESRCH;
2264 info.si_signo = sig;
2265 info.si_errno = 0;
2266 info.si_code = SI_TKILL;
2267 info.si_pid = current->tgid;
2268 info.si_uid = current->uid;
2269
2270 read_lock(&tasklist_lock);
2271 p = find_task_by_pid(pid);
6dd69f10 2272 if (p && (tgid <= 0 || p->tgid == tgid)) {
2273 error = check_kill_permission(sig, &info, p);
2274 /*
2275 * The null signal is a permissions and process existence
2276 * probe. No signal is actually delivered.
2277 */
2278 if (!error && sig && p->sighand) {
2279 spin_lock_irq(&p->sighand->siglock);
2280 handle_stop_signal(sig, p);
2281 error = specific_send_sig_info(sig, &info, p);
2282 spin_unlock_irq(&p->sighand->siglock);
2283 }
2284 }
2285 read_unlock(&tasklist_lock);
6dd69f10 2286
2287 return error;
2288}
2289
2290/**
2291 * sys_tgkill - send signal to one specific thread
2292 * @tgid: the thread group ID of the thread
2293 * @pid: the PID of the thread
2294 * @sig: signal to be sent
2295 *
2296 * This syscall also checks the tgid and returns -ESRCH even if the PID
 2297 * exists but no longer belongs to the target thread group. This
2298 * method solves the problem of threads exiting and PIDs getting reused.
2299 */
2300asmlinkage long sys_tgkill(int tgid, int pid, int sig)
2301{
2302 /* This is only valid for single tasks */
2303 if (pid <= 0 || tgid <= 0)
2304 return -EINVAL;
2305
2306 return do_tkill(tgid, pid, sig);
2307}
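/*
 * glibc of this era typically has no tgkill() wrapper, so a minimal
 * userspace sketch goes through syscall() directly (not part of this
 * file).  The tgid check above turns a stale tid into -ESRCH rather than
 * signalling a recycled PID.
 */
#if 0	/* illustrative userspace example, not kernel code */
#include <sys/syscall.h>
#include <signal.h>
#include <unistd.h>

int main(void)
{
	pid_t tgid = getpid();
	pid_t tid  = syscall(SYS_gettid);

	signal(SIGUSR1, SIG_IGN);		/* keep the demo harmless */
	syscall(SYS_tgkill, tgid, tid, SIGUSR1);/* signal one specific thread */
	return 0;
}
#endif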
2308
2309/*
2310 * Send a signal to only one task, even if it's a CLONE_THREAD task.
2311 */
2312asmlinkage long
2313sys_tkill(int pid, int sig)
2314{
2315 /* This is only valid for single tasks */
2316 if (pid <= 0)
2317 return -EINVAL;
2318
6dd69f10 2319 return do_tkill(0, pid, sig);
2320}
2321
2322asmlinkage long
2323sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
2324{
2325 siginfo_t info;
2326
2327 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2328 return -EFAULT;
2329
2330 /* Not even root can pretend to send signals from the kernel.
2331 Nor can they impersonate a kill(), which adds source info. */
2332 if (info.si_code >= 0)
2333 return -EPERM;
2334 info.si_signo = sig;
2335
2336 /* POSIX.1b doesn't mention process groups. */
2337 return kill_proc_info(sig, &info, pid);
2338}
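/*
 * Userspace normally reaches this syscall through sigqueue(), which builds
 * a siginfo with si_code = SI_QUEUE (negative, so the check above passes)
 * and attaches a caller-chosen value.  A minimal sketch, not part of this
 * file:
 */
#if 0	/* illustrative userspace example, not kernel code */
#include <signal.h>
#include <unistd.h>

int main(void)
{
	union sigval value;
	sigset_t block;

	/* Block the signal so this self-send has no visible effect. */
	sigemptyset(&block);
	sigaddset(&block, SIGRTMIN);
	sigprocmask(SIG_BLOCK, &block, NULL);

	value.sival_int = 42;
	sigqueue(getpid(), SIGRTMIN, value);	/* queued with the value */
	return 0;
}
#endif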
2339
2340int
9ac95f2f 2341do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2342{
2343 struct k_sigaction *k;
71fabd5e 2344 sigset_t mask;
1da177e4 2345
7ed20e1a 2346 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2347 return -EINVAL;
2348
2349 k = &current->sighand->action[sig-1];
2350
2351 spin_lock_irq(&current->sighand->siglock);
2352 if (signal_pending(current)) {
2353 /*
2354 * If there might be a fatal signal pending on multiple
2355 * threads, make sure we take it before changing the action.
2356 */
2357 spin_unlock_irq(&current->sighand->siglock);
2358 return -ERESTARTNOINTR;
2359 }
2360
2361 if (oact)
2362 *oact = *k;
2363
2364 if (act) {
2365 sigdelsetmask(&act->sa.sa_mask,
2366 sigmask(SIGKILL) | sigmask(SIGSTOP));
2367 /*
2368 * POSIX 3.3.1.3:
2369 * "Setting a signal action to SIG_IGN for a signal that is
2370 * pending shall cause the pending signal to be discarded,
2371 * whether or not it is blocked."
2372 *
2373 * "Setting a signal action to SIG_DFL for a signal that is
2374 * pending and whose default action is to ignore the signal
2375 * (for example, SIGCHLD), shall cause the pending signal to
2376 * be discarded, whether or not it is blocked"
2377 */
2378 if (act->sa.sa_handler == SIG_IGN ||
2379 (act->sa.sa_handler == SIG_DFL &&
2380 sig_kernel_ignore(sig))) {
2381 /*
2382 * This is a fairly rare case, so we only take the
2383 * tasklist_lock once we're sure we'll need it.
2384 * Now we must do this little unlock and relock
2385 * dance to maintain the lock hierarchy.
2386 */
2387 struct task_struct *t = current;
2388 spin_unlock_irq(&t->sighand->siglock);
2389 read_lock(&tasklist_lock);
2390 spin_lock_irq(&t->sighand->siglock);
2391 *k = *act;
2392 sigemptyset(&mask);
2393 sigaddset(&mask, sig);
2394 rm_from_queue_full(&mask, &t->signal->shared_pending);
1da177e4 2395 do {
71fabd5e 2396 rm_from_queue_full(&mask, &t->pending);
2397 recalc_sigpending_tsk(t);
2398 t = next_thread(t);
2399 } while (t != current);
2400 spin_unlock_irq(&current->sighand->siglock);
2401 read_unlock(&tasklist_lock);
2402 return 0;
2403 }
2404
2405 *k = *act;
2406 }
2407
2408 spin_unlock_irq(&current->sighand->siglock);
2409 return 0;
2410}
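/*
 * A minimal userspace sketch of installing and restoring a handler through
 * sigaction(), which ends up in do_sigaction() above (not part of this
 * file).  SIGKILL and SIGSTOP are rejected by the sig_kernel_only() check,
 * and setting SIG_IGN discards already-pending instances as the POSIX
 * quote above requires.
 */
#if 0	/* illustrative userspace example, not kernel code */
#include <signal.h>
#include <string.h>

static void on_int(int sig)
{
	(void)sig;			/* react to SIGINT here */
}

int main(void)
{
	struct sigaction sa, old;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_int;
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = SA_RESTART;

	sigaction(SIGINT, &sa, &old);	/* install, saving the old action */
	/* ... */
	sigaction(SIGINT, &old, NULL);	/* restore the previous action */
	return 0;
}
#endif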
2411
2412int
2413do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2414{
2415 stack_t oss;
2416 int error;
2417
2418 if (uoss) {
2419 oss.ss_sp = (void __user *) current->sas_ss_sp;
2420 oss.ss_size = current->sas_ss_size;
2421 oss.ss_flags = sas_ss_flags(sp);
2422 }
2423
2424 if (uss) {
2425 void __user *ss_sp;
2426 size_t ss_size;
2427 int ss_flags;
2428
2429 error = -EFAULT;
2430 if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
2431 || __get_user(ss_sp, &uss->ss_sp)
2432 || __get_user(ss_flags, &uss->ss_flags)
2433 || __get_user(ss_size, &uss->ss_size))
2434 goto out;
2435
2436 error = -EPERM;
2437 if (on_sig_stack(sp))
2438 goto out;
2439
2440 error = -EINVAL;
2441 /*
2442 *
 2443 * Note: this code used to test ss_flags incorrectly;
 2444 * old code may have been written using ss_flags==0
 2445 * to mean ss_flags==SS_ONSTACK (as this was the only
 2446 * way that worked), so this check preserves that
 2447 * older mechanism.
2448 */
2449 if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2450 goto out;
2451
2452 if (ss_flags == SS_DISABLE) {
2453 ss_size = 0;
2454 ss_sp = NULL;
2455 } else {
2456 error = -ENOMEM;
2457 if (ss_size < MINSIGSTKSZ)
2458 goto out;
2459 }
2460
2461 current->sas_ss_sp = (unsigned long) ss_sp;
2462 current->sas_ss_size = ss_size;
2463 }
2464
2465 if (uoss) {
2466 error = -EFAULT;
2467 if (copy_to_user(uoss, &oss, sizeof(oss)))
2468 goto out;
2469 }
2470
2471 error = 0;
2472out:
2473 return error;
2474}
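/*
 * A minimal userspace sketch of the sigaltstack()/SA_ONSTACK pairing that
 * do_sigaltstack() above services (not part of this file).  The alternate
 * stack is what lets a SIGSEGV caused by stack overflow still run a
 * handler.
 */
#if 0	/* illustrative userspace example, not kernel code */
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static void on_segv(int sig)
{
	/* Entered on the alternate stack because of SA_ONSTACK. */
	(void)sig;
	_exit(1);
}

int main(void)
{
	stack_t ss;
	struct sigaction sa;

	ss.ss_sp = malloc(SIGSTKSZ);
	ss.ss_size = SIGSTKSZ;
	ss.ss_flags = 0;		/* 0, SS_ONSTACK or SS_DISABLE only */
	sigaltstack(&ss, NULL);

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_segv;
	sa.sa_flags = SA_ONSTACK;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);

	*(volatile int *)0 = 0;		/* deliberate fault for the demo */
	return 0;
}
#endif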
2475
2476#ifdef __ARCH_WANT_SYS_SIGPENDING
2477
2478asmlinkage long
2479sys_sigpending(old_sigset_t __user *set)
2480{
2481 return do_sigpending(set, sizeof(*set));
2482}
2483
2484#endif
2485
2486#ifdef __ARCH_WANT_SYS_SIGPROCMASK
2487/* Some platforms have their own version with special arguments;
2488 others support only sys_rt_sigprocmask. */
2489
2490asmlinkage long
2491sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
2492{
2493 int error;
2494 old_sigset_t old_set, new_set;
2495
2496 if (set) {
2497 error = -EFAULT;
2498 if (copy_from_user(&new_set, set, sizeof(*set)))
2499 goto out;
2500 new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2501
2502 spin_lock_irq(&current->sighand->siglock);
2503 old_set = current->blocked.sig[0];
2504
2505 error = 0;
2506 switch (how) {
2507 default:
2508 error = -EINVAL;
2509 break;
2510 case SIG_BLOCK:
2511 sigaddsetmask(&current->blocked, new_set);
2512 break;
2513 case SIG_UNBLOCK:
2514 sigdelsetmask(&current->blocked, new_set);
2515 break;
2516 case SIG_SETMASK:
2517 current->blocked.sig[0] = new_set;
2518 break;
2519 }
2520
2521 recalc_sigpending();
2522 spin_unlock_irq(&current->sighand->siglock);
2523 if (error)
2524 goto out;
2525 if (oset)
2526 goto set_old;
2527 } else if (oset) {
2528 old_set = current->blocked.sig[0];
2529 set_old:
2530 error = -EFAULT;
2531 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2532 goto out;
2533 }
2534 error = 0;
2535out:
2536 return error;
2537}
2538#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2539
2540#ifdef __ARCH_WANT_SYS_RT_SIGACTION
2541asmlinkage long
2542sys_rt_sigaction(int sig,
2543 const struct sigaction __user *act,
2544 struct sigaction __user *oact,
2545 size_t sigsetsize)
2546{
2547 struct k_sigaction new_sa, old_sa;
2548 int ret = -EINVAL;
2549
2550 /* XXX: Don't preclude handling different sized sigset_t's. */
2551 if (sigsetsize != sizeof(sigset_t))
2552 goto out;
2553
2554 if (act) {
2555 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2556 return -EFAULT;
2557 }
2558
2559 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2560
2561 if (!ret && oact) {
2562 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2563 return -EFAULT;
2564 }
2565out:
2566 return ret;
2567}
2568#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2569
2570#ifdef __ARCH_WANT_SYS_SGETMASK
2571
2572/*
2573 * For backwards compatibility. Functionality superseded by sigprocmask.
2574 */
2575asmlinkage long
2576sys_sgetmask(void)
2577{
2578 /* SMP safe */
2579 return current->blocked.sig[0];
2580}
2581
2582asmlinkage long
2583sys_ssetmask(int newmask)
2584{
2585 int old;
2586
2587 spin_lock_irq(&current->sighand->siglock);
2588 old = current->blocked.sig[0];
2589
2590 siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2591 sigmask(SIGSTOP)));
2592 recalc_sigpending();
2593 spin_unlock_irq(&current->sighand->siglock);
2594
2595 return old;
2596}
2597#endif /* __ARCH_WANT_SYS_SGETMASK */
2598
2599#ifdef __ARCH_WANT_SYS_SIGNAL
2600/*
2601 * For backwards compatibility. Functionality superseded by sigaction.
2602 */
2603asmlinkage unsigned long
2604sys_signal(int sig, __sighandler_t handler)
2605{
2606 struct k_sigaction new_sa, old_sa;
2607 int ret;
2608
2609 new_sa.sa.sa_handler = handler;
2610 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
c70d3d70 2611 sigemptyset(&new_sa.sa.sa_mask);
2612
2613 ret = do_sigaction(sig, &new_sa, &old_sa);
2614
2615 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2616}
2617#endif /* __ARCH_WANT_SYS_SIGNAL */
2618
2619#ifdef __ARCH_WANT_SYS_PAUSE
2620
2621asmlinkage long
2622sys_pause(void)
2623{
2624 current->state = TASK_INTERRUPTIBLE;
2625 schedule();
2626 return -ERESTARTNOHAND;
2627}
2628
2629#endif
2630
2631#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
2632asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
2633{
2634 sigset_t newset;
2635
2636 /* XXX: Don't preclude handling different sized sigset_t's. */
2637 if (sigsetsize != sizeof(sigset_t))
2638 return -EINVAL;
2639
2640 if (copy_from_user(&newset, unewset, sizeof(newset)))
2641 return -EFAULT;
2642 sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2643
2644 spin_lock_irq(&current->sighand->siglock);
2645 current->saved_sigmask = current->blocked;
2646 current->blocked = newset;
2647 recalc_sigpending();
2648 spin_unlock_irq(&current->sighand->siglock);
2649
2650 current->state = TASK_INTERRUPTIBLE;
2651 schedule();
2652 set_thread_flag(TIF_RESTORE_SIGMASK);
2653 return -ERESTARTNOHAND;
2654}
2655#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
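/*
 * A minimal userspace sketch of the classic race-free wait that
 * sys_rt_sigsuspend() above enables (not part of this file): the signal is
 * blocked while the condition is tested, and sigsuspend() atomically swaps
 * in the old mask and sleeps, just as saved_sigmask/TIF_RESTORE_SIGMASK do
 * on the kernel side.
 */
#if 0	/* illustrative userspace example, not kernel code */
#include <sys/types.h>
#include <sys/wait.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static volatile sig_atomic_t got_child;

static void on_chld(int sig)
{
	(void)sig;
	got_child = 1;
}

int main(void)
{
	sigset_t block, orig;
	struct sigaction sa;
	pid_t pid;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_chld;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGCHLD, &sa, NULL);

	sigemptyset(&block);
	sigaddset(&block, SIGCHLD);
	sigprocmask(SIG_BLOCK, &block, &orig);	/* close the race window */

	pid = fork();
	if (pid == 0)
		_exit(0);			/* child exit raises SIGCHLD */

	while (!got_child)
		sigsuspend(&orig);		/* atomically unblock + sleep */

	sigprocmask(SIG_SETMASK, &orig, NULL);
	waitpid(pid, NULL, 0);
	printf("child reported via SIGCHLD\n");
	return 0;
}
#endif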
2656
2657void __init signals_init(void)
2658{
2659 sigqueue_cachep =
2660 kmem_cache_create("sigqueue",
2661 sizeof(struct sigqueue),
2662 __alignof__(struct sigqueue),
2663 SLAB_PANIC, NULL, NULL);
2664}