/*
 *  linux/kernel/sys.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/module.h>
#include <linux/utsname.h>
#include <linux/mman.h>
#include <linux/smp_lock.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
#include <linux/highuid.h>
#include <linux/resource.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/key.h>
#include <linux/times.h>
#include <linux/posix-timers.h>
#include <linux/security.h>
#include <linux/dcookies.h>
#include <linux/suspend.h>
#include <linux/tty.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>
#include <linux/getcpu.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/kprobes.h>
#include <linux/user_namespace.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/unistd.h>
#ifndef SET_UNALIGN_CTL
# define SET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_UNALIGN_CTL
# define GET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEMU_CTL
# define SET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEMU_CTL
# define GET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEXC_CTL
# define SET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEXC_CTL
# define GET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_ENDIAN
# define GET_ENDIAN(a,b)	(-EINVAL)
#endif
#ifndef SET_ENDIAN
# define SET_ENDIAN(a,b)	(-EINVAL)
#endif
/*
 * this is where the system-wide overflow UID and GID are defined, for
 * architectures that now have 32-bit UID/GID but didn't in the past
 */

int overflowuid = DEFAULT_OVERFLOWUID;
int overflowgid = DEFAULT_OVERFLOWGID;

#ifdef CONFIG_UID16
EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);
#endif

/*
 * the same as above, but for filesystems which can only store a 16-bit
 * UID and GID. as such, this is needed on all architectures
 */

int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;

EXPORT_SYMBOL(fs_overflowuid);
EXPORT_SYMBOL(fs_overflowgid);

/*
 * this indicates whether you can reboot with ctrl-alt-del: the default is yes
 */

int C_A_D = 1;
struct pid *cad_pid;
EXPORT_SYMBOL(cad_pid);

/*
 *	Notifier list for kernel code which wants to be called
 *	at shutdown. This is used to stop any idling DMA operations
 *	and the like.
 */

static BLOCKING_NOTIFIER_HEAD(reboot_notifier_list);
/*
 *	Notifier chain core routines.  The exported routines below
 *	are layered on top of these, with appropriate locking added.
 */

static int notifier_chain_register(struct notifier_block **nl,
		struct notifier_block *n)
{
	while ((*nl) != NULL) {
		if (n->priority > (*nl)->priority)
			break;
		nl = &((*nl)->next);
	}
	n->next = *nl;
	rcu_assign_pointer(*nl, n);
	return 0;
}

static int notifier_chain_unregister(struct notifier_block **nl,
		struct notifier_block *n)
{
	while ((*nl) != NULL) {
		if ((*nl) == n) {
			rcu_assign_pointer(*nl, n->next);
			return 0;
		}
		nl = &((*nl)->next);
	}
	return -ENOENT;
}
/**
 * notifier_call_chain - Informs the registered notifiers about an event.
 *	@nl:		Pointer to head of the blocking notifier chain
 *	@val:		Value passed unmodified to notifier function
 *	@v:		Pointer passed unmodified to notifier function
 *	@nr_to_call:	Number of notifier functions to be called; pass -1
 *			to call them all.
 *	@nr_calls:	Records the number of notifications sent; pass NULL
 *			if the count is not needed.
 *	@returns:	notifier_call_chain returns the value returned by the
 *			last notifier function called.
 */

static int __kprobes notifier_call_chain(struct notifier_block **nl,
					unsigned long val, void *v,
					int nr_to_call, int *nr_calls)
{
	int ret = NOTIFY_DONE;
	struct notifier_block *nb, *next_nb;

	nb = rcu_dereference(*nl);

	while (nb && nr_to_call) {
		next_nb = rcu_dereference(nb->next);
		ret = nb->notifier_call(nb, val, v);

		if (nr_calls)
			(*nr_calls)++;

		if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK)
			break;
		nb = next_nb;
		nr_to_call--;
	}
	return ret;
}
/*
 *	Atomic notifier chain routines.  Registration and unregistration
 *	use a spinlock, and call_chain is synchronized by RCU (no locks).
 */

/**
 *	atomic_notifier_chain_register - Add notifier to an atomic notifier chain
 *	@nh: Pointer to head of the atomic notifier chain
 *	@n: New entry in notifier chain
 *
 *	Adds a notifier to an atomic notifier chain.
 *
 *	Currently always returns zero.
 */

int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
		struct notifier_block *n)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&nh->lock, flags);
	ret = notifier_chain_register(&nh->head, n);
	spin_unlock_irqrestore(&nh->lock, flags);
	return ret;
}

EXPORT_SYMBOL_GPL(atomic_notifier_chain_register);
/**
 *	atomic_notifier_chain_unregister - Remove notifier from an atomic notifier chain
 *	@nh: Pointer to head of the atomic notifier chain
 *	@n: Entry to remove from notifier chain
 *
 *	Removes a notifier from an atomic notifier chain.
 *
 *	Returns zero on success or %-ENOENT on failure.
 */
int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
		struct notifier_block *n)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&nh->lock, flags);
	ret = notifier_chain_unregister(&nh->head, n);
	spin_unlock_irqrestore(&nh->lock, flags);
	synchronize_rcu();
	return ret;
}

EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister);
/**
 *	__atomic_notifier_call_chain - Call functions in an atomic notifier chain
 *	@nh: Pointer to head of the atomic notifier chain
 *	@val: Value passed unmodified to notifier function
 *	@v: Pointer passed unmodified to notifier function
 *	@nr_to_call: See the comment for notifier_call_chain.
 *	@nr_calls: See the comment for notifier_call_chain.
 *
 *	Calls each function in a notifier chain in turn.  The functions
 *	run in an atomic context, so they must not block.
 *	This routine uses RCU to synchronize with changes to the chain.
 *
 *	If the return value of the notifier can be and'ed
 *	with %NOTIFY_STOP_MASK then atomic_notifier_call_chain()
 *	will return immediately, with the return value of
 *	the notifier function which halted execution.
 *	Otherwise the return value is the return value
 *	of the last notifier function called.
 */

int __kprobes __atomic_notifier_call_chain(struct atomic_notifier_head *nh,
					unsigned long val, void *v,
					int nr_to_call, int *nr_calls)
{
	int ret;

	rcu_read_lock();
	ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(__atomic_notifier_call_chain);

int __kprobes atomic_notifier_call_chain(struct atomic_notifier_head *nh,
		unsigned long val, void *v)
{
	return __atomic_notifier_call_chain(nh, val, v, -1, NULL);
}
EXPORT_SYMBOL_GPL(atomic_notifier_call_chain);
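
/*
 * Usage sketch (editor's illustration, not part of the original source):
 * a minimal client of the atomic chain API above.  The chain, callback
 * and event names are invented for the example.
 *
 *	static ATOMIC_NOTIFIER_HEAD(my_chain);
 *
 *	static int my_event(struct notifier_block *nb, unsigned long action,
 *			    void *data)
 *	{
 *		return NOTIFY_OK;	must not block: atomic context
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call	= my_event,
 *		.priority	= 0,	higher priority runs earlier
 *	};
 *
 *	atomic_notifier_chain_register(&my_chain, &my_nb);
 *	atomic_notifier_call_chain(&my_chain, MY_EVENT, NULL);
 *	atomic_notifier_chain_unregister(&my_chain, &my_nb);
 */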
/*
 *	Blocking notifier chain routines.  All access to the chain is
 *	synchronized by an rwsem.
 */

/**
 *	blocking_notifier_chain_register - Add notifier to a blocking notifier chain
 *	@nh: Pointer to head of the blocking notifier chain
 *	@n: New entry in notifier chain
 *
 *	Adds a notifier to a blocking notifier chain.
 *	Must be called in process context.
 *
 *	Currently always returns zero.
 */

int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
		struct notifier_block *n)
{
	int ret;

	/*
	 * This code gets used during boot-up, when task switching is
	 * not yet working and interrupts must remain disabled.  At
	 * such times we must not call down_write().
	 */
	if (unlikely(system_state == SYSTEM_BOOTING))
		return notifier_chain_register(&nh->head, n);

	down_write(&nh->rwsem);
	ret = notifier_chain_register(&nh->head, n);
	up_write(&nh->rwsem);
	return ret;
}

EXPORT_SYMBOL_GPL(blocking_notifier_chain_register);
/**
 *	blocking_notifier_chain_unregister - Remove notifier from a blocking notifier chain
 *	@nh: Pointer to head of the blocking notifier chain
 *	@n: Entry to remove from notifier chain
 *
 *	Removes a notifier from a blocking notifier chain.
 *	Must be called from process context.
 *
 *	Returns zero on success or %-ENOENT on failure.
 */
int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
		struct notifier_block *n)
{
	int ret;

	/*
	 * This code gets used during boot-up, when task switching is
	 * not yet working and interrupts must remain disabled.  At
	 * such times we must not call down_write().
	 */
	if (unlikely(system_state == SYSTEM_BOOTING))
		return notifier_chain_unregister(&nh->head, n);

	down_write(&nh->rwsem);
	ret = notifier_chain_unregister(&nh->head, n);
	up_write(&nh->rwsem);
	return ret;
}

EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister);
/**
 *	__blocking_notifier_call_chain - Call functions in a blocking notifier chain
 *	@nh: Pointer to head of the blocking notifier chain
 *	@val: Value passed unmodified to notifier function
 *	@v: Pointer passed unmodified to notifier function
 *	@nr_to_call: See comment for notifier_call_chain.
 *	@nr_calls: See comment for notifier_call_chain.
 *
 *	Calls each function in a notifier chain in turn.  The functions
 *	run in a process context, so they are allowed to block.
 *
 *	If the return value of the notifier can be and'ed
 *	with %NOTIFY_STOP_MASK then blocking_notifier_call_chain()
 *	will return immediately, with the return value of
 *	the notifier function which halted execution.
 *	Otherwise the return value is the return value
 *	of the last notifier function called.
 */

int __blocking_notifier_call_chain(struct blocking_notifier_head *nh,
				   unsigned long val, void *v,
				   int nr_to_call, int *nr_calls)
{
	int ret = NOTIFY_DONE;

	/*
	 * We check the head outside the lock, but if this access is
	 * racy then it does not matter what the result of the test
	 * is, we re-check the list after having taken the lock anyway:
	 */
	if (rcu_dereference(nh->head)) {
		down_read(&nh->rwsem);
		ret = notifier_call_chain(&nh->head, val, v, nr_to_call,
					nr_calls);
		up_read(&nh->rwsem);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(__blocking_notifier_call_chain);

int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
		unsigned long val, void *v)
{
	return __blocking_notifier_call_chain(nh, val, v, -1, NULL);
}
EXPORT_SYMBOL_GPL(blocking_notifier_call_chain);
/*
 *	Raw notifier chain routines.  There is no protection;
 *	the caller must provide it.  Use at your own risk!
 */

/**
 *	raw_notifier_chain_register - Add notifier to a raw notifier chain
 *	@nh: Pointer to head of the raw notifier chain
 *	@n: New entry in notifier chain
 *
 *	Adds a notifier to a raw notifier chain.
 *	All locking must be provided by the caller.
 *
 *	Currently always returns zero.
 */

int raw_notifier_chain_register(struct raw_notifier_head *nh,
		struct notifier_block *n)
{
	return notifier_chain_register(&nh->head, n);
}

EXPORT_SYMBOL_GPL(raw_notifier_chain_register);

/**
 *	raw_notifier_chain_unregister - Remove notifier from a raw notifier chain
 *	@nh: Pointer to head of the raw notifier chain
 *	@n: Entry to remove from notifier chain
 *
 *	Removes a notifier from a raw notifier chain.
 *	All locking must be provided by the caller.
 *
 *	Returns zero on success or %-ENOENT on failure.
 */

int raw_notifier_chain_unregister(struct raw_notifier_head *nh,
		struct notifier_block *n)
{
	return notifier_chain_unregister(&nh->head, n);
}

EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister);
/**
 *	__raw_notifier_call_chain - Call functions in a raw notifier chain
 *	@nh: Pointer to head of the raw notifier chain
 *	@val: Value passed unmodified to notifier function
 *	@v: Pointer passed unmodified to notifier function
 *	@nr_to_call: See comment for notifier_call_chain.
 *	@nr_calls: See comment for notifier_call_chain
 *
 *	Calls each function in a notifier chain in turn.  The functions
 *	run in an undefined context.
 *	All locking must be provided by the caller.
 *
 *	If the return value of the notifier can be and'ed
 *	with %NOTIFY_STOP_MASK then raw_notifier_call_chain()
 *	will return immediately, with the return value of
 *	the notifier function which halted execution.
 *	Otherwise the return value is the return value
 *	of the last notifier function called.
 */

int __raw_notifier_call_chain(struct raw_notifier_head *nh,
			      unsigned long val, void *v,
			      int nr_to_call, int *nr_calls)
{
	return notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls);
}

EXPORT_SYMBOL_GPL(__raw_notifier_call_chain);

int raw_notifier_call_chain(struct raw_notifier_head *nh,
		unsigned long val, void *v)
{
	return __raw_notifier_call_chain(nh, val, v, -1, NULL);
}

EXPORT_SYMBOL_GPL(raw_notifier_call_chain);
/*
 *	SRCU notifier chain routines.  Registration and unregistration
 *	use a mutex, and call_chain is synchronized by SRCU (no locks).
 */

/**
 *	srcu_notifier_chain_register - Add notifier to an SRCU notifier chain
 *	@nh: Pointer to head of the SRCU notifier chain
 *	@n: New entry in notifier chain
 *
 *	Adds a notifier to an SRCU notifier chain.
 *	Must be called in process context.
 *
 *	Currently always returns zero.
 */

int srcu_notifier_chain_register(struct srcu_notifier_head *nh,
		struct notifier_block *n)
{
	int ret;

	/*
	 * This code gets used during boot-up, when task switching is
	 * not yet working and interrupts must remain disabled.  At
	 * such times we must not call mutex_lock().
	 */
	if (unlikely(system_state == SYSTEM_BOOTING))
		return notifier_chain_register(&nh->head, n);

	mutex_lock(&nh->mutex);
	ret = notifier_chain_register(&nh->head, n);
	mutex_unlock(&nh->mutex);
	return ret;
}

EXPORT_SYMBOL_GPL(srcu_notifier_chain_register);
/**
 *	srcu_notifier_chain_unregister - Remove notifier from an SRCU notifier chain
 *	@nh: Pointer to head of the SRCU notifier chain
 *	@n: Entry to remove from notifier chain
 *
 *	Removes a notifier from an SRCU notifier chain.
 *	Must be called from process context.
 *
 *	Returns zero on success or %-ENOENT on failure.
 */
int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh,
		struct notifier_block *n)
{
	int ret;

	/*
	 * This code gets used during boot-up, when task switching is
	 * not yet working and interrupts must remain disabled.  At
	 * such times we must not call mutex_lock().
	 */
	if (unlikely(system_state == SYSTEM_BOOTING))
		return notifier_chain_unregister(&nh->head, n);

	mutex_lock(&nh->mutex);
	ret = notifier_chain_unregister(&nh->head, n);
	mutex_unlock(&nh->mutex);
	synchronize_srcu(&nh->srcu);
	return ret;
}

EXPORT_SYMBOL_GPL(srcu_notifier_chain_unregister);
/**
 *	__srcu_notifier_call_chain - Call functions in an SRCU notifier chain
 *	@nh: Pointer to head of the SRCU notifier chain
 *	@val: Value passed unmodified to notifier function
 *	@v: Pointer passed unmodified to notifier function
 *	@nr_to_call: See comment for notifier_call_chain.
 *	@nr_calls: See comment for notifier_call_chain
 *
 *	Calls each function in a notifier chain in turn.  The functions
 *	run in a process context, so they are allowed to block.
 *
 *	If the return value of the notifier can be and'ed
 *	with %NOTIFY_STOP_MASK then srcu_notifier_call_chain()
 *	will return immediately, with the return value of
 *	the notifier function which halted execution.
 *	Otherwise the return value is the return value
 *	of the last notifier function called.
 */

int __srcu_notifier_call_chain(struct srcu_notifier_head *nh,
			       unsigned long val, void *v,
			       int nr_to_call, int *nr_calls)
{
	int ret;
	int idx;

	idx = srcu_read_lock(&nh->srcu);
	ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls);
	srcu_read_unlock(&nh->srcu, idx);
	return ret;
}
EXPORT_SYMBOL_GPL(__srcu_notifier_call_chain);

int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
		unsigned long val, void *v)
{
	return __srcu_notifier_call_chain(nh, val, v, -1, NULL);
}
EXPORT_SYMBOL_GPL(srcu_notifier_call_chain);
/**
 *	srcu_init_notifier_head - Initialize an SRCU notifier head
 *	@nh: Pointer to head of the srcu notifier chain
 *
 *	Unlike other sorts of notifier heads, SRCU notifier heads require
 *	dynamic initialization.  Be sure to call this routine before
 *	calling any of the other SRCU notifier routines for this head.
 *
 *	If an SRCU notifier head is deallocated, it must first be cleaned
 *	up by calling srcu_cleanup_notifier_head().  Otherwise the head's
 *	per-cpu data (used by the SRCU mechanism) will leak.
 */

void srcu_init_notifier_head(struct srcu_notifier_head *nh)
{
	mutex_init(&nh->mutex);
	if (init_srcu_struct(&nh->srcu) < 0)
		BUG();
	nh->head = NULL;
}

EXPORT_SYMBOL_GPL(srcu_init_notifier_head);
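
/*
 * Usage sketch (editor's illustration, not part of the original source):
 * because SRCU heads need dynamic initialization, a subsystem embedding
 * one would do roughly the following (structure and names invented):
 *
 *	struct my_subsys {
 *		struct srcu_notifier_head chain;
 *	};
 *
 *	srcu_init_notifier_head(&subsys->chain);
 *	srcu_notifier_chain_register(&subsys->chain, &my_nb);
 *	srcu_notifier_call_chain(&subsys->chain, MY_EVENT, NULL);
 *	srcu_notifier_chain_unregister(&subsys->chain, &my_nb);
 *	srcu_cleanup_notifier_head(&subsys->chain);	before freeing it
 */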
/**
 *	register_reboot_notifier - Register function to be called at reboot time
 *	@nb: Info about notifier function to be called
 *
 *	Registers a function with the list of functions
 *	to be called at reboot time.
 *
 *	Currently always returns zero, as blocking_notifier_chain_register()
 *	always returns zero.
 */

int register_reboot_notifier(struct notifier_block * nb)
{
	return blocking_notifier_chain_register(&reboot_notifier_list, nb);
}

EXPORT_SYMBOL(register_reboot_notifier);
/**
 *	unregister_reboot_notifier - Unregister previously registered reboot notifier
 *	@nb: Hook to be unregistered
 *
 *	Unregisters a previously registered reboot
 *	notifier function.
 *
 *	Returns zero on success, or %-ENOENT on failure.
 */

int unregister_reboot_notifier(struct notifier_block * nb)
{
	return blocking_notifier_chain_unregister(&reboot_notifier_list, nb);
}

EXPORT_SYMBOL(unregister_reboot_notifier);
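
/*
 * Usage sketch (editor's illustration, not part of the original source):
 * a driver that must quiesce hardware before restart/halt/power-off.
 * The callback and variable names are invented.
 *
 *	static int my_reboot_event(struct notifier_block *nb,
 *				   unsigned long action, void *data)
 *	{
 *		action is SYS_RESTART, SYS_HALT or SYS_POWER_OFF; for
 *		SYS_RESTART, data may point to the restart command string.
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_reboot_nb = {
 *		.notifier_call = my_reboot_event,
 *	};
 *
 *	register_reboot_notifier(&my_reboot_nb);	in module init
 *	unregister_reboot_notifier(&my_reboot_nb);	in module exit
 */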
static int set_one_prio(struct task_struct *p, int niceval, int error)
{
	int no_nice;

	if (p->uid != current->euid &&
		p->euid != current->euid && !capable(CAP_SYS_NICE)) {
		error = -EPERM;
		goto out;
	}
	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
		error = -EACCES;
		goto out;
	}
	no_nice = security_task_setnice(p, niceval);
	if (no_nice) {
		error = no_nice;
		goto out;
	}
	if (error == -ESRCH)
		error = 0;
	set_user_nice(p, niceval);
out:
	return error;
}
asmlinkage long sys_setpriority(int which, int who, int niceval)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	int error = -EINVAL;
	struct pid *pgrp;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		goto out;

	/* normalize: avoid signed division (rounding problems) */
	error = -ESRCH;
	if (niceval < -20)
		niceval = -20;
	if (niceval > 19)
		niceval = 19;

	read_lock(&tasklist_lock);
	switch (which) {
		case PRIO_PROCESS:
			if (who)
				p = find_task_by_pid(who);
			else
				p = current;
			if (p)
				error = set_one_prio(p, niceval, error);
			break;
		case PRIO_PGRP:
			if (who)
				pgrp = find_pid(who);
			else
				pgrp = task_pgrp(current);
			do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
				error = set_one_prio(p, niceval, error);
			} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
			break;
		case PRIO_USER:
			user = current->user;
			if (!who)
				who = current->uid;
			else
				if ((who != current->uid) && !(user = find_user(who)))
					goto out_unlock;	/* No processes for this user */

			do_each_thread(g, p)
				if (p->uid == who)
					error = set_one_prio(p, niceval, error);
			while_each_thread(g, p);
			if (who != current->uid)
				free_uid(user);		/* For find_user() */
			break;
	}
out_unlock:
	read_unlock(&tasklist_lock);
out:
	return error;
}
/*
 * Ugh. To avoid negative return values, "getpriority()" will
 * not return the normal nice-value, but a negated value that
 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
 * to stay compatible.
 */
asmlinkage long sys_getpriority(int which, int who)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	long niceval, retval = -ESRCH;
	struct pid *pgrp;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		return -EINVAL;

	read_lock(&tasklist_lock);
	switch (which) {
		case PRIO_PROCESS:
			if (who)
				p = find_task_by_pid(who);
			else
				p = current;
			if (p) {
				niceval = 20 - task_nice(p);
				if (niceval > retval)
					retval = niceval;
			}
			break;
		case PRIO_PGRP:
			if (who)
				pgrp = find_pid(who);
			else
				pgrp = task_pgrp(current);
			do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
				niceval = 20 - task_nice(p);
				if (niceval > retval)
					retval = niceval;
			} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
			break;
		case PRIO_USER:
			user = current->user;
			if (!who)
				who = current->uid;
			else
				if ((who != current->uid) && !(user = find_user(who)))
					goto out_unlock;	/* No processes for this user */

			do_each_thread(g, p)
				if (p->uid == who) {
					niceval = 20 - task_nice(p);
					if (niceval > retval)
						retval = niceval;
				}
			while_each_thread(g, p);
			if (who != current->uid)
				free_uid(user);		/* for find_user() */
			break;
	}
out_unlock:
	read_unlock(&tasklist_lock);

	return retval;
}
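
/*
 * Editor's note (illustrative, not part of the original source): the raw
 * syscall thus returns 20 - nice, i.e. 40..1.  C library wrappers such as
 * glibc's getpriority() undo the offset, so a direct syscall(2) user must
 * translate by hand:
 *
 *	long raw = syscall(SYS_getpriority, PRIO_PROCESS, 0);
 *	if (raw >= 0)
 *		nice = 20 - raw;	back to the -20..19 range
 */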
/**
 *	emergency_restart - reboot the system
 *
 *	Without shutting down any hardware or taking any locks
 *	reboot the system.  This is called when we know we are in
 *	trouble so this is our best effort to reboot.  This is
 *	safe to call in interrupt context.
 */
void emergency_restart(void)
{
	machine_emergency_restart();
}
EXPORT_SYMBOL_GPL(emergency_restart);
static void kernel_restart_prepare(char *cmd)
{
	blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
	system_state = SYSTEM_RESTART;
	device_shutdown();
}
/**
 *	kernel_restart - reboot the system
 *	@cmd: pointer to buffer containing command to execute for restart
 *		or %NULL
 *
 *	Shutdown everything and perform a clean reboot.
 *	This is not safe to call in interrupt context.
 */
void kernel_restart(char *cmd)
{
	kernel_restart_prepare(cmd);
	if (!cmd)
		printk(KERN_EMERG "Restarting system.\n");
	else
		printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd);
	machine_restart(cmd);
}
EXPORT_SYMBOL_GPL(kernel_restart);
/**
 *	kernel_kexec - reboot the system
 *
 *	Move into place and start executing a preloaded standalone
 *	executable.  If nothing was preloaded return an error.
 */
static void kernel_kexec(void)
{
#ifdef CONFIG_KEXEC
	struct kimage *image;
	image = xchg(&kexec_image, NULL);
	if (!image)
		return;
	kernel_restart_prepare(NULL);
	printk(KERN_EMERG "Starting new kernel\n");
	machine_shutdown();
	machine_kexec(image);
#endif
}
void kernel_shutdown_prepare(enum system_states state)
{
	blocking_notifier_call_chain(&reboot_notifier_list,
		(state == SYSTEM_HALT) ? SYS_HALT : SYS_POWER_OFF, NULL);
	system_state = state;
	device_shutdown();
}
/**
 *	kernel_halt - halt the system
 *
 *	Shutdown everything and perform a clean system halt.
 */
void kernel_halt(void)
{
	kernel_shutdown_prepare(SYSTEM_HALT);
	printk(KERN_EMERG "System halted.\n");
	machine_halt();
}

EXPORT_SYMBOL_GPL(kernel_halt);
/**
 *	kernel_power_off - power_off the system
 *
 *	Shutdown everything and perform a clean system power_off.
 */
void kernel_power_off(void)
{
	kernel_shutdown_prepare(SYSTEM_POWER_OFF);
	printk(KERN_EMERG "Power down.\n");
	machine_power_off();
}
EXPORT_SYMBOL_GPL(kernel_power_off);
/*
 * Reboot system call: for obvious reasons only root may call it,
 * and even root needs to set up some magic numbers in the registers
 * so that some mistake won't make this reboot the whole machine.
 * You can also set the meaning of the ctrl-alt-del-key here.
 *
 * reboot doesn't sync: do that yourself before calling this.
 */
asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user * arg)
{
	char buffer[256];

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT))
		return -EPERM;

	/* For safety, we require "magic" arguments. */
	if (magic1 != LINUX_REBOOT_MAGIC1 ||
	    (magic2 != LINUX_REBOOT_MAGIC2 &&
	     magic2 != LINUX_REBOOT_MAGIC2A &&
	     magic2 != LINUX_REBOOT_MAGIC2B &&
	     magic2 != LINUX_REBOOT_MAGIC2C))
		return -EINVAL;

	/* Instead of trying to make the power_off code look like
	 * halt when pm_power_off is not set do it the easy way.
	 */
	if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
		cmd = LINUX_REBOOT_CMD_HALT;

	lock_kernel();
	switch (cmd) {
	case LINUX_REBOOT_CMD_RESTART:
		kernel_restart(NULL);
		break;

	case LINUX_REBOOT_CMD_CAD_ON:
		C_A_D = 1;
		break;

	case LINUX_REBOOT_CMD_CAD_OFF:
		C_A_D = 0;
		break;

	case LINUX_REBOOT_CMD_HALT:
		kernel_halt();
		unlock_kernel();
		do_exit(0);
		break;

	case LINUX_REBOOT_CMD_POWER_OFF:
		kernel_power_off();
		unlock_kernel();
		do_exit(0);
		break;

	case LINUX_REBOOT_CMD_RESTART2:
		if (strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1) < 0) {
			unlock_kernel();
			return -EFAULT;
		}
		buffer[sizeof(buffer) - 1] = '\0';

		kernel_restart(buffer);
		break;

	case LINUX_REBOOT_CMD_KEXEC:
		kernel_kexec();
		unlock_kernel();
		return -EINVAL;

#ifdef CONFIG_SOFTWARE_SUSPEND
	case LINUX_REBOOT_CMD_SW_SUSPEND:
		{
			int ret = hibernate();
			unlock_kernel();
			return ret;
		}
#endif

	default:
		unlock_kernel();
		return -EINVAL;
	}
	unlock_kernel();
	return 0;
}
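
/*
 * Userspace sketch (editor's illustration, not part of the original
 * source): the magic numbers demanded above come from reboot(2).  A
 * minimal caller, run as root, syncing first since this syscall does not:
 *
 *	#include <unistd.h>
 *	#include <sys/reboot.h>
 *
 *	sync();
 *	reboot(RB_POWER_OFF);	the glibc wrapper supplies the magics
 *
 * or, spelling the magic values out via the raw syscall:
 *
 *	syscall(SYS_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
 *		LINUX_REBOOT_CMD_POWER_OFF, NULL);
 */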
static void deferred_cad(struct work_struct *dummy)
{
	kernel_restart(NULL);
}

/*
 * This function gets called by ctrl-alt-del - ie the keyboard interrupt.
 * As it's called within an interrupt, it may NOT sync: the only choice
 * is whether to reboot at once, or just ignore the ctrl-alt-del.
 */
void ctrl_alt_del(void)
{
	static DECLARE_WORK(cad_work, deferred_cad);

	if (C_A_D)
		schedule_work(&cad_work);
	else
		kill_cad_pid(SIGINT, 1);
}
/*
 * Unprivileged users may change the real gid to the effective gid
 * or vice versa.  (BSD-style)
 *
 * If you set the real gid at all, or set the effective gid to a value not
 * equal to the real gid, then the saved gid is set to the new effective gid.
 *
 * This makes it possible for a setgid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setregid() will be
 * 100% compatible with BSD.  A program which uses just setgid() will be
 * 100% compatible with POSIX with saved IDs.
 *
 * SMP: There are no races, the GIDs are checked only by filesystem
 *      operations (as far as semantic preservation is concerned).
 */
asmlinkage long sys_setregid(gid_t rgid, gid_t egid)
{
	int old_rgid = current->gid;
	int old_egid = current->egid;
	int new_rgid = old_rgid;
	int new_egid = old_egid;
	int retval;

	retval = security_task_setgid(rgid, egid, (gid_t)-1, LSM_SETID_RE);
	if (retval)
		return retval;

	if (rgid != (gid_t) -1) {
		if ((old_rgid == rgid) ||
		    (current->egid == rgid) ||
		    capable(CAP_SETGID))
			new_rgid = rgid;
		else
			return -EPERM;
	}
	if (egid != (gid_t) -1) {
		if ((old_rgid == egid) ||
		    (current->egid == egid) ||
		    (current->sgid == egid) ||
		    capable(CAP_SETGID))
			new_egid = egid;
		else
			return -EPERM;
	}
	if (new_egid != old_egid) {
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	if (rgid != (gid_t) -1 ||
	    (egid != (gid_t) -1 && egid != old_rgid))
		current->sgid = new_egid;
	current->fsgid = new_egid;
	current->egid = new_egid;
	current->gid = new_rgid;
	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);
	return 0;
}
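
/*
 * Userspace sketch (editor's illustration, not part of the original
 * source): per the comment above, a setgid program can irrevocably drop
 * its group privilege by setting both the real and effective gid, which
 * also rewrites the saved gid:
 *
 *	gid_t rgid = getgid();
 *	if (setregid(rgid, rgid) != 0)
 *		abort();	drop failed; refuse to continue
 */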
/*
 * setgid() is implemented like SysV w/ SAVED_IDS
 *
 * SMP: Same implicit races as above.
 */
asmlinkage long sys_setgid(gid_t gid)
{
	int old_egid = current->egid;
	int retval;

	retval = security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_ID);
	if (retval)
		return retval;

	if (capable(CAP_SETGID)) {
		if (old_egid != gid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->gid = current->egid = current->sgid = current->fsgid = gid;
	} else if ((gid == current->gid) || (gid == current->sgid)) {
		if (old_egid != gid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->egid = current->fsgid = gid;
	}
	else
		return -EPERM;

	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);
	return 0;
}
static int set_user(uid_t new_ruid, int dumpclear)
{
	struct user_struct *new_user;

	new_user = alloc_uid(current->nsproxy->user_ns, new_ruid);
	if (!new_user)
		return -EAGAIN;

	if (atomic_read(&new_user->processes) >=
				current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
			new_user != current->nsproxy->user_ns->root_user) {
		free_uid(new_user);
		return -EAGAIN;
	}

	switch_uid(new_user);

	if (dumpclear) {
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	current->uid = new_ruid;
	return 0;
}
/*
 * Unprivileged users may change the real uid to the effective uid
 * or vice versa.  (BSD-style)
 *
 * If you set the real uid at all, or set the effective uid to a value not
 * equal to the real uid, then the saved uid is set to the new effective uid.
 *
 * This makes it possible for a setuid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setreuid() will be
 * 100% compatible with BSD.  A program which uses just setuid() will be
 * 100% compatible with POSIX with saved IDs.
 */
asmlinkage long sys_setreuid(uid_t ruid, uid_t euid)
{
	int old_ruid, old_euid, old_suid, new_ruid, new_euid;
	int retval;

	retval = security_task_setuid(ruid, euid, (uid_t)-1, LSM_SETID_RE);
	if (retval)
		return retval;

	new_ruid = old_ruid = current->uid;
	new_euid = old_euid = current->euid;
	old_suid = current->suid;

	if (ruid != (uid_t) -1) {
		new_ruid = ruid;
		if ((old_ruid != ruid) &&
		    (current->euid != ruid) &&
		    !capable(CAP_SETUID))
			return -EPERM;
	}

	if (euid != (uid_t) -1) {
		new_euid = euid;
		if ((old_ruid != euid) &&
		    (current->euid != euid) &&
		    (current->suid != euid) &&
		    !capable(CAP_SETUID))
			return -EPERM;
	}

	if (new_ruid != old_ruid && set_user(new_ruid, new_euid != old_euid) < 0)
		return -EAGAIN;

	if (new_euid != old_euid) {
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	current->fsuid = current->euid = new_euid;
	if (ruid != (uid_t) -1 ||
	    (euid != (uid_t) -1 && euid != old_ruid))
		current->suid = current->euid;
	current->fsuid = current->euid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RE);
}
/*
 * setuid() is implemented like SysV with SAVED_IDS
 *
 * Note that SAVED_ID's is deficient in that a setuid root program
 * like sendmail, for example, cannot set its uid to be a normal
 * user and then switch back, because if you're root, setuid() sets
 * the saved uid too.  If you don't like this, blame the bright people
 * in the POSIX committee and/or USG.  Note that the BSD-style setreuid()
 * will allow a root program to temporarily drop privileges and be able to
 * regain them by swapping the real and effective uid.
 */
asmlinkage long sys_setuid(uid_t uid)
{
	int old_euid = current->euid;
	int old_ruid, old_suid, new_suid;
	int retval;

	retval = security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_ID);
	if (retval)
		return retval;

	old_ruid = current->uid;
	old_suid = current->suid;
	new_suid = old_suid;

	if (capable(CAP_SETUID)) {
		if (uid != old_ruid && set_user(uid, old_euid != uid) < 0)
			return -EAGAIN;
		new_suid = uid;
	} else if ((uid != current->uid) && (uid != new_suid))
		return -EPERM;

	if (old_euid != uid) {
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	current->fsuid = current->euid = uid;
	current->suid = new_suid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_ID);
}
/*
 * This function implements a generic ability to update ruid, euid,
 * and suid.  This allows you to implement the 4.4 compatible seteuid().
 */
asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
{
	int old_ruid = current->uid;
	int old_euid = current->euid;
	int old_suid = current->suid;
	int retval;

	retval = security_task_setuid(ruid, euid, suid, LSM_SETID_RES);
	if (retval)
		return retval;

	if (!capable(CAP_SETUID)) {
		if ((ruid != (uid_t) -1) && (ruid != current->uid) &&
		    (ruid != current->euid) && (ruid != current->suid))
			return -EPERM;
		if ((euid != (uid_t) -1) && (euid != current->uid) &&
		    (euid != current->euid) && (euid != current->suid))
			return -EPERM;
		if ((suid != (uid_t) -1) && (suid != current->uid) &&
		    (suid != current->euid) && (suid != current->suid))
			return -EPERM;
	}
	if (ruid != (uid_t) -1) {
		if (ruid != current->uid && set_user(ruid, euid != current->euid) < 0)
			return -EAGAIN;
	}
	if (euid != (uid_t) -1) {
		if (euid != current->euid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->euid = euid;
	}
	current->fsuid = current->euid;
	if (suid != (uid_t) -1)
		current->suid = suid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RES);
}
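
/*
 * Userspace sketch (editor's illustration, not part of the original
 * source): setresuid() makes a full, explicit privilege drop possible in
 * one call, clearing the saved uid that plain setuid() semantics may
 * otherwise leave privileged:
 *
 *	uid_t ruid = getuid();
 *	if (setresuid(ruid, ruid, ruid) != 0)
 *		abort();	saved uid may still be privileged
 */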
asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid)
{
	int retval;

	if (!(retval = put_user(current->uid, ruid)) &&
	    !(retval = put_user(current->euid, euid)))
		retval = put_user(current->suid, suid);

	return retval;
}
/*
 * Same as above, but for rgid, egid, sgid.
 */
asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
{
	int retval;

	retval = security_task_setgid(rgid, egid, sgid, LSM_SETID_RES);
	if (retval)
		return retval;

	if (!capable(CAP_SETGID)) {
		if ((rgid != (gid_t) -1) && (rgid != current->gid) &&
		    (rgid != current->egid) && (rgid != current->sgid))
			return -EPERM;
		if ((egid != (gid_t) -1) && (egid != current->gid) &&
		    (egid != current->egid) && (egid != current->sgid))
			return -EPERM;
		if ((sgid != (gid_t) -1) && (sgid != current->gid) &&
		    (sgid != current->egid) && (sgid != current->sgid))
			return -EPERM;
	}
	if (egid != (gid_t) -1) {
		if (egid != current->egid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->egid = egid;
	}
	current->fsgid = current->egid;
	if (rgid != (gid_t) -1)
		current->gid = rgid;
	if (sgid != (gid_t) -1)
		current->sgid = sgid;

	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);
	return 0;
}
asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid)
{
	int retval;

	if (!(retval = put_user(current->gid, rgid)) &&
	    !(retval = put_user(current->egid, egid)))
		retval = put_user(current->sgid, sgid);

	return retval;
}
/*
 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
 * is used for "access()" and for the NFS daemon (letting nfsd stay at
 * whatever uid it wants to). It normally shadows "euid", except when
 * explicitly set by setfsuid() or for access..
 */
asmlinkage long sys_setfsuid(uid_t uid)
{
	int old_fsuid;

	old_fsuid = current->fsuid;
	if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS))
		return old_fsuid;

	if (uid == current->uid || uid == current->euid ||
	    uid == current->suid || uid == current->fsuid ||
	    capable(CAP_SETUID)) {
		if (uid != old_fsuid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->fsuid = uid;
	}

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	security_task_post_setuid(old_fsuid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS);

	return old_fsuid;
}
/*
 * Samma på svenska.. ("same thing, in Swedish"): setfsgid() is the gid
 * counterpart of setfsuid() above.
 */
asmlinkage long sys_setfsgid(gid_t gid)
{
	int old_fsgid;

	old_fsgid = current->fsgid;
	if (security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_FS))
		return old_fsgid;

	if (gid == current->gid || gid == current->egid ||
	    gid == current->sgid || gid == current->fsgid ||
	    capable(CAP_SETGID)) {
		if (gid != old_fsgid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->fsgid = gid;
		key_fsgid_changed(current);
		proc_id_connector(current, PROC_EVENT_GID);
	}
	return old_fsgid;
}
asmlinkage long sys_times(struct tms __user * tbuf)
{
	/*
	 *	In the SMP world we might just be unlucky and have one of
	 *	the times increment as we use it. Since the value is an
	 *	atomically safe type this is just fine. Conceptually it's
	 *	as if the syscall took an instant longer to occur.
	 */
	if (tbuf) {
		struct tms tmp;
		struct task_struct *tsk = current;
		struct task_struct *t;
		cputime_t utime, stime, cutime, cstime;

		spin_lock_irq(&tsk->sighand->siglock);
		utime = tsk->signal->utime;
		stime = tsk->signal->stime;
		t = tsk;
		do {
			utime = cputime_add(utime, t->utime);
			stime = cputime_add(stime, t->stime);
			t = next_thread(t);
		} while (t != tsk);

		cutime = tsk->signal->cutime;
		cstime = tsk->signal->cstime;
		spin_unlock_irq(&tsk->sighand->siglock);

		tmp.tms_utime = cputime_to_clock_t(utime);
		tmp.tms_stime = cputime_to_clock_t(stime);
		tmp.tms_cutime = cputime_to_clock_t(cutime);
		tmp.tms_cstime = cputime_to_clock_t(cstime);
		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
			return -EFAULT;
	}
	return (long) jiffies_64_to_clock_t(get_jiffies_64());
}
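
/*
 * Userspace sketch (editor's illustration, not part of the original
 * source): both the struct tms fields and the return value are in clock
 * ticks; divide by sysconf(_SC_CLK_TCK) to get seconds:
 *
 *	struct tms t;
 *	clock_t now = times(&t);
 *	double user_sec = (double)t.tms_utime / sysconf(_SC_CLK_TCK);
 */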
/*
 * This needs some heavy checking ...
 * I just haven't the stomach for it. I also don't fully
 * understand sessions/pgrp etc. Let somebody who does explain it.
 *
 * OK, I think I have the protection semantics right.... this is really
 * only important on a multi-user system anyway, to make sure one user
 * can't send a signal to a process owned by another.  -TYT, 12/12/91
 *
 * Auch. Had to add the 'did_exec' flag to conform completely to POSIX.
 * LBT 04.03.94
 */

asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
{
	struct task_struct *p;
	struct task_struct *group_leader = current->group_leader;
	int err = -EINVAL;

	if (!pid)
		pid = group_leader->pid;
	if (!pgid)
		pgid = pid;
	if (pgid < 0)
		return -EINVAL;

	/* From this point forward we keep holding onto the tasklist lock
	 * so that our parent does not change from under us. -DaveM
	 */
	write_lock_irq(&tasklist_lock);

	err = -ESRCH;
	p = find_task_by_pid(pid);
	if (!p)
		goto out;

	err = -EINVAL;
	if (!thread_group_leader(p))
		goto out;

	if (p->real_parent == group_leader) {
		err = -EPERM;
		if (task_session(p) != task_session(group_leader))
			goto out;
		err = -EACCES;
		if (p->did_exec)
			goto out;
	} else {
		err = -ESRCH;
		if (p != group_leader)
			goto out;
	}

	err = -EPERM;
	if (p->signal->leader)
		goto out;

	if (pgid != pid) {
		struct task_struct *g =
			find_task_by_pid_type(PIDTYPE_PGID, pgid);

		if (!g || task_session(g) != task_session(group_leader))
			goto out;
	}

	err = security_task_setpgid(p, pgid);
	if (err)
		goto out;

	if (process_group(p) != pgid) {
		detach_pid(p, PIDTYPE_PGID);
		p->signal->pgrp = pgid;
		attach_pid(p, PIDTYPE_PGID, find_pid(pgid));
	}

	err = 0;
out:
	/* All paths lead to here, thus we are safe. -DaveM */
	write_unlock_irq(&tasklist_lock);
	return err;
}
asmlinkage long sys_getpgid(pid_t pid)
{
	if (!pid)
		return process_group(current);
	else {
		int retval;
		struct task_struct *p;

		read_lock(&tasklist_lock);
		p = find_task_by_pid(pid);

		retval = -ESRCH;
		if (p) {
			retval = security_task_getpgid(p);
			if (!retval)
				retval = process_group(p);
		}
		read_unlock(&tasklist_lock);
		return retval;
	}
}

#ifdef __ARCH_WANT_SYS_GETPGRP

asmlinkage long sys_getpgrp(void)
{
	/* SMP - assuming writes are word atomic this is fine */
	return process_group(current);
}

#endif
asmlinkage long sys_getsid(pid_t pid)
{
	if (!pid)
		return process_session(current);
	else {
		int retval;
		struct task_struct *p;

		read_lock(&tasklist_lock);
		p = find_task_by_pid(pid);

		retval = -ESRCH;
		if (p) {
			retval = security_task_getsid(p);
			if (!retval)
				retval = process_session(p);
		}
		read_unlock(&tasklist_lock);
		return retval;
	}
}
asmlinkage long sys_setsid(void)
{
	struct task_struct *group_leader = current->group_leader;
	pid_t session;
	int err = -EPERM;

	write_lock_irq(&tasklist_lock);

	/* Fail if I am already a session leader */
	if (group_leader->signal->leader)
		goto out;

	session = group_leader->pid;
	/* Fail if a process group id already exists that equals the
	 * proposed session id.
	 *
	 * Don't check if session id == 1 because kernel threads use this
	 * session id and so the check will always fail and make it so
	 * init cannot successfully call setsid.
	 */
	if (session > 1 && find_task_by_pid_type(PIDTYPE_PGID, session))
		goto out;

	group_leader->signal->leader = 1;
	__set_special_pids(session, session);

	spin_lock(&group_leader->sighand->siglock);
	group_leader->signal->tty = NULL;
	spin_unlock(&group_leader->sighand->siglock);

	err = process_group(group_leader);
out:
	write_unlock_irq(&tasklist_lock);
	return err;
}
/*
 * Supplementary group IDs
 */

/* init to 2 - one for init_task, one to ensure it is never freed */
struct group_info init_groups = { .usage = ATOMIC_INIT(2) };

struct group_info *groups_alloc(int gidsetsize)
{
	struct group_info *group_info;
	int nblocks;
	int i;

	nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK;
	/* Make sure we always allocate at least one indirect block pointer */
	nblocks = nblocks ? : 1;
	group_info = kmalloc(sizeof(*group_info) + nblocks*sizeof(gid_t *), GFP_USER);
	if (!group_info)
		return NULL;
	group_info->ngroups = gidsetsize;
	group_info->nblocks = nblocks;
	atomic_set(&group_info->usage, 1);

	if (gidsetsize <= NGROUPS_SMALL)
		group_info->blocks[0] = group_info->small_block;
	else {
		for (i = 0; i < nblocks; i++) {
			gid_t *b;
			b = (void *)__get_free_page(GFP_USER);
			if (!b)
				goto out_undo_partial_alloc;
			group_info->blocks[i] = b;
		}
	}
	return group_info;

out_undo_partial_alloc:
	while (--i >= 0) {
		free_page((unsigned long)group_info->blocks[i]);
	}
	kfree(group_info);
	return NULL;
}

EXPORT_SYMBOL(groups_alloc);
void groups_free(struct group_info *group_info)
{
	if (group_info->blocks[0] != group_info->small_block) {
		int i;
		for (i = 0; i < group_info->nblocks; i++)
			free_page((unsigned long)group_info->blocks[i]);
	}
	kfree(group_info);
}

EXPORT_SYMBOL(groups_free);
/* export the group_info to a user-space array */
static int groups_to_user(gid_t __user *grouplist,
    struct group_info *group_info)
{
	int i;
	int count = group_info->ngroups;

	for (i = 0; i < group_info->nblocks; i++) {
		int cp_count = min(NGROUPS_PER_BLOCK, count);
		int off = i * NGROUPS_PER_BLOCK;
		int len = cp_count * sizeof(*grouplist);

		if (copy_to_user(grouplist+off, group_info->blocks[i], len))
			return -EFAULT;

		count -= cp_count;
	}
	return 0;
}
/* fill a group_info from a user-space array - it must be allocated already */
static int groups_from_user(struct group_info *group_info,
    gid_t __user *grouplist)
{
	int i;
	int count = group_info->ngroups;

	for (i = 0; i < group_info->nblocks; i++) {
		int cp_count = min(NGROUPS_PER_BLOCK, count);
		int off = i * NGROUPS_PER_BLOCK;
		int len = cp_count * sizeof(*grouplist);

		if (copy_from_user(group_info->blocks[i], grouplist+off, len))
			return -EFAULT;

		count -= cp_count;
	}
	return 0;
}
/* a simple Shell sort */
static void groups_sort(struct group_info *group_info)
{
	int base, max, stride;
	int gidsetsize = group_info->ngroups;

	for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1)
		; /* nothing */
	stride /= 3;

	while (stride) {
		max = gidsetsize - stride;
		for (base = 0; base < max; base++) {
			int left = base;
			int right = left + stride;
			gid_t tmp = GROUP_AT(group_info, right);

			while (left >= 0 && GROUP_AT(group_info, left) > tmp) {
				GROUP_AT(group_info, right) =
				    GROUP_AT(group_info, left);
				right = left;
				left -= stride;
			}
			GROUP_AT(group_info, right) = tmp;
		}
		stride /= 3;
	}
}
/* a simple bsearch */
int groups_search(struct group_info *group_info, gid_t grp)
{
	unsigned int left, right;

	if (!group_info)
		return 0;

	left = 0;
	right = group_info->ngroups;
	while (left < right) {
		unsigned int mid = (left+right)/2;
		int cmp = grp - GROUP_AT(group_info, mid);
		if (cmp > 0)
			left = mid + 1;
		else if (cmp < 0)
			right = mid;
		else
			return 1;
	}
	return 0;
}
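
/*
 * Editor's note (illustrative, not part of the original source):
 * groups_search() assumes the gid array is sorted, which groups_sort()
 * establishes; set_current_groups() below always sorts before the
 * group_info is published, so in-kernel callers can rely on:
 *
 *	groups_sort(group_info);		establish the invariant
 *	found = groups_search(group_info, grp);	then binary search is valid
 */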
/* validate and set current->group_info */
int set_current_groups(struct group_info *group_info)
{
	int retval;
	struct group_info *old_info;

	retval = security_task_setgroups(group_info);
	if (retval)
		return retval;

	groups_sort(group_info);
	get_group_info(group_info);

	task_lock(current);
	old_info = current->group_info;
	current->group_info = group_info;
	task_unlock(current);

	put_group_info(old_info);

	return 0;
}

EXPORT_SYMBOL(set_current_groups);
asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist)
{
	int i = 0;

	/*
	 *	SMP: Nobody else can change our grouplist. Thus we are
	 *	safe.
	 */

	if (gidsetsize < 0)
		return -EINVAL;

	/* no need to grab task_lock here; it cannot change */
	i = current->group_info->ngroups;
	if (gidsetsize) {
		if (i > gidsetsize) {
			i = -EINVAL;
			goto out;
		}
		if (groups_to_user(grouplist, current->group_info)) {
			i = -EFAULT;
			goto out;
		}
	}
out:
	return i;
}
/*
 *	SMP: Our groups are copy-on-write. We can set them safely
 *	without another task interfering.
 */

asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist)
{
	struct group_info *group_info;
	int retval;

	if (!capable(CAP_SETGID))
		return -EPERM;
	if ((unsigned)gidsetsize > NGROUPS_MAX)
		return -EINVAL;

	group_info = groups_alloc(gidsetsize);
	if (!group_info)
		return -ENOMEM;
	retval = groups_from_user(group_info, grouplist);
	if (retval) {
		put_group_info(group_info);
		return retval;
	}

	retval = set_current_groups(group_info);
	put_group_info(group_info);

	return retval;
}
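
/*
 * Userspace sketch (editor's illustration, not part of the original
 * source): the usual two-step pattern for reading the supplementary
 * list, sizing it first with gidsetsize 0 as sys_getgroups() permits:
 *
 *	int n = getgroups(0, NULL);
 *	gid_t *list = malloc(n * sizeof(gid_t));
 *	if (list)
 *		n = getgroups(n, list);
 */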
/*
 * Check whether we're fsgid/egid or in the supplemental group..
 */
int in_group_p(gid_t grp)
{
	int retval = 1;
	if (grp != current->fsgid)
		retval = groups_search(current->group_info, grp);
	return retval;
}

EXPORT_SYMBOL(in_group_p);

int in_egroup_p(gid_t grp)
{
	int retval = 1;
	if (grp != current->egid)
		retval = groups_search(current->group_info, grp);
	return retval;
}

EXPORT_SYMBOL(in_egroup_p);
DECLARE_RWSEM(uts_sem);

EXPORT_SYMBOL(uts_sem);

asmlinkage long sys_newuname(struct new_utsname __user * name)
{
	int errno = 0;

	down_read(&uts_sem);
	if (copy_to_user(name, utsname(), sizeof *name))
		errno = -EFAULT;
	up_read(&uts_sem);
	return errno;
}
asmlinkage long sys_sethostname(char __user *name, int len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;
	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		memcpy(utsname()->nodename, tmp, len);
		utsname()->nodename[len] = 0;
		errno = 0;
	}
	up_write(&uts_sem);
	return errno;
}
#ifdef __ARCH_WANT_SYS_GETHOSTNAME

asmlinkage long sys_gethostname(char __user *name, int len)
{
	int i, errno;

	if (len < 0)
		return -EINVAL;
	down_read(&uts_sem);
	i = 1 + strlen(utsname()->nodename);
	if (i > len)
		i = len;
	errno = 0;
	if (copy_to_user(name, utsname()->nodename, i))
		errno = -EFAULT;
	up_read(&uts_sem);
	return errno;
}

#endif
/*
 * Only setdomainname; getdomainname can be implemented by calling
 * uname()
 */
asmlinkage long sys_setdomainname(char __user *name, int len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;

	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		memcpy(utsname()->domainname, tmp, len);
		utsname()->domainname[len] = 0;
		errno = 0;
	}
	up_write(&uts_sem);
	return errno;
}
asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	else {
		struct rlimit value;
		task_lock(current->group_leader);
		value = current->signal->rlim[resource];
		task_unlock(current->group_leader);
		return copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
	}
}
#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT

/*
 *	Back compatibility for getrlimit. Needed for some apps.
 */

asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	struct rlimit x;
	if (resource >= RLIM_NLIMITS)
		return -EINVAL;

	task_lock(current->group_leader);
	x = current->signal->rlim[resource];
	task_unlock(current->group_leader);
	if (x.rlim_cur > 0x7FFFFFFF)
		x.rlim_cur = 0x7FFFFFFF;
	if (x.rlim_max > 0x7FFFFFFF)
		x.rlim_max = 0x7FFFFFFF;
	return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
}

#endif
asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	struct rlimit new_rlim, *old_rlim;
	unsigned long it_prof_secs;
	int retval;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
		return -EFAULT;
	if (new_rlim.rlim_cur > new_rlim.rlim_max)
		return -EINVAL;
	old_rlim = current->signal->rlim + resource;
	if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
	    !capable(CAP_SYS_RESOURCE))
		return -EPERM;
	if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > NR_OPEN)
		return -EPERM;

	retval = security_task_setrlimit(resource, &new_rlim);
	if (retval)
		return retval;

	if (resource == RLIMIT_CPU && new_rlim.rlim_cur == 0) {
		/*
		 * The caller is asking for an immediate RLIMIT_CPU
		 * expiry.  But we use the zero value to mean "it was
		 * never set".  So let's cheat and make it one second
		 * instead
		 */
		new_rlim.rlim_cur = 1;
	}

	task_lock(current->group_leader);
	*old_rlim = new_rlim;
	task_unlock(current->group_leader);

	if (resource != RLIMIT_CPU)
		goto out;

	/*
	 * RLIMIT_CPU handling.   Note that the kernel fails to return an error
	 * code if it rejected the user's attempt to set RLIMIT_CPU.  This is a
	 * very long-standing error, and fixing it now risks breakage of
	 * applications, so we live with it
	 */
	if (new_rlim.rlim_cur == RLIM_INFINITY)
		goto out;

	it_prof_secs = cputime_to_secs(current->signal->it_prof_expires);
	if (it_prof_secs == 0 || new_rlim.rlim_cur <= it_prof_secs) {
		unsigned long rlim_cur = new_rlim.rlim_cur;
		cputime_t cputime;

		cputime = secs_to_cputime(rlim_cur);
		read_lock(&tasklist_lock);
		spin_lock_irq(&current->sighand->siglock);
		set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
		spin_unlock_irq(&current->sighand->siglock);
		read_unlock(&tasklist_lock);
	}
out:
	return 0;
}
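
/*
 * Userspace sketch (editor's illustration, not part of the original
 * source): capping CPU time; per the RLIMIT_CPU handling above this also
 * arms a process CPU timer right away when the new limit is already due:
 *
 *	struct rlimit rl = { .rlim_cur = 10, .rlim_max = 20 };
 *	if (setrlimit(RLIMIT_CPU, &rl) != 0)
 *		perror("setrlimit");
 */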
/*
 * It would make sense to put struct rusage in the task_struct,
 * except that would make the task_struct be *really big*.  After
 * task_struct gets moved into malloc'ed memory, it would
 * make sense to do this.  It will make moving the rest of the information
 * a lot simpler!  (Which we're not doing right now because we're not
 * measuring them yet).
 *
 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
 * races with threads incrementing their own counters.  But since word
 * reads are atomic, we either get new values or old values and we don't
 * care which for the sums.  We always take the siglock to protect reading
 * the c* fields from p->signal from races with exit.c updating those
 * fields when reaping, so a sample either gets all the additions of a
 * given child after it's reaped, or none so this sample is before reaping.
 *
 * Locking:
 * We need to take the siglock for CHILDREN, SELF and BOTH
 * for the cases current multithreaded, non-current single threaded
 * non-current multithreaded.  Thread traversal is now safe with
 * the siglock held.
 * Strictly speaking, we do not need to take the siglock if we are current and
 * single threaded, as no one else can take our signal_struct away, no one
 * else can reap the children to update signal->c* counters, and no one else
 * can race with the signal-> fields. If we do not take any lock, the
 * signal-> fields could be read out of order while another thread was just
 * exiting. So we should place a read memory barrier when we avoid the lock.
 * On the writer side, write memory barrier is implied in __exit_signal
 * as __exit_signal releases the siglock spinlock after updating the signal->
 * fields. But we don't do this yet to keep things simple.
 */
static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
{
	struct task_struct *t;
	unsigned long flags;
	cputime_t utime, stime;

	memset((char *) r, 0, sizeof *r);
	utime = stime = cputime_zero;

	rcu_read_lock();
	if (!lock_task_sighand(p, &flags)) {
		rcu_read_unlock();
		return;
	}

	switch (who) {
		case RUSAGE_BOTH:
		case RUSAGE_CHILDREN:
			utime = p->signal->cutime;
			stime = p->signal->cstime;
			r->ru_nvcsw = p->signal->cnvcsw;
			r->ru_nivcsw = p->signal->cnivcsw;
			r->ru_minflt = p->signal->cmin_flt;
			r->ru_majflt = p->signal->cmaj_flt;
			r->ru_inblock = p->signal->cinblock;
			r->ru_oublock = p->signal->coublock;

			if (who == RUSAGE_CHILDREN)
				break;

		case RUSAGE_SELF:
			utime = cputime_add(utime, p->signal->utime);
			stime = cputime_add(stime, p->signal->stime);
			r->ru_nvcsw += p->signal->nvcsw;
			r->ru_nivcsw += p->signal->nivcsw;
			r->ru_minflt += p->signal->min_flt;
			r->ru_majflt += p->signal->maj_flt;
			r->ru_inblock += p->signal->inblock;
			r->ru_oublock += p->signal->oublock;
			t = p;
			do {
				utime = cputime_add(utime, t->utime);
				stime = cputime_add(stime, t->stime);
				r->ru_nvcsw += t->nvcsw;
				r->ru_nivcsw += t->nivcsw;
				r->ru_minflt += t->min_flt;
				r->ru_majflt += t->maj_flt;
				r->ru_inblock += task_io_get_inblock(t);
				r->ru_oublock += task_io_get_oublock(t);
				t = next_thread(t);
			} while (t != p);
			break;

		default:
			BUG();
	}

	unlock_task_sighand(p, &flags);
	rcu_read_unlock();

	cputime_to_timeval(utime, &r->ru_utime);
	cputime_to_timeval(stime, &r->ru_stime);
}
int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
{
	struct rusage r;
	k_getrusage(p, who, &r);
	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
}
asmlinkage long sys_getrusage(int who, struct rusage __user *ru)
{
	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN)
		return -EINVAL;
	return getrusage(current, who, ru);
}
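
/*
 * Userspace sketch (editor's illustration, not part of the original
 * source): RUSAGE_SELF sums all threads of the caller, RUSAGE_CHILDREN
 * only children that have already been reaped:
 *
 *	struct rusage ru;
 *	if (getrusage(RUSAGE_SELF, &ru) == 0)
 *		printf("user %ld.%06lds\n",
 *		       (long)ru.ru_utime.tv_sec, (long)ru.ru_utime.tv_usec);
 */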
asmlinkage long sys_umask(int mask)
{
	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
	return mask;
}
asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
			  unsigned long arg4, unsigned long arg5)
{
	long error;

	error = security_task_prctl(option, arg2, arg3, arg4, arg5);
	if (error)
		return error;

	switch (option) {
		case PR_SET_PDEATHSIG:
			if (!valid_signal(arg2)) {
				error = -EINVAL;
				break;
			}
			current->pdeath_signal = arg2;
			break;
		case PR_GET_PDEATHSIG:
			error = put_user(current->pdeath_signal, (int __user *)arg2);
			break;
		case PR_GET_DUMPABLE:
			error = current->mm->dumpable;
			break;
		case PR_SET_DUMPABLE:
			if (arg2 < 0 || arg2 > 1) {
				error = -EINVAL;
				break;
			}
			current->mm->dumpable = arg2;
			break;

		case PR_SET_UNALIGN:
			error = SET_UNALIGN_CTL(current, arg2);
			break;
		case PR_GET_UNALIGN:
			error = GET_UNALIGN_CTL(current, arg2);
			break;
		case PR_SET_FPEMU:
			error = SET_FPEMU_CTL(current, arg2);
			break;
		case PR_GET_FPEMU:
			error = GET_FPEMU_CTL(current, arg2);
			break;
		case PR_SET_FPEXC:
			error = SET_FPEXC_CTL(current, arg2);
			break;
		case PR_GET_FPEXC:
			error = GET_FPEXC_CTL(current, arg2);
			break;
		case PR_GET_TIMING:
			error = PR_TIMING_STATISTICAL;
			break;
		case PR_SET_TIMING:
			if (arg2 == PR_TIMING_STATISTICAL)
				error = 0;
			else
				error = -EINVAL;
			break;

		case PR_GET_KEEPCAPS:
			if (current->keep_capabilities)
				error = 1;
			break;
		case PR_SET_KEEPCAPS:
			if (arg2 != 0 && arg2 != 1) {
				error = -EINVAL;
				break;
			}
			current->keep_capabilities = arg2;
			break;
		case PR_SET_NAME: {
			struct task_struct *me = current;
			unsigned char ncomm[sizeof(me->comm)];

			ncomm[sizeof(me->comm)-1] = 0;
			if (strncpy_from_user(ncomm, (char __user *)arg2,
						sizeof(me->comm)-1) < 0)
				return -EFAULT;
			set_task_comm(me, ncomm);
			return 0;
		}
		case PR_GET_NAME: {
			struct task_struct *me = current;
			unsigned char tcomm[sizeof(me->comm)];

			get_task_comm(tcomm, me);
			if (copy_to_user((char __user *)arg2, tcomm, sizeof(tcomm)))
				return -EFAULT;
			return 0;
		}
		case PR_GET_ENDIAN:
			error = GET_ENDIAN(current, arg2);
			break;
		case PR_SET_ENDIAN:
			error = SET_ENDIAN(current, arg2);
			break;

		default:
			error = -EINVAL;
			break;
	}
	return error;
}
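
/*
 * Userspace sketch (editor's illustration, not part of the original
 * source): two of the options handled above, as reached through prctl(2):
 *
 *	prctl(PR_SET_PDEATHSIG, SIGTERM);	signal me if my parent dies
 *	prctl(PR_SET_NAME, "worker");		set comm: 15 chars + NUL max
 */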
asmlinkage long sys_getcpu(unsigned __user *cpup, unsigned __user *nodep,
			   struct getcpu_cache __user *cache)
{
	int err = 0;
	int cpu = raw_smp_processor_id();
	if (cpup)
		err |= put_user(cpu, cpup);
	if (nodep)
		err |= put_user(cpu_to_node(cpu), nodep);
	if (cache) {
		/*
		 * The cache is not needed for this implementation,
		 * but make sure user programs pass something
		 * valid. vsyscall implementations can instead make
		 * good use of the cache. Only use t0 and t1 because
		 * these are available in both 32bit and 64bit ABI (no
		 * need for a compat_getcpu). 32bit has enough
		 * padding
		 */
		unsigned long t0, t1;
		get_user(t0, &cache->blob[0]);
		get_user(t1, &cache->blob[1]);
		t0++;
		t1++;
		put_user(t0, &cache->blob[0]);
		put_user(t1, &cache->blob[1]);
	}
	return err ? -EFAULT : 0;
}
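
/*
 * Userspace sketch (editor's illustration, not part of the original
 * source): callers of this era use the raw syscall; the cache argument
 * may be NULL with this in-kernel implementation, as the comment above
 * notes:
 *
 *	unsigned cpu, node;
 *	if (syscall(SYS_getcpu, &cpu, &node, NULL) == 0)
 *		printf("cpu %u node %u\n", cpu, node);
 */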