1 /*
2 * linux/kernel/sys.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 */
6
7 #include <linux/module.h>
8 #include <linux/mm.h>
9 #include <linux/utsname.h>
10 #include <linux/mman.h>
11 #include <linux/reboot.h>
12 #include <linux/prctl.h>
13 #include <linux/highuid.h>
14 #include <linux/fs.h>
15 #include <linux/perf_event.h>
16 #include <linux/resource.h>
17 #include <linux/kernel.h>
18 #include <linux/kexec.h>
19 #include <linux/workqueue.h>
20 #include <linux/capability.h>
21 #include <linux/device.h>
22 #include <linux/key.h>
23 #include <linux/times.h>
24 #include <linux/posix-timers.h>
25 #include <linux/security.h>
26 #include <linux/dcookies.h>
27 #include <linux/suspend.h>
28 #include <linux/tty.h>
29 #include <linux/signal.h>
30 #include <linux/cn_proc.h>
31 #include <linux/getcpu.h>
32 #include <linux/task_io_accounting_ops.h>
33 #include <linux/seccomp.h>
34 #include <linux/cpu.h>
35 #include <linux/personality.h>
36 #include <linux/ptrace.h>
37 #include <linux/fs_struct.h>
38 #include <linux/gfp.h>
39 #include <linux/syscore_ops.h>
40
41 #include <linux/compat.h>
42 #include <linux/syscalls.h>
43 #include <linux/kprobes.h>
44 #include <linux/user_namespace.h>
45
46 #include <linux/kmsg_dump.h>
47
48 #include <asm/uaccess.h>
49 #include <asm/io.h>
50 #include <asm/unistd.h>
51
52 #ifndef SET_UNALIGN_CTL
53 # define SET_UNALIGN_CTL(a,b) (-EINVAL)
54 #endif
55 #ifndef GET_UNALIGN_CTL
56 # define GET_UNALIGN_CTL(a,b) (-EINVAL)
57 #endif
58 #ifndef SET_FPEMU_CTL
59 # define SET_FPEMU_CTL(a,b) (-EINVAL)
60 #endif
61 #ifndef GET_FPEMU_CTL
62 # define GET_FPEMU_CTL(a,b) (-EINVAL)
63 #endif
64 #ifndef SET_FPEXC_CTL
65 # define SET_FPEXC_CTL(a,b) (-EINVAL)
66 #endif
67 #ifndef GET_FPEXC_CTL
68 # define GET_FPEXC_CTL(a,b) (-EINVAL)
69 #endif
70 #ifndef GET_ENDIAN
71 # define GET_ENDIAN(a,b) (-EINVAL)
72 #endif
73 #ifndef SET_ENDIAN
74 # define SET_ENDIAN(a,b) (-EINVAL)
75 #endif
76 #ifndef GET_TSC_CTL
77 # define GET_TSC_CTL(a) (-EINVAL)
78 #endif
79 #ifndef SET_TSC_CTL
80 # define SET_TSC_CTL(a) (-EINVAL)
81 #endif
82
83 /*
84 * this is where the system-wide overflow UID and GID are defined, for
85 * architectures that now have 32-bit UID/GID but didn't in the past
86 */
87
88 int overflowuid = DEFAULT_OVERFLOWUID;
89 int overflowgid = DEFAULT_OVERFLOWGID;
90
91 #ifdef CONFIG_UID16
92 EXPORT_SYMBOL(overflowuid);
93 EXPORT_SYMBOL(overflowgid);
94 #endif
95
96 /*
97 * the same as above, but for filesystems which can only store a 16-bit
98  * UID and GID. As such, this is needed on all architectures
99 */
100
101 int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
102 int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;
103
104 EXPORT_SYMBOL(fs_overflowuid);
105 EXPORT_SYMBOL(fs_overflowgid);
106
107 /*
108 * this indicates whether you can reboot with ctrl-alt-del: the default is yes
109 */
110
111 int C_A_D = 1;
112 struct pid *cad_pid;
113 EXPORT_SYMBOL(cad_pid);
114
115 /*
116 * If set, this is used for preparing the system to power off.
117 */
118
119 void (*pm_power_off_prepare)(void);
120
121 /*
122  * Returns true if current's euid is the same as p's uid or euid,
123 * or has CAP_SYS_NICE to p's user_ns.
124 *
125 * Called with rcu_read_lock, creds are safe
126 */
127 static bool set_one_prio_perm(struct task_struct *p)
128 {
129 const struct cred *cred = current_cred(), *pcred = __task_cred(p);
130
131 if (pcred->user->user_ns == cred->user->user_ns &&
132 (pcred->uid == cred->euid ||
133 pcred->euid == cred->euid))
134 return true;
135 if (ns_capable(pcred->user->user_ns, CAP_SYS_NICE))
136 return true;
137 return false;
138 }
139
140 /*
141 * set the priority of a task
142 * - the caller must hold the RCU read lock
143 */
144 static int set_one_prio(struct task_struct *p, int niceval, int error)
145 {
146 int no_nice;
147
148 if (!set_one_prio_perm(p)) {
149 error = -EPERM;
150 goto out;
151 }
152 if (niceval < task_nice(p) && !can_nice(p, niceval)) {
153 error = -EACCES;
154 goto out;
155 }
156 no_nice = security_task_setnice(p, niceval);
157 if (no_nice) {
158 error = no_nice;
159 goto out;
160 }
161 if (error == -ESRCH)
162 error = 0;
163 set_user_nice(p, niceval);
164 out:
165 return error;
166 }
167
168 SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
169 {
170 struct task_struct *g, *p;
171 struct user_struct *user;
172 const struct cred *cred = current_cred();
173 int error = -EINVAL;
174 struct pid *pgrp;
175
176 if (which > PRIO_USER || which < PRIO_PROCESS)
177 goto out;
178
179 /* normalize: avoid signed division (rounding problems) */
180 error = -ESRCH;
181 if (niceval < -20)
182 niceval = -20;
183 if (niceval > 19)
184 niceval = 19;
185
186 rcu_read_lock();
187 read_lock(&tasklist_lock);
188 switch (which) {
189 case PRIO_PROCESS:
190 if (who)
191 p = find_task_by_vpid(who);
192 else
193 p = current;
194 if (p)
195 error = set_one_prio(p, niceval, error);
196 break;
197 case PRIO_PGRP:
198 if (who)
199 pgrp = find_vpid(who);
200 else
201 pgrp = task_pgrp(current);
202 do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
203 error = set_one_prio(p, niceval, error);
204 } while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
205 break;
206 case PRIO_USER:
207 user = (struct user_struct *) cred->user;
208 if (!who)
209 who = cred->uid;
210 else if ((who != cred->uid) &&
211 !(user = find_user(who)))
212 goto out_unlock; /* No processes for this user */
213
214 do_each_thread(g, p) {
215 if (__task_cred(p)->uid == who)
216 error = set_one_prio(p, niceval, error);
217 } while_each_thread(g, p);
218 if (who != cred->uid)
219 free_uid(user); /* For find_user() */
220 break;
221 }
222 out_unlock:
223 read_unlock(&tasklist_lock);
224 rcu_read_unlock();
225 out:
226 return error;
227 }
228
229 /*
230 * Ugh. To avoid negative return values, "getpriority()" will
231 * not return the normal nice-value, but a negated value that
232 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
233 * to stay compatible.
234 */
235 SYSCALL_DEFINE2(getpriority, int, which, int, who)
236 {
237 struct task_struct *g, *p;
238 struct user_struct *user;
239 const struct cred *cred = current_cred();
240 long niceval, retval = -ESRCH;
241 struct pid *pgrp;
242
243 if (which > PRIO_USER || which < PRIO_PROCESS)
244 return -EINVAL;
245
246 rcu_read_lock();
247 read_lock(&tasklist_lock);
248 switch (which) {
249 case PRIO_PROCESS:
250 if (who)
251 p = find_task_by_vpid(who);
252 else
253 p = current;
254 if (p) {
255 niceval = 20 - task_nice(p);
256 if (niceval > retval)
257 retval = niceval;
258 }
259 break;
260 case PRIO_PGRP:
261 if (who)
262 pgrp = find_vpid(who);
263 else
264 pgrp = task_pgrp(current);
265 do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
266 niceval = 20 - task_nice(p);
267 if (niceval > retval)
268 retval = niceval;
269 } while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
270 break;
271 case PRIO_USER:
272 user = (struct user_struct *) cred->user;
273 if (!who)
274 who = cred->uid;
275 else if ((who != cred->uid) &&
276 !(user = find_user(who)))
277 goto out_unlock; /* No processes for this user */
278
279 do_each_thread(g, p) {
280 if (__task_cred(p)->uid == who) {
281 niceval = 20 - task_nice(p);
282 if (niceval > retval)
283 retval = niceval;
284 }
285 } while_each_thread(g, p);
286 if (who != cred->uid)
287 free_uid(user); /* for find_user() */
288 break;
289 }
290 out_unlock:
291 read_unlock(&tasklist_lock);
292 rcu_read_unlock();
293
294 return retval;
295 }
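/*
 * Illustrative userspace sketch (not part of this file): callers of the raw
 * syscall must undo the +20 offset described above; glibc's own getpriority()
 * wrapper already does so.  Assumes <sys/syscall.h>, <sys/resource.h> and the
 * syscall(2) wrapper.
 *
 *	long ret = syscall(SYS_getpriority, PRIO_PROCESS, 0);
 *	if (ret != -1) {
 *		int nice = 20 - (int)ret;	// maps 40..1 back to -20..19
 *	}
 */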
296
297 /**
298 * emergency_restart - reboot the system
299 *
300 * Without shutting down any hardware or taking any locks
301 * reboot the system. This is called when we know we are in
302 * trouble so this is our best effort to reboot. This is
303 * safe to call in interrupt context.
304 */
305 void emergency_restart(void)
306 {
307 kmsg_dump(KMSG_DUMP_EMERG);
308 machine_emergency_restart();
309 }
310 EXPORT_SYMBOL_GPL(emergency_restart);
311
312 void kernel_restart_prepare(char *cmd)
313 {
314 blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
315 system_state = SYSTEM_RESTART;
316 usermodehelper_disable();
317 device_shutdown();
318 syscore_shutdown();
319 }
320
321 /**
322 * register_reboot_notifier - Register function to be called at reboot time
323 * @nb: Info about notifier function to be called
324 *
325 * Registers a function with the list of functions
326 * to be called at reboot time.
327 *
328 * Currently always returns zero, as blocking_notifier_chain_register()
329 * always returns zero.
330 */
331 int register_reboot_notifier(struct notifier_block *nb)
332 {
333 return blocking_notifier_chain_register(&reboot_notifier_list, nb);
334 }
335 EXPORT_SYMBOL(register_reboot_notifier);
336
337 /**
338 * unregister_reboot_notifier - Unregister previously registered reboot notifier
339 * @nb: Hook to be unregistered
340 *
341 * Unregisters a previously registered reboot
342 * notifier function.
343 *
344 * Returns zero on success, or %-ENOENT on failure.
345 */
346 int unregister_reboot_notifier(struct notifier_block *nb)
347 {
348 return blocking_notifier_chain_unregister(&reboot_notifier_list, nb);
349 }
350 EXPORT_SYMBOL(unregister_reboot_notifier);
351
352 /**
353 * kernel_restart - reboot the system
354 * @cmd: pointer to buffer containing command to execute for restart
355 * or %NULL
356 *
357 * Shutdown everything and perform a clean reboot.
358 * This is not safe to call in interrupt context.
359 */
360 void kernel_restart(char *cmd)
361 {
362 kernel_restart_prepare(cmd);
363 if (!cmd)
364 printk(KERN_EMERG "Restarting system.\n");
365 else
366 printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd);
367 kmsg_dump(KMSG_DUMP_RESTART);
368 machine_restart(cmd);
369 }
370 EXPORT_SYMBOL_GPL(kernel_restart);
371
372 static void kernel_shutdown_prepare(enum system_states state)
373 {
374 blocking_notifier_call_chain(&reboot_notifier_list,
375 (state == SYSTEM_HALT)?SYS_HALT:SYS_POWER_OFF, NULL);
376 system_state = state;
377 usermodehelper_disable();
378 device_shutdown();
379 }
380 /**
381 * kernel_halt - halt the system
382 *
383 * Shutdown everything and perform a clean system halt.
384 */
385 void kernel_halt(void)
386 {
387 kernel_shutdown_prepare(SYSTEM_HALT);
388 syscore_shutdown();
389 printk(KERN_EMERG "System halted.\n");
390 kmsg_dump(KMSG_DUMP_HALT);
391 machine_halt();
392 }
393
394 EXPORT_SYMBOL_GPL(kernel_halt);
395
396 /**
397 * kernel_power_off - power_off the system
398 *
399 * Shutdown everything and perform a clean system power_off.
400 */
401 void kernel_power_off(void)
402 {
403 kernel_shutdown_prepare(SYSTEM_POWER_OFF);
404 if (pm_power_off_prepare)
405 pm_power_off_prepare();
406 disable_nonboot_cpus();
407 syscore_shutdown();
408 printk(KERN_EMERG "Power down.\n");
409 kmsg_dump(KMSG_DUMP_POWEROFF);
410 machine_power_off();
411 }
412 EXPORT_SYMBOL_GPL(kernel_power_off);
413
414 static DEFINE_MUTEX(reboot_mutex);
415
416 /*
417 * Reboot system call: for obvious reasons only root may call it,
418 * and even root needs to set up some magic numbers in the registers
419 * so that some mistake won't make this reboot the whole machine.
420 * You can also set the meaning of the ctrl-alt-del-key here.
421 *
422 * reboot doesn't sync: do that yourself before calling this.
423 */
424 SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
425 void __user *, arg)
426 {
427 char buffer[256];
428 int ret = 0;
429
430 /* We only trust the superuser with rebooting the system. */
431 if (!capable(CAP_SYS_BOOT))
432 return -EPERM;
433
434 /* For safety, we require "magic" arguments. */
435 if (magic1 != LINUX_REBOOT_MAGIC1 ||
436 (magic2 != LINUX_REBOOT_MAGIC2 &&
437 magic2 != LINUX_REBOOT_MAGIC2A &&
438 magic2 != LINUX_REBOOT_MAGIC2B &&
439 magic2 != LINUX_REBOOT_MAGIC2C))
440 return -EINVAL;
441
442 /* Instead of trying to make the power_off code look like
443	 * halt when pm_power_off is not set, do it the easy way.
444 */
445 if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
446 cmd = LINUX_REBOOT_CMD_HALT;
447
448 mutex_lock(&reboot_mutex);
449 switch (cmd) {
450 case LINUX_REBOOT_CMD_RESTART:
451 kernel_restart(NULL);
452 break;
453
454 case LINUX_REBOOT_CMD_CAD_ON:
455 C_A_D = 1;
456 break;
457
458 case LINUX_REBOOT_CMD_CAD_OFF:
459 C_A_D = 0;
460 break;
461
462 case LINUX_REBOOT_CMD_HALT:
463 kernel_halt();
464 do_exit(0);
465 panic("cannot halt");
466
467 case LINUX_REBOOT_CMD_POWER_OFF:
468 kernel_power_off();
469 do_exit(0);
470 break;
471
472 case LINUX_REBOOT_CMD_RESTART2:
473 if (strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1) < 0) {
474 ret = -EFAULT;
475 break;
476 }
477 buffer[sizeof(buffer) - 1] = '\0';
478
479 kernel_restart(buffer);
480 break;
481
482 #ifdef CONFIG_KEXEC
483 case LINUX_REBOOT_CMD_KEXEC:
484 ret = kernel_kexec();
485 break;
486 #endif
487
488 #ifdef CONFIG_HIBERNATION
489 case LINUX_REBOOT_CMD_SW_SUSPEND:
490 ret = hibernate();
491 break;
492 #endif
493
494 default:
495 ret = -EINVAL;
496 break;
497 }
498 mutex_unlock(&reboot_mutex);
499 return ret;
500 }
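/*
 * Illustrative userspace sketch (not part of this file): a minimal privileged
 * restart through the raw syscall, showing why the magic values checked above
 * exist.  Assumes CAP_SYS_BOOT, <unistd.h>, <sys/syscall.h> and
 * <linux/reboot.h>; as noted above, the syscall does not sync, so that is
 * done first.
 *
 *	sync();
 *	syscall(SYS_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
 *		LINUX_REBOOT_CMD_RESTART, NULL);
 */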
501
502 static void deferred_cad(struct work_struct *dummy)
503 {
504 kernel_restart(NULL);
505 }
506
507 /*
508 * This function gets called by ctrl-alt-del - ie the keyboard interrupt.
509 * As it's called within an interrupt, it may NOT sync: the only choice
510 * is whether to reboot at once, or just ignore the ctrl-alt-del.
511 */
512 void ctrl_alt_del(void)
513 {
514 static DECLARE_WORK(cad_work, deferred_cad);
515
516 if (C_A_D)
517 schedule_work(&cad_work);
518 else
519 kill_cad_pid(SIGINT, 1);
520 }
521
522 /*
523 * Unprivileged users may change the real gid to the effective gid
524 * or vice versa. (BSD-style)
525 *
526 * If you set the real gid at all, or set the effective gid to a value not
527 * equal to the real gid, then the saved gid is set to the new effective gid.
528 *
529 * This makes it possible for a setgid program to completely drop its
530 * privileges, which is often a useful assertion to make when you are doing
531  * a security audit of a program.
532 *
533 * The general idea is that a program which uses just setregid() will be
534 * 100% compatible with BSD. A program which uses just setgid() will be
535 * 100% compatible with POSIX with saved IDs.
536 *
537  * SMP: There are no races; the GIDs are checked only by filesystem
538 * operations (as far as semantic preservation is concerned).
539 */
540 SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
541 {
542 const struct cred *old;
543 struct cred *new;
544 int retval;
545
546 new = prepare_creds();
547 if (!new)
548 return -ENOMEM;
549 old = current_cred();
550
551 retval = -EPERM;
552 if (rgid != (gid_t) -1) {
553 if (old->gid == rgid ||
554 old->egid == rgid ||
555 nsown_capable(CAP_SETGID))
556 new->gid = rgid;
557 else
558 goto error;
559 }
560 if (egid != (gid_t) -1) {
561 if (old->gid == egid ||
562 old->egid == egid ||
563 old->sgid == egid ||
564 nsown_capable(CAP_SETGID))
565 new->egid = egid;
566 else
567 goto error;
568 }
569
570 if (rgid != (gid_t) -1 ||
571 (egid != (gid_t) -1 && egid != old->gid))
572 new->sgid = new->egid;
573 new->fsgid = new->egid;
574
575 return commit_creds(new);
576
577 error:
578 abort_creds(new);
579 return retval;
580 }
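/*
 * Illustrative userspace sketch (not part of this file): a setgid program
 * dropping its group privilege for good, as the comment above describes -
 * writing the real gid makes the saved gid follow the new effective gid, so
 * there is no way back.
 *
 *	gid_t rgid = getgid();
 *	if (setregid(rgid, rgid) != 0)
 *		_exit(1);	// refuse to continue half-privileged
 */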
581
582 /*
583 * setgid() is implemented like SysV w/ SAVED_IDS
584 *
585 * SMP: Same implicit races as above.
586 */
587 SYSCALL_DEFINE1(setgid, gid_t, gid)
588 {
589 const struct cred *old;
590 struct cred *new;
591 int retval;
592
593 new = prepare_creds();
594 if (!new)
595 return -ENOMEM;
596 old = current_cred();
597
598 retval = -EPERM;
599 if (nsown_capable(CAP_SETGID))
600 new->gid = new->egid = new->sgid = new->fsgid = gid;
601 else if (gid == old->gid || gid == old->sgid)
602 new->egid = new->fsgid = gid;
603 else
604 goto error;
605
606 return commit_creds(new);
607
608 error:
609 abort_creds(new);
610 return retval;
611 }
612
613 /*
614 * change the user struct in a credentials set to match the new UID
615 */
616 static int set_user(struct cred *new)
617 {
618 struct user_struct *new_user;
619
620 new_user = alloc_uid(current_user_ns(), new->uid);
621 if (!new_user)
622 return -EAGAIN;
623
624 /*
625 * We don't fail in case of NPROC limit excess here because too many
626 * poorly written programs don't check set*uid() return code, assuming
627 * it never fails if called by root. We may still enforce NPROC limit
628 * for programs doing set*uid()+execve() by harmlessly deferring the
629 * failure to the execve() stage.
630 */
631 if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
632 new_user != INIT_USER)
633 current->flags |= PF_NPROC_EXCEEDED;
634 else
635 current->flags &= ~PF_NPROC_EXCEEDED;
636
637 free_uid(new->user);
638 new->user = new_user;
639 return 0;
640 }
641
642 /*
643 * Unprivileged users may change the real uid to the effective uid
644 * or vice versa. (BSD-style)
645 *
646 * If you set the real uid at all, or set the effective uid to a value not
647 * equal to the real uid, then the saved uid is set to the new effective uid.
648 *
649 * This makes it possible for a setuid program to completely drop its
650 * privileges, which is often a useful assertion to make when you are doing
651  * a security audit of a program.
652 *
653 * The general idea is that a program which uses just setreuid() will be
654 * 100% compatible with BSD. A program which uses just setuid() will be
655 * 100% compatible with POSIX with saved IDs.
656 */
657 SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
658 {
659 const struct cred *old;
660 struct cred *new;
661 int retval;
662
663 new = prepare_creds();
664 if (!new)
665 return -ENOMEM;
666 old = current_cred();
667
668 retval = -EPERM;
669 if (ruid != (uid_t) -1) {
670 new->uid = ruid;
671 if (old->uid != ruid &&
672 old->euid != ruid &&
673 !nsown_capable(CAP_SETUID))
674 goto error;
675 }
676
677 if (euid != (uid_t) -1) {
678 new->euid = euid;
679 if (old->uid != euid &&
680 old->euid != euid &&
681 old->suid != euid &&
682 !nsown_capable(CAP_SETUID))
683 goto error;
684 }
685
686 if (new->uid != old->uid) {
687 retval = set_user(new);
688 if (retval < 0)
689 goto error;
690 }
691 if (ruid != (uid_t) -1 ||
692 (euid != (uid_t) -1 && euid != old->uid))
693 new->suid = new->euid;
694 new->fsuid = new->euid;
695
696 retval = security_task_fix_setuid(new, old, LSM_SETID_RE);
697 if (retval < 0)
698 goto error;
699
700 return commit_creds(new);
701
702 error:
703 abort_creds(new);
704 return retval;
705 }
706
707 /*
708 * setuid() is implemented like SysV with SAVED_IDS
709 *
710  * Note that SAVED_IDS is deficient in that a setuid root program
711 * like sendmail, for example, cannot set its uid to be a normal
712 * user and then switch back, because if you're root, setuid() sets
713 * the saved uid too. If you don't like this, blame the bright people
714 * in the POSIX committee and/or USG. Note that the BSD-style setreuid()
715 * will allow a root program to temporarily drop privileges and be able to
716 * regain them by swapping the real and effective uid.
717 */
718 SYSCALL_DEFINE1(setuid, uid_t, uid)
719 {
720 const struct cred *old;
721 struct cred *new;
722 int retval;
723
724 new = prepare_creds();
725 if (!new)
726 return -ENOMEM;
727 old = current_cred();
728
729 retval = -EPERM;
730 if (nsown_capable(CAP_SETUID)) {
731 new->suid = new->uid = uid;
732 if (uid != old->uid) {
733 retval = set_user(new);
734 if (retval < 0)
735 goto error;
736 }
737 } else if (uid != old->uid && uid != new->suid) {
738 goto error;
739 }
740
741 new->fsuid = new->euid = uid;
742
743 retval = security_task_fix_setuid(new, old, LSM_SETID_ID);
744 if (retval < 0)
745 goto error;
746
747 return commit_creds(new);
748
749 error:
750 abort_creds(new);
751 return retval;
752 }
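/*
 * Illustrative userspace sketch (not part of this file): the contrast drawn
 * in the two comments above, for a set-uid-root binary started by an ordinary
 * user (ruid = user, euid = 0).  setreuid() can swap the real and effective
 * uids back and forth, while setuid() drops root for good because the saved
 * uid is overwritten as well.
 *
 *	uid_t user = getuid();
 *	setreuid(0, user);	// swap: euid = user, root parked in the real uid
 *	...
 *	setreuid(user, 0);	// swap back: effective root regained
 *	setuid(user);		// permanent: ruid = euid = suid = user
 */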
753
754
755 /*
756 * This function implements a generic ability to update ruid, euid,
757 * and suid. This allows you to implement the 4.4 compatible seteuid().
758 */
759 SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
760 {
761 const struct cred *old;
762 struct cred *new;
763 int retval;
764
765 new = prepare_creds();
766 if (!new)
767 return -ENOMEM;
768
769 old = current_cred();
770
771 retval = -EPERM;
772 if (!nsown_capable(CAP_SETUID)) {
773 if (ruid != (uid_t) -1 && ruid != old->uid &&
774 ruid != old->euid && ruid != old->suid)
775 goto error;
776 if (euid != (uid_t) -1 && euid != old->uid &&
777 euid != old->euid && euid != old->suid)
778 goto error;
779 if (suid != (uid_t) -1 && suid != old->uid &&
780 suid != old->euid && suid != old->suid)
781 goto error;
782 }
783
784 if (ruid != (uid_t) -1) {
785 new->uid = ruid;
786 if (ruid != old->uid) {
787 retval = set_user(new);
788 if (retval < 0)
789 goto error;
790 }
791 }
792 if (euid != (uid_t) -1)
793 new->euid = euid;
794 if (suid != (uid_t) -1)
795 new->suid = suid;
796 new->fsuid = new->euid;
797
798 retval = security_task_fix_setuid(new, old, LSM_SETID_RES);
799 if (retval < 0)
800 goto error;
801
802 return commit_creds(new);
803
804 error:
805 abort_creds(new);
806 return retval;
807 }
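/*
 * Illustrative userspace sketch (not part of this file): the 4.4BSD-style
 * seteuid() mentioned above, built on setresuid() - only the effective uid
 * changes, the real and saved uids are left alone by passing -1.
 * my_seteuid() is a made-up name for the example.
 *
 *	int my_seteuid(uid_t euid)
 *	{
 *		return setresuid((uid_t)-1, euid, (uid_t)-1);
 *	}
 */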
808
809 SYSCALL_DEFINE3(getresuid, uid_t __user *, ruid, uid_t __user *, euid, uid_t __user *, suid)
810 {
811 const struct cred *cred = current_cred();
812 int retval;
813
814 if (!(retval = put_user(cred->uid, ruid)) &&
815 !(retval = put_user(cred->euid, euid)))
816 retval = put_user(cred->suid, suid);
817
818 return retval;
819 }
820
821 /*
822 * Same as above, but for rgid, egid, sgid.
823 */
824 SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
825 {
826 const struct cred *old;
827 struct cred *new;
828 int retval;
829
830 new = prepare_creds();
831 if (!new)
832 return -ENOMEM;
833 old = current_cred();
834
835 retval = -EPERM;
836 if (!nsown_capable(CAP_SETGID)) {
837 if (rgid != (gid_t) -1 && rgid != old->gid &&
838 rgid != old->egid && rgid != old->sgid)
839 goto error;
840 if (egid != (gid_t) -1 && egid != old->gid &&
841 egid != old->egid && egid != old->sgid)
842 goto error;
843 if (sgid != (gid_t) -1 && sgid != old->gid &&
844 sgid != old->egid && sgid != old->sgid)
845 goto error;
846 }
847
848 if (rgid != (gid_t) -1)
849 new->gid = rgid;
850 if (egid != (gid_t) -1)
851 new->egid = egid;
852 if (sgid != (gid_t) -1)
853 new->sgid = sgid;
854 new->fsgid = new->egid;
855
856 return commit_creds(new);
857
858 error:
859 abort_creds(new);
860 return retval;
861 }
862
863 SYSCALL_DEFINE3(getresgid, gid_t __user *, rgid, gid_t __user *, egid, gid_t __user *, sgid)
864 {
865 const struct cred *cred = current_cred();
866 int retval;
867
868 if (!(retval = put_user(cred->gid, rgid)) &&
869 !(retval = put_user(cred->egid, egid)))
870 retval = put_user(cred->sgid, sgid);
871
872 return retval;
873 }
874
875
876 /*
877 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
878 * is used for "access()" and for the NFS daemon (letting nfsd stay at
879 * whatever uid it wants to). It normally shadows "euid", except when
880 * explicitly set by setfsuid() or for access..
881 */
882 SYSCALL_DEFINE1(setfsuid, uid_t, uid)
883 {
884 const struct cred *old;
885 struct cred *new;
886 uid_t old_fsuid;
887
888 new = prepare_creds();
889 if (!new)
890 return current_fsuid();
891 old = current_cred();
892 old_fsuid = old->fsuid;
893
894 if (uid == old->uid || uid == old->euid ||
895 uid == old->suid || uid == old->fsuid ||
896 nsown_capable(CAP_SETUID)) {
897 if (uid != old_fsuid) {
898 new->fsuid = uid;
899 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
900 goto change_okay;
901 }
902 }
903
904 abort_creds(new);
905 return old_fsuid;
906
907 change_okay:
908 commit_creds(new);
909 return old_fsuid;
910 }
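/*
 * Illustrative userspace sketch (not part of this file): the nfsd-style use
 * described above - a privileged server temporarily adopts a client's uid for
 * filesystem permission checks only, without touching its euid.  setfsuid()
 * returns the previous fsuid; client_uid is a made-up variable for the
 * example.
 *
 *	uid_t prev = setfsuid(client_uid);
 *	...			// file access is now checked as client_uid
 *	setfsuid(prev);		// restore the original fsuid
 */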
911
912 /*
913  * Same as setfsuid(), but for the fsgid ("Samma på svenska" - the same in Swedish).
914 */
915 SYSCALL_DEFINE1(setfsgid, gid_t, gid)
916 {
917 const struct cred *old;
918 struct cred *new;
919 gid_t old_fsgid;
920
921 new = prepare_creds();
922 if (!new)
923 return current_fsgid();
924 old = current_cred();
925 old_fsgid = old->fsgid;
926
927 if (gid == old->gid || gid == old->egid ||
928 gid == old->sgid || gid == old->fsgid ||
929 nsown_capable(CAP_SETGID)) {
930 if (gid != old_fsgid) {
931 new->fsgid = gid;
932 goto change_okay;
933 }
934 }
935
936 abort_creds(new);
937 return old_fsgid;
938
939 change_okay:
940 commit_creds(new);
941 return old_fsgid;
942 }
943
944 void do_sys_times(struct tms *tms)
945 {
946 cputime_t tgutime, tgstime, cutime, cstime;
947
948 spin_lock_irq(&current->sighand->siglock);
949 thread_group_times(current, &tgutime, &tgstime);
950 cutime = current->signal->cutime;
951 cstime = current->signal->cstime;
952 spin_unlock_irq(&current->sighand->siglock);
953 tms->tms_utime = cputime_to_clock_t(tgutime);
954 tms->tms_stime = cputime_to_clock_t(tgstime);
955 tms->tms_cutime = cputime_to_clock_t(cutime);
956 tms->tms_cstime = cputime_to_clock_t(cstime);
957 }
958
959 SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
960 {
961 if (tbuf) {
962 struct tms tmp;
963
964 do_sys_times(&tmp);
965 if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
966 return -EFAULT;
967 }
968 force_successful_syscall_return();
969 return (long) jiffies_64_to_clock_t(get_jiffies_64());
970 }
971
972 /*
973 * This needs some heavy checking ...
974 * I just haven't the stomach for it. I also don't fully
975 * understand sessions/pgrp etc. Let somebody who does explain it.
976 *
977 * OK, I think I have the protection semantics right.... this is really
978 * only important on a multi-user system anyway, to make sure one user
979 * can't send a signal to a process owned by another. -TYT, 12/12/91
980 *
981  * Ouch. Had to add the 'did_exec' flag to conform completely to POSIX.
982 * LBT 04.03.94
983 */
984 SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
985 {
986 struct task_struct *p;
987 struct task_struct *group_leader = current->group_leader;
988 struct pid *pgrp;
989 int err;
990
991 if (!pid)
992 pid = task_pid_vnr(group_leader);
993 if (!pgid)
994 pgid = pid;
995 if (pgid < 0)
996 return -EINVAL;
997 rcu_read_lock();
998
999 /* From this point forward we keep holding onto the tasklist lock
1000 * so that our parent does not change from under us. -DaveM
1001 */
1002 write_lock_irq(&tasklist_lock);
1003
1004 err = -ESRCH;
1005 p = find_task_by_vpid(pid);
1006 if (!p)
1007 goto out;
1008
1009 err = -EINVAL;
1010 if (!thread_group_leader(p))
1011 goto out;
1012
1013 if (same_thread_group(p->real_parent, group_leader)) {
1014 err = -EPERM;
1015 if (task_session(p) != task_session(group_leader))
1016 goto out;
1017 err = -EACCES;
1018 if (p->did_exec)
1019 goto out;
1020 } else {
1021 err = -ESRCH;
1022 if (p != group_leader)
1023 goto out;
1024 }
1025
1026 err = -EPERM;
1027 if (p->signal->leader)
1028 goto out;
1029
1030 pgrp = task_pid(p);
1031 if (pgid != pid) {
1032 struct task_struct *g;
1033
1034 pgrp = find_vpid(pgid);
1035 g = pid_task(pgrp, PIDTYPE_PGID);
1036 if (!g || task_session(g) != task_session(group_leader))
1037 goto out;
1038 }
1039
1040 err = security_task_setpgid(p, pgid);
1041 if (err)
1042 goto out;
1043
1044 if (task_pgrp(p) != pgrp)
1045 change_pid(p, PIDTYPE_PGID, pgrp);
1046
1047 err = 0;
1048 out:
1049 /* All paths lead to here, thus we are safe. -DaveM */
1050 write_unlock_irq(&tasklist_lock);
1051 rcu_read_unlock();
1052 return err;
1053 }
1054
1055 SYSCALL_DEFINE1(getpgid, pid_t, pid)
1056 {
1057 struct task_struct *p;
1058 struct pid *grp;
1059 int retval;
1060
1061 rcu_read_lock();
1062 if (!pid)
1063 grp = task_pgrp(current);
1064 else {
1065 retval = -ESRCH;
1066 p = find_task_by_vpid(pid);
1067 if (!p)
1068 goto out;
1069 grp = task_pgrp(p);
1070 if (!grp)
1071 goto out;
1072
1073 retval = security_task_getpgid(p);
1074 if (retval)
1075 goto out;
1076 }
1077 retval = pid_vnr(grp);
1078 out:
1079 rcu_read_unlock();
1080 return retval;
1081 }
1082
1083 #ifdef __ARCH_WANT_SYS_GETPGRP
1084
1085 SYSCALL_DEFINE0(getpgrp)
1086 {
1087 return sys_getpgid(0);
1088 }
1089
1090 #endif
1091
1092 SYSCALL_DEFINE1(getsid, pid_t, pid)
1093 {
1094 struct task_struct *p;
1095 struct pid *sid;
1096 int retval;
1097
1098 rcu_read_lock();
1099 if (!pid)
1100 sid = task_session(current);
1101 else {
1102 retval = -ESRCH;
1103 p = find_task_by_vpid(pid);
1104 if (!p)
1105 goto out;
1106 sid = task_session(p);
1107 if (!sid)
1108 goto out;
1109
1110 retval = security_task_getsid(p);
1111 if (retval)
1112 goto out;
1113 }
1114 retval = pid_vnr(sid);
1115 out:
1116 rcu_read_unlock();
1117 return retval;
1118 }
1119
1120 SYSCALL_DEFINE0(setsid)
1121 {
1122 struct task_struct *group_leader = current->group_leader;
1123 struct pid *sid = task_pid(group_leader);
1124 pid_t session = pid_vnr(sid);
1125 int err = -EPERM;
1126
1127 write_lock_irq(&tasklist_lock);
1128 /* Fail if I am already a session leader */
1129 if (group_leader->signal->leader)
1130 goto out;
1131
1132 /* Fail if a process group id already exists that equals the
1133 * proposed session id.
1134 */
1135 if (pid_task(sid, PIDTYPE_PGID))
1136 goto out;
1137
1138 group_leader->signal->leader = 1;
1139 __set_special_pids(sid);
1140
1141 proc_clear_tty(group_leader);
1142
1143 err = session;
1144 out:
1145 write_unlock_irq(&tasklist_lock);
1146 if (err > 0) {
1147 proc_sid_connector(group_leader);
1148 sched_autogroup_create_attach(group_leader);
1149 }
1150 return err;
1151 }
1152
1153 DECLARE_RWSEM(uts_sem);
1154
1155 #ifdef COMPAT_UTS_MACHINE
1156 #define override_architecture(name) \
1157 (personality(current->personality) == PER_LINUX32 && \
1158 copy_to_user(name->machine, COMPAT_UTS_MACHINE, \
1159 sizeof(COMPAT_UTS_MACHINE)))
1160 #else
1161 #define override_architecture(name) 0
1162 #endif
1163
1164 SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
1165 {
1166 int errno = 0;
1167
1168 down_read(&uts_sem);
1169 if (copy_to_user(name, utsname(), sizeof *name))
1170 errno = -EFAULT;
1171 up_read(&uts_sem);
1172
1173 if (!errno && override_architecture(name))
1174 errno = -EFAULT;
1175 return errno;
1176 }
1177
1178 #ifdef __ARCH_WANT_SYS_OLD_UNAME
1179 /*
1180 * Old cruft
1181 */
1182 SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
1183 {
1184 int error = 0;
1185
1186 if (!name)
1187 return -EFAULT;
1188
1189 down_read(&uts_sem);
1190 if (copy_to_user(name, utsname(), sizeof(*name)))
1191 error = -EFAULT;
1192 up_read(&uts_sem);
1193
1194 if (!error && override_architecture(name))
1195 error = -EFAULT;
1196 return error;
1197 }
1198
1199 SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
1200 {
1201 int error;
1202
1203 if (!name)
1204 return -EFAULT;
1205 if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
1206 return -EFAULT;
1207
1208 down_read(&uts_sem);
1209 error = __copy_to_user(&name->sysname, &utsname()->sysname,
1210 __OLD_UTS_LEN);
1211 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
1212 error |= __copy_to_user(&name->nodename, &utsname()->nodename,
1213 __OLD_UTS_LEN);
1214 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
1215 error |= __copy_to_user(&name->release, &utsname()->release,
1216 __OLD_UTS_LEN);
1217 error |= __put_user(0, name->release + __OLD_UTS_LEN);
1218 error |= __copy_to_user(&name->version, &utsname()->version,
1219 __OLD_UTS_LEN);
1220 error |= __put_user(0, name->version + __OLD_UTS_LEN);
1221 error |= __copy_to_user(&name->machine, &utsname()->machine,
1222 __OLD_UTS_LEN);
1223 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
1224 up_read(&uts_sem);
1225
1226 if (!error && override_architecture(name))
1227 error = -EFAULT;
1228 return error ? -EFAULT : 0;
1229 }
1230 #endif
1231
1232 SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
1233 {
1234 int errno;
1235 char tmp[__NEW_UTS_LEN];
1236
1237 if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
1238 return -EPERM;
1239
1240 if (len < 0 || len > __NEW_UTS_LEN)
1241 return -EINVAL;
1242 down_write(&uts_sem);
1243 errno = -EFAULT;
1244 if (!copy_from_user(tmp, name, len)) {
1245 struct new_utsname *u = utsname();
1246
1247 memcpy(u->nodename, tmp, len);
1248 memset(u->nodename + len, 0, sizeof(u->nodename) - len);
1249 errno = 0;
1250 }
1251 up_write(&uts_sem);
1252 return errno;
1253 }
1254
1255 #ifdef __ARCH_WANT_SYS_GETHOSTNAME
1256
1257 SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
1258 {
1259 int i, errno;
1260 struct new_utsname *u;
1261
1262 if (len < 0)
1263 return -EINVAL;
1264 down_read(&uts_sem);
1265 u = utsname();
1266 i = 1 + strlen(u->nodename);
1267 if (i > len)
1268 i = len;
1269 errno = 0;
1270 if (copy_to_user(name, u->nodename, i))
1271 errno = -EFAULT;
1272 up_read(&uts_sem);
1273 return errno;
1274 }
1275
1276 #endif
1277
1278 /*
1279 * Only setdomainname; getdomainname can be implemented by calling
1280 * uname()
1281 */
1282 SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
1283 {
1284 int errno;
1285 char tmp[__NEW_UTS_LEN];
1286
1287 if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
1288 return -EPERM;
1289 if (len < 0 || len > __NEW_UTS_LEN)
1290 return -EINVAL;
1291
1292 down_write(&uts_sem);
1293 errno = -EFAULT;
1294 if (!copy_from_user(tmp, name, len)) {
1295 struct new_utsname *u = utsname();
1296
1297 memcpy(u->domainname, tmp, len);
1298 memset(u->domainname + len, 0, sizeof(u->domainname) - len);
1299 errno = 0;
1300 }
1301 up_write(&uts_sem);
1302 return errno;
1303 }
1304
1305 SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim)
1306 {
1307 struct rlimit value;
1308 int ret;
1309
1310 ret = do_prlimit(current, resource, NULL, &value);
1311 if (!ret)
1312 ret = copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
1313
1314 return ret;
1315 }
1316
1317 #ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT
1318
1319 /*
1320 * Back compatibility for getrlimit. Needed for some apps.
1321 */
1322
1323 SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
1324 struct rlimit __user *, rlim)
1325 {
1326 struct rlimit x;
1327 if (resource >= RLIM_NLIMITS)
1328 return -EINVAL;
1329
1330 task_lock(current->group_leader);
1331 x = current->signal->rlim[resource];
1332 task_unlock(current->group_leader);
1333 if (x.rlim_cur > 0x7FFFFFFF)
1334 x.rlim_cur = 0x7FFFFFFF;
1335 if (x.rlim_max > 0x7FFFFFFF)
1336 x.rlim_max = 0x7FFFFFFF;
1337 return copy_to_user(rlim, &x, sizeof(x))?-EFAULT:0;
1338 }
1339
1340 #endif
1341
1342 static inline bool rlim64_is_infinity(__u64 rlim64)
1343 {
1344 #if BITS_PER_LONG < 64
1345 return rlim64 >= ULONG_MAX;
1346 #else
1347 return rlim64 == RLIM64_INFINITY;
1348 #endif
1349 }
1350
1351 static void rlim_to_rlim64(const struct rlimit *rlim, struct rlimit64 *rlim64)
1352 {
1353 if (rlim->rlim_cur == RLIM_INFINITY)
1354 rlim64->rlim_cur = RLIM64_INFINITY;
1355 else
1356 rlim64->rlim_cur = rlim->rlim_cur;
1357 if (rlim->rlim_max == RLIM_INFINITY)
1358 rlim64->rlim_max = RLIM64_INFINITY;
1359 else
1360 rlim64->rlim_max = rlim->rlim_max;
1361 }
1362
1363 static void rlim64_to_rlim(const struct rlimit64 *rlim64, struct rlimit *rlim)
1364 {
1365 if (rlim64_is_infinity(rlim64->rlim_cur))
1366 rlim->rlim_cur = RLIM_INFINITY;
1367 else
1368 rlim->rlim_cur = (unsigned long)rlim64->rlim_cur;
1369 if (rlim64_is_infinity(rlim64->rlim_max))
1370 rlim->rlim_max = RLIM_INFINITY;
1371 else
1372 rlim->rlim_max = (unsigned long)rlim64->rlim_max;
1373 }
1374
1375 /* make sure you are allowed to change @tsk limits before calling this */
1376 int do_prlimit(struct task_struct *tsk, unsigned int resource,
1377 struct rlimit *new_rlim, struct rlimit *old_rlim)
1378 {
1379 struct rlimit *rlim;
1380 int retval = 0;
1381
1382 if (resource >= RLIM_NLIMITS)
1383 return -EINVAL;
1384 if (new_rlim) {
1385 if (new_rlim->rlim_cur > new_rlim->rlim_max)
1386 return -EINVAL;
1387 if (resource == RLIMIT_NOFILE &&
1388 new_rlim->rlim_max > sysctl_nr_open)
1389 return -EPERM;
1390 }
1391
1392 /* protect tsk->signal and tsk->sighand from disappearing */
1393 read_lock(&tasklist_lock);
1394 if (!tsk->sighand) {
1395 retval = -ESRCH;
1396 goto out;
1397 }
1398
1399 rlim = tsk->signal->rlim + resource;
1400 task_lock(tsk->group_leader);
1401 if (new_rlim) {
1402 /* Keep the capable check against init_user_ns until
1403 cgroups can contain all limits */
1404 if (new_rlim->rlim_max > rlim->rlim_max &&
1405 !capable(CAP_SYS_RESOURCE))
1406 retval = -EPERM;
1407 if (!retval)
1408 retval = security_task_setrlimit(tsk->group_leader,
1409 resource, new_rlim);
1410 if (resource == RLIMIT_CPU && new_rlim->rlim_cur == 0) {
1411 /*
1412 * The caller is asking for an immediate RLIMIT_CPU
1413 * expiry. But we use the zero value to mean "it was
1414 * never set". So let's cheat and make it one second
1415 * instead
1416 */
1417 new_rlim->rlim_cur = 1;
1418 }
1419 }
1420 if (!retval) {
1421 if (old_rlim)
1422 *old_rlim = *rlim;
1423 if (new_rlim)
1424 *rlim = *new_rlim;
1425 }
1426 task_unlock(tsk->group_leader);
1427
1428 /*
1429 * RLIMIT_CPU handling. Note that the kernel fails to return an error
1430 * code if it rejected the user's attempt to set RLIMIT_CPU. This is a
1431 * very long-standing error, and fixing it now risks breakage of
1432 * applications, so we live with it
1433 */
1434 if (!retval && new_rlim && resource == RLIMIT_CPU &&
1435 new_rlim->rlim_cur != RLIM_INFINITY)
1436 update_rlimit_cpu(tsk, new_rlim->rlim_cur);
1437 out:
1438 read_unlock(&tasklist_lock);
1439 return retval;
1440 }
1441
1442 /* rcu lock must be held */
1443 static int check_prlimit_permission(struct task_struct *task)
1444 {
1445 const struct cred *cred = current_cred(), *tcred;
1446
1447 if (current == task)
1448 return 0;
1449
1450 tcred = __task_cred(task);
1451 if (cred->user->user_ns == tcred->user->user_ns &&
1452 (cred->uid == tcred->euid &&
1453 cred->uid == tcred->suid &&
1454 cred->uid == tcred->uid &&
1455 cred->gid == tcred->egid &&
1456 cred->gid == tcred->sgid &&
1457 cred->gid == tcred->gid))
1458 return 0;
1459 if (ns_capable(tcred->user->user_ns, CAP_SYS_RESOURCE))
1460 return 0;
1461
1462 return -EPERM;
1463 }
1464
1465 SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource,
1466 const struct rlimit64 __user *, new_rlim,
1467 struct rlimit64 __user *, old_rlim)
1468 {
1469 struct rlimit64 old64, new64;
1470 struct rlimit old, new;
1471 struct task_struct *tsk;
1472 int ret;
1473
1474 if (new_rlim) {
1475 if (copy_from_user(&new64, new_rlim, sizeof(new64)))
1476 return -EFAULT;
1477 rlim64_to_rlim(&new64, &new);
1478 }
1479
1480 rcu_read_lock();
1481 tsk = pid ? find_task_by_vpid(pid) : current;
1482 if (!tsk) {
1483 rcu_read_unlock();
1484 return -ESRCH;
1485 }
1486 ret = check_prlimit_permission(tsk);
1487 if (ret) {
1488 rcu_read_unlock();
1489 return ret;
1490 }
1491 get_task_struct(tsk);
1492 rcu_read_unlock();
1493
1494 ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL,
1495 old_rlim ? &old : NULL);
1496
1497 if (!ret && old_rlim) {
1498 rlim_to_rlim64(&old, &old64);
1499 if (copy_to_user(old_rlim, &old64, sizeof(old64)))
1500 ret = -EFAULT;
1501 }
1502
1503 put_task_struct(tsk);
1504 return ret;
1505 }
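/*
 * Illustrative userspace sketch (not part of this file): raising another
 * process's RLIMIT_NOFILE through glibc's prlimit() wrapper for this syscall
 * (needs _GNU_SOURCE, <sys/resource.h> and <stdio.h>); target_pid is a
 * made-up variable, and the caller must satisfy check_prlimit_permission()
 * above.
 *
 *	struct rlimit new = { .rlim_cur = 4096, .rlim_max = 4096 };
 *	struct rlimit old;
 *	if (prlimit(target_pid, RLIMIT_NOFILE, &new, &old) == 0)
 *		printf("was %llu/%llu\n",
 *		       (unsigned long long)old.rlim_cur,
 *		       (unsigned long long)old.rlim_max);
 */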
1506
1507 SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
1508 {
1509 struct rlimit new_rlim;
1510
1511 if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
1512 return -EFAULT;
1513 return do_prlimit(current, resource, &new_rlim, NULL);
1514 }
1515
1516 /*
1517 * It would make sense to put struct rusage in the task_struct,
1518 * except that would make the task_struct be *really big*. After
1519 * task_struct gets moved into malloc'ed memory, it would
1520 * make sense to do this. It will make moving the rest of the information
1521 * a lot simpler! (Which we're not doing right now because we're not
1522 * measuring them yet).
1523 *
1524 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
1525 * races with threads incrementing their own counters. But since word
1526 * reads are atomic, we either get new values or old values and we don't
1527 * care which for the sums. We always take the siglock to protect reading
1528 * the c* fields from p->signal from races with exit.c updating those
1529 * fields when reaping, so a sample either gets all the additions of a
1530 * given child after it's reaped, or none so this sample is before reaping.
1531 *
1532 * Locking:
1533  * We need to take the siglock for CHILDREN, SELF and BOTH
1534  * for the cases: current multithreaded, non-current single threaded, and
1535  * non-current multithreaded. Thread traversal is now safe with
1536  * the siglock held.
1537  * Strictly speaking, we do not need to take the siglock if we are current and
1538 * single threaded, as no one else can take our signal_struct away, no one
1539 * else can reap the children to update signal->c* counters, and no one else
1540 * can race with the signal-> fields. If we do not take any lock, the
1541 * signal-> fields could be read out of order while another thread was just
1542 * exiting. So we should place a read memory barrier when we avoid the lock.
1543 * On the writer side, write memory barrier is implied in __exit_signal
1544 * as __exit_signal releases the siglock spinlock after updating the signal->
1545 * fields. But we don't do this yet to keep things simple.
1546 *
1547 */
1548
1549 static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
1550 {
1551 r->ru_nvcsw += t->nvcsw;
1552 r->ru_nivcsw += t->nivcsw;
1553 r->ru_minflt += t->min_flt;
1554 r->ru_majflt += t->maj_flt;
1555 r->ru_inblock += task_io_get_inblock(t);
1556 r->ru_oublock += task_io_get_oublock(t);
1557 }
1558
1559 static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
1560 {
1561 struct task_struct *t;
1562 unsigned long flags;
1563 cputime_t tgutime, tgstime, utime, stime;
1564 unsigned long maxrss = 0;
1565
1566 memset((char *) r, 0, sizeof *r);
1567 utime = stime = cputime_zero;
1568
1569 if (who == RUSAGE_THREAD) {
1570 task_times(current, &utime, &stime);
1571 accumulate_thread_rusage(p, r);
1572 maxrss = p->signal->maxrss;
1573 goto out;
1574 }
1575
1576 if (!lock_task_sighand(p, &flags))
1577 return;
1578
1579 switch (who) {
1580 case RUSAGE_BOTH:
1581 case RUSAGE_CHILDREN:
1582 utime = p->signal->cutime;
1583 stime = p->signal->cstime;
1584 r->ru_nvcsw = p->signal->cnvcsw;
1585 r->ru_nivcsw = p->signal->cnivcsw;
1586 r->ru_minflt = p->signal->cmin_flt;
1587 r->ru_majflt = p->signal->cmaj_flt;
1588 r->ru_inblock = p->signal->cinblock;
1589 r->ru_oublock = p->signal->coublock;
1590 maxrss = p->signal->cmaxrss;
1591
1592 if (who == RUSAGE_CHILDREN)
1593 break;
1594
1595 case RUSAGE_SELF:
1596 thread_group_times(p, &tgutime, &tgstime);
1597 utime = cputime_add(utime, tgutime);
1598 stime = cputime_add(stime, tgstime);
1599 r->ru_nvcsw += p->signal->nvcsw;
1600 r->ru_nivcsw += p->signal->nivcsw;
1601 r->ru_minflt += p->signal->min_flt;
1602 r->ru_majflt += p->signal->maj_flt;
1603 r->ru_inblock += p->signal->inblock;
1604 r->ru_oublock += p->signal->oublock;
1605 if (maxrss < p->signal->maxrss)
1606 maxrss = p->signal->maxrss;
1607 t = p;
1608 do {
1609 accumulate_thread_rusage(t, r);
1610 t = next_thread(t);
1611 } while (t != p);
1612 break;
1613
1614 default:
1615 BUG();
1616 }
1617 unlock_task_sighand(p, &flags);
1618
1619 out:
1620 cputime_to_timeval(utime, &r->ru_utime);
1621 cputime_to_timeval(stime, &r->ru_stime);
1622
1623 if (who != RUSAGE_CHILDREN) {
1624 struct mm_struct *mm = get_task_mm(p);
1625 if (mm) {
1626 setmax_mm_hiwater_rss(&maxrss, mm);
1627 mmput(mm);
1628 }
1629 }
1630 r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */
1631 }
1632
1633 int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
1634 {
1635 struct rusage r;
1636 k_getrusage(p, who, &r);
1637 return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
1638 }
1639
1640 SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
1641 {
1642 if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
1643 who != RUSAGE_THREAD)
1644 return -EINVAL;
1645 return getrusage(current, who, ru);
1646 }
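/*
 * Illustrative userspace sketch (not part of this file): sampling the calling
 * process; ru_maxrss is already in kilobytes thanks to the conversion at the
 * end of k_getrusage() above.  Assumes <stdio.h> and <sys/resource.h>.
 *
 *	struct rusage ru;
 *	if (getrusage(RUSAGE_SELF, &ru) == 0)
 *		printf("peak RSS %ld kB, major faults %ld\n",
 *		       ru.ru_maxrss, ru.ru_majflt);
 */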
1647
1648 SYSCALL_DEFINE1(umask, int, mask)
1649 {
1650 mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
1651 return mask;
1652 }
1653
1654 SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
1655 unsigned long, arg4, unsigned long, arg5)
1656 {
1657 struct task_struct *me = current;
1658 unsigned char comm[sizeof(me->comm)];
1659 long error;
1660
1661 error = security_task_prctl(option, arg2, arg3, arg4, arg5);
1662 if (error != -ENOSYS)
1663 return error;
1664
1665 error = 0;
1666 switch (option) {
1667 case PR_SET_PDEATHSIG:
1668 if (!valid_signal(arg2)) {
1669 error = -EINVAL;
1670 break;
1671 }
1672 me->pdeath_signal = arg2;
1673 error = 0;
1674 break;
1675 case PR_GET_PDEATHSIG:
1676 error = put_user(me->pdeath_signal, (int __user *)arg2);
1677 break;
1678 case PR_GET_DUMPABLE:
1679 error = get_dumpable(me->mm);
1680 break;
1681 case PR_SET_DUMPABLE:
1682 if (arg2 < 0 || arg2 > 1) {
1683 error = -EINVAL;
1684 break;
1685 }
1686 set_dumpable(me->mm, arg2);
1687 error = 0;
1688 break;
1689
1690 case PR_SET_UNALIGN:
1691 error = SET_UNALIGN_CTL(me, arg2);
1692 break;
1693 case PR_GET_UNALIGN:
1694 error = GET_UNALIGN_CTL(me, arg2);
1695 break;
1696 case PR_SET_FPEMU:
1697 error = SET_FPEMU_CTL(me, arg2);
1698 break;
1699 case PR_GET_FPEMU:
1700 error = GET_FPEMU_CTL(me, arg2);
1701 break;
1702 case PR_SET_FPEXC:
1703 error = SET_FPEXC_CTL(me, arg2);
1704 break;
1705 case PR_GET_FPEXC:
1706 error = GET_FPEXC_CTL(me, arg2);
1707 break;
1708 case PR_GET_TIMING:
1709 error = PR_TIMING_STATISTICAL;
1710 break;
1711 case PR_SET_TIMING:
1712 if (arg2 != PR_TIMING_STATISTICAL)
1713 error = -EINVAL;
1714 else
1715 error = 0;
1716 break;
1717
1718 case PR_SET_NAME:
1719 comm[sizeof(me->comm)-1] = 0;
1720 if (strncpy_from_user(comm, (char __user *)arg2,
1721 sizeof(me->comm) - 1) < 0)
1722 return -EFAULT;
1723 set_task_comm(me, comm);
1724 return 0;
1725 case PR_GET_NAME:
1726 get_task_comm(comm, me);
1727 if (copy_to_user((char __user *)arg2, comm,
1728 sizeof(comm)))
1729 return -EFAULT;
1730 return 0;
1731 case PR_GET_ENDIAN:
1732 error = GET_ENDIAN(me, arg2);
1733 break;
1734 case PR_SET_ENDIAN:
1735 error = SET_ENDIAN(me, arg2);
1736 break;
1737
1738 case PR_GET_SECCOMP:
1739 error = prctl_get_seccomp();
1740 break;
1741 case PR_SET_SECCOMP:
1742 error = prctl_set_seccomp(arg2);
1743 break;
1744 case PR_GET_TSC:
1745 error = GET_TSC_CTL(arg2);
1746 break;
1747 case PR_SET_TSC:
1748 error = SET_TSC_CTL(arg2);
1749 break;
1750 case PR_TASK_PERF_EVENTS_DISABLE:
1751 error = perf_event_task_disable();
1752 break;
1753 case PR_TASK_PERF_EVENTS_ENABLE:
1754 error = perf_event_task_enable();
1755 break;
1756 case PR_GET_TIMERSLACK:
1757 error = current->timer_slack_ns;
1758 break;
1759 case PR_SET_TIMERSLACK:
1760 if (arg2 <= 0)
1761 current->timer_slack_ns =
1762 current->default_timer_slack_ns;
1763 else
1764 current->timer_slack_ns = arg2;
1765 error = 0;
1766 break;
1767 case PR_MCE_KILL:
1768 if (arg4 | arg5)
1769 return -EINVAL;
1770 switch (arg2) {
1771 case PR_MCE_KILL_CLEAR:
1772 if (arg3 != 0)
1773 return -EINVAL;
1774 current->flags &= ~PF_MCE_PROCESS;
1775 break;
1776 case PR_MCE_KILL_SET:
1777 current->flags |= PF_MCE_PROCESS;
1778 if (arg3 == PR_MCE_KILL_EARLY)
1779 current->flags |= PF_MCE_EARLY;
1780 else if (arg3 == PR_MCE_KILL_LATE)
1781 current->flags &= ~PF_MCE_EARLY;
1782 else if (arg3 == PR_MCE_KILL_DEFAULT)
1783 current->flags &=
1784 ~(PF_MCE_EARLY|PF_MCE_PROCESS);
1785 else
1786 return -EINVAL;
1787 break;
1788 default:
1789 return -EINVAL;
1790 }
1791 error = 0;
1792 break;
1793 case PR_MCE_KILL_GET:
1794 if (arg2 | arg3 | arg4 | arg5)
1795 return -EINVAL;
1796 if (current->flags & PF_MCE_PROCESS)
1797 error = (current->flags & PF_MCE_EARLY) ?
1798 PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
1799 else
1800 error = PR_MCE_KILL_DEFAULT;
1801 break;
1802 default:
1803 error = -EINVAL;
1804 break;
1805 }
1806 return error;
1807 }
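/*
 * Illustrative userspace sketch (not part of this file): two of the options
 * handled above.  PR_SET_NAME truncates to sizeof(comm) - 1 bytes (15) plus
 * the terminating NUL, and PR_GET_NAME wants a buffer of at least 16 bytes.
 * Assumes <sys/prctl.h>.
 *
 *	prctl(PR_SET_NAME, (unsigned long)"worker/0", 0, 0, 0);
 *
 *	char name[16];
 *	prctl(PR_GET_NAME, (unsigned long)name, 0, 0, 0);
 */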
1808
1809 SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
1810 struct getcpu_cache __user *, unused)
1811 {
1812 int err = 0;
1813 int cpu = raw_smp_processor_id();
1814 if (cpup)
1815 err |= put_user(cpu, cpup);
1816 if (nodep)
1817 err |= put_user(cpu_to_node(cpu), nodep);
1818 return err ? -EFAULT : 0;
1819 }
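/*
 * Illustrative userspace sketch (not part of this file): querying the current
 * CPU and NUMA node through the raw syscall (the third argument is the unused
 * cache pointer above).  Assumes <stdio.h>, <unistd.h> and <sys/syscall.h>.
 *
 *	unsigned int cpu, node;
 *	if (syscall(SYS_getcpu, &cpu, &node, NULL) == 0)
 *		printf("running on cpu %u, node %u\n", cpu, node);
 */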
1820
1821 char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff";
1822
1823 static void argv_cleanup(struct subprocess_info *info)
1824 {
1825 argv_free(info->argv);
1826 }
1827
1828 /**
1829 * orderly_poweroff - Trigger an orderly system poweroff
1830 * @force: force poweroff if command execution fails
1831 *
1832 * This may be called from any context to trigger a system shutdown.
1833 * If the orderly shutdown fails, it will force an immediate shutdown.
1834 */
1835 int orderly_poweroff(bool force)
1836 {
1837 int argc;
1838 char **argv = argv_split(GFP_ATOMIC, poweroff_cmd, &argc);
1839 static char *envp[] = {
1840 "HOME=/",
1841 "PATH=/sbin:/bin:/usr/sbin:/usr/bin",
1842 NULL
1843 };
1844 int ret = -ENOMEM;
1845 struct subprocess_info *info;
1846
1847 if (argv == NULL) {
1848 printk(KERN_WARNING "%s failed to allocate memory for \"%s\"\n",
1849 __func__, poweroff_cmd);
1850 goto out;
1851 }
1852
1853 info = call_usermodehelper_setup(argv[0], argv, envp, GFP_ATOMIC);
1854 if (info == NULL) {
1855 argv_free(argv);
1856 goto out;
1857 }
1858
1859 call_usermodehelper_setfns(info, NULL, argv_cleanup, NULL);
1860
1861 ret = call_usermodehelper_exec(info, UMH_NO_WAIT);
1862
1863 out:
1864 if (ret && force) {
1865 printk(KERN_WARNING "Failed to start orderly shutdown: "
1866 "forcing the issue\n");
1867
1868 /* I guess this should try to kick off some daemon to
1869 sync and poweroff asap. Or not even bother syncing
1870 if we're doing an emergency shutdown? */
1871 emergency_sync();
1872 kernel_power_off();
1873 }
1874
1875 return ret;
1876 }
1877 EXPORT_SYMBOL_GPL(orderly_poweroff);