/*
 *  linux/kernel/sys.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/utsname.h>
#include <linux/mman.h>
#include <linux/smp_lock.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/key.h>
#include <linux/times.h>
#include <linux/posix-timers.h>
#include <linux/security.h>
#include <linux/dcookies.h>
#include <linux/suspend.h>
#include <linux/tty.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>
#include <linux/getcpu.h>

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/kprobes.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/unistd.h>

#ifndef SET_UNALIGN_CTL
# define SET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_UNALIGN_CTL
# define GET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEMU_CTL
# define SET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEMU_CTL
# define GET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEXC_CTL
# define SET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEXC_CTL
# define GET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_ENDIAN
# define GET_ENDIAN(a,b)	(-EINVAL)
#endif
#ifndef SET_ENDIAN
# define SET_ENDIAN(a,b)	(-EINVAL)
#endif

/*
 * This is where the system-wide overflow UID and GID are defined, for
 * architectures that now have 32-bit UID/GID but didn't in the past.
 */

int overflowuid = DEFAULT_OVERFLOWUID;
int overflowgid = DEFAULT_OVERFLOWGID;

#ifdef CONFIG_UID16
EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);
#endif

/*
 * The same as above, but for filesystems which can only store a 16-bit
 * UID and GID. As such, this is needed on all architectures.
 */

int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;

EXPORT_SYMBOL(fs_overflowuid);
EXPORT_SYMBOL(fs_overflowgid);

/*
 * This indicates whether you can reboot with ctrl-alt-del: the default is yes.
 */

int C_A_D = 1;
struct pid *cad_pid;
EXPORT_SYMBOL(cad_pid);

/*
 * Notifier list for kernel code which wants to be called
 * at shutdown. This is used to stop any idling DMA operations
 * and the like.
 */

static BLOCKING_NOTIFIER_HEAD(reboot_notifier_list);

/*
 * Notifier chain core routines. The exported routines below
 * are layered on top of these, with appropriate locking added.
 */

static int notifier_chain_register(struct notifier_block **nl,
		struct notifier_block *n)
{
	while ((*nl) != NULL) {
		if (n->priority > (*nl)->priority)
			break;
		nl = &((*nl)->next);
	}
	n->next = *nl;
	rcu_assign_pointer(*nl, n);
	return 0;
}

static int notifier_chain_unregister(struct notifier_block **nl,
		struct notifier_block *n)
{
	while ((*nl) != NULL) {
		if ((*nl) == n) {
			rcu_assign_pointer(*nl, n->next);
			return 0;
		}
		nl = &((*nl)->next);
	}
	return -ENOENT;
}

static int __kprobes notifier_call_chain(struct notifier_block **nl,
		unsigned long val, void *v)
{
	int ret = NOTIFY_DONE;
	struct notifier_block *nb, *next_nb;

	nb = rcu_dereference(*nl);
	while (nb) {
		next_nb = rcu_dereference(nb->next);
		ret = nb->notifier_call(nb, val, v);
		if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK)
			break;
		nb = next_nb;
	}
	return ret;
}

/*
 * Atomic notifier chain routines.  Registration and unregistration
 * use a spinlock, and call_chain is synchronized by RCU (no locks).
 */

/**
 * atomic_notifier_chain_register - Add notifier to an atomic notifier chain
 * @nh: Pointer to head of the atomic notifier chain
 * @n: New entry in notifier chain
 *
 * Adds a notifier to an atomic notifier chain.
 *
 * Currently always returns zero.
 */

int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
		struct notifier_block *n)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&nh->lock, flags);
	ret = notifier_chain_register(&nh->head, n);
	spin_unlock_irqrestore(&nh->lock, flags);
	return ret;
}

EXPORT_SYMBOL_GPL(atomic_notifier_chain_register);

/**
 * atomic_notifier_chain_unregister - Remove notifier from an atomic notifier chain
 * @nh: Pointer to head of the atomic notifier chain
 * @n: Entry to remove from notifier chain
 *
 * Removes a notifier from an atomic notifier chain.
 *
 * Returns zero on success or %-ENOENT on failure.
 */
int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
		struct notifier_block *n)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&nh->lock, flags);
	ret = notifier_chain_unregister(&nh->head, n);
	spin_unlock_irqrestore(&nh->lock, flags);
	synchronize_rcu();
	return ret;
}

EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister);

/**
 * atomic_notifier_call_chain - Call functions in an atomic notifier chain
 * @nh: Pointer to head of the atomic notifier chain
 * @val: Value passed unmodified to notifier function
 * @v: Pointer passed unmodified to notifier function
 *
 * Calls each function in a notifier chain in turn.  The functions
 * run in an atomic context, so they must not block.
 * This routine uses RCU to synchronize with changes to the chain.
 *
 * If the return value of the notifier can be and'ed
 * with %NOTIFY_STOP_MASK then atomic_notifier_call_chain()
 * will return immediately, with the return value of
 * the notifier function which halted execution.
 * Otherwise the return value is the return value
 * of the last notifier function called.
 */

int __kprobes atomic_notifier_call_chain(struct atomic_notifier_head *nh,
		unsigned long val, void *v)
{
	int ret;

	rcu_read_lock();
	ret = notifier_call_chain(&nh->head, val, v);
	rcu_read_unlock();
	return ret;
}

EXPORT_SYMBOL_GPL(atomic_notifier_call_chain);

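/*
 * Illustrative sketch (not part of this file): a minimal user of the
 * atomic notifier API above. The chain head, callback, and event value
 * are hypothetical names; ATOMIC_NOTIFIER_HEAD(), struct notifier_block,
 * and the register/call_chain routines are the real interface.
 *
 *	static ATOMIC_NOTIFIER_HEAD(example_chain);
 *
 *	static int example_event(struct notifier_block *nb,
 *			unsigned long val, void *data)
 *	{
 *		// Must not block: this runs under rcu_read_lock().
 *		return (val == 42) ? NOTIFY_STOP : NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call	= example_event,
 *		.priority	= 0,	// higher priority runs earlier
 *	};
 *
 *	atomic_notifier_chain_register(&example_chain, &example_nb);
 *	atomic_notifier_call_chain(&example_chain, 42, NULL);
 */
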
/*
 * Blocking notifier chain routines.  All access to the chain is
 * synchronized by an rwsem.
 */

/**
 * blocking_notifier_chain_register - Add notifier to a blocking notifier chain
 * @nh: Pointer to head of the blocking notifier chain
 * @n: New entry in notifier chain
 *
 * Adds a notifier to a blocking notifier chain.
 * Must be called in process context.
 *
 * Currently always returns zero.
 */

int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
		struct notifier_block *n)
{
	int ret;

	/*
	 * This code gets used during boot-up, when task switching is
	 * not yet working and interrupts must remain disabled.  At
	 * such times we must not call down_write().
	 */
	if (unlikely(system_state == SYSTEM_BOOTING))
		return notifier_chain_register(&nh->head, n);

	down_write(&nh->rwsem);
	ret = notifier_chain_register(&nh->head, n);
	up_write(&nh->rwsem);
	return ret;
}

EXPORT_SYMBOL_GPL(blocking_notifier_chain_register);

/**
 * blocking_notifier_chain_unregister - Remove notifier from a blocking notifier chain
 * @nh: Pointer to head of the blocking notifier chain
 * @n: Entry to remove from notifier chain
 *
 * Removes a notifier from a blocking notifier chain.
 * Must be called from process context.
 *
 * Returns zero on success or %-ENOENT on failure.
 */
int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
		struct notifier_block *n)
{
	int ret;

	/*
	 * This code gets used during boot-up, when task switching is
	 * not yet working and interrupts must remain disabled.  At
	 * such times we must not call down_write().
	 */
	if (unlikely(system_state == SYSTEM_BOOTING))
		return notifier_chain_unregister(&nh->head, n);

	down_write(&nh->rwsem);
	ret = notifier_chain_unregister(&nh->head, n);
	up_write(&nh->rwsem);
	return ret;
}

EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister);

/**
 * blocking_notifier_call_chain - Call functions in a blocking notifier chain
 * @nh: Pointer to head of the blocking notifier chain
 * @val: Value passed unmodified to notifier function
 * @v: Pointer passed unmodified to notifier function
 *
 * Calls each function in a notifier chain in turn.  The functions
 * run in a process context, so they are allowed to block.
 *
 * If the return value of the notifier can be and'ed
 * with %NOTIFY_STOP_MASK then blocking_notifier_call_chain()
 * will return immediately, with the return value of
 * the notifier function which halted execution.
 * Otherwise the return value is the return value
 * of the last notifier function called.
 */

int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
		unsigned long val, void *v)
{
	int ret = NOTIFY_DONE;

	/*
	 * We check the head outside the lock, but if this access is
	 * racy then it does not matter what the result of the test
	 * is, we re-check the list after having taken the lock anyway:
	 */
	if (rcu_dereference(nh->head)) {
		down_read(&nh->rwsem);
		ret = notifier_call_chain(&nh->head, val, v);
		up_read(&nh->rwsem);
	}
	return ret;
}

EXPORT_SYMBOL_GPL(blocking_notifier_call_chain);

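/*
 * Illustrative sketch (not part of this file): unlike the atomic
 * variant, a blocking-chain callback runs in process context and may
 * sleep. The head and callback names are hypothetical; msleep() is
 * from <linux/delay.h>.
 *
 *	static BLOCKING_NOTIFIER_HEAD(example_blocking_chain);
 *
 *	static int example_sleepy(struct notifier_block *nb,
 *			unsigned long val, void *data)
 *	{
 *		msleep(10);		// legal here: process context
 *		return NOTIFY_DONE;
 *	}
 */
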
/*
 * Raw notifier chain routines.  There is no protection;
 * the caller must provide it.  Use at your own risk!
 */

/**
 * raw_notifier_chain_register - Add notifier to a raw notifier chain
 * @nh: Pointer to head of the raw notifier chain
 * @n: New entry in notifier chain
 *
 * Adds a notifier to a raw notifier chain.
 * All locking must be provided by the caller.
 *
 * Currently always returns zero.
 */

int raw_notifier_chain_register(struct raw_notifier_head *nh,
		struct notifier_block *n)
{
	return notifier_chain_register(&nh->head, n);
}

EXPORT_SYMBOL_GPL(raw_notifier_chain_register);

/**
 * raw_notifier_chain_unregister - Remove notifier from a raw notifier chain
 * @nh: Pointer to head of the raw notifier chain
 * @n: Entry to remove from notifier chain
 *
 * Removes a notifier from a raw notifier chain.
 * All locking must be provided by the caller.
 *
 * Returns zero on success or %-ENOENT on failure.
 */
int raw_notifier_chain_unregister(struct raw_notifier_head *nh,
		struct notifier_block *n)
{
	return notifier_chain_unregister(&nh->head, n);
}

EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister);

/**
 * raw_notifier_call_chain - Call functions in a raw notifier chain
 * @nh: Pointer to head of the raw notifier chain
 * @val: Value passed unmodified to notifier function
 * @v: Pointer passed unmodified to notifier function
 *
 * Calls each function in a notifier chain in turn.  The functions
 * run in an undefined context.
 * All locking must be provided by the caller.
 *
 * If the return value of the notifier can be and'ed
 * with %NOTIFY_STOP_MASK then raw_notifier_call_chain()
 * will return immediately, with the return value of
 * the notifier function which halted execution.
 * Otherwise the return value is the return value
 * of the last notifier function called.
 */

int raw_notifier_call_chain(struct raw_notifier_head *nh,
		unsigned long val, void *v)
{
	return notifier_call_chain(&nh->head, val, v);
}

EXPORT_SYMBOL_GPL(raw_notifier_call_chain);
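
/*
 * Illustrative sketch (not part of this file): with a raw chain the
 * caller supplies all serialization itself, e.g. with its own
 * spinlock. The head and lock names are hypothetical.
 *
 *	static RAW_NOTIFIER_HEAD(example_raw_chain);
 *	static DEFINE_SPINLOCK(example_raw_lock);
 *
 *	spin_lock(&example_raw_lock);
 *	raw_notifier_call_chain(&example_raw_chain, 0, NULL);
 *	spin_unlock(&example_raw_lock);
 */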

/*
 * SRCU notifier chain routines.  Registration and unregistration
 * use a mutex, and call_chain is synchronized by SRCU (no locks).
 */

/**
 * srcu_notifier_chain_register - Add notifier to an SRCU notifier chain
 * @nh: Pointer to head of the SRCU notifier chain
 * @n: New entry in notifier chain
 *
 * Adds a notifier to an SRCU notifier chain.
 * Must be called in process context.
 *
 * Currently always returns zero.
 */

int srcu_notifier_chain_register(struct srcu_notifier_head *nh,
		struct notifier_block *n)
{
	int ret;

	/*
	 * This code gets used during boot-up, when task switching is
	 * not yet working and interrupts must remain disabled.  At
	 * such times we must not call mutex_lock().
	 */
	if (unlikely(system_state == SYSTEM_BOOTING))
		return notifier_chain_register(&nh->head, n);

	mutex_lock(&nh->mutex);
	ret = notifier_chain_register(&nh->head, n);
	mutex_unlock(&nh->mutex);
	return ret;
}

EXPORT_SYMBOL_GPL(srcu_notifier_chain_register);

/**
 * srcu_notifier_chain_unregister - Remove notifier from an SRCU notifier chain
 * @nh: Pointer to head of the SRCU notifier chain
 * @n: Entry to remove from notifier chain
 *
 * Removes a notifier from an SRCU notifier chain.
 * Must be called from process context.
 *
 * Returns zero on success or %-ENOENT on failure.
 */
int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh,
		struct notifier_block *n)
{
	int ret;

	/*
	 * This code gets used during boot-up, when task switching is
	 * not yet working and interrupts must remain disabled.  At
	 * such times we must not call mutex_lock().
	 */
	if (unlikely(system_state == SYSTEM_BOOTING))
		return notifier_chain_unregister(&nh->head, n);

	mutex_lock(&nh->mutex);
	ret = notifier_chain_unregister(&nh->head, n);
	mutex_unlock(&nh->mutex);
	synchronize_srcu(&nh->srcu);
	return ret;
}

EXPORT_SYMBOL_GPL(srcu_notifier_chain_unregister);

/**
 * srcu_notifier_call_chain - Call functions in an SRCU notifier chain
 * @nh: Pointer to head of the SRCU notifier chain
 * @val: Value passed unmodified to notifier function
 * @v: Pointer passed unmodified to notifier function
 *
 * Calls each function in a notifier chain in turn.  The functions
 * run in a process context, so they are allowed to block.
 *
 * If the return value of the notifier can be and'ed
 * with %NOTIFY_STOP_MASK then srcu_notifier_call_chain()
 * will return immediately, with the return value of
 * the notifier function which halted execution.
 * Otherwise the return value is the return value
 * of the last notifier function called.
 */

int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
		unsigned long val, void *v)
{
	int ret;
	int idx;

	idx = srcu_read_lock(&nh->srcu);
	ret = notifier_call_chain(&nh->head, val, v);
	srcu_read_unlock(&nh->srcu, idx);
	return ret;
}

EXPORT_SYMBOL_GPL(srcu_notifier_call_chain);

/**
 * srcu_init_notifier_head - Initialize an SRCU notifier head
 * @nh: Pointer to head of the SRCU notifier chain
 *
 * Unlike other sorts of notifier heads, SRCU notifier heads require
 * dynamic initialization.  Be sure to call this routine before
 * calling any of the other SRCU notifier routines for this head.
 *
 * If an SRCU notifier head is deallocated, it must first be cleaned
 * up by calling srcu_cleanup_notifier_head().  Otherwise the head's
 * per-cpu data (used by the SRCU mechanism) will leak.
 */

void srcu_init_notifier_head(struct srcu_notifier_head *nh)
{
	mutex_init(&nh->mutex);
	if (init_srcu_struct(&nh->srcu) < 0)
		BUG();
	nh->head = NULL;
}

EXPORT_SYMBOL_GPL(srcu_init_notifier_head);

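/*
 * Illustrative sketch (not part of this file): an SRCU head must be
 * initialized at run time before any use, and cleaned up before the
 * memory holding it is freed. The head name is hypothetical.
 *
 *	static struct srcu_notifier_head example_srcu_chain;
 *
 *	srcu_init_notifier_head(&example_srcu_chain);
 *	srcu_notifier_call_chain(&example_srcu_chain, 0, NULL);
 *	srcu_cleanup_notifier_head(&example_srcu_chain);
 */
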
/**
 * register_reboot_notifier - Register function to be called at reboot time
 * @nb: Info about notifier function to be called
 *
 * Registers a function with the list of functions
 * to be called at reboot time.
 *
 * Currently always returns zero, as blocking_notifier_chain_register()
 * always returns zero.
 */

int register_reboot_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&reboot_notifier_list, nb);
}

EXPORT_SYMBOL(register_reboot_notifier);

/**
 * unregister_reboot_notifier - Unregister previously registered reboot notifier
 * @nb: Hook to be unregistered
 *
 * Unregisters a previously registered reboot
 * notifier function.
 *
 * Returns zero on success, or %-ENOENT on failure.
 */

int unregister_reboot_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&reboot_notifier_list, nb);
}

EXPORT_SYMBOL(unregister_reboot_notifier);

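/*
 * Illustrative sketch (not part of this file): a driver that wants to
 * quiesce hardware at shutdown registers on the reboot chain. The
 * callback name is hypothetical; the event value is one of
 * SYS_RESTART, SYS_HALT, or SYS_POWER_OFF as passed in by the
 * kernel_*() helpers below.
 *
 *	static int example_reboot(struct notifier_block *nb,
 *			unsigned long event, void *cmd)
 *	{
 *		// Stop DMA, flush state, etc. May sleep.
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block example_reboot_nb = {
 *		.notifier_call = example_reboot,
 *	};
 *
 *	register_reboot_notifier(&example_reboot_nb);
 */
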
static int set_one_prio(struct task_struct *p, int niceval, int error)
{
	int no_nice;

	if (p->uid != current->euid &&
	    p->euid != current->euid && !capable(CAP_SYS_NICE)) {
		error = -EPERM;
		goto out;
	}
	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
		error = -EACCES;
		goto out;
	}
	no_nice = security_task_setnice(p, niceval);
	if (no_nice) {
		error = no_nice;
		goto out;
	}
	if (error == -ESRCH)
		error = 0;
	set_user_nice(p, niceval);
out:
	return error;
}

asmlinkage long sys_setpriority(int which, int who, int niceval)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	int error = -EINVAL;
	struct pid *pgrp;

	if (which > 2 || which < 0)
		goto out;

	/* normalize: avoid signed division (rounding problems) */
	error = -ESRCH;
	if (niceval < -20)
		niceval = -20;
	if (niceval > 19)
		niceval = 19;

	read_lock(&tasklist_lock);
	switch (which) {
	case PRIO_PROCESS:
		if (who)
			p = find_task_by_pid(who);
		else
			p = current;
		if (p)
			error = set_one_prio(p, niceval, error);
		break;
	case PRIO_PGRP:
		if (who)
			pgrp = find_pid(who);
		else
			pgrp = task_pgrp(current);
		do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
			error = set_one_prio(p, niceval, error);
		} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
		break;
	case PRIO_USER:
		user = current->user;
		if (!who)
			who = current->uid;
		else if ((who != current->uid) && !(user = find_user(who)))
			goto out_unlock;	/* No processes for this user */

		do_each_thread(g, p)
			if (p->uid == who)
				error = set_one_prio(p, niceval, error);
		while_each_thread(g, p);
		if (who != current->uid)
			free_uid(user);		/* For find_user() */
		break;
	}
out_unlock:
	read_unlock(&tasklist_lock);
out:
	return error;
}

/*
 * Ugh. To avoid negative return values, "getpriority()" will
 * not return the normal nice-value, but a negated value that
 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
 * to stay compatible.
 */
asmlinkage long sys_getpriority(int which, int who)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	long niceval, retval = -ESRCH;
	struct pid *pgrp;

	if (which > 2 || which < 0)
		return -EINVAL;

	read_lock(&tasklist_lock);
	switch (which) {
	case PRIO_PROCESS:
		if (who)
			p = find_task_by_pid(who);
		else
			p = current;
		if (p) {
			niceval = 20 - task_nice(p);
			if (niceval > retval)
				retval = niceval;
		}
		break;
	case PRIO_PGRP:
		if (who)
			pgrp = find_pid(who);
		else
			pgrp = task_pgrp(current);
		do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
			niceval = 20 - task_nice(p);
			if (niceval > retval)
				retval = niceval;
		} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
		break;
	case PRIO_USER:
		user = current->user;
		if (!who)
			who = current->uid;
		else if ((who != current->uid) && !(user = find_user(who)))
			goto out_unlock;	/* No processes for this user */

		do_each_thread(g, p)
			if (p->uid == who) {
				niceval = 20 - task_nice(p);
				if (niceval > retval)
					retval = niceval;
			}
		while_each_thread(g, p);
		if (who != current->uid)
			free_uid(user);		/* for find_user() */
		break;
	}
out_unlock:
	read_unlock(&tasklist_lock);

	return retval;
}

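/*
 * Illustrative arithmetic for the encoding above: a task at nice -20
 * comes back as 20 - (-20) = 40, and nice 19 as 20 - 19 = 1, so the
 * syscall's success range 40..1 never collides with negative errno
 * values. A raw caller undoes the offset itself (glibc's getpriority()
 * wrapper does this for you):
 *
 *	long ret = syscall(SYS_getpriority, PRIO_PROCESS, 0);
 *	int nice = 20 - ret;	// back to the -20..19 range
 */
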
/**
 * emergency_restart - reboot the system
 *
 * Without shutting down any hardware or taking any locks
 * reboot the system.  This is called when we know we are in
 * trouble so this is our best effort to reboot.  This is
 * safe to call in interrupt context.
 */
void emergency_restart(void)
{
	machine_emergency_restart();
}
EXPORT_SYMBOL_GPL(emergency_restart);

static void kernel_restart_prepare(char *cmd)
{
	blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
	system_state = SYSTEM_RESTART;
	device_shutdown();
}

/**
 * kernel_restart - reboot the system
 * @cmd: pointer to buffer containing command to execute for restart
 *	 or %NULL
 *
 * Shutdown everything and perform a clean reboot.
 * This is not safe to call in interrupt context.
 */
void kernel_restart(char *cmd)
{
	kernel_restart_prepare(cmd);
	if (!cmd)
		printk(KERN_EMERG "Restarting system.\n");
	else
		printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd);
	machine_restart(cmd);
}
EXPORT_SYMBOL_GPL(kernel_restart);

/**
 * kernel_kexec - reboot the system
 *
 * Move into place and start executing a preloaded standalone
 * executable.  If nothing was preloaded return an error.
 */
static void kernel_kexec(void)
{
#ifdef CONFIG_KEXEC
	struct kimage *image;
	image = xchg(&kexec_image, NULL);
	if (!image)
		return;
	kernel_restart_prepare(NULL);
	printk(KERN_EMERG "Starting new kernel\n");
	machine_shutdown();
	machine_kexec(image);
#endif
}

void kernel_shutdown_prepare(enum system_states state)
{
	blocking_notifier_call_chain(&reboot_notifier_list,
		(state == SYSTEM_HALT) ? SYS_HALT : SYS_POWER_OFF, NULL);
	system_state = state;
	device_shutdown();
}

/**
 * kernel_halt - halt the system
 *
 * Shutdown everything and perform a clean system halt.
 */
void kernel_halt(void)
{
	kernel_shutdown_prepare(SYSTEM_HALT);
	printk(KERN_EMERG "System halted.\n");
	machine_halt();
}

EXPORT_SYMBOL_GPL(kernel_halt);

/**
 * kernel_power_off - power_off the system
 *
 * Shutdown everything and perform a clean system power_off.
 */
void kernel_power_off(void)
{
	kernel_shutdown_prepare(SYSTEM_POWER_OFF);
	printk(KERN_EMERG "Power down.\n");
	machine_power_off();
}
EXPORT_SYMBOL_GPL(kernel_power_off);

/*
 * Reboot system call: for obvious reasons only root may call it,
 * and even root needs to set up some magic numbers in the registers
 * so that some mistake won't make this reboot the whole machine.
 * You can also set the meaning of the ctrl-alt-del-key here.
 *
 * reboot doesn't sync: do that yourself before calling this.
 */
asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user *arg)
{
	char buffer[256];

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT))
		return -EPERM;

	/* For safety, we require "magic" arguments. */
	if (magic1 != LINUX_REBOOT_MAGIC1 ||
	    (magic2 != LINUX_REBOOT_MAGIC2 &&
	     magic2 != LINUX_REBOOT_MAGIC2A &&
	     magic2 != LINUX_REBOOT_MAGIC2B &&
	     magic2 != LINUX_REBOOT_MAGIC2C))
		return -EINVAL;

	/*
	 * Instead of trying to make the power_off code look like
	 * halt when pm_power_off is not set, do it the easy way.
	 */
	if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
		cmd = LINUX_REBOOT_CMD_HALT;

	lock_kernel();
	switch (cmd) {
	case LINUX_REBOOT_CMD_RESTART:
		kernel_restart(NULL);
		break;

	case LINUX_REBOOT_CMD_CAD_ON:
		C_A_D = 1;
		break;

	case LINUX_REBOOT_CMD_CAD_OFF:
		C_A_D = 0;
		break;

	case LINUX_REBOOT_CMD_HALT:
		kernel_halt();
		unlock_kernel();
		do_exit(0);
		break;

	case LINUX_REBOOT_CMD_POWER_OFF:
		kernel_power_off();
		unlock_kernel();
		do_exit(0);
		break;

	case LINUX_REBOOT_CMD_RESTART2:
		if (strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1) < 0) {
			unlock_kernel();
			return -EFAULT;
		}
		buffer[sizeof(buffer) - 1] = '\0';

		kernel_restart(buffer);
		break;

	case LINUX_REBOOT_CMD_KEXEC:
		kernel_kexec();
		unlock_kernel();
		return -EINVAL;

#ifdef CONFIG_SOFTWARE_SUSPEND
	case LINUX_REBOOT_CMD_SW_SUSPEND:
	{
		int ret = software_suspend();
		unlock_kernel();
		return ret;
	}
#endif

	default:
		unlock_kernel();
		return -EINVAL;
	}
	unlock_kernel();
	return 0;
}

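/*
 * Illustrative sketch (not part of this file): invoking reboot(2) from
 * user space with the required magic numbers. Anything else in
 * magic1/magic2 yields -EINVAL; that is the safety interlock the
 * comment above describes.
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/reboot.h>
 *
 *	sync();		// the syscall itself does not sync
 *	syscall(SYS_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
 *		LINUX_REBOOT_CMD_RESTART, NULL);
 */
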
static void deferred_cad(struct work_struct *dummy)
{
	kernel_restart(NULL);
}

/*
 * This function gets called by ctrl-alt-del - ie the keyboard interrupt.
 * As it's called within an interrupt, it may NOT sync: the only choice
 * is whether to reboot at once, or just ignore the ctrl-alt-del.
 */
void ctrl_alt_del(void)
{
	static DECLARE_WORK(cad_work, deferred_cad);

	if (C_A_D)
		schedule_work(&cad_work);
	else
		kill_cad_pid(SIGINT, 1);
}

/*
 * Unprivileged users may change the real gid to the effective gid
 * or vice versa.  (BSD-style)
 *
 * If you set the real gid at all, or set the effective gid to a value not
 * equal to the real gid, then the saved gid is set to the new effective gid.
 *
 * This makes it possible for a setgid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setregid() will be
 * 100% compatible with BSD.  A program which uses just setgid() will be
 * 100% compatible with POSIX with saved IDs.
 *
 * SMP: There are no races, the GIDs are checked only by filesystem
 *      operations (as far as semantic preservation is concerned).
 */
asmlinkage long sys_setregid(gid_t rgid, gid_t egid)
{
	int old_rgid = current->gid;
	int old_egid = current->egid;
	int new_rgid = old_rgid;
	int new_egid = old_egid;
	int retval;

	retval = security_task_setgid(rgid, egid, (gid_t)-1, LSM_SETID_RE);
	if (retval)
		return retval;

	if (rgid != (gid_t) -1) {
		if ((old_rgid == rgid) ||
		    (current->egid == rgid) ||
		    capable(CAP_SETGID))
			new_rgid = rgid;
		else
			return -EPERM;
	}
	if (egid != (gid_t) -1) {
		if ((old_rgid == egid) ||
		    (current->egid == egid) ||
		    (current->sgid == egid) ||
		    capable(CAP_SETGID))
			new_egid = egid;
		else
			return -EPERM;
	}
	if (new_egid != old_egid) {
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	if (rgid != (gid_t) -1 ||
	    (egid != (gid_t) -1 && egid != old_rgid))
		current->sgid = new_egid;
	current->fsgid = new_egid;
	current->egid = new_egid;
	current->gid = new_rgid;
	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);
	return 0;
}

/*
 * setgid() is implemented like SysV w/ SAVED_IDS
 *
 * SMP: Same implicit races as above.
 */
asmlinkage long sys_setgid(gid_t gid)
{
	int old_egid = current->egid;
	int retval;

	retval = security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_ID);
	if (retval)
		return retval;

	if (capable(CAP_SETGID)) {
		if (old_egid != gid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->gid = current->egid = current->sgid = current->fsgid = gid;
	} else if ((gid == current->gid) || (gid == current->sgid)) {
		if (old_egid != gid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->egid = current->fsgid = gid;
	} else
		return -EPERM;

	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);
	return 0;
}

static int set_user(uid_t new_ruid, int dumpclear)
{
	struct user_struct *new_user;

	new_user = alloc_uid(new_ruid);
	if (!new_user)
		return -EAGAIN;

	if (atomic_read(&new_user->processes) >=
	    current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
	    new_user != &root_user) {
		free_uid(new_user);
		return -EAGAIN;
	}

	switch_uid(new_user);

	if (dumpclear) {
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	current->uid = new_ruid;
	return 0;
}

/*
 * Unprivileged users may change the real uid to the effective uid
 * or vice versa.  (BSD-style)
 *
 * If you set the real uid at all, or set the effective uid to a value not
 * equal to the real uid, then the saved uid is set to the new effective uid.
 *
 * This makes it possible for a setuid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setreuid() will be
 * 100% compatible with BSD.  A program which uses just setuid() will be
 * 100% compatible with POSIX with saved IDs.
 */
asmlinkage long sys_setreuid(uid_t ruid, uid_t euid)
{
	int old_ruid, old_euid, old_suid, new_ruid, new_euid;
	int retval;

	retval = security_task_setuid(ruid, euid, (uid_t)-1, LSM_SETID_RE);
	if (retval)
		return retval;

	new_ruid = old_ruid = current->uid;
	new_euid = old_euid = current->euid;
	old_suid = current->suid;

	if (ruid != (uid_t) -1) {
		new_ruid = ruid;
		if ((old_ruid != ruid) &&
		    (current->euid != ruid) &&
		    !capable(CAP_SETUID))
			return -EPERM;
	}

	if (euid != (uid_t) -1) {
		new_euid = euid;
		if ((old_ruid != euid) &&
		    (current->euid != euid) &&
		    (current->suid != euid) &&
		    !capable(CAP_SETUID))
			return -EPERM;
	}

	if (new_ruid != old_ruid && set_user(new_ruid, new_euid != old_euid) < 0)
		return -EAGAIN;

	if (new_euid != old_euid) {
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	current->fsuid = current->euid = new_euid;
	if (ruid != (uid_t) -1 ||
	    (euid != (uid_t) -1 && euid != old_ruid))
		current->suid = current->euid;
	current->fsuid = current->euid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RE);
}
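
/*
 * Illustrative sketch (not part of this file): the privilege-drop
 * pattern the comment above describes, as a user-space caller would
 * write it. Setting both IDs to the real uid also overwrites the
 * saved uid, so the old privileges cannot be regained.
 *
 *	uid_t real = getuid();
 *	if (setreuid(real, real) == -1)	// ruid, euid (and thus suid)
 *		abort();		// refuse to run half-privileged
 */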


/*
 * setuid() is implemented like SysV with SAVED_IDS
 *
 * Note that SAVED_ID's is deficient in that a setuid root program
 * like sendmail, for example, cannot set its uid to be a normal
 * user and then switch back, because if you're root, setuid() sets
 * the saved uid too.  If you don't like this, blame the bright people
 * in the POSIX committee and/or USG.  Note that the BSD-style setreuid()
 * will allow a root program to temporarily drop privileges and be able to
 * regain them by swapping the real and effective uid.
 */
asmlinkage long sys_setuid(uid_t uid)
{
	int old_euid = current->euid;
	int old_ruid, old_suid, new_suid;
	int retval;

	retval = security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_ID);
	if (retval)
		return retval;

	old_ruid = current->uid;
	old_suid = current->suid;
	new_suid = old_suid;

	if (capable(CAP_SETUID)) {
		if (uid != old_ruid && set_user(uid, old_euid != uid) < 0)
			return -EAGAIN;
		new_suid = uid;
	} else if ((uid != current->uid) && (uid != new_suid))
		return -EPERM;

	if (old_euid != uid) {
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	current->fsuid = current->euid = uid;
	current->suid = new_suid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_ID);
}


/*
 * This function implements a generic ability to update ruid, euid,
 * and suid.  This allows you to implement the 4.4 compatible seteuid().
 */
asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
{
	int old_ruid = current->uid;
	int old_euid = current->euid;
	int old_suid = current->suid;
	int retval;

	retval = security_task_setuid(ruid, euid, suid, LSM_SETID_RES);
	if (retval)
		return retval;

	if (!capable(CAP_SETUID)) {
		if ((ruid != (uid_t) -1) && (ruid != current->uid) &&
		    (ruid != current->euid) && (ruid != current->suid))
			return -EPERM;
		if ((euid != (uid_t) -1) && (euid != current->uid) &&
		    (euid != current->euid) && (euid != current->suid))
			return -EPERM;
		if ((suid != (uid_t) -1) && (suid != current->uid) &&
		    (suid != current->euid) && (suid != current->suid))
			return -EPERM;
	}
	if (ruid != (uid_t) -1) {
		if (ruid != current->uid && set_user(ruid, euid != current->euid) < 0)
			return -EAGAIN;
	}
	if (euid != (uid_t) -1) {
		if (euid != current->euid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->euid = euid;
	}
	current->fsuid = current->euid;
	if (suid != (uid_t) -1)
		current->suid = suid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RES);
}

asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid)
{
	int retval;

	if (!(retval = put_user(current->uid, ruid)) &&
	    !(retval = put_user(current->euid, euid)))
		retval = put_user(current->suid, suid);

	return retval;
}

/*
 * Same as above, but for rgid, egid, sgid.
 */
asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
{
	int retval;

	retval = security_task_setgid(rgid, egid, sgid, LSM_SETID_RES);
	if (retval)
		return retval;

	if (!capable(CAP_SETGID)) {
		if ((rgid != (gid_t) -1) && (rgid != current->gid) &&
		    (rgid != current->egid) && (rgid != current->sgid))
			return -EPERM;
		if ((egid != (gid_t) -1) && (egid != current->gid) &&
		    (egid != current->egid) && (egid != current->sgid))
			return -EPERM;
		if ((sgid != (gid_t) -1) && (sgid != current->gid) &&
		    (sgid != current->egid) && (sgid != current->sgid))
			return -EPERM;
	}
	if (egid != (gid_t) -1) {
		if (egid != current->egid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->egid = egid;
	}
	current->fsgid = current->egid;
	if (rgid != (gid_t) -1)
		current->gid = rgid;
	if (sgid != (gid_t) -1)
		current->sgid = sgid;

	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);
	return 0;
}

asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid)
{
	int retval;

	if (!(retval = put_user(current->gid, rgid)) &&
	    !(retval = put_user(current->egid, egid)))
		retval = put_user(current->sgid, sgid);

	return retval;
}


/*
 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
 * is used for "access()" and for the NFS daemon (letting nfsd stay at
 * whatever uid it wants to). It normally shadows "euid", except when
 * explicitly set by setfsuid() or for access.
 */
asmlinkage long sys_setfsuid(uid_t uid)
{
	int old_fsuid;

	old_fsuid = current->fsuid;
	if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS))
		return old_fsuid;

	if (uid == current->uid || uid == current->euid ||
	    uid == current->suid || uid == current->fsuid ||
	    capable(CAP_SETUID)) {
		if (uid != old_fsuid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->fsuid = uid;
	}

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	security_task_post_setuid(old_fsuid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS);

	return old_fsuid;
}

/*