user namespaces: require cap_set{ug}id for CLONE_NEWUSER
kernel/user.c
/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/user_namespace.h>
#include "cred-internals.h"

struct user_namespace init_user_ns = {
	.kref = {
		.refcount	= ATOMIC_INIT(1),
	},
	.creator = &root_user,
};
EXPORT_SYMBOL_GPL(init_user_ns);

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */

#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(ns, uid)	((ns)->uidhash_table + __uidhashfn((uid)))

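/*
 * Illustrative sketch (not part of the original file): how __uidhashfn()
 * folds a uid into a hash bucket. UIDHASH_BITS == 7 and UIDHASH_MASK == 127
 * are assumptions taken from <linux/sched.h> of this era; the snippet below
 * is plain userspace C.
 */
#include <stdio.h>

int main(void)
{
	unsigned int uid = 1000;
	/* (1000 >> 7) + 1000 = 1007; 1007 & 127 = 111 */
	unsigned int bucket = ((uid >> 7) + uid) & 127;

	printf("uid %u -> bucket %u\n", uid, bucket);	/* prints: uid 1000 -> bucket 111 */
	return 0;
}
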
static struct kmem_cache *uid_cachep;

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);
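/*
 * Editorial note: because of the constraint above, the last reference is
 * dropped with local interrupts disabled rather than under spin_lock_bh().
 * This is the shape free_uid() below implements (free_user() releases the
 * lock and restores the irq state):
 *
 *	local_irq_save(flags);
 *	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
 *		free_user(up, flags);
 *	else
 *		local_irq_restore(flags);
 */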

/* root_user.__count is 2, 1 for init task cred, 1 for init_user_ns->creator */
struct user_struct root_user = {
	.__count	= ATOMIC_INIT(2),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
	.locked_shm	= 0,
	.user_ns	= &init_user_ns,
#ifdef CONFIG_USER_SCHED
	.tg		= &init_task_group,
#endif
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}

static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;
	struct hlist_node *h;

	hlist_for_each_entry(user, h, hashent, uidhash_node) {
		if (user->uid == uid) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

#ifdef CONFIG_USER_SCHED

static void sched_destroy_user(struct user_struct *up)
{
	sched_destroy_group(up->tg);
}

static int sched_create_user(struct user_struct *up)
{
	int rc = 0;

	up->tg = sched_create_group(&root_task_group);
	if (IS_ERR(up->tg))
		rc = -ENOMEM;

	return rc;
}

#else	/* CONFIG_USER_SCHED */

static void sched_destroy_user(struct user_struct *up) { }
static int sched_create_user(struct user_struct *up) { return 0; }

#endif	/* CONFIG_USER_SCHED */

#if defined(CONFIG_USER_SCHED) && defined(CONFIG_SYSFS)

static struct kset *uids_kset;	/* represents the /sys/kernel/uids/ directory */
static DEFINE_MUTEX(uids_mutex);

static inline void uids_mutex_lock(void)
{
	mutex_lock(&uids_mutex);
}

static inline void uids_mutex_unlock(void)
{
	mutex_unlock(&uids_mutex);
}

/* uid directory attributes */
#ifdef CONFIG_FAIR_GROUP_SCHED
static ssize_t cpu_shares_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%lu\n", sched_group_shares(up->tg));
}

static ssize_t cpu_shares_store(struct kobject *kobj,
				struct kobj_attribute *attr,
				const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	unsigned long shares;
	int rc;

	sscanf(buf, "%lu", &shares);

	rc = sched_group_set_shares(up->tg, shares);

	return (rc ? rc : size);
}

static struct kobj_attribute cpu_share_attr =
	__ATTR(cpu_share, 0644, cpu_shares_show, cpu_shares_store);
#endif

#ifdef CONFIG_RT_GROUP_SCHED
static ssize_t cpu_rt_runtime_show(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%ld\n", sched_group_rt_runtime(up->tg));
}

static ssize_t cpu_rt_runtime_store(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	unsigned long rt_runtime;
	int rc;

	sscanf(buf, "%ld", &rt_runtime);

	rc = sched_group_set_rt_runtime(up->tg, rt_runtime);

	return (rc ? rc : size);
}

static struct kobj_attribute cpu_rt_runtime_attr =
	__ATTR(cpu_rt_runtime, 0644, cpu_rt_runtime_show, cpu_rt_runtime_store);

static ssize_t cpu_rt_period_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%lu\n", sched_group_rt_period(up->tg));
}

static ssize_t cpu_rt_period_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	unsigned long rt_period;
	int rc;

	sscanf(buf, "%lu", &rt_period);

	rc = sched_group_set_rt_period(up->tg, rt_period);

	return (rc ? rc : size);
}

static struct kobj_attribute cpu_rt_period_attr =
	__ATTR(cpu_rt_period, 0644, cpu_rt_period_show, cpu_rt_period_store);
#endif

/* default attributes per uid directory */
static struct attribute *uids_attributes[] = {
#ifdef CONFIG_FAIR_GROUP_SCHED
	&cpu_share_attr.attr,
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	&cpu_rt_runtime_attr.attr,
	&cpu_rt_period_attr.attr,
#endif
	NULL
};

/* the lifetime of user_struct is not managed by the core (now) */
static void uids_release(struct kobject *kobj)
{
	return;
}

static struct kobj_type uids_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = uids_attributes,
	.release = uids_release,
};

/* create /sys/kernel/uids/<uid>/cpu_share file for this user */
static int uids_user_create(struct user_struct *up)
{
	struct kobject *kobj = &up->kobj;
	int error;

	memset(kobj, 0, sizeof(struct kobject));
	if (up->user_ns != &init_user_ns)
		return 0;
	kobj->kset = uids_kset;
	error = kobject_init_and_add(kobj, &uids_ktype, NULL, "%d", up->uid);
	if (error) {
		kobject_put(kobj);
		goto done;
	}

	kobject_uevent(kobj, KOBJ_ADD);
done:
	return error;
}

/* create these entries in sysfs:
 * 	"/sys/kernel/uids" directory
 * 	"/sys/kernel/uids/0" directory (for root user)
 * 	"/sys/kernel/uids/0/cpu_share" file (for root user)
 */
int __init uids_sysfs_init(void)
{
	uids_kset = kset_create_and_add("uids", NULL, kernel_kobj);
	if (!uids_kset)
		return -ENOMEM;

	return uids_user_create(&root_user);
}
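
/*
 * Illustrative usage (assumed shell session, not from this file): once a
 * user's directory exists under /sys/kernel/uids/, its group-scheduler
 * knob is an ordinary sysfs attribute, e.g.
 *
 *	$ cat /sys/kernel/uids/1000/cpu_share
 *	1024
 *	$ echo 2048 > /sys/kernel/uids/1000/cpu_share
 *
 * The value 1024 shown here is an assumed default share, not taken from
 * this file.
 */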

/* work function to remove sysfs directory for a user and free up
 * corresponding structures.
 */
static void remove_user_sysfs_dir(struct work_struct *w)
{
	struct user_struct *up = container_of(w, struct user_struct, work);
	unsigned long flags;
	int remove_user = 0;

	if (up->user_ns != &init_user_ns)
		return;
	/* Make uid_hash_remove() + sysfs_remove_file() + kobject_del()
	 * atomic.
	 */
	uids_mutex_lock();

	local_irq_save(flags);

	if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
		uid_hash_remove(up);
		remove_user = 1;
		spin_unlock_irqrestore(&uidhash_lock, flags);
	} else {
		local_irq_restore(flags);
	}

	if (!remove_user)
		goto done;

	kobject_uevent(&up->kobj, KOBJ_REMOVE);
	kobject_del(&up->kobj);
	kobject_put(&up->kobj);

	sched_destroy_user(up);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);

done:
	uids_mutex_unlock();
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
{
	/* restore the count */
	atomic_inc(&up->__count);
	spin_unlock_irqrestore(&uidhash_lock, flags);

	put_user_ns(up->user_ns);
	INIT_WORK(&up->work, remove_user_sysfs_dir);
	schedule_work(&up->work);
}

#else	/* CONFIG_USER_SCHED && CONFIG_SYSFS */

int uids_sysfs_init(void) { return 0; }
static inline int uids_user_create(struct user_struct *up) { return 0; }
static inline void uids_mutex_lock(void) { }
static inline void uids_mutex_unlock(void) { }

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	sched_destroy_user(up);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	put_user_ns(up->user_ns);
	kmem_cache_free(uid_cachep, up);
}

#endif

/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.  The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;
	struct user_namespace *ns = current_user_ns();

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(ns, uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}

void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
		free_user(up, flags);
	else
		local_irq_restore(flags);
}
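
/*
 * Illustrative usage sketch (assumed caller, not from this file): a
 * reference taken with find_user() must be dropped with free_uid():
 *
 *	struct user_struct *up = find_user(uid);
 *	if (up) {
 *		int nproc = atomic_read(&up->processes);
 *		...
 *		free_uid(up);
 *	}
 */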

struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
{
	struct hlist_head *hashent = uidhashentry(ns, uid);
	struct user_struct *up, *new;

	/* Make uid_hash_find() + uids_user_create() + uid_hash_insert()
	 * atomic.
	 */
	uids_mutex_lock();

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
		if (!new)
			goto out_unlock;

		new->uid = uid;
		atomic_set(&new->__count, 1);

		if (sched_create_user(new) < 0)
			goto out_free_user;

		new->user_ns = get_user_ns(ns);

		if (uids_user_create(new))
			goto out_destroy_sched;

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			/* This case is not possible when CONFIG_USER_SCHED
			 * is defined, since we serialize alloc_uid() using
			 * uids_mutex. Hence no need to call
			 * sched_destroy_user() or remove_user_sysfs_dir().
			 */
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	uids_mutex_unlock();

	return up;

out_destroy_sched:
	sched_destroy_user(new);
	put_user_ns(new->user_ns);
out_free_user:
	kmem_cache_free(uid_cachep, new);
out_unlock:
	uids_mutex_unlock();
	return NULL;
}

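/*
 * Editorial note: alloc_uid() is an instance of the classic optimistic
 * allocation pattern - allocate and initialize the new object outside the
 * spinlock, then re-check the hash under the lock and free the new object
 * if another thread won the race. With CONFIG_USER_SCHED the uids_mutex
 * serializes callers, so the race branch is unreachable, as the inline
 * comment above notes.
 */
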
static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for(n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}

module_init(uid_cache_init);