/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files, etc. a user has claimed, in order to be
 * able to enforce per-user limits on system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/user_namespace.h>

struct user_namespace init_user_ns = {
	.kref = {
		.refcount	= ATOMIC_INIT(2),
	},
	.root_user = &root_user,
};
EXPORT_SYMBOL_GPL(init_user_ns);

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user IDs (i.e. setuid() and friends).
 */

#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(ns, uid)	((ns)->uidhash_table + __uidhashfn((uid)))
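/*
 * Worked example: __uidhashfn() folds the bits above UIDHASH_BITS back
 * onto the low bits before masking, so UIDs that differ only in their
 * high bits still land in different buckets.  With the illustrative
 * values UIDHASH_BITS == 8 and UIDHASH_SZ == 256 (the real values
 * depend on the kernel configuration), uid 1000 hashes to
 * ((1000 >> 8) + 1000) & 255 == 1003 & 255 == bucket 235.
 */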
static struct kmem_cache *uid_cachep;

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and
 * running local_bh_enable() with local interrupts disabled is an error:
 * we would run softirq callbacks, which can unconditionally enable
 * interrupts, and the caller of free_uid() did not expect that.
 */
static DEFINE_SPINLOCK(uidhash_lock);
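/*
 * The upshot of the rule above: uidhash_lock is only ever taken with
 * local interrupts disabled - either via spin_lock_irq() and
 * spin_lock_irqsave(), or via local_irq_save() followed by
 * atomic_dec_and_lock() - and never via spin_lock_bh().
 */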
struct user_struct root_user = {
	.__count	= ATOMIC_INIT(1),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
	.locked_shm	= 0,
#ifdef CONFIG_USER_SCHED
	.tg		= &init_task_group,
#endif
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}

static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;
	struct hlist_node *h;

	hlist_for_each_entry(user, h, hashent, uidhash_node) {
		if (user->uid == uid) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}
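/*
 * Note that a successful uid_hash_find() returns with an extra
 * reference taken on __count; every hit must eventually be balanced by
 * a free_uid() from the caller.
 */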
#ifdef CONFIG_USER_SCHED

static void sched_destroy_user(struct user_struct *up)
{
	sched_destroy_group(up->tg);
}

static int sched_create_user(struct user_struct *up)
{
	int rc = 0;

	up->tg = sched_create_group(&root_task_group);
	if (IS_ERR(up->tg))
		rc = -ENOMEM;

	return rc;
}

static void sched_switch_user(struct task_struct *p)
{
	sched_move_task(p);
}

#else	/* CONFIG_USER_SCHED */

static void sched_destroy_user(struct user_struct *up) { }
static int sched_create_user(struct user_struct *up) { return 0; }
static void sched_switch_user(struct task_struct *p) { }

#endif	/* CONFIG_USER_SCHED */

#if defined(CONFIG_USER_SCHED) && defined(CONFIG_SYSFS)

static struct kset *uids_kset; /* represents the /sys/kernel/uids/ directory */
static DEFINE_MUTEX(uids_mutex);

static inline void uids_mutex_lock(void)
{
	mutex_lock(&uids_mutex);
}

static inline void uids_mutex_unlock(void)
{
	mutex_unlock(&uids_mutex);
}

/* uid directory attributes */
#ifdef CONFIG_FAIR_GROUP_SCHED
static ssize_t cpu_shares_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%lu\n", sched_group_shares(up->tg));
}

static ssize_t cpu_shares_store(struct kobject *kobj,
				struct kobj_attribute *attr,
				const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	unsigned long shares;
	int rc;

	/* Reject input that does not parse as an unsigned long. */
	if (sscanf(buf, "%lu", &shares) != 1)
		return -EINVAL;

	rc = sched_group_set_shares(up->tg, shares);

	return (rc ? rc : size);
}

static struct kobj_attribute cpu_share_attr =
	__ATTR(cpu_share, 0644, cpu_shares_show, cpu_shares_store);
#endif

#ifdef CONFIG_RT_GROUP_SCHED
static ssize_t cpu_rt_runtime_show(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%lu\n", sched_group_rt_runtime(up->tg));
}

static ssize_t cpu_rt_runtime_store(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	unsigned long rt_runtime;
	int rc;

	if (sscanf(buf, "%lu", &rt_runtime) != 1)
		return -EINVAL;

	rc = sched_group_set_rt_runtime(up->tg, rt_runtime);

	return (rc ? rc : size);
}

static struct kobj_attribute cpu_rt_runtime_attr =
	__ATTR(cpu_rt_runtime, 0644, cpu_rt_runtime_show, cpu_rt_runtime_store);

static ssize_t cpu_rt_period_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%lu\n", sched_group_rt_period(up->tg));
}

static ssize_t cpu_rt_period_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	unsigned long rt_period;
	int rc;

	if (sscanf(buf, "%lu", &rt_period) != 1)
		return -EINVAL;

	rc = sched_group_set_rt_period(up->tg, rt_period);

	return (rc ? rc : size);
}

static struct kobj_attribute cpu_rt_period_attr =
	__ATTR(cpu_rt_period, 0644, cpu_rt_period_show, cpu_rt_period_store);
#endif

/* default attributes per uid directory */
static struct attribute *uids_attributes[] = {
#ifdef CONFIG_FAIR_GROUP_SCHED
	&cpu_share_attr.attr,
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	&cpu_rt_runtime_attr.attr,
	&cpu_rt_period_attr.attr,
#endif
	NULL
};
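/*
 * Example usage from userspace (uid 1000 is illustrative; the
 * directory only exists while that user has a live user_struct, i.e.
 * running processes):
 *
 *	cat /sys/kernel/uids/1000/cpu_share
 *	echo 2048 > /sys/kernel/uids/1000/cpu_share
 */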
/* the lifetime of user_struct is not managed by the core (now) */
static void uids_release(struct kobject *kobj)
{
	return;
}

static struct kobj_type uids_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = uids_attributes,
	.release = uids_release,
};

/* create /sys/kernel/uids/<uid>/cpu_share file for this user */
static int uids_user_create(struct user_struct *up)
{
	struct kobject *kobj = &up->kobj;
	int error;

	memset(kobj, 0, sizeof(struct kobject));
	kobj->kset = uids_kset;
	error = kobject_init_and_add(kobj, &uids_ktype, NULL, "%d", up->uid);
	if (error) {
		kobject_put(kobj);
		goto done;
	}

	kobject_uevent(kobj, KOBJ_ADD);
done:
	return error;
}

/* create these entries in sysfs:
 *	"/sys/kernel/uids" directory
 *	"/sys/kernel/uids/0" directory (for root user)
 *	"/sys/kernel/uids/0/cpu_share" file (for root user)
 */
int __init uids_sysfs_init(void)
{
	uids_kset = kset_create_and_add("uids", NULL, kernel_kobj);
	if (!uids_kset)
		return -ENOMEM;

	return uids_user_create(&root_user);
}

/* work function to remove sysfs directory for a user and free up
 * corresponding structures.
 */
static void remove_user_sysfs_dir(struct work_struct *w)
{
	struct user_struct *up = container_of(w, struct user_struct, work);
	unsigned long flags;
	int remove_user = 0;

	/* Make uid_hash_remove() + sysfs_remove_file() + kobject_del()
	 * atomic.
	 */
	uids_mutex_lock();

	local_irq_save(flags);

	if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
		uid_hash_remove(up);
		remove_user = 1;
		spin_unlock_irqrestore(&uidhash_lock, flags);
	} else {
		local_irq_restore(flags);
	}

	if (!remove_user)
		goto done;

	kobject_uevent(&up->kobj, KOBJ_REMOVE);
	kobject_del(&up->kobj);
	kobject_put(&up->kobj);

	sched_destroy_user(up);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);

done:
	uids_mutex_unlock();
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static inline void free_user(struct user_struct *up, unsigned long flags)
{
	/* restore back the count */
	atomic_inc(&up->__count);
	spin_unlock_irqrestore(&uidhash_lock, flags);

	INIT_WORK(&up->work, remove_user_sysfs_dir);
	schedule_work(&up->work);
}
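/*
 * Why the deferral above: free_user() can be reached with interrupts
 * disabled, but tearing down the sysfs directory may sleep.  So the
 * refcount is bumped back up and the actual removal (which drops that
 * resurrected reference again) is pushed to process context via
 * schedule_work().
 */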
#else	/* CONFIG_USER_SCHED && CONFIG_SYSFS */

int uids_sysfs_init(void) { return 0; }
static inline int uids_user_create(struct user_struct *up) { return 0; }
static inline void uids_mutex_lock(void) { }
static inline void uids_mutex_unlock(void) { }

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static inline void free_user(struct user_struct *up, unsigned long flags)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	sched_destroy_user(up);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);
}

#endif

/*
 * Locate the user_struct for the passed UID. If found, take a ref on it. The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;
	struct user_namespace *ns = current->nsproxy->user_ns;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(ns, uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}

void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
		free_user(up, flags);
	else
		local_irq_restore(flags);
}
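/*
 * free_uid() relies on atomic_dec_and_lock(): uidhash_lock is taken
 * only when the count is about to reach zero, so dropping a non-final
 * reference never touches the lock.  Interrupts are disabled around
 * the whole sequence because of the locking rules documented above
 * uidhash_lock.
 */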
struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
{
	struct hlist_head *hashent = uidhashentry(ns, uid);
	struct user_struct *up, *new;

	/* Make uid_hash_find() + uids_user_create() + uid_hash_insert()
	 * atomic.
	 */
	uids_mutex_lock();

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
		if (!new)
			goto out_unlock;

		new->uid = uid;
		atomic_set(&new->__count, 1);

		if (sched_create_user(new) < 0)
			goto out_free_user;

		if (uids_user_create(new))
			goto out_destroy_sched;

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			/* This case is not possible when CONFIG_USER_SCHED
			 * is defined, since we serialize alloc_uid() using
			 * uids_mutex. Hence no need to call
			 * sched_destroy_user() or remove_user_sysfs_dir().
			 */
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);

	}

	uids_mutex_unlock();

	return up;

out_destroy_sched:
	sched_destroy_user(new);
out_free_user:
	kmem_cache_free(uid_cachep, new);
out_unlock:
	uids_mutex_unlock();
	return NULL;
}
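/*
 * alloc_uid() uses the classic optimistic-allocation pattern: look up
 * the uid, drop the lock to allocate and initialize a new entry with
 * GFP_KERNEL (which may sleep, so the spinlock cannot be held), then
 * re-check under the lock in case another task inserted the same uid
 * in the meantime.  The loser of the race frees its copy.
 */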
void switch_uid(struct user_struct *new_user)
{
	struct user_struct *old_user;

	/* What if a process setreuid()'s and this brings the
	 * new uid over its NPROC rlimit? We can check this now
	 * cheaply with the new uid cache, so if it matters
	 * we should be checking for it. -DaveM
	 */
	old_user = current->user;
	atomic_inc(&new_user->processes);
	atomic_dec(&old_user->processes);
	switch_uid_keyring(new_user);
	current->user = new_user;
	sched_switch_user(current);

	/*
	 * We need to synchronize with __sigqueue_alloc()
	 * doing a get_uid(p->user).. If that saw the old
	 * user value, we need to wait until it has exited
	 * its critical region before we can free the old
	 * structure.
	 */
	smp_mb();
	spin_unlock_wait(&current->sighand->siglock);

	free_uid(old_user);
	suid_keys(current);
}
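/*
 * The smp_mb() above orders the store to current->user before the read
 * of siglock, and spin_unlock_wait() then blocks until any holder that
 * might still have seen old_user has released siglock.  Only then is
 * it safe to drop our reference: old_user cannot be freed out from
 * under a concurrent get_uid() in __sigqueue_alloc().
 */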
#ifdef CONFIG_USER_NS
void release_uids(struct user_namespace *ns)
{
	int i;
	unsigned long flags;
	struct hlist_head *head;
	struct hlist_node *nd;

	spin_lock_irqsave(&uidhash_lock, flags);
	/*
	 * collapse the chains so that the user_struct-s will
	 * be still alive, but not in hashes. subsequent free_uid()
	 * will free them.
	 */
	for (i = 0; i < UIDHASH_SZ; i++) {
		head = ns->uidhash_table + i;
		while (!hlist_empty(head)) {
			nd = head->first;
			hlist_del_init(nd);
		}
	}
	spin_unlock_irqrestore(&uidhash_lock, flags);

	free_uid(ns->root_user);
}
#endif
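/*
 * Note that release_uids() only unhashes the entries; each user_struct
 * remains pinned by the references held by its remaining tasks, and is
 * freed when the last of those references is dropped via free_uid().
 */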
static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}

module_init(uid_cache_init);