/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/user_namespace.h>

/*
 * userns count is 1 for root user, 1 for init_uts_ns,
 * and 1 for... ?
 */
struct user_namespace init_user_ns = {
	.kref = {
		.refcount	= ATOMIC_INIT(3),
	},
	.owner = GLOBAL_ROOT_UID,
	.group = GLOBAL_ROOT_GID,
};
EXPORT_SYMBOL_GPL(init_user_ns);

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */

#define UIDHASH_BITS	(CONFIG_BASE_SMALL ? 3 : 7)
#define UIDHASH_SZ	(1 << UIDHASH_BITS)
#define UIDHASH_MASK	(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(uid)	(uidhash_table + __uidhashfn((__kuid_val(uid))))
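
/*
 * Note: __uidhashfn() folds the bits above UIDHASH_BITS back onto the low
 * bits before masking, so runs of consecutive uids do not all land in the
 * same bucket.  A quick worked example, assuming UIDHASH_BITS == 7:
 *
 *	uid 1000:  ((1000 >> 7) + 1000) & 127  ==  (7 + 1000) & 127  ==  111
 *
 * uidhashentry() additionally unwraps the kuid_t via __kuid_val() before
 * hashing, since the hash is computed over the raw uid value.
 */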

static struct kmem_cache *uid_cachep;
struct hlist_head uidhash_table[UIDHASH_SZ];

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);

/* root_user.__count is 1, for the init task cred */
struct user_struct root_user = {
	.__count	= ATOMIC_INIT(1),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
	.locked_shm	= 0,
	.uid		= GLOBAL_ROOT_UID,
};
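
/*
 * root_user is never allocated from uid_cachep: it is defined statically
 * above and pre-inserted into uidhash_table by uid_cache_init() below, so
 * lookups for GLOBAL_ROOT_UID always hit.
 */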

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}
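
/*
 * Look up a user_struct by uid in the given hash bucket.  On a hit, the
 * reference count is bumped before the pointer is returned, so the entry
 * cannot go away under the caller.  Returns NULL if no entry exists.
 * Must be called with uidhash_lock held.
 */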
static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;
	struct hlist_node *h;

	hlist_for_each_entry(user, h, hashent, uidhash_node) {
		if (uid_eq(user->uid, uid)) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
	__releases(&uidhash_lock)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);
}

/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.
 * The caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(kuid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}
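
/*
 * Drop a reference on a user_struct; the final reference frees it.
 * atomic_dec_and_lock() only takes uidhash_lock when the count actually
 * reaches zero, so the common case stays lock-free.  Typical caller
 * pairing (a sketch, not code from this file):
 *
 *	struct user_struct *u = find_user(uid);
 *	if (u) {
 *		... use u ...
 *		free_uid(u);
 *	}
 */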
void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
		free_user(up, flags);	/* releases the lock, restores flags */
	else
		local_irq_restore(flags);
}
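
/*
 * Find the user_struct for the given uid, allocating and hashing a new
 * one if none exists yet.  The GFP_KERNEL allocation may sleep, so it is
 * done outside uidhash_lock; the bucket is then re-checked under the lock
 * in case another task inserted the same uid in the meantime.
 */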
struct user_struct *alloc_uid(kuid_t uid)
{
	struct hlist_head *hashent = uidhashentry(uid);
	struct user_struct *up, *new;

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
		if (!new)
			goto out_unlock;

		new->uid = uid;
		atomic_set(&new->__count, 1);

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	return up;

out_unlock:
	return NULL;
}
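
/*
 * Boot-time setup: create the slab cache behind uid_cachep, initialise
 * every hash bucket, and seed the table with root_user, since init is
 * already running as root by the time this runs.
 */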
static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}
module_init(uid_cache_init);