kernel/pid_namespace.c
/*
 * Pid namespaces
 *
 * Authors:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *    Many thanks to Oleg Nesterov for comments and help
 *
 */

#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <linux/user_namespace.h>
#include <linux/syscalls.h>
#include <linux/err.h>
#include <linux/acct.h>
#include <linux/slab.h>
#include <linux/proc_ns.h>
#include <linux/reboot.h>
#include <linux/export.h>

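/*
 * One kmem_cache per pid namespace nesting depth: a struct pid carries one
 * struct upid for every level it is visible in, so its size depends on how
 * deeply the owning namespace is nested.  The caches are kept on a list and
 * shared between namespaces of the same depth.
 */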
struct pid_cache {
        int nr_ids;
        char name[16];
        struct kmem_cache *cachep;
        struct list_head list;
};

static LIST_HEAD(pid_caches_lh);
static DEFINE_MUTEX(pid_caches_mutex);
static struct kmem_cache *pid_ns_cachep;

/*
 * creates the kmem cache to allocate pids from.
 * @nr_ids: the number of numerical ids this pid will have to carry
 */

static struct kmem_cache *create_pid_cachep(int nr_ids)
{
        struct pid_cache *pcache;
        struct kmem_cache *cachep;

        mutex_lock(&pid_caches_mutex);
        list_for_each_entry(pcache, &pid_caches_lh, list)
                if (pcache->nr_ids == nr_ids)
                        goto out;

        pcache = kmalloc(sizeof(struct pid_cache), GFP_KERNEL);
        if (pcache == NULL)
                goto err_alloc;

        snprintf(pcache->name, sizeof(pcache->name), "pid_%d", nr_ids);
        cachep = kmem_cache_create(pcache->name,
                        sizeof(struct pid) + (nr_ids - 1) * sizeof(struct upid),
                        0, SLAB_HWCACHE_ALIGN, NULL);
        if (cachep == NULL)
                goto err_cachep;

        pcache->nr_ids = nr_ids;
        pcache->cachep = cachep;
        list_add(&pcache->list, &pid_caches_lh);
out:
        mutex_unlock(&pid_caches_mutex);
        return pcache->cachep;

err_cachep:
        kfree(pcache);
err_alloc:
        mutex_unlock(&pid_caches_mutex);
        return NULL;
}

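/*
 * Deferred teardown of this namespace's proc mount.  free_pid() schedules
 * this work item once the last pid in the namespace goes away; the actual
 * unmount has to happen from process context, not from under the locks
 * held while freeing a pid.
 */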
static void proc_cleanup_work(struct work_struct *work)
{
        struct pid_namespace *ns = container_of(work, struct pid_namespace, proc_work);
        pid_ns_release_proc(ns);
}

/* MAX_PID_NS_LEVEL is needed for limiting size of 'struct pid' */
#define MAX_PID_NS_LEVEL 32

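/*
 * Allocate and set up a namespace one level below @parent_pid_ns: the first
 * pidmap page (with pid 0 reserved), a pid cache sized for the new nesting
 * depth, and the inode number that identifies the namespace in proc.
 * Returns an ERR_PTR() on failure.
 */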
static struct pid_namespace *create_pid_namespace(struct user_namespace *user_ns,
        struct pid_namespace *parent_pid_ns)
{
        struct pid_namespace *ns;
        unsigned int level = parent_pid_ns->level + 1;
        int i;
        int err;

        if (level > MAX_PID_NS_LEVEL) {
                err = -EINVAL;
                goto out;
        }

        err = -ENOMEM;
        ns = kmem_cache_zalloc(pid_ns_cachep, GFP_KERNEL);
        if (ns == NULL)
                goto out;

        ns->pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (!ns->pidmap[0].page)
                goto out_free;

        ns->pid_cachep = create_pid_cachep(level + 1);
        if (ns->pid_cachep == NULL)
                goto out_free_map;

        err = proc_alloc_inum(&ns->proc_inum);
        if (err)
                goto out_free_map;

        kref_init(&ns->kref);
        ns->level = level;
        ns->parent = get_pid_ns(parent_pid_ns);
        ns->user_ns = get_user_ns(user_ns);
        ns->nr_hashed = PIDNS_HASH_ADDING;
        INIT_WORK(&ns->proc_work, proc_cleanup_work);

        set_bit(0, ns->pidmap[0].page);
        atomic_set(&ns->pidmap[0].nr_free, BITS_PER_PAGE - 1);

        for (i = 1; i < PIDMAP_ENTRIES; i++)
                atomic_set(&ns->pidmap[i].nr_free, BITS_PER_PAGE);

        return ns;

out_free_map:
        kfree(ns->pidmap[0].page);
out_free:
        kmem_cache_free(pid_ns_cachep, ns);
out:
        return ERR_PTR(err);
}

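/*
 * The namespace itself is freed only after an RCU grace period: lock-free
 * readers may still be dereferencing a pointer to it (e.g. one obtained via
 * task_active_pid_ns()) when the last reference is dropped.
 */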
static void delayed_free_pidns(struct rcu_head *p)
{
        kmem_cache_free(pid_ns_cachep,
                        container_of(p, struct pid_namespace, rcu));
}

static void destroy_pid_namespace(struct pid_namespace *ns)
{
        int i;

        proc_free_inum(ns->proc_inum);
        for (i = 0; i < PIDMAP_ENTRIES; i++)
                kfree(ns->pidmap[i].page);
        put_user_ns(ns->user_ns);
        call_rcu(&ns->rcu, delayed_free_pidns);
}

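/*
 * Called on clone()/unshare().  Without CLONE_NEWPID the child simply shares
 * (and pins) the parent's namespace; with it, a fresh namespace is created
 * one level below the caller's active namespace.  Creating a namespace under
 * anything other than the currently active one is rejected.
 */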
struct pid_namespace *copy_pid_ns(unsigned long flags,
        struct user_namespace *user_ns, struct pid_namespace *old_ns)
{
        if (!(flags & CLONE_NEWPID))
                return get_pid_ns(old_ns);
        if (task_active_pid_ns(current) != old_ns)
                return ERR_PTR(-EINVAL);
        return create_pid_namespace(user_ns, old_ns);
}

static void free_pid_ns(struct kref *kref)
{
        struct pid_namespace *ns;

        ns = container_of(kref, struct pid_namespace, kref);
        destroy_pid_namespace(ns);
}

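/*
 * Drop a reference.  When a namespace is destroyed, the reference it held on
 * its parent must be dropped as well, so walk up the chain iteratively
 * instead of recursing through an arbitrarily deep hierarchy.
 */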
void put_pid_ns(struct pid_namespace *ns)
{
        struct pid_namespace *parent;

        while (ns != &init_pid_ns) {
                parent = ns->parent;
                if (!kref_put(&ns->kref, free_pid_ns))
                        break;
                ns = parent;
        }
}
EXPORT_SYMBOL_GPL(put_pid_ns);

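/*
 * Called from the exit path of a namespace's init (the child_reaper): kill
 * every remaining task in the namespace, then wait until they have all been
 * reaped before letting the reaper itself exit.
 */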
void zap_pid_ns_processes(struct pid_namespace *pid_ns)
{
        int nr;
        int rc;
        struct task_struct *task, *me = current;
        int init_pids = thread_group_leader(me) ? 1 : 2;

        /* Don't allow any more processes into the pid namespace */
        disable_pid_allocation(pid_ns);

        /*
         * Ignore SIGCHLD so that any terminated children are autoreaped.
         * This speeds up the namespace shutdown; see also the comment
         * below.
         */
        spin_lock_irq(&me->sighand->siglock);
        me->sighand->action[SIGCHLD - 1].sa.sa_handler = SIG_IGN;
        spin_unlock_irq(&me->sighand->siglock);

        /*
         * The last thread in the cgroup-init thread group is terminating.
         * Find the remaining pids in the namespace, signal them and wait
         * for them to exit.
         *
         * Note: This signals each thread in the namespace - even those that
         *       belong to the same thread group. To avoid this, we would have
         *       to walk the entire tasklist looking for processes in this
         *       namespace, but that could be unnecessarily expensive if the
         *       pid namespace has just a few processes. Or we would need to
         *       maintain a tasklist for each pid namespace.
         */
        read_lock(&tasklist_lock);
        nr = next_pidmap(pid_ns, 1);
        while (nr > 0) {
                rcu_read_lock();

                task = pid_task(find_vpid(nr), PIDTYPE_PID);
                if (task && !__fatal_signal_pending(task))
                        send_sig_info(SIGKILL, SEND_SIG_FORCED, task);

                rcu_read_unlock();

                nr = next_pidmap(pid_ns, nr);
        }
        read_unlock(&tasklist_lock);

        /*
         * Reap the EXIT_ZOMBIE children we had before we ignored SIGCHLD.
         * sys_wait4() will also block until our children traced from the
         * parent namespace are detached and become EXIT_DEAD.
         */
        do {
                clear_thread_flag(TIF_SIGPENDING);
                rc = sys_wait4(-1, NULL, __WALL, NULL);
        } while (rc != -ECHILD);

        /*
         * sys_wait4() above can't reap the EXIT_DEAD children but we do not
         * really care, we could reparent them to the global init. We could
         * exit and reap ->child_reaper even if it is not the last thread in
         * this pid_ns, free_pid(nr_hashed == 0) calls proc_cleanup_work(),
         * pid_ns can not go away until proc_kill_sb() drops the reference.
         *
         * But this ns can also have other tasks injected by setns()+fork().
         * Again, ignoring the user visible semantics we do not really need
         * to wait until they are all reaped, but they can be reparented to
         * us and thus we need to ensure that pid->child_reaper stays valid
         * until they all go away. See free_pid()->wake_up_process().
         *
         * We rely on ignored SIGCHLD, an injected zombie must be autoreaped
         * if reparented.
         */
        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (pid_ns->nr_hashed == init_pids)
                        break;
                schedule();
        }
        __set_current_state(TASK_RUNNING);

        if (pid_ns->reboot)
                current->signal->group_exit_code = pid_ns->reboot;

        acct_exit_ns(pid_ns);
        return;
}

#ifdef CONFIG_CHECKPOINT_RESTORE
static int pid_ns_ctl_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp, loff_t *ppos)
{
        struct pid_namespace *pid_ns = task_active_pid_ns(current);
        struct ctl_table tmp = *table;

        if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
                return -EPERM;

        /*
         * Writing directly to ns' last_pid field is OK, since this field
         * is volatile in a living namespace anyway, and any code writing
         * to it has to synchronize its usage by external means.
         */

        tmp.data = &pid_ns->last_pid;
        return proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
}

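/*
 * Illustrative use from userspace (e.g. by checkpoint/restore tools):
 * writing N to /proc/sys/kernel/ns_last_pid makes the next pid allocated
 * in the writer's pid namespace be N + 1, provided that pid is still free:
 *
 *	echo 41 > /proc/sys/kernel/ns_last_pid
 *	# the next task forked in this namespace gets pid 42 (if available)
 */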
extern int pid_max;
static int zero = 0;
static struct ctl_table pid_ns_ctl_table[] = {
        {
                .procname = "ns_last_pid",
                .maxlen = sizeof(int),
                .mode = 0666, /* permissions are checked in the handler */
                .proc_handler = pid_ns_ctl_handler,
                .extra1 = &zero,
                .extra2 = &pid_max,
        },
        { }
};
static struct ctl_path kern_path[] = { { .procname = "kernel", }, { } };
#endif	/* CONFIG_CHECKPOINT_RESTORE */

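/*
 * reboot(2) issued inside a pid namespace does not reboot the machine.
 * Instead, the requested command is recorded and the namespace's init is
 * killed; the signal stored in ->reboot is later reported to the parent
 * namespace as init's exit status (see zap_pid_ns_processes() above).
 */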
int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd)
{
        if (pid_ns == &init_pid_ns)
                return 0;

        switch (cmd) {
        case LINUX_REBOOT_CMD_RESTART2:
        case LINUX_REBOOT_CMD_RESTART:
                pid_ns->reboot = SIGHUP;
                break;

        case LINUX_REBOOT_CMD_POWER_OFF:
        case LINUX_REBOOT_CMD_HALT:
                pid_ns->reboot = SIGINT;
                break;
        default:
                return -EINVAL;
        }

        read_lock(&tasklist_lock);
        force_sig(SIGKILL, pid_ns->child_reaper);
        read_unlock(&tasklist_lock);

        do_exit(0);

        /* Not reached */
        return 0;
}

static void *pidns_get(struct task_struct *task)
{
        struct pid_namespace *ns;

        rcu_read_lock();
        ns = task_active_pid_ns(task);
        if (ns)
                get_pid_ns(ns);
        rcu_read_unlock();

        return ns;
}

static void pidns_put(void *ns)
{
        put_pid_ns(ns);
}

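/*
 * setns(2) for pid namespaces.  The caller's own pid cannot change, so
 * installing a namespace only affects children created afterwards (via
 * nsproxy->pid_ns_for_children); the usual pattern is setns() followed
 * by fork().
 */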
static int pidns_install(struct nsproxy *nsproxy, void *ns)
{
        struct pid_namespace *active = task_active_pid_ns(current);
        struct pid_namespace *ancestor, *new = ns;

        if (!ns_capable(new->user_ns, CAP_SYS_ADMIN) ||
            !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
                return -EPERM;

        /*
         * Only allow entering the current active pid namespace
         * or a child of the current active pid namespace.
         *
         * This is required for fork to return a usable pid value and
         * this maintains the property that processes and their
         * children can not escape their current pid namespace.
         */
        if (new->level < active->level)
                return -EINVAL;

        ancestor = new;
        while (ancestor->level > active->level)
                ancestor = ancestor->parent;
        if (ancestor != active)
                return -EINVAL;

        put_pid_ns(nsproxy->pid_ns_for_children);
        nsproxy->pid_ns_for_children = get_pid_ns(new);
        return 0;
}

static unsigned int pidns_inum(void *ns)
{
        struct pid_namespace *pid_ns = ns;
        return pid_ns->proc_inum;
}

const struct proc_ns_operations pidns_operations = {
        .name		= "pid",
        .type		= CLONE_NEWPID,
        .get		= pidns_get,
        .put		= pidns_put,
        .install	= pidns_install,
        .inum		= pidns_inum,
};

static __init int pid_namespaces_init(void)
{
        pid_ns_cachep = KMEM_CACHE(pid_namespace, SLAB_PANIC);

#ifdef CONFIG_CHECKPOINT_RESTORE
        register_sysctl_paths(kern_path, pid_ns_ctl_table);
#endif
        return 0;
}

__initcall(pid_namespaces_init);