/*
 * linux/fs/namespace.c
 *
 * (C) Copyright Al Viro 2000, 2001
 *	Released under GPL v2.
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 * Heavily rewritten.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/capability.h>
#include <linux/mnt_namespace.h>
#include <linux/user_namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/idr.h>
#include <linux/acct.h>		/* acct_auto_close_mnt */
#include <linux/init.h>		/* init_rootfs */
#include <linux/fs_struct.h>	/* get_fs_root et al. */
#include <linux/fsnotify.h>	/* fsnotify_vfsmount_delete */
#include <linux/uaccess.h>
#include <linux/proc_ns.h>
#include <linux/magic.h>
#include <linux/bootmem.h>
#include "pnode.h"
#include "internal.h"

static unsigned int m_hash_mask __read_mostly;
static unsigned int m_hash_shift __read_mostly;
static unsigned int mp_hash_mask __read_mostly;
static unsigned int mp_hash_shift __read_mostly;

static __initdata unsigned long mhash_entries;
static int __init set_mhash_entries(char *str)
{
	if (!str)
		return 0;
	mhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("mhash_entries=", set_mhash_entries);

static __initdata unsigned long mphash_entries;
static int __init set_mphash_entries(char *str)
{
	if (!str)
		return 0;
	mphash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("mphash_entries=", set_mphash_entries);

static int event;
static DEFINE_IDA(mnt_id_ida);
static DEFINE_IDA(mnt_group_ida);
static DEFINE_SPINLOCK(mnt_id_lock);
static int mnt_id_start = 0;
static int mnt_group_start = 1;

static struct list_head *mount_hashtable __read_mostly;
static struct hlist_head *mountpoint_hashtable __read_mostly;
static struct kmem_cache *mnt_cache __read_mostly;
static DECLARE_RWSEM(namespace_sem);

/* /sys/fs */
struct kobject *fs_kobj;
EXPORT_SYMBOL_GPL(fs_kobj);

/*
 * vfsmount lock may be taken for read to prevent changes to the
 * vfsmount hash, i.e. during mountpoint lookups or walking back
 * up the tree.
 *
 * It should be taken for write in all cases where the vfsmount
 * tree or hash is modified or when a vfsmount structure is modified.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock);

static inline struct list_head *m_hash(struct vfsmount *mnt, struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> m_hash_shift);
	return &mount_hashtable[tmp & m_hash_mask];
}

static inline struct hlist_head *mp_hash(struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> mp_hash_shift);
	return &mountpoint_hashtable[tmp & mp_hash_mask];
}
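
/*
 * Illustrative note: both hash functions fold the pointer bits above
 * cache-line granularity and then mix in the high bits.  As a worked
 * example, assuming L1_CACHE_BYTES == 64, m_hash_shift == 10 and
 * m_hash_mask == 1023 (the real values are computed at boot from
 * mhash_entries=/mphash_entries=, so these numbers are illustrative
 * only), an (mnt, dentry) key hashes as:
 *
 *	tmp = ((unsigned long)mnt >> 6) + ((unsigned long)dentry >> 6);
 *	tmp += tmp >> 10;
 *	chain = &mount_hashtable[tmp & 1023];
 */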

/*
 * allocation is serialized by namespace_sem, but we need the spinlock to
 * serialize with freeing.
 */
static int mnt_alloc_id(struct mount *mnt)
{
	int res;

retry:
	ida_pre_get(&mnt_id_ida, GFP_KERNEL);
	spin_lock(&mnt_id_lock);
	res = ida_get_new_above(&mnt_id_ida, mnt_id_start, &mnt->mnt_id);
	if (!res)
		mnt_id_start = mnt->mnt_id + 1;
	spin_unlock(&mnt_id_lock);
	if (res == -EAGAIN)
		goto retry;

	return res;
}
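
/*
 * A minimal sketch of the IDA idiom used above: ida_pre_get() preallocates
 * outside the spinlock, and ida_get_new_above() returns -EAGAIN when the
 * preallocation has been consumed by a concurrent allocator, so the caller
 * loops.  Generic form, assuming a lock "my_lock" and IDA "my_ida"
 * (hypothetical names, not part of this file):
 *
 *	int id, err;
 *
 *	do {
 *		if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *			return -ENOMEM;
 *		spin_lock(&my_lock);
 *		err = ida_get_new_above(&my_ida, start, &id);
 *		spin_unlock(&my_lock);
 *	} while (err == -EAGAIN);
 */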

static void mnt_free_id(struct mount *mnt)
{
	int id = mnt->mnt_id;
	spin_lock(&mnt_id_lock);
	ida_remove(&mnt_id_ida, id);
	if (mnt_id_start > id)
		mnt_id_start = id;
	spin_unlock(&mnt_id_lock);
}

/*
 * Allocate a new peer group ID
 *
 * mnt_group_ida is protected by namespace_sem
 */
static int mnt_alloc_group_id(struct mount *mnt)
{
	int res;

	if (!ida_pre_get(&mnt_group_ida, GFP_KERNEL))
		return -ENOMEM;

	res = ida_get_new_above(&mnt_group_ida,
				mnt_group_start,
				&mnt->mnt_group_id);
	if (!res)
		mnt_group_start = mnt->mnt_group_id + 1;

	return res;
}

/*
 * Release a peer group ID
 */
void mnt_release_group_id(struct mount *mnt)
{
	int id = mnt->mnt_group_id;
	ida_remove(&mnt_group_ida, id);
	if (mnt_group_start > id)
		mnt_group_start = id;
	mnt->mnt_group_id = 0;
}

/*
 * vfsmount lock must be held for read
 */
static inline void mnt_add_count(struct mount *mnt, int n)
{
#ifdef CONFIG_SMP
	this_cpu_add(mnt->mnt_pcp->mnt_count, n);
#else
	preempt_disable();
	mnt->mnt_count += n;
	preempt_enable();
#endif
}

/*
 * vfsmount lock must be held for write
 */
unsigned int mnt_get_count(struct mount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
	}

	return count;
#else
	return mnt->mnt_count;
#endif
}

static struct mount *alloc_vfsmnt(const char *name)
{
	struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
	if (mnt) {
		int err;

		err = mnt_alloc_id(mnt);
		if (err)
			goto out_free_cache;

		if (name) {
			mnt->mnt_devname = kstrdup(name, GFP_KERNEL);
			if (!mnt->mnt_devname)
				goto out_free_id;
		}

#ifdef CONFIG_SMP
		mnt->mnt_pcp = alloc_percpu(struct mnt_pcp);
		if (!mnt->mnt_pcp)
			goto out_free_devname;

		this_cpu_add(mnt->mnt_pcp->mnt_count, 1);
#else
		mnt->mnt_count = 1;
		mnt->mnt_writers = 0;
#endif

		INIT_LIST_HEAD(&mnt->mnt_hash);
		INIT_LIST_HEAD(&mnt->mnt_child);
		INIT_LIST_HEAD(&mnt->mnt_mounts);
		INIT_LIST_HEAD(&mnt->mnt_list);
		INIT_LIST_HEAD(&mnt->mnt_expire);
		INIT_LIST_HEAD(&mnt->mnt_share);
		INIT_LIST_HEAD(&mnt->mnt_slave_list);
		INIT_LIST_HEAD(&mnt->mnt_slave);
#ifdef CONFIG_FSNOTIFY
		INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks);
#endif
	}
	return mnt;

#ifdef CONFIG_SMP
out_free_devname:
	kfree(mnt->mnt_devname);
#endif
out_free_id:
	mnt_free_id(mnt);
out_free_cache:
	kmem_cache_free(mnt_cache, mnt);
	return NULL;
}

/*
 * Most r/o checks on a fs are for operations that take
 * discrete amounts of time, like a write() or unlink().
 * We must keep track of when those operations start
 * (for permission checks) and when they end, so that
 * we can determine when writes are able to occur to
 * a filesystem.
 */
/*
 * __mnt_is_readonly: check whether a mount is read-only
 * @mnt: the mount to check for its write status
 *
 * This shouldn't be used directly outside of the VFS.
 * It does not guarantee that the filesystem will stay
 * r/w, just that it is r/w right *now*. This cannot and
 * should not be used in place of IS_RDONLY(inode).
 * mnt_want/drop_write() will _keep_ the filesystem
 * r/w.
 */
int __mnt_is_readonly(struct vfsmount *mnt)
{
	if (mnt->mnt_flags & MNT_READONLY)
		return 1;
	if (mnt->mnt_sb->s_flags & MS_RDONLY)
		return 1;
	return 0;
}
EXPORT_SYMBOL_GPL(__mnt_is_readonly);

static inline void mnt_inc_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_inc(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers++;
#endif
}

static inline void mnt_dec_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_dec(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers--;
#endif
}

static unsigned int mnt_get_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers;
	}

	return count;
#else
	return mnt->mnt_writers;
#endif
}

static int mnt_is_readonly(struct vfsmount *mnt)
{
	if (mnt->mnt_sb->s_readonly_remount)
		return 1;
	/* Order wrt setting s_flags/s_readonly_remount in do_remount() */
	smp_rmb();
	return __mnt_is_readonly(mnt);
}

/*
 * Most r/o & frozen checks on a fs are for operations that take discrete
 * amounts of time, like a write() or unlink(). We must keep track of when
 * those operations start (for permission checks) and when they end, so that we
 * can determine when writes are able to occur to a filesystem.
 */
/**
 * __mnt_want_write - get write access to a mount without freeze protection
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mount is read-write) before
 * returning success. This operation does not protect against the filesystem
 * being frozen. When the write operation is finished, __mnt_drop_write() must
 * be called. This is effectively a refcount.
 */
int __mnt_want_write(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	int ret = 0;

	preempt_disable();
	mnt_inc_writers(mnt);
	/*
	 * The store from mnt_inc_writers() must be visible before we enter
	 * the MNT_WRITE_HOLD spin loop below, so that the slowpath can see
	 * our incremented count after it has set MNT_WRITE_HOLD.
	 */
	smp_mb();
	while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD)
		cpu_relax();
	/*
	 * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
	 * be set to match its requirements. So we must not load that until
	 * MNT_WRITE_HOLD is cleared.
	 */
	smp_rmb();
	if (mnt_is_readonly(m)) {
		mnt_dec_writers(mnt);
		ret = -EROFS;
	}
	preempt_enable();

	return ret;
}

/**
 * mnt_want_write - get write access to a mount
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mount is read-write, filesystem
 * is not frozen) before returning success. When the write operation is
 * finished, mnt_drop_write() must be called. This is effectively a refcount.
 */
int mnt_want_write(struct vfsmount *m)
{
	int ret;

	sb_start_write(m->mnt_sb);
	ret = __mnt_want_write(m);
	if (ret)
		sb_end_write(m->mnt_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write);
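
/*
 * A minimal usage sketch ("my_modify_op" is a hypothetical helper, not
 * part of this file): callers bracket any write through a vfsmount with
 * mnt_want_write()/mnt_drop_write(), which takes both freeze protection
 * on the superblock and a writer reference on the mount:
 *
 *	err = mnt_want_write(path->mnt);
 *	if (err)
 *		return err;
 *	err = my_modify_op(path->dentry);
 *	mnt_drop_write(path->mnt);
 *	return err;
 */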

/**
 * mnt_clone_write - get write access to a mount
 * @mnt: the mount on which to take a write
 *
 * This is effectively like mnt_want_write, except
 * it must only be used to take an extra write reference
 * on a mountpoint that we already know has a write reference
 * on it. This allows some optimisation.
 *
 * When finished, mnt_drop_write must be called as usual to
 * drop the reference.
 */
int mnt_clone_write(struct vfsmount *mnt)
{
	/* superblock may be r/o */
	if (__mnt_is_readonly(mnt))
		return -EROFS;
	preempt_disable();
	mnt_inc_writers(real_mount(mnt));
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL_GPL(mnt_clone_write);

/**
 * __mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount is about to be written to
 *
 * This is like __mnt_want_write, but it takes a file and can
 * do some optimisations if the file is open for write already
 */
int __mnt_want_write_file(struct file *file)
{
	struct inode *inode = file_inode(file);

	if (!(file->f_mode & FMODE_WRITE) || special_file(inode->i_mode))
		return __mnt_want_write(file->f_path.mnt);
	else
		return mnt_clone_write(file->f_path.mnt);
}

/**
 * mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount is about to be written to
 *
 * This is like mnt_want_write, but it takes a file and can
 * do some optimisations if the file is open for write already
 */
int mnt_want_write_file(struct file *file)
{
	int ret;

	sb_start_write(file->f_path.mnt->mnt_sb);
	ret = __mnt_want_write_file(file);
	if (ret)
		sb_end_write(file->f_path.mnt->mnt_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write_file);

/**
 * __mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done
 * performing writes to it. Must be matched with
 * __mnt_want_write() call above.
 */
void __mnt_drop_write(struct vfsmount *mnt)
{
	preempt_disable();
	mnt_dec_writers(real_mount(mnt));
	preempt_enable();
}

/**
 * mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done performing writes to it and
 * also allows filesystem to be frozen again. Must be matched with
 * mnt_want_write() call above.
 */
void mnt_drop_write(struct vfsmount *mnt)
{
	__mnt_drop_write(mnt);
	sb_end_write(mnt->mnt_sb);
}
EXPORT_SYMBOL_GPL(mnt_drop_write);

void __mnt_drop_write_file(struct file *file)
{
	__mnt_drop_write(file->f_path.mnt);
}

void mnt_drop_write_file(struct file *file)
{
	mnt_drop_write(file->f_path.mnt);
}
EXPORT_SYMBOL(mnt_drop_write_file);

static int mnt_make_readonly(struct mount *mnt)
{
	int ret = 0;

	lock_mount_hash();
	mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
	/*
	 * After storing MNT_WRITE_HOLD, we'll read the counters. This store
	 * should be visible before we do.
	 */
	smp_mb();

	/*
	 * With writers on hold, if this value is zero, then there are
	 * definitely no active writers (although held writers may subsequently
	 * increment the count, they'll have to wait, and decrement it after
	 * seeing MNT_READONLY).
	 *
	 * It is OK to have counter incremented on one CPU and decremented on
	 * another: the sum will add up correctly. The danger would be when we
	 * sum up each counter, if we read a counter before it is incremented,
	 * but then read another CPU's count which it has been subsequently
	 * decremented from -- we would see more decrements than we should.
	 * MNT_WRITE_HOLD protects against this scenario, because
	 * mnt_want_write first increments count, then smp_mb, then spins on
	 * MNT_WRITE_HOLD, so it can't be decremented by another CPU while
	 * we're counting up here.
	 */
	if (mnt_get_writers(mnt) > 0)
		ret = -EBUSY;
	else
		mnt->mnt.mnt_flags |= MNT_READONLY;
	/*
	 * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers
	 * that become unheld will see MNT_READONLY.
	 */
	smp_wmb();
	mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
	unlock_mount_hash();
	return ret;
}
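
/*
 * Summary of the handshake between __mnt_want_write() and
 * mnt_make_readonly(), as described by the barrier comments above:
 *
 *	writer (fast path)		remount-r/o (slow path)
 *	------------------		-----------------------
 *	mnt_inc_writers()		set MNT_WRITE_HOLD
 *	smp_mb()			smp_mb()
 *	spin while MNT_WRITE_HOLD	sum mnt_get_writers()
 *	smp_rmb()			set MNT_READONLY if none
 *	test mnt_is_readonly()		smp_wmb(), clear MNT_WRITE_HOLD
 *
 * Either the slowpath sees the writer's increment and fails with -EBUSY,
 * or the writer is held until MNT_READONLY is visible and backs out with
 * -EROFS.
 */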

static void __mnt_unmake_readonly(struct mount *mnt)
{
	lock_mount_hash();
	mnt->mnt.mnt_flags &= ~MNT_READONLY;
	unlock_mount_hash();
}

int sb_prepare_remount_readonly(struct super_block *sb)
{
	struct mount *mnt;
	int err = 0;

	/* Racy optimization. Recheck the counter under MNT_WRITE_HOLD */
	if (atomic_long_read(&sb->s_remove_count))
		return -EBUSY;

	lock_mount_hash();
	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
		if (!(mnt->mnt.mnt_flags & MNT_READONLY)) {
			mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
			smp_mb();
			if (mnt_get_writers(mnt) > 0) {
				err = -EBUSY;
				break;
			}
		}
	}
	if (!err && atomic_long_read(&sb->s_remove_count))
		err = -EBUSY;

	if (!err) {
		sb->s_readonly_remount = 1;
		smp_wmb();
	}
	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
		if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
			mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
	}
	unlock_mount_hash();

	return err;
}

static void free_vfsmnt(struct mount *mnt)
{
	kfree(mnt->mnt_devname);
	mnt_free_id(mnt);
#ifdef CONFIG_SMP
	free_percpu(mnt->mnt_pcp);
#endif
	kmem_cache_free(mnt_cache, mnt);
}

/* call under rcu_read_lock */
bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	struct mount *mnt;
	if (read_seqretry(&mount_lock, seq))
		return false;
	if (bastard == NULL)
		return true;
	mnt = real_mount(bastard);
	mnt_add_count(mnt, 1);
	if (likely(!read_seqretry(&mount_lock, seq)))
		return true;
	if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
		mnt_add_count(mnt, -1);
		return false;
	}
	rcu_read_unlock();
	mntput(bastard);
	rcu_read_lock();
	return false;
}

/*
 * find the first mount at @dentry on vfsmount @mnt.
 * call under rcu_read_lock()
 */
struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{
	struct list_head *head = m_hash(mnt, dentry);
	struct mount *p;

	list_for_each_entry_rcu(p, head, mnt_hash)
		if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
			return p;
	return NULL;
}

/*
 * find the last mount at @dentry on vfsmount @mnt.
 * mount_lock must be held.
 */
struct mount *__lookup_mnt_last(struct vfsmount *mnt, struct dentry *dentry)
{
	struct list_head *head = m_hash(mnt, dentry);
	struct mount *p, *res = NULL;

	list_for_each_entry(p, head, mnt_hash)
		if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
			goto found;
	return res;
found:
	res = p;
	list_for_each_entry_continue(p, head, mnt_hash) {
		if (&p->mnt_parent->mnt != mnt || p->mnt_mountpoint != dentry)
			break;
		res = p;
	}
	return res;
}
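
/*
 * Note on "shadowed" mounts: several mounts may share the same
 * (parent, mountpoint) key, e.g. after
 *
 *	mount /dev/sda1 /mnt
 *	mount /dev/sda2 /mnt
 *
 * where /dev/sda2 shadows /dev/sda1.  commit_tree() below inserts a new
 * shadow right next to the mount it shadows, so all mounts with the same
 * key form one contiguous run on the hash chain; that is what allows
 * __lookup_mnt_last() to stop at the first non-matching entry instead of
 * walking the entire chain.
 */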

/*
 * lookup_mnt - Return the first child mount mounted at path
 *
 * "First" means first mounted chronologically. If you create the
 * following mounts:
 *
 * mount /dev/sda1 /mnt
 * mount /dev/sda2 /mnt
 * mount /dev/sda3 /mnt
 *
 * Then lookup_mnt() on the base /mnt dentry in the root mount will
 * return successively the root dentry and vfsmount of /dev/sda1, then
 * /dev/sda2, then /dev/sda3, then NULL.
 *
 * lookup_mnt takes a reference to the found vfsmount.
 */
struct vfsmount *lookup_mnt(struct path *path)
{
	struct mount *child_mnt;
	struct vfsmount *m;
	unsigned seq;

	rcu_read_lock();
	do {
		seq = read_seqbegin(&mount_lock);
		child_mnt = __lookup_mnt(path->mnt, path->dentry);
		m = child_mnt ? &child_mnt->mnt : NULL;
	} while (!legitimize_mnt(m, seq));
	rcu_read_unlock();
	return m;
}
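
/*
 * The loop above is the lockless-read pattern around mount_lock:
 * read_seqbegin() samples the sequence count, the hash walk runs under
 * RCU alone, and legitimize_mnt() revalidates the sequence, takes a
 * reference on the candidate, and revalidates again; any concurrent
 * writer to the mount hash forces a retry.  (The MNT_SYNC_UMOUNT branch
 * in legitimize_mnt() appears to exist so that a mount being unmounted
 * synchronously can shed the speculative reference without doing a full
 * mntput() from this context.)
 */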

static struct mountpoint *new_mountpoint(struct dentry *dentry)
{
	struct hlist_head *chain = mp_hash(dentry);
	struct mountpoint *mp;
	int ret;

	hlist_for_each_entry(mp, chain, m_hash) {
		if (mp->m_dentry == dentry) {
			/* might be worth a WARN_ON() */
			if (d_unlinked(dentry))
				return ERR_PTR(-ENOENT);
			mp->m_count++;
			return mp;
		}
	}

	mp = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
	if (!mp)
		return ERR_PTR(-ENOMEM);

	ret = d_set_mounted(dentry);
	if (ret) {
		kfree(mp);
		return ERR_PTR(ret);
	}

	mp->m_dentry = dentry;
	mp->m_count = 1;
	hlist_add_head(&mp->m_hash, chain);
	return mp;
}

static void put_mountpoint(struct mountpoint *mp)
{
	if (!--mp->m_count) {
		struct dentry *dentry = mp->m_dentry;
		spin_lock(&dentry->d_lock);
		dentry->d_flags &= ~DCACHE_MOUNTED;
		spin_unlock(&dentry->d_lock);
		hlist_del(&mp->m_hash);
		kfree(mp);
	}
}

static inline int check_mnt(struct mount *mnt)
{
	return mnt->mnt_ns == current->nsproxy->mnt_ns;
}

/*
 * vfsmount lock must be held for write
 */
static void touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns) {
		ns->event = ++event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * vfsmount lock must be held for write
 */
static void __touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns && ns->event != event) {
		ns->event = event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * vfsmount lock must be held for write
 */
static void detach_mnt(struct mount *mnt, struct path *old_path)
{
	old_path->dentry = mnt->mnt_mountpoint;
	old_path->mnt = &mnt->mnt_parent->mnt;
	mnt->mnt_parent = mnt;
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	list_del_init(&mnt->mnt_child);
	list_del_init(&mnt->mnt_hash);
	put_mountpoint(mnt->mnt_mp);
	mnt->mnt_mp = NULL;
}

/*
 * vfsmount lock must be held for write
 */
void mnt_set_mountpoint(struct mount *mnt,
			struct mountpoint *mp,
			struct mount *child_mnt)
{
	mp->m_count++;
	mnt_add_count(mnt, 1);	/* essentially, that's mntget */
	child_mnt->mnt_mountpoint = dget(mp->m_dentry);
	child_mnt->mnt_parent = mnt;
	child_mnt->mnt_mp = mp;
}

/*
 * vfsmount lock must be held for write
 */
static void attach_mnt(struct mount *mnt,
			struct mount *parent,
			struct mountpoint *mp)
{
	mnt_set_mountpoint(parent, mp, mnt);
	list_add(&mnt->mnt_hash, m_hash(&parent->mnt, mp->m_dentry));
	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
}

/*
 * vfsmount lock must be held for write
 */
static void commit_tree(struct mount *mnt, struct mount *shadows)
{
	struct mount *parent = mnt->mnt_parent;
	struct mount *m;
	LIST_HEAD(head);
	struct mnt_namespace *n = parent->mnt_ns;

	BUG_ON(parent == mnt);

	list_add_tail(&head, &mnt->mnt_list);
	list_for_each_entry(m, &head, mnt_list)
		m->mnt_ns = n;

	list_splice(&head, n->list.prev);

	if (shadows)
		list_add(&mnt->mnt_hash, &shadows->mnt_hash);
	else
		list_add(&mnt->mnt_hash,
			 m_hash(&parent->mnt, mnt->mnt_mountpoint));
	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
	touch_mnt_namespace(n);
}

static struct mount *next_mnt(struct mount *p, struct mount *root)
{
	struct list_head *next = p->mnt_mounts.next;
	if (next == &p->mnt_mounts) {
		while (1) {
			if (p == root)
				return NULL;
			next = p->mnt_child.next;
			if (next != &p->mnt_parent->mnt_mounts)
				break;
			p = p->mnt_parent;
		}
	}
	return list_entry(next, struct mount, mnt_child);
}
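
/*
 * next_mnt() implements a pre-order, depth-first walk of a mount tree.
 * The idiom used throughout this file to visit every mount under (and
 * including) a root @mnt is:
 *
 *	struct mount *p;
 *
 *	for (p = mnt; p; p = next_mnt(p, mnt))
 *		...;
 *
 * (see e.g. umount_tree() and tree_contains_unbindable() below).
 */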

static struct mount *skip_mnt_tree(struct mount *p)
{
	struct list_head *prev = p->mnt_mounts.prev;
	while (prev != &p->mnt_mounts) {
		p = list_entry(prev, struct mount, mnt_child);
		prev = p->mnt_mounts.prev;
	}
	return p;
}

struct vfsmount *
vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void *data)
{
	struct mount *mnt;
	struct dentry *root;

	if (!type)
		return ERR_PTR(-ENODEV);

	mnt = alloc_vfsmnt(name);
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (flags & MS_KERNMOUNT)
		mnt->mnt.mnt_flags = MNT_INTERNAL;

	root = mount_fs(type, flags, name, data);
	if (IS_ERR(root)) {
		free_vfsmnt(mnt);
		return ERR_CAST(root);
	}

	mnt->mnt.mnt_root = root;
	mnt->mnt.mnt_sb = root->d_sb;
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	mnt->mnt_parent = mnt;
	lock_mount_hash();
	list_add_tail(&mnt->mnt_instance, &root->d_sb->s_mounts);
	unlock_mount_hash();
	return &mnt->mnt;
}
EXPORT_SYMBOL_GPL(vfs_kern_mount);

static struct mount *clone_mnt(struct mount *old, struct dentry *root,
					int flag)
{
	struct super_block *sb = old->mnt.mnt_sb;
	struct mount *mnt;
	int err;

	mnt = alloc_vfsmnt(old->mnt_devname);
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (flag & (CL_SLAVE | CL_PRIVATE | CL_SHARED_TO_SLAVE))
		mnt->mnt_group_id = 0; /* not a peer of original */
	else
		mnt->mnt_group_id = old->mnt_group_id;

	if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
		err = mnt_alloc_group_id(mnt);
		if (err)
			goto out_free;
	}

	mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~MNT_WRITE_HOLD;
	/* Don't allow unprivileged users to change mount flags */
	if ((flag & CL_UNPRIVILEGED) && (mnt->mnt.mnt_flags & MNT_READONLY))
		mnt->mnt.mnt_flags |= MNT_LOCK_READONLY;

	/* Don't allow unprivileged users to reveal what is under a mount */
	if ((flag & CL_UNPRIVILEGED) && list_empty(&old->mnt_expire))
		mnt->mnt.mnt_flags |= MNT_LOCKED;

	atomic_inc(&sb->s_active);
	mnt->mnt.mnt_sb = sb;
	mnt->mnt.mnt_root = dget(root);
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	mnt->mnt_parent = mnt;
	lock_mount_hash();
	list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
	unlock_mount_hash();

	if ((flag & CL_SLAVE) ||
	    ((flag & CL_SHARED_TO_SLAVE) && IS_MNT_SHARED(old))) {
		list_add(&mnt->mnt_slave, &old->mnt_slave_list);
		mnt->mnt_master = old;
		CLEAR_MNT_SHARED(mnt);
	} else if (!(flag & CL_PRIVATE)) {
		if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old))
			list_add(&mnt->mnt_share, &old->mnt_share);
		if (IS_MNT_SLAVE(old))
			list_add(&mnt->mnt_slave, &old->mnt_slave);
		mnt->mnt_master = old->mnt_master;
	}
	if (flag & CL_MAKE_SHARED)
		set_mnt_shared(mnt);

	/* stick the duplicate mount on the same expiry list
	 * as the original if that was on one */
	if (flag & CL_EXPIRE) {
		if (!list_empty(&old->mnt_expire))
			list_add(&mnt->mnt_expire, &old->mnt_expire);
	}

	return mnt;

out_free:
	free_vfsmnt(mnt);
	return ERR_PTR(err);
}

static void delayed_free(struct rcu_head *head)
{
	struct mount *mnt = container_of(head, struct mount, mnt_rcu);
	kfree(mnt->mnt_devname);
#ifdef CONFIG_SMP
	free_percpu(mnt->mnt_pcp);
#endif
	kmem_cache_free(mnt_cache, mnt);
}

static void mntput_no_expire(struct mount *mnt)
{
put_again:
	rcu_read_lock();
	mnt_add_count(mnt, -1);
	if (likely(mnt->mnt_ns)) { /* shouldn't be the last one */
		rcu_read_unlock();
		return;
	}
	lock_mount_hash();
	if (mnt_get_count(mnt)) {
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	if (unlikely(mnt->mnt_pinned)) {
		mnt_add_count(mnt, mnt->mnt_pinned + 1);
		mnt->mnt_pinned = 0;
		rcu_read_unlock();
		unlock_mount_hash();
		acct_auto_close_mnt(&mnt->mnt);
		goto put_again;
	}
	if (unlikely(mnt->mnt.mnt_flags & MNT_DOOMED)) {
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	mnt->mnt.mnt_flags |= MNT_DOOMED;
	rcu_read_unlock();

	list_del(&mnt->mnt_instance);
	unlock_mount_hash();

	/*
	 * This probably indicates that somebody messed
	 * up a mnt_want/drop_write() pair. If this
	 * happens, the filesystem was probably unable
	 * to make r/w->r/o transitions.
	 */
	/*
	 * The locking used to deal with mnt_count decrement provides barriers,
	 * so mnt_get_writers() below is safe.
	 */
	WARN_ON(mnt_get_writers(mnt));
	fsnotify_vfsmount_delete(&mnt->mnt);
	dput(mnt->mnt.mnt_root);
	deactivate_super(mnt->mnt.mnt_sb);
	mnt_free_id(mnt);
	call_rcu(&mnt->mnt_rcu, delayed_free);
}

void mntput(struct vfsmount *mnt)
{
	if (mnt) {
		struct mount *m = real_mount(mnt);
		/* avoid cacheline pingpong, hope gcc doesn't get "smart" */
		if (unlikely(m->mnt_expiry_mark))
			m->mnt_expiry_mark = 0;
		mntput_no_expire(m);
	}
}
EXPORT_SYMBOL(mntput);

struct vfsmount *mntget(struct vfsmount *mnt)
{
	if (mnt)
		mnt_add_count(real_mount(mnt), 1);
	return mnt;
}
EXPORT_SYMBOL(mntget);

void mnt_pin(struct vfsmount *mnt)
{
	lock_mount_hash();
	real_mount(mnt)->mnt_pinned++;
	unlock_mount_hash();
}
EXPORT_SYMBOL(mnt_pin);

void mnt_unpin(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	lock_mount_hash();
	if (mnt->mnt_pinned) {
		mnt_add_count(mnt, 1);
		mnt->mnt_pinned--;
	}
	unlock_mount_hash();
}
EXPORT_SYMBOL(mnt_unpin);

static inline void mangle(struct seq_file *m, const char *s)
{
	seq_escape(m, s, " \t\n\\");
}

/*
 * Simple .show_options callback for filesystems which don't want to
 * implement more complex mount option showing.
 *
 * See also save_mount_options().
 */
int generic_show_options(struct seq_file *m, struct dentry *root)
{
	const char *options;

	rcu_read_lock();
	options = rcu_dereference(root->d_sb->s_options);

	if (options != NULL && options[0]) {
		seq_putc(m, ',');
		mangle(m, options);
	}
	rcu_read_unlock();

	return 0;
}
EXPORT_SYMBOL(generic_show_options);

/*
 * If the filesystem uses generic_show_options(), this function should be
 * called from the fill_super() callback.
 *
 * The .remount_fs callback usually needs to be handled in a special
 * way, to make sure that previous options are not overwritten if the
 * remount fails.
 *
 * Also note that if the filesystem's .remount_fs function doesn't
 * reset all options to their default value, but changes only newly
 * given options, then the displayed options will not reflect reality
 * any more.
 */
void save_mount_options(struct super_block *sb, char *options)
{
	BUG_ON(sb->s_options);
	rcu_assign_pointer(sb->s_options, kstrdup(options, GFP_KERNEL));
}
EXPORT_SYMBOL(save_mount_options);

void replace_mount_options(struct super_block *sb, char *options)
{
	char *old = sb->s_options;
	rcu_assign_pointer(sb->s_options, options);
	if (old) {
		synchronize_rcu();
		kfree(old);
	}
}
EXPORT_SYMBOL(replace_mount_options);

#ifdef CONFIG_PROC_FS
/* iterator; we want it to have access to namespace_sem, thus here... */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_mounts *p = proc_mounts(m);

	down_read(&namespace_sem);
	return seq_list_start(&p->ns->list, *pos);
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_mounts *p = proc_mounts(m);

	return seq_list_next(v, &p->ns->list, pos);
}

static void m_stop(struct seq_file *m, void *v)
{
	up_read(&namespace_sem);
}

static int m_show(struct seq_file *m, void *v)
{
	struct proc_mounts *p = proc_mounts(m);
	struct mount *r = list_entry(v, struct mount, mnt_list);
	return p->show(m, &r->mnt);
}

const struct seq_operations mounts_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= m_show,
};
#endif	/* CONFIG_PROC_FS */

/**
 * may_umount_tree - check if a mount tree is busy
 * @m: root of mount tree
 *
 * This is called to check if a tree of mounts has any
 * open files, pwds, chroots or sub mounts that are
 * busy.
 */
int may_umount_tree(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	int actual_refs = 0;
	int minimum_refs = 0;
	struct mount *p;
	BUG_ON(!m);

	/* write lock needed for mnt_get_count */
	lock_mount_hash();
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		actual_refs += mnt_get_count(p);
		minimum_refs += 2;
	}
	unlock_mount_hash();

	if (actual_refs > minimum_refs)
		return 0;

	return 1;
}

EXPORT_SYMBOL(may_umount_tree);

/**
 * may_umount - check if a mount point is busy
 * @mnt: root of mount
 *
 * This is called to check if a mount point has any
 * open files, pwds, chroots or sub mounts. If the
 * mount has sub mounts this will return busy
 * regardless of whether the sub mounts are busy.
 *
 * Doesn't take quota and stuff into account. IOW, in some cases it will
 * give false negatives. The main reason why it's here is that we need
 * a non-destructive way to look for easily umountable filesystems.
 */
int may_umount(struct vfsmount *mnt)
{
	int ret = 1;
	down_read(&namespace_sem);
	lock_mount_hash();
	if (propagate_mount_busy(real_mount(mnt), 2))
		ret = 0;
	unlock_mount_hash();
	up_read(&namespace_sem);
	return ret;
}

EXPORT_SYMBOL(may_umount);

static LIST_HEAD(unmounted);	/* protected by namespace_sem */

static void namespace_unlock(void)
{
	struct mount *mnt;
	LIST_HEAD(head);

	if (likely(list_empty(&unmounted))) {
		up_write(&namespace_sem);
		return;
	}

	list_splice_init(&unmounted, &head);
	up_write(&namespace_sem);

	synchronize_rcu();

	while (!list_empty(&head)) {
		mnt = list_first_entry(&head, struct mount, mnt_hash);
		list_del_init(&mnt->mnt_hash);
		if (mnt->mnt_ex_mountpoint.mnt)
			path_put(&mnt->mnt_ex_mountpoint);
		mntput(&mnt->mnt);
	}
}
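
/*
 * namespace_unlock() is where lazily-detached mounts are finally
 * released: umount_tree() only moves its victims onto the "unmounted"
 * list, and the synchronize_rcu() above guarantees that no lockless
 * walker (e.g. __lookup_mnt() under rcu_read_lock()) can still be
 * inspecting a victim by the time the final mntput() runs.
 */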

static inline void namespace_lock(void)
{
	down_write(&namespace_sem);
}

/*
 * mount_lock must be held
 * namespace_sem must be held for write
 * how = 0 => just this tree, don't propagate
 * how = 1 => propagate; we know that nobody else has reference to any victims
 * how = 2 => lazy umount
 */
void umount_tree(struct mount *mnt, int how)
{
	LIST_HEAD(tmp_list);
	struct mount *p;

	for (p = mnt; p; p = next_mnt(p, mnt))
		list_move(&p->mnt_hash, &tmp_list);

	if (how)
		propagate_umount(&tmp_list);

	list_for_each_entry(p, &tmp_list, mnt_hash) {
		list_del_init(&p->mnt_expire);
		list_del_init(&p->mnt_list);
		__touch_mnt_namespace(p->mnt_ns);
		p->mnt_ns = NULL;
		if (how < 2)
			p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
		list_del_init(&p->mnt_child);
		if (mnt_has_parent(p)) {
			put_mountpoint(p->mnt_mp);
			/* move the reference to mountpoint into ->mnt_ex_mountpoint */
			p->mnt_ex_mountpoint.dentry = p->mnt_mountpoint;
			p->mnt_ex_mountpoint.mnt = &p->mnt_parent->mnt;
			p->mnt_mountpoint = p->mnt.mnt_root;
			p->mnt_parent = p;
			p->mnt_mp = NULL;
		}
		change_mnt_propagation(p, MS_PRIVATE);
	}
	list_splice(&tmp_list, &unmounted);
}

static void shrink_submounts(struct mount *mnt);

static int do_umount(struct mount *mnt, int flags)
{
	struct super_block *sb = mnt->mnt.mnt_sb;
	int retval;

	retval = security_sb_umount(&mnt->mnt, flags);
	if (retval)
		return retval;

	/*
	 * Allow userspace to request a mountpoint be expired rather than
	 * unmounting unconditionally. Unmount only happens if:
	 *  (1) the mark is already set (the mark is cleared by mntput())
	 *  (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
	 */
	if (flags & MNT_EXPIRE) {
		if (&mnt->mnt == current->fs->root.mnt ||
		    flags & (MNT_FORCE | MNT_DETACH))
			return -EINVAL;

		/*
		 * probably don't strictly need the lock here if we examined
		 * all race cases, but it's a slowpath.
		 */
		lock_mount_hash();
		if (mnt_get_count(mnt) != 2) {
			unlock_mount_hash();
			return -EBUSY;
		}
		unlock_mount_hash();

		if (!xchg(&mnt->mnt_expiry_mark, 1))
			return -EAGAIN;
	}

	/*
	 * If we may have to abort operations to get out of this
	 * mount, and they will themselves hold resources we must
	 * allow the fs to do things. In the Unix tradition of
	 * 'Gee, that's tricky, let's do it in userspace' the umount_begin
	 * might fail to complete on the first run through as other tasks
	 * must return, and the like. That's for the mount program to worry
	 * about for the moment.
	 */

	if (flags & MNT_FORCE && sb->s_op->umount_begin) {
		sb->s_op->umount_begin(sb);
	}

	/*
	 * No sense to grab the lock for this test, but test itself looks
	 * somewhat bogus. Suggestions for better replacement?
	 * Ho-hum... In principle, we might treat that as umount + switch
	 * to rootfs. GC would eventually take care of the old vfsmount.
	 * Actually it makes sense, especially if rootfs would contain a
	 * /reboot - static binary that would close all descriptors and
	 * call reboot(2). Then init(8) could umount root and exec /reboot.
	 */
	if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
		/*
		 * Special case for "unmounting" root ...
		 * we just try to remount it readonly.
		 */
		down_write(&sb->s_umount);
		if (!(sb->s_flags & MS_RDONLY))
			retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
		up_write(&sb->s_umount);
		return retval;
	}

	namespace_lock();
	lock_mount_hash();
	event++;

	if (flags & MNT_DETACH) {
		if (!list_empty(&mnt->mnt_list))
			umount_tree(mnt, 2);
		retval = 0;
	} else {
		shrink_submounts(mnt);
		retval = -EBUSY;
		if (!propagate_mount_busy(mnt, 2)) {
			if (!list_empty(&mnt->mnt_list))
				umount_tree(mnt, 1);
			retval = 0;
		}
	}
	unlock_mount_hash();
	namespace_unlock();
	return retval;
}

/*
 * Is the caller allowed to modify his namespace?
 */
static inline bool may_mount(void)
{
	return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN);
}

/*
 * Now umount can handle mount points as well as block devices.
 * This is important for filesystems which use unnamed block devices.
 *
 * We now support a flag for forced unmount like the other 'big iron'
 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
 */

SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
{
	struct path path;
	struct mount *mnt;
	int retval;
	int lookup_flags = 0;

	if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
		return -EINVAL;

	if (!may_mount())
		return -EPERM;

	if (!(flags & UMOUNT_NOFOLLOW))
		lookup_flags |= LOOKUP_FOLLOW;

	retval = user_path_mountpoint_at(AT_FDCWD, name, lookup_flags, &path);
	if (retval)
		goto out;
	mnt = real_mount(path.mnt);
	retval = -EINVAL;
	if (path.dentry != path.mnt->mnt_root)
		goto dput_and_out;
	if (!check_mnt(mnt))
		goto dput_and_out;
	if (mnt->mnt.mnt_flags & MNT_LOCKED)
		goto dput_and_out;

	retval = do_umount(mnt, flags);
dput_and_out:
	/* we mustn't call path_put() as that would clear mnt_expiry_mark */
	dput(path.dentry);
	mntput_no_expire(mnt);
out:
	return retval;
}

#ifdef __ARCH_WANT_SYS_OLDUMOUNT

/*
 * The 2.0 compatible umount. No flags.
 */
SYSCALL_DEFINE1(oldumount, char __user *, name)
{
	return sys_umount(name, 0);
}

#endif

static bool is_mnt_ns_file(struct dentry *dentry)
{
	/* Is this a proxy for a mount namespace? */
	struct inode *inode = dentry->d_inode;
	struct proc_ns *ei;

	if (!proc_ns_inode(inode))
		return false;

	ei = get_proc_ns(inode);
	if (ei->ns_ops != &mntns_operations)
		return false;

	return true;
}

static bool mnt_ns_loop(struct dentry *dentry)
{
	/* Could bind mounting the mount namespace inode cause a
	 * mount namespace loop?
	 */
	struct mnt_namespace *mnt_ns;
	if (!is_mnt_ns_file(dentry))
		return false;

	mnt_ns = get_proc_ns(dentry->d_inode)->ns;
	return current->nsproxy->mnt_ns->seq >= mnt_ns->seq;
}

struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
					int flag)
{
	struct mount *res, *p, *q, *r, *parent;

	if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(mnt))
		return ERR_PTR(-EINVAL);

	if (!(flag & CL_COPY_MNT_NS_FILE) && is_mnt_ns_file(dentry))
		return ERR_PTR(-EINVAL);

	res = q = clone_mnt(mnt, dentry, flag);
	if (IS_ERR(q))
		return q;

	q->mnt.mnt_flags &= ~MNT_LOCKED;
	q->mnt_mountpoint = mnt->mnt_mountpoint;

	p = mnt;
	list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
		struct mount *s;
		if (!is_subdir(r->mnt_mountpoint, dentry))
			continue;

		for (s = r; s; s = next_mnt(s, r)) {
			if (!(flag & CL_COPY_UNBINDABLE) &&
			    IS_MNT_UNBINDABLE(s)) {
				s = skip_mnt_tree(s);
				continue;
			}
			if (!(flag & CL_COPY_MNT_NS_FILE) &&
			    is_mnt_ns_file(s->mnt.mnt_root)) {
				s = skip_mnt_tree(s);
				continue;
			}
			while (p != s->mnt_parent) {
				p = p->mnt_parent;
				q = q->mnt_parent;
			}
			p = s;
			parent = q;
			q = clone_mnt(p, p->mnt.mnt_root, flag);
			if (IS_ERR(q))
				goto out;
			lock_mount_hash();
			list_add_tail(&q->mnt_list, &res->mnt_list);
			attach_mnt(q, parent, p->mnt_mp);
			unlock_mount_hash();
		}
	}
	return res;
out:
	if (res) {
		lock_mount_hash();
		umount_tree(res, 0);
		unlock_mount_hash();
	}
	return q;
}

/* Caller should check returned pointer for errors */

struct vfsmount *collect_mounts(struct path *path)
{
	struct mount *tree;
	namespace_lock();
	tree = copy_tree(real_mount(path->mnt), path->dentry,
			 CL_COPY_ALL | CL_PRIVATE);
	namespace_unlock();
	if (IS_ERR(tree))
		return ERR_CAST(tree);
	return &tree->mnt;
}

void drop_collected_mounts(struct vfsmount *mnt)
{
	namespace_lock();
	lock_mount_hash();
	umount_tree(real_mount(mnt), 0);
	unlock_mount_hash();
	namespace_unlock();
}

int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
		   struct vfsmount *root)
{
	struct mount *mnt;
	int res = f(root, arg);
	if (res)
		return res;
	list_for_each_entry(mnt, &real_mount(root)->mnt_list, mnt_list) {
		res = f(&mnt->mnt, arg);
		if (res)
			return res;
	}
	return 0;
}
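
/*
 * A minimal iterate_mounts() usage sketch ("count_one" is a hypothetical
 * callback, not part of this file).  A non-zero return from the callback
 * stops the walk and is propagated to the caller:
 *
 *	static int count_one(struct vfsmount *mnt, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return 0;
 *	}
 *
 *	int n = 0;
 *	int err = iterate_mounts(count_one, &n, collected);
 *
 * where "collected" would typically come from collect_mounts() above.
 */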

static void cleanup_group_ids(struct mount *mnt, struct mount *end)
{
	struct mount *p;

	for (p = mnt; p != end; p = next_mnt(p, mnt)) {
		if (p->mnt_group_id && !IS_MNT_SHARED(p))
			mnt_release_group_id(p);
	}
}

static int invent_group_ids(struct mount *mnt, bool recurse)
{
	struct mount *p;

	for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
		if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
			int err = mnt_alloc_group_id(p);
			if (err) {
				cleanup_group_ids(mnt, p);
				return err;
			}
		}
	}

	return 0;
}

/*
 *  @source_mnt : mount tree to be attached
 *  @nd         : place the mount tree @source_mnt is attached
 *  @parent_nd  : if non-null, detach the source_mnt from its parent and
 *                store the parent mount and mountpoint dentry.
 *                (done when source_mnt is moved)
 *
 *  NOTE: the tables below explain the semantics when a source mount
 *  of a given type is attached to a destination mount of a given type.
 * ---------------------------------------------------------------------------
 * |         BIND MOUNT OPERATION                                            |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   |      |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (++)   |     shared (+) |     shared(+++)|  invalid   |
 * |          |               |                |                |            |
 * |non-shared| shared (+)    |      private   |      slave (*) |  invalid   |
 * ***************************************************************************
 * A bind operation clones the source mount and mounts the clone on the
 * destination mount.
 *
 * (++)  the cloned mount is propagated to all the mounts in the propagation
 *       tree of the destination mount and the cloned mount is added to
 *       the peer group of the source mount.
 * (+)   the cloned mount is created under the destination mount and is marked
 *       as shared. The cloned mount is added to the peer group of the source
 *       mount.
 * (+++) the mount is propagated to all the mounts in the propagation tree
 *       of the destination mount and the cloned mount is made slave
 *       of the same master as that of the source mount. The cloned mount
 *       is marked as 'shared and slave'.
 * (*)   the cloned mount is made a slave of the same master as that of the
 *       source mount.
 *
 * ---------------------------------------------------------------------------
 * |         MOVE MOUNT OPERATION                                            |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   |      |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (+)    |     shared (+) |    shared(+++) |  invalid   |
 * |          |               |                |                |            |
 * |non-shared| shared (+*)   |      private   |    slave (*)   | unbindable |
 * ***************************************************************************
 *
 * (+)   the mount is moved to the destination. And is then propagated to
 *       all the mounts in the propagation tree of the destination mount.
 * (+*)  the mount is moved to the destination.
 * (+++) the mount is moved to the destination and is then propagated to
 *       all the mounts belonging to the destination mount's propagation tree.
 *       the mount is marked as 'shared and slave'.
 * (*)   the mount continues to be a slave at the new location.
 *
 * if the source mount is a tree, the operations explained above are
 * applied to each mount in the tree.
 * Must be called without spinlocks held, since this function can sleep
 * in allocations.
 */
static int attach_recursive_mnt(struct mount *source_mnt,
			struct mount *dest_mnt,
			struct mountpoint *dest_mp,
			struct path *parent_path)
{
	LIST_HEAD(tree_list);
	struct mount *child, *p;
	int err;

	if (IS_MNT_SHARED(dest_mnt)) {
		err = invent_group_ids(source_mnt, true);
		if (err)
			goto out;
	}
	err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list);
	if (err)
		goto out_cleanup_ids;

	lock_mount_hash();

	if (IS_MNT_SHARED(dest_mnt)) {
		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
			set_mnt_shared(p);
	}
	if (parent_path) {
		detach_mnt(source_mnt, parent_path);
		attach_mnt(source_mnt, dest_mnt, dest_mp);
		touch_mnt_namespace(source_mnt->mnt_ns);
	} else {
		mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt);
		commit_tree(source_mnt, NULL);
	}

	list_for_each_entry_safe(child, p, &tree_list, mnt_hash) {
		struct mount *q;
		list_del_init(&child->mnt_hash);
		q = __lookup_mnt_last(&child->mnt_parent->mnt,
				      child->mnt_mountpoint);
		commit_tree(child, q);
	}
	unlock_mount_hash();

	return 0;

out_cleanup_ids:
	if (IS_MNT_SHARED(dest_mnt))
		cleanup_group_ids(source_mnt, NULL);
out:
	return err;
}

static struct mountpoint *lock_mount(struct path *path)
{
	struct vfsmount *mnt;
	struct dentry *dentry = path->dentry;
retry:
	mutex_lock(&dentry->d_inode->i_mutex);
	if (unlikely(cant_mount(dentry))) {
		mutex_unlock(&dentry->d_inode->i_mutex);
		return ERR_PTR(-ENOENT);
	}
	namespace_lock();
	mnt = lookup_mnt(path);
	if (likely(!mnt)) {
		struct mountpoint *mp = new_mountpoint(dentry);
		if (IS_ERR(mp)) {
			namespace_unlock();
			mutex_unlock(&dentry->d_inode->i_mutex);
			return mp;
		}
		return mp;
	}
	namespace_unlock();
	mutex_unlock(&path->dentry->d_inode->i_mutex);
	path_put(path);
	path->mnt = mnt;
	dentry = path->dentry = dget(mnt->mnt_root);
	goto retry;
}
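
/*
 * The retry loop in lock_mount() climbs to the top of any mount stack
 * already present at @path: if lookup_mnt() finds a mount there, the
 * locks are dropped, @path is moved onto that mount's root, and the
 * whole sequence is retried, so a new mount always lands on top of the
 * stack.  On success we return with both the mountpoint dentry's i_mutex
 * and namespace_sem held; unlock_mount() below undoes both.
 */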

static void unlock_mount(struct mountpoint *where)
{
	struct dentry *dentry = where->m_dentry;
	put_mountpoint(where);
	namespace_unlock();
	mutex_unlock(&dentry->d_inode->i_mutex);
}

static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp)
{
	if (mnt->mnt.mnt_sb->s_flags & MS_NOUSER)
		return -EINVAL;

	if (S_ISDIR(mp->m_dentry->d_inode->i_mode) !=
	      S_ISDIR(mnt->mnt.mnt_root->d_inode->i_mode))
		return -ENOTDIR;

	return attach_recursive_mnt(mnt, p, mp, NULL);
}

/*
 * Sanity check the flags to change_mnt_propagation.
 */

static int flags_to_propagation_type(int flags)
{
	int type = flags & ~(MS_REC | MS_SILENT);

	/* Fail if any non-propagation flags are set */
	if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
		return 0;
	/* Only one propagation flag should be set */
	if (!is_power_of_2(type))
		return 0;
	return type;
}
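
/*
 * Worked example: for mount(2) with flags == MS_SHARED | MS_REC (i.e.
 * "mount --make-rshared"), MS_REC is masked off and the single remaining
 * propagation bit passes the is_power_of_2() check, so MS_SHARED is
 * returned.  Something like MS_SHARED | MS_SLAVE has two propagation
 * bits set, fails that check, and yields 0, which do_change_type()
 * below turns into -EINVAL.
 */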

/*
 * recursively change the type of the mountpoint.
 */
static int do_change_type(struct path *path, int flag)
{
	struct mount *m;
	struct mount *mnt = real_mount(path->mnt);
	int recurse = flag & MS_REC;
	int type;
	int err = 0;

	if (path->dentry != path->mnt->mnt_root)
		return -EINVAL;

	type = flags_to_propagation_type(flag);
	if (!type)
		return -EINVAL;

	namespace_lock();
	if (type == MS_SHARED) {
		err = invent_group_ids(mnt, recurse);
		if (err)
			goto out_unlock;
	}

	lock_mount_hash();
	for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
		change_mnt_propagation(m, type);
	unlock_mount_hash();

out_unlock:
	namespace_unlock();
	return err;
}

static bool has_locked_children(struct mount *mnt, struct dentry *dentry)
{
	struct mount *child;
	list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
		if (!is_subdir(child->mnt_mountpoint, dentry))
			continue;

		if (child->mnt.mnt_flags & MNT_LOCKED)
			return true;
	}
	return false;
}

/*
 * do loopback mount.
 */
static int do_loopback(struct path *path, const char *old_name,
				int recurse)
{
	struct path old_path;
	struct mount *mnt = NULL, *old, *parent;
	struct mountpoint *mp;
	int err;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path);
	if (err)
		return err;

	err = -EINVAL;
	if (mnt_ns_loop(old_path.dentry))
		goto out;

	mp = lock_mount(path);
	err = PTR_ERR(mp);
	if (IS_ERR(mp))
		goto out;

	old = real_mount(old_path.mnt);
	parent = real_mount(path->mnt);

	err = -EINVAL;
	if (IS_MNT_UNBINDABLE(old))
		goto out2;

	if (!check_mnt(parent) || !check_mnt(old))
		goto out2;

	if (!recurse && has_locked_children(old, old_path.dentry))
		goto out2;

	if (recurse)
		mnt = copy_tree(old, old_path.dentry, CL_COPY_MNT_NS_FILE);
	else
		mnt = clone_mnt(old, old_path.dentry, 0);

	if (IS_ERR(mnt)) {
		err = PTR_ERR(mnt);
		goto out2;
	}

	mnt->mnt.mnt_flags &= ~MNT_LOCKED;

	err = graft_tree(mnt, parent, mp);
	if (err) {
		lock_mount_hash();
		umount_tree(mnt, 0);
		unlock_mount_hash();
	}
out2:
	unlock_mount(mp);
out:
	path_put(&old_path);
	return err;
}

static int change_mount_flags(struct vfsmount *mnt, int ms_flags)
{
	int error = 0;
	int readonly_request = 0;

	if (ms_flags & MS_RDONLY)
		readonly_request = 1;
	if (readonly_request == __mnt_is_readonly(mnt))
		return 0;

	if (mnt->mnt_flags & MNT_LOCK_READONLY)
		return -EPERM;

	if (readonly_request)
		error = mnt_make_readonly(real_mount(mnt));
	else
		__mnt_unmake_readonly(real_mount(mnt));
	return error;
}

/*
 * change filesystem flags. dir should be a physical root of filesystem.
 * If you've mounted a non-root directory somewhere and want to do remount
 * on it - tough luck.
 */
static int do_remount(struct path *path, int flags, int mnt_flags,
		      void *data)
{
	int err;
	struct super_block *sb = path->mnt->mnt_sb;
	struct mount *mnt = real_mount(path->mnt);

	if (!check_mnt(mnt))
		return -EINVAL;

	if (path->dentry != path->mnt->mnt_root)
		return -EINVAL;

	err = security_sb_remount(sb, data);
	if (err)
		return err;

	down_write(&sb->s_umount);
	if (flags & MS_BIND)
		err = change_mount_flags(path->mnt, flags);
	else if (!capable(CAP_SYS_ADMIN))
		err = -EPERM;
	else
		err = do_remount_sb(sb, flags, data, 0);
	if (!err) {
		lock_mount_hash();
		mnt_flags |= mnt->mnt.mnt_flags & MNT_PROPAGATION_MASK;
		mnt->mnt.mnt_flags = mnt_flags;
		touch_mnt_namespace(mnt->mnt_ns);
		unlock_mount_hash();
	}
	up_write(&sb->s_umount);
	return err;
}

static inline int tree_contains_unbindable(struct mount *mnt)
{
	struct mount *p;
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		if (IS_MNT_UNBINDABLE(p))
			return 1;
	}
	return 0;
}

static int do_move_mount(struct path *path, const char *old_name)
{
	struct path old_path, parent_path;
	struct mount *p;
	struct mount *old;
	struct mountpoint *mp;
	int err;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = kern_path(old_name, LOOKUP_FOLLOW, &old_path);
	if (err)
		return err;

	mp = lock_mount(path);
	err = PTR_ERR(mp);
	if (IS_ERR(mp))
		goto out;

	old = real_mount(old_path.mnt);
	p = real_mount(path->mnt);

	err = -EINVAL;
	if (!check_mnt(p) || !check_mnt(old))
		goto out1;

	if (old->mnt.mnt_flags & MNT_LOCKED)
		goto out1;

	err = -EINVAL;
	if (old_path.dentry != old_path.mnt->mnt_root)
		goto out1;

	if (!mnt_has_parent(old))
		goto out1;

	if (S_ISDIR(path->dentry->d_inode->i_mode) !=
	      S_ISDIR(old_path.dentry->d_inode->i_mode))
		goto out1;
	/*
	 * Don't move a mount residing in a shared parent.
	 */
	if (IS_MNT_SHARED(old->mnt_parent))
		goto out1;
	/*
	 * Don't move a mount tree containing unbindable mounts to a destination
	 * mount which is shared.
	 */
	if (IS_MNT_SHARED(p) && tree_contains_unbindable(old))
		goto out1;
	err = -ELOOP;
	for (; mnt_has_parent(p); p = p->mnt_parent)
		if (p == old)
			goto out1;

	err = attach_recursive_mnt(old, real_mount(path->mnt), mp, &parent_path);
	if (err)
		goto out1;

	/* if the mount is moved, it should no longer expire
	 * automatically */
	list_del_init(&old->mnt_expire);
out1:
	unlock_mount(mp);
out:
	if (!err)
		path_put(&parent_path);
	path_put(&old_path);
	return err;
}

static struct vfsmount *fs_set_subtype(struct vfsmount *mnt, const char *fstype)
{
	int err;
	const char *subtype = strchr(fstype, '.');
	if (subtype) {
		subtype++;
		err = -EINVAL;
		if (!subtype[0])
			goto err;
	} else
		subtype = "";

	mnt->mnt_sb->s_subtype = kstrdup(subtype, GFP_KERNEL);
	err = -ENOMEM;
	if (!mnt->mnt_sb->s_subtype)
		goto err;
	return mnt;

err:
	mntput(mnt);
	return ERR_PTR(err);
}

/*
 * add a mount into a namespace's mount tree
 */
static int do_add_mount(struct mount *newmnt, struct path *path, int mnt_flags)
{
	struct mountpoint *mp;
	struct mount *parent;
	int err;

	mnt_flags &= ~(MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL | MNT_DOOMED | MNT_SYNC_UMOUNT);

	mp = lock_mount(path);
	if (IS_ERR(mp))
		return PTR_ERR(mp);

	parent = real_mount(path->mnt);
	err = -EINVAL;
	if (unlikely(!check_mnt(parent))) {
		/* that's acceptable only for automounts done in private ns */
		if (!(mnt_flags & MNT_SHRINKABLE))
			goto unlock;
		/* ... and for those we'd better have mountpoint still alive */
		if (!parent->mnt_ns)
			goto unlock;
	}

	/* Refuse the same filesystem on the same mount point */
	err = -EBUSY;
	if (path->mnt->mnt_sb == newmnt->mnt.mnt_sb &&
	    path->mnt->mnt_root == path->dentry)
		goto unlock;

	err = -EINVAL;
	if (S_ISLNK(newmnt->mnt.mnt_root->d_inode->i_mode))
		goto unlock;

	newmnt->mnt.mnt_flags = mnt_flags;
	err = graft_tree(newmnt, parent, mp);

unlock:
	unlock_mount(mp);
	return err;
}
2077
2078 /*
2079 * create a new mount for userspace and request it to be added into the
2080 * namespace's tree
2081 */
2082 static int do_new_mount(struct path *path, const char *fstype, int flags,
2083 int mnt_flags, const char *name, void *data)
2084 {
2085 struct file_system_type *type;
2086 struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
2087 struct vfsmount *mnt;
2088 int err;
2089
2090 if (!fstype)
2091 return -EINVAL;
2092
2093 type = get_fs_type(fstype);
2094 if (!type)
2095 return -ENODEV;
2096
2097 if (user_ns != &init_user_ns) {
2098 if (!(type->fs_flags & FS_USERNS_MOUNT)) {
2099 put_filesystem(type);
2100 return -EPERM;
2101 }
2102 /* Only in special cases do we allow devices from mounts
2103 * created outside the initial user namespace.
2104 */
2105 if (!(type->fs_flags & FS_USERNS_DEV_MOUNT)) {
2106 flags |= MS_NODEV;
2107 mnt_flags |= MNT_NODEV;
2108 }
2109 }
2110
2111 mnt = vfs_kern_mount(type, flags, name, data);
2112 if (!IS_ERR(mnt) && (type->fs_flags & FS_HAS_SUBTYPE) &&
2113 !mnt->mnt_sb->s_subtype)
2114 mnt = fs_set_subtype(mnt, fstype);
2115
2116 put_filesystem(type);
2117 if (IS_ERR(mnt))
2118 return PTR_ERR(mnt);
2119
2120 err = do_add_mount(real_mount(mnt), path, mnt_flags);
2121 if (err)
2122 mntput(mnt);
2123 return err;
2124 }
2125
2126 int finish_automount(struct vfsmount *m, struct path *path)
2127 {
2128 struct mount *mnt = real_mount(m);
2129 int err;
2130 /* The new mount record should have at least 2 refs to prevent it from
2131 * being expired before we get a chance to add it
2132 */
2133 BUG_ON(mnt_get_count(mnt) < 2);
2134
2135 if (m->mnt_sb == path->mnt->mnt_sb &&
2136 m->mnt_root == path->dentry) {
2137 err = -ELOOP;
2138 goto fail;
2139 }
2140
2141 err = do_add_mount(mnt, path, path->mnt->mnt_flags | MNT_SHRINKABLE);
2142 if (!err)
2143 return 0;
2144 fail:
2145 /* remove m from any expiration list it may be on */
2146 if (!list_empty(&mnt->mnt_expire)) {
2147 namespace_lock();
2148 list_del_init(&mnt->mnt_expire);
2149 namespace_unlock();
2150 }
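	/* drop both of the references pinning the mount (see the BUG_ON
	 * above) so that it is torn down */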
2151 mntput(m);
2152 mntput(m);
2153 return err;
2154 }
2155
2156 /**
2157 * mnt_set_expiry - Put a mount on an expiration list
2158 * @mnt: The mount to list.
2159 * @expiry_list: The list to add the mount to.
2160 */
2161 void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
2162 {
2163 namespace_lock();
2164
2165 list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list);
2166
2167 namespace_unlock();
2168 }
2169 EXPORT_SYMBOL(mnt_set_expiry);
2170
2171 /*
2172 * process a list of expirable mountpoints with the intent of discarding any
2173 * mountpoints that aren't in use and haven't been touched since last we came
2174 * here
2175 */
2176 void mark_mounts_for_expiry(struct list_head *mounts)
2177 {
2178 struct mount *mnt, *next;
2179 LIST_HEAD(graveyard);
2180
2181 if (list_empty(mounts))
2182 return;
2183
2184 namespace_lock();
2185 lock_mount_hash();
2186
2187 /* extract from the expiration list every vfsmount that matches the
2188 * following criteria:
2189 * - only referenced by its parent vfsmount
2190 * - still marked for expiry (marked on the last call here; marks are
2191 * cleared by mntput())
2192 */
2193 list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
2194 if (!xchg(&mnt->mnt_expiry_mark, 1) ||
2195 propagate_mount_busy(mnt, 1))
2196 continue;
2197 list_move(&mnt->mnt_expire, &graveyard);
2198 }
2199 while (!list_empty(&graveyard)) {
2200 mnt = list_first_entry(&graveyard, struct mount, mnt_expire);
2201 touch_mnt_namespace(mnt->mnt_ns);
2202 umount_tree(mnt, 1);
2203 }
2204 unlock_mount_hash();
2205 namespace_unlock();
2206 }
2207
2208 EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);
2209
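/*
 * Editor's sketch, not part of this file: how a filesystem might drive the
 * expiry machinery above.  Loosely modelled on AFS-style automounts; all
 * example_* names are hypothetical, and <linux/workqueue.h> is assumed.
 * Submounts go on a private list via mnt_set_expiry(), and a periodic
 * worker calls mark_mounts_for_expiry(): the first pass marks a mount,
 * the second pass reaps it if it is still unused.
 */
#if 0
static LIST_HEAD(example_automount_list);
static void example_expiry_worker(struct work_struct *work);
static DECLARE_DELAYED_WORK(example_expiry_work, example_expiry_worker);

static void example_expiry_worker(struct work_struct *work)
{
	mark_mounts_for_expiry(&example_automount_list);
	schedule_delayed_work(&example_expiry_work, 10 * 60 * HZ);
}

/* called once a freshly created submount has been attached */
static void example_add_automount(struct vfsmount *mnt)
{
	mnt_set_expiry(mnt, &example_automount_list);
	schedule_delayed_work(&example_expiry_work, 10 * 60 * HZ);
}
#endif
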
2210 /*
2211 * Ripoff of 'select_parent()'
2212 *
2213 * search the list of submounts for a given mountpoint, and move any
2214 * shrinkable submounts to the 'graveyard' list.
2215 */
2216 static int select_submounts(struct mount *parent, struct list_head *graveyard)
2217 {
2218 struct mount *this_parent = parent;
2219 struct list_head *next;
2220 int found = 0;
2221
2222 repeat:
2223 next = this_parent->mnt_mounts.next;
2224 resume:
2225 while (next != &this_parent->mnt_mounts) {
2226 struct list_head *tmp = next;
2227 struct mount *mnt = list_entry(tmp, struct mount, mnt_child);
2228
2229 next = tmp->next;
2230 if (!(mnt->mnt.mnt_flags & MNT_SHRINKABLE))
2231 continue;
2232 /*
2233 * Descend a level if the mnt_mounts list is non-empty.
2234 */
2235 if (!list_empty(&mnt->mnt_mounts)) {
2236 this_parent = mnt;
2237 goto repeat;
2238 }
2239
2240 if (!propagate_mount_busy(mnt, 1)) {
2241 list_move_tail(&mnt->mnt_expire, graveyard);
2242 found++;
2243 }
2244 }
2245 /*
2246 * All done at this level ... ascend and resume the search
2247 */
2248 if (this_parent != parent) {
2249 next = this_parent->mnt_child.next;
2250 this_parent = this_parent->mnt_parent;
2251 goto resume;
2252 }
2253 return found;
2254 }
2255
2256 /*
2257 * process a list of expirable mountpoints with the intent of discarding any
2258 * submounts of a specific parent mountpoint
2259 *
2260 * mount_lock must be held for write
2261 */
2262 static void shrink_submounts(struct mount *mnt)
2263 {
2264 LIST_HEAD(graveyard);
2265 struct mount *m;
2266
2267 /* extract submounts of 'mountpoint' from the expiration list */
2268 while (select_submounts(mnt, &graveyard)) {
2269 while (!list_empty(&graveyard)) {
2270 m = list_first_entry(&graveyard, struct mount,
2271 mnt_expire);
2272 touch_mnt_namespace(m->mnt_ns);
2273 umount_tree(m, 1);
2274 }
2275 }
2276 }
2277
2278 /*
2279 * Some copy_from_user() implementations do not return the exact number of
2280 * bytes remaining to copy on a fault. But copy_mount_options() requires that.
2281 * Note that this function differs from copy_from_user() in that it will oops
2282 * on bad values of `to', rather than returning a short copy.
2283 */
2284 static long exact_copy_from_user(void *to, const void __user * from,
2285 unsigned long n)
2286 {
2287 char *t = to;
2288 const char __user *f = from;
2289 char c;
2290
2291 if (!access_ok(VERIFY_READ, from, n))
2292 return n;
2293
2294 while (n) {
2295 if (__get_user(c, f)) {
2296 memset(t, 0, n);
2297 break;
2298 }
2299 *t++ = c;
2300 f++;
2301 n--;
2302 }
2303 return n;
2304 }
2305
2306 int copy_mount_options(const void __user * data, unsigned long *where)
2307 {
2308 int i;
2309 unsigned long page;
2310 unsigned long size;
2311
2312 *where = 0;
2313 if (!data)
2314 return 0;
2315
2316 if (!(page = __get_free_page(GFP_KERNEL)))
2317 return -ENOMEM;
2318
2319 /* We only care that *some* data at the address the user
2320 * gave us is valid. Just in case, we'll zero
2321 * the remainder of the page.
2322 */
2323 /* copy_from_user cannot cross TASK_SIZE! */
2324 size = TASK_SIZE - (unsigned long)data;
2325 if (size > PAGE_SIZE)
2326 size = PAGE_SIZE;
2327
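	/* exact_copy_from_user() returns the bytes *not* copied, so i is the
	 * exact number copied; copy_from_user()'s possibly inexact remainder
	 * would make the tail-zeroing below unsafe */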
2328 i = size - exact_copy_from_user((void *)page, data, size);
2329 if (!i) {
2330 free_page(page);
2331 return -EFAULT;
2332 }
2333 if (i != PAGE_SIZE)
2334 memset((char *)page + i, 0, PAGE_SIZE - i);
2335 *where = page;
2336 return 0;
2337 }
2338
2339 int copy_mount_string(const void __user *data, char **where)
2340 {
2341 char *tmp;
2342
2343 if (!data) {
2344 *where = NULL;
2345 return 0;
2346 }
2347
2348 tmp = strndup_user(data, PAGE_SIZE);
2349 if (IS_ERR(tmp))
2350 return PTR_ERR(tmp);
2351
2352 *where = tmp;
2353 return 0;
2354 }
2355
2356 /*
2357 * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
2358 * be given to the mount() call (ie: read-only, no-dev, no-suid etc).
2359 *
2360 * data is a (void *) that can point to any structure up to
2361 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
2362 * information (or be NULL).
2363 *
2364 * Pre-0.97 versions of mount() didn't have a flags word.
2365 * When the flags word was introduced its top half was required
2366 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
2367 * Therefore, if this magic number is present, it carries no information
2368 * and must be discarded.
2369 */
2370 long do_mount(const char *dev_name, const char *dir_name,
2371 const char *type_page, unsigned long flags, void *data_page)
2372 {
2373 struct path path;
2374 int retval = 0;
2375 int mnt_flags = 0;
2376
2377 /* Discard magic */
2378 if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
2379 flags &= ~MS_MGC_MSK;
2380
2381 /* Basic sanity checks */
2382
2383 if (!dir_name || !*dir_name || !memchr(dir_name, 0, PAGE_SIZE))
2384 return -EINVAL;
2385
2386 if (data_page)
2387 ((char *)data_page)[PAGE_SIZE - 1] = 0;
2388
2389 /* ... and get the mountpoint */
2390 retval = kern_path(dir_name, LOOKUP_FOLLOW, &path);
2391 if (retval)
2392 return retval;
2393
2394 retval = security_sb_mount(dev_name, &path,
2395 type_page, flags, data_page);
2396 if (!retval && !may_mount())
2397 retval = -EPERM;
2398 if (retval)
2399 goto dput_out;
2400
2401 /* Default to relatime unless overridden */
2402 if (!(flags & MS_NOATIME))
2403 mnt_flags |= MNT_RELATIME;
2404
2405 /* Separate the per-mountpoint flags */
2406 if (flags & MS_NOSUID)
2407 mnt_flags |= MNT_NOSUID;
2408 if (flags & MS_NODEV)
2409 mnt_flags |= MNT_NODEV;
2410 if (flags & MS_NOEXEC)
2411 mnt_flags |= MNT_NOEXEC;
2412 if (flags & MS_NOATIME)
2413 mnt_flags |= MNT_NOATIME;
2414 if (flags & MS_NODIRATIME)
2415 mnt_flags |= MNT_NODIRATIME;
2416 if (flags & MS_STRICTATIME)
2417 mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME);
2418 if (flags & MS_RDONLY)
2419 mnt_flags |= MNT_READONLY;
2420
2421 flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | MS_BORN |
2422 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
2423 MS_STRICTATIME);
2424
2425 if (flags & MS_REMOUNT)
2426 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
2427 data_page);
2428 else if (flags & MS_BIND)
2429 retval = do_loopback(&path, dev_name, flags & MS_REC);
2430 else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
2431 retval = do_change_type(&path, flags);
2432 else if (flags & MS_MOVE)
2433 retval = do_move_mount(&path, dev_name);
2434 else
2435 retval = do_new_mount(&path, type_page, flags, mnt_flags,
2436 dev_name, data_page);
2437 dput_out:
2438 path_put(&path);
2439 return retval;
2440 }
2441
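/*
 * Editor's illustration, not part of this file: a legacy-style mount(2)
 * call.  The 0xC0ED magic in the top half of the flags word is stripped
 * by do_mount() before the remaining flags are interpreted.
 */
#if 0
#include <stdio.h>
#include <sys/mount.h>

#ifndef MS_MGC_VAL
#define MS_MGC_VAL 0xC0ED0000
#endif

int main(void)
{
	/* equivalent to plain MS_RDONLY once the magic is discarded */
	if (mount("tmpfs", "/mnt", "tmpfs", MS_MGC_VAL | MS_RDONLY,
		  "size=16m") < 0) {
		perror("mount");
		return 1;
	}
	return 0;
}
#endif
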
2442 static void free_mnt_ns(struct mnt_namespace *ns)
2443 {
2444 proc_free_inum(ns->proc_inum);
2445 put_user_ns(ns->user_ns);
2446 kfree(ns);
2447 }
2448
2449 /*
2450 * Assign a sequence number so we can detect when we attempt to bind
2451 * mount a reference to an older mount namespace into the current
2452 * mount namespace, preventing reference counting loops. A 64bit
2453 * counter incrementing even at 1GHz takes about 585 years to wrap, and
2454 * namespaces are created far more slowly than that, so we can ignore the possibility.
2455 */
2456 static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
2457
2458 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
2459 {
2460 struct mnt_namespace *new_ns;
2461 int ret;
2462
2463 new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL);
2464 if (!new_ns)
2465 return ERR_PTR(-ENOMEM);
2466 ret = proc_alloc_inum(&new_ns->proc_inum);
2467 if (ret) {
2468 kfree(new_ns);
2469 return ERR_PTR(ret);
2470 }
2471 new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
2472 atomic_set(&new_ns->count, 1);
2473 new_ns->root = NULL;
2474 INIT_LIST_HEAD(&new_ns->list);
2475 init_waitqueue_head(&new_ns->poll);
2476 new_ns->event = 0;
2477 new_ns->user_ns = get_user_ns(user_ns);
2478 return new_ns;
2479 }
2480
2481 struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
2482 struct user_namespace *user_ns, struct fs_struct *new_fs)
2483 {
2484 struct mnt_namespace *new_ns;
2485 struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
2486 struct mount *p, *q;
2487 struct mount *old;
2488 struct mount *new;
2489 int copy_flags;
2490
2491 BUG_ON(!ns);
2492
2493 if (likely(!(flags & CLONE_NEWNS))) {
2494 get_mnt_ns(ns);
2495 return ns;
2496 }
2497
2498 old = ns->root;
2499
2500 new_ns = alloc_mnt_ns(user_ns);
2501 if (IS_ERR(new_ns))
2502 return new_ns;
2503
2504 namespace_lock();
2505 /* First pass: copy the tree topology */
2506 copy_flags = CL_COPY_UNBINDABLE | CL_EXPIRE;
2507 if (user_ns != ns->user_ns)
2508 copy_flags |= CL_SHARED_TO_SLAVE | CL_UNPRIVILEGED;
2509 new = copy_tree(old, old->mnt.mnt_root, copy_flags);
2510 if (IS_ERR(new)) {
2511 namespace_unlock();
2512 free_mnt_ns(new_ns);
2513 return ERR_CAST(new);
2514 }
2515 new_ns->root = new;
2516 list_add_tail(&new_ns->list, &new->mnt_list);
2517
2518 /*
2519 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
2520 * as belonging to new namespace. We have already acquired a private
2521 * fs_struct, so tsk->fs->lock is not needed.
2522 */
2523 p = old;
2524 q = new;
2525 while (p) {
2526 q->mnt_ns = new_ns;
2527 if (new_fs) {
2528 if (&p->mnt == new_fs->root.mnt) {
2529 new_fs->root.mnt = mntget(&q->mnt);
2530 rootmnt = &p->mnt;
2531 }
2532 if (&p->mnt == new_fs->pwd.mnt) {
2533 new_fs->pwd.mnt = mntget(&q->mnt);
2534 pwdmnt = &p->mnt;
2535 }
2536 }
2537 p = next_mnt(p, old);
2538 q = next_mnt(q, new);
2539 if (!q)
2540 break;
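		/* copy_tree() may have skipped some mounts in the old tree
		 * (e.g. ones that would pin a mount namespace); step p
		 * forward until it matches q again */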
2541 while (p->mnt.mnt_root != q->mnt.mnt_root)
2542 p = next_mnt(p, old);
2543 }
2544 namespace_unlock();
2545
2546 if (rootmnt)
2547 mntput(rootmnt);
2548 if (pwdmnt)
2549 mntput(pwdmnt);
2550
2551 return new_ns;
2552 }
2553
2554 /**
2555 * create_mnt_ns - creates a private namespace and adds a root filesystem
2556 * @mnt: pointer to the new root filesystem mountpoint
2557 */
2558 static struct mnt_namespace *create_mnt_ns(struct vfsmount *m)
2559 {
2560 struct mnt_namespace *new_ns = alloc_mnt_ns(&init_user_ns);
2561 if (!IS_ERR(new_ns)) {
2562 struct mount *mnt = real_mount(m);
2563 mnt->mnt_ns = new_ns;
2564 new_ns->root = mnt;
2565 list_add(&mnt->mnt_list, &new_ns->list);
2566 } else {
2567 mntput(m);
2568 }
2569 return new_ns;
2570 }
2571
2572 struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
2573 {
2574 struct mnt_namespace *ns;
2575 struct super_block *s;
2576 struct path path;
2577 int err;
2578
2579 ns = create_mnt_ns(mnt);
2580 if (IS_ERR(ns))
2581 return ERR_CAST(ns);
2582
2583 err = vfs_path_lookup(mnt->mnt_root, mnt,
2584 name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);
2585
2586 put_mnt_ns(ns);
2587
2588 if (err)
2589 return ERR_PTR(err);
2590
2591 /* trade a vfsmount reference for active sb one */
2592 s = path.mnt->mnt_sb;
2593 atomic_inc(&s->s_active);
2594 mntput(path.mnt);
2595 /* lock the sucker */
2596 down_write(&s->s_umount);
2597 /* ... and return the root of (sub)tree on it */
2598 return path.dentry;
2599 }
2600 EXPORT_SYMBOL(mount_subtree);
2601
2602 SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
2603 char __user *, type, unsigned long, flags, void __user *, data)
2604 {
2605 int ret;
2606 char *kernel_type;
2607 struct filename *kernel_dir;
2608 char *kernel_dev;
2609 unsigned long data_page;
2610
2611 ret = copy_mount_string(type, &kernel_type);
2612 if (ret < 0)
2613 goto out_type;
2614
2615 kernel_dir = getname(dir_name);
2616 if (IS_ERR(kernel_dir)) {
2617 ret = PTR_ERR(kernel_dir);
2618 goto out_dir;
2619 }
2620
2621 ret = copy_mount_string(dev_name, &kernel_dev);
2622 if (ret < 0)
2623 goto out_dev;
2624
2625 ret = copy_mount_options(data, &data_page);
2626 if (ret < 0)
2627 goto out_data;
2628
2629 ret = do_mount(kernel_dev, kernel_dir->name, kernel_type, flags,
2630 (void *) data_page);
2631
2632 free_page(data_page);
2633 out_data:
2634 kfree(kernel_dev);
2635 out_dev:
2636 putname(kernel_dir);
2637 out_dir:
2638 kfree(kernel_type);
2639 out_type:
2640 return ret;
2641 }
2642
2643 /*
2644 * Return true if path is reachable from root
2645 *
2646 * namespace_sem or mount_lock is held
2647 */
2648 bool is_path_reachable(struct mount *mnt, struct dentry *dentry,
2649 const struct path *root)
2650 {
2651 while (&mnt->mnt != root->mnt && mnt_has_parent(mnt)) {
2652 dentry = mnt->mnt_mountpoint;
2653 mnt = mnt->mnt_parent;
2654 }
2655 return &mnt->mnt == root->mnt && is_subdir(dentry, root->dentry);
2656 }
2657
2658 int path_is_under(struct path *path1, struct path *path2)
2659 {
2660 int res;
2661 read_seqlock_excl(&mount_lock);
2662 res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2);
2663 read_sequnlock_excl(&mount_lock);
2664 return res;
2665 }
2666 EXPORT_SYMBOL(path_is_under);
2667
2668 /*
2669 * pivot_root Semantics:
2670 * Moves the root file system of the current process to the directory put_old,
2671 * makes new_root as the new root file system of the current process, and sets
2672 * root/cwd of all processes which had them on the current root to new_root.
2673 *
2674 * Restrictions:
2675 * The new_root and put_old must be directories, and must not be on the
2676 * same file system as the current process root. The put_old must be
2677 * underneath new_root, i.e. adding a non-zero number of /.. to the string
2678 * pointed to by put_old must yield the same directory as new_root. No other
2679 * file system may be mounted on put_old. After all, new_root is a mountpoint.
2680 *
2681 * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem.
2682 * See Documentation/filesystems/ramfs-rootfs-initramfs.txt for alternatives
2683 * in this situation.
2684 *
2685 * Notes:
2686 * - we don't move root/cwd if they are not at the root (reason: if something
2687 * cared enough to change them, it's probably wrong to force them elsewhere)
2688 * - it's okay to pick a root that isn't the root of a file system, e.g.
2689 * /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
2690 * though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
2691 * first.
2692 */
2693 SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
2694 const char __user *, put_old)
2695 {
2696 struct path new, old, parent_path, root_parent, root;
2697 struct mount *new_mnt, *root_mnt, *old_mnt;
2698 struct mountpoint *old_mp, *root_mp;
2699 int error;
2700
2701 if (!may_mount())
2702 return -EPERM;
2703
2704 error = user_path_dir(new_root, &new);
2705 if (error)
2706 goto out0;
2707
2708 error = user_path_dir(put_old, &old);
2709 if (error)
2710 goto out1;
2711
2712 error = security_sb_pivotroot(&old, &new);
2713 if (error)
2714 goto out2;
2715
2716 get_fs_root(current->fs, &root);
2717 old_mp = lock_mount(&old);
2718 error = PTR_ERR(old_mp);
2719 if (IS_ERR(old_mp))
2720 goto out3;
2721
2722 error = -EINVAL;
2723 new_mnt = real_mount(new.mnt);
2724 root_mnt = real_mount(root.mnt);
2725 old_mnt = real_mount(old.mnt);
2726 if (IS_MNT_SHARED(old_mnt) ||
2727 IS_MNT_SHARED(new_mnt->mnt_parent) ||
2728 IS_MNT_SHARED(root_mnt->mnt_parent))
2729 goto out4;
2730 if (!check_mnt(root_mnt) || !check_mnt(new_mnt))
2731 goto out4;
2732 if (new_mnt->mnt.mnt_flags & MNT_LOCKED)
2733 goto out4;
2734 error = -ENOENT;
2735 if (d_unlinked(new.dentry))
2736 goto out4;
2737 error = -EBUSY;
2738 if (new_mnt == root_mnt || old_mnt == root_mnt)
2739 goto out4; /* loop, on the same file system */
2740 error = -EINVAL;
2741 if (root.mnt->mnt_root != root.dentry)
2742 goto out4; /* not a mountpoint */
2743 if (!mnt_has_parent(root_mnt))
2744 goto out4; /* not attached */
2745 root_mp = root_mnt->mnt_mp;
2746 if (new.mnt->mnt_root != new.dentry)
2747 goto out4; /* not a mountpoint */
2748 if (!mnt_has_parent(new_mnt))
2749 goto out4; /* not attached */
2750 /* make sure we can reach put_old from new_root */
2751 if (!is_path_reachable(old_mnt, old.dentry, &new))
2752 goto out4;
2753 root_mp->m_count++; /* pin it so it won't go away */
2754 lock_mount_hash();
2755 detach_mnt(new_mnt, &parent_path);
2756 detach_mnt(root_mnt, &root_parent);
2757 if (root_mnt->mnt.mnt_flags & MNT_LOCKED) {
2758 new_mnt->mnt.mnt_flags |= MNT_LOCKED;
2759 root_mnt->mnt.mnt_flags &= ~MNT_LOCKED;
2760 }
2761 /* mount old root on put_old */
2762 attach_mnt(root_mnt, old_mnt, old_mp);
2763 /* mount new_root on / */
2764 attach_mnt(new_mnt, real_mount(root_parent.mnt), root_mp);
2765 touch_mnt_namespace(current->nsproxy->mnt_ns);
2766 unlock_mount_hash();
2767 chroot_fs_refs(&root, &new);
2768 put_mountpoint(root_mp);
2769 error = 0;
2770 out4:
2771 unlock_mount(old_mp);
2772 if (!error) {
2773 path_put(&root_parent);
2774 path_put(&parent_path);
2775 }
2776 out3:
2777 path_put(&root);
2778 out2:
2779 path_put(&old);
2780 out1:
2781 path_put(&new);
2782 out0:
2783 return error;
2784 }
2785
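/*
 * Editor's illustration, not part of this file: the classic container-setup
 * use of pivot_root(2), per the Notes above.  /newroot and ./oldroot are
 * hypothetical paths; ./oldroot must already exist under the new root.
 */
#if 0
#include <stdio.h>
#include <unistd.h>
#include <sys/mount.h>
#include <sys/syscall.h>

int main(void)
{
	/* new_root must be a mount point, so bind it onto itself first */
	if (mount("/newroot", "/newroot", NULL, MS_BIND, NULL) < 0)
		return 1;
	if (chdir("/newroot") < 0)
		return 1;
	/* put_old must be underneath new_root */
	if (syscall(SYS_pivot_root, ".", "./oldroot") < 0) {
		perror("pivot_root");
		return 1;
	}
	chdir("/");
	/* detach the old root once nothing uses it any more */
	umount2("/oldroot", MNT_DETACH);
	return 0;
}
#endif
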
2786 static void __init init_mount_tree(void)
2787 {
2788 struct vfsmount *mnt;
2789 struct mnt_namespace *ns;
2790 struct path root;
2791 struct file_system_type *type;
2792
2793 type = get_fs_type("rootfs");
2794 if (!type)
2795 panic("Can't find rootfs type");
2796 mnt = vfs_kern_mount(type, 0, "rootfs", NULL);
2797 put_filesystem(type);
2798 if (IS_ERR(mnt))
2799 panic("Can't create rootfs");
2800
2801 ns = create_mnt_ns(mnt);
2802 if (IS_ERR(ns))
2803 panic("Can't allocate initial namespace");
2804
2805 init_task.nsproxy->mnt_ns = ns;
2806 get_mnt_ns(ns);
2807
2808 root.mnt = mnt;
2809 root.dentry = mnt->mnt_root;
2810
2811 set_fs_pwd(current->fs, &root);
2812 set_fs_root(current->fs, &root);
2813 }
2814
2815 void __init mnt_init(void)
2816 {
2817 unsigned u;
2818 int err;
2819
2820 mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),
2821 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
2822
2823 mount_hashtable = alloc_large_system_hash("Mount-cache",
2824 sizeof(struct list_head),
2825 mhash_entries, 19,
2826 0,
2827 &m_hash_shift, &m_hash_mask, 0, 0);
2828 mountpoint_hashtable = alloc_large_system_hash("Mountpoint-cache",
2829 sizeof(struct hlist_head),
2830 mphash_entries, 19,
2831 0,
2832 &mp_hash_shift, &mp_hash_mask, 0, 0);
2833
2834 if (!mount_hashtable || !mountpoint_hashtable)
2835 panic("Failed to allocate mount hash table\n");
2836
2837 for (u = 0; u <= m_hash_mask; u++)
2838 INIT_LIST_HEAD(&mount_hashtable[u]);
2839 for (u = 0; u <= mp_hash_mask; u++)
2840 INIT_HLIST_HEAD(&mountpoint_hashtable[u]);
2841
2842 kernfs_init();
2843
2844 err = sysfs_init();
2845 if (err)
2846 printk(KERN_WARNING "%s: sysfs_init error: %d\n",
2847 __func__, err);
2848 fs_kobj = kobject_create_and_add("fs", NULL);
2849 if (!fs_kobj)
2850 printk(KERN_WARNING "%s: kobj create error\n", __func__);
2851 init_rootfs();
2852 init_mount_tree();
2853 }
2854
2855 void put_mnt_ns(struct mnt_namespace *ns)
2856 {
2857 if (!atomic_dec_and_test(&ns->count))
2858 return;
2859 drop_collected_mounts(&ns->root->mnt);
2860 free_mnt_ns(ns);
2861 }
2862
2863 struct vfsmount *kern_mount_data(struct file_system_type *type, void *data)
2864 {
2865 struct vfsmount *mnt;
2866 mnt = vfs_kern_mount(type, MS_KERNMOUNT, type->name, data);
2867 if (!IS_ERR(mnt)) {
2868 /*
2869 * it is a longterm mount; don't release mnt until kern_unmount(),
2870 * which happens before the filesystem is unregistered
2871 */
2872 real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL;
2873 }
2874 return mnt;
2875 }
2876 EXPORT_SYMBOL_GPL(kern_mount_data);
2877
2878 void kern_unmount(struct vfsmount *mnt)
2879 {
2880 /* release long term mount so mount point can be released */
2881 if (!IS_ERR_OR_NULL(mnt)) {
2882 real_mount(mnt)->mnt_ns = NULL;
2883 synchronize_rcu(); /* yecchhh... */
2884 mntput(mnt);
2885 }
2886 }
2887 EXPORT_SYMBOL(kern_unmount);
2888
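/*
 * Editor's sketch, not part of this file: the usual pairing of
 * kern_mount_data() at module init with kern_unmount() at exit for an
 * internal, long-term mount.  example_fs_type is a hypothetical,
 * already-registered struct file_system_type.
 */
#if 0
static struct vfsmount *example_mnt;

static int __init example_init(void)
{
	example_mnt = kern_mount_data(&example_fs_type, NULL);
	if (IS_ERR(example_mnt))
		return PTR_ERR(example_mnt);
	return 0;
}

static void __exit example_exit(void)
{
	/* drop the long-term reference before unregistering the type */
	kern_unmount(example_mnt);
	unregister_filesystem(&example_fs_type);
}
#endif
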
2889 bool our_mnt(struct vfsmount *mnt)
2890 {
2891 return check_mnt(real_mount(mnt));
2892 }
2893
2894 bool current_chrooted(void)
2895 {
2896 /* Does the current process have a non-standard root? */
2897 struct path ns_root;
2898 struct path fs_root;
2899 bool chrooted;
2900
2901 /* Find the namespace root */
2902 ns_root.mnt = &current->nsproxy->mnt_ns->root->mnt;
2903 ns_root.dentry = ns_root.mnt->mnt_root;
2904 path_get(&ns_root);
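	/* descend through whatever is mounted on top of the namespace root */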
2905 while (d_mountpoint(ns_root.dentry) && follow_down_one(&ns_root))
2906 ;
2907
2908 get_fs_root(current->fs, &fs_root);
2909
2910 chrooted = !path_equal(&fs_root, &ns_root);
2911
2912 path_put(&fs_root);
2913 path_put(&ns_root);
2914
2915 return chrooted;
2916 }
2917
2918 bool fs_fully_visible(struct file_system_type *type)
2919 {
2920 struct mnt_namespace *ns = current->nsproxy->mnt_ns;
2921 struct mount *mnt;
2922 bool visible = false;
2923
2924 if (unlikely(!ns))
2925 return false;
2926
2927 down_read(&namespace_sem);
2928 list_for_each_entry(mnt, &ns->list, mnt_list) {
2929 struct mount *child;
2930 if (mnt->mnt.mnt_sb->s_type != type)
2931 continue;
2932
2933 /* This mount is not fully visible if there are any child mounts
2934 * that cover anything except for empty directories.
2935 */
2936 list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
2937 struct inode *inode = child->mnt_mountpoint->d_inode;
2938 if (!S_ISDIR(inode->i_mode))
2939 goto next;
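			/* an empty directory has i_nlink == 2 ("." plus the
			 * entry in its parent); a higher link count means it
			 * has subdirectories, so it is certainly not empty */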
2940 if (inode->i_nlink > 2)
2941 goto next;
2942 }
2943 visible = true;
2944 goto found;
2945 next: ;
2946 }
2947 found:
2948 up_read(&namespace_sem);
2949 return visible;
2950 }
2951
2952 static void *mntns_get(struct task_struct *task)
2953 {
2954 struct mnt_namespace *ns = NULL;
2955 struct nsproxy *nsproxy;
2956
2957 rcu_read_lock();
2958 nsproxy = task_nsproxy(task);
2959 if (nsproxy) {
2960 ns = nsproxy->mnt_ns;
2961 get_mnt_ns(ns);
2962 }
2963 rcu_read_unlock();
2964
2965 return ns;
2966 }
2967
2968 static void mntns_put(void *ns)
2969 {
2970 put_mnt_ns(ns);
2971 }
2972
2973 static int mntns_install(struct nsproxy *nsproxy, void *ns)
2974 {
2975 struct fs_struct *fs = current->fs;
2976 struct mnt_namespace *mnt_ns = ns;
2977 struct path root;
2978
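	/* joining a mount namespace changes root and cwd below, so demand
	 * chroot-level privileges in addition to admin over the target ns */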
2979 if (!ns_capable(mnt_ns->user_ns, CAP_SYS_ADMIN) ||
2980 !ns_capable(current_user_ns(), CAP_SYS_CHROOT) ||
2981 !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
2982 return -EPERM;
2983
2984 if (fs->users != 1)
2985 return -EINVAL;
2986
2987 get_mnt_ns(mnt_ns);
2988 put_mnt_ns(nsproxy->mnt_ns);
2989 nsproxy->mnt_ns = mnt_ns;
2990
2991 /* Find the root */
2992 root.mnt = &mnt_ns->root->mnt;
2993 root.dentry = mnt_ns->root->mnt.mnt_root;
2994 path_get(&root);
2995 while(d_mountpoint(root.dentry) && follow_down_one(&root))
2996 ;
2997
2998 /* Update the pwd and root */
2999 set_fs_pwd(fs, &root);
3000 set_fs_root(fs, &root);
3001
3002 path_put(&root);
3003 return 0;
3004 }
3005
3006 static unsigned int mntns_inum(void *ns)
3007 {
3008 struct mnt_namespace *mnt_ns = ns;
3009 return mnt_ns->proc_inum;
3010 }
3011
3012 const struct proc_ns_operations mntns_operations = {
3013 .name = "mnt",
3014 .type = CLONE_NEWNS,
3015 .get = mntns_get,
3016 .put = mntns_put,
3017 .install = mntns_install,
3018 .inum = mntns_inum,
3019 };