 * (C) Copyright Al Viro 2000, 2001
 *	Released under GPL v2.
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 */

#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/acct.h>
#include <linux/capability.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/sysfs.h>
#include <linux/seq_file.h>
#include <linux/mnt_namespace.h>
#include <linux/namei.h>
#include <linux/nsproxy.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/log2.h>
#include <linux/idr.h>
#include <linux/fs_struct.h>
#include <linux/fsnotify.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>

#define HASH_SHIFT ilog2(PAGE_SIZE / sizeof(struct list_head))
#define HASH_SIZE (1UL << HASH_SHIFT)

static int event;
static DEFINE_IDA(mnt_id_ida);
static DEFINE_IDA(mnt_group_ida);
static DEFINE_SPINLOCK(mnt_id_lock);
static int mnt_id_start = 0;
static int mnt_group_start = 1;

static struct list_head *mount_hashtable __read_mostly;
static struct kmem_cache *mnt_cache __read_mostly;
static struct rw_semaphore namespace_sem;

/* /sys/fs */
struct kobject *fs_kobj;
EXPORT_SYMBOL_GPL(fs_kobj);

/*
 * vfsmount lock may be taken for read to prevent changes to the
 * vfsmount hash, ie. during mountpoint lookups or walking back
 * up the tree.
 *
 * It should be taken for write in all cases where the vfsmount
 * tree or hash is modified or when a vfsmount structure is modified.
 */
DEFINE_BRLOCK(vfsmount_lock);

static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> HASH_SHIFT);
	return tmp & (HASH_SIZE - 1);
}

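/*
 * Illustrative sketch (not part of this file): a userspace analog of the
 * bucket computation above, with the constants hard-coded under the
 * assumption of 4096-byte pages, 64-byte cache lines and a 16-byte
 * struct list_head (so HASH_SHIFT = ilog2(4096/16) = 8).
 */
#if 0
#include <stdio.h>

#define EX_L1_CACHE_BYTES	64
#define EX_HASH_SHIFT		8
#define EX_HASH_SIZE		(1UL << EX_HASH_SHIFT)

static unsigned long example_hash(void *mnt, void *dentry)
{
	/* same mixing as hash() above: fold both pointers, then fold
	 * the high bits back into the low bits before masking */
	unsigned long tmp = ((unsigned long)mnt / EX_L1_CACHE_BYTES);
	tmp += ((unsigned long)dentry / EX_L1_CACHE_BYTES);
	tmp = tmp + (tmp >> EX_HASH_SHIFT);
	return tmp & (EX_HASH_SIZE - 1);
}

int main(void)
{
	int a, b;	/* any two addresses will do for the demo */
	printf("bucket %lu\n", example_hash(&a, &b));
	return 0;
}
#endif
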
#define MNT_WRITER_UNDERFLOW_LIMIT -(1<<16)

/*
 * allocation is serialized by namespace_sem, but we need the spinlock to
 * serialize with freeing.
 */
static int mnt_alloc_id(struct vfsmount *mnt)
{
	int res;

retry:
	ida_pre_get(&mnt_id_ida, GFP_KERNEL);
	spin_lock(&mnt_id_lock);
	res = ida_get_new_above(&mnt_id_ida, mnt_id_start, &mnt->mnt_id);
	if (!res)
		mnt_id_start = mnt->mnt_id + 1;
	spin_unlock(&mnt_id_lock);
	if (res == -EAGAIN)
		goto retry;

	return res;
}

static void mnt_free_id(struct vfsmount *mnt)
{
	int id = mnt->mnt_id;
	spin_lock(&mnt_id_lock);
	ida_remove(&mnt_id_ida, id);
	if (mnt_id_start > id)
		mnt_id_start = id;
	spin_unlock(&mnt_id_lock);
}

/*
 * Allocate a new peer group ID
 *
 * mnt_group_ida is protected by namespace_sem
 */
static int mnt_alloc_group_id(struct vfsmount *mnt)
{
	int res;

	if (!ida_pre_get(&mnt_group_ida, GFP_KERNEL))
		return -ENOMEM;

	res = ida_get_new_above(&mnt_group_ida,
				mnt_group_start,
				&mnt->mnt_group_id);
	if (!res)
		mnt_group_start = mnt->mnt_group_id + 1;

	return res;
}

/*
 * Release a peer group ID
 */
void mnt_release_group_id(struct vfsmount *mnt)
{
	int id = mnt->mnt_group_id;
	ida_remove(&mnt_group_ida, id);
	if (mnt_group_start > id)
		mnt_group_start = id;
	mnt->mnt_group_id = 0;
}

/*
 * vfsmount lock must be held for read
 */
static inline void mnt_add_count(struct vfsmount *mnt, int n)
{
	this_cpu_add(mnt->mnt_pcp->mnt_count, n);
}

/*
 * vfsmount lock must be held for write
 */
unsigned int mnt_get_count(struct vfsmount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
	}

	return count;
#else
	return mnt->mnt_count;
#endif
}

static struct vfsmount *alloc_vfsmnt(const char *name)
{
	struct vfsmount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
	if (mnt) {
		int err;

		err = mnt_alloc_id(mnt);
		if (err)
			goto out_free_cache;

		if (name) {
			mnt->mnt_devname = kstrdup(name, GFP_KERNEL);
			if (!mnt->mnt_devname)
				goto out_free_id;
		}

#ifdef CONFIG_SMP
		mnt->mnt_pcp = alloc_percpu(struct mnt_pcp);
		if (!mnt->mnt_pcp)
			goto out_free_devname;

		this_cpu_add(mnt->mnt_pcp->mnt_count, 1);
#else
		mnt->mnt_count = 1;
		mnt->mnt_writers = 0;
#endif

		INIT_LIST_HEAD(&mnt->mnt_hash);
		INIT_LIST_HEAD(&mnt->mnt_child);
		INIT_LIST_HEAD(&mnt->mnt_mounts);
		INIT_LIST_HEAD(&mnt->mnt_list);
		INIT_LIST_HEAD(&mnt->mnt_expire);
		INIT_LIST_HEAD(&mnt->mnt_share);
		INIT_LIST_HEAD(&mnt->mnt_slave_list);
		INIT_LIST_HEAD(&mnt->mnt_slave);
#ifdef CONFIG_FSNOTIFY
		INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks);
#endif
	}
	return mnt;

#ifdef CONFIG_SMP
out_free_devname:
	kfree(mnt->mnt_devname);
#endif
out_free_id:
	mnt_free_id(mnt);
out_free_cache:
	kmem_cache_free(mnt_cache, mnt);
	return NULL;
}

/*
 * Most r/o checks on a fs are for operations that take
 * discrete amounts of time, like a write() or unlink().
 * We must keep track of when those operations start
 * (for permission checks) and when they end, so that
 * we can determine when writes are able to occur to
 * a filesystem.
 */
/*
 * __mnt_is_readonly: check whether a mount is read-only
 * @mnt: the mount to check for its write status
 *
 * This shouldn't be used directly outside of the VFS.
 * It does not guarantee that the filesystem will stay
 * r/w, just that it is right *now*. This cannot and
 * should not be used in place of IS_RDONLY(inode).
 * mnt_want/drop_write() will _keep_ the filesystem
 * r/w.
 */
int __mnt_is_readonly(struct vfsmount *mnt)
{
	if (mnt->mnt_flags & MNT_READONLY)
		return 1;
	if (mnt->mnt_sb->s_flags & MS_RDONLY)
		return 1;
	return 0;
}
EXPORT_SYMBOL_GPL(__mnt_is_readonly);

static inline void mnt_inc_writers(struct vfsmount *mnt)
{
	this_cpu_inc(mnt->mnt_pcp->mnt_writers);
}

static inline void mnt_dec_writers(struct vfsmount *mnt)
{
	this_cpu_dec(mnt->mnt_pcp->mnt_writers);
}

static unsigned int mnt_get_writers(struct vfsmount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers;
	}

	return count;
#else
	return mnt->mnt_writers;
#endif
}

/*
 * Most r/o checks on a fs are for operations that take
 * discrete amounts of time, like a write() or unlink().
 * We must keep track of when those operations start
 * (for permission checks) and when they end, so that
 * we can determine when writes are able to occur to
 * a filesystem.
 */
/**
 * mnt_want_write - get write access to a mount
 * @mnt: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is
 * about to be performed to it, and makes sure that
 * writes are allowed before returning success. When
 * the write operation is finished, mnt_drop_write()
 * must be called. This is effectively a refcount.
 */
int mnt_want_write(struct vfsmount *mnt)
{
	int ret = 0;

	preempt_disable();
	mnt_inc_writers(mnt);
	/*
	 * The store to mnt_inc_writers must be visible before we pass
	 * MNT_WRITE_HOLD loop below, so that the slowpath can see our
	 * incremented count after it has set MNT_WRITE_HOLD.
	 */
	smp_mb();
	while (mnt->mnt_flags & MNT_WRITE_HOLD)
		cpu_relax();
	/*
	 * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
	 * be set to match its requirements. So we must not load that until
	 * MNT_WRITE_HOLD is cleared.
	 */
	smp_rmb();
	if (__mnt_is_readonly(mnt)) {
		mnt_dec_writers(mnt);
		ret = -EROFS;
	}
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write);

/**
 * mnt_clone_write - get write access to a mount
 * @mnt: the mount on which to take a write
 *
 * This is effectively like mnt_want_write, except
 * it must only be used to take an extra write reference
 * on a mountpoint that we already know has a write reference
 * on it. This allows some optimisation.
 *
 * After finished, mnt_drop_write must be called as usual to
 * drop the reference.
 */
int mnt_clone_write(struct vfsmount *mnt)
{
	/* superblock may be r/o */
	if (__mnt_is_readonly(mnt))
		return -EROFS;
	preempt_disable();
	mnt_inc_writers(mnt);
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL_GPL(mnt_clone_write);

/**
 * mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount on which to take a write
 *
 * This is like mnt_want_write, but it takes a file and can
 * do some optimisations if the file is open for write already
 */
int mnt_want_write_file(struct file *file)
{
	struct inode *inode = file->f_dentry->d_inode;
	if (!(file->f_mode & FMODE_WRITE) || special_file(inode->i_mode))
		return mnt_want_write(file->f_path.mnt);
	else
		return mnt_clone_write(file->f_path.mnt);
}
EXPORT_SYMBOL_GPL(mnt_want_write_file);

/**
 * mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done
 * performing writes to it.  Must be matched with
 * mnt_want_write() call above.
 */
void mnt_drop_write(struct vfsmount *mnt)
{
	preempt_disable();
	mnt_dec_writers(mnt);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(mnt_drop_write);

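/*
 * Illustrative sketch (not part of this file): a typical caller brackets
 * a modification with mnt_want_write()/mnt_drop_write().  The helper name
 * below is hypothetical; only the pairing is the point.
 */
#if 0
static int example_touch_file(struct path *path)
{
	int err = mnt_want_write(path->mnt);	/* may fail with -EROFS */
	if (err)
		return err;
	/* ... perform the actual modification here ... */
	mnt_drop_write(path->mnt);		/* always pair with the want */
	return 0;
}
#endif
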
static int mnt_make_readonly(struct vfsmount *mnt)
{
	int ret = 0;

	br_write_lock(vfsmount_lock);
	mnt->mnt_flags |= MNT_WRITE_HOLD;
	/*
	 * After storing MNT_WRITE_HOLD, we'll read the counters. This store
	 * should be visible before we do.
	 */
	smp_mb();

	/*
	 * With writers on hold, if this value is zero, then there are
	 * definitely no active writers (although held writers may subsequently
	 * increment the count, they'll have to wait, and decrement it after
	 * seeing MNT_READONLY).
	 *
	 * It is OK to have counter incremented on one CPU and decremented on
	 * another: the sum will add up correctly. The danger would be when we
	 * sum up each counter, if we read a counter before it is incremented,
	 * but then read another CPU's count which it has been subsequently
	 * decremented from -- we would see more decrements than we should.
	 * MNT_WRITE_HOLD protects against this scenario, because
	 * mnt_want_write first increments count, then smp_mb, then spins on
	 * MNT_WRITE_HOLD, so it can't be decremented by another CPU while
	 * we're counting up here.
	 */
	if (mnt_get_writers(mnt) > 0)
		ret = -EBUSY;
	else
		mnt->mnt_flags |= MNT_READONLY;
	/*
	 * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers
	 * that become unheld will see MNT_READONLY.
	 */
	smp_wmb();
	mnt->mnt_flags &= ~MNT_WRITE_HOLD;
	br_write_unlock(vfsmount_lock);
	return ret;
}

static void __mnt_unmake_readonly(struct vfsmount *mnt)
{
	br_write_lock(vfsmount_lock);
	mnt->mnt_flags &= ~MNT_READONLY;
	br_write_unlock(vfsmount_lock);
}

static void free_vfsmnt(struct vfsmount *mnt)
{
	kfree(mnt->mnt_devname);
	mnt_free_id(mnt);
#ifdef CONFIG_SMP
	free_percpu(mnt->mnt_pcp);
#endif
	kmem_cache_free(mnt_cache, mnt);
}

/*
 * find the first or last mount at @dentry on vfsmount @mnt depending on
 * @dir. If @dir is set return the first mount else return the last mount.
 * vfsmount_lock must be held for read or write.
 */
struct vfsmount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry,
			      int dir)
{
	struct list_head *head = mount_hashtable + hash(mnt, dentry);
	struct list_head *tmp = head;
	struct vfsmount *p, *found = NULL;

	for (;;) {
		tmp = dir ? tmp->next : tmp->prev;
		p = NULL;
		if (tmp == head)
			break;
		p = list_entry(tmp, struct vfsmount, mnt_hash);
		if (p->mnt_parent == mnt && p->mnt_mountpoint == dentry) {
			found = p;
			break;
		}
	}
	return found;
}

/*
 * lookup_mnt increments the ref count before returning
 * the vfsmount struct.
 */
struct vfsmount *lookup_mnt(struct path *path)
{
	struct vfsmount *child_mnt;

	br_read_lock(vfsmount_lock);
	if ((child_mnt = __lookup_mnt(path->mnt, path->dentry, 1)))
		mntget(child_mnt);
	br_read_unlock(vfsmount_lock);
	return child_mnt;
}

static inline int check_mnt(struct vfsmount *mnt)
{
	return mnt->mnt_ns == current->nsproxy->mnt_ns;
}

/*
 * vfsmount lock must be held for write
 */
static void touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns) {
		ns->event = ++event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * vfsmount lock must be held for write
 */
static void __touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns && ns->event != event) {
		ns->event = event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * Clear dentry's mounted state if it has no remaining mounts.
 * vfsmount_lock must be held for write.
 */
static void dentry_reset_mounted(struct dentry *dentry)
{
	unsigned u;

	for (u = 0; u < HASH_SIZE; u++) {
		struct vfsmount *p;

		list_for_each_entry(p, &mount_hashtable[u], mnt_hash) {
			if (p->mnt_mountpoint == dentry)
				return;
		}
	}
	spin_lock(&dentry->d_lock);
	dentry->d_flags &= ~DCACHE_MOUNTED;
	spin_unlock(&dentry->d_lock);
}

/*
 * vfsmount lock must be held for write
 */
static void detach_mnt(struct vfsmount *mnt, struct path *old_path)
{
	old_path->dentry = mnt->mnt_mountpoint;
	old_path->mnt = mnt->mnt_parent;
	mnt->mnt_parent = mnt;
	mnt->mnt_mountpoint = mnt->mnt_root;
	list_del_init(&mnt->mnt_child);
	list_del_init(&mnt->mnt_hash);
	dentry_reset_mounted(old_path->dentry);
}

/*
 * vfsmount lock must be held for write
 */
void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry,
			struct vfsmount *child_mnt)
{
	child_mnt->mnt_parent = mntget(mnt);
	child_mnt->mnt_mountpoint = dget(dentry);
	spin_lock(&dentry->d_lock);
	dentry->d_flags |= DCACHE_MOUNTED;
	spin_unlock(&dentry->d_lock);
}

/*
 * vfsmount lock must be held for write
 */
static void attach_mnt(struct vfsmount *mnt, struct path *path)
{
	mnt_set_mountpoint(path->mnt, path->dentry, mnt);
	list_add_tail(&mnt->mnt_hash, mount_hashtable +
			hash(path->mnt, path->dentry));
	list_add_tail(&mnt->mnt_child, &path->mnt->mnt_mounts);
}

static inline void __mnt_make_longterm(struct vfsmount *mnt)
{
	atomic_inc(&mnt->mnt_longterm);
}

/* needs vfsmount lock for write */
static inline void __mnt_make_shortterm(struct vfsmount *mnt)
{
	atomic_dec(&mnt->mnt_longterm);
}

/*
 * vfsmount lock must be held for write
 */
static void commit_tree(struct vfsmount *mnt)
{
	struct vfsmount *parent = mnt->mnt_parent;
	struct vfsmount *m;
	LIST_HEAD(head);
	struct mnt_namespace *n = parent->mnt_ns;

	BUG_ON(parent == mnt);

	list_add_tail(&head, &mnt->mnt_list);
	list_for_each_entry(m, &head, mnt_list) {
		m->mnt_ns = n;
		__mnt_make_longterm(m);
	}

	list_splice(&head, n->list.prev);

	list_add_tail(&mnt->mnt_hash, mount_hashtable +
			hash(parent, mnt->mnt_mountpoint));
	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
	touch_mnt_namespace(n);
}

static struct vfsmount *next_mnt(struct vfsmount *p, struct vfsmount *root)
{
	struct list_head *next = p->mnt_mounts.next;
	if (next == &p->mnt_mounts) {
		while (1) {
			if (p == root)
				return NULL;
			next = p->mnt_child.next;
			if (next != &p->mnt_parent->mnt_mounts)
				break;
			p = p->mnt_parent;
		}
	}
	return list_entry(next, struct vfsmount, mnt_child);
}

static struct vfsmount *skip_mnt_tree(struct vfsmount *p)
{
	struct list_head *prev = p->mnt_mounts.prev;
	while (prev != &p->mnt_mounts) {
		p = list_entry(prev, struct vfsmount, mnt_child);
		prev = p->mnt_mounts.prev;
	}
	return p;
}

struct vfsmount *
vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void *data)
{
	struct vfsmount *mnt;
	struct dentry *root;

	if (!type)
		return ERR_PTR(-ENODEV);

	mnt = alloc_vfsmnt(name);
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (flags & MS_KERNMOUNT)
		mnt->mnt_flags = MNT_INTERNAL;

	root = mount_fs(type, flags, name, data);
	if (IS_ERR(root)) {
		free_vfsmnt(mnt);
		return ERR_CAST(root);
	}

	mnt->mnt_root = root;
	mnt->mnt_sb = root->d_sb;
	mnt->mnt_mountpoint = mnt->mnt_root;
	mnt->mnt_parent = mnt;
	return mnt;
}
EXPORT_SYMBOL_GPL(vfs_kern_mount);

static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root,
					int flag)
{
	struct super_block *sb = old->mnt_sb;
	struct vfsmount *mnt = alloc_vfsmnt(old->mnt_devname);

	if (mnt) {
		if (flag & (CL_SLAVE | CL_PRIVATE))
			mnt->mnt_group_id = 0; /* not a peer of original */
		else
			mnt->mnt_group_id = old->mnt_group_id;

		if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
			int err = mnt_alloc_group_id(mnt);
			if (err)
				goto out_free;
		}

		mnt->mnt_flags = old->mnt_flags & ~MNT_WRITE_HOLD;
		atomic_inc(&sb->s_active);
		mnt->mnt_sb = sb;
		mnt->mnt_root = dget(root);
		mnt->mnt_mountpoint = mnt->mnt_root;
		mnt->mnt_parent = mnt;

		if (flag & CL_SLAVE) {
			list_add(&mnt->mnt_slave, &old->mnt_slave_list);
			mnt->mnt_master = old;
			CLEAR_MNT_SHARED(mnt);
		} else if (!(flag & CL_PRIVATE)) {
			if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old))
				list_add(&mnt->mnt_share, &old->mnt_share);
			if (IS_MNT_SLAVE(old))
				list_add(&mnt->mnt_slave, &old->mnt_slave);
			mnt->mnt_master = old->mnt_master;
		}
		if (flag & CL_MAKE_SHARED)
			set_mnt_shared(mnt);

		/* stick the duplicate mount on the same expiry list
		 * as the original if that was on one */
		if (flag & CL_EXPIRE) {
			if (!list_empty(&old->mnt_expire))
				list_add(&mnt->mnt_expire, &old->mnt_expire);
		}
	}
	return mnt;

out_free:
	free_vfsmnt(mnt);
	return NULL;
}

static inline void mntfree(struct vfsmount *mnt)
{
	struct super_block *sb = mnt->mnt_sb;

	/*
	 * This probably indicates that somebody messed
	 * up a mnt_want/drop_write() pair. If this
	 * happens, the filesystem was probably unable
	 * to make r/w->r/o transitions.
	 */
	/*
	 * The locking used to deal with mnt_count decrement provides barriers,
	 * so mnt_get_writers() below is safe.
	 */
	WARN_ON(mnt_get_writers(mnt));
	fsnotify_vfsmount_delete(mnt);
	dput(mnt->mnt_root);
	free_vfsmnt(mnt);
	deactivate_super(sb);
}

static void mntput_no_expire(struct vfsmount *mnt)
{
put_again:
#ifdef CONFIG_SMP
	br_read_lock(vfsmount_lock);
	if (likely(atomic_read(&mnt->mnt_longterm))) {
		mnt_add_count(mnt, -1);
		br_read_unlock(vfsmount_lock);
		return;
	}
	br_read_unlock(vfsmount_lock);

	br_write_lock(vfsmount_lock);
	mnt_add_count(mnt, -1);
	if (mnt_get_count(mnt)) {
		br_write_unlock(vfsmount_lock);
		return;
	}
#else
	mnt_add_count(mnt, -1);
	if (likely(mnt_get_count(mnt)))
		return;
	br_write_lock(vfsmount_lock);
#endif
	if (unlikely(mnt->mnt_pinned)) {
		mnt_add_count(mnt, mnt->mnt_pinned + 1);
		mnt->mnt_pinned = 0;
		br_write_unlock(vfsmount_lock);
		acct_auto_close_mnt(mnt);
		goto put_again;
	}
	br_write_unlock(vfsmount_lock);
	mntfree(mnt);
}

void mntput(struct vfsmount *mnt)
{
	if (mnt) {
		/* avoid cacheline pingpong, hope gcc doesn't get "smart" */
		if (unlikely(mnt->mnt_expiry_mark))
			mnt->mnt_expiry_mark = 0;
		mntput_no_expire(mnt);
	}
}
EXPORT_SYMBOL(mntput);

struct vfsmount *mntget(struct vfsmount *mnt)
{
	if (mnt)
		mnt_add_count(mnt, 1);
	return mnt;
}
EXPORT_SYMBOL(mntget);

void mnt_pin(struct vfsmount *mnt)
{
	br_write_lock(vfsmount_lock);
	mnt->mnt_pinned++;
	br_write_unlock(vfsmount_lock);
}
EXPORT_SYMBOL(mnt_pin);

void mnt_unpin(struct vfsmount *mnt)
{
	br_write_lock(vfsmount_lock);
	if (mnt->mnt_pinned) {
		mnt_add_count(mnt, 1);
		mnt->mnt_pinned--;
	}
	br_write_unlock(vfsmount_lock);
}
EXPORT_SYMBOL(mnt_unpin);

static inline void mangle(struct seq_file *m, const char *s)
{
	seq_escape(m, s, " \t\n\\");
}

/*
 * Simple .show_options callback for filesystems which don't want to
 * implement more complex mount option showing.
 *
 * See also save_mount_options().
 */
int generic_show_options(struct seq_file *m, struct vfsmount *mnt)
{
	const char *options;

	rcu_read_lock();
	options = rcu_dereference(mnt->mnt_sb->s_options);

	if (options != NULL && options[0]) {
		seq_putc(m, ',');
		mangle(m, options);
	}
	rcu_read_unlock();

	return 0;
}
EXPORT_SYMBOL(generic_show_options);

/*
 * If a filesystem uses generic_show_options(), this function should be
 * called from the fill_super() callback.
 *
 * The .remount_fs callback usually needs to be handled in a special
 * way, to make sure that previous options are not overwritten if the
 * remount fails.
 *
 * Also note that if the filesystem's .remount_fs function doesn't
 * reset all options to their default value, but changes only newly
 * given options, then the displayed options will not reflect reality
 * any more.
 */
void save_mount_options(struct super_block *sb, char *options)
{
	BUG_ON(sb->s_options);
	rcu_assign_pointer(sb->s_options, kstrdup(options, GFP_KERNEL));
}
EXPORT_SYMBOL(save_mount_options);

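/*
 * Illustrative sketch (not part of this file): how a simple filesystem
 * might wire the two helpers together.  The examplefs_* names are
 * hypothetical; the pattern is save_mount_options() in fill_super plus
 * generic_show_options as the .show_options callback.
 */
#if 0
static int examplefs_fill_super(struct super_block *sb, void *data, int silent)
{
	save_mount_options(sb, data);	/* remember the raw option string */
	/* ... normal superblock setup ... */
	return 0;
}

static const struct super_operations examplefs_sops = {
	.show_options	= generic_show_options,	/* prints the saved string */
};
#endif
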
void replace_mount_options(struct super_block *sb, char *options)
{
	char *old = sb->s_options;
	rcu_assign_pointer(sb->s_options, options);
	if (old) {
		synchronize_rcu();
		kfree(old);
	}
}
EXPORT_SYMBOL(replace_mount_options);

#ifdef CONFIG_PROC_FS
/* iterator */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_mounts *p = m->private;

	down_read(&namespace_sem);
	return seq_list_start(&p->ns->list, *pos);
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_mounts *p = m->private;

	return seq_list_next(v, &p->ns->list, pos);
}

static void m_stop(struct seq_file *m, void *v)
{
	up_read(&namespace_sem);
}

int mnt_had_events(struct proc_mounts *p)
{
	struct mnt_namespace *ns = p->ns;
	int res = 0;

	br_read_lock(vfsmount_lock);
	if (p->m.poll_event != ns->event) {
		p->m.poll_event = ns->event;
		res = 1;
	}
	br_read_unlock(vfsmount_lock);

	return res;
}

struct proc_fs_info {
	int flag;
	const char *str;
};

static int show_sb_opts(struct seq_file *m, struct super_block *sb)
{
	static const struct proc_fs_info fs_info[] = {
		{ MS_SYNCHRONOUS, ",sync" },
		{ MS_DIRSYNC, ",dirsync" },
		{ MS_MANDLOCK, ",mand" },
		{ 0, NULL }
	};
	const struct proc_fs_info *fs_infop;

	for (fs_infop = fs_info; fs_infop->flag; fs_infop++) {
		if (sb->s_flags & fs_infop->flag)
			seq_puts(m, fs_infop->str);
	}

	return security_sb_show_options(m, sb);
}

static void show_mnt_opts(struct seq_file *m, struct vfsmount *mnt)
{
	static const struct proc_fs_info mnt_info[] = {
		{ MNT_NOSUID, ",nosuid" },
		{ MNT_NODEV, ",nodev" },
		{ MNT_NOEXEC, ",noexec" },
		{ MNT_NOATIME, ",noatime" },
		{ MNT_NODIRATIME, ",nodiratime" },
		{ MNT_RELATIME, ",relatime" },
		{ 0, NULL }
	};
	const struct proc_fs_info *fs_infop;

	for (fs_infop = mnt_info; fs_infop->flag; fs_infop++) {
		if (mnt->mnt_flags & fs_infop->flag)
			seq_puts(m, fs_infop->str);
	}
}

static void show_type(struct seq_file *m, struct super_block *sb)
{
	mangle(m, sb->s_type->name);
	if (sb->s_subtype && sb->s_subtype[0]) {
		seq_putc(m, '.');
		mangle(m, sb->s_subtype);
	}
}

static int show_vfsmnt(struct seq_file *m, void *v)
{
	struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
	int err = 0;
	struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };

	if (mnt->mnt_sb->s_op->show_devname) {
		err = mnt->mnt_sb->s_op->show_devname(m, mnt);
		if (err)
			goto out;
	} else {
		mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
	}
	seq_putc(m, ' ');
	seq_path(m, &mnt_path, " \t\n\\");
	seq_putc(m, ' ');
	show_type(m, mnt->mnt_sb);
	seq_puts(m, __mnt_is_readonly(mnt) ? " ro" : " rw");
	err = show_sb_opts(m, mnt->mnt_sb);
	if (err)
		goto out;
	show_mnt_opts(m, mnt);
	if (mnt->mnt_sb->s_op->show_options)
		err = mnt->mnt_sb->s_op->show_options(m, mnt);
	seq_puts(m, " 0 0\n");
out:
	return err;
}

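/*
 * Illustrative sketch (not part of this file): each mount comes out of
 * show_vfsmnt() as one /proc/mounts line, e.g. (contents will vary):
 *
 *	/dev/sda1 / ext4 rw,relatime 0 0
 *
 * i.e. device, mountpoint, type, options, then the two literal zeros
 * printed above in place of the traditional dump/pass fields.
 */
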
const struct seq_operations mounts_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_vfsmnt,
};

static int show_mountinfo(struct seq_file *m, void *v)
{
	struct proc_mounts *p = m->private;
	struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
	struct super_block *sb = mnt->mnt_sb;
	struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
	struct path root = p->root;
	int err = 0;

	seq_printf(m, "%i %i %u:%u ", mnt->mnt_id, mnt->mnt_parent->mnt_id,
		   MAJOR(sb->s_dev), MINOR(sb->s_dev));
	if (sb->s_op->show_path)
		err = sb->s_op->show_path(m, mnt);
	else
		seq_dentry(m, mnt->mnt_root, " \t\n\\");
	if (err)
		goto out;
	seq_putc(m, ' ');

	/* mountpoints outside of chroot jail will give SEQ_SKIP on this */
	err = seq_path_root(m, &mnt_path, &root, " \t\n\\");
	if (err)
		goto out;

	seq_puts(m, mnt->mnt_flags & MNT_READONLY ? " ro" : " rw");
	show_mnt_opts(m, mnt);

	/* Tagged fields ("foo:X" or "bar") */
	if (IS_MNT_SHARED(mnt))
		seq_printf(m, " shared:%i", mnt->mnt_group_id);
	if (IS_MNT_SLAVE(mnt)) {
		int master = mnt->mnt_master->mnt_group_id;
		int dom = get_dominating_id(mnt, &p->root);
		seq_printf(m, " master:%i", master);
		if (dom && dom != master)
			seq_printf(m, " propagate_from:%i", dom);
	}
	if (IS_MNT_UNBINDABLE(mnt))
		seq_puts(m, " unbindable");

	/* Filesystem specific data */
	seq_puts(m, " - ");
	show_type(m, sb);
	seq_putc(m, ' ');
	if (sb->s_op->show_devname)
		err = sb->s_op->show_devname(m, mnt);
	else
		mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
	if (err)
		goto out;
	seq_puts(m, sb->s_flags & MS_RDONLY ? " ro" : " rw");
	err = show_sb_opts(m, sb);
	if (err)
		goto out;
	if (sb->s_op->show_options)
		err = sb->s_op->show_options(m, mnt);
	seq_putc(m, '\n');
out:
	return err;
}

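/*
 * Illustrative sketch (not part of this file): a typical
 * /proc/self/mountinfo line produced above looks like (fields vary):
 *
 *	21 1 8:1 / / rw,relatime shared:1 - ext4 /dev/sda1 rw
 *
 * mount id, parent id, major:minor, root, mountpoint, per-mount options,
 * optional tagged fields, "-" separator, type, source, per-sb options.
 */
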
const struct seq_operations mountinfo_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_mountinfo,
};

static int show_vfsstat(struct seq_file *m, void *v)
{
	struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
	struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
	int err = 0;

	/* device */
	if (mnt->mnt_sb->s_op->show_devname) {
		seq_puts(m, "device ");
		err = mnt->mnt_sb->s_op->show_devname(m, mnt);
	} else {
		if (mnt->mnt_devname) {
			seq_puts(m, "device ");
			mangle(m, mnt->mnt_devname);
		} else
			seq_puts(m, "no device");
	}

	/* mount point */
	seq_puts(m, " mounted on ");
	seq_path(m, &mnt_path, " \t\n\\");
	seq_putc(m, ' ');

	/* file system type */
	seq_puts(m, "with fstype ");
	show_type(m, mnt->mnt_sb);

	/* optional statistics */
	if (mnt->mnt_sb->s_op->show_stats) {
		seq_putc(m, ' ');
		if (!err)
			err = mnt->mnt_sb->s_op->show_stats(m, mnt);
	}

	seq_putc(m, '\n');
	return err;
}

const struct seq_operations mountstats_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_vfsstat,
};

#endif  /* CONFIG_PROC_FS */

/**
 * may_umount_tree - check if a mount tree is busy
 * @mnt: root of mount tree
 *
 * This is called to check if a tree of mounts has any
 * open files, pwds, chroots or sub mounts that are
 * busy.
 */
int may_umount_tree(struct vfsmount *mnt)
{
	int actual_refs = 0;
	int minimum_refs = 0;
	struct vfsmount *p;

	/* write lock needed for mnt_get_count */
	br_write_lock(vfsmount_lock);
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		actual_refs += mnt_get_count(p);
		minimum_refs += 2;
	}
	br_write_unlock(vfsmount_lock);

	if (actual_refs > minimum_refs)
		return 0;

	return 1;
}
EXPORT_SYMBOL(may_umount_tree);

/**
 * may_umount - check if a mount point is busy
 * @mnt: root of mount
 *
 * This is called to check if a mount point has any
 * open files, pwds, chroots or sub mounts. If the
 * mount has sub mounts this will return busy
 * regardless of whether the sub mounts are busy.
 *
 * Doesn't take quota and stuff into account. IOW, in some cases it will
 * give false negatives. The main reason why it's here is that we need
 * a non-destructive way to look for easily umountable filesystems.
 */
int may_umount(struct vfsmount *mnt)
{
	int ret = 1;
	down_read(&namespace_sem);
	br_write_lock(vfsmount_lock);
	if (propagate_mount_busy(mnt, 2))
		ret = 0;
	br_write_unlock(vfsmount_lock);
	up_read(&namespace_sem);
	return ret;
}
EXPORT_SYMBOL(may_umount);

void release_mounts(struct list_head *head)
{
	struct vfsmount *mnt;
	while (!list_empty(head)) {
		mnt = list_first_entry(head, struct vfsmount, mnt_hash);
		list_del_init(&mnt->mnt_hash);
		if (mnt_has_parent(mnt)) {
			struct dentry *dentry;
			struct vfsmount *m;

			br_write_lock(vfsmount_lock);
			dentry = mnt->mnt_mountpoint;
			m = mnt->mnt_parent;
			mnt->mnt_mountpoint = mnt->mnt_root;
			mnt->mnt_parent = mnt;
			m->mnt_ghosts--;
			br_write_unlock(vfsmount_lock);
			dput(dentry);
			mntput(m);
		}
		mntput(mnt);
	}
}

/*
 * vfsmount lock must be held for write
 * namespace_sem must be held for write
 */
void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill)
{
	LIST_HEAD(tmp_list);
	struct vfsmount *p;

	for (p = mnt; p; p = next_mnt(p, mnt))
		list_move(&p->mnt_hash, &tmp_list);

	if (propagate)
		propagate_umount(&tmp_list);

	list_for_each_entry(p, &tmp_list, mnt_hash) {
		list_del_init(&p->mnt_expire);
		list_del_init(&p->mnt_list);
		__touch_mnt_namespace(p->mnt_ns);
		p->mnt_ns = NULL;
		__mnt_make_shortterm(p);
		list_del_init(&p->mnt_child);
		if (mnt_has_parent(p)) {
			p->mnt_parent->mnt_ghosts++;
			dentry_reset_mounted(p->mnt_mountpoint);
		}
		change_mnt_propagation(p, MS_PRIVATE);
	}
	list_splice(&tmp_list, kill);
}

static void shrink_submounts(struct vfsmount *mnt, struct list_head *umounts);

static int do_umount(struct vfsmount *mnt, int flags)
{
	struct super_block *sb = mnt->mnt_sb;
	int retval;
	LIST_HEAD(umount_list);

	retval = security_sb_umount(mnt, flags);
	if (retval)
		return retval;

	/*
	 * Allow userspace to request a mountpoint be expired rather than
	 * unmounting unconditionally. Unmount only happens if:
	 *  (1) the mark is already set (the mark is cleared by mntput())
	 *  (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
	 */
	if (flags & MNT_EXPIRE) {
		if (mnt == current->fs->root.mnt ||
		    flags & (MNT_FORCE | MNT_DETACH))
			return -EINVAL;

		/*
		 * probably don't strictly need the lock here if we examined
		 * all race cases, but it's a slowpath.
		 */
		br_write_lock(vfsmount_lock);
		if (mnt_get_count(mnt) != 2) {
			br_write_unlock(vfsmount_lock);
			return -EBUSY;
		}
		br_write_unlock(vfsmount_lock);

		if (!xchg(&mnt->mnt_expiry_mark, 1))
			return -EAGAIN;
	}

	/*
	 * If we may have to abort operations to get out of this
	 * mount, and they will themselves hold resources we must
	 * allow the fs to do things. In the Unix tradition of
	 * 'Gee that's tricky lets do it in userspace' the umount_begin
	 * might fail to complete on the first run through as other tasks
	 * must return, and the like. That's for the mount program to worry
	 * about for the moment.
	 */
	if (flags & MNT_FORCE && sb->s_op->umount_begin) {
		sb->s_op->umount_begin(sb);
	}

	/*
	 * No sense to grab the lock for this test, but test itself looks
	 * somewhat bogus. Suggestions for better replacement?
	 * Ho-hum... In principle, we might treat that as umount + switch
	 * to rootfs. GC would eventually take care of the old vfsmount.
	 * Actually it makes sense, especially if rootfs would contain a
	 * /reboot - static binary that would close all descriptors and
	 * call reboot(9). Then init(8) could umount root and exec /reboot.
	 */
	if (mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
		/*
		 * Special case for "unmounting" root ...
		 * we just try to remount it readonly.
		 */
		down_write(&sb->s_umount);
		if (!(sb->s_flags & MS_RDONLY))
			retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
		up_write(&sb->s_umount);
		return retval;
	}

	down_write(&namespace_sem);
	br_write_lock(vfsmount_lock);
	event++;

	if (!(flags & MNT_DETACH))
		shrink_submounts(mnt, &umount_list);

	retval = -EBUSY;
	if (flags & MNT_DETACH || !propagate_mount_busy(mnt, 2)) {
		if (!list_empty(&mnt->mnt_list))
			umount_tree(mnt, 1, &umount_list);
		retval = 0;
	}
	br_write_unlock(vfsmount_lock);
	up_write(&namespace_sem);
	release_mounts(&umount_list);
	return retval;
}

/*
 * Now umount can handle mount points as well as block devices.
 * This is important for filesystems which use unnamed block devices.
 *
 * We now support a flag for forced unmount like the other 'big iron'
 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
 */
SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
{
	struct path path;
	int retval;
	int lookup_flags = 0;

	if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
		return -EINVAL;

	if (!(flags & UMOUNT_NOFOLLOW))
		lookup_flags |= LOOKUP_FOLLOW;

	retval = user_path_at(AT_FDCWD, name, lookup_flags, &path);
	if (retval)
		goto out;
	retval = -EINVAL;
	if (path.dentry != path.mnt->mnt_root)
		goto dput_and_out;
	if (!check_mnt(path.mnt))
		goto dput_and_out;

	retval = -EPERM;
	if (!capable(CAP_SYS_ADMIN))
		goto dput_and_out;

	retval = do_umount(path.mnt, flags);
dput_and_out:
	/* we mustn't call path_put() as that would clear mnt_expiry_mark */
	dput(path.dentry);
	mntput_no_expire(path.mnt);
out:
	return retval;
}

#ifdef __ARCH_WANT_SYS_OLDUMOUNT

/*
 *	The 2.0 compatible umount. No flags.
 */
SYSCALL_DEFINE1(oldumount, char __user *, name)
{
	return sys_umount(name, 0);
}

#endif

static int mount_is_safe(struct path *path)
{
	if (capable(CAP_SYS_ADMIN))
		return 0;
	return -EPERM;
#ifdef notyet
	if (S_ISLNK(path->dentry->d_inode->i_mode))
		return -EPERM;
	if (path->dentry->d_inode->i_mode & S_ISVTX) {
		if (current_uid() != path->dentry->d_inode->i_uid)
			return -EPERM;
	}
	if (inode_permission(path->dentry->d_inode, MAY_WRITE))
		return -EPERM;
	return 0;
#endif
}

struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry,
					int flag)
{
	struct vfsmount *res, *p, *q, *r, *s;
	struct path path;

	if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(mnt))
		return NULL;

	res = q = clone_mnt(mnt, dentry, flag);
	if (!q)
		goto Enomem;
	q->mnt_mountpoint = mnt->mnt_mountpoint;

	p = mnt;
	list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
		if (!is_subdir(r->mnt_mountpoint, dentry))
			continue;

		for (s = r; s; s = next_mnt(s, r)) {
			if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(s)) {
				s = skip_mnt_tree(s);
				continue;
			}
			while (p != s->mnt_parent) {
				p = p->mnt_parent;
				q = q->mnt_parent;
			}
			p = s;
			path.mnt = q;
			path.dentry = p->mnt_mountpoint;
			q = clone_mnt(p, p->mnt_root, flag);
			if (!q)
				goto Enomem;
			br_write_lock(vfsmount_lock);
			list_add_tail(&q->mnt_list, &res->mnt_list);
			attach_mnt(q, &path);
			br_write_unlock(vfsmount_lock);
		}
	}
	return res;
Enomem:
	if (res) {
		LIST_HEAD(umount_list);
		br_write_lock(vfsmount_lock);
		umount_tree(res, 0, &umount_list);
		br_write_unlock(vfsmount_lock);
		release_mounts(&umount_list);
	}
	return NULL;
}

struct vfsmount *collect_mounts(struct path *path)
{
	struct vfsmount *tree;
	down_write(&namespace_sem);
	tree = copy_tree(path->mnt, path->dentry, CL_COPY_ALL | CL_PRIVATE);
	up_write(&namespace_sem);
	return tree;
}

void drop_collected_mounts(struct vfsmount *mnt)
{
	LIST_HEAD(umount_list);
	down_write(&namespace_sem);
	br_write_lock(vfsmount_lock);
	umount_tree(mnt, 0, &umount_list);
	br_write_unlock(vfsmount_lock);
	up_write(&namespace_sem);
	release_mounts(&umount_list);
}

int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
		   struct vfsmount *root)
{
	struct vfsmount *mnt;
	int res = f(root, arg);
	if (res)
		return res;
	list_for_each_entry(mnt, &root->mnt_list, mnt_list) {
		res = f(mnt, arg);
		if (res)
			return res;
	}
	return 0;
}

static void cleanup_group_ids(struct vfsmount *mnt, struct vfsmount *end)
{
	struct vfsmount *p;

	for (p = mnt; p != end; p = next_mnt(p, mnt)) {
		if (p->mnt_group_id && !IS_MNT_SHARED(p))
			mnt_release_group_id(p);
	}
}

*mnt
, bool recurse
)
1499 for (p
= mnt
; p
; p
= recurse
? next_mnt(p
, mnt
) : NULL
) {
1500 if (!p
->mnt_group_id
&& !IS_MNT_SHARED(p
)) {
1501 int err
= mnt_alloc_group_id(p
);
1503 cleanup_group_ids(mnt
, p
);
/*
 *  @source_mnt : mount tree to be attached
 *  @nd         : place the mount tree @source_mnt is attached
 *  @parent_nd  : if non-null, detach the source_mnt from its parent and
 *  		   store the parent mount and mountpoint dentry.
 *  		   (done when source_mnt is moved)
 *
 *  NOTE: the table below explains the semantics when a source mount
 *  of a given type is attached to a destination mount of a given type.
 * ---------------------------------------------------------------------------
 * |         BIND MOUNT OPERATION                                            |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   |      |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (++)   |     shared (+) |     shared(+++)|  invalid   |
 * |          |               |                |                |            |
 * |non-shared| shared (+)    |      private   |      slave (*) |  invalid   |
 * ***************************************************************************
 * A bind operation clones the source mount and mounts the clone on the
 * destination mount.
 *
 * (++)  the cloned mount is propagated to all the mounts in the propagation
 * 	 tree of the destination mount and the cloned mount is added to
 * 	 the peer group of the source mount.
 * (+)   the cloned mount is created under the destination mount and is marked
 *       as shared. The cloned mount is added to the peer group of the source
 *       mount.
 * (+++) the mount is propagated to all the mounts in the propagation tree
 *       of the destination mount and the cloned mount is made slave
 *       of the same master as that of the source mount. The cloned mount
 *       is marked as 'shared and slave'.
 * (*)   the cloned mount is made a slave of the same master as that of the
 * 	 source mount.
 *
 * ---------------------------------------------------------------------------
 * |         		MOVE MOUNT OPERATION                                 |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   |      |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (+)    |     shared (+) |     shared(+++)|  invalid   |
 * |          |               |                |                |            |
 * |non-shared| shared (+*)   |      private   |      slave (*) | unbindable |
 * ***************************************************************************
 *
 * (+)   the mount is moved to the destination. And is then propagated to
 * 	 all the mounts in the propagation tree of the destination mount.
 * (+*)  the mount is moved to the destination.
 * (+++) the mount is moved to the destination and is then propagated to
 * 	 all the mounts belonging to the destination mount's propagation tree.
 * 	 the mount is marked as 'shared and slave'.
 * (*)	 the mount continues to be a slave at the new location.
 *
 * if the source mount is a tree, the operations explained above are
 * applied to each mount in the tree.
 * Must be called without spinlocks held, since this function can sleep
 * in case of insufficient memory.
 */
static int attach_recursive_mnt(struct vfsmount *source_mnt,
			struct path *path, struct path *parent_path)
{
	LIST_HEAD(tree_list);
	struct vfsmount *dest_mnt = path->mnt;
	struct dentry *dest_dentry = path->dentry;
	struct vfsmount *child, *p;
	int err;

	if (IS_MNT_SHARED(dest_mnt)) {
		err = invent_group_ids(source_mnt, true);
		if (err)
			goto out;
	}
	err = propagate_mnt(dest_mnt, dest_dentry, source_mnt, &tree_list);
	if (err)
		goto out_cleanup_ids;

	br_write_lock(vfsmount_lock);

	if (IS_MNT_SHARED(dest_mnt)) {
		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
			set_mnt_shared(p);
	}
	if (parent_path) {
		detach_mnt(source_mnt, parent_path);
		attach_mnt(source_mnt, path);
		touch_mnt_namespace(parent_path->mnt->mnt_ns);
	} else {
		mnt_set_mountpoint(dest_mnt, dest_dentry, source_mnt);
		commit_tree(source_mnt);
	}

	list_for_each_entry_safe(child, p, &tree_list, mnt_hash) {
		list_del_init(&child->mnt_hash);
		commit_tree(child);
	}
	br_write_unlock(vfsmount_lock);

	return 0;

out_cleanup_ids:
	if (IS_MNT_SHARED(dest_mnt))
		cleanup_group_ids(source_mnt, NULL);
out:
	return err;
}

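/*
 * Illustrative sketch (not part of this file): the propagation semantics
 * implemented above are driven from userspace with mount(2).  A minimal
 * sequence (paths hypothetical, error handling omitted) that makes a
 * subtree shared and then binds it, so the two stay in one peer group:
 */
#if 0
#include <sys/mount.h>

static void example_shared_bind(void)
{
	/* mark /mnt/a (and children) as a shared subtree */
	mount(NULL, "/mnt/a", NULL, MS_SHARED | MS_REC, NULL);
	/* bind it to /mnt/b; per the table above, the clone joins
	 * /mnt/a's peer group ("shared (++)") */
	mount("/mnt/a", "/mnt/b", NULL, MS_BIND, NULL);
	/* a mount made under /mnt/a now appears under /mnt/b too */
}
#endif
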
static int lock_mount(struct path *path)
{
	struct vfsmount *mnt;
retry:
	mutex_lock(&path->dentry->d_inode->i_mutex);
	if (unlikely(cant_mount(path->dentry))) {
		mutex_unlock(&path->dentry->d_inode->i_mutex);
		return -ENOENT;
	}
	down_write(&namespace_sem);
	mnt = lookup_mnt(path);
	if (likely(!mnt))
		return 0;
	up_write(&namespace_sem);
	mutex_unlock(&path->dentry->d_inode->i_mutex);
	path_put(path);
	path->mnt = mnt;
	path->dentry = dget(mnt->mnt_root);
	goto retry;
}

static void unlock_mount(struct path *path)
{
	up_write(&namespace_sem);
	mutex_unlock(&path->dentry->d_inode->i_mutex);
}

static int graft_tree(struct vfsmount *mnt, struct path *path)
{
	if (mnt->mnt_sb->s_flags & MS_NOUSER)
		return -EINVAL;

	if (S_ISDIR(path->dentry->d_inode->i_mode) !=
	      S_ISDIR(mnt->mnt_root->d_inode->i_mode))
		return -ENOTDIR;

	if (d_unlinked(path->dentry))
		return -ENOENT;

	return attach_recursive_mnt(mnt, path, NULL);
}

/*
 * Sanity check the flags to change_mnt_propagation.
 */
static int flags_to_propagation_type(int flags)
{
	int type = flags & ~(MS_REC | MS_SILENT);

	/* Fail if any non-propagation flags are set */
	if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
		return 0;
	/* Only one propagation flag should be set */
	if (!is_power_of_2(type))
		return 0;
	return type;
}

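/*
 * Illustrative sketch (not part of this file): the is_power_of_2() test
 * above rejects combined propagation requests, so:
 *
 *	flags_to_propagation_type(MS_SHARED | MS_REC)    -> MS_SHARED
 *	flags_to_propagation_type(MS_SHARED | MS_SLAVE)  -> 0 (two flags set)
 *	flags_to_propagation_type(MS_SHARED | MS_NOSUID) -> 0 (non-propagation
 *							      flag present)
 */
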
/*
 * recursively change the type of the mountpoint.
 */
static int do_change_type(struct path *path, int flag)
{
	struct vfsmount *m, *mnt = path->mnt;
	int recurse = flag & MS_REC;
	int type;
	int err = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (path->dentry != path->mnt->mnt_root)
		return -EINVAL;

	type = flags_to_propagation_type(flag);
	if (!type)
		return -EINVAL;

	down_write(&namespace_sem);
	if (type == MS_SHARED) {
		err = invent_group_ids(mnt, recurse);
		if (err)
			goto out_unlock;
	}

	br_write_lock(vfsmount_lock);
	for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
		change_mnt_propagation(m, type);
	br_write_unlock(vfsmount_lock);

out_unlock:
	up_write(&namespace_sem);
	return err;
}

/*
 * do loopback mount.
 */
static int do_loopback(struct path *path, char *old_name,
				int recurse)
{
	LIST_HEAD(umount_list);
	struct path old_path;
	struct vfsmount *mnt = NULL;
	int err = mount_is_safe(path);
	if (err)
		return err;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path);
	if (err)
		return err;

	err = lock_mount(path);
	if (err)
		goto out;

	err = -EINVAL;
	if (IS_MNT_UNBINDABLE(old_path.mnt))
		goto out2;

	if (!check_mnt(path->mnt) || !check_mnt(old_path.mnt))
		goto out2;

	err = -ENOMEM;
	if (recurse)
		mnt = copy_tree(old_path.mnt, old_path.dentry, 0);
	else
		mnt = clone_mnt(old_path.mnt, old_path.dentry, 0);

	if (!mnt)
		goto out2;

	err = graft_tree(mnt, path);
	if (err) {
		br_write_lock(vfsmount_lock);
		umount_tree(mnt, 0, &umount_list);
		br_write_unlock(vfsmount_lock);
	}
out2:
	unlock_mount(path);
	release_mounts(&umount_list);
out:
	path_put(&old_path);
	return err;
}

static int change_mount_flags(struct vfsmount *mnt, int ms_flags)
{
	int error = 0;
	int readonly_request = 0;

	if (ms_flags & MS_RDONLY)
		readonly_request = 1;
	if (readonly_request == __mnt_is_readonly(mnt))
		return 0;

	if (readonly_request)
		error = mnt_make_readonly(mnt);
	else
		__mnt_unmake_readonly(mnt);
	return error;
}

/*
 * change filesystem flags. dir should be a physical root of filesystem.
 * If you've mounted a non-root directory somewhere and want to do remount
 * on it - tough luck.
 */
static int do_remount(struct path *path, int flags, int mnt_flags,
		      void *data)
{
	int err;
	struct super_block *sb = path->mnt->mnt_sb;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!check_mnt(path->mnt))
		return -EINVAL;

	if (path->dentry != path->mnt->mnt_root)
		return -EINVAL;

	err = security_sb_remount(sb, data);
	if (err)
		return err;

	down_write(&sb->s_umount);
	if (flags & MS_BIND)
		err = change_mount_flags(path->mnt, flags);
	else
		err = do_remount_sb(sb, flags, data, 0);
	if (!err) {
		br_write_lock(vfsmount_lock);
		mnt_flags |= path->mnt->mnt_flags & MNT_PROPAGATION_MASK;
		path->mnt->mnt_flags = mnt_flags;
		br_write_unlock(vfsmount_lock);
	}
	up_write(&sb->s_umount);
	if (!err) {
		br_write_lock(vfsmount_lock);
		touch_mnt_namespace(path->mnt->mnt_ns);
		br_write_unlock(vfsmount_lock);
	}
	return err;
}

static inline int tree_contains_unbindable(struct vfsmount *mnt)
{
	struct vfsmount *p;
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		if (IS_MNT_UNBINDABLE(p))
			return 1;
	}
	return 0;
}

static int do_move_mount(struct path *path, char *old_name)
{
	struct path old_path, parent_path;
	struct vfsmount *p;
	int err = 0;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = kern_path(old_name, LOOKUP_FOLLOW, &old_path);
	if (err)
		return err;

	err = lock_mount(path);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (!check_mnt(path->mnt) || !check_mnt(old_path.mnt))
		goto out1;

	if (d_unlinked(path->dentry))
		goto out1;

	err = -EINVAL;
	if (old_path.dentry != old_path.mnt->mnt_root)
		goto out1;

	if (!mnt_has_parent(old_path.mnt))
		goto out1;

	if (S_ISDIR(path->dentry->d_inode->i_mode) !=
	      S_ISDIR(old_path.dentry->d_inode->i_mode))
		goto out1;
	/*
	 * Don't move a mount residing in a shared parent.
	 */
	if (IS_MNT_SHARED(old_path.mnt->mnt_parent))
		goto out1;
	/*
	 * Don't move a mount tree containing unbindable mounts to a destination
	 * mount which is shared.
	 */
	if (IS_MNT_SHARED(path->mnt) &&
	    tree_contains_unbindable(old_path.mnt))
		goto out1;
	err = -ELOOP;
	for (p = path->mnt; mnt_has_parent(p); p = p->mnt_parent)
		if (p == old_path.mnt)
			goto out1;

	err = attach_recursive_mnt(old_path.mnt, path, &parent_path);
	if (err)
		goto out1;

	/* if the mount is moved, it should no longer be expired
	 * automatically */
	list_del_init(&old_path.mnt->mnt_expire);
out1:
	unlock_mount(path);
out:
	if (!err)
		path_put(&parent_path);
	path_put(&old_path);
	return err;
}

static struct vfsmount *fs_set_subtype(struct vfsmount *mnt, const char *fstype)
{
	int err;
	const char *subtype = strchr(fstype, '.');
	if (subtype) {
		subtype++;
		err = -EINVAL;
		if (!subtype[0])
			goto err;
	} else
		subtype = "";

	mnt->mnt_sb->s_subtype = kstrdup(subtype, GFP_KERNEL);
	err = -ENOMEM;
	if (!mnt->mnt_sb->s_subtype)
		goto err;
	return mnt;

err:
	mntput(mnt);
	return ERR_PTR(err);
}

static struct vfsmount *
do_kern_mount(const char *fstype, int flags, const char *name, void *data)
{
	struct file_system_type *type = get_fs_type(fstype);
	struct vfsmount *mnt;
	if (!type)
		return ERR_PTR(-ENODEV);
	mnt = vfs_kern_mount(type, flags, name, data);
	if (!IS_ERR(mnt) && (type->fs_flags & FS_HAS_SUBTYPE) &&
	    !mnt->mnt_sb->s_subtype)
		mnt = fs_set_subtype(mnt, fstype);
	put_filesystem(type);
	return mnt;
}

/*
 * add a mount into a namespace's mount tree
 */
static int do_add_mount(struct vfsmount *newmnt, struct path *path, int mnt_flags)
{
	int err;

	mnt_flags &= ~(MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL);

	err = lock_mount(path);
	if (err)
		return err;

	err = -EINVAL;
	if (!(mnt_flags & MNT_SHRINKABLE) && !check_mnt(path->mnt))
		goto unlock;

	/* Refuse the same filesystem on the same mount point */
	err = -EBUSY;
	if (path->mnt->mnt_sb == newmnt->mnt_sb &&
	    path->mnt->mnt_root == path->dentry)
		goto unlock;

	err = -EINVAL;
	if (S_ISLNK(newmnt->mnt_root->d_inode->i_mode))
		goto unlock;

	newmnt->mnt_flags = mnt_flags;
	err = graft_tree(newmnt, path);

unlock:
	unlock_mount(path);
	return err;
}

/*
 * create a new mount for userspace and request it to be added into the
 * namespace's tree
 */
static int do_new_mount(struct path *path, char *type, int flags,
			int mnt_flags, char *name, void *data)
{
	struct vfsmount *mnt;
	int err;

	if (!type)
		return -EINVAL;

	/* we need capabilities... */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	mnt = do_kern_mount(type, flags, name, data);
	if (IS_ERR(mnt))
		return PTR_ERR(mnt);

	err = do_add_mount(mnt, path, mnt_flags);
	if (err)
		mntput(mnt);
	return err;
}

*m
, struct path
*path
)
2012 /* The new mount record should have at least 2 refs to prevent it being
2013 * expired before we get a chance to add it
2015 BUG_ON(mnt_get_count(m
) < 2);
2017 if (m
->mnt_sb
== path
->mnt
->mnt_sb
&&
2018 m
->mnt_root
== path
->dentry
) {
2023 err
= do_add_mount(m
, path
, path
->mnt
->mnt_flags
| MNT_SHRINKABLE
);
2027 /* remove m from any expiration list it may be on */
2028 if (!list_empty(&m
->mnt_expire
)) {
2029 down_write(&namespace_sem
);
2030 br_write_lock(vfsmount_lock
);
2031 list_del_init(&m
->mnt_expire
);
2032 br_write_unlock(vfsmount_lock
);
2033 up_write(&namespace_sem
);
/**
 * mnt_set_expiry - Put a mount on an expiration list
 * @mnt: The mount to list.
 * @expiry_list: The list to add the mount to.
 */
void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
{
	down_write(&namespace_sem);
	br_write_lock(vfsmount_lock);

	list_add_tail(&mnt->mnt_expire, expiry_list);

	br_write_unlock(vfsmount_lock);
	up_write(&namespace_sem);
}
EXPORT_SYMBOL(mnt_set_expiry);

/*
 * process a list of expirable mountpoints with the intent of discarding any
 * mountpoints that aren't in use and haven't been touched since last we came
 * here.
 */
void mark_mounts_for_expiry(struct list_head *mounts)
{
	struct vfsmount *mnt, *next;
	LIST_HEAD(graveyard);
	LIST_HEAD(umounts);

	if (list_empty(mounts))
		return;

	down_write(&namespace_sem);
	br_write_lock(vfsmount_lock);

	/* extract from the expiration list every vfsmount that matches the
	 * following criteria:
	 * - only referenced by its parent vfsmount
	 * - still marked for expiry (marked on the last call here; marks are
	 *   cleared by mntput())
	 */
	list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
		if (!xchg(&mnt->mnt_expiry_mark, 1) ||
			propagate_mount_busy(mnt, 1))
			continue;
		list_move(&mnt->mnt_expire, &graveyard);
	}
	while (!list_empty(&graveyard)) {
		mnt = list_first_entry(&graveyard, struct vfsmount, mnt_expire);
		touch_mnt_namespace(mnt->mnt_ns);
		umount_tree(mnt, 1, &umounts);
	}
	br_write_unlock(vfsmount_lock);
	up_write(&namespace_sem);

	release_mounts(&umounts);
}
EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);

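/*
 * Illustrative sketch (not part of this file): an automounting filesystem
 * typically puts freshly created submounts on a private list with
 * mnt_set_expiry() and then calls mark_mounts_for_expiry() periodically,
 * e.g. from delayed work.  The examplefs_* names are hypothetical.
 */
#if 0
static LIST_HEAD(examplefs_auto_mounts);

static void examplefs_expire_work(struct work_struct *work)
{
	/* first pass sets the expiry mark on idle mounts; a later pass
	 * finds the mark still set and actually unmounts them */
	mark_mounts_for_expiry(&examplefs_auto_mounts);
	/* ... re-arm the delayed work here ... */
}
#endif
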
/*
 * Ripoff of 'select_parent()'
 *
 * search the list of submounts for a given mountpoint, and move any
 * shrinkable submounts to the 'graveyard' list.
 */
static int select_submounts(struct vfsmount *parent, struct list_head *graveyard)
{
	struct vfsmount *this_parent = parent;
	struct list_head *next;
	int found = 0;

repeat:
	next = this_parent->mnt_mounts.next;
resume:
	while (next != &this_parent->mnt_mounts) {
		struct list_head *tmp = next;
		struct vfsmount *mnt = list_entry(tmp, struct vfsmount, mnt_child);

		next = tmp->next;
		if (!(mnt->mnt_flags & MNT_SHRINKABLE))
			continue;
		/*
		 * Descend a level if the d_mounts list is non-empty.
		 */
		if (!list_empty(&mnt->mnt_mounts)) {
			this_parent = mnt;
			goto repeat;
		}

		if (!propagate_mount_busy(mnt, 1)) {
			list_move_tail(&mnt->mnt_expire, graveyard);
			found++;
		}
	}
	/*
	 * All done at this level ... ascend and resume the search
	 */
	if (this_parent != parent) {
		next = this_parent->mnt_child.next;
		this_parent = this_parent->mnt_parent;
		goto resume;
	}
	return found;
}

/*
 * process a list of expirable mountpoints with the intent of discarding any
 * submounts of a specific parent mountpoint
 *
 * vfsmount_lock must be held for write
 */
static void shrink_submounts(struct vfsmount *mnt, struct list_head *umounts)
{
	LIST_HEAD(graveyard);
	struct vfsmount *m;

	/* extract submounts of 'mountpoint' from the expiration list */
	while (select_submounts(mnt, &graveyard)) {
		while (!list_empty(&graveyard)) {
			m = list_first_entry(&graveyard, struct vfsmount,
						mnt_expire);
			touch_mnt_namespace(m->mnt_ns);
			umount_tree(m, 1, umounts);
		}
	}
}

/*
 * Some copy_from_user() implementations do not return the exact number of
 * bytes remaining to copy on a fault.  But copy_mount_options() requires that.
 * Note that this function differs from copy_from_user() in that it will oops
 * on bad values of `to', rather than returning a short copy.
 */
static long exact_copy_from_user(void *to, const void __user * from,
				 unsigned long n)
{
	char *t = to;
	const char __user *f = from;
	char c;

	if (!access_ok(VERIFY_READ, from, n))
		return n;

	while (n) {
		if (__get_user(c, f)) {
			memset(t, 0, n);
			break;
		}
		*t++ = c;
		f++;
		n--;
	}
	return n;
}

int copy_mount_options(const void __user * data, unsigned long *where)
{
	int i;
	unsigned long page;
	unsigned long size;

	*where = 0;
	if (!data)
		return 0;

	if (!(page = __get_free_page(GFP_KERNEL)))
		return -ENOMEM;

	/* We only care that *some* data at the address the user
	 * gave us is valid.  Just in case, we'll zero
	 * the remainder of the page.
	 */
	/* copy_from_user cannot cross TASK_SIZE ! */
	size = TASK_SIZE - (unsigned long)data;
	if (size > PAGE_SIZE)
		size = PAGE_SIZE;

	i = size - exact_copy_from_user((void *)page, data, size);
	if (!i) {
		free_page(page);
		return -EFAULT;
	}
	if (i != PAGE_SIZE)
		memset((char *)page + i, 0, PAGE_SIZE - i);
	*where = page;
	return 0;
}

int copy_mount_string(const void __user *data, char **where)
{
	char *tmp;

	if (!data) {
		*where = NULL;
		return 0;
	}

	tmp = strndup_user(data, PAGE_SIZE);
	if (IS_ERR(tmp))
		return PTR_ERR(tmp);

	*where = tmp;
	return 0;
}

/*
 * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
 * be given to the mount() call (ie: read-only, no-dev, no-suid etc).
 *
 * data is a (void *) that can point to any structure up to
 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
 * information (or be NULL).
 *
 * Pre-0.97 versions of mount() didn't have a flags word.
 * When the flags word was introduced its top half was required
 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
 * Therefore, if this magic number is present, it carries no information
 * and must be discarded.
 */
long do_mount(char *dev_name, char *dir_name, char *type_page,
		  unsigned long flags, void *data_page)
{
	struct path path;
	int retval = 0;
	int mnt_flags = 0;

	/* Discard magic */
	if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
		flags &= ~MS_MGC_MSK;

	/* Basic sanity checks */

	if (!dir_name || !*dir_name || !memchr(dir_name, 0, PAGE_SIZE))
		return -EINVAL;

	if (data_page)
		((char *)data_page)[PAGE_SIZE - 1] = 0;

	/* ... and get the mountpoint */
	retval = kern_path(dir_name, LOOKUP_FOLLOW, &path);
	if (retval)
		return retval;

	retval = security_sb_mount(dev_name, &path,
				   type_page, flags, data_page);
	if (retval)
		goto dput_out;

	/* Default to relatime unless overridden */
	if (!(flags & MS_NOATIME))
		mnt_flags |= MNT_RELATIME;

	/* Separate the per-mountpoint flags */
	if (flags & MS_NOSUID)
		mnt_flags |= MNT_NOSUID;
	if (flags & MS_NODEV)
		mnt_flags |= MNT_NODEV;
	if (flags & MS_NOEXEC)
		mnt_flags |= MNT_NOEXEC;
	if (flags & MS_NOATIME)
		mnt_flags |= MNT_NOATIME;
	if (flags & MS_NODIRATIME)
		mnt_flags |= MNT_NODIRATIME;
	if (flags & MS_STRICTATIME)
		mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME);
	if (flags & MS_RDONLY)
		mnt_flags |= MNT_READONLY;

	flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | MS_BORN |
		   MS_NOATIME | MS_NODIRATIME | MS_RELATIME | MS_KERNMOUNT |
		   MS_STRICTATIME);

	if (flags & MS_REMOUNT)
		retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
				    data_page);
	else if (flags & MS_BIND)
		retval = do_loopback(&path, dev_name, flags & MS_REC);
	else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
		retval = do_change_type(&path, flags);
	else if (flags & MS_MOVE)
		retval = do_move_mount(&path, dev_name);
	else
		retval = do_new_mount(&path, type_page, flags, mnt_flags,
				      dev_name, data_page);
dput_out:
	path_put(&path);
	return retval;
}

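/*
 * Illustrative sketch (not part of this file): the flag separation above
 * means one mount(2) call carries superblock flags, per-mount flags and
 * the fs-specific data page together.  A minimal userspace caller (paths
 * and options hypothetical):
 */
#if 0
#include <sys/mount.h>

int example_mount_tmpfs(void)
{
	/* MS_NOSUID/MS_NODEV become per-mount MNT_* flags above;
	 * "size=16m" ends up in the data page handed to the filesystem */
	return mount("tmpfs", "/mnt/scratch", "tmpfs",
		     MS_NOSUID | MS_NODEV, "size=16m");
}
#endif
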
static struct mnt_namespace *alloc_mnt_ns(void)
{
	struct mnt_namespace *new_ns;

	new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL);
	if (!new_ns)
		return ERR_PTR(-ENOMEM);
	atomic_set(&new_ns->count, 1);
	new_ns->root = NULL;
	INIT_LIST_HEAD(&new_ns->list);
	init_waitqueue_head(&new_ns->poll);
	new_ns->event = 0;
	return new_ns;
}

void mnt_make_longterm(struct vfsmount *mnt)
{
	__mnt_make_longterm(mnt);
}

void mnt_make_shortterm(struct vfsmount *mnt)
{
	if (atomic_add_unless(&mnt->mnt_longterm, -1, 1))
		return;
	br_write_lock(vfsmount_lock);
	atomic_dec(&mnt->mnt_longterm);
	br_write_unlock(vfsmount_lock);
}

/*
 * Allocate a new namespace structure and populate it with contents
 * copied from the namespace of the passed in task structure.
 */
static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
		struct fs_struct *fs)
{
	struct mnt_namespace *new_ns;
	struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
	struct vfsmount *p, *q;

	new_ns = alloc_mnt_ns();
	if (IS_ERR(new_ns))
		return new_ns;

	down_write(&namespace_sem);
	/* First pass: copy the tree topology */
	new_ns->root = copy_tree(mnt_ns->root, mnt_ns->root->mnt_root,
					CL_COPY_ALL | CL_EXPIRE);
	if (!new_ns->root) {
		up_write(&namespace_sem);
		kfree(new_ns);
		return ERR_PTR(-ENOMEM);
	}
	br_write_lock(vfsmount_lock);
	list_add_tail(&new_ns->list, &new_ns->root->mnt_list);
	br_write_unlock(vfsmount_lock);

	/*
	 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
	 * as belonging to new namespace.  We have already acquired a private
	 * fs_struct, so tsk->fs->lock is not needed.
	 */
	p = mnt_ns->root;
	q = new_ns->root;
	while (p) {
		q->mnt_ns = new_ns;
		__mnt_make_longterm(q);
		if (fs) {
			if (p == fs->root.mnt) {
				fs->root.mnt = mntget(q);
				__mnt_make_longterm(q);
				mnt_make_shortterm(p);
				rootmnt = p;
			}
			if (p == fs->pwd.mnt) {
				fs->pwd.mnt = mntget(q);
				__mnt_make_longterm(q);
				mnt_make_shortterm(p);
				pwdmnt = p;
			}
		}
		p = next_mnt(p, mnt_ns->root);
		q = next_mnt(q, new_ns->root);
	}
	up_write(&namespace_sem);

	if (rootmnt)
		mntput(rootmnt);
	if (pwdmnt)
		mntput(pwdmnt);

	return new_ns;
}

struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
		struct fs_struct *new_fs)
{
	struct mnt_namespace *new_ns;

	BUG_ON(!ns);
	get_mnt_ns(ns);

	if (!(flags & CLONE_NEWNS))
		return ns;

	new_ns = dup_mnt_ns(ns, new_fs);

	put_mnt_ns(ns);
	return new_ns;
}

/**
 * create_mnt_ns - creates a private namespace and adds a root filesystem
 * @mnt: pointer to the new root filesystem mountpoint
 */
static struct mnt_namespace *create_mnt_ns(struct vfsmount *mnt)
{
	struct mnt_namespace *new_ns;

	new_ns = alloc_mnt_ns();
	if (!IS_ERR(new_ns)) {
		mnt->mnt_ns = new_ns;
		__mnt_make_longterm(mnt);
		new_ns->root = mnt;
		list_add(&new_ns->list, &new_ns->root->mnt_list);
	}
	return new_ns;
}

struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
{
	struct mnt_namespace *ns;
	struct super_block *s;
	struct path path;
	int err;

	ns = create_mnt_ns(mnt);
	if (IS_ERR(ns))
		return ERR_CAST(ns);

	err = vfs_path_lookup(mnt->mnt_root, mnt,
			name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);

	put_mnt_ns(ns);

	if (err)
		return ERR_PTR(err);

	/* trade a vfsmount reference for active sb one */
	s = path.mnt->mnt_sb;
	atomic_inc(&s->s_active);
	mntput(path.mnt);
	/* lock the sucker */
	down_write(&s->s_umount);
	/* ... and return the root of (sub)tree on it */
	return path.dentry;
}
EXPORT_SYMBOL(mount_subtree);

SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
		char __user *, type, unsigned long, flags, void __user *, data)
{
	int ret;
	char *kernel_type;
	char *kernel_dir;
	char *kernel_dev;
	unsigned long data_page;

	ret = copy_mount_string(type, &kernel_type);
	if (ret < 0)
		goto out_type;

	kernel_dir = getname(dir_name);
	if (IS_ERR(kernel_dir)) {
		ret = PTR_ERR(kernel_dir);
		goto out_dir;
	}

	ret = copy_mount_string(dev_name, &kernel_dev);
	if (ret < 0)
		goto out_dev;

	ret = copy_mount_options(data, &data_page);
	if (ret < 0)
		goto out_data;

	ret = do_mount(kernel_dev, kernel_dir, kernel_type, flags,
		(void *) data_page);

	free_page(data_page);
out_data:
	kfree(kernel_dev);
out_dev:
	putname(kernel_dir);
out_dir:
	kfree(kernel_type);
out_type:
	return ret;
}

/*
 * Return true if path is reachable from root
 *
 * namespace_sem or vfsmount_lock is held
 */
bool is_path_reachable(struct vfsmount *mnt, struct dentry *dentry,
			 const struct path *root)
{
	while (mnt != root->mnt && mnt_has_parent(mnt)) {
		dentry = mnt->mnt_mountpoint;
		mnt = mnt->mnt_parent;
	}
	return mnt == root->mnt && is_subdir(dentry, root->dentry);
}

int path_is_under(struct path *path1, struct path *path2)
{
	int res;
	br_read_lock(vfsmount_lock);
	res = is_path_reachable(path1->mnt, path1->dentry, path2);
	br_read_unlock(vfsmount_lock);
	return res;
}
EXPORT_SYMBOL(path_is_under);

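/*
 * Illustrative sketch (not part of this file): a typical caller uses
 * path_is_under() to refuse operations that would escape a subtree
 * boundary.  The helper name below is hypothetical.
 */
#if 0
static int example_check_confined(struct path *target, struct path *jail)
{
	/* non-zero means target is the same as, or beneath, jail */
	return path_is_under(target, jail) ? 0 : -EACCES;
}
#endif
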
/*
 * pivot_root Semantics:
 * Moves the root file system of the current process to the directory put_old,
 * makes new_root as the new root file system of the current process, and sets
 * root/cwd of all processes which had them on the current root to new_root.
 *
 * Restrictions:
 * The new_root and put_old must be directories, and must not be on the
 * same file system as the current process root. The put_old must be
 * underneath new_root, i.e. adding a non-zero number of /.. to the string
 * pointed to by put_old must yield the same directory as new_root. No other
 * file system may be mounted on put_old. After all, new_root is a mountpoint.
 *
 * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem.
 * See Documentation/filesystems/ramfs-rootfs-initramfs.txt for alternatives
 * in this situation.
 *
 * Notes:
 *  - we don't move root/cwd if they are not at the root (reason: if something
 *    cared enough to change them, it's probably wrong to force them elsewhere)
 *  - it's okay to pick a root that isn't the root of a file system, e.g.
 *    /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
 *    though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
 *    first.
 */
SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
		const char __user *, put_old)
{
	struct path new, old, parent_path, root_parent, root;
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	error = user_path_dir(new_root, &new);
	if (error)
		goto out0;

	error = user_path_dir(put_old, &old);
	if (error)
		goto out1;

	error = security_sb_pivotroot(&old, &new);
	if (error)
		goto out2;

	get_fs_root(current->fs, &root);
	error = lock_mount(&old);
	if (error)
		goto out3;

	error = -EINVAL;
	if (IS_MNT_SHARED(old.mnt) ||
		IS_MNT_SHARED(new.mnt->mnt_parent) ||
		IS_MNT_SHARED(root.mnt->mnt_parent))
		goto out4;
	if (!check_mnt(root.mnt) || !check_mnt(new.mnt))
		goto out4;
	error = -ENOENT;
	if (d_unlinked(new.dentry))
		goto out4;
	if (d_unlinked(old.dentry))
		goto out4;
	error = -EBUSY;
	if (new.mnt == root.mnt ||
	    old.mnt == root.mnt)
		goto out4; /* loop, on the same file system */
	error = -EINVAL;
	if (root.mnt->mnt_root != root.dentry)
		goto out4; /* not a mountpoint */
	if (!mnt_has_parent(root.mnt))
		goto out4; /* not attached */
	if (new.mnt->mnt_root != new.dentry)
		goto out4; /* not a mountpoint */
	if (!mnt_has_parent(new.mnt))
		goto out4; /* not attached */
	/* make sure we can reach put_old from new_root */
	if (!is_path_reachable(old.mnt, old.dentry, &new))
		goto out4;
	br_write_lock(vfsmount_lock);
	detach_mnt(new.mnt, &parent_path);
	detach_mnt(root.mnt, &root_parent);
	/* mount old root on put_old */
	attach_mnt(root.mnt, &old);
	/* mount new_root on / */
	attach_mnt(new.mnt, &root_parent);
	touch_mnt_namespace(current->nsproxy->mnt_ns);
	br_write_unlock(vfsmount_lock);
	chroot_fs_refs(&root, &new);
	error = 0;
out4:
	unlock_mount(&old);
	if (!error) {
		path_put(&root_parent);
		path_put(&parent_path);
	}
out3:
	path_put(&root);
out2:
	path_put(&old);
out1:
	path_put(&new);
out0:
	return error;
}

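/*
 * Illustrative sketch (not part of this file): a minimal initramfs-style
 * root switch using pivot_root(2) from userspace.  The paths are
 * hypothetical and error handling is reduced to the bare minimum.
 */
#if 0
#include <sys/syscall.h>
#include <unistd.h>

static int example_pivot(void)
{
	/* /newroot must be a mountpoint; put_old is /newroot/oldroot */
	if (syscall(SYS_pivot_root, "/newroot", "/newroot/oldroot") < 0)
		return -1;
	chdir("/");
	/* the old root now lives at /oldroot and can be lazily unmounted */
	return 0;
}
#endif
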
static void __init init_mount_tree(void)
{
	struct vfsmount *mnt;
	struct mnt_namespace *ns;
	struct path root;

	mnt = do_kern_mount("rootfs", 0, "rootfs", NULL);
	if (IS_ERR(mnt))
		panic("Can't create rootfs");

	ns = create_mnt_ns(mnt);
	if (IS_ERR(ns))
		panic("Can't allocate initial namespace");

	init_task.nsproxy->mnt_ns = ns;
	get_mnt_ns(ns);

	root.mnt = ns->root;
	root.dentry = ns->root->mnt_root;

	set_fs_pwd(current->fs, &root);
	set_fs_root(current->fs, &root);
}

void __init mnt_init(void)
{
	unsigned u;
	int err;

	init_rwsem(&namespace_sem);

	mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct vfsmount),
			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

	mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC);

	if (!mount_hashtable)
		panic("Failed to allocate mount hash table\n");

	printk(KERN_INFO "Mount-cache hash table entries: %lu\n", HASH_SIZE);

	for (u = 0; u < HASH_SIZE; u++)
		INIT_LIST_HEAD(&mount_hashtable[u]);

	br_lock_init(vfsmount_lock);

	err = sysfs_init();
	if (err)
		printk(KERN_WARNING "%s: sysfs_init error: %d\n",
			__func__, err);
	fs_kobj = kobject_create_and_add("fs", NULL);
	if (!fs_kobj)
		printk(KERN_WARNING "%s: kobj create error\n", __func__);
	init_rootfs();
	init_mount_tree();
}

*ns
)
2723 LIST_HEAD(umount_list
);
2725 if (!atomic_dec_and_test(&ns
->count
))
2727 down_write(&namespace_sem
);
2728 br_write_lock(vfsmount_lock
);
2729 umount_tree(ns
->root
, 0, &umount_list
);
2730 br_write_unlock(vfsmount_lock
);
2731 up_write(&namespace_sem
);
2732 release_mounts(&umount_list
);
struct vfsmount *kern_mount_data(struct file_system_type *type, void *data)
{
	struct vfsmount *mnt;
	mnt = vfs_kern_mount(type, MS_KERNMOUNT, type->name, data);
	if (!IS_ERR(mnt)) {
		/*
		 * it is a longterm mount, don't release mnt until
		 * we unmount before file sys is unregistered
		 */
		mnt_make_longterm(mnt);
	}
	return mnt;
}
EXPORT_SYMBOL_GPL(kern_mount_data);

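/*
 * Illustrative sketch (not part of this file): an in-kernel filesystem
 * that needs a private internal mount typically takes it at init time
 * via kern_mount_data() and releases it with kern_unmount() on exit.
 * The examplefs_* names are hypothetical.
 */
#if 0
static struct vfsmount *examplefs_mnt;

static int __init examplefs_init(void)
{
	examplefs_mnt = kern_mount_data(&examplefs_fs_type, NULL);
	if (IS_ERR(examplefs_mnt))
		return PTR_ERR(examplefs_mnt);
	return 0;
}

static void __exit examplefs_exit(void)
{
	kern_unmount(examplefs_mnt);	/* drops the longterm reference */
}
#endif
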
void kern_unmount(struct vfsmount *mnt)
{
	/* release long term mount so mount point can be released */
	if (!IS_ERR_OR_NULL(mnt)) {
		mnt_make_shortterm(mnt);
		mntput(mnt);
	}
}
EXPORT_SYMBOL(kern_unmount);

bool our_mnt(struct vfsmount *mnt)
{
	return check_mnt(mnt);
}