/*
 *  linux/fs/namespace.c
 *
 * (C) Copyright Al Viro 2000, 2001
 *	Released under GPL v2.
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 * Heavily rewritten.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/capability.h>
#include <linux/mnt_namespace.h>
#include <linux/user_namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/idr.h>
#include <linux/acct.h>		/* acct_auto_close_mnt */
#include <linux/ramfs.h>	/* init_rootfs */
#include <linux/fs_struct.h>	/* get_fs_root et.al. */
#include <linux/fsnotify.h>	/* fsnotify_vfsmount_delete */
#include <linux/uaccess.h>
#include <linux/proc_ns.h>
#include <linux/magic.h>
#include "pnode.h"
#include "internal.h"

#define HASH_SHIFT ilog2(PAGE_SIZE / sizeof(struct list_head))
#define HASH_SIZE (1UL << HASH_SHIFT)

static int event;
static DEFINE_IDA(mnt_id_ida);
static DEFINE_IDA(mnt_group_ida);
static DEFINE_SPINLOCK(mnt_id_lock);
static int mnt_id_start = 0;
static int mnt_group_start = 1;

static struct list_head *mount_hashtable __read_mostly;
static struct list_head *mountpoint_hashtable __read_mostly;
static struct kmem_cache *mnt_cache __read_mostly;
static struct rw_semaphore namespace_sem;

/* /sys/fs */
struct kobject *fs_kobj;
EXPORT_SYMBOL_GPL(fs_kobj);

/*
 * vfsmount lock may be taken for read to prevent changes to the
 * vfsmount hash, ie. during mountpoint lookups or walking back
 * up the tree.
 *
 * It should be taken for write in all cases where the vfsmount
 * tree or hash is modified or when a vfsmount structure is modified.
 */
DEFINE_BRLOCK(vfsmount_lock);

static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> HASH_SHIFT);
	return tmp & (HASH_SIZE - 1);
}
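/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * hash() just mixes the two pointer values into a bucket index.  With
 * PAGE_SIZE == 4096 and sizeof(struct list_head) == 16, HASH_SHIFT is
 * ilog2(256) == 8 and HASH_SIZE is 256, so a lookup walks one of 256
 * chains:
 *
 *	struct list_head *head = mount_hashtable +
 *				 hash(path->mnt, path->dentry);
 */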
#define MNT_WRITER_UNDERFLOW_LIMIT -(1<<16)

/*
 * allocation is serialized by namespace_sem, but we need the spinlock to
 * serialize with freeing.
 */
static int mnt_alloc_id(struct mount *mnt)
{
	int res;

retry:
	ida_pre_get(&mnt_id_ida, GFP_KERNEL);
	spin_lock(&mnt_id_lock);
	res = ida_get_new_above(&mnt_id_ida, mnt_id_start, &mnt->mnt_id);
	if (!res)
		mnt_id_start = mnt->mnt_id + 1;
	spin_unlock(&mnt_id_lock);
	if (res == -EAGAIN)
		goto retry;

	return res;
}

static void mnt_free_id(struct mount *mnt)
{
	int id = mnt->mnt_id;
	spin_lock(&mnt_id_lock);
	ida_remove(&mnt_id_ida, id);
	if (mnt_id_start > id)
		mnt_id_start = id;
	spin_unlock(&mnt_id_lock);
}

/*
 * Allocate a new peer group ID
 *
 * mnt_group_ida is protected by namespace_sem
 */
static int mnt_alloc_group_id(struct mount *mnt)
{
	int res;

	if (!ida_pre_get(&mnt_group_ida, GFP_KERNEL))
		return -ENOMEM;

	res = ida_get_new_above(&mnt_group_ida,
				mnt_group_start,
				&mnt->mnt_group_id);
	if (!res)
		mnt_group_start = mnt->mnt_group_id + 1;

	return res;
}

/*
 * Release a peer group ID
 */
void mnt_release_group_id(struct mount *mnt)
{
	int id = mnt->mnt_group_id;
	ida_remove(&mnt_group_ida, id);
	if (mnt_group_start > id)
		mnt_group_start = id;
	mnt->mnt_group_id = 0;
}

/*
 * vfsmount lock must be held for read
 */
static inline void mnt_add_count(struct mount *mnt, int n)
{
#ifdef CONFIG_SMP
	this_cpu_add(mnt->mnt_pcp->mnt_count, n);
#else
	preempt_disable();
	mnt->mnt_count += n;
	preempt_enable();
#endif
}

/*
 * vfsmount lock must be held for write
 */
unsigned int mnt_get_count(struct mount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
	}

	return count;
#else
	return mnt->mnt_count;
#endif
}

static struct mount *alloc_vfsmnt(const char *name)
{
	struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
	if (mnt) {
		int err;

		err = mnt_alloc_id(mnt);
		if (err)
			goto out_free_cache;

		if (name) {
			mnt->mnt_devname = kstrdup(name, GFP_KERNEL);
			if (!mnt->mnt_devname)
				goto out_free_id;
		}

#ifdef CONFIG_SMP
		mnt->mnt_pcp = alloc_percpu(struct mnt_pcp);
		if (!mnt->mnt_pcp)
			goto out_free_devname;

		this_cpu_add(mnt->mnt_pcp->mnt_count, 1);
#else
		mnt->mnt_count = 1;
		mnt->mnt_writers = 0;
#endif

		INIT_LIST_HEAD(&mnt->mnt_hash);
		INIT_LIST_HEAD(&mnt->mnt_child);
		INIT_LIST_HEAD(&mnt->mnt_mounts);
		INIT_LIST_HEAD(&mnt->mnt_list);
		INIT_LIST_HEAD(&mnt->mnt_expire);
		INIT_LIST_HEAD(&mnt->mnt_share);
		INIT_LIST_HEAD(&mnt->mnt_slave_list);
		INIT_LIST_HEAD(&mnt->mnt_slave);
#ifdef CONFIG_FSNOTIFY
		INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks);
#endif
	}
	return mnt;

#ifdef CONFIG_SMP
out_free_devname:
	kfree(mnt->mnt_devname);
#endif
out_free_id:
	mnt_free_id(mnt);
out_free_cache:
	kmem_cache_free(mnt_cache, mnt);
	return NULL;
}
/*
 * Most r/o checks on a fs are for operations that take
 * discrete amounts of time, like a write() or unlink().
 * We must keep track of when those operations start
 * (for permission checks) and when they end, so that
 * we can determine when writes are able to occur to
 * a filesystem.
 */
/*
 * __mnt_is_readonly: check whether a mount is read-only
 * @mnt: the mount to check for its write status
 *
 * This shouldn't be used directly outside of the VFS.
 * It does not guarantee that the filesystem will stay
 * r/w, just that it is right *now*. This can not and
 * should not be used in place of IS_RDONLY(inode).
 * mnt_want/drop_write() will _keep_ the filesystem
 * r/w.
 */
int __mnt_is_readonly(struct vfsmount *mnt)
{
	if (mnt->mnt_flags & MNT_READONLY)
		return 1;
	if (mnt->mnt_sb->s_flags & MS_RDONLY)
		return 1;
	return 0;
}
EXPORT_SYMBOL_GPL(__mnt_is_readonly);
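/*
 * Editor's note (illustrative sketch, not in the original source):
 * a mount can be read-only two independent ways, and __mnt_is_readonly()
 * checks both - the per-mount MNT_READONLY flag and the per-superblock
 * MS_RDONLY flag.  A caller that only needs a snapshot can do:
 *
 *	if (__mnt_is_readonly(file->f_path.mnt))
 *		return -EROFS;	// refuse before starting a write
 *
 * but anything that must stay writable across an operation has to use
 * mnt_want_write()/mnt_drop_write() instead, as the comment above says.
 */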
static inline void mnt_inc_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_inc(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers++;
#endif
}

static inline void mnt_dec_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_dec(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers--;
#endif
}

static unsigned int mnt_get_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers;
	}

	return count;
#else
	return mnt->mnt_writers;
#endif
}

static int mnt_is_readonly(struct vfsmount *mnt)
{
	if (mnt->mnt_sb->s_readonly_remount)
		return 1;
	/* Order wrt setting s_flags/s_readonly_remount in do_remount() */
	smp_rmb();
	return __mnt_is_readonly(mnt);
}

/*
 * Most r/o & frozen checks on a fs are for operations that take discrete
 * amounts of time, like a write() or unlink(). We must keep track of when
 * those operations start (for permission checks) and when they end, so that we
 * can determine when writes are able to occur to a filesystem.
 */
/**
 * __mnt_want_write - get write access to a mount without freeze protection
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mnt is read-write) before
 * returning success. This operation does not protect against filesystem being
 * frozen. When the write operation is finished, __mnt_drop_write() must be
 * called. This is effectively a refcount.
 */
int __mnt_want_write(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	int ret = 0;

	preempt_disable();
	mnt_inc_writers(mnt);
	/*
	 * The store to mnt_inc_writers must be visible before we pass
	 * MNT_WRITE_HOLD loop below, so that the slowpath can see our
	 * incremented count after it has set MNT_WRITE_HOLD.
	 */
	smp_mb();
	while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD)
		cpu_relax();
	/*
	 * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
	 * be set to match its requirements. So we must not load that until
	 * MNT_WRITE_HOLD is cleared.
	 */
	smp_rmb();
	if (mnt_is_readonly(m)) {
		mnt_dec_writers(mnt);
		ret = -EROFS;
	}
	preempt_enable();

	return ret;
}

/**
 * mnt_want_write - get write access to a mount
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mount is read-write, filesystem
 * is not frozen) before returning success. When the write operation is
 * finished, mnt_drop_write() must be called. This is effectively a refcount.
 */
int mnt_want_write(struct vfsmount *m)
{
	int ret;

	sb_start_write(m->mnt_sb);
	ret = __mnt_want_write(m);
	if (ret)
		sb_end_write(m->mnt_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write);
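/*
 * Illustrative usage sketch (editor's addition, not in the original
 * source): a typical writer brackets the operation with the pair and only
 * holds the write count for the duration of the change:
 *
 *	err = mnt_want_write(path->mnt);
 *	if (err)
 *		return err;
 *	err = do_something_that_writes(path->dentry);
 *	mnt_drop_write(path->mnt);
 *	return err;
 *
 * do_something_that_writes() is a placeholder; the pairing is what matters.
 */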
/**
 * mnt_clone_write - get write access to a mount
 * @mnt: the mount on which to take a write
 *
 * This is effectively like mnt_want_write, except
 * it must only be used to take an extra write reference
 * on a mountpoint that we already know has a write reference
 * on it. This allows some optimisation.
 *
 * After finished, mnt_drop_write must be called as usual to
 * drop the reference.
 */
int mnt_clone_write(struct vfsmount *mnt)
{
	/* superblock may be r/o */
	if (__mnt_is_readonly(mnt))
		return -EROFS;
	preempt_disable();
	mnt_inc_writers(real_mount(mnt));
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL_GPL(mnt_clone_write);

/**
 * __mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount on which to take a write
 *
 * This is like __mnt_want_write, but it takes a file and can
 * do some optimisations if the file is open for write already
 */
int __mnt_want_write_file(struct file *file)
{
	struct inode *inode = file_inode(file);

	if (!(file->f_mode & FMODE_WRITE) || special_file(inode->i_mode))
		return __mnt_want_write(file->f_path.mnt);
	else
		return mnt_clone_write(file->f_path.mnt);
}

/**
 * mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount on which to take a write
 *
 * This is like mnt_want_write, but it takes a file and can
 * do some optimisations if the file is open for write already
 */
int mnt_want_write_file(struct file *file)
{
	int ret;

	sb_start_write(file->f_path.mnt->mnt_sb);
	ret = __mnt_want_write_file(file);
	if (ret)
		sb_end_write(file->f_path.mnt->mnt_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write_file);

/**
 * __mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done
 * performing writes to it.  Must be matched with
 * __mnt_want_write() call above.
 */
void __mnt_drop_write(struct vfsmount *mnt)
{
	preempt_disable();
	mnt_dec_writers(real_mount(mnt));
	preempt_enable();
}

/**
 * mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done performing writes to it and
 * also allows filesystem to be frozen again.  Must be matched with
 * mnt_want_write() call above.
 */
void mnt_drop_write(struct vfsmount *mnt)
{
	__mnt_drop_write(mnt);
	sb_end_write(mnt->mnt_sb);
}
EXPORT_SYMBOL_GPL(mnt_drop_write);

void __mnt_drop_write_file(struct file *file)
{
	__mnt_drop_write(file->f_path.mnt);
}

void mnt_drop_write_file(struct file *file)
{
	mnt_drop_write(file->f_path.mnt);
}
EXPORT_SYMBOL(mnt_drop_write_file);

static int mnt_make_readonly(struct mount *mnt)
{
	int ret = 0;

	br_write_lock(&vfsmount_lock);
	mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
	/*
	 * After storing MNT_WRITE_HOLD, we'll read the counters. This store
	 * should be visible before we do.
	 */
	smp_mb();

	/*
	 * With writers on hold, if this value is zero, then there are
	 * definitely no active writers (although held writers may subsequently
	 * increment the count, they'll have to wait, and decrement it after
	 * seeing MNT_READONLY).
	 *
	 * It is OK to have counter incremented on one CPU and decremented on
	 * another: the sum will add up correctly. The danger would be when we
	 * sum up each counter, if we read a counter before it is incremented,
	 * but then read another CPU's count which it has been subsequently
	 * decremented from -- we would see more decrements than we should.
	 * MNT_WRITE_HOLD protects against this scenario, because
	 * mnt_want_write first increments count, then smp_mb, then spins on
	 * MNT_WRITE_HOLD, so it can't be decremented by another CPU while
	 * we're counting up here.
	 */
	if (mnt_get_writers(mnt) > 0)
		ret = -EBUSY;
	else
		mnt->mnt.mnt_flags |= MNT_READONLY;
	/*
	 * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers
	 * that become unheld will see MNT_READONLY.
	 */
	smp_wmb();
	mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
	br_write_unlock(&vfsmount_lock);
	return ret;
}

static void __mnt_unmake_readonly(struct mount *mnt)
{
	br_write_lock(&vfsmount_lock);
	mnt->mnt.mnt_flags &= ~MNT_READONLY;
	br_write_unlock(&vfsmount_lock);
}

int sb_prepare_remount_readonly(struct super_block *sb)
{
	struct mount *mnt;
	int err = 0;

	/* Racy optimization.  Recheck the counter under MNT_WRITE_HOLD */
	if (atomic_long_read(&sb->s_remove_count))
		return -EBUSY;

	br_write_lock(&vfsmount_lock);
	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
		if (!(mnt->mnt.mnt_flags & MNT_READONLY)) {
			mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
			smp_mb();
			if (mnt_get_writers(mnt) > 0) {
				err = -EBUSY;
				break;
			}
		}
	}
	if (!err && atomic_long_read(&sb->s_remove_count))
		err = -EBUSY;

	if (!err) {
		sb->s_readonly_remount = 1;
		smp_wmb();
	}
	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
		if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
			mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
	}
	br_write_unlock(&vfsmount_lock);

	return err;
}

static void free_vfsmnt(struct mount *mnt)
{
	kfree(mnt->mnt.data);
	kfree(mnt->mnt_devname);
	mnt_free_id(mnt);
#ifdef CONFIG_SMP
	free_percpu(mnt->mnt_pcp);
#endif
	kmem_cache_free(mnt_cache, mnt);
}

/*
 * find the first or last mount at @dentry on vfsmount @mnt depending on
 * @dir. If @dir is set return the first mount else return the last mount.
 * vfsmount_lock must be held for read or write.
 */
struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry,
			      int dir)
{
	struct list_head *head = mount_hashtable + hash(mnt, dentry);
	struct list_head *tmp = head;
	struct mount *p, *found = NULL;

	for (;;) {
		tmp = dir ? tmp->next : tmp->prev;
		p = NULL;
		if (tmp == head)
			break;
		p = list_entry(tmp, struct mount, mnt_hash);
		if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry) {
			found = p;
			break;
		}
	}
	return found;
}
/*
 * lookup_mnt - Return the first child mount mounted at path
 *
 * "First" means first mounted chronologically.  If you create the
 * following mounts:
 *
 * mount /dev/sda1 /mnt
 * mount /dev/sda2 /mnt
 * mount /dev/sda3 /mnt
 *
 * Then lookup_mnt() on the base /mnt dentry in the root mount will
 * return successively the root dentry and vfsmount of /dev/sda1, then
 * /dev/sda2, then /dev/sda3, then NULL.
 *
 * lookup_mnt takes a reference to the found vfsmount.
 */
struct vfsmount *lookup_mnt(struct path *path)
{
	struct mount *child_mnt;

	br_read_lock(&vfsmount_lock);
	child_mnt = __lookup_mnt(path->mnt, path->dentry, 1);
	if (child_mnt) {
		mnt_add_count(child_mnt, 1);
		br_read_unlock(&vfsmount_lock);
		return &child_mnt->mnt;
	} else {
		br_read_unlock(&vfsmount_lock);
		return NULL;
	}
}
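/*
 * Illustrative sketch (editor's addition, not in the original source):
 * walking from a mountpoint to the top of a stack of mounts repeatedly
 * follows the first child mounted there, dropping the intermediate
 * references (dentry refcounting elided for brevity):
 *
 *	struct path p = *path;		// p.dentry is the mountpoint
 *	struct vfsmount *child;
 *
 *	p.mnt = mntget(p.mnt);
 *	while ((child = lookup_mnt(&p)) != NULL) {
 *		mntput(p.mnt);
 *		p.mnt = child;
 *		p.dentry = child->mnt_root;
 *	}
 *	// p.mnt is now the most recently mounted filesystem in the stack
 */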
static struct mountpoint *new_mountpoint(struct dentry *dentry)
{
	struct list_head *chain = mountpoint_hashtable + hash(NULL, dentry);
	struct mountpoint *mp;

	list_for_each_entry(mp, chain, m_hash) {
		if (mp->m_dentry == dentry) {
			/* might be worth a WARN_ON() */
			if (d_unlinked(dentry))
				return ERR_PTR(-ENOENT);
			mp->m_count++;
			return mp;
		}
	}

	mp = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
	if (!mp)
		return ERR_PTR(-ENOMEM);

	spin_lock(&dentry->d_lock);
	if (d_unlinked(dentry)) {
		spin_unlock(&dentry->d_lock);
		kfree(mp);
		return ERR_PTR(-ENOENT);
	}
	dentry->d_flags |= DCACHE_MOUNTED;
	spin_unlock(&dentry->d_lock);
	mp->m_dentry = dentry;
	mp->m_count = 1;
	list_add(&mp->m_hash, chain);
	return mp;
}

static void put_mountpoint(struct mountpoint *mp)
{
	if (!--mp->m_count) {
		struct dentry *dentry = mp->m_dentry;
		spin_lock(&dentry->d_lock);
		dentry->d_flags &= ~DCACHE_MOUNTED;
		spin_unlock(&dentry->d_lock);
		list_del(&mp->m_hash);
		kfree(mp);
	}
}

static inline int check_mnt(struct mount *mnt)
{
	return mnt->mnt_ns == current->nsproxy->mnt_ns;
}

/*
 * vfsmount lock must be held for write
 */
static void touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns) {
		ns->event = ++event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * vfsmount lock must be held for write
 */
static void __touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns && ns->event != event) {
		ns->event = event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * vfsmount lock must be held for write
 */
static void detach_mnt(struct mount *mnt, struct path *old_path)
{
	old_path->dentry = mnt->mnt_mountpoint;
	old_path->mnt = &mnt->mnt_parent->mnt;
	mnt->mnt_parent = mnt;
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	list_del_init(&mnt->mnt_child);
	list_del_init(&mnt->mnt_hash);
	put_mountpoint(mnt->mnt_mp);
	mnt->mnt_mp = NULL;
}

/*
 * vfsmount lock must be held for write
 */
void mnt_set_mountpoint(struct mount *mnt,
			struct mountpoint *mp,
			struct mount *child_mnt)
{
	mp->m_count++;
	mnt_add_count(mnt, 1);	/* essentially, that's mntget */
	child_mnt->mnt_mountpoint = dget(mp->m_dentry);
	child_mnt->mnt_parent = mnt;
	child_mnt->mnt_mp = mp;
}

/*
 * vfsmount lock must be held for write
 */
static void attach_mnt(struct mount *mnt,
			struct mount *parent,
			struct mountpoint *mp)
{
	mnt_set_mountpoint(parent, mp, mnt);
	list_add_tail(&mnt->mnt_hash, mount_hashtable +
			hash(&parent->mnt, mp->m_dentry));
	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
}

/*
 * vfsmount lock must be held for write
 */
static void commit_tree(struct mount *mnt)
{
	struct mount *parent = mnt->mnt_parent;
	struct mount *m;
	LIST_HEAD(head);
	struct mnt_namespace *n = parent->mnt_ns;

	BUG_ON(parent == mnt);

	list_add_tail(&head, &mnt->mnt_list);
	list_for_each_entry(m, &head, mnt_list)
		m->mnt_ns = n;

	list_splice(&head, n->list.prev);

	list_add_tail(&mnt->mnt_hash, mount_hashtable +
			hash(&parent->mnt, mnt->mnt_mountpoint));
	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
	touch_mnt_namespace(n);
}

static struct mount *next_mnt(struct mount *p, struct mount *root)
{
	struct list_head *next = p->mnt_mounts.next;
	if (next == &p->mnt_mounts) {
		while (1) {
			if (p == root)
				return NULL;
			next = p->mnt_child.next;
			if (next != &p->mnt_parent->mnt_mounts)
				break;
			p = p->mnt_parent;
		}
	}
	return list_entry(next, struct mount, mnt_child);
}

static struct mount *skip_mnt_tree(struct mount *p)
{
	struct list_head *prev = p->mnt_mounts.prev;
	while (prev != &p->mnt_mounts) {
		p = list_entry(prev, struct mount, mnt_child);
		prev = p->mnt_mounts.prev;
	}
	return p;
}
struct vfsmount *
vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void *data)
{
	struct mount *mnt;
	struct dentry *root;

	if (!type)
		return ERR_PTR(-ENODEV);

	mnt = alloc_vfsmnt(name);
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	mnt->mnt.data = NULL;
	if (type->alloc_mnt_data) {
		mnt->mnt.data = type->alloc_mnt_data();
		if (!mnt->mnt.data) {
			free_vfsmnt(mnt);
			return ERR_PTR(-ENOMEM);
		}
	}
	if (flags & MS_KERNMOUNT)
		mnt->mnt.mnt_flags = MNT_INTERNAL;

	root = mount_fs(type, flags, name, &mnt->mnt, data);
	if (IS_ERR(root)) {
		kfree(mnt->mnt.data);
		free_vfsmnt(mnt);
		return ERR_CAST(root);
	}

	mnt->mnt.mnt_root = root;
	mnt->mnt.mnt_sb = root->d_sb;
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	mnt->mnt_parent = mnt;
	br_write_lock(&vfsmount_lock);
	list_add_tail(&mnt->mnt_instance, &root->d_sb->s_mounts);
	br_write_unlock(&vfsmount_lock);
	return &mnt->mnt;
}
EXPORT_SYMBOL_GPL(vfs_kern_mount);
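/*
 * Illustrative sketch (editor's addition, not in the original source):
 * in-kernel users typically reach vfs_kern_mount() through
 * kern_mount()/kern_mount_data(), which pass MS_KERNMOUNT so the mount is
 * marked MNT_INTERNAL, roughly:
 *
 *	struct file_system_type *type = get_fs_type("tmpfs");
 *	struct vfsmount *mnt;
 *
 *	if (!type)
 *		return -ENODEV;
 *	mnt = vfs_kern_mount(type, MS_KERNMOUNT, type->name, NULL);
 *	put_filesystem(type);
 *	if (IS_ERR(mnt))
 *		return PTR_ERR(mnt);
 */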
static struct mount *clone_mnt(struct mount *old, struct dentry *root,
					int flag)
{
	struct super_block *sb = old->mnt.mnt_sb;
	struct mount *mnt;
	int err;

	mnt = alloc_vfsmnt(old->mnt_devname);
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (sb->s_op->clone_mnt_data) {
		mnt->mnt.data = sb->s_op->clone_mnt_data(old->mnt.data);
		if (!mnt->mnt.data) {
			err = -ENOMEM;
			goto out_free;
		}
	}

	if (flag & (CL_SLAVE | CL_PRIVATE | CL_SHARED_TO_SLAVE))
		mnt->mnt_group_id = 0; /* not a peer of original */
	else
		mnt->mnt_group_id = old->mnt_group_id;

	if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
		err = mnt_alloc_group_id(mnt);
		if (err)
			goto out_free;
	}

	mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~MNT_WRITE_HOLD;
	/* Don't allow unprivileged users to change mount flags */
	if (flag & CL_UNPRIVILEGED) {
		mnt->mnt.mnt_flags |= MNT_LOCK_ATIME;

		if (mnt->mnt.mnt_flags & MNT_READONLY)
			mnt->mnt.mnt_flags |= MNT_LOCK_READONLY;

		if (mnt->mnt.mnt_flags & MNT_NODEV)
			mnt->mnt.mnt_flags |= MNT_LOCK_NODEV;

		if (mnt->mnt.mnt_flags & MNT_NOSUID)
			mnt->mnt.mnt_flags |= MNT_LOCK_NOSUID;

		if (mnt->mnt.mnt_flags & MNT_NOEXEC)
			mnt->mnt.mnt_flags |= MNT_LOCK_NOEXEC;
	}

	atomic_inc(&sb->s_active);
	mnt->mnt.mnt_sb = sb;
	mnt->mnt.mnt_root = dget(root);
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	mnt->mnt_parent = mnt;
	br_write_lock(&vfsmount_lock);
	list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
	br_write_unlock(&vfsmount_lock);

	if ((flag & CL_SLAVE) ||
	    ((flag & CL_SHARED_TO_SLAVE) && IS_MNT_SHARED(old))) {
		list_add(&mnt->mnt_slave, &old->mnt_slave_list);
		mnt->mnt_master = old;
		CLEAR_MNT_SHARED(mnt);
	} else if (!(flag & CL_PRIVATE)) {
		if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old))
			list_add(&mnt->mnt_share, &old->mnt_share);
		if (IS_MNT_SLAVE(old))
			list_add(&mnt->mnt_slave, &old->mnt_slave);
		mnt->mnt_master = old->mnt_master;
	}
	if (flag & CL_MAKE_SHARED)
		set_mnt_shared(mnt);

	/* stick the duplicate mount on the same expiry list
	 * as the original if that was on one */
	if (flag & CL_EXPIRE) {
		if (!list_empty(&old->mnt_expire))
			list_add(&mnt->mnt_expire, &old->mnt_expire);
	}

	return mnt;

 out_free:
	kfree(mnt->mnt.data);
	free_vfsmnt(mnt);
	return ERR_PTR(err);
}

static inline void mntfree(struct mount *mnt)
{
	struct vfsmount *m = &mnt->mnt;
	struct super_block *sb = m->mnt_sb;

	/*
	 * This probably indicates that somebody messed
	 * up a mnt_want/drop_write() pair. If this
	 * happens, the filesystem was probably unable
	 * to make r/w->r/o transitions.
	 */
	/*
	 * The locking used to deal with mnt_count decrement provides barriers,
	 * so mnt_get_writers() below is safe.
	 */
	WARN_ON(mnt_get_writers(mnt));
	fsnotify_vfsmount_delete(m);
	dput(m->mnt_root);
	free_vfsmnt(mnt);
	deactivate_super(sb);
}

static void mntput_no_expire(struct mount *mnt)
{
put_again:
#ifdef CONFIG_SMP
	br_read_lock(&vfsmount_lock);
	if (likely(mnt->mnt_ns)) {
		/* shouldn't be the last one */
		mnt_add_count(mnt, -1);
		br_read_unlock(&vfsmount_lock);
		return;
	}
	br_read_unlock(&vfsmount_lock);

	br_write_lock(&vfsmount_lock);
	mnt_add_count(mnt, -1);
	if (mnt_get_count(mnt)) {
		br_write_unlock(&vfsmount_lock);
		return;
	}
#else
	mnt_add_count(mnt, -1);
	if (likely(mnt_get_count(mnt)))
		return;
	br_write_lock(&vfsmount_lock);
#endif
	if (unlikely(mnt->mnt_pinned)) {
		mnt_add_count(mnt, mnt->mnt_pinned + 1);
		mnt->mnt_pinned = 0;
		br_write_unlock(&vfsmount_lock);
		acct_auto_close_mnt(&mnt->mnt);
		goto put_again;
	}

	list_del(&mnt->mnt_instance);
	br_write_unlock(&vfsmount_lock);
	mntfree(mnt);
}

void mntput(struct vfsmount *mnt)
{
	if (mnt) {
		struct mount *m = real_mount(mnt);
		/* avoid cacheline pingpong, hope gcc doesn't get "smart" */
		if (unlikely(m->mnt_expiry_mark))
			m->mnt_expiry_mark = 0;
		mntput_no_expire(m);
	}
}
EXPORT_SYMBOL(mntput);

struct vfsmount *mntget(struct vfsmount *mnt)
{
	if (mnt)
		mnt_add_count(real_mount(mnt), 1);
	return mnt;
}
EXPORT_SYMBOL(mntget);

void mnt_pin(struct vfsmount *mnt)
{
	br_write_lock(&vfsmount_lock);
	real_mount(mnt)->mnt_pinned++;
	br_write_unlock(&vfsmount_lock);
}
EXPORT_SYMBOL(mnt_pin);

void mnt_unpin(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	br_write_lock(&vfsmount_lock);
	if (mnt->mnt_pinned) {
		mnt_add_count(mnt, 1);
		mnt->mnt_pinned--;
	}
	br_write_unlock(&vfsmount_lock);
}
EXPORT_SYMBOL(mnt_unpin);

static inline void mangle(struct seq_file *m, const char *s)
{
	seq_escape(m, s, " \t\n\\");
}

/*
 * Simple .show_options callback for filesystems which don't want to
 * implement more complex mount option showing.
 *
 * See also save_mount_options().
 */
int generic_show_options(struct seq_file *m, struct dentry *root)
{
	const char *options;

	rcu_read_lock();
	options = rcu_dereference(root->d_sb->s_options);

	if (options != NULL && options[0]) {
		seq_putc(m, ',');
		mangle(m, options);
	}
	rcu_read_unlock();

	return 0;
}
EXPORT_SYMBOL(generic_show_options);

/*
 * If filesystem uses generic_show_options(), this function should be
 * called from the fill_super() callback.
 *
 * The .remount_fs callback usually needs to be handled in a special
 * way, to make sure, that previous options are not overwritten if the
 * remount fails.
 *
 * Also note, that if the filesystem's .remount_fs function doesn't
 * reset all options to their default value, but changes only newly
 * given options, then the displayed options will not reflect reality
 * any more.
 */
void save_mount_options(struct super_block *sb, char *options)
{
	BUG_ON(sb->s_options);
	rcu_assign_pointer(sb->s_options, kstrdup(options, GFP_KERNEL));
}
EXPORT_SYMBOL(save_mount_options);

void replace_mount_options(struct super_block *sb, char *options)
{
	char *old = sb->s_options;
	rcu_assign_pointer(sb->s_options, options);
	if (old) {
		synchronize_rcu();
		kfree(old);
	}
}
EXPORT_SYMBOL(replace_mount_options);

#ifdef CONFIG_PROC_FS
/* iterator; we want it to have access to namespace_sem, thus here... */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_mounts *p = proc_mounts(m);

	down_read(&namespace_sem);
	return seq_list_start(&p->ns->list, *pos);
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_mounts *p = proc_mounts(m);

	return seq_list_next(v, &p->ns->list, pos);
}

static void m_stop(struct seq_file *m, void *v)
{
	up_read(&namespace_sem);
}

static int m_show(struct seq_file *m, void *v)
{
	struct proc_mounts *p = proc_mounts(m);
	struct mount *r = list_entry(v, struct mount, mnt_list);
	return p->show(m, &r->mnt);
}

const struct seq_operations mounts_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= m_show,
};
#endif  /* CONFIG_PROC_FS */

/**
 * may_umount_tree - check if a mount tree is busy
 * @mnt: root of mount tree
 *
 * This is called to check if a tree of mounts has any
 * open files, pwds, chroots or sub mounts that are
 * busy.
 */
int may_umount_tree(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	int actual_refs = 0;
	int minimum_refs = 0;
	struct mount *p;
	BUG_ON(!m);

	/* write lock needed for mnt_get_count */
	br_write_lock(&vfsmount_lock);
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		actual_refs += mnt_get_count(p);
		minimum_refs += 2;
	}
	br_write_unlock(&vfsmount_lock);

	if (actual_refs > minimum_refs)
		return 0;

	return 1;
}

EXPORT_SYMBOL(may_umount_tree);

/**
 * may_umount - check if a mount point is busy
 * @mnt: root of mount
 *
 * This is called to check if a mount point has any
 * open files, pwds, chroots or sub mounts. If the
 * mount has sub mounts this will return busy
 * regardless of whether the sub mounts are busy.
 *
 * Doesn't take quota and stuff into account. IOW, in some cases it will
 * give false negatives. The main reason why it's here is that we need
 * a non-destructive way to look for easily umountable filesystems.
 */
int may_umount(struct vfsmount *mnt)
{
	int ret = 1;
	down_read(&namespace_sem);
	br_write_lock(&vfsmount_lock);
	if (propagate_mount_busy(real_mount(mnt), 2))
		ret = 0;
	br_write_unlock(&vfsmount_lock);
	up_read(&namespace_sem);
	return ret;
}

EXPORT_SYMBOL(may_umount);

static LIST_HEAD(unmounted);	/* protected by namespace_sem */

static void namespace_unlock(void)
{
	struct mount *mnt;
	LIST_HEAD(head);

	if (likely(list_empty(&unmounted))) {
		up_write(&namespace_sem);
		return;
	}

	list_splice_init(&unmounted, &head);
	up_write(&namespace_sem);

	while (!list_empty(&head)) {
		mnt = list_first_entry(&head, struct mount, mnt_hash);
		list_del_init(&mnt->mnt_hash);
		if (mnt_has_parent(mnt)) {
			struct dentry *dentry;
			struct mount *m;

			br_write_lock(&vfsmount_lock);
			dentry = mnt->mnt_mountpoint;
			m = mnt->mnt_parent;
			mnt->mnt_mountpoint = mnt->mnt.mnt_root;
			mnt->mnt_parent = mnt;
			m->mnt_ghosts--;
			br_write_unlock(&vfsmount_lock);
			dput(dentry);
			mntput(&m->mnt);
		}
		mntput(&mnt->mnt);
	}
}

static inline void namespace_lock(void)
{
	down_write(&namespace_sem);
}

/*
 * vfsmount lock must be held for write
 * namespace_sem must be held for write
 */
void umount_tree(struct mount *mnt, int propagate)
{
	LIST_HEAD(tmp_list);
	struct mount *p;

	for (p = mnt; p; p = next_mnt(p, mnt))
		list_move(&p->mnt_hash, &tmp_list);

	if (propagate)
		propagate_umount(&tmp_list);

	list_for_each_entry(p, &tmp_list, mnt_hash) {
		list_del_init(&p->mnt_expire);
		list_del_init(&p->mnt_list);
		__touch_mnt_namespace(p->mnt_ns);
		p->mnt_ns = NULL;
		list_del_init(&p->mnt_child);
		if (mnt_has_parent(p)) {
			p->mnt_parent->mnt_ghosts++;
			put_mountpoint(p->mnt_mp);
			p->mnt_mp = NULL;
		}
		change_mnt_propagation(p, MS_PRIVATE);
	}
	list_splice(&tmp_list, &unmounted);
}

static void shrink_submounts(struct mount *mnt);

static int do_umount(struct mount *mnt, int flags)
{
	struct super_block *sb = mnt->mnt.mnt_sb;
	int retval;

	retval = security_sb_umount(&mnt->mnt, flags);
	if (retval)
		return retval;

	/*
	 * Allow userspace to request a mountpoint be expired rather than
	 * unmounting unconditionally. Unmount only happens if:
	 *  (1) the mark is already set (the mark is cleared by mntput())
	 *  (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
	 */
	if (flags & MNT_EXPIRE) {
		if (&mnt->mnt == current->fs->root.mnt ||
		    flags & (MNT_FORCE | MNT_DETACH))
			return -EINVAL;

		/*
		 * probably don't strictly need the lock here if we examined
		 * all race cases, but it's a slowpath.
		 */
		br_write_lock(&vfsmount_lock);
		if (mnt_get_count(mnt) != 2) {
			br_write_unlock(&vfsmount_lock);
			return -EBUSY;
		}
		br_write_unlock(&vfsmount_lock);

		if (!xchg(&mnt->mnt_expiry_mark, 1))
			return -EAGAIN;
	}

	/*
	 * If we may have to abort operations to get out of this
	 * mount, and they will themselves hold resources we must
	 * allow the fs to do things. In the Unix tradition of
	 * 'Gee thats tricky lets do it in userspace' the umount_begin
	 * might fail to complete on the first run through as other tasks
	 * must return, and the like. That's for the mount program to worry
	 * about for the moment.
	 */

	if (flags & MNT_FORCE && sb->s_op->umount_begin) {
		sb->s_op->umount_begin(sb);
	}

	/*
	 * No sense to grab the lock for this test, but test itself looks
	 * somewhat bogus. Suggestions for better replacement?
	 * Ho-hum... In principle, we might treat that as umount + switch
	 * to rootfs. GC would eventually take care of the old vfsmount.
	 * Actually it makes sense, especially if rootfs would contain a
	 * /reboot - static binary that would close all descriptors and
	 * call reboot(9). Then init(8) could umount root and exec /reboot.
	 */
	if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
		/*
		 * Special case for "unmounting" root ...
		 * we just try to remount it readonly.
		 */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		down_write(&sb->s_umount);
		if (!(sb->s_flags & MS_RDONLY))
			retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
		up_write(&sb->s_umount);
		return retval;
	}

	namespace_lock();
	br_write_lock(&vfsmount_lock);
	event++;

	if (!(flags & MNT_DETACH))
		shrink_submounts(mnt);

	retval = -EBUSY;
	if (flags & MNT_DETACH || !propagate_mount_busy(mnt, 2)) {
		if (!list_empty(&mnt->mnt_list))
			umount_tree(mnt, 1);
		retval = 0;
	}
	br_write_unlock(&vfsmount_lock);
	namespace_unlock();
	return retval;
}

/*
 * Is the caller allowed to modify his namespace?
 */
static inline bool may_mount(void)
{
	return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN);
}

/*
 * Now umount can handle mount points as well as block devices.
 * This is important for filesystems which use unnamed block devices.
 *
 * We now support a flag for forced unmount like the other 'big iron'
 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
 */

SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
{
	struct path path;
	struct mount *mnt;
	int retval;
	int lookup_flags = 0;

	if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
		return -EINVAL;

	if (!may_mount())
		return -EPERM;

	if (!(flags & UMOUNT_NOFOLLOW))
		lookup_flags |= LOOKUP_FOLLOW;

	retval = user_path_at(AT_FDCWD, name, lookup_flags, &path);
	if (retval)
		goto out;
	mnt = real_mount(path.mnt);
	retval = -EINVAL;
	if (path.dentry != path.mnt->mnt_root)
		goto dput_and_out;
	if (!check_mnt(mnt))
		goto dput_and_out;

	retval = -EPERM;
	if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN))
		goto dput_and_out;

	retval = do_umount(mnt, flags);
dput_and_out:
	/* we mustn't call path_put() as that would clear mnt_expiry_mark */
	dput(path.dentry);
	mntput_no_expire(mnt);
out:
	return retval;
}

#ifdef __ARCH_WANT_SYS_OLDUMOUNT

/*
 *	The 2.0 compatible umount. No flags.
 */
SYSCALL_DEFINE1(oldumount, char __user *, name)
{
	return sys_umount(name, 0);
}

#endif
*path
)
1395 /* Could bind mounting the mount namespace inode cause a
1396 * mount namespace loop?
1398 struct inode
*inode
= path
->dentry
->d_inode
;
1400 struct mnt_namespace
*mnt_ns
;
1402 if (!proc_ns_inode(inode
))
1405 ei
= get_proc_ns(inode
);
1406 if (ei
->ns_ops
!= &mntns_operations
)
1410 return current
->nsproxy
->mnt_ns
->seq
>= mnt_ns
->seq
;
1413 struct mount
*copy_tree(struct mount
*mnt
, struct dentry
*dentry
,
1416 struct mount
*res
, *p
, *q
, *r
, *parent
;
1418 if (!(flag
& CL_COPY_ALL
) && IS_MNT_UNBINDABLE(mnt
))
1419 return ERR_PTR(-EINVAL
);
1421 res
= q
= clone_mnt(mnt
, dentry
, flag
);
1425 q
->mnt_mountpoint
= mnt
->mnt_mountpoint
;
1428 list_for_each_entry(r
, &mnt
->mnt_mounts
, mnt_child
) {
1430 if (!is_subdir(r
->mnt_mountpoint
, dentry
))
1433 for (s
= r
; s
; s
= next_mnt(s
, r
)) {
1434 if (!(flag
& CL_COPY_ALL
) && IS_MNT_UNBINDABLE(s
)) {
1435 s
= skip_mnt_tree(s
);
1438 while (p
!= s
->mnt_parent
) {
1444 q
= clone_mnt(p
, p
->mnt
.mnt_root
, flag
);
1447 br_write_lock(&vfsmount_lock
);
1448 list_add_tail(&q
->mnt_list
, &res
->mnt_list
);
1449 attach_mnt(q
, parent
, p
->mnt_mp
);
1450 br_write_unlock(&vfsmount_lock
);
1456 br_write_lock(&vfsmount_lock
);
1457 umount_tree(res
, 0);
1458 br_write_unlock(&vfsmount_lock
);
1463 /* Caller should check returned pointer for errors */
1465 struct vfsmount
*collect_mounts(struct path
*path
)
1469 if (!check_mnt(real_mount(path
->mnt
)))
1470 tree
= ERR_PTR(-EINVAL
);
1472 tree
= copy_tree(real_mount(path
->mnt
), path
->dentry
,
1473 CL_COPY_ALL
| CL_PRIVATE
);
1476 return ERR_CAST(tree
);
1480 void drop_collected_mounts(struct vfsmount
*mnt
)
1483 br_write_lock(&vfsmount_lock
);
1484 umount_tree(real_mount(mnt
), 0);
1485 br_write_unlock(&vfsmount_lock
);
1489 int iterate_mounts(int (*f
)(struct vfsmount
*, void *), void *arg
,
1490 struct vfsmount
*root
)
1493 int res
= f(root
, arg
);
1496 list_for_each_entry(mnt
, &real_mount(root
)->mnt_list
, mnt_list
) {
1497 res
= f(&mnt
->mnt
, arg
);
1504 static void cleanup_group_ids(struct mount
*mnt
, struct mount
*end
)
1508 for (p
= mnt
; p
!= end
; p
= next_mnt(p
, mnt
)) {
1509 if (p
->mnt_group_id
&& !IS_MNT_SHARED(p
))
1510 mnt_release_group_id(p
);
1514 static int invent_group_ids(struct mount
*mnt
, bool recurse
)
1518 for (p
= mnt
; p
; p
= recurse
? next_mnt(p
, mnt
) : NULL
) {
1519 if (!p
->mnt_group_id
&& !IS_MNT_SHARED(p
)) {
1520 int err
= mnt_alloc_group_id(p
);
1522 cleanup_group_ids(mnt
, p
);
1532 * @source_mnt : mount tree to be attached
1533 * @nd : place the mount tree @source_mnt is attached
1534 * @parent_nd : if non-null, detach the source_mnt from its parent and
1535 * store the parent mount and mountpoint dentry.
1536 * (done when source_mnt is moved)
1538 * NOTE: in the table below explains the semantics when a source mount
1539 * of a given type is attached to a destination mount of a given type.
1540 * ---------------------------------------------------------------------------
1541 * | BIND MOUNT OPERATION |
1542 * |**************************************************************************
1543 * | source-->| shared | private | slave | unbindable |
1547 * |**************************************************************************
1548 * | shared | shared (++) | shared (+) | shared(+++)| invalid |
1550 * |non-shared| shared (+) | private | slave (*) | invalid |
1551 * ***************************************************************************
1552 * A bind operation clones the source mount and mounts the clone on the
1553 * destination mount.
1555 * (++) the cloned mount is propagated to all the mounts in the propagation
1556 * tree of the destination mount and the cloned mount is added to
1557 * the peer group of the source mount.
1558 * (+) the cloned mount is created under the destination mount and is marked
1559 * as shared. The cloned mount is added to the peer group of the source
1561 * (+++) the mount is propagated to all the mounts in the propagation tree
1562 * of the destination mount and the cloned mount is made slave
1563 * of the same master as that of the source mount. The cloned mount
1564 * is marked as 'shared and slave'.
1565 * (*) the cloned mount is made a slave of the same master as that of the
1568 * ---------------------------------------------------------------------------
1569 * | MOVE MOUNT OPERATION |
1570 * |**************************************************************************
1571 * | source-->| shared | private | slave | unbindable |
1575 * |**************************************************************************
1576 * | shared | shared (+) | shared (+) | shared(+++) | invalid |
1578 * |non-shared| shared (+*) | private | slave (*) | unbindable |
1579 * ***************************************************************************
1581 * (+) the mount is moved to the destination. And is then propagated to
1582 * all the mounts in the propagation tree of the destination mount.
1583 * (+*) the mount is moved to the destination.
1584 * (+++) the mount is moved to the destination and is then propagated to
1585 * all the mounts belonging to the destination mount's propagation tree.
1586 * the mount is marked as 'shared and slave'.
1587 * (*) the mount continues to be a slave at the new location.
1589 * if the source mount is a tree, the operations explained above is
1590 * applied to each mount in the tree.
1591 * Must be called without spinlocks held, since this function can sleep
1594 static int attach_recursive_mnt(struct mount
*source_mnt
,
1595 struct mount
*dest_mnt
,
1596 struct mountpoint
*dest_mp
,
1597 struct path
*parent_path
)
1599 LIST_HEAD(tree_list
);
1600 struct mount
*child
, *p
;
1603 if (IS_MNT_SHARED(dest_mnt
)) {
1604 err
= invent_group_ids(source_mnt
, true);
1608 err
= propagate_mnt(dest_mnt
, dest_mp
, source_mnt
, &tree_list
);
1610 goto out_cleanup_ids
;
1612 br_write_lock(&vfsmount_lock
);
1614 if (IS_MNT_SHARED(dest_mnt
)) {
1615 for (p
= source_mnt
; p
; p
= next_mnt(p
, source_mnt
))
1619 detach_mnt(source_mnt
, parent_path
);
1620 attach_mnt(source_mnt
, dest_mnt
, dest_mp
);
1621 touch_mnt_namespace(source_mnt
->mnt_ns
);
1623 mnt_set_mountpoint(dest_mnt
, dest_mp
, source_mnt
);
1624 commit_tree(source_mnt
);
1627 list_for_each_entry_safe(child
, p
, &tree_list
, mnt_hash
) {
1628 list_del_init(&child
->mnt_hash
);
1631 br_write_unlock(&vfsmount_lock
);
1636 if (IS_MNT_SHARED(dest_mnt
))
1637 cleanup_group_ids(source_mnt
, NULL
);
1642 static struct mountpoint
*lock_mount(struct path
*path
)
1644 struct vfsmount
*mnt
;
1645 struct dentry
*dentry
= path
->dentry
;
1647 mutex_lock(&dentry
->d_inode
->i_mutex
);
1648 if (unlikely(cant_mount(dentry
))) {
1649 mutex_unlock(&dentry
->d_inode
->i_mutex
);
1650 return ERR_PTR(-ENOENT
);
1653 mnt
= lookup_mnt(path
);
1655 struct mountpoint
*mp
= new_mountpoint(dentry
);
1658 mutex_unlock(&dentry
->d_inode
->i_mutex
);
1664 mutex_unlock(&path
->dentry
->d_inode
->i_mutex
);
1667 dentry
= path
->dentry
= dget(mnt
->mnt_root
);
1671 static void unlock_mount(struct mountpoint
*where
)
1673 struct dentry
*dentry
= where
->m_dentry
;
1674 put_mountpoint(where
);
1676 mutex_unlock(&dentry
->d_inode
->i_mutex
);
1679 static int graft_tree(struct mount
*mnt
, struct mount
*p
, struct mountpoint
*mp
)
1681 if (mnt
->mnt
.mnt_sb
->s_flags
& MS_NOUSER
)
1684 if (S_ISDIR(mp
->m_dentry
->d_inode
->i_mode
) !=
1685 S_ISDIR(mnt
->mnt
.mnt_root
->d_inode
->i_mode
))
1688 return attach_recursive_mnt(mnt
, p
, mp
, NULL
);
/*
 * Sanity check the flags to change_mnt_propagation.
 */

static int flags_to_propagation_type(int flags)
{
	int type = flags & ~(MS_REC | MS_SILENT);

	/* Fail if any non-propagation flags are set */
	if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
		return 0;
	/* Only one propagation flag should be set */
	if (!is_power_of_2(type))
		return 0;
	return type;
}
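/*
 * Illustrative sketch (editor's addition, not in the original source):
 * exactly one propagation type must remain once MS_REC/MS_SILENT are
 * masked off, e.g.:
 *
 *	flags_to_propagation_type(MS_SHARED | MS_REC)	// -> MS_SHARED
 *	flags_to_propagation_type(MS_PRIVATE)		// -> MS_PRIVATE
 *	flags_to_propagation_type(MS_SHARED | MS_SLAVE)	// -> 0 (two bits set)
 *	flags_to_propagation_type(MS_SHARED | MS_BIND)	// -> 0 (extra flag)
 */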
/*
 * recursively change the type of the mountpoint.
 */
static int do_change_type(struct path *path, int flag)
{
	struct mount *m;
	struct mount *mnt = real_mount(path->mnt);
	int recurse = flag & MS_REC;
	int type;
	int err = 0;

	if (path->dentry != path->mnt->mnt_root)
		return -EINVAL;

	type = flags_to_propagation_type(flag);
	if (!type)
		return -EINVAL;

	namespace_lock();
	if (type == MS_SHARED) {
		err = invent_group_ids(mnt, recurse);
		if (err)
			goto out_unlock;
	}

	br_write_lock(&vfsmount_lock);
	for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
		change_mnt_propagation(m, type);
	br_write_unlock(&vfsmount_lock);

 out_unlock:
	namespace_unlock();
	return err;
}
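/*
 * Illustrative sketch (editor's addition, not in the original source):
 * this is what the mount(8) --make-* options boil down to; for example
 * "mount --make-rshared /mnt" is roughly:
 *
 *	mount(NULL, "/mnt", NULL, MS_SHARED | MS_REC, NULL);
 *
 * which reaches do_change_type() with flag == MS_SHARED | MS_REC and
 * marks /mnt and every mount below it shared.
 */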
/*
 * do loopback mount.
 */
static int do_loopback(struct path *path, const char *old_name,
				int recurse)
{
	struct path old_path;
	struct mount *mnt = NULL, *old, *parent;
	struct mountpoint *mp;
	int err;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path);
	if (err)
		return err;

	err = -EINVAL;
	if (mnt_ns_loop(&old_path))
		goto out;

	mp = lock_mount(path);
	err = PTR_ERR(mp);
	if (IS_ERR(mp))
		goto out;

	old = real_mount(old_path.mnt);
	parent = real_mount(path->mnt);

	err = -EINVAL;
	if (IS_MNT_UNBINDABLE(old))
		goto out2;

	if (!check_mnt(parent) || !check_mnt(old))
		goto out2;

	if (recurse)
		mnt = copy_tree(old, old_path.dentry, 0);
	else
		mnt = clone_mnt(old, old_path.dentry, 0);

	if (IS_ERR(mnt)) {
		err = PTR_ERR(mnt);
		goto out2;
	}

	err = graft_tree(mnt, parent, mp);
	if (err) {
		br_write_lock(&vfsmount_lock);
		umount_tree(mnt, 0);
		br_write_unlock(&vfsmount_lock);
	}
out2:
	unlock_mount(mp);
out:
	path_put(&old_path);
	return err;
}

static int change_mount_flags(struct vfsmount *mnt, int ms_flags)
{
	int error = 0;
	int readonly_request = 0;

	if (ms_flags & MS_RDONLY)
		readonly_request = 1;
	if (readonly_request == __mnt_is_readonly(mnt))
		return 0;

	if (readonly_request)
		error = mnt_make_readonly(real_mount(mnt));
	else
		__mnt_unmake_readonly(real_mount(mnt));
	return error;
}

/*
 * change filesystem flags. dir should be a physical root of filesystem.
 * If you've mounted a non-root directory somewhere and want to do remount
 * on it - tough luck.
 */
static int do_remount(struct path *path, int flags, int mnt_flags,
		      void *data)
{
	int err;
	struct super_block *sb = path->mnt->mnt_sb;
	struct mount *mnt = real_mount(path->mnt);

	if (!check_mnt(mnt))
		return -EINVAL;

	if (path->dentry != path->mnt->mnt_root)
		return -EINVAL;

	/* Don't allow changing of locked mnt flags.
	 *
	 * No locks need to be held here while testing the various
	 * MNT_LOCK flags because those flags can never be cleared
	 * once they are set.
	 */
	if ((mnt->mnt.mnt_flags & MNT_LOCK_READONLY) &&
	    !(mnt_flags & MNT_READONLY)) {
		return -EPERM;
	}
	if ((mnt->mnt.mnt_flags & MNT_LOCK_NODEV) &&
	    !(mnt_flags & MNT_NODEV)) {
		/* Was the nodev implicitly added in mount? */
		if ((mnt->mnt_ns->user_ns != &init_user_ns) &&
		    !(sb->s_type->fs_flags & FS_USERNS_DEV_MOUNT)) {
			mnt_flags |= MNT_NODEV;
		} else {
			return -EPERM;
		}
	}
	if ((mnt->mnt.mnt_flags & MNT_LOCK_NOSUID) &&
	    !(mnt_flags & MNT_NOSUID)) {
		return -EPERM;
	}
	if ((mnt->mnt.mnt_flags & MNT_LOCK_NOEXEC) &&
	    !(mnt_flags & MNT_NOEXEC)) {
		return -EPERM;
	}
	if ((mnt->mnt.mnt_flags & MNT_LOCK_ATIME) &&
	    ((mnt->mnt.mnt_flags & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK))) {
		return -EPERM;
	}

	err = security_sb_remount(sb, data);
	if (err)
		return err;

	down_write(&sb->s_umount);
	if (flags & MS_BIND)
		err = change_mount_flags(path->mnt, flags);
	else if (!capable(CAP_SYS_ADMIN))
		err = -EPERM;
	else {
		err = do_remount_sb2(path->mnt, sb, flags, data, 0);
		if (!err) {
			br_write_lock(&vfsmount_lock);
			propagate_remount(mnt);
			br_write_unlock(&vfsmount_lock);
		}
	}
	if (!err) {
		br_write_lock(&vfsmount_lock);
		mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK;
		mnt->mnt.mnt_flags = mnt_flags;
		br_write_unlock(&vfsmount_lock);
	}
	up_write(&sb->s_umount);
	if (!err) {
		br_write_lock(&vfsmount_lock);
		touch_mnt_namespace(mnt->mnt_ns);
		br_write_unlock(&vfsmount_lock);
	}
	return err;
}

static inline int tree_contains_unbindable(struct mount *mnt)
{
	struct mount *p;
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		if (IS_MNT_UNBINDABLE(p))
			return 1;
	}
	return 0;
}

static int do_move_mount(struct path *path, const char *old_name)
{
	struct path old_path, parent_path;
	struct mount *p;
	struct mount *old;
	struct mountpoint *mp;
	int err;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = kern_path(old_name, LOOKUP_FOLLOW, &old_path);
	if (err)
		return err;

	mp = lock_mount(path);
	err = PTR_ERR(mp);
	if (IS_ERR(mp))
		goto out;

	old = real_mount(old_path.mnt);
	p = real_mount(path->mnt);

	err = -EINVAL;
	if (!check_mnt(p) || !check_mnt(old))
		goto out1;

	err = -EINVAL;
	if (old_path.dentry != old_path.mnt->mnt_root)
		goto out1;

	if (!mnt_has_parent(old))
		goto out1;

	if (S_ISDIR(path->dentry->d_inode->i_mode) !=
	      S_ISDIR(old_path.dentry->d_inode->i_mode))
		goto out1;
	/*
	 * Don't move a mount residing in a shared parent.
	 */
	if (IS_MNT_SHARED(old->mnt_parent))
		goto out1;
	/*
	 * Don't move a mount tree containing unbindable mounts to a destination
	 * mount which is shared.
	 */
	if (IS_MNT_SHARED(p) && tree_contains_unbindable(old))
		goto out1;
	err = -ELOOP;
	for (; mnt_has_parent(p); p = p->mnt_parent)
		if (p == old)
			goto out1;

	err = attach_recursive_mnt(old, real_mount(path->mnt), mp, &parent_path);
	if (err)
		goto out1;

	/* if the mount is moved, it should no longer be expire
	 * automatically */
	list_del_init(&old->mnt_expire);
out1:
	unlock_mount(mp);
out:
	if (!err)
		path_put(&parent_path);
	path_put(&old_path);
	return err;
}

static struct vfsmount *fs_set_subtype(struct vfsmount *mnt, const char *fstype)
{
	int err;
	const char *subtype = strchr(fstype, '.');
	if (subtype) {
		subtype++;
		err = -EINVAL;
		if (!subtype[0])
			goto err;
	} else
		subtype = "";

	mnt->mnt_sb->s_subtype = kstrdup(subtype, GFP_KERNEL);
	err = -ENOMEM;
	if (!mnt->mnt_sb->s_subtype)
		goto err;
	return mnt;

 err:
	mntput(mnt);
	return ERR_PTR(err);
}

/*
 * add a mount into a namespace's mount tree
 */
static int do_add_mount(struct mount *newmnt, struct path *path, int mnt_flags)
{
	struct mountpoint *mp;
	struct mount *parent;
	int err;

	mnt_flags &= ~(MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL);

	mp = lock_mount(path);
	if (IS_ERR(mp))
		return PTR_ERR(mp);

	parent = real_mount(path->mnt);
	err = -EINVAL;
	if (unlikely(!check_mnt(parent))) {
		/* that's acceptable only for automounts done in private ns */
		if (!(mnt_flags & MNT_SHRINKABLE))
			goto unlock;
		/* ... and for those we'd better have mountpoint still alive */
		if (!parent->mnt_ns)
			goto unlock;
	}

	/* Refuse the same filesystem on the same mount point */
	err = -EBUSY;
	if (path->mnt->mnt_sb == newmnt->mnt.mnt_sb &&
	    path->mnt->mnt_root == path->dentry)
		goto unlock;

	err = -EINVAL;
	if (S_ISLNK(newmnt->mnt.mnt_root->d_inode->i_mode))
		goto unlock;

	newmnt->mnt.mnt_flags = mnt_flags;
	err = graft_tree(newmnt, parent, mp);

unlock:
	unlock_mount(mp);
	return err;
}

/*
 * create a new mount for userspace and request it to be added into the
 * namespace's tree
 */
static int do_new_mount(struct path *path, const char *fstype, int flags,
			int mnt_flags, const char *name, void *data)
{
	struct file_system_type *type;
	struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
	struct vfsmount *mnt;
	int err;

	if (!fstype)
		return -EINVAL;

	type = get_fs_type(fstype);
	if (!type)
		return -ENODEV;

	if (user_ns != &init_user_ns) {
		if (!(type->fs_flags & FS_USERNS_MOUNT)) {
			put_filesystem(type);
			return -EPERM;
		}
		/* Only in special cases allow devices from mounts
		 * created outside the initial user namespace.
		 */
		if (!(type->fs_flags & FS_USERNS_DEV_MOUNT)) {
			flags |= MS_NODEV;
			mnt_flags |= MNT_NODEV | MNT_LOCK_NODEV;
		}
	}

	mnt = vfs_kern_mount(type, flags, name, data);
	if (!IS_ERR(mnt) && (type->fs_flags & FS_HAS_SUBTYPE) &&
	    !mnt->mnt_sb->s_subtype)
		mnt = fs_set_subtype(mnt, fstype);

	put_filesystem(type);
	if (IS_ERR(mnt))
		return PTR_ERR(mnt);

	err = do_add_mount(real_mount(mnt), path, mnt_flags);
	if (err)
		mntput(mnt);
	return err;
}

int finish_automount(struct vfsmount *m, struct path *path)
{
	struct mount *mnt = real_mount(m);
	int err;
	/* The new mount record should have at least 2 refs to prevent it being
	 * expired before we get a chance to add it
	 */
	BUG_ON(mnt_get_count(mnt) < 2);

	if (m->mnt_sb == path->mnt->mnt_sb &&
	    m->mnt_root == path->dentry) {
		err = -ELOOP;
		goto fail;
	}

	err = do_add_mount(mnt, path, path->mnt->mnt_flags | MNT_SHRINKABLE);
	if (!err)
		return 0;
fail:
	/* remove m from any expiration list it may be on */
	if (!list_empty(&mnt->mnt_expire)) {
		namespace_lock();
		br_write_lock(&vfsmount_lock);
		list_del_init(&mnt->mnt_expire);
		br_write_unlock(&vfsmount_lock);
		namespace_unlock();
	}
	mntput(m);
	mntput(m);
	return err;
}

/**
 * mnt_set_expiry - Put a mount on an expiration list
 * @mnt: The mount to list.
 * @expiry_list: The list to add the mount to.
 */
void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
{
	namespace_lock();
	br_write_lock(&vfsmount_lock);

	list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list);

	br_write_unlock(&vfsmount_lock);
	namespace_unlock();
}
EXPORT_SYMBOL(mnt_set_expiry);

/*
 * process a list of expirable mountpoints with the intent of discarding any
 * mountpoints that aren't in use and haven't been touched since last we came
 * here
 */
void mark_mounts_for_expiry(struct list_head *mounts)
{
	struct mount *mnt, *next;
	LIST_HEAD(graveyard);

	if (list_empty(mounts))
		return;

	namespace_lock();
	br_write_lock(&vfsmount_lock);

	/* extract from the expiration list every vfsmount that matches the
	 * following criteria:
	 * - only referenced by its parent vfsmount
	 * - still marked for expiry (marked on the last call here; marks are
	 *   cleared by mntput())
	 */
	list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
		if (!xchg(&mnt->mnt_expiry_mark, 1) ||
			propagate_mount_busy(mnt, 1))
			continue;
		list_move(&mnt->mnt_expire, &graveyard);
	}
	while (!list_empty(&graveyard)) {
		mnt = list_first_entry(&graveyard, struct mount, mnt_expire);
		touch_mnt_namespace(mnt->mnt_ns);
		umount_tree(mnt, 1);
	}
	br_write_unlock(&vfsmount_lock);
	namespace_unlock();
}

EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);

/*
 * Ripoff of 'select_parent()'
 *
 * search the list of submounts for a given mountpoint, and move any
 * shrinkable submounts to the 'graveyard' list.
 */
static int select_submounts(struct mount *parent, struct list_head *graveyard)
{
	struct mount *this_parent = parent;
	struct list_head *next;
	int found = 0;

repeat:
	next = this_parent->mnt_mounts.next;
resume:
	while (next != &this_parent->mnt_mounts) {
		struct list_head *tmp = next;
		struct mount *mnt = list_entry(tmp, struct mount, mnt_child);

		next = tmp->next;
		if (!(mnt->mnt.mnt_flags & MNT_SHRINKABLE))
			continue;
		/*
		 * Descend a level if the d_mounts list is non-empty.
		 */
		if (!list_empty(&mnt->mnt_mounts)) {
			this_parent = mnt;
			goto repeat;
		}

		if (!propagate_mount_busy(mnt, 1)) {
			list_move_tail(&mnt->mnt_expire, graveyard);
			found++;
		}
	}
	/*
	 * All done at this level ... ascend and resume the search
	 */
	if (this_parent != parent) {
		next = this_parent->mnt_child.next;
		this_parent = this_parent->mnt_parent;
		goto resume;
	}
	return found;
}

/*
 * process a list of expirable mountpoints with the intent of discarding any
 * submounts of a specific parent mountpoint
 *
 * vfsmount_lock must be held for write
 */
static void shrink_submounts(struct mount *mnt)
{
	LIST_HEAD(graveyard);
	struct mount *m;

	/* extract submounts of 'mountpoint' from the expiration list */
	while (select_submounts(mnt, &graveyard)) {
		while (!list_empty(&graveyard)) {
			m = list_first_entry(&graveyard, struct mount,
						mnt_expire);
			touch_mnt_namespace(m->mnt_ns);
			umount_tree(m, 1);
		}
	}
}

/*
 * Some copy_from_user() implementations do not return the exact number of
 * bytes remaining to copy on a fault.  But copy_mount_options() requires that.
 * Note that this function differs from copy_from_user() in that it will oops
 * on bad values of `to', rather than returning a short copy.
 */
static long exact_copy_from_user(void *to, const void __user * from,
				 unsigned long n)
{
	char *t = to;
	const char __user *f = from;
	char c;

	if (!access_ok(VERIFY_READ, from, n))
		return n;

	while (n) {
		if (__get_user(c, f)) {
			memset(t, 0, n);
			break;
		}
		*t++ = c;
		f++;
		n--;
	}
	return n;
}

int copy_mount_options(const void __user * data, unsigned long *where)
{
	int i;
	unsigned long page;
	unsigned long size;

	*where = 0;
	if (!data)
		return 0;

	if (!(page = __get_free_page(GFP_KERNEL)))
		return -ENOMEM;

	/* We only care that *some* data at the address the user
	 * gave us is valid.  Just in case, we'll zero
	 * the remainder of the page.
	 */
	/* copy_from_user cannot cross TASK_SIZE ! */
	size = TASK_SIZE - (unsigned long)data;
	if (size > PAGE_SIZE)
		size = PAGE_SIZE;

	i = size - exact_copy_from_user((void *)page, data, size);
	if (!i) {
		free_page(page);
		return -EFAULT;
	}
	if (i != PAGE_SIZE)
		memset((char *)page + i, 0, PAGE_SIZE - i);
	*where = page;
	return 0;
}

int copy_mount_string(const void __user *data, char **where)
{
	char *tmp;

	if (!data) {
		*where = NULL;
		return 0;
	}

	tmp = strndup_user(data, PAGE_SIZE);
	if (IS_ERR(tmp))
		return PTR_ERR(tmp);

	*where = tmp;
	return 0;
}
/*
 * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
 * be given to the mount() call (ie: read-only, no-dev, no-suid etc).
 *
 * data is a (void *) that can point to any structure up to
 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
 * information (or be NULL).
 *
 * Pre-0.97 versions of mount() didn't have a flags word.
 * When the flags word was introduced its top half was required
 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
 * Therefore, if this magic number is present, it carries no information
 * and must be discarded.
 */
long do_mount(const char *dev_name, const char *dir_name,
		const char *type_page, unsigned long flags, void *data_page)
{
	struct path path;
	int retval = 0;
	int mnt_flags = 0;

	/* Discard magic */
	if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
		flags &= ~MS_MGC_MSK;

	/* Basic sanity checks */

	if (!dir_name || !*dir_name || !memchr(dir_name, 0, PAGE_SIZE))
		return -EINVAL;

	if (data_page)
		((char *)data_page)[PAGE_SIZE - 1] = 0;

	/* ... and get the mountpoint */
	retval = kern_path(dir_name, LOOKUP_FOLLOW, &path);
	if (retval)
		return retval;

	retval = security_sb_mount(dev_name, &path,
				   type_page, flags, data_page);
	if (!retval && !may_mount())
		retval = -EPERM;
	if (retval)
		goto dput_out;

	/* Default to relatime unless overridden */
	if (!(flags & MS_NOATIME))
		mnt_flags |= MNT_RELATIME;

	/* Separate the per-mountpoint flags */
	if (flags & MS_NOSUID)
		mnt_flags |= MNT_NOSUID;
	if (flags & MS_NODEV)
		mnt_flags |= MNT_NODEV;
	if (flags & MS_NOEXEC)
		mnt_flags |= MNT_NOEXEC;
	if (flags & MS_NOATIME)
		mnt_flags |= MNT_NOATIME;
	if (flags & MS_NODIRATIME)
		mnt_flags |= MNT_NODIRATIME;
	if (flags & MS_STRICTATIME)
		mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME);
	if (flags & MS_RDONLY)
		mnt_flags |= MNT_READONLY;

	/* The default atime for remount is preservation */
	if ((flags & MS_REMOUNT) &&
	    ((flags & (MS_NOATIME | MS_NODIRATIME | MS_RELATIME |
		       MS_STRICTATIME)) == 0)) {
		mnt_flags &= ~MNT_ATIME_MASK;
		mnt_flags |= path.mnt->mnt_flags & MNT_ATIME_MASK;
	}

	flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | MS_BORN |
		   MS_NOATIME | MS_NODIRATIME | MS_RELATIME | MS_KERNMOUNT |
		   MS_STRICTATIME);

	if (flags & MS_REMOUNT)
		retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
				    data_page);
	else if (flags & MS_BIND)
		retval = do_loopback(&path, dev_name, flags & MS_REC);
	else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
		retval = do_change_type(&path, flags);
	else if (flags & MS_MOVE)
		retval = do_move_mount(&path, dev_name);
	else
		retval = do_new_mount(&path, type_page, flags, mnt_flags,
				      dev_name, data_page);
dput_out:
	path_put(&path);
	return retval;
}
2421 static void free_mnt_ns(struct mnt_namespace
*ns
)
2423 proc_free_inum(ns
->proc_inum
);
2424 put_user_ns(ns
->user_ns
);
2429 * Assign a sequence number so we can detect when we attempt to bind
2430 * mount a reference to an older mount namespace into the current
2431 * mount namespace, preventing reference counting loops. A 64bit
2432 * number incrementing at 10Ghz will take 12,427 years to wrap which
2433 * is effectively never, so we can ignore the possibility.
2435 static atomic64_t mnt_ns_seq
= ATOMIC64_INIT(1);
2437 static struct mnt_namespace
*alloc_mnt_ns(struct user_namespace
*user_ns
)
2439 struct mnt_namespace
*new_ns
;
2442 new_ns
= kmalloc(sizeof(struct mnt_namespace
), GFP_KERNEL
);
2444 return ERR_PTR(-ENOMEM
);
2445 ret
= proc_alloc_inum(&new_ns
->proc_inum
);
2448 return ERR_PTR(ret
);
2450 new_ns
->seq
= atomic64_add_return(1, &mnt_ns_seq
);
2451 atomic_set(&new_ns
->count
, 1);
2452 new_ns
->root
= NULL
;
2453 INIT_LIST_HEAD(&new_ns
->list
);
2454 init_waitqueue_head(&new_ns
->poll
);
2456 new_ns
->user_ns
= get_user_ns(user_ns
);
2461 * Allocate a new namespace structure and populate it with contents
2462 * copied from the namespace of the passed in task structure.
2464 static struct mnt_namespace
*dup_mnt_ns(struct mnt_namespace
*mnt_ns
,
2465 struct user_namespace
*user_ns
, struct fs_struct
*fs
)
2467 struct mnt_namespace
*new_ns
;
2468 struct vfsmount
*rootmnt
= NULL
, *pwdmnt
= NULL
;
2469 struct mount
*p
, *q
;
2470 struct mount
*old
= mnt_ns
->root
;
2474 new_ns
= alloc_mnt_ns(user_ns
);
2479 /* First pass: copy the tree topology */
2480 copy_flags
= CL_COPY_ALL
| CL_EXPIRE
;
2481 if (user_ns
!= mnt_ns
->user_ns
)
2482 copy_flags
|= CL_SHARED_TO_SLAVE
| CL_UNPRIVILEGED
;
2483 new = copy_tree(old
, old
->mnt
.mnt_root
, copy_flags
);
2486 free_mnt_ns(new_ns
);
2487 return ERR_CAST(new);
2490 br_write_lock(&vfsmount_lock
);
2491 list_add_tail(&new_ns
->list
, &new->mnt_list
);
2492 br_write_unlock(&vfsmount_lock
);
2495 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
2496 * as belonging to new namespace. We have already acquired a private
2497 * fs_struct, so tsk->fs->lock is not needed.
2504 if (&p
->mnt
== fs
->root
.mnt
) {
2505 fs
->root
.mnt
= mntget(&q
->mnt
);
2508 if (&p
->mnt
== fs
->pwd
.mnt
) {
2509 fs
->pwd
.mnt
= mntget(&q
->mnt
);
2513 p
= next_mnt(p
, old
);
2514 q
= next_mnt(q
, new);
struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
		struct user_namespace *user_ns, struct fs_struct *new_fs)
{
	struct mnt_namespace *new_ns;

	BUG_ON(!ns);
	get_mnt_ns(ns);

	if (!(flags & CLONE_NEWNS))
		return ns;

	new_ns = dup_mnt_ns(ns, user_ns, new_fs);

	put_mnt_ns(ns);
	return new_ns;
}
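/*
 * Example (userspace, illustrative only): copy_mnt_ns() runs on this path
 * when a task unshares its mount namespace; marking mounts private is the
 * usual follow-up so changes stop propagating back to the old namespace:
 *
 *	if (unshare(CLONE_NEWNS) == 0)
 *		mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, NULL);
 */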
/**
 * create_mnt_ns - creates a private namespace and adds a root filesystem
 * @m: pointer to the new root filesystem mountpoint
 */
static struct mnt_namespace *create_mnt_ns(struct vfsmount *m)
{
	struct mnt_namespace *new_ns = alloc_mnt_ns(&init_user_ns);
	if (!IS_ERR(new_ns)) {
		struct mount *mnt = real_mount(m);
		mnt->mnt_ns = new_ns;
		new_ns->root = mnt;
		list_add(&mnt->mnt_list, &new_ns->list);
	} else {
		mntput(m);
	}
	return new_ns;
}
struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
{
	struct mnt_namespace *ns;
	struct super_block *s;
	struct path path;
	int err;

	ns = create_mnt_ns(mnt);
	if (IS_ERR(ns))
		return ERR_CAST(ns);

	err = vfs_path_lookup(mnt->mnt_root, mnt,
			name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);

	put_mnt_ns(ns);

	if (err)
		return ERR_PTR(err);

	/* trade a vfsmount reference for active sb one */
	s = path.mnt->mnt_sb;
	atomic_inc(&s->s_active);
	mntput(path.mnt);
	/* lock the sucker */
	down_write(&s->s_umount);
	/* ... and return the root of (sub)tree on it */
	return path.dentry;
}
EXPORT_SYMBOL(mount_subtree);
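/*
 * Example (in-kernel, illustrative only): NFS uses this to cross from the
 * root of an export to the subdirectory actually being mounted, trading the
 * temporary namespace's reference for the returned dentry plus active sb:
 *
 *	struct dentry *d = mount_subtree(root_mnt, "/export/sub");
 *
 * where root_mnt is a vfsmount of the export's root (names hypothetical).
 */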
SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
		char __user *, type, unsigned long, flags, void __user *, data)
{
	int ret;
	char *kernel_type;
	struct filename *kernel_dir;
	char *kernel_dev;
	unsigned long data_page;

	ret = copy_mount_string(type, &kernel_type);
	if (ret < 0)
		goto out_type;

	kernel_dir = getname(dir_name);
	if (IS_ERR(kernel_dir)) {
		ret = PTR_ERR(kernel_dir);
		goto out_dir;
	}

	ret = copy_mount_string(dev_name, &kernel_dev);
	if (ret < 0)
		goto out_dev;

	ret = copy_mount_options(data, &data_page);
	if (ret < 0)
		goto out_data;

	ret = do_mount(kernel_dev, kernel_dir->name, kernel_type, flags,
		(void *) data_page);

	free_page(data_page);
out_data:
	kfree(kernel_dev);
out_dev:
	putname(kernel_dir);
out_dir:
	kfree(kernel_type);
out_type:
	return ret;
}
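/*
 * Example (userspace, illustrative only): a call exercising all five
 * arguments; "size=64m" travels via copy_mount_options() into data_page
 * and is interpreted by tmpfs, not by do_mount() itself:
 *
 *	mount("tmpfs", "/tmp", "tmpfs", MS_NOSUID | MS_NODEV, "size=64m");
 */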
/*
 * Return true if path is reachable from root
 *
 * namespace_sem or vfsmount_lock is held
 */
bool is_path_reachable(struct mount *mnt, struct dentry *dentry,
			 const struct path *root)
{
	while (&mnt->mnt != root->mnt && mnt_has_parent(mnt)) {
		dentry = mnt->mnt_mountpoint;
		mnt = mnt->mnt_parent;
	}
	return &mnt->mnt == root->mnt && is_subdir(dentry, root->dentry);
}
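/*
 * Worked example (illustrative): with root = { m1, "/a" } and a filesystem
 * m2 mounted on "/a/b", a path on m2 is reachable: the loop climbs from m2
 * to its mountpoint dentry "/a/b" on m1, and is_subdir() then confirms
 * "/a/b" lies beneath "/a".
 */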
int path_is_under(struct path *path1, struct path *path2)
{
	int res;
	br_read_lock(&vfsmount_lock);
	res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2);
	br_read_unlock(&vfsmount_lock);
	return res;
}
EXPORT_SYMBOL(path_is_under);
/*
 * pivot_root Semantics:
 * Moves the root file system of the current process to the directory put_old,
 * makes new_root the new root file system of the current process, and sets
 * root/cwd of all processes which had them on the current root to new_root.
 *
 * Restrictions:
 * The new_root and put_old must be directories, and must not be on the
 * same file system as the current process root. The put_old must be
 * underneath new_root, i.e. adding a non-zero number of /.. to the string
 * pointed to by put_old must yield the same directory as new_root. No other
 * file system may be mounted on put_old. After all, new_root is a mountpoint.
 *
 * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem.
 * See Documentation/filesystems/ramfs-rootfs-initramfs.txt for alternatives
 * in this situation.
 *
 * Notes:
 *  - we don't move root/cwd if they are not at the root (reason: if something
 *    cared enough to change them, it's probably wrong to force them elsewhere)
 *  - it's okay to pick a root that isn't the root of a file system, e.g.
 *    /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
 *    though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
 *    first.
 */
SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
		const char __user *, put_old)
{
	struct path new, old, parent_path, root_parent, root;
	struct mount *new_mnt, *root_mnt, *old_mnt;
	struct mountpoint *old_mp, *root_mp;
	int error;

	if (!may_mount())
		return -EPERM;

	error = user_path_dir(new_root, &new);
	if (error)
		goto out0;

	error = user_path_dir(put_old, &old);
	if (error)
		goto out1;

	error = security_sb_pivotroot(&old, &new);
	if (error)
		goto out2;

	get_fs_root(current->fs, &root);
	old_mp = lock_mount(&old);
	error = PTR_ERR(old_mp);
	if (IS_ERR(old_mp))
		goto out3;

	error = -EINVAL;
	new_mnt = real_mount(new.mnt);
	root_mnt = real_mount(root.mnt);
	old_mnt = real_mount(old.mnt);
	if (IS_MNT_SHARED(old_mnt) ||
		IS_MNT_SHARED(new_mnt->mnt_parent) ||
		IS_MNT_SHARED(root_mnt->mnt_parent))
		goto out4;
	if (!check_mnt(root_mnt) || !check_mnt(new_mnt))
		goto out4;
	error = -ENOENT;
	if (d_unlinked(new.dentry))
		goto out4;
	error = -EBUSY;
	if (new_mnt == root_mnt || old_mnt == root_mnt)
		goto out4; /* loop, on the same file system  */
	error = -EINVAL;
	if (root.mnt->mnt_root != root.dentry)
		goto out4; /* not a mountpoint */
	if (!mnt_has_parent(root_mnt))
		goto out4; /* not attached */
	root_mp = root_mnt->mnt_mp;
	if (new.mnt->mnt_root != new.dentry)
		goto out4; /* not a mountpoint */
	if (!mnt_has_parent(new_mnt))
		goto out4; /* not attached */
	/* make sure we can reach put_old from new_root */
	if (!is_path_reachable(old_mnt, old.dentry, &new))
		goto out4;
	/* make certain new is below the root */
	if (!is_path_reachable(new_mnt, new.dentry, &root))
		goto out4;
	root_mp->m_count++; /* pin it so it won't go away */
	br_write_lock(&vfsmount_lock);
	detach_mnt(new_mnt, &parent_path);
	detach_mnt(root_mnt, &root_parent);
	/* mount old root on put_old */
	attach_mnt(root_mnt, old_mnt, old_mp);
	/* mount new_root on / */
	attach_mnt(new_mnt, real_mount(root_parent.mnt), root_mp);
	touch_mnt_namespace(current->nsproxy->mnt_ns);
	br_write_unlock(&vfsmount_lock);
	chroot_fs_refs(&root, &new);
	put_mountpoint(root_mp);
	error = 0;
out4:
	unlock_mount(old_mp);
	if (!error) {
		path_put(&root_parent);
		path_put(&parent_path);
	}
out3:
	path_put(&root);
out2:
	path_put(&old);
out1:
	path_put(&new);
out0:
	return error;
}
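/*
 * Example (userspace, illustrative only): the classic initramfs handoff.
 * new_root must be a mountpoint, hence the bind mount when it is not one:
 *
 *	mount("/newroot", "/newroot", NULL, MS_BIND, NULL);
 *	chdir("/newroot");
 *	pivot_root(".", "oldroot");
 *	chdir("/");
 *	umount2("/oldroot", MNT_DETACH);
 */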
static void __init init_mount_tree(void)
{
	struct vfsmount *mnt;
	struct mnt_namespace *ns;
	struct path root;
	struct file_system_type *type;

	type = get_fs_type("rootfs");
	if (!type)
		panic("Can't find rootfs type");
	mnt = vfs_kern_mount(type, 0, "rootfs", NULL);
	put_filesystem(type);
	if (IS_ERR(mnt))
		panic("Can't create rootfs");

	ns = create_mnt_ns(mnt);
	if (IS_ERR(ns))
		panic("Can't allocate initial namespace");

	init_task.nsproxy->mnt_ns = ns;
	get_mnt_ns(ns);

	root.mnt = mnt;
	root.dentry = mnt->mnt_root;

	set_fs_pwd(current->fs, &root);
	set_fs_root(current->fs, &root);
}
void __init mnt_init(void)
{
	unsigned u;
	int err;

	init_rwsem(&namespace_sem);

	mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),
			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

	mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC);
	mountpoint_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC);

	if (!mount_hashtable || !mountpoint_hashtable)
		panic("Failed to allocate mount hash table\n");

	printk(KERN_INFO "Mount-cache hash table entries: %lu\n", HASH_SIZE);

	for (u = 0; u < HASH_SIZE; u++)
		INIT_LIST_HEAD(&mount_hashtable[u]);
	for (u = 0; u < HASH_SIZE; u++)
		INIT_LIST_HEAD(&mountpoint_hashtable[u]);

	br_lock_init(&vfsmount_lock);

	err = sysfs_init();
	if (err)
		printk(KERN_WARNING "%s: sysfs_init error: %d\n",
			__func__, err);
	fs_kobj = kobject_create_and_add("fs", NULL);
	if (!fs_kobj)
		printk(KERN_WARNING "%s: kobj create error\n", __func__);
	init_rootfs();
	init_mount_tree();
}
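/*
 * Sizing note (illustrative): each hash table occupies exactly one page, so
 * on a typical 64-bit build with 4KiB pages and a 16-byte struct list_head,
 * HASH_SIZE works out to 4096 / 16 = 256 buckets, which is the figure
 * printed by the "Mount-cache hash table entries" line above.
 */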
void put_mnt_ns(struct mnt_namespace *ns)
{
	if (!atomic_dec_and_test(&ns->count))
		return;
	namespace_lock();
	br_write_lock(&vfsmount_lock);
	umount_tree(ns->root, 0);
	br_write_unlock(&vfsmount_lock);
	namespace_unlock();
	free_mnt_ns(ns);
}
struct vfsmount *kern_mount_data(struct file_system_type *type, void *data)
{
	struct vfsmount *mnt;
	mnt = vfs_kern_mount(type, MS_KERNMOUNT, type->name, data);
	if (!IS_ERR(mnt)) {
		/*
		 * it is a longterm mount, don't release mnt until
		 * we unmount before file sys is unregistered
		 */
		real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL;
	}
	return mnt;
}
EXPORT_SYMBOL_GPL(kern_mount_data);
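/*
 * Example (in-kernel, illustrative only): subsystems that need an internal,
 * long-lived mount pair this with kern_unmount() below, e.g. fs/pipe.c:
 *
 *	pipe_mnt = kern_mount(&pipe_fs_type);
 *	if (IS_ERR(pipe_mnt))
 *		return PTR_ERR(pipe_mnt);
 */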
void kern_unmount(struct vfsmount *mnt)
{
	/* release long term mount so mount point can be released */
	if (!IS_ERR_OR_NULL(mnt)) {
		br_write_lock(&vfsmount_lock);
		real_mount(mnt)->mnt_ns = NULL;
		br_write_unlock(&vfsmount_lock);
		mntput(mnt);
	}
}
EXPORT_SYMBOL(kern_unmount);
bool our_mnt(struct vfsmount *mnt)
{
	return check_mnt(real_mount(mnt));
}
bool current_chrooted(void)
{
	/* Does the current process have a non-standard root */
	struct path ns_root;
	struct path fs_root;
	bool chrooted;

	/* Find the namespace root */
	ns_root.mnt = &current->nsproxy->mnt_ns->root->mnt;
	ns_root.dentry = ns_root.mnt->mnt_root;
	path_get(&ns_root);
	while (d_mountpoint(ns_root.dentry) && follow_down_one(&ns_root))
		;

	get_fs_root(current->fs, &fs_root);

	chrooted = !path_equal(&fs_root, &ns_root);

	path_put(&fs_root);
	path_put(&ns_root);

	return chrooted;
}
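/*
 * Example (illustrative): after chroot("/jail") the task's fs->root no
 * longer matches its mount namespace's root, so current_chrooted() returns
 * true; a task whose root is the namespace root still gets false even with
 * mounts stacked on top of "/", because the follow_down_one() loop above
 * skips over them.
 */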
void update_mnt_policy(struct user_namespace *userns)
{
	struct mnt_namespace *ns = current->nsproxy->mnt_ns;
	struct mount *mnt;

	down_read(&namespace_sem);
	list_for_each_entry(mnt, &ns->list, mnt_list) {
		switch (mnt->mnt.mnt_sb->s_magic) {
		case SYSFS_MAGIC:
			userns->may_mount_sysfs = true;
			break;
		case PROC_SUPER_MAGIC:
			userns->may_mount_proc = true;
			break;
		}
		if (userns->may_mount_sysfs && userns->may_mount_proc)
			break;
	}
	up_read(&namespace_sem);
}
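/*
 * Usage note (assumption, for orientation): this is invoked when a new user
 * namespace is created, so that a later unprivileged mount of proc or sysfs
 * inside it can be permitted only if the creator already had one mounted.
 */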
static void *mntns_get(struct task_struct *task)
{
	struct mnt_namespace *ns = NULL;
	struct nsproxy *nsproxy;

	rcu_read_lock();
	nsproxy = task_nsproxy(task);
	if (nsproxy) {
		ns = nsproxy->mnt_ns;
		get_mnt_ns(ns);
	}
	rcu_read_unlock();

	return ns;
}
static void mntns_put(void *ns)
{
	put_mnt_ns(ns);
}
static int mntns_install(struct nsproxy *nsproxy, void *ns)
{
	struct fs_struct *fs = current->fs;
	struct mnt_namespace *mnt_ns = ns;
	struct path root;

	if (!ns_capable(mnt_ns->user_ns, CAP_SYS_ADMIN) ||
	    !nsown_capable(CAP_SYS_CHROOT) ||
	    !nsown_capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (fs->users != 1)
		return -EINVAL;

	get_mnt_ns(mnt_ns);
	put_mnt_ns(nsproxy->mnt_ns);
	nsproxy->mnt_ns = mnt_ns;

	/* Find the root */
	root.mnt = &mnt_ns->root->mnt;
	root.dentry = mnt_ns->root->mnt.mnt_root;
	path_get(&root);
	while (d_mountpoint(root.dentry) && follow_down_one(&root))
		;

	/* Update the pwd and root */
	set_fs_pwd(fs, &root);
	set_fs_root(fs, &root);

	path_put(&root);
	return 0;
}
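/*
 * Example (userspace, illustrative only): mntns_install() runs on the
 * setns() path; on success the caller's root and cwd are moved to the
 * target namespace's root as computed above:
 *
 *	int fd = open("/proc/1234/ns/mnt", O_RDONLY);
 *	setns(fd, CLONE_NEWNS);
 */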
static unsigned int mntns_inum(void *ns)
{
	struct mnt_namespace *mnt_ns = ns;
	return mnt_ns->proc_inum;
}
const struct proc_ns_operations mntns_operations = {
	.name		= "mnt",
	.type		= CLONE_NEWNS,
	.get		= mntns_get,
	.put		= mntns_put,
	.install	= mntns_install,
	.inum		= mntns_inum,
};