/*
 * (C) Copyright Al Viro 2000, 2001
 *	Released under GPL v2.
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 */
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/capability.h>
#include <linux/mnt_namespace.h>
#include <linux/user_namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/idr.h>
#include <linux/init.h>		/* init_rootfs */
#include <linux/fs_struct.h>	/* get_fs_root et.al. */
#include <linux/fsnotify.h>	/* fsnotify_vfsmount_delete */
#include <linux/uaccess.h>
#include <linux/proc_ns.h>
#include <linux/magic.h>
#include <linux/bootmem.h>
#include <linux/task_work.h>
#include <linux/sched/task.h>
/* Maximum number of mounts in a mount namespace */
unsigned int sysctl_mount_max __read_mostly = 100000;

static unsigned int m_hash_mask __read_mostly;
static unsigned int m_hash_shift __read_mostly;
static unsigned int mp_hash_mask __read_mostly;
static unsigned int mp_hash_shift __read_mostly;
static __initdata unsigned long mhash_entries;
static int __init set_mhash_entries(char *str)
{
	if (!str)
		return 0;
	mhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("mhash_entries=", set_mhash_entries);

static __initdata unsigned long mphash_entries;
static int __init set_mphash_entries(char *str)
{
	if (!str)
		return 0;
	mphash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("mphash_entries=", set_mphash_entries);
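/*
 * Editorial note: the two handlers above parse optional boot-time
 * overrides, e.g. booting with "mhash_entries=8192 mphash_entries=8192"
 * on the kernel command line.  The parsed values are consumed once,
 * when the mount/mountpoint hash tables are allocated during VFS
 * initialisation (outside this excerpt); when absent, the table sizes
 * are derived from available memory.
 */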
static u64 event;
static DEFINE_IDA(mnt_id_ida);
static DEFINE_IDA(mnt_group_ida);
static DEFINE_SPINLOCK(mnt_id_lock);
static int mnt_id_start = 0;
static int mnt_group_start = 1;

static struct hlist_head *mount_hashtable __read_mostly;
static struct hlist_head *mountpoint_hashtable __read_mostly;
static struct kmem_cache *mnt_cache __read_mostly;
static DECLARE_RWSEM(namespace_sem);

/* /sys/fs */
struct kobject *fs_kobj;
EXPORT_SYMBOL_GPL(fs_kobj);
/*
 * vfsmount lock may be taken for read to prevent changes to the
 * vfsmount hash, ie. during mountpoint lookups or walking back
 * towards the root.
 *
 * It should be taken for write in all cases where the vfsmount
 * tree or hash is modified or when a vfsmount structure is modified.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock);
static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> m_hash_shift);
	return &mount_hashtable[tmp & m_hash_mask];
}

static inline struct hlist_head *mp_hash(struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> mp_hash_shift);
	return &mountpoint_hashtable[tmp & mp_hash_mask];
}
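/*
 * Editorial note on the two hash functions above: dividing the pointer
 * values by L1_CACHE_BYTES discards low-order bits that are always zero
 * for cache-aligned allocations, and the "tmp + (tmp >> shift)" step
 * folds high-order bits back in before masking.  Illustrative only:
 * with m_hash_shift == 10 and m_hash_mask == 0x3ff, a sum of 0x12345
 * hashes to (0x12345 + 0x48) & 0x3ff == 0x38d.
 */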
static int mnt_alloc_id(struct mount *mnt)
{
	int res;

retry:
	ida_pre_get(&mnt_id_ida, GFP_KERNEL);
	spin_lock(&mnt_id_lock);
	res = ida_get_new_above(&mnt_id_ida, mnt_id_start, &mnt->mnt_id);
	if (!res)
		mnt_id_start = mnt->mnt_id + 1;
	spin_unlock(&mnt_id_lock);
	if (res == -EAGAIN)
		goto retry;

	return res;
}

static void mnt_free_id(struct mount *mnt)
{
	int id = mnt->mnt_id;
	spin_lock(&mnt_id_lock);
	ida_remove(&mnt_id_ida, id);
	if (mnt_id_start > id)
		mnt_id_start = id;
	spin_unlock(&mnt_id_lock);
}
/*
 * Allocate a new peer group ID
 *
 * mnt_group_ida is protected by namespace_sem
 */
static int mnt_alloc_group_id(struct mount *mnt)
{
	int res;

	if (!ida_pre_get(&mnt_group_ida, GFP_KERNEL))
		return -ENOMEM;

	res = ida_get_new_above(&mnt_group_ida,
				mnt_group_start,
				&mnt->mnt_group_id);
	if (!res)
		mnt_group_start = mnt->mnt_group_id + 1;

	return res;
}

/*
 * Release a peer group ID
 */
void mnt_release_group_id(struct mount *mnt)
{
	int id = mnt->mnt_group_id;
	ida_remove(&mnt_group_ida, id);
	if (mnt_group_start > id)
		mnt_group_start = id;
	mnt->mnt_group_id = 0;
}
/*
 * vfsmount lock must be held for read
 */
static inline void mnt_add_count(struct mount *mnt, int n)
{
#ifdef CONFIG_SMP
	this_cpu_add(mnt->mnt_pcp->mnt_count, n);
#else
	preempt_disable();
	mnt->mnt_count += n;
	preempt_enable();
#endif
}

/*
 * vfsmount lock must be held for write
 */
unsigned int mnt_get_count(struct mount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
	}

	return count;
#else
	return mnt->mnt_count;
#endif
}
static void drop_mountpoint(struct fs_pin *p)
{
	struct mount *m = container_of(p, struct mount, mnt_umount);
	dput(m->mnt_ex_mountpoint);
	pin_remove(p);
	mntput(&m->mnt);
}
static struct mount *alloc_vfsmnt(const char *name)
{
	struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
	if (mnt) {
		int err;

		err = mnt_alloc_id(mnt);
		if (err)
			goto out_free_cache;

		if (name) {
			mnt->mnt_devname = kstrdup_const(name, GFP_KERNEL);
			if (!mnt->mnt_devname)
				goto out_free_id;
		}

#ifdef CONFIG_SMP
		mnt->mnt_pcp = alloc_percpu(struct mnt_pcp);
		if (!mnt->mnt_pcp)
			goto out_free_devname;

		this_cpu_add(mnt->mnt_pcp->mnt_count, 1);
#else
		mnt->mnt_count = 1;
		mnt->mnt_writers = 0;
#endif
		mnt->mnt.data = NULL;

		INIT_HLIST_NODE(&mnt->mnt_hash);
		INIT_LIST_HEAD(&mnt->mnt_child);
		INIT_LIST_HEAD(&mnt->mnt_mounts);
		INIT_LIST_HEAD(&mnt->mnt_list);
		INIT_LIST_HEAD(&mnt->mnt_expire);
		INIT_LIST_HEAD(&mnt->mnt_share);
		INIT_LIST_HEAD(&mnt->mnt_slave_list);
		INIT_LIST_HEAD(&mnt->mnt_slave);
		INIT_HLIST_NODE(&mnt->mnt_mp_list);
		INIT_LIST_HEAD(&mnt->mnt_umounting);
		init_fs_pin(&mnt->mnt_umount, drop_mountpoint);
	}
	return mnt;

#ifdef CONFIG_SMP
out_free_devname:
	kfree_const(mnt->mnt_devname);
#endif
out_free_id:
	mnt_free_id(mnt);
out_free_cache:
	kmem_cache_free(mnt_cache, mnt);
	return NULL;
}
/*
 * Most r/o checks on a fs are for operations that take
 * discrete amounts of time, like a write() or unlink().
 * We must keep track of when those operations start
 * (for permission checks) and when they end, so that
 * we can determine when writes are able to occur to
 * a filesystem.
 */
/*
 * __mnt_is_readonly: check whether a mount is read-only
 * @mnt: the mount to check for its write status
 *
 * This shouldn't be used directly outside of the VFS.
 * It does not guarantee that the filesystem will stay
 * r/w, just that it is right *now*. This can not and
 * should not be used in place of IS_RDONLY(inode).
 * mnt_want/drop_write() will _keep_ the filesystem
 * r/w.
 */
int __mnt_is_readonly(struct vfsmount *mnt)
{
	if (mnt->mnt_flags & MNT_READONLY)
		return 1;
	if (sb_rdonly(mnt->mnt_sb))
		return 1;
	return 0;
}
EXPORT_SYMBOL_GPL(__mnt_is_readonly);
static inline void mnt_inc_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_inc(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers++;
#endif
}

static inline void mnt_dec_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	this_cpu_dec(mnt->mnt_pcp->mnt_writers);
#else
	mnt->mnt_writers--;
#endif
}

static unsigned int mnt_get_writers(struct mount *mnt)
{
#ifdef CONFIG_SMP
	unsigned int count = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers;
	}

	return count;
#else
	return mnt->mnt_writers;
#endif
}
static int mnt_is_readonly(struct vfsmount *mnt)
{
	if (mnt->mnt_sb->s_readonly_remount)
		return 1;
	/* Order wrt setting s_flags/s_readonly_remount in do_remount() */
	smp_rmb();
	return __mnt_is_readonly(mnt);
}
/*
 * Most r/o & frozen checks on a fs are for operations that take discrete
 * amounts of time, like a write() or unlink().  We must keep track of when
 * those operations start (for permission checks) and when they end, so that we
 * can determine when writes are able to occur to a filesystem.
 */
/**
 * __mnt_want_write - get write access to a mount without freeze protection
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mnt is read-write) before
 * returning success. This operation does not protect against filesystem being
 * frozen. When the write operation is finished, __mnt_drop_write() must be
 * called. This is effectively a refcount.
 */
int __mnt_want_write(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	int ret = 0;

	preempt_disable();
	mnt_inc_writers(mnt);
	/*
	 * The store to mnt_inc_writers must be visible before we pass
	 * MNT_WRITE_HOLD loop below, so that the slowpath can see our
	 * incremented count after it has set MNT_WRITE_HOLD.
	 */
	smp_mb();
	while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD)
		cpu_relax();
	/*
	 * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
	 * be set to match its requirements. So we must not load that until
	 * MNT_WRITE_HOLD is cleared.
	 */
	smp_rmb();
	if (mnt_is_readonly(m)) {
		mnt_dec_writers(mnt);
		ret = -EROFS;
	}
	preempt_enable();

	return ret;
}
/**
 * mnt_want_write - get write access to a mount
 * @m: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is about to be performed to
 * it, and makes sure that writes are allowed (mount is read-write, filesystem
 * is not frozen) before returning success. When the write operation is
 * finished, mnt_drop_write() must be called. This is effectively a refcount.
 */
int mnt_want_write(struct vfsmount *m)
{
	int ret;

	sb_start_write(m->mnt_sb);
	ret = __mnt_want_write(m);
	if (ret)
		sb_end_write(m->mnt_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write);
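/*
 * Illustrative caller pattern (not part of this file): a path-based
 * modification is bracketed by the pair above, e.g.
 *
 *	err = mnt_want_write(path->mnt);
 *	if (err)
 *		return err;
 *	err = do_the_modification(path);	// hypothetical helper
 *	mnt_drop_write(path->mnt);
 *	return err;
 *
 * mnt_want_write() combines freeze protection (sb_start_write) with the
 * per-mount writer count taken by __mnt_want_write().
 */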
/**
 * mnt_clone_write - get write access to a mount
 * @mnt: the mount on which to take a write
 *
 * This is effectively like mnt_want_write, except
 * it must only be used to take an extra write reference
 * on a mountpoint that we already know has a write reference
 * on it. This allows some optimisation.
 *
 * After finished, mnt_drop_write must be called as usual to
 * drop the reference.
 */
int mnt_clone_write(struct vfsmount *mnt)
{
	/* superblock may be r/o */
	if (__mnt_is_readonly(mnt))
		return -EROFS;
	preempt_disable();
	mnt_inc_writers(real_mount(mnt));
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL_GPL(mnt_clone_write);
/**
 * __mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount on which to take a write
 *
 * This is like __mnt_want_write, but it takes a file and can
 * do some optimisations if the file is open for write already
 */
int __mnt_want_write_file(struct file *file)
{
	if (!(file->f_mode & FMODE_WRITER))
		return __mnt_want_write(file->f_path.mnt);
	else
		return mnt_clone_write(file->f_path.mnt);
}
/**
 * mnt_want_write_file_path - get write access to a file's mount
 * @file: the file whose mount on which to take a write
 *
 * This is like mnt_want_write, but it takes a file and can
 * do some optimisations if the file is open for write already
 *
 * Called by the vfs for cases when we have an open file at hand, but will do an
 * inode operation on it (important distinction for files opened on overlayfs,
 * since the file operations will come from the real underlying file, while
 * inode operations come from the overlay).
 */
int mnt_want_write_file_path(struct file *file)
{
	int ret;

	sb_start_write(file->f_path.mnt->mnt_sb);
	ret = __mnt_want_write_file(file);
	if (ret)
		sb_end_write(file->f_path.mnt->mnt_sb);
	return ret;
}
static inline int may_write_real(struct file *file)
{
	struct dentry *dentry = file->f_path.dentry;
	struct dentry *upperdentry;

	/* Writable file? */
	if (file->f_mode & FMODE_WRITER)
		return 0;

	/* Not overlayfs? */
	if (likely(!(dentry->d_flags & DCACHE_OP_REAL)))
		return 0;

	/* File refers to upper, writable layer? */
	upperdentry = d_real(dentry, NULL, 0, D_REAL_UPPER);
	if (upperdentry &&
	    (file_inode(file) == d_inode(upperdentry) ||
	     file_inode(file) == d_inode(dentry)))
		return 0;

	/* Lower layer: can't write to real file, sorry... */
	return -EPERM;
}
/**
 * mnt_want_write_file - get write access to a file's mount
 * @file: the file whose mount on which to take a write
 *
 * This is like mnt_want_write, but it takes a file and can
 * do some optimisations if the file is open for write already
 *
 * Mostly called by filesystems from their ioctl operation before performing
 * modification.  On overlayfs this needs to check if the file is on a read-only
 * lower layer and deny access in that case.
 */
int mnt_want_write_file(struct file *file)
{
	int ret;

	ret = may_write_real(file);
	if (!ret) {
		sb_start_write(file_inode(file)->i_sb);
		ret = __mnt_want_write_file(file);
		if (ret)
			sb_end_write(file_inode(file)->i_sb);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write_file);
/**
 * __mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done
 * performing writes to it.  Must be matched with
 * __mnt_want_write() call above.
 */
void __mnt_drop_write(struct vfsmount *mnt)
{
	preempt_disable();
	mnt_dec_writers(real_mount(mnt));
	preempt_enable();
}

/**
 * mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done performing writes to it and
 * also allows filesystem to be frozen again.  Must be matched with
 * mnt_want_write() call above.
 */
void mnt_drop_write(struct vfsmount *mnt)
{
	__mnt_drop_write(mnt);
	sb_end_write(mnt->mnt_sb);
}
EXPORT_SYMBOL_GPL(mnt_drop_write);

void __mnt_drop_write_file(struct file *file)
{
	__mnt_drop_write(file->f_path.mnt);
}

void mnt_drop_write_file_path(struct file *file)
{
	mnt_drop_write(file->f_path.mnt);
}

void mnt_drop_write_file(struct file *file)
{
	__mnt_drop_write(file->f_path.mnt);
	sb_end_write(file_inode(file)->i_sb);
}
EXPORT_SYMBOL(mnt_drop_write_file);
static int mnt_make_readonly(struct mount *mnt)
{
	int ret = 0;

	lock_mount_hash();
	mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
	/*
	 * After storing MNT_WRITE_HOLD, we'll read the counters. This store
	 * should be visible before we do.
	 */
	smp_mb();

	/*
	 * With writers on hold, if this value is zero, then there are
	 * definitely no active writers (although held writers may subsequently
	 * increment the count, they'll have to wait, and decrement it after
	 * seeing MNT_READONLY).
	 *
	 * It is OK to have counter incremented on one CPU and decremented on
	 * another: the sum will add up correctly. The danger would be when we
	 * sum up each counter, if we read a counter before it is incremented,
	 * but then read another CPU's count which it has been subsequently
	 * decremented from -- we would see more decrements than we should.
	 * MNT_WRITE_HOLD protects against this scenario, because
	 * mnt_want_write first increments count, then smp_mb, then spins on
	 * MNT_WRITE_HOLD, so it can't be decremented by another CPU while
	 * we're counting up here.
	 */
	if (mnt_get_writers(mnt) > 0)
		ret = -EBUSY;
	else
		mnt->mnt.mnt_flags |= MNT_READONLY;
	/*
	 * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers
	 * that become unheld will see MNT_READONLY.
	 */
	smp_wmb();
	mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
	unlock_mount_hash();
	return ret;
}
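/*
 * Editorial summary of the MNT_WRITE_HOLD handshake: mnt_make_readonly()
 * sets MNT_WRITE_HOLD, issues a full barrier, then sums the per-cpu
 * writer counts, while __mnt_want_write() increments its counter, issues
 * a full barrier, then spins while MNT_WRITE_HOLD is set.  Whatever the
 * interleaving, either the reader of the counters sees the increment, or
 * the writer sees MNT_WRITE_HOLD (and, once it is cleared, MNT_READONLY),
 * so the two sides can never both conclude they won.
 */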
static void __mnt_unmake_readonly(struct mount *mnt)
{
	lock_mount_hash();
	mnt->mnt.mnt_flags &= ~MNT_READONLY;
	unlock_mount_hash();
}

int sb_prepare_remount_readonly(struct super_block *sb)
{
	struct mount *mnt;
	int err = 0;

	/* Racy optimization.  Recheck the counter under MNT_WRITE_HOLD */
	if (atomic_long_read(&sb->s_remove_count))
		return -EBUSY;

	lock_mount_hash();
	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
		if (!(mnt->mnt.mnt_flags & MNT_READONLY)) {
			mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
			smp_mb();
			if (mnt_get_writers(mnt) > 0) {
				err = -EBUSY;
				break;
			}
		}
	}
	if (!err && atomic_long_read(&sb->s_remove_count))
		err = -EBUSY;

	if (!err) {
		sb->s_readonly_remount = 1;
		smp_wmb();
	}
	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
		if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
			mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
	}
	unlock_mount_hash();

	return err;
}
static void free_vfsmnt(struct mount *mnt)
{
	kfree(mnt->mnt.data);
	kfree_const(mnt->mnt_devname);
#ifdef CONFIG_SMP
	free_percpu(mnt->mnt_pcp);
#endif
	kmem_cache_free(mnt_cache, mnt);
}

static void delayed_free_vfsmnt(struct rcu_head *head)
{
	free_vfsmnt(container_of(head, struct mount, mnt_rcu));
}
/* call under rcu_read_lock */
int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	struct mount *mnt;
	if (read_seqretry(&mount_lock, seq))
		return 1;
	if (bastard == NULL)
		return 0;
	mnt = real_mount(bastard);
	mnt_add_count(mnt, 1);
	if (likely(!read_seqretry(&mount_lock, seq)))
		return 0;
	if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
		mnt_add_count(mnt, -1);
		return 1;
	}
	return -1;
}
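/*
 * Editorial note on the return contract above: 0 means a reference was
 * pinned and the mount is still valid; 1 means the attempt failed with
 * the refcount left untouched (or bastard was NULL); -1 means the count
 * was bumped on a mount that is going away non-synchronously, so the
 * caller owns the excess reference and must drop it outside the
 * RCU-critical section, as legitimize_mnt() below does.
 */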
/* call under rcu_read_lock */
bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	int res = __legitimize_mnt(bastard, seq);
	if (likely(!res))
		return true;
	if (unlikely(res < 0)) {
		rcu_read_unlock();
		mntput(bastard);
		rcu_read_lock();
	}
	return false;
}
/*
 * find the first mount at @dentry on vfsmount @mnt.
 * call under rcu_read_lock()
 */
struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{
	struct hlist_head *head = m_hash(mnt, dentry);
	struct mount *p;

	hlist_for_each_entry_rcu(p, head, mnt_hash)
		if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
			return p;
	return NULL;
}
/*
 * lookup_mnt - Return the first child mount mounted at path
 *
 * "First" means first mounted chronologically.  If you create the
 * following mounts:
 *
 * mount /dev/sda1 /mnt
 * mount /dev/sda2 /mnt
 * mount /dev/sda3 /mnt
 *
 * Then lookup_mnt() on the base /mnt dentry in the root mount will
 * return successively the root dentry and vfsmount of /dev/sda1, then
 * /dev/sda2, then /dev/sda3, then NULL.
 *
 * lookup_mnt takes a reference to the found vfsmount.
 */
struct vfsmount *lookup_mnt(const struct path *path)
{
	struct mount *child_mnt;
	struct vfsmount *m;
	unsigned seq;

	rcu_read_lock();
	do {
		seq = read_seqbegin(&mount_lock);
		child_mnt = __lookup_mnt(path->mnt, path->dentry);
		m = child_mnt ? &child_mnt->mnt : NULL;
	} while (!legitimize_mnt(m, seq));
	rcu_read_unlock();
	return m;
}
/*
 * __is_local_mountpoint - Test to see if dentry is a mountpoint in the
 *                         current mount namespace.
 *
 * The common case is dentries are not mountpoints at all and that
 * test is handled inline.  For the slow case when we are actually
 * dealing with a mountpoint of some kind, walk through all of the
 * mounts in the current mount namespace and test to see if the dentry
 * is a mountpoint.
 *
 * The mount_hashtable is not usable in the context because we
 * need to identify all mounts that may be in the current mount
 * namespace not just a mount that happens to have some specified
 * parent mount.
 */
bool __is_local_mountpoint(struct dentry *dentry)
{
	struct mnt_namespace *ns = current->nsproxy->mnt_ns;
	struct mount *mnt;
	bool is_covered = false;

	if (!d_mountpoint(dentry))
		goto out;

	down_read(&namespace_sem);
	list_for_each_entry(mnt, &ns->list, mnt_list) {
		is_covered = (mnt->mnt_mountpoint == dentry);
		if (is_covered)
			break;
	}
	up_read(&namespace_sem);
out:
	return is_covered;
}
static struct mountpoint *lookup_mountpoint(struct dentry *dentry)
{
	struct hlist_head *chain = mp_hash(dentry);
	struct mountpoint *mp;

	hlist_for_each_entry(mp, chain, m_hash) {
		if (mp->m_dentry == dentry) {
			/* might be worth a WARN_ON() */
			if (d_unlinked(dentry))
				return ERR_PTR(-ENOENT);
			mp->m_count++;
			return mp;
		}
	}
	return NULL;
}
static struct mountpoint *get_mountpoint(struct dentry *dentry)
{
	struct mountpoint *mp, *new = NULL;
	int ret;

	if (d_mountpoint(dentry)) {
mountpoint:
		read_seqlock_excl(&mount_lock);
		mp = lookup_mountpoint(dentry);
		read_sequnlock_excl(&mount_lock);
		if (mp)
			goto done;
	}

	if (!new)
		new = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	/* Exactly one process may set d_mounted */
	ret = d_set_mounted(dentry);

	/* Someone else set d_mounted? */
	if (ret == -EBUSY)
		goto mountpoint;

	/* The dentry is not available as a mountpoint? */
	mp = ERR_PTR(ret);
	if (ret)
		goto done;

	/* Add the new mountpoint to the hash table */
	read_seqlock_excl(&mount_lock);
	new->m_dentry = dentry;
	new->m_count = 1;
	hlist_add_head(&new->m_hash, mp_hash(dentry));
	INIT_HLIST_HEAD(&new->m_list);
	read_sequnlock_excl(&mount_lock);

	mp = new;
	new = NULL;
done:
	kfree(new);
	return mp;
}
static void put_mountpoint(struct mountpoint *mp)
{
	if (!--mp->m_count) {
		struct dentry *dentry = mp->m_dentry;
		BUG_ON(!hlist_empty(&mp->m_list));
		spin_lock(&dentry->d_lock);
		dentry->d_flags &= ~DCACHE_MOUNTED;
		spin_unlock(&dentry->d_lock);
		hlist_del(&mp->m_hash);
		kfree(mp);
	}
}

static inline int check_mnt(struct mount *mnt)
{
	return mnt->mnt_ns == current->nsproxy->mnt_ns;
}
/*
 * vfsmount lock must be held for write
 */
static void touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns) {
		ns->event = ++event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * vfsmount lock must be held for write
 */
static void __touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns && ns->event != event) {
		ns->event = event;
		wake_up_interruptible(&ns->poll);
	}
}
/*
 * vfsmount lock must be held for write
 */
static void unhash_mnt(struct mount *mnt)
{
	mnt->mnt_parent = mnt;
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	list_del_init(&mnt->mnt_child);
	hlist_del_init_rcu(&mnt->mnt_hash);
	hlist_del_init(&mnt->mnt_mp_list);
	put_mountpoint(mnt->mnt_mp);
	mnt->mnt_mp = NULL;
}

/*
 * vfsmount lock must be held for write
 */
static void detach_mnt(struct mount *mnt, struct path *old_path)
{
	old_path->dentry = mnt->mnt_mountpoint;
	old_path->mnt = &mnt->mnt_parent->mnt;
	unhash_mnt(mnt);
}

/*
 * vfsmount lock must be held for write
 */
static void umount_mnt(struct mount *mnt)
{
	/* old mountpoint will be dropped when we can do that */
	mnt->mnt_ex_mountpoint = mnt->mnt_mountpoint;
	unhash_mnt(mnt);
}
/*
 * vfsmount lock must be held for write
 */
void mnt_set_mountpoint(struct mount *mnt,
			struct mountpoint *mp,
			struct mount *child_mnt)
{
	mp->m_count++;
	mnt_add_count(mnt, 1);	/* essentially, that's mntget */
	child_mnt->mnt_mountpoint = dget(mp->m_dentry);
	child_mnt->mnt_parent = mnt;
	child_mnt->mnt_mp = mp;
	hlist_add_head(&child_mnt->mnt_mp_list, &mp->m_list);
}

static void __attach_mnt(struct mount *mnt, struct mount *parent)
{
	hlist_add_head_rcu(&mnt->mnt_hash,
			   m_hash(&parent->mnt, mnt->mnt_mountpoint));
	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
}
/*
 * vfsmount lock must be held for write
 */
static void attach_mnt(struct mount *mnt,
			struct mount *parent,
			struct mountpoint *mp)
{
	mnt_set_mountpoint(parent, mp, mnt);
	__attach_mnt(mnt, parent);
}
void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp, struct mount *mnt)
{
	struct mountpoint *old_mp = mnt->mnt_mp;
	struct dentry *old_mountpoint = mnt->mnt_mountpoint;
	struct mount *old_parent = mnt->mnt_parent;

	list_del_init(&mnt->mnt_child);
	hlist_del_init(&mnt->mnt_mp_list);
	hlist_del_init_rcu(&mnt->mnt_hash);

	attach_mnt(mnt, parent, mp);

	put_mountpoint(old_mp);

	/*
	 * Safely avoid even the suggestion this code might sleep or
	 * lock the mount hash by taking advantage of the knowledge that
	 * mnt_change_mountpoint will not release the final reference
	 * to a mountpoint.
	 *
	 * During mounting, the mount passed in as the parent mount will
	 * continue to use the old mountpoint and during unmounting, the
	 * old mountpoint will continue to exist until namespace_unlock,
	 * which happens well after mnt_change_mountpoint.
	 */
	spin_lock(&old_mountpoint->d_lock);
	old_mountpoint->d_lockref.count--;
	spin_unlock(&old_mountpoint->d_lock);

	mnt_add_count(old_parent, -1);
}
/*
 * vfsmount lock must be held for write
 */
static void commit_tree(struct mount *mnt)
{
	struct mount *parent = mnt->mnt_parent;
	struct mount *m;
	LIST_HEAD(head);
	struct mnt_namespace *n = parent->mnt_ns;

	BUG_ON(parent == mnt);

	list_add_tail(&head, &mnt->mnt_list);
	list_for_each_entry(m, &head, mnt_list)
		m->mnt_ns = n;

	list_splice(&head, n->list.prev);

	n->mounts += n->pending_mounts;
	n->pending_mounts = 0;

	__attach_mnt(mnt, parent);
	touch_mnt_namespace(n);
}
static struct mount *next_mnt(struct mount *p, struct mount *root)
{
	struct list_head *next = p->mnt_mounts.next;
	if (next == &p->mnt_mounts) {
		while (1) {
			if (p == root)
				return NULL;
			next = p->mnt_child.next;
			if (next != &p->mnt_parent->mnt_mounts)
				break;
			p = p->mnt_parent;
		}
	}
	return list_entry(next, struct mount, mnt_child);
}
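/*
 * Editorial note: next_mnt() is a depth-first, pre-order iterator over
 * a mount subtree; the idiomatic walk used throughout this file is
 *
 *	for (p = mnt; p; p = next_mnt(p, mnt))
 *		...visit p...
 *
 * It descends into p->mnt_mounts first and, when a subtree is
 * exhausted, climbs back up via mnt_parent until it returns to @root.
 */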
static struct mount *skip_mnt_tree(struct mount *p)
{
	struct list_head *prev = p->mnt_mounts.prev;
	while (prev != &p->mnt_mounts) {
		p = list_entry(prev, struct mount, mnt_child);
		prev = p->mnt_mounts.prev;
	}
	return p;
}
struct vfsmount *
vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void *data)
{
	struct mount *mnt;
	struct dentry *root;

	if (!type)
		return ERR_PTR(-ENODEV);

	mnt = alloc_vfsmnt(name);
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (type->alloc_mnt_data) {
		mnt->mnt.data = type->alloc_mnt_data();
		if (!mnt->mnt.data) {
			mnt_free_id(mnt);
			free_vfsmnt(mnt);
			return ERR_PTR(-ENOMEM);
		}
	}

	if (flags & SB_KERNMOUNT)
		mnt->mnt.mnt_flags = MNT_INTERNAL;

	root = mount_fs(type, flags, name, &mnt->mnt, data);
	if (IS_ERR(root)) {
		mnt_free_id(mnt);
		free_vfsmnt(mnt);
		return ERR_CAST(root);
	}

	mnt->mnt.mnt_root = root;
	mnt->mnt.mnt_sb = root->d_sb;
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	mnt->mnt_parent = mnt;
	lock_mount_hash();
	list_add_tail(&mnt->mnt_instance, &root->d_sb->s_mounts);
	unlock_mount_hash();
	return &mnt->mnt;
}
EXPORT_SYMBOL_GPL(vfs_kern_mount);
struct vfsmount *
vfs_submount(const struct dentry *mountpoint, struct file_system_type *type,
	     const char *name, void *data)
{
	/* Until it is worked out how to pass the user namespace
	 * through from the parent mount to the submount don't support
	 * unprivileged mounts with submounts.
	 */
	if (mountpoint->d_sb->s_user_ns != &init_user_ns)
		return ERR_PTR(-EPERM);

	return vfs_kern_mount(type, SB_SUBMOUNT, name, data);
}
EXPORT_SYMBOL_GPL(vfs_submount);
static struct mount *clone_mnt(struct mount *old, struct dentry *root,
					int flag)
{
	struct super_block *sb = old->mnt.mnt_sb;
	struct mount *mnt;
	int err;

	mnt = alloc_vfsmnt(old->mnt_devname);
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (sb->s_op->clone_mnt_data) {
		mnt->mnt.data = sb->s_op->clone_mnt_data(old->mnt.data);
		if (!mnt->mnt.data) {
			err = -ENOMEM;
			goto out_free;
		}
	}

	if (flag & (CL_SLAVE | CL_PRIVATE | CL_SHARED_TO_SLAVE))
		mnt->mnt_group_id = 0; /* not a peer of original */
	else
		mnt->mnt_group_id = old->mnt_group_id;

	if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
		err = mnt_alloc_group_id(mnt);
		if (err)
			goto out_free;
	}

	mnt->mnt.mnt_flags = old->mnt.mnt_flags;
	mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL);
	/* Don't allow unprivileged users to change mount flags */
	if (flag & CL_UNPRIVILEGED) {
		mnt->mnt.mnt_flags |= MNT_LOCK_ATIME;

		if (mnt->mnt.mnt_flags & MNT_READONLY)
			mnt->mnt.mnt_flags |= MNT_LOCK_READONLY;

		if (mnt->mnt.mnt_flags & MNT_NODEV)
			mnt->mnt.mnt_flags |= MNT_LOCK_NODEV;

		if (mnt->mnt.mnt_flags & MNT_NOSUID)
			mnt->mnt.mnt_flags |= MNT_LOCK_NOSUID;

		if (mnt->mnt.mnt_flags & MNT_NOEXEC)
			mnt->mnt.mnt_flags |= MNT_LOCK_NOEXEC;
	}

	/* Don't allow unprivileged users to reveal what is under a mount */
	if ((flag & CL_UNPRIVILEGED) &&
	    (!(flag & CL_EXPIRE) || list_empty(&old->mnt_expire)))
		mnt->mnt.mnt_flags |= MNT_LOCKED;

	atomic_inc(&sb->s_active);
	mnt->mnt.mnt_sb = sb;
	mnt->mnt.mnt_root = dget(root);
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	mnt->mnt_parent = mnt;
	lock_mount_hash();
	list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
	unlock_mount_hash();

	if ((flag & CL_SLAVE) ||
	    ((flag & CL_SHARED_TO_SLAVE) && IS_MNT_SHARED(old))) {
		list_add(&mnt->mnt_slave, &old->mnt_slave_list);
		mnt->mnt_master = old;
		CLEAR_MNT_SHARED(mnt);
	} else if (!(flag & CL_PRIVATE)) {
		if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old))
			list_add(&mnt->mnt_share, &old->mnt_share);
		if (IS_MNT_SLAVE(old))
			list_add(&mnt->mnt_slave, &old->mnt_slave);
		mnt->mnt_master = old->mnt_master;
	} else {
		CLEAR_MNT_SHARED(mnt);
	}
	if (flag & CL_MAKE_SHARED)
		set_mnt_shared(mnt);

	/* stick the duplicate mount on the same expiry list
	 * as the original if that was on one */
	if (flag & CL_EXPIRE) {
		if (!list_empty(&old->mnt_expire))
			list_add(&mnt->mnt_expire, &old->mnt_expire);
	}

	return mnt;

 out_free:
	mnt_free_id(mnt);
	free_vfsmnt(mnt);
	return ERR_PTR(err);
}
static void cleanup_mnt(struct mount *mnt)
{
	/*
	 * This probably indicates that somebody messed
	 * up a mnt_want/drop_write() pair. If this
	 * happens, the filesystem was probably unable
	 * to make r/w->r/o transitions.
	 *
	 * The locking used to deal with mnt_count decrement provides barriers,
	 * so mnt_get_writers() below is safe.
	 */
	WARN_ON(mnt_get_writers(mnt));
	if (unlikely(mnt->mnt_pins.first))
		mnt_pin_kill(mnt);
	fsnotify_vfsmount_delete(&mnt->mnt);
	dput(mnt->mnt.mnt_root);
	deactivate_super(mnt->mnt.mnt_sb);
	mnt_free_id(mnt);
	call_rcu(&mnt->mnt_rcu, delayed_free_vfsmnt);
}

static void __cleanup_mnt(struct rcu_head *head)
{
	cleanup_mnt(container_of(head, struct mount, mnt_rcu));
}
static LLIST_HEAD(delayed_mntput_list);
static void delayed_mntput(struct work_struct *unused)
{
	struct llist_node *node = llist_del_all(&delayed_mntput_list);
	struct mount *m, *t;

	llist_for_each_entry_safe(m, t, node, mnt_llist)
		cleanup_mnt(m);
}
static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);
static void mntput_no_expire(struct mount *mnt)
{
	rcu_read_lock();
	mnt_add_count(mnt, -1);
	if (likely(mnt->mnt_ns)) { /* shouldn't be the last one */
		rcu_read_unlock();
		return;
	}
	lock_mount_hash();
	if (mnt_get_count(mnt)) {
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	if (unlikely(mnt->mnt.mnt_flags & MNT_DOOMED)) {
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	mnt->mnt.mnt_flags |= MNT_DOOMED;
	rcu_read_unlock();

	list_del(&mnt->mnt_instance);

	if (unlikely(!list_empty(&mnt->mnt_mounts))) {
		struct mount *p, *tmp;
		list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts,  mnt_child) {
			umount_mnt(p);
		}
	}
	unlock_mount_hash();

	if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) {
		struct task_struct *task = current;
		if (likely(!(task->flags & PF_KTHREAD))) {
			init_task_work(&mnt->mnt_rcu, __cleanup_mnt);
			if (!task_work_add(task, &mnt->mnt_rcu, true))
				return;
		}
		if (llist_add(&mnt->mnt_llist, &delayed_mntput_list))
			schedule_delayed_work(&delayed_mntput_work, 1);
		return;
	}
	cleanup_mnt(mnt);
}
void mntput(struct vfsmount *mnt)
{
	if (mnt) {
		struct mount *m = real_mount(mnt);
		/* avoid cacheline pingpong, hope gcc doesn't get "smart" */
		if (unlikely(m->mnt_expiry_mark))
			m->mnt_expiry_mark = 0;
		mntput_no_expire(m);
	}
}
EXPORT_SYMBOL(mntput);

struct vfsmount *mntget(struct vfsmount *mnt)
{
	if (mnt)
		mnt_add_count(real_mount(mnt), 1);
	return mnt;
}
EXPORT_SYMBOL(mntget);
/* path_is_mountpoint() - Check if path is a mount in the current
 *                        namespace.
 *
 * d_mountpoint() can only be used reliably to establish if a dentry is
 * not mounted in any namespace and that common case is handled inline.
 * d_mountpoint() isn't aware of the possibility there may be multiple
 * mounts using a given dentry in a different namespace. This function
 * checks if the passed in path is a mountpoint rather than the dentry
 * alone.
 */
bool path_is_mountpoint(const struct path *path)
{
	unsigned seq;
	bool res;

	if (!d_mountpoint(path->dentry))
		return false;

	rcu_read_lock();
	do {
		seq = read_seqbegin(&mount_lock);
		res = __path_is_mountpoint(path);
	} while (read_seqretry(&mount_lock, seq));
	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL(path_is_mountpoint);
struct vfsmount *mnt_clone_internal(const struct path *path)
{
	struct mount *p;
	p = clone_mnt(real_mount(path->mnt), path->dentry, CL_PRIVATE);
	if (IS_ERR(p))
		return ERR_CAST(p);
	p->mnt.mnt_flags |= MNT_INTERNAL;
	return &p->mnt;
}
#ifdef CONFIG_PROC_FS
/* iterator; we want it to have access to namespace_sem, thus here... */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_mounts *p = m->private;

	down_read(&namespace_sem);
	if (p->cached_event == p->ns->event) {
		void *v = p->cached_mount;
		if (*pos == p->cached_index)
			return v;
		if (*pos == p->cached_index + 1) {
			v = seq_list_next(v, &p->ns->list, &p->cached_index);
			return p->cached_mount = v;
		}
	}

	p->cached_event = p->ns->event;
	p->cached_mount = seq_list_start(&p->ns->list, *pos);
	p->cached_index = *pos;
	return p->cached_mount;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_mounts *p = m->private;

	p->cached_mount = seq_list_next(v, &p->ns->list, pos);
	p->cached_index = *pos;
	return p->cached_mount;
}

static void m_stop(struct seq_file *m, void *v)
{
	up_read(&namespace_sem);
}

static int m_show(struct seq_file *m, void *v)
{
	struct proc_mounts *p = m->private;
	struct mount *r = list_entry(v, struct mount, mnt_list);
	return p->show(m, &r->mnt);
}

const struct seq_operations mounts_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= m_show,
};
#endif  /* CONFIG_PROC_FS */
/**
 * may_umount_tree - check if a mount tree is busy
 * @mnt: root of mount tree
 *
 * This is called to check if a tree of mounts has any
 * open files, pwds, chroots or sub mounts that are
 * busy.
 */
int may_umount_tree(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);
	int actual_refs = 0;
	int minimum_refs = 0;
	struct mount *p;
	BUG_ON(!m);

	/* write lock needed for mnt_get_count */
	lock_mount_hash();
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		actual_refs += mnt_get_count(p);
		minimum_refs += 2;
	}
	unlock_mount_hash();

	if (actual_refs > minimum_refs)
		return 0;

	return 1;
}

EXPORT_SYMBOL(may_umount_tree);
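/*
 * Editorial note: the check above treats two references per tree member
 * as the idle baseline (hence minimum_refs += 2); any count beyond that
 * baseline implies an open file, pwd, chroot or other user somewhere in
 * the tree, so the tree is reported busy.
 */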
/**
 * may_umount - check if a mount point is busy
 * @mnt: root of mount
 *
 * This is called to check if a mount point has any
 * open files, pwds, chroots or sub mounts. If the
 * mount has sub mounts this will return busy
 * regardless of whether the sub mounts are busy.
 *
 * Doesn't take quota and stuff into account. IOW, in some cases it will
 * give false negatives. The main reason why it's here is that we need
 * a non-destructive way to look for easily umountable filesystems.
 */
int may_umount(struct vfsmount *mnt)
{
	int ret = 1;
	down_read(&namespace_sem);
	lock_mount_hash();
	if (propagate_mount_busy(real_mount(mnt), 2))
		ret = 0;
	unlock_mount_hash();
	up_read(&namespace_sem);
	return ret;
}

EXPORT_SYMBOL(may_umount);
static HLIST_HEAD(unmounted);	/* protected by namespace_sem */

static void namespace_unlock(void)
{
	struct hlist_head head;

	hlist_move_list(&unmounted, &head);

	up_write(&namespace_sem);

	if (likely(hlist_empty(&head)))
		return;

	synchronize_rcu();

	group_pin_kill(&head);
}

static inline void namespace_lock(void)
{
	down_write(&namespace_sem);
}
enum umount_tree_flags {
	UMOUNT_SYNC = 1,
	UMOUNT_PROPAGATE = 2,
	UMOUNT_CONNECTED = 4,
};

static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how)
{
	/* Leaving mounts connected is only valid for lazy umounts */
	if (how & UMOUNT_SYNC)
		return true;

	/* A mount without a parent has nothing to be connected to */
	if (!mnt_has_parent(mnt))
		return true;

	/* Because the reference counting rules change when mounts are
	 * unmounted and connected, umounted mounts may not be
	 * connected to mounted mounts.
	 */
	if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT))
		return true;

	/* Has it been requested that the mount remain connected? */
	if (how & UMOUNT_CONNECTED)
		return false;

	/* Is the mount locked such that it needs to remain connected? */
	if (IS_MNT_LOCKED(mnt))
		return false;

	/* By default disconnect the mount */
	return true;
}
/*
 * mount_lock must be held
 * namespace_sem must be held for write
 */
static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
{
	LIST_HEAD(tmp_list);
	struct mount *p;

	if (how & UMOUNT_PROPAGATE)
		propagate_mount_unlock(mnt);

	/* Gather the mounts to umount */
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		p->mnt.mnt_flags |= MNT_UMOUNT;
		list_move(&p->mnt_list, &tmp_list);
	}

	/* Hide the mounts from mnt_mounts */
	list_for_each_entry(p, &tmp_list, mnt_list) {
		list_del_init(&p->mnt_child);
	}

	/* Add propagated mounts to the tmp_list */
	if (how & UMOUNT_PROPAGATE)
		propagate_umount(&tmp_list);

	while (!list_empty(&tmp_list)) {
		struct mnt_namespace *ns;
		bool disconnect;
		p = list_first_entry(&tmp_list, struct mount, mnt_list);
		list_del_init(&p->mnt_expire);
		list_del_init(&p->mnt_list);
		ns = p->mnt_ns;
		if (ns) {
			ns->mounts--;
			__touch_mnt_namespace(ns);
		}
		p->mnt_ns = NULL;
		if (how & UMOUNT_SYNC)
			p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;

		disconnect = disconnect_mount(p, how);

		pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt,
				 disconnect ? &unmounted : NULL);
		if (mnt_has_parent(p)) {
			mnt_add_count(p->mnt_parent, -1);
			if (!disconnect) {
				/* Don't forget about p */
				list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts);
			} else {
				umount_mnt(p);
			}
		}
		change_mnt_propagation(p, MS_PRIVATE);
	}
}
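/*
 * Editorial note on the flags: UMOUNT_SYNC marks a synchronous unmount
 * (MNT_SYNC_UMOUNT lets RCU-mode path walks fail fast), UMOUNT_PROPAGATE
 * spreads the unmount through the propagation tree, and UMOUNT_CONNECTED
 * asks for lazily-unmounted mounts to stay connected to their parent,
 * subject to the rules in disconnect_mount() above.
 */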
static void shrink_submounts(struct mount *mnt);

static int do_umount(struct mount *mnt, int flags)
{
	struct super_block *sb = mnt->mnt.mnt_sb;
	int retval;

	retval = security_sb_umount(&mnt->mnt, flags);
	if (retval)
		return retval;

	/*
	 * Allow userspace to request a mountpoint be expired rather than
	 * unmounting unconditionally. Unmount only happens if:
	 *  (1) the mark is already set (the mark is cleared by mntput())
	 *  (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
	 */
	if (flags & MNT_EXPIRE) {
		if (&mnt->mnt == current->fs->root.mnt ||
		    flags & (MNT_FORCE | MNT_DETACH))
			return -EINVAL;

		/*
		 * probably don't strictly need the lock here if we examined
		 * all race cases, but it's a slowpath.
		 */
		lock_mount_hash();
		if (mnt_get_count(mnt) != 2) {
			unlock_mount_hash();
			return -EBUSY;
		}
		unlock_mount_hash();

		if (!xchg(&mnt->mnt_expiry_mark, 1))
			return -EAGAIN;
	}

	/*
	 * If we may have to abort operations to get out of this
	 * mount, and they will themselves hold resources we must
	 * allow the fs to do things. In the Unix tradition of
	 * 'Gee thats tricky lets do it in userspace' the umount_begin
	 * might fail to complete on the first run through as other tasks
	 * must return, and the like. That's for the mount program to worry
	 * about for the moment.
	 */

	if (flags & MNT_FORCE && sb->s_op->umount_begin) {
		sb->s_op->umount_begin(sb);
	}

	/*
	 * No sense to grab the lock for this test, but test itself looks
	 * somewhat bogus. Suggestions for better replacement?
	 * Ho-hum... In principle, we might treat that as umount + switch
	 * to rootfs. GC would eventually take care of the old vfsmount.
	 * Actually it makes sense, especially if rootfs would contain a
	 * /reboot - static binary that would close all descriptors and
	 * call reboot(9). Then init(8) could umount root and exec /reboot.
	 */
	if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
		/*
		 * Special case for "unmounting" root ...
		 * we just try to remount it readonly.
		 */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		down_write(&sb->s_umount);
		if (!sb_rdonly(sb))
			retval = do_remount_sb(sb, SB_RDONLY, NULL, 0);
		up_write(&sb->s_umount);
		return retval;
	}

	namespace_lock();
	lock_mount_hash();
	event++;

	if (flags & MNT_DETACH) {
		if (!list_empty(&mnt->mnt_list))
			umount_tree(mnt, UMOUNT_PROPAGATE);
		retval = 0;
	} else {
		shrink_submounts(mnt);
		retval = -EBUSY;
		if (!propagate_mount_busy(mnt, 2)) {
			if (!list_empty(&mnt->mnt_list))
				umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
			retval = 0;
		}
	}
	unlock_mount_hash();
	namespace_unlock();
	return retval;
}
/*
 * __detach_mounts - lazily unmount all mounts on the specified dentry
 *
 * During unlink, rmdir, and d_drop it is possible to lose the path
 * to an existing mountpoint, and wind up leaking the mount.
 * detach_mounts allows lazily unmounting those mounts instead of
 * leaking them.
 *
 * The caller may hold dentry->d_inode->i_mutex.
 */
void __detach_mounts(struct dentry *dentry)
{
	struct mountpoint *mp;
	struct mount *mnt;

	namespace_lock();
	lock_mount_hash();
	mp = lookup_mountpoint(dentry);
	if (IS_ERR_OR_NULL(mp))
		goto out_unlock;

	event++;
	while (!hlist_empty(&mp->m_list)) {
		mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
		if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
			hlist_add_head(&mnt->mnt_umount.s_list, &unmounted);
			umount_mnt(mnt);
		}
		else umount_tree(mnt, UMOUNT_CONNECTED);
	}
	put_mountpoint(mp);
out_unlock:
	unlock_mount_hash();
	namespace_unlock();
}
/*
 * Is the caller allowed to modify his namespace?
 */
static inline bool may_mount(void)
{
	return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN);
}

static inline bool may_mandlock(void)
{
#ifndef CONFIG_MANDATORY_FILE_LOCKING
	pr_warn("VFS: \"mand\" mount option not supported");
	return false;
#endif
	return capable(CAP_SYS_ADMIN);
}
/*
 * Now umount can handle mount points as well as block devices.
 * This is important for filesystems which use unnamed block devices.
 *
 * We now support a flag for forced unmount like the other 'big iron'
 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
 */
SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
{
	struct path path;
	struct mount *mnt;
	int retval;
	int lookup_flags = 0;

	if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
		return -EINVAL;

	if (!may_mount())
		return -EPERM;

	if (!(flags & UMOUNT_NOFOLLOW))
		lookup_flags |= LOOKUP_FOLLOW;

	retval = user_path_mountpoint_at(AT_FDCWD, name, lookup_flags, &path);
	if (retval)
		goto out;
	mnt = real_mount(path.mnt);
	retval = -EINVAL;
	if (path.dentry != path.mnt->mnt_root)
		goto dput_and_out;
	if (!check_mnt(mnt))
		goto dput_and_out;
	if (mnt->mnt.mnt_flags & MNT_LOCKED)
		goto dput_and_out;
	retval = -EPERM;
	if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN))
		goto dput_and_out;

	retval = do_umount(mnt, flags);
dput_and_out:
	/* we mustn't call path_put() as that would clear mnt_expiry_mark */
	dput(path.dentry);
	mntput_no_expire(mnt);
out:
	return retval;
}
#ifdef __ARCH_WANT_SYS_OLDUMOUNT

/*
 *	The 2.0 compatible umount. No flags.
 */
SYSCALL_DEFINE1(oldumount, char __user *, name)
{
	return sys_umount(name, 0);
}

#endif
static bool is_mnt_ns_file(struct dentry *dentry)
{
	/* Is this a proxy for a mount namespace? */
	return dentry->d_op == &ns_dentry_operations &&
	       dentry->d_fsdata == &mntns_operations;
}

struct mnt_namespace *to_mnt_ns(struct ns_common *ns)
{
	return container_of(ns, struct mnt_namespace, ns);
}

static bool mnt_ns_loop(struct dentry *dentry)
{
	/* Could bind mounting the mount namespace inode cause a
	 * mount namespace loop?
	 */
	struct mnt_namespace *mnt_ns;
	if (!is_mnt_ns_file(dentry))
		return false;

	mnt_ns = to_mnt_ns(get_proc_ns(dentry->d_inode));
	return current->nsproxy->mnt_ns->seq >= mnt_ns->seq;
}
struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
					int flag)
{
	struct mount *res, *p, *q, *r, *parent;

	if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(mnt))
		return ERR_PTR(-EINVAL);

	if (!(flag & CL_COPY_MNT_NS_FILE) && is_mnt_ns_file(dentry))
		return ERR_PTR(-EINVAL);

	res = q = clone_mnt(mnt, dentry, flag);
	if (IS_ERR(q))
		return q;

	q->mnt_mountpoint = mnt->mnt_mountpoint;

	p = mnt;
	list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
		struct mount *s;
		if (!is_subdir(r->mnt_mountpoint, dentry))
			continue;

		for (s = r; s; s = next_mnt(s, r)) {
			if (!(flag & CL_COPY_UNBINDABLE) &&
			    IS_MNT_UNBINDABLE(s)) {
				s = skip_mnt_tree(s);
				continue;
			}
			if (!(flag & CL_COPY_MNT_NS_FILE) &&
			    is_mnt_ns_file(s->mnt.mnt_root)) {
				s = skip_mnt_tree(s);
				continue;
			}
			while (p != s->mnt_parent) {
				p = p->mnt_parent;
				q = q->mnt_parent;
			}
			p = s;
			parent = q;
			q = clone_mnt(p, p->mnt.mnt_root, flag);
			if (IS_ERR(q))
				goto out;
			lock_mount_hash();
			list_add_tail(&q->mnt_list, &res->mnt_list);
			attach_mnt(q, parent, p->mnt_mp);
			unlock_mount_hash();
		}
	}
	return res;
out:
	if (res) {
		lock_mount_hash();
		umount_tree(res, UMOUNT_SYNC);
		unlock_mount_hash();
	}
	return q;
}
/* Caller should check returned pointer for errors */

struct vfsmount *collect_mounts(const struct path *path)
{
	struct mount *tree;
	namespace_lock();
	if (!check_mnt(real_mount(path->mnt)))
		tree = ERR_PTR(-EINVAL);
	else
		tree = copy_tree(real_mount(path->mnt), path->dentry,
				 CL_COPY_ALL | CL_PRIVATE);
	namespace_unlock();
	if (IS_ERR(tree))
		return ERR_CAST(tree);
	return &tree->mnt;
}

void drop_collected_mounts(struct vfsmount *mnt)
{
	namespace_lock();
	lock_mount_hash();
	umount_tree(real_mount(mnt), UMOUNT_SYNC);
	unlock_mount_hash();
	namespace_unlock();
}
/**
 * clone_private_mount - create a private clone of a path
 *
 * This creates a new vfsmount, which will be the clone of @path.  The new will
 * not be attached anywhere in the namespace and will be private (i.e. changes
 * to the originating mount won't be propagated into this).
 *
 * Release with mntput().
 */
struct vfsmount *clone_private_mount(const struct path *path)
{
	struct mount *old_mnt = real_mount(path->mnt);
	struct mount *new_mnt;

	if (IS_MNT_UNBINDABLE(old_mnt))
		return ERR_PTR(-EINVAL);

	new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE);
	if (IS_ERR(new_mnt))
		return ERR_CAST(new_mnt);

	return &new_mnt->mnt;
}
EXPORT_SYMBOL_GPL(clone_private_mount);
int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
		   struct vfsmount *root)
{
	struct mount *mnt;
	int res = f(root, arg);
	if (res)
		return res;
	list_for_each_entry(mnt, &real_mount(root)->mnt_list, mnt_list) {
		res = f(&mnt->mnt, arg);
		if (res)
			return res;
	}
	return 0;
}
static void cleanup_group_ids(struct mount *mnt, struct mount *end)
{
	struct mount *p;

	for (p = mnt; p != end; p = next_mnt(p, mnt)) {
		if (p->mnt_group_id && !IS_MNT_SHARED(p))
			mnt_release_group_id(p);
	}
}

static int invent_group_ids(struct mount *mnt, bool recurse)
{
	struct mount *p;

	for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
		if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
			int err = mnt_alloc_group_id(p);
			if (err) {
				cleanup_group_ids(mnt, p);
				return err;
			}
		}
	}

	return 0;
}
int count_mounts(struct mnt_namespace *ns, struct mount *mnt)
{
	unsigned int max = READ_ONCE(sysctl_mount_max);
	unsigned int mounts = 0, old, pending, sum;
	struct mount *p;

	for (p = mnt; p; p = next_mnt(p, mnt))
		mounts++;

	old = ns->mounts;
	pending = ns->pending_mounts;
	sum = old + pending;
	if ((old > sum) ||
	    (pending > sum) ||
	    (max < sum) ||
	    (mounts > (max - sum)))
		return -ENOSPC;

	ns->pending_mounts = pending + mounts;
	return 0;
}
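/*
 * Editorial note: the four comparisons above are an overflow-proof way
 * of checking "old + pending + mounts > max" in unsigned arithmetic.
 * Worked example: with max = 100000, old = 99000 and pending = 900,
 * sum = 99900, so attaching a 150-mount tree fails because
 * 150 > 100000 - 99900; the first three tests reject the cases where
 * sum itself wrapped or already exceeds max.
 */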
/*
 *  @source_mnt : mount tree to be attached
 *  @nd         : place the mount tree @source_mnt is attached
 *  @parent_nd  : if non-null, detach the source_mnt from its parent and
 *  		   store the parent mount and mountpoint dentry.
 *  		   (done when source_mnt is moved)
 *
 *  NOTE: the table below explains the semantics when a source mount
 *  of a given type is attached to a destination mount of a given type.
 * ---------------------------------------------------------------------------
 * |         BIND MOUNT OPERATION                                            |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (++)   |     shared (+) |     shared(+++)|  invalid   |
 * |non-shared| shared (+)    |      private   |      slave (*) |  invalid   |
 * ***************************************************************************
 * A bind operation clones the source mount and mounts the clone on the
 * destination mount.
 *
 * (++)  the cloned mount is propagated to all the mounts in the propagation
 * 	 tree of the destination mount and the cloned mount is added to
 * 	 the peer group of the source mount.
 * (+)   the cloned mount is created under the destination mount and is marked
 *       as shared. The cloned mount is added to the peer group of the source
 *       mount.
 * (+++) the mount is propagated to all the mounts in the propagation tree
 *       of the destination mount and the cloned mount is made slave
 *       of the same master as that of the source mount. The cloned mount
 *       is marked as 'shared and slave'.
 * (*)   the cloned mount is made a slave of the same master as that of the
 * 	 source mount.
 *
 * ---------------------------------------------------------------------------
 * |         		MOVE MOUNT OPERATION                                 |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (+)    |     shared (+) |    shared(+++) |  invalid   |
 * |non-shared| shared (+*)   |      private   |    slave (*)   | unbindable |
 * ***************************************************************************
 *
 * (+)   the mount is moved to the destination. And is then propagated to
 * 	 all the mounts in the propagation tree of the destination mount.
 * (+*)  the mount is moved to the destination.
 * (+++) the mount is moved to the destination and is then propagated to
 * 	 all the mounts belonging to the destination mount's propagation tree.
 * 	 the mount is marked as 'shared and slave'.
 * (*)	 the mount continues to be a slave at the new location.
 *
 * if the source mount is a tree, the operations explained above are
 * applied to each mount in the tree.
 * Must be called without spinlocks held, since this function can sleep
 * in allocations.
 */
static int attach_recursive_mnt(struct mount *source_mnt,
			struct mount *dest_mnt,
			struct mountpoint *dest_mp,
			struct path *parent_path)
{
	HLIST_HEAD(tree_list);
	struct mnt_namespace *ns = dest_mnt->mnt_ns;
	struct mountpoint *smp;
	struct mount *child, *p;
	struct hlist_node *n;
	int err;

	/* Preallocate a mountpoint in case the new mounts need
	 * to be tucked under other mounts.
	 */
	smp = get_mountpoint(source_mnt->mnt.mnt_root);
	if (IS_ERR(smp))
		return PTR_ERR(smp);

	/* Is there space to add these mounts to the mount namespace? */
	if (!parent_path) {
		err = count_mounts(ns, source_mnt);
		if (err)
			goto out;
	}

	if (IS_MNT_SHARED(dest_mnt)) {
		err = invent_group_ids(source_mnt, true);
		if (err)
			goto out;
		err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list);
		lock_mount_hash();
		if (err)
			goto out_cleanup_ids;
		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
			set_mnt_shared(p);
	} else {
		lock_mount_hash();
	}
	if (parent_path) {
		detach_mnt(source_mnt, parent_path);
		attach_mnt(source_mnt, dest_mnt, dest_mp);
		touch_mnt_namespace(source_mnt->mnt_ns);
	} else {
		mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt);
		commit_tree(source_mnt);
	}

	hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) {
		struct mount *q;
		hlist_del_init(&child->mnt_hash);
		q = __lookup_mnt(&child->mnt_parent->mnt,
				 child->mnt_mountpoint);
		if (q)
			mnt_change_mountpoint(child, smp, q);
		commit_tree(child);
	}
	put_mountpoint(smp);
	unlock_mount_hash();

	return 0;

 out_cleanup_ids:
	while (!hlist_empty(&tree_list)) {
		child = hlist_entry(tree_list.first, struct mount, mnt_hash);
		child->mnt_parent->mnt_ns->pending_mounts = 0;
		umount_tree(child, UMOUNT_SYNC);
	}
	unlock_mount_hash();
	cleanup_group_ids(source_mnt, NULL);
 out:
	ns->pending_mounts = 0;

	read_seqlock_excl(&mount_lock);
	put_mountpoint(smp);
	read_sequnlock_excl(&mount_lock);

	return err;
}
static struct mountpoint *lock_mount(struct path *path)
{
	struct vfsmount *mnt;
	struct dentry *dentry = path->dentry;
retry:
	inode_lock(dentry->d_inode);
	if (unlikely(cant_mount(dentry))) {
		inode_unlock(dentry->d_inode);
		return ERR_PTR(-ENOENT);
	}
	namespace_lock();
	mnt = lookup_mnt(path);
	if (likely(!mnt)) {
		struct mountpoint *mp = get_mountpoint(dentry);
		if (IS_ERR(mp)) {
			namespace_unlock();
			inode_unlock(dentry->d_inode);
			return mp;
		}
		return mp;
	}
	namespace_unlock();
	inode_unlock(path->dentry->d_inode);
	path_put(path);
	path->mnt = mnt;
	dentry = path->dentry = dget(mnt->mnt_root);
	goto retry;
}

static void unlock_mount(struct mountpoint *where)
{
	struct dentry *dentry = where->m_dentry;

	read_seqlock_excl(&mount_lock);
	put_mountpoint(where);
	read_sequnlock_excl(&mount_lock);

	namespace_unlock();
	inode_unlock(dentry->d_inode);
}
static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp)
{
	if (mnt->mnt.mnt_sb->s_flags & SB_NOUSER)
		return -EINVAL;

	if (d_is_dir(mp->m_dentry) !=
	      d_is_dir(mnt->mnt.mnt_root))
		return -ENOTDIR;

	return attach_recursive_mnt(mnt, p, mp, NULL);
}
/*
 * Sanity check the flags to change_mnt_propagation.
 */
static int flags_to_propagation_type(int ms_flags)
{
	int type = ms_flags & ~(MS_REC | MS_SILENT);

	/* Fail if any non-propagation flags are set */
	if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
		return 0;
	/* Only one propagation flag should be set */
	if (!is_power_of_2(type))
		return 0;
	return type;
}
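/*
 * Illustrative mapping (not part of the original file): a userspace
 * call like mount(NULL, "/mnt", NULL, MS_SHARED | MS_REC, NULL) reaches
 * this helper with ms_flags = MS_SHARED | MS_REC; MS_REC and MS_SILENT
 * are stripped, the single remaining bit passes the power-of-two test,
 * and MS_SHARED is returned as the propagation type.
 */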
/*
 * recursively change the type of the mountpoint.
 */
static int do_change_type(struct path *path, int ms_flags)
{
	struct mount *m;
	struct mount *mnt = real_mount(path->mnt);
	int recurse = ms_flags & MS_REC;
	int type;
	int err = 0;

	if (path->dentry != path->mnt->mnt_root)
		return -EINVAL;

	type = flags_to_propagation_type(ms_flags);
	if (!type)
		return -EINVAL;

	namespace_lock();
	if (type == MS_SHARED) {
		err = invent_group_ids(mnt, recurse);
		if (err)
			goto out_unlock;
	}

	lock_mount_hash();
	for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
		change_mnt_propagation(m, type);
	unlock_mount_hash();

 out_unlock:
	namespace_unlock();
	return err;
}
static bool has_locked_children(struct mount *mnt, struct dentry *dentry)
{
	struct mount *child;
	list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
		if (!is_subdir(child->mnt_mountpoint, dentry))
			continue;

		if (child->mnt.mnt_flags & MNT_LOCKED)
			return true;
	}
	return false;
}
/*
 * do loopback mount.
 */
static int do_loopback(struct path *path, const char *old_name,
				int recurse)
{
	struct path old_path;
	struct mount *mnt = NULL, *old, *parent;
	struct mountpoint *mp;
	int err;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path);
	if (err)
		return err;

	err = -EINVAL;
	if (mnt_ns_loop(old_path.dentry))
		goto out;

	mp = lock_mount(path);
	err = PTR_ERR(mp);
	if (IS_ERR(mp))
		goto out;

	old = real_mount(old_path.mnt);
	parent = real_mount(path->mnt);

	err = -EINVAL;
	if (IS_MNT_UNBINDABLE(old))
		goto out2;

	if (!check_mnt(parent))
		goto out2;

	if (!check_mnt(old) && old_path.dentry->d_op != &ns_dentry_operations)
		goto out2;

	if (!recurse && has_locked_children(old, old_path.dentry))
		goto out2;

	if (recurse)
		mnt = copy_tree(old, old_path.dentry, CL_COPY_MNT_NS_FILE);
	else
		mnt = clone_mnt(old, old_path.dentry, 0);

	if (IS_ERR(mnt)) {
		err = PTR_ERR(mnt);
		goto out2;
	}

	mnt->mnt.mnt_flags &= ~MNT_LOCKED;

	err = graft_tree(mnt, parent, mp);
	if (err) {
		lock_mount_hash();
		umount_tree(mnt, UMOUNT_SYNC);
		unlock_mount_hash();
	}
out2:
	unlock_mount(mp);
out:
	path_put(&old_path);
	return err;
}
static int change_mount_flags(struct vfsmount *mnt, int ms_flags)
{
	int error = 0;
	int readonly_request = 0;

	if (ms_flags & MS_RDONLY)
		readonly_request = 1;
	if (readonly_request == __mnt_is_readonly(mnt))
		return 0;

	if (readonly_request)
		error = mnt_make_readonly(real_mount(mnt));
	else
		__mnt_unmake_readonly(real_mount(mnt));
	return error;
}
/*
 * change filesystem flags. dir should be a physical root of filesystem.
 * If you've mounted a non-root directory somewhere and want to do remount
 * on it - tough luck.
 */
static int do_remount(struct path *path, int ms_flags, int sb_flags,
		      int mnt_flags, void *data)
{
	int err;
	struct super_block *sb = path->mnt->mnt_sb;
	struct mount *mnt = real_mount(path->mnt);

	if (!check_mnt(mnt))
		return -EINVAL;

	if (path->dentry != path->mnt->mnt_root)
		return -EINVAL;

	/* Don't allow changing of locked mnt flags.
	 *
	 * No locks need to be held here while testing the various
	 * MNT_LOCK flags because those flags can never be cleared
	 * once they are set.
	 */
	if ((mnt->mnt.mnt_flags & MNT_LOCK_READONLY) &&
	    !(mnt_flags & MNT_READONLY)) {
		return -EPERM;
	}
	if ((mnt->mnt.mnt_flags & MNT_LOCK_NODEV) &&
	    !(mnt_flags & MNT_NODEV)) {
		return -EPERM;
	}
	if ((mnt->mnt.mnt_flags & MNT_LOCK_NOSUID) &&
	    !(mnt_flags & MNT_NOSUID)) {
		return -EPERM;
	}
	if ((mnt->mnt.mnt_flags & MNT_LOCK_NOEXEC) &&
	    !(mnt_flags & MNT_NOEXEC)) {
		return -EPERM;
	}
	if ((mnt->mnt.mnt_flags & MNT_LOCK_ATIME) &&
	    ((mnt->mnt.mnt_flags & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK))) {
		return -EPERM;
	}

	err = security_sb_remount(sb, data);
	if (err)
		return err;

	down_write(&sb->s_umount);
	if (ms_flags & MS_BIND)
		err = change_mount_flags(path->mnt, ms_flags);
	else if (!capable(CAP_SYS_ADMIN))
		err = -EPERM;
	else {
		err = do_remount_sb2(path->mnt, sb, sb_flags, data, 0);
		namespace_lock();
		lock_mount_hash();
		propagate_remount(mnt);
		unlock_mount_hash();
		namespace_unlock();
	}
	if (!err) {
		lock_mount_hash();
		mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK;
		mnt->mnt.mnt_flags = mnt_flags;
		touch_mnt_namespace(mnt->mnt_ns);
		unlock_mount_hash();
	}
	up_write(&sb->s_umount);
	return err;
}
static inline int tree_contains_unbindable(struct mount *mnt)
{
	struct mount *p;
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		if (IS_MNT_UNBINDABLE(p))
			return 1;
	}
	return 0;
}
static int do_move_mount(struct path *path, const char *old_name)
{
	struct path old_path, parent_path;
	struct mount *p;
	struct mount *old;
	struct mountpoint *mp;
	int err;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = kern_path(old_name, LOOKUP_FOLLOW, &old_path);
	if (err)
		return err;

	mp = lock_mount(path);
	err = PTR_ERR(mp);
	if (IS_ERR(mp))
		goto out;

	old = real_mount(old_path.mnt);
	p = real_mount(path->mnt);

	err = -EINVAL;
	if (!check_mnt(p) || !check_mnt(old))
		goto out1;

	if (old->mnt.mnt_flags & MNT_LOCKED)
		goto out1;

	err = -EINVAL;
	if (old_path.dentry != old_path.mnt->mnt_root)
		goto out1;

	if (!mnt_has_parent(old))
		goto out1;

	if (d_is_dir(path->dentry) !=
	      d_is_dir(old_path.dentry))
		goto out1;
	/*
	 * Don't move a mount residing in a shared parent.
	 */
	if (IS_MNT_SHARED(old->mnt_parent))
		goto out1;
	/*
	 * Don't move a mount tree containing unbindable mounts to a destination
	 * mount which is shared.
	 */
	if (IS_MNT_SHARED(p) && tree_contains_unbindable(old))
		goto out1;
	err = -ELOOP;
	for (; mnt_has_parent(p); p = p->mnt_parent)
		if (p == old)
			goto out1;

	err = attach_recursive_mnt(old, real_mount(path->mnt), mp, &parent_path);
	if (err)
		goto out1;

	/* if the mount is moved, it should no longer be expired
	 * automatically */
	list_del_init(&old->mnt_expire);
out1:
	unlock_mount(mp);
out:
	if (!err)
		path_put(&parent_path);
	path_put(&old_path);
	return err;
}
static struct vfsmount *fs_set_subtype(struct vfsmount *mnt, const char *fstype)
{
	int err;
	const char *subtype = strchr(fstype, '.');
	if (subtype) {
		subtype++;
		err = -EINVAL;
		if (!subtype[0])
			goto err;
	} else
		subtype = "";

	mnt->mnt_sb->s_subtype = kstrdup(subtype, GFP_KERNEL);
	err = -ENOMEM;
	if (!mnt->mnt_sb->s_subtype)
		goto err;
	return mnt;

 err:
	mntput(mnt);
	return ERR_PTR(err);
}
/*
 * add a mount into a namespace's mount tree
 */
static int do_add_mount(struct mount *newmnt, struct path *path, int mnt_flags)
{
	struct mountpoint *mp;
	struct mount *parent;
	int err;

	mnt_flags &= ~MNT_INTERNAL_FLAGS;

	mp = lock_mount(path);
	if (IS_ERR(mp))
		return PTR_ERR(mp);

	parent = real_mount(path->mnt);
	err = -EINVAL;
	if (unlikely(!check_mnt(parent))) {
		/* that's acceptable only for automounts done in private ns */
		if (!(mnt_flags & MNT_SHRINKABLE))
			goto unlock;
		/* ... and for those we'd better have mountpoint still alive */
		if (!parent->mnt_ns)
			goto unlock;
	}

	/* Refuse the same filesystem on the same mount point */
	err = -EBUSY;
	if (path->mnt->mnt_sb == newmnt->mnt.mnt_sb &&
	    path->mnt->mnt_root == path->dentry)
		goto unlock;

	err = -EINVAL;
	if (d_is_symlink(newmnt->mnt.mnt_root))
		goto unlock;

	newmnt->mnt.mnt_flags = mnt_flags;
	err = graft_tree(newmnt, parent, mp);

unlock:
	unlock_mount(mp);
	return err;
}
static bool mount_too_revealing(struct vfsmount *mnt, int *new_mnt_flags);

/*
 * create a new mount for userspace and request it to be added into the
 * namespace's tree
 */
static int do_new_mount(struct path *path, const char *fstype, int sb_flags,
			int mnt_flags, const char *name, void *data)
{
	struct file_system_type *type;
	struct vfsmount *mnt;
	int err;

	if (!fstype)
		return -EINVAL;

	type = get_fs_type(fstype);
	if (!type)
		return -ENODEV;

	mnt = vfs_kern_mount(type, sb_flags, name, data);
	if (!IS_ERR(mnt) && (type->fs_flags & FS_HAS_SUBTYPE) &&
	    !mnt->mnt_sb->s_subtype)
		mnt = fs_set_subtype(mnt, fstype);

	put_filesystem(type);
	if (IS_ERR(mnt))
		return PTR_ERR(mnt);

	if (mount_too_revealing(mnt, &mnt_flags)) {
		mntput(mnt);
		return -EPERM;
	}

	err = do_add_mount(real_mount(mnt), path, mnt_flags);
	if (err)
		mntput(mnt);
	return err;
}
int finish_automount(struct vfsmount *m, struct path *path)
{
	struct mount *mnt = real_mount(m);
	int err;
	/* The new mount record should have at least 2 refs to prevent it being
	 * expired before we get a chance to add it
	 */
	BUG_ON(mnt_get_count(mnt) < 2);

	if (m->mnt_sb == path->mnt->mnt_sb &&
	    m->mnt_root == path->dentry) {
		err = -ELOOP;
		goto fail;
	}

	err = do_add_mount(mnt, path, path->mnt->mnt_flags | MNT_SHRINKABLE);
	if (!err)
		return 0;
fail:
	/* remove m from any expiration list it may be on */
	if (!list_empty(&mnt->mnt_expire)) {
		namespace_lock();
		list_del_init(&mnt->mnt_expire);
		namespace_unlock();
	}
	mntput(m);
	mntput(m);
	return err;
}
/**
 * mnt_set_expiry - Put a mount on an expiration list
 * @mnt: The mount to list.
 * @expiry_list: The list to add the mount to.
 */
void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
{
	namespace_lock();

	list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list);

	namespace_unlock();
}
EXPORT_SYMBOL(mnt_set_expiry);
/*
 * process a list of expirable mountpoints with the intent of discarding any
 * mountpoints that aren't in use and haven't been touched since last we came
 * here
 */
void mark_mounts_for_expiry(struct list_head *mounts)
{
	struct mount *mnt, *next;
	LIST_HEAD(graveyard);

	if (list_empty(mounts))
		return;

	namespace_lock();
	lock_mount_hash();

	/* extract from the expiration list every vfsmount that matches the
	 * following criteria:
	 * - only referenced by its parent vfsmount
	 * - still marked for expiry (marked on the last call here; marks are
	 *   cleared by mntput())
	 */
	list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
		if (!xchg(&mnt->mnt_expiry_mark, 1) ||
			propagate_mount_busy(mnt, 1))
			continue;
		list_move(&mnt->mnt_expire, &graveyard);
	}
	while (!list_empty(&graveyard)) {
		mnt = list_first_entry(&graveyard, struct mount, mnt_expire);
		touch_mnt_namespace(mnt->mnt_ns);
		umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
	}
	unlock_mount_hash();
	namespace_unlock();
}
EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);
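/*
 * Example: a minimal sketch of how an automount-style filesystem might
 * drive the two expiry hooks above, assuming a hypothetical expiry list
 * and worker (the names below are not part of this file).  Each new
 * submount is put on the list with mnt_set_expiry(); a periodic sweep
 * then culls mounts that were already marked on the previous pass and
 * are still unused.
 */
#if 0
static LIST_HEAD(example_expiry_list);			/* hypothetical */
static struct delayed_work example_expiry_work;		/* hypothetical */

static void example_automount_created(struct vfsmount *newmnt)
{
	mntget(newmnt);		/* prevent immediate expiration */
	mnt_set_expiry(newmnt, &example_expiry_list);
}

static void example_expiry_worker(struct work_struct *work)
{
	mark_mounts_for_expiry(&example_expiry_list);
	schedule_delayed_work(&example_expiry_work, 30 * HZ);
}
#endif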
/*
 * Ripoff of 'select_parent()'
 *
 * search the list of submounts for a given mountpoint, and move any
 * shrinkable submounts to the 'graveyard' list.
 */
static int select_submounts(struct mount *parent, struct list_head *graveyard)
{
	struct mount *this_parent = parent;
	struct list_head *next;
	int found = 0;

repeat:
	next = this_parent->mnt_mounts.next;
resume:
	while (next != &this_parent->mnt_mounts) {
		struct list_head *tmp = next;
		struct mount *mnt = list_entry(tmp, struct mount, mnt_child);

		next = tmp->next;
		if (!(mnt->mnt.mnt_flags & MNT_SHRINKABLE))
			continue;
		/*
		 * Descend a level if the d_mounts list is non-empty.
		 */
		if (!list_empty(&mnt->mnt_mounts)) {
			this_parent = mnt;
			goto repeat;
		}

		if (!propagate_mount_busy(mnt, 1)) {
			list_move_tail(&mnt->mnt_expire, graveyard);
			found++;
		}
	}
	/*
	 * All done at this level ... ascend and resume the search
	 */
	if (this_parent != parent) {
		next = this_parent->mnt_child.next;
		this_parent = this_parent->mnt_parent;
		goto resume;
	}
	return found;
}
/*
 * process a list of expirable mountpoints with the intent of discarding any
 * submounts of a specific parent mountpoint
 *
 * mount_lock must be held for write
 */
static void shrink_submounts(struct mount *mnt)
{
	LIST_HEAD(graveyard);
	struct mount *m;

	/* extract submounts of 'mountpoint' from the expiration list */
	while (select_submounts(mnt, &graveyard)) {
		while (!list_empty(&graveyard)) {
			m = list_first_entry(&graveyard, struct mount,
						mnt_expire);
			touch_mnt_namespace(m->mnt_ns);
			umount_tree(m, UMOUNT_PROPAGATE|UMOUNT_SYNC);
		}
	}
}
/*
 * Some copy_from_user() implementations do not return the exact number of
 * bytes remaining to copy on a fault.  But copy_mount_options() requires that.
 * Note that this function differs from copy_from_user() in that it will oops
 * on bad values of `to', rather than returning a short copy.
 */
static long exact_copy_from_user(void *to, const void __user * from,
				 unsigned long n)
{
	char *t = to;
	const char __user *f = from;
	char c;

	if (!access_ok(VERIFY_READ, from, n))
		return n;

	while (n) {
		if (__get_user(c, f)) {
			memset(t, 0, n);
			break;
		}
		*t++ = c;
		f++;
		n--;
	}
	return n;
}
void *copy_mount_options(const void __user * data)
{
	int i;
	unsigned long size;
	char *copy;

	if (!data)
		return NULL;

	copy = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!copy)
		return ERR_PTR(-ENOMEM);

	/* We only care that *some* data at the address the user
	 * gave us is valid.  Just in case, we'll zero
	 * the remainder of the page.
	 */
	/* copy_from_user cannot cross TASK_SIZE ! */
	size = TASK_SIZE - (unsigned long)data;
	if (size > PAGE_SIZE)
		size = PAGE_SIZE;

	i = size - exact_copy_from_user(copy, data, size);
	if (!i) {
		kfree(copy);
		return ERR_PTR(-EFAULT);
	}
	if (i != PAGE_SIZE)
		memset(copy + i, 0, PAGE_SIZE - i);
	return copy;
}
char *copy_mount_string(const void __user *data)
{
	return data ? strndup_user(data, PAGE_SIZE) : NULL;
}
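/*
 * Example: a userspace sketch of what feeds the two helpers above.  The
 * filesystem type and device strings are copied with copy_mount_string(),
 * while the option string travels as the data page snapshotted by
 * copy_mount_options(), so at most PAGE_SIZE - 1 bytes of it survive.
 * Illustrative only; the mount point is hypothetical.
 */
#if 0
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* "size=16m" arrives in do_mount() as the zero-padded data page */
	if (mount("tmpfs", "/mnt/scratch", "tmpfs", 0, "size=16m"))
		perror("mount(tmpfs)");
	return 0;
}
#endif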
/*
 * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
 * be given to the mount() call (i.e.: read-only, no-dev, no-suid etc).
 *
 * data is a (void *) that can point to any structure up to
 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
 * information (or be NULL).
 *
 * Pre-0.97 versions of mount() didn't have a flags word.
 * When the flags word was introduced its top half was required
 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
 * Therefore, if this magic number is present, it carries no information
 * and must be discarded.
 */
long do_mount(const char *dev_name, const char __user *dir_name,
		const char *type_page, unsigned long flags, void *data_page)
{
	struct path path;
	unsigned int mnt_flags = 0, sb_flags;
	int retval = 0;

	/* Discard magic */
	if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
		flags &= ~MS_MGC_MSK;

	/* Basic sanity checks */
	if (data_page)
		((char *)data_page)[PAGE_SIZE - 1] = 0;

	if (flags & MS_NOUSER)
		return -EINVAL;

	/* ... and get the mountpoint */
	retval = user_path(dir_name, &path);
	if (retval)
		return retval;

	retval = security_sb_mount(dev_name, &path,
				   type_page, flags, data_page);
	if (!retval && !may_mount())
		retval = -EPERM;
	if (!retval && (flags & SB_MANDLOCK) && !may_mandlock())
		retval = -EPERM;
	if (retval)
		goto dput_out;

	/* Default to relatime unless overridden */
	if (!(flags & MS_NOATIME))
		mnt_flags |= MNT_RELATIME;

	/* Separate the per-mountpoint flags */
	if (flags & MS_NOSUID)
		mnt_flags |= MNT_NOSUID;
	if (flags & MS_NODEV)
		mnt_flags |= MNT_NODEV;
	if (flags & MS_NOEXEC)
		mnt_flags |= MNT_NOEXEC;
	if (flags & MS_NOATIME)
		mnt_flags |= MNT_NOATIME;
	if (flags & MS_NODIRATIME)
		mnt_flags |= MNT_NODIRATIME;
	if (flags & MS_STRICTATIME)
		mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME);
	if (flags & SB_RDONLY)
		mnt_flags |= MNT_READONLY;

	/* The default atime for remount is preservation */
	if ((flags & MS_REMOUNT) &&
	    ((flags & (MS_NOATIME | MS_NODIRATIME | MS_RELATIME |
		       MS_STRICTATIME)) == 0)) {
		mnt_flags &= ~MNT_ATIME_MASK;
		mnt_flags |= path.mnt->mnt_flags & MNT_ATIME_MASK;
	}

	sb_flags = flags & (SB_RDONLY |
			    SB_SYNCHRONOUS |
			    SB_MANDLOCK |
			    SB_DIRSYNC |
			    SB_SILENT |
			    SB_POSIXACL |
			    SB_LAZYTIME |
			    SB_I_VERSION);

	if (flags & MS_REMOUNT)
		retval = do_remount(&path, flags, sb_flags, mnt_flags,
				    data_page);
	else if (flags & MS_BIND)
		retval = do_loopback(&path, dev_name, flags & MS_REC);
	else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
		retval = do_change_type(&path, flags);
	else if (flags & MS_MOVE)
		retval = do_move_mount(&path, dev_name);
	else
		retval = do_new_mount(&path, type_page, sb_flags, mnt_flags,
				      dev_name, data_page);
dput_out:
	path_put(&path);
	return retval;
}
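/*
 * Example: the userspace view of the dispatch above; each flag group picks
 * one of the do_*() helpers.  Illustrative only -- the paths are
 * hypothetical and error handling is omitted.
 */
#if 0
#include <sys/mount.h>

void example_dispatch(void)
{
	/* MS_BIND          -> do_loopback() */
	mount("/src", "/dst", NULL, MS_BIND, NULL);
	/* MS_REMOUNT       -> do_remount() */
	mount(NULL, "/dst", NULL, MS_REMOUNT | MS_RDONLY, NULL);
	/* MS_SHARED et al. -> do_change_type() */
	mount(NULL, "/dst", NULL, MS_SHARED, NULL);
	/* MS_MOVE          -> do_move_mount() */
	mount("/dst", "/elsewhere", NULL, MS_MOVE, NULL);
	/* anything else    -> do_new_mount() */
	mount("proc", "/proc", "proc", 0, NULL);
}
#endif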
static struct ucounts *inc_mnt_namespaces(struct user_namespace *ns)
{
	return inc_ucount(ns, current_euid(), UCOUNT_MNT_NAMESPACES);
}

static void dec_mnt_namespaces(struct ucounts *ucounts)
{
	dec_ucount(ucounts, UCOUNT_MNT_NAMESPACES);
}
static void free_mnt_ns(struct mnt_namespace *ns)
{
	ns_free_inum(&ns->ns);
	dec_mnt_namespaces(ns->ucounts);
	put_user_ns(ns->user_ns);
	kfree(ns);
}
/*
 * Assign a sequence number so we can detect when we attempt to bind
 * mount a reference to an older mount namespace into the current
 * mount namespace, preventing reference counting loops.  A 64bit
 * number incrementing even at an absurd 10GHz would take about 58
 * years to wrap, and real namespace creation is many orders of
 * magnitude slower than that, so the wrap is effectively never and
 * we can ignore the possibility.
 */
static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);

static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
{
	struct mnt_namespace *new_ns;
	struct ucounts *ucounts;
	int ret;

	ucounts = inc_mnt_namespaces(user_ns);
	if (!ucounts)
		return ERR_PTR(-ENOSPC);

	new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL);
	if (!new_ns) {
		dec_mnt_namespaces(ucounts);
		return ERR_PTR(-ENOMEM);
	}
	ret = ns_alloc_inum(&new_ns->ns);
	if (ret) {
		kfree(new_ns);
		dec_mnt_namespaces(ucounts);
		return ERR_PTR(ret);
	}
	new_ns->ns.ops = &mntns_operations;
	new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
	atomic_set(&new_ns->count, 1);
	new_ns->root = NULL;
	INIT_LIST_HEAD(&new_ns->list);
	init_waitqueue_head(&new_ns->poll);
	new_ns->event = 0;
	new_ns->user_ns = get_user_ns(user_ns);
	new_ns->ucounts = ucounts;
	new_ns->mounts = 0;
	new_ns->pending_mounts = 0;
	return new_ns;
}
struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
		struct user_namespace *user_ns, struct fs_struct *new_fs)
{
	struct mnt_namespace *new_ns;
	struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
	struct mount *p, *q;
	struct mount *old;
	struct mount *new;
	int copy_flags;

	BUG_ON(!ns);

	if (likely(!(flags & CLONE_NEWNS))) {
		get_mnt_ns(ns);
		return ns;
	}

	old = ns->root;

	new_ns = alloc_mnt_ns(user_ns);
	if (IS_ERR(new_ns))
		return new_ns;

	namespace_lock();
	/* First pass: copy the tree topology */
	copy_flags = CL_COPY_UNBINDABLE | CL_EXPIRE;
	if (user_ns != ns->user_ns)
		copy_flags |= CL_SHARED_TO_SLAVE | CL_UNPRIVILEGED;
	new = copy_tree(old, old->mnt.mnt_root, copy_flags);
	if (IS_ERR(new)) {
		namespace_unlock();
		free_mnt_ns(new_ns);
		return ERR_CAST(new);
	}
	new_ns->root = new;
	list_add_tail(&new_ns->list, &new->mnt_list);

	/*
	 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
	 * as belonging to new namespace.  We have already acquired a private
	 * fs_struct, so tsk->fs->lock is not needed.
	 */
	p = old;
	q = new;
	while (p) {
		q->mnt_ns = new_ns;
		new_ns->mounts++;
		if (new_fs) {
			if (&p->mnt == new_fs->root.mnt) {
				new_fs->root.mnt = mntget(&q->mnt);
				rootmnt = &p->mnt;
			}
			if (&p->mnt == new_fs->pwd.mnt) {
				new_fs->pwd.mnt = mntget(&q->mnt);
				pwdmnt = &p->mnt;
			}
		}
		p = next_mnt(p, old);
		q = next_mnt(q, new);
		if (!q)
			break;
		while (p->mnt.mnt_root != q->mnt.mnt_root)
			p = next_mnt(p, old);
	}
	namespace_unlock();

	if (rootmnt)
		mntput(rootmnt);
	if (pwdmnt)
		mntput(pwdmnt);

	return new_ns;
}
/**
 * create_mnt_ns - creates a private namespace and adds a root filesystem
 * @mnt: pointer to the new root filesystem mountpoint
 */
static struct mnt_namespace *create_mnt_ns(struct vfsmount *m)
{
	struct mnt_namespace *new_ns = alloc_mnt_ns(&init_user_ns);
	if (!IS_ERR(new_ns)) {
		struct mount *mnt = real_mount(m);
		mnt->mnt_ns = new_ns;
		new_ns->root = mnt;
		new_ns->mounts++;
		list_add(&mnt->mnt_list, &new_ns->list);
	} else {
		mntput(m);
	}
	return new_ns;
}
struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
{
	struct mnt_namespace *ns;
	struct super_block *s;
	struct path path;
	int err;

	ns = create_mnt_ns(mnt);
	if (IS_ERR(ns))
		return ERR_CAST(ns);

	err = vfs_path_lookup(mnt->mnt_root, mnt,
			name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);

	put_mnt_ns(ns);

	if (err)
		return ERR_PTR(err);

	/* trade a vfsmount reference for active sb one */
	s = path.mnt->mnt_sb;
	atomic_inc(&s->s_active);
	mntput(path.mnt);
	/* lock the sucker */
	down_write(&s->s_umount);
	/* ... and return the root of (sub)tree on it */
	return path.dentry;
}
EXPORT_SYMBOL(mount_subtree);
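/*
 * Example: a minimal sketch of the intended use of mount_subtree(), in the
 * style of callers that expose an interior directory (e.g. a subvolume or
 * referral) as the mount root.  The helper and its arguments below are
 * hypothetical, not part of this file.
 */
#if 0
static struct dentry *example_mount_interior(struct file_system_type *type,
					     int flags, const char *dev,
					     void *data, const char *subdir)
{
	struct vfsmount *mnt = vfs_kern_mount(type, flags, dev, data);

	if (IS_ERR(mnt))
		return ERR_CAST(mnt);
	/* consumes mnt; returns subdir's dentry with its sb held active */
	return mount_subtree(mnt, subdir);
}
#endif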
SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
		char __user *, type, unsigned long, flags, void __user *, data)
{
	int ret;
	char *kernel_type;
	char *kernel_dev;
	void *options;

	kernel_type = copy_mount_string(type);
	ret = PTR_ERR(kernel_type);
	if (IS_ERR(kernel_type))
		goto out_type;

	kernel_dev = copy_mount_string(dev_name);
	ret = PTR_ERR(kernel_dev);
	if (IS_ERR(kernel_dev))
		goto out_dev;

	options = copy_mount_options(data);
	ret = PTR_ERR(options);
	if (IS_ERR(options))
		goto out_data;

	ret = do_mount(kernel_dev, dir_name, kernel_type, flags, options);

	kfree(options);
out_data:
	kfree(kernel_dev);
out_dev:
	kfree(kernel_type);
out_type:
	return ret;
}
/*
 * Return true if path is reachable from root
 *
 * namespace_sem or mount_lock is held
 */
bool is_path_reachable(struct mount *mnt, struct dentry *dentry,
			 const struct path *root)
{
	while (&mnt->mnt != root->mnt && mnt_has_parent(mnt)) {
		dentry = mnt->mnt_mountpoint;
		mnt = mnt->mnt_parent;
	}
	return &mnt->mnt == root->mnt && is_subdir(dentry, root->dentry);
}

bool path_is_under(const struct path *path1, const struct path *path2)
{
	bool res;
	read_seqlock_excl(&mount_lock);
	res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2);
	read_sequnlock_excl(&mount_lock);
	return res;
}
EXPORT_SYMBOL(path_is_under);
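/*
 * Worked example: with a mount at /a and another mount on top of /a/b,
 * take path1 = /a/b/c and path2 = /a.  is_path_reachable() climbs from
 * the mount at /a/b to its parent (recording the /a/b mountpoint dentry),
 * lands on the mount at /a, and then succeeds via is_subdir(), so
 * path_is_under(path1, path2) returns true.
 */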
/*
 * pivot_root Semantics:
 * Moves the root file system of the current process to the directory put_old,
 * makes new_root as the new root file system of the current process, and sets
 * root/cwd of all processes which had them on the current root to new_root.
 *
 * Restrictions:
 * The new_root and put_old must be directories, and must not be on the
 * same file system as the current process root. The put_old must be
 * underneath new_root, i.e. adding a non-zero number of /.. to the string
 * pointed to by put_old must yield the same directory as new_root. No other
 * file system may be mounted on put_old. After all, new_root is a mountpoint.
 *
 * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem.
 * See Documentation/filesystems/ramfs-rootfs-initramfs.txt for alternatives
 * in this situation.
 *
 * Notes:
 *  - we don't move root/cwd if they are not at the root (reason: if something
 *    cared enough to change them, it's probably wrong to force them elsewhere)
 *  - it's okay to pick a root that isn't the root of a file system, e.g.
 *    /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
 *    though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
 *    first.
 */
SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
		const char __user *, put_old)
{
	struct path new, old, parent_path, root_parent, root;
	struct mount *new_mnt, *root_mnt, *old_mnt;
	struct mountpoint *old_mp, *root_mp;
	int error;

	if (!may_mount())
		return -EPERM;

	error = user_path_dir(new_root, &new);
	if (error)
		goto out0;

	error = user_path_dir(put_old, &old);
	if (error)
		goto out1;

	error = security_sb_pivotroot(&old, &new);
	if (error)
		goto out2;

	get_fs_root(current->fs, &root);
	old_mp = lock_mount(&old);
	error = PTR_ERR(old_mp);
	if (IS_ERR(old_mp))
		goto out3;

	error = -EINVAL;
	new_mnt = real_mount(new.mnt);
	root_mnt = real_mount(root.mnt);
	old_mnt = real_mount(old.mnt);
	if (IS_MNT_SHARED(old_mnt) ||
		IS_MNT_SHARED(new_mnt->mnt_parent) ||
		IS_MNT_SHARED(root_mnt->mnt_parent))
		goto out4;
	if (!check_mnt(root_mnt) || !check_mnt(new_mnt))
		goto out4;
	if (new_mnt->mnt.mnt_flags & MNT_LOCKED)
		goto out4;
	error = -ENOENT;
	if (d_unlinked(new.dentry))
		goto out4;
	error = -EBUSY;
	if (new_mnt == root_mnt || old_mnt == root_mnt)
		goto out4; /* loop, on the same file system  */
	error = -EINVAL;
	if (root.mnt->mnt_root != root.dentry)
		goto out4; /* not a mountpoint */
	if (!mnt_has_parent(root_mnt))
		goto out4; /* not attached */
	root_mp = root_mnt->mnt_mp;
	if (new.mnt->mnt_root != new.dentry)
		goto out4; /* not a mountpoint */
	if (!mnt_has_parent(new_mnt))
		goto out4; /* not attached */
	/* make sure we can reach put_old from new_root */
	if (!is_path_reachable(old_mnt, old.dentry, &new))
		goto out4;
	/* make certain new is below the root */
	if (!is_path_reachable(new_mnt, new.dentry, &root))
		goto out4;
	root_mp->m_count++; /* pin it so it won't go away */
	lock_mount_hash();
	detach_mnt(new_mnt, &parent_path);
	detach_mnt(root_mnt, &root_parent);
	if (root_mnt->mnt.mnt_flags & MNT_LOCKED) {
		new_mnt->mnt.mnt_flags |= MNT_LOCKED;
		root_mnt->mnt.mnt_flags &= ~MNT_LOCKED;
	}
	/* mount old root on put_old */
	attach_mnt(root_mnt, old_mnt, old_mp);
	/* mount new_root on / */
	attach_mnt(new_mnt, real_mount(root_parent.mnt), root_mp);
	touch_mnt_namespace(current->nsproxy->mnt_ns);
	/* A moved mount should not expire automatically */
	list_del_init(&new_mnt->mnt_expire);
	put_mountpoint(root_mp);
	unlock_mount_hash();
	chroot_fs_refs(&root, &new);
	error = 0;
out4:
	unlock_mount(old_mp);
	if (!error) {
		path_put(&root_parent);
		path_put(&parent_path);
	}
out3:
	path_put(&root);
out2:
	path_put(&old);
out1:
	path_put(&new);
out0:
	return error;
}
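/*
 * Example: a userspace sketch of the sequence the comment above implies,
 * as used by initramfs/container setups.  NEW_ROOT must itself be a mount
 * point and the put_old directory must live beneath it; both names here
 * are hypothetical.
 */
#if 0
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#define NEW_ROOT	"/newroot"

int main(void)
{
	if (chdir(NEW_ROOT))
		perror("chdir");
	/* old root ends up on NEW_ROOT/oldroot; NEW_ROOT becomes "/" */
	if (syscall(SYS_pivot_root, ".", "oldroot"))
		perror("pivot_root");
	/* the kernel may or may not have updated cwd; be explicit */
	if (chdir("/"))
		perror("chdir(/)");
	return 0;
}
#endif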
static void __init init_mount_tree(void)
{
	struct vfsmount *mnt;
	struct mnt_namespace *ns;
	struct path root;
	struct file_system_type *type;

	type = get_fs_type("rootfs");
	if (!type)
		panic("Can't find rootfs type");
	mnt = vfs_kern_mount(type, 0, "rootfs", NULL);
	put_filesystem(type);
	if (IS_ERR(mnt))
		panic("Can't create rootfs");

	ns = create_mnt_ns(mnt);
	if (IS_ERR(ns))
		panic("Can't allocate initial namespace");

	init_task.nsproxy->mnt_ns = ns;
	get_mnt_ns(ns);

	root.mnt = mnt;
	root.dentry = mnt->mnt_root;
	mnt->mnt_flags |= MNT_LOCKED;

	set_fs_pwd(current->fs, &root);
	set_fs_root(current->fs, &root);
}
void __init mnt_init(void)
{
	int err;

	mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),
			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

	mount_hashtable = alloc_large_system_hash("Mount-cache",
				sizeof(struct hlist_head),
				mhash_entries, 19,
				HASH_ZERO,
				&m_hash_shift, &m_hash_mask, 0, 0);
	mountpoint_hashtable = alloc_large_system_hash("Mountpoint-cache",
				sizeof(struct hlist_head),
				mphash_entries, 19,
				HASH_ZERO,
				&mp_hash_shift, &mp_hash_mask, 0, 0);

	if (!mount_hashtable || !mountpoint_hashtable)
		panic("Failed to allocate mount hash table\n");

	kernfs_init();

	err = sysfs_init();
	if (err)
		printk(KERN_WARNING "%s: sysfs_init error: %d\n",
			__func__, err);
	fs_kobj = kobject_create_and_add("fs", NULL);
	if (!fs_kobj)
		printk(KERN_WARNING "%s: kobj create error\n", __func__);
	init_rootfs();
	init_mount_tree();
}
void put_mnt_ns(struct mnt_namespace *ns)
{
	if (!atomic_dec_and_test(&ns->count))
		return;
	drop_collected_mounts(&ns->root->mnt);
	free_mnt_ns(ns);
}
struct vfsmount *kern_mount_data(struct file_system_type *type, void *data)
{
	struct vfsmount *mnt;
	mnt = vfs_kern_mount(type, SB_KERNMOUNT, type->name, data);
	if (!IS_ERR(mnt)) {
		/*
		 * it is a longterm mount, don't release mnt until
		 * we unmount before file sys is unregistered
		 */
		real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL;
	}
	return mnt;
}
EXPORT_SYMBOL_GPL(kern_mount_data);
void kern_unmount(struct vfsmount *mnt)
{
	/* release long term mount so mount point can be released */
	if (!IS_ERR_OR_NULL(mnt)) {
		real_mount(mnt)->mnt_ns = NULL;
		synchronize_rcu();	/* yecchhh... */
		mntput(mnt);
	}
}
EXPORT_SYMBOL(kern_unmount);
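/*
 * Example: a minimal sketch of pairing kern_mount_data() with
 * kern_unmount() for a kernel-internal filesystem instance.  The
 * example_fs_type and module hooks below are hypothetical.
 */
#if 0
static struct vfsmount *example_mnt;

static int __init example_init(void)
{
	example_mnt = kern_mount_data(&example_fs_type, NULL);
	return PTR_ERR_OR_ZERO(example_mnt);
}

static void __exit example_exit(void)
{
	kern_unmount(example_mnt);	/* tolerates ERR_PTR and NULL */
}
#endif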
bool our_mnt(struct vfsmount *mnt)
{
	return check_mnt(real_mount(mnt));
}
bool current_chrooted(void)
{
	/* Does the current process have a non-standard root */
	struct path ns_root;
	struct path fs_root;
	bool chrooted;

	/* Find the namespace root */
	ns_root.mnt = &current->nsproxy->mnt_ns->root->mnt;
	ns_root.dentry = ns_root.mnt->mnt_root;
	path_get(&ns_root);
	while (d_mountpoint(ns_root.dentry) && follow_down_one(&ns_root))
		;

	get_fs_root(current->fs, &fs_root);

	chrooted = !path_equal(&fs_root, &ns_root);

	path_put(&fs_root);
	path_put(&ns_root);

	return chrooted;
}
static bool mnt_already_visible(struct mnt_namespace *ns, struct vfsmount *new,
				int *new_mnt_flags)
{
	int new_flags = *new_mnt_flags;
	struct mount *mnt;
	bool visible = false;

	down_read(&namespace_sem);
	list_for_each_entry(mnt, &ns->list, mnt_list) {
		struct mount *child;
		int mnt_flags;

		if (mnt->mnt.mnt_sb->s_type != new->mnt_sb->s_type)
			continue;

		/* This mount is not fully visible if its root directory
		 * is not the root directory of the filesystem.
		 */
		if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root)
			continue;

		/* A local view of the mount flags */
		mnt_flags = mnt->mnt.mnt_flags;

		/* Don't miss readonly hidden in the superblock flags */
		if (sb_rdonly(mnt->mnt.mnt_sb))
			mnt_flags |= MNT_LOCK_READONLY;

		/* Verify the mount flags are equal to or more permissive
		 * than the proposed new mount.
		 */
		if ((mnt_flags & MNT_LOCK_READONLY) &&
		    !(new_flags & MNT_READONLY))
			continue;
		if ((mnt_flags & MNT_LOCK_ATIME) &&
		    ((mnt_flags & MNT_ATIME_MASK) != (new_flags & MNT_ATIME_MASK)))
			continue;

		/* This mount is not fully visible if there are any
		 * locked child mounts that cover anything except for
		 * empty directories.
		 */
		list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
			struct inode *inode = child->mnt_mountpoint->d_inode;
			/* Only worry about locked mounts */
			if (!(child->mnt.mnt_flags & MNT_LOCKED))
				continue;
			/* Is the directory permanently empty? */
			if (!is_empty_dir_inode(inode))
				goto next;
		}
		/* Preserve the locked attributes */
		*new_mnt_flags |= mnt_flags & (MNT_LOCK_READONLY | \
					       MNT_LOCK_ATIME);
		visible = true;
		goto found;
	next:	;
	}
found:
	up_read(&namespace_sem);
	return visible;
}
static bool mount_too_revealing(struct vfsmount *mnt, int *new_mnt_flags)
{
	const unsigned long required_iflags = SB_I_NOEXEC | SB_I_NODEV;
	struct mnt_namespace *ns = current->nsproxy->mnt_ns;
	unsigned long s_iflags;

	if (ns->user_ns == &init_user_ns)
		return false;

	/* Can this filesystem be too revealing? */
	s_iflags = mnt->mnt_sb->s_iflags;
	if (!(s_iflags & SB_I_USERNS_VISIBLE))
		return false;

	if ((s_iflags & required_iflags) != required_iflags) {
		WARN_ONCE(1, "Expected s_iflags to contain 0x%lx\n",
			  required_iflags);
		return true;
	}

	return !mnt_already_visible(ns, mnt, new_mnt_flags);
}
bool mnt_may_suid(struct vfsmount *mnt)
{
	/*
	 * Foreign mounts (accessed via fchdir or through /proc
	 * symlinks) are always treated as if they are nosuid.  This
	 * prevents namespaces from trusting potentially unsafe
	 * suid/sgid bits, file caps, or security labels that originate
	 * in other namespaces.
	 */
	return !(mnt->mnt_flags & MNT_NOSUID) && check_mnt(real_mount(mnt)) &&
	       current_in_userns(mnt->mnt_sb->s_user_ns);
}
static struct ns_common *mntns_get(struct task_struct *task)
{
	struct ns_common *ns = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy) {
		ns = &nsproxy->mnt_ns->ns;
		get_mnt_ns(to_mnt_ns(ns));
	}
	task_unlock(task);

	return ns;
}
static void mntns_put(struct ns_common *ns)
{
	put_mnt_ns(to_mnt_ns(ns));
}
static int mntns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
	struct fs_struct *fs = current->fs;
	struct mnt_namespace *mnt_ns = to_mnt_ns(ns), *old_mnt_ns;
	struct path root;
	int err;

	if (!ns_capable(mnt_ns->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(current_user_ns(), CAP_SYS_CHROOT) ||
	    !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
		return -EPERM;

	if (fs->users != 1)
		return -EINVAL;

	get_mnt_ns(mnt_ns);
	old_mnt_ns = nsproxy->mnt_ns;
	nsproxy->mnt_ns = mnt_ns;

	/* Find the root */
	err = vfs_path_lookup(mnt_ns->root->mnt.mnt_root, &mnt_ns->root->mnt,
				"/", LOOKUP_DOWN, &root);
	if (err) {
		/* revert to old namespace */
		nsproxy->mnt_ns = old_mnt_ns;
		put_mnt_ns(mnt_ns);
		return err;
	}

	put_mnt_ns(old_mnt_ns);

	/* Update the pwd and root */
	set_fs_pwd(fs, &root);
	set_fs_root(fs, &root);
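	path_put(&root);
	return 0;
}

/*
 * Example: a userspace sketch of reaching mntns_install() via setns(2).
 * The pid is hypothetical; the caller needs the capabilities checked
 * above and must not share its fs_struct with another task.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>

int main(void)
{
	int fd = open("/proc/1234/ns/mnt", O_RDONLY);

	if (fd < 0 || setns(fd, CLONE_NEWNS))
		perror("setns(CLONE_NEWNS)");
	return 0;
}
#endif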
static struct user_namespace *mntns_owner(struct ns_common *ns)
{
	return to_mnt_ns(ns)->user_ns;
}

const struct proc_ns_operations mntns_operations = {
	.name		= "mnt",
	.type		= CLONE_NEWNS,
	.get		= mntns_get,
	.put		= mntns_put,
	.install	= mntns_install,
	.owner		= mntns_owner,
};