/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  super.c contains code to handle: - mount structures
 *                                   - filesystem drivers list
 *                                   - umount system call
 *
 * GK 2/5/95  -  Changed to support mounting the root fs via NFS
 *
 *  Added kerneld support: Jacques Gelinas and Bjorn Ekwall
 *  Added change_root: Werner Almesberger & Hans Lermen, Feb '96
 *  Added options to /proc/mounts:
 *    Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996.
 *  Added devfs support: Richard Gooch <rgooch@atnf.csiro.au>, 13-JAN-1998
 *  Heavily rewritten for 'one fs - one tree' dcache architecture. AV, Mar 2000
 */
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/acct.h>
#include <linux/blkdev.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/writeback.h>		/* for the emergency remount stuff */
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/rculist_bl.h>
#include <linux/cleancache.h>
#include <linux/fsnotify.h>
#include <linux/lockdep.h>
LIST_HEAD(super_blocks);
DEFINE_SPINLOCK(sb_lock);
static char *sb_writers_name[SB_FREEZE_LEVELS] = {
	"sb_writers",
	"sb_pagefaults",
	"sb_internal",
};
/*
 * One thing we have to be careful of with a per-sb shrinker is that we don't
 * drop the last active reference to the superblock from within the shrinker.
 * If that happens we could trigger unregistering the shrinker from within the
 * shrinker path and that leads to deadlock on the shrinker_rwsem. Hence we
 * take a passive reference to the superblock to avoid this from occurring.
 */
static int prune_super(struct shrinker *shrink, struct shrink_control *sc)
{
	struct super_block *sb;
	int	fs_objects = 0;
	int	total_objects;

	sb = container_of(shrink, struct super_block, s_shrink);

	/*
	 * Deadlock avoidance. We may hold various FS locks, and we don't want
	 * to recurse into the FS that called us in clear_inode() and friends..
	 */
	if (sc->nr_to_scan && !(sc->gfp_mask & __GFP_FS))
		return -1;

	if (!grab_super_passive(sb))
		return -1;

	if (sb->s_op && sb->s_op->nr_cached_objects)
		fs_objects = sb->s_op->nr_cached_objects(sb);

	total_objects = sb->s_nr_dentry_unused +
			sb->s_nr_inodes_unused + fs_objects + 1;

	if (sc->nr_to_scan) {
		int	dentries;
		int	inodes;

		/* proportion the scan between the caches */
		dentries = (sc->nr_to_scan * sb->s_nr_dentry_unused) /
							total_objects;
		inodes = (sc->nr_to_scan * sb->s_nr_inodes_unused) /
							total_objects;
		if (fs_objects)
			fs_objects = (sc->nr_to_scan * fs_objects) /
							total_objects;
		/*
		 * prune the dcache first as the icache is pinned by it, then
		 * prune the icache, followed by the filesystem specific caches
		 */
		prune_dcache_sb(sb, dentries);
		prune_icache_sb(sb, inodes);

		if (fs_objects && sb->s_op->free_cached_objects) {
			sb->s_op->free_cached_objects(sb, fs_objects);
			fs_objects = sb->s_op->nr_cached_objects(sb);
		}
		total_objects = sb->s_nr_dentry_unused +
				sb->s_nr_inodes_unused + fs_objects;
	}

	total_objects = (total_objects / 100) * sysctl_vfs_cache_pressure;
	drop_super(sb);
	return total_objects;
}
static int init_sb_writers(struct super_block *s, struct file_system_type *type)
{
	int err;
	int i;

	for (i = 0; i < SB_FREEZE_LEVELS; i++) {
		err = percpu_counter_init(&s->s_writers.counter[i], 0);
		if (err < 0)
			goto err_out;
		lockdep_init_map(&s->s_writers.lock_map[i], sb_writers_name[i],
				 &type->s_writers_key[i], 0);
	}
	init_waitqueue_head(&s->s_writers.wait);
	init_waitqueue_head(&s->s_writers.wait_unfrozen);
	return 0;
err_out:
	while (--i >= 0)
		percpu_counter_destroy(&s->s_writers.counter[i]);
	return err;
}

static void destroy_sb_writers(struct super_block *s)
{
	int i;

	for (i = 0; i < SB_FREEZE_LEVELS; i++)
		percpu_counter_destroy(&s->s_writers.counter[i]);
}
/**
 *	alloc_super	-	create new superblock
 *	@type:	filesystem type superblock should belong to
 *	@flags: the mount flags
 *
 *	Allocates and initializes a new &struct super_block.  alloc_super()
 *	returns a pointer to a new superblock or %NULL if allocation had failed.
 */
static struct super_block *alloc_super(struct file_system_type *type, int flags)
{
	struct super_block *s = kzalloc(sizeof(struct super_block), GFP_USER);
	static const struct super_operations default_op;

	if (s) {
		if (security_sb_alloc(s)) {
			/*
			 * We cannot call security_sb_free() without
			 * security_sb_alloc() succeeding. So bail out manually
			 */
			kfree(s);
			s = NULL;
			goto out;
		}
		if (init_sb_writers(s, type))
			goto err_out;
		s->s_flags = flags;
		s->s_bdi = &default_backing_dev_info;
		INIT_HLIST_NODE(&s->s_instances);
		INIT_HLIST_BL_HEAD(&s->s_anon);
		INIT_LIST_HEAD(&s->s_inodes);
		INIT_LIST_HEAD(&s->s_dentry_lru);
		INIT_LIST_HEAD(&s->s_inode_lru);
		spin_lock_init(&s->s_inode_lru_lock);
		INIT_LIST_HEAD(&s->s_mounts);
		init_rwsem(&s->s_umount);
		lockdep_set_class(&s->s_umount, &type->s_umount_key);
		/*
		 * sget() can have s_umount recursion.
		 *
		 * When it cannot find a suitable sb, it allocates a new
		 * one (this one), and tries again to find a suitable old
		 * one.
		 *
		 * In case that succeeds, it will acquire the s_umount
		 * lock of the old one. Since these are clearly distinct
		 * locks, and this object isn't exposed yet, there's no
		 * risk of deadlocks.
		 *
		 * Annotate this by putting this lock in a different
		 * subclass.
		 */
		down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
		s->s_count = 1;
		atomic_set(&s->s_active, 1);
		mutex_init(&s->s_vfs_rename_mutex);
		lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
		mutex_init(&s->s_dquot.dqio_mutex);
		mutex_init(&s->s_dquot.dqonoff_mutex);
		init_rwsem(&s->s_dquot.dqptr_sem);
		s->s_maxbytes = MAX_NON_LFS;
		s->s_op = &default_op;
		s->s_time_gran = 1000000000;
		s->cleancache_poolid = -1;

		s->s_shrink.seeks = DEFAULT_SEEKS;
		s->s_shrink.shrink = prune_super;
		s->s_shrink.batch = 1024;
	}
out:
	return s;
err_out:
	security_sb_free(s);
	destroy_sb_writers(s);
	kfree(s);
	s = NULL;
	goto out;
}
/**
 *	destroy_super	-	frees a superblock
 *	@s: superblock to free
 *
 *	Frees a superblock.
 */
static inline void destroy_super(struct super_block *s)
{
	destroy_sb_writers(s);
	security_sb_free(s);
	WARN_ON(!list_empty(&s->s_mounts));
	kfree(s);
}
/* Superblock refcounting  */

/*
 * Drop a superblock's refcount.  The caller must hold sb_lock.
 */
static void __put_super(struct super_block *sb)
{
	if (!--sb->s_count) {
		list_del_init(&sb->s_list);
		destroy_super(sb);
	}
}

/**
 *	put_super	-	drop a temporary reference to superblock
 *	@sb: superblock in question
 *
 *	Drops a temporary reference, frees superblock if there's no
 *	references left.
 */
static void put_super(struct super_block *sb)
{
	spin_lock(&sb_lock);
	__put_super(sb);
	spin_unlock(&sb_lock);
}
/**
 *	deactivate_locked_super	-	drop an active reference to superblock
 *	@s: superblock to deactivate
 *
 *	Drops an active reference to superblock, converting it into a temporary
 *	one if there are no other active references left.  In that case we
 *	tell fs driver to shut it down and drop the temporary reference we
 *	had just acquired.
 *
 *	Caller holds exclusive lock on superblock; that lock is released.
 */
void deactivate_locked_super(struct super_block *s)
{
	struct file_system_type *fs = s->s_type;
	if (atomic_dec_and_test(&s->s_active)) {
		cleancache_invalidate_fs(s);
		fs->kill_sb(s);

		/* caches are now gone, we can safely kill the shrinker now */
		unregister_shrinker(&s->s_shrink);

		put_filesystem(fs);
		put_super(s);
	} else {
		up_write(&s->s_umount);
	}
}

EXPORT_SYMBOL(deactivate_locked_super);
/**
 *	deactivate_super	-	drop an active reference to superblock
 *	@s: superblock to deactivate
 *
 *	Variant of deactivate_locked_super(), except that superblock is *not*
 *	locked by caller.  If we are going to drop the final active reference,
 *	lock will be acquired prior to that.
 */
void deactivate_super(struct super_block *s)
{
	if (!atomic_add_unless(&s->s_active, -1, 1)) {
		down_write(&s->s_umount);
		deactivate_locked_super(s);
	}
}

EXPORT_SYMBOL(deactivate_super);
/**
 *	grab_super - acquire an active reference
 *	@s: reference we are trying to make active
 *
 *	Tries to acquire an active reference.  grab_super() is used when we
 *	had just found a superblock in super_blocks or fs_type->fs_supers
 *	and want to turn it into a full-blown active reference.  grab_super()
 *	is called with sb_lock held and drops it.  Returns 1 in case of
 *	success, 0 if we had failed (superblock contents were already dead or
 *	dying when grab_super() had been called).  Note that this is only
 *	called for superblocks not in rundown mode (== ones still on ->fs_supers
 *	of their type), so increment of ->s_count is OK here.
 */
static int grab_super(struct super_block *s) __releases(sb_lock)
{
	s->s_count++;
	spin_unlock(&sb_lock);
	down_write(&s->s_umount);
	if ((s->s_flags & MS_BORN) && atomic_inc_not_zero(&s->s_active)) {
		put_super(s);
		return 1;
	}
	up_write(&s->s_umount);
	put_super(s);
	return 0;
}
/*
 *	grab_super_passive - acquire a passive reference
 *	@sb: reference we are trying to grab
 *
 *	Tries to acquire a passive reference. This is used in places where we
 *	cannot take an active reference but we need to ensure that the
 *	superblock does not go away while we are working on it. It returns
 *	false if a reference was not gained, and returns true with the s_umount
 *	lock held in read mode if a reference is gained. On successful return,
 *	the caller must drop the s_umount lock and the passive reference when
 *	done.
 */
bool grab_super_passive(struct super_block *sb)
{
	spin_lock(&sb_lock);
	if (hlist_unhashed(&sb->s_instances)) {
		spin_unlock(&sb_lock);
		return false;
	}

	sb->s_count++;
	spin_unlock(&sb_lock);

	if (down_read_trylock(&sb->s_umount)) {
		if (sb->s_root && (sb->s_flags & MS_BORN))
			return true;
		up_read(&sb->s_umount);
	}

	put_super(sb);
	return false;
}
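/*
 * Illustrative sketch (not part of the original file): a caller following the
 * convention documented above.  It takes the passive reference, works with the
 * superblock under s_umount held for read, and then drops both via
 * drop_super(), which pairs up_read() with put_super().  The helper name
 * inspect_sb() is hypothetical.
 *
 *	static void inspect_sb(struct super_block *sb)
 *	{
 *		if (!grab_super_passive(sb))
 *			return;
 *		... examine sb; it cannot be unmounted under us here ...
 *		drop_super(sb);
 *	}
 */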
/**
 *	generic_shutdown_super	-	common helper for ->kill_sb()
 *	@sb: superblock to kill
 *
 *	generic_shutdown_super() does all fs-independent work on superblock
 *	shutdown.  Typical ->kill_sb() should pick all fs-specific objects
 *	that need destruction out of superblock, call generic_shutdown_super()
 *	and release aforementioned objects.  Note: dentries and inodes _are_
 *	taken care of and do not need specific handling.
 *
 *	Upon calling this function, the filesystem may no longer alter or
 *	rearrange the set of dentries belonging to this super_block, nor may it
 *	change the attachments of dentries to inodes.
 */
void generic_shutdown_super(struct super_block *sb)
{
	const struct super_operations *sop = sb->s_op;

	if (sb->s_root) {
		shrink_dcache_for_umount(sb);
		sync_filesystem(sb);
		sb->s_flags &= ~MS_ACTIVE;

		fsnotify_unmount_inodes(&sb->s_inodes);

		evict_inodes(sb);

		if (sop->put_super)
			sop->put_super(sb);

		if (!list_empty(&sb->s_inodes)) {
			printk("VFS: Busy inodes after unmount of %s. "
			   "Self-destruct in 5 seconds.  Have a nice day...\n",
			   sb->s_id);
		}
	}
	spin_lock(&sb_lock);
	/* should be initialized for __put_super_and_need_restart() */
	hlist_del_init(&sb->s_instances);
	spin_unlock(&sb_lock);
	up_write(&sb->s_umount);
}

EXPORT_SYMBOL(generic_shutdown_super);
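/*
 * Illustrative sketch (not part of the original file): the "typical ->kill_sb()"
 * shape described in the comment above - pull the fs-private object out of the
 * superblock, let generic_shutdown_super() tear down dentries and inodes, then
 * free the private data.  examplefs_kill_sb() and struct examplefs_sb_info are
 * hypothetical names.
 *
 *	static void examplefs_kill_sb(struct super_block *sb)
 *	{
 *		struct examplefs_sb_info *sbi = sb->s_fs_info;
 *
 *		generic_shutdown_super(sb);
 *		kfree(sbi);
 *	}
 */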
/**
 *	sget	-	find or create a superblock
 *	@type:	filesystem type superblock should belong to
 *	@test:	comparison callback
 *	@set:	setup callback
 *	@flags:	mount flags
 *	@data:	argument to each of them
 */
struct super_block *sget(struct file_system_type *type,
			int (*test)(struct super_block *,void *),
			int (*set)(struct super_block *,void *),
			int flags,
			void *data)
{
	struct super_block *s = NULL;
	struct super_block *old;
	int err;

retry:
	spin_lock(&sb_lock);
	if (test) {
		hlist_for_each_entry(old, &type->fs_supers, s_instances) {
			if (!test(old, data))
				continue;
			if (!grab_super(old))
				goto retry;
			if (s) {
				up_write(&s->s_umount);
				destroy_super(s);
				s = NULL;
			}
			return old;
		}
	}
	if (!s) {
		spin_unlock(&sb_lock);
		s = alloc_super(type, flags);
		if (!s)
			return ERR_PTR(-ENOMEM);
		goto retry;
	}

	err = set(s, data);
	if (err) {
		spin_unlock(&sb_lock);
		up_write(&s->s_umount);
		destroy_super(s);
		return ERR_PTR(err);
	}
	s->s_type = type;
	strlcpy(s->s_id, type->name, sizeof(s->s_id));
	list_add_tail(&s->s_list, &super_blocks);
	hlist_add_head(&s->s_instances, &type->fs_supers);
	spin_unlock(&sb_lock);
	get_filesystem(type);
	register_shrinker(&s->s_shrink);
	return s;
}
void drop_super(struct super_block *sb)
{
	up_read(&sb->s_umount);
	put_super(sb);
}

EXPORT_SYMBOL(drop_super);
/**
 *	iterate_supers - call function for all active superblocks
 *	@f: function to call
 *	@arg: argument to pass to it
 *
 *	Scans the superblock list and calls given function, passing it
 *	locked superblock and given argument.
 */
void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (hlist_unhashed(&sb->s_instances))
			continue;
		sb->s_count++;
		spin_unlock(&sb_lock);

		down_read(&sb->s_umount);
		if (sb->s_root && (sb->s_flags & MS_BORN))
			f(sb, arg);
		up_read(&sb->s_umount);

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}
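/*
 * Illustrative sketch (not part of the original file): a callback usable with
 * iterate_supers().  The callback runs with s_umount held for read on a live,
 * MS_BORN superblock, as guaranteed above.  sync_one_sb() is a hypothetical
 * name.
 *
 *	static void sync_one_sb(struct super_block *sb, void *arg)
 *	{
 *		if (!(sb->s_flags & MS_RDONLY))
 *			sync_filesystem(sb);
 *	}
 *
 *	...
 *	iterate_supers(sync_one_sb, NULL);
 */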
/**
 *	iterate_supers_type - call function for superblocks of given type
 *	@type: fs type
 *	@f: function to call
 *	@arg: argument to pass to it
 *
 *	Scans the superblock list and calls given function, passing it
 *	locked superblock and given argument.
 */
void iterate_supers_type(struct file_system_type *type,
	void (*f)(struct super_block *, void *), void *arg)
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
		sb->s_count++;
		spin_unlock(&sb_lock);

		down_read(&sb->s_umount);
		if (sb->s_root && (sb->s_flags & MS_BORN))
			f(sb, arg);
		up_read(&sb->s_umount);

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}

EXPORT_SYMBOL(iterate_supers_type);
/**
 *	get_super - get the superblock of a device
 *	@bdev: device to get the superblock for
 *
 *	Scans the superblock list and finds the superblock of the file system
 *	mounted on the device given. %NULL is returned if no match is found.
 */
struct super_block *get_super(struct block_device *bdev)
{
	struct super_block *sb;

	if (!bdev)
		return NULL;

	spin_lock(&sb_lock);
rescan:
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (hlist_unhashed(&sb->s_instances))
			continue;
		if (sb->s_bdev == bdev) {
			sb->s_count++;
			spin_unlock(&sb_lock);
			down_read(&sb->s_umount);
			/* still alive? */
			if (sb->s_root && (sb->s_flags & MS_BORN))
				return sb;
			up_read(&sb->s_umount);
			/* nope, got unmounted */
			spin_lock(&sb_lock);
			__put_super(sb);
			goto rescan;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}

EXPORT_SYMBOL(get_super);
/**
 *	get_super_thawed - get thawed superblock of a device
 *	@bdev: device to get the superblock for
 *
 *	Scans the superblock list and finds the superblock of the file system
 *	mounted on the device. The superblock is returned once it is thawed
 *	(or immediately if it was not frozen). %NULL is returned if no match
 *	is found.
 */
struct super_block *get_super_thawed(struct block_device *bdev)
{
	while (1) {
		struct super_block *s = get_super(bdev);
		if (!s || s->s_writers.frozen == SB_UNFROZEN)
			return s;
		up_read(&s->s_umount);
		wait_event(s->s_writers.wait_unfrozen,
			   s->s_writers.frozen == SB_UNFROZEN);
		put_super(s);
	}
}
EXPORT_SYMBOL(get_super_thawed);
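/*
 * Illustrative sketch (not part of the original file): the caller pattern for
 * get_super_thawed().  On success the superblock comes back with s_umount held
 * for read and guaranteed unfrozen, so the caller releases it with drop_super()
 * when done.
 *
 *	struct super_block *sb = get_super_thawed(bdev);
 *	if (sb) {
 *		... operate on an unfrozen filesystem ...
 *		drop_super(sb);
 *	}
 */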
/**
 * get_active_super - get an active reference to the superblock of a device
 * @bdev: device to get the superblock for
 *
 * Scans the superblock list and finds the superblock of the file system
 * mounted on the device given.  Returns the superblock with an active
 * reference or %NULL if none was found.
 */
struct super_block *get_active_super(struct block_device *bdev)
{
	struct super_block *sb;

	if (!bdev)
		return NULL;

restart:
	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (hlist_unhashed(&sb->s_instances))
			continue;
		if (sb->s_bdev == bdev) {
			if (!grab_super(sb))
				goto restart;
			up_write(&sb->s_umount);
			return sb;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}
struct super_block *user_get_super(dev_t dev)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
rescan:
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (hlist_unhashed(&sb->s_instances))
			continue;
		if (sb->s_dev == dev) {
			sb->s_count++;
			spin_unlock(&sb_lock);
			down_read(&sb->s_umount);
			/* still alive? */
			if (sb->s_root && (sb->s_flags & MS_BORN))
				return sb;
			up_read(&sb->s_umount);
			/* nope, got unmounted */
			spin_lock(&sb_lock);
			__put_super(sb);
			goto rescan;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}
/**
 *	do_remount_sb - asks filesystem to change mount options.
 *	@sb:	superblock in question
 *	@flags:	numeric part of options
 *	@data:	the rest of options
 *	@force: whether or not to force the change
 *
 *	Alters the mount options of a mounted file system.
 */
int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
{
	int retval;
	int remount_ro;

	if (sb->s_writers.frozen != SB_UNFROZEN)
		return -EBUSY;

	if (!(flags & MS_RDONLY) && bdev_read_only(sb->s_bdev))
		return -EACCES;

	if (flags & MS_RDONLY)
		acct_auto_close(sb);
	shrink_dcache_sb(sb);
	sync_filesystem(sb);

	remount_ro = (flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY);

	/* If we are remounting RDONLY and current sb is read/write,
	   make sure there are no rw files opened */
	if (remount_ro) {
		if (force) {
			sb->s_readonly_remount = 1;
			smp_wmb();
		} else {
			retval = sb_prepare_remount_readonly(sb);
			if (retval)
				return retval;
		}
	}

	if (sb->s_op->remount_fs) {
		retval = sb->s_op->remount_fs(sb, &flags, data);
		if (retval) {
			if (!force)
				goto cancel_readonly;
			/* If forced remount, go ahead despite any errors */
			WARN(1, "forced remount of a %s fs returned %i\n",
			     sb->s_type->name, retval);
		}
	}
	sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK);
	/* Needs to be ordered wrt mnt_is_readonly() */
	smp_wmb();
	sb->s_readonly_remount = 0;

	/*
	 * Some filesystems modify their metadata via some other path than the
	 * bdev buffer cache (eg. use a private mapping, or directories in
	 * pagecache, etc). Also file data modifications go via their own
	 * mappings. So if we try to mount readonly then copy the filesystem
	 * from bdev, we could get stale data, so invalidate it to give a best
	 * effort at coherency.
	 */
	if (remount_ro && sb->s_bdev)
		invalidate_bdev(sb->s_bdev);
	return 0;

cancel_readonly:
	sb->s_readonly_remount = 0;
	return retval;
}
static void do_emergency_remount(struct work_struct *work)
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (hlist_unhashed(&sb->s_instances))
			continue;
		sb->s_count++;
		spin_unlock(&sb_lock);
		down_write(&sb->s_umount);
		if (sb->s_root && sb->s_bdev && (sb->s_flags & MS_BORN) &&
		    !(sb->s_flags & MS_RDONLY)) {
			/*
			 * What lock protects sb->s_flags??
			 */
			do_remount_sb(sb, MS_RDONLY, NULL, 1);
		}
		up_write(&sb->s_umount);
		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
	kfree(work);
	printk("Emergency Remount complete\n");
}
void emergency_remount(void)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_emergency_remount);
		schedule_work(work);
	}
}
/*
 * Unnamed block devices are dummy devices used by virtual
 * filesystems which don't use real block-devices.  -- jrs
 */

static DEFINE_IDA(unnamed_dev_ida);
static DEFINE_SPINLOCK(unnamed_dev_lock);/* protects the above */
static int unnamed_dev_start = 0; /* don't bother trying below it */
int get_anon_bdev(dev_t *p)
{
	int dev;
	int error;

 retry:
	if (ida_pre_get(&unnamed_dev_ida, GFP_ATOMIC) == 0)
		return -ENOMEM;
	spin_lock(&unnamed_dev_lock);
	error = ida_get_new_above(&unnamed_dev_ida, unnamed_dev_start, &dev);
	if (!error)
		unnamed_dev_start = dev + 1;
	spin_unlock(&unnamed_dev_lock);
	if (error == -EAGAIN)
		/* We raced and lost with another CPU. */
		goto retry;
	else if (error)
		return -EAGAIN;

	if (dev == (1 << MINORBITS)) {
		spin_lock(&unnamed_dev_lock);
		ida_remove(&unnamed_dev_ida, dev);
		if (unnamed_dev_start > dev)
			unnamed_dev_start = dev;
		spin_unlock(&unnamed_dev_lock);
		return -EMFILE;
	}
	*p = MKDEV(0, dev & MINORMASK);
	return 0;
}
EXPORT_SYMBOL(get_anon_bdev);
void free_anon_bdev(dev_t dev)
{
	int slot = MINOR(dev);
	spin_lock(&unnamed_dev_lock);
	ida_remove(&unnamed_dev_ida, slot);
	if (slot < unnamed_dev_start)
		unnamed_dev_start = slot;
	spin_unlock(&unnamed_dev_lock);
}
EXPORT_SYMBOL(free_anon_bdev);
int set_anon_super(struct super_block *s, void *data)
{
	int error = get_anon_bdev(&s->s_dev);
	if (!error)
		s->s_bdi = &noop_backing_dev_info;
	return error;
}
EXPORT_SYMBOL(set_anon_super);
void kill_anon_super(struct super_block *sb)
{
	dev_t dev = sb->s_dev;
	generic_shutdown_super(sb);
	free_anon_bdev(dev);
}
EXPORT_SYMBOL(kill_anon_super);

void kill_litter_super(struct super_block *sb)
{
	if (sb->s_root)
		d_genocide(sb->s_root);
	kill_anon_super(sb);
}
EXPORT_SYMBOL(kill_litter_super);
static int ns_test_super(struct super_block *sb, void *data)
{
	return sb->s_fs_info == data;
}

static int ns_set_super(struct super_block *sb, void *data)
{
	sb->s_fs_info = data;
	return set_anon_super(sb, NULL);
}
struct dentry *mount_ns(struct file_system_type *fs_type, int flags,
	void *data, int (*fill_super)(struct super_block *, void *, int))
{
	struct super_block *sb;

	sb = sget(fs_type, ns_test_super, ns_set_super, flags, data);
	if (IS_ERR(sb))
		return ERR_CAST(sb);

	if (!sb->s_root) {
		int err;
		err = fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
		if (err) {
			deactivate_locked_super(sb);
			return ERR_PTR(err);
		}

		sb->s_flags |= MS_ACTIVE;
	}

	return dget(sb->s_root);
}

EXPORT_SYMBOL(mount_ns);
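/*
 * Illustrative sketch (not part of the original file): a namespace-keyed
 * filesystem wiring mount_ns() into its ->mount() method, with the namespace
 * pointer doubling as the sget() key via ns_test_super()/ns_set_super() above.
 * examplefs_mount(), examplefs_fill_super() and the choice of namespace object
 * are hypothetical.
 *
 *	static struct dentry *examplefs_mount(struct file_system_type *fs_type,
 *			int flags, const char *dev_name, void *data)
 *	{
 *		return mount_ns(fs_type, flags, current->nsproxy->net_ns,
 *				examplefs_fill_super);
 *	}
 */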
static int set_bdev_super(struct super_block *s, void *data)
{
	s->s_bdev = data;
	s->s_dev = s->s_bdev->bd_dev;

	/*
	 * We set the bdi here to the queue backing, file systems can
	 * overwrite this in ->fill_super()
	 */
	s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info;
	return 0;
}

static int test_bdev_super(struct super_block *s, void *data)
{
	return (void *)s->s_bdev == data;
}
struct dentry *mount_bdev(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	struct block_device *bdev;
	struct super_block *s;
	fmode_t mode = FMODE_READ | FMODE_EXCL;
	int error = 0;

	if (!(flags & MS_RDONLY))
		mode |= FMODE_WRITE;

	bdev = blkdev_get_by_path(dev_name, mode, fs_type);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);

	/*
	 * once the super is inserted into the list by sget, s_umount
	 * will protect the lockfs code from trying to start a snapshot
	 * while we are mounting
	 */
	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (bdev->bd_fsfreeze_count > 0) {
		mutex_unlock(&bdev->bd_fsfreeze_mutex);
		error = -EBUSY;
		goto error_bdev;
	}
	s = sget(fs_type, test_bdev_super, set_bdev_super, flags | MS_NOSEC,
		 bdev);
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	if (IS_ERR(s))
		goto error_s;

	if (s->s_root) {
		if ((flags ^ s->s_flags) & MS_RDONLY) {
			deactivate_locked_super(s);
			error = -EBUSY;
			goto error_bdev;
		}

		/*
		 * s_umount nests inside bd_mutex during
		 * __invalidate_device().  blkdev_put() acquires
		 * bd_mutex and can't be called under s_umount.  Drop
		 * s_umount temporarily.  This is safe as we're
		 * holding an active reference.
		 */
		up_write(&s->s_umount);
		blkdev_put(bdev, mode);
		down_write(&s->s_umount);
	} else {
		char b[BDEVNAME_SIZE];

		s->s_mode = mode;
		strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
		sb_set_blocksize(s, block_size(bdev));
		error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
		if (error) {
			deactivate_locked_super(s);
			goto error;
		}

		s->s_flags |= MS_ACTIVE;
		bdev->bd_super = s;
	}

	return dget(s->s_root);

error_s:
	error = PTR_ERR(s);
error_bdev:
	blkdev_put(bdev, mode);
error:
	return ERR_PTR(error);
}
EXPORT_SYMBOL(mount_bdev);
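/*
 * Illustrative sketch (not part of the original file): a block-device based
 * filesystem typically just forwards its ->mount() method to mount_bdev(),
 * supplying its own fill_super callback.  examplefs_mount() and
 * examplefs_fill_super() are hypothetical names.
 *
 *	static struct dentry *examplefs_mount(struct file_system_type *fs_type,
 *			int flags, const char *dev_name, void *data)
 *	{
 *		return mount_bdev(fs_type, flags, dev_name, data,
 *				  examplefs_fill_super);
 *	}
 */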
void kill_block_super(struct super_block *sb)
{
	struct block_device *bdev = sb->s_bdev;
	fmode_t mode = sb->s_mode;

	bdev->bd_super = NULL;
	generic_shutdown_super(sb);
	sync_blockdev(bdev);
	WARN_ON_ONCE(!(mode & FMODE_EXCL));
	blkdev_put(bdev, mode | FMODE_EXCL);
}

EXPORT_SYMBOL(kill_block_super);
struct dentry *mount_nodev(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	int error;
	struct super_block *s = sget(fs_type, NULL, set_anon_super, flags, NULL);

	if (IS_ERR(s))
		return ERR_CAST(s);

	error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
	if (error) {
		deactivate_locked_super(s);
		return ERR_PTR(error);
	}
	s->s_flags |= MS_ACTIVE;
	return dget(s->s_root);
}
EXPORT_SYMBOL(mount_nodev);
static int compare_single(struct super_block *s, void *p)
{
	return 1;
}

struct dentry *mount_single(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	struct super_block *s;
	int error;

	s = sget(fs_type, compare_single, set_anon_super, flags, NULL);
	if (IS_ERR(s))
		return ERR_CAST(s);
	if (!s->s_root) {
		error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
		if (error) {
			deactivate_locked_super(s);
			return ERR_PTR(error);
		}
		s->s_flags |= MS_ACTIVE;
	} else {
		do_remount_sb(s, flags, data, 0);
	}
	return dget(s->s_root);
}
EXPORT_SYMBOL(mount_single);
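/*
 * Illustrative sketch (not part of the original file): single-instance virtual
 * filesystems wire their ->mount() to mount_single(); every mount after the
 * first reuses the one superblock matched by compare_single() and only
 * reapplies the mount options via do_remount_sb().  examplefs_mount() and
 * examplefs_fill_super() are hypothetical names.
 *
 *	static struct dentry *examplefs_mount(struct file_system_type *fs_type,
 *			int flags, const char *dev_name, void *data)
 *	{
 *		return mount_single(fs_type, flags, data, examplefs_fill_super);
 *	}
 */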
struct dentry *
mount_fs(struct file_system_type *type, int flags, const char *name, void *data)
{
	struct dentry *root;
	struct super_block *sb;
	char *secdata = NULL;
	int error = -ENOMEM;

	if (data && !(type->fs_flags & FS_BINARY_MOUNTDATA)) {
		secdata = alloc_secdata();
		if (!secdata)
			goto out;

		error = security_sb_copy_data(data, secdata);
		if (error)
			goto out_free_secdata;
	}

	root = type->mount(type, flags, name, data);
	if (IS_ERR(root)) {
		error = PTR_ERR(root);
		goto out_free_secdata;
	}
	sb = root->d_sb;
	BUG_ON(!sb);
	WARN_ON(!sb->s_bdi);
	WARN_ON(sb->s_bdi == &default_backing_dev_info);
	sb->s_flags |= MS_BORN;

	error = security_sb_kern_mount(sb, flags, secdata);
	if (error)
		goto out_sb;

	/*
	 * filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE
	 * but s_maxbytes was an unsigned long long for many releases. Throw
	 * this warning for a little while to try and catch filesystems that
	 * violate this rule.
	 */
	WARN((sb->s_maxbytes < 0), "%s set sb->s_maxbytes to "
		"negative value (%lld)\n", type->name, sb->s_maxbytes);

	up_write(&sb->s_umount);
	free_secdata(secdata);
	return root;
out_sb:
	dput(root);
	deactivate_locked_super(sb);
out_free_secdata:
	free_secdata(secdata);
out:
	return ERR_PTR(error);
}
/*
 * This is an internal function, please use sb_end_{write,pagefault,intwrite}
 * instead.
 */
void __sb_end_write(struct super_block *sb, int level)
{
	percpu_counter_dec(&sb->s_writers.counter[level-1]);
	/*
	 * Make sure s_writers are updated before we wake up waiters in
	 * freeze_super().
	 */
	smp_mb();
	if (waitqueue_active(&sb->s_writers.wait))
		wake_up(&sb->s_writers.wait);
	rwsem_release(&sb->s_writers.lock_map[level-1], 1, _RET_IP_);
}
EXPORT_SYMBOL(__sb_end_write);
#ifdef CONFIG_LOCKDEP
/*
 * We want lockdep to tell us about possible deadlocks with freezing but
 * it's a bit tricky to properly instrument it. Getting a freeze protection
 * works as getting a read lock but there are subtle problems. XFS for example
 * gets freeze protection on internal level twice in some cases, which is OK
 * only because we already hold a freeze protection also on higher level. Due
 * to these cases we have to tell lockdep we are doing trylock when we
 * already hold a freeze protection for a higher freeze level.
 */
static void acquire_freeze_lock(struct super_block *sb, int level, bool trylock,
				unsigned long ip)
{
	int i;

	if (!trylock) {
		for (i = 0; i < level - 1; i++)
			if (lock_is_held(&sb->s_writers.lock_map[i])) {
				trylock = true;
				break;
			}
	}
	rwsem_acquire_read(&sb->s_writers.lock_map[level-1], 0, trylock, ip);
}
#endif
/*
 * This is an internal function, please use sb_start_{write,pagefault,intwrite}
 * instead.
 */
int __sb_start_write(struct super_block *sb, int level, bool wait)
{
retry:
	if (unlikely(sb->s_writers.frozen >= level)) {
		if (!wait)
			return 0;
		wait_event(sb->s_writers.wait_unfrozen,
			   sb->s_writers.frozen < level);
	}

#ifdef CONFIG_LOCKDEP
	acquire_freeze_lock(sb, level, !wait, _RET_IP_);
#endif
	percpu_counter_inc(&sb->s_writers.counter[level-1]);
	/*
	 * Make sure counter is updated before we check for frozen.
	 * freeze_super() first sets frozen and then checks the counter.
	 */
	smp_mb();
	if (unlikely(sb->s_writers.frozen >= level)) {
		__sb_end_write(sb, level);
		goto retry;
	}
	return 1;
}
EXPORT_SYMBOL(__sb_start_write);
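/*
 * Illustrative sketch (not part of the original file): the sb_start_write()/
 * sb_end_write() wrappers mentioned in the comments above are how filesystem
 * code is expected to bracket a modification, so that freeze_super() can wait
 * for it at SB_FREEZE_WRITE time.
 *
 *	sb_start_write(sb);
 *	... modify the filesystem ...
 *	sb_end_write(sb);
 */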
/**
 * sb_wait_write - wait until all writers to given file system finish
 * @sb: the super for which we wait
 * @level: type of writers we wait for (normal vs page fault)
 *
 * This function waits until there are no writers of given type to given file
 * system. Caller of this function should make sure there can be no new writers
 * of type @level before calling this function. Otherwise this function can
 * livelock.
 */
static void sb_wait_write(struct super_block *sb, int level)
{
	s64 writers;

	/*
	 * We just cycle-through lockdep here so that it does not complain
	 * about returning with lock to userspace
	 */
	rwsem_acquire(&sb->s_writers.lock_map[level-1], 0, 0, _THIS_IP_);
	rwsem_release(&sb->s_writers.lock_map[level-1], 1, _THIS_IP_);

	do {
		DEFINE_WAIT(wait);

		/*
		 * We use a barrier in prepare_to_wait() to separate setting
		 * of frozen and checking of the counter
		 */
		prepare_to_wait(&sb->s_writers.wait, &wait,
				TASK_UNINTERRUPTIBLE);

		writers = percpu_counter_sum(&sb->s_writers.counter[level-1]);
		if (writers)
			schedule();

		finish_wait(&sb->s_writers.wait, &wait);
	} while (writers);
}
/**
 * freeze_super - lock the filesystem and force it into a consistent state
 * @sb: the super to lock
 *
 * Syncs the super to make sure the filesystem is consistent and calls the fs's
 * freeze_fs.  Subsequent calls to this without first thawing the fs will return
 * -EBUSY.
 *
 * During this function, sb->s_writers.frozen goes through these values:
 *
 * SB_UNFROZEN: File system is normal, all writes progress as usual.
 *
 * SB_FREEZE_WRITE: The file system is in the process of being frozen.  New
 * writes should be blocked, though page faults are still allowed. We wait for
 * all writes to complete and then proceed to the next stage.
 *
 * SB_FREEZE_PAGEFAULT: Freezing continues. Now also page faults are blocked
 * but internal fs threads can still modify the filesystem (although they
 * should not dirty new pages or inodes), writeback can run etc. After waiting
 * for all running page faults we sync the filesystem which will clean all
 * dirty pages and inodes (no new dirty pages or inodes can be created when
 * sync is running).
 *
 * SB_FREEZE_FS: The file system is frozen. Now all internal sources of fs
 * modification are blocked (e.g. XFS preallocation truncation on inode
 * reclaim). This is usually implemented by blocking new transactions for
 * filesystems that have them and need this additional guard. After all
 * internal writers are finished we call ->freeze_fs() to finish filesystem
 * freezing. Then we transition to SB_FREEZE_COMPLETE state. This state is
 * mostly auxiliary for filesystems to verify they do not modify frozen fs.
 *
 * sb->s_writers.frozen is protected by sb->s_umount.
 */
int freeze_super(struct super_block *sb)
{
	int ret;

	atomic_inc(&sb->s_active);
	down_write(&sb->s_umount);
	if (sb->s_writers.frozen != SB_UNFROZEN) {
		deactivate_locked_super(sb);
		return -EBUSY;
	}

	if (!(sb->s_flags & MS_BORN)) {
		up_write(&sb->s_umount);
		return 0;	/* sic - it's "nothing to do" */
	}

	if (sb->s_flags & MS_RDONLY) {
		/* Nothing to do really... */
		sb->s_writers.frozen = SB_FREEZE_COMPLETE;
		up_write(&sb->s_umount);
		return 0;
	}

	/* From now on, no new normal writers can start */
	sb->s_writers.frozen = SB_FREEZE_WRITE;
	smp_wmb();

	/* Release s_umount to preserve sb_start_write -> s_umount ordering */
	up_write(&sb->s_umount);

	sb_wait_write(sb, SB_FREEZE_WRITE);

	/* Now we go and block page faults... */
	down_write(&sb->s_umount);
	sb->s_writers.frozen = SB_FREEZE_PAGEFAULT;
	smp_wmb();

	sb_wait_write(sb, SB_FREEZE_PAGEFAULT);

	/* All writers are done so after syncing there won't be dirty data */
	sync_filesystem(sb);

	/* Now wait for internal filesystem counter */
	sb->s_writers.frozen = SB_FREEZE_FS;
	smp_wmb();
	sb_wait_write(sb, SB_FREEZE_FS);

	if (sb->s_op->freeze_fs) {
		ret = sb->s_op->freeze_fs(sb);
		if (ret) {
			printk(KERN_ERR
				"VFS:Filesystem freeze failed\n");
			sb->s_writers.frozen = SB_UNFROZEN;
			smp_wmb();
			wake_up(&sb->s_writers.wait_unfrozen);
			deactivate_locked_super(sb);
			return ret;
		}
	}
	/*
	 * This is just for debugging purposes so that fs can warn if it
	 * sees write activity when frozen is set to SB_FREEZE_COMPLETE.
	 */
	sb->s_writers.frozen = SB_FREEZE_COMPLETE;
	up_write(&sb->s_umount);
	return 0;
}
EXPORT_SYMBOL(freeze_super);
/**
 * thaw_super -- unlock filesystem
 * @sb: the super to thaw
 *
 * Unlocks the filesystem and marks it writeable again after freeze_super().
 */
int thaw_super(struct super_block *sb)
{
	int error;

	down_write(&sb->s_umount);
	if (sb->s_writers.frozen == SB_UNFROZEN) {
		up_write(&sb->s_umount);
		return -EINVAL;
	}

	if (sb->s_flags & MS_RDONLY)
		goto out;

	if (sb->s_op->unfreeze_fs) {
		error = sb->s_op->unfreeze_fs(sb);
		if (error) {
			printk(KERN_ERR
				"VFS:Filesystem thaw failed\n");
			up_write(&sb->s_umount);
			return error;
		}
	}

out:
	sb->s_writers.frozen = SB_UNFROZEN;
	smp_wmb();
	wake_up(&sb->s_writers.wait_unfrozen);
	deactivate_locked_super(sb);

	return 0;
}
EXPORT_SYMBOL(thaw_super);
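/*
 * Illustrative sketch (not part of the original file): the usual freeze/thaw
 * pairing around an operation that needs the filesystem quiescent (this is the
 * pattern behind the FIFREEZE/FITHAW ioctls and device-mapper snapshots).
 *
 *	err = freeze_super(sb);
 *	if (err)
 *		return err;
 *	... take a consistent snapshot / backup of the block device ...
 *	err = thaw_super(sb);
 */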