/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  super.c contains code to handle: - mount structures
 *                                   - filesystem drivers list
 *                                   - umount system call
 *
 *  GK 2/5/95  -  Changed to support mounting the root fs via NFS
 *
 *  Added kerneld support: Jacques Gelinas and Bjorn Ekwall
 *  Added change_root: Werner Almesberger & Hans Lermen, Feb '96
 *  Added options to /proc/mounts:
 *    Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996.
 *  Added devfs support: Richard Gooch <rgooch@atnf.csiro.au>, 13-JAN-1998
 *  Heavily rewritten for 'one fs - one tree' dcache architecture. AV, Mar 2000
 */

#include <linux/export.h>
#include <linux/slab.h>
#include <linux/acct.h>
#include <linux/blkdev.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/writeback.h>		/* for the emergency remount stuff */
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/rculist_bl.h>
#include <linux/cleancache.h>
#include <linux/fsnotify.h>
#include <linux/lockdep.h>

LIST_HEAD(super_blocks);
EXPORT_SYMBOL_GPL(super_blocks);

DEFINE_SPINLOCK(sb_lock);

static char *sb_writers_name[SB_FREEZE_LEVELS] = {
	"sb_writers",
	"sb_pagefaults",
	"sb_internal",
};

/*
 * One thing we have to be careful of with a per-sb shrinker is that we don't
 * drop the last active reference to the superblock from within the shrinker.
 * If that happens we could trigger unregistering the shrinker from within the
 * shrinker path and that leads to deadlock on the shrinker_rwsem. Hence we
 * take a passive reference to the superblock to avoid this from occurring.
 */
static int prune_super(struct shrinker *shrink, struct shrink_control *sc)
{
	struct super_block *sb;
	int	fs_objects = 0;
	int	total_objects;

	sb = container_of(shrink, struct super_block, s_shrink);

	/*
	 * Deadlock avoidance.  We may hold various FS locks, and we don't want
	 * to recurse into the FS that called us in clear_inode() and friends..
	 */
	if (sc->nr_to_scan && !(sc->gfp_mask & __GFP_FS))
		return -1;

	if (!grab_super_passive(sb))
		return -1;

	if (sb->s_op && sb->s_op->nr_cached_objects)
		fs_objects = sb->s_op->nr_cached_objects(sb);

	total_objects = sb->s_nr_dentry_unused +
			sb->s_nr_inodes_unused + fs_objects + 1;

	if (sc->nr_to_scan) {
		int	dentries;
		int	inodes;

		/* proportion the scan between the caches */
		dentries = (sc->nr_to_scan * sb->s_nr_dentry_unused) /
							total_objects;
		inodes = (sc->nr_to_scan * sb->s_nr_inodes_unused) /
							total_objects;
		if (fs_objects)
			fs_objects = (sc->nr_to_scan * fs_objects) /
							total_objects;
		/*
		 * prune the dcache first as the icache is pinned by it, then
		 * prune the icache, followed by the filesystem specific caches
		 */
		prune_dcache_sb(sb, dentries);
		prune_icache_sb(sb, inodes);

		if (fs_objects && sb->s_op->free_cached_objects) {
			sb->s_op->free_cached_objects(sb, fs_objects);
			fs_objects = sb->s_op->nr_cached_objects(sb);
		}
		total_objects = sb->s_nr_dentry_unused +
				sb->s_nr_inodes_unused + fs_objects;
	}

	total_objects = (total_objects / 100) * sysctl_vfs_cache_pressure;
	drop_super(sb);
	return total_objects;
}

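/*
 * Illustrative sketch, not part of this file: a filesystem that keeps its own
 * reclaimable cache can expose it to prune_super() through the optional
 * ->nr_cached_objects()/->free_cached_objects() methods of super_operations.
 * The "examplefs" names and the private counter below are hypothetical and
 * used only for illustration; a real filesystem would count and free its own
 * private objects here.
 */
struct examplefs_sb_info {
	int	nr_private_objects;	/* objects the fs could free on demand */
};

static int examplefs_nr_cached_objects(struct super_block *sb)
{
	struct examplefs_sb_info *sbi = sb->s_fs_info;

	return sbi->nr_private_objects;
}

static void examplefs_free_cached_objects(struct super_block *sb, int nr)
{
	struct examplefs_sb_info *sbi = sb->s_fs_info;

	/* a real filesystem would free up to @nr of its private objects here */
	sbi->nr_private_objects -= min(nr, sbi->nr_private_objects);
}

static const struct super_operations examplefs_sops = {
	.nr_cached_objects	= examplefs_nr_cached_objects,
	.free_cached_objects	= examplefs_free_cached_objects,
};
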
static int init_sb_writers(struct super_block *s, struct file_system_type *type)
{
	int err;
	int i;

	for (i = 0; i < SB_FREEZE_LEVELS; i++) {
		err = percpu_counter_init(&s->s_writers.counter[i], 0);
		if (err < 0)
			goto err_out;
		lockdep_init_map(&s->s_writers.lock_map[i], sb_writers_name[i],
				 &type->s_writers_key[i], 0);
	}
	init_waitqueue_head(&s->s_writers.wait);
	init_waitqueue_head(&s->s_writers.wait_unfrozen);
	return 0;
err_out:
	while (--i >= 0)
		percpu_counter_destroy(&s->s_writers.counter[i]);
	return err;
}

static void destroy_sb_writers(struct super_block *s)
{
	int i;

	for (i = 0; i < SB_FREEZE_LEVELS; i++)
		percpu_counter_destroy(&s->s_writers.counter[i]);
}

/**
 *	alloc_super	-	create new superblock
 *	@type:	filesystem type superblock should belong to
 *	@flags: the mount flags
 *
 *	Allocates and initializes a new &struct super_block.  alloc_super()
 *	returns a pointer to the new superblock or %NULL if allocation had
 *	failed.
 */
static struct super_block *alloc_super(struct file_system_type *type, int flags)
{
	struct super_block *s = kzalloc(sizeof(struct super_block), GFP_USER);
	static const struct super_operations default_op;

	if (s) {
		if (security_sb_alloc(s)) {
			/*
			 * We cannot call security_sb_free() without
			 * security_sb_alloc() succeeding. So bail out manually
			 */
			kfree(s);
			s = NULL;
			goto out;
		}
		if (init_sb_writers(s, type))
			goto err_out;
		s->s_flags = flags;
		s->s_bdi = &default_backing_dev_info;
		INIT_HLIST_NODE(&s->s_instances);
		INIT_HLIST_BL_HEAD(&s->s_anon);
		INIT_LIST_HEAD(&s->s_inodes);
		INIT_LIST_HEAD(&s->s_dentry_lru);
		INIT_LIST_HEAD(&s->s_inode_lru);
		spin_lock_init(&s->s_inode_lru_lock);
		INIT_LIST_HEAD(&s->s_mounts);
		init_rwsem(&s->s_umount);
		lockdep_set_class(&s->s_umount, &type->s_umount_key);
		/*
		 * sget() can have s_umount recursion.
		 *
		 * When it cannot find a suitable sb, it allocates a new
		 * one (this one), and tries again to find a suitable old
		 * one.
		 *
		 * In case that succeeds, it will acquire the s_umount
		 * lock of the old one. Since these are clearly distinct
		 * locks, and this object isn't exposed yet, there's no
		 * risk of deadlocks.
		 *
		 * Annotate this by putting this lock in a different
		 * subclass.
		 */
		down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
		s->s_count = 1;
		atomic_set(&s->s_active, 1);
		mutex_init(&s->s_vfs_rename_mutex);
		lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
		mutex_init(&s->s_dquot.dqio_mutex);
		mutex_init(&s->s_dquot.dqonoff_mutex);
		init_rwsem(&s->s_dquot.dqptr_sem);
		s->s_maxbytes = MAX_NON_LFS;
		s->s_op = &default_op;
		s->s_time_gran = 1000000000;
		s->cleancache_poolid = -1;

		s->s_shrink.seeks = DEFAULT_SEEKS;
		s->s_shrink.shrink = prune_super;
		s->s_shrink.batch = 1024;
	}
out:
	return s;
err_out:
	security_sb_free(s);
	destroy_sb_writers(s);
	kfree(s);
	s = NULL;
	goto out;
}

/**
 *	destroy_super	-	frees a superblock
 *	@s: superblock to free
 *
 *	Frees a superblock.
 */
static inline void destroy_super(struct super_block *s)
{
	destroy_sb_writers(s);
	security_sb_free(s);
	WARN_ON(!list_empty(&s->s_mounts));
	kfree(s->s_subtype);
	kfree(s->s_options);
	kfree(s);
}

/* Superblock refcounting  */

/*
 * Drop a superblock's refcount.  The caller must hold sb_lock.
 */
static void __put_super(struct super_block *sb)
{
	if (!--sb->s_count) {
		list_del_init(&sb->s_list);
		destroy_super(sb);
	}
}

/**
 *	put_super	-	drop a temporary reference to superblock
 *	@sb: superblock in question
 *
 *	Drops a temporary reference, frees superblock if there are no
 *	references left.
 */
static void put_super(struct super_block *sb)
{
	spin_lock(&sb_lock);
	__put_super(sb);
	spin_unlock(&sb_lock);
}

/**
 *	deactivate_locked_super	-	drop an active reference to superblock
 *	@s: superblock to deactivate
 *
 *	Drops an active reference to superblock, converting it into a temporary
 *	one if there are no other active references left.  In that case we
 *	tell fs driver to shut it down and drop the temporary reference we
 *	had just acquired.
 *
 *	Caller holds exclusive lock on superblock; that lock is released.
 */
void deactivate_locked_super(struct super_block *s)
{
	struct file_system_type *fs = s->s_type;
	if (atomic_dec_and_test(&s->s_active)) {
		cleancache_invalidate_fs(s);
		fs->kill_sb(s);

		/* caches are now gone, we can safely kill the shrinker now */
		unregister_shrinker(&s->s_shrink);
		put_filesystem(fs);
		put_super(s);
	} else {
		up_write(&s->s_umount);
	}
}

EXPORT_SYMBOL(deactivate_locked_super);

/**
 *	deactivate_super	-	drop an active reference to superblock
 *	@s: superblock to deactivate
 *
 *	Variant of deactivate_locked_super(), except that superblock is *not*
 *	locked by caller.  If we are going to drop the final active reference,
 *	lock will be acquired prior to that.
 */
void deactivate_super(struct super_block *s)
{
	if (!atomic_add_unless(&s->s_active, -1, 1)) {
		down_write(&s->s_umount);
		deactivate_locked_super(s);
	}
}

EXPORT_SYMBOL(deactivate_super);

/**
 *	grab_super - acquire an active reference
 *	@s: reference we are trying to make active
 *
 *	Tries to acquire an active reference.  grab_super() is used when we
 *	had just found a superblock in super_blocks or fs_type->fs_supers
 *	and want to turn it into a full-blown active reference.  grab_super()
 *	is called with sb_lock held and drops it.  Returns 1 in case of
 *	success, 0 if we had failed (the superblock was already dead or
 *	dying when grab_super() had been called).  Note that this is only
 *	called for superblocks not in rundown mode (== ones still on ->fs_supers
 *	of their type), so increment of ->s_count is OK here.
 */
static int grab_super(struct super_block *s) __releases(sb_lock)
{
	s->s_count++;
	spin_unlock(&sb_lock);
	down_write(&s->s_umount);
	if ((s->s_flags & MS_BORN) && atomic_inc_not_zero(&s->s_active)) {
		put_super(s);
		return 1;
	}
	up_write(&s->s_umount);
	put_super(s);
	return 0;
}

/*
 *	grab_super_passive - acquire a passive reference
 *	@sb: reference we are trying to grab
 *
 *	Tries to acquire a passive reference. This is used in places where we
 *	cannot take an active reference but we need to ensure that the
 *	superblock does not go away while we are working on it. It returns
 *	false if a reference was not gained, and returns true with the s_umount
 *	lock held in read mode if a reference is gained. On successful return,
 *	the caller must drop the s_umount lock and the passive reference when
 *	done.
 */
bool grab_super_passive(struct super_block *sb)
{
	spin_lock(&sb_lock);
	if (hlist_unhashed(&sb->s_instances)) {
		spin_unlock(&sb_lock);
		return false;
	}

	sb->s_count++;
	spin_unlock(&sb_lock);

	if (down_read_trylock(&sb->s_umount)) {
		if (sb->s_root && (sb->s_flags & MS_BORN))
			return true;
		up_read(&sb->s_umount);
	}

	put_super(sb);
	return false;
}

/**
 *	generic_shutdown_super	-	common helper for ->kill_sb()
 *	@sb: superblock to kill
 *
 *	generic_shutdown_super() does all fs-independent work on superblock
 *	shutdown.  Typical ->kill_sb() should pick all fs-specific objects
 *	that need destruction out of superblock, call generic_shutdown_super()
 *	and release aforementioned objects.  Note: dentries and inodes _are_
 *	taken care of and do not need specific handling.
 *
 *	Upon calling this function, the filesystem may no longer alter or
 *	rearrange the set of dentries belonging to this super_block, nor may it
 *	change the attachments of dentries to inodes.
 */
void generic_shutdown_super(struct super_block *sb)
{
	const struct super_operations *sop = sb->s_op;

	if (sb->s_root) {
		shrink_dcache_for_umount(sb);
		sync_filesystem(sb);
		sb->s_flags &= ~MS_ACTIVE;

		fsnotify_unmount_inodes(&sb->s_inodes);

		evict_inodes(sb);

		if (sop->put_super)
			sop->put_super(sb);

		if (!list_empty(&sb->s_inodes)) {
			printk("VFS: Busy inodes after unmount of %s. "
			   "Self-destruct in 5 seconds.  Have a nice day...\n",
			   sb->s_id);
		}
	}
	spin_lock(&sb_lock);
	/* should be initialized for __put_super_and_need_restart() */
	hlist_del_init(&sb->s_instances);
	spin_unlock(&sb_lock);
	up_write(&sb->s_umount);
}

EXPORT_SYMBOL(generic_shutdown_super);

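/*
 * Illustrative sketch, not part of this file: a typical ->kill_sb() for a
 * filesystem with private per-superblock state tears the tree down with
 * generic_shutdown_super() (or a wrapper such as kill_block_super()) and only
 * afterwards frees its own objects.  The "examplefs_sb_priv" structure and
 * the function name below are hypothetical.
 */
struct examplefs_sb_priv {
	void *private_data;		/* hypothetical fs-specific state */
};

static void examplefs_kill_sb(struct super_block *sb)
{
	struct examplefs_sb_priv *priv = sb->s_fs_info;

	/* dentries and inodes are handled by generic_shutdown_super() */
	generic_shutdown_super(sb);

	/* now it is safe to release fs-specific objects */
	kfree(priv);
	sb->s_fs_info = NULL;
}
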
/**
 *	sget	-	find or create a superblock
 *	@type:	filesystem type superblock should belong to
 *	@test:	comparison callback
 *	@set:	setup callback
 *	@flags:	mount flags
 *	@data:	argument to each of them
 */
struct super_block *sget(struct file_system_type *type,
			int (*test)(struct super_block *,void *),
			int (*set)(struct super_block *,void *),
			int flags,
			void *data)
{
	struct super_block *s = NULL;
	struct super_block *old;
	int err;

retry:
	spin_lock(&sb_lock);
	if (test) {
		hlist_for_each_entry(old, &type->fs_supers, s_instances) {
			if (!test(old, data))
				continue;
			if (!grab_super(old))
				goto retry;
			if (s) {
				up_write(&s->s_umount);
				destroy_super(s);
				s = NULL;
			}
			return old;
		}
	}
	if (!s) {
		spin_unlock(&sb_lock);
		s = alloc_super(type, flags);
		if (!s)
			return ERR_PTR(-ENOMEM);
		goto retry;
	}

	err = set(s, data);
	if (err) {
		spin_unlock(&sb_lock);
		up_write(&s->s_umount);
		destroy_super(s);
		return ERR_PTR(err);
	}
	s->s_type = type;
	strlcpy(s->s_id, type->name, sizeof(s->s_id));
	list_add_tail(&s->s_list, &super_blocks);
	hlist_add_head(&s->s_instances, &type->fs_supers);
	spin_unlock(&sb_lock);
	get_filesystem(type);
	register_shrinker(&s->s_shrink);
	return s;
}

EXPORT_SYMBOL(sget);

void drop_super(struct super_block *sb)
{
	up_read(&sb->s_umount);
	put_super(sb);
}

EXPORT_SYMBOL(drop_super);

/**
 *	iterate_supers - call function for all active superblocks
 *	@f: function to call
 *	@arg: argument to pass to it
 *
 *	Scans the superblock list and calls given function, passing it
 *	locked superblock and given argument.
 */
void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (hlist_unhashed(&sb->s_instances))
			continue;
		sb->s_count++;
		spin_unlock(&sb_lock);

		down_read(&sb->s_umount);
		if (sb->s_root && (sb->s_flags & MS_BORN))
			f(sb, arg);
		up_read(&sb->s_umount);

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}

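/*
 * Illustrative sketch, not part of this file: iterate_supers() hands each
 * live superblock to the callback with a passive reference and s_umount held
 * for reading.  The hypothetical callback below just syncs every writable
 * filesystem, similar in spirit to what the global sync path does.
 */
static void example_sync_one_sb(struct super_block *sb, void *arg)
{
	if (!(sb->s_flags & MS_RDONLY))
		sync_filesystem(sb);	/* sb is pinned and read-locked here */
}

static void example_sync_all_supers(void)
{
	iterate_supers(example_sync_one_sb, NULL);
}
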
/**
 *	iterate_supers_type - call function for superblocks of given type
 *	@type: fs type
 *	@f: function to call
 *	@arg: argument to pass to it
 *
 *	Scans the superblock list and calls given function, passing it
 *	locked superblock and given argument.
 */
void iterate_supers_type(struct file_system_type *type,
	void (*f)(struct super_block *, void *), void *arg)
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
		sb->s_count++;
		spin_unlock(&sb_lock);

		down_read(&sb->s_umount);
		if (sb->s_root && (sb->s_flags & MS_BORN))
			f(sb, arg);
		up_read(&sb->s_umount);

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}

EXPORT_SYMBOL(iterate_supers_type);

/**
 *	get_super - get the superblock of a device
 *	@bdev: device to get the superblock for
 *
 *	Scans the superblock list and finds the superblock of the file system
 *	mounted on the device given. %NULL is returned if no match is found.
 */
struct super_block *get_super(struct block_device *bdev)
{
	struct super_block *sb;

	if (!bdev)
		return NULL;

	spin_lock(&sb_lock);
rescan:
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (hlist_unhashed(&sb->s_instances))
			continue;
		if (sb->s_bdev == bdev) {
			sb->s_count++;
			spin_unlock(&sb_lock);
			down_read(&sb->s_umount);
			/* still alive? */
			if (sb->s_root && (sb->s_flags & MS_BORN))
				return sb;
			up_read(&sb->s_umount);
			/* nope, got unmounted */
			spin_lock(&sb_lock);
			__put_super(sb);
			goto rescan;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}

EXPORT_SYMBOL(get_super);

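/*
 * Illustrative sketch, not part of this file: get_super() returns the
 * superblock with a temporary reference and s_umount held for reading, so
 * every successful lookup must be paired with drop_super().  The helper name
 * below is hypothetical.
 */
static int example_sync_fs_on_bdev(struct block_device *bdev)
{
	struct super_block *sb = get_super(bdev);
	int ret = 0;

	if (sb) {
		ret = sync_filesystem(sb);	/* safe: sb cannot go away */
		drop_super(sb);			/* drops s_umount + reference */
	}
	return ret;
}
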
/**
 *	get_super_thawed - get thawed superblock of a device
 *	@bdev: device to get the superblock for
 *
 *	Scans the superblock list and finds the superblock of the file system
 *	mounted on the device. The superblock is returned once it is thawed
 *	(or immediately if it was not frozen). %NULL is returned if no match
 *	is found.
 */
struct super_block *get_super_thawed(struct block_device *bdev)
{
	while (1) {
		struct super_block *s = get_super(bdev);
		if (!s || s->s_writers.frozen == SB_UNFROZEN)
			return s;
		up_read(&s->s_umount);
		wait_event(s->s_writers.wait_unfrozen,
			   s->s_writers.frozen == SB_UNFROZEN);
		put_super(s);
	}
}

EXPORT_SYMBOL(get_super_thawed);

/**
 *	get_active_super - get an active reference to the superblock of a device
 *	@bdev: device to get the superblock for
 *
 *	Scans the superblock list and finds the superblock of the file system
 *	mounted on the device given.  Returns the superblock with an active
 *	reference or %NULL if none was found.
 */
struct super_block *get_active_super(struct block_device *bdev)
{
	struct super_block *sb;

	if (!bdev)
		return NULL;

restart:
	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (hlist_unhashed(&sb->s_instances))
			continue;
		if (sb->s_bdev == bdev) {
			if (!grab_super(sb))
				goto restart;
			up_write(&sb->s_umount);
			return sb;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}

struct super_block *user_get_super(dev_t dev)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
rescan:
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (hlist_unhashed(&sb->s_instances))
			continue;
		if (sb->s_dev == dev) {
			sb->s_count++;
			spin_unlock(&sb_lock);
			down_read(&sb->s_umount);
			/* still alive? */
			if (sb->s_root && (sb->s_flags & MS_BORN))
				return sb;
			up_read(&sb->s_umount);
			/* nope, got unmounted */
			spin_lock(&sb_lock);
			__put_super(sb);
			goto rescan;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}

/**
 *	do_remount_sb - asks filesystem to change mount options.
 *	@sb:	superblock in question
 *	@flags:	numeric part of options
 *	@data:	the rest of options
 *	@force: whether or not to force the change
 *
 *	Alters the mount options of a mounted file system.
 */
int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
{
	int retval;
	int remount_ro;

	if (sb->s_writers.frozen != SB_UNFROZEN)
		return -EBUSY;

#ifdef CONFIG_BLOCK
	if (!(flags & MS_RDONLY) && bdev_read_only(sb->s_bdev))
		return -EACCES;
#endif

	if (flags & MS_RDONLY)
		acct_auto_close(sb);
	shrink_dcache_sb(sb);
	sync_filesystem(sb);

	remount_ro = (flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY);

	/* If we are remounting RDONLY and current sb is read/write,
	   make sure there are no rw files opened */
	if (remount_ro) {
		if (force) {
			sb->s_readonly_remount = 1;
			smp_wmb();
		} else {
			retval = sb_prepare_remount_readonly(sb);
			if (retval)
				return retval;
		}
	}

	if (sb->s_op->remount_fs) {
		retval = sb->s_op->remount_fs(sb, &flags, data);
		if (retval) {
			if (!force)
				goto cancel_readonly;
			/* If forced remount, go ahead despite any errors */
			WARN(1, "forced remount of a %s fs returned %i\n",
			     sb->s_type->name, retval);
		}
	}
	sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK);
	/* Needs to be ordered wrt mnt_is_readonly() */
	smp_wmb();
	sb->s_readonly_remount = 0;

	/*
	 * Some filesystems modify their metadata via some other path than the
	 * bdev buffer cache (eg. use a private mapping, or directories in
	 * pagecache, etc). Also file data modifications go via their own
	 * mappings. So if we try to mount readonly then copy the filesystem
	 * from bdev, we could get stale data, so invalidate it to give a best
	 * effort at coherency.
	 */
	if (remount_ro && sb->s_bdev)
		invalidate_bdev(sb->s_bdev);
	return 0;

cancel_readonly:
	sb->s_readonly_remount = 0;
	return retval;
}

static void do_emergency_remount(struct work_struct *work)
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (hlist_unhashed(&sb->s_instances))
			continue;
		sb->s_count++;
		spin_unlock(&sb_lock);
		down_write(&sb->s_umount);
		if (sb->s_root && sb->s_bdev && (sb->s_flags & MS_BORN) &&
		    !(sb->s_flags & MS_RDONLY)) {
			/*
			 * What lock protects sb->s_flags??
			 */
			do_remount_sb(sb, MS_RDONLY, NULL, 1);
		}
		up_write(&sb->s_umount);

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
	kfree(work);
	printk("Emergency Remount complete\n");
}

void emergency_remount(void)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_emergency_remount);
		schedule_work(work);
	}
}

/*
 * Unnamed block devices are dummy devices used by virtual
 * filesystems which don't use real block-devices.  -- jrs
 */

static DEFINE_IDA(unnamed_dev_ida);
static DEFINE_SPINLOCK(unnamed_dev_lock);/* protects the above */
static int unnamed_dev_start = 0; /* don't bother trying below it */

int get_anon_bdev(dev_t *p)
{
	int dev;
	int error;

 retry:
	if (ida_pre_get(&unnamed_dev_ida, GFP_ATOMIC) == 0)
		return -ENOMEM;
	spin_lock(&unnamed_dev_lock);
	error = ida_get_new_above(&unnamed_dev_ida, unnamed_dev_start, &dev);
	if (!error)
		unnamed_dev_start = dev + 1;
	spin_unlock(&unnamed_dev_lock);
	if (error == -EAGAIN)
		/* We raced and lost with another CPU. */
		goto retry;
	else if (error)
		return -EAGAIN;

	if (dev == (1 << MINORBITS)) {
		spin_lock(&unnamed_dev_lock);
		ida_remove(&unnamed_dev_ida, dev);
		if (unnamed_dev_start > dev)
			unnamed_dev_start = dev;
		spin_unlock(&unnamed_dev_lock);
		return -EMFILE;
	}
	*p = MKDEV(0, dev & MINORMASK);
	return 0;
}
EXPORT_SYMBOL(get_anon_bdev);

void free_anon_bdev(dev_t dev)
{
	int slot = MINOR(dev);
	spin_lock(&unnamed_dev_lock);
	ida_remove(&unnamed_dev_ida, slot);
	if (slot < unnamed_dev_start)
		unnamed_dev_start = slot;
	spin_unlock(&unnamed_dev_lock);
}
EXPORT_SYMBOL(free_anon_bdev);

int set_anon_super(struct super_block *s, void *data)
{
	int error = get_anon_bdev(&s->s_dev);
	if (!error)
		s->s_bdi = &noop_backing_dev_info;
	return error;
}

EXPORT_SYMBOL(set_anon_super);

void kill_anon_super(struct super_block *sb)
{
	dev_t dev = sb->s_dev;
	generic_shutdown_super(sb);
	free_anon_bdev(dev);
}

EXPORT_SYMBOL(kill_anon_super);

void kill_litter_super(struct super_block *sb)
{
	if (sb->s_root)
		d_genocide(sb->s_root);
	kill_anon_super(sb);
}

EXPORT_SYMBOL(kill_litter_super);

static int ns_test_super(struct super_block *sb, void *data)
{
	return sb->s_fs_info == data;
}

static int ns_set_super(struct super_block *sb, void *data)
{
	sb->s_fs_info = data;
	return set_anon_super(sb, NULL);
}

struct dentry *mount_ns(struct file_system_type *fs_type, int flags,
	void *data, int (*fill_super)(struct super_block *, void *, int))
{
	struct super_block *sb;

	sb = sget(fs_type, ns_test_super, ns_set_super, flags, data);
	if (IS_ERR(sb))
		return ERR_CAST(sb);

	if (!sb->s_root) {
		int err;
		err = fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
		if (err) {
			deactivate_locked_super(sb);
			return ERR_PTR(err);
		}

		sb->s_flags |= MS_ACTIVE;
	}

	return dget(sb->s_root);
}

EXPORT_SYMBOL(mount_ns);

static int set_bdev_super(struct super_block *s, void *data)
{
	s->s_bdev = data;
	s->s_dev = s->s_bdev->bd_dev;

	/*
	 * We set the bdi here to the queue backing, file systems can
	 * overwrite this in ->fill_super()
	 */
	s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info;
	return 0;
}

static int test_bdev_super(struct super_block *s, void *data)
{
	return (void *)s->s_bdev == data;
}

struct dentry *mount_bdev(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	struct block_device *bdev;
	struct super_block *s;
	fmode_t mode = FMODE_READ | FMODE_EXCL;
	int error = 0;

	if (!(flags & MS_RDONLY))
		mode |= FMODE_WRITE;

	bdev = blkdev_get_by_path(dev_name, mode, fs_type);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);

	/*
	 * once the super is inserted into the list by sget, s_umount
	 * will protect the lockfs code from trying to start a snapshot
	 * while we are mounting
	 */
	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (bdev->bd_fsfreeze_count > 0) {
		mutex_unlock(&bdev->bd_fsfreeze_mutex);
		error = -EBUSY;
		goto error_bdev;
	}
	s = sget(fs_type, test_bdev_super, set_bdev_super, flags | MS_NOSEC,
		 bdev);
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	if (IS_ERR(s))
		goto error_s;

	if (s->s_root) {
		if ((flags ^ s->s_flags) & MS_RDONLY) {
			deactivate_locked_super(s);
			error = -EBUSY;
			goto error_bdev;
		}

		/*
		 * s_umount nests inside bd_mutex during
		 * __invalidate_device().  blkdev_put() acquires
		 * bd_mutex and can't be called under s_umount.  Drop
		 * s_umount temporarily.  This is safe as we're
		 * holding an active reference.
		 */
		up_write(&s->s_umount);
		blkdev_put(bdev, mode);
		down_write(&s->s_umount);
	} else {
		char b[BDEVNAME_SIZE];

		s->s_mode = mode;
		strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
		sb_set_blocksize(s, block_size(bdev));
		error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
		if (error) {
			deactivate_locked_super(s);
			goto error;
		}

		s->s_flags |= MS_ACTIVE;
		bdev->bd_super = s;
	}

	return dget(s->s_root);

error_s:
	error = PTR_ERR(s);
error_bdev:
	blkdev_put(bdev, mode);
error:
	return ERR_PTR(error);
}
EXPORT_SYMBOL(mount_bdev);

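/*
 * Illustrative sketch, not part of this file: a block-device filesystem
 * normally wires mount_bdev()/kill_block_super() into its file_system_type
 * and only supplies a fill_super() callback that reads the on-disk super and
 * sets s_root.  All "examplefs" names below are hypothetical.
 */
static int examplefs_fill_super(struct super_block *sb, void *data, int silent)
{
	/* a real implementation would read and validate the superblock here */
	return -EINVAL;
}

static struct dentry *examplefs_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, examplefs_fill_super);
}

static struct file_system_type examplefs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "examplefs",
	.mount		= examplefs_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
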
void kill_block_super(struct super_block *sb)
{
	struct block_device *bdev = sb->s_bdev;
	fmode_t mode = sb->s_mode;

	bdev->bd_super = NULL;
	generic_shutdown_super(sb);
	sync_blockdev(bdev);
	WARN_ON_ONCE(!(mode & FMODE_EXCL));
	blkdev_put(bdev, mode | FMODE_EXCL);
}

EXPORT_SYMBOL(kill_block_super);

struct dentry *mount_nodev(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	int error;
	struct super_block *s = sget(fs_type, NULL, set_anon_super, flags, NULL);

	if (IS_ERR(s))
		return ERR_CAST(s);

	error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
	if (error) {
		deactivate_locked_super(s);
		return ERR_PTR(error);
	}
	s->s_flags |= MS_ACTIVE;
	return dget(s->s_root);
}
EXPORT_SYMBOL(mount_nodev);

static int compare_single(struct super_block *s, void *p)
{
	return 1;
}

struct dentry *mount_single(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	struct super_block *s;
	int error;

	s = sget(fs_type, compare_single, set_anon_super, flags, NULL);
	if (IS_ERR(s))
		return ERR_CAST(s);
	if (!s->s_root) {
		error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
		if (error) {
			deactivate_locked_super(s);
			return ERR_PTR(error);
		}
		s->s_flags |= MS_ACTIVE;
	} else {
		do_remount_sb(s, flags, data, 0);
	}
	return dget(s->s_root);
}
EXPORT_SYMBOL(mount_single);

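/*
 * Illustrative sketch, not part of this file: a single-instance virtual
 * filesystem (in the spirit of debugfs) can use mount_single() so that every
 * mount request shares one superblock; the first mount runs fill_super(),
 * later mounts just remount the existing instance.  All "exampleonefs"
 * names are hypothetical.
 */
static int exampleonefs_fill_super(struct super_block *sb, void *data, int silent)
{
	/* a real implementation would create the root inode and dentry here */
	return -ENOMEM;
}

static struct dentry *exampleonefs_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_single(fs_type, flags, data, exampleonefs_fill_super);
}

static struct file_system_type exampleonefs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "exampleonefs",
	.mount		= exampleonefs_mount,
	.kill_sb	= kill_litter_super,
};
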
struct dentry *
mount_fs(struct file_system_type *type, int flags, const char *name, void *data)
{
	struct dentry *root;
	struct super_block *sb;
	char *secdata = NULL;
	int error = -ENOMEM;

	if (data && !(type->fs_flags & FS_BINARY_MOUNTDATA)) {
		secdata = alloc_secdata();
		if (!secdata)
			goto out;

		error = security_sb_copy_data(data, secdata);
		if (error)
			goto out_free_secdata;
	}

	root = type->mount(type, flags, name, data);
	if (IS_ERR(root)) {
		error = PTR_ERR(root);
		goto out_free_secdata;
	}
	sb = root->d_sb;
	BUG_ON(!sb);
	WARN_ON(!sb->s_bdi);
	WARN_ON(sb->s_bdi == &default_backing_dev_info);
	sb->s_flags |= MS_BORN;

	error = security_sb_kern_mount(sb, flags, secdata);
	if (error)
		goto out_sb;

	/*
	 * filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE
	 * but s_maxbytes was an unsigned long long for many releases. Throw
	 * this warning for a little while to try and catch filesystems that
	 * violate this rule.
	 */
	WARN((sb->s_maxbytes < 0), "%s set sb->s_maxbytes to "
		"negative value (%lld)\n", type->name, sb->s_maxbytes);

	up_write(&sb->s_umount);
	free_secdata(secdata);
	return root;
out_sb:
	dput(root);
	deactivate_locked_super(sb);
out_free_secdata:
	free_secdata(secdata);
out:
	return ERR_PTR(error);
}

/*
 * This is an internal function, please use sb_end_{write,pagefault,intwrite}
 * instead.
 */
void __sb_end_write(struct super_block *sb, int level)
{
	percpu_counter_dec(&sb->s_writers.counter[level-1]);
	/*
	 * Make sure s_writers are updated before we wake up waiters in
	 * freeze_super().
	 */
	smp_mb();
	if (waitqueue_active(&sb->s_writers.wait))
		wake_up(&sb->s_writers.wait);
	rwsem_release(&sb->s_writers.lock_map[level-1], 1, _RET_IP_);
}
EXPORT_SYMBOL(__sb_end_write);

#ifdef CONFIG_LOCKDEP
/*
 * We want lockdep to tell us about possible deadlocks with freezing but
 * it's a bit tricky to properly instrument it. Getting a freeze protection
 * works as getting a read lock but there are subtle problems. XFS for example
 * gets freeze protection on internal level twice in some cases, which is OK
 * only because we already hold a freeze protection also on higher level. Due
 * to these cases we have to tell lockdep we are doing trylock when we
 * already hold a freeze protection for a higher freeze level.
 */
static void acquire_freeze_lock(struct super_block *sb, int level, bool trylock,
				unsigned long ip)
{
	int i;

	if (!trylock) {
		for (i = 0; i < level - 1; i++)
			if (lock_is_held(&sb->s_writers.lock_map[i])) {
				trylock = true;
				break;
			}
	}
	rwsem_acquire_read(&sb->s_writers.lock_map[level-1], 0, trylock, ip);
}
#endif

/*
 * This is an internal function, please use sb_start_{write,pagefault,intwrite}
 * instead.
 */
int __sb_start_write(struct super_block *sb, int level, bool wait)
{
retry:
	if (unlikely(sb->s_writers.frozen >= level)) {
		if (!wait)
			return 0;
		wait_event(sb->s_writers.wait_unfrozen,
			   sb->s_writers.frozen < level);
	}

#ifdef CONFIG_LOCKDEP
	acquire_freeze_lock(sb, level, !wait, _RET_IP_);
#endif
	percpu_counter_inc(&sb->s_writers.counter[level-1]);
	/*
	 * Make sure counter is updated before we check for frozen.
	 * freeze_super() first sets frozen and then checks the counter.
	 */
	smp_mb();
	if (unlikely(sb->s_writers.frozen >= level)) {
		__sb_end_write(sb, level);
		goto retry;
	}
	return 1;
}
EXPORT_SYMBOL(__sb_start_write);

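/*
 * Illustrative sketch, not part of this file: write paths take freeze
 * protection through the sb_start_write()/sb_end_write() wrappers declared in
 * linux/fs.h rather than calling __sb_start_write() directly.  The helper
 * below is a hypothetical example of bracketing a modification.
 */
static int example_modify_fs(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	int ret;

	sb_start_write(sb);	/* blocks if the fs is frozen at SB_FREEZE_WRITE */
	ret = 0;		/* a real caller would dirty data/metadata here */
	sb_end_write(sb);
	return ret;
}
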
/**
 * sb_wait_write - wait until all writers to given file system finish
 * @sb: the super for which we wait
 * @level: type of writers we wait for (normal vs page fault)
 *
 * This function waits until there are no writers of given type to given file
 * system. Caller of this function should make sure there can be no new writers
 * of type @level before calling this function. Otherwise this function can
 * livelock.
 */
static void sb_wait_write(struct super_block *sb, int level)
{
	s64 writers;

	/*
	 * We just cycle-through lockdep here so that it does not complain
	 * about returning with lock to userspace
	 */
	rwsem_acquire(&sb->s_writers.lock_map[level-1], 0, 0, _THIS_IP_);
	rwsem_release(&sb->s_writers.lock_map[level-1], 1, _THIS_IP_);

	do {
		DEFINE_WAIT(wait);

		/*
		 * We use a barrier in prepare_to_wait() to separate setting
		 * of frozen and checking of the counter
		 */
		prepare_to_wait(&sb->s_writers.wait, &wait,
				TASK_UNINTERRUPTIBLE);

		writers = percpu_counter_sum(&sb->s_writers.counter[level-1]);
		if (writers)
			schedule();

		finish_wait(&sb->s_writers.wait, &wait);
	} while (writers);
}

/**
 * freeze_super - lock the filesystem and force it into a consistent state
 * @sb: the super to lock
 *
 * Syncs the super to make sure the filesystem is consistent and calls the fs's
 * freeze_fs.  Subsequent calls to this without first thawing the fs will return
 * -EBUSY.
 *
 * During this function, sb->s_writers.frozen goes through these values:
 *
 * SB_UNFROZEN: File system is normal, all writes progress as usual.
 *
 * SB_FREEZE_WRITE: The file system is in the process of being frozen.  New
 * writes should be blocked, though page faults are still allowed. We wait for
 * all writes to complete and then proceed to the next stage.
 *
 * SB_FREEZE_PAGEFAULT: Freezing continues. Now also page faults are blocked
 * but internal fs threads can still modify the filesystem (although they
 * should not dirty new pages or inodes), writeback can run etc. After waiting
 * for all running page faults we sync the filesystem which will clean all
 * dirty pages and inodes (no new dirty pages or inodes can be created when
 * sync is running).
 *
 * SB_FREEZE_FS: The file system is frozen. Now all internal sources of fs
 * modification are blocked (e.g. XFS preallocation truncation on inode
 * reclaim). This is usually implemented by blocking new transactions for
 * filesystems that have them and need this additional guard. After all
 * internal writers are finished we call ->freeze_fs() to finish filesystem
 * freezing. Then we transition to SB_FREEZE_COMPLETE state. This state is
 * mostly auxiliary for filesystems to verify they do not modify frozen fs.
 *
 * sb->s_writers.frozen is protected by sb->s_umount.
 */
int freeze_super(struct super_block *sb)
{
	int ret;

	atomic_inc(&sb->s_active);
	down_write(&sb->s_umount);
	if (sb->s_writers.frozen != SB_UNFROZEN) {
		deactivate_locked_super(sb);
		return -EBUSY;
	}

	if (!(sb->s_flags & MS_BORN)) {
		up_write(&sb->s_umount);
		return 0;	/* sic - it's "nothing to do" */
	}

	if (sb->s_flags & MS_RDONLY) {
		/* Nothing to do really... */
		sb->s_writers.frozen = SB_FREEZE_COMPLETE;
		up_write(&sb->s_umount);
		return 0;
	}

	/* From now on, no new normal writers can start */
	sb->s_writers.frozen = SB_FREEZE_WRITE;
	smp_wmb();

	/* Release s_umount to preserve sb_start_write -> s_umount ordering */
	up_write(&sb->s_umount);

	sb_wait_write(sb, SB_FREEZE_WRITE);

	/* Now we go and block page faults... */
	down_write(&sb->s_umount);
	sb->s_writers.frozen = SB_FREEZE_PAGEFAULT;
	smp_wmb();

	sb_wait_write(sb, SB_FREEZE_PAGEFAULT);

	/* All writers are done so after syncing there won't be dirty data */
	sync_filesystem(sb);

	/* Now wait for internal filesystem counter */
	sb->s_writers.frozen = SB_FREEZE_FS;
	smp_wmb();
	sb_wait_write(sb, SB_FREEZE_FS);

	if (sb->s_op->freeze_fs) {
		ret = sb->s_op->freeze_fs(sb);
		if (ret) {
			printk(KERN_ERR
				"VFS:Filesystem freeze failed\n");
			sb->s_writers.frozen = SB_UNFROZEN;
			smp_wmb();
			wake_up(&sb->s_writers.wait_unfrozen);
			deactivate_locked_super(sb);
			return ret;
		}
	}
	/*
	 * For debugging purposes so that fs can warn if it sees write activity
	 * when frozen is set to SB_FREEZE_COMPLETE, and for thaw_super().
	 */
	sb->s_writers.frozen = SB_FREEZE_COMPLETE;
	up_write(&sb->s_umount);
	return 0;
}
EXPORT_SYMBOL(freeze_super);

/**
 * thaw_super -- unlock filesystem
 * @sb: the super to thaw
 *
 * Unlocks the filesystem and marks it writeable again after freeze_super().
 */
int thaw_super(struct super_block *sb)
{
	int error;

	down_write(&sb->s_umount);
	if (sb->s_writers.frozen != SB_FREEZE_COMPLETE) {
		up_write(&sb->s_umount);
		return -EINVAL;
	}

	if (sb->s_flags & MS_RDONLY)
		goto out;

	if (sb->s_op->unfreeze_fs) {
		error = sb->s_op->unfreeze_fs(sb);
		if (error) {
			printk(KERN_ERR
				"VFS:Filesystem thaw failed\n");
			up_write(&sb->s_umount);
			return error;
		}
	}

out:
	sb->s_writers.frozen = SB_UNFROZEN;
	smp_wmb();
	wake_up(&sb->s_writers.wait_unfrozen);
	deactivate_locked_super(sb);

	return 0;
}
EXPORT_SYMBOL(thaw_super);
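
/*
 * Illustrative sketch, not part of this file: a path that needs a quiescent,
 * consistent filesystem (for example before taking a snapshot) brackets the
 * work between freeze_super() and thaw_super().  The helper name and the
 * snapshot step below are hypothetical.
 */
static int example_snapshot(struct super_block *sb)
{
	int ret;

	ret = freeze_super(sb);		/* waits for writers, calls ->freeze_fs */
	if (ret)
		return ret;

	/* the fs is now in SB_FREEZE_COMPLETE; take the snapshot here */

	return thaw_super(sb);		/* calls ->unfreeze_fs, wakes writers */
}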