ext4: grow the s_flex_groups array as needed when resizing
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index eb7aa3e4ef05caf136f24e0565a28e6d1e0a1539..b8de488889d6b256d82d18c6f057253d4158e6c9 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -74,7 +74,6 @@ static const char *ext4_decode_error(struct super_block *sb, int errno,
 static int ext4_remount(struct super_block *sb, int *flags, char *data);
 static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
 static int ext4_unfreeze(struct super_block *sb);
-static void ext4_write_super(struct super_block *sb);
 static int ext4_freeze(struct super_block *sb);
 static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
                       const char *dev_name, void *data);
@@ -332,33 +331,17 @@ static void ext4_put_nojournal(handle_t *handle)
  * journal_end calls result in the superblock being marked dirty, so
  * that sync() will call the filesystem's write_super callback if
  * appropriate.
- *
- * To avoid j_barrier hold in userspace when a user calls freeze(),
- * ext4 prevents a new handle from being started by s_frozen, which
- * is in an upper layer.
  */
 handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks)
 {
        journal_t *journal;
-       handle_t  *handle;
 
        trace_ext4_journal_start(sb, nblocks, _RET_IP_);
        if (sb->s_flags & MS_RDONLY)
                return ERR_PTR(-EROFS);
 
+       WARN_ON(sb->s_writers.frozen == SB_FREEZE_COMPLETE);
        journal = EXT4_SB(sb)->s_journal;
-       handle = ext4_journal_current_handle();
-
-       /*
-        * If a handle has been started, it should be allowed to
-        * finish, otherwise deadlock could happen between freeze
-        * and others(e.g. truncate) due to the restart of the
-        * journal handle if the filesystem is forzen and active
-        * handles are not stopped.
-        */
-       if (!handle)
-               vfs_check_frozen(sb, SB_FREEZE_TRANS);
-
        if (!journal)
                return ext4_get_nojournal();
        /*
@@ -878,7 +861,6 @@ static void ext4_put_super(struct super_block *sb)
        flush_workqueue(sbi->dio_unwritten_wq);
        destroy_workqueue(sbi->dio_unwritten_wq);
 
-       lock_super(sb);
        if (sbi->s_journal) {
                err = jbd2_journal_destroy(sbi->s_journal);
                sbi->s_journal = NULL;
@@ -896,7 +878,7 @@ static void ext4_put_super(struct super_block *sb)
                EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
                es->s_state = cpu_to_le16(sbi->s_mount_state);
        }
-       if (sb->s_dirt || !(sb->s_flags & MS_RDONLY))
+       if (!(sb->s_flags & MS_RDONLY))
                ext4_commit_super(sb, 1);
 
        if (sbi->s_proc) {
@@ -945,7 +927,6 @@ static void ext4_put_super(struct super_block *sb)
         * Now that we are completely done shutting down the
         * superblock, we need to actually destroy the kobject.
         */
-       unlock_super(sb);
        kobject_put(&sbi->s_kobj);
        wait_for_completion(&sbi->s_kobj_unregister);
        if (sbi->s_chksum_driver)
@@ -976,6 +957,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
        ei->i_reserved_meta_blocks = 0;
        ei->i_allocated_meta_blocks = 0;
        ei->i_da_metadata_calc_len = 0;
+       ei->i_da_metadata_calc_last_lblock = 0;
        spin_lock_init(&(ei->i_block_reservation_lock));
 #ifdef CONFIG_QUOTA
        ei->i_reserved_quota = 0;
@@ -1137,12 +1119,18 @@ static int ext4_mark_dquot_dirty(struct dquot *dquot);
 static int ext4_write_info(struct super_block *sb, int type);
 static int ext4_quota_on(struct super_block *sb, int type, int format_id,
                         struct path *path);
+static int ext4_quota_on_sysfile(struct super_block *sb, int type,
+                                int format_id);
 static int ext4_quota_off(struct super_block *sb, int type);
+static int ext4_quota_off_sysfile(struct super_block *sb, int type);
 static int ext4_quota_on_mount(struct super_block *sb, int type);
 static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
                               size_t len, loff_t off);
 static ssize_t ext4_quota_write(struct super_block *sb, int type,
                                const char *data, size_t len, loff_t off);
+static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
+                            unsigned int flags);
+static int ext4_enable_quotas(struct super_block *sb);
 
 static const struct dquot_operations ext4_quota_operations = {
        .get_reserved_space = ext4_get_reserved_space,
@@ -1164,6 +1152,16 @@ static const struct quotactl_ops ext4_qctl_operations = {
        .get_dqblk      = dquot_get_dqblk,
        .set_dqblk      = dquot_set_dqblk
 };
+
+static const struct quotactl_ops ext4_qctl_sysfile_operations = {
+       .quota_on_meta  = ext4_quota_on_sysfile,
+       .quota_off      = ext4_quota_off_sysfile,
+       .quota_sync     = dquot_quota_sync,
+       .get_info       = dquot_get_dqinfo,
+       .set_info       = dquot_set_dqinfo,
+       .get_dqblk      = dquot_get_dqblk,
+       .set_dqblk      = dquot_set_dqblk
+};
 #endif
 
 static const struct super_operations ext4_sops = {
@@ -1194,7 +1192,6 @@ static const struct super_operations ext4_nojournal_sops = {
        .dirty_inode    = ext4_dirty_inode,
        .drop_inode     = ext4_drop_inode,
        .evict_inode    = ext4_evict_inode,
-       .write_super    = ext4_write_super,
        .put_super      = ext4_put_super,
        .statfs         = ext4_statfs,
        .remount_fs     = ext4_remount,
@@ -1231,6 +1228,7 @@ enum {
        Opt_inode_readahead_blks, Opt_journal_ioprio,
        Opt_dioread_nolock, Opt_dioread_lock,
        Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
+       Opt_max_dir_size_kb,
 };
 
 static const match_table_t tokens = {
@@ -1304,6 +1302,7 @@ static const match_table_t tokens = {
        {Opt_init_itable, "init_itable=%u"},
        {Opt_init_itable, "init_itable"},
        {Opt_noinit_itable, "noinit_itable"},
+       {Opt_max_dir_size_kb, "max_dir_size_kb=%u"},
        {Opt_removed, "check=none"},    /* mount option from ext2/3 */
        {Opt_removed, "nocheck"},       /* mount option from ext2/3 */
        {Opt_removed, "reservation"},   /* mount option from ext2/3 */
@@ -1484,6 +1483,7 @@ static const struct mount_opts {
        {Opt_jqfmt_vfsold, QFMT_VFS_OLD, MOPT_QFMT},
        {Opt_jqfmt_vfsv0, QFMT_VFS_V0, MOPT_QFMT},
        {Opt_jqfmt_vfsv1, QFMT_VFS_V1, MOPT_QFMT},
+       {Opt_max_dir_size_kb, 0, MOPT_GTE0},
        {Opt_err, 0, 0}
 };
 
@@ -1599,6 +1599,8 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
                        if (!args->from)
                                arg = EXT4_DEF_LI_WAIT_MULT;
                        sbi->s_li_wait_mult = arg;
+               } else if (token == Opt_max_dir_size_kb) {
+                       sbi->s_max_dir_size_kb = arg;
                } else if (token == Opt_stripe) {
                        sbi->s_stripe = arg;
                } else if (m->flags & MOPT_DATAJ) {
@@ -1671,7 +1673,7 @@ static int parse_options(char *options, struct super_block *sb,
                 * Initialize args struct so we know whether arg was
                 * found; some options take optional arguments.
                 */
-               args[0].to = args[0].from = 0;
+               args[0].to = args[0].from = NULL;
                token = match_token(p, tokens, args);
                if (handle_mount_opt(sb, p, token, args, journal_devnum,
                                     journal_ioprio, is_remount) < 0)
@@ -1830,6 +1832,8 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
        if (nodefs || (test_opt(sb, INIT_INODE_TABLE) &&
                       (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT)))
                SEQ_OPTS_PRINT("init_itable=%u", sbi->s_li_wait_mult);
+       if (nodefs || sbi->s_max_dir_size_kb)
+               SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb);
 
        ext4_show_quota_options(seq, sb);
        return 0;
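
The max_dir_size_kb plumbing above is the standard table-driven pattern: one
tokens[] entry, one mount_opts[] entry (MOPT_GTE0 rejects negative values), a
store into sbi->s_max_dir_size_kb in handle_mount_opt(), and a matching line
in _ext4_show_options() so the option survives a /proc/mounts round trip. A
minimal userspace sketch of the same parse-and-echo shape (handle_opt and
show_opts are illustrative names, not the kernel's match_token() machinery):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned int opt_max_dir_size_kb;	/* 0 = unlimited (default) */

static int handle_opt(const char *opt)
{
	unsigned long arg;
	char *end;

	if (strncmp(opt, "max_dir_size_kb=", 16) != 0)
		return -1;			/* unknown option */
	arg = strtoul(opt + 16, &end, 10);
	if (*end != '\0')
		return -1;			/* must be a plain number */
	opt_max_dir_size_kb = arg;
	return 0;
}

static void show_opts(FILE *seq)
{
	/* like _ext4_show_options(): emit only non-default values */
	if (opt_max_dir_size_kb)
		fprintf(seq, ",max_dir_size_kb=%u", opt_max_dir_size_kb);
}

int main(void)
{
	if (handle_opt("max_dir_size_kb=512") == 0)
		show_opts(stdout);		/* ",max_dir_size_kb=512" */
	putchar('\n');
	return 0;
}
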
@@ -1921,15 +1925,45 @@ done:
        return res;
 }
 
+int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
+{
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+       struct flex_groups *new_groups;
+       int size;
+
+       if (!sbi->s_log_groups_per_flex)
+               return 0;
+
+       size = ext4_flex_group(sbi, ngroup - 1) + 1;
+       if (size <= sbi->s_flex_groups_allocated)
+               return 0;
+
+       size = roundup_pow_of_two(size * sizeof(struct flex_groups));
+       new_groups = ext4_kvzalloc(size, GFP_KERNEL);
+       if (!new_groups) {
+               ext4_msg(sb, KERN_ERR, "not enough memory for %d flex groups",
+                        size / (int) sizeof(struct flex_groups));
+               return -ENOMEM;
+       }
+
+       if (sbi->s_flex_groups) {
+               memcpy(new_groups, sbi->s_flex_groups,
+                      (sbi->s_flex_groups_allocated *
+                       sizeof(struct flex_groups)));
+               ext4_kvfree(sbi->s_flex_groups);
+       }
+       sbi->s_flex_groups = new_groups;
+       sbi->s_flex_groups_allocated = size / sizeof(struct flex_groups);
+       return 0;
+}
+
 static int ext4_fill_flex_info(struct super_block *sb)
 {
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_group_desc *gdp = NULL;
-       ext4_group_t flex_group_count;
        ext4_group_t flex_group;
        unsigned int groups_per_flex = 0;
-       size_t size;
-       int i;
+       int i, err;
 
        sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
        if (sbi->s_log_groups_per_flex < 1 || sbi->s_log_groups_per_flex > 31) {
@@ -1938,17 +1972,9 @@ static int ext4_fill_flex_info(struct super_block *sb)
        }
        groups_per_flex = 1 << sbi->s_log_groups_per_flex;
 
-       /* We allocate both existing and potentially added groups */
-       flex_group_count = ((sbi->s_groups_count + groups_per_flex - 1) +
-                       ((le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) + 1) <<
-                             EXT4_DESC_PER_BLOCK_BITS(sb))) / groups_per_flex;
-       size = flex_group_count * sizeof(struct flex_groups);
-       sbi->s_flex_groups = ext4_kvzalloc(size, GFP_KERNEL);
-       if (sbi->s_flex_groups == NULL) {
-               ext4_msg(sb, KERN_ERR, "not enough memory for %u flex groups",
-                        flex_group_count);
+       err = ext4_alloc_flex_bg_array(sb, sbi->s_groups_count);
+       if (err)
                goto failed;
-       }
 
        for (i = 0; i < sbi->s_groups_count; i++) {
                gdp = ext4_get_group_desc(sb, i, NULL);
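
ext4_alloc_flex_bg_array() replaces the old mount-time sizing, which
pre-allocated room for every group the reserved GDT blocks could ever
describe, with grow-on-demand: the byte size is rounded up to a power of two,
so a filesystem that is resized repeatedly reallocates the array only
O(log n) times, and populated entries are copied across before the old array
is freed. A userspace model of that policy, with calloc/free standing in for
ext4_kvzalloc/ext4_kvfree and flex_groups reduced to one field:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct flex_groups { unsigned long free_clusters; };

static struct flex_groups *flex;
static size_t flex_allocated;	/* elements, like s_flex_groups_allocated */

static size_t roundup_pow_of_two(size_t n)
{
	size_t r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

static int alloc_flex_bg_array(size_t needed)
{
	struct flex_groups *new_groups;
	size_t size;

	if (needed <= flex_allocated)
		return 0;			/* current array is big enough */

	size = roundup_pow_of_two(needed * sizeof(struct flex_groups));
	new_groups = calloc(1, size);
	if (!new_groups)
		return -1;			/* -ENOMEM in the kernel */
	if (flex) {
		memcpy(new_groups, flex, flex_allocated * sizeof(*flex));
		free(flex);
	}
	flex = new_groups;
	flex_allocated = size / sizeof(struct flex_groups);
	return 0;
}

int main(void)
{
	size_t n;

	/* grow 10 -> 1000 elements; only a few reallocations happen */
	for (n = 10; n <= 1000; n *= 10) {
		alloc_flex_bg_array(n);
		printf("need %zu -> allocated %zu\n", n, flex_allocated);
	}
	free(flex);
	return 0;
}
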
@@ -2535,6 +2561,7 @@ EXT4_RW_ATTR_SBI_UI(mb_order2_req, s_mb_order2_reqs);
 EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request);
 EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc);
 EXT4_RW_ATTR_SBI_UI(max_writeback_mb_bump, s_max_writeback_mb_bump);
+EXT4_RW_ATTR_SBI_UI(extent_max_zeroout_kb, s_extent_max_zeroout_kb);
 EXT4_ATTR(trigger_fs_error, 0200, NULL, trigger_test_error);
 
 static struct attribute *ext4_attrs[] = {
@@ -2550,6 +2577,7 @@ static struct attribute *ext4_attrs[] = {
        ATTR_LIST(mb_stream_req),
        ATTR_LIST(mb_group_prealloc),
        ATTR_LIST(max_writeback_mb_bump),
+       ATTR_LIST(extent_max_zeroout_kb),
        ATTR_LIST(trigger_fs_error),
        NULL,
 };
@@ -2661,6 +2689,16 @@ static int ext4_feature_set_ok(struct super_block *sb, int readonly)
                         "extents feature\n");
                return 0;
        }
+
+#ifndef CONFIG_QUOTA
+       if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA) &&
+           !readonly) {
+               ext4_msg(sb, KERN_ERR,
+                        "Filesystem with quota feature cannot be mounted RDWR "
+                        "without CONFIG_QUOTA");
+               return 0;
+       }
+#endif  /* CONFIG_QUOTA */
        return 1;
 }
 
@@ -2723,6 +2761,7 @@ static int ext4_run_li_request(struct ext4_li_request *elr)
        sb = elr->lr_super;
        ngroups = EXT4_SB(sb)->s_groups_count;
 
+       sb_start_write(sb);
        for (group = elr->lr_next_group; group < ngroups; group++) {
                gdp = ext4_get_group_desc(sb, group, NULL);
                if (!gdp) {
@@ -2749,6 +2788,7 @@ static int ext4_run_li_request(struct ext4_li_request *elr)
                elr->lr_next_sched = jiffies + elr->lr_timeout;
                elr->lr_next_group = group + 1;
        }
+       sb_end_write(sb);
 
        return ret;
 }
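
The sb_start_write()/sb_end_write() pair added above uses the per-superblock
writer counters that replaced s_frozen, and is the reason
ext4_journal_start_sb() earlier in this patch can drop vfs_check_frozen() and
merely WARN_ON() a handle starting on a frozen filesystem. Conceptually the
mechanism behaves like a reader-writer lock: any number of writers hold it
shared, and freeze takes it exclusively. A rough pthread model (illustration
only; the real s_writers uses per-cpu counters and three freeze levels, not a
single rwlock):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t s_writers = PTHREAD_RWLOCK_INITIALIZER;

static void sb_start_write_model(void) { pthread_rwlock_rdlock(&s_writers); }
static void sb_end_write_model(void)   { pthread_rwlock_unlock(&s_writers); }

static void freeze_super_model(void)
{
	pthread_rwlock_wrlock(&s_writers);	/* waits for writers to drain */
	puts("frozen: no new writers can enter");
}

static void thaw_super_model(void)
{
	pthread_rwlock_unlock(&s_writers);
}

static void *lazy_init_worker(void *arg)
{
	(void)arg;
	sb_start_write_model();			/* blocks while frozen */
	puts("zeroing inode tables...");
	sb_end_write_model();
	return NULL;
}

int main(void)
{
	pthread_t t;

	freeze_super_model();
	pthread_create(&t, NULL, lazy_init_worker, NULL);
	thaw_super_model();			/* worker may proceed only now */
	pthread_join(t, NULL);
	return 0;
}
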
@@ -3085,6 +3125,118 @@ static int set_journal_csum_feature_set(struct super_block *sb)
        return ret;
 }
 
+/*
+ * Note: calculating the overhead so we can be compatible with
+ * historical BSD practice is quite difficult in the face of
+ * clusters/bigalloc.  This is because multiple metadata blocks from
+ * different block groups can end up in the same allocation cluster.
+ * Calculating the exact overhead in the face of clustered allocation
+ * requires either O(all block bitmaps) in memory or O(number of block
+ * groups**2) in time.  We will still calculate the overhead for
+ * older file systems --- and if we come across a bigalloc file
+ * system with zero in s_overhead_clusters the estimate will be close to
+ * correct especially for very large cluster sizes --- but for newer
+ * file systems, it's better to calculate this figure once at mkfs
+ * time, and store it in the superblock.  If the superblock value is
+ * present (even for non-bigalloc file systems), we will use it.
+ */
+static int count_overhead(struct super_block *sb, ext4_group_t grp,
+                         char *buf)
+{
+       struct ext4_sb_info     *sbi = EXT4_SB(sb);
+       struct ext4_group_desc  *gdp;
+       ext4_fsblk_t            first_block, last_block, b;
+       ext4_group_t            i, ngroups = ext4_get_groups_count(sb);
+       int                     s, j, count = 0;
+
+       if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_BIGALLOC))
+               return (ext4_bg_has_super(sb, grp) + ext4_bg_num_gdb(sb, grp) +
+                       sbi->s_itb_per_group + 2);
+
+       first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
+               (grp * EXT4_BLOCKS_PER_GROUP(sb));
+       last_block = first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1;
+       for (i = 0; i < ngroups; i++) {
+               gdp = ext4_get_group_desc(sb, i, NULL);
+               b = ext4_block_bitmap(sb, gdp);
+               if (b >= first_block && b <= last_block) {
+                       ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
+                       count++;
+               }
+               b = ext4_inode_bitmap(sb, gdp);
+               if (b >= first_block && b <= last_block) {
+                       ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
+                       count++;
+               }
+               b = ext4_inode_table(sb, gdp);
+               if (b >= first_block && b + sbi->s_itb_per_group <= last_block)
+                       for (j = 0; j < sbi->s_itb_per_group; j++, b++) {
+                               int c = EXT4_B2C(sbi, b - first_block);
+                               ext4_set_bit(c, buf);
+                               count++;
+                       }
+               if (i != grp)
+                       continue;
+               s = 0;
+               if (ext4_bg_has_super(sb, grp)) {
+                       ext4_set_bit(s++, buf);
+                       count++;
+               }
+               for (j = ext4_bg_num_gdb(sb, grp); j > 0; j--) {
+                       ext4_set_bit(EXT4_B2C(sbi, s++), buf);
+                       count++;
+               }
+       }
+       if (!count)
+               return 0;
+       return EXT4_CLUSTERS_PER_GROUP(sb) -
+               ext4_count_free(buf, EXT4_CLUSTERS_PER_GROUP(sb) / 8);
+}
+
+/*
+ * Compute the overhead and stash it in sbi->s_overhead
+ */
+int ext4_calculate_overhead(struct super_block *sb)
+{
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+       struct ext4_super_block *es = sbi->s_es;
+       ext4_group_t i, ngroups = ext4_get_groups_count(sb);
+       ext4_fsblk_t overhead = 0;
+       char *buf = (char *) get_zeroed_page(GFP_KERNEL);
+
+       /* get_zeroed_page() already returned a zeroed page; check it first */
+       if (!buf)
+               return -ENOMEM;
+
+       /*
+        * Compute the overhead (FS structures).  This is constant
+        * for a given filesystem unless the number of block groups
+        * changes so we cache the previous value until it does.
+        */
+
+       /*
+        * All of the blocks before first_data_block are overhead
+        */
+       overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block));
+
+       /*
+        * Add the overhead found in each block group
+        */
+       for (i = 0; i < ngroups; i++) {
+               int blks;
+
+               blks = count_overhead(sb, i, buf);
+               overhead += blks;
+               if (blks)
+                       memset(buf, 0, PAGE_SIZE);
+               cond_resched();
+       }
+       sbi->s_overhead = overhead;
+       smp_wmb();
+       free_page((unsigned long) buf);
+       return 0;
+}
+
 static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 {
        char *orig_data = kstrdup(data, GFP_KERNEL);
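
count_overhead() copes with bigalloc by setting one bit per cluster in a
scratch page rather than summing blocks: metadata blocks from different
groups can land in the same allocation cluster, and a plain sum would count
that cluster more than once, which is exactly the problem the comment above
describes. A userspace sketch of the bitmap trick, with EXT4_B2C() reduced
to a shift and made-up block numbers:

#include <stdio.h>
#include <string.h>

#define CLUSTER_BITS	2			/* 4 blocks per cluster */
#define B2C(b)		((b) >> CLUSTER_BITS)	/* like EXT4_B2C() */
#define NCLUSTERS	64

static unsigned char bitmap[NCLUSTERS / 8];

static void set_cluster_bit(unsigned int c)
{
	bitmap[c / 8] |= 1u << (c % 8);
}

static int count_set_bits(void)
{
	unsigned int c;
	int n = 0;

	for (c = 0; c < NCLUSTERS; c++)
		if (bitmap[c / 8] & (1u << (c % 8)))
			n++;
	return n;
}

int main(void)
{
	/* three metadata blocks; two of them share cluster 0 */
	unsigned int meta_blocks[] = { 0, 1, 9 };
	int i, naive = 0;

	memset(bitmap, 0, sizeof(bitmap));
	for (i = 0; i < 3; i++) {
		set_cluster_bit(B2C(meta_blocks[i]));
		naive++;
	}
	printf("naive block count: %d, overhead clusters: %d\n",
	       naive, count_set_bits());	/* prints 3 vs. 2 */
	return 0;
}
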
@@ -3626,6 +3778,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 
        sbi->s_stripe = ext4_get_stripe_size(sbi);
        sbi->s_max_writeback_mb_bump = 128;
+       sbi->s_extent_max_zeroout_kb = 32;
 
        /*
         * set up enough so that it can read an inode
@@ -3640,6 +3793,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 #ifdef CONFIG_QUOTA
        sb->s_qcop = &ext4_qctl_operations;
        sb->dq_op = &ext4_quota_operations;
+
+       if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA)) {
+               /* Use qctl operations for hidden quota files. */
+               sb->s_qcop = &ext4_qctl_sysfile_operations;
+       }
 #endif
        memcpy(sb->s_uuid, es->s_uuid, sizeof(es->s_uuid));
 
@@ -3734,6 +3892,18 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        percpu_counter_set(&sbi->s_dirtyclusters_counter, 0);
 
 no_journal:
+       /*
+        * Get the # of file system overhead blocks from the
+        * superblock if present.
+        */
+       if (es->s_overhead_clusters)
+               sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
+       else {
+               ret = ext4_calculate_overhead(sb);
+               if (ret)
+                       goto failed_mount_wq;
+       }
+
        /*
         * The maximum number of concurrent works can be high and
         * concurrency isn't really necessary.  Limit it to 1.
@@ -3840,6 +4010,16 @@ no_journal:
        } else
                descr = "out journal";
 
+#ifdef CONFIG_QUOTA
+       /* Enable quota usage during mount. */
+       if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA) &&
+           !(sb->s_flags & MS_RDONLY)) {
+               ret = ext4_enable_quotas(sb);
+               if (ret)
+                       goto failed_mount7;
+       }
+#endif  /* CONFIG_QUOTA */
+
        ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
                 "Opts: %s%s%s", descr, sbi->s_es->s_mount_opts,
                 *sbi->s_es->s_mount_opts ? "; " : "", orig_data);
@@ -4203,7 +4383,6 @@ static int ext4_commit_super(struct super_block *sb, int sync)
        es->s_free_inodes_count =
                cpu_to_le32(percpu_counter_sum_positive(
                                &EXT4_SB(sb)->s_freeinodes_counter));
-       sb->s_dirt = 0;
        BUFFER_TRACE(sbh, "marking dirty");
        ext4_superblock_csum_set(sb, es);
        mark_buffer_dirty(sbh);
@@ -4286,6 +4465,7 @@ static void ext4_clear_journal_err(struct super_block *sb,
                ext4_commit_super(sb, 1);
 
                jbd2_journal_clear_err(journal);
+               jbd2_journal_update_sb_errno(journal);
        }
 }
 
@@ -4302,21 +4482,12 @@ int ext4_force_commit(struct super_block *sb)
                return 0;
 
        journal = EXT4_SB(sb)->s_journal;
-       if (journal) {
-               vfs_check_frozen(sb, SB_FREEZE_TRANS);
+       if (journal)
                ret = ext4_journal_force_commit(journal);
-       }
 
        return ret;
 }
 
-static void ext4_write_super(struct super_block *sb)
-{
-       lock_super(sb);
-       ext4_commit_super(sb, 1);
-       unlock_super(sb);
-}
-
 static int ext4_sync_fs(struct super_block *sb, int wait)
 {
        int ret = 0;
@@ -4325,6 +4496,11 @@ static int ext4_sync_fs(struct super_block *sb, int wait)
 
        trace_ext4_sync_fs(sb, wait);
        flush_workqueue(sbi->dio_unwritten_wq);
+       /*
+        * Writeback quota in non-journalled quota case - journalled quota has
+        * no dirty dquots
+        */
+       dquot_writeback_dquots(sb, -1);
        if (jbd2_journal_start_commit(sbi->s_journal, &target)) {
                if (wait)
                        jbd2_log_wait_commit(sbi->s_journal, target);
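
Ordering matters in ext4_sync_fs(): dirty dquots are written back before
jbd2_journal_start_commit() so that, in the non-journaled quota case, the
quota updates are staged into the very transaction sync() then waits on. A
toy model of that single rule (assumed simplification: a commit persists
exactly what was staged before it began):

#include <stdio.h>
#include <stdbool.h>

static bool dquot_staged, dquot_durable;

static void dquot_writeback(void)
{
	dquot_staged = true;	/* stands in for dquot_writeback_dquots() */
}

static void journal_commit(void)
{
	/* persists whatever was staged when the commit began */
	dquot_durable = dquot_staged;
}

int main(void)
{
	/* correct order, as in the patch: write back, then commit */
	dquot_writeback();
	journal_commit();
	printf("dquots durable after sync: %s\n",
	       dquot_durable ? "yes" : "no");	/* yes */
	return 0;
}
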
@@ -4337,9 +4513,8 @@ static int ext4_sync_fs(struct super_block *sb, int wait)
  * gives us a chance to flush the journal completely and mark the fs clean.
  *
  * Note that only this function cannot bring a filesystem to be in a clean
- * state independently, because ext4 prevents a new handle from being started
- * by @sb->s_frozen, which stays in an upper layer.  It thus needs help from
- * the upper layer.
+ * state independently. It relies on the upper layer to stop all data & metadata
+ * modifications.
  */
 static int ext4_freeze(struct super_block *sb)
 {
@@ -4366,7 +4541,7 @@ static int ext4_freeze(struct super_block *sb)
        EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
        error = ext4_commit_super(sb, 1);
 out:
-       /* we rely on s_frozen to stop further updates */
+       /* we rely on the upper layer to stop further updates */
        jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
        return error;
 }
@@ -4380,11 +4555,9 @@ static int ext4_unfreeze(struct super_block *sb)
        if (sb->s_flags & MS_RDONLY)
                return 0;
 
-       lock_super(sb);
        /* Reset the needs_recovery flag before the fs is unlocked. */
        EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
        ext4_commit_super(sb, 1);
-       unlock_super(sb);
        return 0;
 }
 
@@ -4420,7 +4593,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
        char *orig_data = kstrdup(data, GFP_KERNEL);
 
        /* Store the original options */
-       lock_super(sb);
        old_sb_flags = sb->s_flags;
        old_opts.s_mount_opt = sbi->s_mount_opt;
        old_opts.s_mount_opt2 = sbi->s_mount_opt2;
@@ -4568,10 +4740,17 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
                if (old_opts.s_qf_names[i] &&
                    old_opts.s_qf_names[i] != sbi->s_qf_names[i])
                        kfree(old_opts.s_qf_names[i]);
+       if (enable_quota) {
+               if (sb_any_quota_suspended(sb))
+                       dquot_resume(sb, -1);
+               else if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+                                       EXT4_FEATURE_RO_COMPAT_QUOTA)) {
+                       err = ext4_enable_quotas(sb);
+                       if (err)
+                               goto restore_opts;
+               }
+       }
 #endif
-       unlock_super(sb);
-       if (enable_quota)
-               dquot_resume(sb, -1);
 
        ext4_msg(sb, KERN_INFO, "re-mounted. Opts: %s", orig_data);
        kfree(orig_data);
@@ -4595,72 +4774,25 @@ restore_opts:
                sbi->s_qf_names[i] = old_opts.s_qf_names[i];
        }
 #endif
-       unlock_super(sb);
        kfree(orig_data);
        return err;
 }
 
-/*
- * Note: calculating the overhead so we can be compatible with
- * historical BSD practice is quite difficult in the face of
- * clusters/bigalloc.  This is because multiple metadata blocks from
- * different block group can end up in the same allocation cluster.
- * Calculating the exact overhead in the face of clustered allocation
- * requires either O(all block bitmaps) in memory or O(number of block
- * groups**2) in time.  We will still calculate the superblock for
- * older file systems --- and if we come across with a bigalloc file
- * system with zero in s_overhead_clusters the estimate will be close to
- * correct especially for very large cluster sizes --- but for newer
- * file systems, it's better to calculate this figure once at mkfs
- * time, and store it in the superblock.  If the superblock value is
- * present (even for non-bigalloc file systems), we will use it.
- */
 static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
        struct super_block *sb = dentry->d_sb;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_super_block *es = sbi->s_es;
-       struct ext4_group_desc *gdp;
+       ext4_fsblk_t overhead = 0;
        u64 fsid;
        s64 bfree;
 
-       if (test_opt(sb, MINIX_DF)) {
-               sbi->s_overhead_last = 0;
-       } else if (es->s_overhead_clusters) {
-               sbi->s_overhead_last = le32_to_cpu(es->s_overhead_clusters);
-       } else if (sbi->s_blocks_last != ext4_blocks_count(es)) {
-               ext4_group_t i, ngroups = ext4_get_groups_count(sb);
-               ext4_fsblk_t overhead = 0;
-
-               /*
-                * Compute the overhead (FS structures).  This is constant
-                * for a given filesystem unless the number of block groups
-                * changes so we cache the previous value until it does.
-                */
-
-               /*
-                * All of the blocks before first_data_block are
-                * overhead
-                */
-               overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block));
-
-               /*
-                * Add the overhead found in each block group
-                */
-               for (i = 0; i < ngroups; i++) {
-                       gdp = ext4_get_group_desc(sb, i, NULL);
-                       overhead += ext4_num_overhead_clusters(sb, i, gdp);
-                       cond_resched();
-               }
-               sbi->s_overhead_last = overhead;
-               smp_wmb();
-               sbi->s_blocks_last = ext4_blocks_count(es);
-       }
+       if (!test_opt(sb, MINIX_DF))
+               overhead = sbi->s_overhead;
 
        buf->f_type = EXT4_SUPER_MAGIC;
        buf->f_bsize = sb->s_blocksize;
-       buf->f_blocks = (ext4_blocks_count(es) -
-                        EXT4_C2B(sbi, sbi->s_overhead_last));
+       buf->f_blocks = ext4_blocks_count(es) - EXT4_C2B(sbi, overhead);
        bfree = percpu_counter_sum_positive(&sbi->s_freeclusters_counter) -
                percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter);
        /* prevent underflow in case that few free space is available */
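
With the overhead computed once at mount (or read straight from
s_overhead_clusters), ext4_statfs() reduces to a single subtraction; the
minix_df mount option simply leaves the local overhead at zero. Note the
EXT4_C2B() conversion: the overhead is kept in clusters while f_blocks is in
filesystem blocks. A worked example with assumed numbers:

#include <stdio.h>

int main(void)
{
	unsigned long long blocks_count = 1000000;	/* ext4_blocks_count(es) */
	unsigned int cluster_bits = 2;			/* bigalloc: 4 blocks per cluster */
	unsigned long long overhead = 5000;		/* cached at mount, in clusters */

	/* EXT4_C2B(): clusters -> blocks */
	unsigned long long f_blocks =
		blocks_count - (overhead << cluster_bits);

	printf("f_blocks = %llu\n", f_blocks);		/* 980000 */
	return 0;
}
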
@@ -4830,6 +4962,74 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
        return dquot_quota_on(sb, type, format_id, path);
 }
 
+static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
+                            unsigned int flags)
+{
+       int err;
+       struct inode *qf_inode;
+       unsigned long qf_inums[MAXQUOTAS] = {
+               le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
+               le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum)
+       };
+
+       BUG_ON(!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA));
+
+       if (!qf_inums[type])
+               return -EPERM;
+
+       qf_inode = ext4_iget(sb, qf_inums[type]);
+       if (IS_ERR(qf_inode)) {
+               ext4_error(sb, "Bad quota inode # %lu", qf_inums[type]);
+               return PTR_ERR(qf_inode);
+       }
+
+       err = dquot_enable(qf_inode, type, format_id, flags);
+       iput(qf_inode);
+
+       return err;
+}
+
+/* Enable usage tracking for all quota types. */
+static int ext4_enable_quotas(struct super_block *sb)
+{
+       int type, err = 0;
+       unsigned long qf_inums[MAXQUOTAS] = {
+               le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
+               le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum)
+       };
+
+       sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
+       for (type = 0; type < MAXQUOTAS; type++) {
+               if (qf_inums[type]) {
+                       err = ext4_quota_enable(sb, type, QFMT_VFS_V1,
+                                               DQUOT_USAGE_ENABLED);
+                       if (err) {
+                               ext4_warning(sb,
+                                       "Failed to enable quota (type=%d) "
+                                       "tracking. Please run e2fsck to fix.",
+                                       type);
+                               return err;
+                       }
+               }
+       }
+       return 0;
+}
+
+/*
+ * quota_on function that is used when the QUOTA feature is set.
+ */
+static int ext4_quota_on_sysfile(struct super_block *sb, int type,
+                                int format_id)
+{
+       if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA))
+               return -EINVAL;
+
+       /*
+        * USAGE was enabled at mount time. Only need to enable LIMITS now.
+        */
+       return ext4_quota_enable(sb, type, format_id, DQUOT_LIMITS_ENABLED);
+}
+
 static int ext4_quota_off(struct super_block *sb, int type)
 {
        struct inode *inode = sb_dqopt(sb)->files[type];
@@ -4856,6 +5056,18 @@ out:
        return dquot_quota_off(sb, type);
 }
 
+/*
+ * quota_off function that is used when the QUOTA feature is set.
+ */
+static int ext4_quota_off_sysfile(struct super_block *sb, int type)
+{
+       if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA))
+               return -EINVAL;
+
+       /* Disable only the limits. */
+       return dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
+}
+
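
With the QUOTA feature the quota files are hidden system inodes, so the
lifecycle is split in two: usage accounting is switched on at mount by
ext4_enable_quotas() (DQUOT_USAGE_ENABLED) and survives quotaoff, while
quotaon(8)/quotaoff(8) through the sysfile hooks above only toggle limit
enforcement (DQUOT_LIMITS_ENABLED). A small userspace state model of that
split (the flag names mirror the kernel's; the transitions are simplified):

#include <stdio.h>

#define DQUOT_USAGE_ENABLED	0x01	/* accounting: on at mount */
#define DQUOT_LIMITS_ENABLED	0x02	/* enforcement: quotaon/quotaoff */

static unsigned int quota_state;

static void mount_with_quota_feature(void)
{
	quota_state |= DQUOT_USAGE_ENABLED;	/* ext4_enable_quotas() */
}

static void quota_on_sysfile(void)
{
	quota_state |= DQUOT_LIMITS_ENABLED;	/* ext4_quota_on_sysfile() */
}

static void quota_off_sysfile(void)
{
	/* only limits drop; usage tracking stays on */
	quota_state &= ~DQUOT_LIMITS_ENABLED;	/* ext4_quota_off_sysfile() */
}

static void show(const char *when)
{
	printf("%s: usage=%d limits=%d\n", when,
	       !!(quota_state & DQUOT_USAGE_ENABLED),
	       !!(quota_state & DQUOT_LIMITS_ENABLED));
}

int main(void)
{
	mount_with_quota_feature();
	show("after mount");
	quota_on_sysfile();
	show("after quotaon");
	quota_off_sysfile();
	show("after quotaoff");
	return 0;
}
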
 /* Read data from quotafile - avoid pagecache and such because we cannot afford
  * acquiring the locks... As quota files are never truncated and quota code
  * itself serializes the operations (and no one else should touch the files)
@@ -5086,8 +5298,10 @@ static int __init ext4_init_fs(void)
        if (err)
                goto out6;
        ext4_kset = kset_create_and_add("ext4", NULL, fs_kobj);
-       if (!ext4_kset)
+       if (!ext4_kset) {
+               err = -ENOMEM;
                goto out5;
+       }
        ext4_proc_root = proc_mkdir("fs/ext4", NULL);
 
        err = ext4_init_feat_adverts();