f2fs: catch up to v4.14-rc1
[GitHub/exynos8895/android_kernel_samsung_universal8895.git] fs/f2fs/super.c
index 84d5686c4aa4c0279513efc1dd46f3af2c68850e..315e59ad1483ae6038ac1849b567da7b88ded814 100644
 #include <linux/random.h>
 #include <linux/exportfs.h>
 #include <linux/blkdev.h>
+#include <linux/quotaops.h>
 #include <linux/f2fs_fs.h>
 #include <linux/sysfs.h>
+#include <linux/quota.h>
 
 #include "f2fs.h"
 #include "node.h"
@@ -35,9 +37,7 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/f2fs.h>
 
-static struct proc_dir_entry *f2fs_proc_root;
 static struct kmem_cache *f2fs_inode_cachep;
-static struct kset *f2fs_kset;
 
 #ifdef CONFIG_F2FS_FAULT_INJECTION
 
@@ -49,6 +49,7 @@ char *fault_name[FAULT_MAX] = {
        [FAULT_BLOCK]           = "no more block",
        [FAULT_DIR_DEPTH]       = "too big dir depth",
        [FAULT_EVICT_INODE]     = "evict_inode fail",
+       [FAULT_TRUNCATE]        = "truncate fail",
        [FAULT_IO]              = "IO error",
        [FAULT_CHECKPOINT]      = "checkpoint error",
 };
@@ -82,6 +83,7 @@ enum {
        Opt_discard,
        Opt_nodiscard,
        Opt_noheap,
+       Opt_heap,
        Opt_user_xattr,
        Opt_nouser_xattr,
        Opt_acl,
@@ -89,6 +91,7 @@ enum {
        Opt_active_logs,
        Opt_disable_ext_identify,
        Opt_inline_xattr,
+       Opt_noinline_xattr,
        Opt_inline_data,
        Opt_inline_dentry,
        Opt_noinline_dentry,
@@ -105,6 +108,20 @@ enum {
        Opt_fault_injection,
        Opt_lazytime,
        Opt_nolazytime,
+       Opt_quota,
+       Opt_noquota,
+       Opt_usrquota,
+       Opt_grpquota,
+       Opt_prjquota,
+       Opt_usrjquota,
+       Opt_grpjquota,
+       Opt_prjjquota,
+       Opt_offusrjquota,
+       Opt_offgrpjquota,
+       Opt_offprjjquota,
+       Opt_jqfmt_vfsold,
+       Opt_jqfmt_vfsv0,
+       Opt_jqfmt_vfsv1,
        Opt_err,
 };
 
@@ -115,6 +132,7 @@ static match_table_t f2fs_tokens = {
        {Opt_discard, "discard"},
        {Opt_nodiscard, "nodiscard"},
        {Opt_noheap, "no_heap"},
+       {Opt_heap, "heap"},
        {Opt_user_xattr, "user_xattr"},
        {Opt_nouser_xattr, "nouser_xattr"},
        {Opt_acl, "acl"},
@@ -122,6 +140,7 @@ static match_table_t f2fs_tokens = {
        {Opt_active_logs, "active_logs=%u"},
        {Opt_disable_ext_identify, "disable_ext_identify"},
        {Opt_inline_xattr, "inline_xattr"},
+       {Opt_noinline_xattr, "noinline_xattr"},
        {Opt_inline_data, "inline_data"},
        {Opt_inline_dentry, "inline_dentry"},
        {Opt_noinline_dentry, "noinline_dentry"},
@@ -138,225 +157,139 @@ static match_table_t f2fs_tokens = {
        {Opt_fault_injection, "fault_injection=%u"},
        {Opt_lazytime, "lazytime"},
        {Opt_nolazytime, "nolazytime"},
+       {Opt_quota, "quota"},
+       {Opt_noquota, "noquota"},
+       {Opt_usrquota, "usrquota"},
+       {Opt_grpquota, "grpquota"},
+       {Opt_prjquota, "prjquota"},
+       {Opt_usrjquota, "usrjquota=%s"},
+       {Opt_grpjquota, "grpjquota=%s"},
+       {Opt_prjjquota, "prjjquota=%s"},
+       {Opt_offusrjquota, "usrjquota="},
+       {Opt_offgrpjquota, "grpjquota="},
+       {Opt_offprjjquota, "prjjquota="},
+       {Opt_jqfmt_vfsold, "jqfmt=vfsold"},
+       {Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
+       {Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
        {Opt_err, NULL},
 };
 
-/* Sysfs support for f2fs */
-enum {
-       GC_THREAD,      /* struct f2fs_gc_thread */
-       SM_INFO,        /* struct f2fs_sm_info */
-       NM_INFO,        /* struct f2fs_nm_info */
-       F2FS_SBI,       /* struct f2fs_sb_info */
-#ifdef CONFIG_F2FS_FAULT_INJECTION
-       FAULT_INFO_RATE,        /* struct f2fs_fault_info */
-       FAULT_INFO_TYPE,        /* struct f2fs_fault_info */
-#endif
-};
-
-struct f2fs_attr {
-       struct attribute attr;
-       ssize_t (*show)(struct f2fs_attr *, struct f2fs_sb_info *, char *);
-       ssize_t (*store)(struct f2fs_attr *, struct f2fs_sb_info *,
-                        const char *, size_t);
-       int struct_type;
-       int offset;
-};
-
-static unsigned char *__struct_ptr(struct f2fs_sb_info *sbi, int struct_type)
-{
-       if (struct_type == GC_THREAD)
-               return (unsigned char *)sbi->gc_thread;
-       else if (struct_type == SM_INFO)
-               return (unsigned char *)SM_I(sbi);
-       else if (struct_type == NM_INFO)
-               return (unsigned char *)NM_I(sbi);
-       else if (struct_type == F2FS_SBI)
-               return (unsigned char *)sbi;
-#ifdef CONFIG_F2FS_FAULT_INJECTION
-       else if (struct_type == FAULT_INFO_RATE ||
-                                       struct_type == FAULT_INFO_TYPE)
-               return (unsigned char *)&sbi->fault_info;
-#endif
-       return NULL;
-}
-
-static ssize_t lifetime_write_kbytes_show(struct f2fs_attr *a,
-               struct f2fs_sb_info *sbi, char *buf)
+void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
 {
-       struct super_block *sb = sbi->sb;
-
-       if (!sb->s_bdev->bd_part)
-               return snprintf(buf, PAGE_SIZE, "0\n");
+       struct va_format vaf;
+       va_list args;
 
-       return snprintf(buf, PAGE_SIZE, "%llu\n",
-               (unsigned long long)(sbi->kbytes_written +
-                       BD_PART_WRITTEN(sbi)));
+       va_start(args, fmt);
+       vaf.fmt = fmt;
+       vaf.va = &args;
+       printk_ratelimited("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf);
+       va_end(args);
 }
 
-static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
-                       struct f2fs_sb_info *sbi, char *buf)
+static void init_once(void *foo)
 {
-       unsigned char *ptr = NULL;
-       unsigned int *ui;
-
-       ptr = __struct_ptr(sbi, a->struct_type);
-       if (!ptr)
-               return -EINVAL;
-
-       ui = (unsigned int *)(ptr + a->offset);
+       struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;
 
-       return snprintf(buf, PAGE_SIZE, "%u\n", *ui);
+       inode_init_once(&fi->vfs_inode);
 }
 
-static ssize_t f2fs_sbi_store(struct f2fs_attr *a,
-                       struct f2fs_sb_info *sbi,
-                       const char *buf, size_t count)
+#ifdef CONFIG_QUOTA
+static const char * const quotatypes[] = INITQFNAMES;
+#define QTYPE2NAME(t) (quotatypes[t])
+static int f2fs_set_qf_name(struct super_block *sb, int qtype,
+                                                       substring_t *args)
 {
-       unsigned char *ptr;
-       unsigned long t;
-       unsigned int *ui;
-       ssize_t ret;
+       struct f2fs_sb_info *sbi = F2FS_SB(sb);
+       char *qname;
+       int ret = -EINVAL;
 
-       ptr = __struct_ptr(sbi, a->struct_type);
-       if (!ptr)
+       if (sb_any_quota_loaded(sb) && !sbi->s_qf_names[qtype]) {
+               f2fs_msg(sb, KERN_ERR,
+                       "Cannot change journaled "
+                       "quota options when quota turned on");
                return -EINVAL;
-
-       ui = (unsigned int *)(ptr + a->offset);
-
-       ret = kstrtoul(skip_spaces(buf), 0, &t);
-       if (ret < 0)
-               return ret;
-#ifdef CONFIG_F2FS_FAULT_INJECTION
-       if (a->struct_type == FAULT_INFO_TYPE && t >= (1 << FAULT_MAX))
+       }
+       qname = match_strdup(args);
+       if (!qname) {
+               f2fs_msg(sb, KERN_ERR,
+                       "Not enough memory for storing quotafile name");
                return -EINVAL;
-#endif
-       *ui = t;
-       return count;
-}
-
-static ssize_t f2fs_attr_show(struct kobject *kobj,
-                               struct attribute *attr, char *buf)
-{
-       struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
-                                                               s_kobj);
-       struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr);
-
-       return a->show ? a->show(a, sbi, buf) : 0;
+       }
+       if (sbi->s_qf_names[qtype]) {
+               if (strcmp(sbi->s_qf_names[qtype], qname) == 0)
+                       ret = 0;
+               else
+                       f2fs_msg(sb, KERN_ERR,
+                                "%s quota file already specified",
+                                QTYPE2NAME(qtype));
+               goto errout;
+       }
+       if (strchr(qname, '/')) {
+               f2fs_msg(sb, KERN_ERR,
+                       "quotafile must be on filesystem root");
+               goto errout;
+       }
+       sbi->s_qf_names[qtype] = qname;
+       set_opt(sbi, QUOTA);
+       return 0;
+errout:
+       kfree(qname);
+       return ret;
 }
 
-static ssize_t f2fs_attr_store(struct kobject *kobj, struct attribute *attr,
-                                               const char *buf, size_t len)
+static int f2fs_clear_qf_name(struct super_block *sb, int qtype)
 {
-       struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
-                                                                       s_kobj);
-       struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr);
+       struct f2fs_sb_info *sbi = F2FS_SB(sb);
 
-       return a->store ? a->store(a, sbi, buf, len) : 0;
+       if (sb_any_quota_loaded(sb) && sbi->s_qf_names[qtype]) {
+               f2fs_msg(sb, KERN_ERR, "Cannot change journaled quota options"
+                       " when quota turned on");
+               return -EINVAL;
+       }
+       kfree(sbi->s_qf_names[qtype]);
+       sbi->s_qf_names[qtype] = NULL;
+       return 0;
 }
 
-static void f2fs_sb_release(struct kobject *kobj)
+static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
 {
-       struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
-                                                               s_kobj);
-       complete(&sbi->s_kobj_unregister);
-}
+       /*
+        * We do the test below only for project quotas. 'usrquota' and
+        * 'grpquota' mount options are allowed even without quota feature
+        * to support legacy quotas in quota files.
+        */
+       if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi->sb)) {
+               f2fs_msg(sbi->sb, KERN_ERR, "Project quota feature not enabled. "
+                        "Cannot enable project quota enforcement.");
+               return -1;
+       }
+       if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA] ||
+                       sbi->s_qf_names[PRJQUOTA]) {
+               if (test_opt(sbi, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
+                       clear_opt(sbi, USRQUOTA);
+
+               if (test_opt(sbi, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
+                       clear_opt(sbi, GRPQUOTA);
+
+               if (test_opt(sbi, PRJQUOTA) && sbi->s_qf_names[PRJQUOTA])
+                       clear_opt(sbi, PRJQUOTA);
+
+               if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) ||
+                               test_opt(sbi, PRJQUOTA)) {
+                       f2fs_msg(sbi->sb, KERN_ERR, "old and new quota "
+                                       "format mixing");
+                       return -1;
+               }
 
-#define F2FS_ATTR_OFFSET(_struct_type, _name, _mode, _show, _store, _offset) \
-static struct f2fs_attr f2fs_attr_##_name = {                  \
-       .attr = {.name = __stringify(_name), .mode = _mode },   \
-       .show   = _show,                                        \
-       .store  = _store,                                       \
-       .struct_type = _struct_type,                            \
-       .offset = _offset                                       \
+               if (!sbi->s_jquota_fmt) {
+                       f2fs_msg(sbi->sb, KERN_ERR, "journaled quota format "
+                                       "not specified");
+                       return -1;
+               }
+       }
+       return 0;
 }
-
-#define F2FS_RW_ATTR(struct_type, struct_name, name, elname)   \
-       F2FS_ATTR_OFFSET(struct_type, name, 0644,               \
-               f2fs_sbi_show, f2fs_sbi_store,                  \
-               offsetof(struct struct_name, elname))
-
-#define F2FS_GENERAL_RO_ATTR(name) \
-static struct f2fs_attr f2fs_attr_##name = __ATTR(name, 0444, name##_show, NULL)
-
-F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_min_sleep_time, min_sleep_time);
-F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_max_sleep_time, max_sleep_time);
-F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_no_gc_sleep_time, no_gc_sleep_time);
-F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_idle, gc_idle);
-F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, reclaim_segments, rec_prefree_segments);
-F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, max_small_discards, max_discards);
-F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, batched_trim_sections, trim_sections);
-F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, ipu_policy, ipu_policy);
-F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ipu_util, min_ipu_util);
-F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_fsync_blocks, min_fsync_blocks);
-F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ram_thresh, ram_thresh);
-F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ra_nid_pages, ra_nid_pages);
-F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, dirty_nats_ratio, dirty_nats_ratio);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, max_victim_search, max_victim_search);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, dir_level, dir_level);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, cp_interval, interval_time[CP_TIME]);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, idle_interval, interval_time[REQ_TIME]);
-#ifdef CONFIG_F2FS_FAULT_INJECTION
-F2FS_RW_ATTR(FAULT_INFO_RATE, f2fs_fault_info, inject_rate, inject_rate);
-F2FS_RW_ATTR(FAULT_INFO_TYPE, f2fs_fault_info, inject_type, inject_type);
-#endif
-F2FS_GENERAL_RO_ATTR(lifetime_write_kbytes);
-
-#define ATTR_LIST(name) (&f2fs_attr_##name.attr)
-static struct attribute *f2fs_attrs[] = {
-       ATTR_LIST(gc_min_sleep_time),
-       ATTR_LIST(gc_max_sleep_time),
-       ATTR_LIST(gc_no_gc_sleep_time),
-       ATTR_LIST(gc_idle),
-       ATTR_LIST(reclaim_segments),
-       ATTR_LIST(max_small_discards),
-       ATTR_LIST(batched_trim_sections),
-       ATTR_LIST(ipu_policy),
-       ATTR_LIST(min_ipu_util),
-       ATTR_LIST(min_fsync_blocks),
-       ATTR_LIST(max_victim_search),
-       ATTR_LIST(dir_level),
-       ATTR_LIST(ram_thresh),
-       ATTR_LIST(ra_nid_pages),
-       ATTR_LIST(dirty_nats_ratio),
-       ATTR_LIST(cp_interval),
-       ATTR_LIST(idle_interval),
-#ifdef CONFIG_F2FS_FAULT_INJECTION
-       ATTR_LIST(inject_rate),
-       ATTR_LIST(inject_type),
 #endif
-       ATTR_LIST(lifetime_write_kbytes),
-       NULL,
-};
-
-static const struct sysfs_ops f2fs_attr_ops = {
-       .show   = f2fs_attr_show,
-       .store  = f2fs_attr_store,
-};
-
-static struct kobj_type f2fs_ktype = {
-       .default_attrs  = f2fs_attrs,
-       .sysfs_ops      = &f2fs_attr_ops,
-       .release        = f2fs_sb_release,
-};
-
-void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
-{
-       struct va_format vaf;
-       va_list args;
-
-       va_start(args, fmt);
-       vaf.fmt = fmt;
-       vaf.va = &args;
-       printk("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf);
-       va_end(args);
-}
-
-static void init_once(void *foo)
-{
-       struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;
-
-       inode_init_once(&fi->vfs_inode);
-}
 
 static int parse_options(struct super_block *sb, char *options)
 {
@@ -365,6 +298,9 @@ static int parse_options(struct super_block *sb, char *options)
        substring_t args[MAX_OPT_ARGS];
        char *p, *name;
        int arg = 0;
+#ifdef CONFIG_QUOTA
+       int ret;
+#endif
 
        if (!options)
                return 0;
@@ -431,6 +367,9 @@ static int parse_options(struct super_block *sb, char *options)
                case Opt_noheap:
                        set_opt(sbi, NOHEAP);
                        break;
+               case Opt_heap:
+                       clear_opt(sbi, NOHEAP);
+                       break;
 #ifdef CONFIG_F2FS_FS_XATTR
                case Opt_user_xattr:
                        set_opt(sbi, XATTR_USER);
@@ -441,6 +380,9 @@ static int parse_options(struct super_block *sb, char *options)
                case Opt_inline_xattr:
                        set_opt(sbi, INLINE_XATTR);
                        break;
+               case Opt_noinline_xattr:
+                       clear_opt(sbi, INLINE_XATTR);
+                       break;
 #else
                case Opt_user_xattr:
                        f2fs_msg(sb, KERN_INFO,
@@ -454,6 +396,10 @@ static int parse_options(struct super_block *sb, char *options)
                        f2fs_msg(sb, KERN_INFO,
                                "inline_xattr options not supported");
                        break;
+               case Opt_noinline_xattr:
+                       f2fs_msg(sb, KERN_INFO,
+                               "noinline_xattr options not supported");
+                       break;
 #endif
 #ifdef CONFIG_F2FS_FS_POSIX_ACL
                case Opt_acl:
@@ -553,6 +499,7 @@ static int parse_options(struct super_block *sb, char *options)
                                return -EINVAL;
 #ifdef CONFIG_F2FS_FAULT_INJECTION
                        f2fs_build_fault_attr(sbi, arg);
+                       set_opt(sbi, FAULT_INJECTION);
 #else
                        f2fs_msg(sb, KERN_INFO,
                                "FAULT_INJECTION was not selected");
@@ -564,6 +511,81 @@ static int parse_options(struct super_block *sb, char *options)
                case Opt_nolazytime:
                        sb->s_flags &= ~MS_LAZYTIME;
                        break;
+#ifdef CONFIG_QUOTA
+               case Opt_quota:
+               case Opt_usrquota:
+                       set_opt(sbi, USRQUOTA);
+                       break;
+               case Opt_grpquota:
+                       set_opt(sbi, GRPQUOTA);
+                       break;
+               case Opt_prjquota:
+                       set_opt(sbi, PRJQUOTA);
+                       break;
+               case Opt_usrjquota:
+                       ret = f2fs_set_qf_name(sb, USRQUOTA, &args[0]);
+                       if (ret)
+                               return ret;
+                       break;
+               case Opt_grpjquota:
+                       ret = f2fs_set_qf_name(sb, GRPQUOTA, &args[0]);
+                       if (ret)
+                               return ret;
+                       break;
+               case Opt_prjjquota:
+                       ret = f2fs_set_qf_name(sb, PRJQUOTA, &args[0]);
+                       if (ret)
+                               return ret;
+                       break;
+               case Opt_offusrjquota:
+                       ret = f2fs_clear_qf_name(sb, USRQUOTA);
+                       if (ret)
+                               return ret;
+                       break;
+               case Opt_offgrpjquota:
+                       ret = f2fs_clear_qf_name(sb, GRPQUOTA);
+                       if (ret)
+                               return ret;
+                       break;
+               case Opt_offprjjquota:
+                       ret = f2fs_clear_qf_name(sb, PRJQUOTA);
+                       if (ret)
+                               return ret;
+                       break;
+               case Opt_jqfmt_vfsold:
+                       sbi->s_jquota_fmt = QFMT_VFS_OLD;
+                       break;
+               case Opt_jqfmt_vfsv0:
+                       sbi->s_jquota_fmt = QFMT_VFS_V0;
+                       break;
+               case Opt_jqfmt_vfsv1:
+                       sbi->s_jquota_fmt = QFMT_VFS_V1;
+                       break;
+               case Opt_noquota:
+                       clear_opt(sbi, QUOTA);
+                       clear_opt(sbi, USRQUOTA);
+                       clear_opt(sbi, GRPQUOTA);
+                       clear_opt(sbi, PRJQUOTA);
+                       break;
+#else
+               case Opt_quota:
+               case Opt_usrquota:
+               case Opt_grpquota:
+               case Opt_prjquota:
+               case Opt_usrjquota:
+               case Opt_grpjquota:
+               case Opt_prjjquota:
+               case Opt_offusrjquota:
+               case Opt_offgrpjquota:
+               case Opt_offprjjquota:
+               case Opt_jqfmt_vfsold:
+               case Opt_jqfmt_vfsv0:
+               case Opt_jqfmt_vfsv1:
+               case Opt_noquota:
+                       f2fs_msg(sb, KERN_INFO,
+                                       "quota operations not supported");
+                       break;
+#endif
                default:
                        f2fs_msg(sb, KERN_ERR,
                                "Unrecognized mount option \"%s\" or missing value",
@@ -571,6 +593,10 @@ static int parse_options(struct super_block *sb, char *options)
                        return -EINVAL;
                }
        }
+#ifdef CONFIG_QUOTA
+       if (f2fs_check_quota_options(sbi))
+               return -EINVAL;
+#endif
 
        if (F2FS_IO_SIZE_BITS(sbi) && !test_opt(sbi, LFS)) {
                f2fs_msg(sb, KERN_ERR,
@@ -603,14 +629,22 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb)
        mutex_init(&fi->inmem_lock);
        init_rwsem(&fi->dio_rwsem[READ]);
        init_rwsem(&fi->dio_rwsem[WRITE]);
+       init_rwsem(&fi->i_mmap_sem);
+       init_rwsem(&fi->i_xattr_sem);
 
+#ifdef CONFIG_QUOTA
+       memset(&fi->i_dquot, 0, sizeof(fi->i_dquot));
+       fi->i_reserved_quota = 0;
+#endif
        /* Will be used by directory only */
        fi->i_dir_level = F2FS_SB(sb)->dir_level;
+
        return &fi->vfs_inode;
 }
 
 static int f2fs_drop_inode(struct inode *inode)
 {
+       int ret;
        /*
         * This is to avoid a deadlock condition like below.
         * writeback_single_inode(inode)
@@ -643,10 +677,12 @@ static int f2fs_drop_inode(struct inode *inode)
                        spin_lock(&inode->i_lock);
                        atomic_dec(&inode->i_count);
                }
+               trace_f2fs_drop_inode(inode, 0);
                return 0;
        }
-
-       return generic_drop_inode(inode);
+       ret = generic_drop_inode(inode);
+       trace_f2fs_drop_inode(inode, ret);
+       return ret;
 }
 
 int f2fs_inode_dirtied(struct inode *inode, bool sync)
@@ -744,15 +780,9 @@ static void destroy_device_list(struct f2fs_sb_info *sbi)
 static void f2fs_put_super(struct super_block *sb)
 {
        struct f2fs_sb_info *sbi = F2FS_SB(sb);
+       int i;
 
-       if (sbi->s_proc) {
-               remove_proc_entry("segment_info", sbi->s_proc);
-               remove_proc_entry("segment_bits", sbi->s_proc);
-               remove_proc_entry(sb->s_id, f2fs_proc_root);
-       }
-       kobject_del(&sbi->s_kobj);
-
-       stop_gc_thread(sbi);
+       f2fs_quota_off_umount(sb);
 
        /* prevent remaining shrinker jobs */
        mutex_lock(&sbi->umount_mutex);
@@ -771,7 +801,14 @@ static void f2fs_put_super(struct super_block *sb)
        }
 
        /* be sure to wait for any on-going discard commands */
-       f2fs_wait_discard_bio(sbi, NULL_ADDR);
+       f2fs_wait_discard_bios(sbi);
+
+       if (f2fs_discard_en(sbi) && !sbi->discard_blks) {
+               struct cp_control cpc = {
+                       .reason = CP_UMOUNT | CP_TRIMMED,
+               };
+               write_checkpoint(sbi, &cpc);
+       }
 
        /* write_checkpoint can update stat information */
        f2fs_destroy_stats(sbi);
@@ -786,7 +823,7 @@ static void f2fs_put_super(struct super_block *sb)
        mutex_unlock(&sbi->umount_mutex);
 
        /* our cp_error case, we can wait for any writeback page */
-       f2fs_flush_merged_bios(sbi);
+       f2fs_flush_merged_writes(sbi);
 
        iput(sbi->node_inode);
        iput(sbi->meta_inode);
@@ -796,8 +833,8 @@ static void f2fs_put_super(struct super_block *sb)
        destroy_segment_manager(sbi);
 
        kfree(sbi->ckpt);
-       kobject_put(&sbi->s_kobj);
-       wait_for_completion(&sbi->s_kobj_unregister);
+
+       f2fs_unregister_sysfs(sbi);
 
        sb->s_fs_info = NULL;
        if (sbi->s_chksum_driver)
@@ -805,8 +842,15 @@ static void f2fs_put_super(struct super_block *sb)
        kfree(sbi->raw_super);
 
        destroy_device_list(sbi);
-
+       if (sbi->write_io_dummy)
+               mempool_destroy(sbi->write_io_dummy);
+#ifdef CONFIG_QUOTA
+       for (i = 0; i < MAXQUOTAS; i++)
+               kfree(sbi->s_qf_names[i]);
+#endif
        destroy_percpu_info(sbi);
+       for (i = 0; i < NR_PAGE_TYPE; i++)
+               kfree(sbi->write_io[i]);
        kfree(sbi);
 }
 
@@ -817,6 +861,9 @@ int f2fs_sync_fs(struct super_block *sb, int sync)
 
        trace_f2fs_sync_fs(sb, sync);
 
+       if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
+               return -EAGAIN;
+
        if (sync) {
                struct cp_control cpc;
 
@@ -851,12 +898,55 @@ static int f2fs_unfreeze(struct super_block *sb)
        return 0;
 }
 
+#ifdef CONFIG_QUOTA
+static int f2fs_statfs_project(struct super_block *sb,
+                               kprojid_t projid, struct kstatfs *buf)
+{
+       struct kqid qid;
+       struct dquot *dquot;
+       u64 limit;
+       u64 curblock;
+
+       qid = make_kqid_projid(projid);
+       dquot = dqget(sb, qid);
+       if (IS_ERR(dquot))
+               return PTR_ERR(dquot);
+       spin_lock(&dq_data_lock);
+
+       limit = (dquot->dq_dqb.dqb_bsoftlimit ?
+                dquot->dq_dqb.dqb_bsoftlimit :
+                dquot->dq_dqb.dqb_bhardlimit) >> sb->s_blocksize_bits;
+       if (limit && buf->f_blocks > limit) {
+               curblock = dquot->dq_dqb.dqb_curspace >> sb->s_blocksize_bits;
+               buf->f_blocks = limit;
+               buf->f_bfree = buf->f_bavail =
+                       (buf->f_blocks > curblock) ?
+                        (buf->f_blocks - curblock) : 0;
+       }
+
+       limit = dquot->dq_dqb.dqb_isoftlimit ?
+               dquot->dq_dqb.dqb_isoftlimit :
+               dquot->dq_dqb.dqb_ihardlimit;
+       if (limit && buf->f_files > limit) {
+               buf->f_files = limit;
+               buf->f_ffree =
+                       (buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
+                        (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
+       }
+
+       spin_unlock(&dq_data_lock);
+       dqput(dquot);
+       return 0;
+}
+#endif
+
 static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
        struct super_block *sb = dentry->d_sb;
        struct f2fs_sb_info *sbi = F2FS_SB(sb);
        u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
        block_t total_count, user_block_count, start_count, ovp_count;
+       u64 avail_node_count;
 
        total_count = le64_to_cpu(sbi->raw_super->block_count);
        user_block_count = sbi->user_block_count;
@@ -867,19 +957,67 @@ static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
 
        buf->f_blocks = total_count - start_count;
        buf->f_bfree = user_block_count - valid_user_blocks(sbi) + ovp_count;
-       buf->f_bavail = user_block_count - valid_user_blocks(sbi);
+       buf->f_bavail = user_block_count - valid_user_blocks(sbi) -
+                                               sbi->reserved_blocks;
 
-       buf->f_files = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
-       buf->f_ffree = min(buf->f_files - valid_node_count(sbi),
-                                                       buf->f_bavail);
+       avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
+
+       if (avail_node_count > user_block_count) {
+               buf->f_files = user_block_count;
+               buf->f_ffree = buf->f_bavail;
+       } else {
+               buf->f_files = avail_node_count;
+               buf->f_ffree = min(avail_node_count - valid_node_count(sbi),
+                                       buf->f_bavail);
+       }
 
        buf->f_namelen = F2FS_NAME_LEN;
        buf->f_fsid.val[0] = (u32)id;
        buf->f_fsid.val[1] = (u32)(id >> 32);
 
+#ifdef CONFIG_QUOTA
+       if (is_inode_flag_set(dentry->d_inode, FI_PROJ_INHERIT) &&
+                       sb_has_quota_limits_enabled(sb, PRJQUOTA)) {
+               f2fs_statfs_project(sb, F2FS_I(dentry->d_inode)->i_projid, buf);
+       }
+#endif
        return 0;
 }
 
+static inline void f2fs_show_quota_options(struct seq_file *seq,
+                                          struct super_block *sb)
+{
+#ifdef CONFIG_QUOTA
+       struct f2fs_sb_info *sbi = F2FS_SB(sb);
+
+       if (sbi->s_jquota_fmt) {
+               char *fmtname = "";
+
+               switch (sbi->s_jquota_fmt) {
+               case QFMT_VFS_OLD:
+                       fmtname = "vfsold";
+                       break;
+               case QFMT_VFS_V0:
+                       fmtname = "vfsv0";
+                       break;
+               case QFMT_VFS_V1:
+                       fmtname = "vfsv1";
+                       break;
+               }
+               seq_printf(seq, ",jqfmt=%s", fmtname);
+       }
+
+       if (sbi->s_qf_names[USRQUOTA])
+               seq_show_option(seq, "usrjquota", sbi->s_qf_names[USRQUOTA]);
+
+       if (sbi->s_qf_names[GRPQUOTA])
+               seq_show_option(seq, "grpjquota", sbi->s_qf_names[GRPQUOTA]);
+
+       if (sbi->s_qf_names[PRJQUOTA])
+               seq_show_option(seq, "prjjquota", sbi->s_qf_names[PRJQUOTA]);
+#endif
+}
+
 static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
 {
        struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);
@@ -897,7 +1035,9 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
        if (test_opt(sbi, DISCARD))
                seq_puts(seq, ",discard");
        if (test_opt(sbi, NOHEAP))
-               seq_puts(seq, ",no_heap_alloc");
+               seq_puts(seq, ",no_heap");
+       else
+               seq_puts(seq, ",heap");
 #ifdef CONFIG_F2FS_FS_XATTR
        if (test_opt(sbi, XATTR_USER))
                seq_puts(seq, ",user_xattr");
@@ -905,6 +1045,8 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
                seq_puts(seq, ",nouser_xattr");
        if (test_opt(sbi, INLINE_XATTR))
                seq_puts(seq, ",inline_xattr");
+       else
+               seq_puts(seq, ",noinline_xattr");
 #endif
 #ifdef CONFIG_F2FS_FS_POSIX_ACL
        if (test_opt(sbi, POSIX_ACL))
@@ -943,87 +1085,37 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
        seq_printf(seq, ",active_logs=%u", sbi->active_logs);
        if (F2FS_IO_SIZE_BITS(sbi))
                seq_printf(seq, ",io_size=%uKB", F2FS_IO_SIZE_KB(sbi));
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+       if (test_opt(sbi, FAULT_INJECTION))
+               seq_printf(seq, ",fault_injection=%u",
+                               sbi->fault_info.inject_rate);
+#endif
+#ifdef CONFIG_QUOTA
+       if (test_opt(sbi, QUOTA))
+               seq_puts(seq, ",quota");
+       if (test_opt(sbi, USRQUOTA))
+               seq_puts(seq, ",usrquota");
+       if (test_opt(sbi, GRPQUOTA))
+               seq_puts(seq, ",grpquota");
+       if (test_opt(sbi, PRJQUOTA))
+               seq_puts(seq, ",prjquota");
+#endif
+       f2fs_show_quota_options(seq, sbi->sb);
 
        return 0;
 }
 
-static int segment_info_seq_show(struct seq_file *seq, void *offset)
-{
-       struct super_block *sb = seq->private;
-       struct f2fs_sb_info *sbi = F2FS_SB(sb);
-       unsigned int total_segs =
-                       le32_to_cpu(sbi->raw_super->segment_count_main);
-       int i;
-
-       seq_puts(seq, "format: segment_type|valid_blocks\n"
-               "segment_type(0:HD, 1:WD, 2:CD, 3:HN, 4:WN, 5:CN)\n");
-
-       for (i = 0; i < total_segs; i++) {
-               struct seg_entry *se = get_seg_entry(sbi, i);
-
-               if ((i % 10) == 0)
-                       seq_printf(seq, "%-10d", i);
-               seq_printf(seq, "%d|%-3u", se->type,
-                                       get_valid_blocks(sbi, i, 1));
-               if ((i % 10) == 9 || i == (total_segs - 1))
-                       seq_putc(seq, '\n');
-               else
-                       seq_putc(seq, ' ');
-       }
-
-       return 0;
-}
-
-static int segment_bits_seq_show(struct seq_file *seq, void *offset)
-{
-       struct super_block *sb = seq->private;
-       struct f2fs_sb_info *sbi = F2FS_SB(sb);
-       unsigned int total_segs =
-                       le32_to_cpu(sbi->raw_super->segment_count_main);
-       int i, j;
-
-       seq_puts(seq, "format: segment_type|valid_blocks|bitmaps\n"
-               "segment_type(0:HD, 1:WD, 2:CD, 3:HN, 4:WN, 5:CN)\n");
-
-       for (i = 0; i < total_segs; i++) {
-               struct seg_entry *se = get_seg_entry(sbi, i);
-
-               seq_printf(seq, "%-10d", i);
-               seq_printf(seq, "%d|%-3u|", se->type,
-                                       get_valid_blocks(sbi, i, 1));
-               for (j = 0; j < SIT_VBLOCK_MAP_SIZE; j++)
-                       seq_printf(seq, " %.2x", se->cur_valid_map[j]);
-               seq_putc(seq, '\n');
-       }
-       return 0;
-}
-
-#define F2FS_PROC_FILE_DEF(_name)                                      \
-static int _name##_open_fs(struct inode *inode, struct file *file)     \
-{                                                                      \
-       return single_open(file, _name##_seq_show, PDE_DATA(inode));    \
-}                                                                      \
-                                                                       \
-static const struct file_operations f2fs_seq_##_name##_fops = {                \
-       .owner = THIS_MODULE,                                           \
-       .open = _name##_open_fs,                                        \
-       .read = seq_read,                                               \
-       .llseek = seq_lseek,                                            \
-       .release = single_release,                                      \
-};
-
-F2FS_PROC_FILE_DEF(segment_info);
-F2FS_PROC_FILE_DEF(segment_bits);
-
 static void default_options(struct f2fs_sb_info *sbi)
 {
        /* init some FS parameters */
        sbi->active_logs = NR_CURSEG_TYPE;
 
        set_opt(sbi, BG_GC);
+       set_opt(sbi, INLINE_XATTR);
        set_opt(sbi, INLINE_DATA);
        set_opt(sbi, INLINE_DENTRY);
        set_opt(sbi, EXTENT_CACHE);
+       set_opt(sbi, NOHEAP);
        sbi->sb->s_flags |= MS_LAZYTIME;
        set_opt(sbi, FLUSH_MERGE);
        if (f2fs_sb_mounted_blkzoned(sbi->sb)) {
@@ -1049,6 +1141,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
 {
        struct f2fs_sb_info *sbi = F2FS_SB(sb);
        struct f2fs_mount_info org_mount_opt;
+       unsigned long old_sb_flags;
        int err, active_logs;
        bool need_restart_gc = false;
        bool need_stop_gc = false;
@@ -1056,14 +1149,37 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
 #ifdef CONFIG_F2FS_FAULT_INJECTION
        struct f2fs_fault_info ffi = sbi->fault_info;
 #endif
+#ifdef CONFIG_QUOTA
+       int s_jquota_fmt;
+       char *s_qf_names[MAXQUOTAS];
+       int i, j;
+#endif
 
        /*
         * Save the old mount options in case we
         * need to restore them.
         */
        org_mount_opt = sbi->mount_opt;
+       old_sb_flags = sb->s_flags;
        active_logs = sbi->active_logs;
 
+#ifdef CONFIG_QUOTA
+       s_jquota_fmt = sbi->s_jquota_fmt;
+       for (i = 0; i < MAXQUOTAS; i++) {
+               if (sbi->s_qf_names[i]) {
+                       s_qf_names[i] = kstrdup(sbi->s_qf_names[i],
+                                                        GFP_KERNEL);
+                       if (!s_qf_names[i]) {
+                               for (j = 0; j < i; j++)
+                                       kfree(s_qf_names[j]);
+                               return -ENOMEM;
+                       }
+               } else {
+                       s_qf_names[i] = NULL;
+               }
+       }
+#endif
+
        /* recover superblocks we couldn't write due to previous RO mount */
        if (!(*flags & MS_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
                err = f2fs_commit_super(sbi, false);
@@ -1073,7 +1189,6 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
                        clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
        }
 
-       sbi->mount_opt.opt = 0;
        default_options(sbi);
 
        /* parse mount options */
@@ -1088,6 +1203,16 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
        if (f2fs_readonly(sb) && (*flags & MS_RDONLY))
                goto skip;
 
+       if (!f2fs_readonly(sb) && (*flags & MS_RDONLY)) {
+               err = dquot_suspend(sb, -1);
+               if (err < 0)
+                       goto restore_opts;
+       } else {
+               /* dquot_resume needs RW */
+               sb->s_flags &= ~MS_RDONLY;
+               dquot_resume(sb, -1);
+       }
+
        /* disallow enable/disable extent_cache dynamically */
        if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) {
                err = -EINVAL;
@@ -1136,6 +1261,11 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
                        goto restore_gc;
        }
 skip:
+#ifdef CONFIG_QUOTA
+       /* Release old quota file names */
+       for (i = 0; i < MAXQUOTAS; i++)
+               kfree(s_qf_names[i]);
+#endif
        /* Update the POSIXACL Flag */
        sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
                (test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
@@ -1150,21 +1280,289 @@ restore_gc:
                stop_gc_thread(sbi);
        }
 restore_opts:
+#ifdef CONFIG_QUOTA
+       sbi->s_jquota_fmt = s_jquota_fmt;
+       for (i = 0; i < MAXQUOTAS; i++) {
+               kfree(sbi->s_qf_names[i]);
+               sbi->s_qf_names[i] = s_qf_names[i];
+       }
+#endif
        sbi->mount_opt = org_mount_opt;
        sbi->active_logs = active_logs;
+       sb->s_flags = old_sb_flags;
 #ifdef CONFIG_F2FS_FAULT_INJECTION
        sbi->fault_info = ffi;
 #endif
        return err;
 }
 
-static struct super_operations f2fs_sops = {
+#ifdef CONFIG_QUOTA
+/* Read data from quotafile */
+static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data,
+                              size_t len, loff_t off)
+{
+       struct inode *inode = sb_dqopt(sb)->files[type];
+       struct address_space *mapping = inode->i_mapping;
+       block_t blkidx = F2FS_BYTES_TO_BLK(off);
+       int offset = off & (sb->s_blocksize - 1);
+       int tocopy;
+       size_t toread;
+       loff_t i_size = i_size_read(inode);
+       struct page *page;
+       char *kaddr;
+
+       if (off > i_size)
+               return 0;
+
+       if (off + len > i_size)
+               len = i_size - off;
+       toread = len;
+       while (toread > 0) {
+               tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread);
+repeat:
+               page = read_mapping_page(mapping, blkidx, NULL);
+               if (IS_ERR(page))
+                       return PTR_ERR(page);
+
+               lock_page(page);
+
+               if (unlikely(page->mapping != mapping)) {
+                       f2fs_put_page(page, 1);
+                       goto repeat;
+               }
+               if (unlikely(!PageUptodate(page))) {
+                       f2fs_put_page(page, 1);
+                       return -EIO;
+               }
+
+               kaddr = kmap_atomic(page);
+               memcpy(data, kaddr + offset, tocopy);
+               kunmap_atomic(kaddr);
+               f2fs_put_page(page, 1);
+
+               offset = 0;
+               toread -= tocopy;
+               data += tocopy;
+               blkidx++;
+       }
+       return len;
+}
+
+/* Write to quotafile */
+static ssize_t f2fs_quota_write(struct super_block *sb, int type,
+                               const char *data, size_t len, loff_t off)
+{
+       struct inode *inode = sb_dqopt(sb)->files[type];
+       struct address_space *mapping = inode->i_mapping;
+       const struct address_space_operations *a_ops = mapping->a_ops;
+       int offset = off & (sb->s_blocksize - 1);
+       size_t towrite = len;
+       struct page *page;
+       char *kaddr;
+       int err = 0;
+       int tocopy;
+
+       while (towrite > 0) {
+               tocopy = min_t(unsigned long, sb->s_blocksize - offset,
+                                                               towrite);
+
+               err = a_ops->write_begin(NULL, mapping, off, tocopy, 0,
+                                                       &page, NULL);
+               if (unlikely(err))
+                       break;
+
+               kaddr = kmap_atomic(page);
+               memcpy(kaddr + offset, data, tocopy);
+               kunmap_atomic(kaddr);
+               flush_dcache_page(page);
+
+               a_ops->write_end(NULL, mapping, off, tocopy, tocopy,
+                                               page, NULL);
+               offset = 0;
+               towrite -= tocopy;
+               off += tocopy;
+               data += tocopy;
+               cond_resched();
+       }
+
+       if (len == towrite)
+               return 0;
+       inode->i_version++;
+       inode->i_mtime = inode->i_ctime = current_time(inode);
+       f2fs_mark_inode_dirty_sync(inode, false);
+       return len - towrite;
+}
+
+static struct dquot **f2fs_get_dquots(struct inode *inode)
+{
+       return F2FS_I(inode)->i_dquot;
+}
+
+static qsize_t *f2fs_get_reserved_space(struct inode *inode)
+{
+       return &F2FS_I(inode)->i_reserved_quota;
+}
+
+static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type)
+{
+       return dquot_quota_on_mount(sbi->sb, sbi->s_qf_names[type],
+                                               sbi->s_jquota_fmt, type);
+}
+
+void f2fs_enable_quota_files(struct f2fs_sb_info *sbi)
+{
+       int i, ret;
+
+       for (i = 0; i < MAXQUOTAS; i++) {
+               if (sbi->s_qf_names[i]) {
+                       ret = f2fs_quota_on_mount(sbi, i);
+                       if (ret < 0)
+                               f2fs_msg(sbi->sb, KERN_ERR,
+                                       "Cannot turn on journaled "
+                                       "quota: error %d", ret);
+               }
+       }
+}
+
+static int f2fs_quota_sync(struct super_block *sb, int type)
+{
+       struct quota_info *dqopt = sb_dqopt(sb);
+       int cnt;
+       int ret;
+
+       ret = dquot_writeback_dquots(sb, type);
+       if (ret)
+               return ret;
+
+       /*
+        * Now when everything is written we can discard the pagecache so
+        * that userspace sees the changes.
+        */
+       for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+               if (type != -1 && cnt != type)
+                       continue;
+               if (!sb_has_quota_active(sb, cnt))
+                       continue;
+
+               ret = filemap_write_and_wait(dqopt->files[cnt]->i_mapping);
+               if (ret)
+                       return ret;
+
+               inode_lock(dqopt->files[cnt]);
+               truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
+               inode_unlock(dqopt->files[cnt]);
+       }
+       return 0;
+}
+
+static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
+                                                       struct path *path)
+{
+       struct inode *inode;
+       int err;
+
+       err = f2fs_quota_sync(sb, type);
+       if (err)
+               return err;
+
+       err = dquot_quota_on(sb, type, format_id, path);
+       if (err)
+               return err;
+
+       inode = d_inode(path->dentry);
+
+       inode_lock(inode);
+       F2FS_I(inode)->i_flags |= FS_NOATIME_FL | FS_IMMUTABLE_FL;
+       inode_set_flags(inode, S_NOATIME | S_IMMUTABLE,
+                                       S_NOATIME | S_IMMUTABLE);
+       inode_unlock(inode);
+       f2fs_mark_inode_dirty_sync(inode, false);
+
+       return 0;
+}
+
+static int f2fs_quota_off(struct super_block *sb, int type)
+{
+       struct inode *inode = sb_dqopt(sb)->files[type];
+       int err;
+
+       if (!inode || !igrab(inode))
+               return dquot_quota_off(sb, type);
+
+       f2fs_quota_sync(sb, type);
+
+       err = dquot_quota_off(sb, type);
+       if (err)
+               goto out_put;
+
+       inode_lock(inode);
+       F2FS_I(inode)->i_flags &= ~(FS_NOATIME_FL | FS_IMMUTABLE_FL);
+       inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
+       inode_unlock(inode);
+       f2fs_mark_inode_dirty_sync(inode, false);
+out_put:
+       iput(inode);
+       return err;
+}
+
+void f2fs_quota_off_umount(struct super_block *sb)
+{
+       int type;
+
+       for (type = 0; type < MAXQUOTAS; type++)
+               f2fs_quota_off(sb, type);
+}
+
+#if 0
+int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
+{
+       *projid = F2FS_I(inode)->i_projid;
+       return 0;
+}
+#endif
+
+static const struct dquot_operations f2fs_quota_operations = {
+       .get_reserved_space = f2fs_get_reserved_space,
+       .write_dquot    = dquot_commit,
+       .acquire_dquot  = dquot_acquire,
+       .release_dquot  = dquot_release,
+       .mark_dirty     = dquot_mark_dquot_dirty,
+       .write_info     = dquot_commit_info,
+       .alloc_dquot    = dquot_alloc,
+       .destroy_dquot  = dquot_destroy,
+#if 0
+       .get_projid     = f2fs_get_projid,
+       .get_next_id    = dquot_get_next_id,
+#endif
+};
+
+static const struct quotactl_ops f2fs_quotactl_ops = {
+       .quota_on       = f2fs_quota_on,
+       .quota_off      = f2fs_quota_off,
+       .quota_sync     = f2fs_quota_sync,
+       .get_state      = dquot_get_state,
+       .set_info       = dquot_set_dqinfo,
+       .get_dqblk      = dquot_get_dqblk,
+       .set_dqblk      = dquot_set_dqblk,
+};
+#else
+void f2fs_quota_off_umount(struct super_block *sb)
+{
+}
+#endif
+
+static const struct super_operations f2fs_sops = {
        .alloc_inode    = f2fs_alloc_inode,
        .drop_inode     = f2fs_drop_inode,
        .destroy_inode  = f2fs_destroy_inode,
        .write_inode    = f2fs_write_inode,
        .dirty_inode    = f2fs_dirty_inode,
        .show_options   = f2fs_show_options,
+#ifdef CONFIG_QUOTA
+       .quota_read     = f2fs_quota_read,
+       .quota_write    = f2fs_quota_write,
+       .get_dquots     = f2fs_get_dquots,
+#endif
        .evict_inode    = f2fs_evict_inode,
        .put_super      = f2fs_put_super,
        .sync_fs        = f2fs_sync_fs,
@@ -1182,12 +1580,6 @@ static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
                                ctx, len, NULL);
 }
 
-static int f2fs_key_prefix(struct inode *inode, u8 **key)
-{
-       *key = F2FS_I_SB(inode)->key_prefix;
-       return F2FS_I_SB(inode)->key_prefix_size;
-}
-
 static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
                                                        void *fs_data)
 {
@@ -1202,16 +1594,16 @@ static unsigned f2fs_max_namelen(struct inode *inode)
                        inode->i_sb->s_blocksize : F2FS_NAME_LEN;
 }
 
-static struct fscrypt_operations f2fs_cryptops = {
+static const struct fscrypt_operations f2fs_cryptops = {
+       .key_prefix     = "f2fs:",
        .get_context    = f2fs_get_context,
-       .key_prefix     = f2fs_key_prefix,
        .set_context    = f2fs_set_context,
        .is_encrypted   = f2fs_encrypted_inode,
        .empty_dir      = f2fs_empty_dir,
        .max_namelen    = f2fs_max_namelen,
 };
 #else
-static struct fscrypt_operations f2fs_cryptops = {
+static const struct fscrypt_operations f2fs_cryptops = {
        .is_encrypted   = f2fs_encrypted_inode,
 };
 #endif
@@ -1263,9 +1655,16 @@ static const struct export_operations f2fs_export_ops = {
 
 static loff_t max_file_blocks(void)
 {
-       loff_t result = (DEF_ADDRS_PER_INODE - F2FS_INLINE_XATTR_ADDRS);
+       loff_t result = 0;
        loff_t leaf_count = ADDRS_PER_BLOCK;
 
+       /*
+        * note: previously, result was equal to (DEF_ADDRS_PER_INODE -
+        * F2FS_INLINE_XATTR_ADDRS), but now f2fs tries to reserve more
+        * space in inode.i_addr, so it is safer to initialize
+        * result to zero.
+        */
+
        /* two direct node blocks */
        result += (leaf_count * 2);
 
@@ -1467,6 +1866,13 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
                return 1;
        }
 
+       if (le32_to_cpu(raw_super->segment_count) > F2FS_MAX_SEGMENT) {
+               f2fs_msg(sb, KERN_INFO,
+                       "Invalid segment count (%u)",
+                       le32_to_cpu(raw_super->segment_count));
+               return 1;
+       }
+
        /* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
        if (sanity_check_area_boundary(sbi, bh))
                return 1;
@@ -1480,6 +1886,8 @@ int sanity_check_ckpt(struct f2fs_sb_info *sbi)
        struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
        struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
        unsigned int ovp_segments, reserved_segments;
+       unsigned int main_segs, blocks_per_seg;
+       int i;
 
        total = le32_to_cpu(raw_super->segment_count);
        fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
@@ -1501,6 +1909,20 @@ int sanity_check_ckpt(struct f2fs_sb_info *sbi)
                return 1;
        }
 
+       main_segs = le32_to_cpu(raw_super->segment_count_main);
+       blocks_per_seg = sbi->blocks_per_seg;
+
+       for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
+               if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
+                       le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
+                       return 1;
+       }
+       for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
+               if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
+                       le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
+                       return 1;
+       }
+
        if (unlikely(f2fs_cp_error(sbi))) {
                f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
                return 1;
@@ -1511,7 +1933,7 @@ int sanity_check_ckpt(struct f2fs_sb_info *sbi)
 static void init_sb_info(struct f2fs_sb_info *sbi)
 {
        struct f2fs_super_block *raw_super = sbi->raw_super;
-       int i;
+       int i, j;
 
        sbi->log_sectors_per_block =
                le32_to_cpu(raw_super->log_sectors_per_block);
@@ -1539,17 +1961,14 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
        for (i = 0; i < NR_COUNT_TYPE; i++)
                atomic_set(&sbi->nr_pages[i], 0);
 
+       atomic_set(&sbi->wb_sync_req, 0);
+
        INIT_LIST_HEAD(&sbi->s_list);
        mutex_init(&sbi->umount_mutex);
-       mutex_init(&sbi->wio_mutex[NODE]);
-       mutex_init(&sbi->wio_mutex[DATA]);
+       for (i = 0; i < NR_PAGE_TYPE - 1; i++)
+               for (j = HOT; j < NR_TEMP_TYPE; j++)
+                       mutex_init(&sbi->wio_mutex[i][j]);
        spin_lock_init(&sbi->cp_lock);
-
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
-       memcpy(sbi->key_prefix, F2FS_KEY_DESC_PREFIX,
-                               F2FS_KEY_DESC_PREFIX_SIZE);
-       sbi->key_prefix_size = F2FS_KEY_DESC_PREFIX_SIZE;
-#endif
 }
 
 static int init_percpu_info(struct f2fs_sb_info *sbi)
@@ -1579,16 +1998,16 @@ static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
                return 0;
 
        if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
-                               SECTOR_TO_BLOCK(bdev_zone_size(bdev)))
+                               SECTOR_TO_BLOCK(bdev_zone_sectors(bdev)))
                return -EINVAL;
-       sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_size(bdev));
+       sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_sectors(bdev));
        if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz !=
                                __ilog2_u32(sbi->blocks_per_blkz))
                return -EINVAL;
        sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz);
        FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >>
                                        sbi->log_blocks_per_blkz;
-       if (nr_sectors & (bdev_zone_size(bdev) - 1))
+       if (nr_sectors & (bdev_zone_sectors(bdev) - 1))
                FDEV(devi).nr_blkz++;
 
        FDEV(devi).blkz_type = kmalloc(FDEV(devi).nr_blkz, GFP_KERNEL);
@@ -1724,36 +2143,59 @@ int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
 static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
 {
        struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
+       unsigned int max_devices = MAX_DEVICES;
        int i;
 
-       for (i = 0; i < MAX_DEVICES; i++) {
-               if (!RDEV(i).path[0])
+       /* Initialize single device information */
+       if (!RDEV(0).path[0]) {
+#ifdef CONFIG_BLK_DEV_ZONED
+               if (!bdev_is_zoned(sbi->sb->s_bdev))
                        return 0;
+               max_devices = 1;
+#else
+               return 0;
+#endif
+       }
 
-               if (i == 0) {
-                       sbi->devs = kzalloc(sizeof(struct f2fs_dev_info) *
-                                               MAX_DEVICES, GFP_KERNEL);
-                       if (!sbi->devs)
-                               return -ENOMEM;
-               }
+       /*
+        * Initialize multiple devices information, or single
+        * zoned block device information.
+        */
+       sbi->devs = kcalloc(max_devices, sizeof(struct f2fs_dev_info),
+                               GFP_KERNEL);
+       if (!sbi->devs)
+               return -ENOMEM;
 
-               memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
-               FDEV(i).total_segments = le32_to_cpu(RDEV(i).total_segments);
-               if (i == 0) {
-                       FDEV(i).start_blk = 0;
-                       FDEV(i).end_blk = FDEV(i).start_blk +
-                               (FDEV(i).total_segments <<
-                               sbi->log_blocks_per_seg) - 1 +
-                               le32_to_cpu(raw_super->segment0_blkaddr);
-               } else {
-                       FDEV(i).start_blk = FDEV(i - 1).end_blk + 1;
-                       FDEV(i).end_blk = FDEV(i).start_blk +
-                               (FDEV(i).total_segments <<
-                               sbi->log_blocks_per_seg) - 1;
-               }
+       for (i = 0; i < max_devices; i++) {
 
-               FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path,
+               if (i > 0 && !RDEV(i).path[0])
+                       break;
+
+               if (max_devices == 1) {
+                       /* Single zoned block device mount */
+                       FDEV(0).bdev =
+                               blkdev_get_by_dev(sbi->sb->s_bdev->bd_dev,
+                                       sbi->sb->s_mode, sbi->sb->s_type);
+               } else {
+                       /* Multi-device mount */
+                       memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
+                       FDEV(i).total_segments =
+                               le32_to_cpu(RDEV(i).total_segments);
+                       if (i == 0) {
+                               FDEV(i).start_blk = 0;
+                               FDEV(i).end_blk = FDEV(i).start_blk +
+                                   (FDEV(i).total_segments <<
+                                   sbi->log_blocks_per_seg) - 1 +
+                                   le32_to_cpu(raw_super->segment0_blkaddr);
+                       } else {
+                               FDEV(i).start_blk = FDEV(i - 1).end_blk + 1;
+                               FDEV(i).end_blk = FDEV(i).start_blk +
+                                       (FDEV(i).total_segments <<
+                                       sbi->log_blocks_per_seg) - 1;
+                       }
+                       FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path,
                                        sbi->sb->s_mode, sbi->sb->s_type);
+               }
                if (IS_ERR(FDEV(i).bdev))
                        return PTR_ERR(FDEV(i).bdev);
 
@@ -1773,6 +2215,8 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
                                        "Failed to initialize F2FS blkzone information");
                                return -EINVAL;
                        }
+                       if (max_devices == 1)
+                               break;
                        f2fs_msg(sbi->sb, KERN_INFO,
                                "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)",
                                i, FDEV(i).path,
@@ -1841,6 +2285,11 @@ try_onemore:
        sb->s_fs_info = sbi;
        sbi->raw_super = raw_super;
 
+       /* precompute checksum seed for metadata */
+       if (f2fs_sb_has_inode_chksum(sb))
+               sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
+                                               sizeof(raw_super->uuid));
+
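The precomputed seed ties metadata checksums to this particular filesystem instance. As a rough sketch (assumed shape, not the exact inode code in this patch), later per-inode checksums can then be chained from the UUID-derived seed instead of a fixed constant, so identical inode bytes on two filesystems still checksum differently:

	/* Sketch only: chain a per-inode checksum from the per-sb seed. */
	u32 chksum = f2fs_chksum(sbi, sbi->s_chksum_seed, (u8 *)&ino, sizeof(ino));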
        /*
         * The BLKZONED feature indicates that the drive was formatted with
         * zone alignment optimization. This is optional for host-aware
@@ -1850,6 +2299,7 @@ try_onemore:
        if (f2fs_sb_mounted_blkzoned(sb)) {
                f2fs_msg(sb, KERN_ERR,
                         "Zoned block device support is not enabled\n");
+               err = -EOPNOTSUPP;
                goto free_sb_buf;
        }
 #endif
@@ -1871,6 +2321,12 @@ try_onemore:
        sb->s_max_links = F2FS_LINK_MAX;
        get_random_bytes(&sbi->s_next_generation, sizeof(u32));
 
+#ifdef CONFIG_QUOTA
+       sb->dq_op = &f2fs_quota_operations;
+       sb->s_qcop = &f2fs_quotactl_ops;
+       sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
+#endif
+
        sb->s_op = &f2fs_sops;
        sb->s_cop = &f2fs_cryptops;
        sb->s_xattr = f2fs_xattr_handlers;
@@ -1886,18 +2342,34 @@ try_onemore:
        mutex_init(&sbi->gc_mutex);
        mutex_init(&sbi->cp_mutex);
        init_rwsem(&sbi->node_write);
+       init_rwsem(&sbi->node_change);
 
        /* disallow all the data/node/meta page writes */
        set_sbi_flag(sbi, SBI_POR_DOING);
        spin_lock_init(&sbi->stat_lock);
 
-       init_rwsem(&sbi->read_io.io_rwsem);
-       sbi->read_io.sbi = sbi;
-       sbi->read_io.bio = NULL;
+       /* init iostat info */
+       spin_lock_init(&sbi->iostat_lock);
+       sbi->iostat_enable = false;
+
        for (i = 0; i < NR_PAGE_TYPE; i++) {
-               init_rwsem(&sbi->write_io[i].io_rwsem);
-               sbi->write_io[i].sbi = sbi;
-               sbi->write_io[i].bio = NULL;
+               int n = (i == META) ? 1 : NR_TEMP_TYPE;
+               int j;
+
+               sbi->write_io[i] = kmalloc(n * sizeof(struct f2fs_bio_info),
+                                                               GFP_KERNEL);
+               if (!sbi->write_io[i]) {
+                       err = -ENOMEM;
+                       goto free_options;
+               }
+
+               for (j = HOT; j < n; j++) {
+                       init_rwsem(&sbi->write_io[i][j].io_rwsem);
+                       sbi->write_io[i][j].sbi = sbi;
+                       sbi->write_io[i][j].bio = NULL;
+                       spin_lock_init(&sbi->write_io[i][j].io_lock);
+                       INIT_LIST_HEAD(&sbi->write_io[i][j].io_list);
+               }
        }
 
        init_rwsem(&sbi->cp_rwsem);
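With this change the per-sb write bio state becomes a small two-dimensional table: DATA and NODE get one f2fs_bio_info per temperature, while META keeps a single slot. A minimal sketch of the resulting lookup, assuming the HOT/WARM/COLD temp_type enum from f2fs.h where HOT is 0 (fio stands in for a write's f2fs_io_info here):

	/* Sketch only: pick the bio slot by page type and temperature.
	 * META writes must pass temp == HOT (0), the only slot allocated for it. */
	struct f2fs_bio_info *io = sbi->write_io[fio->type] + fio->temp;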
@@ -1910,9 +2382,11 @@ try_onemore:
 
        if (F2FS_IO_SIZE(sbi) > 1) {
                sbi->write_io_dummy =
-                       mempool_create_page_pool(F2FS_IO_SIZE(sbi) - 1, 0);
-               if (!sbi->write_io_dummy)
+                       mempool_create_page_pool(2 * (F2FS_IO_SIZE(sbi) - 1), 0);
+               if (!sbi->write_io_dummy) {
+                       err = -ENOMEM;
                        goto free_options;
+               }
        }
 
        /* get an inode for meta space */
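The dummy-page pool only matters when io_bits is set: a partially filled bio is padded with pages from this pool up to the next F2FS_IO_SIZE boundary before submission. A hedged sketch of the padding arithmetic (the helper name and shape are illustrative, not from the source):

	/* Sketch only: dummy pages needed to round a bio up to the IO-size unit. */
	static unsigned int io_pad_pages(unsigned int io_size, unsigned int nr_pages)
	{
		unsigned int rem = nr_pages % io_size;

		return rem ? io_size - rem : 0;
	}

Sizing the pool at 2 * (F2FS_IO_SIZE(sbi) - 1) presumably leaves room for two such partially filled bios to be padded at the same time without exhausting the pool.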
@@ -1944,6 +2418,7 @@ try_onemore:
        sbi->total_valid_block_count =
                                le64_to_cpu(sbi->ckpt->valid_block_count);
        sbi->last_valid_block_count = sbi->total_valid_block_count;
+       sbi->reserved_blocks = 0;
 
        for (i = 0; i < NR_INODE_TYPE; i++) {
                INIT_LIST_HEAD(&sbi->inode_list[i]);
@@ -1991,10 +2466,9 @@ try_onemore:
 
        f2fs_join_shrinker(sbi);
 
-       /* if there are nt orphan nodes free them */
-       err = recover_orphan_inodes(sbi);
+       err = f2fs_build_stats(sbi);
        if (err)
-               goto free_node_inode;
+               goto free_nm;
 
        /* read root inode and dentry */
        root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
@@ -2015,26 +2489,14 @@ try_onemore:
                goto free_root_inode;
        }
 
-       err = f2fs_build_stats(sbi);
+       err = f2fs_register_sysfs(sbi);
        if (err)
                goto free_root_inode;
 
-       if (f2fs_proc_root)
-               sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);
-
-       if (sbi->s_proc) {
-               proc_create_data("segment_info", S_IRUGO, sbi->s_proc,
-                                &f2fs_seq_segment_info_fops, sb);
-               proc_create_data("segment_bits", S_IRUGO, sbi->s_proc,
-                                &f2fs_seq_segment_bits_fops, sb);
-       }
-
-       sbi->s_kobj.kset = f2fs_kset;
-       init_completion(&sbi->s_kobj_unregister);
-       err = kobject_init_and_add(&sbi->s_kobj, &f2fs_ktype, NULL,
-                                                       "%s", sb->s_id);
+       /* if there are any orphan nodes, free them */
+       err = recover_orphan_inodes(sbi);
        if (err)
-               goto free_proc;
+               goto free_sysfs;
 
        /* recover fsynced data */
        if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
@@ -2045,7 +2507,7 @@ try_onemore:
                if (bdev_read_only(sb->s_bdev) &&
                                !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
                        err = -EROFS;
-                       goto free_kobj;
+                       goto free_meta;
                }
 
                if (need_fsck)
@@ -2059,7 +2521,7 @@ try_onemore:
                        need_fsck = true;
                        f2fs_msg(sb, KERN_ERR,
                                "Cannot recover all fsync data errno=%d", err);
-                       goto free_kobj;
+                       goto free_meta;
                }
        } else {
                err = recover_fsync_data(sbi, true);
@@ -2068,7 +2530,7 @@ try_onemore:
                        err = -EINVAL;
                        f2fs_msg(sb, KERN_ERR,
                                "Need to recover fsync data");
-                       goto free_kobj;
+                       goto free_sysfs;
                }
        }
 skip_recovery:
@@ -2083,7 +2545,7 @@ skip_recovery:
                /* After POR, we can run background GC thread.*/
                err = start_gc_thread(sbi);
                if (err)
-                       goto free_kobj;
+                       goto free_meta;
        }
        kfree(options);
 
@@ -2095,22 +2557,23 @@ skip_recovery:
                        sbi->valid_super_block ? 1 : 2, err);
        }
 
+       f2fs_msg(sbi->sb, KERN_NOTICE, "Mounted with checkpoint version = %llx",
+                               cur_cp_version(F2FS_CKPT(sbi)));
        f2fs_update_time(sbi, CP_TIME);
        f2fs_update_time(sbi, REQ_TIME);
        return 0;
 
-free_kobj:
+free_meta:
        f2fs_sync_inode_meta(sbi);
-       kobject_del(&sbi->s_kobj);
-       kobject_put(&sbi->s_kobj);
-       wait_for_completion(&sbi->s_kobj_unregister);
-free_proc:
-       if (sbi->s_proc) {
-               remove_proc_entry("segment_info", sbi->s_proc);
-               remove_proc_entry("segment_bits", sbi->s_proc);
-               remove_proc_entry(sb->s_id, f2fs_proc_root);
-       }
-       f2fs_destroy_stats(sbi);
+       /*
+        * Some dirty meta pages can be produced when recover_orphan_inodes()
+        * fails with EIO. Then iput(node_inode) can trigger balance_fs_bg(),
+        * followed by write_checkpoint() through f2fs_write_node_pages(), which
+        * falls into an infinite loop in sync_meta_pages().
+        */
+       truncate_inode_pages_final(META_MAPPING(sbi));
+free_sysfs:
+       f2fs_unregister_sysfs(sbi);
 free_root_inode:
        dput(sb->s_root);
        sb->s_root = NULL;
@@ -2119,15 +2582,9 @@ free_node_inode:
        mutex_lock(&sbi->umount_mutex);
        release_ino_entry(sbi, true);
        f2fs_leave_shrinker(sbi);
-       /*
-        * Some dirty meta pages can be produced by recover_orphan_inodes()
-        * failed by EIO. Then, iput(node_inode) can trigger balance_fs_bg()
-        * followed by write_checkpoint() through f2fs_write_node_pages(), which
-        * falls into an infinite loop in sync_meta_pages().
-        */
-       truncate_inode_pages_final(META_MAPPING(sbi));
        iput(sbi->node_inode);
        mutex_unlock(&sbi->umount_mutex);
+       f2fs_destroy_stats(sbi);
 free_nm:
        destroy_node_manager(sbi);
 free_sm:
@@ -2141,7 +2598,13 @@ free_meta_inode:
 free_io_dummy:
        mempool_destroy(sbi->write_io_dummy);
 free_options:
+       for (i = 0; i < NR_PAGE_TYPE; i++)
+               kfree(sbi->write_io[i]);
        destroy_percpu_info(sbi);
+#ifdef CONFIG_QUOTA
+       for (i = 0; i < MAXQUOTAS; i++)
+               kfree(sbi->s_qf_names[i]);
+#endif
        kfree(options);
 free_sb_buf:
        kfree(raw_super);
@@ -2167,8 +2630,11 @@ static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
 
 static void kill_f2fs_super(struct super_block *sb)
 {
-       if (sb->s_root)
+       if (sb->s_root) {
                set_sbi_flag(F2FS_SB(sb), SBI_IS_CLOSE);
+               stop_gc_thread(F2FS_SB(sb));
+               stop_discard_thread(F2FS_SB(sb));
+       }
        kill_block_super(sb);
 }
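Stopping the GC and discard threads before kill_block_super() keeps background work from issuing I/O against a superblock that is being torn down. Roughly, each stop helper parks a kthread and clears the per-sb pointer; the sketch below is an assumed shape for illustration, not the exact body from gc.c:

	/* Sketch only: assumed shape of a background-thread stop helper. */
	void stop_gc_thread(struct f2fs_sb_info *sbi)
	{
		struct f2fs_gc_kthread *gc_th = sbi->gc_thread;

		if (!gc_th)
			return;
		kthread_stop(gc_th->f2fs_gc_task);
		kfree(gc_th);
		sbi->gc_thread = NULL;
	}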
 
@@ -2222,30 +2688,26 @@ static int __init init_f2fs_fs(void)
        err = create_extent_cache();
        if (err)
                goto free_checkpoint_caches;
-       f2fs_kset = kset_create_and_add("f2fs", NULL, fs_kobj);
-       if (!f2fs_kset) {
-               err = -ENOMEM;
+       err = f2fs_init_sysfs();
+       if (err)
                goto free_extent_cache;
-       }
        err = register_shrinker(&f2fs_shrinker_info);
        if (err)
-               goto free_kset;
-
+               goto free_sysfs;
        err = register_filesystem(&f2fs_fs_type);
        if (err)
                goto free_shrinker;
        err = f2fs_create_root_stats();
        if (err)
                goto free_filesystem;
-       f2fs_proc_root = proc_mkdir("fs/f2fs", NULL);
        return 0;
 
 free_filesystem:
        unregister_filesystem(&f2fs_fs_type);
 free_shrinker:
        unregister_shrinker(&f2fs_shrinker_info);
-free_kset:
-       kset_unregister(f2fs_kset);
+free_sysfs:
+       f2fs_exit_sysfs();
 free_extent_cache:
        destroy_extent_cache();
 free_checkpoint_caches:
@@ -2262,11 +2724,10 @@ fail:
 
 static void __exit exit_f2fs_fs(void)
 {
-       remove_proc_entry("fs/f2fs", NULL);
        f2fs_destroy_root_stats();
        unregister_filesystem(&f2fs_fs_type);
        unregister_shrinker(&f2fs_shrinker_info);
-       kset_unregister(f2fs_kset);
+       f2fs_exit_sysfs();
        destroy_extent_cache();
        destroy_checkpoint_caches();
        destroy_segment_manager_caches();