Update f2fs and fscrypto to current f2fs-stable/linux-3.10.y (2016-12-12)
author alexax66 <alexax66@mail.ru>
Thu, 26 Jan 2017 18:19:36 +0000 (18:19 +0000)
committer Stricted <info@stricted.net>
Thu, 11 Oct 2018 16:03:54 +0000 (18:03 +0200)
https://kernel.googlesource.com/pub/scm/linux/kernel/git/jaegeuk/f2fs-stable/+/linux-3.10.y

45 files changed:
Documentation/ABI/testing/sysfs-fs-f2fs [new file with mode: 0644]
Documentation/filesystems/f2fs.txt
fs/Kconfig
fs/Makefile
fs/crypto/Kconfig [new file with mode: 0644]
fs/crypto/Makefile [new file with mode: 0644]
fs/crypto/crypto.c [new file with mode: 0644]
fs/crypto/fname.c [new file with mode: 0644]
fs/crypto/keyinfo.c [new file with mode: 0644]
fs/crypto/policy.c [new file with mode: 0644]
fs/f2fs/Kconfig
fs/f2fs/Makefile
fs/f2fs/acl.c
fs/f2fs/acl.h
fs/f2fs/checkpoint.c
fs/f2fs/data.c
fs/f2fs/debug.c
fs/f2fs/dir.c
fs/f2fs/extent_cache.c [new file with mode: 0644]
fs/f2fs/f2fs.h
fs/f2fs/file.c
fs/f2fs/gc.c
fs/f2fs/gc.h
fs/f2fs/hash.c
fs/f2fs/inline.c [new file with mode: 0644]
fs/f2fs/inode.c
fs/f2fs/namei.c
fs/f2fs/node.c
fs/f2fs/node.h
fs/f2fs/recovery.c
fs/f2fs/segment.c
fs/f2fs/segment.h
fs/f2fs/shrinker.c [new file with mode: 0644]
fs/f2fs/super.c
fs/f2fs/trace.c [new file with mode: 0644]
fs/f2fs/trace.h [new file with mode: 0644]
fs/f2fs/xattr.c
fs/f2fs/xattr.h
include/linux/dcache.h
include/linux/f2fs_fs.h
include/linux/fs.h
include/linux/fscrypto.h [new file with mode: 0644]
include/trace/events/f2fs.h
include/uapi/linux/fs.h
security/keys/encrypted-keys/encrypted.c

diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
new file mode 100644 (file)
index 0000000..a809f60
--- /dev/null
@@ -0,0 +1,114 @@
+What:          /sys/fs/f2fs/<disk>/gc_max_sleep_time
+Date:          July 2013
+Contact:       "Namjae Jeon" <namjae.jeon@samsung.com>
+Description:
+                Controls the maximum sleep time for gc_thread. Time
+                is in milliseconds.
+
+What:          /sys/fs/f2fs/<disk>/gc_min_sleep_time
+Date:          July 2013
+Contact:       "Namjae Jeon" <namjae.jeon@samsung.com>
+Description:
+                Controls the minimum sleep time for gc_thread. Time
+                is in milliseconds.
+
+What:          /sys/fs/f2fs/<disk>/gc_no_gc_sleep_time
+Date:          July 2013
+Contact:       "Namjae Jeon" <namjae.jeon@samsung.com>
+Description:
+                Controls the default sleep time for gc_thread. Time
+                is in milliseconds.
+
+What:          /sys/fs/f2fs/<disk>/gc_idle
+Date:          July 2013
+Contact:       "Namjae Jeon" <namjae.jeon@samsung.com>
+Description:
+                Controls the victim selection policy for garbage collection.
+
+What:          /sys/fs/f2fs/<disk>/reclaim_segments
+Date:          October 2013
+Contact:       "Jaegeuk Kim" <jaegeuk.kim@samsung.com>
+Description:
+                Controls the issue rate of segment discard commands.
+
+What:          /sys/fs/f2fs/<disk>/ipu_policy
+Date:          November 2013
+Contact:       "Jaegeuk Kim" <jaegeuk.kim@samsung.com>
+Description:
+                Controls the in-place-update policy.
+
+What:          /sys/fs/f2fs/<disk>/min_ipu_util
+Date:          November 2013
+Contact:       "Jaegeuk Kim" <jaegeuk.kim@samsung.com>
+Description:
+                Controls the FS utilization condition for the in-place-update
+                policies.
+
+What:          /sys/fs/f2fs/<disk>/min_fsync_blocks
+Date:          September 2014
+Contact:       "Jaegeuk Kim" <jaegeuk@kernel.org>
+Description:
+                Controls the dirty page count condition for the in-place-update
+                policies.
+
+What:          /sys/fs/f2fs/<disk>/max_small_discards
+Date:          November 2013
+Contact:       "Jaegeuk Kim" <jaegeuk.kim@samsung.com>
+Description:
+                Controls the issue rate of small discard commands.
+
+What:          /sys/fs/f2fs/<disk>/max_victim_search
+Date:          January 2014
+Contact:       "Jaegeuk Kim" <jaegeuk.kim@samsung.com>
+Description:
+                Controls the number of trials to find a victim segment.
+
+What:          /sys/fs/f2fs/<disk>/dir_level
+Date:          March 2014
+Contact:       "Jaegeuk Kim" <jaegeuk.kim@samsung.com>
+Description:
+                Controls the directory level for large directories.
+
+What:          /sys/fs/f2fs/<disk>/ram_thresh
+Date:          March 2014
+Contact:       "Jaegeuk Kim" <jaegeuk.kim@samsung.com>
+Description:
+                Controls the memory footprint used by f2fs.
+
+What:          /sys/fs/f2fs/<disk>/trim_sections
+Date:          February 2015
+Contact:       "Jaegeuk Kim" <jaegeuk@kernel.org>
+Description:
+                Controls the trimming rate in batch mode.
+
+What:          /sys/fs/f2fs/<disk>/cp_interval
+Date:          October 2015
+Contact:       "Jaegeuk Kim" <jaegeuk@kernel.org>
+Description:
+                Controls the checkpoint timing.
+
+What:          /sys/fs/f2fs/<disk>/idle_interval
+Date:          January 2016
+Contact:       "Jaegeuk Kim" <jaegeuk@kernel.org>
+Description:
+                Controls the idle timing.
+
+What:          /sys/fs/f2fs/<disk>/ra_nid_pages
+Date:          October 2015
+Contact:       "Chao Yu" <chao2.yu@samsung.com>
+Description:
+                Controls the count of nid pages to be read ahead.
+
+What:          /sys/fs/f2fs/<disk>/dirty_nats_ratio
+Date:          January 2016
+Contact:       "Chao Yu" <chao2.yu@samsung.com>
+Description:
+                Controls the dirty nat entries ratio threshold. If the
+                current ratio exceeds the configured threshold, a
+                checkpoint will be triggered to flush dirty nat entries.
+
+What:          /sys/fs/f2fs/<disk>/lifetime_write_kbytes
+Date:          January 2016
+Contact:       "Shuoran Liu" <liushuoran@huawei.com>
+Description:
+                Shows total written kbytes issued to disk.
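
For reference, these tunables are plain text files under sysfs. A minimal
user-space sketch of reading and updating one of them (the device name
"sda1" and the 30000 ms value are placeholders, not part of this patch):

#include <stdio.h>

int main(void)
{
	/* Hypothetical device; substitute the mounted f2fs volume. */
	const char *path = "/sys/fs/f2fs/sda1/gc_max_sleep_time";
	FILE *f = fopen(path, "r+");
	unsigned int ms;

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%u", &ms) == 1)
		printf("gc_max_sleep_time: %u ms\n", ms);
	rewind(f);
	fprintf(f, "30000\n");	/* raise the max GC sleep to 30 s */
	fclose(f);
	return 0;
}
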
index bd3c56c67380b1cf27d3ef82f59747e76a2ad07f..57250bbab88a4e5a275fda5846dcc91f0a9d242a 100644 (file)
@@ -98,10 +98,20 @@ Cleaning Overhead
 MOUNT OPTIONS
 ================================================================================
 
-background_gc_off      Turn off cleaning operations, namely garbage collection,
-                      triggered in background when I/O subsystem is idle.
+background_gc=%s       Turn on/off cleaning operations, namely garbage
+                       collection, triggered in background when the I/O
+                       subsystem is idle. background_gc=on enables garbage
+                       collection, background_gc=off disables it, and
+                       background_gc=sync enables synchronous garbage
+                       collection running in the background. The default
+                       value is on, so garbage collection is enabled by
+                       default.
 disable_roll_forward   Disable the roll-forward recovery routine
-discard                Issue discard/TRIM commands when a segment is cleaned.
+norecovery             Disable the roll-forward recovery routine when mounted
+                       read-only (i.e., -o ro,disable_roll_forward).
+discard/nodiscard      Enable/disable real-time discard in f2fs. If discard is
+                       enabled, f2fs will issue discard/TRIM commands when a
+                       segment is cleaned.
 no_heap                Disable heap-style segment allocation, which finds free
                        segments for data from the beginning of the main area,
                        while node segments come from the end of the main area.
@@ -114,6 +124,39 @@ active_logs=%u         Support configuring the number of active logs. In the
                        Default number is 6.
 disable_ext_identify   Disable the extension list configured by mkfs, so f2fs
                        is not aware of cold files such as media files.
+inline_xattr           Enable the inline xattrs feature.
+inline_data            Enable the inline data feature: newly created small
+                       (<~3.4k) files can be written into the inode block.
+inline_dentry          Enable the inline dir feature: data in newly created
+                       directory entries can be written into the inode block.
+                       The space of the inode block used to store inline
+                       dentries is limited to ~3.4k.
+noinline_dentry        Disable the inline dentry feature.
+flush_merge            Merge concurrent cache_flush commands as much as
+                       possible to eliminate redundant command issues. If the
+                       underlying device handles the cache_flush command
+                       relatively slowly, enabling this option is recommended.
+nobarrier              This option can be used if the underlying storage
+                       guarantees that its cached data will be written to the
+                       non-volatile area. If this option is set, no cache_flush
+                       commands are issued but f2fs still guarantees the write
+                       ordering of all the data writes.
+fastboot               This option is used when a system wants to reduce mount
+                       time as much as possible, even though normal performance
+                       can be sacrificed.
+extent_cache           Enable an extent cache based on rb-tree; it can cache
+                       as many extents as map between contiguous logical and
+                       physical addresses per inode, increasing the cache hit
+                       ratio. Set by default.
+noextent_cache         Disable an extent cache based on rb-tree explicitly, see
+                       the above extent_cache mount option.
+noinline_data          Disable the inline data feature; the inline data
+                       feature is enabled by default.
+data_flush             Enable data flushing before checkpoint in order to
+                       persist the data of regular files and symlinks.
+mode=%s                Control the block allocation mode, which supports
+                       "adaptive" and "lfs". In "lfs" mode, there should be no
+                       random writes to the main area.
 
 ================================================================================
 DEBUGFS ENTRIES
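
As an aside, the options above combine as a comma-separated string. A hedged
sketch of mounting with a few of them via mount(2) (device and mount point
are placeholders):

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* Hypothetical device and mount point. */
	if (mount("/dev/sdb1", "/mnt/f2fs", "f2fs", 0,
		  "background_gc=on,discard,inline_data,flush_merge")) {
		perror("mount");
		return 1;
	}
	return 0;
}
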
index 8ee893db74e7323a286fb73886c43f42e2134e93..e10ca4b2dc5b03505870803eb11220f408fc2240 100644 (file)
@@ -66,6 +66,8 @@ config FILE_LOCKING
           for filesystems like NFS and for the flock() system
           call. Disabling this option saves about 11k.
 
+source "fs/crypto/Kconfig"
+
 source "fs/notify/Kconfig"
 
 source "fs/quota/Kconfig"
index 3f6715464bde1038402a11208337b2280db34da5..77947d4c8d7f8d6f4483f563c7537a112b2ca276 100644 (file)
@@ -29,6 +29,7 @@ obj-$(CONFIG_SIGNALFD)                += signalfd.o
 obj-$(CONFIG_TIMERFD)          += timerfd.o
 obj-$(CONFIG_EVENTFD)          += eventfd.o
 obj-$(CONFIG_AIO)               += aio.o
+obj-$(CONFIG_FS_ENCRYPTION)    += crypto/
 obj-$(CONFIG_FILE_LOCKING)      += locks.o
 obj-$(CONFIG_COMPAT)           += compat.o compat_ioctl.o
 obj-$(CONFIG_BINFMT_AOUT)      += binfmt_aout.o
diff --git a/fs/crypto/Kconfig b/fs/crypto/Kconfig
new file mode 100644 (file)
index 0000000..92348fa
--- /dev/null
@@ -0,0 +1,18 @@
+config FS_ENCRYPTION
+       tristate "FS Encryption (Per-file encryption)"
+       depends on BLOCK
+       select CRYPTO
+       select CRYPTO_AES
+       select CRYPTO_CBC
+       select CRYPTO_ECB
+       select CRYPTO_XTS
+       select CRYPTO_CTS
+       select CRYPTO_CTR
+       select CRYPTO_SHA256
+       select KEYS
+       select ENCRYPTED_KEYS
+       help
+         Enable encryption of files and directories.  This
+         feature is similar to ecryptfs, but it is more memory
+         efficient since it avoids caching the encrypted and
+         decrypted pages in the page cache.
diff --git a/fs/crypto/Makefile b/fs/crypto/Makefile
new file mode 100644 (file)
index 0000000..f17684c
--- /dev/null
@@ -0,0 +1,3 @@
+obj-$(CONFIG_FS_ENCRYPTION)    += fscrypto.o
+
+fscrypto-y := crypto.o fname.o policy.o keyinfo.o
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
new file mode 100644 (file)
index 0000000..f5bc754
--- /dev/null
@@ -0,0 +1,567 @@
+/*
+ * This contains encryption functions for per-file encryption.
+ *
+ * Copyright (C) 2015, Google, Inc.
+ * Copyright (C) 2015, Motorola Mobility
+ *
+ * Written by Michael Halcrow, 2014.
+ *
+ * Filename encryption additions
+ *     Uday Savagaonkar, 2014
+ * Encryption policy handling additions
+ *     Ildar Muslukhov, 2014
+ * Add fscrypt_pullback_bio_page()
+ *     Jaegeuk Kim, 2015.
+ *
+ * This has not yet undergone a rigorous security audit.
+ *
+ * The usage of AES-XTS should conform to recommendations in NIST
+ * Special Publication 800-38E and IEEE P1619/D16.
+ */
+
+#include <linux/crypto.h>
+#include <linux/ecryptfs.h>
+#include <linux/pagemap.h>
+#include <linux/mempool.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/ratelimit.h>
+#include <linux/bio.h>
+#include <linux/dcache.h>
+#include <linux/namei.h>
+#include <linux/fscrypto.h>
+
+static unsigned int num_prealloc_crypto_pages = 32;
+static unsigned int num_prealloc_crypto_ctxs = 128;
+
+module_param(num_prealloc_crypto_pages, uint, 0444);
+MODULE_PARM_DESC(num_prealloc_crypto_pages,
+               "Number of crypto pages to preallocate");
+module_param(num_prealloc_crypto_ctxs, uint, 0444);
+MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
+               "Number of crypto contexts to preallocate");
+
+static mempool_t *fscrypt_bounce_page_pool = NULL;
+
+static LIST_HEAD(fscrypt_free_ctxs);
+static DEFINE_SPINLOCK(fscrypt_ctx_lock);
+
+static struct workqueue_struct *fscrypt_read_workqueue;
+static DEFINE_MUTEX(fscrypt_init_mutex);
+
+static struct kmem_cache *fscrypt_ctx_cachep;
+struct kmem_cache *fscrypt_info_cachep;
+
+/**
+ * fscrypt_release_ctx() - Releases an encryption context
+ * @ctx: The encryption context to release.
+ *
+ * If the encryption context was allocated from the pre-allocated pool, returns
+ * it to that pool. Else, frees it.
+ *
+ * If there's a bounce page in the context, this frees that.
+ */
+void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
+{
+       unsigned long flags;
+
+       if (ctx->flags & FS_WRITE_PATH_FL && ctx->w.bounce_page) {
+               mempool_free(ctx->w.bounce_page, fscrypt_bounce_page_pool);
+               ctx->w.bounce_page = NULL;
+       }
+       ctx->w.control_page = NULL;
+       if (ctx->flags & FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
+               kmem_cache_free(fscrypt_ctx_cachep, ctx);
+       } else {
+               spin_lock_irqsave(&fscrypt_ctx_lock, flags);
+               list_add(&ctx->free_list, &fscrypt_free_ctxs);
+               spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
+       }
+}
+EXPORT_SYMBOL(fscrypt_release_ctx);
+
+/**
+ * fscrypt_get_ctx() - Gets an encryption context
+ * @inode:       The inode for which we are doing the crypto
+ * @gfp_flags:   The gfp flag for memory allocation
+ *
+ * Allocates and initializes an encryption context.
+ *
+ * Return: An allocated and initialized encryption context on success; an
+ * error pointer otherwise.
+ */
+struct fscrypt_ctx *fscrypt_get_ctx(struct inode *inode, gfp_t gfp_flags)
+{
+       struct fscrypt_ctx *ctx = NULL;
+       struct fscrypt_info *ci = inode->i_crypt_info;
+       unsigned long flags;
+
+       if (ci == NULL)
+               return ERR_PTR(-ENOKEY);
+
+       /*
+        * We first try getting the ctx from a free list because in
+        * the common case the ctx will have an allocated and
+        * initialized crypto tfm, so it's probably a worthwhile
+        * optimization. For the bounce page, we first try getting it
+        * from the kernel allocator because that's just about as fast
+        * as getting it from a list and because a cache of free pages
+        * should generally be a "last resort" option for a filesystem
+        * to be able to do its job.
+        */
+       spin_lock_irqsave(&fscrypt_ctx_lock, flags);
+       ctx = list_first_entry_or_null(&fscrypt_free_ctxs,
+                                       struct fscrypt_ctx, free_list);
+       if (ctx)
+               list_del(&ctx->free_list);
+       spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
+       if (!ctx) {
+               ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, gfp_flags);
+               if (!ctx)
+                       return ERR_PTR(-ENOMEM);
+               ctx->flags |= FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
+       } else {
+               ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
+       }
+       ctx->flags &= ~FS_WRITE_PATH_FL;
+       return ctx;
+}
+EXPORT_SYMBOL(fscrypt_get_ctx);
+
+/**
+ * page_crypt_complete() - completion callback for page crypto
+ * @req: The asynchronous cipher request context
+ * @res: The result of the cipher operation
+ */
+static void page_crypt_complete(struct crypto_async_request *req, int res)
+{
+       struct fscrypt_completion_result *ecr = req->data;
+
+       if (res == -EINPROGRESS)
+               return;
+       ecr->res = res;
+       complete(&ecr->completion);
+}
+
+typedef enum {
+       FS_DECRYPT = 0,
+       FS_ENCRYPT,
+} fscrypt_direction_t;
+
+static int do_page_crypto(struct inode *inode,
+                       fscrypt_direction_t rw, pgoff_t index,
+                       struct page *src_page, struct page *dest_page,
+                       gfp_t gfp_flags)
+{
+       u8 xts_tweak[FS_XTS_TWEAK_SIZE];
+       struct ablkcipher_request *req = NULL;
+       DECLARE_FS_COMPLETION_RESULT(ecr);
+       struct scatterlist dst, src;
+       struct fscrypt_info *ci = inode->i_crypt_info;
+       struct crypto_ablkcipher *tfm = ci->ci_ctfm;
+       int res = 0;
+
+       req = ablkcipher_request_alloc(tfm, gfp_flags);
+       if (!req) {
+               printk_ratelimited(KERN_ERR
+                               "%s: crypto_request_alloc() failed\n",
+                               __func__);
+               return -ENOMEM;
+       }
+
+       ablkcipher_request_set_callback(
+               req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+               page_crypt_complete, &ecr);
+
+       BUILD_BUG_ON(FS_XTS_TWEAK_SIZE < sizeof(index));
+       memcpy(xts_tweak, &index, sizeof(index));
+       memset(&xts_tweak[sizeof(index)], 0,
+                       FS_XTS_TWEAK_SIZE - sizeof(index));
+
+       sg_init_table(&dst, 1);
+       sg_set_page(&dst, dest_page, PAGE_SIZE, 0);
+       sg_init_table(&src, 1);
+       sg_set_page(&src, src_page, PAGE_SIZE, 0);
+       ablkcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE,
+                                       xts_tweak);
+       if (rw == FS_DECRYPT)
+               res = crypto_ablkcipher_decrypt(req);
+       else
+               res = crypto_ablkcipher_encrypt(req);
+       if (res == -EINPROGRESS || res == -EBUSY) {
+               BUG_ON(req->base.data != &ecr);
+               wait_for_completion(&ecr.completion);
+               res = ecr.res;
+       }
+       ablkcipher_request_free(req);
+       if (res) {
+               printk_ratelimited(KERN_ERR
+                       "%s: crypto_ablkcipher_encrypt() returned %d\n",
+                       __func__, res);
+               return res;
+       }
+       return 0;
+}
+
+static struct page *alloc_bounce_page(struct fscrypt_ctx *ctx, gfp_t gfp_flags)
+{
+       ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
+       if (ctx->w.bounce_page == NULL)
+               return ERR_PTR(-ENOMEM);
+       ctx->flags |= FS_WRITE_PATH_FL;
+       return ctx->w.bounce_page;
+}
+
+/**
+ * fscrypt_encrypt_page() - Encrypts a page
+ * @inode:          The inode for which the encryption should take place
+ * @plaintext_page: The page to encrypt. Must be locked.
+ * @gfp_flags:      The gfp flag for memory allocation
+ *
+ * Allocates a ciphertext page and encrypts plaintext_page into it using the ctx
+ * encryption context.
+ *
+ * Called on the page write path.  The caller must call
+ * fscrypt_restore_control_page() on the returned ciphertext page to
+ * release the bounce buffer and the encryption context.
+ *
+ * Return: An allocated page with the encrypted content on success. Else, an
+ * error pointer.
+ */
+struct page *fscrypt_encrypt_page(struct inode *inode,
+                               struct page *plaintext_page, gfp_t gfp_flags)
+{
+       struct fscrypt_ctx *ctx;
+       struct page *ciphertext_page = NULL;
+       int err;
+
+       BUG_ON(!PageLocked(plaintext_page));
+
+       ctx = fscrypt_get_ctx(inode, gfp_flags);
+       if (IS_ERR(ctx))
+               return (struct page *)ctx;
+
+       /* The encryption operation will require a bounce page. */
+       ciphertext_page = alloc_bounce_page(ctx, gfp_flags);
+       if (IS_ERR(ciphertext_page))
+               goto errout;
+
+       ctx->w.control_page = plaintext_page;
+       err = do_page_crypto(inode, FS_ENCRYPT, plaintext_page->index,
+                                       plaintext_page, ciphertext_page,
+                                       gfp_flags);
+       if (err) {
+               ciphertext_page = ERR_PTR(err);
+               goto errout;
+       }
+       SetPagePrivate(ciphertext_page);
+       set_page_private(ciphertext_page, (unsigned long)ctx);
+       lock_page(ciphertext_page);
+       return ciphertext_page;
+
+errout:
+       fscrypt_release_ctx(ctx);
+       return ciphertext_page;
+}
+EXPORT_SYMBOL(fscrypt_encrypt_page);
+
+/**
+ * fscrypt_decrypt_page() - Decrypts a page in-place
+ * @page: The page to decrypt. Must be locked.
+ *
+ * Decrypts page in-place using the ctx encryption context.
+ *
+ * Called from the read completion callback.
+ *
+ * Return: Zero on success, non-zero otherwise.
+ */
+int fscrypt_decrypt_page(struct page *page)
+{
+       BUG_ON(!PageLocked(page));
+
+       return do_page_crypto(page->mapping->host,
+                       FS_DECRYPT, page->index, page, page, GFP_NOFS);
+}
+EXPORT_SYMBOL(fscrypt_decrypt_page);
+
+int fscrypt_zeroout_range(struct inode *inode, pgoff_t lblk,
+                               sector_t pblk, unsigned int len)
+{
+       struct fscrypt_ctx *ctx;
+       struct page *ciphertext_page = NULL;
+       struct bio *bio;
+       int ret, err = 0;
+
+       BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);
+
+       ctx = fscrypt_get_ctx(inode, GFP_NOFS);
+       if (IS_ERR(ctx))
+               return PTR_ERR(ctx);
+
+       ciphertext_page = alloc_bounce_page(ctx, GFP_NOWAIT);
+       if (IS_ERR(ciphertext_page)) {
+               err = PTR_ERR(ciphertext_page);
+               goto errout;
+       }
+
+       while (len--) {
+               err = do_page_crypto(inode, FS_ENCRYPT, lblk,
+                                       ZERO_PAGE(0), ciphertext_page,
+                                       GFP_NOFS);
+               if (err)
+                       goto errout;
+
+               bio = bio_alloc(GFP_NOWAIT, 1);
+               if (!bio) {
+                       err = -ENOMEM;
+                       goto errout;
+               }
+               bio->bi_bdev = inode->i_sb->s_bdev;
+               bio->bi_sector =
+                       pblk << (inode->i_sb->s_blocksize_bits - 9);
+               ret = bio_add_page(bio, ciphertext_page,
+                                       inode->i_sb->s_blocksize, 0);
+               if (ret != inode->i_sb->s_blocksize) {
+                       /* should never happen! */
+                       WARN_ON(1);
+                       bio_put(bio);
+                       err = -EIO;
+                       goto errout;
+               }
+               err = submit_bio_wait(WRITE, bio);
+               bio_put(bio);
+               if (err)
+                       goto errout;
+               lblk++;
+               pblk++;
+       }
+       err = 0;
+errout:
+       fscrypt_release_ctx(ctx);
+       return err;
+}
+EXPORT_SYMBOL(fscrypt_zeroout_range);
+
+/*
+ * Validate dentries for encrypted directories to make sure we aren't
+ * potentially caching stale data after a key has been added or
+ * removed.
+ */
+static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
+{
+       struct dentry *dir;
+       struct fscrypt_info *ci;
+       int dir_has_key, cached_with_key;
+
+       if (flags & LOOKUP_RCU)
+               return -ECHILD;
+
+       dir = dget_parent(dentry);
+       if (!dir->d_inode->i_sb->s_cop->is_encrypted(dir->d_inode)) {
+               dput(dir);
+               return 0;
+       }
+
+       ci = dir->d_inode->i_crypt_info;
+       if (ci && ci->ci_keyring_key &&
+           (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
+                                         (1 << KEY_FLAG_REVOKED) |
+                                         (1 << KEY_FLAG_DEAD))))
+               ci = NULL;
+
+       /* this should eventually be a flag in d_flags */
+       spin_lock(&dentry->d_lock);
+       cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY;
+       spin_unlock(&dentry->d_lock);
+       dir_has_key = (ci != NULL);
+       dput(dir);
+
+       /*
+        * If the dentry was cached without the key, and it is a
+        * negative dentry, it might be a valid name.  We can't check
+        * if the key has since been made available due to locking
+        * reasons, so we fail the validation so the filesystem's
+        * lookup routine can do this check.
+        *
+        * We also fail the validation if the dentry was created with
+        * the key present, but we no longer have the key, or vice versa.
+        */
+       if (!cached_with_key ||
+                       (!cached_with_key && dir_has_key) ||
+                       (cached_with_key && !dir_has_key))
+               return 0;
+       return 1;
+}
+
+const struct dentry_operations fscrypt_d_ops = {
+       .d_revalidate = fscrypt_d_revalidate,
+};
+EXPORT_SYMBOL(fscrypt_d_ops);
+
+/*
+ * Call fscrypt_decrypt_page on every single page, reusing the encryption
+ * context.
+ */
+static void completion_pages(struct work_struct *work)
+{
+       struct fscrypt_ctx *ctx =
+               container_of(work, struct fscrypt_ctx, r.work);
+       struct bio *bio = ctx->r.bio;
+       struct bio_vec *bv;
+       int i;
+
+       bio_for_each_segment_all(bv, bio, i) {
+               struct page *page = bv->bv_page;
+               int ret = fscrypt_decrypt_page(page);
+
+               if (ret) {
+                       WARN_ON_ONCE(1);
+                       SetPageError(page);
+               } else {
+                       SetPageUptodate(page);
+               }
+               unlock_page(page);
+       }
+       fscrypt_release_ctx(ctx);
+       bio_put(bio);
+}
+
+void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *ctx, struct bio *bio)
+{
+       INIT_WORK(&ctx->r.work, completion_pages);
+       ctx->r.bio = bio;
+       queue_work(fscrypt_read_workqueue, &ctx->r.work);
+}
+EXPORT_SYMBOL(fscrypt_decrypt_bio_pages);
+
+void fscrypt_pullback_bio_page(struct page **page, bool restore)
+{
+       struct fscrypt_ctx *ctx;
+       struct page *bounce_page;
+
+       /* Bounce pages are unmapped; a mapped page needs no pullback. */
+       if ((*page)->mapping)
+               return;
+
+       /* This page is an unmapped bounce page. */
+       bounce_page = *page;
+       ctx = (struct fscrypt_ctx *)page_private(bounce_page);
+
+       /* restore control page */
+       *page = ctx->w.control_page;
+
+       if (restore)
+               fscrypt_restore_control_page(bounce_page);
+}
+EXPORT_SYMBOL(fscrypt_pullback_bio_page);
+
+void fscrypt_restore_control_page(struct page *page)
+{
+       struct fscrypt_ctx *ctx;
+
+       ctx = (struct fscrypt_ctx *)page_private(page);
+       set_page_private(page, (unsigned long)NULL);
+       ClearPagePrivate(page);
+       unlock_page(page);
+       fscrypt_release_ctx(ctx);
+}
+EXPORT_SYMBOL(fscrypt_restore_control_page);
+
+static void fscrypt_destroy(void)
+{
+       struct fscrypt_ctx *pos, *n;
+
+       list_for_each_entry_safe(pos, n, &fscrypt_free_ctxs, free_list)
+               kmem_cache_free(fscrypt_ctx_cachep, pos);
+       INIT_LIST_HEAD(&fscrypt_free_ctxs);
+       mempool_destroy(fscrypt_bounce_page_pool);
+       fscrypt_bounce_page_pool = NULL;
+}
+
+/**
+ * fscrypt_initialize() - allocate major buffers for fs encryption.
+ *
+ * We only call this when we start accessing encrypted files, since it
+ * results in memory getting allocated that wouldn't otherwise be used.
+ *
+ * Return: Zero on success, non-zero otherwise.
+ */
+int fscrypt_initialize(void)
+{
+       int i, res = -ENOMEM;
+
+       if (fscrypt_bounce_page_pool)
+               return 0;
+
+       mutex_lock(&fscrypt_init_mutex);
+       if (fscrypt_bounce_page_pool)
+               goto already_initialized;
+
+       for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
+               struct fscrypt_ctx *ctx;
+
+               ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS);
+               if (!ctx)
+                       goto fail;
+               list_add(&ctx->free_list, &fscrypt_free_ctxs);
+       }
+
+       fscrypt_bounce_page_pool =
+               mempool_create_page_pool(num_prealloc_crypto_pages, 0);
+       if (!fscrypt_bounce_page_pool)
+               goto fail;
+
+already_initialized:
+       mutex_unlock(&fscrypt_init_mutex);
+       return 0;
+fail:
+       fscrypt_destroy();
+       mutex_unlock(&fscrypt_init_mutex);
+       return res;
+}
+EXPORT_SYMBOL(fscrypt_initialize);
+
+/**
+ * fscrypt_init() - Set up for fs encryption.
+ */
+static int __init fscrypt_init(void)
+{
+       fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue",
+                                                       WQ_HIGHPRI, 0);
+       if (!fscrypt_read_workqueue)
+               goto fail;
+
+       fscrypt_ctx_cachep = KMEM_CACHE(fscrypt_ctx, SLAB_RECLAIM_ACCOUNT);
+       if (!fscrypt_ctx_cachep)
+               goto fail_free_queue;
+
+       fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
+       if (!fscrypt_info_cachep)
+               goto fail_free_ctx;
+
+       return 0;
+
+fail_free_ctx:
+       kmem_cache_destroy(fscrypt_ctx_cachep);
+fail_free_queue:
+       destroy_workqueue(fscrypt_read_workqueue);
+fail:
+       return -ENOMEM;
+}
+module_init(fscrypt_init)
+
+/**
+ * fscrypt_exit() - Shutdown the fs encryption system
+ */
+static void __exit fscrypt_exit(void)
+{
+       fscrypt_destroy();
+
+       if (fscrypt_read_workqueue)
+               destroy_workqueue(fscrypt_read_workqueue);
+       kmem_cache_destroy(fscrypt_ctx_cachep);
+       kmem_cache_destroy(fscrypt_info_cachep);
+}
+module_exit(fscrypt_exit);
+
+MODULE_LICENSE("GPL");
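
To illustrate the write-path contract documented above (encrypt into a bounce
page, do the I/O, then restore the control page), a minimal sketch of a
caller; fs_submit_page() is a hypothetical stand-in for the filesystem's real
bio-submission helper and is not part of this patch:

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/fscrypto.h>

static int example_write_encrypted_page(struct inode *inode, struct page *page)
{
	struct page *cpage;
	int err;

	/* Returns a locked bounce page holding the ciphertext. */
	cpage = fscrypt_encrypt_page(inode, page, GFP_NOFS);
	if (IS_ERR(cpage))
		return PTR_ERR(cpage);

	err = fs_submit_page(cpage);	/* hypothetical I/O helper */

	/* Unlocks the bounce page and releases the encryption context. */
	fscrypt_restore_control_page(cpage);
	return err;
}
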
diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c
new file mode 100644 (file)
index 0000000..e7035c6
--- /dev/null
@@ -0,0 +1,430 @@
+/*
+ * This contains functions for filename crypto management
+ *
+ * Copyright (C) 2015, Google, Inc.
+ * Copyright (C) 2015, Motorola Mobility
+ *
+ * Written by Uday Savagaonkar, 2014.
+ * Modified by Jaegeuk Kim, 2015.
+ *
+ * This has not yet undergone a rigorous security audit.
+ */
+
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+#include <keys/encrypted-type.h>
+#include <keys/user-type.h>
+#include <linux/crypto.h>
+#include <linux/scatterlist.h>
+#include <linux/ratelimit.h>
+#include <linux/fscrypto.h>
+
+/**
+ * fname_crypt_complete() - completion callback for filename crypto
+ * @req: The asynchronous cipher request context
+ * @res: The result of the cipher operation
+ */
+static void fname_crypt_complete(struct crypto_async_request *req, int res)
+{
+       struct fscrypt_completion_result *ecr = req->data;
+
+       if (res == -EINPROGRESS)
+               return;
+       ecr->res = res;
+       complete(&ecr->completion);
+}
+
+/**
+ * fname_encrypt() - encrypt a filename
+ *
+ * The caller must have allocated sufficient memory for the @oname string.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int fname_encrypt(struct inode *inode,
+                       const struct qstr *iname, struct fscrypt_str *oname)
+{
+       u32 ciphertext_len;
+       struct ablkcipher_request *req = NULL;
+       DECLARE_FS_COMPLETION_RESULT(ecr);
+       struct fscrypt_info *ci = inode->i_crypt_info;
+       struct crypto_ablkcipher *tfm = ci->ci_ctfm;
+       int res = 0;
+       char iv[FS_CRYPTO_BLOCK_SIZE];
+       struct scatterlist src_sg, dst_sg;
+       int padding = 4 << (ci->ci_flags & FS_POLICY_FLAGS_PAD_MASK);
+       char *workbuf, buf[32], *alloc_buf = NULL;
+       unsigned lim;
+
+       lim = inode->i_sb->s_cop->max_namelen(inode);
+       if (iname->len <= 0 || iname->len > lim)
+               return -EIO;
+
+       ciphertext_len = max(iname->len, (u32)FS_CRYPTO_BLOCK_SIZE);
+       ciphertext_len = round_up(ciphertext_len, padding);
+       ciphertext_len = min(ciphertext_len, lim);
+
+       if (ciphertext_len <= sizeof(buf)) {
+               workbuf = buf;
+       } else {
+               alloc_buf = kmalloc(ciphertext_len, GFP_NOFS);
+               if (!alloc_buf)
+                       return -ENOMEM;
+               workbuf = alloc_buf;
+       }
+
+       /* Allocate request */
+       req = ablkcipher_request_alloc(tfm, GFP_NOFS);
+       if (!req) {
+               printk_ratelimited(KERN_ERR
+                       "%s: crypto_request_alloc() failed\n", __func__);
+               kfree(alloc_buf);
+               return -ENOMEM;
+       }
+       ablkcipher_request_set_callback(req,
+                       CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+                       fname_crypt_complete, &ecr);
+
+       /* Copy the input */
+       memcpy(workbuf, iname->name, iname->len);
+       if (iname->len < ciphertext_len)
+               memset(workbuf + iname->len, 0, ciphertext_len - iname->len);
+
+       /* Initialize IV */
+       memset(iv, 0, FS_CRYPTO_BLOCK_SIZE);
+
+       /* Create encryption request */
+       sg_init_one(&src_sg, workbuf, ciphertext_len);
+       sg_init_one(&dst_sg, oname->name, ciphertext_len);
+       ablkcipher_request_set_crypt(req, &src_sg, &dst_sg, ciphertext_len, iv);
+       res = crypto_ablkcipher_encrypt(req);
+       if (res == -EINPROGRESS || res == -EBUSY) {
+               wait_for_completion(&ecr.completion);
+               res = ecr.res;
+       }
+       kfree(alloc_buf);
+       ablkcipher_request_free(req);
+       if (res < 0) {
+               printk_ratelimited(KERN_ERR
+                               "%s: Error (error code %d)\n", __func__, res);
+               return res;
+       }
+
+       oname->len = ciphertext_len;
+       return 0;
+}
+
+/**
+ * fname_decrypt() - decrypt a filename
+ *
+ * The caller must have allocated sufficient memory for the @oname string.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int fname_decrypt(struct inode *inode,
+                               const struct fscrypt_str *iname,
+                               struct fscrypt_str *oname)
+{
+       struct ablkcipher_request *req = NULL;
+       DECLARE_FS_COMPLETION_RESULT(ecr);
+       struct scatterlist src_sg, dst_sg;
+       struct fscrypt_info *ci = inode->i_crypt_info;
+       struct crypto_ablkcipher *tfm = ci->ci_ctfm;
+       int res = 0;
+       char iv[FS_CRYPTO_BLOCK_SIZE];
+       unsigned lim;
+
+       lim = inode->i_sb->s_cop->max_namelen(inode);
+       if (iname->len <= 0 || iname->len > lim)
+               return -EIO;
+
+       /* Allocate request */
+       req = ablkcipher_request_alloc(tfm, GFP_NOFS);
+       if (!req) {
+               printk_ratelimited(KERN_ERR
+                       "%s: crypto_request_alloc() failed\n",  __func__);
+               return -ENOMEM;
+       }
+       ablkcipher_request_set_callback(req,
+               CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+               fname_crypt_complete, &ecr);
+
+       /* Initialize IV */
+       memset(iv, 0, FS_CRYPTO_BLOCK_SIZE);
+
+       /* Create decryption request */
+       sg_init_one(&src_sg, iname->name, iname->len);
+       sg_init_one(&dst_sg, oname->name, oname->len);
+       ablkcipher_request_set_crypt(req, &src_sg, &dst_sg, iname->len, iv);
+       res = crypto_ablkcipher_decrypt(req);
+       if (res == -EINPROGRESS || res == -EBUSY) {
+               wait_for_completion(&ecr.completion);
+               res = ecr.res;
+       }
+       ablkcipher_request_free(req);
+       if (res < 0) {
+               printk_ratelimited(KERN_ERR
+                               "%s: Error (error code %d)\n", __func__, res);
+               return res;
+       }
+
+       oname->len = strnlen(oname->name, iname->len);
+       return 0;
+}
+
+static const char *lookup_table =
+       "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+,";
+
+/**
+ * digest_encode() -
+ *
+ * Encodes the input digest using characters from the set [A-Za-z0-9+,].
+ * The encoded string is roughly 4/3 times the size of the input string.
+ */
+static int digest_encode(const char *src, int len, char *dst)
+{
+       int i = 0, bits = 0, ac = 0;
+       char *cp = dst;
+
+       while (i < len) {
+               ac += (((unsigned char) src[i]) << bits);
+               bits += 8;
+               do {
+                       *cp++ = lookup_table[ac & 0x3f];
+                       ac >>= 6;
+                       bits -= 6;
+               } while (bits >= 6);
+               i++;
+       }
+       if (bits)
+               *cp++ = lookup_table[ac & 0x3f];
+       return cp - dst;
+}
+
+static int digest_decode(const char *src, int len, char *dst)
+{
+       int i = 0, bits = 0, ac = 0;
+       const char *p;
+       char *cp = dst;
+
+       while (i < len) {
+               p = strchr(lookup_table, src[i]);
+               if (p == NULL || src[i] == 0)
+                       return -2;
+               ac += (p - lookup_table) << bits;
+               bits += 6;
+               if (bits >= 8) {
+                       *cp++ = ac & 0xff;
+                       ac >>= 8;
+                       bits -= 8;
+               }
+               i++;
+       }
+       if (ac)
+               return -1;
+       return cp - dst;
+}
+
+u32 fscrypt_fname_encrypted_size(struct inode *inode, u32 ilen)
+{
+       int padding = 32;
+       struct fscrypt_info *ci = inode->i_crypt_info;
+
+       if (ci)
+               padding = 4 << (ci->ci_flags & FS_POLICY_FLAGS_PAD_MASK);
+       ilen = max(ilen, (u32)FS_CRYPTO_BLOCK_SIZE);
+       return round_up(ilen, padding);
+}
+EXPORT_SYMBOL(fscrypt_fname_encrypted_size);
+
+/**
+ * fscrypt_fname_alloc_buffer() -
+ *
+ * Allocates an output buffer that is sufficient for the crypto operation
+ * specified by the context and the direction.
+ */
+int fscrypt_fname_alloc_buffer(struct inode *inode,
+                               u32 ilen, struct fscrypt_str *crypto_str)
+{
+       unsigned int olen = fscrypt_fname_encrypted_size(inode, ilen);
+
+       crypto_str->len = olen;
+       if (olen < FS_FNAME_CRYPTO_DIGEST_SIZE * 2)
+               olen = FS_FNAME_CRYPTO_DIGEST_SIZE * 2;
+       /*
+        * Allocated buffer can hold one more character to null-terminate the
+        * string
+        */
+       crypto_str->name = kmalloc(olen + 1, GFP_NOFS);
+       if (!(crypto_str->name))
+               return -ENOMEM;
+       return 0;
+}
+EXPORT_SYMBOL(fscrypt_fname_alloc_buffer);
+
+/**
+ * fscrypt_fname_free_buffer() -
+ *
+ * Frees the buffer allocated for crypto operation.
+ */
+void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str)
+{
+       if (!crypto_str)
+               return;
+       kfree(crypto_str->name);
+       crypto_str->name = NULL;
+}
+EXPORT_SYMBOL(fscrypt_fname_free_buffer);
+
+/**
+ * fscrypt_fname_disk_to_usr() - converts a filename from disk space to user
+ * space
+ *
+ * The caller must have allocated sufficient memory for the @oname string.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int fscrypt_fname_disk_to_usr(struct inode *inode,
+                       u32 hash, u32 minor_hash,
+                       const struct fscrypt_str *iname,
+                       struct fscrypt_str *oname)
+{
+       const struct qstr qname = FSTR_TO_QSTR(iname);
+       char buf[24];
+
+       if (fscrypt_is_dot_dotdot(&qname)) {
+               oname->name[0] = '.';
+               oname->name[iname->len - 1] = '.';
+               oname->len = iname->len;
+               return 0;
+       }
+
+       if (iname->len < FS_CRYPTO_BLOCK_SIZE)
+               return -EUCLEAN;
+
+       if (inode->i_crypt_info)
+               return fname_decrypt(inode, iname, oname);
+
+       if (iname->len <= FS_FNAME_CRYPTO_DIGEST_SIZE) {
+               oname->len = digest_encode(iname->name, iname->len,
+                                          oname->name);
+               return 0;
+       }
+       if (hash) {
+               memcpy(buf, &hash, 4);
+               memcpy(buf + 4, &minor_hash, 4);
+       } else {
+               memset(buf, 0, 8);
+       }
+       memcpy(buf + 8, iname->name + iname->len - 16, 16);
+       oname->name[0] = '_';
+       oname->len = 1 + digest_encode(buf, 24, oname->name + 1);
+       return 0;
+}
+EXPORT_SYMBOL(fscrypt_fname_disk_to_usr);
+
+/**
+ * fscrypt_fname_usr_to_disk() - converts a filename from user space to disk
+ * space
+ *
+ * The caller must have allocated sufficient memory for the @oname string.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int fscrypt_fname_usr_to_disk(struct inode *inode,
+                       const struct qstr *iname,
+                       struct fscrypt_str *oname)
+{
+       if (fscrypt_is_dot_dotdot(iname)) {
+               oname->name[0] = '.';
+               oname->name[iname->len - 1] = '.';
+               oname->len = iname->len;
+               return 0;
+       }
+       if (inode->i_crypt_info)
+               return fname_encrypt(inode, iname, oname);
+       /*
+        * Without a proper key, a user is not allowed to modify the filenames
+        * in a directory. Consequently, a user space name cannot be mapped to
+        * a disk-space name
+        */
+       return -EACCES;
+}
+EXPORT_SYMBOL(fscrypt_fname_usr_to_disk);
+
+int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname,
+                             int lookup, struct fscrypt_name *fname)
+{
+       int ret = 0, bigname = 0;
+
+       memset(fname, 0, sizeof(struct fscrypt_name));
+       fname->usr_fname = iname;
+
+       if (!dir->i_sb->s_cop->is_encrypted(dir) ||
+                               fscrypt_is_dot_dotdot(iname)) {
+               fname->disk_name.name = (unsigned char *)iname->name;
+               fname->disk_name.len = iname->len;
+               return 0;
+       }
+       ret = get_crypt_info(dir);
+       if (ret && ret != -EOPNOTSUPP)
+               return ret;
+
+       if (dir->i_crypt_info) {
+               ret = fscrypt_fname_alloc_buffer(dir, iname->len,
+                                                       &fname->crypto_buf);
+               if (ret)
+                       return ret;
+               ret = fname_encrypt(dir, iname, &fname->crypto_buf);
+               if (ret)
+                       goto errout;
+               fname->disk_name.name = fname->crypto_buf.name;
+               fname->disk_name.len = fname->crypto_buf.len;
+               return 0;
+       }
+       if (!lookup)
+               return -EACCES;
+
+       /*
+        * We don't have the key and we are doing a lookup; decode the
+        * user-supplied name
+        */
+       if (iname->name[0] == '_')
+               bigname = 1;
+       if ((bigname && (iname->len != 33)) || (!bigname && (iname->len > 43)))
+               return -ENOENT;
+
+       fname->crypto_buf.name = kmalloc(32, GFP_KERNEL);
+       if (fname->crypto_buf.name == NULL)
+               return -ENOMEM;
+
+       ret = digest_decode(iname->name + bigname, iname->len - bigname,
+                               fname->crypto_buf.name);
+       if (ret < 0) {
+               ret = -ENOENT;
+               goto errout;
+       }
+       fname->crypto_buf.len = ret;
+       if (bigname) {
+               memcpy(&fname->hash, fname->crypto_buf.name, 4);
+               memcpy(&fname->minor_hash, fname->crypto_buf.name + 4, 4);
+       } else {
+               fname->disk_name.name = fname->crypto_buf.name;
+               fname->disk_name.len = fname->crypto_buf.len;
+       }
+       return 0;
+
+errout:
+       fscrypt_fname_free_buffer(&fname->crypto_buf);
+       return ret;
+}
+EXPORT_SYMBOL(fscrypt_setup_filename);
+
+void fscrypt_free_filename(struct fscrypt_name *fname)
+{
+       kfree(fname->crypto_buf.name);
+       fname->crypto_buf.name = NULL;
+       fname->usr_fname = NULL;
+       fname->disk_name.name = NULL;
+}
+EXPORT_SYMBOL(fscrypt_free_filename);
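
To show how a filesystem is expected to drive the filename API above, a
sketch of a lookup path; find_dir_entry() is a hypothetical stand-in for the
filesystem's directory-search helper and is not part of this patch:

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/fscrypto.h>

static struct inode *example_lookup(struct inode *dir, const struct qstr *name)
{
	struct fscrypt_name fname;
	struct inode *inode;
	int err;

	/* lookup == 1: a missing key is tolerated for lookups. */
	err = fscrypt_setup_filename(dir, name, 1, &fname);
	if (err)
		return ERR_PTR(err);

	/* fname.disk_name, when set, holds the on-disk (encrypted) name. */
	inode = find_dir_entry(dir, &fname);	/* hypothetical */

	fscrypt_free_filename(&fname);
	return inode;
}
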
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
new file mode 100644 (file)
index 0000000..6a7ee21
--- /dev/null
@@ -0,0 +1,329 @@
+/*
+ * key management facility for FS encryption support.
+ *
+ * Copyright (C) 2015, Google, Inc.
+ *
+ * This contains encryption key functions.
+ *
+ * Written by Michael Halcrow, Ildar Muslukhov, and Uday Savagaonkar, 2015.
+ */
+
+#include <keys/encrypted-type.h>
+#include <keys/user-type.h>
+#include <linux/random.h>
+#include <linux/scatterlist.h>
+#include <uapi/linux/keyctl.h>
+#include <crypto/hash.h>
+#include <linux/fscrypto.h>
+
+static void derive_crypt_complete(struct crypto_async_request *req, int rc)
+{
+       struct fscrypt_completion_result *ecr = req->data;
+
+       if (rc == -EINPROGRESS)
+               return;
+
+       ecr->res = rc;
+       complete(&ecr->completion);
+}
+
+/**
+ * derive_key_aes() - Derive a key using AES-128-ECB
+ * @deriving_key: Encryption key used for derivation.
+ * @source_key:   Source key to which to apply derivation.
+ * @derived_key:  Derived key.
+ *
+ * Return: Zero on success; non-zero otherwise.
+ */
+static int derive_key_aes(u8 deriving_key[FS_AES_128_ECB_KEY_SIZE],
+                               u8 source_key[FS_AES_256_XTS_KEY_SIZE],
+                               u8 derived_key[FS_AES_256_XTS_KEY_SIZE])
+{
+       int res = 0;
+       struct ablkcipher_request *req = NULL;
+       DECLARE_FS_COMPLETION_RESULT(ecr);
+       struct scatterlist src_sg, dst_sg;
+       struct crypto_ablkcipher *tfm = crypto_alloc_ablkcipher("ecb(aes)", 0,
+                                                               0);
+
+       if (IS_ERR(tfm)) {
+               res = PTR_ERR(tfm);
+               tfm = NULL;
+               goto out;
+       }
+       crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
+       req = ablkcipher_request_alloc(tfm, GFP_NOFS);
+       if (!req) {
+               res = -ENOMEM;
+               goto out;
+       }
+       ablkcipher_request_set_callback(req,
+                       CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+                       derive_crypt_complete, &ecr);
+       res = crypto_ablkcipher_setkey(tfm, deriving_key,
+                                       FS_AES_128_ECB_KEY_SIZE);
+       if (res < 0)
+               goto out;
+
+       sg_init_one(&src_sg, source_key, FS_AES_256_XTS_KEY_SIZE);
+       sg_init_one(&dst_sg, derived_key, FS_AES_256_XTS_KEY_SIZE);
+       ablkcipher_request_set_crypt(req, &src_sg, &dst_sg,
+                                       FS_AES_256_XTS_KEY_SIZE, NULL);
+       res = crypto_ablkcipher_encrypt(req);
+       if (res == -EINPROGRESS || res == -EBUSY) {
+               wait_for_completion(&ecr.completion);
+               res = ecr.res;
+       }
+out:
+       ablkcipher_request_free(req);
+       crypto_free_ablkcipher(tfm);
+       return res;
+}
+
+static int validate_user_key(struct fscrypt_info *crypt_info,
+                       struct fscrypt_context *ctx, u8 *raw_key,
+                       u8 *prefix, int prefix_size)
+{
+       u8 *full_key_descriptor;
+       struct key *keyring_key;
+       struct fscrypt_key *master_key;
+       const struct user_key_payload *ukp;
+       int full_key_len = prefix_size + (FS_KEY_DESCRIPTOR_SIZE * 2) + 1;
+       int res;
+
+       full_key_descriptor = kmalloc(full_key_len, GFP_NOFS);
+       if (!full_key_descriptor)
+               return -ENOMEM;
+
+       memcpy(full_key_descriptor, prefix, prefix_size);
+       sprintf(full_key_descriptor + prefix_size,
+                       "%*phN", FS_KEY_DESCRIPTOR_SIZE,
+                       ctx->master_key_descriptor);
+       full_key_descriptor[full_key_len - 1] = '\0';
+       keyring_key = request_key(&key_type_logon, full_key_descriptor, NULL);
+       kfree(full_key_descriptor);
+       if (IS_ERR(keyring_key))
+               return PTR_ERR(keyring_key);
+
+       if (keyring_key->type != &key_type_logon) {
+               printk_once(KERN_WARNING
+                               "%s: key type must be logon\n", __func__);
+               res = -ENOKEY;
+               goto out;
+       }
+       down_read(&keyring_key->sem);
+       ukp = ((struct user_key_payload *)keyring_key->payload.data);
+       if (ukp->datalen != sizeof(struct fscrypt_key)) {
+               res = -EINVAL;
+               up_read(&keyring_key->sem);
+               goto out;
+       }
+       master_key = (struct fscrypt_key *)ukp->data;
+       BUILD_BUG_ON(FS_AES_128_ECB_KEY_SIZE != FS_KEY_DERIVATION_NONCE_SIZE);
+
+       if (master_key->size != FS_AES_256_XTS_KEY_SIZE) {
+               printk_once(KERN_WARNING
+                               "%s: key size incorrect: %d\n",
+                               __func__, master_key->size);
+               res = -ENOKEY;
+               up_read(&keyring_key->sem);
+               goto out;
+       }
+       res = derive_key_aes(ctx->nonce, master_key->raw, raw_key);
+       up_read(&keyring_key->sem);
+       if (res)
+               goto out;
+
+       crypt_info->ci_keyring_key = keyring_key;
+       return 0;
+out:
+       key_put(keyring_key);
+       return res;
+}
+
+static int determine_cipher_type(struct fscrypt_info *ci, struct inode *inode,
+                                const char **cipher_str_ret, int *keysize_ret)
+{
+       if (S_ISREG(inode->i_mode)) {
+               if (ci->ci_data_mode == FS_ENCRYPTION_MODE_AES_256_XTS) {
+                       *cipher_str_ret = "xts(aes)";
+                       *keysize_ret = FS_AES_256_XTS_KEY_SIZE;
+                       return 0;
+               }
+               pr_warn_once("fscrypto: unsupported contents encryption mode "
+                            "%d for inode %lu\n",
+                            ci->ci_data_mode, inode->i_ino);
+               return -ENOKEY;
+       }
+
+       if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) {
+               if (ci->ci_filename_mode == FS_ENCRYPTION_MODE_AES_256_CTS) {
+                       *cipher_str_ret = "cts(cbc(aes))";
+                       *keysize_ret = FS_AES_256_CTS_KEY_SIZE;
+                       return 0;
+               }
+               pr_warn_once("fscrypto: unsupported filenames encryption mode "
+                            "%d for inode %lu\n",
+                            ci->ci_filename_mode, inode->i_ino);
+               return -ENOKEY;
+       }
+
+       pr_warn_once("fscrypto: unsupported file type %d for inode %lu\n",
+                    (inode->i_mode & S_IFMT), inode->i_ino);
+       return -ENOKEY;
+}
+
+static void put_crypt_info(struct fscrypt_info *ci)
+{
+       if (!ci)
+               return;
+
+       key_put(ci->ci_keyring_key);
+       crypto_free_ablkcipher(ci->ci_ctfm);
+       kmem_cache_free(fscrypt_info_cachep, ci);
+}
+
+int get_crypt_info(struct inode *inode)
+{
+       struct fscrypt_info *crypt_info;
+       struct fscrypt_context ctx;
+       struct crypto_ablkcipher *ctfm;
+       const char *cipher_str;
+       int keysize;
+       u8 raw_key[FS_MAX_KEY_SIZE];
+       int res;
+
+       res = fscrypt_initialize();
+       if (res)
+               return res;
+
+       if (!inode->i_sb->s_cop->get_context)
+               return -EOPNOTSUPP;
+retry:
+       crypt_info = ACCESS_ONCE(inode->i_crypt_info);
+       if (crypt_info) {
+               if (!crypt_info->ci_keyring_key ||
+                               key_validate(crypt_info->ci_keyring_key) == 0)
+                       return 0;
+               fscrypt_put_encryption_info(inode, crypt_info);
+               goto retry;
+       }
+
+       res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx));
+       if (res < 0) {
+               if (!fscrypt_dummy_context_enabled(inode))
+                       return res;
+               ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1;
+               ctx.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS;
+               ctx.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS;
+               ctx.flags = 0;
+       } else if (res != sizeof(ctx)) {
+               return -EINVAL;
+       }
+
+       if (ctx.format != FS_ENCRYPTION_CONTEXT_FORMAT_V1)
+               return -EINVAL;
+
+       if (ctx.flags & ~FS_POLICY_FLAGS_VALID)
+               return -EINVAL;
+
+       crypt_info = kmem_cache_alloc(fscrypt_info_cachep, GFP_NOFS);
+       if (!crypt_info)
+               return -ENOMEM;
+
+       crypt_info->ci_flags = ctx.flags;
+       crypt_info->ci_data_mode = ctx.contents_encryption_mode;
+       crypt_info->ci_filename_mode = ctx.filenames_encryption_mode;
+       crypt_info->ci_ctfm = NULL;
+       crypt_info->ci_keyring_key = NULL;
+       memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor,
+                               sizeof(crypt_info->ci_master_key));
+
+       res = determine_cipher_type(crypt_info, inode, &cipher_str, &keysize);
+       if (res)
+               goto out;
+
+       if (fscrypt_dummy_context_enabled(inode)) {
+               memset(raw_key, 0x42, FS_AES_256_XTS_KEY_SIZE);
+               goto got_key;
+       }
+
+       res = validate_user_key(crypt_info, &ctx, raw_key,
+                       FS_KEY_DESC_PREFIX, FS_KEY_DESC_PREFIX_SIZE);
+       if (res && inode->i_sb->s_cop->key_prefix) {
+               u8 *prefix = NULL;
+               int prefix_size, res2;
+
+               prefix_size = inode->i_sb->s_cop->key_prefix(inode, &prefix);
+               res2 = validate_user_key(crypt_info, &ctx, raw_key,
+                                                       prefix, prefix_size);
+               if (res2) {
+                       if (res2 == -ENOKEY)
+                               res = -ENOKEY;
+                       goto out;
+               }
+       } else if (res) {
+               goto out;
+       }
+got_key:
+       ctfm = crypto_alloc_ablkcipher(cipher_str, 0, 0);
+       if (!ctfm || IS_ERR(ctfm)) {
+               res = ctfm ? PTR_ERR(ctfm) : -ENOMEM;
+               printk(KERN_DEBUG
+                      "%s: error %d (inode %u) allocating crypto tfm\n",
+                      __func__, res, (unsigned) inode->i_ino);
+               goto out;
+       }
+       crypt_info->ci_ctfm = ctfm;
+       crypto_ablkcipher_clear_flags(ctfm, ~0);
+       crypto_tfm_set_flags(crypto_ablkcipher_tfm(ctfm),
+                                       CRYPTO_TFM_REQ_WEAK_KEY);
+       res = crypto_ablkcipher_setkey(ctfm, raw_key, keysize);
+       if (res)
+               goto out;
+
+       memzero_explicit(raw_key, sizeof(raw_key));
+       if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) != NULL) {
+               put_crypt_info(crypt_info);
+               goto retry;
+       }
+       return 0;
+
+out:
+       if (res == -ENOKEY)
+               res = 0;
+       put_crypt_info(crypt_info);
+       memzero_explicit(raw_key, sizeof(raw_key));
+       return res;
+}
+
+void fscrypt_put_encryption_info(struct inode *inode, struct fscrypt_info *ci)
+{
+       struct fscrypt_info *prev;
+
+       if (ci == NULL)
+               ci = ACCESS_ONCE(inode->i_crypt_info);
+       if (ci == NULL)
+               return;
+
+       prev = cmpxchg(&inode->i_crypt_info, ci, NULL);
+       if (prev != ci)
+               return;
+
+       put_crypt_info(ci);
+}
+EXPORT_SYMBOL(fscrypt_put_encryption_info);
+
+int fscrypt_get_encryption_info(struct inode *inode)
+{
+       struct fscrypt_info *ci = inode->i_crypt_info;
+
+       if (!ci ||
+               (ci->ci_keyring_key &&
+                (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
+                                              (1 << KEY_FLAG_REVOKED) |
+                                              (1 << KEY_FLAG_DEAD)))))
+               return get_crypt_info(inode);
+       return 0;
+}
+EXPORT_SYMBOL(fscrypt_get_encryption_info);
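
validate_user_key() above resolves the master key through the kernel keyring
via request_key(&key_type_logon, "<prefix><hex descriptor>"). A user-space
sketch of loading such a key with add_key(2); the descriptor and key bytes
are placeholders, and the "fscrypt:" prefix assumes FS_KEY_DESC_PREFIX
(filesystems may accept their own prefix via the key_prefix hook):

#include <keyutils.h>
#include <stdio.h>
#include <string.h>

/* Mirrors struct fscrypt_key from the kernel UAPI. */
struct fscrypt_key {
	unsigned int mode;
	unsigned char raw[64];
	unsigned int size;
};

int main(void)
{
	struct fscrypt_key key = { .mode = 0, .size = 64 };
	key_serial_t id;

	memset(key.raw, 0x61, sizeof(key.raw));	/* demo key material only */
	id = add_key("logon", "fscrypt:0123456789abcdef",
		     &key, sizeof(key), KEY_SPEC_SESSION_KEYRING);
	if (id < 0) {
		perror("add_key");
		return 1;
	}
	printf("loaded key %d\n", (int)id);
	return 0;
}
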
diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
new file mode 100644 (file)
index 0000000..07e58dd
--- /dev/null
@@ -0,0 +1,246 @@
+/*
+ * Encryption policy functions for per-file encryption support.
+ *
+ * Copyright (C) 2015, Google, Inc.
+ * Copyright (C) 2015, Motorola Mobility.
+ *
+ * Written by Michael Halcrow, 2015.
+ * Modified by Jaegeuk Kim, 2015.
+ */
+
+#include <linux/random.h>
+#include <linux/string.h>
+#include <linux/fscrypto.h>
+#include <linux/mount.h>
+
+static int inode_has_encryption_context(struct inode *inode)
+{
+       if (!inode->i_sb->s_cop->get_context)
+               return 0;
+       return (inode->i_sb->s_cop->get_context(inode, NULL, 0L) > 0);
+}
+
+/*
+ * check whether the policy is consistent with the encryption context
+ * for the inode
+ */
+static int is_encryption_context_consistent_with_policy(struct inode *inode,
+                               const struct fscrypt_policy *policy)
+{
+       struct fscrypt_context ctx;
+       int res;
+
+       if (!inode->i_sb->s_cop->get_context)
+               return 0;
+
+       res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx));
+       if (res != sizeof(ctx))
+               return 0;
+
+       return (memcmp(ctx.master_key_descriptor, policy->master_key_descriptor,
+                       FS_KEY_DESCRIPTOR_SIZE) == 0 &&
+                       (ctx.flags == policy->flags) &&
+                       (ctx.contents_encryption_mode ==
+                        policy->contents_encryption_mode) &&
+                       (ctx.filenames_encryption_mode ==
+                        policy->filenames_encryption_mode));
+}
+
+static int create_encryption_context_from_policy(struct inode *inode,
+                               const struct fscrypt_policy *policy)
+{
+       struct fscrypt_context ctx;
+       int res;
+
+       if (!inode->i_sb->s_cop->set_context)
+               return -EOPNOTSUPP;
+
+       if (inode->i_sb->s_cop->prepare_context) {
+               res = inode->i_sb->s_cop->prepare_context(inode);
+               if (res)
+                       return res;
+       }
+
+       ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1;
+       memcpy(ctx.master_key_descriptor, policy->master_key_descriptor,
+                                       FS_KEY_DESCRIPTOR_SIZE);
+
+       if (!fscrypt_valid_contents_enc_mode(
+                               policy->contents_encryption_mode)) {
+               printk(KERN_WARNING
+                      "%s: Invalid contents encryption mode %d\n", __func__,
+                       policy->contents_encryption_mode);
+               return -EINVAL;
+       }
+
+       if (!fscrypt_valid_filenames_enc_mode(
+                               policy->filenames_encryption_mode)) {
+               printk(KERN_WARNING
+                       "%s: Invalid filenames encryption mode %d\n", __func__,
+                       policy->filenames_encryption_mode);
+               return -EINVAL;
+       }
+
+       if (policy->flags & ~FS_POLICY_FLAGS_VALID)
+               return -EINVAL;
+
+       ctx.contents_encryption_mode = policy->contents_encryption_mode;
+       ctx.filenames_encryption_mode = policy->filenames_encryption_mode;
+       ctx.flags = policy->flags;
+       BUILD_BUG_ON(sizeof(ctx.nonce) != FS_KEY_DERIVATION_NONCE_SIZE);
+       get_random_bytes(ctx.nonce, FS_KEY_DERIVATION_NONCE_SIZE);
+
+       return inode->i_sb->s_cop->set_context(inode, &ctx, sizeof(ctx), NULL);
+}
+
+int fscrypt_process_policy(struct file *filp,
+                               const struct fscrypt_policy *policy)
+{
+       struct inode *inode = file_inode(filp);
+       int ret;
+
+       if (!inode_owner_or_capable(inode))
+               return -EACCES;
+
+       if (policy->version != 0)
+               return -EINVAL;
+
+       ret = mnt_want_write_file(filp);
+       if (ret)
+               return ret;
+
+       if (!inode_has_encryption_context(inode)) {
+               if (!S_ISDIR(inode->i_mode))
+                       ret = -EINVAL;
+               else if (!inode->i_sb->s_cop->empty_dir)
+                       ret = -EOPNOTSUPP;
+               else if (!inode->i_sb->s_cop->empty_dir(inode))
+                       ret = -ENOTEMPTY;
+               else
+                       ret = create_encryption_context_from_policy(inode,
+                                                                   policy);
+       } else if (!is_encryption_context_consistent_with_policy(inode,
+                                                                policy)) {
+               printk(KERN_WARNING
+                      "%s: Policy inconsistent with encryption context\n",
+                      __func__);
+               ret = -EOPNOTSUPP;
+       }
+
+       mnt_drop_write_file(filp);
+       return ret;
+}
+EXPORT_SYMBOL(fscrypt_process_policy);
+
+int fscrypt_get_policy(struct inode *inode, struct fscrypt_policy *policy)
+{
+       struct fscrypt_context ctx;
+       int res;
+
+       if (!inode->i_sb->s_cop->get_context ||
+                       !inode->i_sb->s_cop->is_encrypted(inode))
+               return -ENODATA;
+
+       res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx));
+       if (res != sizeof(ctx))
+               return -ENODATA;
+       if (ctx.format != FS_ENCRYPTION_CONTEXT_FORMAT_V1)
+               return -EINVAL;
+
+       policy->version = 0;
+       policy->contents_encryption_mode = ctx.contents_encryption_mode;
+       policy->filenames_encryption_mode = ctx.filenames_encryption_mode;
+       policy->flags = ctx.flags;
+       memcpy(&policy->master_key_descriptor, ctx.master_key_descriptor,
+                               FS_KEY_DESCRIPTOR_SIZE);
+       return 0;
+}
+EXPORT_SYMBOL(fscrypt_get_policy);
+
+int fscrypt_has_permitted_context(struct inode *parent, struct inode *child)
+{
+       struct fscrypt_info *parent_ci, *child_ci;
+       int res;
+
+       if ((parent == NULL) || (child == NULL)) {
+               printk(KERN_ERR "parent %p child %p\n", parent, child);
+               BUG_ON(1);
+       }
+
+       /* no restrictions if the parent directory is not encrypted */
+       if (!parent->i_sb->s_cop->is_encrypted(parent))
+               return 1;
+       /* if the child directory is not encrypted, this is always a problem */
+       if (!parent->i_sb->s_cop->is_encrypted(child))
+               return 0;
+       res = fscrypt_get_encryption_info(parent);
+       if (res)
+               return 0;
+       res = fscrypt_get_encryption_info(child);
+       if (res)
+               return 0;
+       parent_ci = parent->i_crypt_info;
+       child_ci = child->i_crypt_info;
+       if (!parent_ci && !child_ci)
+               return 1;
+       if (!parent_ci || !child_ci)
+               return 0;
+
+       return (memcmp(parent_ci->ci_master_key,
+                       child_ci->ci_master_key,
+                       FS_KEY_DESCRIPTOR_SIZE) == 0 &&
+               (parent_ci->ci_data_mode == child_ci->ci_data_mode) &&
+               (parent_ci->ci_filename_mode == child_ci->ci_filename_mode) &&
+               (parent_ci->ci_flags == child_ci->ci_flags));
+}
+EXPORT_SYMBOL(fscrypt_has_permitted_context);
+
+/**
+ * fscrypt_inherit_context() - Sets a child context from its parent
+ * @parent: Parent inode from which the context is inherited.
+ * @child:  Child inode that inherits the context from @parent.
+ * @fs_data:  private data given by FS.
+ * @preload:  preload child i_crypt_info
+ *
+ * Return: Zero on success, non-zero otherwise
+ */
+int fscrypt_inherit_context(struct inode *parent, struct inode *child,
+                                               void *fs_data, bool preload)
+{
+       struct fscrypt_context ctx;
+       struct fscrypt_info *ci;
+       int res;
+
+       if (!parent->i_sb->s_cop->set_context)
+               return -EOPNOTSUPP;
+
+       res = fscrypt_get_encryption_info(parent);
+       if (res < 0)
+               return res;
+
+       ci = parent->i_crypt_info;
+       if (ci == NULL)
+               return -ENOKEY;
+
+       ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1;
+       if (fscrypt_dummy_context_enabled(parent)) {
+               ctx.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS;
+               ctx.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS;
+               ctx.flags = 0;
+               memset(ctx.master_key_descriptor, 0x42, FS_KEY_DESCRIPTOR_SIZE);
+               res = 0;
+       } else {
+               ctx.contents_encryption_mode = ci->ci_data_mode;
+               ctx.filenames_encryption_mode = ci->ci_filename_mode;
+               ctx.flags = ci->ci_flags;
+               memcpy(ctx.master_key_descriptor, ci->ci_master_key,
+                               FS_KEY_DESCRIPTOR_SIZE);
+       }
+       get_random_bytes(ctx.nonce, FS_KEY_DERIVATION_NONCE_SIZE);
+       res = parent->i_sb->s_cop->set_context(child, &ctx,
+                                               sizeof(ctx), fs_data);
+       if (res)
+               return res;
+       return preload ? fscrypt_get_encryption_info(child) : 0;
+}
+EXPORT_SYMBOL(fscrypt_inherit_context);
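
Taken together, a plausible end-to-end flow, as a sketch only (the example_*
wrappers are assumptions, not part of this patch): userspace sets a
version-0 policy on an empty directory through fscrypt_process_policy(), and
each inode created underneath is then stamped via fscrypt_inherit_context().

#include <linux/fs.h>
#include <linux/fscrypto.h>
#include <linux/uaccess.h>

/* Hypothetical ioctl handler wiring up fscrypt_process_policy(). */
static long example_set_policy(struct file *filp, void __user *arg)
{
	struct fscrypt_policy policy;

	if (copy_from_user(&policy, arg, sizeof(policy)))
		return -EFAULT;
	/* Fails with -ENOTEMPTY unless the directory is empty. */
	return fscrypt_process_policy(filp, &policy);
}

/* Hypothetical create path: children inherit the parent's context. */
static int example_init_new_inode(struct inode *dir, struct inode *inode,
				  void *fs_private)
{
	if (!dir->i_sb->s_cop->is_encrypted(dir))
		return 0;
	/* preload=false defers key setup until the first open. */
	return fscrypt_inherit_context(dir, inode, fs_private, false);
}
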
index fd27e7e6326e61ddecee85c3435099aa12ba2dfd..378c221d68a92d8d75b74166236aae294c8db5cc 100644 (file)
@@ -1,6 +1,8 @@
 config F2FS_FS
-       tristate "F2FS filesystem support (EXPERIMENTAL)"
+       tristate "F2FS filesystem support"
        depends on BLOCK
+       select CRYPTO
+       select CRYPTO_CRC32
        help
          F2FS is based on Log-structured File System (LFS), which supports
          versatile "flash-friendly" features. The design has been focused on
@@ -23,7 +25,7 @@ config F2FS_STAT_FS
          mounted as f2fs. Each file shows the whole f2fs information.
 
          /sys/kernel/debug/f2fs/status includes:
-           - major file system information managed by f2fs currently
+           - major filesystem information managed by f2fs currently
            - average SIT information about whole segments
            - current memory footprint consumed by f2fs.
 
@@ -45,9 +47,58 @@ config F2FS_FS_POSIX_ACL
        default y
        help
          Posix Access Control Lists (ACLs) support permissions for users and
-         gourps beyond the owner/group/world scheme.
+         groups beyond the owner/group/world scheme.
 
          To learn more about Access Control Lists, visit the POSIX ACLs for
          Linux website <http://acl.bestbits.at/>.
 
          If you don't know what Access Control Lists are, say N
+
+config F2FS_FS_SECURITY
+       bool "F2FS Security Labels"
+       depends on F2FS_FS_XATTR
+       help
+         Security labels provide an access control facility to support Linux
+         Security Modules (LSMs) such as AppArmor, SELinux, Smack and TOMOYO
+         Linux. This option enables an extended attribute handler for file
+         security labels in the f2fs filesystem, and therefore requires
+         extended attribute support to be enabled in advance.
+
+         If you are not using a security module, say N.
+
+config F2FS_CHECK_FS
+       bool "F2FS consistency checking feature"
+       depends on F2FS_FS
+       help
+         Enables BUG_ONs which check filesystem consistency at runtime.
+
+         If you want to improve performance, say N.
+
+config F2FS_FS_ENCRYPTION
+       bool "F2FS Encryption"
+       depends on F2FS_FS
+       depends on F2FS_FS_XATTR
+       select FS_ENCRYPTION
+       help
+         Enable encryption of f2fs files and directories.  This
+         feature is similar to ecryptfs, but it is more memory
+         efficient since it avoids caching the encrypted and
+         decrypted pages in the page cache.
+
+config F2FS_IO_TRACE
+       bool "F2FS IO tracer"
+       depends on F2FS_FS
+       depends on FUNCTION_TRACER
+       help
+         The F2FS IO tracer is based on function tracing; it gathers process
+         information and block IO patterns at the filesystem level.
+
+         If unsure, say N.
+
+config F2FS_FAULT_INJECTION
+       bool "F2FS fault injection facility"
+       depends on F2FS_FS
+       help
+         Test F2FS by injecting faults such as ENOMEM, ENOSPC, and so on.
+
+         If unsure, say N.
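
As a reference point, a configuration fragment that enables the options
added above might look like this (FS_ENCRYPTION is pulled in automatically
by the select in F2FS_FS_ENCRYPTION; the debug options are left off):

CONFIG_F2FS_FS=y
CONFIG_F2FS_FS_XATTR=y
CONFIG_F2FS_FS_POSIX_ACL=y
CONFIG_F2FS_FS_SECURITY=y
CONFIG_F2FS_FS_ENCRYPTION=y
# CONFIG_F2FS_CHECK_FS is not set
# CONFIG_F2FS_IO_TRACE is not set
# CONFIG_F2FS_FAULT_INJECTION is not set
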
index 27a0820340b9f623ef20fb5675a4ca3985bb1a89..ca949ea7c02fd98554a88b8be9cd4855e63abccf 100644 (file)
@@ -1,7 +1,9 @@
 obj-$(CONFIG_F2FS_FS) += f2fs.o
 
-f2fs-y         := dir.o file.o inode.o namei.o hash.o super.o
+f2fs-y         := dir.o file.o inode.o namei.o hash.o super.o inline.o
 f2fs-y         += checkpoint.o gc.o data.o node.o segment.o recovery.o
+f2fs-y         += shrinker.o extent_cache.o
 f2fs-$(CONFIG_F2FS_STAT_FS) += debug.o
 f2fs-$(CONFIG_F2FS_FS_XATTR) += xattr.o
 f2fs-$(CONFIG_F2FS_FS_POSIX_ACL) += acl.o
+f2fs-$(CONFIG_F2FS_IO_TRACE) += trace.o
index 44abc2f286e00ad4ecff03e0293f80ab8d3d19fd..4a8a78f0bc97610bb67da53de217bf4cf098587e 100644 (file)
@@ -17,9 +17,6 @@
 #include "xattr.h"
 #include "acl.h"
 
-#define get_inode_mode(i)      ((is_inode_flag_set(F2FS_I(i), FI_ACL_MODE)) ? \
-                                       (F2FS_I(i)->i_acl_mode) : ((i)->i_mode))
-
 static inline size_t f2fs_acl_size(int count)
 {
        if (count <= 4) {
@@ -65,7 +62,7 @@ static struct posix_acl *f2fs_acl_from_disk(const char *value, size_t size)
        if (count == 0)
                return NULL;
 
-       acl = posix_acl_alloc(count, GFP_KERNEL);
+       acl = posix_acl_alloc(count, GFP_NOFS);
        if (!acl)
                return ERR_PTR(-ENOMEM);
 
@@ -112,14 +109,16 @@ fail:
        return ERR_PTR(-EINVAL);
 }
 
-static void *f2fs_acl_to_disk(const struct posix_acl *acl, size_t *size)
+static void *f2fs_acl_to_disk(struct f2fs_sb_info *sbi,
+                               const struct posix_acl *acl, size_t *size)
 {
        struct f2fs_acl_header *f2fs_acl;
        struct f2fs_acl_entry *entry;
        int i;
 
-       f2fs_acl = kmalloc(sizeof(struct f2fs_acl_header) + acl->a_count *
-                       sizeof(struct f2fs_acl_entry), GFP_KERNEL);
+       f2fs_acl = f2fs_kmalloc(sbi, sizeof(struct f2fs_acl_header) +
+                       acl->a_count * sizeof(struct f2fs_acl_entry),
+                       GFP_NOFS);
        if (!f2fs_acl)
                return ERR_PTR(-ENOMEM);
 
@@ -165,7 +164,8 @@ fail:
        return ERR_PTR(-EINVAL);
 }
 
-struct posix_acl *f2fs_get_acl(struct inode *inode, int type)
+static struct posix_acl *__f2fs_get_acl(struct inode *inode, int type,
+                                               struct page *dpage)
 {
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        int name_index = F2FS_XATTR_INDEX_POSIX_ACL_DEFAULT;
@@ -183,12 +183,13 @@ struct posix_acl *f2fs_get_acl(struct inode *inode, int type)
        if (type == ACL_TYPE_ACCESS)
                name_index = F2FS_XATTR_INDEX_POSIX_ACL_ACCESS;
 
-       retval = f2fs_getxattr(inode, name_index, "", NULL, 0);
+       retval = f2fs_getxattr(inode, name_index, "", NULL, 0, dpage);
        if (retval > 0) {
-               value = kmalloc(retval, GFP_KERNEL);
+               value = f2fs_kmalloc(F2FS_I_SB(inode), retval, GFP_F2FS_ZERO);
                if (!value)
                        return ERR_PTR(-ENOMEM);
-               retval = f2fs_getxattr(inode, name_index, "", value, retval);
+               retval = f2fs_getxattr(inode, name_index, "", value,
+                                                       retval, dpage);
        }
 
        if (retval > 0)
@@ -205,10 +206,15 @@ struct posix_acl *f2fs_get_acl(struct inode *inode, int type)
        return acl;
 }
 
-static int f2fs_set_acl(struct inode *inode, int type, struct posix_acl *acl)
+struct posix_acl *f2fs_get_acl(struct inode *inode, int type)
+{
+       return __f2fs_get_acl(inode, type, NULL);
+}
+
+static int f2fs_set_acl(struct inode *inode, int type,
+                       struct posix_acl *acl, struct page *ipage)
 {
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
-       struct f2fs_inode_info *fi = F2FS_I(inode);
        int name_index;
        void *value = NULL;
        size_t size = 0;
@@ -223,12 +229,10 @@ static int f2fs_set_acl(struct inode *inode, int type, struct posix_acl *acl)
        case ACL_TYPE_ACCESS:
                name_index = F2FS_XATTR_INDEX_POSIX_ACL_ACCESS;
                if (acl) {
-                       error = posix_acl_equiv_mode(acl, &inode->i_mode);
-                       if (error < 0)
+                       error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
+                       if (error)
                                return error;
-                       set_acl_inode(fi, inode->i_mode);
-                       if (error == 0)
-                               acl = NULL;
+                       set_acl_inode(inode, inode->i_mode);
                }
                break;
 
@@ -242,33 +246,36 @@ static int f2fs_set_acl(struct inode *inode, int type, struct posix_acl *acl)
                return -EINVAL;
        }
 
+       f2fs_mark_inode_dirty_sync(inode, true);
+
        if (acl) {
-               value = f2fs_acl_to_disk(acl, &size);
+               value = f2fs_acl_to_disk(F2FS_I_SB(inode), acl, &size);
                if (IS_ERR(value)) {
-                       cond_clear_inode_flag(fi, FI_ACL_MODE);
+                       clear_inode_flag(inode, FI_ACL_MODE);
                        return (int)PTR_ERR(value);
                }
        }
 
-       error = f2fs_setxattr(inode, name_index, "", value, size);
+       error = f2fs_setxattr(inode, name_index, "", value, size, ipage, 0);
 
        kfree(value);
        if (!error)
                set_cached_acl(inode, type, acl);
 
-       cond_clear_inode_flag(fi, FI_ACL_MODE);
+       clear_inode_flag(inode, FI_ACL_MODE);
        return error;
 }
 
-int f2fs_init_acl(struct inode *inode, struct inode *dir)
+int f2fs_init_acl(struct inode *inode, struct inode *dir, struct page *ipage,
+                                                       struct page *dpage)
 {
-       struct posix_acl *acl = NULL;
        struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
+       struct posix_acl *acl = NULL;
        int error = 0;
 
        if (!S_ISLNK(inode->i_mode)) {
                if (test_opt(sbi, POSIX_ACL)) {
-                       acl = f2fs_get_acl(dir, ACL_TYPE_DEFAULT);
+                       acl = __f2fs_get_acl(dir, ACL_TYPE_DEFAULT, dpage);
                        if (IS_ERR(acl))
                                return PTR_ERR(acl);
                }
@@ -276,19 +283,21 @@ int f2fs_init_acl(struct inode *inode, struct inode *dir)
                        inode->i_mode &= ~current_umask();
        }
 
-       if (test_opt(sbi, POSIX_ACL) && acl) {
+       if (!test_opt(sbi, POSIX_ACL) || !acl)
+               goto cleanup;
 
-               if (S_ISDIR(inode->i_mode)) {
-                       error = f2fs_set_acl(inode, ACL_TYPE_DEFAULT, acl);
-                       if (error)
-                               goto cleanup;
-               }
-               error = posix_acl_create(&acl, GFP_KERNEL, &inode->i_mode);
-               if (error < 0)
-                       return error;
-               if (error > 0)
-                       error = f2fs_set_acl(inode, ACL_TYPE_ACCESS, acl);
+       if (S_ISDIR(inode->i_mode)) {
+               error = f2fs_set_acl(inode, ACL_TYPE_DEFAULT, acl, ipage);
+               if (error)
+                       goto cleanup;
        }
+       error = posix_acl_create(&acl, GFP_KERNEL, &inode->i_mode);
+       if (error < 0)
+               return error;
+       if (error > 0)
+               error = f2fs_set_acl(inode, ACL_TYPE_ACCESS, acl, ipage);
+
+       f2fs_mark_inode_dirty_sync(inode, true);
 cleanup:
        posix_acl_release(acl);
        return error;
@@ -313,7 +322,8 @@ int f2fs_acl_chmod(struct inode *inode)
        error = posix_acl_chmod(&acl, GFP_KERNEL, mode);
        if (error)
                return error;
-       error = f2fs_set_acl(inode, ACL_TYPE_ACCESS, acl);
+
+       error = f2fs_set_acl(inode, ACL_TYPE_ACCESS, acl, NULL);
        posix_acl_release(acl);
        return error;
 }
@@ -349,7 +359,7 @@ static int f2fs_xattr_get_acl(struct dentry *dentry, const char *name,
        if (!test_opt(sbi, POSIX_ACL))
                return -EOPNOTSUPP;
 
-       acl = f2fs_get_acl(dentry->d_inode, type);
+       acl = f2fs_get_acl(d_inode(dentry), type);
        if (IS_ERR(acl))
                return PTR_ERR(acl);
        if (!acl)
@@ -364,7 +374,7 @@ static int f2fs_xattr_set_acl(struct dentry *dentry, const char *name,
                const void *value, size_t size, int flags, int type)
 {
        struct f2fs_sb_info *sbi = F2FS_SB(dentry->d_sb);
-       struct inode *inode = dentry->d_inode;
+       struct inode *inode = d_inode(dentry);
        struct posix_acl *acl = NULL;
        int error;
 
@@ -388,7 +398,7 @@ static int f2fs_xattr_set_acl(struct dentry *dentry, const char *name,
                acl = NULL;
        }
 
-       error = f2fs_set_acl(inode, type, acl);
+       error = f2fs_set_acl(inode, type, acl, NULL);
 
 release_and_out:
        posix_acl_release(acl);
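
The ACL_TYPE_ACCESS hunk above deserves a note: replacing the open-coded
posix_acl_equiv_mode() sequence with posix_acl_update_mode() keeps the
mode/ACL folding behaviour but also clears the SGID bit when the caller is
not in the owning group and lacks CAP_FSETID. A minimal sketch of the
resulting caller pattern (illustrative only, not the patch itself):

#include <linux/fs.h>
#include <linux/posix_acl.h>

static int example_apply_access_acl(struct inode *inode,
				    struct posix_acl *acl)
{
	int error = posix_acl_update_mode(inode, &inode->i_mode, &acl);

	if (error)
		return error;
	/*
	 * An ACL equivalent to the mode bits has been folded into i_mode
	 * and acl is now NULL; otherwise acl must still be written back
	 * as an xattr, as f2fs_set_acl() does above.
	 */
	return 0;
}
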
index 80f430674417a696925f90fdf622dac8710e0909..09f93758b3f914ab0c68bba64b4d423e1d0627d6 100644 (file)
@@ -36,11 +36,11 @@ struct f2fs_acl_header {
 
 #ifdef CONFIG_F2FS_FS_POSIX_ACL
 
-extern struct posix_acl *f2fs_get_acl(struct inode *inode, int type);
-extern int f2fs_acl_chmod(struct inode *inode);
-extern int f2fs_init_acl(struct inode *inode, struct inode *dir);
+extern struct posix_acl *f2fs_get_acl(struct inode *, int);
+extern int f2fs_acl_chmod(struct inode *);
+extern int f2fs_init_acl(struct inode *, struct inode *, struct page *,
+                                                       struct page *);
 #else
-#define f2fs_check_acl NULL
 #define f2fs_get_acl   NULL
 #define f2fs_set_acl   NULL
 
@@ -49,7 +49,8 @@ static inline int f2fs_acl_chmod(struct inode *inode)
        return 0;
 }
 
-static inline int f2fs_init_acl(struct inode *inode, struct inode *dir)
+static inline int f2fs_init_acl(struct inode *inode, struct inode *dir,
+                               struct page *ipage, struct page *dpage)
 {
        return 0;
 }
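
The !CONFIG_F2FS_FS_POSIX_ACL stubs keep call sites unconditional; a sketch
of the intended caller (hypothetical, mirroring how the patch threads the
ipage/dpage pages through):

/* Behaves identically with ACL support compiled in or out. */
static int example_init_inode_metadata(struct inode *inode, struct inode *dir,
				       struct page *ipage, struct page *dpage)
{
	int err = f2fs_init_acl(inode, dir, ipage, dpage);

	if (err)
		return err;
	/* xattr/security initialisation would follow here */
	return 0;
}
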
index b1de01da1a409b42ae5bc6aae4989c201c920639..0fcc5276837bdf75b99856352bd7a07b7841a3a4 100644 (file)
 #include "f2fs.h"
 #include "node.h"
 #include "segment.h"
+#include "trace.h"
 #include <trace/events/f2fs.h>
 
-static struct kmem_cache *orphan_entry_slab;
-static struct kmem_cache *inode_entry_slab;
+static struct kmem_cache *ino_entry_slab;
+struct kmem_cache *inode_entry_slab;
+
+void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io)
+{
+       set_ckpt_flags(sbi, CP_ERROR_FLAG);
+       sbi->sb->s_flags |= MS_RDONLY;
+       if (!end_io)
+               f2fs_flush_merged_bios(sbi);
+}
 
 /*
  * We guarantee no failure on the returned page.
  */
 struct page *grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
 {
-       struct address_space *mapping = sbi->meta_inode->i_mapping;
+       struct address_space *mapping = META_MAPPING(sbi);
        struct page *page = NULL;
 repeat:
-       page = grab_cache_page(mapping, index);
+       page = f2fs_grab_cache_page(mapping, index, false);
        if (!page) {
                cond_resched();
                goto repeat;
        }
-
-       /* We wait writeback only inside grab_meta_page() */
-       wait_on_page_writeback(page);
-       SetPageUptodate(page);
+       f2fs_wait_on_page_writeback(page, META, true);
+       if (!PageUptodate(page))
+               SetPageUptodate(page);
        return page;
 }
 
 /*
  * We guarantee no failure on the returned page.
  */
-struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
+static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
+                                                       bool is_meta)
 {
-       struct address_space *mapping = sbi->meta_inode->i_mapping;
+       struct address_space *mapping = META_MAPPING(sbi);
        struct page *page;
+       struct f2fs_io_info fio = {
+               .sbi = sbi,
+               .type = META,
+               .rw = READ_SYNC | REQ_META | REQ_PRIO,
+               .old_blkaddr = index,
+               .new_blkaddr = index,
+               .encrypted_page = NULL,
+       };
+
+       if (unlikely(!is_meta))
+               fio.rw &= ~REQ_META;
 repeat:
-       page = grab_cache_page(mapping, index);
+       page = f2fs_grab_cache_page(mapping, index, false);
        if (!page) {
                cond_resched();
                goto repeat;
@@ -61,116 +81,298 @@ repeat:
        if (PageUptodate(page))
                goto out;
 
-       if (f2fs_readpage(sbi, page, index, READ_SYNC))
+       fio.page = page;
+
+       if (f2fs_submit_page_bio(&fio)) {
+               f2fs_put_page(page, 1);
                goto repeat;
+       }
 
        lock_page(page);
-       if (page->mapping != mapping) {
+       if (unlikely(page->mapping != mapping)) {
                f2fs_put_page(page, 1);
                goto repeat;
        }
+
+       /*
+        * if there is any IO error when accessing the device, make our
+        * filesystem readonly and make sure we do not write a checkpoint
+        * with a non-uptodate meta page.
+        */
+       if (unlikely(!PageUptodate(page)))
+               f2fs_stop_checkpoint(sbi, false);
 out:
        mark_page_accessed(page);
        return page;
 }
 
+struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
+{
+       return __get_meta_page(sbi, index, true);
+}
+
+/* for POR only */
+struct page *get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index)
+{
+       return __get_meta_page(sbi, index, false);
+}
+
+bool is_valid_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr, int type)
+{
+       switch (type) {
+       case META_NAT:
+               break;
+       case META_SIT:
+               if (unlikely(blkaddr >= SIT_BLK_CNT(sbi)))
+                       return false;
+               break;
+       case META_SSA:
+               if (unlikely(blkaddr >= MAIN_BLKADDR(sbi) ||
+                       blkaddr < SM_I(sbi)->ssa_blkaddr))
+                       return false;
+               break;
+       case META_CP:
+               if (unlikely(blkaddr >= SIT_I(sbi)->sit_base_addr ||
+                       blkaddr < __start_cp_addr(sbi)))
+                       return false;
+               break;
+       case META_POR:
+               if (unlikely(blkaddr >= MAX_BLKADDR(sbi) ||
+                       blkaddr < MAIN_BLKADDR(sbi)))
+                       return false;
+               break;
+       default:
+               BUG();
+       }
+
+       return true;
+}
+
+/*
+ * Readahead CP/NAT/SIT/SSA pages
+ */
+int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
+                                                       int type, bool sync)
+{
+       struct page *page;
+       block_t blkno = start;
+       struct f2fs_io_info fio = {
+               .sbi = sbi,
+               .type = META,
+               .rw = sync ? (READ_SYNC | REQ_META | REQ_PRIO) : READA,
+               .encrypted_page = NULL,
+       };
+       struct blk_plug plug;
+
+       if (unlikely(type == META_POR))
+               fio.rw &= ~REQ_META;
+
+       blk_start_plug(&plug);
+       for (; nrpages-- > 0; blkno++) {
+
+               if (!is_valid_blkaddr(sbi, blkno, type))
+                       goto out;
+
+               switch (type) {
+               case META_NAT:
+                       if (unlikely(blkno >=
+                                       NAT_BLOCK_OFFSET(NM_I(sbi)->max_nid)))
+                               blkno = 0;
+                       /* get nat block addr */
+                       fio.new_blkaddr = current_nat_addr(sbi,
+                                       blkno * NAT_ENTRY_PER_BLOCK);
+                       break;
+               case META_SIT:
+                       /* get sit block addr */
+                       fio.new_blkaddr = current_sit_addr(sbi,
+                                       blkno * SIT_ENTRY_PER_BLOCK);
+                       break;
+               case META_SSA:
+               case META_CP:
+               case META_POR:
+                       fio.new_blkaddr = blkno;
+                       break;
+               default:
+                       BUG();
+               }
+
+               page = f2fs_grab_cache_page(META_MAPPING(sbi),
+                                               fio.new_blkaddr, false);
+               if (!page)
+                       continue;
+               if (PageUptodate(page)) {
+                       f2fs_put_page(page, 1);
+                       continue;
+               }
+
+               fio.page = page;
+               fio.old_blkaddr = fio.new_blkaddr;
+               f2fs_submit_page_mbio(&fio);
+               f2fs_put_page(page, 0);
+       }
+out:
+       f2fs_submit_merged_bio(sbi, META, READ);
+       blk_finish_plug(&plug);
+       return blkno - start;
+}
+
+void ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index)
+{
+       struct page *page;
+       bool readahead = false;
+
+       page = find_get_page(META_MAPPING(sbi), index);
+       if (!page || !PageUptodate(page))
+               readahead = true;
+       f2fs_put_page(page, 0);
+
+       if (readahead)
+               ra_meta_pages(sbi, index, BIO_MAX_PAGES, META_POR, true);
+}
+
 static int f2fs_write_meta_page(struct page *page,
                                struct writeback_control *wbc)
 {
-       struct inode *inode = page->mapping->host;
-       struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+       struct f2fs_sb_info *sbi = F2FS_P_SB(page);
 
-       /* Should not write any meta pages, if any IO error was occurred */
-       if (wbc->for_reclaim ||
-                       is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ERROR_FLAG)) {
-               dec_page_count(sbi, F2FS_DIRTY_META);
-               wbc->pages_skipped++;
-               set_page_dirty(page);
-               return AOP_WRITEPAGE_ACTIVATE;
-       }
+       trace_f2fs_writepage(page, META);
 
-       wait_on_page_writeback(page);
+       if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
+               goto redirty_out;
+       if (wbc->for_reclaim && page->index < GET_SUM_BLOCK(sbi, 0))
+               goto redirty_out;
+       if (unlikely(f2fs_cp_error(sbi)))
+               goto redirty_out;
 
        write_meta_page(sbi, page);
        dec_page_count(sbi, F2FS_DIRTY_META);
+
+       if (wbc->for_reclaim)
+               f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, META, WRITE);
+
        unlock_page(page);
+
+       if (unlikely(f2fs_cp_error(sbi)))
+               f2fs_submit_merged_bio(sbi, META, WRITE);
+
        return 0;
+
+redirty_out:
+       redirty_page_for_writepage(wbc, page);
+       return AOP_WRITEPAGE_ACTIVATE;
 }
 
 static int f2fs_write_meta_pages(struct address_space *mapping,
                                struct writeback_control *wbc)
 {
-       struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
-       struct block_device *bdev = sbi->sb->s_bdev;
-       long written;
+       struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
+       long diff, written;
 
-       if (wbc->for_kupdate)
-               return 0;
+       /* collect a number of dirty meta pages and write them together */
+       if (wbc->for_kupdate ||
+               get_pages(sbi, F2FS_DIRTY_META) < nr_pages_to_skip(sbi, META))
+               goto skip_write;
 
-       if (get_pages(sbi, F2FS_DIRTY_META) == 0)
-               return 0;
+       trace_f2fs_writepages(mapping->host, wbc, META);
 
        /* if mounting has failed, skip writing node pages */
        mutex_lock(&sbi->cp_mutex);
-       written = sync_meta_pages(sbi, META, bio_get_nr_vecs(bdev));
+       diff = nr_pages_to_write(sbi, META, wbc);
+       written = sync_meta_pages(sbi, META, wbc->nr_to_write);
        mutex_unlock(&sbi->cp_mutex);
-       wbc->nr_to_write -= written;
+       wbc->nr_to_write = max((long)0, wbc->nr_to_write - written - diff);
+       return 0;
+
+skip_write:
+       wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_META);
+       trace_f2fs_writepages(mapping->host, wbc, META);
        return 0;
 }
 
 long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
                                                long nr_to_write)
 {
-       struct address_space *mapping = sbi->meta_inode->i_mapping;
-       pgoff_t index = 0, end = LONG_MAX;
+       struct address_space *mapping = META_MAPPING(sbi);
+       pgoff_t index = 0, end = ULONG_MAX, prev = ULONG_MAX;
        struct pagevec pvec;
        long nwritten = 0;
        struct writeback_control wbc = {
                .for_reclaim = 0,
        };
+       struct blk_plug plug;
 
        pagevec_init(&pvec, 0);
 
+       blk_start_plug(&plug);
+
        while (index <= end) {
                int i, nr_pages;
                nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
                                PAGECACHE_TAG_DIRTY,
                                min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
-               if (nr_pages == 0)
+               if (unlikely(nr_pages == 0))
                        break;
 
                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];
+
+                       if (prev == ULONG_MAX)
+                               prev = page->index - 1;
+                       if (nr_to_write != LONG_MAX && page->index != prev + 1) {
+                               pagevec_release(&pvec);
+                               goto stop;
+                       }
+
                        lock_page(page);
-                       BUG_ON(page->mapping != mapping);
-                       BUG_ON(!PageDirty(page));
-                       clear_page_dirty_for_io(page);
-                       if (f2fs_write_meta_page(page, &wbc)) {
+
+                       if (unlikely(page->mapping != mapping)) {
+continue_unlock:
+                               unlock_page(page);
+                               continue;
+                       }
+                       if (!PageDirty(page)) {
+                               /* someone wrote it for us */
+                               goto continue_unlock;
+                       }
+
+                       f2fs_wait_on_page_writeback(page, META, true);
+
+                       BUG_ON(PageWriteback(page));
+                       if (!clear_page_dirty_for_io(page))
+                               goto continue_unlock;
+
+                       if (mapping->a_ops->writepage(page, &wbc)) {
                                unlock_page(page);
                                break;
                        }
-                       if (nwritten++ >= nr_to_write)
+                       nwritten++;
+                       prev = page->index;
+                       if (unlikely(nwritten >= nr_to_write))
                                break;
                }
                pagevec_release(&pvec);
                cond_resched();
        }
-
+stop:
        if (nwritten)
-               f2fs_submit_bio(sbi, type, nr_to_write == LONG_MAX);
+               f2fs_submit_merged_bio(sbi, type, WRITE);
+
+       blk_finish_plug(&plug);
 
        return nwritten;
 }
 
 static int f2fs_set_meta_page_dirty(struct page *page)
 {
-       struct address_space *mapping = page->mapping;
-       struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
+       trace_f2fs_set_page_dirty(page, META);
 
-       SetPageUptodate(page);
+       if (!PageUptodate(page))
+               SetPageUptodate(page);
        if (!PageDirty(page)) {
-               __set_page_dirty_nobuffers(page);
-               inc_page_count(sbi, F2FS_DIRTY_META);
+               f2fs_set_page_dirty_nobuffers(page);
+               inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_META);
+               SetPagePrivate(page);
+               f2fs_trace_pid(page);
                return 1;
        }
        return 0;
@@ -180,138 +382,256 @@ const struct address_space_operations f2fs_meta_aops = {
        .writepage      = f2fs_write_meta_page,
        .writepages     = f2fs_write_meta_pages,
        .set_page_dirty = f2fs_set_meta_page_dirty,
+       .invalidatepage = f2fs_invalidate_page,
+       .releasepage    = f2fs_release_page,
 };
 
-int check_orphan_space(struct f2fs_sb_info *sbi)
+static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
 {
-       unsigned int max_orphans;
-       int err = 0;
+       struct inode_management *im = &sbi->im[type];
+       struct ino_entry *e, *tmp;
 
-       /*
-        * considering 512 blocks in a segment 5 blocks are needed for cp
-        * and log segment summaries. Remaining blocks are used to keep
-        * orphan entries with the limitation one reserved segment
-        * for cp pack we can have max 1020*507 orphan entries
-        */
-       max_orphans = (sbi->blocks_per_seg - 5) * F2FS_ORPHANS_PER_BLOCK;
-       mutex_lock(&sbi->orphan_inode_mutex);
-       if (sbi->n_orphans >= max_orphans)
-               err = -ENOSPC;
-       mutex_unlock(&sbi->orphan_inode_mutex);
-       return err;
+       tmp = f2fs_kmem_cache_alloc(ino_entry_slab, GFP_NOFS);
+retry:
+       radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);
+
+       spin_lock(&im->ino_lock);
+       e = radix_tree_lookup(&im->ino_root, ino);
+       if (!e) {
+               e = tmp;
+               if (radix_tree_insert(&im->ino_root, ino, e)) {
+                       spin_unlock(&im->ino_lock);
+                       radix_tree_preload_end();
+                       goto retry;
+               }
+               memset(e, 0, sizeof(struct ino_entry));
+               e->ino = ino;
+
+               list_add_tail(&e->list, &im->ino_list);
+               if (type != ORPHAN_INO)
+                       im->ino_num++;
+       }
+       spin_unlock(&im->ino_lock);
+       radix_tree_preload_end();
+
+       if (e != tmp)
+               kmem_cache_free(ino_entry_slab, tmp);
 }
 
-void add_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
+static void __remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
 {
-       struct list_head *head, *this;
-       struct orphan_inode_entry *new = NULL, *orphan = NULL;
-
-       mutex_lock(&sbi->orphan_inode_mutex);
-       head = &sbi->orphan_inode_list;
-       list_for_each(this, head) {
-               orphan = list_entry(this, struct orphan_inode_entry, list);
-               if (orphan->ino == ino)
-                       goto out;
-               if (orphan->ino > ino)
-                       break;
-               orphan = NULL;
+       struct inode_management *im = &sbi->im[type];
+       struct ino_entry *e;
+
+       spin_lock(&im->ino_lock);
+       e = radix_tree_lookup(&im->ino_root, ino);
+       if (e) {
+               list_del(&e->list);
+               radix_tree_delete(&im->ino_root, ino);
+               im->ino_num--;
+               spin_unlock(&im->ino_lock);
+               kmem_cache_free(ino_entry_slab, e);
+               return;
        }
-retry:
-       new = kmem_cache_alloc(orphan_entry_slab, GFP_ATOMIC);
-       if (!new) {
-               cond_resched();
-               goto retry;
+       spin_unlock(&im->ino_lock);
+}
+
+void add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
+{
+       /* add new dirty ino entry into list */
+       __add_ino_entry(sbi, ino, type);
+}
+
+void remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
+{
+       /* remove dirty ino entry from list */
+       __remove_ino_entry(sbi, ino, type);
+}
+
+/* mode should be APPEND_INO or UPDATE_INO */
+bool exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode)
+{
+       struct inode_management *im = &sbi->im[mode];
+       struct ino_entry *e;
+
+       spin_lock(&im->ino_lock);
+       e = radix_tree_lookup(&im->ino_root, ino);
+       spin_unlock(&im->ino_lock);
+       return e ? true : false;
+}
+
+void release_ino_entry(struct f2fs_sb_info *sbi, bool all)
+{
+       struct ino_entry *e, *tmp;
+       int i;
+
+       for (i = all ? ORPHAN_INO : APPEND_INO; i <= UPDATE_INO; i++) {
+               struct inode_management *im = &sbi->im[i];
+
+               spin_lock(&im->ino_lock);
+               list_for_each_entry_safe(e, tmp, &im->ino_list, list) {
+                       list_del(&e->list);
+                       radix_tree_delete(&im->ino_root, e->ino);
+                       kmem_cache_free(ino_entry_slab, e);
+                       im->ino_num--;
+               }
+               spin_unlock(&im->ino_lock);
        }
-       new->ino = ino;
+}
+
+int acquire_orphan_inode(struct f2fs_sb_info *sbi)
+{
+       struct inode_management *im = &sbi->im[ORPHAN_INO];
+       int err = 0;
+
+       spin_lock(&im->ino_lock);
 
-       /* add new_oentry into list which is sorted by inode number */
-       if (orphan)
-               list_add(&new->list, this->prev);
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+       if (time_to_inject(sbi, FAULT_ORPHAN)) {
+               spin_unlock(&im->ino_lock);
+               return -ENOSPC;
+       }
+#endif
+       if (unlikely(im->ino_num >= sbi->max_orphans))
+               err = -ENOSPC;
        else
-               list_add_tail(&new->list, head);
+               im->ino_num++;
+       spin_unlock(&im->ino_lock);
 
-       sbi->n_orphans++;
-out:
-       mutex_unlock(&sbi->orphan_inode_mutex);
+       return err;
+}
+
+void release_orphan_inode(struct f2fs_sb_info *sbi)
+{
+       struct inode_management *im = &sbi->im[ORPHAN_INO];
+
+       spin_lock(&im->ino_lock);
+       f2fs_bug_on(sbi, im->ino_num == 0);
+       im->ino_num--;
+       spin_unlock(&im->ino_lock);
+}
+
+void add_orphan_inode(struct inode *inode)
+{
+       /* add new orphan ino entry into list */
+       __add_ino_entry(F2FS_I_SB(inode), inode->i_ino, ORPHAN_INO);
+       update_inode_page(inode);
 }
 
 void remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
 {
-       struct list_head *this, *next, *head;
-       struct orphan_inode_entry *orphan;
-
-       mutex_lock(&sbi->orphan_inode_mutex);
-       head = &sbi->orphan_inode_list;
-       list_for_each_safe(this, next, head) {
-               orphan = list_entry(this, struct orphan_inode_entry, list);
-               if (orphan->ino == ino) {
-                       list_del(&orphan->list);
-                       kmem_cache_free(orphan_entry_slab, orphan);
-                       sbi->n_orphans--;
-                       break;
-               }
-       }
-       mutex_unlock(&sbi->orphan_inode_mutex);
+       /* remove orphan entry from orphan list */
+       __remove_ino_entry(sbi, ino, ORPHAN_INO);
 }
 
-static void recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
+static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
 {
-       struct inode *inode = f2fs_iget(sbi->sb, ino);
-       BUG_ON(IS_ERR(inode));
+       struct inode *inode;
+       struct node_info ni;
+       int err = acquire_orphan_inode(sbi);
+
+       if (err) {
+               set_sbi_flag(sbi, SBI_NEED_FSCK);
+               f2fs_msg(sbi->sb, KERN_WARNING,
+                               "%s: orphan failed (ino=%x), run fsck to fix.",
+                               __func__, ino);
+               return err;
+       }
+
+       __add_ino_entry(sbi, ino, ORPHAN_INO);
+
+       inode = f2fs_iget_retry(sbi->sb, ino);
+       if (IS_ERR(inode)) {
+               /*
+                * it is a bug if we cannot find the orphan inode
+                * entry here.
+                */
+               f2fs_bug_on(sbi, PTR_ERR(inode) == -ENOENT);
+               return PTR_ERR(inode);
+       }
+
        clear_nlink(inode);
 
        /* truncate all the data during iput */
        iput(inode);
+
+       get_node_info(sbi, ino, &ni);
+
+       /* ENOMEM was fully retried in f2fs_evict_inode. */
+       if (ni.blk_addr != NULL_ADDR) {
+               set_sbi_flag(sbi, SBI_NEED_FSCK);
+               f2fs_msg(sbi->sb, KERN_WARNING,
+                       "%s: orphan failed (ino=%x), run fsck to fix.",
+                               __func__, ino);
+               return -EIO;
+       }
+       __remove_ino_entry(sbi, ino, ORPHAN_INO);
+       return 0;
 }
 
 int recover_orphan_inodes(struct f2fs_sb_info *sbi)
 {
-       block_t start_blk, orphan_blkaddr, i, j;
+       block_t start_blk, orphan_blocks, i, j;
+       int err;
 
-       if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG))
+       if (!is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG))
                return 0;
 
-       sbi->por_doing = 1;
-       start_blk = __start_cp_addr(sbi) + 1;
-       orphan_blkaddr = __start_sum_addr(sbi) - 1;
+       start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi);
+       orphan_blocks = __start_sum_addr(sbi) - 1 - __cp_payload(sbi);
+
+       ra_meta_pages(sbi, start_blk, orphan_blocks, META_CP, true);
 
-       for (i = 0; i < orphan_blkaddr; i++) {
+       for (i = 0; i < orphan_blocks; i++) {
                struct page *page = get_meta_page(sbi, start_blk + i);
                struct f2fs_orphan_block *orphan_blk;
 
                orphan_blk = (struct f2fs_orphan_block *)page_address(page);
                for (j = 0; j < le32_to_cpu(orphan_blk->entry_count); j++) {
                        nid_t ino = le32_to_cpu(orphan_blk->ino[j]);
-                       recover_orphan_inode(sbi, ino);
+                       err = recover_orphan_inode(sbi, ino);
+                       if (err) {
+                               f2fs_put_page(page, 1);
+                               return err;
+                       }
                }
                f2fs_put_page(page, 1);
        }
        /* clear Orphan Flag */
-       clear_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG);
-       sbi->por_doing = 0;
+       clear_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG);
        return 0;
 }
 
 static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
 {
-       struct list_head *head, *this, *next;
+       struct list_head *head;
        struct f2fs_orphan_block *orphan_blk = NULL;
-       struct page *page = NULL;
        unsigned int nentries = 0;
        unsigned short index = 1;
        unsigned short orphan_blocks;
+       struct page *page = NULL;
+       struct ino_entry *orphan = NULL;
+       struct inode_management *im = &sbi->im[ORPHAN_INO];
 
-       orphan_blocks = (unsigned short)((sbi->n_orphans +
-               (F2FS_ORPHANS_PER_BLOCK - 1)) / F2FS_ORPHANS_PER_BLOCK);
+       orphan_blocks = GET_ORPHAN_BLOCKS(im->ino_num);
 
-       mutex_lock(&sbi->orphan_inode_mutex);
-       head = &sbi->orphan_inode_list;
+       /*
+        * we don't need to take spin_lock(&im->ino_lock) here, since all the
+        * orphan inode operations are covered under f2fs_lock_op().
+        * Moreover, the spin lock must be avoided because the page
+        * operations below may sleep.
+        */
+       head = &im->ino_list;
 
        /* loop for each orphan inode entry and write them into journal blocks */
-       list_for_each_safe(this, next, head) {
-               struct orphan_inode_entry *orphan;
+       list_for_each_entry(orphan, head, list) {
+               if (!page) {
+                       page = grab_meta_page(sbi, start_blk++);
+                       orphan_blk =
+                               (struct f2fs_orphan_block *)page_address(page);
+                       memset(orphan_blk, 0, sizeof(*orphan_blk));
+               }
 
-               orphan = list_entry(this, struct orphan_inode_entry, list);
+               orphan_blk->ino[nentries++] = cpu_to_le32(orphan->ino);
 
                if (nentries == F2FS_ORPHANS_PER_BLOCK) {
                        /*
@@ -325,70 +645,69 @@ static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
                        set_page_dirty(page);
                        f2fs_put_page(page, 1);
                        index++;
-                       start_blk++;
                        nentries = 0;
                        page = NULL;
                }
-               if (page)
-                       goto page_exist;
-
-               page = grab_meta_page(sbi, start_blk);
-               orphan_blk = (struct f2fs_orphan_block *)page_address(page);
-               memset(orphan_blk, 0, sizeof(*orphan_blk));
-page_exist:
-               orphan_blk->ino[nentries++] = cpu_to_le32(orphan->ino);
        }
-       if (!page)
-               goto end;
 
-       orphan_blk->blk_addr = cpu_to_le16(index);
-       orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
-       orphan_blk->entry_count = cpu_to_le32(nentries);
-       set_page_dirty(page);
-       f2fs_put_page(page, 1);
-end:
-       mutex_unlock(&sbi->orphan_inode_mutex);
+       if (page) {
+               orphan_blk->blk_addr = cpu_to_le16(index);
+               orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
+               orphan_blk->entry_count = cpu_to_le32(nentries);
+               set_page_dirty(page);
+               f2fs_put_page(page, 1);
+       }
 }
 
-static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
-                               block_t cp_addr, unsigned long long *version)
+static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr,
+               struct f2fs_checkpoint **cp_block, struct page **cp_page,
+               unsigned long long *version)
 {
-       struct page *cp_page_1, *cp_page_2 = NULL;
        unsigned long blk_size = sbi->blocksize;
-       struct f2fs_checkpoint *cp_block;
-       unsigned long long cur_version = 0, pre_version = 0;
-       unsigned int crc = 0;
-       size_t crc_offset;
+       size_t crc_offset = 0;
+       __u32 crc = 0;
 
-       /* Read the 1st cp block in this CP pack */
-       cp_page_1 = get_meta_page(sbi, cp_addr);
+       *cp_page = get_meta_page(sbi, cp_addr);
+       *cp_block = (struct f2fs_checkpoint *)page_address(*cp_page);
 
-       /* get the version number */
-       cp_block = (struct f2fs_checkpoint *)page_address(cp_page_1);
-       crc_offset = le32_to_cpu(cp_block->checksum_offset);
-       if (crc_offset >= blk_size)
-               goto invalid_cp1;
+       crc_offset = le32_to_cpu((*cp_block)->checksum_offset);
+       if (crc_offset >= blk_size) {
+               f2fs_msg(sbi->sb, KERN_WARNING,
+                       "invalid crc_offset: %zu", crc_offset);
+               return -EINVAL;
+       }
 
-       crc = *(unsigned int *)((unsigned char *)cp_block + crc_offset);
-       if (!f2fs_crc_valid(crc, cp_block, crc_offset))
-               goto invalid_cp1;
+       crc = le32_to_cpu(*((__le32 *)((unsigned char *)*cp_block
+                                                       + crc_offset)));
+       if (!f2fs_crc_valid(sbi, crc, *cp_block, crc_offset)) {
+               f2fs_msg(sbi->sb, KERN_WARNING, "invalid crc value");
+               return -EINVAL;
+       }
 
-       pre_version = le64_to_cpu(cp_block->checkpoint_ver);
+       *version = cur_cp_version(*cp_block);
+       return 0;
+}
 
-       /* Read the 2nd cp block in this CP pack */
-       cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
-       cp_page_2 = get_meta_page(sbi, cp_addr);
+static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
+                               block_t cp_addr, unsigned long long *version)
+{
+       struct page *cp_page_1 = NULL, *cp_page_2 = NULL;
+       struct f2fs_checkpoint *cp_block = NULL;
+       unsigned long long cur_version = 0, pre_version = 0;
+       int err;
 
-       cp_block = (struct f2fs_checkpoint *)page_address(cp_page_2);
-       crc_offset = le32_to_cpu(cp_block->checksum_offset);
-       if (crc_offset >= blk_size)
-               goto invalid_cp2;
+       err = get_checkpoint_version(sbi, cp_addr, &cp_block,
+                                       &cp_page_1, version);
+       if (err)
+               goto invalid_cp1;
+       pre_version = *version;
 
-       crc = *(unsigned int *)((unsigned char *)cp_block + crc_offset);
-       if (!f2fs_crc_valid(crc, cp_block, crc_offset))
+       cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
+       err = get_checkpoint_version(sbi, cp_addr, &cp_block,
+                                       &cp_page_2, version);
+       if (err)
                goto invalid_cp2;
-
-       cur_version = le64_to_cpu(cp_block->checkpoint_ver);
+       cur_version = *version;
 
        if (cur_version == pre_version) {
                *version = cur_version;
@@ -410,8 +729,11 @@ int get_valid_checkpoint(struct f2fs_sb_info *sbi)
        unsigned long blk_size = sbi->blocksize;
        unsigned long long cp1_version = 0, cp2_version = 0;
        unsigned long long cp_start_blk_no;
+       unsigned int cp_blks = 1 + __cp_payload(sbi);
+       block_t cp_blk_no;
+       int i;
 
-       sbi->ckpt = kzalloc(blk_size, GFP_KERNEL);
+       sbi->ckpt = kzalloc(cp_blks * blk_size, GFP_KERNEL);
        if (!sbi->ckpt)
                return -ENOMEM;
        /*
@@ -422,7 +744,8 @@ int get_valid_checkpoint(struct f2fs_sb_info *sbi)
        cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version);
 
        /* The second checkpoint pack should start at the next segment */
-       cp_start_blk_no += 1 << le32_to_cpu(fsb->log_blocks_per_seg);
+       cp_start_blk_no += ((unsigned long long)1) <<
+                               le32_to_cpu(fsb->log_blocks_per_seg);
        cp2 = validate_checkpoint(sbi, cp_start_blk_no, &cp2_version);
 
        if (cp1 && cp2) {
@@ -441,112 +764,182 @@ int get_valid_checkpoint(struct f2fs_sb_info *sbi)
        cp_block = (struct f2fs_checkpoint *)page_address(cur_page);
        memcpy(sbi->ckpt, cp_block, blk_size);
 
+       /* Sanity checking of checkpoint */
+       if (sanity_check_ckpt(sbi))
+               goto free_fail_no_cp;
+
+       if (cur_page == cp1)
+               sbi->cur_cp_pack = 1;
+       else
+               sbi->cur_cp_pack = 2;
+
+       if (cp_blks <= 1)
+               goto done;
+
+       cp_blk_no = le32_to_cpu(fsb->cp_blkaddr);
+       if (cur_page == cp2)
+               cp_blk_no += 1 << le32_to_cpu(fsb->log_blocks_per_seg);
+
+       for (i = 1; i < cp_blks; i++) {
+               void *sit_bitmap_ptr;
+               unsigned char *ckpt = (unsigned char *)sbi->ckpt;
+
+               cur_page = get_meta_page(sbi, cp_blk_no + i);
+               sit_bitmap_ptr = page_address(cur_page);
+               memcpy(ckpt + i * blk_size, sit_bitmap_ptr, blk_size);
+               f2fs_put_page(cur_page, 1);
+       }
+done:
        f2fs_put_page(cp1, 1);
        f2fs_put_page(cp2, 1);
        return 0;
 
+free_fail_no_cp:
+       f2fs_put_page(cp1, 1);
+       f2fs_put_page(cp2, 1);
 fail_no_cp:
        kfree(sbi->ckpt);
        return -EINVAL;
 }
 
-void set_dirty_dir_page(struct inode *inode, struct page *page)
+static void __add_dirty_inode(struct inode *inode, enum inode_type type)
 {
-       struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
-       struct list_head *head = &sbi->dir_inode_list;
-       struct dir_inode_entry *new;
-       struct list_head *this;
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       int flag = (type == DIR_INODE) ? FI_DIRTY_DIR : FI_DIRTY_FILE;
 
-       if (!S_ISDIR(inode->i_mode))
+       if (is_inode_flag_set(inode, flag))
                return;
-retry:
-       new = kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
-       if (!new) {
-               cond_resched();
-               goto retry;
-       }
-       new->inode = inode;
-       INIT_LIST_HEAD(&new->list);
 
-       spin_lock(&sbi->dir_inode_lock);
-       list_for_each(this, head) {
-               struct dir_inode_entry *entry;
-               entry = list_entry(this, struct dir_inode_entry, list);
-               if (entry->inode == inode) {
-                       kmem_cache_free(inode_entry_slab, new);
-                       goto out;
-               }
-       }
-       list_add_tail(&new->list, head);
-       sbi->n_dirty_dirs++;
+       set_inode_flag(inode, flag);
+       list_add_tail(&F2FS_I(inode)->dirty_list, &sbi->inode_list[type]);
+       stat_inc_dirty_inode(sbi, type);
+}
 
-       BUG_ON(!S_ISDIR(inode->i_mode));
-out:
-       inc_page_count(sbi, F2FS_DIRTY_DENTS);
-       inode_inc_dirty_dents(inode);
-       SetPagePrivate(page);
+static void __remove_dirty_inode(struct inode *inode, enum inode_type type)
+{
+       int flag = (type == DIR_INODE) ? FI_DIRTY_DIR : FI_DIRTY_FILE;
+
+       if (get_dirty_pages(inode) || !is_inode_flag_set(inode, flag))
+               return;
 
-       spin_unlock(&sbi->dir_inode_lock);
+       list_del_init(&F2FS_I(inode)->dirty_list);
+       clear_inode_flag(inode, flag);
+       stat_dec_dirty_inode(F2FS_I_SB(inode), type);
 }
 
-void remove_dirty_dir_inode(struct inode *inode)
+void update_dirty_page(struct inode *inode, struct page *page)
 {
-       struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
-       struct list_head *head = &sbi->dir_inode_list;
-       struct list_head *this;
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       enum inode_type type = S_ISDIR(inode->i_mode) ? DIR_INODE : FILE_INODE;
 
-       if (!S_ISDIR(inode->i_mode))
+       if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
+                       !S_ISLNK(inode->i_mode))
                return;
 
-       spin_lock(&sbi->dir_inode_lock);
-       if (atomic_read(&F2FS_I(inode)->dirty_dents))
-               goto out;
+       spin_lock(&sbi->inode_lock[type]);
+       if (type != FILE_INODE || test_opt(sbi, DATA_FLUSH))
+               __add_dirty_inode(inode, type);
+       inode_inc_dirty_pages(inode);
+       spin_unlock(&sbi->inode_lock[type]);
 
-       list_for_each(this, head) {
-               struct dir_inode_entry *entry;
-               entry = list_entry(this, struct dir_inode_entry, list);
-               if (entry->inode == inode) {
-                       list_del(&entry->list);
-                       kmem_cache_free(inode_entry_slab, entry);
-                       sbi->n_dirty_dirs--;
-                       break;
-               }
-       }
-out:
-       spin_unlock(&sbi->dir_inode_lock);
+       SetPagePrivate(page);
+       f2fs_trace_pid(page);
+}
+
+void remove_dirty_inode(struct inode *inode)
+{
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       enum inode_type type = S_ISDIR(inode->i_mode) ? DIR_INODE : FILE_INODE;
+
+       if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
+                       !S_ISLNK(inode->i_mode))
+               return;
+
+       if (type == FILE_INODE && !test_opt(sbi, DATA_FLUSH))
+               return;
+
+       spin_lock(&sbi->inode_lock[type]);
+       __remove_dirty_inode(inode, type);
+       spin_unlock(&sbi->inode_lock[type]);
 }
 
-void sync_dirty_dir_inodes(struct f2fs_sb_info *sbi)
+int sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type)
 {
-       struct list_head *head = &sbi->dir_inode_list;
-       struct dir_inode_entry *entry;
+       struct list_head *head;
        struct inode *inode;
+       struct f2fs_inode_info *fi;
+       bool is_dir = (type == DIR_INODE);
+
+       trace_f2fs_sync_dirty_inodes_enter(sbi->sb, is_dir,
+                               get_pages(sbi, is_dir ?
+                               F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA));
 retry:
-       spin_lock(&sbi->dir_inode_lock);
+       if (unlikely(f2fs_cp_error(sbi)))
+               return -EIO;
+
+       spin_lock(&sbi->inode_lock[type]);
+
+       head = &sbi->inode_list[type];
        if (list_empty(head)) {
-               spin_unlock(&sbi->dir_inode_lock);
-               return;
+               spin_unlock(&sbi->inode_lock[type]);
+               trace_f2fs_sync_dirty_inodes_exit(sbi->sb, is_dir,
+                               get_pages(sbi, is_dir ?
+                               F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA));
+               return 0;
        }
-       entry = list_entry(head->next, struct dir_inode_entry, list);
-       inode = igrab(entry->inode);
-       spin_unlock(&sbi->dir_inode_lock);
+       fi = list_entry(head->next, struct f2fs_inode_info, dirty_list);
+       inode = igrab(&fi->vfs_inode);
+       spin_unlock(&sbi->inode_lock[type]);
        if (inode) {
-               filemap_flush(inode->i_mapping);
+               filemap_fdatawrite(inode->i_mapping);
                iput(inode);
        } else {
                /*
                 * We should submit bio, since there exist several
                 * dentry pages under writeback in the freeing inode.
                 */
-               f2fs_submit_bio(sbi, DATA, true);
+               f2fs_submit_merged_bio(sbi, DATA, WRITE);
+               cond_resched();
        }
        goto retry;
 }
 
+int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi)
+{
+       struct list_head *head = &sbi->inode_list[DIRTY_META];
+       struct inode *inode;
+       struct f2fs_inode_info *fi;
+       s64 total = get_pages(sbi, F2FS_DIRTY_IMETA);
+
+       while (total--) {
+               if (unlikely(f2fs_cp_error(sbi)))
+                       return -EIO;
+
+               spin_lock(&sbi->inode_lock[DIRTY_META]);
+               if (list_empty(head)) {
+                       spin_unlock(&sbi->inode_lock[DIRTY_META]);
+                       return 0;
+               }
+               fi = list_entry(head->next, struct f2fs_inode_info,
+                                                       gdirty_list);
+               inode = igrab(&fi->vfs_inode);
+               spin_unlock(&sbi->inode_lock[DIRTY_META]);
+               if (inode) {
+                       sync_inode_metadata(inode, 0);
+
+                       /* it's on eviction */
+                       if (is_inode_flag_set(inode, FI_DIRTY_INODE))
+                               update_inode_page(inode);
+                       iput(inode);
+               }
+       }
+       return 0;
+}
+
 /*
  * Freeze all the FS-operations for checkpoint.
  */
-static void block_operations(struct f2fs_sb_info *sbi)
+static int block_operations(struct f2fs_sb_info *sbi)
 {
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL,
@@ -554,54 +947,125 @@ static void block_operations(struct f2fs_sb_info *sbi)
                .for_reclaim = 0,
        };
        struct blk_plug plug;
+       int err = 0;
 
        blk_start_plug(&plug);
 
 retry_flush_dents:
-       mutex_lock_all(sbi);
-
+       f2fs_lock_all(sbi);
        /* write all the dirty dentry pages */
        if (get_pages(sbi, F2FS_DIRTY_DENTS)) {
-               mutex_unlock_all(sbi);
-               sync_dirty_dir_inodes(sbi);
+               f2fs_unlock_all(sbi);
+               err = sync_dirty_inodes(sbi, DIR_INODE);
+               if (err)
+                       goto out;
+               goto retry_flush_dents;
+       }
+
+       if (get_pages(sbi, F2FS_DIRTY_IMETA)) {
+               f2fs_unlock_all(sbi);
+               err = f2fs_sync_inode_meta(sbi);
+               if (err)
+                       goto out;
                goto retry_flush_dents;
        }
 
        /*
-        * POR: we should ensure that there is no dirty node pages
+        * POR: we should ensure that there are no dirty node pages
         * until finishing nat/sit flush.
         */
 retry_flush_nodes:
-       mutex_lock(&sbi->node_write);
+       down_write(&sbi->node_write);
 
        if (get_pages(sbi, F2FS_DIRTY_NODES)) {
-               mutex_unlock(&sbi->node_write);
-               sync_node_pages(sbi, 0, &wbc);
+               up_write(&sbi->node_write);
+               err = sync_node_pages(sbi, &wbc);
+               if (err) {
+                       f2fs_unlock_all(sbi);
+                       goto out;
+               }
                goto retry_flush_nodes;
        }
+out:
        blk_finish_plug(&plug);
+       return err;
 }
 
 static void unblock_operations(struct f2fs_sb_info *sbi)
 {
-       mutex_unlock(&sbi->node_write);
-       mutex_unlock_all(sbi);
+       up_write(&sbi->node_write);
+
+       build_free_nids(sbi, false);
+       f2fs_unlock_all(sbi);
+}
+
+static void wait_on_all_pages_writeback(struct f2fs_sb_info *sbi)
+{
+       DEFINE_WAIT(wait);
+
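+       /*
+        * Poll until every F2FS_WB_CP_DATA page has finished writeback;
+        * the write end_io wakes cp_wait, the 5s timeout is only a fallback.
+        */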
+       for (;;) {
+               prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE);
+
+               if (!get_pages(sbi, F2FS_WB_CP_DATA))
+                       break;
+
+               io_schedule_timeout(5*HZ);
+       }
+       finish_wait(&sbi->cp_wait, &wait);
 }
 
-static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
+static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 {
+       unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num;
        struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
-       nid_t last_nid = 0;
+
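+       /* cp_lock serializes read-modify-write updates of ckpt_flags */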
+       spin_lock(&sbi->cp_lock);
+
+       if (cpc->reason == CP_UMOUNT)
+               __set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
+       else
+               __clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
+
+       if (cpc->reason == CP_FASTBOOT)
+               __set_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);
+       else
+               __clear_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);
+
+       if (orphan_num)
+               __set_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
+       else
+               __clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
+
+       if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
+               __set_ckpt_flags(ckpt, CP_FSCK_FLAG);
+
+       /* set this flag to activate crc|cp_ver for recovery */
+       __set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG);
+
+       spin_unlock(&sbi->cp_lock);
+}
+
+static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
+{
+       struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
+       struct f2fs_nm_info *nm_i = NM_I(sbi);
+       unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num;
+       nid_t last_nid = nm_i->next_scan_nid;
        block_t start_blk;
-       struct page *cp_page;
        unsigned int data_sum_blocks, orphan_blocks;
-       unsigned int crc32 = 0;
-       void *kaddr;
+       __u32 crc32 = 0;
        int i;
+       int cp_payload_blks = __cp_payload(sbi);
+       struct super_block *sb = sbi->sb;
+       struct curseg_info *seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
+       u64 kbytes_written;
 
        /* Flush all the NAT/SIT pages */
-       while (get_pages(sbi, F2FS_DIRTY_META))
+       while (get_pages(sbi, F2FS_DIRTY_META)) {
                sync_meta_pages(sbi, META, LONG_MAX);
+               if (unlikely(f2fs_cp_error(sbi)))
+                       return -EIO;
+       }
 
        next_free_nid(sbi, &last_nid);
 
@@ -612,7 +1076,7 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
        ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi));
        ckpt->valid_block_count = cpu_to_le64(valid_user_blocks(sbi));
        ckpt->free_segment_count = cpu_to_le32(free_segments(sbi));
-       for (i = 0; i < 3; i++) {
+       for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
                ckpt->cur_node_segno[i] =
                        cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_NODE));
                ckpt->cur_node_blkoff[i] =
@@ -620,7 +1084,7 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
                ckpt->alloc_type[i + CURSEG_HOT_NODE] =
                                curseg_alloc_type(sbi, i + CURSEG_HOT_NODE);
        }
-       for (i = 0; i < 3; i++) {
+       for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
                ckpt->cur_data_segno[i] =
                        cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_DATA));
                ckpt->cur_data_blkoff[i] =
@@ -634,145 +1098,225 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
        ckpt->next_free_nid = cpu_to_le32(last_nid);
 
        /* 2 cp + n data seg summary + orphan inode blocks */
-       data_sum_blocks = npages_for_summary_flush(sbi);
-       if (data_sum_blocks < 3)
-               set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
+       data_sum_blocks = npages_for_summary_flush(sbi, false);
+       spin_lock(&sbi->cp_lock);
+       if (data_sum_blocks < NR_CURSEG_DATA_TYPE)
+               __set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
        else
-               clear_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
-
-       orphan_blocks = (sbi->n_orphans + F2FS_ORPHANS_PER_BLOCK - 1)
-                                       / F2FS_ORPHANS_PER_BLOCK;
-       ckpt->cp_pack_start_sum = cpu_to_le32(1 + orphan_blocks);
+               __clear_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
+       spin_unlock(&sbi->cp_lock);
 
-       if (is_umount) {
-               set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
-               ckpt->cp_pack_total_block_count = cpu_to_le32(2 +
-                       data_sum_blocks + orphan_blocks + NR_CURSEG_NODE_TYPE);
-       } else {
-               clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
-               ckpt->cp_pack_total_block_count = cpu_to_le32(2 +
-                       data_sum_blocks + orphan_blocks);
-       }
+       orphan_blocks = GET_ORPHAN_BLOCKS(orphan_num);
+       ckpt->cp_pack_start_sum = cpu_to_le32(1 + cp_payload_blks +
+                       orphan_blocks);
 
-       if (sbi->n_orphans)
-               set_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
+       if (__remain_node_summaries(cpc->reason))
+               ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS +
+                               cp_payload_blks + data_sum_blocks +
+                               orphan_blocks + NR_CURSEG_NODE_TYPE);
        else
-               clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
+               ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS +
+                               cp_payload_blks + data_sum_blocks +
+                               orphan_blocks);
+
+       /* update ckpt flag for checkpoint */
+       update_ckpt_flags(sbi, cpc);
 
        /* update SIT/NAT bitmap */
        get_sit_bitmap(sbi, __bitmap_ptr(sbi, SIT_BITMAP));
        get_nat_bitmap(sbi, __bitmap_ptr(sbi, NAT_BITMAP));
 
-       crc32 = f2fs_crc32(ckpt, le32_to_cpu(ckpt->checksum_offset));
-       *(__le32 *)((unsigned char *)ckpt +
-                               le32_to_cpu(ckpt->checksum_offset))
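+       /* the crc covers the checkpoint up to checksum_offset and is stored there */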
+       crc32 = f2fs_crc32(sbi, ckpt, le32_to_cpu(ckpt->checksum_offset));
+       *((__le32 *)((unsigned char *)ckpt +
+                               le32_to_cpu(ckpt->checksum_offset)))
                                = cpu_to_le32(crc32);
 
-       start_blk = __start_cp_addr(sbi);
+       start_blk = __start_cp_next_addr(sbi);
+
+       /* need to wait for end_io results */
+       wait_on_all_pages_writeback(sbi);
+       if (unlikely(f2fs_cp_error(sbi)))
+               return -EIO;
 
        /* write out checkpoint buffer at block 0 */
-       cp_page = grab_meta_page(sbi, start_blk++);
-       kaddr = page_address(cp_page);
-       memcpy(kaddr, ckpt, (1 << sbi->log_blocksize));
-       set_page_dirty(cp_page);
-       f2fs_put_page(cp_page, 1);
+       update_meta_page(sbi, ckpt, start_blk++);
 
-       if (sbi->n_orphans) {
+       for (i = 1; i < 1 + cp_payload_blks; i++)
+               update_meta_page(sbi, (char *)ckpt + i * F2FS_BLKSIZE,
+                                                       start_blk++);
+
+       if (orphan_num) {
                write_orphan_inodes(sbi, start_blk);
                start_blk += orphan_blocks;
        }
 
        write_data_summaries(sbi, start_blk);
        start_blk += data_sum_blocks;
-       if (is_umount) {
+
+       /* Record write statistics in the hot node summary */
+       kbytes_written = sbi->kbytes_written;
+       if (sb->s_bdev->bd_part)
+               kbytes_written += BD_PART_WRITTEN(sbi);
+
+       seg_i->journal->info.kbytes_written = cpu_to_le64(kbytes_written);
+
+       if (__remain_node_summaries(cpc->reason)) {
                write_node_summaries(sbi, start_blk);
                start_blk += NR_CURSEG_NODE_TYPE;
        }
 
        /* writeout checkpoint block */
-       cp_page = grab_meta_page(sbi, start_blk);
-       kaddr = page_address(cp_page);
-       memcpy(kaddr, ckpt, (1 << sbi->log_blocksize));
-       set_page_dirty(cp_page);
-       f2fs_put_page(cp_page, 1);
+       update_meta_page(sbi, ckpt, start_blk);
 
        /* wait for previously submitted node/meta pages writeback */
-       while (get_pages(sbi, F2FS_WRITEBACK))
-               congestion_wait(BLK_RW_ASYNC, HZ / 50);
+       wait_on_all_pages_writeback(sbi);
+
+       if (unlikely(f2fs_cp_error(sbi)))
+               return -EIO;
 
-       filemap_fdatawait_range(sbi->node_inode->i_mapping, 0, LONG_MAX);
-       filemap_fdatawait_range(sbi->meta_inode->i_mapping, 0, LONG_MAX);
+       filemap_fdatawait_range(NODE_MAPPING(sbi), 0, LLONG_MAX);
+       filemap_fdatawait_range(META_MAPPING(sbi), 0, LLONG_MAX);
 
        /* update user_block_counts */
        sbi->last_valid_block_count = sbi->total_valid_block_count;
-       sbi->alloc_valid_block_count = 0;
+       percpu_counter_set(&sbi->alloc_valid_block_count, 0);
 
        /* Here, we only have one bio having CP pack */
        sync_meta_pages(sbi, META_FLUSH, LONG_MAX);
 
-       if (!is_set_ckpt_flags(ckpt, CP_ERROR_FLAG)) {
-               clear_prefree_segments(sbi);
-               F2FS_RESET_SB_DIRT(sbi);
-       }
+       /* wait for previously submitted meta pages writeback */
+       wait_on_all_pages_writeback(sbi);
+
+       release_ino_entry(sbi, false);
+
+       if (unlikely(f2fs_cp_error(sbi)))
+               return -EIO;
+
+       clear_sbi_flag(sbi, SBI_IS_DIRTY);
+       clear_sbi_flag(sbi, SBI_NEED_CP);
+       __set_cp_next_pack(sbi);
+
+       /*
+        * redirty superblock if metadata like node page or inode cache is
+        * updated during writing checkpoint.
+        */
+       if (get_pages(sbi, F2FS_DIRTY_NODES) ||
+                       get_pages(sbi, F2FS_DIRTY_IMETA))
+               set_sbi_flag(sbi, SBI_IS_DIRTY);
+
+       f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_DENTS));
+
+       return 0;
 }
 
 /*
- * We guarantee that this checkpoint procedure should not fail.
+ * We guarantee that this checkpoint procedure will not fail.
  */
-void write_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
+int write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 {
        struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
        unsigned long long ckpt_ver;
-
-       trace_f2fs_write_checkpoint(sbi->sb, is_umount, "start block_ops");
+       int err = 0;
 
        mutex_lock(&sbi->cp_mutex);
-       block_operations(sbi);
 
-       trace_f2fs_write_checkpoint(sbi->sb, is_umount, "finish block_ops");
+       if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) &&
+               (cpc->reason == CP_FASTBOOT || cpc->reason == CP_SYNC ||
+               (cpc->reason == CP_DISCARD && !sbi->discard_blks)))
+               goto out;
+       if (unlikely(f2fs_cp_error(sbi))) {
+               err = -EIO;
+               goto out;
+       }
+       if (f2fs_readonly(sbi->sb)) {
+               err = -EROFS;
+               goto out;
+       }
 
-       f2fs_submit_bio(sbi, DATA, true);
-       f2fs_submit_bio(sbi, NODE, true);
-       f2fs_submit_bio(sbi, META, true);
+       trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "start block_ops");
+
+       err = block_operations(sbi);
+       if (err)
+               goto out;
+
+       trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish block_ops");
+
+       f2fs_flush_merged_bios(sbi);
+
+       /* this is the case of multiple fstrims without any changes */
+       if (cpc->reason == CP_DISCARD && !is_sbi_flag_set(sbi, SBI_IS_DIRTY)) {
+               f2fs_bug_on(sbi, NM_I(sbi)->dirty_nat_cnt);
+               f2fs_bug_on(sbi, SIT_I(sbi)->dirty_sentries);
+               f2fs_bug_on(sbi, prefree_segments(sbi));
+               flush_sit_entries(sbi, cpc);
+               clear_prefree_segments(sbi, cpc);
+               unblock_operations(sbi);
+               goto out;
+       }
 
        /*
         * update checkpoint pack index
         * Increase the version number so that
         * SIT entries and seg summaries are written at the correct place
         */
-       ckpt_ver = le64_to_cpu(ckpt->checkpoint_ver);
+       ckpt_ver = cur_cp_version(ckpt);
        ckpt->checkpoint_ver = cpu_to_le64(++ckpt_ver);
 
        /* write cached NAT/SIT entries to NAT/SIT area */
        flush_nat_entries(sbi);
-       flush_sit_entries(sbi);
+       flush_sit_entries(sbi, cpc);
 
        /* unlock all the fs_lock[] in do_checkpoint() */
-       do_checkpoint(sbi, is_umount);
+       err = do_checkpoint(sbi, cpc);
+
+       if (err)
+               release_discard_addrs(sbi);
+       else
+               clear_prefree_segments(sbi, cpc);
 
        unblock_operations(sbi);
-       mutex_unlock(&sbi->cp_mutex);
+       stat_inc_cp_count(sbi->stat_info);
 
-       trace_f2fs_write_checkpoint(sbi->sb, is_umount, "finish checkpoint");
+       if (cpc->reason == CP_RECOVERY)
+               f2fs_msg(sbi->sb, KERN_NOTICE,
+                       "checkpoint: version = %llx", ckpt_ver);
+
+       /* do checkpoint periodically */
+       f2fs_update_time(sbi, CP_TIME);
+       trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint");
+out:
+       mutex_unlock(&sbi->cp_mutex);
+       return err;
 }
 
-void init_orphan_info(struct f2fs_sb_info *sbi)
+void init_ino_entry_info(struct f2fs_sb_info *sbi)
 {
-       mutex_init(&sbi->orphan_inode_mutex);
-       INIT_LIST_HEAD(&sbi->orphan_inode_list);
-       sbi->n_orphans = 0;
+       int i;
+
+       for (i = 0; i < MAX_INO_ENTRY; i++) {
+               struct inode_management *im = &sbi->im[i];
+
+               INIT_RADIX_TREE(&im->ino_root, GFP_ATOMIC);
+               spin_lock_init(&im->ino_lock);
+               INIT_LIST_HEAD(&im->ino_list);
+               im->ino_num = 0;
+       }
+
+       sbi->max_orphans = (sbi->blocks_per_seg - F2FS_CP_PACKS -
+                       NR_CURSEG_TYPE - __cp_payload(sbi)) *
+                               F2FS_ORPHANS_PER_BLOCK;
 }
 
 int __init create_checkpoint_caches(void)
 {
-       orphan_entry_slab = f2fs_kmem_cache_create("f2fs_orphan_entry",
-                       sizeof(struct orphan_inode_entry), NULL);
-       if (unlikely(!orphan_entry_slab))
+       ino_entry_slab = f2fs_kmem_cache_create("f2fs_ino_entry",
+                       sizeof(struct ino_entry));
+       if (!ino_entry_slab)
                return -ENOMEM;
-       inode_entry_slab = f2fs_kmem_cache_create("f2fs_dirty_dir_entry",
-                       sizeof(struct dir_inode_entry), NULL);
-       if (unlikely(!inode_entry_slab)) {
-               kmem_cache_destroy(orphan_entry_slab);
+       inode_entry_slab = f2fs_kmem_cache_create("f2fs_inode_entry",
+                       sizeof(struct inode_entry));
+       if (!inode_entry_slab) {
+               kmem_cache_destroy(ino_entry_slab);
                return -ENOMEM;
        }
        return 0;
@@ -780,6 +1324,6 @@ int __init create_checkpoint_caches(void)
 
 void destroy_checkpoint_caches(void)
 {
-       kmem_cache_destroy(orphan_entry_slab);
+       kmem_cache_destroy(ino_entry_slab);
        kmem_cache_destroy(inode_entry_slab);
 }
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 91ff93b0b0f403300f951f0d26fa2698ac535c9a..034ebe3c9e2f5437453c54de36ea4da15a36e02c 100644 (file)
 #include <linux/aio.h>
 #include <linux/writeback.h>
 #include <linux/backing-dev.h>
+#include <linux/pagevec.h>
 #include <linux/blkdev.h>
 #include <linux/bio.h>
 #include <linux/prefetch.h>
+#include <linux/uio.h>
+#include <linux/mm.h>
+#include <linux/memcontrol.h>
+#include <linux/cleancache.h>
 
 #include "f2fs.h"
 #include "node.h"
 #include "segment.h"
+#include "trace.h"
 #include <trace/events/f2fs.h>
 
-/*
- * Lock ordering for the change of data block address:
- * ->data_page
- *  ->node_page
- *    update block addresses in the node page
- */
-static void __set_data_blkaddr(struct dnode_of_data *dn, block_t new_addr)
+static bool __is_cp_guaranteed(struct page *page)
 {
-       struct f2fs_node *rn;
-       __le32 *addr_array;
-       struct page *node_page = dn->node_page;
-       unsigned int ofs_in_node = dn->ofs_in_node;
+       struct address_space *mapping = page->mapping;
+       struct inode *inode;
+       struct f2fs_sb_info *sbi;
 
-       wait_on_page_writeback(node_page);
+       if (!mapping)
+               return false;
 
-       rn = (struct f2fs_node *)page_address(node_page);
+       inode = mapping->host;
+       sbi = F2FS_I_SB(inode);
 
-       /* Get physical address of data block */
-       addr_array = blkaddr_in_node(rn);
-       addr_array[ofs_in_node] = cpu_to_le32(new_addr);
-       set_page_dirty(node_page);
+       if (inode->i_ino == F2FS_META_INO(sbi) ||
+                       inode->i_ino ==  F2FS_NODE_INO(sbi) ||
+                       S_ISDIR(inode->i_mode) ||
+                       is_cold_data(page))
+               return true;
+       return false;
 }
 
-int reserve_new_block(struct dnode_of_data *dn)
+static void f2fs_read_end_io(struct bio *bio, int err)
 {
-       struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
+       struct bio_vec *bvec;
+       int i;
 
-       if (is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))
-               return -EPERM;
-       if (!inc_valid_block_count(sbi, dn->inode, 1))
-               return -ENOSPC;
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+       if (time_to_inject(F2FS_P_SB(bio->bi_io_vec->bv_page), FAULT_IO))
+               err = -EIO;
+#endif
+
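+       /*
+        * For encrypted reads, hand the pages to the crypto context: on
+        * success the pages are decrypted and unlocked asynchronously.
+        */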
+       if (f2fs_bio_encrypted(bio)) {
+               if (err) {
+                       fscrypt_release_ctx(bio->bi_private);
+               } else {
+                       fscrypt_decrypt_bio_pages(bio->bi_private, bio);
+                       return;
+               }
+       }
+
+       bio_for_each_segment_all(bvec, bio, i) {
+               struct page *page = bvec->bv_page;
+
+               if (!err) {
+                       if (!PageUptodate(page))
+                               SetPageUptodate(page);
+               } else {
+                       ClearPageUptodate(page);
+                       SetPageError(page);
+               }
+               unlock_page(page);
+       }
+       bio_put(bio);
+}
+
+static void f2fs_write_end_io(struct bio *bio, int err)
+{
+       struct f2fs_sb_info *sbi = bio->bi_private;
+       struct bio_vec *bvec;
+       int i;
+
+       bio_for_each_segment_all(bvec, bio, i) {
+               struct page *page = bvec->bv_page;
+               enum count_type type = WB_DATA_TYPE(page);
+
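+               /* swap any crypto bounce page back for the original page */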
+               fscrypt_pullback_bio_page(&page, true);
+
+               if (unlikely(err)) {
+                       set_bit(AS_EIO, &page->mapping->flags);
+                       f2fs_stop_checkpoint(sbi, true);
+               }
+               dec_page_count(sbi, type);
+               clear_cold_data(page);
+               end_page_writeback(page);
+       }
+       if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
+                               wq_has_sleeper(&sbi->cp_wait))
+               wake_up(&sbi->cp_wait);
+
+       bio_put(bio);
+}
+
+/*
+ * Map blk_addr to its target device; if a bio is given, also set its
+ * bdev and start sector. Returns the backing block_device.
+ */
+struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
+                               block_t blk_addr, struct bio *bio)
+{
+       struct block_device *bdev = sbi->sb->s_bdev;
+       int i;
+
+       for (i = 0; i < sbi->s_ndevs; i++) {
+               if (FDEV(i).start_blk <= blk_addr &&
+                                       FDEV(i).end_blk >= blk_addr) {
+                       blk_addr -= FDEV(i).start_blk;
+                       bdev = FDEV(i).bdev;
+                       break;
+               }
+       }
+       if (bio) {
+               bio->bi_bdev = bdev;
+               bio->bi_sector = SECTOR_FROM_BLOCK(blk_addr);
+       }
+       return bdev;
+}
 
-       trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);
+int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
+{
+       int i;
 
-       __set_data_blkaddr(dn, NEW_ADDR);
-       dn->data_blkaddr = NEW_ADDR;
-       sync_inode_page(dn);
+       for (i = 0; i < sbi->s_ndevs; i++)
+               if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
+                       return i;
        return 0;
 }
 
-static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
-                                       struct buffer_head *bh_result)
+static bool __same_bdev(struct f2fs_sb_info *sbi,
+                               block_t blk_addr, struct bio *bio)
+{
+       return f2fs_target_device(sbi, blk_addr, NULL) == bio->bi_bdev;
+}
+
+/*
+ * Low-level block read/write IO operations.
+ */
+static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
+                               int npages, bool is_read)
 {
-       struct f2fs_inode_info *fi = F2FS_I(inode);
-       struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
-       pgoff_t start_fofs, end_fofs;
-       block_t start_blkaddr;
+       struct bio *bio;
 
-       read_lock(&fi->ext.ext_lock);
-       if (fi->ext.len == 0) {
-               read_unlock(&fi->ext.ext_lock);
-               return 0;
+       bio = f2fs_bio_alloc(npages);
+
+       f2fs_target_device(sbi, blk_addr, bio);
+       bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
+       bio->bi_private = is_read ? NULL : sbi;
+
+       return bio;
+}
+
+static inline void __submit_bio(struct f2fs_sb_info *sbi, int rw,
+                       struct bio *bio, enum page_type type)
+{
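+       /*
+        * Zoned devices require sequential writes, so flush any plugged
+        * DATA/NODE bios before submitting a new write.
+        */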
+       if (!is_read_io(rw)) {
+               if (f2fs_sb_mounted_blkzoned(sbi->sb) &&
+                       current->plug && (type == DATA || type == NODE))
+                       blk_finish_plug(current->plug);
        }
+       submit_bio(rw, bio);
+}
 
-       sbi->total_hit_ext++;
-       start_fofs = fi->ext.fofs;
-       end_fofs = fi->ext.fofs + fi->ext.len - 1;
-       start_blkaddr = fi->ext.blk_addr;
+static void __submit_merged_bio(struct f2fs_bio_info *io)
+{
+       struct f2fs_io_info *fio = &io->fio;
+
+       if (!io->bio)
+               return;
 
-       if (pgofs >= start_fofs && pgofs <= end_fofs) {
-               unsigned int blkbits = inode->i_sb->s_blocksize_bits;
-               size_t count;
+       if (is_read_io(fio->rw))
+               trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio);
+       else
+               trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);
 
-               clear_buffer_new(bh_result);
-               map_bh(bh_result, inode->i_sb,
-                               start_blkaddr + pgofs - start_fofs);
-               count = end_fofs - pgofs + 1;
-               if (count < (UINT_MAX >> blkbits))
-                       bh_result->b_size = (count << blkbits);
+       __submit_bio(io->sbi, fio->rw, io->bio, fio->type);
+       io->bio = NULL;
+}
+
+static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode,
+                                               struct page *page, nid_t ino)
+{
+       struct bio_vec *bvec;
+       struct page *target;
+       int i;
+
+       if (!io->bio)
+               return false;
+
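+       /* with no inode/page/ino filter, any in-flight bio counts as a match */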
+       if (!inode && !page && !ino)
+               return true;
+
+       bio_for_each_segment_all(bvec, io->bio, i) {
+
+               if (bvec->bv_page->mapping)
+                       target = bvec->bv_page;
                else
-                       bh_result->b_size = UINT_MAX;
+                       target = fscrypt_control_page(bvec->bv_page);
+
+               if (inode && inode == target->mapping->host)
+                       return true;
+               if (page && page == target)
+                       return true;
+               if (ino && ino == ino_of_node(target))
+                       return true;
+       }
 
-               sbi->read_hit_ext++;
-               read_unlock(&fi->ext.ext_lock);
-               return 1;
+       return false;
+}
+
+static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode,
+                                               struct page *page, nid_t ino,
+                                               enum page_type type)
+{
+       enum page_type btype = PAGE_TYPE_OF_BIO(type);
+       struct f2fs_bio_info *io = &sbi->write_io[btype];
+       bool ret;
+
+       down_read(&io->io_rwsem);
+       ret = __has_merged_page(io, inode, page, ino);
+       up_read(&io->io_rwsem);
+       return ret;
+}
+
+static void __f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
+                               struct inode *inode, struct page *page,
+                               nid_t ino, enum page_type type, int rw)
+{
+       enum page_type btype = PAGE_TYPE_OF_BIO(type);
+       struct f2fs_bio_info *io;
+
+       io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];
+
+       down_write(&io->io_rwsem);
+
+       if (!__has_merged_page(io, inode, page, ino))
+               goto out;
+
+       /* change META to META_FLUSH in the checkpoint procedure */
+       if (type >= META_FLUSH) {
+               io->fio.type = META_FLUSH;
+               if (test_opt(sbi, NOBARRIER))
+                       io->fio.rw = WRITE_FLUSH | REQ_META | REQ_PRIO;
+               else
+                       io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
        }
-       read_unlock(&fi->ext.ext_lock);
-       return 0;
+       __submit_merged_bio(io);
+out:
+       up_write(&io->io_rwsem);
 }
 
-void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn)
+void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi, enum page_type type,
+                                                                       int rw)
 {
-       struct f2fs_inode_info *fi = F2FS_I(dn->inode);
-       pgoff_t fofs, start_fofs, end_fofs;
-       block_t start_blkaddr, end_blkaddr;
+       __f2fs_submit_merged_bio(sbi, NULL, NULL, 0, type, rw);
+}
 
-       BUG_ON(blk_addr == NEW_ADDR);
-       fofs = start_bidx_of_node(ofs_of_node(dn->node_page)) + dn->ofs_in_node;
+void f2fs_submit_merged_bio_cond(struct f2fs_sb_info *sbi,
+                               struct inode *inode, struct page *page,
+                               nid_t ino, enum page_type type, int rw)
+{
+       if (has_merged_page(sbi, inode, page, ino, type))
+               __f2fs_submit_merged_bio(sbi, inode, page, ino, type, rw);
+}
 
-       /* Update the page address in the parent node */
-       __set_data_blkaddr(dn, blk_addr);
+void f2fs_flush_merged_bios(struct f2fs_sb_info *sbi)
+{
+       f2fs_submit_merged_bio(sbi, DATA, WRITE);
+       f2fs_submit_merged_bio(sbi, NODE, WRITE);
+       f2fs_submit_merged_bio(sbi, META, WRITE);
+}
 
-       write_lock(&fi->ext.ext_lock);
+/*
+ * Fill the locked page with data located at the block address.
+ * Return the unlocked page.
+ */
+int f2fs_submit_page_bio(struct f2fs_io_info *fio)
+{
+       struct bio *bio;
+       struct page *page = fio->encrypted_page ?
+                       fio->encrypted_page : fio->page;
 
-       start_fofs = fi->ext.fofs;
-       end_fofs = fi->ext.fofs + fi->ext.len - 1;
-       start_blkaddr = fi->ext.blk_addr;
-       end_blkaddr = fi->ext.blk_addr + fi->ext.len - 1;
+       trace_f2fs_submit_page_bio(page, fio);
+       f2fs_trace_ios(fio, 0);
 
-       /* Drop and initialize the matched extent */
-       if (fi->ext.len == 1 && fofs == start_fofs)
-               fi->ext.len = 0;
+       /* Allocate a new bio */
+       bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->rw));
 
-       /* Initial extent */
-       if (fi->ext.len == 0) {
-               if (blk_addr != NULL_ADDR) {
-                       fi->ext.fofs = fofs;
-                       fi->ext.blk_addr = blk_addr;
-                       fi->ext.len = 1;
-               }
-               goto end_update;
+       if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
+               bio_put(bio);
+               return -EFAULT;
        }
 
-       /* Front merge */
-       if (fofs == start_fofs - 1 && blk_addr == start_blkaddr - 1) {
-               fi->ext.fofs--;
-               fi->ext.blk_addr--;
-               fi->ext.len++;
-               goto end_update;
+       __submit_bio(fio->sbi, fio->rw, bio, fio->type);
+       return 0;
+}
+
+void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
+{
+       struct f2fs_sb_info *sbi = fio->sbi;
+       enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
+       struct f2fs_bio_info *io;
+       bool is_read = is_read_io(fio->rw);
+       struct page *bio_page;
+
+       io = is_read ? &sbi->read_io : &sbi->write_io[btype];
+
+       if (fio->old_blkaddr != NEW_ADDR)
+               verify_block_addr(sbi, fio->old_blkaddr);
+       verify_block_addr(sbi, fio->new_blkaddr);
+
+       bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;
+
+       if (!is_read)
+               inc_page_count(sbi, WB_DATA_TYPE(bio_page));
+
+       down_write(&io->io_rwsem);
+
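+       /*
+        * Start a new bio if the block is not contiguous with the pending
+        * one, the rw flags differ, or the target device changed.
+        */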
+       if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
+                       (io->fio.rw != fio->rw) ||
+                       !__same_bdev(sbi, fio->new_blkaddr, io->bio)))
+               __submit_merged_bio(io);
+alloc_new:
+       if (io->bio == NULL) {
+               io->bio = __bio_alloc(sbi, fio->new_blkaddr,
+                                               BIO_MAX_PAGES, is_read);
+               io->fio = *fio;
        }
 
-       /* Back merge */
-       if (fofs == end_fofs + 1 && blk_addr == end_blkaddr + 1) {
-               fi->ext.len++;
-               goto end_update;
+       if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) <
+                                                       PAGE_SIZE) {
+               __submit_merged_bio(io);
+               goto alloc_new;
        }
 
-       /* Split the existing extent */
-       if (fi->ext.len > 1 &&
-               fofs >= start_fofs && fofs <= end_fofs) {
-               if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
-                       fi->ext.len = fofs - start_fofs;
-               } else {
-                       fi->ext.fofs = fofs + 1;
-                       fi->ext.blk_addr = start_blkaddr +
-                                       fofs - start_fofs + 1;
-                       fi->ext.len -= fofs - start_fofs + 1;
+       io->last_block_in_bio = fio->new_blkaddr;
+       f2fs_trace_ios(fio, 0);
+
+       up_write(&io->io_rwsem);
+       trace_f2fs_submit_page_mbio(fio->page, fio);
+}
+
+static void __set_data_blkaddr(struct dnode_of_data *dn)
+{
+       struct f2fs_node *rn = F2FS_NODE(dn->node_page);
+       __le32 *addr_array;
+
+       /* Get physical address of data block */
+       addr_array = blkaddr_in_node(rn);
+       addr_array[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
+}
+
+/*
+ * Lock ordering for the change of data block address:
+ * ->data_page
+ *  ->node_page
+ *    update block addresses in the node page
+ */
+void set_data_blkaddr(struct dnode_of_data *dn)
+{
+       f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
+       __set_data_blkaddr(dn);
+       if (set_page_dirty(dn->node_page))
+               dn->node_changed = true;
+}
+
+void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
+{
+       dn->data_blkaddr = blkaddr;
+       set_data_blkaddr(dn);
+       f2fs_update_extent_cache(dn);
+}
+
+/* dn->ofs_in_node will be returned with up-to-date last block pointer */
+int reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
+{
+       struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
+
+       if (!count)
+               return 0;
+
+       if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
+               return -EPERM;
+       if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
+               return -ENOSPC;
+
+       trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
+                                               dn->ofs_in_node, count);
+
+       f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
+
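+       /* turn each NULL_ADDR slot into NEW_ADDR until count blocks are reserved */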
+       for (; count > 0; dn->ofs_in_node++) {
+               block_t blkaddr =
+                       datablock_addr(dn->node_page, dn->ofs_in_node);
+               if (blkaddr == NULL_ADDR) {
+                       dn->data_blkaddr = NEW_ADDR;
+                       __set_data_blkaddr(dn);
+                       count--;
                }
-               goto end_update;
        }
-       write_unlock(&fi->ext.ext_lock);
-       return;
 
-end_update:
-       write_unlock(&fi->ext.ext_lock);
-       sync_inode_page(dn);
-       return;
+       if (set_page_dirty(dn->node_page))
+               dn->node_changed = true;
+       return 0;
+}
+
+/* Should keep dn->ofs_in_node unchanged */
+int reserve_new_block(struct dnode_of_data *dn)
+{
+       unsigned int ofs_in_node = dn->ofs_in_node;
+       int ret;
+
+       ret = reserve_new_blocks(dn, 1);
+       dn->ofs_in_node = ofs_in_node;
+       return ret;
+}
+
+int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
+{
+       bool need_put = dn->inode_page ? false : true;
+       int err;
+
+       err = get_dnode_of_data(dn, index, ALLOC_NODE);
+       if (err)
+               return err;
+
+       if (dn->data_blkaddr == NULL_ADDR)
+               err = reserve_new_block(dn);
+       if (err || need_put)
+               f2fs_put_dnode(dn);
+       return err;
+}
+
+int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
+{
+       struct extent_info ei;
+       struct inode *inode = dn->inode;
+
+       if (f2fs_lookup_extent_cache(inode, index, &ei)) {
+               dn->data_blkaddr = ei.blk + index - ei.fofs;
+               return 0;
+       }
+
+       return f2fs_reserve_block(dn, index);
 }
 
-struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
+struct page *get_read_data_page(struct inode *inode, pgoff_t index,
+                                               int rw, bool for_write)
 {
-       struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        struct address_space *mapping = inode->i_mapping;
        struct dnode_of_data dn;
        struct page *page;
+       struct extent_info ei;
        int err;
+       struct f2fs_io_info fio = {
+               .sbi = F2FS_I_SB(inode),
+               .type = DATA,
+               .rw = rw,
+               .encrypted_page = NULL,
+       };
 
-       page = find_get_page(mapping, index);
-       if (page && PageUptodate(page))
-               return page;
-       f2fs_put_page(page, 0);
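+       /* encrypted regular files are read via ->readpage for decryption */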
+       if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
+               return read_mapping_page(mapping, index, NULL);
+
+       page = f2fs_grab_cache_page(mapping, index, for_write);
+       if (!page)
+               return ERR_PTR(-ENOMEM);
+
+       if (f2fs_lookup_extent_cache(inode, index, &ei)) {
+               dn.data_blkaddr = ei.blk + index - ei.fofs;
+               goto got_it;
+       }
 
        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
        if (err)
-               return ERR_PTR(err);
+               goto put_err;
        f2fs_put_dnode(&dn);
 
-       if (dn.data_blkaddr == NULL_ADDR)
-               return ERR_PTR(-ENOENT);
-
-       /* By fallocate(), there is no cached page, but with NEW_ADDR */
-       if (dn.data_blkaddr == NEW_ADDR)
-               return ERR_PTR(-EINVAL);
-
-       page = grab_cache_page(mapping, index);
-       if (!page)
-               return ERR_PTR(-ENOMEM);
-
+       if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
+               err = -ENOENT;
+               goto put_err;
+       }
+got_it:
        if (PageUptodate(page)) {
                unlock_page(page);
                return page;
        }
 
-       err = f2fs_readpage(sbi, page, dn.data_blkaddr,
-                                       sync ? READ_SYNC : READA);
-       if (sync) {
-               wait_on_page_locked(page);
-               if (!PageUptodate(page)) {
-                       f2fs_put_page(page, 0);
-                       return ERR_PTR(-EIO);
-               }
+       /*
+        * A new dentry page is allocated but not able to be written, since its
+        * new inode page couldn't be allocated due to -ENOSPC.
+        * In such a case, its blkaddr remains NEW_ADDR.
+        * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
+        */
+       if (dn.data_blkaddr == NEW_ADDR) {
+               zero_user_segment(page, 0, PAGE_SIZE);
+               if (!PageUptodate(page))
+                       SetPageUptodate(page);
+               unlock_page(page);
+               return page;
        }
+
+       fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
+       fio.page = page;
+       err = f2fs_submit_page_bio(&fio);
+       if (err)
+               goto put_err;
        return page;
+
+put_err:
+       f2fs_put_page(page, 1);
+       return ERR_PTR(err);
 }
 
-/*
- * If it tries to access a hole, return an error.
- * Because, the callers, functions in dir.c and GC, should be able to know
- * whether this page exists or not.
- */
-struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
+struct page *find_data_page(struct inode *inode, pgoff_t index)
 {
-       struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        struct address_space *mapping = inode->i_mapping;
-       struct dnode_of_data dn;
        struct page *page;
-       int err;
 
-       set_new_dnode(&dn, inode, NULL, NULL, 0);
-       err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
-       if (err)
-               return ERR_PTR(err);
-       f2fs_put_dnode(&dn);
+       page = find_get_page(mapping, index);
+       if (page && PageUptodate(page))
+               return page;
+       f2fs_put_page(page, 0);
 
-       if (dn.data_blkaddr == NULL_ADDR)
-               return ERR_PTR(-ENOENT);
-repeat:
-       page = grab_cache_page(mapping, index);
-       if (!page)
-               return ERR_PTR(-ENOMEM);
+       page = get_read_data_page(inode, index, READ_SYNC, false);
+       if (IS_ERR(page))
+               return page;
 
        if (PageUptodate(page))
                return page;
 
-       BUG_ON(dn.data_blkaddr == NEW_ADDR);
-       BUG_ON(dn.data_blkaddr == NULL_ADDR);
+       wait_on_page_locked(page);
+       if (unlikely(!PageUptodate(page))) {
+               f2fs_put_page(page, 0);
+               return ERR_PTR(-EIO);
+       }
+       return page;
+}
 
-       err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
-       if (err)
-               return ERR_PTR(err);
+/*
+ * If it tries to access a hole, return an error,
+ * because the callers (functions in dir.c and GC) should be able to know
+ * whether this page exists or not.
+ */
+struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
+                                                       bool for_write)
+{
+       struct address_space *mapping = inode->i_mapping;
+       struct page *page;
+repeat:
+       page = get_read_data_page(inode, index, READ_SYNC, for_write);
+       if (IS_ERR(page))
+               return page;
 
+       /* wait for read completion */
        lock_page(page);
-       if (!PageUptodate(page)) {
+       if (unlikely(page->mapping != mapping)) {
                f2fs_put_page(page, 1);
-               return ERR_PTR(-EIO);
+               goto repeat;
        }
-       if (page->mapping != mapping) {
+       if (unlikely(!PageUptodate(page))) {
                f2fs_put_page(page, 1);
-               goto repeat;
+               return ERR_PTR(-EIO);
        }
        return page;
 }
@@ -272,195 +588,635 @@ repeat:
  * Caller ensures that this data page is never allocated.
  * A new zero-filled data page is allocated in the page cache.
  *
- * Also, caller should grab and release a mutex by calling mutex_lock_op() and
- * mutex_unlock_op().
+ * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
+ * f2fs_unlock_op().
+ * Note that ipage is set only by make_empty_dir, and if any error occurs,
+ * ipage should be released by this function.
  */
-struct page *get_new_data_page(struct inode *inode, pgoff_t index,
-                                               bool new_i_size)
+struct page *get_new_data_page(struct inode *inode,
+               struct page *ipage, pgoff_t index, bool new_i_size)
 {
-       struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        struct address_space *mapping = inode->i_mapping;
        struct page *page;
        struct dnode_of_data dn;
        int err;
 
-       set_new_dnode(&dn, inode, NULL, NULL, 0);
-       err = get_dnode_of_data(&dn, index, ALLOC_NODE);
-       if (err)
-               return ERR_PTR(err);
+       page = f2fs_grab_cache_page(mapping, index, true);
+       if (!page) {
+               /*
+                * before exiting, we should make sure ipage will be released
+                * if any error occurs.
+                */
+               f2fs_put_page(ipage, 1);
+               return ERR_PTR(-ENOMEM);
+       }
 
-       if (dn.data_blkaddr == NULL_ADDR) {
-               if (reserve_new_block(&dn)) {
-                       f2fs_put_dnode(&dn);
-                       return ERR_PTR(-ENOSPC);
-               }
+       set_new_dnode(&dn, inode, ipage, NULL, 0);
+       err = f2fs_reserve_block(&dn, index);
+       if (err) {
+               f2fs_put_page(page, 1);
+               return ERR_PTR(err);
        }
-       f2fs_put_dnode(&dn);
-repeat:
-       page = grab_cache_page(mapping, index);
-       if (!page)
-               return ERR_PTR(-ENOMEM);
+       if (!ipage)
+               f2fs_put_dnode(&dn);
 
        if (PageUptodate(page))
-               return page;
+               goto got_it;
 
        if (dn.data_blkaddr == NEW_ADDR) {
-               zero_user_segment(page, 0, PAGE_CACHE_SIZE);
-               SetPageUptodate(page);
+               zero_user_segment(page, 0, PAGE_SIZE);
+               if (!PageUptodate(page))
+                       SetPageUptodate(page);
        } else {
-               err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
+               f2fs_put_page(page, 1);
+
+               /* if ipage exists, blkaddr should be NEW_ADDR */
+               f2fs_bug_on(F2FS_I_SB(inode), ipage);
+               page = get_lock_data_page(inode, index, true);
+               if (IS_ERR(page))
+                       return page;
+       }
+got_it:
+       if (new_i_size && i_size_read(inode) <
+                               ((loff_t)(index + 1) << PAGE_SHIFT))
+               f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
+       return page;
+}
+
+static int __allocate_data_block(struct dnode_of_data *dn)
+{
+       struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
+       struct f2fs_summary sum;
+       struct node_info ni;
+       pgoff_t fofs;
+       blkcnt_t count = 1;
+
+       if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
+               return -EPERM;
+
+       dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
+       if (dn->data_blkaddr == NEW_ADDR)
+               goto alloc;
+
+       if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
+               return -ENOSPC;
+
+alloc:
+       get_node_info(sbi, dn->nid, &ni);
+       set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
+
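+       /* allocate a block in the warm data log and point the dnode at it */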
+       allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
+                                               &sum, CURSEG_WARM_DATA);
+       set_data_blkaddr(dn);
+
+       /* update i_size */
+       fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
+                                                       dn->ofs_in_node;
+       if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_SHIFT))
+               f2fs_i_size_write(dn->inode,
+                               ((loff_t)(fofs + 1) << PAGE_SHIFT));
+       return 0;
+}
+
+static inline bool __force_buffered_io(struct inode *inode, int rw)
+{
+       return ((f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) ||
+                       (rw == WRITE && test_opt(F2FS_I_SB(inode), LFS)) ||
+                       F2FS_I_SB(inode)->s_ndevs);
+}
+
+int f2fs_preallocate_blocks(struct inode *inode, loff_t pos,
+                                       size_t count, bool dio)
+{
+       struct f2fs_map_blocks map;
+       int err = 0;
+
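+       /* translate the byte range [pos, pos + count) into a block map request */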
+       map.m_lblk = F2FS_BLK_ALIGN(pos);
+       map.m_len = F2FS_BYTES_TO_BLK(pos + count);
+       if (map.m_len > map.m_lblk)
+               map.m_len -= map.m_lblk;
+       else
+               map.m_len = 0;
+
+       map.m_next_pgofs = NULL;
+
+       if (dio) {
+               err = f2fs_convert_inline_inode(inode);
                if (err)
-                       return ERR_PTR(err);
-               lock_page(page);
-               if (!PageUptodate(page)) {
-                       f2fs_put_page(page, 1);
-                       return ERR_PTR(-EIO);
+                       return err;
+               return f2fs_map_blocks(inode, &map, 1,
+                       __force_buffered_io(inode, WRITE) ?
+                               F2FS_GET_BLOCK_PRE_AIO :
+                               F2FS_GET_BLOCK_PRE_DIO);
+       }
+       if (pos + count > MAX_INLINE_DATA) {
+               err = f2fs_convert_inline_inode(inode);
+               if (err)
+                       return err;
+       }
+       if (!f2fs_has_inline_data(inode))
+               return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
+       return err;
+}
+
+/*
+ * f2fs_map_blocks() now supports readahead/bmap/rw direct_IO with the
+ * f2fs_map_blocks structure.
+ * If original data blocks are allocated, then give them to blockdev.
+ * Otherwise,
+ *     a. preallocate requested block addresses
+ *     b. do not use extent cache for better performance
+ *     c. give the block addresses to blockdev
+ */
+int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
+                                               int create, int flag)
+{
+       unsigned int maxblocks = map->m_len;
+       struct dnode_of_data dn;
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       int mode = create ? ALLOC_NODE : LOOKUP_NODE;
+       pgoff_t pgofs, end_offset, end;
+       int err = 0, ofs = 1;
+       unsigned int ofs_in_node, last_ofs_in_node;
+       blkcnt_t prealloc;
+       struct extent_info ei;
+       block_t blkaddr;
+
+       if (!maxblocks)
+               return 0;
+
+       map->m_len = 0;
+       map->m_flags = 0;
+
+       /* it only supports block size == page size */
+       pgofs = (pgoff_t)map->m_lblk;
+       end = pgofs + maxblocks;
+
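+       /* fast path: serve read lookups straight from the extent cache */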
+       if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
+               map->m_pblk = ei.blk + pgofs - ei.fofs;
+               map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
+               map->m_flags = F2FS_MAP_MAPPED;
+               goto out;
+       }
+
+next_dnode:
+       if (create)
+               f2fs_lock_op(sbi);
+
+       /* When reading holes, we need its node page */
+       set_new_dnode(&dn, inode, NULL, NULL, 0);
+       err = get_dnode_of_data(&dn, pgofs, mode);
+       if (err) {
+               if (flag == F2FS_GET_BLOCK_BMAP)
+                       map->m_pblk = 0;
+               if (err == -ENOENT) {
+                       err = 0;
+                       if (map->m_next_pgofs)
+                               *map->m_next_pgofs =
+                                       get_next_page_offset(&dn, pgofs);
                }
-               if (page->mapping != mapping) {
-                       f2fs_put_page(page, 1);
-                       goto repeat;
+               goto unlock_out;
+       }
+
+       prealloc = 0;
+       last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
+       end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
+
+next_block:
+       blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
+
+       if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
+               if (create) {
+                       if (unlikely(f2fs_cp_error(sbi))) {
+                               err = -EIO;
+                               goto sync_out;
+                       }
+                       if (flag == F2FS_GET_BLOCK_PRE_AIO) {
+                               if (blkaddr == NULL_ADDR) {
+                                       prealloc++;
+                                       last_ofs_in_node = dn.ofs_in_node;
+                               }
+                       } else {
+                               err = __allocate_data_block(&dn);
+                               if (!err)
+                                       set_inode_flag(inode, FI_APPEND_WRITE);
+                       }
+                       if (err)
+                               goto sync_out;
+                       map->m_flags = F2FS_MAP_NEW;
+                       blkaddr = dn.data_blkaddr;
+               } else {
+                       if (flag == F2FS_GET_BLOCK_BMAP) {
+                               map->m_pblk = 0;
+                               goto sync_out;
+                       }
+                       if (flag == F2FS_GET_BLOCK_FIEMAP &&
+                                               blkaddr == NULL_ADDR) {
+                               if (map->m_next_pgofs)
+                                       *map->m_next_pgofs = pgofs + 1;
+                       }
+                       if (flag != F2FS_GET_BLOCK_FIEMAP ||
+                                               blkaddr != NEW_ADDR)
+                               goto sync_out;
                }
        }
 
-       if (new_i_size &&
-               i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
-               i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
-               mark_inode_dirty_sync(inode);
+       if (flag == F2FS_GET_BLOCK_PRE_AIO)
+               goto skip;
+
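+       /*
+        * Start a new extent when m_len == 0; otherwise extend it only if
+        * the new block follows m_pblk contiguously, both blocks are
+        * preallocated (NEW_ADDR), or this is a PRE_DIO preallocation pass.
+        * Anything else ends the mapping at sync_out.
+        */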
+       if (map->m_len == 0) {
+               /* preallocated unwritten block should be mapped for fiemap. */
+               if (blkaddr == NEW_ADDR)
+                       map->m_flags |= F2FS_MAP_UNWRITTEN;
+               map->m_flags |= F2FS_MAP_MAPPED;
+
+               map->m_pblk = blkaddr;
+               map->m_len = 1;
+       } else if ((map->m_pblk != NEW_ADDR &&
+                       blkaddr == (map->m_pblk + ofs)) ||
+                       (map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
+                       flag == F2FS_GET_BLOCK_PRE_DIO) {
+               ofs++;
+               map->m_len++;
+       } else {
+               goto sync_out;
        }
-       return page;
+
+skip:
+       dn.ofs_in_node++;
+       pgofs++;
+
+       /* preallocate blocks in batch for one dnode page */
+       if (flag == F2FS_GET_BLOCK_PRE_AIO &&
+                       (pgofs == end || dn.ofs_in_node == end_offset)) {
+
+               dn.ofs_in_node = ofs_in_node;
+               err = reserve_new_blocks(&dn, prealloc);
+               if (err)
+                       goto sync_out;
+
+               map->m_len += dn.ofs_in_node - ofs_in_node;
+               if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
+                       err = -ENOSPC;
+                       goto sync_out;
+               }
+               dn.ofs_in_node = end_offset;
+       }
+
+       if (pgofs >= end)
+               goto sync_out;
+       else if (dn.ofs_in_node < end_offset)
+               goto next_block;
+
+       f2fs_put_dnode(&dn);
+
+       if (create) {
+               f2fs_unlock_op(sbi);
+               f2fs_balance_fs(sbi, dn.node_changed);
+       }
+       goto next_dnode;
+
+sync_out:
+       f2fs_put_dnode(&dn);
+unlock_out:
+       if (create) {
+               f2fs_unlock_op(sbi);
+               f2fs_balance_fs(sbi, dn.node_changed);
+       }
+out:
+       trace_f2fs_map_blocks(inode, map, err);
+       return err;
 }
 
-static void read_end_io(struct bio *bio, int err)
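+/*
+ * Adapter between the buffer_head interface of the generic block layer
+ * and f2fs_map_blocks(): the bh supplies the request size and receives
+ * the resulting block address, state flags and mapped length.
+ */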
+static int __get_data_block(struct inode *inode, sector_t iblock,
+                       struct buffer_head *bh, int create, int flag,
+                       pgoff_t *next_pgofs)
 {
-       const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-       struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+       struct f2fs_map_blocks map;
+       int err;
 
-       do {
-               struct page *page = bvec->bv_page;
+       map.m_lblk = iblock;
+       map.m_len = bh->b_size >> inode->i_blkbits;
+       map.m_next_pgofs = next_pgofs;
 
-               if (--bvec >= bio->bi_io_vec)
-                       prefetchw(&bvec->bv_page->flags);
+       err = f2fs_map_blocks(inode, &map, create, flag);
+       if (!err) {
+               map_bh(bh, inode->i_sb, map.m_pblk);
+               bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
+               bh->b_size = map.m_len << inode->i_blkbits;
+       }
+       return err;
+}
 
-               if (uptodate) {
-                       SetPageUptodate(page);
-               } else {
-                       ClearPageUptodate(page);
-                       SetPageError(page);
-               }
-               unlock_page(page);
-       } while (bvec >= bio->bi_io_vec);
-       kfree(bio->bi_private);
-       bio_put(bio);
+static int get_data_block(struct inode *inode, sector_t iblock,
+                       struct buffer_head *bh_result, int create, int flag,
+                       pgoff_t *next_pgofs)
+{
+       return __get_data_block(inode, iblock, bh_result, create,
+                                                       flag, next_pgofs);
 }
 
-/*
- * Fill the locked page with data located in the block address.
- * Return unlocked page.
- */
-int f2fs_readpage(struct f2fs_sb_info *sbi, struct page *page,
-                                       block_t blk_addr, int type)
+static int get_data_block_dio(struct inode *inode, sector_t iblock,
+                       struct buffer_head *bh_result, int create)
+{
+       return __get_data_block(inode, iblock, bh_result, create,
+                                               F2FS_GET_BLOCK_DIO, NULL);
+}
+
+static int get_data_block_bmap(struct inode *inode, sector_t iblock,
+                       struct buffer_head *bh_result, int create)
+{
+       /* the block number must stay below the f2fs per-file maximum */
+       if (unlikely(iblock >= F2FS_I_SB(inode)->max_file_blocks))
+               return -EFBIG;
+
+       return __get_data_block(inode, iblock, bh_result, create,
+                                               F2FS_GET_BLOCK_BMAP, NULL);
+}
+
+static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
+{
+       return (offset >> inode->i_blkbits);
+}
+
+static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
 {
-       struct block_device *bdev = sbi->sb->s_bdev;
-       struct bio *bio;
+       return (blk << inode->i_blkbits);
+}
 
-       trace_f2fs_readpage(page, blk_addr, type);
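+/*
+ * Walk the file with F2FS_GET_BLOCK_FIEMAP, skipping holes through
+ * next_pgofs and emitting one fiemap extent per contiguous mapped run.
+ */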
+int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+               u64 start, u64 len)
+{
+       struct buffer_head map_bh;
+       sector_t start_blk, last_blk;
+       pgoff_t next_pgofs;
+       u64 logical = 0, phys = 0, size = 0;
+       u32 flags = 0;
+       int ret = 0;
+
+       ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
+       if (ret)
+               return ret;
+
+       if (f2fs_has_inline_data(inode)) {
+               ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
+               if (ret != -EAGAIN)
+                       return ret;
+       }
 
-       down_read(&sbi->bio_sem);
+       inode_lock(inode);
 
-       /* Allocate a new bio */
-       bio = f2fs_bio_alloc(bdev, 1);
+       if (logical_to_blk(inode, len) == 0)
+               len = blk_to_logical(inode, 1);
 
-       /* Initialize the bio */
-       bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
-       bio->bi_end_io = read_end_io;
+       start_blk = logical_to_blk(inode, start);
+       last_blk = logical_to_blk(inode, start + len - 1);
 
-       if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
-               kfree(bio->bi_private);
-               bio_put(bio);
-               up_read(&sbi->bio_sem);
-               f2fs_put_page(page, 1);
-               return -EFAULT;
+next:
+       memset(&map_bh, 0, sizeof(struct buffer_head));
+       map_bh.b_size = len;
+
+       ret = get_data_block(inode, start_blk, &map_bh, 0,
+                                       F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
+       if (ret)
+               goto out;
+
+       /* HOLE */
+       if (!buffer_mapped(&map_bh)) {
+               start_blk = next_pgofs;
+
+               if (blk_to_logical(inode, start_blk) < blk_to_logical(inode,
+                                       F2FS_I_SB(inode)->max_file_blocks))
+                       goto prep_next;
+
+               flags |= FIEMAP_EXTENT_LAST;
        }
 
-       submit_bio(type, bio);
-       up_read(&sbi->bio_sem);
-       return 0;
+       if (size) {
+               if (f2fs_encrypted_inode(inode))
+                       flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;
+
+               ret = fiemap_fill_next_extent(fieinfo, logical,
+                               phys, size, flags);
+       }
+
+       if (start_blk > last_blk || ret)
+               goto out;
+
+       logical = blk_to_logical(inode, start_blk);
+       phys = blk_to_logical(inode, map_bh.b_blocknr);
+       size = map_bh.b_size;
+       flags = 0;
+       if (buffer_unwritten(&map_bh))
+               flags = FIEMAP_EXTENT_UNWRITTEN;
+
+       start_blk += logical_to_blk(inode, size);
+
+prep_next:
+       cond_resched();
+       if (fatal_signal_pending(current))
+               ret = -EINTR;
+       else
+               goto next;
+out:
+       if (ret == 1)
+               ret = 0;
+
+       inode_unlock(inode);
+       return ret;
 }
 
-/*
- * This function should be used by the data read flow only where it
- * does not check the "create" flag that indicates block allocation.
- * The reason for this special functionality is to exploit VFS readahead
- * mechanism.
- */
-static int get_data_block_ro(struct inode *inode, sector_t iblock,
-                       struct buffer_head *bh_result, int create)
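+/*
+ * Allocate a read bio aimed at blkaddr; for encrypted regular files a
+ * decryption context is attached as bi_private so that f2fs_read_end_io
+ * can decrypt the data once the read completes.
+ */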
+static struct bio *f2fs_grab_bio(struct inode *inode, block_t blkaddr,
+                                unsigned nr_pages)
 {
-       unsigned int blkbits = inode->i_sb->s_blocksize_bits;
-       unsigned maxblocks = bh_result->b_size >> blkbits;
-       struct dnode_of_data dn;
-       pgoff_t pgofs;
-       int err;
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       struct fscrypt_ctx *ctx = NULL;
+       struct bio *bio;
 
-       /* Get the page offset from the block offset(iblock) */
-       pgofs = (pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits));
+       if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
+               ctx = fscrypt_get_ctx(inode, GFP_NOFS);
+               if (IS_ERR(ctx))
+                       return ERR_CAST(ctx);
 
-       if (check_extent_cache(inode, pgofs, bh_result)) {
-               trace_f2fs_get_data_block(inode, iblock, bh_result, 0);
-               return 0;
+               /* wait for the page to be moved by cleaning */
+               f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);
        }
 
-       /* When reading holes, we need its node page */
-       set_new_dnode(&dn, inode, NULL, NULL, 0);
-       err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);
-       if (err) {
-               trace_f2fs_get_data_block(inode, iblock, bh_result, err);
-               return (err == -ENOENT) ? 0 : err;
+       bio = bio_alloc(GFP_KERNEL, min_t(int, nr_pages, BIO_MAX_PAGES));
+       if (!bio) {
+               if (ctx)
+                       fscrypt_release_ctx(ctx);
+               return ERR_PTR(-ENOMEM);
        }
+       f2fs_target_device(sbi, blkaddr, bio);
+       bio->bi_end_io = f2fs_read_end_io;
+       bio->bi_private = ctx;
 
-       /* It does not support data allocation */
-       BUG_ON(create);
+       return bio;
+}
 
-       if (dn.data_blkaddr != NEW_ADDR && dn.data_blkaddr != NULL_ADDR) {
-               int i;
-               unsigned int end_offset;
+/*
+ * This function was originally taken from fs/mpage.c, and customized for f2fs.
+ * Major change was from block_size == page_size in f2fs by default.
+ */
+static int f2fs_mpage_readpages(struct address_space *mapping,
+                       struct list_head *pages, struct page *page,
+                       unsigned nr_pages)
+{
+       struct bio *bio = NULL;
+       unsigned page_idx;
+       sector_t last_block_in_bio = 0;
+       struct inode *inode = mapping->host;
+       const unsigned blkbits = inode->i_blkbits;
+       const unsigned blocksize = 1 << blkbits;
+       sector_t block_in_file;
+       sector_t last_block;
+       sector_t last_block_in_file;
+       sector_t block_nr;
+       struct f2fs_map_blocks map;
+
+       map.m_pblk = 0;
+       map.m_lblk = 0;
+       map.m_len = 0;
+       map.m_flags = 0;
+       map.m_next_pgofs = NULL;
+
+       for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {
+
+               if (pages) {
+                       page = list_entry(pages->prev, struct page, lru);
+
+                       /* prefetch once the page pointer is known to be valid */
+                       prefetchw(&page->flags);
+                       list_del(&page->lru);
+                       if (add_to_page_cache_lru(page, mapping,
+                                                 page->index, GFP_KERNEL))
+                               goto next_page;
+               }
+
+               block_in_file = (sector_t)page->index;
+               last_block = block_in_file + nr_pages;
+               last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
+                                                               blkbits;
+               if (last_block > last_block_in_file)
+                       last_block = last_block_in_file;
+
+               /*
+                * Map blocks using the previous result first.
+                */
+               if ((map.m_flags & F2FS_MAP_MAPPED) &&
+                               block_in_file > map.m_lblk &&
+                               block_in_file < (map.m_lblk + map.m_len))
+                       goto got_it;
+
+               /*
+                * Then do more f2fs_map_blocks() calls until we are
+                * done with this page.
+                */
+               map.m_flags = 0;
+
+               if (block_in_file < last_block) {
+                       map.m_lblk = block_in_file;
+                       map.m_len = last_block - block_in_file;
+
+                       if (f2fs_map_blocks(inode, &map, 0,
+                                               F2FS_GET_BLOCK_READ))
+                               goto set_error_page;
+               }
+got_it:
+               if ((map.m_flags & F2FS_MAP_MAPPED)) {
+                       block_nr = map.m_pblk + block_in_file - map.m_lblk;
+                       SetPageMappedToDisk(page);
+
+                       if (!PageUptodate(page) && !cleancache_get_page(page)) {
+                               SetPageUptodate(page);
+                               goto confused;
+                       }
+               } else {
+                       zero_user_segment(page, 0, PAGE_SIZE);
+                       if (!PageUptodate(page))
+                               SetPageUptodate(page);
+                       unlock_page(page);
+                       goto next_page;
+               }
 
-               end_offset = IS_INODE(dn.node_page) ?
-                               ADDRS_PER_INODE :
-                               ADDRS_PER_BLOCK;
+               /*
+                * This page will go to BIO.  Do we need to send this
+                * BIO off first?
+                */
+               if (bio && (last_block_in_bio != block_nr - 1 ||
+                       !__same_bdev(F2FS_I_SB(inode), block_nr, bio))) {
+submit_and_realloc:
+                       __submit_bio(F2FS_I_SB(inode), READ, bio, DATA);
+                       bio = NULL;
+               }
+               if (bio == NULL) {
+                       bio = f2fs_grab_bio(inode, block_nr, nr_pages);
+                       if (IS_ERR(bio)) {
+                               bio = NULL;
+                               goto set_error_page;
+                       }
+               }
 
-               clear_buffer_new(bh_result);
+               if (bio_add_page(bio, page, blocksize, 0) < blocksize)
+                       goto submit_and_realloc;
 
-               /* Give more consecutive addresses for the read ahead */
-               for (i = 0; i < end_offset - dn.ofs_in_node; i++)
-                       if (((datablock_addr(dn.node_page,
-                                                       dn.ofs_in_node + i))
-                               != (dn.data_blkaddr + i)) || maxblocks == i)
-                               break;
-               map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
-               bh_result->b_size = (i << blkbits);
+               last_block_in_bio = block_nr;
+               goto next_page;
+set_error_page:
+               SetPageError(page);
+               zero_user_segment(page, 0, PAGE_SIZE);
+               unlock_page(page);
+               goto next_page;
+confused:
+               if (bio) {
+                       __submit_bio(F2FS_I_SB(inode), READ, bio, DATA);
+                       bio = NULL;
+               }
+               unlock_page(page);
+next_page:
+               if (pages)
+                       put_page(page);
        }
-       f2fs_put_dnode(&dn);
-       trace_f2fs_get_data_block(inode, iblock, bh_result, 0);
+       BUG_ON(pages && !list_empty(pages));
+       if (bio)
+               __submit_bio(F2FS_I_SB(inode), READ, bio, DATA);
        return 0;
 }
 
 static int f2fs_read_data_page(struct file *file, struct page *page)
 {
-       return mpage_readpage(page, get_data_block_ro);
+       struct inode *inode = page->mapping->host;
+       int ret = -EAGAIN;
+
+       trace_f2fs_readpage(page, DATA);
+
+       /* If the file has inline data, try to read it directly */
+       if (f2fs_has_inline_data(inode))
+               ret = f2fs_read_inline_data(inode, page);
+       if (ret == -EAGAIN)
+               ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
+       return ret;
 }
 
 static int f2fs_read_data_pages(struct file *file,
                        struct address_space *mapping,
                        struct list_head *pages, unsigned nr_pages)
 {
-       return mpage_readpages(mapping, pages, nr_pages, get_data_block_ro);
+       struct inode *inode = file->f_mapping->host;
+       struct page *page = list_entry(pages->prev, struct page, lru);
+
+       trace_f2fs_readpages(inode, page, nr_pages);
+
+       /* If the file has inline data, skip readpages */
+       if (f2fs_has_inline_data(inode))
+               return 0;
+
+       return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
 }
 
-int do_write_data_page(struct page *page)
+int do_write_data_page(struct f2fs_io_info *fio)
 {
+       struct page *page = fio->page;
        struct inode *inode = page->mapping->host;
-       block_t old_blk_addr, new_blk_addr;
        struct dnode_of_data dn;
        int err = 0;
 
@@ -469,11 +1225,36 @@ int do_write_data_page(struct page *page)
        if (err)
                return err;
 
-       old_blk_addr = dn.data_blkaddr;
+       fio->old_blkaddr = dn.data_blkaddr;
 
        /* This page is already truncated */
-       if (old_blk_addr == NULL_ADDR)
+       if (fio->old_blkaddr == NULL_ADDR) {
+               ClearPageUptodate(page);
                goto out_writepage;
+       }
+
+       if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
+               gfp_t gfp_flags = GFP_NOFS;
+
+               /* wait for GCed encrypted page writeback */
+               f2fs_wait_on_encrypted_page_writeback(F2FS_I_SB(inode),
+                                                       fio->old_blkaddr);
+retry_encrypt:
+               fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
+                                                               gfp_flags);
+               if (IS_ERR(fio->encrypted_page)) {
+                       err = PTR_ERR(fio->encrypted_page);
+                       if (err == -ENOMEM) {
+                               /* flush pending ios and wait for a while */
+                               f2fs_flush_merged_bios(F2FS_I_SB(inode));
+                               congestion_wait(BLK_RW_ASYNC, HZ/50);
+                               gfp_flags |= __GFP_NOFAIL;
+                               err = 0;
+                               goto retry_encrypt;
+                       }
+                       goto out_writepage;
+               }
+       }
 
        set_page_writeback(page);
 
@@ -481,14 +1262,19 @@ int do_write_data_page(struct page *page)
         * If current allocation needs SSR,
         * it had better in-place writes for updated data.
         */
-       if (old_blk_addr != NEW_ADDR && !is_cold_data(page) &&
-                               need_inplace_update(inode)) {
-               rewrite_data_page(F2FS_SB(inode->i_sb), page,
-                                               old_blk_addr);
+       if (unlikely(fio->old_blkaddr != NEW_ADDR &&
+                       !is_cold_data(page) &&
+                       !IS_ATOMIC_WRITTEN_PAGE(page) &&
+                       need_inplace_update(inode))) {
+               rewrite_data_page(fio);
+               set_inode_flag(inode, FI_UPDATE_WRITE);
+               trace_f2fs_do_write_data_page(page, IPU);
        } else {
-               write_data_page(inode, page, &dn,
-                               old_blk_addr, &new_blk_addr);
-               update_extent_cache(new_blk_addr, &dn);
+               write_data_page(&dn, fio);
+               trace_f2fs_do_write_data_page(page, OPU);
+               set_inode_flag(inode, FI_APPEND_WRITE);
+               if (page->index == 0)
+                       set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
        }
 out_writepage:
        f2fs_put_dnode(&dn);
@@ -499,13 +1285,23 @@ static int f2fs_write_data_page(struct page *page,
                                        struct writeback_control *wbc)
 {
        struct inode *inode = page->mapping->host;
-       struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        loff_t i_size = i_size_read(inode);
        const pgoff_t end_index = ((unsigned long long) i_size)
-                                                       >> PAGE_CACHE_SHIFT;
-       unsigned offset;
+                                                       >> PAGE_SHIFT;
+       loff_t psize = (page->index + 1) << PAGE_SHIFT;
+       unsigned offset = 0;
        bool need_balance_fs = false;
        int err = 0;
+       struct f2fs_io_info fio = {
+               .sbi = sbi,
+               .type = DATA,
+               .rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
+               .page = page,
+               .encrypted_page = NULL,
+       };
+
+       trace_f2fs_writepage(page, DATA);
 
        if (page->index < end_index)
                goto write;
@@ -514,62 +1310,208 @@ static int f2fs_write_data_page(struct page *page,
         * If the offset is out-of-range of file size,
         * this page does not have to be written to disk.
         */
-       offset = i_size & (PAGE_CACHE_SIZE - 1);
-       if ((page->index >= end_index + 1) || !offset) {
-               if (S_ISDIR(inode->i_mode)) {
-                       dec_page_count(sbi, F2FS_DIRTY_DENTS);
-                       inode_dec_dirty_dents(inode);
-               }
+       offset = i_size & (PAGE_SIZE - 1);
+       if ((page->index >= end_index + 1) || !offset)
                goto out;
-       }
 
-       zero_user_segment(page, offset, PAGE_CACHE_SIZE);
+       zero_user_segment(page, offset, PAGE_SIZE);
 write:
-       if (sbi->por_doing) {
-               err = AOP_WRITEPAGE_ACTIVATE;
+       if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
                goto redirty_out;
+       if (f2fs_is_drop_cache(inode))
+               goto out;
+       /* we should not write the 0'th page, which holds the journal header */
+       if (f2fs_is_volatile_file(inode) && (!page->index ||
+                       (!wbc->for_reclaim &&
+                       available_free_memory(sbi, BASE_CHECK))))
+               goto redirty_out;
+
+       /* we should bypass data pages to let the kworker jobs proceed */
+       if (unlikely(f2fs_cp_error(sbi))) {
+               mapping_set_error(page->mapping, -EIO);
+               goto out;
        }
 
        /* Dentry blocks are controlled by checkpoint */
        if (S_ISDIR(inode->i_mode)) {
-               dec_page_count(sbi, F2FS_DIRTY_DENTS);
-               inode_dec_dirty_dents(inode);
-               err = do_write_data_page(page);
-       } else {
-               int ilock = mutex_lock_op(sbi);
-               err = do_write_data_page(page);
-               mutex_unlock_op(sbi, ilock);
-               need_balance_fs = true;
+               err = do_write_data_page(&fio);
+               goto done;
        }
-       if (err == -ENOENT)
-               goto out;
-       else if (err)
+
+       if (!wbc->for_reclaim)
+               need_balance_fs = true;
+       else if (has_not_enough_free_secs(sbi, 0, 0))
                goto redirty_out;
 
-       if (wbc->for_reclaim)
-               f2fs_submit_bio(sbi, DATA, true);
+       err = -EAGAIN;
+       f2fs_lock_op(sbi);
+       if (f2fs_has_inline_data(inode))
+               err = f2fs_write_inline_data(inode, page);
+       if (err == -EAGAIN)
+               err = do_write_data_page(&fio);
+       if (F2FS_I(inode)->last_disk_size < psize)
+               F2FS_I(inode)->last_disk_size = psize;
+       f2fs_unlock_op(sbi);
+done:
+       if (err && err != -ENOENT)
+               goto redirty_out;
 
-       clear_cold_data(page);
 out:
+       inode_dec_dirty_pages(inode);
+       if (err)
+               ClearPageUptodate(page);
+
+       if (wbc->for_reclaim) {
+               f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, DATA, WRITE);
+               remove_dirty_inode(inode);
+       }
+
        unlock_page(page);
-       if (need_balance_fs)
-               f2fs_balance_fs(sbi);
+       f2fs_balance_fs(sbi, need_balance_fs);
+
+       if (unlikely(f2fs_cp_error(sbi)))
+               f2fs_submit_merged_bio(sbi, DATA, WRITE);
+
        return 0;
 
 redirty_out:
-       wbc->pages_skipped++;
-       set_page_dirty(page);
+       redirty_page_for_writepage(wbc, page);
+       if (!err)
+               return AOP_WRITEPAGE_ACTIVATE;
+       unlock_page(page);
        return err;
 }
 
-#define MAX_DESIRED_PAGES_WP   4096
-
-static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
-                       void *data)
+/*
+ * This function was copied from write_cache_pages in mm/page-writeback.c.
+ * The major change is that the write step for cold data pages is done
+ * separately from that for warm/hot data pages.
+ */
+static int f2fs_write_cache_pages(struct address_space *mapping,
+                                       struct writeback_control *wbc)
 {
-       struct address_space *mapping = data;
-       int ret = mapping->a_ops->writepage(page, wbc);
-       mapping_set_error(mapping, ret);
+       int ret = 0;
+       int done = 0;
+       struct pagevec pvec;
+       int nr_pages;
+       pgoff_t uninitialized_var(writeback_index);
+       pgoff_t index;
+       pgoff_t end;            /* Inclusive */
+       pgoff_t done_index;
+       int cycled;
+       int range_whole = 0;
+       int tag;
+       int nwritten = 0;
+
+       pagevec_init(&pvec, 0);
+
+       if (wbc->range_cyclic) {
+               writeback_index = mapping->writeback_index; /* prev offset */
+               index = writeback_index;
+               if (index == 0)
+                       cycled = 1;
+               else
+                       cycled = 0;
+               end = -1;
+       } else {
+               index = wbc->range_start >> PAGE_SHIFT;
+               end = wbc->range_end >> PAGE_SHIFT;
+               if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
+                       range_whole = 1;
+               cycled = 1; /* ignore range_cyclic tests */
+       }
+       if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
+               tag = PAGECACHE_TAG_TOWRITE;
+       else
+               tag = PAGECACHE_TAG_DIRTY;
+retry:
+       if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
+               tag_pages_for_writeback(mapping, index, end);
+       done_index = index;
+       while (!done && (index <= end)) {
+               int i;
+
+               nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
+                             min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
+               if (nr_pages == 0)
+                       break;
+
+               for (i = 0; i < nr_pages; i++) {
+                       struct page *page = pvec.pages[i];
+
+                       if (page->index > end) {
+                               done = 1;
+                               break;
+                       }
+
+                       done_index = page->index;
+
+                       lock_page(page);
+
+                       if (unlikely(page->mapping != mapping)) {
+continue_unlock:
+                               unlock_page(page);
+                               continue;
+                       }
+
+                       if (!PageDirty(page)) {
+                               /* someone wrote it for us */
+                               goto continue_unlock;
+                       }
+
+                       if (PageWriteback(page)) {
+                               if (wbc->sync_mode != WB_SYNC_NONE)
+                                       f2fs_wait_on_page_writeback(page,
+                                                               DATA, true);
+                               else
+                                       goto continue_unlock;
+                       }
+
+                       BUG_ON(PageWriteback(page));
+                       if (!clear_page_dirty_for_io(page))
+                               goto continue_unlock;
+
+                       ret = mapping->a_ops->writepage(page, wbc);
+                       if (unlikely(ret)) {
+                               /*
+                                * keep nr_to_write, since vfs uses this to
+                                * get # of written pages.
+                                */
+                               if (ret == AOP_WRITEPAGE_ACTIVATE) {
+                                       unlock_page(page);
+                                       ret = 0;
+                                       continue;
+                               }
+                               done_index = page->index + 1;
+                               done = 1;
+                               break;
+                       } else {
+                               nwritten++;
+                       }
+
+                       if (--wbc->nr_to_write <= 0 &&
+                           wbc->sync_mode == WB_SYNC_NONE) {
+                               done = 1;
+                               break;
+                       }
+               }
+               pagevec_release(&pvec);
+               cond_resched();
+       }
+
+       if (!cycled && !done) {
+               cycled = 1;
+               index = 0;
+               end = writeback_index - 1;
+               goto retry;
+       }
+       if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
+               mapping->writeback_index = done_index;
+
+       if (nwritten)
+               f2fs_submit_merged_bio_cond(F2FS_M_SB(mapping), mapping->host,
+                                                       NULL, 0, DATA, WRITE);
+
        return ret;
 }
 
@@ -577,34 +1519,134 @@ static int f2fs_write_data_pages(struct address_space *mapping,
                            struct writeback_control *wbc)
 {
        struct inode *inode = mapping->host;
-       struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
-       bool locked = false;
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       struct blk_plug plug;
        int ret;
-       long excess_nrtw = 0, desired_nrtw;
 
        /* deal with chardevs and other special file */
        if (!mapping->a_ops->writepage)
                return 0;
 
-       if (wbc->nr_to_write < MAX_DESIRED_PAGES_WP) {
-               desired_nrtw = MAX_DESIRED_PAGES_WP;
-               excess_nrtw = desired_nrtw - wbc->nr_to_write;
-               wbc->nr_to_write = desired_nrtw;
+       /* skip writing if there is no dirty page in this inode */
+       if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
+               return 0;
+
+       if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
+                       get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
+                       available_free_memory(sbi, DIRTY_DENTS))
+               goto skip_write;
+
+       /* skip writing during file defragment */
+       if (is_inode_flag_set(inode, FI_DO_DEFRAG))
+               goto skip_write;
+
+       /* during POR, we don't need to trigger writepage at all. */
+       if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
+               goto skip_write;
+
+       trace_f2fs_writepages(mapping->host, wbc, DATA);
+
+       blk_start_plug(&plug);
+       ret = f2fs_write_cache_pages(mapping, wbc);
+       blk_finish_plug(&plug);
+       /*
+        * if some pages were truncated, we cannot guarantee that
+        * mapping->host can still detect the pending bios.
+        */
+
+       remove_dirty_inode(inode);
+       return ret;
+
+skip_write:
+       wbc->pages_skipped += get_dirty_pages(inode);
+       trace_f2fs_writepages(mapping->host, wbc, DATA);
+       return 0;
+}
+
+static void f2fs_write_failed(struct address_space *mapping, loff_t to)
+{
+       struct inode *inode = mapping->host;
+       loff_t i_size = i_size_read(inode);
+
+       if (to > i_size) {
+               truncate_pagecache(inode, 0, i_size);
+               truncate_blocks(inode, i_size, true);
        }
+}
+
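+/*
+ * Resolve the block behind @page for write_begin: read or convert inline
+ * data, try the extent cache, and take f2fs_lock_op() only on the paths
+ * that may actually allocate a new block.
+ */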
+static int prepare_write_begin(struct f2fs_sb_info *sbi,
+                       struct page *page, loff_t pos, unsigned len,
+                       block_t *blk_addr, bool *node_changed)
+{
+       struct inode *inode = page->mapping->host;
+       pgoff_t index = page->index;
+       struct dnode_of_data dn;
+       struct page *ipage;
+       bool locked = false;
+       struct extent_info ei;
+       int err = 0;
+
+       /*
+        * we already allocated all the blocks, so we don't need to get
+        * the block addresses when there is no need to fill the page.
+        */
+       if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE)
+               return 0;
 
-       if (!S_ISDIR(inode->i_mode)) {
-               mutex_lock(&sbi->writepages);
+       if (f2fs_has_inline_data(inode) ||
+                       (pos & PAGE_MASK) >= i_size_read(inode)) {
+               f2fs_lock_op(sbi);
                locked = true;
        }
-       ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
-       if (locked)
-               mutex_unlock(&sbi->writepages);
-       f2fs_submit_bio(sbi, DATA, (wbc->sync_mode == WB_SYNC_ALL));
+restart:
+       /* check inline_data */
+       ipage = get_node_page(sbi, inode->i_ino);
+       if (IS_ERR(ipage)) {
+               err = PTR_ERR(ipage);
+               goto unlock_out;
+       }
 
-       remove_dirty_dir_inode(inode);
+       set_new_dnode(&dn, inode, ipage, ipage, 0);
 
-       wbc->nr_to_write -= excess_nrtw;
-       return ret;
+       if (f2fs_has_inline_data(inode)) {
+               if (pos + len <= MAX_INLINE_DATA) {
+                       read_inline_data(page, ipage);
+                       set_inode_flag(inode, FI_DATA_EXIST);
+                       if (inode->i_nlink)
+                               set_inline_node(ipage);
+               } else {
+                       err = f2fs_convert_inline_page(&dn, page);
+                       if (err)
+                               goto out;
+                       if (dn.data_blkaddr == NULL_ADDR)
+                               err = f2fs_get_block(&dn, index);
+               }
+       } else if (locked) {
+               err = f2fs_get_block(&dn, index);
+       } else {
+               if (f2fs_lookup_extent_cache(inode, index, &ei)) {
+                       dn.data_blkaddr = ei.blk + index - ei.fofs;
+               } else {
+                       /* hole case */
+                       err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
+                       if (err || dn.data_blkaddr == NULL_ADDR) {
+                               f2fs_put_dnode(&dn);
+                               f2fs_lock_op(sbi);
+                               locked = true;
+                               goto restart;
+                       }
+               }
+       }
+
+       /* convert_inline_page can make node_changed */
+       *blk_addr = dn.data_blkaddr;
+       *node_changed = dn.node_changed;
+out:
+       f2fs_put_dnode(&dn);
+unlock_out:
+       if (locked)
+               f2fs_unlock_op(sbi);
+       return err;
 }
 
 static int f2fs_write_begin(struct file *file, struct address_space *mapping,
@@ -612,118 +1654,298 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
                struct page **pagep, void **fsdata)
 {
        struct inode *inode = mapping->host;
-       struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
-       struct page *page;
-       pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
-       struct dnode_of_data dn;
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       struct page *page = NULL;
+       pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
+       bool need_balance = false;
+       block_t blkaddr = NULL_ADDR;
        int err = 0;
-       int ilock;
 
-       /* for nobh_write_end */
-       *fsdata = NULL;
+       trace_f2fs_write_begin(inode, pos, len, flags);
 
-       f2fs_balance_fs(sbi);
+       /*
+        * We should check this at this moment to avoid deadlock on inode page
+        * and #0 page. The locking rule for inline_data conversion should be:
+        * lock_page(page #0) -> lock_page(inode_page)
+        */
+       if (index != 0) {
+               err = f2fs_convert_inline_inode(inode);
+               if (err)
+                       goto fail;
+       }
 repeat:
        page = grab_cache_page_write_begin(mapping, index, flags);
-       if (!page)
-               return -ENOMEM;
-       *pagep = page;
+       if (!page) {
+               err = -ENOMEM;
+               goto fail;
+       }
 
-       ilock = mutex_lock_op(sbi);
+       *pagep = page;
 
-       set_new_dnode(&dn, inode, NULL, NULL, 0);
-       err = get_dnode_of_data(&dn, index, ALLOC_NODE);
+       err = prepare_write_begin(sbi, page, pos, len,
+                                       &blkaddr, &need_balance);
        if (err)
-               goto err;
+               goto fail;
 
-       if (dn.data_blkaddr == NULL_ADDR)
-               err = reserve_new_block(&dn);
+       if (need_balance && has_not_enough_free_secs(sbi, 0, 0)) {
+               unlock_page(page);
+               f2fs_balance_fs(sbi, true);
+               lock_page(page);
+               if (page->mapping != mapping) {
+                       /* The page got truncated from under us */
+                       f2fs_put_page(page, 1);
+                       goto repeat;
+               }
+       }
 
-       f2fs_put_dnode(&dn);
-       if (err)
-               goto err;
+       f2fs_wait_on_page_writeback(page, DATA, false);
 
-       mutex_unlock_op(sbi, ilock);
+       /* wait for GCed encrypted page writeback */
+       if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
+               f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);
 
-       if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
+       if (len == PAGE_SIZE || PageUptodate(page))
                return 0;
 
-       if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
-               unsigned start = pos & (PAGE_CACHE_SIZE - 1);
-               unsigned end = start + len;
+       if (blkaddr == NEW_ADDR) {
+               zero_user_segment(page, 0, PAGE_SIZE);
+               SetPageUptodate(page);
+       } else {
+               struct bio *bio;
 
-               /* Reading beyond i_size is simple: memset to zero */
-               zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
-               goto out;
-       }
+               bio = f2fs_grab_bio(inode, blkaddr, 1);
+               if (IS_ERR(bio)) {
+                       err = PTR_ERR(bio);
+                       goto fail;
+               }
 
-       if (dn.data_blkaddr == NEW_ADDR) {
-               zero_user_segment(page, 0, PAGE_CACHE_SIZE);
-       } else {
-               err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
-               if (err)
-                       return err;
-               lock_page(page);
-               if (!PageUptodate(page)) {
-                       f2fs_put_page(page, 1);
-                       return -EIO;
+               if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
+                       bio_put(bio);
+                       err = -EFAULT;
+                       goto fail;
                }
-               if (page->mapping != mapping) {
+
+               __submit_bio(sbi, READ_SYNC, bio, DATA);
+
+               lock_page(page);
+               if (unlikely(page->mapping != mapping)) {
                        f2fs_put_page(page, 1);
                        goto repeat;
                }
+               if (unlikely(!PageUptodate(page))) {
+                       err = -EIO;
+                       goto fail;
+               }
        }
-out:
-       SetPageUptodate(page);
-       clear_cold_data(page);
        return 0;
 
-err:
-       mutex_unlock_op(sbi, ilock);
+fail:
        f2fs_put_page(page, 1);
+       f2fs_write_failed(mapping, pos + len);
        return err;
 }
 
-static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
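+/*
+ * Commit the bytes copied by generic_perform_write(): mark the page
+ * dirty and, when the write went past EOF, push i_size forward before
+ * dropping the page reference.
+ */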
+static int f2fs_write_end(struct file *file,
+                       struct address_space *mapping,
+                       loff_t pos, unsigned len, unsigned copied,
+                       struct page *page, void *fsdata)
+{
+       struct inode *inode = page->mapping->host;
+
+       trace_f2fs_write_end(inode, pos, len, copied);
+
+       /*
+        * This should come from len == PAGE_SIZE, so copied is expected to
+        * be PAGE_SIZE as well. Otherwise, treat it as a zero-byte copy and
+        * let generic_perform_write() retry by passing back copied = 0.
+        */
+       if (!PageUptodate(page)) {
+               if (unlikely(copied != PAGE_SIZE))
+                       copied = 0;
+               else
+                       SetPageUptodate(page);
+       }
+       if (!copied)
+               goto unlock_out;
+
+       set_page_dirty(page);
+
+       if (pos + copied > i_size_read(inode))
+               f2fs_i_size_write(inode, pos + copied);
+unlock_out:
+       f2fs_put_page(page, 1);
+       f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
+       return copied;
+}
+
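+/*
+ * Direct I/O requires block-aligned offsets and iovec segments; reads
+ * additionally must not carry duplicate iov_base pointers. Anything else
+ * is rejected with -EINVAL.
+ */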
+static ssize_t check_direct_IO(struct inode *inode, int rw,
                const struct iovec *iov, loff_t offset, unsigned long nr_segs)
 {
-       struct file *file = iocb->ki_filp;
-       struct inode *inode = file->f_mapping->host;
+       unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;
+       int seg, i;
+       size_t size;
+       unsigned long addr;
+       ssize_t retval = -EINVAL;
+       loff_t end = offset;
+
+       if (offset & blocksize_mask)
+               return -EINVAL;
+
+       /* Check the memory alignment.  Blocks cannot straddle pages */
+       for (seg = 0; seg < nr_segs; seg++) {
+               addr = (unsigned long)iov[seg].iov_base;
+               size = iov[seg].iov_len;
+               end += size;
+               if ((addr & blocksize_mask) || (size & blocksize_mask))
+                       goto out;
+
+               /* If this is a write we don't need to check anymore */
+               if (rw & WRITE)
+                       continue;
+
+               /*
+                * Make sure there are no duplicate iov_base's in this iovec;
+                * if there are, return -EINVAL, since duplicates would cause
+                * csum errors when reading back.
+                */
+               for (i = seg + 1; i < nr_segs; i++) {
+                       if (iov[seg].iov_base == iov[i].iov_base)
+                               goto out;
+               }
+       }
+       retval = 0;
+out:
+       return retval;
+}
+
+static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
+                               const struct iovec *iov, loff_t offset,
+                               unsigned long nr_segs)
+{
+       struct address_space *mapping = iocb->ki_filp->f_mapping;
+       struct inode *inode = mapping->host;
+       size_t count = iov_length(iov, nr_segs);
+       int err;
 
-       if (rw == WRITE)
+       err = check_direct_IO(inode, rw, iov, offset, nr_segs);
+       if (err)
+               return err;
+
+       if (__force_buffered_io(inode, rw))
                return 0;
 
-       /* Needs synchronization with the cleaner */
-       return blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
-                                                 get_data_block_ro);
+       trace_f2fs_direct_IO_enter(inode, offset, count, rw);
+
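+       /*
+        * dio_rwsem is taken shared around blockdev_direct_IO() so that
+        * paths which must fence off in-flight direct I/O can take it
+        * exclusively.
+        */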
+       down_read(&F2FS_I(inode)->dio_rwsem[rw]);
+       err = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
+                                                       get_data_block_dio);
+       up_read(&F2FS_I(inode)->dio_rwsem[rw]);
+       if (rw & WRITE) {
+               if (err > 0)
+                       set_inode_flag(inode, FI_UPDATE_WRITE);
+               else if (err < 0)
+                       f2fs_write_failed(mapping, offset + count);
+       }
+
+       trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);
+
+       return err;
 }
 
-static void f2fs_invalidate_data_page(struct page *page, unsigned long offset)
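+/*
+ * On invalidation, drop the dirty-page accounting for the owning inode
+ * (meta, node or regular data) and strip PagePrivate unless the page
+ * belongs to an in-flight atomic write.
+ */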
+void f2fs_invalidate_page(struct page *page, unsigned long offset)
 {
        struct inode *inode = page->mapping->host;
-       struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
-       if (S_ISDIR(inode->i_mode) && PageDirty(page)) {
-               dec_page_count(sbi, F2FS_DIRTY_DENTS);
-               inode_dec_dirty_dents(inode);
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+
+       if (inode->i_ino >= F2FS_ROOT_INO(sbi) && (offset % PAGE_SIZE))
+               return;
+
+       if (PageDirty(page)) {
+               if (inode->i_ino == F2FS_META_INO(sbi)) {
+                       dec_page_count(sbi, F2FS_DIRTY_META);
+               } else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
+                       dec_page_count(sbi, F2FS_DIRTY_NODES);
+               } else {
+                       inode_dec_dirty_pages(inode);
+                       remove_dirty_inode(inode);
+               }
        }
+
+       /* This is atomic written page, keep Private */
+       if (IS_ATOMIC_WRITTEN_PAGE(page))
+               return;
+
+       set_page_private(page, 0);
        ClearPagePrivate(page);
 }
 
-static int f2fs_release_data_page(struct page *page, gfp_t wait)
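+/*
+ * Dirty pages and pages of an in-flight atomic write keep their private
+ * state and cannot be released; everything else is stripped and freed.
+ */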
+int f2fs_release_page(struct page *page, gfp_t wait)
 {
+       /* If this is dirty page, keep PagePrivate */
+       if (PageDirty(page))
+               return 0;
+
+       /* This is atomic written page, keep Private */
+       if (IS_ATOMIC_WRITTEN_PAGE(page))
+               return 0;
+
+       set_page_private(page, 0);
        ClearPagePrivate(page);
        return 1;
 }
 
+/*
+ * This was copied from __set_page_dirty_buffers, which gives higher
+ * performance on very high speed storage (e.g., pmem).
+ */
+void f2fs_set_page_dirty_nobuffers(struct page *page)
+{
+       struct address_space *mapping = page->mapping;
+       unsigned long flags;
+
+       if (unlikely(!mapping))
+               return;
+
+       spin_lock(&mapping->private_lock);
+       SetPageDirty(page);
+       spin_unlock(&mapping->private_lock);
+
+       spin_lock_irqsave(&mapping->tree_lock, flags);
+       WARN_ON_ONCE(!PageUptodate(page));
+       account_page_dirtied(page, mapping);
+       radix_tree_tag_set(&mapping->page_tree,
+                       page_index(page), PAGECACHE_TAG_DIRTY);
+       spin_unlock_irqrestore(&mapping->tree_lock, flags);
+
+       __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
+}
+
 static int f2fs_set_data_page_dirty(struct page *page)
 {
        struct address_space *mapping = page->mapping;
        struct inode *inode = mapping->host;
 
-       SetPageUptodate(page);
+       trace_f2fs_set_page_dirty(page, DATA);
+
+       if (!PageUptodate(page))
+               SetPageUptodate(page);
+
+       if (f2fs_is_atomic_file(inode)) {
+               if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
+                       register_inmem_page(inode, page);
+                       return 1;
+               }
+               /*
+                * The page has already been registered, so just return
+                * here.
+                */
+               return 0;
+       }
+
        if (!PageDirty(page)) {
-               __set_page_dirty_nobuffers(page);
-               set_dirty_dir_page(inode, page);
+               f2fs_set_page_dirty_nobuffers(page);
+               update_dirty_page(inode, page);
                return 1;
        }
        return 0;
@@ -731,7 +1953,16 @@ static int f2fs_set_data_page_dirty(struct page *page)
 
 static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
 {
-       return generic_block_bmap(mapping, block, get_data_block_ro);
+       struct inode *inode = mapping->host;
+
+       if (f2fs_has_inline_data(inode))
+               return 0;
+
+       /* make sure the blocks are allocated by flushing dirty pages first */
+       if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
+               filemap_write_and_wait(mapping);
+
+       return generic_block_bmap(mapping, block, get_data_block_bmap);
 }
 
 const struct address_space_operations f2fs_dblock_aops = {
@@ -740,10 +1971,10 @@ const struct address_space_operations f2fs_dblock_aops = {
        .writepage      = f2fs_write_data_page,
        .writepages     = f2fs_write_data_pages,
        .write_begin    = f2fs_write_begin,
-       .write_end      = nobh_write_end,
+       .write_end      = f2fs_write_end,
        .set_page_dirty = f2fs_set_data_page_dirty,
-       .invalidatepage = f2fs_invalidate_data_page,
-       .releasepage    = f2fs_release_data_page,
+       .invalidatepage = f2fs_invalidate_page,
+       .releasepage    = f2fs_release_page,
        .direct_IO      = f2fs_direct_IO,
        .bmap           = f2fs_bmap,
 };
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index 8d9943786c318effc179b281ea5d4d34947033ff..29d8feaf09ee77d3f29f5c097cb94cfbd89d8271 100644 (file)
 #include "gc.h"
 
 static LIST_HEAD(f2fs_stat_list);
-static struct dentry *debugfs_root;
+static struct dentry *f2fs_debugfs_root;
 static DEFINE_MUTEX(f2fs_stat_mutex);
 
 static void update_general_status(struct f2fs_sb_info *sbi)
 {
-       struct f2fs_stat_info *si = sbi->stat_info;
+       struct f2fs_stat_info *si = F2FS_STAT(sbi);
        int i;
 
-       /* valid check of the segment numbers */
-       si->hit_ext = sbi->read_hit_ext;
-       si->total_ext = sbi->total_hit_ext;
+       /* validation check of the segment numbers */
+       si->hit_largest = atomic64_read(&sbi->read_hit_largest);
+       si->hit_cached = atomic64_read(&sbi->read_hit_cached);
+       si->hit_rbtree = atomic64_read(&sbi->read_hit_rbtree);
+       si->hit_total = si->hit_largest + si->hit_cached + si->hit_rbtree;
+       si->total_ext = atomic64_read(&sbi->total_hit_ext);
+       si->ext_tree = atomic_read(&sbi->total_ext_tree);
+       si->zombie_tree = atomic_read(&sbi->total_zombie_tree);
+       si->ext_node = atomic_read(&sbi->total_ext_node);
        si->ndirty_node = get_pages(sbi, F2FS_DIRTY_NODES);
        si->ndirty_dent = get_pages(sbi, F2FS_DIRTY_DENTS);
-       si->ndirty_dirs = sbi->n_dirty_dirs;
        si->ndirty_meta = get_pages(sbi, F2FS_DIRTY_META);
+       si->ndirty_data = get_pages(sbi, F2FS_DIRTY_DATA);
+       si->ndirty_imeta = get_pages(sbi, F2FS_DIRTY_IMETA);
+       si->ndirty_dirs = sbi->ndirty_inode[DIR_INODE];
+       si->ndirty_files = sbi->ndirty_inode[FILE_INODE];
+       si->ndirty_all = sbi->ndirty_inode[DIRTY_META];
+       si->inmem_pages = get_pages(sbi, F2FS_INMEM_PAGES);
+       si->nr_wb_cp_data = get_pages(sbi, F2FS_WB_CP_DATA);
+       si->nr_wb_data = get_pages(sbi, F2FS_WB_DATA);
        si->total_count = (int)sbi->user_block_count / sbi->blocks_per_seg;
        si->rsvd_segs = reserved_segments(sbi);
        si->overp_segs = overprovision_segments(sbi);
        si->valid_count = valid_user_blocks(sbi);
+       si->discard_blks = discard_blocks(sbi);
        si->valid_node_count = valid_node_count(sbi);
        si->valid_inode_count = valid_inode_count(sbi);
+       si->inline_xattr = atomic_read(&sbi->inline_xattr);
+       si->inline_inode = atomic_read(&sbi->inline_inode);
+       si->inline_dir = atomic_read(&sbi->inline_dir);
+       si->orphans = sbi->im[ORPHAN_INO].ino_num;
        si->utilization = utilization(sbi);
 
        si->free_segs = free_segments(sbi);
        si->free_secs = free_sections(sbi);
        si->prefree_count = prefree_segments(sbi);
        si->dirty_count = dirty_segments(sbi);
-       si->node_pages = sbi->node_inode->i_mapping->nrpages;
-       si->meta_pages = sbi->meta_inode->i_mapping->nrpages;
+       si->node_pages = NODE_MAPPING(sbi)->nrpages;
+       si->meta_pages = META_MAPPING(sbi)->nrpages;
        si->nats = NM_I(sbi)->nat_cnt;
-       si->sits = SIT_I(sbi)->dirty_sentries;
-       si->fnids = NM_I(sbi)->fcnt;
+       si->dirty_nats = NM_I(sbi)->dirty_nat_cnt;
+       si->sits = MAIN_SEGS(sbi);
+       si->dirty_sits = SIT_I(sbi)->dirty_sentries;
+       si->free_nids = NM_I(sbi)->nid_cnt[FREE_NID_LIST];
+       si->alloc_nids = NM_I(sbi)->nid_cnt[ALLOC_NID_LIST];
        si->bg_gc = sbi->bg_gc;
        si->util_free = (int)(free_user_blocks(sbi) >> sbi->log_blocks_per_seg)
                * 100 / (int)(sbi->user_block_count >> sbi->log_blocks_per_seg)
@@ -76,6 +97,8 @@ static void update_general_status(struct f2fs_sb_info *sbi)
                si->segment_count[i] = sbi->segment_count[i];
                si->block_count[i] = sbi->block_count[i];
        }
+
+       si->inplace_count = atomic_read(&sbi->inplace_count);
 }
 
 /*
@@ -83,18 +106,17 @@ static void update_general_status(struct f2fs_sb_info *sbi)
  */
 static void update_sit_info(struct f2fs_sb_info *sbi)
 {
-       struct f2fs_stat_info *si = sbi->stat_info;
-       unsigned int blks_per_sec, hblks_per_sec, total_vblocks, bimodal, dist;
-       struct sit_info *sit_i = SIT_I(sbi);
+       struct f2fs_stat_info *si = F2FS_STAT(sbi);
+       unsigned long long blks_per_sec, hblks_per_sec, total_vblocks;
+       unsigned long long bimodal, dist;
        unsigned int segno, vblocks;
        int ndirty = 0;
 
        bimodal = 0;
        total_vblocks = 0;
-       blks_per_sec = sbi->segs_per_sec * (1 << sbi->log_blocks_per_seg);
+       blks_per_sec = sbi->segs_per_sec * sbi->blocks_per_seg;
        hblks_per_sec = blks_per_sec / 2;
-       mutex_lock(&sit_i->sentry_lock);
-       for (segno = 0; segno < TOTAL_SEGS(sbi); segno += sbi->segs_per_sec) {
+       for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
                vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec);
                dist = abs(vblocks - hblks_per_sec);
                bimodal += dist * dist;
@@ -104,11 +126,10 @@ static void update_sit_info(struct f2fs_sb_info *sbi)
                        ndirty++;
                }
        }
-       mutex_unlock(&sit_i->sentry_lock);
-       dist = TOTAL_SECS(sbi) * hblks_per_sec * hblks_per_sec / 100;
-       si->bimodal = bimodal / dist;
+       dist = div_u64(MAIN_SECS(sbi) * hblks_per_sec * hblks_per_sec, 100);
+       si->bimodal = div64_u64(bimodal, dist);
        if (si->dirty_count)
-               si->avg_vblocks = total_vblocks / ndirty;
+               si->avg_vblocks = div_u64(total_vblocks, ndirty);
        else
                si->avg_vblocks = 0;
 }
@@ -118,8 +139,9 @@ static void update_sit_info(struct f2fs_sb_info *sbi)
  */
 static void update_mem_info(struct f2fs_sb_info *sbi)
 {
-       struct f2fs_stat_info *si = sbi->stat_info;
+       struct f2fs_stat_info *si = F2FS_STAT(sbi);
        unsigned npages;
+       int i;
 
        if (si->base_mem)
                goto get_cache;
@@ -127,79 +149,115 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
        si->base_mem = sizeof(struct f2fs_sb_info) + sbi->sb->s_blocksize;
        si->base_mem += 2 * sizeof(struct f2fs_inode_info);
        si->base_mem += sizeof(*sbi->ckpt);
+       si->base_mem += sizeof(struct percpu_counter) * NR_COUNT_TYPE;
 
        /* build sm */
        si->base_mem += sizeof(struct f2fs_sm_info);
 
        /* build sit */
        si->base_mem += sizeof(struct sit_info);
-       si->base_mem += TOTAL_SEGS(sbi) * sizeof(struct seg_entry);
-       si->base_mem += f2fs_bitmap_size(TOTAL_SEGS(sbi));
-       si->base_mem += 2 * SIT_VBLOCK_MAP_SIZE * TOTAL_SEGS(sbi);
+       si->base_mem += MAIN_SEGS(sbi) * sizeof(struct seg_entry);
+       si->base_mem += f2fs_bitmap_size(MAIN_SEGS(sbi));
+       si->base_mem += 2 * SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
+       if (f2fs_discard_en(sbi))
+               si->base_mem += SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
+       si->base_mem += SIT_VBLOCK_MAP_SIZE;
        if (sbi->segs_per_sec > 1)
-               si->base_mem += TOTAL_SECS(sbi) * sizeof(struct sec_entry);
+               si->base_mem += MAIN_SECS(sbi) * sizeof(struct sec_entry);
        si->base_mem += __bitmap_size(sbi, SIT_BITMAP);
 
        /* build free segmap */
        si->base_mem += sizeof(struct free_segmap_info);
-       si->base_mem += f2fs_bitmap_size(TOTAL_SEGS(sbi));
-       si->base_mem += f2fs_bitmap_size(TOTAL_SECS(sbi));
+       si->base_mem += f2fs_bitmap_size(MAIN_SEGS(sbi));
+       si->base_mem += f2fs_bitmap_size(MAIN_SECS(sbi));
 
        /* build curseg */
        si->base_mem += sizeof(struct curseg_info) * NR_CURSEG_TYPE;
-       si->base_mem += PAGE_CACHE_SIZE * NR_CURSEG_TYPE;
+       si->base_mem += PAGE_SIZE * NR_CURSEG_TYPE;
 
        /* build dirty segmap */
        si->base_mem += sizeof(struct dirty_seglist_info);
-       si->base_mem += NR_DIRTY_TYPE * f2fs_bitmap_size(TOTAL_SEGS(sbi));
-       si->base_mem += f2fs_bitmap_size(TOTAL_SECS(sbi));
+       si->base_mem += NR_DIRTY_TYPE * f2fs_bitmap_size(MAIN_SEGS(sbi));
+       si->base_mem += f2fs_bitmap_size(MAIN_SECS(sbi));
 
-       /* buld nm */
+       /* build nm */
        si->base_mem += sizeof(struct f2fs_nm_info);
        si->base_mem += __bitmap_size(sbi, NAT_BITMAP);
 
+get_cache:
+       si->cache_mem = 0;
+
        /* build gc */
-       si->base_mem += sizeof(struct f2fs_gc_kthread);
+       if (sbi->gc_thread)
+               si->cache_mem += sizeof(struct f2fs_gc_kthread);
+
+       /* build merge flush thread */
+       if (SM_I(sbi)->cmd_control_info)
+               si->cache_mem += sizeof(struct flush_cmd_control);
 
-get_cache:
        /* free nids */
-       si->cache_mem = NM_I(sbi)->fcnt;
-       si->cache_mem += NM_I(sbi)->nat_cnt;
-       npages = sbi->node_inode->i_mapping->nrpages;
-       si->cache_mem += npages << PAGE_CACHE_SHIFT;
-       npages = sbi->meta_inode->i_mapping->nrpages;
-       si->cache_mem += npages << PAGE_CACHE_SHIFT;
-       si->cache_mem += sbi->n_orphans * sizeof(struct orphan_inode_entry);
-       si->cache_mem += sbi->n_dirty_dirs * sizeof(struct dir_inode_entry);
+       si->cache_mem += (NM_I(sbi)->nid_cnt[FREE_NID_LIST] +
+                               NM_I(sbi)->nid_cnt[ALLOC_NID_LIST]) *
+                               sizeof(struct free_nid);
+       si->cache_mem += NM_I(sbi)->nat_cnt * sizeof(struct nat_entry);
+       si->cache_mem += NM_I(sbi)->dirty_nat_cnt *
+                                       sizeof(struct nat_entry_set);
+       si->cache_mem += si->inmem_pages * sizeof(struct inmem_pages);
+       for (i = 0; i <= ORPHAN_INO; i++)
+               si->cache_mem += sbi->im[i].ino_num * sizeof(struct ino_entry);
+       si->cache_mem += atomic_read(&sbi->total_ext_tree) *
+                                               sizeof(struct extent_tree);
+       si->cache_mem += atomic_read(&sbi->total_ext_node) *
+                                               sizeof(struct extent_node);
+
+       si->page_mem = 0;
+       npages = NODE_MAPPING(sbi)->nrpages;
+       si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
+       npages = META_MAPPING(sbi)->nrpages;
+       si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
 }
 
 static int stat_show(struct seq_file *s, void *v)
 {
-       struct f2fs_stat_info *si, *next;
+       struct f2fs_stat_info *si;
        int i = 0;
        int j;
 
        mutex_lock(&f2fs_stat_mutex);
-       list_for_each_entry_safe(si, next, &f2fs_stat_list, stat_list) {
+       list_for_each_entry(si, &f2fs_stat_list, stat_list) {
                char devname[BDEVNAME_SIZE];
 
                update_general_status(si->sbi);
 
-               seq_printf(s, "\n=====[ partition info(%s). #%d ]=====\n",
-                       bdevname(si->sbi->sb->s_bdev, devname), i++);
+               seq_printf(s, "\n=====[ partition info(%s). #%d, %s]=====\n",
+                       bdevname(si->sbi->sb->s_bdev, devname), i++,
+                       f2fs_readonly(si->sbi->sb) ? "RO" : "RW");
                seq_printf(s, "[SB: 1] [CP: 2] [SIT: %d] [NAT: %d] ",
                           si->sit_area_segs, si->nat_area_segs);
                seq_printf(s, "[SSA: %d] [MAIN: %d",
                           si->ssa_area_segs, si->main_area_segs);
                seq_printf(s, "(OverProv:%d Resv:%d)]\n\n",
                           si->overp_segs, si->rsvd_segs);
-               seq_printf(s, "Utilization: %d%% (%d valid blocks)\n",
-                          si->utilization, si->valid_count);
+               if (test_opt(si->sbi, DISCARD))
+                       seq_printf(s, "Utilization: %u%% (%u valid blocks, %u discard blocks)\n",
+                               si->utilization, si->valid_count, si->discard_blks);
+               else
+                       seq_printf(s, "Utilization: %u%% (%u valid blocks)\n",
+                               si->utilization, si->valid_count);
+
                seq_printf(s, "  - Node: %u (Inode: %u, ",
                           si->valid_node_count, si->valid_inode_count);
                seq_printf(s, "Other: %u)\n  - Data: %u\n",
                           si->valid_node_count - si->valid_inode_count,
                           si->valid_count - si->valid_node_count);
+               seq_printf(s, "  - Inline_xattr Inode: %u\n",
+                          si->inline_xattr);
+               seq_printf(s, "  - Inline_data Inode: %u\n",
+                          si->inline_inode);
+               seq_printf(s, "  - Inline_dentry Inode: %u\n",
+                          si->inline_dir);
+               seq_printf(s, "  - Orphan Inode: %u\n",
+                          si->orphans);
                seq_printf(s, "\nMain area: %d segs, %d secs %d zones\n",
                           si->main_area_segs, si->main_area_sections,
                           si->main_area_zones);
@@ -233,41 +291,63 @@ static int stat_show(struct seq_file *s, void *v)
                           si->dirty_count);
                seq_printf(s, "  - Prefree: %d\n  - Free: %d (%d)\n\n",
                           si->prefree_count, si->free_segs, si->free_secs);
+               seq_printf(s, "CP calls: %d (BG: %d)\n",
+                               si->cp_count, si->bg_cp_count);
                seq_printf(s, "GC calls: %d (BG: %d)\n",
                           si->call_count, si->bg_gc);
-               seq_printf(s, "  - data segments : %d\n", si->data_segs);
-               seq_printf(s, "  - node segments : %d\n", si->node_segs);
-               seq_printf(s, "Try to move %d blocks\n", si->tot_blks);
-               seq_printf(s, "  - data blocks : %d\n", si->data_blks);
-               seq_printf(s, "  - node blocks : %d\n", si->node_blks);
-               seq_printf(s, "\nExtent Hit Ratio: %d / %d\n",
-                          si->hit_ext, si->total_ext);
-               seq_printf(s, "\nBalancing F2FS Async:\n");
-               seq_printf(s, "  - nodes %4d in %4d\n",
+               seq_printf(s, "  - data segments : %d (%d)\n",
+                               si->data_segs, si->bg_data_segs);
+               seq_printf(s, "  - node segments : %d (%d)\n",
+                               si->node_segs, si->bg_node_segs);
+               seq_printf(s, "Try to move %d blocks (BG: %d)\n", si->tot_blks,
+                               si->bg_data_blks + si->bg_node_blks);
+               seq_printf(s, "  - data blocks : %d (%d)\n", si->data_blks,
+                               si->bg_data_blks);
+               seq_printf(s, "  - node blocks : %d (%d)\n", si->node_blks,
+                               si->bg_node_blks);
+               seq_puts(s, "\nExtent Cache:\n");
+               seq_printf(s, "  - Hit Count: L1-1:%llu L1-2:%llu L2:%llu\n",
+                               si->hit_largest, si->hit_cached,
+                               si->hit_rbtree);
+               seq_printf(s, "  - Hit Ratio: %llu%% (%llu / %llu)\n",
+                               !si->total_ext ? 0 :
+                               div64_u64(si->hit_total * 100, si->total_ext),
+                               si->hit_total, si->total_ext);
+               seq_printf(s, "  - Inner Struct Count: tree: %d(%d), node: %d\n",
+                               si->ext_tree, si->zombie_tree, si->ext_node);
+               seq_puts(s, "\nBalancing F2FS Async:\n");
+               seq_printf(s, "  - inmem: %4d, wb_cp_data: %4d, wb_data: %4d\n",
+                          si->inmem_pages, si->nr_wb_cp_data, si->nr_wb_data);
+               seq_printf(s, "  - nodes: %4d in %4d\n",
                           si->ndirty_node, si->node_pages);
-               seq_printf(s, "  - dents %4d in dirs:%4d\n",
-                          si->ndirty_dent, si->ndirty_dirs);
-               seq_printf(s, "  - meta %4d in %4d\n",
+               seq_printf(s, "  - dents: %4d in dirs:%4d (%4d)\n",
+                          si->ndirty_dent, si->ndirty_dirs, si->ndirty_all);
+               seq_printf(s, "  - datas: %4d in files:%4d\n",
+                          si->ndirty_data, si->ndirty_files);
+               seq_printf(s, "  - meta: %4d in %4d\n",
                           si->ndirty_meta, si->meta_pages);
-               seq_printf(s, "  - NATs %5d > %lu\n",
-                          si->nats, NM_WOUT_THRESHOLD);
-               seq_printf(s, "  - SITs: %5d\n  - free_nids: %5d\n",
-                          si->sits, si->fnids);
-               seq_printf(s, "\nDistribution of User Blocks:");
-               seq_printf(s, " [ valid | invalid | free ]\n");
-               seq_printf(s, "  [");
+               seq_printf(s, "  - imeta: %4d\n",
+                          si->ndirty_imeta);
+               seq_printf(s, "  - NATs: %9d/%9d\n  - SITs: %9d/%9d\n",
+                          si->dirty_nats, si->nats, si->dirty_sits, si->sits);
+               seq_printf(s, "  - free_nids: %9d, alloc_nids: %9d\n",
+                          si->free_nids, si->alloc_nids);
+               seq_puts(s, "\nDistribution of User Blocks:");
+               seq_puts(s, " [ valid | invalid | free ]\n");
+               seq_puts(s, "  [");
 
                for (j = 0; j < si->util_valid; j++)
-                       seq_printf(s, "-");
-               seq_printf(s, "|");
+                       seq_putc(s, '-');
+               seq_putc(s, '|');
 
                for (j = 0; j < si->util_invalid; j++)
-                       seq_printf(s, "-");
-               seq_printf(s, "|");
+                       seq_putc(s, '-');
+               seq_putc(s, '|');
 
                for (j = 0; j < si->util_free; j++)
-                       seq_printf(s, "-");
-               seq_printf(s, "]\n\n");
+                       seq_putc(s, '-');
+               seq_puts(s, "]\n\n");
+               seq_printf(s, "IPU: %u blocks\n", si->inplace_count);
                seq_printf(s, "SSR: %u blocks in %u segments\n",
                           si->block_count[SSR], si->segment_count[SSR]);
                seq_printf(s, "LFS: %u blocks in %u segments\n",
@@ -280,9 +360,14 @@ static int stat_show(struct seq_file *s, void *v)
 
                /* memory footprint */
                update_mem_info(si->sbi);
-               seq_printf(s, "\nMemory: %u KB = static: %u + cached: %u\n",
-                               (si->base_mem + si->cache_mem) >> 10,
-                               si->base_mem >> 10, si->cache_mem >> 10);
+               seq_printf(s, "\nMemory: %llu KB\n",
+                       (si->base_mem + si->cache_mem + si->page_mem) >> 10);
+               seq_printf(s, "  - static: %llu KB\n",
+                               si->base_mem >> 10);
+               seq_printf(s, "  - cached: %llu KB\n",
+                               si->cache_mem >> 10);
+               seq_printf(s, "  - paged : %llu KB\n",
+                               si->page_mem >> 10);
        }
        mutex_unlock(&f2fs_stat_mutex);
        return 0;
@@ -294,6 +379,7 @@ static int stat_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations stat_fops = {
+       .owner = THIS_MODULE,
        .open = stat_open,
        .read = seq_read,
        .llseek = seq_lseek,
@@ -305,11 +391,10 @@ int f2fs_build_stats(struct f2fs_sb_info *sbi)
        struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
        struct f2fs_stat_info *si;
 
-       sbi->stat_info = kzalloc(sizeof(struct f2fs_stat_info), GFP_KERNEL);
-       if (!sbi->stat_info)
+       si = kzalloc(sizeof(struct f2fs_stat_info), GFP_KERNEL);
+       if (!si)
                return -ENOMEM;
 
-       si = sbi->stat_info;
        si->all_area_segs = le32_to_cpu(raw_super->segment_count);
        si->sit_area_segs = le32_to_cpu(raw_super->segment_count_sit);
        si->nat_area_segs = le32_to_cpu(raw_super->segment_count_nat);
@@ -319,6 +404,17 @@ int f2fs_build_stats(struct f2fs_sb_info *sbi)
        si->main_area_zones = si->main_area_sections /
                                le32_to_cpu(raw_super->secs_per_zone);
        si->sbi = sbi;
+       sbi->stat_info = si;
+
+       atomic64_set(&sbi->total_hit_ext, 0);
+       atomic64_set(&sbi->read_hit_rbtree, 0);
+       atomic64_set(&sbi->read_hit_largest, 0);
+       atomic64_set(&sbi->read_hit_cached, 0);
+
+       atomic_set(&sbi->inline_xattr, 0);
+       atomic_set(&sbi->inline_inode, 0);
+       atomic_set(&sbi->inline_dir, 0);
+       atomic_set(&sbi->inplace_count, 0);
 
        mutex_lock(&f2fs_stat_mutex);
        list_add_tail(&si->stat_list, &f2fs_stat_list);
@@ -329,25 +425,39 @@ int f2fs_build_stats(struct f2fs_sb_info *sbi)
 
 void f2fs_destroy_stats(struct f2fs_sb_info *sbi)
 {
-       struct f2fs_stat_info *si = sbi->stat_info;
+       struct f2fs_stat_info *si = F2FS_STAT(sbi);
 
        mutex_lock(&f2fs_stat_mutex);
        list_del(&si->stat_list);
        mutex_unlock(&f2fs_stat_mutex);
 
-       kfree(sbi->stat_info);
+       kfree(si);
 }
 
-void __init f2fs_create_root_stats(void)
+int __init f2fs_create_root_stats(void)
 {
-       debugfs_root = debugfs_create_dir("f2fs", NULL);
-       if (debugfs_root)
-               debugfs_create_file("status", S_IRUGO, debugfs_root,
-                                        NULL, &stat_fops);
+       struct dentry *file;
+
+       f2fs_debugfs_root = debugfs_create_dir("f2fs", NULL);
+       if (!f2fs_debugfs_root)
+               return -ENOMEM;
+
+       file = debugfs_create_file("status", S_IRUGO, f2fs_debugfs_root,
+                       NULL, &stat_fops);
+       if (!file) {
+               debugfs_remove(f2fs_debugfs_root);
+               f2fs_debugfs_root = NULL;
+               return -ENOMEM;
+       }
+
+       return 0;
 }
 
 void f2fs_destroy_root_stats(void)
 {
-       debugfs_remove_recursive(debugfs_root);
-       debugfs_root = NULL;
+       if (!f2fs_debugfs_root)
+               return;
+
+       debugfs_remove_recursive(f2fs_debugfs_root);
+       f2fs_debugfs_root = NULL;
 }
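
Once a volume is mounted, the output assembled by stat_show() above is readable from debugfs, e.g. by reading /sys/kernel/debug/f2fs/status (assuming debugfs is mounted at /sys/kernel/debug); the "f2fs" directory and "status" file are exactly the ones created in f2fs_create_root_stats() and torn down in f2fs_destroy_root_stats().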
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 1ac6b93036b7a23980a66d4f6987a82c34ff32df..ee6c997949233321dca8f15ce791bebf6f6ac0d3 100644 (file)
@@ -9,23 +9,25 @@
  * published by the Free Software Foundation.
  */
 #include <linux/fs.h>
+#include <linux/namei.h>
 #include <linux/f2fs_fs.h>
 #include "f2fs.h"
 #include "node.h"
 #include "acl.h"
+#include "xattr.h"
 
 static unsigned long dir_blocks(struct inode *inode)
 {
-       return ((unsigned long long) (i_size_read(inode) + PAGE_CACHE_SIZE - 1))
-                                                       >> PAGE_CACHE_SHIFT;
+       return ((unsigned long long) (i_size_read(inode) + PAGE_SIZE - 1))
+                                                       >> PAGE_SHIFT;
 }
 
-static unsigned int dir_buckets(unsigned int level)
+static unsigned int dir_buckets(unsigned int level, int dir_level)
 {
-       if (level < MAX_DIR_HASH_DEPTH / 2)
-               return 1 << level;
+       if (level + dir_level < MAX_DIR_HASH_DEPTH / 2)
+               return 1 << (level + dir_level);
        else
-               return 1 << ((MAX_DIR_HASH_DEPTH / 2) - 1);
+               return MAX_DIR_BUCKETS;
 }
 
 static unsigned int bucket_blocks(unsigned int level)
@@ -47,7 +49,6 @@ static unsigned char f2fs_filetype_table[F2FS_FT_MAX] = {
        [F2FS_FT_SYMLINK]       = DT_LNK,
 };
 
-#define S_SHIFT 12
 static unsigned char f2fs_type_by_mode[S_IFMT >> S_SHIFT] = {
        [S_IFREG >> S_SHIFT]    = F2FS_FT_REG_FILE,
        [S_IFDIR >> S_SHIFT]    = F2FS_FT_DIR,
@@ -58,104 +59,147 @@ static unsigned char f2fs_type_by_mode[S_IFMT >> S_SHIFT] = {
        [S_IFLNK >> S_SHIFT]    = F2FS_FT_SYMLINK,
 };
 
-static void set_de_type(struct f2fs_dir_entry *de, struct inode *inode)
+void set_de_type(struct f2fs_dir_entry *de, umode_t mode)
 {
-       umode_t mode = inode->i_mode;
        de->file_type = f2fs_type_by_mode[(mode & S_IFMT) >> S_SHIFT];
 }
 
-static unsigned long dir_block_index(unsigned int level, unsigned int idx)
+unsigned char get_de_type(struct f2fs_dir_entry *de)
+{
+       if (de->file_type < F2FS_FT_MAX)
+               return f2fs_filetype_table[de->file_type];
+       return DT_UNKNOWN;
+}
+
+static unsigned long dir_block_index(unsigned int level,
+                               int dir_level, unsigned int idx)
 {
        unsigned long i;
        unsigned long bidx = 0;
 
        for (i = 0; i < level; i++)
-               bidx += dir_buckets(i) * bucket_blocks(i);
+               bidx += dir_buckets(i, dir_level) * bucket_blocks(i);
        bidx += idx * bucket_blocks(level);
        return bidx;
 }
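
A worked sketch of the bucket addressing above (not part of the patch: bucket_blocks() sits outside this excerpt, so a placeholder returning 2 is assumed, and the MAX_* constants are illustrative). With i_dir_level == 0, level L holds 1 << L buckets until capped at MAX_DIR_BUCKETS, and dir_block_index() skips every block of the lower levels before indexing bucket (hash % nbucket) within level L:

        #include <stdio.h>

        #define MAX_DIR_HASH_DEPTH      63      /* illustrative value */
        #define MAX_DIR_BUCKETS         (1U << ((MAX_DIR_HASH_DEPTH / 2) - 1))

        static unsigned int dir_buckets(unsigned int level, int dir_level)
        {
                if (level + dir_level < MAX_DIR_HASH_DEPTH / 2)
                        return 1 << (level + dir_level);
                return MAX_DIR_BUCKETS;
        }

        /* assumed helper; the real bucket_blocks() is not in this hunk */
        static unsigned int bucket_blocks(unsigned int level)
        {
                return 2;
        }

        static unsigned long dir_block_index(unsigned int level, int dir_level,
                                             unsigned int idx)
        {
                unsigned long i, bidx = 0;

                for (i = 0; i < level; i++)
                        bidx += dir_buckets(i, dir_level) * bucket_blocks(i);
                return bidx + idx * bucket_blocks(level);
        }

        int main(void)
        {
                unsigned int hash = 0x5a5a5a5a, level;

                for (level = 0; level < 4; level++)
                        printf("level %u starts probing at block %lu\n", level,
                               dir_block_index(level, 0,
                                               hash % dir_buckets(level, 0)));
                return 0;
        }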
 
-static bool early_match_name(const char *name, size_t namelen,
-                       f2fs_hash_t namehash, struct f2fs_dir_entry *de)
+static struct f2fs_dir_entry *find_in_block(struct page *dentry_page,
+                               struct fscrypt_name *fname,
+                               f2fs_hash_t namehash,
+                               int *max_slots,
+                               struct page **res_page)
 {
-       if (le16_to_cpu(de->name_len) != namelen)
-               return false;
+       struct f2fs_dentry_block *dentry_blk;
+       struct f2fs_dir_entry *de;
+       struct f2fs_dentry_ptr d;
 
-       if (de->hash_code != namehash)
-               return false;
+       dentry_blk = (struct f2fs_dentry_block *)kmap(dentry_page);
 
-       return true;
+       make_dentry_ptr(NULL, &d, (void *)dentry_blk, 1);
+       de = find_target_dentry(fname, namehash, max_slots, &d);
+       if (de)
+               *res_page = dentry_page;
+       else
+               kunmap(dentry_page);
+
+       return de;
 }
 
-static struct f2fs_dir_entry *find_in_block(struct page *dentry_page,
-                       const char *name, size_t namelen, int *max_slots,
-                       f2fs_hash_t namehash, struct page **res_page)
+struct f2fs_dir_entry *find_target_dentry(struct fscrypt_name *fname,
+                       f2fs_hash_t namehash, int *max_slots,
+                       struct f2fs_dentry_ptr *d)
 {
        struct f2fs_dir_entry *de;
-       unsigned long bit_pos, end_pos, next_pos;
-       struct f2fs_dentry_block *dentry_blk = kmap(dentry_page);
-       int slots;
+       unsigned long bit_pos = 0;
+       int max_len = 0;
+       struct fscrypt_str de_name = FSTR_INIT(NULL, 0);
+       struct fscrypt_str *name = &fname->disk_name;
+
+       if (max_slots)
+               *max_slots = 0;
+       while (bit_pos < d->max) {
+               if (!test_bit_le(bit_pos, d->bitmap)) {
+                       bit_pos++;
+                       max_len++;
+                       continue;
+               }
 
-       bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
-                                       NR_DENTRY_IN_BLOCK, 0);
-       while (bit_pos < NR_DENTRY_IN_BLOCK) {
-               de = &dentry_blk->dentry[bit_pos];
-               slots = GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
-
-               if (early_match_name(name, namelen, namehash, de)) {
-                       if (!memcmp(dentry_blk->filename[bit_pos],
-                                                       name, namelen)) {
-                               *res_page = dentry_page;
-                               goto found;
-                       }
+               de = &d->dentry[bit_pos];
+
+               if (unlikely(!de->name_len)) {
+                       bit_pos++;
+                       continue;
                }
-               next_pos = bit_pos + slots;
-               bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
-                               NR_DENTRY_IN_BLOCK, next_pos);
-               if (bit_pos >= NR_DENTRY_IN_BLOCK)
-                       end_pos = NR_DENTRY_IN_BLOCK;
-               else
-                       end_pos = bit_pos;
-               if (*max_slots < end_pos - next_pos)
-                       *max_slots = end_pos - next_pos;
+
+               /* encrypted case */
+               de_name.name = d->filename[bit_pos];
+               de_name.len = le16_to_cpu(de->name_len);
+
+               /* show encrypted name */
+               if (fname->hash) {
+                       if (de->hash_code == cpu_to_le32(fname->hash))
+                               goto found;
+               } else if (de_name.len == name->len &&
+                       de->hash_code == namehash &&
+                       !memcmp(de_name.name, name->name, name->len))
+                       goto found;
+
+               if (max_slots && max_len > *max_slots)
+                       *max_slots = max_len;
+               max_len = 0;
+
+               bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
        }
 
        de = NULL;
-       kunmap(dentry_page);
 found:
+       if (max_slots && max_len > *max_slots)
+               *max_slots = max_len;
        return de;
 }
 
 static struct f2fs_dir_entry *find_in_level(struct inode *dir,
-               unsigned int level, const char *name, size_t namelen,
-                       f2fs_hash_t namehash, struct page **res_page)
+                                       unsigned int level,
+                                       struct fscrypt_name *fname,
+                                       struct page **res_page)
 {
-       int s = GET_DENTRY_SLOTS(namelen);
+       struct qstr name = FSTR_TO_QSTR(&fname->disk_name);
+       int s = GET_DENTRY_SLOTS(name.len);
        unsigned int nbucket, nblock;
        unsigned int bidx, end_block;
        struct page *dentry_page;
        struct f2fs_dir_entry *de = NULL;
        bool room = false;
-       int max_slots = 0;
+       int max_slots;
+       f2fs_hash_t namehash;
 
-       BUG_ON(level > MAX_DIR_HASH_DEPTH);
+       if (fname->hash)
+               namehash = cpu_to_le32(fname->hash);
+       else
+               namehash = f2fs_dentry_hash(&name);
 
-       nbucket = dir_buckets(level);
+       nbucket = dir_buckets(level, F2FS_I(dir)->i_dir_level);
        nblock = bucket_blocks(level);
 
-       bidx = dir_block_index(level, le32_to_cpu(namehash) % nbucket);
+       bidx = dir_block_index(level, F2FS_I(dir)->i_dir_level,
+                                       le32_to_cpu(namehash) % nbucket);
        end_block = bidx + nblock;
 
        for (; bidx < end_block; bidx++) {
                /* no need to allocate new dentry pages for all the indices */
-               dentry_page = find_data_page(dir, bidx, true);
+               dentry_page = find_data_page(dir, bidx);
                if (IS_ERR(dentry_page)) {
-                       room = true;
-                       continue;
+                       if (PTR_ERR(dentry_page) == -ENOENT) {
+                               room = true;
+                               continue;
+                       } else {
+                               *res_page = dentry_page;
+                               break;
+                       }
                }
 
-               de = find_in_block(dentry_page, name, namelen,
-                                       &max_slots, namehash, res_page);
+               de = find_in_block(dentry_page, fname, namehash, &max_slots,
+                                                               res_page);
                if (de)
                        break;
 
@@ -172,75 +216,87 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
        return de;
 }
 
-/*
- * Find an entry in the specified directory with the wanted name.
- * It returns the page where the entry was found (as a parameter - res_page),
- * and the entry itself. Page is returned mapped and unlocked.
- * Entry is guaranteed to be valid.
- */
-struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
-                       struct qstr *child, struct page **res_page)
+struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
+                       struct fscrypt_name *fname, struct page **res_page)
 {
-       const char *name = child->name;
-       size_t namelen = child->len;
        unsigned long npages = dir_blocks(dir);
        struct f2fs_dir_entry *de = NULL;
-       f2fs_hash_t name_hash;
        unsigned int max_depth;
        unsigned int level;
 
-       if (namelen > F2FS_NAME_LEN)
-               return NULL;
-
-       if (npages == 0)
-               return NULL;
+       if (f2fs_has_inline_dentry(dir)) {
+               *res_page = NULL;
+               de = find_in_inline_dir(dir, fname, res_page);
+               goto out;
+       }
 
-       *res_page = NULL;
+       if (npages == 0) {
+               *res_page = NULL;
+               goto out;
+       }
 
-       name_hash = f2fs_dentry_hash(name, namelen);
        max_depth = F2FS_I(dir)->i_current_depth;
+       if (unlikely(max_depth > MAX_DIR_HASH_DEPTH)) {
+               f2fs_msg(F2FS_I_SB(dir)->sb, KERN_WARNING,
+                               "Corrupted max_depth of %lu: %u",
+                               dir->i_ino, max_depth);
+               max_depth = MAX_DIR_HASH_DEPTH;
+               f2fs_i_depth_write(dir, max_depth);
+       }
 
        for (level = 0; level < max_depth; level++) {
-               de = find_in_level(dir, level, name,
-                               namelen, name_hash, res_page);
-               if (de)
+               *res_page = NULL;
+               de = find_in_level(dir, level, fname, res_page);
+               if (de || IS_ERR(*res_page))
                        break;
        }
-       if (!de && F2FS_I(dir)->chash != name_hash) {
-               F2FS_I(dir)->chash = name_hash;
-               F2FS_I(dir)->clevel = level - 1;
-       }
+out:
        return de;
 }
 
-struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p)
+/*
+ * Find an entry in the specified directory with the wanted name.
+ * It returns the page where the entry was found (as a parameter - res_page),
+ * and the entry itself. Page is returned mapped and unlocked.
+ * Entry is guaranteed to be valid.
+ */
+struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
+                       const struct qstr *child, struct page **res_page)
 {
-       struct page *page = NULL;
        struct f2fs_dir_entry *de = NULL;
-       struct f2fs_dentry_block *dentry_blk = NULL;
+       struct fscrypt_name fname;
+       int err;
 
-       page = get_lock_data_page(dir, 0);
-       if (IS_ERR(page))
+       err = fscrypt_setup_filename(dir, child, 1, &fname);
+       if (err) {
+               *res_page = ERR_PTR(err);
                return NULL;
+       }
+
+       de = __f2fs_find_entry(dir, &fname, res_page);
 
-       dentry_blk = kmap(page);
-       de = &dentry_blk->dentry[1];
-       *p = page;
-       unlock_page(page);
+       fscrypt_free_filename(&fname);
        return de;
 }
 
-ino_t f2fs_inode_by_name(struct inode *dir, struct qstr *qstr)
+struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p)
+{
+       struct qstr dotdot = QSTR_INIT("..", 2);
+
+       return f2fs_find_entry(dir, &dotdot, p);
+}
+
+ino_t f2fs_inode_by_name(struct inode *dir, struct qstr *qstr,
+                                                       struct page **page)
 {
        ino_t res = 0;
        struct f2fs_dir_entry *de;
-       struct page *page;
 
-       de = f2fs_find_entry(dir, qstr, &page);
+       de = f2fs_find_entry(dir, qstr, page);
        if (de) {
                res = le32_to_cpu(de->ino);
-               kunmap(page);
-               f2fs_put_page(page, 0);
+               f2fs_dentry_kunmap(dir, *page);
+               f2fs_put_page(*page, 0);
        }
 
        return res;
@@ -249,185 +305,239 @@ ino_t f2fs_inode_by_name(struct inode *dir, struct qstr *qstr)
 void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
                struct page *page, struct inode *inode)
 {
+       enum page_type type = f2fs_has_inline_dentry(dir) ? NODE : DATA;
        lock_page(page);
-       wait_on_page_writeback(page);
+       f2fs_wait_on_page_writeback(page, type, true);
        de->ino = cpu_to_le32(inode->i_ino);
-       set_de_type(de, inode);
-       kunmap(page);
+       set_de_type(de, inode->i_mode);
+       f2fs_dentry_kunmap(dir, page);
        set_page_dirty(page);
-       dir->i_mtime = dir->i_ctime = CURRENT_TIME;
-       mark_inode_dirty(dir);
-
-       /* update parent inode number before releasing dentry page */
-       F2FS_I(inode)->i_pino = dir->i_ino;
 
+       dir->i_mtime = dir->i_ctime = current_time(dir);
+       f2fs_mark_inode_dirty_sync(dir, false);
        f2fs_put_page(page, 1);
 }
 
-void init_dent_inode(const struct qstr *name, struct page *ipage)
+static void init_dent_inode(const struct qstr *name, struct page *ipage)
 {
-       struct f2fs_node *rn;
-
-       if (IS_ERR(ipage))
-               return;
+       struct f2fs_inode *ri;
 
-       wait_on_page_writeback(ipage);
+       f2fs_wait_on_page_writeback(ipage, NODE, true);
 
        /* copy name info. to this inode page */
-       rn = (struct f2fs_node *)page_address(ipage);
-       rn->i.i_namelen = cpu_to_le32(name->len);
-       memcpy(rn->i.i_name, name->name, name->len);
+       ri = F2FS_INODE(ipage);
+       ri->i_namelen = cpu_to_le32(name->len);
+       memcpy(ri->i_name, name->name, name->len);
        set_page_dirty(ipage);
 }
 
-static int make_empty_dir(struct inode *inode, struct inode *parent)
+int update_dent_inode(struct inode *inode, struct inode *to,
+                                       const struct qstr *name)
+{
+       struct page *page;
+
+       if (file_enc_name(to))
+               return 0;
+
+       page = get_node_page(F2FS_I_SB(inode), inode->i_ino);
+       if (IS_ERR(page))
+               return PTR_ERR(page);
+
+       init_dent_inode(name, page);
+       f2fs_put_page(page, 1);
+
+       return 0;
+}
+
+void do_make_empty_dir(struct inode *inode, struct inode *parent,
+                                       struct f2fs_dentry_ptr *d)
+{
+       struct qstr dot = QSTR_INIT(".", 1);
+       struct qstr dotdot = QSTR_INIT("..", 2);
+
+       /* update dirent of "." */
+       f2fs_update_dentry(inode->i_ino, inode->i_mode, d, &dot, 0, 0);
+
+       /* update dirent of ".." */
+       f2fs_update_dentry(parent->i_ino, parent->i_mode, d, &dotdot, 0, 1);
+}
+
+static int make_empty_dir(struct inode *inode,
+               struct inode *parent, struct page *page)
 {
        struct page *dentry_page;
        struct f2fs_dentry_block *dentry_blk;
-       struct f2fs_dir_entry *de;
-       void *kaddr;
+       struct f2fs_dentry_ptr d;
+
+       if (f2fs_has_inline_dentry(inode))
+               return make_empty_inline_dir(inode, parent, page);
 
-       dentry_page = get_new_data_page(inode, 0, true);
+       dentry_page = get_new_data_page(inode, page, 0, true);
        if (IS_ERR(dentry_page))
                return PTR_ERR(dentry_page);
 
-       kaddr = kmap_atomic(dentry_page);
-       dentry_blk = (struct f2fs_dentry_block *)kaddr;
+       dentry_blk = kmap_atomic(dentry_page);
 
-       de = &dentry_blk->dentry[0];
-       de->name_len = cpu_to_le16(1);
-       de->hash_code = 0;
-       de->ino = cpu_to_le32(inode->i_ino);
-       memcpy(dentry_blk->filename[0], ".", 1);
-       set_de_type(de, inode);
-
-       de = &dentry_blk->dentry[1];
-       de->hash_code = 0;
-       de->name_len = cpu_to_le16(2);
-       de->ino = cpu_to_le32(parent->i_ino);
-       memcpy(dentry_blk->filename[1], "..", 2);
-       set_de_type(de, inode);
+       make_dentry_ptr(NULL, &d, (void *)dentry_blk, 1);
+       do_make_empty_dir(inode, parent, &d);
 
-       test_and_set_bit_le(0, &dentry_blk->dentry_bitmap);
-       test_and_set_bit_le(1, &dentry_blk->dentry_bitmap);
-       kunmap_atomic(kaddr);
+       kunmap_atomic(dentry_blk);
 
        set_page_dirty(dentry_page);
        f2fs_put_page(dentry_page, 1);
        return 0;
 }
 
-static int init_inode_metadata(struct inode *inode,
-               struct inode *dir, const struct qstr *name)
+struct page *init_inode_metadata(struct inode *inode, struct inode *dir,
+                       const struct qstr *new_name, const struct qstr *orig_name,
+                       struct page *dpage)
 {
-       if (is_inode_flag_set(F2FS_I(inode), FI_NEW_INODE)) {
-               int err;
-               err = new_inode_page(inode, name);
-               if (err)
-                       return err;
+       struct page *page;
+       int err;
+
+       if (is_inode_flag_set(inode, FI_NEW_INODE)) {
+               page = new_inode_page(inode);
+               if (IS_ERR(page))
+                       return page;
 
                if (S_ISDIR(inode->i_mode)) {
-                       err = make_empty_dir(inode, dir);
+                       /* in order to handle error case */
+                       get_page(page);
+                       err = make_empty_dir(inode, dir, page);
                        if (err) {
-                               remove_inode_page(inode);
-                               return err;
+                               lock_page(page);
+                               goto put_error;
                        }
+                       put_page(page);
                }
 
-               err = f2fs_init_acl(inode, dir);
-               if (err) {
-                       remove_inode_page(inode);
-                       return err;
+               err = f2fs_init_acl(inode, dir, page, dpage);
+               if (err)
+                       goto put_error;
+
+               err = f2fs_init_security(inode, dir, orig_name, page);
+               if (err)
+                       goto put_error;
+
+               if (f2fs_encrypted_inode(dir) && f2fs_may_encrypt(inode)) {
+                       err = fscrypt_inherit_context(dir, inode, page, false);
+                       if (err)
+                               goto put_error;
                }
        } else {
-               struct page *ipage;
-               ipage = get_node_page(F2FS_SB(dir->i_sb), inode->i_ino);
-               if (IS_ERR(ipage))
-                       return PTR_ERR(ipage);
-               set_cold_node(inode, ipage);
-               init_dent_inode(name, ipage);
-               f2fs_put_page(ipage, 1);
+               page = get_node_page(F2FS_I_SB(dir), inode->i_ino);
+               if (IS_ERR(page))
+                       return page;
+
+               set_cold_node(inode, page);
        }
-       if (is_inode_flag_set(F2FS_I(inode), FI_INC_LINK)) {
-               inc_nlink(inode);
-               update_inode_page(inode);
+
+       if (new_name)
+               init_dent_inode(new_name, page);
+
+       /*
+        * This file should be checkpointed during fsync.
+        * We lose i_pino from now on.
+        */
+       if (is_inode_flag_set(inode, FI_INC_LINK)) {
+               file_lost_pino(inode);
+               /*
+                * If the tmpfile is linked to an alias through the linkat
+                * path, we should remove this inode from the orphan list.
+                */
+               if (inode->i_nlink == 0)
+                       remove_orphan_inode(F2FS_I_SB(dir), inode->i_ino);
+               f2fs_i_links_write(inode, true);
        }
-       return 0;
+       return page;
+
+put_error:
+       clear_nlink(inode);
+       update_inode(inode, page);
+       f2fs_put_page(page, 1);
+       return ERR_PTR(err);
 }
 
-static void update_parent_metadata(struct inode *dir, struct inode *inode,
+void update_parent_metadata(struct inode *dir, struct inode *inode,
                                                unsigned int current_depth)
 {
-       bool need_dir_update = false;
-
-       if (is_inode_flag_set(F2FS_I(inode), FI_NEW_INODE)) {
-               if (S_ISDIR(inode->i_mode)) {
-                       inc_nlink(dir);
-                       need_dir_update = true;
-               }
-               clear_inode_flag(F2FS_I(inode), FI_NEW_INODE);
-       }
-       dir->i_mtime = dir->i_ctime = CURRENT_TIME;
-       if (F2FS_I(dir)->i_current_depth != current_depth) {
-               F2FS_I(dir)->i_current_depth = current_depth;
-               need_dir_update = true;
+       if (inode && is_inode_flag_set(inode, FI_NEW_INODE)) {
+               if (S_ISDIR(inode->i_mode))
+                       f2fs_i_links_write(dir, true);
+               clear_inode_flag(inode, FI_NEW_INODE);
        }
+       dir->i_mtime = dir->i_ctime = current_time(dir);
+       f2fs_mark_inode_dirty_sync(dir, false);
 
-       if (need_dir_update)
-               update_inode_page(dir);
-       else
-               mark_inode_dirty(dir);
+       if (F2FS_I(dir)->i_current_depth != current_depth)
+               f2fs_i_depth_write(dir, current_depth);
 
-       if (is_inode_flag_set(F2FS_I(inode), FI_INC_LINK))
-               clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
+       if (inode && is_inode_flag_set(inode, FI_INC_LINK))
+               clear_inode_flag(inode, FI_INC_LINK);
 }
 
-static int room_for_filename(struct f2fs_dentry_block *dentry_blk, int slots)
+int room_for_filename(const void *bitmap, int slots, int max_slots)
 {
        int bit_start = 0;
        int zero_start, zero_end;
 next:
-       zero_start = find_next_zero_bit_le(&dentry_blk->dentry_bitmap,
-                                               NR_DENTRY_IN_BLOCK,
-                                               bit_start);
-       if (zero_start >= NR_DENTRY_IN_BLOCK)
-               return NR_DENTRY_IN_BLOCK;
+       zero_start = find_next_zero_bit_le(bitmap, max_slots, bit_start);
+       if (zero_start >= max_slots)
+               return max_slots;
 
-       zero_end = find_next_bit_le(&dentry_blk->dentry_bitmap,
-                                               NR_DENTRY_IN_BLOCK,
-                                               zero_start);
+       zero_end = find_next_bit_le(bitmap, max_slots, zero_start);
        if (zero_end - zero_start >= slots)
                return zero_start;
 
        bit_start = zero_end + 1;
 
-       if (zero_end + 1 >= NR_DENTRY_IN_BLOCK)
-               return NR_DENTRY_IN_BLOCK;
+       if (zero_end + 1 >= max_slots)
+               return max_slots;
        goto next;
 }
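
For example, with slots == 2 and an occupancy bitmap whose first eight bits are 1,1,0,1,0,0,0,1, the first zero run found starts at bit 2 but is only one slot long, so the scan resumes at bit 4 and returns 4, the start of the first free run of at least two slots; if no run is long enough, the function returns max_slots.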
 
-/*
- * Caller should grab and release a mutex by calling mutex_lock_op() and
- * mutex_unlock_op().
- */
-int __f2fs_add_link(struct inode *dir, const struct qstr *name, struct inode *inode)
+void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
+                               const struct qstr *name, f2fs_hash_t name_hash,
+                               unsigned int bit_pos)
+{
+       struct f2fs_dir_entry *de;
+       int slots = GET_DENTRY_SLOTS(name->len);
+       int i;
+
+       de = &d->dentry[bit_pos];
+       de->hash_code = name_hash;
+       de->name_len = cpu_to_le16(name->len);
+       memcpy(d->filename[bit_pos], name->name, name->len);
+       de->ino = cpu_to_le32(ino);
+       set_de_type(de, mode);
+       for (i = 0; i < slots; i++) {
+               __set_bit_le(bit_pos + i, (void *)d->bitmap);
+               /* avoid exposing garbage names to readdir */
+               if (i)
+                       (de + i)->name_len = 0;
+       }
+}
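
For example, assuming the on-disk slot width F2FS_SLOT_LEN is 8 bytes (the macro sits outside this excerpt), a 20-byte name occupies GET_DENTRY_SLOTS(20) == 3 consecutive slots: the first slot's f2fs_dir_entry carries the hash, length, and inode number, while the two continuation slots hold only name bytes and get name_len == 0 so that lookup and readdir skip over them.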
+
+int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name,
+                               const struct qstr *orig_name,
+                               struct inode *inode, nid_t ino, umode_t mode)
 {
        unsigned int bit_pos;
        unsigned int level;
        unsigned int current_depth;
        unsigned long bidx, block;
        f2fs_hash_t dentry_hash;
-       struct f2fs_dir_entry *de;
        unsigned int nbucket, nblock;
-       size_t namelen = name->len;
        struct page *dentry_page = NULL;
        struct f2fs_dentry_block *dentry_blk = NULL;
-       int slots = GET_DENTRY_SLOTS(namelen);
-       int err = 0;
-       int i;
+       struct f2fs_dentry_ptr d;
+       struct page *page = NULL;
+       int slots, err = 0;
 
-       dentry_hash = f2fs_dentry_hash(name->name, name->len);
        level = 0;
+       slots = GET_DENTRY_SLOTS(new_name->len);
+       dentry_hash = f2fs_dentry_hash(new_name);
+
        current_depth = F2FS_I(dir)->i_current_depth;
        if (F2FS_I(dir)->chash == dentry_hash) {
                level = F2FS_I(dir)->clevel;
@@ -435,25 +545,31 @@ int __f2fs_add_link(struct inode *dir, const struct qstr *name, struct inode *in
        }
 
 start:
-       if (current_depth == MAX_DIR_HASH_DEPTH)
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+       if (time_to_inject(F2FS_I_SB(dir), FAULT_DIR_DEPTH))
+               return -ENOSPC;
+#endif
+       if (unlikely(current_depth == MAX_DIR_HASH_DEPTH))
                return -ENOSPC;
 
        /* Increase the depth, if required */
        if (level == current_depth)
                ++current_depth;
 
-       nbucket = dir_buckets(level);
+       nbucket = dir_buckets(level, F2FS_I(dir)->i_dir_level);
        nblock = bucket_blocks(level);
 
-       bidx = dir_block_index(level, (le32_to_cpu(dentry_hash) % nbucket));
+       bidx = dir_block_index(level, F2FS_I(dir)->i_dir_level,
+                               (le32_to_cpu(dentry_hash) % nbucket));
 
        for (block = bidx; block <= (bidx + nblock - 1); block++) {
-               dentry_page = get_new_data_page(dir, block, true);
+               dentry_page = get_new_data_page(dir, NULL, block, true);
                if (IS_ERR(dentry_page))
                        return PTR_ERR(dentry_page);
 
                dentry_blk = kmap(dentry_page);
-               bit_pos = room_for_filename(dentry_blk, slots);
+               bit_pos = room_for_filename(&dentry_blk->dentry_bitmap,
+                                               slots, NR_DENTRY_IN_BLOCK);
                if (bit_pos < NR_DENTRY_IN_BLOCK)
                        goto add_dentry;
 
@@ -465,55 +581,148 @@ start:
        ++level;
        goto start;
 add_dentry:
-       err = init_inode_metadata(inode, dir, name);
-       if (err)
-               goto fail;
+       f2fs_wait_on_page_writeback(dentry_page, DATA, true);
+
+       if (inode) {
+               down_write(&F2FS_I(inode)->i_sem);
+               page = init_inode_metadata(inode, dir, new_name,
+                                               orig_name, NULL);
+               if (IS_ERR(page)) {
+                       err = PTR_ERR(page);
+                       goto fail;
+               }
+               if (f2fs_encrypted_inode(dir))
+                       file_set_enc_name(inode);
+       }
 
-       wait_on_page_writeback(dentry_page);
+       make_dentry_ptr(NULL, &d, (void *)dentry_blk, 1);
+       f2fs_update_dentry(ino, mode, &d, new_name, dentry_hash, bit_pos);
 
-       de = &dentry_blk->dentry[bit_pos];
-       de->hash_code = dentry_hash;
-       de->name_len = cpu_to_le16(namelen);
-       memcpy(dentry_blk->filename[bit_pos], name->name, name->len);
-       de->ino = cpu_to_le32(inode->i_ino);
-       set_de_type(de, inode);
-       for (i = 0; i < slots; i++)
-               test_and_set_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap);
        set_page_dirty(dentry_page);
 
-       update_parent_metadata(dir, inode, current_depth);
+       if (inode) {
+               f2fs_i_pino_write(inode, dir->i_ino);
+               f2fs_put_page(page, 1);
+       }
 
-       /* update parent inode number before releasing dentry page */
-       F2FS_I(inode)->i_pino = dir->i_ino;
+       update_parent_metadata(dir, inode, current_depth);
 fail:
+       if (inode)
+               up_write(&F2FS_I(inode)->i_sem);
+
        kunmap(dentry_page);
        f2fs_put_page(dentry_page, 1);
+
+       return err;
+}
+
+int __f2fs_do_add_link(struct inode *dir, struct fscrypt_name *fname,
+                               struct inode *inode, nid_t ino, umode_t mode)
+{
+       struct qstr new_name;
+       int err = -EAGAIN;
+
+       new_name.name = fname_name(fname);
+       new_name.len = fname_len(fname);
+
+       if (f2fs_has_inline_dentry(dir))
+               err = f2fs_add_inline_entry(dir, &new_name, fname->usr_fname,
+                                                       inode, ino, mode);
+       if (err == -EAGAIN)
+               err = f2fs_add_regular_entry(dir, &new_name, fname->usr_fname,
+                                                       inode, ino, mode);
+
+       f2fs_update_time(F2FS_I_SB(dir), REQ_TIME);
+       return err;
+}
+
+/*
+ * Caller should grab and release a rwsem by calling f2fs_lock_op() and
+ * f2fs_unlock_op().
+ */
+int __f2fs_add_link(struct inode *dir, const struct qstr *name,
+                               struct inode *inode, nid_t ino, umode_t mode)
+{
+       struct fscrypt_name fname;
+       int err;
+
+       err = fscrypt_setup_filename(dir, name, 0, &fname);
+       if (err)
+               return err;
+
+       err = __f2fs_do_add_link(dir, &fname, inode, ino, mode);
+
+       fscrypt_free_filename(&fname);
        return err;
 }
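
The locking contract in the comment above looks like this at a call site (a sketch with error handling elided; sbi stands for F2FS_I_SB(dir)):

        f2fs_lock_op(sbi);
        err = __f2fs_add_link(dir, &dentry->d_name, inode,
                              inode->i_ino, inode->i_mode);
        f2fs_unlock_op(sbi);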
 
+int f2fs_do_tmpfile(struct inode *inode, struct inode *dir)
+{
+       struct page *page;
+       int err = 0;
+
+       down_write(&F2FS_I(inode)->i_sem);
+       page = init_inode_metadata(inode, dir, NULL, NULL, NULL);
+       if (IS_ERR(page)) {
+               err = PTR_ERR(page);
+               goto fail;
+       }
+       f2fs_put_page(page, 1);
+
+       clear_inode_flag(inode, FI_NEW_INODE);
+fail:
+       up_write(&F2FS_I(inode)->i_sem);
+       f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
+       return err;
+}
+
+void f2fs_drop_nlink(struct inode *dir, struct inode *inode)
+{
+       struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
+
+       down_write(&F2FS_I(inode)->i_sem);
+
+       if (S_ISDIR(inode->i_mode))
+               f2fs_i_links_write(dir, false);
+       inode->i_ctime = current_time(inode);
+
+       f2fs_i_links_write(inode, false);
+       if (S_ISDIR(inode->i_mode)) {
+               f2fs_i_links_write(inode, false);
+               f2fs_i_size_write(inode, 0);
+       }
+       up_write(&F2FS_I(inode)->i_sem);
+
+       if (inode->i_nlink == 0)
+               add_orphan_inode(inode);
+       else
+               release_orphan_inode(sbi);
+}
+
 /*
- * It only removes the dentry from the dentry page,corresponding name
+ * It only removes the dentry from the dentry page; the corresponding name
  * entry in name page does not need to be touched during deletion.
  */
 void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
-                                               struct inode *inode)
+                                       struct inode *dir, struct inode *inode)
 {
        struct  f2fs_dentry_block *dentry_blk;
        unsigned int bit_pos;
-       struct address_space *mapping = page->mapping;
-       struct inode *dir = mapping->host;
-       struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
        int slots = GET_DENTRY_SLOTS(le16_to_cpu(dentry->name_len));
-       void *kaddr = page_address(page);
        int i;
 
+       f2fs_update_time(F2FS_I_SB(dir), REQ_TIME);
+
+       if (f2fs_has_inline_dentry(dir))
+               return f2fs_delete_inline_entry(dentry, page, dir, inode);
+
        lock_page(page);
-       wait_on_page_writeback(page);
+       f2fs_wait_on_page_writeback(page, DATA, true);
 
-       dentry_blk = (struct f2fs_dentry_block *)kaddr;
-       bit_pos = dentry - (struct f2fs_dir_entry *)dentry_blk->dentry;
+       dentry_blk = page_address(page);
+       bit_pos = dentry - dentry_blk->dentry;
        for (i = 0; i < slots; i++)
-               test_and_clear_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap);
+               clear_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap);
 
        /* Let's check and deallocate this dentry page */
        bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
@@ -522,34 +731,19 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
        kunmap(page); /* kunmap - pair of f2fs_find_entry */
        set_page_dirty(page);
 
-       dir->i_ctime = dir->i_mtime = CURRENT_TIME;
+       dir->i_ctime = dir->i_mtime = current_time(dir);
+       f2fs_mark_inode_dirty_sync(dir, false);
 
-       if (inode && S_ISDIR(inode->i_mode)) {
-               drop_nlink(dir);
-               update_inode_page(dir);
-       } else {
-               mark_inode_dirty(dir);
-       }
-
-       if (inode) {
-               inode->i_ctime = CURRENT_TIME;
-               drop_nlink(inode);
-               if (S_ISDIR(inode->i_mode)) {
-                       drop_nlink(inode);
-                       i_size_write(inode, 0);
-               }
-               update_inode_page(inode);
-
-               if (inode->i_nlink == 0)
-                       add_orphan_inode(sbi, inode->i_ino);
-       }
+       if (inode)
+               f2fs_drop_nlink(dir, inode);
 
-       if (bit_pos == NR_DENTRY_IN_BLOCK) {
-               truncate_hole(dir, page->index, page->index + 1);
+       if (bit_pos == NR_DENTRY_IN_BLOCK &&
+                       !truncate_hole(dir, page->index, page->index + 1)) {
                clear_page_dirty_for_io(page);
+               ClearPagePrivate(page);
                ClearPageUptodate(page);
-               dec_page_count(sbi, F2FS_DIRTY_DENTS);
-               inode_dec_dirty_dents(dir);
+               inode_dec_dirty_pages(dir);
+               remove_dirty_inode(dir);
        }
        f2fs_put_page(page, 1);
 }
@@ -559,12 +753,14 @@ bool f2fs_empty_dir(struct inode *dir)
        unsigned long bidx;
        struct page *dentry_page;
        unsigned int bit_pos;
-       struct  f2fs_dentry_block *dentry_blk;
+       struct f2fs_dentry_block *dentry_blk;
        unsigned long nblock = dir_blocks(dir);
 
+       if (f2fs_has_inline_dentry(dir))
+               return f2fs_empty_inline_dir(dir);
+
        for (bidx = 0; bidx < nblock; bidx++) {
-               void *kaddr;
-               dentry_page = get_lock_data_page(dir, bidx);
+               dentry_page = get_lock_data_page(dir, bidx, false);
                if (IS_ERR(dentry_page)) {
                        if (PTR_ERR(dentry_page) == -ENOENT)
                                continue;
@@ -572,8 +768,7 @@ bool f2fs_empty_dir(struct inode *dir)
                                return false;
                }
 
-               kaddr = kmap_atomic(dentry_page);
-               dentry_blk = (struct f2fs_dentry_block *)kaddr;
+               dentry_blk = kmap_atomic(dentry_page);
                if (bidx == 0)
                        bit_pos = 2;
                else
@@ -581,7 +776,7 @@ bool f2fs_empty_dir(struct inode *dir)
                bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
                                                NR_DENTRY_IN_BLOCK,
                                                bit_pos);
-               kunmap_atomic(kaddr);
+               kunmap_atomic(dentry_blk);
 
                f2fs_put_page(dentry_page, 1);
 
@@ -591,68 +786,136 @@ bool f2fs_empty_dir(struct inode *dir)
        return true;
 }
 
+int f2fs_fill_dentries(struct file *file, void *dirent, filldir_t filldir,
+               struct f2fs_dentry_ptr *d, unsigned int n, unsigned int bit_pos,
+               struct fscrypt_str *fstr)
+{
+       unsigned int start_bit_pos = bit_pos;
+       unsigned char d_type;
+       struct f2fs_dir_entry *de = NULL;
+       struct fscrypt_str de_name = FSTR_INIT(NULL, 0);
+       int over;
+
+       while (bit_pos < d->max) {
+               d_type = DT_UNKNOWN;
+               bit_pos = find_next_bit_le(d->bitmap, d->max, bit_pos);
+               if (bit_pos >= d->max)
+                       break;
+
+               de = &d->dentry[bit_pos];
+
+               if (de->name_len == 0) {
+                       bit_pos++;
+                       continue;
+               }
+
+               d_type = get_de_type(de);
+
+               de_name.name = d->filename[bit_pos];
+               de_name.len = le16_to_cpu(de->name_len);
+
+               if (f2fs_encrypted_inode(d->inode)) {
+                       int save_len = fstr->len;
+                       int err;
+
+                       err = fscrypt_fname_disk_to_usr(d->inode,
+                                               (u32)de->hash_code, 0,
+                                               &de_name, fstr);
+                       if (err)
+                               return err;
+
+                       de_name = *fstr;
+                       fstr->len = save_len;
+               }
+
+               over = filldir(dirent, de_name.name, de_name.len,
+                                       (n * d->max) + bit_pos,
+                                       le32_to_cpu(de->ino), d_type);
+               if (over) {
+                       file->f_pos += bit_pos - start_bit_pos;
+                       return 1;
+               }
+
+               bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
+       }
+       return 0;
+}
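
The position handed to filldir encodes both the block index and the slot: pos == n * d->max + bit_pos, with d->max equal to NR_DENTRY_IN_BLOCK for regular directory blocks. f2fs_readdir() below inverts this on entry (bit_pos = pos % NR_DENTRY_IN_BLOCK, n = pos / NR_DENTRY_IN_BLOCK), so a directory stream resumes exactly where the previous read stopped.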
+
 static int f2fs_readdir(struct file *file, void *dirent, filldir_t filldir)
 {
        unsigned long pos = file->f_pos;
+       unsigned int bit_pos = 0;
        struct inode *inode = file_inode(file);
        unsigned long npages = dir_blocks(inode);
-       unsigned char *types = NULL;
-       unsigned int bit_pos = 0, start_bit_pos = 0;
-       int over = 0;
        struct f2fs_dentry_block *dentry_blk = NULL;
-       struct f2fs_dir_entry *de = NULL;
        struct page *dentry_page = NULL;
+       struct file_ra_state *ra = &file->f_ra;
+       struct f2fs_dentry_ptr d;
+       struct fscrypt_str fstr = FSTR_INIT(NULL, 0);
        unsigned int n = 0;
-       unsigned char d_type = DT_UNKNOWN;
-       int slots;
+       int err = 0;
+
+       if (f2fs_encrypted_inode(inode)) {
+               err = fscrypt_get_encryption_info(inode);
+               if (err && err != -ENOKEY)
+                       return err;
+
+               err = fscrypt_fname_alloc_buffer(inode, F2FS_NAME_LEN, &fstr);
+               if (err < 0)
+                       return err;
+       }
+
+       if (f2fs_has_inline_dentry(inode)) {
+               err = f2fs_read_inline_dir(file, dirent, filldir, &fstr);
+               goto out;
+       }
 
-       types = f2fs_filetype_table;
        bit_pos = (pos % NR_DENTRY_IN_BLOCK);
        n = (pos / NR_DENTRY_IN_BLOCK);
 
-       for ( ; n < npages; n++) {
-               dentry_page = get_lock_data_page(inode, n);
-               if (IS_ERR(dentry_page))
-                       continue;
+       /* read ahead over multiple directory pages */
+       if (npages - n > 1 && !ra_has_index(ra, n))
+               page_cache_sync_readahead(inode->i_mapping, ra, file, n,
+                               min(npages - n, (pgoff_t)MAX_DIR_RA_PAGES));
+
+       for (; n < npages; n++) {
+               dentry_page = get_lock_data_page(inode, n, false);
+               if (IS_ERR(dentry_page)) {
+                       err = PTR_ERR(dentry_page);
+                       if (err == -ENOENT) {
+                               err = 0;
+                               continue;
+                       } else {
+                               goto out;
+                       }
+               }
 
-               start_bit_pos = bit_pos;
                dentry_blk = kmap(dentry_page);
-               while (bit_pos < NR_DENTRY_IN_BLOCK) {
-                       d_type = DT_UNKNOWN;
-                       bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
-                                                       NR_DENTRY_IN_BLOCK,
-                                                       bit_pos);
-                       if (bit_pos >= NR_DENTRY_IN_BLOCK)
-                               break;
 
-                       de = &dentry_blk->dentry[bit_pos];
-                       if (types && de->file_type < F2FS_FT_MAX)
-                               d_type = types[de->file_type];
+               make_dentry_ptr(inode, &d, (void *)dentry_blk, 1);
 
-                       over = filldir(dirent,
-                                       dentry_blk->filename[bit_pos],
-                                       le16_to_cpu(de->name_len),
-                                       (n * NR_DENTRY_IN_BLOCK) + bit_pos,
-                                       le32_to_cpu(de->ino), d_type);
-                       if (over) {
-                               file->f_pos += bit_pos - start_bit_pos;
-                               goto success;
-                       }
-                       slots = GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
-                       bit_pos += slots;
+               err = f2fs_fill_dentries(file, dirent, filldir, &d, n,
+                                                       bit_pos, &fstr);
+               if (err) {
+                       kunmap(dentry_page);
+                       f2fs_put_page(dentry_page, 1);
+                       break;
                }
+
                bit_pos = 0;
                file->f_pos = (n + 1) * NR_DENTRY_IN_BLOCK;
                kunmap(dentry_page);
                f2fs_put_page(dentry_page, 1);
-               dentry_page = NULL;
-       }
-success:
-       if (dentry_page && !IS_ERR(dentry_page)) {
-               kunmap(dentry_page);
-               f2fs_put_page(dentry_page, 1);
        }
+out:
+       fscrypt_fname_free_buffer(&fstr);
+       return err < 0 ? err : 0;
+}
 
+static int f2fs_dir_open(struct inode *inode, struct file *filp)
+{
+       if (f2fs_encrypted_inode(inode))
+               return fscrypt_get_encryption_info(inode) ? -EACCES : 0;
        return 0;
 }
 
@@ -661,5 +924,9 @@ const struct file_operations f2fs_dir_operations = {
        .read           = generic_read_dir,
        .readdir        = f2fs_readdir,
        .fsync          = f2fs_sync_file,
+       .open           = f2fs_dir_open,
        .unlocked_ioctl = f2fs_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl   = f2fs_compat_ioctl,
+#endif
 };
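For orientation: the readdir path above delegates the per-entry work to f2fs_fill_dentries(), whose body lives elsewhere in dir.c. Below is a simplified sketch of that loop, using the d, fstr, n, bit_pos, dirent and filldir parameters seen in the call above; error handling and the name-hash plumbing are trimmed, so treat it as an illustration rather than the exact body:

	while (bit_pos < d->max) {
		struct f2fs_dir_entry *de;
		struct fscrypt_str de_name;
		unsigned char d_type = DT_UNKNOWN;

		bit_pos = find_next_bit_le(d->bitmap, d->max, bit_pos);
		if (bit_pos >= d->max)
			break;

		de = &d->dentry[bit_pos];
		if (de->file_type < F2FS_FT_MAX)
			d_type = f2fs_filetype_table[de->file_type];
		de_name.name = d->filename[bit_pos];
		de_name.len = le16_to_cpu(de->name_len);

		/* encrypted directories keep ciphertext names on disk */
		if (f2fs_encrypted_inode(d->inode)) {
			if (fscrypt_fname_disk_to_usr(d->inode, 0, 0,
							&de_name, fstr) < 0)
				break;
			de_name = *fstr;
		}

		if (filldir(dirent, de_name.name, de_name.len,
				n * d->max + bit_pos,
				le32_to_cpu(de->ino), d_type))
			break;

		bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
	}

The decode step is why f2fs_readdir() allocates fstr up front for encrypted directories and frees it at out:.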
diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
new file mode 100644 (file)
index 0000000..4db44da
--- /dev/null
@@ -0,0 +1,749 @@
+/*
+ * f2fs extent cache support
+ *
+ * Copyright (c) 2015 Motorola Mobility
+ * Copyright (c) 2015 Samsung Electronics
+ * Authors: Jaegeuk Kim <jaegeuk@kernel.org>
+ *          Chao Yu <chao2.yu@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/fs.h>
+#include <linux/f2fs_fs.h>
+
+#include "f2fs.h"
+#include "node.h"
+#include <trace/events/f2fs.h>
+
+static struct kmem_cache *extent_tree_slab;
+static struct kmem_cache *extent_node_slab;
+
+static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
+                               struct extent_tree *et, struct extent_info *ei,
+                               struct rb_node *parent, struct rb_node **p)
+{
+       struct extent_node *en;
+
+       en = kmem_cache_alloc(extent_node_slab, GFP_ATOMIC);
+       if (!en)
+               return NULL;
+
+       en->ei = *ei;
+       INIT_LIST_HEAD(&en->list);
+       en->et = et;
+
+       rb_link_node(&en->rb_node, parent, p);
+       rb_insert_color(&en->rb_node, &et->root);
+       atomic_inc(&et->node_cnt);
+       atomic_inc(&sbi->total_ext_node);
+       return en;
+}
+
+static void __detach_extent_node(struct f2fs_sb_info *sbi,
+                               struct extent_tree *et, struct extent_node *en)
+{
+       rb_erase(&en->rb_node, &et->root);
+       atomic_dec(&et->node_cnt);
+       atomic_dec(&sbi->total_ext_node);
+
+       if (et->cached_en == en)
+               et->cached_en = NULL;
+       kmem_cache_free(extent_node_slab, en);
+}
+
+/*
+ * Flow to release an extent_node:
+ * 1. list_del_init
+ * 2. __detach_extent_node
+ * 3. kmem_cache_free.
+ */
+static void __release_extent_node(struct f2fs_sb_info *sbi,
+                       struct extent_tree *et, struct extent_node *en)
+{
+       spin_lock(&sbi->extent_lock);
+       f2fs_bug_on(sbi, list_empty(&en->list));
+       list_del_init(&en->list);
+       spin_unlock(&sbi->extent_lock);
+
+       __detach_extent_node(sbi, et, en);
+}
+
+static struct extent_tree *__grab_extent_tree(struct inode *inode)
+{
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       struct extent_tree *et;
+       nid_t ino = inode->i_ino;
+
+       down_write(&sbi->extent_tree_lock);
+       et = radix_tree_lookup(&sbi->extent_tree_root, ino);
+       if (!et) {
+               et = f2fs_kmem_cache_alloc(extent_tree_slab, GFP_NOFS);
+               f2fs_radix_tree_insert(&sbi->extent_tree_root, ino, et);
+               memset(et, 0, sizeof(struct extent_tree));
+               et->ino = ino;
+               et->root = RB_ROOT;
+               et->cached_en = NULL;
+               rwlock_init(&et->lock);
+               INIT_LIST_HEAD(&et->list);
+               atomic_set(&et->node_cnt, 0);
+               atomic_inc(&sbi->total_ext_tree);
+       } else {
+               atomic_dec(&sbi->total_zombie_tree);
+               list_del_init(&et->list);
+       }
+       up_write(&sbi->extent_tree_lock);
+
+       /* never died until evict_inode */
+       F2FS_I(inode)->extent_tree = et;
+
+       return et;
+}
+
+static struct extent_node *__lookup_extent_tree(struct f2fs_sb_info *sbi,
+                               struct extent_tree *et, unsigned int fofs)
+{
+       struct rb_node *node = et->root.rb_node;
+       struct extent_node *en = et->cached_en;
+
+       if (en) {
+               struct extent_info *cei = &en->ei;
+
+               if (cei->fofs <= fofs && cei->fofs + cei->len > fofs) {
+                       stat_inc_cached_node_hit(sbi);
+                       return en;
+               }
+       }
+
+       while (node) {
+               en = rb_entry(node, struct extent_node, rb_node);
+
+               if (fofs < en->ei.fofs) {
+                       node = node->rb_left;
+               } else if (fofs >= en->ei.fofs + en->ei.len) {
+                       node = node->rb_right;
+               } else {
+                       stat_inc_rbtree_node_hit(sbi);
+                       return en;
+               }
+       }
+       return NULL;
+}
+
+static struct extent_node *__init_extent_tree(struct f2fs_sb_info *sbi,
+                               struct extent_tree *et, struct extent_info *ei)
+{
+       struct rb_node **p = &et->root.rb_node;
+       struct extent_node *en;
+
+       en = __attach_extent_node(sbi, et, ei, NULL, p);
+       if (!en)
+               return NULL;
+
+       et->largest = en->ei;
+       et->cached_en = en;
+       return en;
+}
+
+static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
+                                       struct extent_tree *et)
+{
+       struct rb_node *node, *next;
+       struct extent_node *en;
+       unsigned int count = atomic_read(&et->node_cnt);
+
+       node = rb_first(&et->root);
+       while (node) {
+               next = rb_next(node);
+               en = rb_entry(node, struct extent_node, rb_node);
+               __release_extent_node(sbi, et, en);
+               node = next;
+       }
+
+       return count - atomic_read(&et->node_cnt);
+}
+
+static void __drop_largest_extent(struct inode *inode,
+                                       pgoff_t fofs, unsigned int len)
+{
+       struct extent_info *largest = &F2FS_I(inode)->extent_tree->largest;
+
+       if (fofs < largest->fofs + largest->len && fofs + len > largest->fofs) {
+               largest->len = 0;
+               f2fs_mark_inode_dirty_sync(inode, true);
+       }
+}
+
+/* return true if the inode page is changed */
+bool f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext)
+{
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       struct extent_tree *et;
+       struct extent_node *en;
+       struct extent_info ei;
+
+       if (!f2fs_may_extent_tree(inode)) {
+               /* drop largest extent */
+               if (i_ext && i_ext->len) {
+                       i_ext->len = 0;
+                       return true;
+               }
+               return false;
+       }
+
+       et = __grab_extent_tree(inode);
+
+       if (!i_ext || !i_ext->len)
+               return false;
+
+       get_extent_info(&ei, i_ext);
+
+       write_lock(&et->lock);
+       if (atomic_read(&et->node_cnt))
+               goto out;
+
+       en = __init_extent_tree(sbi, et, &ei);
+       if (en) {
+               spin_lock(&sbi->extent_lock);
+               list_add_tail(&en->list, &sbi->extent_list);
+               spin_unlock(&sbi->extent_lock);
+       }
+out:
+       write_unlock(&et->lock);
+       return false;
+}
+
+static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
+                                                       struct extent_info *ei)
+{
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       struct extent_tree *et = F2FS_I(inode)->extent_tree;
+       struct extent_node *en;
+       bool ret = false;
+
+       f2fs_bug_on(sbi, !et);
+
+       trace_f2fs_lookup_extent_tree_start(inode, pgofs);
+
+       read_lock(&et->lock);
+
+       if (et->largest.fofs <= pgofs &&
+                       et->largest.fofs + et->largest.len > pgofs) {
+               *ei = et->largest;
+               ret = true;
+               stat_inc_largest_node_hit(sbi);
+               goto out;
+       }
+
+       en = __lookup_extent_tree(sbi, et, pgofs);
+       if (en) {
+               *ei = en->ei;
+               spin_lock(&sbi->extent_lock);
+               if (!list_empty(&en->list)) {
+                       list_move_tail(&en->list, &sbi->extent_list);
+                       et->cached_en = en;
+               }
+               spin_unlock(&sbi->extent_lock);
+               ret = true;
+       }
+out:
+       stat_inc_total_hit(sbi);
+       read_unlock(&et->lock);
+
+       trace_f2fs_lookup_extent_tree_end(inode, pgofs, ei);
+       return ret;
+}
+
+/*
+ * Look up the extent at @fofs.  On a hit, return the extent node;
+ * on a miss, return NULL and set
+ * @prev_ex: extent before fofs
+ * @next_ex: extent after fofs
+ * @insert_p: insert point for a new extent at fofs
+ * in order to simplify the insertion afterwards.
+ * The tree must stay unchanged between lookup and insertion.
+ */
+static struct extent_node *__lookup_extent_tree_ret(struct extent_tree *et,
+                               unsigned int fofs,
+                               struct extent_node **prev_ex,
+                               struct extent_node **next_ex,
+                               struct rb_node ***insert_p,
+                               struct rb_node **insert_parent)
+{
+       struct rb_node **pnode = &et->root.rb_node;
+       struct rb_node *parent = NULL, *tmp_node;
+       struct extent_node *en = et->cached_en;
+
+       *insert_p = NULL;
+       *insert_parent = NULL;
+       *prev_ex = NULL;
+       *next_ex = NULL;
+
+       if (RB_EMPTY_ROOT(&et->root))
+               return NULL;
+
+       if (en) {
+               struct extent_info *cei = &en->ei;
+
+               if (cei->fofs <= fofs && cei->fofs + cei->len > fofs)
+                       goto lookup_neighbors;
+       }
+
+       while (*pnode) {
+               parent = *pnode;
+               en = rb_entry(*pnode, struct extent_node, rb_node);
+
+               if (fofs < en->ei.fofs)
+                       pnode = &(*pnode)->rb_left;
+               else if (fofs >= en->ei.fofs + en->ei.len)
+                       pnode = &(*pnode)->rb_right;
+               else
+                       goto lookup_neighbors;
+       }
+
+       *insert_p = pnode;
+       *insert_parent = parent;
+
+       en = rb_entry(parent, struct extent_node, rb_node);
+       tmp_node = parent;
+       if (parent && fofs > en->ei.fofs)
+               tmp_node = rb_next(parent);
+       *next_ex = tmp_node ?
+               rb_entry(tmp_node, struct extent_node, rb_node) : NULL;
+
+       tmp_node = parent;
+       if (parent && fofs < en->ei.fofs)
+               tmp_node = rb_prev(parent);
+       *prev_ex = tmp_node ?
+               rb_entry(tmp_node, struct extent_node, rb_node) : NULL;
+       return NULL;
+
+lookup_neighbors:
+       if (fofs == en->ei.fofs) {
+               /* lookup prev node for merging backward later */
+               tmp_node = rb_prev(&en->rb_node);
+               *prev_ex = tmp_node ?
+                       rb_entry(tmp_node, struct extent_node, rb_node) : NULL;
+       }
+       if (fofs == en->ei.fofs + en->ei.len - 1) {
+               /* lookup next node for merging frontward later */
+               tmp_node = rb_next(&en->rb_node);
+               *next_ex = tmp_node ?
+                       rb_entry(tmp_node, struct extent_node, rb_node) : NULL;
+       }
+       return en;
+}
+
+static struct extent_node *__try_merge_extent_node(struct inode *inode,
+                               struct extent_tree *et, struct extent_info *ei,
+                               struct extent_node *prev_ex,
+                               struct extent_node *next_ex)
+{
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       struct extent_node *en = NULL;
+
+       if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei)) {
+               prev_ex->ei.len += ei->len;
+               ei = &prev_ex->ei;
+               en = prev_ex;
+       }
+
+       if (next_ex && __is_front_mergeable(ei, &next_ex->ei)) {
+               if (en)
+                       __release_extent_node(sbi, et, prev_ex);
+               next_ex->ei.fofs = ei->fofs;
+               next_ex->ei.blk = ei->blk;
+               next_ex->ei.len += ei->len;
+               en = next_ex;
+       }
+
+       if (!en)
+               return NULL;
+
+       __try_update_largest_extent(inode, et, en);
+
+       spin_lock(&sbi->extent_lock);
+       if (!list_empty(&en->list)) {
+               list_move_tail(&en->list, &sbi->extent_list);
+               et->cached_en = en;
+       }
+       spin_unlock(&sbi->extent_lock);
+       return en;
+}
+
+static struct extent_node *__insert_extent_tree(struct inode *inode,
+                               struct extent_tree *et, struct extent_info *ei,
+                               struct rb_node **insert_p,
+                               struct rb_node *insert_parent)
+{
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       struct rb_node **p = &et->root.rb_node;
+       struct rb_node *parent = NULL;
+       struct extent_node *en = NULL;
+
+       if (insert_p && insert_parent) {
+               parent = insert_parent;
+               p = insert_p;
+               goto do_insert;
+       }
+
+       while (*p) {
+               parent = *p;
+               en = rb_entry(parent, struct extent_node, rb_node);
+
+               if (ei->fofs < en->ei.fofs)
+                       p = &(*p)->rb_left;
+               else if (ei->fofs >= en->ei.fofs + en->ei.len)
+                       p = &(*p)->rb_right;
+               else
+                       f2fs_bug_on(sbi, 1);
+       }
+do_insert:
+       en = __attach_extent_node(sbi, et, ei, parent, p);
+       if (!en)
+               return NULL;
+
+       __try_update_largest_extent(inode, et, en);
+
+       /* update in global extent list */
+       spin_lock(&sbi->extent_lock);
+       list_add_tail(&en->list, &sbi->extent_list);
+       et->cached_en = en;
+       spin_unlock(&sbi->extent_lock);
+       return en;
+}
+
+static unsigned int f2fs_update_extent_tree_range(struct inode *inode,
+                               pgoff_t fofs, block_t blkaddr, unsigned int len)
+{
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       struct extent_tree *et = F2FS_I(inode)->extent_tree;
+       struct extent_node *en = NULL, *en1 = NULL;
+       struct extent_node *prev_en = NULL, *next_en = NULL;
+       struct extent_info ei, dei, prev;
+       struct rb_node **insert_p = NULL, *insert_parent = NULL;
+       unsigned int end = fofs + len;
+       unsigned int pos = (unsigned int)fofs;
+
+       if (!et)
+               return false;
+
+       trace_f2fs_update_extent_tree_range(inode, fofs, blkaddr, len);
+
+       write_lock(&et->lock);
+
+       if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
+               write_unlock(&et->lock);
+               return false;
+       }
+
+       prev = et->largest;
+       dei.len = 0;
+
+       /*
+        * drop largest extent before lookup, in case it's already
+        * been shrunk from extent tree
+        */
+       __drop_largest_extent(inode, fofs, len);
+
+       /* 1. lookup first extent node in range [fofs, fofs + len - 1] */
+       en = __lookup_extent_tree_ret(et, fofs, &prev_en, &next_en,
+                                       &insert_p, &insert_parent);
+       if (!en)
+               en = next_en;
+
+       /* 2. invalidate all extent nodes in range [fofs, fofs + len - 1] */
+       while (en && en->ei.fofs < end) {
+               unsigned int org_end;
+               int parts = 0;  /* # of parts current extent split into */
+
+               next_en = en1 = NULL;
+
+               dei = en->ei;
+               org_end = dei.fofs + dei.len;
+               f2fs_bug_on(sbi, pos >= org_end);
+
+               if (pos > dei.fofs && pos - dei.fofs >= F2FS_MIN_EXTENT_LEN) {
+                       en->ei.len = pos - en->ei.fofs;
+                       prev_en = en;
+                       parts = 1;
+               }
+
+               if (end < org_end && org_end - end >= F2FS_MIN_EXTENT_LEN) {
+                       if (parts) {
+                               set_extent_info(&ei, end,
+                                               end - dei.fofs + dei.blk,
+                                               org_end - end);
+                               en1 = __insert_extent_tree(inode, et, &ei,
+                                                       NULL, NULL);
+                               next_en = en1;
+                       } else {
+                               en->ei.fofs = end;
+                               en->ei.blk += end - dei.fofs;
+                               en->ei.len -= end - dei.fofs;
+                               next_en = en;
+                       }
+                       parts++;
+               }
+
+               if (!next_en) {
+                       struct rb_node *node = rb_next(&en->rb_node);
+
+                       next_en = node ?
+                               rb_entry(node, struct extent_node, rb_node)
+                               : NULL;
+               }
+
+               if (parts)
+                       __try_update_largest_extent(inode, et, en);
+               else
+                       __release_extent_node(sbi, et, en);
+
+               /*
+                * if original extent is split into zero or two parts, extent
+                * tree has been altered by deletion or insertion, therefore
+                * invalidate the pointers into the tree.
+                */
+               if (parts != 1) {
+                       insert_p = NULL;
+                       insert_parent = NULL;
+               }
+               en = next_en;
+       }
+
+       /* 3. update extent in extent cache */
+       if (blkaddr) {
+               set_extent_info(&ei, fofs, blkaddr, len);
+               if (!__try_merge_extent_node(inode, et, &ei, prev_en, next_en))
+                       __insert_extent_tree(inode, et, &ei,
+                                               insert_p, insert_parent);
+
+               /* give up extent_cache, if split and small updates happen */
+               if (dei.len >= 1 &&
+                               prev.len < F2FS_MIN_EXTENT_LEN &&
+                               et->largest.len < F2FS_MIN_EXTENT_LEN) {
+                       __drop_largest_extent(inode, 0, UINT_MAX);
+                       set_inode_flag(inode, FI_NO_EXTENT);
+               }
+       }
+
+       if (is_inode_flag_set(inode, FI_NO_EXTENT))
+               __free_extent_tree(sbi, et);
+
+       write_unlock(&et->lock);
+
+       return !__is_extent_same(&prev, &et->largest);
+}
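To make the step-2 arithmetic above concrete, a worked example with hypothetical numbers: the tree holds a single extent {fofs=0, blk=100, len=12}, and an update arrives for pos=4, len=2, so end=6. The left survivor is trimmed in place to {0, 100, 4} (parts=1), the right remainder is re-inserted as {6, 106, 6} (parts=2), and step 3 then slots the new extent {4, blkaddr, 2} between them. A standalone model of the two remainder computations:

	/* Mirrors the split arithmetic above (hypothetical values). */
	struct ext { unsigned int fofs, blk, len; };

	static void split(struct ext dei, unsigned int pos, unsigned int end,
				struct ext *left, struct ext *right)
	{
		left->fofs = dei.fofs;			/* {0, 100, 4} */
		left->blk = dei.blk;
		left->len = pos - dei.fofs;

		right->fofs = end;			/* {6, 106, 6} */
		right->blk = dei.blk + (end - dei.fofs);
		right->len = dei.fofs + dei.len - end;
	}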
+
+unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
+{
+       struct extent_tree *et, *next;
+       struct extent_node *en;
+       unsigned int node_cnt = 0, tree_cnt = 0;
+       int remained;
+
+       if (!test_opt(sbi, EXTENT_CACHE))
+               return 0;
+
+       if (!atomic_read(&sbi->total_zombie_tree))
+               goto free_node;
+
+       if (!down_write_trylock(&sbi->extent_tree_lock))
+               goto out;
+
+       /* 1. remove unreferenced extent tree */
+       list_for_each_entry_safe(et, next, &sbi->zombie_list, list) {
+               if (atomic_read(&et->node_cnt)) {
+                       write_lock(&et->lock);
+                       node_cnt += __free_extent_tree(sbi, et);
+                       write_unlock(&et->lock);
+               }
+               f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
+               list_del_init(&et->list);
+               radix_tree_delete(&sbi->extent_tree_root, et->ino);
+               kmem_cache_free(extent_tree_slab, et);
+               atomic_dec(&sbi->total_ext_tree);
+               atomic_dec(&sbi->total_zombie_tree);
+               tree_cnt++;
+
+               if (node_cnt + tree_cnt >= nr_shrink)
+                       goto unlock_out;
+               cond_resched();
+       }
+       up_write(&sbi->extent_tree_lock);
+
+free_node:
+       /* 2. remove LRU extent entries */
+       if (!down_write_trylock(&sbi->extent_tree_lock))
+               goto out;
+
+       remained = nr_shrink - (node_cnt + tree_cnt);
+
+       spin_lock(&sbi->extent_lock);
+       for (; remained > 0; remained--) {
+               if (list_empty(&sbi->extent_list))
+                       break;
+               en = list_first_entry(&sbi->extent_list,
+                                       struct extent_node, list);
+               et = en->et;
+               if (!write_trylock(&et->lock)) {
+                       /* refresh this extent node's position in extent list */
+                       list_move_tail(&en->list, &sbi->extent_list);
+                       continue;
+               }
+
+               list_del_init(&en->list);
+               spin_unlock(&sbi->extent_lock);
+
+               __detach_extent_node(sbi, et, en);
+
+               write_unlock(&et->lock);
+               node_cnt++;
+               spin_lock(&sbi->extent_lock);
+       }
+       spin_unlock(&sbi->extent_lock);
+
+unlock_out:
+       up_write(&sbi->extent_tree_lock);
+out:
+       trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt);
+
+       return node_cnt + tree_cnt;
+}
+
+unsigned int f2fs_destroy_extent_node(struct inode *inode)
+{
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       struct extent_tree *et = F2FS_I(inode)->extent_tree;
+       unsigned int node_cnt = 0;
+
+       if (!et || !atomic_read(&et->node_cnt))
+               return 0;
+
+       write_lock(&et->lock);
+       node_cnt = __free_extent_tree(sbi, et);
+       write_unlock(&et->lock);
+
+       return node_cnt;
+}
+
+void f2fs_drop_extent_tree(struct inode *inode)
+{
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       struct extent_tree *et = F2FS_I(inode)->extent_tree;
+
+       set_inode_flag(inode, FI_NO_EXTENT);
+
+       write_lock(&et->lock);
+       __free_extent_tree(sbi, et);
+       __drop_largest_extent(inode, 0, UINT_MAX);
+       write_unlock(&et->lock);
+}
+
+void f2fs_destroy_extent_tree(struct inode *inode)
+{
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       struct extent_tree *et = F2FS_I(inode)->extent_tree;
+       unsigned int node_cnt = 0;
+
+       if (!et)
+               return;
+
+       if (inode->i_nlink && !is_bad_inode(inode) &&
+                                       atomic_read(&et->node_cnt)) {
+               down_write(&sbi->extent_tree_lock);
+               list_add_tail(&et->list, &sbi->zombie_list);
+               atomic_inc(&sbi->total_zombie_tree);
+               up_write(&sbi->extent_tree_lock);
+               return;
+       }
+
+       /* free all extent info belonging to this extent tree */
+       node_cnt = f2fs_destroy_extent_node(inode);
+
+       /* delete extent tree entry in radix tree */
+       down_write(&sbi->extent_tree_lock);
+       f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
+       radix_tree_delete(&sbi->extent_tree_root, inode->i_ino);
+       kmem_cache_free(extent_tree_slab, et);
+       atomic_dec(&sbi->total_ext_tree);
+       up_write(&sbi->extent_tree_lock);
+
+       F2FS_I(inode)->extent_tree = NULL;
+
+       trace_f2fs_destroy_extent_tree(inode, node_cnt);
+}
+
+bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
+                                       struct extent_info *ei)
+{
+       if (!f2fs_may_extent_tree(inode))
+               return false;
+
+       return f2fs_lookup_extent_tree(inode, pgofs, ei);
+}
+
+void f2fs_update_extent_cache(struct dnode_of_data *dn)
+{
+       pgoff_t fofs;
+       block_t blkaddr;
+
+       if (!f2fs_may_extent_tree(dn->inode))
+               return;
+
+       if (dn->data_blkaddr == NEW_ADDR)
+               blkaddr = NULL_ADDR;
+       else
+               blkaddr = dn->data_blkaddr;
+
+       fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
+                                                               dn->ofs_in_node;
+       f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, 1);
+}
+
+void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
+                               pgoff_t fofs, block_t blkaddr, unsigned int len)
+{
+       if (!f2fs_may_extent_tree(dn->inode))
+               return;
+
+       f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, len);
+}
+
+void init_extent_cache_info(struct f2fs_sb_info *sbi)
+{
+       INIT_RADIX_TREE(&sbi->extent_tree_root, GFP_NOIO);
+       init_rwsem(&sbi->extent_tree_lock);
+       INIT_LIST_HEAD(&sbi->extent_list);
+       spin_lock_init(&sbi->extent_lock);
+       atomic_set(&sbi->total_ext_tree, 0);
+       INIT_LIST_HEAD(&sbi->zombie_list);
+       atomic_set(&sbi->total_zombie_tree, 0);
+       atomic_set(&sbi->total_ext_node, 0);
+}
+
+int __init create_extent_cache(void)
+{
+       extent_tree_slab = f2fs_kmem_cache_create("f2fs_extent_tree",
+                       sizeof(struct extent_tree));
+       if (!extent_tree_slab)
+               return -ENOMEM;
+       extent_node_slab = f2fs_kmem_cache_create("f2fs_extent_node",
+                       sizeof(struct extent_node));
+       if (!extent_node_slab) {
+               kmem_cache_destroy(extent_tree_slab);
+               return -ENOMEM;
+       }
+       return 0;
+}
+
+void destroy_extent_cache(void)
+{
+       kmem_cache_destroy(extent_node_slab);
+       kmem_cache_destroy(extent_tree_slab);
+}
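For reference, the main consumer of this file is the data path in data.c: it consults the cache before walking the node tree and feeds freshly resolved mappings back in. A minimal sketch of that pattern, loosely following f2fs_map_blocks() (map, maxblocks and dn here are assumptions taken from that caller):

	struct extent_info ei;

	if (f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
		/* hit: translate the file offset within the cached extent */
		map->m_pblk = ei.blk + pgofs - ei.fofs;
		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
		map->m_flags = F2FS_MAP_MAPPED;
		goto out;
	}

	/*
	 * miss: resolve the address by walking the node tree via
	 * get_dnode_of_data(), then publish the mapping so the next
	 * lookup can hit:
	 */
	f2fs_update_extent_cache(&dn);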
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 20aab02f2a427181a4240ad99ad2b8200a235509..fc88b0ebd0d76536908fdc40c5b92174869a5263 100644 (file)
 #include <linux/slab.h>
 #include <linux/crc32.h>
 #include <linux/magic.h>
+#include <linux/kobject.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/fscrypto.h>
+#include <crypto/hash.h>
+
+#ifdef CONFIG_F2FS_CHECK_FS
+#define f2fs_bug_on(sbi, condition)    BUG_ON(condition)
+#define f2fs_down_write(x, y)  down_write(x)
+#else
+#define f2fs_bug_on(sbi, condition)                                    \
+       do {                                                            \
+               if (unlikely(condition)) {                              \
+                       WARN_ON(1);                                     \
+                       set_sbi_flag(sbi, SBI_NEED_FSCK);               \
+               }                                                       \
+       } while (0)
+#endif
+
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+enum {
+       FAULT_KMALLOC,
+       FAULT_PAGE_ALLOC,
+       FAULT_ALLOC_NID,
+       FAULT_ORPHAN,
+       FAULT_BLOCK,
+       FAULT_DIR_DEPTH,
+       FAULT_EVICT_INODE,
+       FAULT_IO,
+       FAULT_CHECKPOINT,
+       FAULT_MAX,
+};
+
+struct f2fs_fault_info {
+       atomic_t inject_ops;
+       unsigned int inject_rate;
+       unsigned int inject_type;
+};
+
+extern char *fault_name[FAULT_MAX];
+#define IS_FAULT_SET(fi, type) (fi->inject_type & (1 << (type)))
+#endif
 
 /*
  * For mount options
 #define F2FS_MOUNT_XATTR_USER          0x00000010
 #define F2FS_MOUNT_POSIX_ACL           0x00000020
 #define F2FS_MOUNT_DISABLE_EXT_IDENTIFY        0x00000040
+#define F2FS_MOUNT_INLINE_XATTR                0x00000080
+#define F2FS_MOUNT_INLINE_DATA         0x00000100
+#define F2FS_MOUNT_INLINE_DENTRY       0x00000200
+#define F2FS_MOUNT_FLUSH_MERGE         0x00000400
+#define F2FS_MOUNT_NOBARRIER           0x00000800
+#define F2FS_MOUNT_FASTBOOT            0x00001000
+#define F2FS_MOUNT_EXTENT_CACHE                0x00002000
+#define F2FS_MOUNT_FORCE_FG_GC         0x00004000
+#define F2FS_MOUNT_DATA_FLUSH          0x00008000
+#define F2FS_MOUNT_FAULT_INJECTION     0x00010000
+#define F2FS_MOUNT_ADAPTIVE            0x00020000
+#define F2FS_MOUNT_LFS                 0x00040000
 
 #define clear_opt(sbi, option) (sbi->mount_opt.opt &= ~F2FS_MOUNT_##option)
 #define set_opt(sbi, option)   (sbi->mount_opt.opt |= F2FS_MOUNT_##option)
                typecheck(unsigned long long, b) &&                     \
                ((long long)((a) - (b)) > 0))
 
-typedef u64 block_t;
+typedef u32 block_t;   /*
+                        * should not change u32, since it is the on-disk block
+                        * address format, __le32.
+                        */
 typedef u32 nid_t;
 
 struct f2fs_mount_info {
        unsigned int    opt;
 };
 
-static inline __u32 f2fs_crc32(void *buff, size_t len)
+#define F2FS_FEATURE_ENCRYPT   0x0001
+#define F2FS_FEATURE_BLKZONED  0x0002
+
+#define F2FS_HAS_FEATURE(sb, mask)                                     \
+       ((F2FS_SB(sb)->raw_super->feature & cpu_to_le32(mask)) != 0)
+#define F2FS_SET_FEATURE(sb, mask)                                     \
+       F2FS_SB(sb)->raw_super->feature |= cpu_to_le32(mask)
+#define F2FS_CLEAR_FEATURE(sb, mask)                                   \
+       F2FS_SB(sb)->raw_super->feature &= ~cpu_to_le32(mask)
+
+static inline void inode_lock(struct inode *inode)
+{
+       mutex_lock(&inode->i_mutex);
+}
+
+static inline void inode_unlock(struct inode *inode)
+{
+       mutex_unlock(&inode->i_mutex);
+}
+
+/**
+ * wq_has_sleeper - check if there are any waiting processes
+ * @wq: wait queue head
+ *
+ * Returns true if wq has waiting processes
+ *
+ * Please refer to the comment for waitqueue_active.
+ */
+static inline bool wq_has_sleeper(wait_queue_head_t *wq)
+{
+       /*
+        * We need to be sure we are in sync with the
+        * add_wait_queue modifications to the wait queue.
+        *
+        * This memory barrier should be paired with one on the
+        * waiting side.
+        */
+       smp_mb();
+       return waitqueue_active(wq);
+}
+
+static inline struct inode *d_inode(const struct dentry *dentry)
+{
+       return dentry->d_inode;
+}
+
+static inline struct dentry *file_dentry(const struct file *file)
+{
+       return file->f_path.dentry;
+}
+
+static inline void inode_nohighmem(struct inode *inode)
 {
-       return crc32_le(F2FS_SUPER_MAGIC, buff, len);
+       mapping_set_gfp_mask(inode->i_mapping, GFP_USER);
 }
 
-static inline bool f2fs_crc_valid(__u32 blk_crc, void *buff, size_t buff_size)
+/**
+ * current_time - Return FS time
+ * @inode: inode.
+ *
+ * Return the current time truncated to the time granularity supported by
+ * the fs.
+ *
+ * Note that inode and inode->sb cannot be NULL.
+ * Otherwise, the function warns and returns time without truncation.
+ */
+static inline struct timespec current_time(struct inode *inode)
 {
-       return f2fs_crc32(buff, buff_size) == blk_crc;
+       struct timespec now = current_kernel_time();
+
+       if (unlikely(!inode->i_sb)) {
+               WARN(1, "current_time() called with uninitialized super_block in the inode");
+               return now;
+       }
+
+       return timespec_trunc(now, inode->i_sb->s_time_gran);
 }
 
 /*
@@ -62,147 +189,413 @@ enum {
        SIT_BITMAP
 };
 
-/* for the list of orphan inodes */
-struct orphan_inode_entry {
+enum {
+       CP_UMOUNT,
+       CP_FASTBOOT,
+       CP_SYNC,
+       CP_RECOVERY,
+       CP_DISCARD,
+};
+
+#define DEF_BATCHED_TRIM_SECTIONS      2
+#define BATCHED_TRIM_SEGMENTS(sbi)     \
+               (SM_I(sbi)->trim_sections * (sbi)->segs_per_sec)
+#define BATCHED_TRIM_BLOCKS(sbi)       \
+               (BATCHED_TRIM_SEGMENTS(sbi) << (sbi)->log_blocks_per_seg)
+#define DEF_CP_INTERVAL                        60      /* 60 secs */
+#define DEF_IDLE_INTERVAL              120     /* 120 secs */
+
+struct cp_control {
+       int reason;
+       __u64 trim_start;
+       __u64 trim_end;
+       __u64 trim_minlen;
+       __u64 trimmed;
+};
+
+/*
+ * For CP/NAT/SIT/SSA readahead
+ */
+enum {
+       META_CP,
+       META_NAT,
+       META_SIT,
+       META_SSA,
+       META_POR,
+};
+
+/* for the list of ino */
+enum {
+       ORPHAN_INO,             /* for orphan ino list */
+       APPEND_INO,             /* for append ino list */
+       UPDATE_INO,             /* for update ino list */
+       MAX_INO_ENTRY,          /* max. list */
+};
+
+struct ino_entry {
        struct list_head list;  /* list head */
        nid_t ino;              /* inode number */
 };
 
-/* for the list of directory inodes */
-struct dir_inode_entry {
+/* for the list of inodes to be GCed */
+struct inode_entry {
        struct list_head list;  /* list head */
        struct inode *inode;    /* vfs inode pointer */
 };
 
+/* for the list of blockaddresses to be discarded */
+struct discard_entry {
+       struct list_head list;  /* list head */
+       block_t blkaddr;        /* block address to be discarded */
+       int len;                /* # of consecutive blocks of the discard */
+};
+
 /* for the list of fsync inodes, used only during recovery */
 struct fsync_inode_entry {
        struct list_head list;  /* list head */
        struct inode *inode;    /* vfs inode pointer */
-       block_t blkaddr;        /* block address locating the last inode */
+       block_t blkaddr;        /* block address locating the last fsync */
+       block_t last_dentry;    /* block address locating the last dentry */
 };
 
-#define nats_in_cursum(sum)            (le16_to_cpu(sum->n_nats))
-#define sits_in_cursum(sum)            (le16_to_cpu(sum->n_sits))
+#define nats_in_cursum(jnl)            (le16_to_cpu(jnl->n_nats))
+#define sits_in_cursum(jnl)            (le16_to_cpu(jnl->n_sits))
+
+#define nat_in_journal(jnl, i)         (jnl->nat_j.entries[i].ne)
+#define nid_in_journal(jnl, i)         (jnl->nat_j.entries[i].nid)
+#define sit_in_journal(jnl, i)         (jnl->sit_j.entries[i].se)
+#define segno_in_journal(jnl, i)       (jnl->sit_j.entries[i].segno)
 
-#define nat_in_journal(sum, i)         (sum->nat_j.entries[i].ne)
-#define nid_in_journal(sum, i)         (sum->nat_j.entries[i].nid)
-#define sit_in_journal(sum, i)         (sum->sit_j.entries[i].se)
-#define segno_in_journal(sum, i)       (sum->sit_j.entries[i].segno)
+#define MAX_NAT_JENTRIES(jnl)  (NAT_JOURNAL_ENTRIES - nats_in_cursum(jnl))
+#define MAX_SIT_JENTRIES(jnl)  (SIT_JOURNAL_ENTRIES - sits_in_cursum(jnl))
 
-static inline int update_nats_in_cursum(struct f2fs_summary_block *rs, int i)
+static inline int update_nats_in_cursum(struct f2fs_journal *journal, int i)
 {
-       int before = nats_in_cursum(rs);
-       rs->n_nats = cpu_to_le16(before + i);
+       int before = nats_in_cursum(journal);
+       journal->n_nats = cpu_to_le16(before + i);
        return before;
 }
 
-static inline int update_sits_in_cursum(struct f2fs_summary_block *rs, int i)
+static inline int update_sits_in_cursum(struct f2fs_journal *journal, int i)
 {
-       int before = sits_in_cursum(rs);
-       rs->n_sits = cpu_to_le16(before + i);
+       int before = sits_in_cursum(journal);
+       journal->n_sits = cpu_to_le16(before + i);
        return before;
 }
 
+static inline bool __has_cursum_space(struct f2fs_journal *journal,
+                                                       int size, int type)
+{
+       if (type == NAT_JOURNAL)
+               return size <= MAX_NAT_JENTRIES(journal);
+       return size <= MAX_SIT_JENTRIES(journal);
+}
+
 /*
  * ioctl commands
  */
-#define F2FS_IOC_GETFLAGS               FS_IOC_GETFLAGS
-#define F2FS_IOC_SETFLAGS               FS_IOC_SETFLAGS
+#define F2FS_IOC_GETFLAGS              FS_IOC_GETFLAGS
+#define F2FS_IOC_SETFLAGS              FS_IOC_SETFLAGS
+#define F2FS_IOC_GETVERSION            FS_IOC_GETVERSION
+
+#define F2FS_IOCTL_MAGIC               0xf5
+#define F2FS_IOC_START_ATOMIC_WRITE    _IO(F2FS_IOCTL_MAGIC, 1)
+#define F2FS_IOC_COMMIT_ATOMIC_WRITE   _IO(F2FS_IOCTL_MAGIC, 2)
+#define F2FS_IOC_START_VOLATILE_WRITE  _IO(F2FS_IOCTL_MAGIC, 3)
+#define F2FS_IOC_RELEASE_VOLATILE_WRITE        _IO(F2FS_IOCTL_MAGIC, 4)
+#define F2FS_IOC_ABORT_VOLATILE_WRITE  _IO(F2FS_IOCTL_MAGIC, 5)
+#define F2FS_IOC_GARBAGE_COLLECT       _IO(F2FS_IOCTL_MAGIC, 6)
+#define F2FS_IOC_WRITE_CHECKPOINT      _IO(F2FS_IOCTL_MAGIC, 7)
+#define F2FS_IOC_DEFRAGMENT            _IO(F2FS_IOCTL_MAGIC, 8)
+#define F2FS_IOC_MOVE_RANGE            _IOWR(F2FS_IOCTL_MAGIC, 9,      \
+                                               struct f2fs_move_range)
+
+#define F2FS_IOC_SET_ENCRYPTION_POLICY FS_IOC_SET_ENCRYPTION_POLICY
+#define F2FS_IOC_GET_ENCRYPTION_POLICY FS_IOC_GET_ENCRYPTION_POLICY
+#define F2FS_IOC_GET_ENCRYPTION_PWSALT FS_IOC_GET_ENCRYPTION_PWSALT
+
+/*
+ * should be same as XFS_IOC_GOINGDOWN.
+ * Flags for going down operation used by FS_IOC_GOINGDOWN
+ */
+#define F2FS_IOC_SHUTDOWN      _IOR('X', 125, __u32)   /* Shutdown */
+#define F2FS_GOING_DOWN_FULLSYNC       0x0     /* going down with full sync */
+#define F2FS_GOING_DOWN_METASYNC       0x1     /* going down with metadata */
+#define F2FS_GOING_DOWN_NOSYNC         0x2     /* going down */
+#define F2FS_GOING_DOWN_METAFLUSH      0x3     /* going down with meta flush */
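As with XFS_IOC_GOINGDOWN, the shutdown ioctl is issued from userspace on any descriptor that lives on the target filesystem, passing one of the F2FS_GOING_DOWN_* values by pointer. A hypothetical standalone caller (defines repeated locally; the mount point is a placeholder):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/types.h>

	#define F2FS_IOC_SHUTDOWN		_IOR('X', 125, __u32)
	#define F2FS_GOING_DOWN_FULLSYNC	0x0

	int main(void)
	{
		__u32 flags = F2FS_GOING_DOWN_FULLSYNC;
		int fd = open("/mnt/f2fs", O_RDONLY);	/* any fd on the fs */

		if (fd < 0 || ioctl(fd, F2FS_IOC_SHUTDOWN, &flags) < 0) {
			perror("f2fs shutdown");
			return 1;
		}
		return 0;
	}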
 
 #if defined(__KERNEL__) && defined(CONFIG_COMPAT)
 /*
  * ioctl commands in 32 bit emulation
  */
-#define F2FS_IOC32_GETFLAGS             FS_IOC32_GETFLAGS
-#define F2FS_IOC32_SETFLAGS             FS_IOC32_SETFLAGS
+#define F2FS_IOC32_GETFLAGS            FS_IOC32_GETFLAGS
+#define F2FS_IOC32_SETFLAGS            FS_IOC32_SETFLAGS
+#define F2FS_IOC32_GETVERSION          FS_IOC32_GETVERSION
 #endif
 
+struct f2fs_defragment {
+       u64 start;
+       u64 len;
+};
+
+struct f2fs_move_range {
+       u32 dst_fd;             /* destination fd */
+       u64 pos_in;             /* start position in src_fd */
+       u64 pos_out;            /* start position in dst_fd */
+       u64 len;                /* size to move */
+};
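The move-range ioctl is driven from the source file descriptor, with the destination carried inside the argument. A hypothetical userspace sketch (struct and ioctl number repeated locally; paths are placeholders):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/types.h>

	struct f2fs_move_range {
		__u32 dst_fd;		/* destination fd */
		__u64 pos_in;		/* start position in src_fd */
		__u64 pos_out;		/* start position in dst_fd */
		__u64 len;		/* size to move */
	};

	#define F2FS_IOC_MOVE_RANGE	_IOWR(0xf5, 9, struct f2fs_move_range)

	int main(void)
	{
		int src = open("/mnt/f2fs/src", O_RDWR);
		int dst = open("/mnt/f2fs/dst", O_RDWR);
		struct f2fs_move_range range = {
			.dst_fd = dst, .pos_in = 0, .pos_out = 0, .len = 4096,
		};

		if (src < 0 || dst < 0 ||
		    ioctl(src, F2FS_IOC_MOVE_RANGE, &range) < 0) {
			perror("f2fs move_range");
			return 1;
		}
		return 0;
	}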
+
 /*
  * For INODE and NODE manager
  */
-#define XATTR_NODE_OFFSET      (-1)    /*
-                                        * store xattrs to one node block per
-                                        * file keeping -1 as its node offset to
-                                        * distinguish from index node blocks.
-                                        */
+/* for directory operations */
+struct f2fs_dentry_ptr {
+       struct inode *inode;
+       const void *bitmap;
+       struct f2fs_dir_entry *dentry;
+       __u8 (*filename)[F2FS_SLOT_LEN];
+       int max;
+};
+
+static inline void make_dentry_ptr(struct inode *inode,
+               struct f2fs_dentry_ptr *d, void *src, int type)
+{
+       d->inode = inode;
+
+       if (type == 1) {
+               struct f2fs_dentry_block *t = (struct f2fs_dentry_block *)src;
+               d->max = NR_DENTRY_IN_BLOCK;
+               d->bitmap = &t->dentry_bitmap;
+               d->dentry = t->dentry;
+               d->filename = t->filename;
+       } else {
+               struct f2fs_inline_dentry *t = (struct f2fs_inline_dentry *)src;
+               d->max = NR_INLINE_DENTRY;
+               d->bitmap = &t->dentry_bitmap;
+               d->dentry = t->dentry;
+               d->filename = t->filename;
+       }
+}
+
+/*
+ * XATTR_NODE_OFFSET stores xattrs to one node block per file keeping -1
+ * as its node offset to distinguish from index node blocks.
+ * But some bits are used to mark the node block.
+ */
+#define XATTR_NODE_OFFSET      ((((unsigned int)-1) << OFFSET_BIT_SHIFT) \
+                               >> OFFSET_BIT_SHIFT)
 enum {
        ALLOC_NODE,                     /* allocate a new node page if needed */
        LOOKUP_NODE,                    /* look up a node without readahead */
        LOOKUP_NODE_RA,                 /*
                                         * look up a node with readahead called
-                                        * by get_datablock_ro.
+                                        * by get_data_block.
                                         */
 };
 
-#define F2FS_LINK_MAX          32000   /* maximum link count per file */
+#define F2FS_LINK_MAX  0xffffffff      /* maximum link count per file */
+
+#define MAX_DIR_RA_PAGES       4       /* maximum ra pages of dir */
+
+/* vector size for gang look-up from extent cache that consists of radix tree */
+#define EXT_TREE_VEC_SIZE      64
 
 /* for in-memory extent cache entry */
+#define F2FS_MIN_EXTENT_LEN    64      /* minimum extent length */
+
+/* number of extent info in extent cache we try to shrink */
+#define EXTENT_CACHE_SHRINK_NUMBER     128
+
 struct extent_info {
-       rwlock_t ext_lock;      /* rwlock for consistency */
-       unsigned int fofs;      /* start offset in a file */
-       u32 blk_addr;           /* start block address of the extent */
-       unsigned int len;       /* length of the extent */
+       unsigned int fofs;              /* start offset in a file */
+       u32 blk;                        /* start block address of the extent */
+       unsigned int len;               /* length of the extent */
+};
+
+struct extent_node {
+       struct rb_node rb_node;         /* rb node located in rb-tree */
+       struct list_head list;          /* node in global extent list of sbi */
+       struct extent_info ei;          /* extent info */
+       struct extent_tree *et;         /* extent tree pointer */
+};
+
+struct extent_tree {
+       nid_t ino;                      /* inode number */
+       struct rb_root root;            /* root of extent info rb-tree */
+       struct extent_node *cached_en;  /* recently accessed extent node */
+       struct extent_info largest;     /* largest extent info */
+       struct list_head list;          /* to be used by sbi->zombie_list */
+       rwlock_t lock;                  /* protect extent info rb-tree */
+       atomic_t node_cnt;              /* # of extent node in rb-tree */
+};
+
+/*
+ * This structure is taken from ext4_map_blocks.
+ *
+ * Note that, however, f2fs uses NEW and MAPPED flags for f2fs_map_blocks().
+ */
+#define F2FS_MAP_NEW           (1 << BH_New)
+#define F2FS_MAP_MAPPED                (1 << BH_Mapped)
+#define F2FS_MAP_UNWRITTEN     (1 << BH_Unwritten)
+#define F2FS_MAP_FLAGS         (F2FS_MAP_NEW | F2FS_MAP_MAPPED |\
+                               F2FS_MAP_UNWRITTEN)
+
+struct f2fs_map_blocks {
+       block_t m_pblk;
+       block_t m_lblk;
+       unsigned int m_len;
+       unsigned int m_flags;
+       pgoff_t *m_next_pgofs;          /* point next possible non-hole pgofs */
 };
 
+/* for flag in get_data_block */
+#define F2FS_GET_BLOCK_READ            0
+#define F2FS_GET_BLOCK_DIO             1
+#define F2FS_GET_BLOCK_FIEMAP          2
+#define F2FS_GET_BLOCK_BMAP            3
+#define F2FS_GET_BLOCK_PRE_DIO         4
+#define F2FS_GET_BLOCK_PRE_AIO         5
+
 /*
  * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
  */
 #define FADVISE_COLD_BIT       0x01
-#define FADVISE_CP_BIT         0x02
+#define FADVISE_LOST_PINO_BIT  0x02
+#define FADVISE_ENCRYPT_BIT    0x04
+#define FADVISE_ENC_NAME_BIT   0x08
+#define FADVISE_KEEP_SIZE_BIT  0x10
+
+#define file_is_cold(inode)    is_file(inode, FADVISE_COLD_BIT)
+#define file_wrong_pino(inode) is_file(inode, FADVISE_LOST_PINO_BIT)
+#define file_set_cold(inode)   set_file(inode, FADVISE_COLD_BIT)
+#define file_lost_pino(inode)  set_file(inode, FADVISE_LOST_PINO_BIT)
+#define file_clear_cold(inode) clear_file(inode, FADVISE_COLD_BIT)
+#define file_got_pino(inode)   clear_file(inode, FADVISE_LOST_PINO_BIT)
+#define file_is_encrypt(inode) is_file(inode, FADVISE_ENCRYPT_BIT)
+#define file_set_encrypt(inode)        set_file(inode, FADVISE_ENCRYPT_BIT)
+#define file_clear_encrypt(inode) clear_file(inode, FADVISE_ENCRYPT_BIT)
+#define file_enc_name(inode)   is_file(inode, FADVISE_ENC_NAME_BIT)
+#define file_set_enc_name(inode) set_file(inode, FADVISE_ENC_NAME_BIT)
+#define file_keep_isize(inode) is_file(inode, FADVISE_KEEP_SIZE_BIT)
+#define file_set_keep_isize(inode) set_file(inode, FADVISE_KEEP_SIZE_BIT)
+
+#define DEF_DIR_LEVEL          0
 
 struct f2fs_inode_info {
        struct inode vfs_inode;         /* serve a vfs inode */
        unsigned long i_flags;          /* keep an inode flags for ioctl */
        unsigned char i_advise;         /* use to give file attribute hints */
+       unsigned char i_dir_level;      /* use for dentry level for large dir */
        unsigned int i_current_depth;   /* use only in directory structure */
        unsigned int i_pino;            /* parent inode number */
        umode_t i_acl_mode;             /* keep file acl mode temporarily */
 
        /* Use below internally in f2fs*/
        unsigned long flags;            /* use to pass per-file flags */
-       atomic_t dirty_dents;           /* # of dirty dentry pages */
+       struct rw_semaphore i_sem;      /* protect fi info */
+       struct percpu_counter dirty_pages;      /* # of dirty pages */
        f2fs_hash_t chash;              /* hash value of given file name */
        unsigned int clevel;            /* maximum level of given file name */
        nid_t i_xattr_nid;              /* node id that contains xattrs */
-       struct extent_info ext;         /* in-memory extent cache entry */
+       unsigned long long xattr_ver;   /* cp version of xattr modification */
+       loff_t  last_disk_size;         /* lastly written file size */
+
+       struct list_head dirty_list;    /* dirty list for dirs and files */
+       struct list_head gdirty_list;   /* linked in global dirty list */
+       struct list_head inmem_pages;   /* inmemory pages managed by f2fs */
+       struct mutex inmem_lock;        /* lock for inmemory pages */
+       struct extent_tree *extent_tree;        /* cached extent_tree entry */
+       struct rw_semaphore dio_rwsem[2];/* avoid racing between dio and gc */
 };
 
 static inline void get_extent_info(struct extent_info *ext,
-                                       struct f2fs_extent i_ext)
+                                       struct f2fs_extent *i_ext)
 {
-       write_lock(&ext->ext_lock);
-       ext->fofs = le32_to_cpu(i_ext.fofs);
-       ext->blk_addr = le32_to_cpu(i_ext.blk_addr);
-       ext->len = le32_to_cpu(i_ext.len);
-       write_unlock(&ext->ext_lock);
+       ext->fofs = le32_to_cpu(i_ext->fofs);
+       ext->blk = le32_to_cpu(i_ext->blk);
+       ext->len = le32_to_cpu(i_ext->len);
 }
 
 static inline void set_raw_extent(struct extent_info *ext,
                                        struct f2fs_extent *i_ext)
 {
-       read_lock(&ext->ext_lock);
        i_ext->fofs = cpu_to_le32(ext->fofs);
-       i_ext->blk_addr = cpu_to_le32(ext->blk_addr);
+       i_ext->blk = cpu_to_le32(ext->blk);
        i_ext->len = cpu_to_le32(ext->len);
-       read_unlock(&ext->ext_lock);
 }
 
+static inline void set_extent_info(struct extent_info *ei, unsigned int fofs,
+                                               u32 blk, unsigned int len)
+{
+       ei->fofs = fofs;
+       ei->blk = blk;
+       ei->len = len;
+}
+
+static inline bool __is_extent_same(struct extent_info *ei1,
+                                               struct extent_info *ei2)
+{
+       return (ei1->fofs == ei2->fofs && ei1->blk == ei2->blk &&
+                                               ei1->len == ei2->len);
+}
+
+static inline bool __is_extent_mergeable(struct extent_info *back,
+                                               struct extent_info *front)
+{
+       return (back->fofs + back->len == front->fofs &&
+                       back->blk + back->len == front->blk);
+}
+
+static inline bool __is_back_mergeable(struct extent_info *cur,
+                                               struct extent_info *back)
+{
+       return __is_extent_mergeable(back, cur);
+}
+
+static inline bool __is_front_mergeable(struct extent_info *cur,
+                                               struct extent_info *front)
+{
+       return __is_extent_mergeable(cur, front);
+}
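Two extents merge only when both the file-offset range and the block range are contiguous. A standalone illustration of the condition that __is_extent_mergeable() tests, with hypothetical values:

	#include <stdio.h>

	struct ei { unsigned int fofs, blk, len; };

	static int mergeable(const struct ei *back, const struct ei *front)
	{
		return back->fofs + back->len == front->fofs &&
		       back->blk + back->len == front->blk;
	}

	int main(void)
	{
		/* [0..3] at blocks 100..103 followed by [4..11] at 104..111 */
		struct ei back = { 0, 100, 4 }, front = { 4, 104, 8 };

		printf("%d\n", mergeable(&back, &front));	/* prints 1 */
		return 0;
	}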
+
+extern void f2fs_mark_inode_dirty_sync(struct inode *, bool);
+static inline void __try_update_largest_extent(struct inode *inode,
+                       struct extent_tree *et, struct extent_node *en)
+{
+       if (en->ei.len > et->largest.len) {
+               et->largest = en->ei;
+               f2fs_mark_inode_dirty_sync(inode, true);
+       }
+}
+
+enum nid_list {
+       FREE_NID_LIST,
+       ALLOC_NID_LIST,
+       MAX_NID_LIST,
+};
+
 struct f2fs_nm_info {
        block_t nat_blkaddr;            /* base disk address of NAT */
        nid_t max_nid;                  /* maximum possible node ids */
+       nid_t available_nids;           /* # of available node ids */
        nid_t next_scan_nid;            /* the next nid to be scanned */
+       unsigned int ram_thresh;        /* control the memory footprint */
+       unsigned int ra_nid_pages;      /* # of nid pages to be readaheaded */
+       unsigned int dirty_nats_ratio;  /* control dirty nats ratio threshold */
 
        /* NAT cache management */
        struct radix_tree_root nat_root;/* root of the nat entry cache */
-       rwlock_t nat_tree_lock;         /* protect nat_tree_lock */
-       unsigned int nat_cnt;           /* the # of cached nat entries */
+       struct radix_tree_root nat_set_root;/* root of the nat set cache */
+       struct rw_semaphore nat_tree_lock;      /* protect nat_tree_lock */
        struct list_head nat_entries;   /* cached nat entry list (clean) */
-       struct list_head dirty_nat_entries; /* cached nat entry list (dirty) */
+       unsigned int nat_cnt;           /* the # of cached nat entries */
+       unsigned int dirty_nat_cnt;     /* total num of nat entries in set */
 
        /* free node ids management */
-       struct list_head free_nid_list; /* a list for free nids */
-       spinlock_t free_nid_list_lock;  /* protect free nid list */
-       unsigned int fcnt;              /* the number of free node id */
+       struct radix_tree_root free_nid_root;/* root of the free_nid cache */
+       struct list_head nid_list[MAX_NID_LIST];/* lists for free nids */
+       unsigned int nid_cnt[MAX_NID_LIST];     /* the number of free node id */
+       spinlock_t nid_list_lock;       /* protect nid lists ops */
        struct mutex build_lock;        /* lock for build free nids */
 
        /* for checkpoint */
@@ -222,6 +615,9 @@ struct dnode_of_data {
        nid_t nid;                      /* node id of the direct node block */
        unsigned int ofs_in_node;       /* data offset in the node page */
        bool inode_page_locked;         /* inode page is locked or not */
+       bool node_changed;              /* is node block changed */
+       char cur_level;                 /* level of hole node page */
+       char max_level;                 /* level of current page located */
        block_t data_blkaddr;           /* block address of the node block */
 };
 
@@ -259,7 +655,21 @@ enum {
        CURSEG_HOT_NODE,        /* direct node blocks of directory files */
        CURSEG_WARM_NODE,       /* direct node blocks of normal files */
        CURSEG_COLD_NODE,       /* indirect node blocks */
-       NO_CHECK_TYPE
+       NO_CHECK_TYPE,
+};
+
+struct flush_cmd {
+       struct completion wait;
+       struct llist_node llnode;
+       int ret;
+};
+
+struct flush_cmd_control {
+       struct task_struct *f2fs_issue_flush;   /* flush thread */
+       wait_queue_head_t flush_wait_queue;     /* waiting queue for wake-up */
+       atomic_t submit_flush;                  /* # of issued flushes */
+       struct llist_head issue_list;           /* list for command issue */
+       struct llist_node *dispatch_list;       /* list for command dispatch */
 };
 
 struct f2fs_sm_info {
@@ -268,9 +678,6 @@ struct f2fs_sm_info {
        struct dirty_seglist_info *dirty_info;  /* dirty segment information */
        struct curseg_info *curseg_array;       /* active segment information */
 
-       struct list_head wblist_head;   /* list of under-writeback pages */
-       spinlock_t wblist_lock;         /* lock for checkpoint */
-
        block_t seg0_blkaddr;           /* block address of 0'th segment */
        block_t main_blkaddr;           /* start block address of main area */
        block_t ssa_blkaddr;            /* start block address of SSA area */
@@ -279,16 +686,28 @@ struct f2fs_sm_info {
        unsigned int main_segments;     /* # of segments in main area */
        unsigned int reserved_segments; /* # of reserved segments */
        unsigned int ovp_segments;      /* # of overprovision segments */
-};
 
-/*
- * For directory operation
- */
-#define        NODE_DIR1_BLOCK         (ADDRS_PER_INODE + 1)
-#define        NODE_DIR2_BLOCK         (ADDRS_PER_INODE + 2)
-#define        NODE_IND1_BLOCK         (ADDRS_PER_INODE + 3)
-#define        NODE_IND2_BLOCK         (ADDRS_PER_INODE + 4)
-#define        NODE_DIND_BLOCK         (ADDRS_PER_INODE + 5)
+       /* a threshold to reclaim prefree segments */
+       unsigned int rec_prefree_segments;
+
+       /* for small discard management */
+       struct list_head discard_list;          /* 4KB discard list */
+       int nr_discards;                        /* # of discards in the list */
+       int max_discards;                       /* max. discards to be issued */
+
+       /* for batched trimming */
+       unsigned int trim_sections;             /* # of sections to trim */
+
+       struct list_head sit_entry_set; /* sit entry set list */
+
+       unsigned int ipu_policy;        /* in-place-update policy */
+       unsigned int min_ipu_util;      /* in-place-update threshold */
+       unsigned int min_fsync_blocks;  /* threshold for fsync */
+
+       /* for flush command control */
+       struct flush_cmd_control *cmd_control_info;
+};
 
 /*
  * For superblock
@@ -299,24 +718,21 @@ struct f2fs_sm_info {
  * f2fs monitors the number of several block types such as on-writeback,
  * dirty dentry blocks, dirty node blocks, and dirty meta blocks.
  */
+#define WB_DATA_TYPE(p)        (__is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
 enum count_type {
-       F2FS_WRITEBACK,
        F2FS_DIRTY_DENTS,
+       F2FS_DIRTY_DATA,
        F2FS_DIRTY_NODES,
        F2FS_DIRTY_META,
+       F2FS_INMEM_PAGES,
+       F2FS_DIRTY_IMETA,
+       F2FS_WB_CP_DATA,
+       F2FS_WB_DATA,
        NR_COUNT_TYPE,
 };
 
 /*
- * Uses as sbi->fs_lock[NR_GLOBAL_LOCKS].
- * The checkpoint procedure blocks all the locks in this fs_lock array.
- * Some FS operations grab free locks, and if there is no free lock,
- * then wait to grab a lock in a round-robin manner.
- */
-#define NR_GLOBAL_LOCKS        8
-
-/*
- * The below are the page types of bios used in submti_bio().
+ * The below are the page types of bios used in submit_bio().
  * The available types are:
  * DATA                        User data pages. It operates as async mode.
  * NODE                        Node pages. It operates as async mode.
@@ -326,19 +742,104 @@ enum count_type {
  *                     with waiting the bio's completion
  * ...                 Only can be used with META.
  */
+#define PAGE_TYPE_OF_BIO(type) ((type) > META ? META : (type))
 enum page_type {
        DATA,
        NODE,
        META,
        NR_PAGE_TYPE,
        META_FLUSH,
+       INMEM,          /* the below types are used by tracepoints only. */
+       INMEM_DROP,
+       INMEM_REVOKE,
+       IPU,
+       OPU,
+};
+
+struct f2fs_io_info {
+       struct f2fs_sb_info *sbi;       /* f2fs_sb_info pointer */
+       enum page_type type;    /* contains DATA/NODE/META/META_FLUSH */
+       int rw;                 /* contains R/RS/W/WS with REQ_META/REQ_PRIO */
+       block_t new_blkaddr;    /* new block address to be written */
+       block_t old_blkaddr;    /* old block address before CoW */
+       struct page *page;      /* page to be written */
+       struct page *encrypted_page;    /* encrypted page */
+};
+
+#define is_read_io(rw) (((rw) & 1) == READ)
+struct f2fs_bio_info {
+       struct f2fs_sb_info *sbi;       /* f2fs superblock */
+       struct bio *bio;                /* bios to merge */
+       sector_t last_block_in_bio;     /* last block number */
+       struct f2fs_io_info fio;        /* store buffered io info. */
+       struct rw_semaphore io_rwsem;   /* blocking op for bio */
+};
+
+#define FDEV(i)                                (sbi->devs[i])
+#define RDEV(i)                                (raw_super->devs[i])
+struct f2fs_dev_info {
+       struct block_device *bdev;
+       char path[MAX_PATH_LEN];
+       unsigned int total_segments;
+       block_t start_blk;
+       block_t end_blk;
+#ifdef CONFIG_BLK_DEV_ZONED
+       unsigned int nr_blkz;                   /* Total number of zones */
+       u8 *blkz_type;                          /* Array of zone types */
+#endif
+};
+
+enum inode_type {
+       DIR_INODE,                      /* for dirty dir inode */
+       FILE_INODE,                     /* for dirty regular/symlink inode */
+       DIRTY_META,                     /* for all dirtied inode metadata */
+       NR_INODE_TYPE,
+};
+
+/* for inner inode cache management */
+struct inode_management {
+       struct radix_tree_root ino_root;        /* ino entry array */
+       spinlock_t ino_lock;                    /* for ino entry lock */
+       struct list_head ino_list;              /* inode list head */
+       unsigned long ino_num;                  /* number of entries */
+};
+
+/* For s_flag in struct f2fs_sb_info */
+enum {
+       SBI_IS_DIRTY,                           /* dirty flag for checkpoint */
+       SBI_IS_CLOSE,                           /* specify unmounting */
+       SBI_NEED_FSCK,                          /* need fsck.f2fs to fix */
+       SBI_POR_DOING,                          /* recovery is in progress */
+       SBI_NEED_SB_WRITE,                      /* need to recover superblock */
+       SBI_NEED_CP,                            /* need to checkpoint */
 };
 
+enum {
+       CP_TIME,
+       REQ_TIME,
+       MAX_TIME,
+};
+
+#ifdef CONFIG_F2FS_FS_ENCRYPTION
+#define F2FS_KEY_DESC_PREFIX "f2fs:"
+#define F2FS_KEY_DESC_PREFIX_SIZE 5
+#endif
 struct f2fs_sb_info {
        struct super_block *sb;                 /* pointer to VFS super block */
-       struct buffer_head *raw_super_buf;      /* buffer head of raw sb */
+       struct proc_dir_entry *s_proc;          /* proc entry */
        struct f2fs_super_block *raw_super;     /* raw super block pointer */
-       int s_dirty;                            /* dirty flag for checkpoint */
+       int valid_super_block;                  /* valid super block no */
+       unsigned long s_flag;                           /* flags for sbi */
+
+#ifdef CONFIG_F2FS_FS_ENCRYPTION
+       u8 key_prefix[F2FS_KEY_DESC_PREFIX_SIZE];
+       u8 key_prefix_size;
+#endif
+
+#ifdef CONFIG_BLK_DEV_ZONED
+       unsigned int blocks_per_blkz;           /* F2FS blocks per zone */
+       unsigned int log_blocks_per_blkz;       /* log2 F2FS blocks per zone */
+#endif
 
        /* for node-related operations */
        struct f2fs_nm_info *nm_info;           /* node manager */
@@ -346,32 +847,44 @@ struct f2fs_sb_info {
 
        /* for segment-related operations */
        struct f2fs_sm_info *sm_info;           /* segment manager */
-       struct bio *bio[NR_PAGE_TYPE];          /* bios to merge */
-       sector_t last_block_in_bio[NR_PAGE_TYPE];       /* last block number */
-       struct rw_semaphore bio_sem;            /* IO semaphore */
+
+       /* for bio operations */
+       struct f2fs_bio_info read_io;                   /* for read bios */
+       struct f2fs_bio_info write_io[NR_PAGE_TYPE];    /* for write bios */
+       struct mutex wio_mutex[NODE + 1];       /* bio ordering for NODE/DATA */
 
        /* for checkpoint */
        struct f2fs_checkpoint *ckpt;           /* raw checkpoint pointer */
+       int cur_cp_pack;                        /* current cp pack (1 or 2) */
+       spinlock_t cp_lock;                     /* for flag in ckpt */
        struct inode *meta_inode;               /* cache meta blocks */
        struct mutex cp_mutex;                  /* checkpoint procedure lock */
-       struct mutex fs_lock[NR_GLOBAL_LOCKS];  /* blocking FS operations */
-       struct mutex node_write;                /* locking node writes */
-       struct mutex writepages;                /* mutex for writepages() */
-       unsigned char next_lock_num;            /* round-robin global locks */
-       int por_doing;                          /* recovery is doing or not */
-       int on_build_free_nids;                 /* build_free_nids is doing */
-
-       /* for orphan inode management */
-       struct list_head orphan_inode_list;     /* orphan inode list */
-       struct mutex orphan_inode_mutex;        /* for orphan inode list */
-       unsigned int n_orphans;                 /* # of orphan inodes */
-
-       /* for directory inode management */
-       struct list_head dir_inode_list;        /* dir inode list */
-       spinlock_t dir_inode_lock;              /* for dir inode list lock */
-       unsigned int n_dirty_dirs;              /* # of dir inodes */
-
-       /* basic file system units */
+       struct rw_semaphore cp_rwsem;           /* blocking FS operations */
+       struct rw_semaphore node_write;         /* locking node writes */
+       wait_queue_head_t cp_wait;
+       unsigned long last_time[MAX_TIME];      /* to store time in jiffies */
+       long interval_time[MAX_TIME];           /* to store thresholds */
+
+       struct inode_management im[MAX_INO_ENTRY];      /* manage inode cache */
+
+       /* for orphan inode, use 0'th array */
+       unsigned int max_orphans;               /* max orphan inodes */
+
+       /* for inode management */
+       struct list_head inode_list[NR_INODE_TYPE];     /* dirty inode list */
+       spinlock_t inode_lock[NR_INODE_TYPE];   /* for dirty inode list lock */
+
+       /* for extent tree cache */
+       struct radix_tree_root extent_tree_root;/* cache extent cache entries */
+       struct rw_semaphore extent_tree_lock;   /* locking extent radix tree */
+       struct list_head extent_list;           /* lru list for shrinker */
+       spinlock_t extent_lock;                 /* locking extent lru list */
+       atomic_t total_ext_tree;                /* extent tree count */
+       struct list_head zombie_list;           /* extent zombie tree list */
+       atomic_t total_zombie_tree;             /* extent zombie tree count */
+       atomic_t total_ext_node;                /* extent info count */
+
+       /* basic filesystem units */
        unsigned int log_sectors_per_block;     /* log2 sectors per block */
        unsigned int log_blocksize;             /* log2 block size */
        unsigned int blocksize;                 /* block size */
@@ -385,15 +898,23 @@ struct f2fs_sb_info {
        unsigned int total_sections;            /* total section count */
        unsigned int total_node_count;          /* total node block count */
        unsigned int total_valid_node_count;    /* valid node block count */
-       unsigned int total_valid_inode_count;   /* valid inode count */
+       loff_t max_file_blocks;                 /* max block index of file */
        int active_logs;                        /* # of active logs */
+       int dir_level;                          /* directory level */
 
        block_t user_block_count;               /* # of user blocks */
        block_t total_valid_block_count;        /* # of valid blocks */
-       block_t alloc_valid_block_count;        /* # of allocated blocks */
+       block_t discard_blks;                   /* discard command candidates */
        block_t last_valid_block_count;         /* for recovery */
        u32 s_next_generation;                  /* for NFS support */
-       atomic_t nr_pages[NR_COUNT_TYPE];       /* # of pages, see count_type */
+
+       /* # of pages, see count_type */
+       atomic_t nr_pages[NR_COUNT_TYPE];
+       /* # of allocated blocks */
+       struct percpu_counter alloc_valid_block_count;
+
+       /* valid inode count */
+       struct percpu_counter total_valid_inode_count;
 
        struct f2fs_mount_info mount_opt;       /* mount options */
 
@@ -402,22 +923,142 @@ struct f2fs_sb_info {
        struct f2fs_gc_kthread  *gc_thread;     /* GC thread */
        unsigned int cur_victim_sec;            /* current victim section num */
 
+       /* maximum # of trials to find a victim segment for SSR and GC */
+       unsigned int max_victim_search;
+
        /*
         * for stat information.
         * one is for the LFS mode, and the other is for the SSR mode.
         */
+#ifdef CONFIG_F2FS_STAT_FS
        struct f2fs_stat_info *stat_info;       /* FS status information */
        unsigned int segment_count[2];          /* # of allocated segments */
        unsigned int block_count[2];            /* # of allocated blocks */
-       unsigned int last_victim[2];            /* last victim segment # */
-       int total_hit_ext, read_hit_ext;        /* extent cache hit ratio */
+       atomic_t inplace_count;         /* # of inplace update */
+       atomic64_t total_hit_ext;               /* # of lookup extent cache */
+       atomic64_t read_hit_rbtree;             /* # of hit rbtree extent node */
+       atomic64_t read_hit_largest;            /* # of hit largest extent node */
+       atomic64_t read_hit_cached;             /* # of hit cached extent node */
+       atomic_t inline_xattr;                  /* # of inline_xattr inodes */
+       atomic_t inline_inode;                  /* # of inline_data inodes */
+       atomic_t inline_dir;                    /* # of inline_dentry inodes */
        int bg_gc;                              /* background gc calls */
+       unsigned int ndirty_inode[NR_INODE_TYPE];       /* # of dirty inodes */
+#endif
+       unsigned int last_victim[2];            /* last victim segment # */
        spinlock_t stat_lock;                   /* lock for stat operations */
+
+       /* For sysfs support */
+       struct kobject s_kobj;
+       struct completion s_kobj_unregister;
+
+       /* For shrinker support */
+       struct list_head s_list;
+       int s_ndevs;                            /* number of devices */
+       struct f2fs_dev_info *devs;             /* for device list */
+       struct mutex umount_mutex;
+       unsigned int shrinker_run_no;
+
+       /* For write statistics */
+       u64 sectors_written_start;
+       u64 kbytes_written;
+
+       /* Reference to checksum algorithm driver via cryptoapi */
+       struct crypto_shash *s_chksum_driver;
+
+       /* For fault injection */
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+       struct f2fs_fault_info fault_info;
+#endif
 };
 
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
+{
+       struct f2fs_fault_info *ffi = &sbi->fault_info;
+
+       if (!ffi->inject_rate)
+               return false;
+
+       if (!IS_FAULT_SET(ffi, type))
+               return false;
+
+       atomic_inc(&ffi->inject_ops);
+       if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) {
+               atomic_set(&ffi->inject_ops, 0);
+               printk("%sF2FS-fs : inject %s in %pF\n",
+                               KERN_INFO,
+                               fault_name[type],
+                               __builtin_return_address(0));
+               return true;
+       }
+       return false;
+}
+#endif
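
Callers gate a forced failure path on time_to_inject() before doing the real work; every inject_rate-th gated call fails. A hedged sketch of the calling pattern, not compilable outside the kernel tree (f2fs_demo_alloc is an invented name; the same shape appears in f2fs_kmalloc() later in this header):

	#ifdef CONFIG_F2FS_FAULT_INJECTION
	/* illustrative only: simulate allocation failure under fault injection */
	static inline void *f2fs_demo_alloc(struct f2fs_sb_info *sbi, size_t size)
	{
		if (time_to_inject(sbi, FAULT_KMALLOC))
			return NULL;		/* pretend kmalloc() failed */
		return kmalloc(size, GFP_KERNEL);
	}
	#endif
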
+
+/*
+ * For write statistics. Suppose the sector size is 512 bytes, and the
+ * return value is in kbytes. s is a struct f2fs_sb_info pointer.
+ */
+#define BD_PART_WRITTEN(s)                                              \
+(((u64)part_stat_read(s->sb->s_bdev->bd_part, sectors[1]) -             \
+               s->sectors_written_start) >> 1)
+
+static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type)
+{
+       sbi->last_time[type] = jiffies;
+}
+
+static inline bool f2fs_time_over(struct f2fs_sb_info *sbi, int type)
+{
+       struct timespec ts = {sbi->interval_time[type], 0};
+       unsigned long interval = timespec_to_jiffies(&ts);
+
+       return time_after(jiffies, sbi->last_time[type] + interval);
+}
+
+static inline bool is_idle(struct f2fs_sb_info *sbi)
+{
+       struct block_device *bdev = sbi->sb->s_bdev;
+       struct request_queue *q = bdev_get_queue(bdev);
+       struct request_list *rl = &q->root_rl;
+
+       if (rl->count[BLK_RW_SYNC] || rl->count[BLK_RW_ASYNC])
+               return false;
+
+       return f2fs_time_over(sbi, REQ_TIME);
+}
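
is_idle() lets background work defer to foreground I/O: it bails out while the request queue holds sync or async requests, and otherwise checks whether REQ_TIME has elapsed. A hedged sketch of the intended calling pattern (f2fs_demo_bg_work is an invented name; f2fs_balance_fs_bg() is the kind of real consumer of such a check):

	/* illustrative only: back off from housekeeping while the disk is busy */
	static void f2fs_demo_bg_work(struct f2fs_sb_info *sbi)
	{
		if (!is_idle(sbi))
			return;		/* foreground I/O pending: try again later */

		/* ... issue discards, flush dirty metadata, etc. ... */
	}
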
+
 /*
  * Inline functions
  */
+#define SHASH_DESC_ON_STACK(shash, ctx)                           \
+       char __##shash##_desc[sizeof(struct shash_desc) +         \
+               crypto_shash_descsize(ctx)] CRYPTO_MINALIGN_ATTR; \
+       struct shash_desc *shash = (struct shash_desc *)__##shash##_desc
+
+static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address,
+                          unsigned int length)
+{
+       SHASH_DESC_ON_STACK(shash, sbi->s_chksum_driver);
+       u32 *ctx = (u32 *)shash_desc_ctx(shash);
+       int err;
+
+       shash->tfm = sbi->s_chksum_driver;
+       shash->flags = 0;
+       *ctx = F2FS_SUPER_MAGIC;
+
+       err = crypto_shash_update(shash, address, length);
+       BUG_ON(err);
+
+       return *ctx;
+}
+
+static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc,
+                                 void *buf, size_t buf_size)
+{
+       return f2fs_crc32(sbi, buf, buf_size) == blk_crc;
+}
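
f2fs_crc32() seeds the shash context with F2FS_SUPER_MAGIC and runs crc32 over the buffer; f2fs_crc_valid() is its read-side counterpart. A hedged sketch of verifying a checkpoint block, whose on-disk crc sits at checksum_offset (f2fs_demo_ckpt_crc_ok is an invented name; the real check lives in checkpoint.c):

	/* illustrative only: validate the crc embedded in a checkpoint image */
	static bool f2fs_demo_ckpt_crc_ok(struct f2fs_sb_info *sbi,
					  struct f2fs_checkpoint *ckpt)
	{
		unsigned int offset = le32_to_cpu(ckpt->checksum_offset);
		__u32 crc = le32_to_cpu(*(__le32 *)((unsigned char *)ckpt + offset));

		/* the crc covers everything up to the stored checksum itself */
		return f2fs_crc_valid(sbi, crc, ckpt, offset);
	}
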
+
 static inline struct f2fs_inode_info *F2FS_I(struct inode *inode)
 {
        return container_of(inode, struct f2fs_inode_info, vfs_inode);
@@ -428,6 +1069,21 @@ static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb)
        return sb->s_fs_info;
 }
 
+static inline struct f2fs_sb_info *F2FS_I_SB(struct inode *inode)
+{
+       return F2FS_SB(inode->i_sb);
+}
+
+static inline struct f2fs_sb_info *F2FS_M_SB(struct address_space *mapping)
+{
+       return F2FS_I_SB(mapping->host);
+}
+
+static inline struct f2fs_sb_info *F2FS_P_SB(struct page *page)
+{
+       return F2FS_M_SB(page->mapping);
+}
+
 static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
 {
        return (struct f2fs_super_block *)(sbi->raw_super);
@@ -438,6 +1094,16 @@ static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi)
        return (struct f2fs_checkpoint *)(sbi->ckpt);
 }
 
+static inline struct f2fs_node *F2FS_NODE(struct page *page)
+{
+       return (struct f2fs_node *)page_address(page);
+}
+
+static inline struct f2fs_inode *F2FS_INODE(struct page *page)
+{
+       return &((struct f2fs_node *)page_address(page))->i;
+}
+
 static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi)
 {
        return (struct f2fs_nm_info *)(sbi->nm_info);
@@ -463,70 +1129,120 @@ static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi)
        return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info);
 }
 
-static inline void F2FS_SET_SB_DIRT(struct f2fs_sb_info *sbi)
+static inline struct address_space *META_MAPPING(struct f2fs_sb_info *sbi)
+{
+       return sbi->meta_inode->i_mapping;
+}
+
+static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi)
+{
+       return sbi->node_inode->i_mapping;
+}
+
+static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type)
+{
+       return test_bit(type, &sbi->s_flag);
+}
+
+static inline void set_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
 {
-       sbi->s_dirty = 1;
+       set_bit(type, &sbi->s_flag);
 }
 
-static inline void F2FS_RESET_SB_DIRT(struct f2fs_sb_info *sbi)
+static inline void clear_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
 {
-       sbi->s_dirty = 0;
+       clear_bit(type, &sbi->s_flag);
 }
 
-static inline bool is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
+static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp)
+{
+       return le64_to_cpu(cp->checkpoint_ver);
+}
+
+static inline bool __is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
 {
        unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
+
        return ckpt_flags & f;
 }
 
-static inline void set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
+static inline bool is_set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
 {
-       unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
+       return __is_set_ckpt_flags(F2FS_CKPT(sbi), f);
+}
+
+static inline void __set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
+{
+       unsigned int ckpt_flags;
+
+       ckpt_flags = le32_to_cpu(cp->ckpt_flags);
        ckpt_flags |= f;
        cp->ckpt_flags = cpu_to_le32(ckpt_flags);
 }
 
-static inline void clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
+static inline void set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
 {
-       unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
+       spin_lock(&sbi->cp_lock);
+       __set_ckpt_flags(F2FS_CKPT(sbi), f);
+       spin_unlock(&sbi->cp_lock);
+}
+
+static inline void __clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
+{
+       unsigned int ckpt_flags;
+
+       ckpt_flags = le32_to_cpu(cp->ckpt_flags);
        ckpt_flags &= (~f);
        cp->ckpt_flags = cpu_to_le32(ckpt_flags);
 }
 
-static inline void mutex_lock_all(struct f2fs_sb_info *sbi)
+static inline void clear_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
+{
+       spin_lock(&sbi->cp_lock);
+       __clear_ckpt_flags(F2FS_CKPT(sbi), f);
+       spin_unlock(&sbi->cp_lock);
+}
+
+static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
+{
+       down_read(&sbi->cp_rwsem);
+}
+
+static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi)
 {
-       int i = 0;
-       for (; i < NR_GLOBAL_LOCKS; i++)
-               mutex_lock(&sbi->fs_lock[i]);
+       up_read(&sbi->cp_rwsem);
 }
 
-static inline void mutex_unlock_all(struct f2fs_sb_info *sbi)
+static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
 {
-       int i = 0;
-       for (; i < NR_GLOBAL_LOCKS; i++)
-               mutex_unlock(&sbi->fs_lock[i]);
+       down_write(&sbi->cp_rwsem);
 }
 
-static inline int mutex_lock_op(struct f2fs_sb_info *sbi)
+static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
 {
-       unsigned char next_lock = sbi->next_lock_num % NR_GLOBAL_LOCKS;
-       int i = 0;
+       up_write(&sbi->cp_rwsem);
+}
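
The removed fs_lock[NR_GLOBAL_LOCKS] round-robin mutexes are replaced by a single cp_rwsem: regular operations take it shared through f2fs_lock_op(), while the checkpoint writer takes it exclusive through f2fs_lock_all(), so any number of operations run concurrently but all drain before a checkpoint starts. A hedged sketch of the calling pattern (f2fs_demo_op is an invented name):

	/* illustrative only: bracket a metadata-mutating operation */
	static int f2fs_demo_op(struct f2fs_sb_info *sbi)
	{
		f2fs_lock_op(sbi);	/* shared: blocks checkpoint, not other ops */

		/* ... allocate blocks, dirty node pages, update NAT entries ... */

		f2fs_unlock_op(sbi);
		return 0;
	}
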
 
-       for (; i < NR_GLOBAL_LOCKS; i++)
-               if (mutex_trylock(&sbi->fs_lock[i]))
-                       return i;
+static inline int __get_cp_reason(struct f2fs_sb_info *sbi)
+{
+       int reason = CP_SYNC;
 
-       mutex_lock(&sbi->fs_lock[next_lock]);
-       sbi->next_lock_num++;
-       return next_lock;
+       if (test_opt(sbi, FASTBOOT))
+               reason = CP_FASTBOOT;
+       if (is_sbi_flag_set(sbi, SBI_IS_CLOSE))
+               reason = CP_UMOUNT;
+       return reason;
 }
 
-static inline void mutex_unlock_op(struct f2fs_sb_info *sbi, int ilock)
+static inline bool __remain_node_summaries(int reason)
 {
-       if (ilock < 0)
-               return;
-       BUG_ON(ilock >= NR_GLOBAL_LOCKS);
-       mutex_unlock(&sbi->fs_lock[ilock]);
+       return (reason == CP_UMOUNT || reason == CP_FASTBOOT);
+}
+
+static inline bool __exist_node_summaries(struct f2fs_sb_info *sbi)
+{
+       return (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG) ||
+                       is_set_ckpt_flags(sbi, CP_FASTBOOT_FLAG));
 }
 
 /*
@@ -534,8 +1250,9 @@ static inline void mutex_unlock_op(struct f2fs_sb_info *sbi, int ilock)
  */
 static inline int check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
 {
-       WARN_ON((nid >= NM_I(sbi)->max_nid));
-       if (nid >= NM_I(sbi)->max_nid)
+       if (unlikely(nid < F2FS_ROOT_INO(sbi)))
+               return -EINVAL;
+       if (unlikely(nid >= NM_I(sbi)->max_nid))
                return -EINVAL;
        return 0;
 }
@@ -548,52 +1265,78 @@ static inline int check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
 static inline int F2FS_HAS_BLOCKS(struct inode *inode)
 {
        if (F2FS_I(inode)->i_xattr_nid)
-               return (inode->i_blocks > F2FS_DEFAULT_ALLOCATED_BLOCKS + 1);
+               return inode->i_blocks > F2FS_DEFAULT_ALLOCATED_BLOCKS + 1;
        else
-               return (inode->i_blocks > F2FS_DEFAULT_ALLOCATED_BLOCKS);
+               return inode->i_blocks > F2FS_DEFAULT_ALLOCATED_BLOCKS;
+}
+
+static inline bool f2fs_has_xattr_block(unsigned int ofs)
+{
+       return ofs == XATTR_NODE_OFFSET;
 }
 
+static inline void f2fs_i_blocks_write(struct inode *, blkcnt_t, bool);
 static inline bool inc_valid_block_count(struct f2fs_sb_info *sbi,
-                                struct inode *inode, blkcnt_t count)
+                                struct inode *inode, blkcnt_t *count)
 {
-       block_t valid_block_count;
+       blkcnt_t diff;
 
-       spin_lock(&sbi->stat_lock);
-       valid_block_count =
-               sbi->total_valid_block_count + (block_t)count;
-       if (valid_block_count > sbi->user_block_count) {
-               spin_unlock(&sbi->stat_lock);
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+       if (time_to_inject(sbi, FAULT_BLOCK))
                return false;
+#endif
+       /*
+        * let's increase this prior to the actual block count change in order
+        * for f2fs_sync_file to avoid data races when deciding checkpoint.
+        */
+       percpu_counter_add(&sbi->alloc_valid_block_count, (*count));
+
+       spin_lock(&sbi->stat_lock);
+       sbi->total_valid_block_count += (block_t)(*count);
+       if (unlikely(sbi->total_valid_block_count > sbi->user_block_count)) {
+               diff = sbi->total_valid_block_count - sbi->user_block_count;
+               *count -= diff;
+               sbi->total_valid_block_count = sbi->user_block_count;
+               if (!*count) {
+                       spin_unlock(&sbi->stat_lock);
+                       percpu_counter_sub(&sbi->alloc_valid_block_count, diff);
+                       return false;
+               }
        }
-       inode->i_blocks += count;
-       sbi->total_valid_block_count = valid_block_count;
-       sbi->alloc_valid_block_count += (block_t)count;
        spin_unlock(&sbi->stat_lock);
+
+       f2fs_i_blocks_write(inode, *count, true);
        return true;
 }
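
Note that count is now passed by pointer: when free space runs short, the helper trims the request to what is left and still succeeds, and only a request trimmed to zero fails. A hedged sketch of the caller-side contract (f2fs_demo_reserve is an invented name):

	/* illustrative only: reserve up to 8 blocks, accepting a partial grant */
	static void f2fs_demo_reserve(struct f2fs_sb_info *sbi, struct inode *inode)
	{
		blkcnt_t count = 8;

		if (inc_valid_block_count(sbi, inode, &count)) {
			/* success: 'count' holds the blocks actually reserved,
			 * possibly fewer than the 8 requested */
		} else {
			/* nothing left (or FAULT_BLOCK injected): no blocks taken */
		}
	}
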
 
-static inline int dec_valid_block_count(struct f2fs_sb_info *sbi,
+static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
                                                struct inode *inode,
                                                blkcnt_t count)
 {
        spin_lock(&sbi->stat_lock);
-       BUG_ON(sbi->total_valid_block_count < (block_t) count);
-       BUG_ON(inode->i_blocks < count);
-       inode->i_blocks -= count;
+       f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count);
+       f2fs_bug_on(sbi, inode->i_blocks < count);
        sbi->total_valid_block_count -= (block_t)count;
        spin_unlock(&sbi->stat_lock);
-       return 0;
+       f2fs_i_blocks_write(inode, count, false);
 }
 
 static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
 {
        atomic_inc(&sbi->nr_pages[count_type]);
-       F2FS_SET_SB_DIRT(sbi);
+
+       if (count_type == F2FS_DIRTY_DATA || count_type == F2FS_INMEM_PAGES ||
+               count_type == F2FS_WB_CP_DATA || count_type == F2FS_WB_DATA)
+               return;
+
+       set_sbi_flag(sbi, SBI_IS_DIRTY);
 }
 
-static inline void inode_inc_dirty_dents(struct inode *inode)
+static inline void inode_inc_dirty_pages(struct inode *inode)
 {
-       atomic_inc(&F2FS_I(inode)->dirty_dents);
+       percpu_counter_inc(&F2FS_I(inode)->dirty_pages);
+       inc_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
+                               F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
 }
 
 static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type)
@@ -601,31 +1344,44 @@ static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type)
        atomic_dec(&sbi->nr_pages[count_type]);
 }
 
-static inline void inode_dec_dirty_dents(struct inode *inode)
+static inline void inode_dec_dirty_pages(struct inode *inode)
 {
-       atomic_dec(&F2FS_I(inode)->dirty_dents);
+       if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
+                       !S_ISLNK(inode->i_mode))
+               return;
+
+       percpu_counter_dec(&F2FS_I(inode)->dirty_pages);
+       dec_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
+                               F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
 }
 
-static inline int get_pages(struct f2fs_sb_info *sbi, int count_type)
+static inline s64 get_pages(struct f2fs_sb_info *sbi, int count_type)
 {
        return atomic_read(&sbi->nr_pages[count_type]);
 }
 
+static inline s64 get_dirty_pages(struct inode *inode)
+{
+       return percpu_counter_sum_positive(&F2FS_I(inode)->dirty_pages);
+}
+
 static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type)
 {
-       unsigned int pages_per_sec = sbi->segs_per_sec *
-                                       (1 << sbi->log_blocks_per_seg);
-       return ((get_pages(sbi, block_type) + pages_per_sec - 1)
-                       >> sbi->log_blocks_per_seg) / sbi->segs_per_sec;
+       unsigned int pages_per_sec = sbi->segs_per_sec * sbi->blocks_per_seg;
+       unsigned int segs = (get_pages(sbi, block_type) + pages_per_sec - 1) >>
+                                               sbi->log_blocks_per_seg;
+
+       return segs / sbi->segs_per_sec;
 }
 
 static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
 {
-       block_t ret;
-       spin_lock(&sbi->stat_lock);
-       ret = sbi->total_valid_block_count;
-       spin_unlock(&sbi->stat_lock);
-       return ret;
+       return sbi->total_valid_block_count;
+}
+
+static inline block_t discard_blocks(struct f2fs_sb_info *sbi)
+{
+       return sbi->discard_blks;
 }
 
 static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag)
@@ -641,131 +1397,159 @@ static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag)
        return 0;
 }
 
+static inline block_t __cp_payload(struct f2fs_sb_info *sbi)
+{
+       return le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload);
+}
+
 static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag)
 {
        struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
-       int offset = (flag == NAT_BITMAP) ?
+       int offset;
+
+       if (__cp_payload(sbi) > 0) {
+               if (flag == NAT_BITMAP)
+                       return &ckpt->sit_nat_version_bitmap;
+               else
+                       return (unsigned char *)ckpt + F2FS_BLKSIZE;
+       } else {
+               offset = (flag == NAT_BITMAP) ?
                        le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0;
-       return &ckpt->sit_nat_version_bitmap + offset;
+               return &ckpt->sit_nat_version_bitmap + offset;
+       }
 }
 
 static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
 {
-       block_t start_addr;
-       struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
-       unsigned long long ckpt_version = le64_to_cpu(ckpt->checkpoint_ver);
-
-       start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
+       block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
 
-       /*
-        * odd numbered checkpoint should at cp segment 0
-        * and even segent must be at cp segment 1
-        */
-       if (!(ckpt_version & 1))
+       if (sbi->cur_cp_pack == 2)
                start_addr += sbi->blocks_per_seg;
+       return start_addr;
+}
+
+static inline block_t __start_cp_next_addr(struct f2fs_sb_info *sbi)
+{
+       block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
 
+       if (sbi->cur_cp_pack == 1)
+               start_addr += sbi->blocks_per_seg;
        return start_addr;
 }
 
+static inline void __set_cp_next_pack(struct f2fs_sb_info *sbi)
+{
+       sbi->cur_cp_pack = (sbi->cur_cp_pack == 1) ? 2 : 1;
+}
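
Instead of recomputing the live pack from the checkpoint version parity (the deleted comment above), the pack number is now cached in cur_cp_pack; the two packs sit one segment apart and __set_cp_next_pack() flips between them after each successful checkpoint. A hedged sketch tying the three helpers together (f2fs_demo_cp_layout is an invented name):

	/* illustrative only: the two cp packs ping-pong one segment apart */
	static void f2fs_demo_cp_layout(struct f2fs_sb_info *sbi)
	{
		block_t cur = __start_cp_addr(sbi);		/* live pack */
		block_t next = __start_cp_next_addr(sbi);	/* next CP target */

		f2fs_msg(sbi->sb, KERN_INFO, "cp packs: cur %u, next %u", cur, next);
		__set_cp_next_pack(sbi);	/* the other pack becomes live */
	}
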
+
 static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi)
 {
        return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
 }
 
 static inline bool inc_valid_node_count(struct f2fs_sb_info *sbi,
-                                               struct inode *inode,
-                                               unsigned int count)
+                                               struct inode *inode)
 {
        block_t valid_block_count;
        unsigned int valid_node_count;
 
        spin_lock(&sbi->stat_lock);
 
-       valid_block_count = sbi->total_valid_block_count + (block_t)count;
-       sbi->alloc_valid_block_count += (block_t)count;
-       valid_node_count = sbi->total_valid_node_count + count;
-
-       if (valid_block_count > sbi->user_block_count) {
+       valid_block_count = sbi->total_valid_block_count + 1;
+       if (unlikely(valid_block_count > sbi->user_block_count)) {
                spin_unlock(&sbi->stat_lock);
                return false;
        }
 
-       if (valid_node_count > sbi->total_node_count) {
+       valid_node_count = sbi->total_valid_node_count + 1;
+       if (unlikely(valid_node_count > sbi->total_node_count)) {
                spin_unlock(&sbi->stat_lock);
                return false;
        }
 
        if (inode)
-               inode->i_blocks += count;
-       sbi->total_valid_node_count = valid_node_count;
-       sbi->total_valid_block_count = valid_block_count;
+               f2fs_i_blocks_write(inode, 1, true);
+
+       sbi->total_valid_node_count++;
+       sbi->total_valid_block_count++;
        spin_unlock(&sbi->stat_lock);
 
+       percpu_counter_inc(&sbi->alloc_valid_block_count);
        return true;
 }
 
 static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
-                                               struct inode *inode,
-                                               unsigned int count)
+                                               struct inode *inode)
 {
        spin_lock(&sbi->stat_lock);
 
-       BUG_ON(sbi->total_valid_block_count < count);
-       BUG_ON(sbi->total_valid_node_count < count);
-       BUG_ON(inode->i_blocks < count);
+       f2fs_bug_on(sbi, !sbi->total_valid_block_count);
+       f2fs_bug_on(sbi, !sbi->total_valid_node_count);
+       f2fs_bug_on(sbi, !inode->i_blocks);
 
-       inode->i_blocks -= count;
-       sbi->total_valid_node_count -= count;
-       sbi->total_valid_block_count -= (block_t)count;
+       f2fs_i_blocks_write(inode, 1, false);
+       sbi->total_valid_node_count--;
+       sbi->total_valid_block_count--;
 
        spin_unlock(&sbi->stat_lock);
 }
 
 static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi)
 {
-       unsigned int ret;
-       spin_lock(&sbi->stat_lock);
-       ret = sbi->total_valid_node_count;
-       spin_unlock(&sbi->stat_lock);
-       return ret;
+       return sbi->total_valid_node_count;
 }
 
 static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi)
 {
-       spin_lock(&sbi->stat_lock);
-       BUG_ON(sbi->total_valid_inode_count == sbi->total_node_count);
-       sbi->total_valid_inode_count++;
-       spin_unlock(&sbi->stat_lock);
+       percpu_counter_inc(&sbi->total_valid_inode_count);
 }
 
-static inline int dec_valid_inode_count(struct f2fs_sb_info *sbi)
+static inline void dec_valid_inode_count(struct f2fs_sb_info *sbi)
 {
-       spin_lock(&sbi->stat_lock);
-       BUG_ON(!sbi->total_valid_inode_count);
-       sbi->total_valid_inode_count--;
-       spin_unlock(&sbi->stat_lock);
-       return 0;
+       percpu_counter_dec(&sbi->total_valid_inode_count);
 }
 
-static inline unsigned int valid_inode_count(struct f2fs_sb_info *sbi)
+static inline s64 valid_inode_count(struct f2fs_sb_info *sbi)
 {
-       unsigned int ret;
-       spin_lock(&sbi->stat_lock);
-       ret = sbi->total_valid_inode_count;
-       spin_unlock(&sbi->stat_lock);
-       return ret;
+       return percpu_counter_sum_positive(&sbi->total_valid_inode_count);
+}
+
+static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
+                                               pgoff_t index, bool for_write)
+{
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+       struct page *page = find_lock_page(mapping, index);
+       if (page)
+               return page;
+
+       if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC))
+               return NULL;
+#endif
+       if (!for_write)
+               return grab_cache_page(mapping, index);
+       return grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
+}
+
+static inline void f2fs_copy_page(struct page *src, struct page *dst)
+{
+       char *src_kaddr = kmap(src);
+       char *dst_kaddr = kmap(dst);
+
+       memcpy(dst_kaddr, src_kaddr, PAGE_SIZE);
+       kunmap(dst);
+       kunmap(src);
 }
 
 static inline void f2fs_put_page(struct page *page, int unlock)
 {
-       if (!page || IS_ERR(page))
+       if (!page)
                return;
 
        if (unlock) {
-               BUG_ON(!PageLocked(page));
+               f2fs_bug_on(F2FS_P_SB(page), !PageLocked(page));
                unlock_page(page);
        }
-       page_cache_release(page);
+       put_page(page);
 }
 
 static inline void f2fs_put_dnode(struct dnode_of_data *dn)
@@ -779,16 +1563,45 @@ static inline void f2fs_put_dnode(struct dnode_of_data *dn)
 }
 
 static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name,
-                                       size_t size, void (*ctor)(void *))
+                                       size_t size)
+{
+       return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, NULL);
+}
+
+static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep,
+                                               gfp_t flags)
+{
+       void *entry;
+
+       entry = kmem_cache_alloc(cachep, flags);
+       if (!entry)
+               entry = kmem_cache_alloc(cachep, flags | __GFP_NOFAIL);
+       return entry;
+}
+
+static inline struct bio *f2fs_bio_alloc(int npages)
+{
+       struct bio *bio;
+
+       /* No failure on bio allocation */
+       bio = bio_alloc(GFP_NOIO, npages);
+       if (!bio)
+               bio = bio_alloc(GFP_NOIO | __GFP_NOFAIL, npages);
+       return bio;
+}
+
+static inline void f2fs_radix_tree_insert(struct radix_tree_root *root,
+                               unsigned long index, void *item)
 {
-       return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, ctor);
+       while (radix_tree_insert(root, index, item))
+               cond_resched();
 }
 
 #define RAW_IS_INODE(p)        ((p)->footer.nid == (p)->footer.ino)
 
 static inline bool IS_INODE(struct page *page)
 {
-       struct f2fs_node *p = (struct f2fs_node *)page_address(page);
+       struct f2fs_node *p = F2FS_NODE(page);
        return RAW_IS_INODE(p);
 }
 
@@ -802,7 +1615,7 @@ static inline block_t datablock_addr(struct page *node_page,
 {
        struct f2fs_node *raw_node;
        __le32 *addr_array;
-       raw_node = (struct f2fs_node *)page_address(node_page);
+       raw_node = F2FS_NODE(node_page);
        addr_array = blkaddr_in_node(raw_node);
        return le32_to_cpu(addr_array[offset]);
 }
@@ -816,7 +1629,25 @@ static inline int f2fs_test_bit(unsigned int nr, char *addr)
        return mask & *addr;
 }
 
-static inline int f2fs_set_bit(unsigned int nr, char *addr)
+static inline void f2fs_set_bit(unsigned int nr, char *addr)
+{
+       int mask;
+
+       addr += (nr >> 3);
+       mask = 1 << (7 - (nr & 0x07));
+       *addr |= mask;
+}
+
+static inline void f2fs_clear_bit(unsigned int nr, char *addr)
+{
+       int mask;
+
+       addr += (nr >> 3);
+       mask = 1 << (7 - (nr & 0x07));
+       *addr &= ~mask;
+}
+
+static inline int f2fs_test_and_set_bit(unsigned int nr, char *addr)
 {
        int mask;
        int ret;
@@ -828,7 +1659,7 @@ static inline int f2fs_set_bit(unsigned int nr, char *addr)
        return ret;
 }
 
-static inline int f2fs_clear_bit(unsigned int nr, char *addr)
+static inline int f2fs_test_and_clear_bit(unsigned int nr, char *addr)
 {
        int mask;
        int ret;
@@ -840,52 +1671,380 @@ static inline int f2fs_clear_bit(unsigned int nr, char *addr)
        return ret;
 }
 
+static inline void f2fs_change_bit(unsigned int nr, char *addr)
+{
+       int mask;
+
+       addr += (nr >> 3);
+       mask = 1 << (7 - (nr & 0x07));
+       *addr ^= mask;
+}
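
These helpers address bits most-significant-first within each byte (1 << (7 - (nr & 7))), matching f2fs's on-disk bitmap layout, unlike the kernel's native test_bit()/set_bit(); the old names move to f2fs_test_and_{set,clear}_bit so the plain names can become the non-returning variants. A standalone check of the bit numbering:

	#include <stdio.h>

	/* same math as f2fs_set_bit(): MSB-first within each byte */
	static void demo_set_bit(unsigned int nr, char *addr)
	{
		addr += (nr >> 3);
		*addr |= 1 << (7 - (nr & 0x07));
	}

	int main(void)
	{
		char map[2] = {0, 0};

		demo_set_bit(0, map);	/* bit 0 lands in 0x80 of byte 0, not 0x01 */
		demo_set_bit(9, map);	/* bit 9 lands in 0x40 of byte 1 */
		printf("%02x %02x\n", (unsigned char)map[0], (unsigned char)map[1]);
		return 0;	/* prints: 80 40 */
	}
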
+
 /* used for f2fs_inode_info->flags */
 enum {
        FI_NEW_INODE,           /* indicate newly allocated inode */
+       FI_DIRTY_INODE,         /* indicate inode is dirty or not */
+       FI_AUTO_RECOVER,        /* indicate inode is recoverable */
+       FI_DIRTY_DIR,           /* indicate directory has dirty pages */
        FI_INC_LINK,            /* need to increment i_nlink */
        FI_ACL_MODE,            /* indicate acl mode */
        FI_NO_ALLOC,            /* should not allocate any blocks */
+       FI_FREE_NID,            /* free allocated nid */
+       FI_NO_EXTENT,           /* not to use the extent cache */
+       FI_INLINE_XATTR,        /* used for inline xattr */
+       FI_INLINE_DATA,         /* used for inline data */
+       FI_INLINE_DENTRY,       /* used for inline dentry */
+       FI_APPEND_WRITE,        /* inode has appended data */
+       FI_UPDATE_WRITE,        /* inode has in-place-update data */
+       FI_NEED_IPU,            /* used for ipu per file */
+       FI_ATOMIC_FILE,         /* indicate atomic file */
+       FI_VOLATILE_FILE,       /* indicate volatile file */
+       FI_FIRST_BLOCK_WRITTEN, /* indicate #0 data block was written */
+       FI_DROP_CACHE,          /* drop dirty page cache */
+       FI_DATA_EXIST,          /* indicate data exists */
+       FI_INLINE_DOTS,         /* indicate inline dot dentries */
+       FI_DO_DEFRAG,           /* indicate defragment is running */
+       FI_DIRTY_FILE,          /* indicate regular/symlink has dirty pages */
 };
 
-static inline void set_inode_flag(struct f2fs_inode_info *fi, int flag)
+static inline void __mark_inode_dirty_flag(struct inode *inode,
+                                               int flag, bool set)
+{
+       switch (flag) {
+       case FI_INLINE_XATTR:
+       case FI_INLINE_DATA:
+       case FI_INLINE_DENTRY:
+               if (set)
+                       return;
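+               /* fall through: clearing an inline flag dirties the inode */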
+       case FI_DATA_EXIST:
+       case FI_INLINE_DOTS:
+               f2fs_mark_inode_dirty_sync(inode, true);
+       }
+}
+
+static inline void set_inode_flag(struct inode *inode, int flag)
+{
+       if (!test_bit(flag, &F2FS_I(inode)->flags))
+               set_bit(flag, &F2FS_I(inode)->flags);
+       __mark_inode_dirty_flag(inode, flag, true);
+}
+
+static inline int is_inode_flag_set(struct inode *inode, int flag)
+{
+       return test_bit(flag, &F2FS_I(inode)->flags);
+}
+
+static inline void clear_inode_flag(struct inode *inode, int flag)
+{
+       if (test_bit(flag, &F2FS_I(inode)->flags))
+               clear_bit(flag, &F2FS_I(inode)->flags);
+       __mark_inode_dirty_flag(inode, flag, false);
+}
+
+static inline void set_acl_inode(struct inode *inode, umode_t mode)
+{
+       F2FS_I(inode)->i_acl_mode = mode;
+       set_inode_flag(inode, FI_ACL_MODE);
+       f2fs_mark_inode_dirty_sync(inode, false);
+}
+
+static inline void f2fs_i_links_write(struct inode *inode, bool inc)
+{
+       if (inc)
+               inc_nlink(inode);
+       else
+               drop_nlink(inode);
+       f2fs_mark_inode_dirty_sync(inode, true);
+}
+
+static inline void f2fs_i_blocks_write(struct inode *inode,
+                                       blkcnt_t diff, bool add)
+{
+       bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
+       bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);
+
+       inode->i_blocks = add ? inode->i_blocks + diff :
+                               inode->i_blocks - diff;
+       f2fs_mark_inode_dirty_sync(inode, true);
+       if (clean || recover)
+               set_inode_flag(inode, FI_AUTO_RECOVER);
+}
+
+static inline void f2fs_i_size_write(struct inode *inode, loff_t i_size)
+{
+       bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
+       bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);
+
+       if (i_size_read(inode) == i_size)
+               return;
+
+       i_size_write(inode, i_size);
+       f2fs_mark_inode_dirty_sync(inode, true);
+       if (clean || recover)
+               set_inode_flag(inode, FI_AUTO_RECOVER);
+}
+
+static inline void f2fs_i_depth_write(struct inode *inode, unsigned int depth)
+{
+       F2FS_I(inode)->i_current_depth = depth;
+       f2fs_mark_inode_dirty_sync(inode, true);
+}
+
+static inline void f2fs_i_xnid_write(struct inode *inode, nid_t xnid)
+{
+       F2FS_I(inode)->i_xattr_nid = xnid;
+       f2fs_mark_inode_dirty_sync(inode, true);
+}
+
+static inline void f2fs_i_pino_write(struct inode *inode, nid_t pino)
+{
+       F2FS_I(inode)->i_pino = pino;
+       f2fs_mark_inode_dirty_sync(inode, true);
+}
+
+static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri)
+{
+       struct f2fs_inode_info *fi = F2FS_I(inode);
+
+       if (ri->i_inline & F2FS_INLINE_XATTR)
+               set_bit(FI_INLINE_XATTR, &fi->flags);
+       if (ri->i_inline & F2FS_INLINE_DATA)
+               set_bit(FI_INLINE_DATA, &fi->flags);
+       if (ri->i_inline & F2FS_INLINE_DENTRY)
+               set_bit(FI_INLINE_DENTRY, &fi->flags);
+       if (ri->i_inline & F2FS_DATA_EXIST)
+               set_bit(FI_DATA_EXIST, &fi->flags);
+       if (ri->i_inline & F2FS_INLINE_DOTS)
+               set_bit(FI_INLINE_DOTS, &fi->flags);
+}
+
+static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri)
+{
+       ri->i_inline = 0;
+
+       if (is_inode_flag_set(inode, FI_INLINE_XATTR))
+               ri->i_inline |= F2FS_INLINE_XATTR;
+       if (is_inode_flag_set(inode, FI_INLINE_DATA))
+               ri->i_inline |= F2FS_INLINE_DATA;
+       if (is_inode_flag_set(inode, FI_INLINE_DENTRY))
+               ri->i_inline |= F2FS_INLINE_DENTRY;
+       if (is_inode_flag_set(inode, FI_DATA_EXIST))
+               ri->i_inline |= F2FS_DATA_EXIST;
+       if (is_inode_flag_set(inode, FI_INLINE_DOTS))
+               ri->i_inline |= F2FS_INLINE_DOTS;
+}
+
+static inline int f2fs_has_inline_xattr(struct inode *inode)
+{
+       return is_inode_flag_set(inode, FI_INLINE_XATTR);
+}
+
+static inline unsigned int addrs_per_inode(struct inode *inode)
+{
+       if (f2fs_has_inline_xattr(inode))
+               return DEF_ADDRS_PER_INODE - F2FS_INLINE_XATTR_ADDRS;
+       return DEF_ADDRS_PER_INODE;
+}
+
+static inline void *inline_xattr_addr(struct page *page)
+{
+       struct f2fs_inode *ri = F2FS_INODE(page);
+       return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE -
+                                       F2FS_INLINE_XATTR_ADDRS]);
+}
+
+static inline int inline_xattr_size(struct inode *inode)
+{
+       if (f2fs_has_inline_xattr(inode))
+               return F2FS_INLINE_XATTR_ADDRS << 2;
+       else
+               return 0;
+}
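
Each reserved slot is a 4-byte block address reused as raw xattr space, so the byte size is the slot count shifted left by two. A standalone check, assuming the defaults from include/linux/f2fs_fs.h at this point in the series (923 address slots per inode block, 50 of them reserved for inline xattrs):

	#include <stdio.h>

	#define DEF_ADDRS_PER_INODE	923	/* address slots in an inode block */
	#define F2FS_INLINE_XATTR_ADDRS	50	/* slots reserved for inline xattrs */

	int main(void)
	{
		/* each slot is a 4-byte address, so "<< 2" converts slots to bytes */
		printf("data slots: %d, inline xattr bytes: %d\n",
		       DEF_ADDRS_PER_INODE - F2FS_INLINE_XATTR_ADDRS,	/* 873 */
		       F2FS_INLINE_XATTR_ADDRS << 2);			/* 200 */
		return 0;
	}
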
+
+static inline int f2fs_has_inline_data(struct inode *inode)
+{
+       return is_inode_flag_set(inode, FI_INLINE_DATA);
+}
+
+static inline void f2fs_clear_inline_inode(struct inode *inode)
+{
+       clear_inode_flag(inode, FI_INLINE_DATA);
+       clear_inode_flag(inode, FI_DATA_EXIST);
+}
+
+static inline int f2fs_exist_data(struct inode *inode)
+{
+       return is_inode_flag_set(inode, FI_DATA_EXIST);
+}
+
+static inline int f2fs_has_inline_dots(struct inode *inode)
+{
+       return is_inode_flag_set(inode, FI_INLINE_DOTS);
+}
+
+static inline bool f2fs_is_atomic_file(struct inode *inode)
+{
+       return is_inode_flag_set(inode, FI_ATOMIC_FILE);
+}
+
+static inline bool f2fs_is_volatile_file(struct inode *inode)
+{
+       return is_inode_flag_set(inode, FI_VOLATILE_FILE);
+}
+
+static inline bool f2fs_is_first_block_written(struct inode *inode)
+{
+       return is_inode_flag_set(inode, FI_FIRST_BLOCK_WRITTEN);
+}
+
+static inline bool f2fs_is_drop_cache(struct inode *inode)
+{
+       return is_inode_flag_set(inode, FI_DROP_CACHE);
+}
+
+static inline void *inline_data_addr(struct page *page)
+{
+       struct f2fs_inode *ri = F2FS_INODE(page);
+       return (void *)&(ri->i_addr[1]);
+}
+
+static inline int f2fs_has_inline_dentry(struct inode *inode)
+{
+       return is_inode_flag_set(inode, FI_INLINE_DENTRY);
+}
+
+static inline void f2fs_dentry_kunmap(struct inode *dir, struct page *page)
 {
-       set_bit(flag, &fi->flags);
+       if (!f2fs_has_inline_dentry(dir))
+               kunmap(page);
 }
 
-static inline int is_inode_flag_set(struct f2fs_inode_info *fi, int flag)
+static inline int is_file(struct inode *inode, int type)
 {
-       return test_bit(flag, &fi->flags);
+       return F2FS_I(inode)->i_advise & type;
 }
 
-static inline void clear_inode_flag(struct f2fs_inode_info *fi, int flag)
+static inline void set_file(struct inode *inode, int type)
 {
-       clear_bit(flag, &fi->flags);
+       F2FS_I(inode)->i_advise |= type;
+       f2fs_mark_inode_dirty_sync(inode, true);
 }
 
-static inline void set_acl_inode(struct f2fs_inode_info *fi, umode_t mode)
+static inline void clear_file(struct inode *inode, int type)
 {
-       fi->i_acl_mode = mode;
-       set_inode_flag(fi, FI_ACL_MODE);
+       F2FS_I(inode)->i_advise &= ~type;
+       f2fs_mark_inode_dirty_sync(inode, true);
 }
 
-static inline int cond_clear_inode_flag(struct f2fs_inode_info *fi, int flag)
+static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync)
 {
-       if (is_inode_flag_set(fi, FI_ACL_MODE)) {
-               clear_inode_flag(fi, FI_ACL_MODE);
-               return 1;
+       if (dsync) {
+               struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+               bool ret;
+
+               spin_lock(&sbi->inode_lock[DIRTY_META]);
+               ret = list_empty(&F2FS_I(inode)->gdirty_list);
+               spin_unlock(&sbi->inode_lock[DIRTY_META]);
+               return ret;
        }
-       return 0;
+       if (!is_inode_flag_set(inode, FI_AUTO_RECOVER) ||
+                       file_keep_isize(inode) ||
+                       i_size_read(inode) & PAGE_MASK)
+               return false;
+       return F2FS_I(inode)->last_disk_size == i_size_read(inode);
+}
+
+static inline int f2fs_readonly(struct super_block *sb)
+{
+       return sb->s_flags & MS_RDONLY;
+}
+
+static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi)
+{
+       return is_set_ckpt_flags(sbi, CP_ERROR_FLAG);
+}
+
+static inline bool is_dot_dotdot(const struct qstr *str)
+{
+       if (str->len == 1 && str->name[0] == '.')
+               return true;
+
+       if (str->len == 2 && str->name[0] == '.' && str->name[1] == '.')
+               return true;
+
+       return false;
+}
+
+static inline bool f2fs_may_extent_tree(struct inode *inode)
+{
+       mode_t mode = inode->i_mode;
+
+       if (!test_opt(F2FS_I_SB(inode), EXTENT_CACHE) ||
+                       is_inode_flag_set(inode, FI_NO_EXTENT))
+               return false;
+
+       return S_ISREG(mode);
+}
+
+static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
+                                       size_t size, gfp_t flags)
+{
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+       if (time_to_inject(sbi, FAULT_KMALLOC))
+               return NULL;
+#endif
+       return kmalloc(size, flags);
+}
+
+static inline void *f2fs_kvmalloc(size_t size, gfp_t flags)
+{
+       void *ret;
+
+       ret = kmalloc(size, flags | __GFP_NOWARN);
+       if (!ret)
+               ret = __vmalloc(size, flags, PAGE_KERNEL);
+       return ret;
+}
+
+static inline void *f2fs_kvzalloc(size_t size, gfp_t flags)
+{
+       void *ret;
+
+       ret = kzalloc(size, flags | __GFP_NOWARN);
+       if (!ret)
+               ret = __vmalloc(size, flags | __GFP_ZERO, PAGE_KERNEL);
+       return ret;
 }
 
+static inline void f2fs_kvfree(void *ptr)
+{
+       if (is_vmalloc_addr(ptr))
+               vfree(ptr);
+       else
+               kfree(ptr);
+}
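
f2fs_kvmalloc()/f2fs_kvzalloc() try the slab allocator first and quietly fall back to vmalloc for large or fragmented requests, so the buffer must be released with f2fs_kvfree(), which picks the right path via is_vmalloc_addr(). A hedged sketch (f2fs_demo_big_buf is an invented name; mount-time SIT/NAT tables are the kind of consumer this targets):

	/* illustrative only: a large buffer that may come from slab or vmalloc */
	static void *f2fs_demo_big_buf(size_t bytes)
	{
		void *buf = f2fs_kvzalloc(bytes, GFP_KERNEL);

		/* must be released with f2fs_kvfree(), never bare kfree()/vfree() */
		return buf;
	}
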
+
+#define get_inode_mode(i) \
+       ((is_inode_flag_set(i, FI_ACL_MODE)) ? \
+        (F2FS_I(i)->i_acl_mode) : ((i)->i_mode))
+
+/* get offset of first page in next direct node */
+#define PGOFS_OF_NEXT_DNODE(pgofs, inode)                              \
+       ((pgofs < ADDRS_PER_INODE(inode)) ? ADDRS_PER_INODE(inode) :    \
+       (pgofs - ADDRS_PER_INODE(inode) + ADDRS_PER_BLOCK) /    \
+       ADDRS_PER_BLOCK * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode))
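
A worked standalone check of the macro's rounding, assuming the default geometry with no inline xattrs (923 direct pointers in the inode block, 1018 per direct node block):

	#include <stdio.h>

	#define ADDRS_PER_INODE	923	/* direct pointers in the inode block */
	#define ADDRS_PER_BLOCK	1018	/* pointers per direct node block */

	/* same math as PGOFS_OF_NEXT_DNODE, with the geometry fixed */
	static unsigned int next_dnode(unsigned int pgofs)
	{
		if (pgofs < ADDRS_PER_INODE)
			return ADDRS_PER_INODE;
		return (pgofs - ADDRS_PER_INODE + ADDRS_PER_BLOCK) /
		       ADDRS_PER_BLOCK * ADDRS_PER_BLOCK + ADDRS_PER_INODE;
	}

	int main(void)
	{
		/* offsets 0..922 live in the inode; the first dnode starts at 923 */
		printf("%u %u %u\n",
		       next_dnode(100),		/* 923  */
		       next_dnode(923),		/* 1941 */
		       next_dnode(1941));	/* 2959 */
		return 0;
	}
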
+
 /*
  * file.c
  */
 int f2fs_sync_file(struct file *, loff_t, loff_t, int);
 void truncate_data_blocks(struct dnode_of_data *);
-void f2fs_truncate(struct inode *);
+int truncate_blocks(struct inode *, u64, bool);
+int f2fs_truncate(struct inode *);
+int f2fs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
 int f2fs_setattr(struct dentry *, struct iattr *);
 int truncate_hole(struct inode *, pgoff_t, pgoff_t);
+int truncate_data_blocks_range(struct dnode_of_data *, int);
 long f2fs_ioctl(struct file *, unsigned int, unsigned long);
 long f2fs_compat_ioctl(struct file *, unsigned int, unsigned long);
 
@@ -894,10 +2053,13 @@ long f2fs_compat_ioctl(struct file *, unsigned int, unsigned long);
  */
 void f2fs_set_inode_flags(struct inode *);
 struct inode *f2fs_iget(struct super_block *, unsigned long);
-void update_inode(struct inode *, struct page *);
+struct inode *f2fs_iget_retry(struct super_block *, unsigned long);
+int try_to_free_nats(struct f2fs_sb_info *, int);
+int update_inode(struct inode *, struct page *);
 int update_inode_page(struct inode *);
 int f2fs_write_inode(struct inode *, struct writeback_control *);
 void f2fs_evict_inode(struct inode *);
+void handle_failed_inode(struct inode *);
 
 /*
  * namei.c
@@ -907,35 +2069,63 @@ struct dentry *f2fs_get_parent(struct dentry *child);
 /*
  * dir.c
  */
-struct f2fs_dir_entry *f2fs_find_entry(struct inode *, struct qstr *,
+void set_de_type(struct f2fs_dir_entry *, umode_t);
+unsigned char get_de_type(struct f2fs_dir_entry *);
+struct f2fs_dir_entry *find_target_dentry(struct fscrypt_name *,
+                       f2fs_hash_t, int *, struct f2fs_dentry_ptr *);
+int f2fs_fill_dentries(struct file *, void *, filldir_t,
+                       struct f2fs_dentry_ptr *, unsigned int,
+                       unsigned int, struct fscrypt_str *);
+void do_make_empty_dir(struct inode *, struct inode *,
+                       struct f2fs_dentry_ptr *);
+struct page *init_inode_metadata(struct inode *, struct inode *,
+               const struct qstr *, const struct qstr *, struct page *);
+void update_parent_metadata(struct inode *, struct inode *, unsigned int);
+int room_for_filename(const void *, int, int);
+void f2fs_drop_nlink(struct inode *, struct inode *);
+struct f2fs_dir_entry *__f2fs_find_entry(struct inode *, struct fscrypt_name *,
+                                                       struct page **);
+struct f2fs_dir_entry *f2fs_find_entry(struct inode *, const struct qstr *,
                                                        struct page **);
 struct f2fs_dir_entry *f2fs_parent_dir(struct inode *, struct page **);
-ino_t f2fs_inode_by_name(struct inode *, struct qstr *);
+ino_t f2fs_inode_by_name(struct inode *, struct qstr *, struct page **);
 void f2fs_set_link(struct inode *, struct f2fs_dir_entry *,
                                struct page *, struct inode *);
-void init_dent_inode(const struct qstr *, struct page *);
-int __f2fs_add_link(struct inode *, const struct qstr *, struct inode *);
-void f2fs_delete_entry(struct f2fs_dir_entry *, struct page *, struct inode *);
-int f2fs_make_empty(struct inode *, struct inode *);
+int update_dent_inode(struct inode *, struct inode *, const struct qstr *);
+void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *,
+                       const struct qstr *, f2fs_hash_t , unsigned int);
+int f2fs_add_regular_entry(struct inode *, const struct qstr *,
+                       const struct qstr *, struct inode *, nid_t, umode_t);
+int __f2fs_do_add_link(struct inode *, struct fscrypt_name*, struct inode *,
+                       nid_t, umode_t);
+int __f2fs_add_link(struct inode *, const struct qstr *, struct inode *, nid_t,
+                       umode_t);
+void f2fs_delete_entry(struct f2fs_dir_entry *, struct page *, struct inode *,
+                                                       struct inode *);
+int f2fs_do_tmpfile(struct inode *, struct inode *);
 bool f2fs_empty_dir(struct inode *);
 
 static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
 {
-       return __f2fs_add_link(dentry->d_parent->d_inode, &dentry->d_name,
-                               inode);
+       return __f2fs_add_link(d_inode(dentry->d_parent), &dentry->d_name,
+                               inode, inode->i_ino, inode->i_mode);
 }
 
 /*
  * super.c
  */
+int f2fs_inode_dirtied(struct inode *, bool);
+void f2fs_inode_synced(struct inode *);
+int f2fs_commit_super(struct f2fs_sb_info *, bool);
 int f2fs_sync_fs(struct super_block *, int);
 extern __printf(3, 4)
 void f2fs_msg(struct super_block *, const char *, const char *, ...);
+int sanity_check_ckpt(struct f2fs_sb_info *sbi);
 
 /*
  * hash.c
  */
-f2fs_hash_t f2fs_dentry_hash(const char *, size_t);
+f2fs_hash_t f2fs_dentry_hash(const struct qstr *);
 
 /*
  * node.c
@@ -943,23 +2133,33 @@ f2fs_hash_t f2fs_dentry_hash(const char *, size_t);
 struct dnode_of_data;
 struct node_info;
 
-int is_checkpointed_node(struct f2fs_sb_info *, nid_t);
+bool available_free_memory(struct f2fs_sb_info *, int);
+int need_dentry_mark(struct f2fs_sb_info *, nid_t);
+bool is_checkpointed_node(struct f2fs_sb_info *, nid_t);
+bool need_inode_block_update(struct f2fs_sb_info *, nid_t);
 void get_node_info(struct f2fs_sb_info *, nid_t, struct node_info *);
+pgoff_t get_next_page_offset(struct dnode_of_data *, pgoff_t);
 int get_dnode_of_data(struct dnode_of_data *, pgoff_t, int);
 int truncate_inode_blocks(struct inode *, pgoff_t);
+int truncate_xattr_node(struct inode *, struct page *);
+int wait_on_node_pages_writeback(struct f2fs_sb_info *, nid_t);
 int remove_inode_page(struct inode *);
-int new_inode_page(struct inode *, const struct qstr *);
-struct page *new_node_page(struct dnode_of_data *, unsigned int);
+struct page *new_inode_page(struct inode *);
+struct page *new_node_page(struct dnode_of_data *, unsigned int, struct page *);
 void ra_node_page(struct f2fs_sb_info *, nid_t);
 struct page *get_node_page(struct f2fs_sb_info *, pgoff_t);
 struct page *get_node_page_ra(struct page *, int);
-void sync_inode_page(struct dnode_of_data *);
-int sync_node_pages(struct f2fs_sb_info *, nid_t, struct writeback_control *);
+void move_node_page(struct page *, int);
+int fsync_node_pages(struct f2fs_sb_info *, struct inode *,
+                       struct writeback_control *, bool);
+int sync_node_pages(struct f2fs_sb_info *, struct writeback_control *);
+void build_free_nids(struct f2fs_sb_info *, bool);
 bool alloc_nid(struct f2fs_sb_info *, nid_t *);
 void alloc_nid_done(struct f2fs_sb_info *, nid_t);
 void alloc_nid_failed(struct f2fs_sb_info *, nid_t);
-void recover_node_page(struct f2fs_sb_info *, struct page *,
-               struct f2fs_summary *, struct node_info *, block_t);
+int try_to_free_nids(struct f2fs_sb_info *, int);
+void recover_inline_xattr(struct inode *, struct page *);
+void recover_xattr_data(struct inode *, struct page *, block_t);
 int recover_inode_page(struct f2fs_sb_info *, struct page *);
 int restore_node_summary(struct f2fs_sb_info *, unsigned int,
                                struct f2fs_summary_block *);
@@ -972,78 +2172,118 @@ void destroy_node_manager_caches(void);
 /*
  * segment.c
  */
-void f2fs_balance_fs(struct f2fs_sb_info *);
+void register_inmem_page(struct inode *, struct page *);
+void drop_inmem_pages(struct inode *);
+int commit_inmem_pages(struct inode *);
+void f2fs_balance_fs(struct f2fs_sb_info *, bool);
+void f2fs_balance_fs_bg(struct f2fs_sb_info *);
+int f2fs_issue_flush(struct f2fs_sb_info *);
+int create_flush_cmd_control(struct f2fs_sb_info *);
+void destroy_flush_cmd_control(struct f2fs_sb_info *, bool);
 void invalidate_blocks(struct f2fs_sb_info *, block_t);
-void locate_dirty_segment(struct f2fs_sb_info *, unsigned int);
-void clear_prefree_segments(struct f2fs_sb_info *);
-int npages_for_summary_flush(struct f2fs_sb_info *);
+bool is_checkpointed_data(struct f2fs_sb_info *, block_t);
+void refresh_sit_entry(struct f2fs_sb_info *, block_t, block_t);
+void clear_prefree_segments(struct f2fs_sb_info *, struct cp_control *);
+void release_discard_addrs(struct f2fs_sb_info *);
+int npages_for_summary_flush(struct f2fs_sb_info *, bool);
 void allocate_new_segments(struct f2fs_sb_info *);
+int f2fs_trim_fs(struct f2fs_sb_info *, struct fstrim_range *);
 struct page *get_sum_page(struct f2fs_sb_info *, unsigned int);
-struct bio *f2fs_bio_alloc(struct block_device *, int);
-void f2fs_submit_bio(struct f2fs_sb_info *, enum page_type, bool sync);
+void update_meta_page(struct f2fs_sb_info *, void *, block_t);
 void write_meta_page(struct f2fs_sb_info *, struct page *);
-void write_node_page(struct f2fs_sb_info *, struct page *, unsigned int,
-                                       block_t, block_t *);
-void write_data_page(struct inode *, struct page *, struct dnode_of_data*,
-                                       block_t, block_t *);
-void rewrite_data_page(struct f2fs_sb_info *, struct page *, block_t);
-void recover_data_page(struct f2fs_sb_info *, struct page *,
-                               struct f2fs_summary *, block_t, block_t);
-void rewrite_node_page(struct f2fs_sb_info *, struct page *,
-                               struct f2fs_summary *, block_t, block_t);
+void write_node_page(unsigned int, struct f2fs_io_info *);
+void write_data_page(struct dnode_of_data *, struct f2fs_io_info *);
+void rewrite_data_page(struct f2fs_io_info *);
+void __f2fs_replace_block(struct f2fs_sb_info *, struct f2fs_summary *,
+                                       block_t, block_t, bool, bool);
+void f2fs_replace_block(struct f2fs_sb_info *, struct dnode_of_data *,
+                               block_t, block_t, unsigned char, bool, bool);
+void allocate_data_block(struct f2fs_sb_info *, struct page *,
+               block_t, block_t *, struct f2fs_summary *, int);
+void f2fs_wait_on_page_writeback(struct page *, enum page_type, bool);
+void f2fs_wait_on_encrypted_page_writeback(struct f2fs_sb_info *, block_t);
 void write_data_summaries(struct f2fs_sb_info *, block_t);
 void write_node_summaries(struct f2fs_sb_info *, block_t);
-int lookup_journal_in_cursum(struct f2fs_summary_block *,
-                                       int, unsigned int, int);
-void flush_sit_entries(struct f2fs_sb_info *);
+int lookup_journal_in_cursum(struct f2fs_journal *, int, unsigned int, int);
+void flush_sit_entries(struct f2fs_sb_info *, struct cp_control *);
 int build_segment_manager(struct f2fs_sb_info *);
 void destroy_segment_manager(struct f2fs_sb_info *);
+int __init create_segment_manager_caches(void);
+void destroy_segment_manager_caches(void);
 
 /*
  * checkpoint.c
  */
+void f2fs_stop_checkpoint(struct f2fs_sb_info *, bool);
 struct page *grab_meta_page(struct f2fs_sb_info *, pgoff_t);
 struct page *get_meta_page(struct f2fs_sb_info *, pgoff_t);
+struct page *get_tmp_page(struct f2fs_sb_info *, pgoff_t);
+bool is_valid_blkaddr(struct f2fs_sb_info *, block_t, int);
+int ra_meta_pages(struct f2fs_sb_info *, block_t, int, int, bool);
+void ra_meta_pages_cond(struct f2fs_sb_info *, pgoff_t);
 long sync_meta_pages(struct f2fs_sb_info *, enum page_type, long);
-int check_orphan_space(struct f2fs_sb_info *);
-void add_orphan_inode(struct f2fs_sb_info *, nid_t);
+void add_ino_entry(struct f2fs_sb_info *, nid_t, int type);
+void remove_ino_entry(struct f2fs_sb_info *, nid_t, int type);
+void release_ino_entry(struct f2fs_sb_info *, bool);
+bool exist_written_data(struct f2fs_sb_info *, nid_t, int);
+int f2fs_sync_inode_meta(struct f2fs_sb_info *);
+int acquire_orphan_inode(struct f2fs_sb_info *);
+void release_orphan_inode(struct f2fs_sb_info *);
+void add_orphan_inode(struct inode *);
 void remove_orphan_inode(struct f2fs_sb_info *, nid_t);
 int recover_orphan_inodes(struct f2fs_sb_info *);
 int get_valid_checkpoint(struct f2fs_sb_info *);
-void set_dirty_dir_page(struct inode *, struct page *);
-void remove_dirty_dir_inode(struct inode *);
-void sync_dirty_dir_inodes(struct f2fs_sb_info *);
-void write_checkpoint(struct f2fs_sb_info *, bool);
-void init_orphan_info(struct f2fs_sb_info *);
+void update_dirty_page(struct inode *, struct page *);
+void remove_dirty_inode(struct inode *);
+int sync_dirty_inodes(struct f2fs_sb_info *, enum inode_type);
+int write_checkpoint(struct f2fs_sb_info *, struct cp_control *);
+void init_ino_entry_info(struct f2fs_sb_info *);
 int __init create_checkpoint_caches(void);
 void destroy_checkpoint_caches(void);
 
 /*
  * data.c
  */
+void f2fs_submit_merged_bio(struct f2fs_sb_info *, enum page_type, int);
+void f2fs_submit_merged_bio_cond(struct f2fs_sb_info *, struct inode *,
+                               struct page *, nid_t, enum page_type, int);
+void f2fs_flush_merged_bios(struct f2fs_sb_info *);
+int f2fs_submit_page_bio(struct f2fs_io_info *);
+void f2fs_submit_page_mbio(struct f2fs_io_info *);
+struct block_device *f2fs_target_device(struct f2fs_sb_info *,
+                               block_t, struct bio *);
+int f2fs_target_device_index(struct f2fs_sb_info *, block_t);
+void set_data_blkaddr(struct dnode_of_data *);
+void f2fs_update_data_blkaddr(struct dnode_of_data *, block_t);
+int reserve_new_blocks(struct dnode_of_data *, blkcnt_t);
 int reserve_new_block(struct dnode_of_data *);
-void update_extent_cache(block_t, struct dnode_of_data *);
-struct page *find_data_page(struct inode *, pgoff_t, bool);
-struct page *get_lock_data_page(struct inode *, pgoff_t);
-struct page *get_new_data_page(struct inode *, pgoff_t, bool);
-int f2fs_readpage(struct f2fs_sb_info *, struct page *, block_t, int);
-int do_write_data_page(struct page *);
+int f2fs_get_block(struct dnode_of_data *, pgoff_t);
+int f2fs_preallocate_blocks(struct inode *, loff_t, size_t, bool);
+int f2fs_reserve_block(struct dnode_of_data *, pgoff_t);
+struct page *get_read_data_page(struct inode *, pgoff_t, int, bool);
+struct page *find_data_page(struct inode *, pgoff_t);
+struct page *get_lock_data_page(struct inode *, pgoff_t, bool);
+struct page *get_new_data_page(struct inode *, struct page *, pgoff_t, bool);
+int do_write_data_page(struct f2fs_io_info *);
+int f2fs_map_blocks(struct inode *, struct f2fs_map_blocks *, int, int);
+int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *, u64, u64);
+void f2fs_set_page_dirty_nobuffers(struct page *);
+void f2fs_invalidate_page(struct page *, unsigned long);
+int f2fs_release_page(struct page *, gfp_t);
 
 /*
  * gc.c
  */
 int start_gc_thread(struct f2fs_sb_info *);
 void stop_gc_thread(struct f2fs_sb_info *);
-block_t start_bidx_of_node(unsigned int);
-int f2fs_gc(struct f2fs_sb_info *);
+block_t start_bidx_of_node(unsigned int, struct inode *);
+int f2fs_gc(struct f2fs_sb_info *, bool, bool);
 void build_gc_manager(struct f2fs_sb_info *);
-int __init create_gc_caches(void);
-void destroy_gc_caches(void);
 
 /*
  * recovery.c
  */
-int recover_fsync_data(struct f2fs_sb_info *);
+int recover_fsync_data(struct f2fs_sb_info *, bool);
 bool space_for_roll_forward(struct f2fs_sb_info *);
 
 /*
@@ -1053,74 +2293,153 @@ bool space_for_roll_forward(struct f2fs_sb_info *);
 struct f2fs_stat_info {
        struct list_head stat_list;
        struct f2fs_sb_info *sbi;
-       struct mutex stat_lock;
        int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
        int main_area_segs, main_area_sections, main_area_zones;
-       int hit_ext, total_ext;
-       int ndirty_node, ndirty_dent, ndirty_dirs, ndirty_meta;
-       int nats, sits, fnids;
+       unsigned long long hit_largest, hit_cached, hit_rbtree;
+       unsigned long long hit_total, total_ext;
+       int ext_tree, zombie_tree, ext_node;
+       int ndirty_node, ndirty_dent, ndirty_meta, ndirty_data, ndirty_imeta;
+       int inmem_pages;
+       unsigned int ndirty_dirs, ndirty_files, ndirty_all;
+       int nats, dirty_nats, sits, dirty_sits, free_nids, alloc_nids;
        int total_count, utilization;
-       int bg_gc;
-       unsigned int valid_count, valid_node_count, valid_inode_count;
+       int bg_gc, nr_wb_cp_data, nr_wb_data;
+       int inline_xattr, inline_inode, inline_dir, orphans;
+       unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks;
        unsigned int bimodal, avg_vblocks;
        int util_free, util_valid, util_invalid;
        int rsvd_segs, overp_segs;
        int dirty_count, node_pages, meta_pages;
-       int prefree_count, call_count;
+       int prefree_count, call_count, cp_count, bg_cp_count;
        int tot_segs, node_segs, data_segs, free_segs, free_secs;
+       int bg_node_segs, bg_data_segs;
        int tot_blks, data_blks, node_blks;
+       int bg_data_blks, bg_node_blks;
        int curseg[NR_CURSEG_TYPE];
        int cursec[NR_CURSEG_TYPE];
        int curzone[NR_CURSEG_TYPE];
 
        unsigned int segment_count[2];
        unsigned int block_count[2];
-       unsigned base_mem, cache_mem;
+       unsigned int inplace_count;
+       unsigned long long base_mem, cache_mem, page_mem;
 };
 
-#define stat_inc_call_count(si)        ((si)->call_count++)
+static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
+{
+       return (struct f2fs_stat_info *)sbi->stat_info;
+}
 
-#define stat_inc_seg_count(sbi, type)                                  \
+#define stat_inc_cp_count(si)          ((si)->cp_count++)
+#define stat_inc_bg_cp_count(si)       ((si)->bg_cp_count++)
+#define stat_inc_call_count(si)                ((si)->call_count++)
+#define stat_inc_bggc_count(sbi)       ((sbi)->bg_gc++)
+#define stat_inc_dirty_inode(sbi, type)        ((sbi)->ndirty_inode[type]++)
+#define stat_dec_dirty_inode(sbi, type)        ((sbi)->ndirty_inode[type]--)
+#define stat_inc_total_hit(sbi)                (atomic64_inc(&(sbi)->total_hit_ext))
+#define stat_inc_rbtree_node_hit(sbi)  (atomic64_inc(&(sbi)->read_hit_rbtree))
+#define stat_inc_largest_node_hit(sbi) (atomic64_inc(&(sbi)->read_hit_largest))
+#define stat_inc_cached_node_hit(sbi)  (atomic64_inc(&(sbi)->read_hit_cached))
+#define stat_inc_inline_xattr(inode)                                   \
+       do {                                                            \
+               if (f2fs_has_inline_xattr(inode))                       \
+                       (atomic_inc(&F2FS_I_SB(inode)->inline_xattr));  \
+       } while (0)
+#define stat_dec_inline_xattr(inode)                                   \
+       do {                                                            \
+               if (f2fs_has_inline_xattr(inode))                       \
+                       (atomic_dec(&F2FS_I_SB(inode)->inline_xattr));  \
+       } while (0)
+#define stat_inc_inline_inode(inode)                                   \
+       do {                                                            \
+               if (f2fs_has_inline_data(inode))                        \
+                       (atomic_inc(&F2FS_I_SB(inode)->inline_inode));  \
+       } while (0)
+#define stat_dec_inline_inode(inode)                                   \
+       do {                                                            \
+               if (f2fs_has_inline_data(inode))                        \
+                       (atomic_dec(&F2FS_I_SB(inode)->inline_inode));  \
+       } while (0)
+#define stat_inc_inline_dir(inode)                                     \
+       do {                                                            \
+               if (f2fs_has_inline_dentry(inode))                      \
+                       (atomic_inc(&F2FS_I_SB(inode)->inline_dir));    \
+       } while (0)
+#define stat_dec_inline_dir(inode)                                     \
+       do {                                                            \
+               if (f2fs_has_inline_dentry(inode))                      \
+                       (atomic_dec(&F2FS_I_SB(inode)->inline_dir));    \
+       } while (0)
+#define stat_inc_seg_type(sbi, curseg)                                 \
+               ((sbi)->segment_count[(curseg)->alloc_type]++)
+#define stat_inc_block_count(sbi, curseg)                              \
+               ((sbi)->block_count[(curseg)->alloc_type]++)
+#define stat_inc_inplace_blocks(sbi)                                   \
+               (atomic_inc(&(sbi)->inplace_count))
+#define stat_inc_seg_count(sbi, type, gc_type)                         \
        do {                                                            \
-               struct f2fs_stat_info *si = sbi->stat_info;             \
+               struct f2fs_stat_info *si = F2FS_STAT(sbi);             \
                (si)->tot_segs++;                                       \
-               if (type == SUM_TYPE_DATA)                              \
+               if (type == SUM_TYPE_DATA) {                            \
                        si->data_segs++;                                \
-               else                                                    \
+                       si->bg_data_segs += (gc_type == BG_GC) ? 1 : 0; \
+               } else {                                                \
                        si->node_segs++;                                \
+                       si->bg_node_segs += (gc_type == BG_GC) ? 1 : 0; \
+               }                                                       \
        } while (0)
 
 #define stat_inc_tot_blk_count(si, blks)                               \
        (si->tot_blks += (blks))
 
-#define stat_inc_data_blk_count(sbi, blks)                             \
+#define stat_inc_data_blk_count(sbi, blks, gc_type)                    \
        do {                                                            \
-               struct f2fs_stat_info *si = sbi->stat_info;             \
+               struct f2fs_stat_info *si = F2FS_STAT(sbi);             \
                stat_inc_tot_blk_count(si, blks);                       \
                si->data_blks += (blks);                                \
+               si->bg_data_blks += (gc_type == BG_GC) ? (blks) : 0;    \
        } while (0)
 
-#define stat_inc_node_blk_count(sbi, blks)                             \
+#define stat_inc_node_blk_count(sbi, blks, gc_type)                    \
        do {                                                            \
-               struct f2fs_stat_info *si = sbi->stat_info;             \
+               struct f2fs_stat_info *si = F2FS_STAT(sbi);             \
                stat_inc_tot_blk_count(si, blks);                       \
                si->node_blks += (blks);                                \
+               si->bg_node_blks += (gc_type == BG_GC) ? (blks) : 0;    \
        } while (0)
 
 int f2fs_build_stats(struct f2fs_sb_info *);
 void f2fs_destroy_stats(struct f2fs_sb_info *);
-void __init f2fs_create_root_stats(void);
+int __init f2fs_create_root_stats(void);
 void f2fs_destroy_root_stats(void);
 #else
+#define stat_inc_cp_count(si)
+#define stat_inc_bg_cp_count(si)
 #define stat_inc_call_count(si)
-#define stat_inc_seg_count(si, type)
+#define stat_inc_bggc_count(si)
+#define stat_inc_dirty_inode(sbi, type)
+#define stat_dec_dirty_inode(sbi, type)
+#define stat_inc_total_hit(sb)
+#define stat_inc_rbtree_node_hit(sb)
+#define stat_inc_largest_node_hit(sbi)
+#define stat_inc_cached_node_hit(sbi)
+#define stat_inc_inline_xattr(inode)
+#define stat_dec_inline_xattr(inode)
+#define stat_inc_inline_inode(inode)
+#define stat_dec_inline_inode(inode)
+#define stat_inc_inline_dir(inode)
+#define stat_dec_inline_dir(inode)
+#define stat_inc_seg_type(sbi, curseg)
+#define stat_inc_block_count(sbi, curseg)
+#define stat_inc_inplace_blocks(sbi)
+#define stat_inc_seg_count(sbi, type, gc_type)
 #define stat_inc_tot_blk_count(si, blks)
-#define stat_inc_data_blk_count(si, blks)
-#define stat_inc_node_blk_count(sbi, blks)
+#define stat_inc_data_blk_count(sbi, blks, gc_type)
+#define stat_inc_node_blk_count(sbi, blks, gc_type)
 
 static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
 static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
-static inline void __init f2fs_create_root_stats(void) { }
+static inline int __init f2fs_create_root_stats(void) { return 0; }
 static inline void f2fs_destroy_root_stats(void) { }
 #endif
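
With CONFIG_F2FS_STAT_FS disabled, the #else branch above turns every stat_*
macro into an empty statement, so call sites need no #ifdef guards. A hedged
sketch of a typical caller (the names mirror real GC-path call sites, shown
here for illustration only):

        /* counts a segment reclaimed by GC; compiles to ';' without stats */
        stat_inc_seg_count(sbi, SUM_TYPE_DATA, gc_type);
        stat_inc_data_blk_count(sbi, nr_blks, gc_type);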
 
@@ -1132,5 +2451,158 @@ extern const struct address_space_operations f2fs_node_aops;
 extern const struct address_space_operations f2fs_meta_aops;
 extern const struct inode_operations f2fs_dir_inode_operations;
 extern const struct inode_operations f2fs_symlink_inode_operations;
+extern const struct inode_operations f2fs_encrypted_symlink_inode_operations;
 extern const struct inode_operations f2fs_special_inode_operations;
+extern struct kmem_cache *inode_entry_slab;
+
+/*
+ * inline.c
+ */
+bool f2fs_may_inline_data(struct inode *);
+bool f2fs_may_inline_dentry(struct inode *);
+void read_inline_data(struct page *, struct page *);
+bool truncate_inline_inode(struct page *, u64);
+int f2fs_read_inline_data(struct inode *, struct page *);
+int f2fs_convert_inline_page(struct dnode_of_data *, struct page *);
+int f2fs_convert_inline_inode(struct inode *);
+int f2fs_write_inline_data(struct inode *, struct page *);
+bool recover_inline_data(struct inode *, struct page *);
+struct f2fs_dir_entry *find_in_inline_dir(struct inode *,
+                               struct fscrypt_name *, struct page **);
+int make_empty_inline_dir(struct inode *inode, struct inode *, struct page *);
+int f2fs_add_inline_entry(struct inode *, const struct qstr *,
+               const struct qstr *, struct inode *, nid_t, umode_t);
+void f2fs_delete_inline_entry(struct f2fs_dir_entry *, struct page *,
+                                               struct inode *, struct inode *);
+bool f2fs_empty_inline_dir(struct inode *);
+int f2fs_read_inline_dir(struct file *, void *, filldir_t,
+                                               struct fscrypt_str *);
+int f2fs_inline_data_fiemap(struct inode *,
+               struct fiemap_extent_info *, __u64, __u64);
+
+/*
+ * shrinker.c
+ */
+int f2fs_shrink_count(struct shrinker *, struct shrink_control *);
+int f2fs_shrink_scan(struct shrinker *, struct shrink_control *);
+void f2fs_join_shrinker(struct f2fs_sb_info *);
+void f2fs_leave_shrinker(struct f2fs_sb_info *);
+
+/*
+ * extent_cache.c
+ */
+unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *, int);
+bool f2fs_init_extent_tree(struct inode *, struct f2fs_extent *);
+void f2fs_drop_extent_tree(struct inode *);
+unsigned int f2fs_destroy_extent_node(struct inode *);
+void f2fs_destroy_extent_tree(struct inode *);
+bool f2fs_lookup_extent_cache(struct inode *, pgoff_t, struct extent_info *);
+void f2fs_update_extent_cache(struct dnode_of_data *);
+void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
+                                               pgoff_t, block_t, unsigned int);
+void init_extent_cache_info(struct f2fs_sb_info *);
+int __init create_extent_cache(void);
+void destroy_extent_cache(void);
+
+/*
+ * crypto support
+ */
+static inline bool f2fs_encrypted_inode(struct inode *inode)
+{
+       return file_is_encrypt(inode);
+}
+
+static inline void f2fs_set_encrypted_inode(struct inode *inode)
+{
+#ifdef CONFIG_F2FS_FS_ENCRYPTION
+       file_set_encrypt(inode);
+#endif
+}
+
+static inline bool f2fs_bio_encrypted(struct bio *bio)
+{
+       return bio->bi_private != NULL;
+}
+
+static inline int f2fs_sb_has_crypto(struct super_block *sb)
+{
+       return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_ENCRYPT);
+}
+
+static inline int f2fs_sb_mounted_blkzoned(struct super_block *sb)
+{
+       return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_BLKZONED);
+}
+
+#ifdef CONFIG_BLK_DEV_ZONED
+static inline int get_blkz_type(struct f2fs_sb_info *sbi,
+                       struct block_device *bdev, block_t blkaddr)
+{
+       unsigned int zno = blkaddr >> sbi->log_blocks_per_blkz;
+       int i;
+
+       for (i = 0; i < sbi->s_ndevs; i++)
+               if (FDEV(i).bdev == bdev)
+                       return FDEV(i).blkz_type[zno];
+       return -EINVAL;
+}
+#endif
+
+static inline bool f2fs_discard_en(struct f2fs_sb_info *sbi)
+{
+       struct request_queue *q = bdev_get_queue(sbi->sb->s_bdev);
+
+       return blk_queue_discard(q) || f2fs_sb_mounted_blkzoned(sbi->sb);
+}
+
+static inline void set_opt_mode(struct f2fs_sb_info *sbi, unsigned int mt)
+{
+       clear_opt(sbi, ADAPTIVE);
+       clear_opt(sbi, LFS);
+
+       switch (mt) {
+       case F2FS_MOUNT_ADAPTIVE:
+               set_opt(sbi, ADAPTIVE);
+               break;
+       case F2FS_MOUNT_LFS:
+               set_opt(sbi, LFS);
+               break;
+       }
+}
+
+static inline bool f2fs_may_encrypt(struct inode *inode)
+{
+#ifdef CONFIG_F2FS_FS_ENCRYPTION
+       mode_t mode = inode->i_mode;
+
+       return (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode));
+#else
+       return 0;
+#endif
+}
+
+#ifndef CONFIG_F2FS_FS_ENCRYPTION
+#define fscrypt_set_d_op(i)
+#define fscrypt_get_ctx                        fscrypt_notsupp_get_ctx
+#define fscrypt_release_ctx            fscrypt_notsupp_release_ctx
+#define fscrypt_encrypt_page           fscrypt_notsupp_encrypt_page
+#define fscrypt_decrypt_page           fscrypt_notsupp_decrypt_page
+#define fscrypt_decrypt_bio_pages      fscrypt_notsupp_decrypt_bio_pages
+#define fscrypt_pullback_bio_page      fscrypt_notsupp_pullback_bio_page
+#define fscrypt_restore_control_page   fscrypt_notsupp_restore_control_page
+#define fscrypt_zeroout_range          fscrypt_notsupp_zeroout_range
+#define fscrypt_process_policy         fscrypt_notsupp_process_policy
+#define fscrypt_get_policy             fscrypt_notsupp_get_policy
+#define fscrypt_has_permitted_context  fscrypt_notsupp_has_permitted_context
+#define fscrypt_inherit_context                fscrypt_notsupp_inherit_context
+#define fscrypt_get_encryption_info    fscrypt_notsupp_get_encryption_info
+#define fscrypt_put_encryption_info    fscrypt_notsupp_put_encryption_info
+#define fscrypt_setup_filename         fscrypt_notsupp_setup_filename
+#define fscrypt_free_filename          fscrypt_notsupp_free_filename
+#define fscrypt_fname_encrypted_size   fscrypt_notsupp_fname_encrypted_size
+#define fscrypt_fname_alloc_buffer     fscrypt_notsupp_fname_alloc_buffer
+#define fscrypt_fname_free_buffer      fscrypt_notsupp_fname_free_buffer
+#define fscrypt_fname_disk_to_usr      fscrypt_notsupp_fname_disk_to_usr
+#define fscrypt_fname_usr_to_disk      fscrypt_notsupp_fname_usr_to_disk
+#endif
 #endif
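
When CONFIG_F2FS_FS_ENCRYPTION is off, the macro table above redirects every
fscrypt_* call to a fscrypt_notsupp_* stub from the new
include/linux/fscrypto.h, so the non-crypto build links without fs/crypto/. A
minimal sketch of the shape such a stub takes (illustrative; the real
definitions live in fscrypto.h):

        static inline int fscrypt_notsupp_process_policy(struct file *filp,
                                const struct fscrypt_policy *policy)
        {
                /* encryption compiled out: report the operation as unsupported */
                return -EOPNOTSUPP;
        }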
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 1cae864f8dfcd03c676a8e02b1b2261a94af4f21..a8710a3a889ed4906c538ca9111e717388631a56 100644 (file)
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
 #include <linux/compat.h>
 #include <linux/uaccess.h>
 #include <linux/mount.h>
+#include <linux/pagevec.h>
+#include <linux/random.h>
+#include <linux/aio.h>
+#include <linux/uuid.h>
+#include <linux/file.h>
 
 #include "f2fs.h"
 #include "node.h"
 #include "segment.h"
 #include "xattr.h"
 #include "acl.h"
+#include "gc.h"
+#include "trace.h"
 #include <trace/events/f2fs.h>
 
 static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
@@ -32,41 +39,32 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
 {
        struct page *page = vmf->page;
        struct inode *inode = file_inode(vma->vm_file);
-       struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
-       block_t old_blk_addr;
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct dnode_of_data dn;
-       int err, ilock;
-
-       f2fs_balance_fs(sbi);
+       int err;
 
        sb_start_pagefault(inode->i_sb);
 
+       f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
+
        /* block allocation */
-       ilock = mutex_lock_op(sbi);
+       f2fs_lock_op(sbi);
        set_new_dnode(&dn, inode, NULL, NULL, 0);
-       err = get_dnode_of_data(&dn, page->index, ALLOC_NODE);
+       err = f2fs_reserve_block(&dn, page->index);
        if (err) {
-               mutex_unlock_op(sbi, ilock);
+               f2fs_unlock_op(sbi);
                goto out;
        }
-
-       old_blk_addr = dn.data_blkaddr;
-
-       if (old_blk_addr == NULL_ADDR) {
-               err = reserve_new_block(&dn);
-               if (err) {
-                       f2fs_put_dnode(&dn);
-                       mutex_unlock_op(sbi, ilock);
-                       goto out;
-               }
-       }
        f2fs_put_dnode(&dn);
-       mutex_unlock_op(sbi, ilock);
+       f2fs_unlock_op(sbi);
 
+       f2fs_balance_fs(sbi, dn.node_changed);
+
+       file_update_time(vma->vm_file);
        lock_page(page);
-       if (page->mapping != inode->i_mapping ||
-                       page_offset(page) >= i_size_read(inode) ||
-                       !PageUptodate(page)) {
+       if (unlikely(page->mapping != inode->i_mapping ||
+                       page_offset(page) > i_size_read(inode) ||
+                       !PageUptodate(page))) {
                unlock_page(page);
                err = -EFAULT;
                goto out;
@@ -76,36 +74,116 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
         * check to see if the page is mapped already (no holes)
         */
        if (PageMappedToDisk(page))
-               goto out;
-
-       /* fill the page */
-       wait_on_page_writeback(page);
+               goto mapped;
 
        /* page is wholly or partially inside EOF */
-       if (((page->index + 1) << PAGE_CACHE_SHIFT) > i_size_read(inode)) {
+       if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
+                                               i_size_read(inode)) {
                unsigned offset;
-               offset = i_size_read(inode) & ~PAGE_CACHE_MASK;
-               zero_user_segment(page, offset, PAGE_CACHE_SIZE);
+               offset = i_size_read(inode) & ~PAGE_MASK;
+               zero_user_segment(page, offset, PAGE_SIZE);
        }
        set_page_dirty(page);
-       SetPageUptodate(page);
+       if (!PageUptodate(page))
+               SetPageUptodate(page);
+
+       trace_f2fs_vm_page_mkwrite(page, DATA);
+mapped:
+       /* fill the page */
+       f2fs_wait_on_page_writeback(page, DATA, false);
+
+       /* wait for GCed encrypted page writeback */
+       if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
+               f2fs_wait_on_encrypted_page_writeback(sbi, dn.data_blkaddr);
 
-       file_update_time(vma->vm_file);
 out:
        sb_end_pagefault(inode->i_sb);
+       f2fs_update_time(sbi, REQ_TIME);
        return block_page_mkwrite_return(err);
 }
 
 static const struct vm_operations_struct f2fs_file_vm_ops = {
        .fault          = filemap_fault,
        .page_mkwrite   = f2fs_vm_page_mkwrite,
-       .remap_pages    = generic_file_remap_pages,
 };
 
-int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+static int get_parent_ino(struct inode *inode, nid_t *pino)
+{
+       struct dentry *dentry;
+
+       inode = igrab(inode);
+       dentry = d_find_any_alias(inode);
+       iput(inode);
+       if (!dentry)
+               return 0;
+
+       if (update_dent_inode(inode, inode, &dentry->d_name)) {
+               dput(dentry);
+               return 0;
+       }
+
+       *pino = parent_ino(dentry);
+       dput(dentry);
+       return 1;
+}
+
+static inline bool need_do_checkpoint(struct inode *inode)
+{
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       bool need_cp = false;
+
+       if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
+               need_cp = true;
+       else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
+               need_cp = true;
+       else if (file_wrong_pino(inode))
+               need_cp = true;
+       else if (!space_for_roll_forward(sbi))
+               need_cp = true;
+       else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
+               need_cp = true;
+       else if (F2FS_I(inode)->xattr_ver == cur_cp_version(F2FS_CKPT(sbi)))
+               need_cp = true;
+       else if (test_opt(sbi, FASTBOOT))
+               need_cp = true;
+       else if (sbi->active_logs == 2)
+               need_cp = true;
+
+       return need_cp;
+}
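+
+Any single condition above forces a full checkpoint on fsync; only when none
+holds can f2fs take the cheaper roll-forward path below (fsync_node_pages()
+plus a device flush), relying on recover_fsync_data() after a crash.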
+
+static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
+{
+       struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
+       bool ret = false;
+       /* but we must not skip fsync while inode updates are still pending */
+       if ((i && PageDirty(i)) || need_inode_block_update(sbi, ino))
+               ret = true;
+       f2fs_put_page(i, 0);
+       return ret;
+}
+
+static void try_to_fix_pino(struct inode *inode)
+{
+       struct f2fs_inode_info *fi = F2FS_I(inode);
+       nid_t pino;
+
+       down_write(&fi->i_sem);
+       fi->xattr_ver = 0;
+       if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
+                       get_parent_ino(inode, &pino)) {
+               f2fs_i_pino_write(inode, pino);
+               file_got_pino(inode);
+       }
+       up_write(&fi->i_sem);
+}
+
+static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
+                                               int datasync, bool atomic)
 {
        struct inode *inode = file->f_mapping->host;
-       struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       nid_t ino = inode->i_ino;
        int ret = 0;
        bool need_cp = false;
        struct writeback_control wbc = {
@@ -114,86 +192,333 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
                .for_reclaim = 0,
        };
 
-       if (inode->i_sb->s_flags & MS_RDONLY)
+       if (unlikely(f2fs_readonly(inode->i_sb)))
                return 0;
 
        trace_f2fs_sync_file_enter(inode);
+
+       /* if fdatasync is triggered, let's do in-place-update */
+       if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
+               set_inode_flag(inode, FI_NEED_IPU);
        ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
+       clear_inode_flag(inode, FI_NEED_IPU);
+
        if (ret) {
                trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
                return ret;
        }
 
-       /* guarantee free sections for fsync */
-       f2fs_balance_fs(sbi);
+       /* if the inode itself is dirty, always write it so it can be recovered */
+       if (!f2fs_skip_inode_update(inode, datasync)) {
+               f2fs_write_inode(inode, NULL);
+               goto go_write;
+       }
 
-       mutex_lock(&inode->i_mutex);
+       /*
+        * if there is no written data, don't waste time writing recovery info.
+        */
+       if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
+                       !exist_written_data(sbi, ino, APPEND_INO)) {
 
-       if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
-               goto out;
+               /* it may call write_inode just prior to fsync */
+               if (need_inode_page_update(sbi, ino))
+                       goto go_write;
 
-       if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
-               need_cp = true;
-       else if (is_cp_file(inode))
-               need_cp = true;
-       else if (!space_for_roll_forward(sbi))
-               need_cp = true;
-       else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
-               need_cp = true;
+               if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
+                               exist_written_data(sbi, ino, UPDATE_INO))
+                       goto flush_out;
+               goto out;
+       }
+go_write:
+       /*
+        * Both of fdatasync() and fsync() are able to be recovered from
+        * sudden-power-off.
+        */
+       down_read(&F2FS_I(inode)->i_sem);
+       need_cp = need_do_checkpoint(inode);
+       up_read(&F2FS_I(inode)->i_sem);
 
        if (need_cp) {
                /* all the dirty node pages should be flushed for POR */
                ret = f2fs_sync_fs(inode->i_sb, 1);
-       } else {
-               /* if there is no written node page, write its inode page */
-               while (!sync_node_pages(sbi, inode->i_ino, &wbc)) {
-                       ret = f2fs_write_inode(inode, NULL);
-                       if (ret)
-                               goto out;
-               }
-               filemap_fdatawait_range(sbi->node_inode->i_mapping,
-                                                       0, LONG_MAX);
-               ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+
+               /*
+                * We've secured consistency through sync_fs. Following pino
+                * will be used only for fsynced inodes after checkpoint.
+                */
+               try_to_fix_pino(inode);
+               clear_inode_flag(inode, FI_APPEND_WRITE);
+               clear_inode_flag(inode, FI_UPDATE_WRITE);
+               goto out;
+       }
+sync_nodes:
+       ret = fsync_node_pages(sbi, inode, &wbc, atomic);
+       if (ret)
+               goto out;
+
+       /* if cp_error was enabled, we should avoid infinite loop */
+       if (unlikely(f2fs_cp_error(sbi))) {
+               ret = -EIO;
+               goto out;
+       }
+
+       if (need_inode_block_update(sbi, ino)) {
+               f2fs_mark_inode_dirty_sync(inode, true);
+               f2fs_write_inode(inode, NULL);
+               goto sync_nodes;
        }
+
+       ret = wait_on_node_pages_writeback(sbi, ino);
+       if (ret)
+               goto out;
+
+       /* once recovery info is written, we don't need to track this */
+       remove_ino_entry(sbi, ino, APPEND_INO);
+       clear_inode_flag(inode, FI_APPEND_WRITE);
+flush_out:
+       remove_ino_entry(sbi, ino, UPDATE_INO);
+       clear_inode_flag(inode, FI_UPDATE_WRITE);
+       ret = f2fs_issue_flush(sbi);
+       f2fs_update_time(sbi, REQ_TIME);
 out:
-       mutex_unlock(&inode->i_mutex);
        trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
+       f2fs_trace_ios(NULL, 1);
        return ret;
 }
 
+int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+{
+       return f2fs_do_sync_file(file, start, end, datasync, false);
+}
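
f2fs_sync_file() is the ->fsync entry point; the atomic variant of
f2fs_do_sync_file() is reserved for internal callers (in mainline, the
atomic-write commit path). From user space both fsync(2) and fdatasync(2)
arrive here, and datasync=1 drives the FI_NEED_IPU hint set above. A hedged
user-space sketch:

        #include <unistd.h>

        static int flush_append(int fd, int data_only)
        {
                /* fdatasync() passes datasync=1 down to f2fs_do_sync_file(),
                 * which then prefers in-place updates for small syncs */
                return data_only ? fdatasync(fd) : fsync(fd);
        }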
+
+static pgoff_t __get_first_dirty_index(struct address_space *mapping,
+                                               pgoff_t pgofs, int whence)
+{
+       struct pagevec pvec;
+       int nr_pages;
+
+       if (whence != SEEK_DATA)
+               return 0;
+
+       /* find first dirty page index */
+       pagevec_init(&pvec, 0);
+       nr_pages = pagevec_lookup_tag(&pvec, mapping, &pgofs,
+                                       PAGECACHE_TAG_DIRTY, 1);
+       pgofs = nr_pages ? pvec.pages[0]->index : ULONG_MAX;
+       pagevec_release(&pvec);
+       return pgofs;
+}
+
+static bool __found_offset(block_t blkaddr, pgoff_t dirty, pgoff_t pgofs,
+                                                       int whence)
+{
+       switch (whence) {
+       case SEEK_DATA:
+               if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
+                       (blkaddr != NEW_ADDR && blkaddr != NULL_ADDR))
+                       return true;
+               break;
+       case SEEK_HOLE:
+               if (blkaddr == NULL_ADDR)
+                       return true;
+               break;
+       }
+       return false;
+}
+
+static inline int unsigned_offsets(struct file *file)
+{
+       return file->f_mode & FMODE_UNSIGNED_OFFSET;
+}
+
+static loff_t vfs_setpos(struct file *file, loff_t offset, loff_t maxsize)
+{
+       if (offset < 0 && !unsigned_offsets(file))
+               return -EINVAL;
+       if (offset > maxsize)
+               return -EINVAL;
+
+       if (offset != file->f_pos) {
+               file->f_pos = offset;
+               file->f_version = 0;
+       }
+       return offset;
+}
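
vfs_setpos() here appears to be a private copy of the generic VFS helper
(mainline gained it after 3.10), carried so the SEEK_DATA/SEEK_HOLE code
below can validate and commit the new file position.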
+
+static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
+{
+       struct inode *inode = file->f_mapping->host;
+       loff_t maxbytes = inode->i_sb->s_maxbytes;
+       struct dnode_of_data dn;
+       pgoff_t pgofs, end_offset, dirty;
+       loff_t data_ofs = offset;
+       loff_t isize;
+       int err = 0;
+
+       inode_lock(inode);
+
+       isize = i_size_read(inode);
+       if (offset >= isize)
+               goto fail;
+
+       /* handle inline data case */
+       if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
+               if (whence == SEEK_HOLE)
+                       data_ofs = isize;
+               goto found;
+       }
+
+       pgofs = (pgoff_t)(offset >> PAGE_SHIFT);
+
+       dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);
+
+       for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
+               set_new_dnode(&dn, inode, NULL, NULL, 0);
+               err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
+               if (err && err != -ENOENT) {
+                       goto fail;
+               } else if (err == -ENOENT) {
+                               /* direct node does not exist */
+                       if (whence == SEEK_DATA) {
+                               pgofs = get_next_page_offset(&dn, pgofs);
+                               continue;
+                       } else {
+                               goto found;
+                       }
+               }
+
+               end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
+
+               /* find data/hole in dnode block */
+               for (; dn.ofs_in_node < end_offset;
+                               dn.ofs_in_node++, pgofs++,
+                               data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
+                       block_t blkaddr;
+                       blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
+
+                       if (__found_offset(blkaddr, dirty, pgofs, whence)) {
+                               f2fs_put_dnode(&dn);
+                               goto found;
+                       }
+               }
+               f2fs_put_dnode(&dn);
+       }
+
+       if (whence == SEEK_DATA)
+               goto fail;
+found:
+       if (whence == SEEK_HOLE && data_ofs > isize)
+               data_ofs = isize;
+       inode_unlock(inode);
+       return vfs_setpos(file, data_ofs, maxbytes);
+fail:
+       inode_unlock(inode);
+       return -ENXIO;
+}
+
+static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
+{
+       struct inode *inode = file->f_mapping->host;
+       loff_t maxbytes = inode->i_sb->s_maxbytes;
+
+       switch (whence) {
+       case SEEK_SET:
+       case SEEK_CUR:
+       case SEEK_END:
+               return generic_file_llseek_size(file, offset, whence,
+                                               maxbytes, i_size_read(inode));
+       case SEEK_DATA:
+       case SEEK_HOLE:
+               if (offset < 0)
+                       return -ENXIO;
+               return f2fs_seek_block(file, offset, whence);
+       }
+
+       return -EINVAL;
+}
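
This wires up SEEK_DATA/SEEK_HOLE for f2fs. A hedged user-space sketch of
walking a sparse file with the new support (error handling trimmed):

        #define _GNU_SOURCE
        #include <stdio.h>
        #include <unistd.h>

        static void dump_data_extents(int fd)
        {
                off_t data = 0, hole;

                /* lseek() fails with ENXIO once no further data exists */
                while ((data = lseek(fd, data, SEEK_DATA)) != (off_t)-1) {
                        /* a hole (at least the implicit one at EOF) follows */
                        hole = lseek(fd, data, SEEK_HOLE);
                        printf("data: [%lld, %lld)\n",
                               (long long)data, (long long)hole);
                        data = hole;
                }
        }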
+
 static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
 {
+       struct inode *inode = file_inode(file);
+       int err;
+
+       if (f2fs_encrypted_inode(inode)) {
+               err = fscrypt_get_encryption_info(inode);
+               if (err)
+                       return 0;
+               if (!f2fs_encrypted_inode(inode))
+                       return -ENOKEY;
+       }
+
+       /* mmap cannot serve pages from inline data, so convert it first */
+       err = f2fs_convert_inline_inode(inode);
+       if (err)
+               return err;
+
        file_accessed(file);
        vma->vm_ops = &f2fs_file_vm_ops;
        return 0;
 }
 
-static int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
+static int f2fs_file_open(struct inode *inode, struct file *filp)
+{
+       int ret = generic_file_open(inode, filp);
+       struct dentry *dir;
+
+       if (!ret && f2fs_encrypted_inode(inode)) {
+               ret = fscrypt_get_encryption_info(inode);
+               if (ret)
+                       return -EACCES;
+               if (!fscrypt_has_encryption_key(inode))
+                       return -ENOKEY;
+       }
+       dir = dget_parent(file_dentry(filp));
+       if (f2fs_encrypted_inode(d_inode(dir)) &&
+                       !fscrypt_has_permitted_context(d_inode(dir), inode)) {
+               dput(dir);
+               return -EPERM;
+       }
+       dput(dir);
+       return ret;
+}
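
The dget_parent() check above enforces fscrypt policy inheritance: the open
fails with -EPERM when the file's encryption context is inconsistent with its
parent directory's, and with -ENOKEY when the key has not been loaded.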
+
+int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
 {
-       int nr_free = 0, ofs = dn->ofs_in_node;
-       struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
+       struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
        struct f2fs_node *raw_node;
+       int nr_free = 0, ofs = dn->ofs_in_node, len = count;
        __le32 *addr;
 
-       raw_node = page_address(dn->node_page);
+       raw_node = F2FS_NODE(dn->node_page);
        addr = blkaddr_in_node(raw_node) + ofs;
 
-       for ( ; count > 0; count--, addr++, dn->ofs_in_node++) {
+       for (; count > 0; count--, addr++, dn->ofs_in_node++) {
                block_t blkaddr = le32_to_cpu(*addr);
                if (blkaddr == NULL_ADDR)
                        continue;
 
-               update_extent_cache(NULL_ADDR, dn);
+               dn->data_blkaddr = NULL_ADDR;
+               set_data_blkaddr(dn);
                invalidate_blocks(sbi, blkaddr);
-               dec_valid_block_count(sbi, dn->inode, 1);
+               if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
+                       clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
                nr_free++;
        }
+
        if (nr_free) {
-               set_page_dirty(dn->node_page);
-               sync_inode_page(dn);
+               pgoff_t fofs;
+               /*
+                * once we invalidate valid blkaddr in range [ofs, ofs + count],
+                * we will invalidate all blkaddr in the whole range.
+                */
+               fofs = start_bidx_of_node(ofs_of_node(dn->node_page),
+                                                       dn->inode) + ofs;
+               f2fs_update_extent_cache_range(dn, fofs, 0, len);
+               dec_valid_block_count(sbi, dn->inode, nr_free);
        }
        dn->ofs_in_node = ofs;
 
+       f2fs_update_time(sbi, REQ_TIME);
        trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
                                         dn->ofs_in_node, nr_free);
        return nr_free;
@@ -204,61 +529,84 @@ void truncate_data_blocks(struct dnode_of_data *dn)
        truncate_data_blocks_range(dn, ADDRS_PER_BLOCK);
 }
 
-static void truncate_partial_data_page(struct inode *inode, u64 from)
+static int truncate_partial_data_page(struct inode *inode, u64 from,
+                                                               bool cache_only)
 {
-       unsigned offset = from & (PAGE_CACHE_SIZE - 1);
+       unsigned offset = from & (PAGE_SIZE - 1);
+       pgoff_t index = from >> PAGE_SHIFT;
+       struct address_space *mapping = inode->i_mapping;
        struct page *page;
 
-       if (!offset)
-               return;
-
-       page = find_data_page(inode, from >> PAGE_CACHE_SHIFT, false);
-       if (IS_ERR(page))
-               return;
+       if (!offset && !cache_only)
+               return 0;
 
-       lock_page(page);
-       if (page->mapping != inode->i_mapping) {
+       if (cache_only) {
+               page = find_lock_page(mapping, index);
+               if (page && PageUptodate(page))
+                       goto truncate_out;
                f2fs_put_page(page, 1);
-               return;
+               return 0;
        }
-       wait_on_page_writeback(page);
-       zero_user(page, offset, PAGE_CACHE_SIZE - offset);
-       set_page_dirty(page);
+
+       page = get_lock_data_page(inode, index, true);
+       if (IS_ERR(page))
+               return 0;
+truncate_out:
+       f2fs_wait_on_page_writeback(page, DATA, true);
+       zero_user(page, offset, PAGE_SIZE - offset);
+       if (!cache_only || !f2fs_encrypted_inode(inode) ||
+                                       !S_ISREG(inode->i_mode))
+               set_page_dirty(page);
        f2fs_put_page(page, 1);
+       return 0;
 }
 
-static int truncate_blocks(struct inode *inode, u64 from)
+int truncate_blocks(struct inode *inode, u64 from, bool lock)
 {
-       struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        unsigned int blocksize = inode->i_sb->s_blocksize;
        struct dnode_of_data dn;
        pgoff_t free_from;
-       int count = 0, ilock = -1;
-       int err;
+       int count = 0, err = 0;
+       struct page *ipage;
+       bool truncate_page = false;
 
        trace_f2fs_truncate_blocks_enter(inode, from);
 
-       free_from = (pgoff_t)
-                       ((from + blocksize - 1) >> (sbi->log_blocksize));
+       free_from = (pgoff_t)F2FS_BYTES_TO_BLK(from + blocksize - 1);
 
-       ilock = mutex_lock_op(sbi);
-       set_new_dnode(&dn, inode, NULL, NULL, 0);
-       err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE);
+       if (free_from >= sbi->max_file_blocks)
+               goto free_partial;
+
+       if (lock)
+               f2fs_lock_op(sbi);
+
+       ipage = get_node_page(sbi, inode->i_ino);
+       if (IS_ERR(ipage)) {
+               err = PTR_ERR(ipage);
+               goto out;
+       }
+
+       if (f2fs_has_inline_data(inode)) {
+               if (truncate_inline_inode(ipage, from))
+                       set_page_dirty(ipage);
+               f2fs_put_page(ipage, 1);
+               truncate_page = true;
+               goto out;
+       }
+
+       set_new_dnode(&dn, inode, ipage, NULL, 0);
+       err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
        if (err) {
                if (err == -ENOENT)
                        goto free_next;
-               mutex_unlock_op(sbi, ilock);
-               trace_f2fs_truncate_blocks_exit(inode, err);
-               return err;
+               goto out;
        }
 
-       if (IS_INODE(dn.node_page))
-               count = ADDRS_PER_INODE;
-       else
-               count = ADDRS_PER_BLOCK;
+       count = ADDRS_PER_PAGE(dn.node_page, inode);
 
        count -= dn.ofs_in_node;
-       BUG_ON(count < 0);
+       f2fs_bug_on(sbi, count < 0);
 
        if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
                truncate_data_blocks_range(&dn, count);
@@ -268,33 +616,48 @@ static int truncate_blocks(struct inode *inode, u64 from)
        f2fs_put_dnode(&dn);
 free_next:
        err = truncate_inode_blocks(inode, free_from);
-       mutex_unlock_op(sbi, ilock);
-
+out:
+       if (lock)
+               f2fs_unlock_op(sbi);
+free_partial:
        /* lastly zero out the first data page */
-       truncate_partial_data_page(inode, from);
+       if (!err)
+               err = truncate_partial_data_page(inode, from, truncate_page);
 
        trace_f2fs_truncate_blocks_exit(inode, err);
        return err;
 }
 
-void f2fs_truncate(struct inode *inode)
+int f2fs_truncate(struct inode *inode)
 {
+       int err;
+
        if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
                                S_ISLNK(inode->i_mode)))
-               return;
+               return 0;
 
        trace_f2fs_truncate(inode);
 
-       if (!truncate_blocks(inode, i_size_read(inode))) {
-               inode->i_mtime = inode->i_ctime = CURRENT_TIME;
-               mark_inode_dirty(inode);
+       /* if the inode no longer qualifies for inline data, convert it first */
+       if (!f2fs_may_inline_data(inode)) {
+               err = f2fs_convert_inline_inode(inode);
+               if (err)
+                       return err;
        }
+
+       err = truncate_blocks(inode, i_size_read(inode), true);
+       if (err)
+               return err;
+
+       inode->i_mtime = inode->i_ctime = current_time(inode);
+       f2fs_mark_inode_dirty_sync(inode, false);
+       return 0;
 }
 
-static int f2fs_getattr(struct vfsmount *mnt,
+int f2fs_getattr(struct vfsmount *mnt,
                         struct dentry *dentry, struct kstat *stat)
 {
-       struct inode *inode = dentry->d_inode;
+       struct inode *inode = d_inode(dentry);
        generic_fillattr(inode, stat);
        stat->blocks <<= 3;
        return 0;
@@ -303,7 +666,6 @@ static int f2fs_getattr(struct vfsmount *mnt,
 #ifdef CONFIG_F2FS_FS_POSIX_ACL
 static void __setattr_copy(struct inode *inode, const struct iattr *attr)
 {
-       struct f2fs_inode_info *fi = F2FS_I(inode);
        unsigned int ia_valid = attr->ia_valid;
 
        if (ia_valid & ATTR_UID)
@@ -324,7 +686,7 @@ static void __setattr_copy(struct inode *inode, const struct iattr *attr)
 
                if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
                        mode &= ~S_ISGID;
-               set_acl_inode(fi, mode);
+               set_acl_inode(inode, mode);
        }
 }
 #else
@@ -333,32 +695,59 @@ static void __setattr_copy(struct inode *inode, const struct iattr *attr)
 
 int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
 {
-       struct inode *inode = dentry->d_inode;
-       struct f2fs_inode_info *fi = F2FS_I(inode);
+       struct inode *inode = d_inode(dentry);
        int err;
+       bool size_changed = false;
 
        err = inode_change_ok(inode, attr);
        if (err)
                return err;
 
-       if ((attr->ia_valid & ATTR_SIZE) &&
-                       attr->ia_size != i_size_read(inode)) {
-               truncate_setsize(inode, attr->ia_size);
-               f2fs_truncate(inode);
-               f2fs_balance_fs(F2FS_SB(inode->i_sb));
+       if (attr->ia_valid & ATTR_SIZE) {
+               if (f2fs_encrypted_inode(inode) &&
+                               fscrypt_get_encryption_info(inode))
+                       return -EACCES;
+
+               if (attr->ia_size <= i_size_read(inode)) {
+                       truncate_setsize(inode, attr->ia_size);
+                       err = f2fs_truncate(inode);
+                       if (err)
+                               return err;
+               } else {
+                       /*
+                        * do not trim all blocks after i_size if target size is
+                        * larger than i_size.
+                        */
+                       truncate_setsize(inode, attr->ia_size);
+
+                       /* should convert inline inode here */
+                       if (!f2fs_may_inline_data(inode)) {
+                               err = f2fs_convert_inline_inode(inode);
+                               if (err)
+                                       return err;
+                       }
+                       inode->i_mtime = inode->i_ctime = current_time(inode);
+               }
+
+               size_changed = true;
        }
 
        __setattr_copy(inode, attr);
 
        if (attr->ia_valid & ATTR_MODE) {
                err = f2fs_acl_chmod(inode);
-               if (err || is_inode_flag_set(fi, FI_ACL_MODE)) {
-                       inode->i_mode = fi->i_acl_mode;
-                       clear_inode_flag(fi, FI_ACL_MODE);
+               if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
+                       inode->i_mode = F2FS_I(inode)->i_acl_mode;
+                       clear_inode_flag(inode, FI_ACL_MODE);
                }
        }
 
-       mark_inode_dirty(inode);
+       /* file size may have changed here */
+       f2fs_mark_inode_dirty_sync(inode, size_changed);
+
+       /* inode change will produce dirty node pages flushed by checkpoint */
+       f2fs_balance_fs(F2FS_I_SB(inode), true);
+
        return err;
 }
 
@@ -372,188 +761,713 @@ const struct inode_operations f2fs_file_inode_operations = {
        .listxattr      = f2fs_listxattr,
        .removexattr    = generic_removexattr,
 #endif
+       .fiemap         = f2fs_fiemap,
 };
 
-static void fill_zero(struct inode *inode, pgoff_t index,
+static int fill_zero(struct inode *inode, pgoff_t index,
                                        loff_t start, loff_t len)
 {
-       struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct page *page;
-       int ilock;
 
        if (!len)
-               return;
+               return 0;
 
-       f2fs_balance_fs(sbi);
+       f2fs_balance_fs(sbi, true);
 
-       ilock = mutex_lock_op(sbi);
-       page = get_new_data_page(inode, index, false);
-       mutex_unlock_op(sbi, ilock);
+       f2fs_lock_op(sbi);
+       page = get_new_data_page(inode, NULL, index, false);
+       f2fs_unlock_op(sbi);
 
-       if (!IS_ERR(page)) {
-               wait_on_page_writeback(page);
-               zero_user(page, start, len);
-               set_page_dirty(page);
-               f2fs_put_page(page, 1);
-       }
+       if (IS_ERR(page))
+               return PTR_ERR(page);
+
+       f2fs_wait_on_page_writeback(page, DATA, true);
+       zero_user(page, start, len);
+       set_page_dirty(page);
+       f2fs_put_page(page, 1);
+       return 0;
 }
 
 int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
 {
-       pgoff_t index;
        int err;
 
-       for (index = pg_start; index < pg_end; index++) {
+       while (pg_start < pg_end) {
                struct dnode_of_data dn;
+               pgoff_t end_offset, count;
 
                set_new_dnode(&dn, inode, NULL, NULL, 0);
-               err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
+               err = get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
                if (err) {
-                       if (err == -ENOENT)
+                       if (err == -ENOENT) {
+                               pg_start++;
                                continue;
+                       }
                        return err;
                }
 
-               if (dn.data_blkaddr != NULL_ADDR)
-                       truncate_data_blocks_range(&dn, 1);
+               end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
+               count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);
+
+               f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);
+
+               truncate_data_blocks_range(&dn, count);
                f2fs_put_dnode(&dn);
+
+               pg_start += count;
        }
        return 0;
 }
 
-static int punch_hole(struct inode *inode, loff_t offset, loff_t len, int mode)
+static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
 {
        pgoff_t pg_start, pg_end;
        loff_t off_start, off_end;
-       int ret = 0;
+       int ret;
 
-       pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
-       pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;
+       ret = f2fs_convert_inline_inode(inode);
+       if (ret)
+               return ret;
 
-       off_start = offset & (PAGE_CACHE_SIZE - 1);
-       off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);
+       pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
+       pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
+
+       off_start = offset & (PAGE_SIZE - 1);
+       off_end = (offset + len) & (PAGE_SIZE - 1);
 
        if (pg_start == pg_end) {
-               fill_zero(inode, pg_start, off_start,
+               ret = fill_zero(inode, pg_start, off_start,
                                                off_end - off_start);
+               if (ret)
+                       return ret;
        } else {
-               if (off_start)
-                       fill_zero(inode, pg_start++, off_start,
-                                       PAGE_CACHE_SIZE - off_start);
-               if (off_end)
-                       fill_zero(inode, pg_end, 0, off_end);
+               if (off_start) {
+                       ret = fill_zero(inode, pg_start++, off_start,
+                                               PAGE_SIZE - off_start);
+                       if (ret)
+                               return ret;
+               }
+               if (off_end) {
+                       ret = fill_zero(inode, pg_end, 0, off_end);
+                       if (ret)
+                               return ret;
+               }
 
                if (pg_start < pg_end) {
                        struct address_space *mapping = inode->i_mapping;
                        loff_t blk_start, blk_end;
-                       struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
-                       int ilock;
+                       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 
-                       f2fs_balance_fs(sbi);
+                       f2fs_balance_fs(sbi, true);
 
-                       blk_start = pg_start << PAGE_CACHE_SHIFT;
-                       blk_end = pg_end << PAGE_CACHE_SHIFT;
+                       blk_start = (loff_t)pg_start << PAGE_SHIFT;
+                       blk_end = (loff_t)pg_end << PAGE_SHIFT;
                        truncate_inode_pages_range(mapping, blk_start,
                                        blk_end - 1);
 
-                       ilock = mutex_lock_op(sbi);
+                       f2fs_lock_op(sbi);
                        ret = truncate_hole(inode, pg_start, pg_end);
-                       mutex_unlock_op(sbi, ilock);
+                       f2fs_unlock_op(sbi);
                }
        }
 
-       if (!(mode & FALLOC_FL_KEEP_SIZE) &&
-               i_size_read(inode) <= (offset + len)) {
-               i_size_write(inode, offset);
-               mark_inode_dirty(inode);
-       }
-
        return ret;
 }
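
punch_hole() is reached through f2fs's fallocate handler later in this file.
A hedged user-space sketch of the triggering call; per fallocate(2),
FALLOC_FL_PUNCH_HOLE must be combined with FALLOC_FL_KEEP_SIZE:

        #define _GNU_SOURCE
        #include <fcntl.h>

        static int punch(int fd, off_t offset, off_t len)
        {
                /* zeroes the byte range and releases the backing blocks */
                return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                                 offset, len);
        }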
 
-static int expand_inode_data(struct inode *inode, loff_t offset,
-                                       loff_t len, int mode)
+static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
+                               int *do_replace, pgoff_t off, pgoff_t len)
 {
-       struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
-       pgoff_t index, pg_start, pg_end;
-       loff_t new_size = i_size_read(inode);
-       loff_t off_start, off_end;
-       int ret = 0;
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       struct dnode_of_data dn;
+       int ret, done, i;
 
-       ret = inode_newsize_ok(inode, (len + offset));
-       if (ret)
+next_dnode:
+       set_new_dnode(&dn, inode, NULL, NULL, 0);
+       ret = get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
+       if (ret && ret != -ENOENT) {
                return ret;
+       } else if (ret == -ENOENT) {
+               if (dn.max_level == 0)
+                       return -ENOENT;
+               done = min((pgoff_t)ADDRS_PER_BLOCK - dn.ofs_in_node, len);
+               blkaddr += done;
+               do_replace += done;
+               goto next;
+       }
+
+       done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
+                                                       dn.ofs_in_node, len);
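+       /*
+        * record each block address; blocks not yet checkpointed get their
+        * on-disk address cleared (so they cannot be invalidated) and are
+        * flagged for replacement during the clone step
+        */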
+       for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
+               *blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
+               if (!is_checkpointed_data(sbi, *blkaddr)) {
+                       if (test_opt(sbi, LFS)) {
+                               f2fs_put_dnode(&dn);
+                               return -ENOTSUPP;
+                       }
 
-       pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
-       pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;
+                       /* do not invalidate this block address */
+                       f2fs_update_data_blkaddr(&dn, NULL_ADDR);
+                       *do_replace = 1;
+               }
+       }
+       f2fs_put_dnode(&dn);
+next:
+       len -= done;
+       off += done;
+       if (len)
+               goto next_dnode;
+       return 0;
+}
 
-       off_start = offset & (PAGE_CACHE_SIZE - 1);
-       off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);
+static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
+                               int *do_replace, pgoff_t off, int len)
+{
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       struct dnode_of_data dn;
+       int ret, i;
 
-       for (index = pg_start; index <= pg_end; index++) {
-               struct dnode_of_data dn;
-               int ilock;
+       for (i = 0; i < len; i++, do_replace++, blkaddr++) {
+               if (*do_replace == 0)
+                       continue;
 
-               ilock = mutex_lock_op(sbi);
                set_new_dnode(&dn, inode, NULL, NULL, 0);
-               ret = get_dnode_of_data(&dn, index, ALLOC_NODE);
+               ret = get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
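+               /* if the dnode lookup fails, release the saved block instead */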
                if (ret) {
-                       mutex_unlock_op(sbi, ilock);
-                       break;
+                       dec_valid_block_count(sbi, inode, 1);
+                       invalidate_blocks(sbi, *blkaddr);
+               } else {
+                       f2fs_update_data_blkaddr(&dn, *blkaddr);
                }
+               f2fs_put_dnode(&dn);
+       }
+       return 0;
+}
 
-               if (dn.data_blkaddr == NULL_ADDR) {
-                       ret = reserve_new_block(&dn);
-                       if (ret) {
-                               f2fs_put_dnode(&dn);
-                               mutex_unlock_op(sbi, ilock);
-                               break;
+static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
+                       block_t *blkaddr, int *do_replace,
+                       pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
+{
+       struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
+       pgoff_t i = 0;
+       int ret;
+
+       while (i < len) {
+               if (blkaddr[i] == NULL_ADDR && !full) {
+                       i++;
+                       continue;
+               }
+
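+               /*
+                * move the block at the metadata level when possible;
+                * otherwise copy the page contents and punch the source
+                */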
+               if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
+                       struct dnode_of_data dn;
+                       struct node_info ni;
+                       loff_t new_size;
+                       pgoff_t ilen;
+
+                       set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
+                       ret = get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
+                       if (ret)
+                               return ret;
+
+                       get_node_info(sbi, dn.nid, &ni);
+                       ilen = min((pgoff_t)
+                               ADDRS_PER_PAGE(dn.node_page, dst_inode) -
+                                               dn.ofs_in_node, len - i);
+                       do {
+                               dn.data_blkaddr = datablock_addr(dn.node_page,
+                                                               dn.ofs_in_node);
+                               truncate_data_blocks_range(&dn, 1);
+
+                               if (do_replace[i]) {
+                                       f2fs_i_blocks_write(src_inode,
+                                                               1, false);
+                                       f2fs_i_blocks_write(dst_inode,
+                                                               1, true);
+                                       f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
+                                       blkaddr[i], ni.version, true, false);
+
+                                       do_replace[i] = 0;
+                               }
+                               dn.ofs_in_node++;
+                               i++;
+                               new_size = (loff_t)(dst + i) << PAGE_SHIFT;
+                               if (dst_inode->i_size < new_size)
+                                       f2fs_i_size_write(dst_inode, new_size);
+                       } while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));
+
+                       f2fs_put_dnode(&dn);
+               } else {
+                       struct page *psrc, *pdst;
+
+                       psrc = get_lock_data_page(src_inode, src + i, true);
+                       if (IS_ERR(psrc))
+                               return PTR_ERR(psrc);
+                       pdst = get_new_data_page(dst_inode, NULL, dst + i,
+                                                               true);
+                       if (IS_ERR(pdst)) {
+                               f2fs_put_page(psrc, 1);
+                               return PTR_ERR(pdst);
                        }
+                       f2fs_copy_page(psrc, pdst);
+                       set_page_dirty(pdst);
+                       f2fs_put_page(pdst, 1);
+                       f2fs_put_page(psrc, 1);
+
+                       ret = truncate_hole(src_inode, src + i, src + i + 1);
+                       if (ret)
+                               return ret;
+                       i++;
                }
-               f2fs_put_dnode(&dn);
-               mutex_unlock_op(sbi, ilock);
+       }
+       return 0;
+}
+
+static int __exchange_data_block(struct inode *src_inode,
+                       struct inode *dst_inode, pgoff_t src, pgoff_t dst,
+                       pgoff_t len, bool full)
+{
+       block_t *src_blkaddr;
+       int *do_replace;
+       pgoff_t olen;
+       int ret;
+
+       while (len) {
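+               /* work in bounded batches to cap the temporary array sizes */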
+               olen = min((pgoff_t)4 * ADDRS_PER_BLOCK, len);
+
+               src_blkaddr = f2fs_kvzalloc(sizeof(block_t) * olen, GFP_KERNEL);
+               if (!src_blkaddr)
+                       return -ENOMEM;
+
+               do_replace = f2fs_kvzalloc(sizeof(int) * olen, GFP_KERNEL);
+               if (!do_replace) {
+                       f2fs_kvfree(src_blkaddr);
+                       return -ENOMEM;
+               }
+
+               ret = __read_out_blkaddrs(src_inode, src_blkaddr,
+                                       do_replace, src, olen);
+               if (ret)
+                       goto roll_back;
+
+               ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
+                                       do_replace, src, dst, olen, full);
+               if (ret)
+                       goto roll_back;
+
+               src += olen;
+               dst += olen;
+               len -= olen;
+
+               f2fs_kvfree(src_blkaddr);
+               f2fs_kvfree(do_replace);
+       }
+       return 0;
+
+roll_back:
+       __roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, len);
+       f2fs_kvfree(src_blkaddr);
+       f2fs_kvfree(do_replace);
+       return ret;
+}
+
+static int f2fs_do_collapse(struct inode *inode, pgoff_t start, pgoff_t end)
+{
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
+       int ret;
+
+       f2fs_balance_fs(sbi, true);
+       f2fs_lock_op(sbi);
+
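+       /* cached extents become stale once blocks start moving */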
+       f2fs_drop_extent_tree(inode);
+
+       ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
+       f2fs_unlock_op(sbi);
+       return ret;
+}
+
+static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
+{
+       pgoff_t pg_start, pg_end;
+       loff_t new_size;
+       int ret;
+
+       if (offset + len >= i_size_read(inode))
+               return -EINVAL;
+
+       /* collapse range should be aligned to block size of f2fs. */
+       if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
+               return -EINVAL;
+
+       ret = f2fs_convert_inline_inode(inode);
+       if (ret)
+               return ret;
+
+       pg_start = offset >> PAGE_SHIFT;
+       pg_end = (offset + len) >> PAGE_SHIFT;
+
+       /* write out all dirty pages from offset */
+       ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
+       if (ret)
+               return ret;
+
+       truncate_pagecache(inode, 0, offset);
+
+       ret = f2fs_do_collapse(inode, pg_start, pg_end);
+       if (ret)
+               return ret;
+
+       /* write out all moved pages, if possible */
+       filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
+       truncate_pagecache(inode, 0, offset);
+
+       new_size = i_size_read(inode) - len;
+       truncate_pagecache(inode, 0, new_size);
+
+       ret = truncate_blocks(inode, new_size, true);
+       if (!ret)
+               f2fs_i_size_write(inode, new_size);
 
-               if (pg_start == pg_end)
+       return ret;
+}
+
+static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
+                                                               pgoff_t end)
+{
+       struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
+       pgoff_t index = start;
+       unsigned int ofs_in_node = dn->ofs_in_node;
+       blkcnt_t count = 0;
+       int ret;
+
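+       /* first pass: count unallocated blocks to reserve them in one shot */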
+       for (; index < end; index++, dn->ofs_in_node++) {
+               if (datablock_addr(dn->node_page, dn->ofs_in_node) == NULL_ADDR)
+                       count++;
+       }
+
+       dn->ofs_in_node = ofs_in_node;
+       ret = reserve_new_blocks(dn, count);
+       if (ret)
+               return ret;
+
+       dn->ofs_in_node = ofs_in_node;
+       for (index = start; index < end; index++, dn->ofs_in_node++) {
+               dn->data_blkaddr =
+                               datablock_addr(dn->node_page, dn->ofs_in_node);
+               /*
+                * reserve_new_blocks does not guarantee that the entire
+                * range was allocated.
+                */
+               if (dn->data_blkaddr == NULL_ADDR) {
+                       ret = -ENOSPC;
+                       break;
+               }
+               if (dn->data_blkaddr != NEW_ADDR) {
+                       invalidate_blocks(sbi, dn->data_blkaddr);
+                       dn->data_blkaddr = NEW_ADDR;
+                       set_data_blkaddr(dn);
+               }
+       }
+
+       f2fs_update_extent_cache_range(dn, start, 0, index - start);
+
+       return ret;
+}
+
+static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
+                                                               int mode)
+{
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       struct address_space *mapping = inode->i_mapping;
+       pgoff_t index, pg_start, pg_end;
+       loff_t new_size = i_size_read(inode);
+       loff_t off_start, off_end;
+       int ret = 0;
+
+       ret = inode_newsize_ok(inode, (len + offset));
+       if (ret)
+               return ret;
+
+       ret = f2fs_convert_inline_inode(inode);
+       if (ret)
+               return ret;
+
+       ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
+       if (ret)
+               return ret;
+
+       truncate_pagecache_range(inode, offset, offset + len - 1);
+
+       pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
+       pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
+
+       off_start = offset & (PAGE_SIZE - 1);
+       off_end = (offset + len) & (PAGE_SIZE - 1);
+
+       if (pg_start == pg_end) {
+               ret = fill_zero(inode, pg_start, off_start,
+                                               off_end - off_start);
+               if (ret)
+                       return ret;
+
-                       new_size = offset + len;
-               else if (index == pg_start && off_start)
-                       new_size = (index + 1) << PAGE_CACHE_SHIFT;
-               else if (index == pg_end)
-                       new_size = (index << PAGE_CACHE_SHIFT) + off_end;
-               else
-                       new_size += PAGE_CACHE_SIZE;
+               new_size = max_t(loff_t, new_size, offset + len);
+       } else {
+               if (off_start) {
+                       ret = fill_zero(inode, pg_start++, off_start,
+                                               PAGE_SIZE - off_start);
+                       if (ret)
+                               return ret;
+
+                       new_size = max_t(loff_t, new_size,
+                                       (loff_t)pg_start << PAGE_SHIFT);
+               }
+
+               for (index = pg_start; index < pg_end;) {
+                       struct dnode_of_data dn;
+                       unsigned int end_offset;
+                       pgoff_t end;
+
+                       f2fs_lock_op(sbi);
+
+                       set_new_dnode(&dn, inode, NULL, NULL, 0);
+                       ret = get_dnode_of_data(&dn, index, ALLOC_NODE);
+                       if (ret) {
+                               f2fs_unlock_op(sbi);
+                               goto out;
+                       }
+
+                       end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
+                       end = min(pg_end, end_offset - dn.ofs_in_node + index);
+
+                       ret = f2fs_do_zero_range(&dn, index, end);
+                       f2fs_put_dnode(&dn);
+                       f2fs_unlock_op(sbi);
+
+                       f2fs_balance_fs(sbi, dn.node_changed);
+
+                       if (ret)
+                               goto out;
+
+                       index = end;
+                       new_size = max_t(loff_t, new_size,
+                                       (loff_t)index << PAGE_SHIFT);
+               }
+
+               if (off_end) {
+                       ret = fill_zero(inode, pg_end, 0, off_end);
+                       if (ret)
+                               goto out;
+
+                       new_size = max_t(loff_t, new_size, offset + len);
+               }
        }
 
-       if (!(mode & FALLOC_FL_KEEP_SIZE) &&
-               i_size_read(inode) < new_size) {
-               i_size_write(inode, new_size);
-               mark_inode_dirty(inode);
+out:
+       if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size)
+               f2fs_i_size_write(inode, new_size);
+
+       return ret;
+}
+
+static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
+{
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       pgoff_t nr, pg_start, pg_end, delta, idx;
+       loff_t new_size;
+       int ret = 0;
+
+       new_size = i_size_read(inode) + len;
+       if (new_size > inode->i_sb->s_maxbytes)
+               return -EFBIG;
+
+       if (offset >= i_size_read(inode))
+               return -EINVAL;
+
+       /* insert range should be aligned to block size of f2fs. */
+       if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
+               return -EINVAL;
+
+       ret = f2fs_convert_inline_inode(inode);
+       if (ret)
+               return ret;
+
+       f2fs_balance_fs(sbi, true);
+
+       ret = truncate_blocks(inode, i_size_read(inode), true);
+       if (ret)
+               return ret;
+
+       /* write out all dirty pages from offset */
+       ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
+       if (ret)
+               return ret;
+
+       truncate_pagecache(inode, 0, offset);
+
+       pg_start = offset >> PAGE_SHIFT;
+       pg_end = (offset + len) >> PAGE_SHIFT;
+       delta = pg_end - pg_start;
+       idx = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
+
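+       /*
+        * shift blocks toward the end of file, walking backward in chunks of
+        * at most 'delta' pages so source and destination never overlap
+        */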
+       while (!ret && idx > pg_start) {
+               nr = idx - pg_start;
+               if (nr > delta)
+                       nr = delta;
+               idx -= nr;
+
+               f2fs_lock_op(sbi);
+               f2fs_drop_extent_tree(inode);
+
+               ret = __exchange_data_block(inode, inode, idx,
+                                       idx + delta, nr, false);
+               f2fs_unlock_op(sbi);
        }
 
+       /* write out all moved pages, if possible */
+       filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
+       truncate_pagecache(inode, 0, offset);
+
+       if (!ret)
+               f2fs_i_size_write(inode, new_size);
        return ret;
 }
 
+static int expand_inode_data(struct inode *inode, loff_t offset,
+                                       loff_t len, int mode)
+{
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       struct f2fs_map_blocks map = { .m_next_pgofs = NULL };
+       pgoff_t pg_end;
+       loff_t new_size = i_size_read(inode);
+       loff_t off_end;
+       int err;
+
+       err = inode_newsize_ok(inode, (len + offset));
+       if (err)
+               return err;
+
+       err = f2fs_convert_inline_inode(inode);
+       if (err)
+               return err;
+
+       f2fs_balance_fs(sbi, true);
+
+       pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
+       off_end = (offset + len) & (PAGE_SIZE - 1);
+
+       map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
+       map.m_len = pg_end - map.m_lblk;
+       if (off_end)
+               map.m_len++;
+
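+       /* preallocate the whole range with a single mapping call */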
+       err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
+       if (err) {
+               pgoff_t last_off;
+
+               if (!map.m_len)
+                       return err;
+
+               last_off = map.m_lblk + map.m_len - 1;
+
+               /* update new size to the failed position */
+               new_size = (last_off == pg_end) ? offset + len :
+                                       (loff_t)(last_off + 1) << PAGE_SHIFT;
+       } else {
+               new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
+       }
+
+       if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size)
+               f2fs_i_size_write(inode, new_size);
+
+       return err;
+}
+
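+/* fallback definitions for kernels whose UAPI headers lack these flags */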
+#ifndef FALLOC_FL_COLLAPSE_RANGE
+#define FALLOC_FL_COLLAPSE_RANGE       0x08
+#endif
+#ifndef FALLOC_FL_ZERO_RANGE
+#define FALLOC_FL_ZERO_RANGE           0x10
+#endif
+#ifndef FALLOC_FL_INSERT_RANGE
+#define FALLOC_FL_INSERT_RANGE         0x20
+#endif
+
 static long f2fs_fallocate(struct file *file, int mode,
                                loff_t offset, loff_t len)
 {
        struct inode *inode = file_inode(file);
-       long ret;
+       long ret = 0;
+
+       /* f2fs only supports ->fallocate for regular files */
+       if (!S_ISREG(inode->i_mode))
+               return -EINVAL;
 
-       if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
+       if (f2fs_encrypted_inode(inode) &&
+               (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
                return -EOPNOTSUPP;
 
-       if (mode & FALLOC_FL_PUNCH_HOLE)
-               ret = punch_hole(inode, offset, len, mode);
-       else
+       if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
+                       FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
+                       FALLOC_FL_INSERT_RANGE))
+               return -EOPNOTSUPP;
+
+       inode_lock(inode);
+
+       if (mode & FALLOC_FL_PUNCH_HOLE) {
+               if (offset >= inode->i_size)
+                       goto out;
+
+               ret = punch_hole(inode, offset, len);
+       } else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
+               ret = f2fs_collapse_range(inode, offset, len);
+       } else if (mode & FALLOC_FL_ZERO_RANGE) {
+               ret = f2fs_zero_range(inode, offset, len, mode);
+       } else if (mode & FALLOC_FL_INSERT_RANGE) {
+               ret = f2fs_insert_range(inode, offset, len);
+       } else {
                ret = expand_inode_data(inode, offset, len, mode);
+       }
 
        if (!ret) {
-               inode->i_mtime = inode->i_ctime = CURRENT_TIME;
-               mark_inode_dirty(inode);
+               inode->i_mtime = inode->i_ctime = current_time(inode);
+               f2fs_mark_inode_dirty_sync(inode, false);
+               if (mode & FALLOC_FL_KEEP_SIZE)
+                       file_set_keep_isize(inode);
+               f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
        }
+
+out:
+       inode_unlock(inode);
+
        trace_f2fs_fallocate(inode, mode, offset, len, ret);
        return ret;
 }
 
+static int f2fs_release_file(struct inode *inode, struct file *filp)
+{
+       /*
+        * f2fs_release_file is called on every close. So we should not
+        * drop any in-memory pages on a close issued by another process.
+        */
+       if (!(filp->f_mode & FMODE_WRITE) ||
+                       atomic_read(&inode->i_writecount) != 1)
+               return 0;
+
+       /* any remaining atomic pages should be discarded */
+       if (f2fs_is_atomic_file(inode))
+               drop_inmem_pages(inode);
+       if (f2fs_is_volatile_file(inode)) {
+               clear_inode_flag(inode, FI_VOLATILE_FILE);
+               set_inode_flag(inode, FI_DROP_CACHE);
+               filemap_fdatawrite(inode->i_mapping);
+               clear_inode_flag(inode, FI_DROP_CACHE);
+       }
+       return 0;
+}
+
 #define F2FS_REG_FLMASK                (~(FS_DIRSYNC_FL | FS_TOPDIR_FL))
 #define F2FS_OTHER_FLMASK      (FS_NODUMP_FL | FS_NOATIME_FL)
 
@@ -567,90 +1481,907 @@ static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags)
                return flags & F2FS_OTHER_FLMASK;
 }
 
-long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
+{
+       struct inode *inode = file_inode(filp);
+       struct f2fs_inode_info *fi = F2FS_I(inode);
+       unsigned int flags = fi->i_flags & FS_FL_USER_VISIBLE;
+       return put_user(flags, (int __user *)arg);
+}
+
+static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
 {
        struct inode *inode = file_inode(filp);
        struct f2fs_inode_info *fi = F2FS_I(inode);
        unsigned int flags;
+       unsigned int oldflags;
        int ret;
 
-       switch (cmd) {
-       case FS_IOC_GETFLAGS:
-               flags = fi->i_flags & FS_FL_USER_VISIBLE;
-               return put_user(flags, (int __user *) arg);
-       case FS_IOC_SETFLAGS:
-       {
-               unsigned int oldflags;
-
-               ret = mnt_want_write_file(filp);
-               if (ret)
-                       return ret;
+       if (!inode_owner_or_capable(inode))
+               return -EACCES;
 
-               if (!inode_owner_or_capable(inode)) {
-                       ret = -EACCES;
-                       goto out;
-               }
+       if (get_user(flags, (int __user *)arg))
+               return -EFAULT;
 
-               if (get_user(flags, (int __user *) arg)) {
-                       ret = -EFAULT;
-                       goto out;
-               }
+       ret = mnt_want_write_file(filp);
+       if (ret)
+               return ret;
 
-               flags = f2fs_mask_flags(inode->i_mode, flags);
+       flags = f2fs_mask_flags(inode->i_mode, flags);
 
-               mutex_lock(&inode->i_mutex);
+       inode_lock(inode);
 
-               oldflags = fi->i_flags;
+       oldflags = fi->i_flags;
 
-               if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
-                       if (!capable(CAP_LINUX_IMMUTABLE)) {
-                               mutex_unlock(&inode->i_mutex);
-                               ret = -EPERM;
-                               goto out;
-                       }
+       if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
+               if (!capable(CAP_LINUX_IMMUTABLE)) {
+                       inode_unlock(inode);
+                       ret = -EPERM;
+                       goto out;
                }
+       }
 
-               flags = flags & FS_FL_USER_MODIFIABLE;
-               flags |= oldflags & ~FS_FL_USER_MODIFIABLE;
-               fi->i_flags = flags;
-               mutex_unlock(&inode->i_mutex);
+       flags = flags & FS_FL_USER_MODIFIABLE;
+       flags |= oldflags & ~FS_FL_USER_MODIFIABLE;
+       fi->i_flags = flags;
+       inode_unlock(inode);
 
-               f2fs_set_inode_flags(inode);
-               inode->i_ctime = CURRENT_TIME;
-               mark_inode_dirty(inode);
+       inode->i_ctime = current_time(inode);
+       f2fs_set_inode_flags(inode);
 out:
-               mnt_drop_write_file(filp);
-               return ret;
-       }
-       default:
-               return -ENOTTY;
-       }
+       mnt_drop_write_file(filp);
+       return ret;
 }
 
-#ifdef CONFIG_COMPAT
-long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
 {
-       switch (cmd) {
-       case F2FS_IOC32_GETFLAGS:
-               cmd = F2FS_IOC_GETFLAGS;
-               break;
-       case F2FS_IOC32_SETFLAGS:
-               cmd = F2FS_IOC_SETFLAGS;
-               break;
-       default:
-               return -ENOIOCTLCMD;
-       }
-       return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
+       struct inode *inode = file_inode(filp);
+
+       return put_user(inode->i_generation, (int __user *)arg);
+}
+
+static int f2fs_ioc_start_atomic_write(struct file *filp)
+{
+       struct inode *inode = file_inode(filp);
+       int ret;
+
+       if (!inode_owner_or_capable(inode))
+               return -EACCES;
+
+       ret = mnt_want_write_file(filp);
+       if (ret)
+               return ret;
+
+       inode_lock(inode);
+
+       if (f2fs_is_atomic_file(inode))
+               goto out;
+
+       ret = f2fs_convert_inline_inode(inode);
+       if (ret)
+               goto out;
+
+       set_inode_flag(inode, FI_ATOMIC_FILE);
+       f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
+
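+       /* pages dirtied before entering atomic mode must be flushed first */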
+       if (!get_dirty_pages(inode))
+               goto out;
+
+       f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
+               "Unexpected flush for atomic writes: ino=%lu, npages=%lld",
+                                       inode->i_ino, get_dirty_pages(inode));
+       ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
+       if (ret)
+               clear_inode_flag(inode, FI_ATOMIC_FILE);
+out:
+       inode_unlock(inode);
+       mnt_drop_write_file(filp);
+       return ret;
+}
+
+static int f2fs_ioc_commit_atomic_write(struct file *filp)
+{
+       struct inode *inode = file_inode(filp);
+       int ret;
+
+       if (!inode_owner_or_capable(inode))
+               return -EACCES;
+
+       ret = mnt_want_write_file(filp);
+       if (ret)
+               return ret;
+
+       inode_lock(inode);
+
+       if (f2fs_is_volatile_file(inode))
+               goto err_out;
+
+       if (f2fs_is_atomic_file(inode)) {
+               clear_inode_flag(inode, FI_ATOMIC_FILE);
+               ret = commit_inmem_pages(inode);
+               if (ret) {
+                       set_inode_flag(inode, FI_ATOMIC_FILE);
+                       goto err_out;
+               }
+       }
+
+       ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
+err_out:
+       inode_unlock(inode);
+       mnt_drop_write_file(filp);
+       return ret;
+}
+
+static int f2fs_ioc_start_volatile_write(struct file *filp)
+{
+       struct inode *inode = file_inode(filp);
+       int ret;
+
+       if (!inode_owner_or_capable(inode))
+               return -EACCES;
+
+       ret = mnt_want_write_file(filp);
+       if (ret)
+               return ret;
+
+       inode_lock(inode);
+
+       if (f2fs_is_volatile_file(inode))
+               goto out;
+
+       ret = f2fs_convert_inline_inode(inode);
+       if (ret)
+               goto out;
+
+       set_inode_flag(inode, FI_VOLATILE_FILE);
+       f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
+out:
+       inode_unlock(inode);
+       mnt_drop_write_file(filp);
+       return ret;
+}
+
+static int f2fs_ioc_release_volatile_write(struct file *filp)
+{
+       struct inode *inode = file_inode(filp);
+       int ret;
+
+       if (!inode_owner_or_capable(inode))
+               return -EACCES;
+
+       ret = mnt_want_write_file(filp);
+       if (ret)
+               return ret;
+
+       inode_lock(inode);
+
+       if (!f2fs_is_volatile_file(inode))
+               goto out;
+
+       if (!f2fs_is_first_block_written(inode)) {
+               ret = truncate_partial_data_page(inode, 0, true);
+               goto out;
+       }
+
+       ret = punch_hole(inode, 0, F2FS_BLKSIZE);
+out:
+       inode_unlock(inode);
+       mnt_drop_write_file(filp);
+       return ret;
+}
+
+static int f2fs_ioc_abort_volatile_write(struct file *filp)
+{
+       struct inode *inode = file_inode(filp);
+       int ret;
+
+       if (!inode_owner_or_capable(inode))
+               return -EACCES;
+
+       ret = mnt_want_write_file(filp);
+       if (ret)
+               return ret;
+
+       inode_lock(inode);
+
+       if (f2fs_is_atomic_file(inode))
+               drop_inmem_pages(inode);
+       if (f2fs_is_volatile_file(inode)) {
+               clear_inode_flag(inode, FI_VOLATILE_FILE);
+               ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
+       }
+
+       inode_unlock(inode);
+
+       mnt_drop_write_file(filp);
+       f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
+       return ret;
+}
+
+static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
+{
+       struct inode *inode = file_inode(filp);
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       struct super_block *sb = sbi->sb;
+       __u32 in;
+       int ret;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       if (get_user(in, (__u32 __user *)arg))
+               return -EFAULT;
+
+       ret = mnt_want_write_file(filp);
+       if (ret)
+               return ret;
+
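+       /* each level trades remaining durability work for shutdown speed */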
+       switch (in) {
+       case F2FS_GOING_DOWN_FULLSYNC:
+               sb = freeze_bdev(sb->s_bdev);
+               if (sb && !IS_ERR(sb)) {
+                       f2fs_stop_checkpoint(sbi, false);
+                       thaw_bdev(sb->s_bdev, sb);
+               }
+               break;
+       case F2FS_GOING_DOWN_METASYNC:
+               /* do checkpoint only */
+               f2fs_sync_fs(sb, 1);
+               f2fs_stop_checkpoint(sbi, false);
+               break;
+       case F2FS_GOING_DOWN_NOSYNC:
+               f2fs_stop_checkpoint(sbi, false);
+               break;
+       case F2FS_GOING_DOWN_METAFLUSH:
+               sync_meta_pages(sbi, META, LONG_MAX);
+               f2fs_stop_checkpoint(sbi, false);
+               break;
+       default:
+               ret = -EINVAL;
+               goto out;
+       }
+       f2fs_update_time(sbi, REQ_TIME);
+out:
+       mnt_drop_write_file(filp);
+       return ret;
+}
+
+static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
+{
+       struct inode *inode = file_inode(filp);
+       struct super_block *sb = inode->i_sb;
+       struct request_queue *q = bdev_get_queue(sb->s_bdev);
+       struct fstrim_range range;
+       int ret;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       if (!blk_queue_discard(q))
+               return -EOPNOTSUPP;
+
+       if (copy_from_user(&range, (struct fstrim_range __user *)arg,
+                               sizeof(range)))
+               return -EFAULT;
+
+       ret = mnt_want_write_file(filp);
+       if (ret)
+               return ret;
+
+       range.minlen = max((unsigned int)range.minlen,
+                               q->limits.discard_granularity);
+       ret = f2fs_trim_fs(F2FS_SB(sb), &range);
+       mnt_drop_write_file(filp);
+       if (ret < 0)
+               return ret;
+
+       if (copy_to_user((struct fstrim_range __user *)arg, &range,
+                               sizeof(range)))
+               return -EFAULT;
+       f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
+       return 0;
+}
+
+static bool uuid_is_nonzero(__u8 u[16])
+{
+       int i;
+
+       for (i = 0; i < 16; i++)
+               if (u[i])
+                       return true;
+       return false;
+}
+
+static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
+{
+       struct fscrypt_policy policy;
+       struct inode *inode = file_inode(filp);
+
+       if (copy_from_user(&policy, (struct fscrypt_policy __user *)arg,
+                                                       sizeof(policy)))
+               return -EFAULT;
+
+       f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
+
+       return fscrypt_process_policy(filp, &policy);
+}
+
+static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
+{
+       struct fscrypt_policy policy;
+       struct inode *inode = file_inode(filp);
+       int err;
+
+       err = fscrypt_get_policy(inode, &policy);
+       if (err)
+               return err;
+
+       if (copy_to_user((struct fscrypt_policy __user *)arg, &policy, sizeof(policy)))
+               return -EFAULT;
+       return 0;
+}
+
+static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
+{
+       struct inode *inode = file_inode(filp);
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       int err;
+
+       if (!f2fs_sb_has_crypto(inode->i_sb))
+               return -EOPNOTSUPP;
+
+       if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
+               goto got_it;
+
+       err = mnt_want_write_file(filp);
+       if (err)
+               return err;
+
+       /* update superblock with uuid */
+       generate_random_uuid(sbi->raw_super->encrypt_pw_salt);
+
+       err = f2fs_commit_super(sbi, false);
+       if (err) {
+               /* undo new data */
+               memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
+               mnt_drop_write_file(filp);
+               return err;
+       }
+       mnt_drop_write_file(filp);
+got_it:
+       if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
+                                                                       16))
+               return -EFAULT;
+       return 0;
+}
+
+static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
+{
+       struct inode *inode = file_inode(filp);
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       __u32 sync;
+       int ret;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       if (get_user(sync, (__u32 __user *)arg))
+               return -EFAULT;
+
+       if (f2fs_readonly(sbi->sb))
+               return -EROFS;
+
+       ret = mnt_want_write_file(filp);
+       if (ret)
+               return ret;
+
+       if (!sync) {
+               if (!mutex_trylock(&sbi->gc_mutex)) {
+                       ret = -EBUSY;
+                       goto out;
+               }
+       } else {
+               mutex_lock(&sbi->gc_mutex);
+       }
+
+       ret = f2fs_gc(sbi, sync, true);
+out:
+       mnt_drop_write_file(filp);
+       return ret;
+}
+
+static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
+{
+       struct inode *inode = file_inode(filp);
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       int ret;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       if (f2fs_readonly(sbi->sb))
+               return -EROFS;
+
+       ret = mnt_want_write_file(filp);
+       if (ret)
+               return ret;
+
+       ret = f2fs_sync_fs(sbi->sb, 1);
+
+       mnt_drop_write_file(filp);
+       return ret;
+}
+
+static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
+                                       struct file *filp,
+                                       struct f2fs_defragment *range)
+{
+       struct inode *inode = file_inode(filp);
+       struct f2fs_map_blocks map = { .m_next_pgofs = NULL };
+       struct extent_info ei;
+       pgoff_t pg_start, pg_end;
+       unsigned int blk_per_seg = sbi->blocks_per_seg;
+       unsigned int total = 0, sec_num;
+       unsigned int pages_per_sec = sbi->segs_per_sec * blk_per_seg;
+       block_t blk_end = 0;
+       bool fragmented = false;
+       int err;
+
+       /* if in-place-update policy is enabled, don't waste time here */
+       if (need_inplace_update(inode))
+               return -EINVAL;
+
+       pg_start = range->start >> PAGE_SHIFT;
+       pg_end = (range->start + range->len) >> PAGE_SHIFT;
+
+       f2fs_balance_fs(sbi, true);
+
+       inode_lock(inode);
+
+       /* writeback all dirty pages in the range */
+       err = filemap_write_and_wait_range(inode->i_mapping, range->start,
+                                               range->start + range->len - 1);
+       if (err)
+               goto out;
+
+       /*
+        * look up mapping info in the extent cache; skip defragmenting if the
+        * physical block addresses are contiguous.
+        */
+       if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
+               if (ei.fofs + ei.len >= pg_end)
+                       goto out;
+       }
+
+       map.m_lblk = pg_start;
+
+       /*
+        * look up mapping info in the dnode page cache; skip defragmenting if
+        * all physical block addresses are contiguous, even if there are
+        * holes in the logical blocks.
+        */
+       while (map.m_lblk < pg_end) {
+               map.m_len = pg_end - map.m_lblk;
+               err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_READ);
+               if (err)
+                       goto out;
+
+               if (!(map.m_flags & F2FS_MAP_FLAGS)) {
+                       map.m_lblk++;
+                       continue;
+               }
+
+               if (blk_end && blk_end != map.m_pblk) {
+                       fragmented = true;
+                       break;
+               }
+               blk_end = map.m_pblk + map.m_len;
+
+               map.m_lblk += map.m_len;
+       }
+
+       if (!fragmented)
+               goto out;
+
+       map.m_lblk = pg_start;
+       map.m_len = pg_end - pg_start;
+
+       sec_num = (map.m_len + pages_per_sec - 1) / pages_per_sec;
+
+       /*
+        * make sure there are enough free sections for LFS allocation; this
+        * avoids running defragmentation in SSR mode when free sections are
+        * being allocated intensively
+        */
+       if (has_not_enough_free_secs(sbi, 0, sec_num)) {
+               err = -EAGAIN;
+               goto out;
+       }
+
+       while (map.m_lblk < pg_end) {
+               pgoff_t idx;
+               int cnt = 0;
+
+do_map:
+               map.m_len = pg_end - map.m_lblk;
+               err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_READ);
+               if (err)
+                       goto clear_out;
+
+               if (!(map.m_flags & F2FS_MAP_FLAGS)) {
+                       map.m_lblk++;
+                       continue;
+               }
+
+               set_inode_flag(inode, FI_DO_DEFRAG);
+
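+               /* redirty up to a segment's worth of pages, then write them back */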
+               idx = map.m_lblk;
+               while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
+                       struct page *page;
+
+                       page = get_lock_data_page(inode, idx, true);
+                       if (IS_ERR(page)) {
+                               err = PTR_ERR(page);
+                               goto clear_out;
+                       }
+
+                       set_page_dirty(page);
+                       f2fs_put_page(page, 1);
+
+                       idx++;
+                       cnt++;
+                       total++;
+               }
+
+               map.m_lblk = idx;
+
+               if (idx < pg_end && cnt < blk_per_seg)
+                       goto do_map;
+
+               clear_inode_flag(inode, FI_DO_DEFRAG);
+
+               err = filemap_fdatawrite(inode->i_mapping);
+               if (err)
+                       goto out;
+       }
+clear_out:
+       clear_inode_flag(inode, FI_DO_DEFRAG);
+out:
+       inode_unlock(inode);
+       if (!err)
+               range->len = (u64)total << PAGE_SHIFT;
+       return err;
+}
+
+static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
+{
+       struct inode *inode = file_inode(filp);
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       struct f2fs_defragment range;
+       int err;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       if (!S_ISREG(inode->i_mode))
+               return -EINVAL;
+
+       err = mnt_want_write_file(filp);
+       if (err)
+               return err;
+
+       if (f2fs_readonly(sbi->sb)) {
+               err = -EROFS;
+               goto out;
+       }
+
+       if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
+                                                       sizeof(range))) {
+               err = -EFAULT;
+               goto out;
+       }
+
+       /* verify alignment of offset & size */
+       if (range.start & (F2FS_BLKSIZE - 1) ||
+               range.len & (F2FS_BLKSIZE - 1)) {
+               err = -EINVAL;
+               goto out;
+       }
+
+       err = f2fs_defragment_range(sbi, filp, &range);
+       f2fs_update_time(sbi, REQ_TIME);
+       if (err < 0)
+               goto out;
+
+       if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
+                                                       sizeof(range)))
+               err = -EFAULT;
+out:
+       mnt_drop_write_file(filp);
+       return err;
+}
+
+static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
+                       struct file *file_out, loff_t pos_out, size_t len)
+{
+       struct inode *src = file_inode(file_in);
+       struct inode *dst = file_inode(file_out);
+       struct f2fs_sb_info *sbi = F2FS_I_SB(src);
+       size_t olen = len, dst_max_i_size = 0;
+       size_t dst_osize;
+       int ret;
+
+       if (file_in->f_path.mnt != file_out->f_path.mnt ||
+                               src->i_sb != dst->i_sb)
+               return -EXDEV;
+
+       if (unlikely(f2fs_readonly(src->i_sb)))
+               return -EROFS;
+
+       if (S_ISDIR(src->i_mode) || S_ISDIR(dst->i_mode))
+               return -EISDIR;
+
+       if (f2fs_encrypted_inode(src) || f2fs_encrypted_inode(dst))
+               return -EOPNOTSUPP;
+
+       if (src == dst) {
+               if (pos_in == pos_out)
+                       return 0;
+               if (pos_out > pos_in && pos_out < pos_in + len)
+                       return -EINVAL;
+       }
+
+       inode_lock(src);
+       if (src != dst)
+               inode_lock(dst);
+
+       ret = -EINVAL;
+       if (pos_in + len > src->i_size || pos_in + len < pos_in)
+               goto out_unlock;
+       if (len == 0)
+               olen = len = src->i_size - pos_in;
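+       /* when moving the tail of the file, round len up to a full block */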
+       if (pos_in + len == src->i_size)
+               len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
+       if (len == 0) {
+               ret = 0;
+               goto out_unlock;
+       }
+
+       dst_osize = dst->i_size;
+       if (pos_out + olen > dst->i_size)
+               dst_max_i_size = pos_out + olen;
+
+       /* verify the end result is block aligned */
+       if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
+                       !IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
+                       !IS_ALIGNED(pos_out, F2FS_BLKSIZE))
+               goto out_unlock;
+
+       ret = f2fs_convert_inline_inode(src);
+       if (ret)
+               goto out_unlock;
+
+       ret = f2fs_convert_inline_inode(dst);
+       if (ret)
+               goto out_unlock;
+
+       /* write out all dirty pages from offset */
+       ret = filemap_write_and_wait_range(src->i_mapping,
+                                       pos_in, pos_in + len);
+       if (ret)
+               goto out_unlock;
+
+       ret = filemap_write_and_wait_range(dst->i_mapping,
+                                       pos_out, pos_out + len);
+       if (ret)
+               goto out_unlock;
+
+       f2fs_balance_fs(sbi, true);
+       f2fs_lock_op(sbi);
+       ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
+                               pos_out >> F2FS_BLKSIZE_BITS,
+                               len >> F2FS_BLKSIZE_BITS, false);
+
+       if (!ret) {
+               if (dst_max_i_size)
+                       f2fs_i_size_write(dst, dst_max_i_size);
+               else if (dst_osize != dst->i_size)
+                       f2fs_i_size_write(dst, dst_osize);
+       }
+       f2fs_unlock_op(sbi);
+out_unlock:
+       if (src != dst)
+               inode_unlock(dst);
+       inode_unlock(src);
+       return ret;
+}
+
+static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
+{
+       struct f2fs_move_range range;
+       struct fd dst;
+       int err;
+
+       if (!(filp->f_mode & FMODE_READ) ||
+                       !(filp->f_mode & FMODE_WRITE))
+               return -EBADF;
+
+       if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
+                                                       sizeof(range)))
+               return -EFAULT;
+
+       dst = fdget(range.dst_fd);
+       if (!dst.file)
+               return -EBADF;
+
+       if (!(dst.file->f_mode & FMODE_WRITE)) {
+               err = -EBADF;
+               goto err_out;
+       }
+
+       err = mnt_want_write_file(filp);
+       if (err)
+               goto err_out;
+
+       err = f2fs_move_file_range(filp, range.pos_in, dst.file,
+                                       range.pos_out, range.len);
+
+       mnt_drop_write_file(filp);
+
+       if (copy_to_user((struct f2fs_move_range __user *)arg,
+                                               &range, sizeof(range)))
+               err = -EFAULT;
+err_out:
+       fdput(dst);
+       return err;
+}
+
+long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+       switch (cmd) {
+       case F2FS_IOC_GETFLAGS:
+               return f2fs_ioc_getflags(filp, arg);
+       case F2FS_IOC_SETFLAGS:
+               return f2fs_ioc_setflags(filp, arg);
+       case F2FS_IOC_GETVERSION:
+               return f2fs_ioc_getversion(filp, arg);
+       case F2FS_IOC_START_ATOMIC_WRITE:
+               return f2fs_ioc_start_atomic_write(filp);
+       case F2FS_IOC_COMMIT_ATOMIC_WRITE:
+               return f2fs_ioc_commit_atomic_write(filp);
+       case F2FS_IOC_START_VOLATILE_WRITE:
+               return f2fs_ioc_start_volatile_write(filp);
+       case F2FS_IOC_RELEASE_VOLATILE_WRITE:
+               return f2fs_ioc_release_volatile_write(filp);
+       case F2FS_IOC_ABORT_VOLATILE_WRITE:
+               return f2fs_ioc_abort_volatile_write(filp);
+       case F2FS_IOC_SHUTDOWN:
+               return f2fs_ioc_shutdown(filp, arg);
+       case FITRIM:
+               return f2fs_ioc_fitrim(filp, arg);
+       case F2FS_IOC_SET_ENCRYPTION_POLICY:
+               return f2fs_ioc_set_encryption_policy(filp, arg);
+       case F2FS_IOC_GET_ENCRYPTION_POLICY:
+               return f2fs_ioc_get_encryption_policy(filp, arg);
+       case F2FS_IOC_GET_ENCRYPTION_PWSALT:
+               return f2fs_ioc_get_encryption_pwsalt(filp, arg);
+       case F2FS_IOC_GARBAGE_COLLECT:
+               return f2fs_ioc_gc(filp, arg);
+       case F2FS_IOC_WRITE_CHECKPOINT:
+               return f2fs_ioc_write_checkpoint(filp, arg);
+       case F2FS_IOC_DEFRAGMENT:
+               return f2fs_ioc_defragment(filp, arg);
+       case F2FS_IOC_MOVE_RANGE:
+               return f2fs_ioc_move_range(filp, arg);
+       default:
+               return -ENOTTY;
+       }
+}
+
+static ssize_t f2fs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
+               unsigned long nr_segs, loff_t pos)
+{
+       struct file *file = iocb->ki_filp;
+       struct inode *inode = file_inode(file);
+       size_t count;
+       struct blk_plug plug;
+       ssize_t ret;
+
+       if (f2fs_encrypted_inode(inode) &&
+                               !fscrypt_has_encryption_key(inode) &&
+                               fscrypt_get_encryption_info(inode))
+               return -EACCES;
+
+       ret = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
+       if (ret)
+               return ret;
+
+       inode_lock(inode);
+       ret = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
+       if (!ret) {
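+               /* preallocate data blocks so the write path can skip allocation */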
+               int err = f2fs_preallocate_blocks(inode, pos, count,
+                               iocb->ki_filp->f_flags & O_DIRECT);
+               if (err) {
+                       inode_unlock(inode);
+                       return err;
+               }
+               blk_start_plug(&plug);
+               ret = __generic_file_aio_write(iocb, iov, nr_segs,
+                                                       &iocb->ki_pos);
+               blk_finish_plug(&plug);
+       }
+       inode_unlock(inode);
+
+       if (ret > 0 || ret == -EIOCBQUEUED) {
+               ssize_t err;
+
+               err = generic_write_sync(file, iocb->ki_pos - ret, ret);
+               if (err < 0 && ret > 0)
+                       ret = err;
+       }
+       return ret;
+}
+
+#ifdef CONFIG_COMPAT
+long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+       switch (cmd) {
+       case F2FS_IOC32_GETFLAGS:
+               cmd = F2FS_IOC_GETFLAGS;
+               break;
+       case F2FS_IOC32_SETFLAGS:
+               cmd = F2FS_IOC_SETFLAGS;
+               break;
+       case F2FS_IOC32_GETVERSION:
+               cmd = F2FS_IOC_GETVERSION;
+               break;
+       case F2FS_IOC_START_ATOMIC_WRITE:
+       case F2FS_IOC_COMMIT_ATOMIC_WRITE:
+       case F2FS_IOC_START_VOLATILE_WRITE:
+       case F2FS_IOC_RELEASE_VOLATILE_WRITE:
+       case F2FS_IOC_ABORT_VOLATILE_WRITE:
+       case F2FS_IOC_SHUTDOWN:
+       case F2FS_IOC_SET_ENCRYPTION_POLICY:
+       case F2FS_IOC_GET_ENCRYPTION_PWSALT:
+       case F2FS_IOC_GET_ENCRYPTION_POLICY:
+       case F2FS_IOC_GARBAGE_COLLECT:
+       case F2FS_IOC_WRITE_CHECKPOINT:
+       case F2FS_IOC_DEFRAGMENT:
+               break;
+       case F2FS_IOC_MOVE_RANGE:
+               break;
+       default:
+               return -ENOIOCTLCMD;
+       }
+       return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
+}
+#endif
+
+static ssize_t f2fs_file_splice_write(struct pipe_inode_info *pipe,
+                       struct file *out, loff_t *ppos, size_t len,
+                       unsigned int flags)
+{
+       struct address_space *mapping = out->f_mapping;
+       struct inode *inode = mapping->host;
+       int ret;
+
+       ret = generic_write_checks(out, ppos, &len, S_ISBLK(inode->i_mode));
+       if (ret)
+               return ret;
+       ret = f2fs_preallocate_blocks(inode, *ppos, len, false);
+       if (ret)
+               return ret;
+       return generic_file_splice_write(pipe, out, ppos, len, flags);
 }
-#endif
 
 const struct file_operations f2fs_file_operations = {
-       .llseek         = generic_file_llseek,
+       .llseek         = f2fs_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
        .aio_read       = generic_file_aio_read,
-       .aio_write      = generic_file_aio_write,
-       .open           = generic_file_open,
+       .aio_write      = f2fs_file_aio_write,
+       .open           = f2fs_file_open,
+       .release        = f2fs_release_file,
        .mmap           = f2fs_file_mmap,
        .fsync          = f2fs_sync_file,
        .fallocate      = f2fs_fallocate,
@@ -659,5 +2390,5 @@ const struct file_operations f2fs_file_operations = {
        .compat_ioctl   = f2fs_compat_ioctl,
 #endif
        .splice_read    = generic_file_splice_read,
-       .splice_write   = generic_file_splice_write,
+       .splice_write   = f2fs_file_splice_write,
 };
index 14961593e93c84f47860e0a33df001f59a8d9508..7e7b9836cdd6b383b76ccf214a398f50e4b214e4 100644 (file)
@@ -16,7 +16,6 @@
 #include <linux/kthread.h>
 #include <linux/delay.h>
 #include <linux/freezer.h>
-#include <linux/blkdev.h>
 
 #include "f2fs.h"
 #include "node.h"
 #include "gc.h"
 #include <trace/events/f2fs.h>
 
-static struct kmem_cache *winode_slab;
-
 static int gc_thread_func(void *data)
 {
        struct f2fs_sb_info *sbi = data;
+       struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
        wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
        long wait_ms;
 
-       wait_ms = GC_THREAD_MIN_SLEEP_TIME;
+       wait_ms = gc_th->min_sleep_time;
 
        do {
                if (try_to_freeze())
@@ -45,10 +43,15 @@ static int gc_thread_func(void *data)
                        break;
 
                if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
-                       wait_ms = GC_THREAD_MAX_SLEEP_TIME;
+                       increase_sleep_time(gc_th, &wait_ms);
                        continue;
                }
 
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+               if (time_to_inject(sbi, FAULT_CHECKPOINT))
+                       f2fs_stop_checkpoint(sbi, false);
+#endif
+
                /*
                 * [GC triggering condition]
                 * 0. GC is not conducted currently.
@@ -57,7 +60,7 @@ static int gc_thread_func(void *data)
                 * 3. IO subsystem is idle by checking the # of requests in
                 *    bdev's request list.
                 *
-                * Note) We have to avoid triggering GCs too much frequently.
+                * Note) We have to avoid triggering GCs frequently.
                 * Because it is possible that some segments can be
                 * invalidated soon after by user update or deletion.
                 * So, I'd like to wait some time to collect dirty segments.
@@ -66,21 +69,28 @@ static int gc_thread_func(void *data)
                        continue;
 
                if (!is_idle(sbi)) {
-                       wait_ms = increase_sleep_time(wait_ms);
+                       increase_sleep_time(gc_th, &wait_ms);
                        mutex_unlock(&sbi->gc_mutex);
                        continue;
                }
 
                if (has_enough_invalid_blocks(sbi))
-                       wait_ms = decrease_sleep_time(wait_ms);
+                       decrease_sleep_time(gc_th, &wait_ms);
                else
-                       wait_ms = increase_sleep_time(wait_ms);
+                       increase_sleep_time(gc_th, &wait_ms);
 
-               sbi->bg_gc++;
+               stat_inc_bggc_count(sbi);
 
                /* if return value is not zero, no victim was selected */
-               if (f2fs_gc(sbi))
-                       wait_ms = GC_THREAD_NOGC_SLEEP_TIME;
+               if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC), true))
+                       wait_ms = gc_th->no_gc_sleep_time;
+
+               trace_f2fs_background_gc(sbi->sb, wait_ms,
+                               prefree_segments(sbi), free_segments(sbi));
+
+               /* balancing f2fs's metadata periodically */
+               f2fs_balance_fs_bg(sbi);
+
        } while (!kthread_should_stop());
        return 0;
 }
@@ -89,23 +99,31 @@ int start_gc_thread(struct f2fs_sb_info *sbi)
 {
        struct f2fs_gc_kthread *gc_th;
        dev_t dev = sbi->sb->s_bdev->bd_dev;
+       int err = 0;
 
-       if (!test_opt(sbi, BG_GC))
-               return 0;
-       gc_th = kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
-       if (!gc_th)
-               return -ENOMEM;
+       gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
+       if (!gc_th) {
+               err = -ENOMEM;
+               goto out;
+       }
+
+       gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
+       gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
+       gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;
+
+       gc_th->gc_idle = 0;
 
        sbi->gc_thread = gc_th;
        init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
        sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
                        "f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
        if (IS_ERR(gc_th->f2fs_gc_task)) {
+               err = PTR_ERR(gc_th->f2fs_gc_task);
                kfree(gc_th);
                sbi->gc_thread = NULL;
-               return -ENOMEM;
        }
-       return 0;
+out:
+       return err;
 }
 
 void stop_gc_thread(struct f2fs_sb_info *sbi)
@@ -118,9 +136,17 @@ void stop_gc_thread(struct f2fs_sb_info *sbi)
        sbi->gc_thread = NULL;
 }
 
-static int select_gc_type(int gc_type)
+static int select_gc_type(struct f2fs_gc_kthread *gc_th, int gc_type)
 {
-       return (gc_type == BG_GC) ? GC_CB : GC_GREEDY;
+       int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;
+
+       if (gc_th && gc_th->gc_idle) {
+               if (gc_th->gc_idle == 1)
+                       gc_mode = GC_CB;
+               else if (gc_th->gc_idle == 2)
+                       gc_mode = GC_GREEDY;
+       }
+       return gc_mode;
 }
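The gc_idle knob therefore overrides the per-gc_type default, which is cost-benefit for background GC and greedy for foreground GC: 1 forces cost-benefit, 2 forces greedy, and 0 keeps the default. A minimal standalone restatement of the mapping (the enum values are placeholders, not the kernel's constants):

    /* placeholder constants, for illustration only */
    enum gc_mode { GC_CB, GC_GREEDY };
    enum gc_kind { BG_GC, FG_GC };

    static enum gc_mode pick_gc_mode(unsigned int gc_idle, enum gc_kind gc_type)
    {
            if (gc_idle == 1)
                    return GC_CB;           /* idle: weigh segment age too */
            if (gc_idle == 2)
                    return GC_GREEDY;       /* idle: reclaim fastest */
            return gc_type == BG_GC ? GC_CB : GC_GREEDY;
    }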
 
 static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
@@ -131,12 +157,18 @@ static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
        if (p->alloc_mode == SSR) {
                p->gc_mode = GC_GREEDY;
                p->dirty_segmap = dirty_i->dirty_segmap[type];
+               p->max_search = dirty_i->nr_dirty[type];
                p->ofs_unit = 1;
        } else {
-               p->gc_mode = select_gc_type(gc_type);
+               p->gc_mode = select_gc_type(sbi->gc_thread, gc_type);
                p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
+               p->max_search = dirty_i->nr_dirty[DIRTY];
                p->ofs_unit = sbi->segs_per_sec;
        }
+
+       if (p->max_search > sbi->max_victim_search)
+               p->max_search = sbi->max_victim_search;
+
        p->offset = sbi->last_victim[p->gc_mode];
 }
 
@@ -145,9 +177,9 @@ static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
 {
        /* SSR allocates in a segment unit */
        if (p->alloc_mode == SSR)
-               return 1 << sbi->log_blocks_per_seg;
+               return sbi->blocks_per_seg;
        if (p->gc_mode == GC_GREEDY)
-               return (1 << sbi->log_blocks_per_seg) * p->ofs_unit;
+               return sbi->blocks_per_seg * p->ofs_unit;
        else if (p->gc_mode == GC_CB)
                return UINT_MAX;
        else /* No other gc_mode */
@@ -157,7 +189,6 @@ static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
 static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
 {
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
-       unsigned int hint = 0;
        unsigned int secno;
 
        /*
@@ -165,11 +196,9 @@ static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
         * selected by background GC before.
         * Those segments guarantee they have small valid blocks.
         */
-next:
-       secno = find_next_bit(dirty_i->victim_secmap, TOTAL_SECS(sbi), hint++);
-       if (secno < TOTAL_SECS(sbi)) {
+       for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
                if (sec_usage_check(sbi, secno))
-                       goto next;
+                       continue;
                clear_bit(secno, dirty_i->victim_secmap);
                return secno * sbi->segs_per_sec;
        }
@@ -196,7 +225,7 @@ static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
 
        u = (vblocks * 100) >> sbi->log_blocks_per_seg;
 
-       /* Handle if the system time is changed by user */
+       /* Handle the case where the system time was changed by the user */
        if (mtime < sit_i->min_mtime)
                sit_i->min_mtime = mtime;
        if (mtime > sit_i->max_mtime)
@@ -208,8 +237,8 @@ static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
        return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
 }
 
-static unsigned int get_gc_cost(struct f2fs_sb_info *sbi, unsigned int segno,
-                                       struct victim_sel_policy *p)
+static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
+                       unsigned int segno, struct victim_sel_policy *p)
 {
        if (p->alloc_mode == SSR)
                return get_seg_entry(sbi, segno)->ckpt_valid_blocks;
@@ -221,6 +250,18 @@ static unsigned int get_gc_cost(struct f2fs_sb_info *sbi, unsigned int segno,
                return get_cb_cost(sbi, segno);
 }
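A worked example makes the cost-benefit metric above concrete: u is the percentage of still-valid blocks in the segment and age its normalized age, and the victim with the smallest cost wins. A runnable userspace sketch with invented sample values:

    #include <limits.h>
    #include <stdio.h>

    /* mirrors the final expression in get_cb_cost(); lower cost = better victim */
    static unsigned int cb_cost(unsigned int u, unsigned int age)
    {
            return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
    }

    int main(void)
    {
            printf("%u\n", cb_cost(10, 90));        /* old, mostly invalid: cheapest */
            printf("%u\n", cb_cost(90, 90));        /* old but mostly valid */
            printf("%u\n", cb_cost(10, 10));        /* young, mostly invalid */
            return 0;
    }

An old segment with few valid blocks costs the least: copying it out is cheap, and its remaining data is cold, so it is unlikely to be invalidated soon anyway.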
 
+static unsigned int count_bits(const unsigned long *addr,
+                               unsigned int offset, unsigned int len)
+{
+       unsigned int end = offset + len, sum = 0;
+
+       while (offset < end) {
+               if (test_bit(offset++, addr))
+                       ++sum;
+       }
+       return sum;
+}
+
 /*
  * This function is called from two paths.
  * One is garbage collection and the other is SSR segment selection.
@@ -234,8 +275,11 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
 {
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
        struct victim_sel_policy p;
-       unsigned int secno;
-       int nsearched = 0;
+       unsigned int secno, last_victim;
+       unsigned int last_segment = MAIN_SEGS(sbi);
+       unsigned int nsearched = 0;
+
+       mutex_lock(&dirty_i->seglist_lock);
 
        p.alloc_mode = alloc_mode;
        select_policy(sbi, gc_type, type, &p);
@@ -243,8 +287,10 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
        p.min_segno = NULL_SEGNO;
        p.min_cost = get_max_cost(sbi, &p);
 
-       mutex_lock(&dirty_i->seglist_lock);
+       if (p.max_search == 0)
+               goto out;
 
+       last_victim = sbi->last_victim[p.gc_mode];
        if (p.alloc_mode == LFS && gc_type == FG_GC) {
                p.min_segno = check_bg_victims(sbi);
                if (p.min_segno != NULL_SEGNO)
@@ -255,23 +301,34 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
                unsigned long cost;
                unsigned int segno;
 
-               segno = find_next_bit(p.dirty_segmap,
-                                               TOTAL_SEGS(sbi), p.offset);
-               if (segno >= TOTAL_SEGS(sbi)) {
+               segno = find_next_bit(p.dirty_segmap, last_segment, p.offset);
+               if (segno >= last_segment) {
                        if (sbi->last_victim[p.gc_mode]) {
+                               last_segment = sbi->last_victim[p.gc_mode];
                                sbi->last_victim[p.gc_mode] = 0;
                                p.offset = 0;
                                continue;
                        }
                        break;
                }
-               p.offset = ((segno / p.ofs_unit) * p.ofs_unit) + p.ofs_unit;
+
+               p.offset = segno + p.ofs_unit;
+               if (p.ofs_unit > 1) {
+                       p.offset -= segno % p.ofs_unit;
+                       nsearched += count_bits(p.dirty_segmap,
+                                               p.offset - p.ofs_unit,
+                                               p.ofs_unit);
+               } else {
+                       nsearched++;
+               }
+
                secno = GET_SECNO(sbi, segno);
 
                if (sec_usage_check(sbi, secno))
-                       continue;
+                       goto next;
                if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
-                       continue;
+                       goto next;
 
                cost = get_gc_cost(sbi, segno, &p);
 
@@ -279,17 +336,17 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
                        p.min_segno = segno;
                        p.min_cost = cost;
                }
-
-               if (cost == get_max_cost(sbi, &p))
-                       continue;
-
-               if (nsearched++ >= MAX_VICTIM_SEARCH) {
-                       sbi->last_victim[p.gc_mode] = segno;
+next:
+               if (nsearched >= p.max_search) {
+                       if (!sbi->last_victim[p.gc_mode] && segno <= last_victim)
+                               sbi->last_victim[p.gc_mode] = last_victim + 1;
+                       else
+                               sbi->last_victim[p.gc_mode] = segno + 1;
                        break;
                }
        }
-got_it:
        if (p.min_segno != NULL_SEGNO) {
+got_it:
                if (p.alloc_mode == LFS) {
                        secno = GET_SECNO(sbi, p.min_segno);
                        if (gc_type == FG_GC)
@@ -303,6 +360,7 @@ got_it:
                                sbi->cur_victim_sec,
                                prefree_segments(sbi), free_segments(sbi));
        }
+out:
        mutex_unlock(&dirty_i->seglist_lock);
 
        return (p.min_segno == NULL_SEGNO) ? 0 : 1;
@@ -312,48 +370,39 @@ static const struct victim_selection default_v_ops = {
        .get_victim = get_victim_by_default,
 };
 
-static struct inode *find_gc_inode(nid_t ino, struct list_head *ilist)
+static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
 {
-       struct list_head *this;
        struct inode_entry *ie;
 
-       list_for_each(this, ilist) {
-               ie = list_entry(this, struct inode_entry, list);
-               if (ie->inode->i_ino == ino)
-                       return ie->inode;
-       }
+       ie = radix_tree_lookup(&gc_list->iroot, ino);
+       if (ie)
+               return ie->inode;
        return NULL;
 }
 
-static void add_gc_inode(struct inode *inode, struct list_head *ilist)
+static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
 {
-       struct list_head *this;
-       struct inode_entry *new_ie, *ie;
+       struct inode_entry *new_ie;
 
-       list_for_each(this, ilist) {
-               ie = list_entry(this, struct inode_entry, list);
-               if (ie->inode == inode) {
-                       iput(inode);
-                       return;
-               }
-       }
-repeat:
-       new_ie = kmem_cache_alloc(winode_slab, GFP_NOFS);
-       if (!new_ie) {
-               cond_resched();
-               goto repeat;
+       if (inode == find_gc_inode(gc_list, inode->i_ino)) {
+               iput(inode);
+               return;
        }
+       new_ie = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
        new_ie->inode = inode;
-       list_add_tail(&new_ie->list, ilist);
+
+       f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
+       list_add_tail(&new_ie->list, &gc_list->ilist);
 }
 
-static void put_gc_inode(struct list_head *ilist)
+static void put_gc_inode(struct gc_inode_list *gc_list)
 {
        struct inode_entry *ie, *next_ie;
-       list_for_each_entry_safe(ie, next_ie, ilist, list) {
+       list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
+               radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
                iput(ie->inode);
                list_del(&ie->list);
-               kmem_cache_free(winode_slab, ie);
+               kmem_cache_free(inode_entry_slab, ie);
        }
 }
 
@@ -379,9 +428,12 @@ static int check_valid_map(struct f2fs_sb_info *sbi,
 static void gc_node_segment(struct f2fs_sb_info *sbi,
                struct f2fs_summary *sum, unsigned int segno, int gc_type)
 {
-       bool initial = true;
        struct f2fs_summary *entry;
+       block_t start_addr;
        int off;
+       int phase = 0;
+
+       start_addr = START_BLOCK(sbi, segno);
 
 next_step:
        entry = sum;
@@ -389,55 +441,49 @@ next_step:
        for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
                nid_t nid = le32_to_cpu(entry->nid);
                struct page *node_page;
+               struct node_info ni;
 
                /* stop BG_GC if there is not enough free sections. */
-               if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
+               if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
                        return;
 
                if (check_valid_map(sbi, segno, off) == 0)
                        continue;
 
-               if (initial) {
+               if (phase == 0) {
+                       ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
+                                                       META_NAT, true);
+                       continue;
+               }
+
+               if (phase == 1) {
                        ra_node_page(sbi, nid);
                        continue;
                }
+
+               /* phase == 2 */
                node_page = get_node_page(sbi, nid);
                if (IS_ERR(node_page))
                        continue;
 
-               /* set page dirty and write it */
-               if (gc_type == FG_GC) {
-                       f2fs_submit_bio(sbi, NODE, true);
-                       wait_on_page_writeback(node_page);
-                       set_page_dirty(node_page);
-               } else {
-                       if (!PageWriteback(node_page))
-                               set_page_dirty(node_page);
+               /* block may become invalid during get_node_page */
+               if (check_valid_map(sbi, segno, off) == 0) {
+                       f2fs_put_page(node_page, 1);
+                       continue;
                }
-               f2fs_put_page(node_page, 1);
-               stat_inc_node_blk_count(sbi, 1);
-       }
-
-       if (initial) {
-               initial = false;
-               goto next_step;
-       }
 
-       if (gc_type == FG_GC) {
-               struct writeback_control wbc = {
-                       .sync_mode = WB_SYNC_ALL,
-                       .nr_to_write = LONG_MAX,
-                       .for_reclaim = 0,
-               };
-               sync_node_pages(sbi, 0, &wbc);
+               get_node_info(sbi, nid, &ni);
+               if (ni.blk_addr != start_addr + off) {
+                       f2fs_put_page(node_page, 1);
+                       continue;
+               }
 
-               /*
-                * In the case of FG_GC, it'd be better to reclaim this victim
-                * completely.
-                */
-               if (get_valid_blocks(sbi, segno, 1) != 0)
-                       goto next_step;
+               move_node_page(node_page, gc_type);
+               stat_inc_node_blk_count(sbi, 1, gc_type);
        }
+
+       if (++phase < 3)
+               goto next_step;
 }
 
 /*
@@ -447,7 +493,7 @@ next_step:
  * as indirect or double indirect node blocks, are given, it must be a caller's
  * bug.
  */
-block_t start_bidx_of_node(unsigned int node_ofs)
+block_t start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
 {
        unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
        unsigned int bidx;
@@ -464,10 +510,10 @@ block_t start_bidx_of_node(unsigned int node_ofs)
                int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
                bidx = node_ofs - 5 - dec;
        }
-       return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE;
+       return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode);
 }
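The arithmetic is easier to follow with this era's layout constants plugged in (assumed here: ADDRS_PER_BLOCK == NIDS_PER_BLOCK == 1018, and ADDRS_PER_INODE(inode) == 923 for an inode without inline xattrs). A standalone re-derivation with two spot checks:

    #include <assert.h>

    #define ADDRS_PER_INODE 923     /* assumed default, no inline xattrs */
    #define ADDRS_PER_BLOCK 1018
    #define NIDS_PER_BLOCK  1018

    static unsigned int start_bidx(unsigned int node_ofs)
    {
            unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
            unsigned int bidx;

            if (node_ofs == 0)
                    return 0;                       /* the inode block itself */
            if (node_ofs <= 2)
                    bidx = node_ofs - 1;            /* the two direct nodes */
            else if (node_ofs <= indirect_blks) {
                    int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
                    bidx = node_ofs - 2 - dec;      /* discount indirect nodes */
            } else {
                    int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
                    bidx = node_ofs - 5 - dec;
            }
            return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE;
    }

    int main(void)
    {
            assert(start_bidx(1) == 923);           /* 1st direct node follows the
                                                       inode's own 923 slots */
            assert(start_bidx(2) == 923 + 1018);    /* 2nd direct node */
            return 0;
    }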
 
-static int check_dnode(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
+static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
                struct node_info *dni, block_t blkaddr, unsigned int *nofs)
 {
        struct page *node_page;
@@ -480,13 +526,13 @@ static int check_dnode(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
 
        node_page = get_node_page(sbi, nid);
        if (IS_ERR(node_page))
-               return 0;
+               return false;
 
        get_node_info(sbi, nid, dni);
 
        if (sum->version != dni->version) {
                f2fs_put_page(node_page, 1);
-               return 0;
+               return false;
        }
 
        *nofs = ofs_of_node(node_page);
@@ -494,33 +540,156 @@ static int check_dnode(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
        f2fs_put_page(node_page, 1);
 
        if (source_blkaddr != blkaddr)
-               return 0;
-       return 1;
+               return false;
+       return true;
 }
 
-static void move_data_page(struct inode *inode, struct page *page, int gc_type)
+static void move_encrypted_block(struct inode *inode, block_t bidx,
+                                                       unsigned int segno, int off)
 {
+       struct f2fs_io_info fio = {
+               .sbi = F2FS_I_SB(inode),
+               .type = DATA,
+               .rw = READ_SYNC,
+               .encrypted_page = NULL,
+       };
+       struct dnode_of_data dn;
+       struct f2fs_summary sum;
+       struct node_info ni;
+       struct page *page;
+       block_t newaddr;
+       int err;
+
+       /* do not read out */
+       page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
+       if (!page)
+               return;
+
+       if (!check_valid_map(F2FS_I_SB(inode), segno, off))
+               goto out;
+
+       set_new_dnode(&dn, inode, NULL, NULL, 0);
+       err = get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
+       if (err)
+               goto out;
+
+       if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
+               ClearPageUptodate(page);
+               goto put_out;
+       }
+
+       /*
+        * don't cache encrypted data into the meta inode until the previous
+        * dirty data has been written back, to avoid racing between GC and
+        * flush.
+        */
+       f2fs_wait_on_page_writeback(page, DATA, true);
+
+       get_node_info(fio.sbi, dn.nid, &ni);
+       set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);
+
+       /* read page */
+       fio.page = page;
+       fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
+
+       allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
+                                                       &sum, CURSEG_COLD_DATA);
+
+       fio.encrypted_page = f2fs_grab_cache_page(META_MAPPING(fio.sbi),
+                                                       newaddr, true);
+       if (!fio.encrypted_page) {
+               err = -ENOMEM;
+               goto recover_block;
+       }
+
+       err = f2fs_submit_page_bio(&fio);
+       if (err)
+               goto put_page_out;
+
+       /* write page */
+       lock_page(fio.encrypted_page);
+
+       if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) {
+               err = -EIO;
+               goto put_page_out;
+       }
+       if (unlikely(!PageUptodate(fio.encrypted_page))) {
+               err = -EIO;
+               goto put_page_out;
+       }
+
+       set_page_dirty(fio.encrypted_page);
+       f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true);
+       if (clear_page_dirty_for_io(fio.encrypted_page))
+               dec_page_count(fio.sbi, F2FS_DIRTY_META);
+
+       set_page_writeback(fio.encrypted_page);
+
+       /* allocate block address */
+       f2fs_wait_on_page_writeback(dn.node_page, NODE, true);
+
+       fio.rw = WRITE_SYNC;
+       fio.new_blkaddr = newaddr;
+       f2fs_submit_page_mbio(&fio);
+
+       f2fs_update_data_blkaddr(&dn, newaddr);
+       set_inode_flag(inode, FI_APPEND_WRITE);
+       if (page->index == 0)
+               set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
+put_page_out:
+       f2fs_put_page(fio.encrypted_page, 1);
+recover_block:
+       if (err)
+               __f2fs_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
+                                                               true, true);
+put_out:
+       f2fs_put_dnode(&dn);
+out:
+       f2fs_put_page(page, 1);
+}
+
+static void move_data_page(struct inode *inode, block_t bidx, int gc_type,
+                                                       unsigned int segno, int off)
+{
+       struct page *page;
+
+       page = get_lock_data_page(inode, bidx, true);
+       if (IS_ERR(page))
+               return;
+
+       if (!check_valid_map(F2FS_I_SB(inode), segno, off))
+               goto out;
+
        if (gc_type == BG_GC) {
                if (PageWriteback(page))
                        goto out;
                set_page_dirty(page);
                set_cold_data(page);
        } else {
-               struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+               struct f2fs_io_info fio = {
+                       .sbi = F2FS_I_SB(inode),
+                       .type = DATA,
+                       .rw = WRITE_SYNC,
+                       .page = page,
+                       .encrypted_page = NULL,
+               };
+               bool is_dirty = PageDirty(page);
+               int err;
 
-               if (PageWriteback(page)) {
-                       f2fs_submit_bio(sbi, DATA, true);
-                       wait_on_page_writeback(page);
+retry:
+               set_page_dirty(page);
+               f2fs_wait_on_page_writeback(page, DATA, true);
+               if (clear_page_dirty_for_io(page)) {
+                       inode_dec_dirty_pages(inode);
+                       remove_dirty_inode(inode);
                }
 
-               if (clear_page_dirty_for_io(page) &&
-                       S_ISDIR(inode->i_mode)) {
-                       dec_page_count(sbi, F2FS_DIRTY_DENTS);
-                       inode_dec_dirty_dents(inode);
-               }
                set_cold_data(page);
-               do_write_data_page(page);
-               clear_cold_data(page);
+
+               err = do_write_data_page(&fio);
+               if (err == -ENOMEM && is_dirty) {
+                       congestion_wait(BLK_RW_ASYNC, HZ/50);
+                       goto retry;
+               }
        }
 out:
        f2fs_put_page(page, 1);
@@ -534,7 +703,7 @@ out:
  * the victim data block is ignored.
  */
 static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
-               struct list_head *ilist, unsigned int segno, int gc_type)
+               struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
 {
        struct super_block *sb = sbi->sb;
        struct f2fs_summary *entry;
@@ -553,159 +722,258 @@ next_step:
                struct node_info dni; /* dnode info for the data */
                unsigned int ofs_in_node, nofs;
                block_t start_bidx;
+               nid_t nid = le32_to_cpu(entry->nid);
 
                /* stop BG_GC if there is not enough free sections. */
-               if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
+               if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
                        return;
 
                if (check_valid_map(sbi, segno, off) == 0)
                        continue;
 
                if (phase == 0) {
-                       ra_node_page(sbi, le32_to_cpu(entry->nid));
+                       ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
+                                                       META_NAT, true);
+                       continue;
+               }
+
+               if (phase == 1) {
+                       ra_node_page(sbi, nid);
                        continue;
                }
 
                /* Get an inode by ino with checking validity */
-               if (check_dnode(sbi, entry, &dni, start_addr + off, &nofs) == 0)
+               if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
                        continue;
 
-               if (phase == 1) {
+               if (phase == 2) {
                        ra_node_page(sbi, dni.ino);
                        continue;
                }
 
-               start_bidx = start_bidx_of_node(nofs);
                ofs_in_node = le16_to_cpu(entry->ofs_in_node);
 
-               if (phase == 2) {
+               if (phase == 3) {
                        inode = f2fs_iget(sb, dni.ino);
-                       if (IS_ERR(inode))
+                       if (IS_ERR(inode) || is_bad_inode(inode))
                                continue;
 
-                       data_page = find_data_page(inode,
-                                       start_bidx + ofs_in_node, false);
-                       if (IS_ERR(data_page))
-                               goto next_iput;
+                       /* if encrypted inode, defer the move to phase 4 */
+                       if (f2fs_encrypted_inode(inode) &&
+                                               S_ISREG(inode->i_mode)) {
+                               add_gc_inode(gc_list, inode);
+                               continue;
+                       }
+
+                       start_bidx = start_bidx_of_node(nofs, inode);
+                       data_page = get_read_data_page(inode,
+                                       start_bidx + ofs_in_node, READA, true);
+                       if (IS_ERR(data_page)) {
+                               iput(inode);
+                               continue;
+                       }
 
                        f2fs_put_page(data_page, 0);
-                       add_gc_inode(inode, ilist);
-               } else {
-                       inode = find_gc_inode(dni.ino, ilist);
-                       if (inode) {
-                               data_page = get_lock_data_page(inode,
-                                               start_bidx + ofs_in_node);
-                               if (IS_ERR(data_page))
+                       add_gc_inode(gc_list, inode);
+                       continue;
+               }
+
+               /* phase 4 */
+               inode = find_gc_inode(gc_list, dni.ino);
+               if (inode) {
+                       struct f2fs_inode_info *fi = F2FS_I(inode);
+                       bool locked = false;
+
+                       if (S_ISREG(inode->i_mode)) {
+                               if (!down_write_trylock(&fi->dio_rwsem[READ]))
                                        continue;
-                               move_data_page(inode, data_page, gc_type);
-                               stat_inc_data_blk_count(sbi, 1);
+                               if (!down_write_trylock(
+                                               &fi->dio_rwsem[WRITE])) {
+                                       up_write(&fi->dio_rwsem[READ]);
+                                       continue;
+                               }
+                               locked = true;
                        }
-               }
-               continue;
-next_iput:
-               iput(inode);
-       }
 
-       if (++phase < 4)
-               goto next_step;
+                       start_bidx = start_bidx_of_node(nofs, inode)
+                                                               + ofs_in_node;
+                       if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
+                               move_encrypted_block(inode, start_bidx, segno, off);
+                       else
+                               move_data_page(inode, start_bidx, gc_type, segno, off);
 
-       if (gc_type == FG_GC) {
-               f2fs_submit_bio(sbi, DATA, true);
+                       if (locked) {
+                               up_write(&fi->dio_rwsem[WRITE]);
+                               up_write(&fi->dio_rwsem[READ]);
+                       }
 
-               /*
-                * In the case of FG_GC, it'd be better to reclaim this victim
-                * completely.
-                */
-               if (get_valid_blocks(sbi, segno, 1) != 0) {
-                       phase = 2;
-                       goto next_step;
+                       stat_inc_data_blk_count(sbi, 1, gc_type);
                }
        }
+
+       if (++phase < 5)
+               goto next_step;
 }
 
 static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
-                                               int gc_type, int type)
+                       int gc_type)
 {
        struct sit_info *sit_i = SIT_I(sbi);
        int ret;
+
        mutex_lock(&sit_i->sentry_lock);
-       ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type, type, LFS);
+       ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
+                                             NO_CHECK_TYPE, LFS);
        mutex_unlock(&sit_i->sentry_lock);
        return ret;
 }
 
-static void do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
-                               struct list_head *ilist, int gc_type)
+static int do_garbage_collect(struct f2fs_sb_info *sbi,
+                               unsigned int start_segno,
+                               struct gc_inode_list *gc_list, int gc_type)
 {
        struct page *sum_page;
        struct f2fs_summary_block *sum;
        struct blk_plug plug;
-
-       /* read segment summary of victim */
-       sum_page = get_sum_page(sbi, segno);
-       if (IS_ERR(sum_page))
-               return;
+       unsigned int segno = start_segno;
+       unsigned int end_segno = start_segno + sbi->segs_per_sec;
+       int sec_freed = 0;
+       unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
+                                               SUM_TYPE_DATA : SUM_TYPE_NODE;
+
+       /* readahead multiple SSA blocks that have contiguous addresses */
+       if (sbi->segs_per_sec > 1)
+               ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
+                                       sbi->segs_per_sec, META_SSA, true);
+
+       /* reference all summary pages */
+       while (segno < end_segno) {
+               sum_page = get_sum_page(sbi, segno++);
+               unlock_page(sum_page);
+       }
 
        blk_start_plug(&plug);
 
-       sum = page_address(sum_page);
+       for (segno = start_segno; segno < end_segno; segno++) {
+
+               /* find segment summary of victim */
+               sum_page = find_get_page(META_MAPPING(sbi),
+                                       GET_SUM_BLOCK(sbi, segno));
+               f2fs_put_page(sum_page, 0);
 
-       switch (GET_SUM_TYPE((&sum->footer))) {
-       case SUM_TYPE_NODE:
-               gc_node_segment(sbi, sum->entries, segno, gc_type);
-               break;
-       case SUM_TYPE_DATA:
-               gc_data_segment(sbi, sum->entries, ilist, segno, gc_type);
-               break;
+               if (get_valid_blocks(sbi, segno, 1) == 0 ||
+                               !PageUptodate(sum_page) ||
+                               unlikely(f2fs_cp_error(sbi)))
+                       goto next;
+
+               sum = page_address(sum_page);
+               f2fs_bug_on(sbi, type != GET_SUM_TYPE((&sum->footer)));
+
+               /*
+                * this is to avoid deadlock:
+                * - lock_page(sum_page)         - f2fs_replace_block
+                *  - check_valid_map()            - mutex_lock(sentry_lock)
+                *   - mutex_lock(sentry_lock)     - change_curseg()
+                *                                  - lock_page(sum_page)
+                */
+
+               if (type == SUM_TYPE_NODE)
+                       gc_node_segment(sbi, sum->entries, segno, gc_type);
+               else
+                       gc_data_segment(sbi, sum->entries, gc_list, segno,
+                                                               gc_type);
+
+               stat_inc_seg_count(sbi, type, gc_type);
+next:
+               f2fs_put_page(sum_page, 0);
        }
+
+       if (gc_type == FG_GC)
+               f2fs_submit_merged_bio(sbi,
+                               (type == SUM_TYPE_NODE) ? NODE : DATA, WRITE);
+
        blk_finish_plug(&plug);
 
-       stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)));
+       if (gc_type == FG_GC &&
+               get_valid_blocks(sbi, start_segno, sbi->segs_per_sec) == 0)
+               sec_freed = 1;
+
        stat_inc_call_count(sbi->stat_info);
 
-       f2fs_put_page(sum_page, 1);
+       return sec_freed;
 }
 
-int f2fs_gc(struct f2fs_sb_info *sbi)
+int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background)
 {
-       struct list_head ilist;
-       unsigned int segno, i;
-       int gc_type = BG_GC;
-       int nfree = 0;
-       int ret = -1;
-
-       INIT_LIST_HEAD(&ilist);
+       unsigned int segno;
+       int gc_type = sync ? FG_GC : BG_GC;
+       int sec_freed = 0;
+       int ret = -EINVAL;
+       struct cp_control cpc;
+       struct gc_inode_list gc_list = {
+               .ilist = LIST_HEAD_INIT(gc_list.ilist),
+               .iroot = RADIX_TREE_INIT(GFP_NOFS),
+       };
+
+       cpc.reason = __get_cp_reason(sbi);
 gc_more:
-       if (!(sbi->sb->s_flags & MS_ACTIVE))
+       segno = NULL_SEGNO;
+
+       if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE)))
                goto stop;
+       if (unlikely(f2fs_cp_error(sbi))) {
+               ret = -EIO;
+               goto stop;
+       }
 
-       if (gc_type == BG_GC && has_not_enough_free_secs(sbi, nfree)) {
+       if (gc_type == BG_GC && has_not_enough_free_secs(sbi, sec_freed, 0)) {
                gc_type = FG_GC;
-               write_checkpoint(sbi, false);
+               /*
+                * If there is no victim and no prefree segment but still not
+                * enough free sections, we should flush dent/node blocks and do
+                * garbage collections.
+                */
+               if (__get_victim(sbi, &segno, gc_type) ||
+                                               prefree_segments(sbi)) {
+                       ret = write_checkpoint(sbi, &cpc);
+                       if (ret)
+                               goto stop;
+                       segno = NULL_SEGNO;
+               } else if (has_not_enough_free_secs(sbi, 0, 0)) {
+                       ret = write_checkpoint(sbi, &cpc);
+                       if (ret)
+                               goto stop;
+               }
+       } else if (gc_type == BG_GC && !background) {
+               /* f2fs_balance_fs doesn't need to do BG_GC in critical path. */
+               goto stop;
        }
 
-       if (!__get_victim(sbi, &segno, gc_type, NO_CHECK_TYPE))
+       if (segno == NULL_SEGNO && !__get_victim(sbi, &segno, gc_type))
                goto stop;
        ret = 0;
 
-       for (i = 0; i < sbi->segs_per_sec; i++)
-               do_garbage_collect(sbi, segno + i, &ilist, gc_type);
+       if (do_garbage_collect(sbi, segno, &gc_list, gc_type) &&
+                       gc_type == FG_GC)
+               sec_freed++;
 
-       if (gc_type == FG_GC) {
+       if (gc_type == FG_GC)
                sbi->cur_victim_sec = NULL_SEGNO;
-               nfree++;
-               WARN_ON(get_valid_blocks(sbi, segno, sbi->segs_per_sec));
-       }
 
-       if (has_not_enough_free_secs(sbi, nfree))
-               goto gc_more;
+       if (!sync) {
+               if (has_not_enough_free_secs(sbi, sec_freed, 0))
+                       goto gc_more;
 
-       if (gc_type == FG_GC)
-               write_checkpoint(sbi, false);
+               if (gc_type == FG_GC)
+                       ret = write_checkpoint(sbi, &cpc);
+       }
 stop:
        mutex_unlock(&sbi->gc_mutex);
 
-       put_gc_inode(&ilist);
+       put_gc_inode(&gc_list);
+
+       if (sync)
+               ret = sec_freed ? 0 : -EAGAIN;
        return ret;
 }
 
@@ -713,17 +981,3 @@ void build_gc_manager(struct f2fs_sb_info *sbi)
 {
        DIRTY_I(sbi)->v_ops = &default_v_ops;
 }
-
-int __init create_gc_caches(void)
-{
-       winode_slab = f2fs_kmem_cache_create("f2fs_gc_inodes",
-                       sizeof(struct inode_entry), NULL);
-       if (!winode_slab)
-               return -ENOMEM;
-       return 0;
-}
-
-void destroy_gc_caches(void)
-{
-       kmem_cache_destroy(winode_slab);
-}
index 2c6a6bd0832244f4bb1e45b52cd41400e87fe363..a993967dcdb979160ec5bced54fa7df565bf1e65 100644 (file)
                                                 * whether IO subsystem is idle
                                                 * or not
                                                 */
-#define GC_THREAD_MIN_SLEEP_TIME       30000   /* milliseconds */
-#define GC_THREAD_MAX_SLEEP_TIME       60000
-#define GC_THREAD_NOGC_SLEEP_TIME      300000  /* wait 5 min */
+#define DEF_GC_THREAD_MIN_SLEEP_TIME   30000   /* milliseconds */
+#define DEF_GC_THREAD_MAX_SLEEP_TIME   60000
+#define DEF_GC_THREAD_NOGC_SLEEP_TIME  300000  /* wait 5 min */
 #define LIMIT_INVALID_BLOCK    40 /* percentage over total user space */
 #define LIMIT_FREE_BLOCK       40 /* percentage over invalid + free space */
 
 /* Search max. number of dirty segments to select a victim segment */
-#define MAX_VICTIM_SEARCH      20
+#define DEF_MAX_VICTIM_SEARCH 4096 /* covers 8GB */
 
 struct f2fs_gc_kthread {
        struct task_struct *f2fs_gc_task;
        wait_queue_head_t gc_wait_queue_head;
+
+       /* for gc sleep time */
+       unsigned int min_sleep_time;
+       unsigned int max_sleep_time;
+       unsigned int no_gc_sleep_time;
+
+       /* for changing gc mode */
+       unsigned int gc_idle;
 };
 
-struct inode_entry {
-       struct list_head list;
-       struct inode *inode;
+struct gc_inode_list {
+       struct list_head ilist;
+       struct radix_tree_root iroot;
 };
 
 /*
@@ -56,26 +64,26 @@ static inline block_t limit_free_user_blocks(struct f2fs_sb_info *sbi)
        return (long)(reclaimable_user_blocks * LIMIT_FREE_BLOCK) / 100;
 }
 
-static inline long increase_sleep_time(long wait)
+static inline void increase_sleep_time(struct f2fs_gc_kthread *gc_th,
+                                                               long *wait)
 {
-       if (wait == GC_THREAD_NOGC_SLEEP_TIME)
-               return wait;
+       if (*wait == gc_th->no_gc_sleep_time)
+               return;
 
-       wait += GC_THREAD_MIN_SLEEP_TIME;
-       if (wait > GC_THREAD_MAX_SLEEP_TIME)
-               wait = GC_THREAD_MAX_SLEEP_TIME;
-       return wait;
+       *wait += gc_th->min_sleep_time;
+       if (*wait > gc_th->max_sleep_time)
+               *wait = gc_th->max_sleep_time;
 }
 
-static inline long decrease_sleep_time(long wait)
+static inline void decrease_sleep_time(struct f2fs_gc_kthread *gc_th,
+                                                               long *wait)
 {
-       if (wait == GC_THREAD_NOGC_SLEEP_TIME)
-               wait = GC_THREAD_MAX_SLEEP_TIME;
+       if (*wait == gc_th->no_gc_sleep_time)
+               *wait = gc_th->max_sleep_time;
 
-       wait -= GC_THREAD_MIN_SLEEP_TIME;
-       if (wait <= GC_THREAD_MIN_SLEEP_TIME)
-               wait = GC_THREAD_MIN_SLEEP_TIME;
-       return wait;
+       *wait -= gc_th->min_sleep_time;
+       if (*wait <= gc_th->min_sleep_time)
+               *wait = gc_th->min_sleep_time;
 }
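With the defaults set in start_gc_thread() (30s min, 60s max, 300s no-GC), these helpers step the polling interval in 30-second increments between 30 and 60 seconds, and a no-victim round parks it at 300 seconds until dirty segments accumulate again. A runnable userspace trace of that trajectory:

    #include <stdio.h>

    struct gc_th { long min, max, nogc; };  /* milliseconds */

    static void inc_sleep(struct gc_th *t, long *wait)
    {
            if (*wait == t->nogc)
                    return;
            *wait += t->min;
            if (*wait > t->max)
                    *wait = t->max;
    }

    static void dec_sleep(struct gc_th *t, long *wait)
    {
            if (*wait == t->nogc)
                    *wait = t->max;
            *wait -= t->min;
            if (*wait <= t->min)
                    *wait = t->min;
    }

    int main(void)
    {
            struct gc_th t = { 30000, 60000, 300000 };
            long wait = t.min;

            inc_sleep(&t, &wait);   printf("%ld\n", wait);  /* busy: 60000 */
            inc_sleep(&t, &wait);   printf("%ld\n", wait);  /* capped: 60000 */
            wait = t.nogc;          /* f2fs_gc() found no victim */
            dec_sleep(&t, &wait);   printf("%ld\n", wait);  /* back to 30000 */
            return 0;
    }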
 
 static inline bool has_enough_invalid_blocks(struct f2fs_sb_info *sbi)
@@ -83,7 +91,7 @@ static inline bool has_enough_invalid_blocks(struct f2fs_sb_info *sbi)
        block_t invalid_user_blocks = sbi->user_block_count -
                                        written_block_count(sbi);
        /*
-        * Background GC is triggered with the following condition.
+        * Background GC is triggered under the following conditions.
         * 1. There are a number of invalid blocks.
         * 2. There is not enough free space.
         */
@@ -92,11 +100,3 @@ static inline bool has_enough_invalid_blocks(struct f2fs_sb_info *sbi)
                return true;
        return false;
 }
-
-static inline int is_idle(struct f2fs_sb_info *sbi)
-{
-       struct block_device *bdev = sbi->sb->s_bdev;
-       struct request_queue *q = bdev_get_queue(bdev);
-       struct request_list *rl = &q->root_rl;
-       return !(rl->count[BLK_RW_SYNC]) && !(rl->count[BLK_RW_ASYNC]);
-}
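For scale, the 40/40 thresholds mean background GC stays off until a large share of the space is reclaimable: on a volume with 1,000,000 user blocks, more than 400,000 blocks must be invalid and free space must have dropped below 40% of the reclaimable amount. A simplified restatement (the full predicate, partly outside this hunk, also counts prefree segments as reclaimable):

    /* simplified sketch of has_enough_invalid_blocks()'s 40/40 test */
    static int should_trigger_bg_gc(unsigned long user_blocks,
                                    unsigned long invalid_blocks,
                                    unsigned long free_blocks)
    {
            unsigned long limit_invalid = user_blocks * 40 / 100;   /* LIMIT_INVALID_BLOCK */
            unsigned long limit_free = invalid_blocks * 40 / 100;   /* LIMIT_FREE_BLOCK */

            return invalid_blocks > limit_invalid && free_blocks < limit_free;
    }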
index 6eb8d269b53b612b7141b2d101eccfcf840197e0..71b7206c431ea00f937e5b1e59b5f90999091d2a 100644 (file)
@@ -42,7 +42,8 @@ static void TEA_transform(unsigned int buf[4], unsigned int const in[])
        buf[1] += b1;
 }
 
-static void str2hashbuf(const char *msg, size_t len, unsigned int *buf, int num)
+static void str2hashbuf(const unsigned char *msg, size_t len,
+                               unsigned int *buf, int num)
 {
        unsigned pad, val;
        int i;
@@ -69,15 +70,16 @@ static void str2hashbuf(const char *msg, size_t len, unsigned int *buf, int num)
                *buf++ = pad;
 }
 
-f2fs_hash_t f2fs_dentry_hash(const char *name, size_t len)
+f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info)
 {
        __u32 hash;
        f2fs_hash_t f2fs_hash;
-       const char *p;
+       const unsigned char *p;
        __u32 in[8], buf[4];
+       const unsigned char *name = name_info->name;
+       size_t len = name_info->len;
 
-       if ((len <= 2) && (name[0] == '.') &&
-               (name[1] == '.' || name[1] == '\0'))
+       if (is_dot_dotdot(name_info))
                return 0;
 
        /* Initialize the default seed for the hash checksum functions */
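The only behavioral subtlety in this hunk is the "." / ".." short-circuit, now centralized in is_dot_dotdot(): both names hash to 0 instead of going through the TEA rounds. A userspace equivalent of the predicate that was open-coded before:

    #include <assert.h>
    #include <stddef.h>

    static int is_dot_dotdot(const char *name, size_t len)
    {
            if (len == 1 && name[0] == '.')
                    return 1;
            if (len == 2 && name[0] == '.' && name[1] == '.')
                    return 1;
            return 0;
    }

    int main(void)
    {
            assert(is_dot_dotdot(".", 1));
            assert(is_dot_dotdot("..", 2));
            assert(!is_dot_dotdot(".x", 2));        /* hidden names hash normally */
            return 0;
    }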
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
new file mode 100644 (file)
index 0000000..f42bfcb
--- /dev/null
@@ -0,0 +1,672 @@
+/*
+ * fs/f2fs/inline.c
+ * Copyright (c) 2013, Intel Corporation
+ *
+ * Authors: Huajun Li <huajun.li@intel.com>
+ *          Haicheng Li <haicheng.li@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/fs.h>
+#include <linux/f2fs_fs.h>
+
+#include "f2fs.h"
+#include "node.h"
+
+bool f2fs_may_inline_data(struct inode *inode)
+{
+       if (f2fs_is_atomic_file(inode))
+               return false;
+
+       if (!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))
+               return false;
+
+       if (i_size_read(inode) > MAX_INLINE_DATA)
+               return false;
+
+       if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
+               return false;
+
+       return true;
+}
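For scale: with the default on-disk layout (assumed here: 923 block-address slots per inode, 50 reserved for inline xattrs, 1 reserved slot), MAX_INLINE_DATA works out to (923 - 50 - 1) * 4 = 3488 bytes, so only files up to roughly 3.4KB are eligible. A one-line sanity check:

    #include <assert.h>

    /* assumed defaults: DEF_ADDRS_PER_INODE = 923, F2FS_INLINE_XATTR_ADDRS = 50 */
    #define MAX_INLINE_DATA (4 * (923 - 50 - 1))    /* 4-byte block addresses */

    int main(void) { assert(MAX_INLINE_DATA == 3488); return 0; }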
+
+bool f2fs_may_inline_dentry(struct inode *inode)
+{
+       if (!test_opt(F2FS_I_SB(inode), INLINE_DENTRY))
+               return false;
+
+       if (!S_ISDIR(inode->i_mode))
+               return false;
+
+       return true;
+}
+
+void read_inline_data(struct page *page, struct page *ipage)
+{
+       void *src_addr, *dst_addr;
+
+       if (PageUptodate(page))
+               return;
+
+       f2fs_bug_on(F2FS_P_SB(page), page->index);
+
+       zero_user_segment(page, MAX_INLINE_DATA, PAGE_SIZE);
+
+       /* Copy the whole inline data block */
+       src_addr = inline_data_addr(ipage);
+       dst_addr = kmap_atomic(page);
+       memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
+       flush_dcache_page(page);
+       kunmap_atomic(dst_addr);
+       if (!PageUptodate(page))
+               SetPageUptodate(page);
+}
+
+bool truncate_inline_inode(struct page *ipage, u64 from)
+{
+       void *addr;
+
+       if (from >= MAX_INLINE_DATA)
+               return false;
+
+       addr = inline_data_addr(ipage);
+
+       f2fs_wait_on_page_writeback(ipage, NODE, true);
+       memset(addr + from, 0, MAX_INLINE_DATA - from);
+       set_page_dirty(ipage);
+       return true;
+}
+
+int f2fs_read_inline_data(struct inode *inode, struct page *page)
+{
+       struct page *ipage;
+
+       ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
+       if (IS_ERR(ipage)) {
+               unlock_page(page);
+               return PTR_ERR(ipage);
+       }
+
+       if (!f2fs_has_inline_data(inode)) {
+               f2fs_put_page(ipage, 1);
+               return -EAGAIN;
+       }
+
+       if (page->index)
+               zero_user_segment(page, 0, PAGE_SIZE);
+       else
+               read_inline_data(page, ipage);
+
+       if (!PageUptodate(page))
+               SetPageUptodate(page);
+       f2fs_put_page(ipage, 1);
+       unlock_page(page);
+       return 0;
+}
+
+int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
+{
+       struct f2fs_io_info fio = {
+               .sbi = F2FS_I_SB(dn->inode),
+               .type = DATA,
+               .rw = WRITE_SYNC | REQ_PRIO,
+               .page = page,
+               .encrypted_page = NULL,
+       };
+       int dirty, err;
+
+       if (!f2fs_exist_data(dn->inode))
+               goto clear_out;
+
+       err = f2fs_reserve_block(dn, 0);
+       if (err)
+               return err;
+
+       f2fs_bug_on(F2FS_P_SB(page), PageWriteback(page));
+
+       read_inline_data(page, dn->inode_page);
+       set_page_dirty(page);
+
+       /* clear dirty state */
+       dirty = clear_page_dirty_for_io(page);
+
+       /* write data page to try to make data consistent */
+       set_page_writeback(page);
+       fio.old_blkaddr = dn->data_blkaddr;
+       write_data_page(dn, &fio);
+       f2fs_wait_on_page_writeback(page, DATA, true);
+       if (dirty) {
+               inode_dec_dirty_pages(dn->inode);
+               remove_dirty_inode(dn->inode);
+       }
+
+       /* this converted inline_data should be recovered. */
+       set_inode_flag(dn->inode, FI_APPEND_WRITE);
+
+       /* clear inline data and flag after data writeback */
+       truncate_inline_inode(dn->inode_page, 0);
+       clear_inline_node(dn->inode_page);
+clear_out:
+       stat_dec_inline_inode(dn->inode);
+       f2fs_clear_inline_inode(dn->inode);
+       f2fs_put_dnode(dn);
+       return 0;
+}
+
+int f2fs_convert_inline_inode(struct inode *inode)
+{
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       struct dnode_of_data dn;
+       struct page *ipage, *page;
+       int err = 0;
+
+       if (!f2fs_has_inline_data(inode))
+               return 0;
+
+       page = f2fs_grab_cache_page(inode->i_mapping, 0, false);
+       if (!page)
+               return -ENOMEM;
+
+       f2fs_lock_op(sbi);
+
+       ipage = get_node_page(sbi, inode->i_ino);
+       if (IS_ERR(ipage)) {
+               err = PTR_ERR(ipage);
+               goto out;
+       }
+
+       set_new_dnode(&dn, inode, ipage, ipage, 0);
+
+       if (f2fs_has_inline_data(inode))
+               err = f2fs_convert_inline_page(&dn, page);
+
+       f2fs_put_dnode(&dn);
+out:
+       f2fs_unlock_op(sbi);
+
+       f2fs_put_page(page, 1);
+
+       f2fs_balance_fs(sbi, dn.node_changed);
+
+       return err;
+}
+
+int f2fs_write_inline_data(struct inode *inode, struct page *page)
+{
+       void *src_addr, *dst_addr;
+       struct dnode_of_data dn;
+       int err;
+
+       set_new_dnode(&dn, inode, NULL, NULL, 0);
+       err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
+       if (err)
+               return err;
+
+       if (!f2fs_has_inline_data(inode)) {
+               f2fs_put_dnode(&dn);
+               return -EAGAIN;
+       }
+
+       f2fs_bug_on(F2FS_I_SB(inode), page->index);
+
+       f2fs_wait_on_page_writeback(dn.inode_page, NODE, true);
+       src_addr = kmap_atomic(page);
+       dst_addr = inline_data_addr(dn.inode_page);
+       memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
+       kunmap_atomic(src_addr);
+       set_page_dirty(dn.inode_page);
+
+       set_inode_flag(inode, FI_APPEND_WRITE);
+       set_inode_flag(inode, FI_DATA_EXIST);
+
+       clear_inline_node(dn.inode_page);
+       f2fs_put_dnode(&dn);
+       return 0;
+}
+
+bool recover_inline_data(struct inode *inode, struct page *npage)
+{
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       struct f2fs_inode *ri = NULL;
+       void *src_addr, *dst_addr;
+       struct page *ipage;
+
+       /*
+        * The inline_data recovery policy is as follows.
+        * [prev.] [next] of inline_data flag
+        *    o       o  -> recover inline_data
+        *    o       x  -> remove inline_data, and then recover data blocks
+        *    x       o  -> remove inline_data, and then recover inline_data
+        *    x       x  -> recover data blocks
+        */
+       if (IS_INODE(npage))
+               ri = F2FS_INODE(npage);
+
+       if (f2fs_has_inline_data(inode) &&
+                       ri && (ri->i_inline & F2FS_INLINE_DATA)) {
+process_inline:
+               ipage = get_node_page(sbi, inode->i_ino);
+               f2fs_bug_on(sbi, IS_ERR(ipage));
+
+               f2fs_wait_on_page_writeback(ipage, NODE, true);
+
+               src_addr = inline_data_addr(npage);
+               dst_addr = inline_data_addr(ipage);
+               memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
+
+               set_inode_flag(inode, FI_INLINE_DATA);
+               set_inode_flag(inode, FI_DATA_EXIST);
+
+               set_page_dirty(ipage);
+               f2fs_put_page(ipage, 1);
+               return true;
+       }
+
+       if (f2fs_has_inline_data(inode)) {
+               ipage = get_node_page(sbi, inode->i_ino);
+               f2fs_bug_on(sbi, IS_ERR(ipage));
+               if (!truncate_inline_inode(ipage, 0))
+                       return false;
+               f2fs_clear_inline_inode(inode);
+               f2fs_put_page(ipage, 1);
+       } else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) {
+               if (truncate_blocks(inode, 0, false))
+                       return false;
+               goto process_inline;
+       }
+       return false;
+}
+
+struct f2fs_dir_entry *find_in_inline_dir(struct inode *dir,
+                       struct fscrypt_name *fname, struct page **res_page)
+{
+       struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
+       struct f2fs_inline_dentry *inline_dentry;
+       struct qstr name = FSTR_TO_QSTR(&fname->disk_name);
+       struct f2fs_dir_entry *de;
+       struct f2fs_dentry_ptr d;
+       struct page *ipage;
+       f2fs_hash_t namehash;
+
+       ipage = get_node_page(sbi, dir->i_ino);
+       if (IS_ERR(ipage)) {
+               *res_page = ipage;
+               return NULL;
+       }
+
+       namehash = f2fs_dentry_hash(&name);
+
+       inline_dentry = inline_data_addr(ipage);
+
+       make_dentry_ptr(NULL, &d, (void *)inline_dentry, 2);
+       de = find_target_dentry(fname, namehash, NULL, &d);
+       unlock_page(ipage);
+       if (de)
+               *res_page = ipage;
+       else
+               f2fs_put_page(ipage, 0);
+
+       return de;
+}
+
+int make_empty_inline_dir(struct inode *inode, struct inode *parent,
+                                                       struct page *ipage)
+{
+       struct f2fs_inline_dentry *dentry_blk;
+       struct f2fs_dentry_ptr d;
+
+       dentry_blk = inline_data_addr(ipage);
+
+       make_dentry_ptr(NULL, &d, (void *)dentry_blk, 2);
+       do_make_empty_dir(inode, parent, &d);
+
+       set_page_dirty(ipage);
+
+       /* update i_size to MAX_INLINE_DATA */
+       if (i_size_read(inode) < MAX_INLINE_DATA)
+               f2fs_i_size_write(inode, MAX_INLINE_DATA);
+       return 0;
+}
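The same 3488 inline bytes are carved into a bitmap, a dentry array, and filename slots; with an 11-byte dentry and an 8-byte filename slot (assumed layout constants), that packs NR_INLINE_DENTRY = 3488 * 8 / ((11 + 8) * 8 + 1) = 182 entries, the first two of which "." and ".." consume in the empty directory created above. A quick check:

    #include <assert.h>

    /* assumed: MAX_INLINE_DATA = 3488, SIZE_OF_DIR_ENTRY = 11, F2FS_SLOT_LEN = 8 */
    #define NR_INLINE_DENTRY (3488 * 8 / ((11 + 8) * 8 + 1))

    int main(void) { assert(NR_INLINE_DENTRY == 182); return 0; }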
+
+/*
+ * NOTE: ipage is grabbed by the caller, but if any error occurs, we should
+ * release ipage in this function.
+ */
+static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage,
+                               struct f2fs_inline_dentry *inline_dentry)
+{
+       struct page *page;
+       struct dnode_of_data dn;
+       struct f2fs_dentry_block *dentry_blk;
+       int err;
+
+       page = f2fs_grab_cache_page(dir->i_mapping, 0, false);
+       if (!page) {
+               f2fs_put_page(ipage, 1);
+               return -ENOMEM;
+       }
+
+       set_new_dnode(&dn, dir, ipage, NULL, 0);
+       err = f2fs_reserve_block(&dn, 0);
+       if (err)
+               goto out;
+
+       f2fs_wait_on_page_writeback(page, DATA, true);
+       zero_user_segment(page, MAX_INLINE_DATA, PAGE_SIZE);
+
+       dentry_blk = kmap_atomic(page);
+
+       /* copy data from inline dentry block to new dentry block */
+       memcpy(dentry_blk->dentry_bitmap, inline_dentry->dentry_bitmap,
+                                       INLINE_DENTRY_BITMAP_SIZE);
+       memset(dentry_blk->dentry_bitmap + INLINE_DENTRY_BITMAP_SIZE, 0,
+                       SIZE_OF_DENTRY_BITMAP - INLINE_DENTRY_BITMAP_SIZE);
+       /*
+        * we do not need to zero out the remainder of the dentry and filename
+        * fields, since the bitmap marks their usage status; we can also skip
+        * copying/zeroing the reserved space of the dentry block, because it
+        * has not been used so far.
+        */
+       memcpy(dentry_blk->dentry, inline_dentry->dentry,
+                       sizeof(struct f2fs_dir_entry) * NR_INLINE_DENTRY);
+       memcpy(dentry_blk->filename, inline_dentry->filename,
+                                       NR_INLINE_DENTRY * F2FS_SLOT_LEN);
+
+       kunmap_atomic(dentry_blk);
+       if (!PageUptodate(page))
+               SetPageUptodate(page);
+       set_page_dirty(page);
+
+       /* clear inline dir and flag after data writeback */
+       truncate_inline_inode(ipage, 0);
+
+       stat_dec_inline_dir(dir);
+       clear_inode_flag(dir, FI_INLINE_DENTRY);
+
+       f2fs_i_depth_write(dir, 1);
+       if (i_size_read(dir) < PAGE_SIZE)
+               f2fs_i_size_write(dir, PAGE_SIZE);
+out:
+       f2fs_put_page(page, 1);
+       return err;
+}
+
+static int f2fs_add_inline_entries(struct inode *dir,
+                       struct f2fs_inline_dentry *inline_dentry)
+{
+       struct f2fs_dentry_ptr d;
+       unsigned long bit_pos = 0;
+       int err = 0;
+
+       make_dentry_ptr(NULL, &d, (void *)inline_dentry, 2);
+
+       while (bit_pos < d.max) {
+               struct f2fs_dir_entry *de;
+               struct qstr new_name;
+               nid_t ino;
+               umode_t fake_mode;
+
+               if (!test_bit_le(bit_pos, d.bitmap)) {
+                       bit_pos++;
+                       continue;
+               }
+
+               de = &d.dentry[bit_pos];
+
+               if (unlikely(!de->name_len)) {
+                       bit_pos++;
+                       continue;
+               }
+
+               new_name.name = d.filename[bit_pos];
+               new_name.len = le16_to_cpu(de->name_len);
+
+               ino = le32_to_cpu(de->ino);
+               fake_mode = get_de_type(de) << S_SHIFT;
+
+               err = f2fs_add_regular_entry(dir, &new_name, NULL, NULL,
+                                                       ino, fake_mode);
+               if (err)
+                       goto punch_dentry_pages;
+
+               bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
+       }
+       return 0;
+punch_dentry_pages:
+       truncate_inode_pages(&dir->i_data, 0);
+       truncate_blocks(dir, 0, false);
+       remove_dirty_inode(dir);
+       return err;
+}
+
+static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage,
+                               struct f2fs_inline_dentry *inline_dentry)
+{
+       struct f2fs_inline_dentry *backup_dentry;
+       int err;
+
+       backup_dentry = f2fs_kmalloc(F2FS_I_SB(dir),
+                       sizeof(struct f2fs_inline_dentry), GFP_F2FS_ZERO);
+       if (!backup_dentry) {
+               f2fs_put_page(ipage, 1);
+               return -ENOMEM;
+       }
+
+       memcpy(backup_dentry, inline_dentry, MAX_INLINE_DATA);
+       truncate_inline_inode(ipage, 0);
+
+       unlock_page(ipage);
+
+       err = f2fs_add_inline_entries(dir, backup_dentry);
+       if (err)
+               goto recover;
+
+       lock_page(ipage);
+
+       stat_dec_inline_dir(dir);
+       clear_inode_flag(dir, FI_INLINE_DENTRY);
+       kfree(backup_dentry);
+       return 0;
+recover:
+       lock_page(ipage);
+       memcpy(inline_dentry, backup_dentry, MAX_INLINE_DATA);
+       f2fs_i_depth_write(dir, 0);
+       f2fs_i_size_write(dir, MAX_INLINE_DATA);
+       set_page_dirty(ipage);
+       f2fs_put_page(ipage, 1);
+
+       kfree(backup_dentry);
+       return err;
+}
+
+static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
+                               struct f2fs_inline_dentry *inline_dentry)
+{
+       if (!F2FS_I(dir)->i_dir_level)
+               return f2fs_move_inline_dirents(dir, ipage, inline_dentry);
+       else
+               return f2fs_move_rehashed_dirents(dir, ipage, inline_dentry);
+}
+
+int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name,
+                               const struct qstr *orig_name,
+                               struct inode *inode, nid_t ino, umode_t mode)
+{
+       struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
+       struct page *ipage;
+       unsigned int bit_pos;
+       f2fs_hash_t name_hash;
+       struct f2fs_inline_dentry *dentry_blk = NULL;
+       struct f2fs_dentry_ptr d;
+       int slots = GET_DENTRY_SLOTS(new_name->len);
+       struct page *page = NULL;
+       int err = 0;
+
+       ipage = get_node_page(sbi, dir->i_ino);
+       if (IS_ERR(ipage))
+               return PTR_ERR(ipage);
+
+       dentry_blk = inline_data_addr(ipage);
+       bit_pos = room_for_filename(&dentry_blk->dentry_bitmap,
+                                               slots, NR_INLINE_DENTRY);
+       if (bit_pos >= NR_INLINE_DENTRY) {
+               err = f2fs_convert_inline_dir(dir, ipage, dentry_blk);
+               if (err)
+                       return err;
+               err = -EAGAIN;
+               goto out;
+       }
+
+       if (inode) {
+               down_write(&F2FS_I(inode)->i_sem);
+               page = init_inode_metadata(inode, dir, new_name,
+                                               orig_name, ipage);
+               if (IS_ERR(page)) {
+                       err = PTR_ERR(page);
+                       goto fail;
+               }
+               if (f2fs_encrypted_inode(dir))
+                       file_set_enc_name(inode);
+       }
+
+       f2fs_wait_on_page_writeback(ipage, NODE, true);
+
+       name_hash = f2fs_dentry_hash(new_name);
+       make_dentry_ptr(NULL, &d, (void *)dentry_blk, 2);
+       f2fs_update_dentry(ino, mode, &d, new_name, name_hash, bit_pos);
+
+       set_page_dirty(ipage);
+
+       /* we don't need to call mark_inode_dirty now */
+       if (inode) {
+               f2fs_i_pino_write(inode, dir->i_ino);
+               f2fs_put_page(page, 1);
+       }
+
+       update_parent_metadata(dir, inode, 0);
+fail:
+       if (inode)
+               up_write(&F2FS_I(inode)->i_sem);
+out:
+       f2fs_put_page(ipage, 1);
+       return err;
+}
+
+void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page,
+                                       struct inode *dir, struct inode *inode)
+{
+       struct f2fs_inline_dentry *inline_dentry;
+       int slots = GET_DENTRY_SLOTS(le16_to_cpu(dentry->name_len));
+       unsigned int bit_pos;
+       int i;
+
+       lock_page(page);
+       f2fs_wait_on_page_writeback(page, NODE, true);
+
+       inline_dentry = inline_data_addr(page);
+       bit_pos = dentry - inline_dentry->dentry;
+       for (i = 0; i < slots; i++)
+               __clear_bit_le(bit_pos + i,
+                               &inline_dentry->dentry_bitmap);
+
+       set_page_dirty(page);
+       f2fs_put_page(page, 1);
+
+       dir->i_ctime = dir->i_mtime = current_time(dir);
+       f2fs_mark_inode_dirty_sync(dir, false);
+
+       if (inode)
+               f2fs_drop_nlink(dir, inode);
+}
+
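+/*
+ * Note: slots 0 and 1 of an inline dentry block hold "." and "..", so the
+ * emptiness scan below starts at bit_pos 2; the directory is empty when no
+ * bit beyond the two dot entries is set.
+ */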
+bool f2fs_empty_inline_dir(struct inode *dir)
+{
+       struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
+       struct page *ipage;
+       unsigned int bit_pos = 2;
+       struct f2fs_inline_dentry *dentry_blk;
+
+       ipage = get_node_page(sbi, dir->i_ino);
+       if (IS_ERR(ipage))
+               return false;
+
+       dentry_blk = inline_data_addr(ipage);
+       bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
+                                       NR_INLINE_DENTRY,
+                                       bit_pos);
+
+       f2fs_put_page(ipage, 1);
+
+       if (bit_pos < NR_INLINE_DENTRY)
+               return false;
+
+       return true;
+}
+
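+/*
+ * readdir over an inline directory is a single shot: once f2fs_fill_dentries()
+ * has emitted every live entry, f_pos is parked at NR_INLINE_DENTRY so the
+ * next call returns EOF immediately (the pos check below).
+ */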
+int f2fs_read_inline_dir(struct file *file, void *dirent, filldir_t filldir,
+                                               struct fscrypt_str *fstr)
+{
+       unsigned long pos = file->f_pos;
+       unsigned int bit_pos = 0;
+       struct inode *inode = file_inode(file);
+       struct f2fs_inline_dentry *inline_dentry = NULL;
+       struct page *ipage = NULL;
+       struct f2fs_dentry_ptr d;
+       int err;
+
+       if (pos >= NR_INLINE_DENTRY)
+               return 0;
+
+       bit_pos = (pos % NR_INLINE_DENTRY);
+
+       ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
+       if (IS_ERR(ipage))
+               return PTR_ERR(ipage);
+
+       inline_dentry = inline_data_addr(ipage);
+
+       make_dentry_ptr(inode, &d, (void *)inline_dentry, 2);
+
+       err = f2fs_fill_dentries(file, dirent, filldir, &d, 0, bit_pos, fstr);
+       if (!err)
+               file->f_pos = NR_INLINE_DENTRY;
+
+       f2fs_put_page(ipage, 1);
+       return err < 0 ? err : 0;
+}
+
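+/*
+ * Address math note: inline data lives inside the inode's node block, so the
+ * extent reported below is the node block's byte address plus the offset of
+ * the inline area within the on-disk inode -- hence DATA_INLINE and
+ * NOT_ALIGNED, and LAST because the inline area is the whole file.
+ */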
+int f2fs_inline_data_fiemap(struct inode *inode,
+               struct fiemap_extent_info *fieinfo, __u64 start, __u64 len)
+{
+       __u64 byteaddr, ilen;
+       __u32 flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED |
+               FIEMAP_EXTENT_LAST;
+       struct node_info ni;
+       struct page *ipage;
+       int err = 0;
+
+       ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
+       if (IS_ERR(ipage))
+               return PTR_ERR(ipage);
+
+       if (!f2fs_has_inline_data(inode)) {
+               err = -EAGAIN;
+               goto out;
+       }
+
+       ilen = min_t(size_t, MAX_INLINE_DATA, i_size_read(inode));
+       if (start >= ilen)
+               goto out;
+       if (start + len < ilen)
+               ilen = start + len;
+       ilen -= start;
+
+       get_node_info(F2FS_I_SB(inode), inode->i_ino, &ni);
+       byteaddr = (__u64)ni.blk_addr << inode->i_sb->s_blocksize_bits;
+       byteaddr += (char *)inline_data_addr(ipage) - (char *)F2FS_INODE(ipage);
+       err = fiemap_fill_next_extent(fieinfo, start, byteaddr, ilen, flags);
+out:
+       f2fs_put_page(ipage, 1);
+       return err;
+}
index 91ac7f9d88eeaf84a86f38bb07866095910d8620..79936608c9b760d0e1ea758560dd4785a7af6b3c 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/fs.h>
 #include <linux/f2fs_fs.h>
 #include <linux/buffer_head.h>
+#include <linux/backing-dev.h>
 #include <linux/writeback.h>
 
 #include "f2fs.h"
 
 #include <trace/events/f2fs.h>
 
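+/*
+ * Descriptive note (behaviour inferred from f2fs_inode_dirtied(), which is
+ * not shown in this hunk): it reports true when the inode was already on
+ * f2fs's own dirty list, so the VFS is only told about the first transition
+ * to dirty.
+ */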
+void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
+{
+       if (f2fs_inode_dirtied(inode, sync))
+               return;
+
+       mark_inode_dirty_sync(inode);
+}
+
 void f2fs_set_inode_flags(struct inode *inode)
 {
        unsigned int flags = F2FS_I(inode)->i_flags;
-
-       inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE |
-                       S_NOATIME | S_DIRSYNC);
+       unsigned int new_fl = 0;
 
        if (flags & FS_SYNC_FL)
-               inode->i_flags |= S_SYNC;
+               new_fl |= S_SYNC;
        if (flags & FS_APPEND_FL)
-               inode->i_flags |= S_APPEND;
+               new_fl |= S_APPEND;
        if (flags & FS_IMMUTABLE_FL)
-               inode->i_flags |= S_IMMUTABLE;
+               new_fl |= S_IMMUTABLE;
        if (flags & FS_NOATIME_FL)
-               inode->i_flags |= S_NOATIME;
+               new_fl |= S_NOATIME;
        if (flags & FS_DIRSYNC_FL)
-               inode->i_flags |= S_DIRSYNC;
+               new_fl |= S_DIRSYNC;
+       set_mask_bits(&inode->i_flags,
+               S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC, new_fl);
+       f2fs_mark_inode_dirty_sync(inode, false);
+}
+
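+/*
+ * Device numbers are stashed in the otherwise-unused block address slots of
+ * a special inode, following the classic ext2-style convention: a number
+ * that fits the old 16-bit encoding goes in i_addr[0]; otherwise i_addr[0]
+ * stays zero as a marker and the 32-bit new_encode_dev() value goes in
+ * i_addr[1] (see __set_inode_rdev() below).
+ */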
+static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
+{
+       if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
+                       S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
+               if (ri->i_addr[0])
+                       inode->i_rdev =
+                               old_decode_dev(le32_to_cpu(ri->i_addr[0]));
+               else
+                       inode->i_rdev =
+                               new_decode_dev(le32_to_cpu(ri->i_addr[1]));
+       }
+}
+
+static bool __written_first_block(struct f2fs_inode *ri)
+{
+       block_t addr = le32_to_cpu(ri->i_addr[0]);
+
+       if (addr != NEW_ADDR && addr != NULL_ADDR)
+               return true;
+       return false;
+}
+
+static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
+{
+       if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
+               if (old_valid_dev(inode->i_rdev)) {
+                       ri->i_addr[0] =
+                               cpu_to_le32(old_encode_dev(inode->i_rdev));
+                       ri->i_addr[1] = 0;
+               } else {
+                       ri->i_addr[0] = 0;
+                       ri->i_addr[1] =
+                               cpu_to_le32(new_encode_dev(inode->i_rdev));
+                       ri->i_addr[2] = 0;
+               }
+       }
+}
+
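+/*
+ * Scan the inline data area word by word; any non-zero word means data was
+ * written without FI_DATA_EXIST reaching disk (e.g. across a crash), so the
+ * flag is restored and the raw inode re-marked dirty.
+ */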
+static void __recover_inline_status(struct inode *inode, struct page *ipage)
+{
+       void *inline_data = inline_data_addr(ipage);
+       __le32 *start = inline_data;
+       __le32 *end = start + MAX_INLINE_DATA / sizeof(__le32);
+
+       while (start < end) {
+               if (*start++) {
+                       f2fs_wait_on_page_writeback(ipage, NODE, true);
+
+                       set_inode_flag(inode, FI_DATA_EXIST);
+                       set_raw_inline(inode, F2FS_INODE(ipage));
+                       set_page_dirty(ipage);
+                       return;
+               }
+       }
 }
 
 static int do_read_inode(struct inode *inode)
 {
-       struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct f2fs_inode_info *fi = F2FS_I(inode);
        struct page *node_page;
-       struct f2fs_node *rn;
        struct f2fs_inode *ri;
 
        /* Check if ino is within scope */
        if (check_nid_range(sbi, inode->i_ino)) {
                f2fs_msg(inode->i_sb, KERN_ERR, "bad inode number: %lu",
                         (unsigned long) inode->i_ino);
+               WARN_ON(1);
                return -EINVAL;
        }
 
@@ -56,8 +123,7 @@ static int do_read_inode(struct inode *inode)
        if (IS_ERR(node_page))
                return PTR_ERR(node_page);
 
-       rn = page_address(node_page);
-       ri = &(rn->i);
+       ri = F2FS_INODE(node_page);
 
        inode->i_mode = le16_to_cpu(ri->i_mode);
        i_uid_write(inode, le32_to_cpu(ri->i_uid));
@@ -73,10 +139,6 @@ static int do_read_inode(struct inode *inode)
        inode->i_ctime.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
        inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
        inode->i_generation = le32_to_cpu(ri->i_generation);
-       if (ri->i_addr[0])
-               inode->i_rdev = old_decode_dev(le32_to_cpu(ri->i_addr[0]));
-       else
-               inode->i_rdev = new_decode_dev(le32_to_cpu(ri->i_addr[1]));
 
        fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
        fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
@@ -84,8 +146,32 @@ static int do_read_inode(struct inode *inode)
        fi->flags = 0;
        fi->i_advise = ri->i_advise;
        fi->i_pino = le32_to_cpu(ri->i_pino);
-       get_extent_info(&fi->ext, ri->i_ext);
+       fi->i_dir_level = ri->i_dir_level;
+
+       if (f2fs_init_extent_tree(inode, &ri->i_ext))
+               set_page_dirty(node_page);
+
+       get_inline_info(inode, ri);
+
+       /* check whether inline data exists */
+       if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
+               __recover_inline_status(inode, node_page);
+
+       /* get rdev by using inline_info */
+       __get_inode_rdev(inode, ri);
+
+       if (__written_first_block(ri))
+               set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
+
+       if (!need_inode_block_update(sbi, inode->i_ino))
+               fi->last_disk_size = inode->i_size;
+
        f2fs_put_page(node_page, 1);
+
+       stat_inc_inline_xattr(inode);
+       stat_inc_inline_inode(inode);
+       stat_inc_inline_dir(inode);
+
        return 0;
 }
 
@@ -109,12 +195,6 @@ struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
        ret = do_read_inode(inode);
        if (ret)
                goto bad_inode;
-
-       if (!sbi->por_doing && inode->i_nlink == 0) {
-               ret = -ENOENT;
-               goto bad_inode;
-       }
-
 make_now:
        if (ino == F2FS_NODE_INO(sbi)) {
                inode->i_mapping->a_ops = &f2fs_node_aops;
@@ -130,10 +210,13 @@ make_now:
                inode->i_op = &f2fs_dir_inode_operations;
                inode->i_fop = &f2fs_dir_operations;
                inode->i_mapping->a_ops = &f2fs_dblock_aops;
-               mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER_MOVABLE |
-                               __GFP_ZERO);
+               mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_HIGH_ZERO);
        } else if (S_ISLNK(inode->i_mode)) {
-               inode->i_op = &f2fs_symlink_inode_operations;
+               if (f2fs_encrypted_inode(inode))
+                       inode->i_op = &f2fs_encrypted_symlink_inode_operations;
+               else
+                       inode->i_op = &f2fs_symlink_inode_operations;
+               inode_nohighmem(inode);
                inode->i_mapping->a_ops = &f2fs_dblock_aops;
        } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
                        S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
@@ -153,15 +236,30 @@ bad_inode:
        return ERR_PTR(ret);
 }
 
-void update_inode(struct inode *inode, struct page *node_page)
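+/*
+ * -ENOMEM from f2fs_iget() is treated as transient here: back off behind
+ * writeback congestion and retry rather than failing the caller (useful in
+ * paths such as recovery that cannot tolerate spurious allocation failures;
+ * the callers are an assumption, as they are not shown in this hunk).
+ */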
+struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino)
+{
+       struct inode *inode;
+retry:
+       inode = f2fs_iget(sb, ino);
+       if (IS_ERR(inode)) {
+               if (PTR_ERR(inode) == -ENOMEM) {
+                       congestion_wait(BLK_RW_ASYNC, HZ/50);
+                       goto retry;
+               }
+       }
+       return inode;
+}
+
+int update_inode(struct inode *inode, struct page *node_page)
 {
-       struct f2fs_node *rn;
        struct f2fs_inode *ri;
+       struct extent_tree *et = F2FS_I(inode)->extent_tree;
 
-       wait_on_page_writeback(node_page);
+       f2fs_inode_synced(inode);
 
-       rn = page_address(node_page);
-       ri = &(rn->i);
+       f2fs_wait_on_page_writeback(node_page, NODE, true);
+
+       ri = F2FS_INODE(node_page);
 
        ri->i_mode = cpu_to_le16(inode->i_mode);
        ri->i_advise = F2FS_I(inode)->i_advise;
@@ -170,7 +268,15 @@ void update_inode(struct inode *inode, struct page *node_page)
        ri->i_links = cpu_to_le32(inode->i_nlink);
        ri->i_size = cpu_to_le64(i_size_read(inode));
        ri->i_blocks = cpu_to_le64(inode->i_blocks);
-       set_raw_extent(&F2FS_I(inode)->ext, &ri->i_ext);
+
+       if (et) {
+               read_lock(&et->lock);
+               set_raw_extent(&et->largest, &ri->i_ext);
+               read_unlock(&et->lock);
+       } else {
+               memset(&ri->i_ext, 0, sizeof(ri->i_ext));
+       }
+       set_raw_inline(inode, ri);
 
        ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
        ri->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
@@ -183,58 +289,59 @@ void update_inode(struct inode *inode, struct page *node_page)
        ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags);
        ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
        ri->i_generation = cpu_to_le32(inode->i_generation);
+       ri->i_dir_level = F2FS_I(inode)->i_dir_level;
 
-       if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
-               if (old_valid_dev(inode->i_rdev)) {
-                       ri->i_addr[0] =
-                               cpu_to_le32(old_encode_dev(inode->i_rdev));
-                       ri->i_addr[1] = 0;
-               } else {
-                       ri->i_addr[0] = 0;
-                       ri->i_addr[1] =
-                               cpu_to_le32(new_encode_dev(inode->i_rdev));
-                       ri->i_addr[2] = 0;
-               }
-       }
-
+       __set_inode_rdev(inode, ri);
        set_cold_node(inode, node_page);
-       set_page_dirty(node_page);
+
+       /* deleted inode */
+       if (inode->i_nlink == 0)
+               clear_inline_node(node_page);
+
+       return set_page_dirty(node_page);
 }
 
 int update_inode_page(struct inode *inode)
 {
-       struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct page *node_page;
-
+       int ret = 0;
+retry:
        node_page = get_node_page(sbi, inode->i_ino);
-       if (IS_ERR(node_page))
-               return PTR_ERR(node_page);
-
-       update_inode(inode, node_page);
+       if (IS_ERR(node_page)) {
+               int err = PTR_ERR(node_page);
+               if (err == -ENOMEM) {
+                       cond_resched();
+                       goto retry;
+               } else if (err != -ENOENT) {
+                       f2fs_stop_checkpoint(sbi, false);
+               }
+               f2fs_inode_synced(inode);
+               return 0;
+       }
+       ret = update_inode(inode, node_page);
        f2fs_put_page(node_page, 1);
-       return 0;
+       return ret;
 }
 
 int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
 {
-       struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
-       int ret, ilock;
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 
        if (inode->i_ino == F2FS_NODE_INO(sbi) ||
                        inode->i_ino == F2FS_META_INO(sbi))
                return 0;
 
-       if (wbc)
-               f2fs_balance_fs(sbi);
+       if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
+               return 0;
 
        /*
-        * We need to lock here to prevent from producing dirty node pages
+        * We need to balance fs here to prevent producing dirty node pages
         * during the urgent cleaning time when running out of free sections.
         */
-       ilock = mutex_lock_op(sbi);
-       ret = update_inode_page(inode);
-       mutex_unlock_op(sbi, ilock);
-       return ret;
+       if (update_inode_page(inode) && wbc && wbc->nr_to_write)
+               f2fs_balance_fs(sbi, true);
+       return 0;
 }
 
 /*
@@ -242,34 +349,130 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
  */
 void f2fs_evict_inode(struct inode *inode)
 {
-       struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
-       int ilock;
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       nid_t xnid = F2FS_I(inode)->i_xattr_nid;
+       int err = 0;
+
+       /* some remaining atomic pages should be discarded */
+       if (f2fs_is_atomic_file(inode))
+               drop_inmem_pages(inode);
 
        trace_f2fs_evict_inode(inode);
        truncate_inode_pages(&inode->i_data, 0);
 
        if (inode->i_ino == F2FS_NODE_INO(sbi) ||
                        inode->i_ino == F2FS_META_INO(sbi))
-               goto no_delete;
+               goto out_clear;
 
-       BUG_ON(atomic_read(&F2FS_I(inode)->dirty_dents));
-       remove_dirty_dir_inode(inode);
+       f2fs_bug_on(sbi, get_dirty_pages(inode));
+       remove_dirty_inode(inode);
+
+       f2fs_destroy_extent_tree(inode);
 
        if (inode->i_nlink || is_bad_inode(inode))
                goto no_delete;
 
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+       if (time_to_inject(sbi, FAULT_EVICT_INODE))
+               goto no_delete;
+#endif
+
+       remove_ino_entry(sbi, inode->i_ino, APPEND_INO);
+       remove_ino_entry(sbi, inode->i_ino, UPDATE_INO);
+
        sb_start_intwrite(inode->i_sb);
-       set_inode_flag(F2FS_I(inode), FI_NO_ALLOC);
+       set_inode_flag(inode, FI_NO_ALLOC);
        i_size_write(inode, 0);
-
+retry:
        if (F2FS_HAS_BLOCKS(inode))
-               f2fs_truncate(inode);
+               err = f2fs_truncate(inode);
+
+       if (!err) {
+               f2fs_lock_op(sbi);
+               err = remove_inode_page(inode);
+               f2fs_unlock_op(sbi);
+               if (err == -ENOENT)
+                       err = 0;
+       }
 
-       ilock = mutex_lock_op(sbi);
-       remove_inode_page(inode);
-       mutex_unlock_op(sbi, ilock);
+       /* give more chances in the ENOMEM case */
+       if (err == -ENOMEM) {
+               err = 0;
+               goto retry;
+       }
 
+       if (err)
+               update_inode_page(inode);
        sb_end_intwrite(inode->i_sb);
 no_delete:
+       stat_dec_inline_xattr(inode);
+       stat_dec_inline_dir(inode);
+       stat_dec_inline_inode(inode);
+
+       invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino, inode->i_ino);
+       if (xnid)
+               invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
+       if (inode->i_nlink) {
+               if (is_inode_flag_set(inode, FI_APPEND_WRITE))
+                       add_ino_entry(sbi, inode->i_ino, APPEND_INO);
+               if (is_inode_flag_set(inode, FI_UPDATE_WRITE))
+                       add_ino_entry(sbi, inode->i_ino, UPDATE_INO);
+       }
+       if (is_inode_flag_set(inode, FI_FREE_NID)) {
+               alloc_nid_failed(sbi, inode->i_ino);
+               clear_inode_flag(inode, FI_FREE_NID);
+       }
+       f2fs_bug_on(sbi, err &&
+               !exist_written_data(sbi, inode->i_ino, ORPHAN_INO));
+out_clear:
+       fscrypt_put_encryption_info(inode, NULL);
        clear_inode(inode);
 }
+
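+/*
+ * Usage sketch (pattern matching the namei paths later in this patch):
+ *
+ *	f2fs_lock_op(sbi);
+ *	err = f2fs_add_link(dentry, inode);
+ *	if (err)
+ *		goto out;	(still holding f2fs_lock_op())
+ *	f2fs_unlock_op(sbi);
+ *	...
+ * out:
+ *	handle_failed_inode(inode);	(drops f2fs_lock_op() itself)
+ */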
+/* caller should call f2fs_lock_op() */
+void handle_failed_inode(struct inode *inode)
+{
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       struct node_info ni;
+
+       /*
+        * clear nlink of the inode in order to release its resources
+        * immediately.
+        */
+       clear_nlink(inode);
+
+       /*
+        * we must call this to avoid the inode remaining dirty, which would
+        * result in a panic when flushing dirty inodes in gdirty_list.
+        */
+       update_inode_page(inode);
+
+       /* don't mark this as a bad inode, since it becomes a regular file. */
+       unlock_new_inode(inode);
+
+       /*
+        * Note: we should add the inode to the orphan list before
+        * f2fs_unlock_op(), so that we don't lose this orphan if a
+        * checkpoint is followed by a sudden power-off.
+        */
+       get_node_info(sbi, inode->i_ino, &ni);
+
+       if (ni.blk_addr != NULL_ADDR) {
+               int err = acquire_orphan_inode(sbi);
+               if (err) {
+                       set_sbi_flag(sbi, SBI_NEED_FSCK);
+                       f2fs_msg(sbi->sb, KERN_WARNING,
+                               "Too many orphan inodes, run fsck to fix.");
+               } else {
+                       add_orphan_inode(inode);
+               }
+               alloc_nid_done(sbi, inode->i_ino);
+       } else {
+               set_inode_flag(inode, FI_FREE_NID);
+       }
+
+       f2fs_unlock_op(sbi);
+
+       /* iput will drop the inode object */
+       iput(inode);
+}
index 47abc9722b17abfae9656b3d5d52360fc8e72ad8..1f89d738dcafc28493b90fd33c9ffae24158635b 100644 (file)
@@ -9,10 +9,13 @@
  * published by the Free Software Foundation.
  */
 #include <linux/fs.h>
+#include <linux/namei.h>
 #include <linux/f2fs_fs.h>
 #include <linux/pagemap.h>
 #include <linux/sched.h>
 #include <linux/ctype.h>
+#include <linux/dcache.h>
+#include <linux/namei.h>
 
 #include "f2fs.h"
 #include "node.h"
 
 static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
 {
-       struct super_block *sb = dir->i_sb;
-       struct f2fs_sb_info *sbi = F2FS_SB(sb);
+       struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
        nid_t ino;
        struct inode *inode;
        bool nid_free = false;
-       int err, ilock;
+       int err;
 
-       inode = new_inode(sb);
+       inode = new_inode(dir->i_sb);
        if (!inode)
                return ERR_PTR(-ENOMEM);
 
-       ilock = mutex_lock_op(sbi);
+       f2fs_lock_op(sbi);
        if (!alloc_nid(sbi, &ino)) {
-               mutex_unlock_op(sbi, ilock);
+               f2fs_unlock_op(sbi);
                err = -ENOSPC;
                goto fail;
        }
-       mutex_unlock_op(sbi, ilock);
+       f2fs_unlock_op(sbi);
 
-       inode->i_uid = current_fsuid();
-
-       if (dir->i_mode & S_ISGID) {
-               inode->i_gid = dir->i_gid;
-               if (S_ISDIR(mode))
-                       mode |= S_ISGID;
-       } else {
-               inode->i_gid = current_fsgid();
-       }
+       inode_init_owner(inode, dir, mode);
 
        inode->i_ino = ino;
-       inode->i_mode = mode;
        inode->i_blocks = 0;
-       inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+       inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
        inode->i_generation = sbi->s_next_generation++;
 
        err = insert_inode_locked(inode);
        if (err) {
                err = -EINVAL;
                nid_free = true;
-               goto out;
+               goto fail;
        }
+
+       /* If the directory is encrypted, then we should encrypt the inode. */
+       if (f2fs_encrypted_inode(dir) && f2fs_may_encrypt(inode))
+               f2fs_set_encrypted_inode(inode);
+
+       set_inode_flag(inode, FI_NEW_INODE);
+
+       if (test_opt(sbi, INLINE_XATTR))
+               set_inode_flag(inode, FI_INLINE_XATTR);
+       if (test_opt(sbi, INLINE_DATA) && f2fs_may_inline_data(inode))
+               set_inode_flag(inode, FI_INLINE_DATA);
+       if (f2fs_may_inline_dentry(inode))
+               set_inode_flag(inode, FI_INLINE_DENTRY);
+
+       f2fs_init_extent_tree(inode, NULL);
+
+       stat_inc_inline_xattr(inode);
+       stat_inc_inline_inode(inode);
+       stat_inc_inline_dir(inode);
+
        trace_f2fs_new_inode(inode, 0);
-       mark_inode_dirty(inode);
        return inode;
 
-out:
-       clear_nlink(inode);
-       unlock_new_inode(inode);
 fail:
        trace_f2fs_new_inode(inode, err);
        make_bad_inode(inode);
-       iput(inode);
        if (nid_free)
-               alloc_nid_failed(sbi, ino);
+               set_inode_flag(inode, FI_FREE_NID);
+       iput(inode);
        return ERR_PTR(err);
 }
 
@@ -83,21 +92,23 @@ static int is_multimedia_file(const unsigned char *s, const char *sub)
 {
        size_t slen = strlen(s);
        size_t sublen = strlen(sub);
-       int ret;
+       int i;
 
-       if (sublen > slen)
+       /*
+        * the filename format of a multimedia file is expected to be:
+        * "filename + '.' + extension + (optional: '.' + temp extension)".
+        */
+       if (slen < sublen + 2)
                return 0;
 
-       ret = memcmp(s + slen - sublen, sub, sublen);
-       if (ret) {      /* compare upper case */
-               int i;
-               char upper_sub[8];
-               for (i = 0; i < sublen && i < sizeof(upper_sub); i++)
-                       upper_sub[i] = toupper(sub[i]);
-               return !memcmp(s + slen - sublen, upper_sub, sublen);
+       for (i = 1; i < slen - sublen; i++) {
+               if (s[i] != '.')
+                       continue;
+               if (!strncasecmp(s + i + 1, sub, sublen))
+                       return 1;
        }
 
-       return !ret;
+       return 0;
 }
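+/*
+ * Illustrative example: with sub == "mp4", both "movie.mp4" and
+ * "movie.MP4.tmp" match -- the scan accepts the extension anywhere after a
+ * '.', case-insensitively, which covers the optional temp-extension form
+ * described above.
+ */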
 
 /*
@@ -112,7 +123,7 @@ static inline void set_cold_files(struct f2fs_sb_info *sbi, struct inode *inode,
        int count = le32_to_cpu(sbi->raw_super->extension_count);
        for (i = 0; i < count; i++) {
                if (is_multimedia_file(name, extlist[i])) {
-                       set_cold_file(inode);
+                       file_set_cold(inode);
                        break;
                }
        }
@@ -121,13 +132,10 @@ static inline void set_cold_files(struct f2fs_sb_info *sbi, struct inode *inode,
 static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
                                                bool excl)
 {
-       struct super_block *sb = dir->i_sb;
-       struct f2fs_sb_info *sbi = F2FS_SB(sb);
+       struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
        struct inode *inode;
        nid_t ino = 0;
-       int err, ilock;
-
-       f2fs_balance_fs(sbi);
+       int err;
 
        inode = f2fs_new_inode(dir, mode);
        if (IS_ERR(inode))
@@ -141,69 +149,123 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
        inode->i_mapping->a_ops = &f2fs_dblock_aops;
        ino = inode->i_ino;
 
-       ilock = mutex_lock_op(sbi);
+       f2fs_balance_fs(sbi, true);
+
+       f2fs_lock_op(sbi);
        err = f2fs_add_link(dentry, inode);
-       mutex_unlock_op(sbi, ilock);
        if (err)
                goto out;
+       f2fs_unlock_op(sbi);
 
        alloc_nid_done(sbi, ino);
 
-       if (!sbi->por_doing)
-               d_instantiate(dentry, inode);
+       d_instantiate(dentry, inode);
        unlock_new_inode(inode);
+
+       if (IS_DIRSYNC(dir))
+               f2fs_sync_fs(sbi->sb, 1);
        return 0;
 out:
-       clear_nlink(inode);
-       unlock_new_inode(inode);
-       make_bad_inode(inode);
-       iput(inode);
-       alloc_nid_failed(sbi, ino);
+       handle_failed_inode(inode);
        return err;
 }
 
 static int f2fs_link(struct dentry *old_dentry, struct inode *dir,
                struct dentry *dentry)
 {
-       struct inode *inode = old_dentry->d_inode;
-       struct super_block *sb = dir->i_sb;
-       struct f2fs_sb_info *sbi = F2FS_SB(sb);
-       int err, ilock;
+       struct inode *inode = d_inode(old_dentry);
+       struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
+       int err;
 
-       f2fs_balance_fs(sbi);
+       if (f2fs_encrypted_inode(dir) &&
+                       !fscrypt_has_permitted_context(dir, inode))
+               return -EPERM;
 
-       inode->i_ctime = CURRENT_TIME;
-       atomic_inc(&inode->i_count);
+       f2fs_balance_fs(sbi, true);
 
-       set_inode_flag(F2FS_I(inode), FI_INC_LINK);
-       ilock = mutex_lock_op(sbi);
+       inode->i_ctime = current_time(inode);
+       ihold(inode);
+
+       set_inode_flag(inode, FI_INC_LINK);
+       f2fs_lock_op(sbi);
        err = f2fs_add_link(dentry, inode);
-       mutex_unlock_op(sbi, ilock);
        if (err)
                goto out;
-
-       /*
-        * This file should be checkpointed during fsync.
-        * We lost i_pino from now on.
-        */
-       set_cp_file(inode);
+       f2fs_unlock_op(sbi);
 
        d_instantiate(dentry, inode);
+
+       if (IS_DIRSYNC(dir))
+               f2fs_sync_fs(sbi->sb, 1);
        return 0;
 out:
-       clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
-       make_bad_inode(inode);
+       clear_inode_flag(inode, FI_INC_LINK);
        iput(inode);
+       f2fs_unlock_op(sbi);
        return err;
 }
 
 struct dentry *f2fs_get_parent(struct dentry *child)
 {
        struct qstr dotdot = QSTR_INIT("..", 2);
-       unsigned long ino = f2fs_inode_by_name(child->d_inode, &dotdot);
-       if (!ino)
+       struct page *page;
+       unsigned long ino = f2fs_inode_by_name(d_inode(child), &dotdot, &page);
+       if (!ino) {
+               if (IS_ERR(page))
+                       return ERR_CAST(page);
                return ERR_PTR(-ENOENT);
-       return d_obtain_alias(f2fs_iget(child->d_inode->i_sb, ino));
+       }
+       return d_obtain_alias(f2fs_iget(child->d_sb, ino));
+}
+
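+/*
+ * A directory flagged FI_INLINE_DOTS may lack on-disk "." and ".." entries;
+ * this helper lazily adds whichever of the two is missing during lookup and
+ * clears the flag once both exist (skipped on read-only mounts).
+ */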
+static int __recover_dot_dentries(struct inode *dir, nid_t pino)
+{
+       struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
+       struct qstr dot = QSTR_INIT(".", 1);
+       struct qstr dotdot = QSTR_INIT("..", 2);
+       struct f2fs_dir_entry *de;
+       struct page *page;
+       int err = 0;
+
+       if (f2fs_readonly(sbi->sb)) {
+               f2fs_msg(sbi->sb, KERN_INFO,
+                       "skip recovering inline_dots inode (ino:%lu, pino:%u) "
+                       "in readonly mountpoint", dir->i_ino, pino);
+               return 0;
+       }
+
+       f2fs_balance_fs(sbi, true);
+
+       f2fs_lock_op(sbi);
+
+       de = f2fs_find_entry(dir, &dot, &page);
+       if (de) {
+               f2fs_dentry_kunmap(dir, page);
+               f2fs_put_page(page, 0);
+       } else if (IS_ERR(page)) {
+               err = PTR_ERR(page);
+               goto out;
+       } else {
+               err = __f2fs_add_link(dir, &dot, NULL, dir->i_ino, S_IFDIR);
+               if (err)
+                       goto out;
+       }
+
+       de = f2fs_find_entry(dir, &dotdot, &page);
+       if (de) {
+               f2fs_dentry_kunmap(dir, page);
+               f2fs_put_page(page, 0);
+       } else if (IS_ERR(page)) {
+               err = PTR_ERR(page);
+       } else {
+               err = __f2fs_add_link(dir, &dotdot, NULL, pino, S_IFDIR);
+       }
+out:
+       if (!err)
+               clear_inode_flag(dir, FI_INLINE_DOTS);
+
+       f2fs_unlock_op(sbi);
+       return err;
 }
 
 static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
@@ -212,105 +274,239 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
        struct inode *inode = NULL;
        struct f2fs_dir_entry *de;
        struct page *page;
+       nid_t ino;
+       int err = 0;
+       unsigned int root_ino = F2FS_ROOT_INO(F2FS_I_SB(dir));
+
+       if (f2fs_encrypted_inode(dir)) {
+               int res = fscrypt_get_encryption_info(dir);
+
+               /*
+                * DCACHE_ENCRYPTED_WITH_KEY is set if the dentry is
+                * created while the directory was encrypted and we
+                * have access to the key.
+                */
+               if (fscrypt_has_encryption_key(dir))
+                       fscrypt_set_encrypted_dentry(dentry);
+               fscrypt_set_d_op(dentry);
+               if (res && res != -ENOKEY)
+                       return ERR_PTR(res);
+       }
 
        if (dentry->d_name.len > F2FS_NAME_LEN)
                return ERR_PTR(-ENAMETOOLONG);
 
        de = f2fs_find_entry(dir, &dentry->d_name, &page);
-       if (de) {
-               nid_t ino = le32_to_cpu(de->ino);
-               kunmap(page);
-               f2fs_put_page(page, 0);
+       if (!de) {
+               if (IS_ERR(page))
+                       return (struct dentry *)page;
+               return d_splice_alias(inode, dentry);
+       }
+
+       ino = le32_to_cpu(de->ino);
+       f2fs_dentry_kunmap(dir, page);
+       f2fs_put_page(page, 0);
+
+       inode = f2fs_iget(dir->i_sb, ino);
+       if (IS_ERR(inode))
+               return ERR_CAST(inode);
 
-               inode = f2fs_iget(dir->i_sb, ino);
-               if (IS_ERR(inode))
-                       return ERR_CAST(inode);
+       if ((dir->i_ino == root_ino) && f2fs_has_inline_dots(dir)) {
+               err = __recover_dot_dentries(dir, root_ino);
+               if (err)
+                       goto err_out;
        }
 
+       if (f2fs_has_inline_dots(inode)) {
+               err = __recover_dot_dentries(inode, dir->i_ino);
+               if (err)
+                       goto err_out;
+       }
+       if (!IS_ERR(inode) && f2fs_encrypted_inode(dir) &&
+                       (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) &&
+                       !fscrypt_has_permitted_context(dir, inode)) {
+               bool nokey = f2fs_encrypted_inode(inode) &&
+                       !fscrypt_has_encryption_key(inode);
+               err = nokey ? -ENOKEY : -EPERM;
+               goto err_out;
+       }
        return d_splice_alias(inode, dentry);
+
+err_out:
+       iput(inode);
+       return ERR_PTR(err);
 }
 
 static int f2fs_unlink(struct inode *dir, struct dentry *dentry)
 {
-       struct super_block *sb = dir->i_sb;
-       struct f2fs_sb_info *sbi = F2FS_SB(sb);
-       struct inode *inode = dentry->d_inode;
+       struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
+       struct inode *inode = d_inode(dentry);
        struct f2fs_dir_entry *de;
        struct page *page;
        int err = -ENOENT;
-       int ilock;
 
        trace_f2fs_unlink_enter(dir, dentry);
-       f2fs_balance_fs(sbi);
 
        de = f2fs_find_entry(dir, &dentry->d_name, &page);
-       if (!de)
+       if (!de) {
+               if (IS_ERR(page))
+                       err = PTR_ERR(page);
                goto fail;
+       }
+
+       f2fs_balance_fs(sbi, true);
 
-       err = check_orphan_space(sbi);
+       f2fs_lock_op(sbi);
+       err = acquire_orphan_inode(sbi);
        if (err) {
-               kunmap(page);
+               f2fs_unlock_op(sbi);
+               f2fs_dentry_kunmap(dir, page);
                f2fs_put_page(page, 0);
                goto fail;
        }
+       f2fs_delete_entry(de, page, dir, inode);
+       f2fs_unlock_op(sbi);
 
-       ilock = mutex_lock_op(sbi);
-       f2fs_delete_entry(de, page, inode);
-       mutex_unlock_op(sbi, ilock);
-
-       /* In order to evict this inode,  we set it dirty */
-       mark_inode_dirty(inode);
+       if (IS_DIRSYNC(dir))
+               f2fs_sync_fs(sbi->sb, 1);
 fail:
        trace_f2fs_unlink_exit(inode, err);
        return err;
 }
 
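+/*
+ * Wrapper note: a crash can leave a symlink inode whose data block was never
+ * flushed (see the comment in f2fs_symlink() below), so a zero-length target
+ * is rejected with -ENOENT instead of being handed to the VFS.
+ */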
+static void *f2fs_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+       struct page *page;
+       char *link;
+
+       page = page_follow_link_light(dentry, nd);
+       if (IS_ERR(page))
+               return page;
+
+       link = nd_get_link(nd);
+       if (IS_ERR(link))
+               return link;
+
+       /* this is broken symlink case */
+       if (*link == 0) {
+               kunmap(page);
+               page_cache_release(page);
+               return ERR_PTR(-ENOENT);
+       }
+       return page;
+}
+
 static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
                                        const char *symname)
 {
-       struct super_block *sb = dir->i_sb;
-       struct f2fs_sb_info *sbi = F2FS_SB(sb);
+       struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
        struct inode *inode;
-       size_t symlen = strlen(symname) + 1;
-       int err, ilock;
+       size_t len = strlen(symname);
+       struct fscrypt_str disk_link = FSTR_INIT((char *)symname, len + 1);
+       struct fscrypt_symlink_data *sd = NULL;
+       int err;
 
-       f2fs_balance_fs(sbi);
+       if (f2fs_encrypted_inode(dir)) {
+               err = fscrypt_get_encryption_info(dir);
+               if (err)
+                       return err;
+
+               if (!fscrypt_has_encryption_key(dir))
+                       return -EPERM;
+
+               disk_link.len = (fscrypt_fname_encrypted_size(dir, len) +
+                               sizeof(struct fscrypt_symlink_data));
+       }
+
+       if (disk_link.len > dir->i_sb->s_blocksize)
+               return -ENAMETOOLONG;
 
        inode = f2fs_new_inode(dir, S_IFLNK | S_IRWXUGO);
        if (IS_ERR(inode))
                return PTR_ERR(inode);
 
-       inode->i_op = &f2fs_symlink_inode_operations;
+       if (f2fs_encrypted_inode(inode))
+               inode->i_op = &f2fs_encrypted_symlink_inode_operations;
+       else
+               inode->i_op = &f2fs_symlink_inode_operations;
+       inode_nohighmem(inode);
        inode->i_mapping->a_ops = &f2fs_dblock_aops;
 
-       ilock = mutex_lock_op(sbi);
+       f2fs_balance_fs(sbi, true);
+
+       f2fs_lock_op(sbi);
        err = f2fs_add_link(dentry, inode);
-       mutex_unlock_op(sbi, ilock);
        if (err)
                goto out;
-
-       err = page_symlink(inode, symname, symlen);
+       f2fs_unlock_op(sbi);
        alloc_nid_done(sbi, inode->i_ino);
 
+       if (f2fs_encrypted_inode(inode)) {
+               struct qstr istr = QSTR_INIT(symname, len);
+               struct fscrypt_str ostr;
+
+               sd = kzalloc(disk_link.len, GFP_NOFS);
+               if (!sd) {
+                       err = -ENOMEM;
+                       goto err_out;
+               }
+
+               err = fscrypt_get_encryption_info(inode);
+               if (err)
+                       goto err_out;
+
+               if (!fscrypt_has_encryption_key(inode)) {
+                       err = -EPERM;
+                       goto err_out;
+               }
+
+               ostr.name = sd->encrypted_path;
+               ostr.len = disk_link.len;
+               err = fscrypt_fname_usr_to_disk(inode, &istr, &ostr);
+               if (err)
+                       goto err_out;
+
+               sd->len = cpu_to_le16(ostr.len);
+               disk_link.name = (char *)sd;
+       }
+
+       err = page_symlink(inode, disk_link.name, disk_link.len);
+
+err_out:
        d_instantiate(dentry, inode);
        unlock_new_inode(inode);
+
+       /*
+        * Let's flush the symlink data in order to avoid a broken symlink as
+        * much as possible. An fsync would be the best way, but there is no
+        * way to get a file descriptor here to flush with.
+        *
+        * Note that an fsync on the directory is still needed to make this
+        * recoverable. If the symlink path is stored in inline_data, there is
+        * no performance regression.
+        */
+       if (!err) {
+               filemap_write_and_wait_range(inode->i_mapping, 0,
+                                                       disk_link.len - 1);
+
+               if (IS_DIRSYNC(dir))
+                       f2fs_sync_fs(sbi->sb, 1);
+       } else {
+               f2fs_unlink(dir, dentry);
+       }
+
+       kfree(sd);
        return err;
 out:
-       clear_nlink(inode);
-       unlock_new_inode(inode);
-       make_bad_inode(inode);
-       iput(inode);
-       alloc_nid_failed(sbi, inode->i_ino);
+       handle_failed_inode(inode);
        return err;
 }
 
 static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
-       struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
+       struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
        struct inode *inode;
-       int err, ilock;
-
-       f2fs_balance_fs(sbi);
+       int err;
 
        inode = f2fs_new_inode(dir, S_IFDIR | mode);
        if (IS_ERR(inode))
@@ -319,35 +515,35 @@ static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
        inode->i_op = &f2fs_dir_inode_operations;
        inode->i_fop = &f2fs_dir_operations;
        inode->i_mapping->a_ops = &f2fs_dblock_aops;
-       mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);
+       mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_HIGH_ZERO);
 
-       set_inode_flag(F2FS_I(inode), FI_INC_LINK);
-       ilock = mutex_lock_op(sbi);
+       f2fs_balance_fs(sbi, true);
+
+       set_inode_flag(inode, FI_INC_LINK);
+       f2fs_lock_op(sbi);
        err = f2fs_add_link(dentry, inode);
-       mutex_unlock_op(sbi, ilock);
        if (err)
                goto out_fail;
+       f2fs_unlock_op(sbi);
 
        alloc_nid_done(sbi, inode->i_ino);
 
        d_instantiate(dentry, inode);
        unlock_new_inode(inode);
 
+       if (IS_DIRSYNC(dir))
+               f2fs_sync_fs(sbi->sb, 1);
        return 0;
 
 out_fail:
-       clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
-       clear_nlink(inode);
-       unlock_new_inode(inode);
-       make_bad_inode(inode);
-       iput(inode);
-       alloc_nid_failed(sbi, inode->i_ino);
+       clear_inode_flag(inode, FI_INC_LINK);
+       handle_failed_inode(inode);
        return err;
 }
 
 static int f2fs_rmdir(struct inode *dir, struct dentry *dentry)
 {
-       struct inode *inode = dentry->d_inode;
+       struct inode *inode = d_inode(dentry);
        if (f2fs_empty_dir(inode))
                return f2fs_unlink(dir, dentry);
        return -ENOTEMPTY;
@@ -356,17 +552,13 @@ static int f2fs_rmdir(struct inode *dir, struct dentry *dentry)
 static int f2fs_mknod(struct inode *dir, struct dentry *dentry,
                                umode_t mode, dev_t rdev)
 {
-       struct super_block *sb = dir->i_sb;
-       struct f2fs_sb_info *sbi = F2FS_SB(sb);
+       struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
        struct inode *inode;
        int err = 0;
-       int ilock;
 
        if (!new_valid_dev(rdev))
                return -EINVAL;
 
-       f2fs_balance_fs(sbi);
-
        inode = f2fs_new_inode(dir, mode);
        if (IS_ERR(inode))
                return PTR_ERR(inode);
@@ -374,56 +566,64 @@ static int f2fs_mknod(struct inode *dir, struct dentry *dentry,
        init_special_inode(inode, inode->i_mode, rdev);
        inode->i_op = &f2fs_special_inode_operations;
 
-       ilock = mutex_lock_op(sbi);
+       f2fs_balance_fs(sbi, true);
+
+       f2fs_lock_op(sbi);
        err = f2fs_add_link(dentry, inode);
-       mutex_unlock_op(sbi, ilock);
        if (err)
                goto out;
+       f2fs_unlock_op(sbi);
 
        alloc_nid_done(sbi, inode->i_ino);
+
        d_instantiate(dentry, inode);
        unlock_new_inode(inode);
+
+       if (IS_DIRSYNC(dir))
+               f2fs_sync_fs(sbi->sb, 1);
        return 0;
 out:
-       clear_nlink(inode);
-       unlock_new_inode(inode);
-       make_bad_inode(inode);
-       iput(inode);
-       alloc_nid_failed(sbi, inode->i_ino);
+       handle_failed_inode(inode);
        return err;
 }
 
 static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
                        struct inode *new_dir, struct dentry *new_dentry)
 {
-       struct super_block *sb = old_dir->i_sb;
-       struct f2fs_sb_info *sbi = F2FS_SB(sb);
-       struct inode *old_inode = old_dentry->d_inode;
-       struct inode *new_inode = new_dentry->d_inode;
+       struct f2fs_sb_info *sbi = F2FS_I_SB(old_dir);
+       struct inode *old_inode = d_inode(old_dentry);
+       struct inode *new_inode = d_inode(new_dentry);
        struct page *old_dir_page;
-       struct page *old_page;
+       struct page *old_page, *new_page;
        struct f2fs_dir_entry *old_dir_entry = NULL;
        struct f2fs_dir_entry *old_entry;
        struct f2fs_dir_entry *new_entry;
-       int err = -ENOENT, ilock = -1;
+       bool is_old_inline = f2fs_has_inline_dentry(old_dir);
+       int err = -ENOENT;
 
-       f2fs_balance_fs(sbi);
+       if ((old_dir != new_dir) && f2fs_encrypted_inode(new_dir) &&
+                       !fscrypt_has_permitted_context(new_dir, old_inode)) {
+               err = -EPERM;
+               goto out;
+       }
 
        old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page);
-       if (!old_entry)
+       if (!old_entry) {
+               if (IS_ERR(old_page))
+                       err = PTR_ERR(old_page);
                goto out;
+       }
 
        if (S_ISDIR(old_inode->i_mode)) {
-               err = -EIO;
                old_dir_entry = f2fs_parent_dir(old_inode, &old_dir_page);
-               if (!old_dir_entry)
+               if (!old_dir_entry) {
+                       if (IS_ERR(old_dir_page))
+                               err = PTR_ERR(old_dir_page);
                        goto out_old;
+               }
        }
 
-       ilock = mutex_lock_op(sbi);
-
        if (new_inode) {
-               struct page *new_page;
 
                err = -ENOTEMPTY;
                if (old_dir_entry && !f2fs_empty_dir(new_inode))
@@ -432,62 +632,210 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
                err = -ENOENT;
                new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name,
                                                &new_page);
-               if (!new_entry)
+               if (!new_entry) {
+                       if (IS_ERR(new_page))
+                               err = PTR_ERR(new_page);
                        goto out_dir;
+               }
+
+               f2fs_balance_fs(sbi, true);
+
+               f2fs_lock_op(sbi);
+
+               err = acquire_orphan_inode(sbi);
+               if (err)
+                       goto put_out_dir;
+
+               err = update_dent_inode(old_inode, new_inode,
+                                               &new_dentry->d_name);
+               if (err) {
+                       release_orphan_inode(sbi);
+                       goto put_out_dir;
+               }
 
                f2fs_set_link(new_dir, new_entry, new_page, old_inode);
 
-               new_inode->i_ctime = CURRENT_TIME;
+               new_inode->i_ctime = current_time(new_inode);
+               down_write(&F2FS_I(new_inode)->i_sem);
                if (old_dir_entry)
-                       drop_nlink(new_inode);
-               drop_nlink(new_inode);
+                       f2fs_i_links_write(new_inode, false);
+               f2fs_i_links_write(new_inode, false);
+               up_write(&F2FS_I(new_inode)->i_sem);
+
                if (!new_inode->i_nlink)
-                       add_orphan_inode(sbi, new_inode->i_ino);
-               update_inode_page(new_inode);
+                       add_orphan_inode(new_inode);
+               else
+                       release_orphan_inode(sbi);
        } else {
+               f2fs_balance_fs(sbi, true);
+
+               f2fs_lock_op(sbi);
+
                err = f2fs_add_link(new_dentry, old_inode);
-               if (err)
+               if (err) {
+                       f2fs_unlock_op(sbi);
                        goto out_dir;
+               }
 
-               if (old_dir_entry) {
-                       inc_nlink(new_dir);
-                       update_inode_page(new_dir);
+               if (old_dir_entry)
+                       f2fs_i_links_write(new_dir, true);
+
+               /*
+                * The old entry and the new entry can be located in the same
+                * inline dentry of the inode. Attaching the new entry can
+                * force an inline dentry conversion, after which old_entry
+                * and old_page would point to the wrong address. To avoid
+                * this, redo the check and the lookup here.
+                */
+               if (is_old_inline && !f2fs_has_inline_dentry(old_dir)) {
+                       f2fs_put_page(old_page, 0);
+                       old_page = NULL;
+
+                       old_entry = f2fs_find_entry(old_dir,
+                                               &old_dentry->d_name, &old_page);
+                       if (!old_entry) {
+                               err = -ENOENT;
+                               if (IS_ERR(old_page))
+                                       err = PTR_ERR(old_page);
+                               f2fs_unlock_op(sbi);
+                               goto out_dir;
+                       }
                }
        }
 
-       old_inode->i_ctime = CURRENT_TIME;
-       mark_inode_dirty(old_inode);
+       down_write(&F2FS_I(old_inode)->i_sem);
+       file_lost_pino(old_inode);
+       if (new_inode && file_enc_name(new_inode))
+               file_set_enc_name(old_inode);
+       up_write(&F2FS_I(old_inode)->i_sem);
+
+       old_inode->i_ctime = current_time(old_inode);
+       f2fs_mark_inode_dirty_sync(old_inode, false);
 
-       f2fs_delete_entry(old_entry, old_page, NULL);
+       f2fs_delete_entry(old_entry, old_page, old_dir, NULL);
 
        if (old_dir_entry) {
                if (old_dir != new_dir) {
                        f2fs_set_link(old_inode, old_dir_entry,
                                                old_dir_page, new_dir);
                } else {
-                       kunmap(old_dir_page);
+                       f2fs_dentry_kunmap(old_inode, old_dir_page);
                        f2fs_put_page(old_dir_page, 0);
                }
-               drop_nlink(old_dir);
-               update_inode_page(old_dir);
+               f2fs_i_links_write(old_dir, false);
        }
 
-       mutex_unlock_op(sbi, ilock);
+       f2fs_unlock_op(sbi);
+
+       if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
+               f2fs_sync_fs(sbi->sb, 1);
        return 0;
 
+put_out_dir:
+       f2fs_unlock_op(sbi);
+       f2fs_dentry_kunmap(new_dir, new_page);
+       f2fs_put_page(new_page, 0);
 out_dir:
        if (old_dir_entry) {
-               kunmap(old_dir_page);
+               f2fs_dentry_kunmap(old_inode, old_dir_page);
                f2fs_put_page(old_dir_page, 0);
        }
-       mutex_unlock_op(sbi, ilock);
 out_old:
-       kunmap(old_page);
+       f2fs_dentry_kunmap(old_dir, old_page);
        f2fs_put_page(old_page, 0);
 out:
        return err;
 }
 
+static void *f2fs_encrypted_follow_link(struct dentry *dentry,
+                                               struct nameidata *nd)
+{
+       struct page *cpage = NULL;
+       char *caddr, *paddr = NULL;
+       struct fscrypt_str cstr = FSTR_INIT(NULL, 0);
+       struct fscrypt_str pstr = FSTR_INIT(NULL, 0);
+       struct fscrypt_symlink_data *sd;
+       struct inode *inode = d_inode(dentry);
+       u32 max_size = inode->i_sb->s_blocksize;
+       int res;
+
+       res = fscrypt_get_encryption_info(inode);
+       if (res)
+               return ERR_PTR(res);
+
+       cpage = read_mapping_page(inode->i_mapping, 0, NULL);
+       if (IS_ERR(cpage))
+               return ERR_CAST(cpage);
+       caddr = kmap(cpage);
+
+       /* Symlink is encrypted */
+       sd = (struct fscrypt_symlink_data *)caddr;
+       cstr.name = sd->encrypted_path;
+       cstr.len = le16_to_cpu(sd->len);
+
+       /* this is the broken symlink case */
+       if (unlikely(cstr.len == 0)) {
+               res = -ENOENT;
+               goto errout;
+       }
+
+       if ((cstr.len + sizeof(struct fscrypt_symlink_data) - 1) > max_size) {
+               /* Symlink data on the disk is corrupted */
+               res = -EIO;
+               goto errout;
+       }
+       res = fscrypt_fname_alloc_buffer(inode, cstr.len, &pstr);
+       if (res)
+               goto errout;
+
+       res = fscrypt_fname_disk_to_usr(inode, 0, 0, &cstr, &pstr);
+       if (res)
+               goto errout;
+
+       /* this is the broken symlink case */
+       if (unlikely(pstr.name[0] == 0)) {
+               res = -ENOENT;
+               goto errout;
+       }
+
+       paddr = pstr.name;
+
+       /* Null-terminate the name */
+       paddr[pstr.len] = '\0';
+       nd_set_link(nd, paddr);
+
+       kunmap(cpage);
+       page_cache_release(cpage);
+       return NULL;
+errout:
+       fscrypt_fname_free_buffer(&pstr);
+       kunmap(cpage);
+       page_cache_release(cpage);
+       return ERR_PTR(res);
+}
+
+void kfree_put_link(struct dentry *dentry, struct nameidata *nd,
+               void *cookie)
+{
+       char *s = nd_get_link(nd);
+       if (!IS_ERR(s))
+               kfree(s);
+}
+
+const struct inode_operations f2fs_encrypted_symlink_inode_operations = {
+       .readlink       = generic_readlink,
+       .follow_link    = f2fs_encrypted_follow_link,
+       .put_link       = kfree_put_link,
+       .getattr        = f2fs_getattr,
+       .setattr        = f2fs_setattr,
+#ifdef CONFIG_F2FS_FS_XATTR
+       .setxattr       = generic_setxattr,
+       .getxattr       = generic_getxattr,
+       .listxattr      = f2fs_listxattr,
+       .removexattr    = generic_removexattr,
+#endif
+};
+
 const struct inode_operations f2fs_dir_inode_operations = {
        .create         = f2fs_create,
        .lookup         = f2fs_lookup,
@@ -498,6 +846,7 @@ const struct inode_operations f2fs_dir_inode_operations = {
        .rmdir          = f2fs_rmdir,
        .mknod          = f2fs_mknod,
        .rename         = f2fs_rename,
+       .getattr        = f2fs_getattr,
        .setattr        = f2fs_setattr,
        .get_acl        = f2fs_get_acl,
 #ifdef CONFIG_F2FS_FS_XATTR
@@ -510,8 +859,9 @@ const struct inode_operations f2fs_dir_inode_operations = {
 
 const struct inode_operations f2fs_symlink_inode_operations = {
        .readlink       = generic_readlink,
-       .follow_link    = page_follow_link_light,
+       .follow_link    = f2fs_follow_link,
        .put_link       = page_put_link,
+       .getattr        = f2fs_getattr,
        .setattr        = f2fs_setattr,
 #ifdef CONFIG_F2FS_FS_XATTR
        .setxattr       = generic_setxattr,
@@ -522,6 +872,7 @@ const struct inode_operations f2fs_symlink_inode_operations = {
 };
 
 const struct inode_operations f2fs_special_inode_operations = {
+       .getattr        = f2fs_getattr,
        .setattr        = f2fs_setattr,
        .get_acl        = f2fs_get_acl,
 #ifdef CONFIG_F2FS_FS_XATTR
index 3df43b4efd89e96e971263e61369dee5d3e39b87..adc52d86fae28ef5a989377e4bc7e2daa5198ca0 100644 (file)
 #include "f2fs.h"
 #include "node.h"
 #include "segment.h"
+#include "trace.h"
 #include <trace/events/f2fs.h>
 
+#define on_build_free_nids(nmi) mutex_is_locked(&(nmi)->build_lock)
+
+#ifndef PTR_ERR_OR_ZERO
+static inline int __must_check PTR_ERR_OR_ZERO(__force const void *ptr)
+{
+       if (IS_ERR(ptr))
+               return PTR_ERR(ptr);
+       else
+               return 0;
+}
+#endif
+
 static struct kmem_cache *nat_entry_slab;
 static struct kmem_cache *free_nid_slab;
+static struct kmem_cache *nat_entry_set_slab;
+
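+/*
+ * Worked example with illustrative numbers (ram_thresh is a sysfs tunable;
+ * its default is not shown in this hunk): with 1 GiB of low memory and
+ * ram_thresh == 10, FREE_NIDS and NAT_ENTRIES may each cache up to
+ * (1 GiB * 10 / 100) >> 2 = ~25.6 MiB, while DIRTY_DENTS, INO_ENTRIES and
+ * EXTENT_CACHE get the >> 1 (half) share of the same budget -- the
+ * 25%/25%/50%/50%/50% split named in the comment below.
+ */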
+bool available_free_memory(struct f2fs_sb_info *sbi, int type)
+{
+       struct f2fs_nm_info *nm_i = NM_I(sbi);
+       struct sysinfo val;
+       unsigned long avail_ram;
+       unsigned long mem_size = 0;
+       bool res = false;
+
+       si_meminfo(&val);
+
+       /* only uses low memory */
+       avail_ram = val.totalram - val.totalhigh;
+
+       /*
+        * give 25%, 25%, 50%, 50%, 50% of memory to each component respectively
+        */
+       if (type == FREE_NIDS) {
+               mem_size = (nm_i->nid_cnt[FREE_NID_LIST] *
+                               sizeof(struct free_nid)) >> PAGE_SHIFT;
+               res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
+       } else if (type == NAT_ENTRIES) {
+               mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >>
+                                                       PAGE_SHIFT;
+               res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
+               if (excess_cached_nats(sbi))
+                       res = false;
+       } else if (type == DIRTY_DENTS) {
+               if (sbi->sb->s_bdi->dirty_exceeded)
+                       return false;
+               mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
+               res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
+       } else if (type == INO_ENTRIES) {
+               int i;
+
+               for (i = 0; i <= UPDATE_INO; i++)
+                       mem_size += (sbi->im[i].ino_num *
+                               sizeof(struct ino_entry)) >> PAGE_SHIFT;
+               res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
+       } else if (type == EXTENT_CACHE) {
+               mem_size = (atomic_read(&sbi->total_ext_tree) *
+                               sizeof(struct extent_tree) +
+                               atomic_read(&sbi->total_ext_node) *
+                               sizeof(struct extent_node)) >> PAGE_SHIFT;
+               res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
+       } else {
+               if (!sbi->sb->s_bdi->dirty_exceeded)
+                       return true;
+       }
+       return res;
+}
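
The shares in the comment map directly onto the shifts in the code:
">> 2" grants a cache type 25% of the ram_thresh portion of low memory,
">> 1" grants 50%. A standalone arithmetic sketch; the page counts are
made-up numbers, only the formula comes from the function above:

        #include <stdio.h>

        int main(void)
        {
                unsigned long avail_ram = 200000; /* low-memory pages, assumed */
                unsigned int ram_thresh = 10;     /* percent of RAM f2fs may use */
                unsigned long portion = avail_ram * ram_thresh / 100; /* 20000 */

                printf("25%% share: %lu pages\n", portion >> 2); /* 5000 */
                printf("50%% share: %lu pages\n", portion >> 1); /* 10000 */
                return 0;
        }
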
 
 static void clear_node_page_dirty(struct page *page)
 {
        struct address_space *mapping = page->mapping;
-       struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
        unsigned long flags;
 
        if (PageDirty(page)) {
@@ -38,7 +102,7 @@ static void clear_node_page_dirty(struct page *page)
                spin_unlock_irqrestore(&mapping->tree_lock, flags);
 
                clear_page_dirty_for_io(page);
-               dec_page_count(sbi, F2FS_DIRTY_NODES);
+               dec_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_NODES);
        }
        ClearPageUptodate(page);
 }
@@ -64,16 +128,12 @@ static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
 
        /* get current nat block page with lock */
        src_page = get_meta_page(sbi, src_off);
-
-       /* Dirty src_page means that it is already the new target NAT page. */
-       if (PageDirty(src_page))
-               return src_page;
-
        dst_page = grab_meta_page(sbi, dst_off);
+       f2fs_bug_on(sbi, PageDirty(src_page));
 
        src_addr = page_address(src_page);
        dst_addr = page_address(dst_page);
-       memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
+       memcpy(dst_addr, src_addr, PAGE_SIZE);
        set_page_dirty(dst_page);
        f2fs_put_page(src_page, 1);
 
@@ -82,40 +142,6 @@ static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
        return dst_page;
 }
 
-/*
- * Readahead NAT pages
- */
-static void ra_nat_pages(struct f2fs_sb_info *sbi, int nid)
-{
-       struct address_space *mapping = sbi->meta_inode->i_mapping;
-       struct f2fs_nm_info *nm_i = NM_I(sbi);
-       struct blk_plug plug;
-       struct page *page;
-       pgoff_t index;
-       int i;
-
-       blk_start_plug(&plug);
-
-       for (i = 0; i < FREE_NID_PAGES; i++, nid += NAT_ENTRY_PER_BLOCK) {
-               if (nid >= nm_i->max_nid)
-                       nid = 0;
-               index = current_nat_addr(sbi, nid);
-
-               page = grab_cache_page(mapping, index);
-               if (!page)
-                       continue;
-               if (PageUptodate(page)) {
-                       f2fs_put_page(page, 1);
-                       continue;
-               }
-               if (f2fs_readpage(sbi, page, index, READ))
-                       continue;
-
-               f2fs_put_page(page, 0);
-       }
-       blk_finish_plug(&plug);
-}
-
 static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
 {
        return radix_tree_lookup(&nm_i->nat_root, n);
@@ -135,119 +161,199 @@ static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
        kmem_cache_free(nat_entry_slab, e);
 }
 
-int is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
+static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
+                                               struct nat_entry *ne)
+{
+       nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
+       struct nat_entry_set *head;
+
+       if (get_nat_flag(ne, IS_DIRTY))
+               return;
+
+       head = radix_tree_lookup(&nm_i->nat_set_root, set);
+       if (!head) {
+               head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_NOFS);
+
+               INIT_LIST_HEAD(&head->entry_list);
+               INIT_LIST_HEAD(&head->set_list);
+               head->set = set;
+               head->entry_cnt = 0;
+               f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
+       }
+       list_move_tail(&ne->list, &head->entry_list);
+       nm_i->dirty_nat_cnt++;
+       head->entry_cnt++;
+       set_nat_flag(ne, IS_DIRTY, true);
+}
+
+static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
+                                               struct nat_entry *ne)
+{
+       nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
+       struct nat_entry_set *head;
+
+       head = radix_tree_lookup(&nm_i->nat_set_root, set);
+       if (head) {
+               list_move_tail(&ne->list, &nm_i->nat_entries);
+               set_nat_flag(ne, IS_DIRTY, false);
+               head->entry_cnt--;
+               nm_i->dirty_nat_cnt--;
+       }
+}
+
+static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
+               nid_t start, unsigned int nr, struct nat_entry_set **ep)
+{
+       return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
+                                                       start, nr);
+}
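
Dirty NAT entries are grouped per on-disk NAT block: NAT_BLOCK_OFFSET()
reduces a nid to the index of the NAT block holding it, so one
nat_entry_set collects every dirty entry destined for the same block.
A standalone sketch of the grouping; treat the entries-per-block
constant as illustrative:

        #include <stdio.h>

        #define NAT_ENTRY_PER_BLOCK 455 /* illustrative entries-per-block value */
        #define NAT_BLOCK_OFFSET(nid) ((nid) / NAT_ENTRY_PER_BLOCK)

        int main(void)
        {
                unsigned int nids[] = { 3, 454, 455, 1000 };
                int i;

                for (i = 0; i < 4; i++)
                        printf("nid %u -> set %u\n", nids[i],
                               NAT_BLOCK_OFFSET(nids[i]));
                /* nids 3 and 454 share set 0; 455 and 1000 fall in sets 1 and 2 */
                return 0;
        }
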
+
+int need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
+{
+       struct f2fs_nm_info *nm_i = NM_I(sbi);
+       struct nat_entry *e;
+       bool need = false;
+
+       down_read(&nm_i->nat_tree_lock);
+       e = __lookup_nat_cache(nm_i, nid);
+       if (e) {
+               if (!get_nat_flag(e, IS_CHECKPOINTED) &&
+                               !get_nat_flag(e, HAS_FSYNCED_INODE))
+                       need = true;
+       }
+       up_read(&nm_i->nat_tree_lock);
+       return need;
+}
+
+bool is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
 {
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        struct nat_entry *e;
-       int is_cp = 1;
+       bool is_cp = true;
 
-       read_lock(&nm_i->nat_tree_lock);
+       down_read(&nm_i->nat_tree_lock);
        e = __lookup_nat_cache(nm_i, nid);
-       if (e && !e->checkpointed)
-               is_cp = 0;
-       read_unlock(&nm_i->nat_tree_lock);
+       if (e && !get_nat_flag(e, IS_CHECKPOINTED))
+               is_cp = false;
+       up_read(&nm_i->nat_tree_lock);
        return is_cp;
 }
 
+bool need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
+{
+       struct f2fs_nm_info *nm_i = NM_I(sbi);
+       struct nat_entry *e;
+       bool need_update = true;
+
+       down_read(&nm_i->nat_tree_lock);
+       e = __lookup_nat_cache(nm_i, ino);
+       if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
+                       (get_nat_flag(e, IS_CHECKPOINTED) ||
+                        get_nat_flag(e, HAS_FSYNCED_INODE)))
+               need_update = false;
+       up_read(&nm_i->nat_tree_lock);
+       return need_update;
+}
+
 static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
 {
        struct nat_entry *new;
 
-       new = kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
-       if (!new)
-               return NULL;
-       if (radix_tree_insert(&nm_i->nat_root, nid, new)) {
-               kmem_cache_free(nat_entry_slab, new);
-               return NULL;
-       }
+       new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_NOFS);
+       f2fs_radix_tree_insert(&nm_i->nat_root, nid, new);
        memset(new, 0, sizeof(struct nat_entry));
        nat_set_nid(new, nid);
+       nat_reset_flag(new);
        list_add_tail(&new->list, &nm_i->nat_entries);
        nm_i->nat_cnt++;
        return new;
 }
 
-static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
+static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
                                                struct f2fs_nat_entry *ne)
 {
+       struct f2fs_nm_info *nm_i = NM_I(sbi);
        struct nat_entry *e;
-retry:
-       write_lock(&nm_i->nat_tree_lock);
+
        e = __lookup_nat_cache(nm_i, nid);
        if (!e) {
                e = grab_nat_entry(nm_i, nid);
-               if (!e) {
-                       write_unlock(&nm_i->nat_tree_lock);
-                       goto retry;
-               }
-               nat_set_blkaddr(e, le32_to_cpu(ne->block_addr));
-               nat_set_ino(e, le32_to_cpu(ne->ino));
-               nat_set_version(e, ne->version);
-               e->checkpointed = true;
+               node_info_from_raw_nat(&e->ni, ne);
+       } else {
+               f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) ||
+                               nat_get_blkaddr(e) !=
+                                       le32_to_cpu(ne->block_addr) ||
+                               nat_get_version(e) != ne->version);
        }
-       write_unlock(&nm_i->nat_tree_lock);
 }
 
 static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
-                       block_t new_blkaddr)
+                       block_t new_blkaddr, bool fsync_done)
 {
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        struct nat_entry *e;
-retry:
-       write_lock(&nm_i->nat_tree_lock);
+
+       down_write(&nm_i->nat_tree_lock);
        e = __lookup_nat_cache(nm_i, ni->nid);
        if (!e) {
                e = grab_nat_entry(nm_i, ni->nid);
-               if (!e) {
-                       write_unlock(&nm_i->nat_tree_lock);
-                       goto retry;
-               }
-               e->ni = *ni;
-               e->checkpointed = true;
-               BUG_ON(ni->blk_addr == NEW_ADDR);
+               copy_node_info(&e->ni, ni);
+               f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
        } else if (new_blkaddr == NEW_ADDR) {
                /*
                 * when nid is reallocated,
                 * previous nat entry can remain in nat cache.
                 * So, reinitialize it with new information.
                 */
-               e->ni = *ni;
-               BUG_ON(ni->blk_addr != NULL_ADDR);
+               copy_node_info(&e->ni, ni);
+               f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
        }
 
-       if (new_blkaddr == NEW_ADDR)
-               e->checkpointed = false;
-
        /* sanity check */
-       BUG_ON(nat_get_blkaddr(e) != ni->blk_addr);
-       BUG_ON(nat_get_blkaddr(e) == NULL_ADDR &&
+       f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
+       f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
                        new_blkaddr == NULL_ADDR);
-       BUG_ON(nat_get_blkaddr(e) == NEW_ADDR &&
+       f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
                        new_blkaddr == NEW_ADDR);
-       BUG_ON(nat_get_blkaddr(e) != NEW_ADDR &&
+       f2fs_bug_on(sbi, nat_get_blkaddr(e) != NEW_ADDR &&
                        nat_get_blkaddr(e) != NULL_ADDR &&
                        new_blkaddr == NEW_ADDR);
 
-       /* increament version no as node is removed */
+       /* increment version no as node is removed */
        if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
                unsigned char version = nat_get_version(e);
                nat_set_version(e, inc_node_version(version));
+
+               /* in order to reuse the nid */
+               if (nm_i->next_scan_nid > ni->nid)
+                       nm_i->next_scan_nid = ni->nid;
        }
 
        /* change address */
        nat_set_blkaddr(e, new_blkaddr);
+       if (new_blkaddr == NEW_ADDR || new_blkaddr == NULL_ADDR)
+               set_nat_flag(e, IS_CHECKPOINTED, false);
        __set_nat_cache_dirty(nm_i, e);
-       write_unlock(&nm_i->nat_tree_lock);
+
+       /* update fsync_mark if its inode nat entry is still alive */
+       if (ni->nid != ni->ino)
+               e = __lookup_nat_cache(nm_i, ni->ino);
+       if (e) {
+               if (fsync_done && ni->nid == ni->ino)
+                       set_nat_flag(e, HAS_FSYNCED_INODE, true);
+               set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
+       }
+       up_write(&nm_i->nat_tree_lock);
 }
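
The later f2fs_bug_on() calls in set_node_addr() encode the legal block
address transitions for a nat entry: NULL_ADDR -> NEW_ADDR -> a real
address -> NULL_ADDR, and never NULL->NULL, NEW->NEW, or written->NEW.
A standalone restatement of those checks; the sentinel values here are
placeholders, not the on-disk constants:

        #include <assert.h>

        #define NULL_ADDR 0U    /* placeholder sentinels */
        #define NEW_ADDR  ~0U

        static int transition_ok(unsigned int old, unsigned int new)
        {
                if (old == NULL_ADDR && new == NULL_ADDR)
                        return 0;       /* cannot free an unallocated node */
                if (old == NEW_ADDR && new == NEW_ADDR)
                        return 0;       /* cannot preallocate twice */
                if (old != NEW_ADDR && old != NULL_ADDR && new == NEW_ADDR)
                        return 0;       /* a written node cannot revert to NEW */
                return 1;
        }

        int main(void)
        {
                assert(transition_ok(NULL_ADDR, NEW_ADDR)); /* allocate */
                assert(transition_ok(NEW_ADDR, 0x1234));    /* first write */
                assert(!transition_ok(0x1234, NEW_ADDR));   /* forbidden */
                return 0;
        }
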
 
-static int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
+int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
 {
        struct f2fs_nm_info *nm_i = NM_I(sbi);
+       int nr = nr_shrink;
 
-       if (nm_i->nat_cnt <= NM_WOUT_THRESHOLD)
+       if (!down_write_trylock(&nm_i->nat_tree_lock))
                return 0;
 
-       write_lock(&nm_i->nat_tree_lock);
        while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
                struct nat_entry *ne;
                ne = list_first_entry(&nm_i->nat_entries,
@@ -255,18 +361,18 @@ static int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
                __del_from_nat_cache(nm_i, ne);
                nr_shrink--;
        }
-       write_unlock(&nm_i->nat_tree_lock);
-       return nr_shrink;
+       up_write(&nm_i->nat_tree_lock);
+       return nr - nr_shrink;
 }
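
Note the changed return convention: the function now reports how many
entries it actually freed (nr - nr_shrink) instead of the remaining
budget, which is what a shrinker caller needs. For example, entering
with nr_shrink = 10 and evicting 4 entries before the list empties
leaves nr_shrink at 6, so the function returns 10 - 6 = 4.
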
 
 /*
- * This function returns always success
+ * This function always returns success
  */
 void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
 {
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
-       struct f2fs_summary_block *sum = curseg->sum_blk;
+       struct f2fs_journal *journal = curseg->journal;
        nid_t start_nid = START_NID(nid);
        struct f2fs_nat_block *nat_blk;
        struct page *page = NULL;
@@ -274,29 +380,29 @@ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
        struct nat_entry *e;
        int i;
 
-       memset(&ne, 0, sizeof(struct f2fs_nat_entry));
        ni->nid = nid;
 
        /* Check nat cache */
-       read_lock(&nm_i->nat_tree_lock);
+       down_read(&nm_i->nat_tree_lock);
        e = __lookup_nat_cache(nm_i, nid);
        if (e) {
                ni->ino = nat_get_ino(e);
                ni->blk_addr = nat_get_blkaddr(e);
                ni->version = nat_get_version(e);
-       }
-       read_unlock(&nm_i->nat_tree_lock);
-       if (e)
+               up_read(&nm_i->nat_tree_lock);
                return;
+       }
+
+       memset(&ne, 0, sizeof(struct f2fs_nat_entry));
 
        /* Check current segment summary */
-       mutex_lock(&curseg->curseg_mutex);
-       i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
+       down_read(&curseg->journal_rwsem);
+       i = lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0);
        if (i >= 0) {
-               ne = nat_in_journal(sum, i);
+               ne = nat_in_journal(journal, i);
                node_info_from_raw_nat(ni, &ne);
        }
-       mutex_unlock(&curseg->curseg_mutex);
+       up_read(&curseg->journal_rwsem);
        if (i >= 0)
                goto cache;
 
@@ -307,17 +413,75 @@ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
        node_info_from_raw_nat(ni, &ne);
        f2fs_put_page(page, 1);
 cache:
+       up_read(&nm_i->nat_tree_lock);
        /* cache nat entry */
-       cache_nat_entry(NM_I(sbi), nid, &ne);
+       down_write(&nm_i->nat_tree_lock);
+       cache_nat_entry(sbi, nid, &ne);
+       up_write(&nm_i->nat_tree_lock);
+}
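
get_node_info() resolves a nid through three tiers in cost order: the
in-memory NAT cache, then the NAT journal kept in the hot-data curseg,
then the on-disk NAT block, finally writing the answer back into the
cache. A standalone sketch of the same tiered-lookup shape, with toy
data structures that are not f2fs code:

        #include <stdio.h>

        static int cache[8], cache_set[8];      /* tier 1 */
        static int journal[8], journal_set[8];  /* tier 2 */
        static int table[8] = { 10, 11, 12, 13, 14, 15, 16, 17 }; /* tier 3 */

        static int lookup(int key)
        {
                int val;

                if (cache_set[key])
                        return cache[key];      /* cheap hit */
                val = journal_set[key] ? journal[key] : table[key];
                cache[key] = val;               /* cache for next time */
                cache_set[key] = 1;
                return val;
        }

        int main(void)
        {
                journal[2] = 99;
                journal_set[2] = 1;
                printf("%d %d %d\n", lookup(2), lookup(2), lookup(3)); /* 99 99 13 */
                return 0;
        }
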
+
+/*
+ * readahead up to n node pages referenced from the given parent page.
+ */
+static void ra_node_pages(struct page *parent, int start, int n)
+{
+       struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
+       struct blk_plug plug;
+       int i, end;
+       nid_t nid;
+
+       blk_start_plug(&plug);
+
+       /* try readahead for siblings of the desired node */
+       end = start + n;
+       end = min(end, NIDS_PER_BLOCK);
+       for (i = start; i < end; i++) {
+               nid = get_nid(parent, i, false);
+               ra_node_page(sbi, nid);
+       }
+
+       blk_finish_plug(&plug);
+}
+
+pgoff_t get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
+{
+       const long direct_index = ADDRS_PER_INODE(dn->inode);
+       const long direct_blks = ADDRS_PER_BLOCK;
+       const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
+       unsigned int skipped_unit = ADDRS_PER_BLOCK;
+       int cur_level = dn->cur_level;
+       int max_level = dn->max_level;
+       pgoff_t base = 0;
+
+       if (!dn->max_level)
+               return pgofs + 1;
+
+       while (max_level-- > cur_level)
+               skipped_unit *= NIDS_PER_BLOCK;
+
+       switch (dn->max_level) {
+       case 3:
+               base += 2 * indirect_blks;
+       case 2:
+               base += 2 * direct_blks;
+       case 1:
+               base += direct_index;
+               break;
+       default:
+               f2fs_bug_on(F2FS_I_SB(dn->inode), 1);
+       }
+
+       return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base;
 }
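
get_next_page_offset() lets a failed lookup skip a whole missing subtree
at once: base is the first page offset served by subtrees of the failed
level, skipped_unit is the span of one such subtree, and the return
value rounds pgofs up to the next subtree boundary past base. Note that
the switch above accumulates base through deliberate case fallthrough.
A standalone sketch with small made-up geometry; the real constants are
far larger:

        #include <stdio.h>

        int main(void)
        {
                const long direct_index = 3;  /* addrs held in the inode itself */
                const long direct_blks = 2;   /* toy ADDRS_PER_BLOCK */
                const long nids_per_blk = 2;  /* toy NIDS_PER_BLOCK */
                long pgofs = 9;               /* lookup failed at this offset */
                int cur_level = 0, max_level = 2;
                long skipped_unit = direct_blks;
                long base;

                while (max_level-- > cur_level)
                        skipped_unit *= nids_per_blk;   /* 2 -> 4 -> 8 */

                /* max_level == 2: skip the inode addrs and both direct blocks */
                base = direct_index + 2 * direct_blks;  /* 3 + 4 = 7 */

                printf("next offset: %ld\n",
                       ((pgofs - base) / skipped_unit + 1) * skipped_unit + base);
                /* ((9 - 7) / 8 + 1) * 8 + 7 = 15, the next subtree boundary */
                return 0;
        }
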
 
 /*
  * The maximum depth is four.
  * Offset[0] will have raw inode offset.
  */
-static int get_node_path(long block, int offset[4], unsigned int noffset[4])
+static int get_node_path(struct inode *inode, long block,
+                               int offset[4], unsigned int noffset[4])
 {
-       const long direct_index = ADDRS_PER_INODE;
+       const long direct_index = ADDRS_PER_INODE(inode);
        const long direct_blks = ADDRS_PER_BLOCK;
        const long dptrs_per_blk = NIDS_PER_BLOCK;
        const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
@@ -390,27 +554,38 @@ got:
 
 /*
  * Caller should call f2fs_put_dnode(dn).
- * Also, it should grab and release a mutex by calling mutex_lock_op() and
- * mutex_unlock_op() only if ro is not set RDONLY_NODE.
+ * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
+ * f2fs_unlock_op() only if mode is not set to RDONLY_NODE.
  * In the case of RDONLY_NODE, we don't need to care about locking.
  */
 int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
 {
-       struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
+       struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
        struct page *npage[4];
-       struct page *parent;
+       struct page *parent = NULL;
        int offset[4];
        unsigned int noffset[4];
        nid_t nids[4];
-       int level, i;
+       int level, i = 0;
        int err = 0;
 
-       level = get_node_path(index, offset, noffset);
+       level = get_node_path(dn->inode, index, offset, noffset);
 
        nids[0] = dn->inode->i_ino;
-       npage[0] = get_node_page(sbi, nids[0]);
-       if (IS_ERR(npage[0]))
-               return PTR_ERR(npage[0]);
+       npage[0] = dn->inode_page;
+
+       if (!npage[0]) {
+               npage[0] = get_node_page(sbi, nids[0]);
+               if (IS_ERR(npage[0]))
+                       return PTR_ERR(npage[0]);
+       }
+
+       /* if inline_data is set, should not report any block indices */
+       if (f2fs_has_inline_data(dn->inode) && index) {
+               err = -ENOENT;
+               f2fs_put_page(npage[0], 1);
+               goto release_out;
+       }
 
        parent = npage[0];
        if (level != 0)
@@ -430,7 +605,7 @@ int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
                        }
 
                        dn->nid = nids[i];
-                       npage[i] = new_node_page(dn, noffset[i]);
+                       npage[i] = new_node_page(dn, noffset[i], NULL);
                        if (IS_ERR(npage[i])) {
                                alloc_nid_failed(sbi, nids[i]);
                                err = PTR_ERR(npage[i]);
@@ -481,51 +656,58 @@ release_pages:
 release_out:
        dn->inode_page = NULL;
        dn->node_page = NULL;
+       if (err == -ENOENT) {
+               dn->cur_level = i;
+               dn->max_level = level;
+               dn->ofs_in_node = offset[level];
+       }
        return err;
 }
 
 static void truncate_node(struct dnode_of_data *dn)
 {
-       struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
+       struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
        struct node_info ni;
 
        get_node_info(sbi, dn->nid, &ni);
        if (dn->inode->i_blocks == 0) {
-               BUG_ON(ni.blk_addr != NULL_ADDR);
+               f2fs_bug_on(sbi, ni.blk_addr != NULL_ADDR);
                goto invalidate;
        }
-       BUG_ON(ni.blk_addr == NULL_ADDR);
+       f2fs_bug_on(sbi, ni.blk_addr == NULL_ADDR);
 
        /* Deallocate node address */
        invalidate_blocks(sbi, ni.blk_addr);
-       dec_valid_node_count(sbi, dn->inode, 1);
-       set_node_addr(sbi, &ni, NULL_ADDR);
+       dec_valid_node_count(sbi, dn->inode);
+       set_node_addr(sbi, &ni, NULL_ADDR, false);
 
        if (dn->nid == dn->inode->i_ino) {
                remove_orphan_inode(sbi, dn->nid);
                dec_valid_inode_count(sbi);
-       } else {
-               sync_inode_page(dn);
+               f2fs_inode_synced(dn->inode);
        }
 invalidate:
        clear_node_page_dirty(dn->node_page);
-       F2FS_SET_SB_DIRT(sbi);
+       set_sbi_flag(sbi, SBI_IS_DIRTY);
 
        f2fs_put_page(dn->node_page, 1);
+
+       invalidate_mapping_pages(NODE_MAPPING(sbi),
+                       dn->node_page->index, dn->node_page->index);
+
        dn->node_page = NULL;
        trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
 }
 
 static int truncate_dnode(struct dnode_of_data *dn)
 {
-       struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
        struct page *page;
 
        if (dn->nid == 0)
                return 1;
 
        /* get direct node */
-       page = get_node_page(sbi, dn->nid);
+       page = get_node_page(F2FS_I_SB(dn->inode), dn->nid);
        if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
                return 1;
        else if (IS_ERR(page))
@@ -542,7 +724,6 @@ static int truncate_dnode(struct dnode_of_data *dn)
 static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
                                                int ofs, int depth)
 {
-       struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
        struct dnode_of_data rdn = *dn;
        struct page *page;
        struct f2fs_node *rn;
@@ -556,13 +737,15 @@ static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
 
        trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);
 
-       page = get_node_page(sbi, dn->nid);
+       page = get_node_page(F2FS_I_SB(dn->inode), dn->nid);
        if (IS_ERR(page)) {
                trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
                return PTR_ERR(page);
        }
 
-       rn = (struct f2fs_node *)page_address(page);
+       ra_node_pages(page, ofs, NIDS_PER_BLOCK);
+
+       rn = F2FS_NODE(page);
        if (depth < 3) {
                for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
                        child_nid = le32_to_cpu(rn->in.nid[i]);
@@ -572,7 +755,8 @@ static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
                        ret = truncate_dnode(&rdn);
                        if (ret < 0)
                                goto out_err;
-                       set_nid(page, i, 0, false);
+                       if (set_nid(page, i, 0, false))
+                               dn->node_changed = true;
                }
        } else {
                child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
@@ -585,7 +769,8 @@ static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
                        rdn.nid = child_nid;
                        ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
                        if (ret == (NIDS_PER_BLOCK + 1)) {
-                               set_nid(page, i, 0, false);
+                               if (set_nid(page, i, 0, false))
+                                       dn->node_changed = true;
                                child_nofs += ret;
                        } else if (ret < 0 && ret != -ENOENT) {
                                goto out_err;
@@ -614,7 +799,6 @@ out_err:
 static int truncate_partial_nodes(struct dnode_of_data *dn,
                        struct f2fs_inode *ri, int *offset, int depth)
 {
-       struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
        struct page *pages[2];
        nid_t nid[3];
        nid_t child_nid;
@@ -627,19 +811,21 @@ static int truncate_partial_nodes(struct dnode_of_data *dn,
                return 0;
 
        /* get indirect nodes in the path */
-       for (i = 0; i < depth - 1; i++) {
-               /* refernece count'll be increased */
-               pages[i] = get_node_page(sbi, nid[i]);
+       for (i = 0; i < idx + 1; i++) {
+               /* reference count'll be increased */
+               pages[i] = get_node_page(F2FS_I_SB(dn->inode), nid[i]);
                if (IS_ERR(pages[i])) {
-                       depth = i + 1;
                        err = PTR_ERR(pages[i]);
+                       idx = i - 1;
                        goto fail;
                }
                nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
        }
 
+       ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);
+
        /* free direct nodes linked to a partial indirect node */
-       for (i = offset[depth - 1]; i < NIDS_PER_BLOCK; i++) {
+       for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
                child_nid = get_nid(pages[idx], i, false);
                if (!child_nid)
                        continue;
@@ -647,10 +833,11 @@ static int truncate_partial_nodes(struct dnode_of_data *dn,
                err = truncate_dnode(dn);
                if (err < 0)
                        goto fail;
-               set_nid(pages[idx], i, 0, false);
+               if (set_nid(pages[idx], i, 0, false))
+                       dn->node_changed = true;
        }
 
-       if (offset[depth - 1] == 0) {
+       if (offset[idx + 1] == 0) {
                dn->node_page = pages[idx];
                dn->nid = nid[idx];
                truncate_node(dn);
@@ -658,9 +845,10 @@ static int truncate_partial_nodes(struct dnode_of_data *dn,
                f2fs_put_page(pages[idx], 1);
        }
        offset[idx]++;
-       offset[depth - 1] = 0;
+       offset[idx + 1] = 0;
+       idx--;
 fail:
-       for (i = depth - 3; i >= 0; i--)
+       for (i = idx; i >= 0; i--)
                f2fs_put_page(pages[i], 1);
 
        trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);
@@ -673,19 +861,18 @@ fail:
  */
 int truncate_inode_blocks(struct inode *inode, pgoff_t from)
 {
-       struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
-       struct address_space *node_mapping = sbi->node_inode->i_mapping;
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        int err = 0, cont = 1;
        int level, offset[4], noffset[4];
        unsigned int nofs = 0;
-       struct f2fs_node *rn;
+       struct f2fs_inode *ri;
        struct dnode_of_data dn;
        struct page *page;
 
        trace_f2fs_truncate_inode_blocks_enter(inode, from);
 
-       level = get_node_path(from, offset, noffset);
-restart:
+       level = get_node_path(inode, from, offset, noffset);
+
        page = get_node_page(sbi, inode->i_ino);
        if (IS_ERR(page)) {
                trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
@@ -695,7 +882,7 @@ restart:
        set_new_dnode(&dn, inode, page, NULL, 0);
        unlock_page(page);
 
-       rn = page_address(page);
+       ri = F2FS_INODE(page);
        switch (level) {
        case 0:
        case 1:
@@ -705,7 +892,7 @@ restart:
                nofs = noffset[1];
                if (!offset[level - 1])
                        goto skip_partial;
-               err = truncate_partial_nodes(&dn, &rn->i, offset, level);
+               err = truncate_partial_nodes(&dn, ri, offset, level);
                if (err < 0 && err != -ENOENT)
                        goto fail;
                nofs += 1 + NIDS_PER_BLOCK;
@@ -714,7 +901,7 @@ restart:
                nofs = 5 + 2 * NIDS_PER_BLOCK;
                if (!offset[level - 1])
                        goto skip_partial;
-               err = truncate_partial_nodes(&dn, &rn->i, offset, level);
+               err = truncate_partial_nodes(&dn, ri, offset, level);
                if (err < 0 && err != -ENOENT)
                        goto fail;
                break;
@@ -724,7 +911,7 @@ restart:
 
 skip_partial:
        while (cont) {
-               dn.nid = le32_to_cpu(rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]);
+               dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
                switch (offset[0]) {
                case NODE_DIR1_BLOCK:
                case NODE_DIR2_BLOCK:
@@ -747,14 +934,11 @@ skip_partial:
                if (err < 0 && err != -ENOENT)
                        goto fail;
                if (offset[1] == 0 &&
-                               rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]) {
+                               ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
                        lock_page(page);
-                       if (page->mapping != node_mapping) {
-                               f2fs_put_page(page, 1);
-                               goto restart;
-                       }
-                       wait_on_page_writeback(page);
-                       rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
+                       BUG_ON(page->mapping != NODE_MAPPING(sbi));
+                       f2fs_wait_on_page_writeback(page, NODE, true);
+                       ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
                        set_page_dirty(page);
                        unlock_page(page);
                }
@@ -768,94 +952,119 @@ fail:
        return err > 0 ? 0 : err;
 }
 
+int truncate_xattr_node(struct inode *inode, struct page *page)
+{
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       nid_t nid = F2FS_I(inode)->i_xattr_nid;
+       struct dnode_of_data dn;
+       struct page *npage;
+
+       if (!nid)
+               return 0;
+
+       npage = get_node_page(sbi, nid);
+       if (IS_ERR(npage))
+               return PTR_ERR(npage);
+
+       f2fs_i_xnid_write(inode, 0);
+
+       /* need to do checkpoint during fsync */
+       F2FS_I(inode)->xattr_ver = cur_cp_version(F2FS_CKPT(sbi));
+
+       set_new_dnode(&dn, inode, page, npage, nid);
+
+       if (page)
+               dn.inode_page_locked = true;
+       truncate_node(&dn);
+       return 0;
+}
+
 /*
- * Caller should grab and release a mutex by calling mutex_lock_op() and
- * mutex_unlock_op().
+ * Caller should grab and release a rwsem by calling f2fs_lock_op() and
+ * f2fs_unlock_op().
  */
 int remove_inode_page(struct inode *inode)
 {
-       struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
-       struct page *page;
-       nid_t ino = inode->i_ino;
        struct dnode_of_data dn;
+       int err;
 
-       page = get_node_page(sbi, ino);
-       if (IS_ERR(page))
-               return PTR_ERR(page);
+       set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
+       err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
+       if (err)
+               return err;
 
-       if (F2FS_I(inode)->i_xattr_nid) {
-               nid_t nid = F2FS_I(inode)->i_xattr_nid;
-               struct page *npage = get_node_page(sbi, nid);
+       err = truncate_xattr_node(inode, dn.inode_page);
+       if (err) {
+               f2fs_put_dnode(&dn);
+               return err;
+       }
 
-               if (IS_ERR(npage))
-                       return PTR_ERR(npage);
+       /* remove potential inline_data blocks */
+       if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+                               S_ISLNK(inode->i_mode))
+               truncate_data_blocks_range(&dn, 1);
 
-               F2FS_I(inode)->i_xattr_nid = 0;
-               set_new_dnode(&dn, inode, page, npage, nid);
-               dn.inode_page_locked = 1;
-               truncate_node(&dn);
-       }
+       /* 0 is possible, after f2fs_new_inode() has failed */
+       f2fs_bug_on(F2FS_I_SB(inode),
+                       inode->i_blocks != 0 && inode->i_blocks != 1);
 
-       /* 0 is possible, after f2fs_new_inode() is failed */
-       BUG_ON(inode->i_blocks != 0 && inode->i_blocks != 1);
-       set_new_dnode(&dn, inode, page, page, ino);
+       /* will put inode & node pages */
        truncate_node(&dn);
        return 0;
 }
 
-int new_inode_page(struct inode *inode, const struct qstr *name)
+struct page *new_inode_page(struct inode *inode)
 {
-       struct page *page;
        struct dnode_of_data dn;
 
        /* allocate inode page for new inode */
        set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
-       page = new_node_page(&dn, 0);
-       init_dent_inode(name, page);
-       if (IS_ERR(page))
-               return PTR_ERR(page);
-       f2fs_put_page(page, 1);
-       return 0;
+
+       /* caller should f2fs_put_page(page, 1); */
+       return new_node_page(&dn, 0, NULL);
 }
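
Since new_inode_page() now returns the locked page instead of putting it
internally, initializing the page (formerly init_dent_inode() here)
moves to the caller. A hedged sketch of the calling pattern implied by
the comment above; the error handling shown is illustrative:

        struct page *ipage = new_inode_page(inode);

        if (IS_ERR(ipage))
                return PTR_ERR(ipage);
        /* ... fill in the new inode page while it is locked ... */
        f2fs_put_page(ipage, 1);        /* caller unlocks and releases it */
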
 
-struct page *new_node_page(struct dnode_of_data *dn, unsigned int ofs)
+struct page *new_node_page(struct dnode_of_data *dn,
+                               unsigned int ofs, struct page *ipage)
 {
-       struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
-       struct address_space *mapping = sbi->node_inode->i_mapping;
+       struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
        struct node_info old_ni, new_ni;
        struct page *page;
        int err;
 
-       if (is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))
+       if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
                return ERR_PTR(-EPERM);
 
-       page = grab_cache_page(mapping, dn->nid);
+       page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false);
        if (!page)
                return ERR_PTR(-ENOMEM);
 
-       get_node_info(sbi, dn->nid, &old_ni);
+       if (unlikely(!inc_valid_node_count(sbi, dn->inode))) {
+               err = -ENOSPC;
+               goto fail;
+       }
 
-       SetPageUptodate(page);
-       fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
+       get_node_info(sbi, dn->nid, &old_ni);
 
        /* Reinitialize old_ni with new node page */
-       BUG_ON(old_ni.blk_addr != NULL_ADDR);
+       f2fs_bug_on(sbi, old_ni.blk_addr != NULL_ADDR);
        new_ni = old_ni;
        new_ni.ino = dn->inode->i_ino;
+       set_node_addr(sbi, &new_ni, NEW_ADDR, false);
 
-       if (!inc_valid_node_count(sbi, dn->inode, 1)) {
-               err = -ENOSPC;
-               goto fail;
-       }
-       set_node_addr(sbi, &new_ni, NEW_ADDR);
+       f2fs_wait_on_page_writeback(page, NODE, true);
+       fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
        set_cold_node(dn->inode, page);
+       if (!PageUptodate(page))
+               SetPageUptodate(page);
+       if (set_page_dirty(page))
+               dn->node_changed = true;
+
+       if (f2fs_has_xattr_block(ofs))
+               f2fs_i_xnid_write(dn->inode, dn->nid);
 
-       dn->node_page = page;
-       sync_inode_page(dn);
-       set_page_dirty(page);
        if (ofs == 0)
                inc_valid_inode_count(sbi);
-
        return page;
 
 fail:
@@ -867,25 +1076,32 @@ fail:
 /*
  * Caller should do the following after getting one of these return values.
  * 0: f2fs_put_page(page, 0)
- * LOCKED_PAGE: f2fs_put_page(page, 1)
- * error: nothing
+ * LOCKED_PAGE or error: f2fs_put_page(page, 1)
  */
-static int read_node_page(struct page *page, int type)
+static int read_node_page(struct page *page, int rw)
 {
-       struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
+       struct f2fs_sb_info *sbi = F2FS_P_SB(page);
        struct node_info ni;
+       struct f2fs_io_info fio = {
+               .sbi = sbi,
+               .type = NODE,
+               .rw = rw,
+               .page = page,
+               .encrypted_page = NULL,
+       };
+
+       if (PageUptodate(page))
+               return LOCKED_PAGE;
 
        get_node_info(sbi, page->index, &ni);
 
-       if (ni.blk_addr == NULL_ADDR) {
-               f2fs_put_page(page, 1);
+       if (unlikely(ni.blk_addr == NULL_ADDR)) {
+               ClearPageUptodate(page);
                return -ENOENT;
        }
 
-       if (PageUptodate(page))
-               return LOCKED_PAGE;
-
-       return f2fs_readpage(sbi, page, ni.blk_addr, type);
+       fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr;
+       return f2fs_submit_page_bio(&fio);
 }
 
 /*
@@ -893,149 +1109,171 @@ static int read_node_page(struct page *page, int type)
  */
 void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
 {
-       struct address_space *mapping = sbi->node_inode->i_mapping;
        struct page *apage;
        int err;
 
-       apage = find_get_page(mapping, nid);
-       if (apage && PageUptodate(apage)) {
-               f2fs_put_page(apage, 0);
+       if (!nid)
+               return;
+       f2fs_bug_on(sbi, check_nid_range(sbi, nid));
+
+       rcu_read_lock();
+       apage = radix_tree_lookup(&NODE_MAPPING(sbi)->page_tree, nid);
+       rcu_read_unlock();
+       if (apage)
                return;
-       }
-       f2fs_put_page(apage, 0);
 
-       apage = grab_cache_page(mapping, nid);
+       apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
        if (!apage)
                return;
 
        err = read_node_page(apage, READA);
-       if (err == 0)
-               f2fs_put_page(apage, 0);
-       else if (err == LOCKED_PAGE)
-               f2fs_put_page(apage, 1);
-       return;
+       f2fs_put_page(apage, err ? 1 : 0);
 }
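
The probe above switches from find_get_page(), which takes and drops a
page reference even on a miss, to a lock-free radix-tree peek under RCU.
The looked-up pointer is used purely as an existence hint and never
dereferenced, so no reference is needed. Side by side, with both halves
taken from this hunk:

        /* before: refcount traffic on every probe */
        apage = find_get_page(mapping, nid);
        if (apage && PageUptodate(apage)) {
                f2fs_put_page(apage, 0);
                return;
        }
        f2fs_put_page(apage, 0);

        /* after: RCU-protected peek, pointer used only as a hint */
        rcu_read_lock();
        apage = radix_tree_lookup(&NODE_MAPPING(sbi)->page_tree, nid);
        rcu_read_unlock();
        if (apage)
                return;
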
 
-struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
+static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
+                                       struct page *parent, int start)
 {
-       struct address_space *mapping = sbi->node_inode->i_mapping;
        struct page *page;
        int err;
+
+       if (!nid)
+               return ERR_PTR(-ENOENT);
+       f2fs_bug_on(sbi, check_nid_range(sbi, nid));
 repeat:
-       page = grab_cache_page(mapping, nid);
+       page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
        if (!page)
                return ERR_PTR(-ENOMEM);
 
        err = read_node_page(page, READ_SYNC);
-       if (err < 0)
+       if (err < 0) {
+               f2fs_put_page(page, 1);
                return ERR_PTR(err);
-       else if (err == LOCKED_PAGE)
-               goto got_it;
+       } else if (err == LOCKED_PAGE) {
+               goto page_hit;
+       }
+
+       if (parent)
+               ra_node_pages(parent, start + 1, MAX_RA_NODE);
 
        lock_page(page);
-       if (!PageUptodate(page)) {
-               f2fs_put_page(page, 1);
-               return ERR_PTR(-EIO);
-       }
-       if (page->mapping != mapping) {
+
+       if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
                f2fs_put_page(page, 1);
                goto repeat;
        }
-got_it:
-       BUG_ON(nid != nid_of_node(page));
+
+       if (unlikely(!PageUptodate(page)))
+               goto out_err;
+page_hit:
        mark_page_accessed(page);
+
+       if (unlikely(nid != nid_of_node(page))) {
+               f2fs_bug_on(sbi, 1);
+               ClearPageUptodate(page);
+out_err:
+               f2fs_put_page(page, 1);
+               return ERR_PTR(-EIO);
+       }
        return page;
 }
 
-/*
- * Return a locked page for the desired node page.
- * And, readahead MAX_RA_NODE number of node pages.
- */
+struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
+{
+       return __get_node_page(sbi, nid, NULL, 0);
+}
+
 struct page *get_node_page_ra(struct page *parent, int start)
 {
-       struct f2fs_sb_info *sbi = F2FS_SB(parent->mapping->host->i_sb);
-       struct address_space *mapping = sbi->node_inode->i_mapping;
-       struct blk_plug plug;
+       struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
+       nid_t nid = get_nid(parent, start, false);
+
+       return __get_node_page(sbi, nid, parent, start);
+}
+
+static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
+{
+       struct inode *inode;
        struct page *page;
-       int err, i, end;
-       nid_t nid;
+       int ret;
 
-       /* First, try getting the desired direct node. */
-       nid = get_nid(parent, start, false);
-       if (!nid)
-               return ERR_PTR(-ENOENT);
-repeat:
-       page = grab_cache_page(mapping, nid);
+       /* should flush inline_data before evict_inode */
+       inode = ilookup(sbi->sb, ino);
+       if (!inode)
+               return;
+
+       page = find_get_page(inode->i_mapping, 0);
        if (!page)
-               return ERR_PTR(-ENOMEM);
+               goto iput_out;
 
-       err = read_node_page(page, READ_SYNC);
-       if (err < 0)
-               return ERR_PTR(err);
-       else if (err == LOCKED_PAGE)
-               goto page_hit;
+       if (!trylock_page(page))
+               goto release_out;
 
-       blk_start_plug(&plug);
+       if (!PageUptodate(page))
+               goto page_out;
 
-       /* Then, try readahead for siblings of the desired node */
-       end = start + MAX_RA_NODE;
-       end = min(end, NIDS_PER_BLOCK);
-       for (i = start + 1; i < end; i++) {
-               nid = get_nid(parent, i, false);
-               if (!nid)
-                       continue;
-               ra_node_page(sbi, nid);
-       }
+       if (!PageDirty(page))
+               goto page_out;
 
-       blk_finish_plug(&plug);
+       if (!clear_page_dirty_for_io(page))
+               goto page_out;
 
-       lock_page(page);
-       if (page->mapping != mapping) {
-               f2fs_put_page(page, 1);
-               goto repeat;
-       }
-page_hit:
-       if (!PageUptodate(page)) {
-               f2fs_put_page(page, 1);
-               return ERR_PTR(-EIO);
-       }
-       mark_page_accessed(page);
-       return page;
+       ret = f2fs_write_inline_data(inode, page);
+       inode_dec_dirty_pages(inode);
+       remove_dirty_inode(inode);
+       if (ret)
+               set_page_dirty(page);
+page_out:
+       unlock_page(page);
+release_out:
+       f2fs_put_page(page, 0);
+iput_out:
+       iput(inode);
 }
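
flush_inline_data() bails out through a ladder of labels so that every
resource taken so far (inode reference, page reference, page lock) is
released exactly once, in reverse order of acquisition. A standalone
sketch of the same unwind idiom, using plain allocations:

        #include <stdlib.h>

        int process(void)
        {
                char *a, *b;
                int ret = -1;

                a = malloc(16);
                if (!a)
                        goto out;
                b = malloc(16);
                if (!b)
                        goto free_a;

                ret = 0;        /* work with a and b here */

                free(b);
        free_a:
                free(a);
        out:
                return ret;
        }
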
 
-void sync_inode_page(struct dnode_of_data *dn)
+void move_node_page(struct page *node_page, int gc_type)
 {
-       if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) {
-               update_inode(dn->inode, dn->node_page);
-       } else if (dn->inode_page) {
-               if (!dn->inode_page_locked)
-                       lock_page(dn->inode_page);
-               update_inode(dn->inode, dn->inode_page);
-               if (!dn->inode_page_locked)
-                       unlock_page(dn->inode_page);
+       if (gc_type == FG_GC) {
+               struct f2fs_sb_info *sbi = F2FS_P_SB(node_page);
+               struct writeback_control wbc = {
+                       .sync_mode = WB_SYNC_ALL,
+                       .nr_to_write = 1,
+                       .for_reclaim = 0,
+               };
+
+               set_page_dirty(node_page);
+               f2fs_wait_on_page_writeback(node_page, NODE, true);
+
+               f2fs_bug_on(sbi, PageWriteback(node_page));
+               if (!clear_page_dirty_for_io(node_page))
+                       goto out_page;
+
+               if (NODE_MAPPING(sbi)->a_ops->writepage(node_page, &wbc))
+                       unlock_page(node_page);
+               goto release_page;
        } else {
-               update_inode_page(dn->inode);
+               /* set page dirty and write it */
+               if (!PageWriteback(node_page))
+                       set_page_dirty(node_page);
        }
+out_page:
+       unlock_page(node_page);
+release_page:
+       f2fs_put_page(node_page, 0);
 }
 
-int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
-                                       struct writeback_control *wbc)
+static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
 {
-       struct address_space *mapping = sbi->node_inode->i_mapping;
        pgoff_t index, end;
        struct pagevec pvec;
-       int step = ino ? 2 : 0;
-       int nwritten = 0, wrote = 0;
+       struct page *last_page = NULL;
 
        pagevec_init(&pvec, 0);
-
-next_step:
        index = 0;
-       end = LONG_MAX;
+       end = ULONG_MAX;
 
        while (index <= end) {
                int i, nr_pages;
-               nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
+               nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
                                PAGECACHE_TAG_DIRTY,
                                min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
                if (nr_pages == 0)
@@ -1044,59 +1282,242 @@ next_step:
                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];
 
-                       /*
-                        * flushing sequence with step:
-                        * 0. indirect nodes
-                        * 1. dentry dnodes
-                        * 2. file dnodes
-                        */
-                       if (step == 0 && IS_DNODE(page))
-                               continue;
+                       if (unlikely(f2fs_cp_error(sbi))) {
+                               f2fs_put_page(last_page, 0);
+                               pagevec_release(&pvec);
+                               return ERR_PTR(-EIO);
+                       }
+
+                       if (!IS_DNODE(page) || !is_cold_node(page))
+                               continue;
+                       if (ino_of_node(page) != ino)
+                               continue;
+
+                       lock_page(page);
+
+                       if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
+continue_unlock:
+                               unlock_page(page);
+                               continue;
+                       }
+                       if (ino_of_node(page) != ino)
+                               goto continue_unlock;
+
+                       if (!PageDirty(page)) {
+                               /* someone wrote it for us */
+                               goto continue_unlock;
+                       }
+
+                       if (last_page)
+                               f2fs_put_page(last_page, 0);
+
+                       get_page(page);
+                       last_page = page;
+                       unlock_page(page);
+               }
+               pagevec_release(&pvec);
+               cond_resched();
+       }
+       return last_page;
+}
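
last_fsync_dnode() introduces the pagevec walk that fsync_node_pages()
and sync_node_pages() below repeat: look up a batch of dirty pages by
tag, re-validate each page after locking it (the mapping can change in
between), then release the batch and reschedule. Stripped to its
skeleton, taken from the loop above:

        pagevec_init(&pvec, 0);
        index = 0;
        while (index <= end) {
                int i, nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
                                PAGECACHE_TAG_DIRTY, PAGEVEC_SIZE);
                if (nr_pages == 0)
                        break;
                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];

                        lock_page(page);
                        if (page->mapping != mapping) { /* truncated meanwhile */
                                unlock_page(page);
                                continue;
                        }
                        /* ... per-page work ... */
                        unlock_page(page);
                }
                pagevec_release(&pvec);
                cond_resched();
        }
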
+
+int fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
+                       struct writeback_control *wbc, bool atomic)
+{
+       pgoff_t index, end;
+       struct pagevec pvec;
+       int ret = 0;
+       struct page *last_page = NULL;
+       bool marked = false;
+       nid_t ino = inode->i_ino;
+       int nwritten = 0;
+
+       if (atomic) {
+               last_page = last_fsync_dnode(sbi, ino);
+               if (IS_ERR_OR_NULL(last_page))
+                       return PTR_ERR_OR_ZERO(last_page);
+       }
+retry:
+       pagevec_init(&pvec, 0);
+       index = 0;
+       end = ULONG_MAX;
+
+       while (index <= end) {
+               int i, nr_pages;
+               nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
+                               PAGECACHE_TAG_DIRTY,
+                               min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
+               if (nr_pages == 0)
+                       break;
+
+               for (i = 0; i < nr_pages; i++) {
+                       struct page *page = pvec.pages[i];
+
+                       if (unlikely(f2fs_cp_error(sbi))) {
+                               f2fs_put_page(last_page, 0);
+                               pagevec_release(&pvec);
+                               ret = -EIO;
+                               goto out;
+                       }
+
+                       if (!IS_DNODE(page) || !is_cold_node(page))
+                               continue;
+                       if (ino_of_node(page) != ino)
+                               continue;
+
+                       lock_page(page);
+
+                       if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
+continue_unlock:
+                               unlock_page(page);
+                               continue;
+                       }
+                       if (ino_of_node(page) != ino)
+                               goto continue_unlock;
+
+                       if (!PageDirty(page) && page != last_page) {
+                               /* someone wrote it for us */
+                               goto continue_unlock;
+                       }
+
+                       f2fs_wait_on_page_writeback(page, NODE, true);
+                       BUG_ON(PageWriteback(page));
+
+                       if (!atomic || page == last_page) {
+                               set_fsync_mark(page, 1);
+                               if (IS_INODE(page)) {
+                                       if (is_inode_flag_set(inode,
+                                                               FI_DIRTY_INODE))
+                                               update_inode(inode, page);
+                                       set_dentry_mark(page,
+                                               need_dentry_mark(sbi, ino));
+                               }
+                               /*  may be written by other thread */
+                               if (!PageDirty(page))
+                                       set_page_dirty(page);
+                       }
+
+                       if (!clear_page_dirty_for_io(page))
+                               goto continue_unlock;
+
+                       ret = NODE_MAPPING(sbi)->a_ops->writepage(page, wbc);
+                       if (ret) {
+                               unlock_page(page);
+                               f2fs_put_page(last_page, 0);
+                               break;
+                       } else {
+                               nwritten++;
+                       }
+
+                       if (page == last_page) {
+                               f2fs_put_page(page, 0);
+                               marked = true;
+                               break;
+                       }
+               }
+               pagevec_release(&pvec);
+               cond_resched();
+
+               if (ret || marked)
+                       break;
+       }
+       if (!ret && atomic && !marked) {
+               f2fs_msg(sbi->sb, KERN_DEBUG,
+                       "Retry to write fsync mark: ino=%u, idx=%lx",
+                                       ino, last_page->index);
+               lock_page(last_page);
+               f2fs_wait_on_page_writeback(last_page, NODE, true);
+               set_page_dirty(last_page);
+               unlock_page(last_page);
+               goto retry;
+       }
+out:
+       if (nwritten)
+               f2fs_submit_merged_bio_cond(sbi, NULL, NULL, ino, NODE, WRITE);
+       return ret ? -EIO : 0;
+}
+
+int sync_node_pages(struct f2fs_sb_info *sbi, struct writeback_control *wbc)
+{
+       pgoff_t index, end;
+       struct pagevec pvec;
+       int step = 0;
+       int nwritten = 0;
+       int ret = 0;
+
+       pagevec_init(&pvec, 0);
+
+next_step:
+       index = 0;
+       end = ULONG_MAX;
+
+       while (index <= end) {
+               int i, nr_pages;
+               nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
+                               PAGECACHE_TAG_DIRTY,
+                               min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
+               if (nr_pages == 0)
+                       break;
+
+               for (i = 0; i < nr_pages; i++) {
+                       struct page *page = pvec.pages[i];
+
+                       if (unlikely(f2fs_cp_error(sbi))) {
+                               pagevec_release(&pvec);
+                               ret = -EIO;
+                               goto out;
+                       }
+
+                       /*
+                        * flushing sequence with step:
+                        * 0. indirect nodes
+                        * 1. dentry dnodes
+                        * 2. file dnodes
+                        */
+                       if (step == 0 && IS_DNODE(page))
+                               continue;
                        if (step == 1 && (!IS_DNODE(page) ||
                                                is_cold_node(page)))
                                continue;
                        if (step == 2 && (!IS_DNODE(page) ||
                                                !is_cold_node(page)))
                                continue;
-
-                       /*
-                        * If an fsync mode,
-                        * we should not skip writing node pages.
-                        */
-                       if (ino && ino_of_node(page) == ino)
-                               lock_page(page);
-                       else if (!trylock_page(page))
+lock_node:
+                       if (!trylock_page(page))
                                continue;
 
-                       if (unlikely(page->mapping != mapping)) {
+                       if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
 continue_unlock:
                                unlock_page(page);
                                continue;
                        }
-                       if (ino && ino_of_node(page) != ino)
-                               goto continue_unlock;
 
                        if (!PageDirty(page)) {
                                /* someone wrote it for us */
                                goto continue_unlock;
                        }
 
+                       /* flush inline_data */
+                       if (is_inline_node(page)) {
+                               clear_inline_node(page);
+                               unlock_page(page);
+                               flush_inline_data(sbi, ino_of_node(page));
+                               goto lock_node;
+                       }
+
+                       f2fs_wait_on_page_writeback(page, NODE, true);
+
+                       BUG_ON(PageWriteback(page));
                        if (!clear_page_dirty_for_io(page))
                                goto continue_unlock;
 
-                       /* called by fsync() */
-                       if (ino && IS_DNODE(page)) {
-                               int mark = !is_checkpointed_node(sbi, ino);
-                               set_fsync_mark(page, 1);
-                               if (IS_INODE(page))
-                                       set_dentry_mark(page, mark);
+                       set_fsync_mark(page, 0);
+                       set_dentry_mark(page, 0);
+
+                       if (NODE_MAPPING(sbi)->a_ops->writepage(page, wbc))
+                               unlock_page(page);
+                       else
                                nwritten++;
-                       } else {
-                               set_fsync_mark(page, 0);
-                               set_dentry_mark(page, 0);
-                       }
-                       mapping->a_ops->writepage(page, wbc);
-                       wrote++;
 
                        if (--wbc->nr_to_write == 0)
                                break;
@@ -1114,112 +1535,165 @@ continue_unlock:
                step++;
                goto next_step;
        }
+out:
+       if (nwritten)
+               f2fs_submit_merged_bio(sbi, NODE, WRITE);
+       return ret;
+}
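
The three passes above flush indirect nodes first, then dentry dnodes,
then file dnodes, exactly as the comment in the loop states; each pass
expresses this as skip conditions. Restated as one predicate per step
(a standalone restatement of those conditions, not f2fs code):

        #include <stdbool.h>

        /* which pages each flushing step accepts */
        static bool step_accepts(int step, bool is_dnode, bool is_cold)
        {
                switch (step) {
                case 0:
                        return !is_dnode;               /* indirect nodes */
                case 1:
                        return is_dnode && !is_cold;    /* dentry dnodes */
                case 2:
                        return is_dnode && is_cold;     /* file dnodes */
                }
                return false;
        }
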
+
+int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
+{
+       pgoff_t index = 0, end = ULONG_MAX;
+       struct pagevec pvec;
+       int ret2 = 0, ret = 0;
+
+       pagevec_init(&pvec, 0);
 
-       if (wrote)
-               f2fs_submit_bio(sbi, NODE, wbc->sync_mode == WB_SYNC_ALL);
+       while (index <= end) {
+               int i, nr_pages;
+               nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
+                               PAGECACHE_TAG_WRITEBACK,
+                               min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
+               if (nr_pages == 0)
+                       break;
 
-       return nwritten;
+               for (i = 0; i < nr_pages; i++) {
+                       struct page *page = pvec.pages[i];
+
+                       /* until radix tree lookup accepts end_index */
+                       if (unlikely(page->index > end))
+                               continue;
+
+                       if (ino && ino_of_node(page) == ino) {
+                               f2fs_wait_on_page_writeback(page, NODE, true);
+                               if (TestClearPageError(page))
+                                       ret = -EIO;
+                       }
+               }
+               pagevec_release(&pvec);
+               cond_resched();
+       }
+
+       if (unlikely(test_and_clear_bit(AS_ENOSPC, &NODE_MAPPING(sbi)->flags)))
+               ret2 = -ENOSPC;
+       if (unlikely(test_and_clear_bit(AS_EIO, &NODE_MAPPING(sbi)->flags)))
+               ret2 = -EIO;
+       if (!ret)
+               ret = ret2;
+       return ret;
 }
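
The error handling in wait_on_node_pages_writeback() above gives a per-page write error priority over errors latched on the whole node mapping. A minimal userspace sketch of that precedence rule, assuming the -EIO/-ENOSPC values only for illustration:

/*
 * Userspace sketch (not the kernel code) of the error-reporting rule in
 * wait_on_node_pages_writeback(): a per-page I/O error (ret) takes
 * precedence over errors latched on the whole mapping (ret2).
 */
#include <stdio.h>

static int wait_result(int page_eio, int mapping_enospc, int mapping_eio)
{
	int ret = 0, ret2 = 0;

	if (page_eio)		/* TestClearPageError() on a matching page */
		ret = -5;	/* -EIO */
	if (mapping_enospc)	/* AS_ENOSPC latched on the node mapping */
		ret2 = -28;	/* -ENOSPC */
	if (mapping_eio)	/* AS_EIO latched on the node mapping */
		ret2 = -5;	/* -EIO */
	if (!ret)		/* per-page error wins if both are set */
		ret = ret2;
	return ret;
}

int main(void)
{
	printf("%d\n", wait_result(1, 1, 0));	/* -5: page error wins */
	printf("%d\n", wait_result(0, 1, 0));	/* -28: mapping ENOSPC */
	return 0;
}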
 
 static int f2fs_write_node_page(struct page *page,
                                struct writeback_control *wbc)
 {
-       struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
+       struct f2fs_sb_info *sbi = F2FS_P_SB(page);
        nid_t nid;
-       block_t new_addr;
        struct node_info ni;
+       struct f2fs_io_info fio = {
+               .sbi = sbi,
+               .type = NODE,
+               .rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
+               .page = page,
+               .encrypted_page = NULL,
+       };
 
-       wait_on_page_writeback(page);
+       trace_f2fs_writepage(page, NODE);
+
+       if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
+               goto redirty_out;
+       if (unlikely(f2fs_cp_error(sbi)))
+               goto redirty_out;
 
        /* get old block addr of this node page */
        nid = nid_of_node(page);
-       BUG_ON(page->index != nid);
+       f2fs_bug_on(sbi, page->index != nid);
+
+       if (wbc->for_reclaim) {
+               if (!down_read_trylock(&sbi->node_write))
+                       goto redirty_out;
+       } else {
+               down_read(&sbi->node_write);
+       }
 
        get_node_info(sbi, nid, &ni);
 
        /* This page is already truncated */
-       if (ni.blk_addr == NULL_ADDR) {
+       if (unlikely(ni.blk_addr == NULL_ADDR)) {
+               ClearPageUptodate(page);
                dec_page_count(sbi, F2FS_DIRTY_NODES);
+               up_read(&sbi->node_write);
                unlock_page(page);
                return 0;
        }
 
-       if (wbc->for_reclaim) {
-               dec_page_count(sbi, F2FS_DIRTY_NODES);
-               wbc->pages_skipped++;
-               set_page_dirty(page);
-               return AOP_WRITEPAGE_ACTIVATE;
-       }
-
-       mutex_lock(&sbi->node_write);
        set_page_writeback(page);
-       write_node_page(sbi, page, nid, ni.blk_addr, &new_addr);
-       set_node_addr(sbi, &ni, new_addr);
+       fio.old_blkaddr = ni.blk_addr;
+       write_node_page(nid, &fio);
+       set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
        dec_page_count(sbi, F2FS_DIRTY_NODES);
-       mutex_unlock(&sbi->node_write);
+       up_read(&sbi->node_write);
+
+       if (wbc->for_reclaim)
+               f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, NODE, WRITE);
+
        unlock_page(page);
+
+       if (unlikely(f2fs_cp_error(sbi)))
+               f2fs_submit_merged_bio(sbi, NODE, WRITE);
+
        return 0;
+
+redirty_out:
+       redirty_page_for_writepage(wbc, page);
+       return AOP_WRITEPAGE_ACTIVATE;
 }
 
-/*
- * It is very important to gather dirty pages and write at once, so that we can
- * submit a big bio without interfering other data writes.
- * Be default, 512 pages (2MB), a segment size, is quite reasonable.
- */
-#define COLLECT_DIRTY_NODES    512
 static int f2fs_write_node_pages(struct address_space *mapping,
                            struct writeback_control *wbc)
 {
-       struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
-       long nr_to_write = wbc->nr_to_write;
+       struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
+       struct blk_plug plug;
+       long diff;
 
-       /* First check balancing cached NAT entries */
-       if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK)) {
-               f2fs_sync_fs(sbi->sb, true);
-               return 0;
-       }
+       /* balancing f2fs's metadata in background */
+       f2fs_balance_fs_bg(sbi);
 
        /* collect a number of dirty node pages and write together */
-       if (get_pages(sbi, F2FS_DIRTY_NODES) < COLLECT_DIRTY_NODES)
-               return 0;
+       if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE))
+               goto skip_write;
+
+       trace_f2fs_writepages(mapping->host, wbc, NODE);
 
-       /* if mounting is failed, skip writing node pages */
-       wbc->nr_to_write = max_hw_blocks(sbi);
-       sync_node_pages(sbi, 0, wbc);
-       wbc->nr_to_write = nr_to_write - (max_hw_blocks(sbi) - wbc->nr_to_write);
+       diff = nr_pages_to_write(sbi, NODE, wbc);
+       wbc->sync_mode = WB_SYNC_NONE;
+       blk_start_plug(&plug);
+       sync_node_pages(sbi, wbc);
+       blk_finish_plug(&plug);
+       wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
+       return 0;
+
+skip_write:
+       wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
+       trace_f2fs_writepages(mapping->host, wbc, NODE);
        return 0;
 }
 
 static int f2fs_set_node_page_dirty(struct page *page)
 {
-       struct address_space *mapping = page->mapping;
-       struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
+       trace_f2fs_set_page_dirty(page, NODE);
 
-       SetPageUptodate(page);
+       if (!PageUptodate(page))
+               SetPageUptodate(page);
        if (!PageDirty(page)) {
-               __set_page_dirty_nobuffers(page);
-               inc_page_count(sbi, F2FS_DIRTY_NODES);
+               f2fs_set_page_dirty_nobuffers(page);
+               inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
                SetPagePrivate(page);
+               f2fs_trace_pid(page);
                return 1;
        }
        return 0;
 }
 
-static void f2fs_invalidate_node_page(struct page *page, unsigned long offset)
-{
-       struct inode *inode = page->mapping->host;
-       struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
-       if (PageDirty(page))
-               dec_page_count(sbi, F2FS_DIRTY_NODES);
-       ClearPagePrivate(page);
-}
-
-static int f2fs_release_node_page(struct page *page, gfp_t wait)
-{
-       ClearPagePrivate(page);
-       return 1;
-}
-
 /*
  * Structure of the f2fs node operations
  */
@@ -1227,88 +1701,108 @@ const struct address_space_operations f2fs_node_aops = {
        .writepage      = f2fs_write_node_page,
        .writepages     = f2fs_write_node_pages,
        .set_page_dirty = f2fs_set_node_page_dirty,
-       .invalidatepage = f2fs_invalidate_node_page,
-       .releasepage    = f2fs_release_node_page,
+       .invalidatepage = f2fs_invalidate_page,
+       .releasepage    = f2fs_release_page,
 };
 
-static struct free_nid *__lookup_free_nid_list(nid_t n, struct list_head *head)
+static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
+                                               nid_t n)
 {
-       struct list_head *this;
-       struct free_nid *i;
-       list_for_each(this, head) {
-               i = list_entry(this, struct free_nid, list);
-               if (i->nid == n)
-                       return i;
+       return radix_tree_lookup(&nm_i->free_nid_root, n);
+}
+
+static int __insert_nid_to_list(struct f2fs_sb_info *sbi,
+                       struct free_nid *i, enum nid_list list, bool new)
+{
+       struct f2fs_nm_info *nm_i = NM_I(sbi);
+
+       if (new) {
+               int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i);
+               if (err)
+                       return err;
        }
-       return NULL;
+
+       f2fs_bug_on(sbi, list == FREE_NID_LIST ? i->state != NID_NEW :
+                                               i->state != NID_ALLOC);
+       nm_i->nid_cnt[list]++;
+       list_add_tail(&i->list, &nm_i->nid_list[list]);
+       return 0;
 }
 
-static void __del_from_free_nid_list(struct free_nid *i)
+static void __remove_nid_from_list(struct f2fs_sb_info *sbi,
+                       struct free_nid *i, enum nid_list list, bool reuse)
 {
+       struct f2fs_nm_info *nm_i = NM_I(sbi);
+
+       f2fs_bug_on(sbi, list == FREE_NID_LIST ? i->state != NID_NEW :
+                                               i->state != NID_ALLOC);
+       nm_i->nid_cnt[list]--;
        list_del(&i->list);
-       kmem_cache_free(free_nid_slab, i);
+       if (!reuse)
+               radix_tree_delete(&nm_i->free_nid_root, i->nid);
 }
 
-static int add_free_nid(struct f2fs_nm_info *nm_i, nid_t nid, bool build)
+static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
 {
+       struct f2fs_nm_info *nm_i = NM_I(sbi);
        struct free_nid *i;
        struct nat_entry *ne;
-       bool allocated = false;
-
-       if (nm_i->fcnt > 2 * MAX_FREE_NIDS)
-               return -1;
+       int err;
 
        /* 0 nid should not be used */
-       if (nid == 0)
+       if (unlikely(nid == 0))
                return 0;
 
-       if (!build)
-               goto retry;
-
-       /* do not add allocated nids */
-       read_lock(&nm_i->nat_tree_lock);
-       ne = __lookup_nat_cache(nm_i, nid);
-       if (ne && nat_get_blkaddr(ne) != NULL_ADDR)
-               allocated = true;
-       read_unlock(&nm_i->nat_tree_lock);
-       if (allocated)
-               return 0;
-retry:
-       i = kmem_cache_alloc(free_nid_slab, GFP_NOFS);
-       if (!i) {
-               cond_resched();
-               goto retry;
+       if (build) {
+               /* do not add allocated nids */
+               ne = __lookup_nat_cache(nm_i, nid);
+               if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
+                               nat_get_blkaddr(ne) != NULL_ADDR))
+                       return 0;
        }
+
+       i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
        i->nid = nid;
        i->state = NID_NEW;
 
-       spin_lock(&nm_i->free_nid_list_lock);
-       if (__lookup_free_nid_list(nid, &nm_i->free_nid_list)) {
-               spin_unlock(&nm_i->free_nid_list_lock);
+       if (radix_tree_preload(GFP_NOFS)) {
+               kmem_cache_free(free_nid_slab, i);
+               return 0;
+       }
+
+       spin_lock(&nm_i->nid_list_lock);
+       err = __insert_nid_to_list(sbi, i, FREE_NID_LIST, true);
+       spin_unlock(&nm_i->nid_list_lock);
+       radix_tree_preload_end();
+       if (err) {
                kmem_cache_free(free_nid_slab, i);
                return 0;
        }
-       list_add_tail(&i->list, &nm_i->free_nid_list);
-       nm_i->fcnt++;
-       spin_unlock(&nm_i->free_nid_list_lock);
        return 1;
 }
 
-static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
+static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
 {
+       struct f2fs_nm_info *nm_i = NM_I(sbi);
        struct free_nid *i;
-       spin_lock(&nm_i->free_nid_list_lock);
-       i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
+       bool need_free = false;
+
+       spin_lock(&nm_i->nid_list_lock);
+       i = __lookup_free_nid_list(nm_i, nid);
        if (i && i->state == NID_NEW) {
-               __del_from_free_nid_list(i);
-               nm_i->fcnt--;
+               __remove_nid_from_list(sbi, i, FREE_NID_LIST, false);
+               need_free = true;
        }
-       spin_unlock(&nm_i->free_nid_list_lock);
+       spin_unlock(&nm_i->nid_list_lock);
+
+       if (need_free)
+               kmem_cache_free(free_nid_slab, i);
 }
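
remove_free_nid() now defers kmem_cache_free() until after nid_list_lock is dropped, which keeps allocator work out of the critical section. A small userspace sketch of the unlink-then-free pattern (the list and lock types are stand-ins, not the kernel's):

/*
 * Sketch of the "free outside the lock" pattern adopted by
 * remove_free_nid(): unlink under the lock, free after unlocking.
 */
#include <pthread.h>
#include <stdlib.h>

struct free_nid { struct free_nid *next; unsigned int nid; };

static pthread_mutex_t nid_list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct free_nid *free_list;

static void remove_nid(unsigned int nid)
{
	struct free_nid **pp, *victim = NULL;

	pthread_mutex_lock(&nid_list_lock);
	for (pp = &free_list; *pp; pp = &(*pp)->next) {
		if ((*pp)->nid == nid) {
			victim = *pp;
			*pp = victim->next;	/* unlink while locked */
			break;
		}
	}
	pthread_mutex_unlock(&nid_list_lock);

	free(victim);	/* never call the allocator under a spinlock */
}

int main(void)
{
	struct free_nid *n = malloc(sizeof(*n));

	n->nid = 42;
	n->next = NULL;
	free_list = n;
	remove_nid(42);
	return 0;
}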
 
-static void scan_nat_page(struct f2fs_nm_info *nm_i,
+static void scan_nat_page(struct f2fs_sb_info *sbi,
                        struct page *nat_page, nid_t start_nid)
 {
+       struct f2fs_nm_info *nm_i = NM_I(sbi);
        struct f2fs_nat_block *nat_blk = page_address(nat_page);
        block_t blk_addr;
        int i;
@@ -1317,44 +1811,48 @@ static void scan_nat_page(struct f2fs_nm_info *nm_i,
 
        for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
 
-               if (start_nid >= nm_i->max_nid)
+               if (unlikely(start_nid >= nm_i->max_nid))
                        break;
 
                blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
-               BUG_ON(blk_addr == NEW_ADDR);
-               if (blk_addr == NULL_ADDR) {
-                       if (add_free_nid(nm_i, start_nid, true) < 0)
-                               break;
-               }
+               f2fs_bug_on(sbi, blk_addr == NEW_ADDR);
+               if (blk_addr == NULL_ADDR)
+                       add_free_nid(sbi, start_nid, true);
        }
 }
 
-static void build_free_nids(struct f2fs_sb_info *sbi)
+static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync)
 {
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
-       struct f2fs_summary_block *sum = curseg->sum_blk;
+       struct f2fs_journal *journal = curseg->journal;
        int i = 0;
        nid_t nid = nm_i->next_scan_nid;
 
        /* Enough entries */
-       if (nm_i->fcnt > NAT_ENTRY_PER_BLOCK)
+       if (nm_i->nid_cnt[FREE_NID_LIST] >= NAT_ENTRY_PER_BLOCK)
+               return;
+
+       if (!sync && !available_free_memory(sbi, FREE_NIDS))
                return;
 
        /* readahead nat pages to be scanned */
-       ra_nat_pages(sbi, nid);
+       ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
+                                                       META_NAT, true);
+
+       down_read(&nm_i->nat_tree_lock);
 
        while (1) {
                struct page *page = get_current_nat_page(sbi, nid);
 
-               scan_nat_page(nm_i, page, nid);
+               scan_nat_page(sbi, page, nid);
                f2fs_put_page(page, 1);
 
                nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
-               if (nid >= nm_i->max_nid)
+               if (unlikely(nid >= nm_i->max_nid))
                        nid = 0;
 
-               if (i++ == FREE_NID_PAGES)
+               if (++i >= FREE_NID_PAGES)
                        break;
        }
 
@@ -1362,16 +1860,29 @@ static void build_free_nids(struct f2fs_sb_info *sbi)
        nm_i->next_scan_nid = nid;
 
        /* find free nids from current sum_pages */
-       mutex_lock(&curseg->curseg_mutex);
-       for (i = 0; i < nats_in_cursum(sum); i++) {
-               block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr);
-               nid = le32_to_cpu(nid_in_journal(sum, i));
+       down_read(&curseg->journal_rwsem);
+       for (i = 0; i < nats_in_cursum(journal); i++) {
+               block_t addr;
+
+               addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
+               nid = le32_to_cpu(nid_in_journal(journal, i));
                if (addr == NULL_ADDR)
-                       add_free_nid(nm_i, nid, true);
+                       add_free_nid(sbi, nid, true);
                else
-                       remove_free_nid(nm_i, nid);
+                       remove_free_nid(sbi, nid);
        }
-       mutex_unlock(&curseg->curseg_mutex);
+       up_read(&curseg->journal_rwsem);
+       up_read(&nm_i->nat_tree_lock);
+
+       ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
+                                       nm_i->ra_nid_pages, META_NAT, false);
+}
+
+void build_free_nids(struct f2fs_sb_info *sbi, bool sync)
+{
+       mutex_lock(&NM_I(sbi)->build_lock);
+       __build_free_nids(sbi, sync);
+       mutex_unlock(&NM_I(sbi)->build_lock);
 }
 
 /*
@@ -1383,37 +1894,36 @@ bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
 {
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        struct free_nid *i = NULL;
-       struct list_head *this;
 retry:
-       if (sbi->total_valid_node_count + 1 >= nm_i->max_nid)
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+       if (time_to_inject(sbi, FAULT_ALLOC_NID))
                return false;
+#endif
+       spin_lock(&nm_i->nid_list_lock);
 
-       spin_lock(&nm_i->free_nid_list_lock);
+       if (unlikely(nm_i->available_nids == 0)) {
+               spin_unlock(&nm_i->nid_list_lock);
+               return false;
+       }
 
        /* We should not use stale free nids created by build_free_nids */
-       if (nm_i->fcnt && !sbi->on_build_free_nids) {
-               BUG_ON(list_empty(&nm_i->free_nid_list));
-               list_for_each(this, &nm_i->free_nid_list) {
-                       i = list_entry(this, struct free_nid, list);
-                       if (i->state == NID_NEW)
-                               break;
-               }
-
-               BUG_ON(i->state != NID_NEW);
+       if (nm_i->nid_cnt[FREE_NID_LIST] && !on_build_free_nids(nm_i)) {
+               f2fs_bug_on(sbi, list_empty(&nm_i->nid_list[FREE_NID_LIST]));
+               i = list_first_entry(&nm_i->nid_list[FREE_NID_LIST],
+                                       struct free_nid, list);
                *nid = i->nid;
+
+               __remove_nid_from_list(sbi, i, FREE_NID_LIST, true);
                i->state = NID_ALLOC;
-               nm_i->fcnt--;
-               spin_unlock(&nm_i->free_nid_list_lock);
+               __insert_nid_to_list(sbi, i, ALLOC_NID_LIST, false);
+               nm_i->available_nids--;
+               spin_unlock(&nm_i->nid_list_lock);
                return true;
        }
-       spin_unlock(&nm_i->free_nid_list_lock);
+       spin_unlock(&nm_i->nid_list_lock);
 
        /* Let's scan nat pages and its caches to get free nids */
-       mutex_lock(&nm_i->build_lock);
-       sbi->on_build_free_nids = 1;
-       build_free_nids(sbi);
-       sbi->on_build_free_nids = 0;
-       mutex_unlock(&nm_i->build_lock);
+       build_free_nids(sbi, true);
        goto retry;
 }
 
@@ -1425,11 +1935,13 @@ void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        struct free_nid *i;
 
-       spin_lock(&nm_i->free_nid_list_lock);
-       i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
-       BUG_ON(!i || i->state != NID_ALLOC);
-       __del_from_free_nid_list(i);
-       spin_unlock(&nm_i->free_nid_list_lock);
+       spin_lock(&nm_i->nid_list_lock);
+       i = __lookup_free_nid_list(nm_i, nid);
+       f2fs_bug_on(sbi, !i);
+       __remove_nid_from_list(sbi, i, ALLOC_NID_LIST, false);
+       spin_unlock(&nm_i->nid_list_lock);
+
+       kmem_cache_free(free_nid_slab, i);
 }
 
 /*
@@ -1439,62 +1951,165 @@ void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
 {
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        struct free_nid *i;
+       bool need_free = false;
+
+       if (!nid)
+               return;
 
-       spin_lock(&nm_i->free_nid_list_lock);
-       i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
-       BUG_ON(!i || i->state != NID_ALLOC);
-       if (nm_i->fcnt > 2 * MAX_FREE_NIDS) {
-               __del_from_free_nid_list(i);
+       spin_lock(&nm_i->nid_list_lock);
+       i = __lookup_free_nid_list(nm_i, nid);
+       f2fs_bug_on(sbi, !i);
+
+       if (!available_free_memory(sbi, FREE_NIDS)) {
+               __remove_nid_from_list(sbi, i, ALLOC_NID_LIST, false);
+               need_free = true;
        } else {
+               __remove_nid_from_list(sbi, i, ALLOC_NID_LIST, true);
                i->state = NID_NEW;
-               nm_i->fcnt++;
+               __insert_nid_to_list(sbi, i, FREE_NID_LIST, false);
        }
-       spin_unlock(&nm_i->free_nid_list_lock);
+
+       nm_i->available_nids++;
+
+       spin_unlock(&nm_i->nid_list_lock);
+
+       if (need_free)
+               kmem_cache_free(free_nid_slab, i);
 }
 
-void recover_node_page(struct f2fs_sb_info *sbi, struct page *page,
-               struct f2fs_summary *sum, struct node_info *ni,
-               block_t new_blkaddr)
+int try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
 {
-       rewrite_node_page(sbi, page, sum, ni->blk_addr, new_blkaddr);
-       set_node_addr(sbi, ni, new_blkaddr);
-       clear_node_page_dirty(page);
+       struct f2fs_nm_info *nm_i = NM_I(sbi);
+       struct free_nid *i, *next;
+       int nr = nr_shrink;
+
+       if (nm_i->nid_cnt[FREE_NID_LIST] <= MAX_FREE_NIDS)
+               return 0;
+
+       if (!mutex_trylock(&nm_i->build_lock))
+               return 0;
+
+       spin_lock(&nm_i->nid_list_lock);
+       list_for_each_entry_safe(i, next, &nm_i->nid_list[FREE_NID_LIST],
+                                                                       list) {
+               if (nr_shrink <= 0 ||
+                               nm_i->nid_cnt[FREE_NID_LIST] <= MAX_FREE_NIDS)
+                       break;
+
+               __remove_nid_from_list(sbi, i, FREE_NID_LIST, false);
+               kmem_cache_free(free_nid_slab, i);
+               nr_shrink--;
+       }
+       spin_unlock(&nm_i->nid_list_lock);
+       mutex_unlock(&nm_i->build_lock);
+
+       return nr - nr_shrink;
+}
+
+void recover_inline_xattr(struct inode *inode, struct page *page)
+{
+       void *src_addr, *dst_addr;
+       size_t inline_size;
+       struct page *ipage;
+       struct f2fs_inode *ri;
+
+       ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
+       f2fs_bug_on(F2FS_I_SB(inode), IS_ERR(ipage));
+
+       ri = F2FS_INODE(page);
+       if (!(ri->i_inline & F2FS_INLINE_XATTR)) {
+               clear_inode_flag(inode, FI_INLINE_XATTR);
+               goto update_inode;
+       }
+
+       dst_addr = inline_xattr_addr(ipage);
+       src_addr = inline_xattr_addr(page);
+       inline_size = inline_xattr_size(inode);
+
+       f2fs_wait_on_page_writeback(ipage, NODE, true);
+       memcpy(dst_addr, src_addr, inline_size);
+update_inode:
+       update_inode(inode, ipage);
+       f2fs_put_page(ipage, 1);
+}
+
+void recover_xattr_data(struct inode *inode, struct page *page, block_t blkaddr)
+{
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
+       nid_t new_xnid = nid_of_node(page);
+       struct node_info ni;
+
+       /* 1: invalidate the previous xattr nid */
+       if (!prev_xnid)
+               goto recover_xnid;
+
+       /* Deallocate node address */
+       get_node_info(sbi, prev_xnid, &ni);
+       f2fs_bug_on(sbi, ni.blk_addr == NULL_ADDR);
+       invalidate_blocks(sbi, ni.blk_addr);
+       dec_valid_node_count(sbi, inode);
+       set_node_addr(sbi, &ni, NULL_ADDR, false);
+
+recover_xnid:
+       /* 2: allocate new xattr nid */
+       if (unlikely(!inc_valid_node_count(sbi, inode)))
+               f2fs_bug_on(sbi, 1);
+
+       remove_free_nid(sbi, new_xnid);
+       get_node_info(sbi, new_xnid, &ni);
+       ni.ino = inode->i_ino;
+       set_node_addr(sbi, &ni, NEW_ADDR, false);
+       f2fs_i_xnid_write(inode, new_xnid);
+
+       /* 3: update xattr blkaddr */
+       refresh_sit_entry(sbi, NEW_ADDR, blkaddr);
+       set_node_addr(sbi, &ni, blkaddr, false);
 }
 
 int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
 {
-       struct address_space *mapping = sbi->node_inode->i_mapping;
-       struct f2fs_node *src, *dst;
+       struct f2fs_inode *src, *dst;
        nid_t ino = ino_of_node(page);
        struct node_info old_ni, new_ni;
        struct page *ipage;
 
-       ipage = grab_cache_page(mapping, ino);
-       if (!ipage)
-               return -ENOMEM;
+       get_node_info(sbi, ino, &old_ni);
+
+       if (unlikely(old_ni.blk_addr != NULL_ADDR))
+               return -EINVAL;
+retry:
+       ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false);
+       if (!ipage) {
+               congestion_wait(BLK_RW_ASYNC, HZ/50);
+               goto retry;
+       }
 
-       /* Should not use this inode  from free nid list */
-       remove_free_nid(NM_I(sbi), ino);
+       /* Should not use this inode from free nid list */
+       remove_free_nid(sbi, ino);
 
-       get_node_info(sbi, ino, &old_ni);
-       SetPageUptodate(ipage);
+       if (!PageUptodate(ipage))
+               SetPageUptodate(ipage);
        fill_node_footer(ipage, ino, ino, 0, true);
 
-       src = (struct f2fs_node *)page_address(page);
-       dst = (struct f2fs_node *)page_address(ipage);
+       src = F2FS_INODE(page);
+       dst = F2FS_INODE(ipage);
 
-       memcpy(dst, src, (unsigned long)&src->i.i_ext - (unsigned long)&src->i);
-       dst->i.i_size = 0;
-       dst->i.i_blocks = cpu_to_le64(1);
-       dst->i.i_links = cpu_to_le32(1);
-       dst->i.i_xattr_nid = 0;
+       memcpy(dst, src, (unsigned long)&src->i_ext - (unsigned long)src);
+       dst->i_size = 0;
+       dst->i_blocks = cpu_to_le64(1);
+       dst->i_links = cpu_to_le32(1);
+       dst->i_xattr_nid = 0;
+       dst->i_inline = src->i_inline & F2FS_INLINE_XATTR;
 
        new_ni = old_ni;
        new_ni.ino = ino;
 
-       set_node_addr(sbi, &new_ni, NEW_ADDR);
+       if (unlikely(!inc_valid_node_count(sbi, NULL)))
+               WARN_ON(1);
+       set_node_addr(sbi, &new_ni, NEW_ADDR, false);
        inc_valid_inode_count(sbi);
-
+       set_page_dirty(ipage);
        f2fs_put_page(ipage, 1);
        return 0;
 }
@@ -1504,180 +2119,204 @@ int restore_node_summary(struct f2fs_sb_info *sbi,
 {
        struct f2fs_node *rn;
        struct f2fs_summary *sum_entry;
-       struct page *page;
        block_t addr;
-       int i, last_offset;
-
-       /* alloc temporal page for read node */
-       page = alloc_page(GFP_NOFS | __GFP_ZERO);
-       if (IS_ERR(page))
-               return PTR_ERR(page);
-       lock_page(page);
+       int i, idx, last_offset, nrpages;
 
        /* scan the node segment */
        last_offset = sbi->blocks_per_seg;
        addr = START_BLOCK(sbi, segno);
        sum_entry = &sum->entries[0];
 
-       for (i = 0; i < last_offset; i++, sum_entry++) {
-               /*
-                * In order to read next node page,
-                * we must clear PageUptodate flag.
-                */
-               ClearPageUptodate(page);
+       for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
+               nrpages = min(last_offset - i, BIO_MAX_PAGES);
+
+               /* readahead node pages */
+               ra_meta_pages(sbi, addr, nrpages, META_POR, true);
 
-               if (f2fs_readpage(sbi, page, addr, READ_SYNC))
-                       goto out;
+               for (idx = addr; idx < addr + nrpages; idx++) {
+                       struct page *page = get_tmp_page(sbi, idx);
 
-               lock_page(page);
-               rn = (struct f2fs_node *)page_address(page);
-               sum_entry->nid = rn->footer.nid;
-               sum_entry->version = 0;
-               sum_entry->ofs_in_node = 0;
-               addr++;
+                       rn = F2FS_NODE(page);
+                       sum_entry->nid = rn->footer.nid;
+                       sum_entry->version = 0;
+                       sum_entry->ofs_in_node = 0;
+                       sum_entry++;
+                       f2fs_put_page(page, 1);
+               }
+
+               invalidate_mapping_pages(META_MAPPING(sbi), addr,
+                                                       addr + nrpages);
        }
-       unlock_page(page);
-out:
-       __free_pages(page, 0);
        return 0;
 }
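
restore_node_summary() now walks the segment in BIO_MAX_PAGES-sized chunks so each chunk can be read ahead as a batch, instead of reading a single temporary page at a time as before. The chunking loop, reduced to a standalone sketch with illustrative numbers:

/*
 * Minimal sketch of the batching in restore_node_summary(); the
 * geometry values are illustrative only.
 */
#include <stdio.h>

#define BIO_MAX_PAGES 256

int main(void)
{
	int last_offset = 512;		/* e.g. blocks per segment */
	unsigned long addr = 4096;	/* START_BLOCK() of the segment */
	int i, nrpages;

	for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
		nrpages = last_offset - i < BIO_MAX_PAGES ?
					last_offset - i : BIO_MAX_PAGES;
		printf("readahead %d blocks at %lu\n", nrpages, addr);
		/* ...then read each page in [addr, addr + nrpages)... */
	}
	return 0;
}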
 
-static bool flush_nats_in_journal(struct f2fs_sb_info *sbi)
+static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
 {
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
-       struct f2fs_summary_block *sum = curseg->sum_blk;
+       struct f2fs_journal *journal = curseg->journal;
        int i;
 
-       mutex_lock(&curseg->curseg_mutex);
-
-       if (nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES) {
-               mutex_unlock(&curseg->curseg_mutex);
-               return false;
-       }
-
-       for (i = 0; i < nats_in_cursum(sum); i++) {
+       down_write(&curseg->journal_rwsem);
+       for (i = 0; i < nats_in_cursum(journal); i++) {
                struct nat_entry *ne;
                struct f2fs_nat_entry raw_ne;
-               nid_t nid = le32_to_cpu(nid_in_journal(sum, i));
+               nid_t nid = le32_to_cpu(nid_in_journal(journal, i));
+
+               raw_ne = nat_in_journal(journal, i);
 
-               raw_ne = nat_in_journal(sum, i);
-retry:
-               write_lock(&nm_i->nat_tree_lock);
                ne = __lookup_nat_cache(nm_i, nid);
-               if (ne) {
-                       __set_nat_cache_dirty(nm_i, ne);
-                       write_unlock(&nm_i->nat_tree_lock);
-                       continue;
-               }
-               ne = grab_nat_entry(nm_i, nid);
                if (!ne) {
-                       write_unlock(&nm_i->nat_tree_lock);
-                       goto retry;
+                       ne = grab_nat_entry(nm_i, nid);
+                       node_info_from_raw_nat(&ne->ni, &raw_ne);
+               }
+
+               /*
+                * If a free nat in the journal has not been used since the
+                * last checkpoint, remove it from the available nids now;
+                * the flush below will add it back.

+                */
+               if (!get_nat_flag(ne, IS_DIRTY) &&
+                               le32_to_cpu(raw_ne.block_addr) == NULL_ADDR) {
+                       spin_lock(&nm_i->nid_list_lock);
+                       nm_i->available_nids--;
+                       spin_unlock(&nm_i->nid_list_lock);
                }
-               nat_set_blkaddr(ne, le32_to_cpu(raw_ne.block_addr));
-               nat_set_ino(ne, le32_to_cpu(raw_ne.ino));
-               nat_set_version(ne, raw_ne.version);
+
                __set_nat_cache_dirty(nm_i, ne);
-               write_unlock(&nm_i->nat_tree_lock);
        }
-       update_nats_in_cursum(sum, -i);
-       mutex_unlock(&curseg->curseg_mutex);
-       return true;
+       update_nats_in_cursum(journal, -i);
+       up_write(&curseg->journal_rwsem);
 }
 
-/*
- * This function is called during the checkpointing process.
- */
-void flush_nat_entries(struct f2fs_sb_info *sbi)
+static void __adjust_nat_entry_set(struct nat_entry_set *nes,
+                                               struct list_head *head, int max)
+{
+       struct nat_entry_set *cur;
+
+       if (nes->entry_cnt >= max)
+               goto add_out;
+
+       list_for_each_entry(cur, head, set_list) {
+               if (cur->entry_cnt >= nes->entry_cnt) {
+                       list_add(&nes->set_list, cur->set_list.prev);
+                       return;
+               }
+       }
+add_out:
+       list_add_tail(&nes->set_list, head);
+}
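
__adjust_nat_entry_set() keeps the pending sets ordered by entry count, with any set too large for the journal appended at the tail; flushing therefore favors small sets that can still fit in the journal. A self-contained sketch of just that ordering rule (the array-based list and the capacity value are illustrative):

/*
 * Sketch of the __adjust_nat_entry_set() ordering: insert ascending by
 * entry_cnt, except sets at or above the journal capacity go straight
 * to the tail (they will be written to NAT blocks anyway).
 */
#include <stdio.h>

#define NSETS 4

struct set { unsigned int entry_cnt; };

static void adjust(struct set *order[], int *n, struct set *nes, int max)
{
	int pos = *n;	/* default: append at the tail */
	int i;

	if (nes->entry_cnt < max) {
		for (i = 0; i < *n; i++) {
			if (order[i]->entry_cnt >= nes->entry_cnt) {
				pos = i;	/* insert before first bigger set */
				break;
			}
		}
	}
	for (i = *n; i > pos; i--)
		order[i] = order[i - 1];
	order[pos] = nes;
	(*n)++;
}

int main(void)
{
	struct set s[NSETS] = { {30}, {3}, {70}, {8} };
	struct set *order[NSETS];
	int i, n = 0;

	for (i = 0; i < NSETS; i++)
		adjust(order, &n, &s[i], 38);	/* 38 ~ journal capacity */

	for (i = 0; i < n; i++)
		printf("%u ", order[i]->entry_cnt);	/* 3 8 30 70 */
	printf("\n");
	return 0;
}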
+
+static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
+                                       struct nat_entry_set *set)
 {
-       struct f2fs_nm_info *nm_i = NM_I(sbi);
        struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
-       struct f2fs_summary_block *sum = curseg->sum_blk;
-       struct list_head *cur, *n;
+       struct f2fs_journal *journal = curseg->journal;
+       nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK;
+       bool to_journal = true;
+       struct f2fs_nat_block *nat_blk;
+       struct nat_entry *ne, *cur;
        struct page *page = NULL;
-       struct f2fs_nat_block *nat_blk = NULL;
-       nid_t start_nid = 0, end_nid = 0;
-       bool flushed;
-
-       flushed = flush_nats_in_journal(sbi);
 
-       if (!flushed)
-               mutex_lock(&curseg->curseg_mutex);
+       /*
+        * there are two steps to flush nat entries:
+        * #1, flush nat entries to the journal in the current hot data
+        *     summary block.
+        * #2, flush nat entries to the nat page.
+        */
+       if (!__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL))
+               to_journal = false;
 
-       /* 1) flush dirty nat caches */
-       list_for_each_safe(cur, n, &nm_i->dirty_nat_entries) {
-               struct nat_entry *ne;
-               nid_t nid;
-               struct f2fs_nat_entry raw_ne;
-               int offset = -1;
-               block_t new_blkaddr;
+       if (to_journal) {
+               down_write(&curseg->journal_rwsem);
+       } else {
+               page = get_next_nat_page(sbi, start_nid);
+               nat_blk = page_address(page);
+               f2fs_bug_on(sbi, !nat_blk);
+       }
 
-               ne = list_entry(cur, struct nat_entry, list);
-               nid = nat_get_nid(ne);
+       /* flush dirty nats in nat entry set */
+       list_for_each_entry_safe(ne, cur, &set->entry_list, list) {
+               struct f2fs_nat_entry *raw_ne;
+               nid_t nid = nat_get_nid(ne);
+               int offset;
 
                if (nat_get_blkaddr(ne) == NEW_ADDR)
                        continue;
-               if (flushed)
-                       goto to_nat_page;
-
-               /* if there is room for nat enries in curseg->sumpage */
-               offset = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 1);
-               if (offset >= 0) {
-                       raw_ne = nat_in_journal(sum, offset);
-                       goto flush_now;
-               }
-to_nat_page:
-               if (!page || (start_nid > nid || nid > end_nid)) {
-                       if (page) {
-                               f2fs_put_page(page, 1);
-                               page = NULL;
-                       }
-                       start_nid = START_NID(nid);
-                       end_nid = start_nid + NAT_ENTRY_PER_BLOCK - 1;
 
-                       /*
-                        * get nat block with dirty flag, increased reference
-                        * count, mapped and lock
-                        */
-                       page = get_next_nat_page(sbi, start_nid);
-                       nat_blk = page_address(page);
+               if (to_journal) {
+                       offset = lookup_journal_in_cursum(journal,
+                                                       NAT_JOURNAL, nid, 1);
+                       f2fs_bug_on(sbi, offset < 0);
+                       raw_ne = &nat_in_journal(journal, offset);
+                       nid_in_journal(journal, offset) = cpu_to_le32(nid);
+               } else {
+                       raw_ne = &nat_blk->entries[nid - start_nid];
+               }
+               raw_nat_from_node_info(raw_ne, &ne->ni);
+               nat_reset_flag(ne);
+               __clear_nat_cache_dirty(NM_I(sbi), ne);
+               if (nat_get_blkaddr(ne) == NULL_ADDR) {
+                       add_free_nid(sbi, nid, false);
+                       spin_lock(&NM_I(sbi)->nid_list_lock);
+                       NM_I(sbi)->available_nids++;
+                       spin_unlock(&NM_I(sbi)->nid_list_lock);
                }
+       }
 
-               BUG_ON(!nat_blk);
-               raw_ne = nat_blk->entries[nid - start_nid];
-flush_now:
-               new_blkaddr = nat_get_blkaddr(ne);
+       if (to_journal)
+               up_write(&curseg->journal_rwsem);
+       else
+               f2fs_put_page(page, 1);
 
-               raw_ne.ino = cpu_to_le32(nat_get_ino(ne));
-               raw_ne.block_addr = cpu_to_le32(new_blkaddr);
-               raw_ne.version = nat_get_version(ne);
+       f2fs_bug_on(sbi, set->entry_cnt);
 
-               if (offset < 0) {
-                       nat_blk->entries[nid - start_nid] = raw_ne;
-               } else {
-                       nat_in_journal(sum, offset) = raw_ne;
-                       nid_in_journal(sum, offset) = cpu_to_le32(nid);
-               }
+       radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
+       kmem_cache_free(nat_entry_set_slab, set);
+}
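
__flush_nat_entry_set() picks a destination per set: the journal if every entry in the set still fits, otherwise the set's NAT block. The decision, reduced to a standalone sketch with an assumed journal capacity:

/*
 * Sketch of the journal-vs-NAT-block choice; the capacity and counts
 * are illustrative, not the on-disk constants.
 */
#include <stdbool.h>
#include <stdio.h>

static bool has_cursum_space(int used, int want, int capacity)
{
	return used + want <= capacity;
}

int main(void)
{
	int nats_in_journal = 20, capacity = 38;

	printf("%s\n", has_cursum_space(nats_in_journal, 10, capacity) ?
			"to journal" : "to NAT block");	/* to journal */
	printf("%s\n", has_cursum_space(nats_in_journal, 25, capacity) ?
			"to journal" : "to NAT block");	/* to NAT block */
	return 0;
}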
 
-               if (nat_get_blkaddr(ne) == NULL_ADDR &&
-                               add_free_nid(NM_I(sbi), nid, false) <= 0) {
-                       write_lock(&nm_i->nat_tree_lock);
-                       __del_from_nat_cache(nm_i, ne);
-                       write_unlock(&nm_i->nat_tree_lock);
-               } else {
-                       write_lock(&nm_i->nat_tree_lock);
-                       __clear_nat_cache_dirty(nm_i, ne);
-                       ne->checkpointed = true;
-                       write_unlock(&nm_i->nat_tree_lock);
-               }
+/*
+ * This function is called during the checkpointing process.
+ */
+void flush_nat_entries(struct f2fs_sb_info *sbi)
+{
+       struct f2fs_nm_info *nm_i = NM_I(sbi);
+       struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
+       struct f2fs_journal *journal = curseg->journal;
+       struct nat_entry_set *setvec[SETVEC_SIZE];
+       struct nat_entry_set *set, *tmp;
+       unsigned int found;
+       nid_t set_idx = 0;
+       LIST_HEAD(sets);
+
+       if (!nm_i->dirty_nat_cnt)
+               return;
+
+       down_write(&nm_i->nat_tree_lock);
+
+       /*
+        * if there is not enough space in the journal to store all the
+        * dirty nat entries, remove them from the journal and merge them
+        * into the nat entry sets.
+        */
+       if (!__has_cursum_space(journal, nm_i->dirty_nat_cnt, NAT_JOURNAL))
+               remove_nats_in_journal(sbi);
+
+       while ((found = __gang_lookup_nat_set(nm_i,
+                                       set_idx, SETVEC_SIZE, setvec))) {
+               unsigned idx;
+               set_idx = setvec[found - 1]->set + 1;
+               for (idx = 0; idx < found; idx++)
+                       __adjust_nat_entry_set(setvec[idx], &sets,
+                                               MAX_NAT_JENTRIES(journal));
        }
-       if (!flushed)
-               mutex_unlock(&curseg->curseg_mutex);
-       f2fs_put_page(page, 1);
 
-       /* 2) shrink nat caches if necessary */
-       try_to_free_nats(sbi, nm_i->nat_cnt - NM_WOUT_THRESHOLD);
+       /* flush dirty nats in nat entry set */
+       list_for_each_entry_safe(set, tmp, &sets, set_list)
+               __flush_nat_entry_set(sbi, set);
+
+       up_write(&nm_i->nat_tree_lock);
+
+       f2fs_bug_on(sbi, nm_i->dirty_nat_cnt);
 }
 
 static int init_node_manager(struct f2fs_sb_info *sbi)
@@ -1692,18 +2331,29 @@ static int init_node_manager(struct f2fs_sb_info *sbi)
        /* segment_count_nat includes pair segment so divide to 2. */
        nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
        nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
+
        nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;
-       nm_i->fcnt = 0;
-       nm_i->nat_cnt = 0;
 
-       INIT_LIST_HEAD(&nm_i->free_nid_list);
-       INIT_RADIX_TREE(&nm_i->nat_root, GFP_ATOMIC);
+       /* unusable nids: 0, node, meta (root is counted as a valid node) */
+       nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count -
+                                                       F2FS_RESERVED_NODE_NUM;
+       nm_i->nid_cnt[FREE_NID_LIST] = 0;
+       nm_i->nid_cnt[ALLOC_NID_LIST] = 0;
+       nm_i->nat_cnt = 0;
+       nm_i->ram_thresh = DEF_RAM_THRESHOLD;
+       nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
+       nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;
+
+       INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
+       INIT_LIST_HEAD(&nm_i->nid_list[FREE_NID_LIST]);
+       INIT_LIST_HEAD(&nm_i->nid_list[ALLOC_NID_LIST]);
+       INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
+       INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
        INIT_LIST_HEAD(&nm_i->nat_entries);
-       INIT_LIST_HEAD(&nm_i->dirty_nat_entries);
 
        mutex_init(&nm_i->build_lock);
-       spin_lock_init(&nm_i->free_nid_list_lock);
-       rwlock_init(&nm_i->nat_tree_lock);
+       spin_lock_init(&nm_i->nid_list_lock);
+       init_rwsem(&nm_i->nat_tree_lock);
 
        nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
        nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
@@ -1730,7 +2380,7 @@ int build_node_manager(struct f2fs_sb_info *sbi)
        if (err)
                return err;
 
-       build_free_nids(sbi);
+       build_free_nids(sbi, true);
        return 0;
 }
 
@@ -1739,6 +2389,7 @@ void destroy_node_manager(struct f2fs_sb_info *sbi)
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        struct free_nid *i, *next_i;
        struct nat_entry *natvec[NATVEC_SIZE];
+       struct nat_entry_set *setvec[SETVEC_SIZE];
        nid_t nid = 0;
        unsigned int found;
 
@@ -1746,28 +2397,46 @@ void destroy_node_manager(struct f2fs_sb_info *sbi)
                return;
 
        /* destroy free nid list */
-       spin_lock(&nm_i->free_nid_list_lock);
-       list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
-               BUG_ON(i->state == NID_ALLOC);
-               __del_from_free_nid_list(i);
-               nm_i->fcnt--;
+       spin_lock(&nm_i->nid_list_lock);
+       list_for_each_entry_safe(i, next_i, &nm_i->nid_list[FREE_NID_LIST],
+                                                                       list) {
+               __remove_nid_from_list(sbi, i, FREE_NID_LIST, false);
+               spin_unlock(&nm_i->nid_list_lock);
+               kmem_cache_free(free_nid_slab, i);
+               spin_lock(&nm_i->nid_list_lock);
        }
-       BUG_ON(nm_i->fcnt);
-       spin_unlock(&nm_i->free_nid_list_lock);
+       f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID_LIST]);
+       f2fs_bug_on(sbi, nm_i->nid_cnt[ALLOC_NID_LIST]);
+       f2fs_bug_on(sbi, !list_empty(&nm_i->nid_list[ALLOC_NID_LIST]));
+       spin_unlock(&nm_i->nid_list_lock);
 
        /* destroy nat cache */
-       write_lock(&nm_i->nat_tree_lock);
+       down_write(&nm_i->nat_tree_lock);
        while ((found = __gang_lookup_nat_cache(nm_i,
                                        nid, NATVEC_SIZE, natvec))) {
                unsigned idx;
+
+               nid = nat_get_nid(natvec[found - 1]) + 1;
+               for (idx = 0; idx < found; idx++)
+                       __del_from_nat_cache(nm_i, natvec[idx]);
+       }
+       f2fs_bug_on(sbi, nm_i->nat_cnt);
+
+       /* destroy nat set cache */
+       nid = 0;
+       while ((found = __gang_lookup_nat_set(nm_i,
+                                       nid, SETVEC_SIZE, setvec))) {
+               unsigned idx;
+
+               nid = setvec[found - 1]->set + 1;
                for (idx = 0; idx < found; idx++) {
-                       struct nat_entry *e = natvec[idx];
-                       nid = nat_get_nid(e) + 1;
-                       __del_from_nat_cache(nm_i, e);
+                       /* entry_cnt may be nonzero if a checkpoint error occurred */
+                       f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list));
+                       radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set);
+                       kmem_cache_free(nat_entry_set_slab, setvec[idx]);
                }
        }
-       BUG_ON(nm_i->nat_cnt);
-       write_unlock(&nm_i->nat_tree_lock);
+       up_write(&nm_i->nat_tree_lock);
 
        kfree(nm_i->nat_bitmap);
        sbi->nm_info = NULL;
@@ -1777,21 +2446,32 @@ void destroy_node_manager(struct f2fs_sb_info *sbi)
 int __init create_node_manager_caches(void)
 {
        nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
-                       sizeof(struct nat_entry), NULL);
+                       sizeof(struct nat_entry));
        if (!nat_entry_slab)
-               return -ENOMEM;
+               goto fail;
 
        free_nid_slab = f2fs_kmem_cache_create("free_nid",
-                       sizeof(struct free_nid), NULL);
-       if (!free_nid_slab) {
-               kmem_cache_destroy(nat_entry_slab);
-               return -ENOMEM;
-       }
+                       sizeof(struct free_nid));
+       if (!free_nid_slab)
+               goto destroy_nat_entry;
+
+       nat_entry_set_slab = f2fs_kmem_cache_create("nat_entry_set",
+                       sizeof(struct nat_entry_set));
+       if (!nat_entry_set_slab)
+               goto destroy_free_nid;
        return 0;
+
+destroy_free_nid:
+       kmem_cache_destroy(free_nid_slab);
+destroy_nat_entry:
+       kmem_cache_destroy(nat_entry_slab);
+fail:
+       return -ENOMEM;
 }
 
 void destroy_node_manager_caches(void)
 {
+       kmem_cache_destroy(nat_entry_set_slab);
        kmem_cache_destroy(free_nid_slab);
        kmem_cache_destroy(nat_entry_slab);
 }
diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
index 0a2d72f0024ddf88c4df50c46d0eda59a00bbb62..e7997e2403660799de6ed30605f5b090e938fd5f 100644 (file)
 /* node block offset on the NAT area dedicated to the given start node id */
 #define        NAT_BLOCK_OFFSET(start_nid) (start_nid / NAT_ENTRY_PER_BLOCK)
 
-/* # of pages to perform readahead before building free nids */
-#define FREE_NID_PAGES 4
+/* # of pages to perform synchronous readahead before building free nids */
+#define FREE_NID_PAGES 8
+#define MAX_FREE_NIDS  (NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES)
 
-/* maximum # of free node ids to produce during build_free_nids */
-#define MAX_FREE_NIDS (NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES)
+#define DEF_RA_NID_PAGES       0       /* # of nid pages to read ahead */
 
 /* maximum readahead size for node during getting data blocks */
 #define MAX_RA_NODE            128
 
-/* maximum cached nat entries to manage memory footprint */
-#define NM_WOUT_THRESHOLD      (64 * NAT_ENTRY_PER_BLOCK)
+/* control the memory footprint threshold (10MB per 1GB ram) */
+#define DEF_RAM_THRESHOLD      1
+
+/* control dirty nats ratio threshold (default: 10% over max nid count) */
+#define DEF_DIRTY_NAT_RATIO_THRESHOLD          10
+/* control total # of nats */
+#define DEF_NAT_CACHE_THRESHOLD                        100000
 
 /* vector size for gang look-up from nat cache that consists of radix tree */
 #define NATVEC_SIZE    64
+#define SETVEC_SIZE    32
 
 /* return value for read_node_page */
 #define LOCKED_PAGE    1
 
+/* For flag in struct node_info */
+enum {
+       IS_CHECKPOINTED,        /* is it checkpointed before? */
+       HAS_FSYNCED_INODE,      /* is the inode fsynced before? */
+       HAS_LAST_FSYNC,         /* has the latest node fsync mark? */
+       IS_DIRTY,               /* this nat entry is dirty? */
+};
+
 /*
  * For node information
  */
@@ -40,11 +54,11 @@ struct node_info {
        nid_t ino;              /* inode number of the node's owner */
        block_t blk_addr;       /* block address of the node */
        unsigned char version;  /* version of the node */
+       unsigned char flag;     /* for node information bits */
 };
 
 struct nat_entry {
        struct list_head list;  /* for clean or dirty nat list */
-       bool checkpointed;      /* whether it is checkpointed or not */
        struct node_info ni;    /* in-memory node information */
 };
 
@@ -57,12 +71,42 @@ struct nat_entry {
 #define nat_get_version(nat)           (nat->ni.version)
 #define nat_set_version(nat, v)                (nat->ni.version = v)
 
-#define __set_nat_cache_dirty(nm_i, ne)                                        \
-       list_move_tail(&ne->list, &nm_i->dirty_nat_entries);
-#define __clear_nat_cache_dirty(nm_i, ne)                              \
-       list_move_tail(&ne->list, &nm_i->nat_entries);
 #define inc_node_version(version)      (++version)
 
+static inline void copy_node_info(struct node_info *dst,
+                                               struct node_info *src)
+{
+       dst->nid = src->nid;
+       dst->ino = src->ino;
+       dst->blk_addr = src->blk_addr;
+       dst->version = src->version;
+       /* should not copy flag here */
+}
+
+static inline void set_nat_flag(struct nat_entry *ne,
+                               unsigned int type, bool set)
+{
+       unsigned char mask = 0x01 << type;
+       if (set)
+               ne->ni.flag |= mask;
+       else
+               ne->ni.flag &= ~mask;
+}
+
+static inline bool get_nat_flag(struct nat_entry *ne, unsigned int type)
+{
+       unsigned char mask = 0x01 << type;
+       return ne->ni.flag & mask;
+}
+
+static inline void nat_reset_flag(struct nat_entry *ne)
+{
+       /* these states can be set only after a checkpoint is done */
+       set_nat_flag(ne, IS_CHECKPOINTED, true);
+       set_nat_flag(ne, HAS_FSYNCED_INODE, false);
+       set_nat_flag(ne, HAS_LAST_FSYNC, true);
+}
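
The new flag helpers pack the four node_info states into single bits of one byte. A standalone demo of the same logic, mirroring only the header code above:

/*
 * Demo of the node_info flag bits; note nat_reset_flag() re-arms the
 * three post-checkpoint defaults but leaves IS_DIRTY alone.
 */
#include <stdbool.h>
#include <stdio.h>

enum { IS_CHECKPOINTED, HAS_FSYNCED_INODE, HAS_LAST_FSYNC, IS_DIRTY };

struct node_info { unsigned char flag; };
struct nat_entry { struct node_info ni; };

static void set_nat_flag(struct nat_entry *ne, unsigned int type, bool set)
{
	unsigned char mask = 0x01 << type;

	if (set)
		ne->ni.flag |= mask;
	else
		ne->ni.flag &= ~mask;
}

static bool get_nat_flag(struct nat_entry *ne, unsigned int type)
{
	return ne->ni.flag & (0x01 << type);
}

static void nat_reset_flag(struct nat_entry *ne)
{
	set_nat_flag(ne, IS_CHECKPOINTED, true);
	set_nat_flag(ne, HAS_FSYNCED_INODE, false);
	set_nat_flag(ne, HAS_LAST_FSYNC, true);
}

int main(void)
{
	struct nat_entry ne = { { 0 } };

	set_nat_flag(&ne, IS_DIRTY, true);
	nat_reset_flag(&ne);	/* IS_DIRTY survives the reset */
	printf("dirty=%d checkpointed=%d\n",
	       get_nat_flag(&ne, IS_DIRTY),
	       get_nat_flag(&ne, IS_CHECKPOINTED));
	return 0;
}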
+
 static inline void node_info_from_raw_nat(struct node_info *ni,
                                                struct f2fs_nat_entry *raw_ne)
 {
@@ -71,6 +115,41 @@ static inline void node_info_from_raw_nat(struct node_info *ni,
        ni->version = raw_ne->version;
 }
 
+static inline void raw_nat_from_node_info(struct f2fs_nat_entry *raw_ne,
+                                               struct node_info *ni)
+{
+       raw_ne->ino = cpu_to_le32(ni->ino);
+       raw_ne->block_addr = cpu_to_le32(ni->blk_addr);
+       raw_ne->version = ni->version;
+}
+
+static inline bool excess_dirty_nats(struct f2fs_sb_info *sbi)
+{
+       return NM_I(sbi)->dirty_nat_cnt >= NM_I(sbi)->max_nid *
+                                       NM_I(sbi)->dirty_nats_ratio / 100;
+}
+
+static inline bool excess_cached_nats(struct f2fs_sb_info *sbi)
+{
+       return NM_I(sbi)->nat_cnt >= DEF_NAT_CACHE_THRESHOLD;
+}
+
+enum mem_type {
+       FREE_NIDS,      /* indicates the free nid list */
+       NAT_ENTRIES,    /* indicates the cached nat entry */
+       DIRTY_DENTS,    /* indicates dirty dentry pages */
+       INO_ENTRIES,    /* indicates inode entries */
+       EXTENT_CACHE,   /* indicates extent cache */
+       BASE_CHECK,     /* check kernel status */
+};
+
+struct nat_entry_set {
+       struct list_head set_list;      /* link with other nat sets */
+       struct list_head entry_list;    /* link with dirty nat entries */
+       nid_t set;                      /* set number */
+       unsigned int entry_cnt;         /* the # of nat entries in set */
+};
+
 /*
  * For free nid management
  */
@@ -85,18 +164,20 @@ struct free_nid {
        int state;              /* in use or not: NID_NEW or NID_ALLOC */
 };
 
-static inline int next_free_nid(struct f2fs_sb_info *sbi, nid_t *nid)
+static inline void next_free_nid(struct f2fs_sb_info *sbi, nid_t *nid)
 {
        struct f2fs_nm_info *nm_i = NM_I(sbi);
        struct free_nid *fnid;
 
-       if (nm_i->fcnt <= 0)
-               return -1;
-       spin_lock(&nm_i->free_nid_list_lock);
-       fnid = list_entry(nm_i->free_nid_list.next, struct free_nid, list);
+       spin_lock(&nm_i->nid_list_lock);
+       if (nm_i->nid_cnt[FREE_NID_LIST] <= 0) {
+               spin_unlock(&nm_i->nid_list_lock);
+               return;
+       }
+       fnid = list_entry(nm_i->nid_list[FREE_NID_LIST].next,
+                                               struct free_nid, list);
        *nid = fnid->nid;
-       spin_unlock(&nm_i->free_nid_list_lock);
-       return 0;
+       spin_unlock(&nm_i->nid_list_lock);
 }
 
 /*
@@ -120,7 +201,7 @@ static inline pgoff_t current_nat_addr(struct f2fs_sb_info *sbi, nid_t start)
 
        block_addr = (pgoff_t)(nm_i->nat_blkaddr +
                (seg_off << sbi->log_blocks_per_seg << 1) +
-               (block_off & ((1 << sbi->log_blocks_per_seg) - 1)));
+               (block_off & (sbi->blocks_per_seg - 1)));
 
        if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
                block_addr += sbi->blocks_per_seg;
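
current_nat_addr() addresses NAT blocks inside paired segments (segment_count_nat is halved for the same reason earlier in init_node_manager()), with the NAT bitmap selecting the current copy; the new mask is equivalent because blocks_per_seg == 1 << log_blocks_per_seg. A worked example with illustrative geometry:

/*
 * Standalone walk-through of the NAT addressing math; all numbers are
 * examples, not on-disk constants.
 */
#include <stdio.h>

int main(void)
{
	unsigned long nat_blkaddr = 512;	/* start of the NAT area */
	unsigned int log_blocks_per_seg = 9;
	unsigned long blocks_per_seg = 1UL << log_blocks_per_seg;
	unsigned long block_off = 1500;		/* NAT block index for a nid */
	unsigned long seg_off = block_off >> log_blocks_per_seg;
	int in_second_copy = 1;		/* f2fs_test_bit(block_off, bitmap) */

	unsigned long block_addr = nat_blkaddr +
		(seg_off << log_blocks_per_seg << 1) +	/* skip segment pairs */
		(block_off & (blocks_per_seg - 1));	/* offset in segment */

	if (in_second_copy)
		block_addr += blocks_per_seg;		/* use the other copy */

	printf("NAT block %lu lives at block address %lu\n",
	       block_off, block_addr);
	return 0;
}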
@@ -146,79 +227,96 @@ static inline void set_to_next_nat(struct f2fs_nm_info *nm_i, nid_t start_nid)
 {
        unsigned int block_off = NAT_BLOCK_OFFSET(start_nid);
 
-       if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
-               f2fs_clear_bit(block_off, nm_i->nat_bitmap);
-       else
-               f2fs_set_bit(block_off, nm_i->nat_bitmap);
-}
-
-static inline void fill_node_footer(struct page *page, nid_t nid,
-                               nid_t ino, unsigned int ofs, bool reset)
-{
-       void *kaddr = page_address(page);
-       struct f2fs_node *rn = (struct f2fs_node *)kaddr;
-       if (reset)
-               memset(rn, 0, sizeof(*rn));
-       rn->footer.nid = cpu_to_le32(nid);
-       rn->footer.ino = cpu_to_le32(ino);
-       rn->footer.flag = cpu_to_le32(ofs << OFFSET_BIT_SHIFT);
-}
-
-static inline void copy_node_footer(struct page *dst, struct page *src)
-{
-       void *src_addr = page_address(src);
-       void *dst_addr = page_address(dst);
-       struct f2fs_node *src_rn = (struct f2fs_node *)src_addr;
-       struct f2fs_node *dst_rn = (struct f2fs_node *)dst_addr;
-       memcpy(&dst_rn->footer, &src_rn->footer, sizeof(struct node_footer));
-}
-
-static inline void fill_node_footer_blkaddr(struct page *page, block_t blkaddr)
-{
-       struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
-       struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
-       void *kaddr = page_address(page);
-       struct f2fs_node *rn = (struct f2fs_node *)kaddr;
-       rn->footer.cp_ver = ckpt->checkpoint_ver;
-       rn->footer.next_blkaddr = cpu_to_le32(blkaddr);
+       f2fs_change_bit(block_off, nm_i->nat_bitmap);
 }
 
 static inline nid_t ino_of_node(struct page *node_page)
 {
-       void *kaddr = page_address(node_page);
-       struct f2fs_node *rn = (struct f2fs_node *)kaddr;
+       struct f2fs_node *rn = F2FS_NODE(node_page);
        return le32_to_cpu(rn->footer.ino);
 }
 
 static inline nid_t nid_of_node(struct page *node_page)
 {
-       void *kaddr = page_address(node_page);
-       struct f2fs_node *rn = (struct f2fs_node *)kaddr;
+       struct f2fs_node *rn = F2FS_NODE(node_page);
        return le32_to_cpu(rn->footer.nid);
 }
 
 static inline unsigned int ofs_of_node(struct page *node_page)
 {
-       void *kaddr = page_address(node_page);
-       struct f2fs_node *rn = (struct f2fs_node *)kaddr;
+       struct f2fs_node *rn = F2FS_NODE(node_page);
        unsigned flag = le32_to_cpu(rn->footer.flag);
        return flag >> OFFSET_BIT_SHIFT;
 }
 
-static inline unsigned long long cpver_of_node(struct page *node_page)
+static inline __u64 cpver_of_node(struct page *node_page)
 {
-       void *kaddr = page_address(node_page);
-       struct f2fs_node *rn = (struct f2fs_node *)kaddr;
+       struct f2fs_node *rn = F2FS_NODE(node_page);
        return le64_to_cpu(rn->footer.cp_ver);
 }
 
 static inline block_t next_blkaddr_of_node(struct page *node_page)
 {
-       void *kaddr = page_address(node_page);
-       struct f2fs_node *rn = (struct f2fs_node *)kaddr;
+       struct f2fs_node *rn = F2FS_NODE(node_page);
        return le32_to_cpu(rn->footer.next_blkaddr);
 }
 
+static inline void fill_node_footer(struct page *page, nid_t nid,
+                               nid_t ino, unsigned int ofs, bool reset)
+{
+       struct f2fs_node *rn = F2FS_NODE(page);
+       unsigned int old_flag = 0;
+
+       if (reset)
+               memset(rn, 0, sizeof(*rn));
+       else
+               old_flag = le32_to_cpu(rn->footer.flag);
+
+       rn->footer.nid = cpu_to_le32(nid);
+       rn->footer.ino = cpu_to_le32(ino);
+
+       /* should retain the old flag bits such as COLD_BIT_SHIFT */
+       rn->footer.flag = cpu_to_le32((ofs << OFFSET_BIT_SHIFT) |
+                                       (old_flag & OFFSET_BIT_MASK));
+}
+
+static inline void copy_node_footer(struct page *dst, struct page *src)
+{
+       struct f2fs_node *src_rn = F2FS_NODE(src);
+       struct f2fs_node *dst_rn = F2FS_NODE(dst);
+       memcpy(&dst_rn->footer, &src_rn->footer, sizeof(struct node_footer));
+}
+
+static inline void fill_node_footer_blkaddr(struct page *page, block_t blkaddr)
+{
+       struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page));
+       struct f2fs_node *rn = F2FS_NODE(page);
+       size_t crc_offset = le32_to_cpu(ckpt->checksum_offset);
+       __u64 cp_ver = le64_to_cpu(ckpt->checkpoint_ver);
+
+       if (__is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG)) {
+               __u64 crc = le32_to_cpu(*((__le32 *)
+                               ((unsigned char *)ckpt + crc_offset)));
+               cp_ver |= (crc << 32);
+       }
+       rn->footer.cp_ver = cpu_to_le64(cp_ver);
+       rn->footer.next_blkaddr = cpu_to_le32(blkaddr);
+}
+
+static inline bool is_recoverable_dnode(struct page *page)
+{
+       struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page));
+       size_t crc_offset = le32_to_cpu(ckpt->checksum_offset);
+       __u64 cp_ver = cur_cp_version(ckpt);
+
+       if (__is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG)) {
+               __u64 crc = le32_to_cpu(*((__le32 *)
+                               ((unsigned char *)ckpt + crc_offset)));
+               cp_ver |= (crc << 32);
+       }
+       return cp_ver == cpver_of_node(page);
+}
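
With CP_CRC_RECOVERY_FLAG set, fill_node_footer_blkaddr() and is_recoverable_dnode() fold the checkpoint CRC into the upper 32 bits of the 64-bit version stamp, so roll-forward recovery accepts a dnode only when both the version and the CRC match. A standalone demo of the packing and comparison:

/*
 * Sketch of the cp_ver | (crc << 32) packing; values are illustrative.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t stamp_cp_ver(uint64_t cp_ver, uint32_t crc, bool crc_flag)
{
	if (crc_flag)
		cp_ver |= ((uint64_t)crc << 32);
	return cp_ver;
}

int main(void)
{
	uint64_t footer  = stamp_cp_ver(7, 0xdeadbeef, true);
	uint64_t current = stamp_cp_ver(7, 0xdeadbeef, true);
	uint64_t stale   = stamp_cp_ver(7, 0x12345678, true);

	printf("recoverable: %d\n", footer == current);	/* 1 */
	printf("recoverable: %d\n", footer == stale);	/* 0: CRC differs */
	return 0;
}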
+
 /*
  * f2fs assigns the following node offsets described as (num).
  * N = NIDS_PER_BLOCK
@@ -232,11 +330,21 @@ static inline block_t next_blkaddr_of_node(struct page *node_page)
  *    |            `- direct node (5 + N => 5 + 2N - 1)
  *    `- double indirect node (5 + 2N)
  *                 `- indirect node (6 + 2N)
- *                       `- direct node (x(N + 1))
+ *                       `- direct node
+ *                 ......
+ *                 `- indirect node ((6 + 2N) + x(N + 1))
+ *                       `- direct node
+ *                 ......
+ *                 `- indirect node ((6 + 2N) + (N - 1)(N + 1))
+ *                       `- direct node
  */
 static inline bool IS_DNODE(struct page *node_page)
 {
        unsigned int ofs = ofs_of_node(node_page);
+
+       if (f2fs_has_xattr_block(ofs))
+               return false;
+
        if (ofs == 3 || ofs == 4 + NIDS_PER_BLOCK ||
                        ofs == 5 + 2 * NIDS_PER_BLOCK)
                return false;
@@ -248,22 +356,23 @@ static inline bool IS_DNODE(struct page *node_page)
        return true;
 }
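
With the in-tree NIDS_PER_BLOCK of 1018, the offsets the diagram reserves for non-data nodes work out to 3, 1022 (4 + N), 2041 (5 + 2N), and 2042 + 1019k for k = 0 .. N - 1. A compact restatement of that rule (illustration only; the real IS_DNODE() above also filters out xattr blocks):

        static bool is_non_dnode_ofs(unsigned int ofs, unsigned int n)
        {
                if (ofs == 3 || ofs == 4 + n || ofs == 5 + 2 * n)
                        return true;
                /* indirect nodes hanging off the double indirect node */
                return ofs >= 6 + 2 * n && (ofs - 6 - 2 * n) % (n + 1) == 0;
        }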
 
-static inline void set_nid(struct page *p, int off, nid_t nid, bool i)
+static inline int set_nid(struct page *p, int off, nid_t nid, bool i)
 {
-       struct f2fs_node *rn = (struct f2fs_node *)page_address(p);
+       struct f2fs_node *rn = F2FS_NODE(p);
 
-       wait_on_page_writeback(p);
+       f2fs_wait_on_page_writeback(p, NODE, true);
 
        if (i)
                rn->i.i_nid[off - NODE_DIR1_BLOCK] = cpu_to_le32(nid);
        else
                rn->in.nid[off] = cpu_to_le32(nid);
-       set_page_dirty(p);
+       return set_page_dirty(p);
 }
 
 static inline nid_t get_nid(struct page *p, int off, bool i)
 {
-       struct f2fs_node *rn = (struct f2fs_node *)page_address(p);
+       struct f2fs_node *rn = F2FS_NODE(p);
+
        if (i)
                return le32_to_cpu(rn->i.i_nid[off - NODE_DIR1_BLOCK]);
        return le32_to_cpu(rn->in.nid[off]);
@@ -275,26 +384,6 @@ static inline nid_t get_nid(struct page *p, int off, bool i)
  *  - Mark cold node blocks in their node footer
  *  - Mark cold data pages in page cache
  */
-static inline int is_cold_file(struct inode *inode)
-{
-       return F2FS_I(inode)->i_advise & FADVISE_COLD_BIT;
-}
-
-static inline void set_cold_file(struct inode *inode)
-{
-       F2FS_I(inode)->i_advise |= FADVISE_COLD_BIT;
-}
-
-static inline int is_cp_file(struct inode *inode)
-{
-       return F2FS_I(inode)->i_advise & FADVISE_CP_BIT;
-}
-
-static inline void set_cp_file(struct inode *inode)
-{
-       F2FS_I(inode)->i_advise |= FADVISE_CP_BIT;
-}
-
 static inline int is_cold_data(struct page *page)
 {
        return PageChecked(page);
@@ -310,33 +399,34 @@ static inline void clear_cold_data(struct page *page)
        ClearPageChecked(page);
 }
 
-static inline int is_cold_node(struct page *page)
+static inline int is_node(struct page *page, int type)
 {
-       void *kaddr = page_address(page);
-       struct f2fs_node *rn = (struct f2fs_node *)kaddr;
-       unsigned int flag = le32_to_cpu(rn->footer.flag);
-       return flag & (0x1 << COLD_BIT_SHIFT);
+       struct f2fs_node *rn = F2FS_NODE(page);
+       return le32_to_cpu(rn->footer.flag) & (1 << type);
 }
 
-static inline unsigned char is_fsync_dnode(struct page *page)
+#define is_cold_node(page)     is_node(page, COLD_BIT_SHIFT)
+#define is_fsync_dnode(page)   is_node(page, FSYNC_BIT_SHIFT)
+#define is_dent_dnode(page)    is_node(page, DENT_BIT_SHIFT)
+
+static inline int is_inline_node(struct page *page)
 {
-       void *kaddr = page_address(page);
-       struct f2fs_node *rn = (struct f2fs_node *)kaddr;
-       unsigned int flag = le32_to_cpu(rn->footer.flag);
-       return flag & (0x1 << FSYNC_BIT_SHIFT);
+       return PageChecked(page);
 }
 
-static inline unsigned char is_dent_dnode(struct page *page)
+static inline void set_inline_node(struct page *page)
 {
-       void *kaddr = page_address(page);
-       struct f2fs_node *rn = (struct f2fs_node *)kaddr;
-       unsigned int flag = le32_to_cpu(rn->footer.flag);
-       return flag & (0x1 << DENT_BIT_SHIFT);
+       SetPageChecked(page);
+}
+
+static inline void clear_inline_node(struct page *page)
+{
+       ClearPageChecked(page);
 }
 
 static inline void set_cold_node(struct inode *inode, struct page *page)
 {
-       struct f2fs_node *rn = (struct f2fs_node *)page_address(page);
+       struct f2fs_node *rn = F2FS_NODE(page);
        unsigned int flag = le32_to_cpu(rn->footer.flag);
 
        if (S_ISDIR(inode->i_mode))
@@ -346,26 +436,15 @@ static inline void set_cold_node(struct inode *inode, struct page *page)
        rn->footer.flag = cpu_to_le32(flag);
 }
 
-static inline void set_fsync_mark(struct page *page, int mark)
-{
-       void *kaddr = page_address(page);
-       struct f2fs_node *rn = (struct f2fs_node *)kaddr;
-       unsigned int flag = le32_to_cpu(rn->footer.flag);
-       if (mark)
-               flag |= (0x1 << FSYNC_BIT_SHIFT);
-       else
-               flag &= ~(0x1 << FSYNC_BIT_SHIFT);
-       rn->footer.flag = cpu_to_le32(flag);
-}
-
-static inline void set_dentry_mark(struct page *page, int mark)
+static inline void set_mark(struct page *page, int mark, int type)
 {
-       void *kaddr = page_address(page);
-       struct f2fs_node *rn = (struct f2fs_node *)kaddr;
+       struct f2fs_node *rn = F2FS_NODE(page);
        unsigned int flag = le32_to_cpu(rn->footer.flag);
        if (mark)
-               flag |= (0x1 << DENT_BIT_SHIFT);
+               flag |= (0x1 << type);
        else
-               flag &= ~(0x1 << DENT_BIT_SHIFT);
+               flag &= ~(0x1 << type);
        rn->footer.flag = cpu_to_le32(flag);
 }
+#define set_dentry_mark(page, mark)    set_mark(page, mark, DENT_BIT_SHIFT)
+#define set_fsync_mark(page, mark)     set_mark(page, mark, FSYNC_BIT_SHIFT)
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 60c8a5097058f02e02c28979dad759a7d734438a..192da3de4fbfcb9144b4c96ce131ab73fcb24bee 100644 (file)
 #include "node.h"
 #include "segment.h"
 
+/*
+ * Roll forward recovery scenarios.
+ *
+ * [Term] F: fsync_mark, D: dentry_mark
+ *
+ * 1. inode(x) | CP | inode(x) | dnode(F)
+ * -> Update the latest inode(x).
+ *
+ * 2. inode(x) | CP | inode(F) | dnode(F)
+ * -> No problem.
+ *
+ * 3. inode(x) | CP | dnode(F) | inode(x)
+ * -> Recover to the latest dnode(F), and drop the last inode(x)
+ *
+ * 4. inode(x) | CP | dnode(F) | inode(F)
+ * -> No problem.
+ *
+ * 5. CP | inode(x) | dnode(F)
+ * -> The inode(DF) was missing. Should drop this dnode(F).
+ *
+ * 6. CP | inode(DF) | dnode(F)
+ * -> No problem.
+ *
+ * 7. CP | dnode(F) | inode(DF)
+ * -> If f2fs_iget fails, then goto next to find inode(DF).
+ *
+ * 8. CP | dnode(F) | inode(x)
+ * -> If f2fs_iget fails, then goto next to find inode(DF).
+ *    But it will fail due to no inode(DF).
+ */
+
 static struct kmem_cache *fsync_entry_slab;
 
 bool space_for_roll_forward(struct f2fs_sb_info *sbi)
 {
-       if (sbi->last_valid_block_count + sbi->alloc_valid_block_count
-                       > sbi->user_block_count)
+       s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count);
+
+       if (sbi->last_valid_block_count + nalloc > sbi->user_block_count)
                return false;
        return true;
 }
@@ -27,376 +59,563 @@ bool space_for_roll_forward(struct f2fs_sb_info *sbi)
 static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
                                                                nid_t ino)
 {
-       struct list_head *this;
        struct fsync_inode_entry *entry;
 
-       list_for_each(this, head) {
-               entry = list_entry(this, struct fsync_inode_entry, list);
+       list_for_each_entry(entry, head, list)
                if (entry->inode->i_ino == ino)
                        return entry;
-       }
+
        return NULL;
 }
 
-static int recover_dentry(struct page *ipage, struct inode *inode)
+static struct fsync_inode_entry *add_fsync_inode(struct f2fs_sb_info *sbi,
+                                       struct list_head *head, nid_t ino)
+{
+       struct inode *inode;
+       struct fsync_inode_entry *entry;
+
+       inode = f2fs_iget_retry(sbi->sb, ino);
+       if (IS_ERR(inode))
+               return ERR_CAST(inode);
+
+       entry = f2fs_kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
+       entry->inode = inode;
+       list_add_tail(&entry->list, head);
+
+       return entry;
+}
+
+static void del_fsync_inode(struct fsync_inode_entry *entry)
 {
-       struct f2fs_node *raw_node = (struct f2fs_node *)kmap(ipage);
-       struct f2fs_inode *raw_inode = &(raw_node->i);
-       struct qstr name;
+       iput(entry->inode);
+       list_del(&entry->list);
+       kmem_cache_free(fsync_entry_slab, entry);
+}
+
+static int recover_dentry(struct inode *inode, struct page *ipage,
+                                               struct list_head *dir_list)
+{
+       struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
+       nid_t pino = le32_to_cpu(raw_inode->i_pino);
        struct f2fs_dir_entry *de;
+       struct fscrypt_name fname;
        struct page *page;
-       struct inode *dir;
+       struct inode *dir, *einode;
+       struct fsync_inode_entry *entry;
        int err = 0;
+       char *name;
+
+       entry = get_fsync_inode(dir_list, pino);
+       if (!entry) {
+               entry = add_fsync_inode(F2FS_I_SB(inode), dir_list, pino);
+               if (IS_ERR(entry)) {
+                       dir = ERR_CAST(entry);
+                       err = PTR_ERR(entry);
+                       goto out;
+               }
+       }
 
-       if (!is_dent_dnode(ipage))
-               goto out;
+       dir = entry->inode;
 
-       dir = f2fs_iget(inode->i_sb, le32_to_cpu(raw_inode->i_pino));
-       if (IS_ERR(dir)) {
-               err = PTR_ERR(dir);
+       memset(&fname, 0, sizeof(struct fscrypt_name));
+       fname.disk_name.len = le32_to_cpu(raw_inode->i_namelen);
+       fname.disk_name.name = raw_inode->i_name;
+
+       if (unlikely(fname.disk_name.len > F2FS_NAME_LEN)) {
+               WARN_ON(1);
+               err = -ENAMETOOLONG;
                goto out;
        }
+retry:
+       de = __f2fs_find_entry(dir, &fname, &page);
+       if (de && inode->i_ino == le32_to_cpu(de->ino))
+               goto out_unmap_put;
 
-       name.len = le32_to_cpu(raw_inode->i_namelen);
-       name.name = raw_inode->i_name;
-
-       de = f2fs_find_entry(dir, &name, &page);
        if (de) {
-               kunmap(page);
-               f2fs_put_page(page, 0);
+               einode = f2fs_iget_retry(inode->i_sb, le32_to_cpu(de->ino));
+               if (IS_ERR(einode)) {
+                       WARN_ON(1);
+                       err = PTR_ERR(einode);
+                       if (err == -ENOENT)
+                               err = -EEXIST;
+                       goto out_unmap_put;
+               }
+               err = acquire_orphan_inode(F2FS_I_SB(inode));
+               if (err) {
+                       iput(einode);
+                       goto out_unmap_put;
+               }
+               f2fs_delete_entry(de, page, dir, einode);
+               iput(einode);
+               goto retry;
+       } else if (IS_ERR(page)) {
+               err = PTR_ERR(page);
        } else {
-               err = __f2fs_add_link(dir, &name, inode);
+               err = __f2fs_do_add_link(dir, &fname, inode,
+                                       inode->i_ino, inode->i_mode);
        }
-       iput(dir);
+       if (err == -ENOMEM)
+               goto retry;
+       goto out;
+
+out_unmap_put:
+       f2fs_dentry_kunmap(dir, page);
+       f2fs_put_page(page, 0);
 out:
-       kunmap(ipage);
+       if (file_enc_name(inode))
+               name = "<encrypted>";
+       else
+               name = raw_inode->i_name;
+       f2fs_msg(inode->i_sb, KERN_NOTICE,
+                       "%s: ino = %x, name = %s, dir = %lx, err = %d",
+                       __func__, ino_of_node(ipage), name,
+                       IS_ERR(dir) ? 0 : dir->i_ino, err);
        return err;
 }
 
-static int recover_inode(struct inode *inode, struct page *node_page)
+static void recover_inode(struct inode *inode, struct page *page)
 {
-       void *kaddr = page_address(node_page);
-       struct f2fs_node *raw_node = (struct f2fs_node *)kaddr;
-       struct f2fs_inode *raw_inode = &(raw_node->i);
-
-       inode->i_mode = le16_to_cpu(raw_inode->i_mode);
-       i_size_write(inode, le64_to_cpu(raw_inode->i_size));
-       inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
-       inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
-       inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
-       inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
-       inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
-       inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
-
-       return recover_dentry(node_page, inode);
+       struct f2fs_inode *raw = F2FS_INODE(page);
+       char *name;
+
+       inode->i_mode = le16_to_cpu(raw->i_mode);
+       f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
+       inode->i_atime.tv_sec = le64_to_cpu(raw->i_atime);
+       inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
+       inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
+       inode->i_atime.tv_nsec = le32_to_cpu(raw->i_atime_nsec);
+       inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
+       inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);
+
+       F2FS_I(inode)->i_advise = raw->i_advise;
+
+       if (file_enc_name(inode))
+               name = "<encrypted>";
+       else
+               name = F2FS_INODE(page)->i_name;
+
+       f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode: ino = %x, name = %s",
+                       ino_of_node(page), name);
 }
 
 static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
 {
-       unsigned long long cp_ver = le64_to_cpu(sbi->ckpt->checkpoint_ver);
        struct curseg_info *curseg;
-       struct page *page;
+       struct page *page = NULL;
        block_t blkaddr;
        int err = 0;
 
        /* get node pages in the current segment */
        curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
-       blkaddr = START_BLOCK(sbi, curseg->segno) + curseg->next_blkoff;
-
-       /* read node page */
-       page = alloc_page(GFP_F2FS_ZERO);
-       if (IS_ERR(page))
-               return PTR_ERR(page);
-       lock_page(page);
+       blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
 
        while (1) {
                struct fsync_inode_entry *entry;
 
-               err = f2fs_readpage(sbi, page, blkaddr, READ_SYNC);
-               if (err)
-                       goto out;
+               if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
+                       return 0;
 
-               lock_page(page);
+               page = get_tmp_page(sbi, blkaddr);
 
-               if (cp_ver != cpver_of_node(page))
-                       goto unlock_out;
+               if (!is_recoverable_dnode(page))
+                       break;
 
                if (!is_fsync_dnode(page))
                        goto next;
 
                entry = get_fsync_inode(head, ino_of_node(page));
-               if (entry) {
-                       entry->blkaddr = blkaddr;
-                       if (IS_INODE(page) && is_dent_dnode(page))
-                               set_inode_flag(F2FS_I(entry->inode),
-                                                       FI_INC_LINK);
-               } else {
+               if (!entry) {
                        if (IS_INODE(page) && is_dent_dnode(page)) {
                                err = recover_inode_page(sbi, page);
                                if (err)
-                                       goto unlock_out;
-                       }
-
-                       /* add this fsync inode to the list */
-                       entry = kmem_cache_alloc(fsync_entry_slab, GFP_NOFS);
-                       if (!entry) {
-                               err = -ENOMEM;
-                               goto unlock_out;
+                                       break;
                        }
 
-                       entry->inode = f2fs_iget(sbi->sb, ino_of_node(page));
-                       if (IS_ERR(entry->inode)) {
-                               err = PTR_ERR(entry->inode);
-                               kmem_cache_free(fsync_entry_slab, entry);
-                               goto unlock_out;
-                       }
-
-                       list_add_tail(&entry->list, head);
-                       entry->blkaddr = blkaddr;
-               }
-               if (IS_INODE(page)) {
-                       err = recover_inode(entry->inode, page);
-                       if (err == -ENOENT) {
-                               goto next;
-                       } else if (err) {
-                               err = -EINVAL;
-                               goto unlock_out;
+                       /*
+                        * CP | dnode(F) | inode(DF)
+                        * For this case, we should not give up now.
+                        */
+                       entry = add_fsync_inode(sbi, head, ino_of_node(page));
+                       if (IS_ERR(entry)) {
+                               err = PTR_ERR(entry);
+                               if (err == -ENOENT) {
+                                       err = 0;
+                                       goto next;
+                               }
+                               break;
                        }
                }
+               entry->blkaddr = blkaddr;
+
+               if (IS_INODE(page) && is_dent_dnode(page))
+                       entry->last_dentry = blkaddr;
 next:
                /* check next segment */
                blkaddr = next_blkaddr_of_node(page);
+               f2fs_put_page(page, 1);
+
+               ra_meta_pages_cond(sbi, blkaddr);
        }
-unlock_out:
-       unlock_page(page);
-out:
-       __free_pages(page, 0);
+       f2fs_put_page(page, 1);
        return err;
 }
 
-static void destroy_fsync_dnodes(struct f2fs_sb_info *sbi,
-                                       struct list_head *head)
+static void destroy_fsync_dnodes(struct list_head *head)
 {
        struct fsync_inode_entry *entry, *tmp;
 
-       list_for_each_entry_safe(entry, tmp, head, list) {
-               iput(entry->inode);
-               list_del(&entry->list);
-               kmem_cache_free(fsync_entry_slab, entry);
-       }
+       list_for_each_entry_safe(entry, tmp, head, list)
+               del_fsync_inode(entry);
 }
 
-static void check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
-                                               block_t blkaddr)
+static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
+                       block_t blkaddr, struct dnode_of_data *dn)
 {
        struct seg_entry *sentry;
        unsigned int segno = GET_SEGNO(sbi, blkaddr);
-       unsigned short blkoff = GET_SEGOFF_FROM_SEG0(sbi, blkaddr) &
-                                       (sbi->blocks_per_seg - 1);
+       unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
+       struct f2fs_summary_block *sum_node;
        struct f2fs_summary sum;
-       nid_t ino;
-       void *kaddr;
+       struct page *sum_page, *node_page;
+       struct dnode_of_data tdn = *dn;
+       nid_t ino, nid;
        struct inode *inode;
-       struct page *node_page;
+       unsigned int offset;
        block_t bidx;
        int i;
 
        sentry = get_seg_entry(sbi, segno);
        if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
-               return;
+               return 0;
 
        /* Get the previous summary */
        for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) {
                struct curseg_info *curseg = CURSEG_I(sbi, i);
                if (curseg->segno == segno) {
                        sum = curseg->sum_blk->entries[blkoff];
-                       break;
+                       goto got_it;
                }
        }
-       if (i > CURSEG_COLD_DATA) {
-               struct page *sum_page = get_sum_page(sbi, segno);
-               struct f2fs_summary_block *sum_node;
-               kaddr = page_address(sum_page);
-               sum_node = (struct f2fs_summary_block *)kaddr;
-               sum = sum_node->entries[blkoff];
-               f2fs_put_page(sum_page, 1);
+
+       sum_page = get_sum_page(sbi, segno);
+       sum_node = (struct f2fs_summary_block *)page_address(sum_page);
+       sum = sum_node->entries[blkoff];
+       f2fs_put_page(sum_page, 1);
+got_it:
+       /* Use the locked dnode page and inode */
+       nid = le32_to_cpu(sum.nid);
+       if (dn->inode->i_ino == nid) {
+               tdn.nid = nid;
+               if (!dn->inode_page_locked)
+                       lock_page(dn->inode_page);
+               tdn.node_page = dn->inode_page;
+               tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
+               goto truncate_out;
+       } else if (dn->nid == nid) {
+               tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
+               goto truncate_out;
        }
 
        /* Get the node page */
-       node_page = get_node_page(sbi, le32_to_cpu(sum.nid));
-       bidx = start_bidx_of_node(ofs_of_node(node_page)) +
-                               le16_to_cpu(sum.ofs_in_node);
+       node_page = get_node_page(sbi, nid);
+       if (IS_ERR(node_page))
+               return PTR_ERR(node_page);
+
+       offset = ofs_of_node(node_page);
        ino = ino_of_node(node_page);
        f2fs_put_page(node_page, 1);
 
-       /* Deallocate previous index in the node page */
-       inode = f2fs_iget(sbi->sb, ino);
-       if (IS_ERR(inode))
-               return;
+       if (ino != dn->inode->i_ino) {
+               /* Deallocate previous index in the node page */
+               inode = f2fs_iget_retry(sbi->sb, ino);
+               if (IS_ERR(inode))
+                       return PTR_ERR(inode);
+       } else {
+               inode = dn->inode;
+       }
+
+       bidx = start_bidx_of_node(offset, inode) + le16_to_cpu(sum.ofs_in_node);
+
+       /*
+        * If the inode page is locked, unlock it temporarily; its
+        * reference count is still held, so the page stays alive.
+        */
+       if (ino == dn->inode->i_ino && dn->inode_page_locked)
+               unlock_page(dn->inode_page);
+
+       set_new_dnode(&tdn, inode, NULL, NULL, 0);
+       if (get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
+               goto out;
+
+       if (tdn.data_blkaddr == blkaddr)
+               truncate_data_blocks_range(&tdn, 1);
+
+       f2fs_put_dnode(&tdn);
+out:
+       if (ino != dn->inode->i_ino)
+               iput(inode);
+       else if (dn->inode_page_locked)
+               lock_page(dn->inode_page);
+       return 0;
 
-       truncate_hole(inode, bidx, bidx + 1);
-       iput(inode);
+truncate_out:
+       if (datablock_addr(tdn.node_page, tdn.ofs_in_node) == blkaddr)
+               truncate_data_blocks_range(&tdn, 1);
+       if (dn->inode->i_ino == nid && !dn->inode_page_locked)
+               unlock_page(dn->inode_page);
+       return 0;
 }
 
 static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
                                        struct page *page, block_t blkaddr)
 {
-       unsigned int start, end;
        struct dnode_of_data dn;
-       struct f2fs_summary sum;
        struct node_info ni;
-       int err = 0;
-       int ilock;
+       unsigned int start, end;
+       int err = 0, recovered = 0;
+
+       /* step 1: recover xattr */
+       if (IS_INODE(page)) {
+               recover_inline_xattr(inode, page);
+       } else if (f2fs_has_xattr_block(ofs_of_node(page))) {
+               /*
+                * Deprecated; xattr blocks should be found from cold log.
+                * But, we should retain this for backward compatibility.
+                */
+               recover_xattr_data(inode, page, blkaddr);
+               goto out;
+       }
 
-       start = start_bidx_of_node(ofs_of_node(page));
-       if (IS_INODE(page))
-               end = start + ADDRS_PER_INODE;
-       else
-               end = start + ADDRS_PER_BLOCK;
+       /* step 2: recover inline data */
+       if (recover_inline_data(inode, page))
+               goto out;
 
-       ilock = mutex_lock_op(sbi);
-       set_new_dnode(&dn, inode, NULL, NULL, 0);
+       /* step 3: recover data indices */
+       start = start_bidx_of_node(ofs_of_node(page), inode);
+       end = start + ADDRS_PER_PAGE(page, inode);
 
+       set_new_dnode(&dn, inode, NULL, NULL, 0);
+retry_dn:
        err = get_dnode_of_data(&dn, start, ALLOC_NODE);
        if (err) {
-               mutex_unlock_op(sbi, ilock);
-               return err;
+               if (err == -ENOMEM) {
+                       congestion_wait(BLK_RW_ASYNC, HZ/50);
+                       goto retry_dn;
+               }
+               goto out;
        }
 
-       wait_on_page_writeback(dn.node_page);
+       f2fs_wait_on_page_writeback(dn.node_page, NODE, true);
 
        get_node_info(sbi, dn.nid, &ni);
-       BUG_ON(ni.ino != ino_of_node(page));
-       BUG_ON(ofs_of_node(dn.node_page) != ofs_of_node(page));
+       f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
+       f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page));
 
-       for (; start < end; start++) {
+       for (; start < end; start++, dn.ofs_in_node++) {
                block_t src, dest;
 
                src = datablock_addr(dn.node_page, dn.ofs_in_node);
                dest = datablock_addr(page, dn.ofs_in_node);
 
-               if (src != dest && dest != NEW_ADDR && dest != NULL_ADDR) {
+               /* skip recovering if dest is the same as src */
+               if (src == dest)
+                       continue;
+
+               /* dest is invalid, just invalidate src block */
+               if (dest == NULL_ADDR) {
+                       truncate_data_blocks_range(&dn, 1);
+                       continue;
+               }
+
+               if (!file_keep_isize(inode) &&
+                               (i_size_read(inode) <= (start << PAGE_SHIFT)))
+                       f2fs_i_size_write(inode, (start + 1) << PAGE_SHIFT);
+
+               /*
+                * dest is a reserved block; invalidate the src block
+                * and then reserve one new block in the dnode page.
+                */
+               if (dest == NEW_ADDR) {
+                       truncate_data_blocks_range(&dn, 1);
+                       reserve_new_block(&dn);
+                       continue;
+               }
+
+               /* dest is valid block, try to recover from src to dest */
+               if (is_valid_blkaddr(sbi, dest, META_POR)) {
+
                        if (src == NULL_ADDR) {
-                               int err = reserve_new_block(&dn);
+                               err = reserve_new_block(&dn);
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+                               while (err)
+                                       err = reserve_new_block(&dn);
+#endif
                                /* We should not get -ENOSPC */
-                               BUG_ON(err);
+                               f2fs_bug_on(sbi, err);
+                               if (err)
+                                       goto err;
                        }
-
+retry_prev:
                        /* Check the previous node page having this index */
-                       check_index_in_prev_nodes(sbi, dest);
-
-                       set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);
+                       err = check_index_in_prev_nodes(sbi, dest, &dn);
+                       if (err) {
+                               if (err == -ENOMEM) {
+                                       congestion_wait(BLK_RW_ASYNC, HZ/50);
+                                       goto retry_prev;
+                               }
+                               goto err;
+                       }
 
                        /* write dummy data page */
-                       recover_data_page(sbi, NULL, &sum, src, dest);
-                       update_extent_cache(dest, &dn);
+                       f2fs_replace_block(sbi, &dn, src, dest,
+                                               ni.version, false, false);
+                       recovered++;
                }
-               dn.ofs_in_node++;
        }
 
-       /* write node page in place */
-       set_summary(&sum, dn.nid, 0, 0);
-       if (IS_INODE(dn.node_page))
-               sync_inode_page(&dn);
-
        copy_node_footer(dn.node_page, page);
        fill_node_footer(dn.node_page, dn.nid, ni.ino,
                                        ofs_of_node(page), false);
        set_page_dirty(dn.node_page);
-
-       recover_node_page(sbi, dn.node_page, &sum, &ni, blkaddr);
+err:
        f2fs_put_dnode(&dn);
-       mutex_unlock_op(sbi, ilock);
-       return 0;
+out:
+       f2fs_msg(sbi->sb, KERN_NOTICE,
+               "recover_data: ino = %lx (i_size: %s) recovered = %d, err = %d",
+               inode->i_ino,
+               file_keep_isize(inode) ? "keep" : "recover",
+               recovered, err);
+       return err;
 }
 
-static int recover_data(struct f2fs_sb_info *sbi,
-                               struct list_head *head, int type)
+static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
+                                               struct list_head *dir_list)
 {
-       unsigned long long cp_ver = le64_to_cpu(sbi->ckpt->checkpoint_ver);
        struct curseg_info *curseg;
-       struct page *page;
+       struct page *page = NULL;
        int err = 0;
        block_t blkaddr;
 
        /* get node pages in the current segment */
-       curseg = CURSEG_I(sbi, type);
+       curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
        blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
 
-       /* read node page */
-       page = alloc_page(GFP_NOFS | __GFP_ZERO);
-       if (IS_ERR(page))
-               return -ENOMEM;
-
-       lock_page(page);
-
        while (1) {
                struct fsync_inode_entry *entry;
 
-               err = f2fs_readpage(sbi, page, blkaddr, READ_SYNC);
-               if (err)
-                       goto out;
+               if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
+                       break;
 
-               lock_page(page);
+               ra_meta_pages_cond(sbi, blkaddr);
 
-               if (cp_ver != cpver_of_node(page))
-                       goto unlock_out;
+               page = get_tmp_page(sbi, blkaddr);
 
-               entry = get_fsync_inode(head, ino_of_node(page));
+               if (!is_recoverable_dnode(page)) {
+                       f2fs_put_page(page, 1);
+                       break;
+               }
+
+               entry = get_fsync_inode(inode_list, ino_of_node(page));
                if (!entry)
                        goto next;
-
+               /*
+                * inode(x) | CP | inode(x) | dnode(F)
+                * In this case, we can lose the latest inode(x).
+                * So, call recover_inode for the inode update.
+                */
+               if (IS_INODE(page))
+                       recover_inode(entry->inode, page);
+               if (entry->last_dentry == blkaddr) {
+                       err = recover_dentry(entry->inode, page, dir_list);
+                       if (err) {
+                               f2fs_put_page(page, 1);
+                               break;
+                       }
+               }
                err = do_recover_data(sbi, entry->inode, page, blkaddr);
-               if (err)
-                       goto out;
-
-               if (entry->blkaddr == blkaddr) {
-                       iput(entry->inode);
-                       list_del(&entry->list);
-                       kmem_cache_free(fsync_entry_slab, entry);
+               if (err) {
+                       f2fs_put_page(page, 1);
+                       break;
                }
+
+               if (entry->blkaddr == blkaddr)
+                       del_fsync_inode(entry);
 next:
                /* check next segment */
                blkaddr = next_blkaddr_of_node(page);
+               f2fs_put_page(page, 1);
        }
-unlock_out:
-       unlock_page(page);
-out:
-       __free_pages(page, 0);
-
        if (!err)
                allocate_new_segments(sbi);
        return err;
 }
 
-int recover_fsync_data(struct f2fs_sb_info *sbi)
+int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
 {
+       struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
        struct list_head inode_list;
+       struct list_head dir_list;
+       block_t blkaddr;
        int err;
+       int ret = 0;
+       bool need_writecp = false;
 
        fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
-                       sizeof(struct fsync_inode_entry), NULL);
-       if (unlikely(!fsync_entry_slab))
+                       sizeof(struct fsync_inode_entry));
+       if (!fsync_entry_slab)
                return -ENOMEM;
 
        INIT_LIST_HEAD(&inode_list);
+       INIT_LIST_HEAD(&dir_list);
+
+       /* prevent checkpoint */
+       mutex_lock(&sbi->cp_mutex);
+
+       blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
 
        /* step #1: find fsynced inode numbers */
        err = find_fsync_dnodes(sbi, &inode_list);
-       if (err)
+       if (err || list_empty(&inode_list))
                goto out;
 
-       if (list_empty(&inode_list))
+       if (check_only) {
+               ret = 1;
                goto out;
+       }
+
+       need_writecp = true;
 
        /* step #2: recover data */
-       sbi->por_doing = 1;
-       err = recover_data(sbi, &inode_list, CURSEG_WARM_NODE);
-       sbi->por_doing = 0;
-       BUG_ON(!list_empty(&inode_list));
+       err = recover_data(sbi, &inode_list, &dir_list);
+       if (!err)
+               f2fs_bug_on(sbi, !list_empty(&inode_list));
 out:
-       destroy_fsync_dnodes(sbi, &inode_list);
+       destroy_fsync_dnodes(&inode_list);
+
+       /* truncate meta pages that were used by the recovery */
+       truncate_inode_pages_range(META_MAPPING(sbi),
+                       (loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);
+
+       if (err) {
+               truncate_inode_pages(NODE_MAPPING(sbi), 0);
+               truncate_inode_pages(META_MAPPING(sbi), 0);
+       }
+
+       clear_sbi_flag(sbi, SBI_POR_DOING);
+       if (err)
+               set_ckpt_flags(sbi, CP_ERROR_FLAG);
+       mutex_unlock(&sbi->cp_mutex);
+
+       /* let's drop all the directory inodes for a clean checkpoint */
+       destroy_fsync_dnodes(&dir_list);
+
+       if (!err && need_writecp) {
+               struct cp_control cpc = {
+                       .reason = CP_RECOVERY,
+               };
+               err = write_checkpoint(sbi, &cpc);
+       }
+
        kmem_cache_destroy(fsync_entry_slab);
-       write_checkpoint(sbi, false);
-       return err;
+       return ret ? ret : err;
 }
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index d8e84e49a5c301cb139acfa6d0e06cbd1544b82a..2f754eecfa2cf080fb29295bde28cec3e4647872 100644 (file)
 #include <linux/bio.h>
 #include <linux/blkdev.h>
 #include <linux/prefetch.h>
-#include <linux/vmalloc.h>
+#include <linux/kthread.h>
+#include <linux/swap.h>
+#include <linux/timer.h>
 
 #include "f2fs.h"
 #include "segment.h"
 #include "node.h"
+#include "trace.h"
 #include <trace/events/f2fs.h>
 
+#define __reverse_ffz(x) __reverse_ffs(~(x))
+
+static struct kmem_cache *discard_entry_slab;
+static struct kmem_cache *sit_entry_set_slab;
+static struct kmem_cache *inmem_entry_slab;
+
+/**
+ * Copied from latest lib/llist.c
+ * llist_for_each_entry_safe - iterate over some deleted entries of
+ *                             lock-less list of given type
+ *                            safe against removal of list entry
+ * @pos:       the type * to use as a loop cursor.
+ * @n:         another type * to use as temporary storage
+ * @node:      the first entry of deleted list entries.
+ * @member:    the name of the llist_node with the struct.
+ *
+ * In general, some entries of the lock-less list can be traversed
+ * safely only after being removed from list, so start with an entry
+ * instead of list head.
+ *
+ * If being used on entries deleted from lock-less list directly, the
+ * traverse order is from the newest to the oldest added entry.  If
+ * you want to traverse from the oldest to the newest, you must
+ * reverse the order by yourself before traversing.
+ */
+#define llist_for_each_entry_safe(pos, n, node, member)                               \
+       for (pos = llist_entry((node), typeof(*pos), member);                  \
+               &pos->member != NULL &&                                        \
+               (n = llist_entry(pos->member.next, typeof(*n), member), true); \
+               pos = n)
+
+/**
+ * Copied from latest lib/llist.c
+ * llist_reverse_order - reverse order of a llist chain
+ * @head:      first item of the list to be reversed
+ *
+ * Reverse the order of a chain of llist entries and return the
+ * new first entry.
+ */
+struct llist_node *llist_reverse_order(struct llist_node *head)
+{
+       struct llist_node *new_head = NULL;
+
+       while (head) {
+               struct llist_node *tmp = head;
+               head = head->next;
+               tmp->next = new_head;
+               new_head = tmp;
+       }
+
+       return new_head;
+}
+
+/**
+ * Copied from latest linux/list.h
+ * list_last_entry - get the last element from a list
+ * @ptr:        the list head to take the element from.
+ * @type:       the type of the struct this is embedded in.
+ * @member:     the name of the list_struct within the struct.
+ *
+ * Note, that list is expected to be not empty.
+ */
+#define list_last_entry(ptr, type, member) \
+       list_entry((ptr)->prev, type, member)
+
+static unsigned long __reverse_ulong(unsigned char *str)
+{
+       unsigned long tmp = 0;
+       int shift = 24, idx = 0;
+
+#if BITS_PER_LONG == 64
+       shift = 56;
+#endif
+       while (shift >= 0) {
+               tmp |= (unsigned long)str[idx++] << shift;
+               shift -= BITS_PER_BYTE;
+       }
+       return tmp;
+}
+
+/*
+ * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since
+ * MSB and LSB are reversed in a byte by f2fs_set_bit.
+ */
+static inline unsigned long __reverse_ffs(unsigned long word)
+{
+       int num = 0;
+
+#if BITS_PER_LONG == 64
+       if ((word & 0xffffffff00000000UL) == 0)
+               num += 32;
+       else
+               word >>= 32;
+#endif
+       if ((word & 0xffff0000) == 0)
+               num += 16;
+       else
+               word >>= 16;
+
+       if ((word & 0xff00) == 0)
+               num += 8;
+       else
+               word >>= 8;
+
+       if ((word & 0xf0) == 0)
+               num += 4;
+       else
+               word >>= 4;
+
+       if ((word & 0xc) == 0)
+               num += 2;
+       else
+               word >>= 2;
+
+       if ((word & 0x2) == 0)
+               num += 1;
+       return num;
+}
+
+/*
+ * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
+ * f2fs_set_bit makes MSB and LSB reversed in a byte.
+ * @size must be integral times of unsigned long.
+ * Example:
+ *                             MSB <--> LSB
+ *   f2fs_set_bit(0, bitmap) => 1000 0000
+ *   f2fs_set_bit(7, bitmap) => 0000 0001
+ */
+static unsigned long __find_rev_next_bit(const unsigned long *addr,
+                       unsigned long size, unsigned long offset)
+{
+       const unsigned long *p = addr + BIT_WORD(offset);
+       unsigned long result = size;
+       unsigned long tmp;
+
+       if (offset >= size)
+               return size;
+
+       size -= (offset & ~(BITS_PER_LONG - 1));
+       offset %= BITS_PER_LONG;
+
+       while (1) {
+               if (*p == 0)
+                       goto pass;
+
+               tmp = __reverse_ulong((unsigned char *)p);
+
+               tmp &= ~0UL >> offset;
+               if (size < BITS_PER_LONG)
+                       tmp &= (~0UL << (BITS_PER_LONG - size));
+               if (tmp)
+                       goto found;
+pass:
+               if (size <= BITS_PER_LONG)
+                       break;
+               size -= BITS_PER_LONG;
+               offset = 0;
+               p++;
+       }
+       return result;
+found:
+       return result - size + __reverse_ffs(tmp);
+}
+
+static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
+                       unsigned long size, unsigned long offset)
+{
+       const unsigned long *p = addr + BIT_WORD(offset);
+       unsigned long result = size;
+       unsigned long tmp;
+
+       if (offset >= size)
+               return size;
+
+       size -= (offset & ~(BITS_PER_LONG - 1));
+       offset %= BITS_PER_LONG;
+
+       while (1) {
+               if (*p == ~0UL)
+                       goto pass;
+
+               tmp = __reverse_ulong((unsigned char *)p);
+
+               if (offset)
+                       tmp |= ~0UL << (BITS_PER_LONG - offset);
+               if (size < BITS_PER_LONG)
+                       tmp |= ~0UL >> size;
+               if (tmp != ~0UL)
+                       goto found;
+pass:
+               if (size <= BITS_PER_LONG)
+                       break;
+               size -= BITS_PER_LONG;
+               offset = 0;
+               p++;
+       }
+       return result;
+found:
+       return result - size + __reverse_ffz(tmp);
+}
+
+void register_inmem_page(struct inode *inode, struct page *page)
+{
+       struct f2fs_inode_info *fi = F2FS_I(inode);
+       struct inmem_pages *new;
+
+       f2fs_trace_pid(page);
+
+       set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE);
+       SetPagePrivate(page);
+
+       new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);
+
+       /* add atomic page indices to the list */
+       new->page = page;
+       INIT_LIST_HEAD(&new->list);
+
+       /* increase reference count with clean state */
+       mutex_lock(&fi->inmem_lock);
+       get_page(page);
+       list_add_tail(&new->list, &fi->inmem_pages);
+       inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
+       mutex_unlock(&fi->inmem_lock);
+
+       trace_f2fs_register_inmem_page(page, INMEM);
+}
+
+static int __revoke_inmem_pages(struct inode *inode,
+                               struct list_head *head, bool drop, bool recover)
+{
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       struct inmem_pages *cur, *tmp;
+       int err = 0;
+
+       list_for_each_entry_safe(cur, tmp, head, list) {
+               struct page *page = cur->page;
+
+               if (drop)
+                       trace_f2fs_commit_inmem_page(page, INMEM_DROP);
+
+               lock_page(page);
+
+               if (recover) {
+                       struct dnode_of_data dn;
+                       struct node_info ni;
+
+                       trace_f2fs_commit_inmem_page(page, INMEM_REVOKE);
+
+                       set_new_dnode(&dn, inode, NULL, NULL, 0);
+                       if (get_dnode_of_data(&dn, page->index, LOOKUP_NODE)) {
+                               err = -EAGAIN;
+                               goto next;
+                       }
+                       get_node_info(sbi, dn.nid, &ni);
+                       f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
+                                       cur->old_addr, ni.version, true, true);
+                       f2fs_put_dnode(&dn);
+               }
+next:
+               /* we don't need to invalidate this on the successful path */
+               if (drop || recover)
+                       ClearPageUptodate(page);
+               set_page_private(page, 0);
+               ClearPagePrivate(page);
+               f2fs_put_page(page, 1);
+
+               list_del(&cur->list);
+               kmem_cache_free(inmem_entry_slab, cur);
+               dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
+       }
+       return err;
+}
+
+void drop_inmem_pages(struct inode *inode)
+{
+       struct f2fs_inode_info *fi = F2FS_I(inode);
+
+       clear_inode_flag(inode, FI_ATOMIC_FILE);
+
+       mutex_lock(&fi->inmem_lock);
+       __revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
+       mutex_unlock(&fi->inmem_lock);
+}
+
+static int __commit_inmem_pages(struct inode *inode,
+                                       struct list_head *revoke_list)
+{
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       struct f2fs_inode_info *fi = F2FS_I(inode);
+       struct inmem_pages *cur, *tmp;
+       struct f2fs_io_info fio = {
+               .sbi = sbi,
+               .type = DATA,
+               .rw = WRITE_SYNC | REQ_PRIO,
+               .encrypted_page = NULL,
+       };
+       bool submit_bio = false;
+       int err = 0;
+
+       list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) {
+               struct page *page = cur->page;
+
+               lock_page(page);
+               if (page->mapping == inode->i_mapping) {
+                       trace_f2fs_commit_inmem_page(page, INMEM);
+
+                       set_page_dirty(page);
+                       f2fs_wait_on_page_writeback(page, DATA, true);
+                       if (clear_page_dirty_for_io(page)) {
+                               inode_dec_dirty_pages(inode);
+                               remove_dirty_inode(inode);
+                       }
+
+                       fio.page = page;
+                       err = do_write_data_page(&fio);
+                       if (err) {
+                               unlock_page(page);
+                               break;
+                       }
+
+                       /* record old blkaddr for revoking */
+                       cur->old_addr = fio.old_blkaddr;
+
+                       submit_bio = true;
+               }
+               unlock_page(page);
+               list_move_tail(&cur->list, revoke_list);
+       }
+
+       if (submit_bio)
+               f2fs_submit_merged_bio_cond(sbi, inode, NULL, 0, DATA, WRITE);
+
+       if (!err)
+               __revoke_inmem_pages(inode, revoke_list, false, false);
+
+       return err;
+}
+
+int commit_inmem_pages(struct inode *inode)
+{
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       struct f2fs_inode_info *fi = F2FS_I(inode);
+       struct list_head revoke_list;
+       int err;
+
+       INIT_LIST_HEAD(&revoke_list);
+       f2fs_balance_fs(sbi, true);
+       f2fs_lock_op(sbi);
+
+       mutex_lock(&fi->inmem_lock);
+       err = __commit_inmem_pages(inode, &revoke_list);
+       if (err) {
+               int ret;
+               /*
+                * Try to revoke all committed pages. This can still fail due
+                * to lack of memory or some other reason; in that case EAGAIN
+                * is returned, meaning the transaction has lost integrity and
+                * the caller should use a journal to recover, or rewrite and
+                * commit the last transaction. For any other error number,
+                * the filesystem itself has already done the revoking.
+                */
+               ret = __revoke_inmem_pages(inode, &revoke_list, false, true);
+               if (ret)
+                       err = ret;
+
+               /* drop all uncommitted pages */
+               __revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
+       }
+       mutex_unlock(&fi->inmem_lock);
+
+       f2fs_unlock_op(sbi);
+       return err;
+}
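
commit_inmem_pages() is the kernel end of the atomic-write protocol. From userspace the round trip looks roughly like the sketch below, which assumes the F2FS_IOC_START/COMMIT_ATOMIC_WRITE ioctls defined in fs/f2fs/f2fs.h (magic 0xf5, nr 1 and 2); it is illustrative, not part of this patch:

        #include <fcntl.h>
        #include <sys/ioctl.h>
        #include <unistd.h>

        #define F2FS_IOCTL_MAGIC                0xf5
        #define F2FS_IOC_START_ATOMIC_WRITE     _IO(F2FS_IOCTL_MAGIC, 1)
        #define F2FS_IOC_COMMIT_ATOMIC_WRITE    _IO(F2FS_IOCTL_MAGIC, 2)

        static int atomic_update(int fd, const void *buf, size_t len)
        {
                /* pages written after this are staged in memory only */
                if (ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE))
                        return -1;
                if (pwrite(fd, buf, len, 0) != (ssize_t)len)
                        return -1;
                /* ...and hit the disk together here, or not at all */
                return ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE);
        }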
+
 /*
  * This function balances dirty node and dentry pages.
  * In addition, it controls garbage collection.
  */
-void f2fs_balance_fs(struct f2fs_sb_info *sbi)
+void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
 {
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+       if (time_to_inject(sbi, FAULT_CHECKPOINT))
+               f2fs_stop_checkpoint(sbi, false);
+#endif
+
+       if (!need)
+               return;
+
+       /* f2fs_balance_fs_bg may still have work pending */
+       if (excess_cached_nats(sbi))
+               f2fs_balance_fs_bg(sbi);
+
        /*
         * We should do GC or end up with checkpoint, if there are so many dirty
         * dir/node pages without enough free segments.
         */
-       if (has_not_enough_free_secs(sbi, 0)) {
+       if (has_not_enough_free_secs(sbi, 0, 0)) {
                mutex_lock(&sbi->gc_mutex);
-               f2fs_gc(sbi);
+               f2fs_gc(sbi, false, false);
+       }
+}
+
+void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
+{
+       /* try to shrink extent cache when there is not enough memory */
+       if (!available_free_memory(sbi, EXTENT_CACHE))
+               f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);
+
+       /* check the # of cached NAT entries */
+       if (!available_free_memory(sbi, NAT_ENTRIES))
+               try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);
+
+       if (!available_free_memory(sbi, FREE_NIDS))
+               try_to_free_nids(sbi, MAX_FREE_NIDS);
+       else
+               build_free_nids(sbi, false);
+
+       if (!is_idle(sbi))
+               return;
+
+       /* checkpoint is the only way to shrink partial cached entries */
+       if (!available_free_memory(sbi, NAT_ENTRIES) ||
+                       !available_free_memory(sbi, INO_ENTRIES) ||
+                       excess_prefree_segs(sbi) ||
+                       excess_dirty_nats(sbi) ||
+                       f2fs_time_over(sbi, CP_TIME)) {
+               if (test_opt(sbi, DATA_FLUSH)) {
+                       struct blk_plug plug;
+
+                       blk_start_plug(&plug);
+                       sync_dirty_inodes(sbi, FILE_INODE);
+                       blk_finish_plug(&plug);
+               }
+               f2fs_sync_fs(sbi->sb, true);
+               stat_inc_bg_cp_count(sbi->stat_info);
+       }
+}
+
+static int __submit_flush_wait(struct block_device *bdev)
+{
+       struct bio *bio = f2fs_bio_alloc(0);
+       int ret;
+
+       bio->bi_bdev = bdev;
+       ret = submit_bio_wait(WRITE_FLUSH, bio);
+       bio_put(bio);
+       return ret;
+}
+
+static int submit_flush_wait(struct f2fs_sb_info *sbi)
+{
+       int ret = __submit_flush_wait(sbi->sb->s_bdev);
+       int i;
+
+       if (sbi->s_ndevs && !ret) {
+               for (i = 1; i < sbi->s_ndevs; i++) {
+                       ret = __submit_flush_wait(FDEV(i).bdev);
+                       if (ret)
+                               break;
+               }
+       }
+       return ret;
+}
+
+static int issue_flush_thread(void *data)
+{
+       struct f2fs_sb_info *sbi = data;
+       struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;
+       wait_queue_head_t *q = &fcc->flush_wait_queue;
+repeat:
+       if (kthread_should_stop())
+               return 0;
+
+       if (!llist_empty(&fcc->issue_list)) {
+               struct flush_cmd *cmd, *next;
+               int ret;
+
+               fcc->dispatch_list = llist_del_all(&fcc->issue_list);
+               fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);
+
+               ret = submit_flush_wait(sbi);
+               llist_for_each_entry_safe(cmd, next,
+                                         fcc->dispatch_list, llnode) {
+                       cmd->ret = ret;
+                       complete(&cmd->wait);
+               }
+               fcc->dispatch_list = NULL;
+       }
+
+       wait_event_interruptible(*q,
+               kthread_should_stop() || !llist_empty(&fcc->issue_list));
+       goto repeat;
+}
+
+int f2fs_issue_flush(struct f2fs_sb_info *sbi)
+{
+       struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;
+       struct flush_cmd cmd;
+
+       trace_f2fs_issue_flush(sbi->sb, test_opt(sbi, NOBARRIER),
+                                       test_opt(sbi, FLUSH_MERGE));
+
+       if (test_opt(sbi, NOBARRIER))
+               return 0;
+
+       if (!test_opt(sbi, FLUSH_MERGE) || !atomic_read(&fcc->submit_flush)) {
+               int ret;
+
+               atomic_inc(&fcc->submit_flush);
+               ret = submit_flush_wait(sbi);
+               atomic_dec(&fcc->submit_flush);
+               return ret;
+       }
+
+       init_completion(&cmd.wait);
+
+       atomic_inc(&fcc->submit_flush);
+       llist_add(&cmd.llnode, &fcc->issue_list);
+
+       if (!fcc->dispatch_list)
+               wake_up(&fcc->flush_wait_queue);
+
+       if (fcc->f2fs_issue_flush) {
+               wait_for_completion(&cmd.wait);
+               atomic_dec(&fcc->submit_flush);
+       } else {
+               llist_del_all(&fcc->issue_list);
+               atomic_set(&fcc->submit_flush, 0);
+       }
+
+       return cmd.ret;
+}
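
The flush-merge machinery amortizes cache flushes: concurrent fsync callers enqueue a request on a lock-less list, and a single kthread drains the whole list with one flush, completing every waiter. The waiter side reduced to its essentials (kernel-style illustration only; flush_ctx is hypothetical):

        struct flush_ctx {
                wait_queue_head_t wait_queue;
                struct llist_head issue_list;
        };

        struct flush_req {
                struct completion wait;
                struct llist_node llnode;
                int ret;
        };

        static int merged_flush(struct flush_ctx *ctx)
        {
                struct flush_req req;

                init_completion(&req.wait);
                llist_add(&req.llnode, &ctx->issue_list);
                wake_up(&ctx->wait_queue);      /* kick the single worker */
                wait_for_completion(&req.wait); /* one flush serves all */
                return req.ret;
        }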
+
+int create_flush_cmd_control(struct f2fs_sb_info *sbi)
+{
+       dev_t dev = sbi->sb->s_bdev->bd_dev;
+       struct flush_cmd_control *fcc;
+       int err = 0;
+
+       if (SM_I(sbi)->cmd_control_info) {
+               fcc = SM_I(sbi)->cmd_control_info;
+               goto init_thread;
+       }
+
+       fcc = kzalloc(sizeof(struct flush_cmd_control), GFP_KERNEL);
+       if (!fcc)
+               return -ENOMEM;
+       atomic_set(&fcc->submit_flush, 0);
+       init_waitqueue_head(&fcc->flush_wait_queue);
+       init_llist_head(&fcc->issue_list);
+       SM_I(sbi)->cmd_control_info = fcc;
+init_thread:
+       fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
+                               "f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
+       if (IS_ERR(fcc->f2fs_issue_flush)) {
+               err = PTR_ERR(fcc->f2fs_issue_flush);
+               kfree(fcc);
+               SM_I(sbi)->cmd_control_info = NULL;
+               return err;
+       }
+
+       return err;
+}
+
+void destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free)
+{
+       struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;
+
+       if (fcc && fcc->f2fs_issue_flush) {
+               struct task_struct *flush_thread = fcc->f2fs_issue_flush;
+
+               fcc->f2fs_issue_flush = NULL;
+               kthread_stop(flush_thread);
+       }
+       if (free) {
+               kfree(fcc);
+               SM_I(sbi)->cmd_control_info = NULL;
        }
 }
 
@@ -50,20 +617,14 @@ static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
 
        if (dirty_type == DIRTY) {
                struct seg_entry *sentry = get_seg_entry(sbi, segno);
-               enum dirty_type t = DIRTY_HOT_DATA;
+               enum dirty_type t = sentry->type;
 
-               dirty_type = sentry->type;
-
-               if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
-                       dirty_i->nr_dirty[dirty_type]++;
-
-               /* Only one bitmap should be set */
-               for (; t <= DIRTY_COLD_NODE; t++) {
-                       if (t == dirty_type)
-                               continue;
-                       if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
-                               dirty_i->nr_dirty[t]--;
+               if (unlikely(t >= DIRTY)) {
+                       f2fs_bug_on(sbi, 1);
+                       return;
                }
+               if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
+                       dirty_i->nr_dirty[t]++;
        }
 }
 
@@ -76,12 +637,11 @@ static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
                dirty_i->nr_dirty[dirty_type]--;
 
        if (dirty_type == DIRTY) {
-               enum dirty_type t = DIRTY_HOT_DATA;
+               struct seg_entry *sentry = get_seg_entry(sbi, segno);
+               enum dirty_type t = sentry->type;
 
-               /* clear all the bitmaps */
-               for (; t <= DIRTY_COLD_NODE; t++)
-                       if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
-                               dirty_i->nr_dirty[t]--;
+               if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
+                       dirty_i->nr_dirty[t]--;
 
                if (get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
                        clear_bit(GET_SECNO(sbi, segno),
@@ -94,7 +654,7 @@ static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
  * Adding a dirty entry into the seglist is not a critical operation.
  * If a given segment is one of the current working segments, it won't be added.
  */
-void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
+static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
 {
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
        unsigned short valid_blocks;
@@ -117,7 +677,187 @@ void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
        }
 
        mutex_unlock(&dirty_i->seglist_lock);
-       return;
+}
+
+#ifdef CONFIG_BLK_DEV_ZONED
+static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
+               struct block_device *bdev, block_t blkstart, block_t blklen)
+{
+       sector_t nr_sects = SECTOR_FROM_BLOCK(blklen);
+       sector_t sector;
+       int devi = 0;
+
+       if (sbi->s_ndevs) {
+               devi = f2fs_target_device_index(sbi, blkstart);
+               blkstart -= FDEV(devi).start_blk;
+       }
+       sector = SECTOR_FROM_BLOCK(blkstart);
+
+       if (sector & (bdev_zone_size(bdev) - 1) ||
+                               nr_sects != bdev_zone_size(bdev)) {
+               f2fs_msg(sbi->sb, KERN_INFO,
+                       "(%d) %s: Unaligned discard attempted (block %x + %x)",
+                       devi, sbi->s_ndevs ? FDEV(devi).path: "",
+                       blkstart, blklen);
+               return -EIO;
+       }
+
+       /*
+        * We need to know the type of the zone: for conventional zones,
+        * use regular discard if the drive supports it. For sequential
+        * zones, reset the zone write pointer.
+        */
+       switch (get_blkz_type(sbi, bdev, blkstart)) {
+
+       case BLK_ZONE_TYPE_CONVENTIONAL:
+               if (!blk_queue_discard(bdev_get_queue(bdev)))
+                       return 0;
+               return blkdev_issue_discard(bdev, sector, nr_sects,
+                                               GFP_NOFS, 0);
+       case BLK_ZONE_TYPE_SEQWRITE_REQ:
+       case BLK_ZONE_TYPE_SEQWRITE_PREF:
+               trace_f2fs_issue_reset_zone(sbi->sb, blkstart);
+               return blkdev_reset_zones(bdev, sector,
+                                         nr_sects, GFP_NOFS);
+       default:
+               /* Unknown zone type: broken device? */
+               return -EIO;
+       }
+}
+#endif
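
Zone resets operate on whole zones, so the request must start on a zone boundary
and span exactly one zone; with a power-of-two zone size both checks reduce to
mask arithmetic. A sketch, assuming an illustrative zone of 524288 sectors
(256 MiB at 512 B), which is not a value taken from this patch:

    #include <stdint.h>
    #include <stdio.h>

    #define ZONE_SECTORS 524288ULL   /* illustrative: 256 MiB / 512 B */

    /* valid only when ZONE_SECTORS is a power of two, as the kernel check assumes */
    static int zone_aligned(uint64_t sector, uint64_t nr_sects)
    {
        return !(sector & (ZONE_SECTORS - 1)) && nr_sects == ZONE_SECTORS;
    }

    int main(void)
    {
        printf("%d\n", zone_aligned(0, ZONE_SECTORS));      /* 1 */
        printf("%d\n", zone_aligned(4096, ZONE_SECTORS));   /* 0: misaligned start */
        printf("%d\n", zone_aligned(ZONE_SECTORS, 4096));   /* 0: partial zone */
        return 0;
    }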
+
+static int __issue_discard_async(struct f2fs_sb_info *sbi,
+               struct block_device *bdev, block_t blkstart, block_t blklen)
+{
+       sector_t start = SECTOR_FROM_BLOCK(blkstart);
+       sector_t len = SECTOR_FROM_BLOCK(blklen);
+
+#ifdef CONFIG_BLK_DEV_ZONED
+       if (f2fs_sb_mounted_blkzoned(sbi->sb) &&
+                               bdev_zoned_model(bdev) != BLK_ZONED_NONE)
+               return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
+#endif
+       return blkdev_issue_discard(bdev, start, len, GFP_NOFS, 0);
+}
+
+static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
+                               block_t blkstart, block_t blklen)
+{
+       sector_t start = blkstart, len = 0;
+       struct block_device *bdev;
+       struct seg_entry *se;
+       unsigned int offset;
+       block_t i;
+       int err = 0;
+
+       bdev = f2fs_target_device(sbi, blkstart, NULL);
+
+       for (i = blkstart; i < blkstart + blklen; i++, len++) {
+               if (i != start) {
+                       struct block_device *bdev2 =
+                               f2fs_target_device(sbi, i, NULL);
+
+                       if (bdev2 != bdev) {
+                               err = __issue_discard_async(sbi, bdev,
+                                               start, len);
+                               if (err)
+                                       return err;
+                               bdev = bdev2;
+                               start = i;
+                               len = 0;
+                       }
+               }
+
+               se = get_seg_entry(sbi, GET_SEGNO(sbi, i));
+               offset = GET_BLKOFF_FROM_SEG0(sbi, i);
+
+               if (!f2fs_test_and_set_bit(offset, se->discard_map))
+                       sbi->discard_blks--;
+       }
+
+       if (len)
+               err = __issue_discard_async(sbi, bdev, start, len);
+
+       trace_f2fs_issue_discard(sbi->sb, blkstart, blklen);
+       return err;
+}
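
f2fs_issue_discard walks the range block by block so that a discard crossing a
device boundary is split and submitted per device. The same scan in standalone C,
with a hypothetical two-device table standing in for FDEV() and
f2fs_target_device():

    #include <stdio.h>

    struct dev { unsigned start_blk, end_blk; };
    /* illustrative layout: dev0 covers blocks 0..99, dev1 covers 100..199 */
    static struct dev devs[2] = { {0, 99}, {100, 199} };

    static int target_dev(unsigned blk)
    {
        return blk <= devs[0].end_blk ? 0 : 1;
    }

    static void issue(int d, unsigned start, unsigned len)
    {
        printf("discard dev%d: %u +%u\n", d, start, len);
    }

    int main(void)
    {
        unsigned blkstart = 90, blklen = 20;    /* crosses the device boundary */
        unsigned start = blkstart, len = 0, i;
        int d = target_dev(blkstart);

        for (i = blkstart; i < blkstart + blklen; i++, len++) {
            int d2 = target_dev(i);
            if (i != start && d2 != d) {
                issue(d, start, len);           /* flush the first device's part */
                d = d2;
                start = i;
                len = 0;
            }
        }
        if (len)
            issue(d, start, len);               /* remainder on the last device */
        return 0;
    }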
+
+static void __add_discard_entry(struct f2fs_sb_info *sbi,
+               struct cp_control *cpc, struct seg_entry *se,
+               unsigned int start, unsigned int end)
+{
+       struct list_head *head = &SM_I(sbi)->discard_list;
+       struct discard_entry *new, *last;
+
+       if (!list_empty(head)) {
+               last = list_last_entry(head, struct discard_entry, list);
+               if (START_BLOCK(sbi, cpc->trim_start) + start ==
+                                               last->blkaddr + last->len) {
+                       last->len += end - start;
+                       goto done;
+               }
+       }
+
+       new = f2fs_kmem_cache_alloc(discard_entry_slab, GFP_NOFS);
+       INIT_LIST_HEAD(&new->list);
+       new->blkaddr = START_BLOCK(sbi, cpc->trim_start) + start;
+       new->len = end - start;
+       list_add_tail(&new->list, head);
+done:
+       SM_I(sbi)->nr_discards += end - start;
+}
+
+static void add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc)
+{
+       int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
+       int max_blocks = sbi->blocks_per_seg;
+       struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start);
+       unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
+       unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
+       unsigned long *discard_map = (unsigned long *)se->discard_map;
+       unsigned long *dmap = SIT_I(sbi)->tmp_map;
+       unsigned int start = 0, end = -1;
+       bool force = (cpc->reason == CP_DISCARD);
+       int i;
+
+       if (se->valid_blocks == max_blocks || !f2fs_discard_en(sbi))
+               return;
+
+       if (!force) {
+               if (!test_opt(sbi, DISCARD) || !se->valid_blocks ||
+                   SM_I(sbi)->nr_discards >= SM_I(sbi)->max_discards)
+                       return;
+       }
+
+       /* SIT_VBLOCK_MAP_SIZE should be a multiple of sizeof(unsigned long) */
+       for (i = 0; i < entries; i++)
+               dmap[i] = force ? ~ckpt_map[i] & ~discard_map[i] :
+                               (cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];
+
+       while (force || SM_I(sbi)->nr_discards <= SM_I(sbi)->max_discards) {
+               start = __find_rev_next_bit(dmap, max_blocks, end + 1);
+               if (start >= max_blocks)
+                       break;
+
+               end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);
+               if (force && start && end != max_blocks
+                                       && (end - start) < cpc->trim_minlen)
+                       continue;
+
+               __add_discard_entry(sbi, cpc, se, start, end);
+       }
+}
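
The candidate map is pure bit arithmetic: under CP_DISCARD it targets blocks that
are invalid in the checkpoint and not yet discarded (~ckpt & ~discard), otherwise
blocks that were valid at the last checkpoint but have been freed since
((cur ^ ckpt) & ckpt). One 64-bit word with illustrative values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* one 64-block segment, illustrative bitmaps */
        uint64_t cur_map     = 0x00000000000000F0ULL; /* valid now */
        uint64_t ckpt_map    = 0x00000000000000FFULL; /* valid at checkpoint */
        uint64_t discard_map = 0x0000000000000003ULL; /* already discarded */
        int force = 0;

        uint64_t dmap = force ? ~ckpt_map & ~discard_map
                              : (cur_map ^ ckpt_map) & ckpt_map;

        /* blocks 0..3 were checkpointed valid but freed since: prints 0xf */
        printf("dmap = %#llx\n", (unsigned long long)dmap);
        return 0;
    }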
+
+void release_discard_addrs(struct f2fs_sb_info *sbi)
+{
+       struct list_head *head = &(SM_I(sbi)->discard_list);
+       struct discard_entry *entry, *this;
+
+       /* drop caches */
+       list_for_each_entry_safe(entry, this, head, list) {
+               list_del(&entry->list);
+               kmem_cache_free(discard_entry_slab, entry);
+       }
 }
 
 /*
@@ -126,55 +866,84 @@ void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
 static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
 {
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
-       unsigned int segno, offset = 0;
-       unsigned int total_segs = TOTAL_SEGS(sbi);
+       unsigned int segno;
 
        mutex_lock(&dirty_i->seglist_lock);
-       while (1) {
-               segno = find_next_bit(dirty_i->dirty_segmap[PRE], total_segs,
-                               offset);
-               if (segno >= total_segs)
-                       break;
+       for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi))
                __set_test_and_free(sbi, segno);
-               offset = segno + 1;
-       }
        mutex_unlock(&dirty_i->seglist_lock);
 }
 
-void clear_prefree_segments(struct f2fs_sb_info *sbi)
+void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 {
+       struct list_head *head = &(SM_I(sbi)->discard_list);
+       struct discard_entry *entry, *this;
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
-       unsigned int segno, offset = 0;
-       unsigned int total_segs = TOTAL_SEGS(sbi);
+       unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
+       unsigned int start = 0, end = -1;
+       unsigned int secno, start_segno;
+       bool force = (cpc->reason == CP_DISCARD);
 
        mutex_lock(&dirty_i->seglist_lock);
+
        while (1) {
-               segno = find_next_bit(dirty_i->dirty_segmap[PRE], total_segs,
-                               offset);
-               if (segno >= total_segs)
+               int i;
+               start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
+               if (start >= MAIN_SEGS(sbi))
                        break;
+               end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
+                                                               start + 1);
 
-               offset = segno + 1;
-               if (test_and_clear_bit(segno, dirty_i->dirty_segmap[PRE]))
-                       dirty_i->nr_dirty[PRE]--;
-
-               /* Let's use trim */
-               if (test_opt(sbi, DISCARD))
-                       blkdev_issue_discard(sbi->sb->s_bdev,
-                                       START_BLOCK(sbi, segno) <<
-                                       sbi->log_sectors_per_block,
-                                       1 << (sbi->log_sectors_per_block +
-                                               sbi->log_blocks_per_seg),
-                                       GFP_NOFS, 0);
+               for (i = start; i < end; i++)
+                       clear_bit(i, prefree_map);
+
+               dirty_i->nr_dirty[PRE] -= end - start;
+
+               if (force || !test_opt(sbi, DISCARD))
+                       continue;
+
+               if (!test_opt(sbi, LFS) || sbi->segs_per_sec == 1) {
+                       f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
+                               (end - start) << sbi->log_blocks_per_seg);
+                       continue;
+               }
+next:
+               secno = GET_SECNO(sbi, start);
+               start_segno = secno * sbi->segs_per_sec;
+               if (!IS_CURSEC(sbi, secno) &&
+                       !get_valid_blocks(sbi, start, sbi->segs_per_sec))
+                       f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno),
+                               sbi->segs_per_sec << sbi->log_blocks_per_seg);
+
+               start = start_segno + sbi->segs_per_sec;
+               if (start < end)
+                       goto next;
        }
        mutex_unlock(&dirty_i->seglist_lock);
+
+       /* send small discards */
+       list_for_each_entry_safe(entry, this, head, list) {
+               if (force && entry->len < cpc->trim_minlen)
+                       goto skip;
+               f2fs_issue_discard(sbi, entry->blkaddr, entry->len);
+               cpc->trimmed += entry->len;
+skip:
+               list_del(&entry->list);
+               SM_I(sbi)->nr_discards -= entry->len;
+               kmem_cache_free(discard_entry_slab, entry);
+       }
 }
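
Under LFS with multi-segment sections, only a whole section that is neither a
current section nor holding valid blocks gets discarded, and the scan advances in
segs_per_sec steps. The rounding alone, with hypothetical sizes (the IS_CURSEC and
valid-block checks are elided):

    #include <stdio.h>

    #define SEGS_PER_SEC 4   /* illustrative */

    int main(void)
    {
        unsigned start = 5, end = 14;    /* prefree run [5, 14) */

        while (start < end) {
            unsigned secno = start / SEGS_PER_SEC;
            unsigned start_segno = secno * SEGS_PER_SEC;

            /* the real code also skips current sections and valid blocks */
            printf("candidate section %u: segs %u..%u\n",
                   secno, start_segno, start_segno + SEGS_PER_SEC - 1);
            start = start_segno + SEGS_PER_SEC;
        }
        return 0;
    }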
 
-static void __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
+static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
 {
        struct sit_info *sit_i = SIT_I(sbi);
-       if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap))
+
+       if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) {
                sit_i->dirty_sentries++;
+               return false;
+       }
+
+       return true;
 }
 
 static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
@@ -196,9 +965,9 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
 
        se = get_seg_entry(sbi, segno);
        new_vblocks = se->valid_blocks + del;
-       offset = GET_SEGOFF_FROM_SEG0(sbi, blkaddr) & (sbi->blocks_per_seg - 1);
+       offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
 
-       BUG_ON((new_vblocks >> (sizeof(unsigned short) << 3) ||
+       f2fs_bug_on(sbi, (new_vblocks >> (sizeof(unsigned short) << 3) ||
                                (new_vblocks > sbi->blocks_per_seg)));
 
        se->valid_blocks = new_vblocks;
@@ -207,11 +976,17 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
 
        /* Update valid block bitmap */
        if (del > 0) {
-               if (f2fs_set_bit(offset, se->cur_valid_map))
-                       BUG();
+               if (f2fs_test_and_set_bit(offset, se->cur_valid_map))
+                       f2fs_bug_on(sbi, 1);
+               if (f2fs_discard_en(sbi) &&
+                       !f2fs_test_and_set_bit(offset, se->discard_map))
+                       sbi->discard_blks--;
        } else {
-               if (!f2fs_clear_bit(offset, se->cur_valid_map))
-                       BUG();
+               if (!f2fs_test_and_clear_bit(offset, se->cur_valid_map))
+                       f2fs_bug_on(sbi, 1);
+               if (f2fs_discard_en(sbi) &&
+                       f2fs_test_and_clear_bit(offset, se->discard_map))
+                       sbi->discard_blks++;
        }
        if (!f2fs_test_bit(offset, se->ckpt_valid_map))
                se->ckpt_valid_blocks += del;
@@ -225,12 +1000,14 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
                get_sec_entry(sbi, segno)->valid_blocks += del;
 }
 
-static void refresh_sit_entry(struct f2fs_sb_info *sbi,
-                       block_t old_blkaddr, block_t new_blkaddr)
+void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new)
 {
-       update_sit_entry(sbi, new_blkaddr, 1);
-       if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
-               update_sit_entry(sbi, old_blkaddr, -1);
+       update_sit_entry(sbi, new, 1);
+       if (GET_SEGNO(sbi, old) != NULL_SEGNO)
+               update_sit_entry(sbi, old, -1);
+
+       locate_dirty_segment(sbi, GET_SEGNO(sbi, old));
+       locate_dirty_segment(sbi, GET_SEGNO(sbi, new));
 }
 
 void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
@@ -238,7 +1015,7 @@ void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
        unsigned int segno = GET_SEGNO(sbi, addr);
        struct sit_info *sit_i = SIT_I(sbi);
 
-       BUG_ON(addr == NULL_ADDR);
+       f2fs_bug_on(sbi, addr == NULL_ADDR);
        if (addr == NEW_ADDR)
                return;
 
@@ -253,42 +1030,68 @@ void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
        mutex_unlock(&sit_i->sentry_lock);
 }
 
+bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
+{
+       struct sit_info *sit_i = SIT_I(sbi);
+       unsigned int segno, offset;
+       struct seg_entry *se;
+       bool is_cp = false;
+
+       if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
+               return true;
+
+       mutex_lock(&sit_i->sentry_lock);
+
+       segno = GET_SEGNO(sbi, blkaddr);
+       se = get_seg_entry(sbi, segno);
+       offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
+
+       if (f2fs_test_bit(offset, se->ckpt_valid_map))
+               is_cp = true;
+
+       mutex_unlock(&sit_i->sentry_lock);
+
+       return is_cp;
+}
+
 /*
  * This function should be called with the curseg_mutex lock held
  */
 static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
-               struct f2fs_summary *sum, unsigned short offset)
+                                       struct f2fs_summary *sum)
 {
        struct curseg_info *curseg = CURSEG_I(sbi, type);
        void *addr = curseg->sum_blk;
-       addr += offset * sizeof(struct f2fs_summary);
+       addr += curseg->next_blkoff * sizeof(struct f2fs_summary);
        memcpy(addr, sum, sizeof(struct f2fs_summary));
-       return;
 }
 
 /*
  * Calculate the number of current summary pages for writing
  */
-int npages_for_summary_flush(struct f2fs_sb_info *sbi)
+int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
 {
-       int total_size_bytes = 0;
        int valid_sum_count = 0;
-       int i, sum_space;
+       int i, sum_in_page;
 
        for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
                if (sbi->ckpt->alloc_type[i] == SSR)
                        valid_sum_count += sbi->blocks_per_seg;
-               else
-                       valid_sum_count += curseg_blkoff(sbi, i);
+               else {
+                       if (for_ra)
+                               valid_sum_count += le16_to_cpu(
+                                       F2FS_CKPT(sbi)->cur_data_blkoff[i]);
+                       else
+                               valid_sum_count += curseg_blkoff(sbi, i);
+               }
        }
 
-       total_size_bytes = valid_sum_count * (SUMMARY_SIZE + 1)
-                       + sizeof(struct nat_journal) + 2
-                       + sizeof(struct sit_journal) + 2;
-       sum_space = PAGE_CACHE_SIZE - SUM_FOOTER_SIZE;
-       if (total_size_bytes < sum_space)
+       sum_in_page = (PAGE_SIZE - 2 * SUM_JOURNAL_SIZE -
+                       SUM_FOOTER_SIZE) / SUMMARY_SIZE;
+       if (valid_sum_count <= sum_in_page)
                return 1;
-       else if (total_size_bytes < 2 * sum_space)
+       else if ((valid_sum_count - sum_in_page) <=
+               (PAGE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
                return 2;
        return 3;
 }
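
The page count falls out of how many summaries fit once the two journal areas and
the footer are carved out of the first page. A sketch of the arithmetic; the sizes
below (4096-byte block, 7-byte summary, 5-byte footer, 253-byte journal) are
stated as assumptions, not read from this patch:

    #include <stdio.h>

    /* illustrative on-disk sizes; see the f2fs on-disk layout for the real ones */
    #define PAGE_SZ        4096
    #define SUMMARY_SZ     7
    #define SUM_FOOTER_SZ  5
    #define SUM_JOURNAL_SZ 253

    static int npages_for_flush(int valid_sum_count)
    {
        int first = (PAGE_SZ - 2 * SUM_JOURNAL_SZ - SUM_FOOTER_SZ) / SUMMARY_SZ;
        int rest  = (PAGE_SZ - SUM_FOOTER_SZ) / SUMMARY_SZ;

        if (valid_sum_count <= first)
            return 1;
        if (valid_sum_count - first <= rest)
            return 2;
        return 3;
    }

    int main(void)
    {
        printf("%d\n", npages_for_flush(100));   /* 1 */
        printf("%d\n", npages_for_flush(700));   /* 2 */
        printf("%d\n", npages_for_flush(1200));  /* 3 */
        return 0;
    }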
@@ -301,74 +1104,58 @@ struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
        return get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno));
 }
 
-static void write_sum_page(struct f2fs_sb_info *sbi,
-                       struct f2fs_summary_block *sum_blk, block_t blk_addr)
+void update_meta_page(struct f2fs_sb_info *sbi, void *src, block_t blk_addr)
 {
        struct page *page = grab_meta_page(sbi, blk_addr);
-       void *kaddr = page_address(page);
-       memcpy(kaddr, sum_blk, PAGE_CACHE_SIZE);
+       void *dst = page_address(page);
+
+       if (src)
+               memcpy(dst, src, PAGE_SIZE);
+       else
+               memset(dst, 0, PAGE_SIZE);
        set_page_dirty(page);
        f2fs_put_page(page, 1);
 }
 
-static unsigned int check_prefree_segments(struct f2fs_sb_info *sbi, int type)
+static void write_sum_page(struct f2fs_sb_info *sbi,
+                       struct f2fs_summary_block *sum_blk, block_t blk_addr)
 {
-       struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
-       unsigned long *prefree_segmap = dirty_i->dirty_segmap[PRE];
-       unsigned int segno;
-       unsigned int ofs = 0;
-
-       /*
-        * If there is not enough reserved sections,
-        * we should not reuse prefree segments.
-        */
-       if (has_not_enough_free_secs(sbi, 0))
-               return NULL_SEGNO;
-
-       /*
-        * NODE page should not reuse prefree segment,
-        * since those information is used for SPOR.
-        */
-       if (IS_NODESEG(type))
-               return NULL_SEGNO;
-next:
-       segno = find_next_bit(prefree_segmap, TOTAL_SEGS(sbi), ofs);
-       ofs += sbi->segs_per_sec;
+       update_meta_page(sbi, (void *)sum_blk, blk_addr);
+}
 
-       if (segno < TOTAL_SEGS(sbi)) {
-               int i;
+static void write_current_sum_page(struct f2fs_sb_info *sbi,
+                                               int type, block_t blk_addr)
+{
+       struct curseg_info *curseg = CURSEG_I(sbi, type);
+       struct page *page = grab_meta_page(sbi, blk_addr);
+       struct f2fs_summary_block *src = curseg->sum_blk;
+       struct f2fs_summary_block *dst;
 
-               /* skip intermediate segments in a section */
-               if (segno % sbi->segs_per_sec)
-                       goto next;
+       dst = (struct f2fs_summary_block *)page_address(page);
 
-               /* skip if the section is currently used */
-               if (sec_usage_check(sbi, GET_SECNO(sbi, segno)))
-                       goto next;
+       mutex_lock(&curseg->curseg_mutex);
 
-               /* skip if whole section is not prefree */
-               for (i = 1; i < sbi->segs_per_sec; i++)
-                       if (!test_bit(segno + i, prefree_segmap))
-                               goto next;
+       down_read(&curseg->journal_rwsem);
+       memcpy(&dst->journal, curseg->journal, SUM_JOURNAL_SIZE);
+       up_read(&curseg->journal_rwsem);
 
-               /* skip if whole section was not free at the last checkpoint */
-               for (i = 0; i < sbi->segs_per_sec; i++)
-                       if (get_seg_entry(sbi, segno + i)->ckpt_valid_blocks)
-                               goto next;
+       memcpy(dst->entries, src->entries, SUM_ENTRY_SIZE);
+       memcpy(&dst->footer, &src->footer, SUM_FOOTER_SIZE);
 
-               return segno;
-       }
-       return NULL_SEGNO;
+       mutex_unlock(&curseg->curseg_mutex);
+
+       set_page_dirty(page);
+       f2fs_put_page(page, 1);
 }
 
 static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
 {
        struct curseg_info *curseg = CURSEG_I(sbi, type);
-       unsigned int segno = curseg->segno;
+       unsigned int segno = curseg->segno + 1;
        struct free_segmap_info *free_i = FREE_I(sbi);
 
-       if (segno + 1 < TOTAL_SEGS(sbi) && (segno + 1) % sbi->segs_per_sec)
-               return !test_bit(segno + 1, free_i->free_segmap);
+       if (segno < MAIN_SEGS(sbi) && segno % sbi->segs_per_sec)
+               return !test_bit(segno, free_i->free_segmap);
        return 0;
 }
 
@@ -381,7 +1168,7 @@ static void get_new_segment(struct f2fs_sb_info *sbi,
 {
        struct free_segmap_info *free_i = FREE_I(sbi);
        unsigned int segno, secno, zoneno;
-       unsigned int total_zones = TOTAL_SECS(sbi) / sbi->secs_per_zone;
+       unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone;
        unsigned int hint = *newseg / sbi->segs_per_sec;
        unsigned int old_zoneno = GET_ZONENO_FROM_SEGNO(sbi, *newseg);
        unsigned int left_start = hint;
@@ -389,22 +1176,21 @@ static void get_new_segment(struct f2fs_sb_info *sbi,
        int go_left = 0;
        int i;
 
-       write_lock(&free_i->segmap_lock);
+       spin_lock(&free_i->segmap_lock);
 
        if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
                segno = find_next_zero_bit(free_i->free_segmap,
-                                       TOTAL_SEGS(sbi), *newseg + 1);
-               if (segno - *newseg < sbi->segs_per_sec -
-                                       (*newseg % sbi->segs_per_sec))
+                               (hint + 1) * sbi->segs_per_sec, *newseg + 1);
+               if (segno < (hint + 1) * sbi->segs_per_sec)
                        goto got_it;
        }
 find_other_zone:
-       secno = find_next_zero_bit(free_i->free_secmap, TOTAL_SECS(sbi), hint);
-       if (secno >= TOTAL_SECS(sbi)) {
+       secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
+       if (secno >= MAIN_SECS(sbi)) {
                if (dir == ALLOC_RIGHT) {
                        secno = find_next_zero_bit(free_i->free_secmap,
-                                                       TOTAL_SECS(sbi), 0);
-                       BUG_ON(secno >= TOTAL_SECS(sbi));
+                                                       MAIN_SECS(sbi), 0);
+                       f2fs_bug_on(sbi, secno >= MAIN_SECS(sbi));
                } else {
                        go_left = 1;
                        left_start = hint - 1;
@@ -419,8 +1205,8 @@ find_other_zone:
                        continue;
                }
                left_start = find_next_zero_bit(free_i->free_secmap,
-                                                       TOTAL_SECS(sbi), 0);
-               BUG_ON(left_start >= TOTAL_SECS(sbi));
+                                                       MAIN_SECS(sbi), 0);
+               f2fs_bug_on(sbi, left_start >= MAIN_SECS(sbi));
                break;
        }
        secno = left_start;
@@ -459,10 +1245,10 @@ skip_left:
        }
 got_it:
        /* set it as dirty segment in free segmap */
-       BUG_ON(test_bit(segno, free_i->free_segmap));
+       f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
        __set_inuse(sbi, segno);
        *newseg = segno;
-       write_unlock(&free_i->segmap_lock);
+       spin_unlock(&free_i->segmap_lock);
 }
 
 static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
@@ -495,7 +1281,7 @@ static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
        int dir = ALLOC_LEFT;
 
        write_sum_page(sbi, curseg->sum_blk,
-                               GET_SUM_BLOCK(sbi, curseg->segno));
+                               GET_SUM_BLOCK(sbi, segno));
        if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA)
                dir = ALLOC_RIGHT;
 
@@ -512,13 +1298,18 @@ static void __next_free_blkoff(struct f2fs_sb_info *sbi,
                        struct curseg_info *seg, block_t start)
 {
        struct seg_entry *se = get_seg_entry(sbi, seg->segno);
-       block_t ofs;
-       for (ofs = start; ofs < sbi->blocks_per_seg; ofs++) {
-               if (!f2fs_test_bit(ofs, se->ckpt_valid_map)
-                       && !f2fs_test_bit(ofs, se->cur_valid_map))
-                       break;
-       }
-       seg->next_blkoff = ofs;
+       int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
+       unsigned long *target_map = SIT_I(sbi)->tmp_map;
+       unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
+       unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
+       int i, pos;
+
+       for (i = 0; i < entries; i++)
+               target_map[i] = ckpt_map[i] | cur_map[i];
+
+       pos = __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start);
+
+       seg->next_blkoff = pos;
 }
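
Rather than probing two bitmaps bit by bit, the new helper ORs the checkpoint and
current valid maps and takes the first zero bit of the union as the next free
block offset. A userspace equivalent over a single 64-bit word, assuming a
64-block segment:

    #include <stdint.h>
    #include <stdio.h>

    /* first zero bit at or after 'start' in a 64-bit map */
    static int find_next_zero(uint64_t map, int start)
    {
        int i;

        for (i = start; i < 64; i++)
            if (!(map & (1ULL << i)))
                return i;
        return 64;
    }

    int main(void)
    {
        uint64_t ckpt_map = 0x0000000000000FF0ULL;
        uint64_t cur_map  = 0x000000000000000FULL;
        uint64_t target   = ckpt_map | cur_map;   /* free in neither map */

        printf("next_blkoff = %d\n", find_next_zero(target, 0));  /* 12 */
        return 0;
    }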
 
 /*
@@ -536,7 +1327,7 @@ static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
 }
 
 /*
- * This function always allocates a used segment (from dirty seglist) by SSR
+ * This function always allocates a used segment (from dirty seglist) by SSR
  * manner, so it should recover the existing segment information of valid blocks
  */
 static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse)
@@ -573,7 +1364,7 @@ static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
        struct curseg_info *curseg = CURSEG_I(sbi, type);
        const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
 
-       if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0))
+       if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0, 0))
                return v_ops->get_victim(sbi,
                                &(curseg)->next_segno, BG_GC, type, SSR);
 
@@ -594,15 +1385,8 @@ static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
 {
        struct curseg_info *curseg = CURSEG_I(sbi, type);
 
-       if (force) {
+       if (force)
                new_curseg(sbi, type, true);
-               goto out;
-       }
-
-       curseg->next_segno = check_prefree_segments(sbi, type);
-
-       if (curseg->next_segno != NULL_SEGNO)
-               change_curseg(sbi, type, false);
        else if (type == CURSEG_WARM_NODE)
                new_curseg(sbi, type, false);
        else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
@@ -611,21 +1395,24 @@ static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
                change_curseg(sbi, type, true);
        else
                new_curseg(sbi, type, false);
-out:
-       sbi->segment_count[curseg->alloc_type]++;
+
+       stat_inc_seg_type(sbi, curseg);
 }
 
 void allocate_new_segments(struct f2fs_sb_info *sbi)
 {
        struct curseg_info *curseg;
-       unsigned int old_curseg;
+       unsigned int old_segno;
        int i;
 
+       if (test_opt(sbi, LFS))
+               return;
+
        for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
                curseg = CURSEG_I(sbi, i);
-               old_curseg = curseg->segno;
+               old_segno = curseg->segno;
                SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
-               locate_dirty_segment(sbi, old_curseg);
+               locate_dirty_segment(sbi, old_segno);
        }
 }
 
@@ -633,126 +1420,59 @@ static const struct segment_allocation default_salloc_ops = {
        .allocate_segment = allocate_segment_by_default,
 };
 
-static void f2fs_end_io_write(struct bio *bio, int err)
-{
-       const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-       struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
-       struct bio_private *p = bio->bi_private;
-
-       do {
-               struct page *page = bvec->bv_page;
-
-               if (--bvec >= bio->bi_io_vec)
-                       prefetchw(&bvec->bv_page->flags);
-               if (!uptodate) {
-                       SetPageError(page);
-                       if (page->mapping)
-                               set_bit(AS_EIO, &page->mapping->flags);
-                       set_ckpt_flags(p->sbi->ckpt, CP_ERROR_FLAG);
-                       p->sbi->sb->s_flags |= MS_RDONLY;
-               }
-               end_page_writeback(page);
-               dec_page_count(p->sbi, F2FS_WRITEBACK);
-       } while (bvec >= bio->bi_io_vec);
-
-       if (p->is_sync)
-               complete(p->wait);
-       kfree(p);
-       bio_put(bio);
-}
-
-struct bio *f2fs_bio_alloc(struct block_device *bdev, int npages)
-{
-       struct bio *bio;
-       struct bio_private *priv;
-retry:
-       priv = kmalloc(sizeof(struct bio_private), GFP_NOFS);
-       if (!priv) {
-               cond_resched();
-               goto retry;
-       }
-
-       /* No failure on bio allocation */
-       bio = bio_alloc(GFP_NOIO, npages);
-       bio->bi_bdev = bdev;
-       bio->bi_private = priv;
-       return bio;
-}
-
-static void do_submit_bio(struct f2fs_sb_info *sbi,
-                               enum page_type type, bool sync)
+int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
 {
-       int rw = sync ? WRITE_SYNC : WRITE;
-       enum page_type btype = type > META ? META : type;
-
-       if (type >= META_FLUSH)
-               rw = WRITE_FLUSH_FUA;
-
-       if (btype == META)
-               rw |= REQ_META;
+       __u64 start = F2FS_BYTES_TO_BLK(range->start);
+       __u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
+       unsigned int start_segno, end_segno;
+       struct cp_control cpc;
+       int err = 0;
 
-       if (sbi->bio[btype]) {
-               struct bio_private *p = sbi->bio[btype]->bi_private;
-               p->sbi = sbi;
-               sbi->bio[btype]->bi_end_io = f2fs_end_io_write;
+       if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
+               return -EINVAL;
 
-               trace_f2fs_do_submit_bio(sbi->sb, btype, sync, sbi->bio[btype]);
+       cpc.trimmed = 0;
+       if (end <= MAIN_BLKADDR(sbi))
+               goto out;
 
-               if (type == META_FLUSH) {
-                       DECLARE_COMPLETION_ONSTACK(wait);
-                       p->is_sync = true;
-                       p->wait = &wait;
-                       submit_bio(rw, sbi->bio[btype]);
-                       wait_for_completion(&wait);
-               } else {
-                       p->is_sync = false;
-                       submit_bio(rw, sbi->bio[btype]);
-               }
-               sbi->bio[btype] = NULL;
+       if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
+               f2fs_msg(sbi->sb, KERN_WARNING,
+                       "Found FS corruption, run fsck to fix.");
+               goto out;
        }
-}
-
-void f2fs_submit_bio(struct f2fs_sb_info *sbi, enum page_type type, bool sync)
-{
-       down_write(&sbi->bio_sem);
-       do_submit_bio(sbi, type, sync);
-       up_write(&sbi->bio_sem);
-}
 
-static void submit_write_page(struct f2fs_sb_info *sbi, struct page *page,
-                               block_t blk_addr, enum page_type type)
-{
-       struct block_device *bdev = sbi->sb->s_bdev;
-
-       verify_block_addr(sbi, blk_addr);
+       /* start/end segment number in main_area */
+       start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
+       end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
+                                               GET_SEGNO(sbi, end);
+       cpc.reason = CP_DISCARD;
+       cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));
 
-       down_write(&sbi->bio_sem);
+       /* do checkpoint to issue discard commands safely */
+       for (; start_segno <= end_segno; start_segno = cpc.trim_end + 1) {
+               cpc.trim_start = start_segno;
 
-       inc_page_count(sbi, F2FS_WRITEBACK);
+               if (sbi->discard_blks == 0)
+                       break;
+               else if (sbi->discard_blks < BATCHED_TRIM_BLOCKS(sbi))
+                       cpc.trim_end = end_segno;
+               else
+                       cpc.trim_end = min_t(unsigned int,
+                               rounddown(start_segno +
+                               BATCHED_TRIM_SEGMENTS(sbi),
+                               sbi->segs_per_sec) - 1, end_segno);
 
-       if (sbi->bio[type] && sbi->last_block_in_bio[type] != blk_addr - 1)
-               do_submit_bio(sbi, type, false);
-alloc_new:
-       if (sbi->bio[type] == NULL) {
-               sbi->bio[type] = f2fs_bio_alloc(bdev, max_hw_blocks(sbi));
-               sbi->bio[type]->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
-               /*
-                * The end_io will be assigned at the submission phase.
-                * Until then, let bio_add_page() merge consecutive IOs as much
-                * as possible.
-                */
-       }
+               mutex_lock(&sbi->gc_mutex);
+               err = write_checkpoint(sbi, &cpc);
+               mutex_unlock(&sbi->gc_mutex);
+               if (err)
+                       break;
 
-       if (bio_add_page(sbi->bio[type], page, PAGE_CACHE_SIZE, 0) <
-                                                       PAGE_CACHE_SIZE) {
-               do_submit_bio(sbi, type, false);
-               goto alloc_new;
+               schedule();
        }
-
-       sbi->last_block_in_bio[type] = blk_addr;
-
-       up_write(&sbi->bio_sem);
-       trace_f2fs_submit_write_page(page, blk_addr, type);
+out:
+       range->len = F2FS_BLK_TO_BYTES(cpc.trimmed);
+       return err;
 }
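
The trim range is sliced so that each checkpoint discards at most one batch, with
the batch end rounded down to a section boundary. The slicing logic in isolation,
with hypothetical batch and section sizes:

    #include <stdio.h>

    #define BATCH_SEGS   8   /* illustrative BATCHED_TRIM_SEGMENTS */
    #define SEGS_PER_SEC 4   /* illustrative */

    static unsigned rounddown(unsigned x, unsigned y) { return x - x % y; }

    int main(void)
    {
        unsigned start = 3, end = 21, trim_end;

        for (; start <= end; start = trim_end + 1) {
            trim_end = rounddown(start + BATCH_SEGS, SEGS_PER_SEC) - 1;
            if (trim_end > end)
                trim_end = end;
            /* the real code writes one checkpoint per batch here */
            printf("trim segs %u..%u\n", start, trim_end);
        }
        return 0;
    }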
 
 static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type)
@@ -781,8 +1501,8 @@ static int __get_segment_type_4(struct page *page, enum page_type p_type)
                else
                        return CURSEG_COLD_DATA;
        } else {
-               if (IS_DNODE(page) && !is_cold_node(page))
-                       return CURSEG_HOT_NODE;
+               if (IS_DNODE(page) && is_cold_node(page))
+                       return CURSEG_WARM_NODE;
                else
                        return CURSEG_COLD_NODE;
        }
@@ -795,7 +1515,7 @@ static int __get_segment_type_6(struct page *page, enum page_type p_type)
 
                if (S_ISDIR(inode->i_mode))
                        return CURSEG_HOT_DATA;
-               else if (is_cold_data(page) || is_cold_file(inode))
+               else if (is_cold_data(page) || file_is_cold(inode))
                        return CURSEG_COLD_DATA;
                else
                        return CURSEG_WARM_DATA;
@@ -810,130 +1530,156 @@ static int __get_segment_type_6(struct page *page, enum page_type p_type)
 
 static int __get_segment_type(struct page *page, enum page_type p_type)
 {
-       struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
-       switch (sbi->active_logs) {
+       switch (F2FS_P_SB(page)->active_logs) {
        case 2:
                return __get_segment_type_2(page, p_type);
        case 4:
                return __get_segment_type_4(page, p_type);
        }
        /* NR_CURSEG_TYPE(6) logs by default */
-       BUG_ON(sbi->active_logs != NR_CURSEG_TYPE);
+       f2fs_bug_on(F2FS_P_SB(page),
+               F2FS_P_SB(page)->active_logs != NR_CURSEG_TYPE);
        return __get_segment_type_6(page, p_type);
 }
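
With four active logs (the __get_segment_type_4 hunk above), cold direct node
pages now land in the warm-node log and all other node pages in the cold-node
log. The node-side policy modeled as a pure function; the enum values are
illustrative:

    #include <stdbool.h>
    #include <stdio.h>

    enum temp { HOT_DATA, WARM_DATA, COLD_DATA, HOT_NODE, WARM_NODE, COLD_NODE };

    /* node-side selection of the 4-log case after this patch */
    static enum temp node_seg_type_4(bool is_dnode, bool is_cold)
    {
        return (is_dnode && is_cold) ? WARM_NODE : COLD_NODE;
    }

    int main(void)
    {
        printf("%d\n", node_seg_type_4(true, true));    /* 4: WARM_NODE */
        printf("%d\n", node_seg_type_4(true, false));   /* 5: COLD_NODE */
        return 0;
    }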
 
-static void do_write_page(struct f2fs_sb_info *sbi, struct page *page,
-                       block_t old_blkaddr, block_t *new_blkaddr,
-                       struct f2fs_summary *sum, enum page_type p_type)
+void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
+               block_t old_blkaddr, block_t *new_blkaddr,
+               struct f2fs_summary *sum, int type)
 {
        struct sit_info *sit_i = SIT_I(sbi);
-       struct curseg_info *curseg;
-       unsigned int old_cursegno;
-       int type;
-
-       type = __get_segment_type(page, p_type);
-       curseg = CURSEG_I(sbi, type);
+       struct curseg_info *curseg = CURSEG_I(sbi, type);
 
        mutex_lock(&curseg->curseg_mutex);
+       mutex_lock(&sit_i->sentry_lock);
 
        *new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
-       old_cursegno = curseg->segno;
 
        /*
         * __add_sum_entry should be called with the curseg_mutex held,
         * because this function updates a summary entry in the
         * current summary block.
         */
-       __add_sum_entry(sbi, type, sum, curseg->next_blkoff);
+       __add_sum_entry(sbi, type, sum);
 
-       mutex_lock(&sit_i->sentry_lock);
        __refresh_next_blkoff(sbi, curseg);
-       sbi->block_count[curseg->alloc_type]++;
 
+       stat_inc_block_count(sbi, curseg);
+
+       if (!__has_curseg_space(sbi, type))
+               sit_i->s_ops->allocate_segment(sbi, type, false);
        /*
         * SIT information should be updated before segment allocation,
         * since SSR needs latest valid block information.
         */
        refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr);
 
-       if (!__has_curseg_space(sbi, type))
-               sit_i->s_ops->allocate_segment(sbi, type, false);
-
-       locate_dirty_segment(sbi, old_cursegno);
-       locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
        mutex_unlock(&sit_i->sentry_lock);
 
-       if (p_type == NODE)
+       if (page && IS_NODESEG(type))
                fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));
 
+       mutex_unlock(&curseg->curseg_mutex);
+}
+
+static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
+{
+       int type = __get_segment_type(fio->page, fio->type);
+
+       if (fio->type == NODE || fio->type == DATA)
+               mutex_lock(&fio->sbi->wio_mutex[fio->type]);
+
+       allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
+                                       &fio->new_blkaddr, sum, type);
+
        /* writeout dirty page into bdev */
-       submit_write_page(sbi, page, *new_blkaddr, p_type);
+       f2fs_submit_page_mbio(fio);
 
-       mutex_unlock(&curseg->curseg_mutex);
+       if (fio->type == NODE || fio->type == DATA)
+               mutex_unlock(&fio->sbi->wio_mutex[fio->type]);
 }
 
 void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
 {
+       struct f2fs_io_info fio = {
+               .sbi = sbi,
+               .type = META,
+               .rw = WRITE_SYNC | REQ_META | REQ_PRIO,
+               .old_blkaddr = page->index,
+               .new_blkaddr = page->index,
+               .page = page,
+               .encrypted_page = NULL,
+       };
+
+       if (unlikely(page->index >= MAIN_BLKADDR(sbi)))
+               fio.rw &= ~REQ_META;
+
        set_page_writeback(page);
-       submit_write_page(sbi, page, page->index, META);
+       f2fs_submit_page_mbio(&fio);
 }
 
-void write_node_page(struct f2fs_sb_info *sbi, struct page *page,
-               unsigned int nid, block_t old_blkaddr, block_t *new_blkaddr)
+void write_node_page(unsigned int nid, struct f2fs_io_info *fio)
 {
        struct f2fs_summary sum;
+
        set_summary(&sum, nid, 0, 0);
-       do_write_page(sbi, page, old_blkaddr, new_blkaddr, &sum, NODE);
+       do_write_page(&sum, fio);
 }
 
-void write_data_page(struct inode *inode, struct page *page,
-               struct dnode_of_data *dn, block_t old_blkaddr,
-               block_t *new_blkaddr)
+void write_data_page(struct dnode_of_data *dn, struct f2fs_io_info *fio)
 {
-       struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+       struct f2fs_sb_info *sbi = fio->sbi;
        struct f2fs_summary sum;
        struct node_info ni;
 
-       BUG_ON(old_blkaddr == NULL_ADDR);
+       f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
        get_node_info(sbi, dn->nid, &ni);
        set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
-
-       do_write_page(sbi, page, old_blkaddr,
-                       new_blkaddr, &sum, DATA);
+       do_write_page(&sum, fio);
+       f2fs_update_data_blkaddr(dn, fio->new_blkaddr);
 }
 
-void rewrite_data_page(struct f2fs_sb_info *sbi, struct page *page,
-                                       block_t old_blk_addr)
+void rewrite_data_page(struct f2fs_io_info *fio)
 {
-       submit_write_page(sbi, page, old_blk_addr, DATA);
+       fio->new_blkaddr = fio->old_blkaddr;
+       stat_inc_inplace_blocks(fio->sbi);
+       f2fs_submit_page_mbio(fio);
 }
 
-void recover_data_page(struct f2fs_sb_info *sbi,
-                       struct page *page, struct f2fs_summary *sum,
-                       block_t old_blkaddr, block_t new_blkaddr)
+void __f2fs_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
+                               block_t old_blkaddr, block_t new_blkaddr,
+                               bool recover_curseg, bool recover_newaddr)
 {
        struct sit_info *sit_i = SIT_I(sbi);
        struct curseg_info *curseg;
        unsigned int segno, old_cursegno;
        struct seg_entry *se;
        int type;
+       unsigned short old_blkoff;
 
        segno = GET_SEGNO(sbi, new_blkaddr);
        se = get_seg_entry(sbi, segno);
        type = se->type;
 
-       if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
-               if (old_blkaddr == NULL_ADDR)
-                       type = CURSEG_COLD_DATA;
-               else
+       if (!recover_curseg) {
+               /* for recovery flow */
+               if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
+                       if (old_blkaddr == NULL_ADDR)
+                               type = CURSEG_COLD_DATA;
+                       else
+                               type = CURSEG_WARM_DATA;
+               }
+       } else {
+               if (!IS_CURSEG(sbi, segno))
                        type = CURSEG_WARM_DATA;
        }
+
        curseg = CURSEG_I(sbi, type);
 
        mutex_lock(&curseg->curseg_mutex);
        mutex_lock(&sit_i->sentry_lock);
 
        old_cursegno = curseg->segno;
+       old_blkoff = curseg->next_blkoff;
 
        /* change the current segment */
        if (segno != curseg->segno) {
@@ -941,66 +1687,73 @@ void recover_data_page(struct f2fs_sb_info *sbi,
                change_curseg(sbi, type, true);
        }
 
-       curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, new_blkaddr) &
-                                       (sbi->blocks_per_seg - 1);
-       __add_sum_entry(sbi, type, sum, curseg->next_blkoff);
+       curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
+       __add_sum_entry(sbi, type, sum);
 
-       refresh_sit_entry(sbi, old_blkaddr, new_blkaddr);
+       if (!recover_curseg || recover_newaddr)
+               update_sit_entry(sbi, new_blkaddr, 1);
+       if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
+               update_sit_entry(sbi, old_blkaddr, -1);
 
-       locate_dirty_segment(sbi, old_cursegno);
        locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
+       locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr));
+
+       locate_dirty_segment(sbi, old_cursegno);
+
+       if (recover_curseg) {
+               if (old_cursegno != curseg->segno) {
+                       curseg->next_segno = old_cursegno;
+                       change_curseg(sbi, type, true);
+               }
+               curseg->next_blkoff = old_blkoff;
+       }
 
        mutex_unlock(&sit_i->sentry_lock);
        mutex_unlock(&curseg->curseg_mutex);
 }
 
-void rewrite_node_page(struct f2fs_sb_info *sbi,
-                       struct page *page, struct f2fs_summary *sum,
-                       block_t old_blkaddr, block_t new_blkaddr)
+void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
+                               block_t old_addr, block_t new_addr,
+                               unsigned char version, bool recover_curseg,
+                               bool recover_newaddr)
 {
-       struct sit_info *sit_i = SIT_I(sbi);
-       int type = CURSEG_WARM_NODE;
-       struct curseg_info *curseg;
-       unsigned int segno, old_cursegno;
-       block_t next_blkaddr = next_blkaddr_of_node(page);
-       unsigned int next_segno = GET_SEGNO(sbi, next_blkaddr);
+       struct f2fs_summary sum;
 
-       curseg = CURSEG_I(sbi, type);
+       set_summary(&sum, dn->nid, dn->ofs_in_node, version);
 
-       mutex_lock(&curseg->curseg_mutex);
-       mutex_lock(&sit_i->sentry_lock);
+       __f2fs_replace_block(sbi, &sum, old_addr, new_addr,
+                                       recover_curseg, recover_newaddr);
 
-       segno = GET_SEGNO(sbi, new_blkaddr);
-       old_cursegno = curseg->segno;
+       f2fs_update_data_blkaddr(dn, new_addr);
+}
 
-       /* change the current segment */
-       if (segno != curseg->segno) {
-               curseg->next_segno = segno;
-               change_curseg(sbi, type, true);
-       }
-       curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, new_blkaddr) &
-                                       (sbi->blocks_per_seg - 1);
-       __add_sum_entry(sbi, type, sum, curseg->next_blkoff);
+void f2fs_wait_on_page_writeback(struct page *page,
+                               enum page_type type, bool ordered)
+{
+       if (PageWriteback(page)) {
+               struct f2fs_sb_info *sbi = F2FS_P_SB(page);
 
-       /* change the current log to the next block addr in advance */
-       if (next_segno != segno) {
-               curseg->next_segno = next_segno;
-               change_curseg(sbi, type, true);
+               f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, type, WRITE);
+               if (ordered)
+                       wait_on_page_writeback(page);
+               else
+                       wait_for_stable_page(page);
        }
-       curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(sbi, next_blkaddr) &
-                                       (sbi->blocks_per_seg - 1);
+}
 
-       /* rewrite node page */
-       set_page_writeback(page);
-       submit_write_page(sbi, page, new_blkaddr, NODE);
-       f2fs_submit_bio(sbi, NODE, true);
-       refresh_sit_entry(sbi, old_blkaddr, new_blkaddr);
+void f2fs_wait_on_encrypted_page_writeback(struct f2fs_sb_info *sbi,
+                                                       block_t blkaddr)
+{
+       struct page *cpage;
 
-       locate_dirty_segment(sbi, old_cursegno);
-       locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
+       if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
+               return;
 
-       mutex_unlock(&sit_i->sentry_lock);
-       mutex_unlock(&curseg->curseg_mutex);
+       cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
+       if (cpage) {
+               f2fs_wait_on_page_writeback(cpage, DATA, true);
+               f2fs_put_page(cpage, 1);
+       }
 }
 
 static int read_compacted_summaries(struct f2fs_sb_info *sbi)
@@ -1019,12 +1772,11 @@ static int read_compacted_summaries(struct f2fs_sb_info *sbi)
 
        /* Step 1: restore nat cache */
        seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
-       memcpy(&seg_i->sum_blk->n_nats, kaddr, SUM_JOURNAL_SIZE);
+       memcpy(seg_i->journal, kaddr, SUM_JOURNAL_SIZE);
 
        /* Step 2: restore sit cache */
        seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
-       memcpy(&seg_i->sum_blk->n_sits, kaddr + SUM_JOURNAL_SIZE,
-                                               SUM_JOURNAL_SIZE);
+       memcpy(seg_i->journal, kaddr + SUM_JOURNAL_SIZE, SUM_JOURNAL_SIZE);
        offset = 2 * SUM_JOURNAL_SIZE;
 
        /* Step 3: restore summary entries */
@@ -1048,7 +1800,7 @@ static int read_compacted_summaries(struct f2fs_sb_info *sbi)
                        s = (struct f2fs_summary *)(kaddr + offset);
                        seg_i->sum_blk->entries[j] = *s;
                        offset += SUMMARY_SIZE;
-                       if (offset + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
+                       if (offset + SUMMARY_SIZE <= PAGE_SIZE -
                                                SUM_FOOTER_SIZE)
                                continue;
 
@@ -1079,7 +1831,7 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
                segno = le32_to_cpu(ckpt->cur_data_segno[type]);
                blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
                                                        CURSEG_HOT_DATA]);
-               if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
+               if (__exist_node_summaries(sbi))
                        blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
                else
                        blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
@@ -1088,7 +1840,7 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
                                                        CURSEG_HOT_NODE]);
                blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
                                                        CURSEG_HOT_NODE]);
-               if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
+               if (__exist_node_summaries(sbi))
                        blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
                                                        type - CURSEG_HOT_NODE);
                else
@@ -1099,7 +1851,7 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
        sum = (struct f2fs_summary_block *)page_address(new);
 
        if (IS_NODESEG(type)) {
-               if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG)) {
+               if (__exist_node_summaries(sbi)) {
                        struct f2fs_summary *ns = &sum->entries[0];
                        int i;
                        for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
@@ -1107,9 +1859,12 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
                                ns->ofs_in_node = 0;
                        }
                } else {
-                       if (restore_node_summary(sbi, segno, sum)) {
+                       int err;
+
+                       err = restore_node_summary(sbi, segno, sum);
+                       if (err) {
                                f2fs_put_page(new, 1);
-                               return -EINVAL;
+                               return err;
                        }
                }
        }
@@ -1117,7 +1872,14 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
        /* set uncompleted segment to curseg */
        curseg = CURSEG_I(sbi, type);
        mutex_lock(&curseg->curseg_mutex);
-       memcpy(curseg->sum_blk, sum, PAGE_CACHE_SIZE);
+
+       /* update journal info */
+       down_write(&curseg->journal_rwsem);
+       memcpy(curseg->journal, &sum->journal, SUM_JOURNAL_SIZE);
+       up_write(&curseg->journal_rwsem);
+
+       memcpy(curseg->sum_blk->entries, sum->entries, SUM_ENTRY_SIZE);
+       memcpy(&curseg->sum_blk->footer, &sum->footer, SUM_FOOTER_SIZE);
        curseg->next_segno = segno;
        reset_curseg(sbi, type, 0);
        curseg->alloc_type = ckpt->alloc_type[type];
@@ -1130,17 +1892,31 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
 static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
 {
        int type = CURSEG_HOT_DATA;
+       int err;
+
+       if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) {
+               int npages = npages_for_summary_flush(sbi, true);
+
+               if (npages >= 2)
+                       ra_meta_pages(sbi, start_sum_block(sbi), npages,
+                                                       META_CP, true);
 
-       if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) {
                /* restore for compacted data summary */
                if (read_compacted_summaries(sbi))
                        return -EINVAL;
                type = CURSEG_HOT_NODE;
        }
 
-       for (; type <= CURSEG_COLD_NODE; type++)
-               if (read_normal_summaries(sbi, type))
-                       return -EINVAL;
+       if (__exist_node_summaries(sbi))
+               ra_meta_pages(sbi, sum_blk_addr(sbi, NR_CURSEG_TYPE, type),
+                                       NR_CURSEG_TYPE - type, META_CP, true);
+
+       for (; type <= CURSEG_COLD_NODE; type++) {
+               err = read_normal_summaries(sbi, type);
+               if (err)
+                       return err;
+       }
+
        return 0;
 }
 
@@ -1158,17 +1934,14 @@ static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
 
        /* Step 1: write nat cache */
        seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
-       memcpy(kaddr, &seg_i->sum_blk->n_nats, SUM_JOURNAL_SIZE);
+       memcpy(kaddr, seg_i->journal, SUM_JOURNAL_SIZE);
        written_size += SUM_JOURNAL_SIZE;
 
        /* Step 2: write sit cache */
        seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
-       memcpy(kaddr + written_size, &seg_i->sum_blk->n_sits,
-                                               SUM_JOURNAL_SIZE);
+       memcpy(kaddr + written_size, seg_i->journal, SUM_JOURNAL_SIZE);
        written_size += SUM_JOURNAL_SIZE;
 
-       set_page_dirty(page);
-
        /* Step 3: write summary entries */
        for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
                unsigned short blkoff;
@@ -1187,18 +1960,20 @@ static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
                        summary = (struct f2fs_summary *)(kaddr + written_size);
                        *summary = seg_i->sum_blk->entries[j];
                        written_size += SUMMARY_SIZE;
-                       set_page_dirty(page);
 
-                       if (written_size + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
+                       if (written_size + SUMMARY_SIZE <= PAGE_SIZE -
                                                        SUM_FOOTER_SIZE)
                                continue;
 
+                       set_page_dirty(page);
                        f2fs_put_page(page, 1);
                        page = NULL;
                }
        }
-       if (page)
+       if (page) {
+               set_page_dirty(page);
                f2fs_put_page(page, 1);
+       }
 }
 
 static void write_normal_summaries(struct f2fs_sb_info *sbi,
@@ -1210,17 +1985,13 @@ static void write_normal_summaries(struct f2fs_sb_info *sbi,
        else
                end = type + NR_CURSEG_NODE_TYPE;
 
-       for (i = type; i < end; i++) {
-               struct curseg_info *sum = CURSEG_I(sbi, i);
-               mutex_lock(&sum->curseg_mutex);
-               write_sum_page(sbi, sum->sum_blk, blkaddr + (i - type));
-               mutex_unlock(&sum->curseg_mutex);
-       }
+       for (i = type; i < end; i++)
+               write_current_sum_page(sbi, i, blkaddr + (i - type));
 }
 
 void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
 {
-       if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG))
+       if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG))
                write_compacted_summaries(sbi, start_blk);
        else
                write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
@@ -1228,29 +1999,27 @@ void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
 
 void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
 {
-       if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG))
-               write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
-       return;
+       write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
 }
 
-int lookup_journal_in_cursum(struct f2fs_summary_block *sum, int type,
+int lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
                                        unsigned int val, int alloc)
 {
        int i;
 
        if (type == NAT_JOURNAL) {
-               for (i = 0; i < nats_in_cursum(sum); i++) {
-                       if (le32_to_cpu(nid_in_journal(sum, i)) == val)
+               for (i = 0; i < nats_in_cursum(journal); i++) {
+                       if (le32_to_cpu(nid_in_journal(journal, i)) == val)
                                return i;
                }
-               if (alloc && nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES)
-                       return update_nats_in_cursum(sum, 1);
+               if (alloc && __has_cursum_space(journal, 1, NAT_JOURNAL))
+                       return update_nats_in_cursum(journal, 1);
        } else if (type == SIT_JOURNAL) {
-               for (i = 0; i < sits_in_cursum(sum); i++)
-                       if (le32_to_cpu(segno_in_journal(sum, i)) == val)
+               for (i = 0; i < sits_in_cursum(journal); i++)
+                       if (le32_to_cpu(segno_in_journal(journal, i)) == val)
                                return i;
-               if (alloc && sits_in_cursum(sum) < SIT_JOURNAL_ENTRIES)
-                       return update_sits_in_cursum(sum, 1);
+               if (alloc && __has_cursum_space(journal, 1, SIT_JOURNAL))
+                       return update_sits_in_cursum(journal, 1);
        }
        return -1;
 }
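
The journal lookup is a linear probe that can optionally grow the journal by one
slot when the key is absent and space remains. A standalone model of the NAT
case, with a fixed-capacity array standing in for the journal and
__has_cursum_space():

    #include <stdio.h>

    #define CAP 8   /* illustrative journal capacity */

    static unsigned nids[CAP];
    static int n_nats;

    /* returns the slot for 'val', appending one when alloc is set; -1 on miss */
    static int lookup_journal(unsigned val, int alloc)
    {
        int i;

        for (i = 0; i < n_nats; i++)
            if (nids[i] == val)
                return i;
        if (alloc && n_nats < CAP) {
            nids[n_nats] = val;
            return n_nats++;
        }
        return -1;
    }

    int main(void)
    {
        printf("%d\n", lookup_journal(42, 1));  /* 0: allocated */
        printf("%d\n", lookup_journal(42, 0));  /* 0: found */
        printf("%d\n", lookup_journal(7, 0));   /* -1: miss, no alloc */
        return 0;
    }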
@@ -1258,17 +2027,7 @@ int lookup_journal_in_cursum(struct f2fs_summary_block *sum, int type,
 static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
                                        unsigned int segno)
 {
-       struct sit_info *sit_i = SIT_I(sbi);
-       unsigned int offset = SIT_BLOCK_OFFSET(sit_i, segno);
-       block_t blk_addr = sit_i->sit_base_addr + offset;
-
-       check_seg_range(sbi, segno);
-
-       /* calculate sit block address */
-       if (f2fs_test_bit(offset, sit_i->sit_bitmap))
-               blk_addr += sit_i->sit_blocks;
-
-       return get_meta_page(sbi, blk_addr);
+       return get_meta_page(sbi, current_sit_addr(sbi, segno));
 }
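
get_current_sit_page() shrinks to a one-liner because the address computation moved into current_sit_addr() (see the segment.h part of this patch). The SIT keeps two on-disk copies of each block and ping-pongs between them across checkpoints; a sketch of the selection, using the field names from current_sit_addr():

    /* two SIT copies sit back to back; sit_bitmap says which is live */
    block_t blk_addr = sit_i->sit_base_addr + SIT_BLOCK_OFFSET(segno);

    if (f2fs_test_bit(SIT_BLOCK_OFFSET(segno), sit_i->sit_bitmap))
            blk_addr += sit_i->sit_blocks;  /* second copy is current */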
 
 static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
@@ -1285,11 +2044,11 @@ static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
        /* get current sit block page without lock */
        src_page = get_meta_page(sbi, src_off);
        dst_page = grab_meta_page(sbi, dst_off);
-       BUG_ON(PageDirty(src_page));
+       f2fs_bug_on(sbi, PageDirty(src_page));
 
        src_addr = page_address(src_page);
        dst_addr = page_address(dst_page);
-       memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
+       memcpy(dst_addr, src_addr, PAGE_SIZE);
 
        set_page_dirty(dst_page);
        f2fs_put_page(src_page, 1);
@@ -1299,96 +2058,195 @@ static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
        return dst_page;
 }
 
-static bool flush_sits_in_journal(struct f2fs_sb_info *sbi)
+static struct sit_entry_set *grab_sit_entry_set(void)
+{
+       struct sit_entry_set *ses =
+                       f2fs_kmem_cache_alloc(sit_entry_set_slab, GFP_NOFS);
+
+       ses->entry_cnt = 0;
+       INIT_LIST_HEAD(&ses->set_list);
+       return ses;
+}
+
+static void release_sit_entry_set(struct sit_entry_set *ses)
+{
+       list_del(&ses->set_list);
+       kmem_cache_free(sit_entry_set_slab, ses);
+}
+
+static void adjust_sit_entry_set(struct sit_entry_set *ses,
+                                               struct list_head *head)
+{
+       struct sit_entry_set *next = ses;
+
+       if (list_is_last(&ses->set_list, head))
+               return;
+
+       list_for_each_entry_continue(next, head, set_list)
+               if (ses->entry_cnt <= next->entry_cnt)
+                       break;
+
+       list_move_tail(&ses->set_list, &next->set_list);
+}
+
+static void add_sit_entry(unsigned int segno, struct list_head *head)
+{
+       struct sit_entry_set *ses;
+       unsigned int start_segno = START_SEGNO(segno);
+
+       list_for_each_entry(ses, head, set_list) {
+               if (ses->start_segno == start_segno) {
+                       ses->entry_cnt++;
+                       adjust_sit_entry_set(ses, head);
+                       return;
+               }
+       }
+
+       ses = grab_sit_entry_set();
+
+       ses->start_segno = start_segno;
+       ses->entry_cnt++;
+       list_add(&ses->set_list, head);
+}
+
+static void add_sits_in_set(struct f2fs_sb_info *sbi)
+{
+       struct f2fs_sm_info *sm_info = SM_I(sbi);
+       struct list_head *set_list = &sm_info->sit_entry_set;
+       unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap;
+       unsigned int segno;
+
+       for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi))
+               add_sit_entry(segno, set_list);
+}
+
+static void remove_sits_in_journal(struct f2fs_sb_info *sbi)
 {
        struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
-       struct f2fs_summary_block *sum = curseg->sum_blk;
+       struct f2fs_journal *journal = curseg->journal;
        int i;
 
-       /*
-        * If the journal area in the current summary is full of sit entries,
-        * all the sit entries will be flushed. Otherwise the sit entries
-        * are not able to replace with newly hot sit entries.
-        */
-       if (sits_in_cursum(sum) >= SIT_JOURNAL_ENTRIES) {
-               for (i = sits_in_cursum(sum) - 1; i >= 0; i--) {
-                       unsigned int segno;
-                       segno = le32_to_cpu(segno_in_journal(sum, i));
-                       __mark_sit_entry_dirty(sbi, segno);
-               }
-               update_sits_in_cursum(sum, -sits_in_cursum(sum));
-               return 1;
+       down_write(&curseg->journal_rwsem);
+       for (i = 0; i < sits_in_cursum(journal); i++) {
+               unsigned int segno;
+               bool dirtied;
+
+               segno = le32_to_cpu(segno_in_journal(journal, i));
+               dirtied = __mark_sit_entry_dirty(sbi, segno);
+
+               if (!dirtied)
+                       add_sit_entry(segno, &SM_I(sbi)->sit_entry_set);
        }
-       return 0;
+       update_sits_in_cursum(journal, -i);
+       up_write(&curseg->journal_rwsem);
 }
 
 /*
  * CP calls this function, which flushes SIT entries including sit_journal,
  * and moves prefree segs to free segs.
  */
-void flush_sit_entries(struct f2fs_sb_info *sbi)
+void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 {
        struct sit_info *sit_i = SIT_I(sbi);
        unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
        struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
-       struct f2fs_summary_block *sum = curseg->sum_blk;
-       unsigned long nsegs = TOTAL_SEGS(sbi);
-       struct page *page = NULL;
-       struct f2fs_sit_block *raw_sit = NULL;
-       unsigned int start = 0, end = 0;
-       unsigned int segno = -1;
-       bool flushed;
+       struct f2fs_journal *journal = curseg->journal;
+       struct sit_entry_set *ses, *tmp;
+       struct list_head *head = &SM_I(sbi)->sit_entry_set;
+       bool to_journal = true;
+       struct seg_entry *se;
 
-       mutex_lock(&curseg->curseg_mutex);
        mutex_lock(&sit_i->sentry_lock);
 
+       if (!sit_i->dirty_sentries)
+               goto out;
+
        /*
-        * "flushed" indicates whether sit entries in journal are flushed
-        * to the SIT area or not.
+        * add and account the sit entries from the dirty bitmap in sit
+        * entry sets temporarily
         */
-       flushed = flush_sits_in_journal(sbi);
+       add_sits_in_set(sbi);
 
-       while ((segno = find_next_bit(bitmap, nsegs, segno + 1)) < nsegs) {
-               struct seg_entry *se = get_seg_entry(sbi, segno);
-               int sit_offset, offset;
+       /*
+        * if there is not enough space in the journal to store the dirty
+        * sit entries, remove all entries from the journal, then add and
+        * account them in the sit entry sets.
+        */
+       if (!__has_cursum_space(journal, sit_i->dirty_sentries, SIT_JOURNAL))
+               remove_sits_in_journal(sbi);
 
-               sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
+       /*
+        * there are two steps to flush sit entries:
+        * #1, flush sit entries to the journal in the current cold data
+        *     summary block.
+        * #2, flush sit entries to the sit page.
+        */
+       list_for_each_entry_safe(ses, tmp, head, set_list) {
+               struct page *page = NULL;
+               struct f2fs_sit_block *raw_sit = NULL;
+               unsigned int start_segno = ses->start_segno;
+               unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK,
+                                               (unsigned long)MAIN_SEGS(sbi));
+               unsigned int segno = start_segno;
+
+               if (to_journal &&
+                       !__has_cursum_space(journal, ses->entry_cnt, SIT_JOURNAL))
+                       to_journal = false;
+
+               if (to_journal) {
+                       down_write(&curseg->journal_rwsem);
+               } else {
+                       page = get_next_sit_page(sbi, start_segno);
+                       raw_sit = page_address(page);
+               }
 
-               if (flushed)
-                       goto to_sit_page;
+               /* flush dirty sit entries in the region of the current sit set */
+               for_each_set_bit_from(segno, bitmap, end) {
+                       int offset, sit_offset;
 
-               offset = lookup_journal_in_cursum(sum, SIT_JOURNAL, segno, 1);
-               if (offset >= 0) {
-                       segno_in_journal(sum, offset) = cpu_to_le32(segno);
-                       seg_info_to_raw_sit(se, &sit_in_journal(sum, offset));
-                       goto flush_done;
-               }
-to_sit_page:
-               if (!page || (start > segno) || (segno > end)) {
-                       if (page) {
-                               f2fs_put_page(page, 1);
-                               page = NULL;
+                       se = get_seg_entry(sbi, segno);
+
+                       /* add discard candidates */
+                       if (cpc->reason != CP_DISCARD) {
+                               cpc->trim_start = segno;
+                               add_discard_addrs(sbi, cpc);
                        }
 
-                       start = START_SEGNO(sit_i, segno);
-                       end = start + SIT_ENTRY_PER_BLOCK - 1;
+                       if (to_journal) {
+                               offset = lookup_journal_in_cursum(journal,
+                                                       SIT_JOURNAL, segno, 1);
+                               f2fs_bug_on(sbi, offset < 0);
+                               segno_in_journal(journal, offset) =
+                                                       cpu_to_le32(segno);
+                               seg_info_to_raw_sit(se,
+                                       &sit_in_journal(journal, offset));
+                       } else {
+                               sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
+                               seg_info_to_raw_sit(se,
+                                               &raw_sit->entries[sit_offset]);
+                       }
 
-                       /* read sit block that will be updated */
-                       page = get_next_sit_page(sbi, start);
-                       raw_sit = page_address(page);
+                       __clear_bit(segno, bitmap);
+                       sit_i->dirty_sentries--;
+                       ses->entry_cnt--;
                }
 
-               /* udpate entry in SIT block */
-               seg_info_to_raw_sit(se, &raw_sit->entries[sit_offset]);
-flush_done:
-               __clear_bit(segno, bitmap);
-               sit_i->dirty_sentries--;
+               if (to_journal)
+                       up_write(&curseg->journal_rwsem);
+               else
+                       f2fs_put_page(page, 1);
+
+               f2fs_bug_on(sbi, ses->entry_cnt);
+               release_sit_entry_set(ses);
        }
-       mutex_unlock(&sit_i->sentry_lock);
-       mutex_unlock(&curseg->curseg_mutex);
 
-       /* writeout last modified SIT block */
-       f2fs_put_page(page, 1);
+       f2fs_bug_on(sbi, !list_empty(head));
+       f2fs_bug_on(sbi, sit_i->dirty_sentries);
+out:
+       if (cpc->reason == CP_DISCARD) {
+               for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++)
+                       add_discard_addrs(sbi, cpc);
+       }
+       mutex_unlock(&sit_i->sentry_lock);
 
        set_prefree_as_free_segments(sbi);
 }
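
flush_sit_entries() now groups dirty entries into per-SIT-block sit_entry_sets, kept sorted by entry_cnt in ascending order (see adjust_sit_entry_set() above). Because the walk visits the smallest sets first, the first set that no longer fits in the journal flips to_journal to false for good, and everything after it goes straight to SIT pages. A condensed restatement of that decision:

    /* sets arrive smallest-first, so to_journal flips at most once */
    list_for_each_entry_safe(ses, tmp, head, set_list) {
            if (to_journal &&
                !__has_cursum_space(journal, ses->entry_cnt, SIT_JOURNAL))
                    to_journal = false;     /* journal full: use SIT pages */

            if (to_journal)
                    down_write(&curseg->journal_rwsem);
            else
                    page = get_next_sit_page(sbi, ses->start_segno);
            /* ... write out each dirty entry in the set, then release ... */
    }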
@@ -1396,7 +2254,6 @@ flush_done:
 static int build_sit_info(struct f2fs_sb_info *sbi)
 {
        struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
-       struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
        struct sit_info *sit_i;
        unsigned int sit_segs, start;
        char *src_bitmap, *dst_bitmap;
@@ -1409,28 +2266,40 @@ static int build_sit_info(struct f2fs_sb_info *sbi)
 
        SM_I(sbi)->sit_info = sit_i;
 
-       sit_i->sentries = vzalloc(TOTAL_SEGS(sbi) * sizeof(struct seg_entry));
+       sit_i->sentries = f2fs_kvzalloc(MAIN_SEGS(sbi) *
+                                       sizeof(struct seg_entry), GFP_KERNEL);
        if (!sit_i->sentries)
                return -ENOMEM;
 
-       bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
-       sit_i->dirty_sentries_bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+       bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
+       sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(bitmap_size, GFP_KERNEL);
        if (!sit_i->dirty_sentries_bitmap)
                return -ENOMEM;
 
-       for (start = 0; start < TOTAL_SEGS(sbi); start++) {
+       for (start = 0; start < MAIN_SEGS(sbi); start++) {
                sit_i->sentries[start].cur_valid_map
                        = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
                sit_i->sentries[start].ckpt_valid_map
                        = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
-               if (!sit_i->sentries[start].cur_valid_map
-                               || !sit_i->sentries[start].ckpt_valid_map)
+               if (!sit_i->sentries[start].cur_valid_map ||
+                               !sit_i->sentries[start].ckpt_valid_map)
                        return -ENOMEM;
+
+               if (f2fs_discard_en(sbi)) {
+                       sit_i->sentries[start].discard_map
+                               = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
+                       if (!sit_i->sentries[start].discard_map)
+                               return -ENOMEM;
+               }
        }
 
+       sit_i->tmp_map = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
+       if (!sit_i->tmp_map)
+               return -ENOMEM;
+
        if (sbi->segs_per_sec > 1) {
-               sit_i->sec_entries = vzalloc(TOTAL_SECS(sbi) *
-                                       sizeof(struct sec_entry));
+               sit_i->sec_entries = f2fs_kvzalloc(MAIN_SECS(sbi) *
+                                       sizeof(struct sec_entry), GFP_KERNEL);
                if (!sit_i->sec_entries)
                        return -ENOMEM;
        }
@@ -1451,7 +2320,7 @@ static int build_sit_info(struct f2fs_sb_info *sbi)
 
        sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
        sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
-       sit_i->written_valid_blocks = le64_to_cpu(ckpt->valid_block_count);
+       sit_i->written_valid_blocks = 0;
        sit_i->sit_bitmap = dst_bitmap;
        sit_i->bitmap_size = bitmap_size;
        sit_i->dirty_sentries = 0;
@@ -1464,7 +2333,6 @@ static int build_sit_info(struct f2fs_sb_info *sbi)
 
 static int build_free_segmap(struct f2fs_sb_info *sbi)
 {
-       struct f2fs_sm_info *sm_info = SM_I(sbi);
        struct free_segmap_info *free_i;
        unsigned int bitmap_size, sec_bitmap_size;
 
@@ -1475,13 +2343,13 @@ static int build_free_segmap(struct f2fs_sb_info *sbi)
 
        SM_I(sbi)->free_info = free_i;
 
-       bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
-       free_i->free_segmap = kmalloc(bitmap_size, GFP_KERNEL);
+       bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
+       free_i->free_segmap = f2fs_kvmalloc(bitmap_size, GFP_KERNEL);
        if (!free_i->free_segmap)
                return -ENOMEM;
 
-       sec_bitmap_size = f2fs_bitmap_size(TOTAL_SECS(sbi));
-       free_i->free_secmap = kmalloc(sec_bitmap_size, GFP_KERNEL);
+       sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
+       free_i->free_secmap = f2fs_kvmalloc(sec_bitmap_size, GFP_KERNEL);
        if (!free_i->free_secmap)
                return -ENOMEM;
 
@@ -1490,11 +2358,10 @@ static int build_free_segmap(struct f2fs_sb_info *sbi)
        memset(free_i->free_secmap, 0xff, sec_bitmap_size);
 
        /* init free segmap information */
-       free_i->start_segno =
-               (unsigned int) GET_SEGNO_FROM_SEG0(sbi, sm_info->main_blkaddr);
+       free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi));
        free_i->free_segments = 0;
        free_i->free_sections = 0;
-       rwlock_init(&free_i->segmap_lock);
+       spin_lock_init(&free_i->segmap_lock);
        return 0;
 }
 
@@ -1503,7 +2370,7 @@ static int build_curseg(struct f2fs_sb_info *sbi)
        struct curseg_info *array;
        int i;
 
-       array = kzalloc(sizeof(*array) * NR_CURSEG_TYPE, GFP_KERNEL);
+       array = kcalloc(NR_CURSEG_TYPE, sizeof(*array), GFP_KERNEL);
        if (!array)
                return -ENOMEM;
 
@@ -1511,9 +2378,14 @@ static int build_curseg(struct f2fs_sb_info *sbi)
 
        for (i = 0; i < NR_CURSEG_TYPE; i++) {
                mutex_init(&array[i].curseg_mutex);
-               array[i].sum_blk = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
+               array[i].sum_blk = kzalloc(PAGE_SIZE, GFP_KERNEL);
                if (!array[i].sum_blk)
                        return -ENOMEM;
+               init_rwsem(&array[i].journal_rwsem);
+               array[i].journal = kzalloc(sizeof(struct f2fs_journal),
+                                                       GFP_KERNEL);
+               if (!array[i].journal)
+                       return -ENOMEM;
                array[i].segno = NULL_SEGNO;
                array[i].next_blkoff = 0;
        }
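
Each curseg now carries a separately allocated f2fs_journal guarded by journal_rwsem, so journal traffic no longer has to take curseg_mutex. The locking convention, as used by the SIT hunks above, in a minimal sketch:

    /* readers: scanning journal entries */
    down_read(&curseg->journal_rwsem);
    for (i = 0; i < nats_in_cursum(journal); i++)
            ;       /* inspect nid_in_journal(journal, i), etc. */
    up_read(&curseg->journal_rwsem);

    /* writers: adding or dropping entries */
    down_write(&curseg->journal_rwsem);
    update_nats_in_cursum(journal, 1);      /* reserve one more slot */
    up_write(&curseg->journal_rwsem);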
@@ -1524,37 +2396,73 @@ static void build_sit_entries(struct f2fs_sb_info *sbi)
 {
        struct sit_info *sit_i = SIT_I(sbi);
        struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
-       struct f2fs_summary_block *sum = curseg->sum_blk;
-       unsigned int start;
+       struct f2fs_journal *journal = curseg->journal;
+       struct seg_entry *se;
+       struct f2fs_sit_entry sit;
+       int sit_blk_cnt = SIT_BLK_CNT(sbi);
+       unsigned int i, start, end;
+       unsigned int readed, start_blk = 0;
 
-       for (start = 0; start < TOTAL_SEGS(sbi); start++) {
-               struct seg_entry *se = &sit_i->sentries[start];
-               struct f2fs_sit_block *sit_blk;
-               struct f2fs_sit_entry sit;
-               struct page *page;
-               int i;
+       do {
+               readed = ra_meta_pages(sbi, start_blk, BIO_MAX_PAGES,
+                                                       META_SIT, true);
+
+               start = start_blk * sit_i->sents_per_block;
+               end = (start_blk + readed) * sit_i->sents_per_block;
 
-               mutex_lock(&curseg->curseg_mutex);
-               for (i = 0; i < sits_in_cursum(sum); i++) {
-                       if (le32_to_cpu(segno_in_journal(sum, i)) == start) {
-                               sit = sit_in_journal(sum, i);
-                               mutex_unlock(&curseg->curseg_mutex);
-                               goto got_it;
+               for (; start < end && start < MAIN_SEGS(sbi); start++) {
+                       struct f2fs_sit_block *sit_blk;
+                       struct page *page;
+
+                       se = &sit_i->sentries[start];
+                       page = get_current_sit_page(sbi, start);
+                       sit_blk = (struct f2fs_sit_block *)page_address(page);
+                       sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
+                       f2fs_put_page(page, 1);
+
+                       check_block_count(sbi, start, &sit);
+                       seg_info_from_raw_sit(se, &sit);
+
+                       /* build discard map only one time */
+                       if (f2fs_discard_en(sbi)) {
+                               memcpy(se->discard_map, se->cur_valid_map,
+                                                       SIT_VBLOCK_MAP_SIZE);
+                               sbi->discard_blks += sbi->blocks_per_seg -
+                                                       se->valid_blocks;
                        }
+
+                       if (sbi->segs_per_sec > 1)
+                               get_sec_entry(sbi, start)->valid_blocks +=
+                                                       se->valid_blocks;
                }
-               mutex_unlock(&curseg->curseg_mutex);
-               page = get_current_sit_page(sbi, start);
-               sit_blk = (struct f2fs_sit_block *)page_address(page);
-               sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
-               f2fs_put_page(page, 1);
-got_it:
+               start_blk += readed;
+       } while (start_blk < sit_blk_cnt);
+
+       down_read(&curseg->journal_rwsem);
+       for (i = 0; i < sits_in_cursum(journal); i++) {
+               unsigned int old_valid_blocks;
+
+               start = le32_to_cpu(segno_in_journal(journal, i));
+               se = &sit_i->sentries[start];
+               sit = sit_in_journal(journal, i);
+
+               old_valid_blocks = se->valid_blocks;
+
                check_block_count(sbi, start, &sit);
                seg_info_from_raw_sit(se, &sit);
-               if (sbi->segs_per_sec > 1) {
-                       struct sec_entry *e = get_sec_entry(sbi, start);
-                       e->valid_blocks += se->valid_blocks;
+
+               if (f2fs_discard_en(sbi)) {
+                       memcpy(se->discard_map, se->cur_valid_map,
+                                               SIT_VBLOCK_MAP_SIZE);
+                       sbi->discard_blks += old_valid_blocks -
+                                               se->valid_blocks;
                }
+
+               if (sbi->segs_per_sec > 1)
+                       get_sec_entry(sbi, start)->valid_blocks +=
+                               se->valid_blocks - old_valid_blocks;
        }
+       up_read(&curseg->journal_rwsem);
 }
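
build_sit_entries() used to probe the journal for every segment before touching disk; it now bulk-loads the whole SIT area with readahead and patches the (few) journaled entries in afterwards under a single read lock. The batching skeleton, using the names above:

    unsigned int readed, start_blk = 0;

    do {
            /* prime the page cache with up to BIO_MAX_PAGES SIT blocks */
            readed = ra_meta_pages(sbi, start_blk, BIO_MAX_PAGES,
                                                    META_SIT, true);
            /* subsequent get_current_sit_page() calls hit the cache */
            start_blk += readed;
    } while (start_blk < SIT_BLK_CNT(sbi));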
 
 static void init_free_segmap(struct f2fs_sb_info *sbi)
@@ -1562,10 +2470,13 @@ static void init_free_segmap(struct f2fs_sb_info *sbi)
        unsigned int start;
        int type;
 
-       for (start = 0; start < TOTAL_SEGS(sbi); start++) {
+       for (start = 0; start < MAIN_SEGS(sbi); start++) {
                struct seg_entry *sentry = get_seg_entry(sbi, start);
                if (!sentry->valid_blocks)
                        __set_free(sbi, start);
+               else
+                       SIT_I(sbi)->written_valid_blocks +=
+                                               sentry->valid_blocks;
        }
 
        /* set use the current segments */
@@ -1582,15 +2493,19 @@ static void init_dirty_segmap(struct f2fs_sb_info *sbi)
        unsigned int segno = 0, offset = 0;
        unsigned short valid_blocks;
 
-       while (segno < TOTAL_SEGS(sbi)) {
+       while (1) {
                /* find dirty segment based on free segmap */
-               segno = find_next_inuse(free_i, TOTAL_SEGS(sbi), offset);
-               if (segno >= TOTAL_SEGS(sbi))
+               segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset);
+               if (segno >= MAIN_SEGS(sbi))
                        break;
                offset = segno + 1;
                valid_blocks = get_valid_blocks(sbi, segno, 0);
-               if (valid_blocks >= sbi->blocks_per_seg || !valid_blocks)
+               if (valid_blocks == sbi->blocks_per_seg || !valid_blocks)
+                       continue;
+               if (valid_blocks > sbi->blocks_per_seg) {
+                       f2fs_bug_on(sbi, 1);
                        continue;
+               }
                mutex_lock(&dirty_i->seglist_lock);
                __locate_dirty_segment(sbi, segno, DIRTY);
                mutex_unlock(&dirty_i->seglist_lock);
@@ -1600,9 +2515,9 @@ static void init_dirty_segmap(struct f2fs_sb_info *sbi)
 static int init_victim_secmap(struct f2fs_sb_info *sbi)
 {
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
-       unsigned int bitmap_size = f2fs_bitmap_size(TOTAL_SECS(sbi));
+       unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
 
-       dirty_i->victim_secmap = kzalloc(bitmap_size, GFP_KERNEL);
+       dirty_i->victim_secmap = f2fs_kvzalloc(bitmap_size, GFP_KERNEL);
        if (!dirty_i->victim_secmap)
                return -ENOMEM;
        return 0;
@@ -1621,10 +2536,10 @@ static int build_dirty_segmap(struct f2fs_sb_info *sbi)
        SM_I(sbi)->dirty_info = dirty_i;
        mutex_init(&dirty_i->seglist_lock);
 
-       bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(sbi));
+       bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
 
        for (i = 0; i < NR_DIRTY_TYPE; i++) {
-               dirty_i->dirty_segmap[i] = kzalloc(bitmap_size, GFP_KERNEL);
+               dirty_i->dirty_segmap[i] = f2fs_kvzalloc(bitmap_size, GFP_KERNEL);
                if (!dirty_i->dirty_segmap[i])
                        return -ENOMEM;
        }
@@ -1645,7 +2560,7 @@ static void init_min_max_mtime(struct f2fs_sb_info *sbi)
 
        sit_i->min_mtime = LLONG_MAX;
 
-       for (segno = 0; segno < TOTAL_SEGS(sbi); segno += sbi->segs_per_sec) {
+       for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
                unsigned int i;
                unsigned long long mtime = 0;
 
@@ -1674,8 +2589,6 @@ int build_segment_manager(struct f2fs_sb_info *sbi)
 
        /* init sm info */
        sbi->sm_info = sm_info;
-       INIT_LIST_HEAD(&sm_info->wblist_head);
-       spin_lock_init(&sm_info->wblist_lock);
        sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
        sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
        sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
@@ -1683,6 +2596,29 @@ int build_segment_manager(struct f2fs_sb_info *sbi)
        sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
        sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
        sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
+       sm_info->rec_prefree_segments = sm_info->main_segments *
+                                       DEF_RECLAIM_PREFREE_SEGMENTS / 100;
+       if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS)
+               sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS;
+
+       if (!test_opt(sbi, LFS))
+               sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
+       sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
+       sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
+
+       INIT_LIST_HEAD(&sm_info->discard_list);
+       sm_info->nr_discards = 0;
+       sm_info->max_discards = 0;
+
+       sm_info->trim_sections = DEF_BATCHED_TRIM_SECTIONS;
+
+       INIT_LIST_HEAD(&sm_info->sit_entry_set);
+
+       if (test_opt(sbi, FLUSH_MERGE) && !f2fs_readonly(sbi->sb)) {
+               err = create_flush_cmd_control(sbi);
+               if (err)
+                       return err;
+       }
 
        err = build_sit_info(sbi);
        if (err)
@@ -1712,7 +2648,7 @@ static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
 
        mutex_lock(&dirty_i->seglist_lock);
-       kfree(dirty_i->dirty_segmap[dirty_type]);
+       f2fs_kvfree(dirty_i->dirty_segmap[dirty_type]);
        dirty_i->nr_dirty[dirty_type] = 0;
        mutex_unlock(&dirty_i->seglist_lock);
 }
@@ -1720,7 +2656,7 @@ static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
 static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
 {
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
-       kfree(dirty_i->victim_secmap);
+       f2fs_kvfree(dirty_i->victim_secmap);
 }
 
 static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
@@ -1748,8 +2684,10 @@ static void destroy_curseg(struct f2fs_sb_info *sbi)
        if (!array)
                return;
        SM_I(sbi)->curseg_array = NULL;
-       for (i = 0; i < NR_CURSEG_TYPE; i++)
+       for (i = 0; i < NR_CURSEG_TYPE; i++) {
                kfree(array[i].sum_blk);
+               kfree(array[i].journal);
+       }
        kfree(array);
 }
 
@@ -1759,8 +2697,8 @@ static void destroy_free_segmap(struct f2fs_sb_info *sbi)
        if (!free_i)
                return;
        SM_I(sbi)->free_info = NULL;
-       kfree(free_i->free_segmap);
-       kfree(free_i->free_secmap);
+       f2fs_kvfree(free_i->free_segmap);
+       f2fs_kvfree(free_i->free_secmap);
        kfree(free_i);
 }
 
@@ -1773,14 +2711,17 @@ static void destroy_sit_info(struct f2fs_sb_info *sbi)
                return;
 
        if (sit_i->sentries) {
-               for (start = 0; start < TOTAL_SEGS(sbi); start++) {
+               for (start = 0; start < MAIN_SEGS(sbi); start++) {
                        kfree(sit_i->sentries[start].cur_valid_map);
                        kfree(sit_i->sentries[start].ckpt_valid_map);
+                       kfree(sit_i->sentries[start].discard_map);
                }
        }
-       vfree(sit_i->sentries);
-       vfree(sit_i->sec_entries);
-       kfree(sit_i->dirty_sentries_bitmap);
+       kfree(sit_i->tmp_map);
+
+       f2fs_kvfree(sit_i->sentries);
+       f2fs_kvfree(sit_i->sec_entries);
+       f2fs_kvfree(sit_i->dirty_sentries_bitmap);
 
        SM_I(sbi)->sit_info = NULL;
        kfree(sit_i->sit_bitmap);
@@ -1790,6 +2731,10 @@ static void destroy_sit_info(struct f2fs_sb_info *sbi)
 void destroy_segment_manager(struct f2fs_sb_info *sbi)
 {
        struct f2fs_sm_info *sm_info = SM_I(sbi);
+
+       if (!sm_info)
+               return;
+       destroy_flush_cmd_control(sbi, true);
        destroy_dirty_segmap(sbi);
        destroy_curseg(sbi);
        destroy_free_segmap(sbi);
@@ -1797,3 +2742,36 @@ void destroy_segment_manager(struct f2fs_sb_info *sbi)
        sbi->sm_info = NULL;
        kfree(sm_info);
 }
+
+int __init create_segment_manager_caches(void)
+{
+       discard_entry_slab = f2fs_kmem_cache_create("discard_entry",
+                       sizeof(struct discard_entry));
+       if (!discard_entry_slab)
+               goto fail;
+
+       sit_entry_set_slab = f2fs_kmem_cache_create("sit_entry_set",
+                       sizeof(struct sit_entry_set));
+       if (!sit_entry_set_slab)
+               goto destroy_discard_entry;
+
+       inmem_entry_slab = f2fs_kmem_cache_create("inmem_page_entry",
+                       sizeof(struct inmem_pages));
+       if (!inmem_entry_slab)
+               goto destroy_sit_entry_set;
+       return 0;
+
+destroy_sit_entry_set:
+       kmem_cache_destroy(sit_entry_set_slab);
+destroy_discard_entry:
+       kmem_cache_destroy(discard_entry_slab);
+fail:
+       return -ENOMEM;
+}
+
+void destroy_segment_manager_caches(void)
+{
+       kmem_cache_destroy(sit_entry_set_slab);
+       kmem_cache_destroy(discard_entry_slab);
+       kmem_cache_destroy(inmem_entry_slab);
+}
index 062424a0e4c3a1ab912e0008c1eaa836f29af85f..4fe8d9ccce62b32a2383a8fcfa65d008bd4f631c 100644 (file)
 #define NULL_SEGNO                     ((unsigned int)(~0))
 #define NULL_SECNO                     ((unsigned int)(~0))
 
+#define DEF_RECLAIM_PREFREE_SEGMENTS   5       /* 5% over total segments */
+#define DEF_MAX_RECLAIM_PREFREE_SEGMENTS       4096    /* 8GB at maximum */
+
+#define F2FS_MIN_SEGMENTS      9 /* SB + 2 (CP + SIT + NAT) + SSA + MAIN */
+
 /* L: Logical segment # in volume, R: Relative segment # in main area */
 #define GET_L2R_SEGNO(free_i, segno)   (segno - free_i->start_segno)
 #define GET_R2L_SEGNO(free_i, segno)   (segno + free_i->start_segno)
 
-#define IS_DATASEG(t)                                                  \
-       ((t == CURSEG_HOT_DATA) || (t == CURSEG_COLD_DATA) ||           \
-       (t == CURSEG_WARM_DATA))
-
-#define IS_NODESEG(t)                                                  \
-       ((t == CURSEG_HOT_NODE) || (t == CURSEG_COLD_NODE) ||           \
-       (t == CURSEG_WARM_NODE))
+#define IS_DATASEG(t)  (t <= CURSEG_COLD_DATA)
+#define IS_NODESEG(t)  (t >= CURSEG_HOT_NODE)
 
 #define IS_CURSEG(sbi, seg)                                            \
        ((seg == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno) ||      \
         (secno == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno /             \
          sbi->segs_per_sec))   \
 
-#define START_BLOCK(sbi, segno)                                                \
-       (SM_I(sbi)->seg0_blkaddr +                                      \
+#define MAIN_BLKADDR(sbi)      (SM_I(sbi)->main_blkaddr)
+#define SEG0_BLKADDR(sbi)      (SM_I(sbi)->seg0_blkaddr)
+
+#define MAIN_SEGS(sbi) (SM_I(sbi)->main_segments)
+#define MAIN_SECS(sbi) (sbi->total_sections)
+
+#define TOTAL_SEGS(sbi)        (SM_I(sbi)->segment_count)
+#define TOTAL_BLKS(sbi)        (TOTAL_SEGS(sbi) << sbi->log_blocks_per_seg)
+
+#define MAX_BLKADDR(sbi)       (SEG0_BLKADDR(sbi) + TOTAL_BLKS(sbi))
+#define SEGMENT_SIZE(sbi)      (1ULL << (sbi->log_blocksize +          \
+                                       sbi->log_blocks_per_seg))
+
+#define START_BLOCK(sbi, segno)        (SEG0_BLKADDR(sbi) +                    \
         (GET_R2L_SEGNO(FREE_I(sbi), segno) << sbi->log_blocks_per_seg))
+
 #define NEXT_FREE_BLKADDR(sbi, curseg)                                 \
        (START_BLOCK(sbi, curseg->segno) + curseg->next_blkoff)
 
-#define MAIN_BASE_BLOCK(sbi)   (SM_I(sbi)->main_blkaddr)
-
-#define GET_SEGOFF_FROM_SEG0(sbi, blk_addr)                            \
-       ((blk_addr) - SM_I(sbi)->seg0_blkaddr)
+#define GET_SEGOFF_FROM_SEG0(sbi, blk_addr)    ((blk_addr) - SEG0_BLKADDR(sbi))
 #define GET_SEGNO_FROM_SEG0(sbi, blk_addr)                             \
        (GET_SEGOFF_FROM_SEG0(sbi, blk_addr) >> sbi->log_blocks_per_seg)
+#define GET_BLKOFF_FROM_SEG0(sbi, blk_addr)                            \
+       (GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & (sbi->blocks_per_seg - 1))
+
 #define GET_SEGNO(sbi, blk_addr)                                       \
        (((blk_addr == NULL_ADDR) || (blk_addr == NEW_ADDR)) ?          \
        NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi),                 \
 
 #define SIT_ENTRY_OFFSET(sit_i, segno)                                 \
        (segno % sit_i->sents_per_block)
-#define SIT_BLOCK_OFFSET(sit_i, segno)                                 \
+#define SIT_BLOCK_OFFSET(segno)                                        \
        (segno / SIT_ENTRY_PER_BLOCK)
-#define        START_SEGNO(sit_i, segno)               \
-       (SIT_BLOCK_OFFSET(sit_i, segno) * SIT_ENTRY_PER_BLOCK)
+#define        START_SEGNO(segno)              \
+       (SIT_BLOCK_OFFSET(segno) * SIT_ENTRY_PER_BLOCK)
+#define SIT_BLK_CNT(sbi)                       \
+       ((MAIN_SEGS(sbi) + SIT_ENTRY_PER_BLOCK - 1) / SIT_ENTRY_PER_BLOCK)
 #define f2fs_bitmap_size(nr)                   \
        (BITS_TO_LONGS(nr) * sizeof(unsigned long))
-#define TOTAL_SEGS(sbi)        (SM_I(sbi)->main_segments)
-#define TOTAL_SECS(sbi)        (sbi->total_sections)
-
-#define SECTOR_FROM_BLOCK(sbi, blk_addr)                               \
-       (blk_addr << ((sbi)->log_blocksize - F2FS_LOG_SECTOR_SIZE))
-#define SECTOR_TO_BLOCK(sbi, sectors)                                  \
-       (sectors >> ((sbi)->log_blocksize - F2FS_LOG_SECTOR_SIZE))
-
-/* during checkpoint, bio_private is used to synchronize the last bio */
-struct bio_private {
-       struct f2fs_sb_info *sbi;
-       bool is_sync;
-       void *wait;
-};
+
+#define SECTOR_FROM_BLOCK(blk_addr)                                    \
+       (((sector_t)blk_addr) << F2FS_LOG_SECTORS_PER_BLOCK)
+#define SECTOR_TO_BLOCK(sectors)                                       \
+       (sectors >> F2FS_LOG_SECTORS_PER_BLOCK)
 
 /*
  * indicate a block allocation direction: RIGHT and LEFT.
@@ -131,10 +137,12 @@ enum {
 /*
  * BG_GC means the background cleaning job.
  * FG_GC means the on-demand cleaning job.
+ * FORCE_FG_GC means an on-demand cleaning job triggered from the background.
  */
 enum {
        BG_GC = 0,
-       FG_GC
+       FG_GC,
+       FORCE_FG_GC,
 };
 
 /* for a function parameter to select a victim segment */
@@ -142,6 +150,7 @@ struct victim_sel_policy {
        int alloc_mode;                 /* LFS or SSR */
        int gc_mode;                    /* GC_CB or GC_GREEDY */
        unsigned long *dirty_segmap;    /* dirty segment bitmap */
+       unsigned int max_search;        /* maximum # of segments to search */
        unsigned int offset;            /* last scanned bitmap offset */
        unsigned int ofs_unit;          /* bitmap search unit */
        unsigned int min_cost;          /* minimum cost */
@@ -149,15 +158,17 @@ struct victim_sel_policy {
 };
 
 struct seg_entry {
-       unsigned short valid_blocks;    /* # of valid blocks */
+       unsigned int type:6;            /* segment type like CURSEG_XXX_TYPE */
+       unsigned int valid_blocks:10;   /* # of valid blocks */
+       unsigned int ckpt_valid_blocks:10;      /* # of valid blocks last cp */
+       unsigned int padding:6;         /* padding */
        unsigned char *cur_valid_map;   /* validity bitmap of blocks */
        /*
         * # of valid blocks and the validity bitmap stored in the last
         * checkpoint pack. This information is used by the SSR mode.
         */
-       unsigned short ckpt_valid_blocks;
-       unsigned char *ckpt_valid_map;
-       unsigned char type;             /* segment type like CURSEG_XXX_TYPE */
+       unsigned char *ckpt_valid_map;  /* validity bitmap of blocks last cp */
+       unsigned char *discard_map;
        unsigned long long mtime;       /* modification time of the segment */
 };
 
@@ -169,6 +180,21 @@ struct segment_allocation {
        void (*allocate_segment)(struct f2fs_sb_info *, int, bool);
 };
 
+/*
+ * this value is set in a page's private data to indicate that
+ * the page is atomically written and is on the inmem_pages list.
+ */
+#define ATOMIC_WRITTEN_PAGE            ((unsigned long)-1)
+
+#define IS_ATOMIC_WRITTEN_PAGE(page)                   \
+               (page_private(page) == (unsigned long)ATOMIC_WRITTEN_PAGE)
+
+struct inmem_pages {
+       struct list_head list;
+       struct page *page;
+       block_t old_addr;               /* for revoking when fail to commit */
+};
+
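
ATOMIC_WRITTEN_PAGE is stashed in page->private to tag pages on the inmem_pages list so that writeback paths can recognize them. A sketch of the tag-and-test usage; the set_page_private()/SetPagePrivate() pairing is inferred from the macro, not shown in this hunk:

    /* tag: when the page joins the inmem list */
    set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE);
    SetPagePrivate(page);

    /* test: later, e.g. in a writeback path */
    if (IS_ATOMIC_WRITTEN_PAGE(page))
            return;     /* handled by the atomic-commit machinery */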
 struct sit_info {
        const struct segment_allocation *s_ops;
 
@@ -178,6 +204,7 @@ struct sit_info {
        char *sit_bitmap;               /* SIT bitmap pointer */
        unsigned int bitmap_size;       /* SIT bitmap size */
 
+       unsigned long *tmp_map;                 /* bitmap for temporary use */
        unsigned long *dirty_sentries_bitmap;   /* bitmap for dirty sentries */
        unsigned int dirty_sentries;            /* # of dirty sentries */
        unsigned int sents_per_block;           /* # of SIT entries per block */
@@ -196,7 +223,7 @@ struct free_segmap_info {
        unsigned int start_segno;       /* start segment number logically */
        unsigned int free_segments;     /* # of free segments */
        unsigned int free_sections;     /* # of free sections */
-       rwlock_t segmap_lock;           /* free segmap lock */
+       spinlock_t segmap_lock;         /* free segmap lock */
        unsigned long *free_segmap;     /* free segment bitmap */
        unsigned long *free_secmap;     /* free section bitmap */
 };
@@ -232,6 +259,8 @@ struct victim_selection {
 struct curseg_info {
        struct mutex curseg_mutex;              /* lock for consistency */
        struct f2fs_summary_block *sum_blk;     /* cached summary block */
+       struct rw_semaphore journal_rwsem;      /* protect journal area */
+       struct f2fs_journal *journal;           /* cached journal info */
        unsigned char alloc_type;               /* current allocation type */
        unsigned int segno;                     /* current segment number */
        unsigned short next_blkoff;             /* next block offset to write */
@@ -239,6 +268,12 @@ struct curseg_info {
        unsigned int next_segno;                /* preallocated segment */
 };
 
+struct sit_entry_set {
+       struct list_head set_list;      /* link with all sit sets */
+       unsigned int start_segno;       /* start segno of sits in set */
+       unsigned int entry_cnt;         /* the # of sit entries in set */
+};
+
 /*
  * inline functions
  */
@@ -301,9 +336,9 @@ static inline unsigned int find_next_inuse(struct free_segmap_info *free_i,
                unsigned int max, unsigned int segno)
 {
        unsigned int ret;
-       read_lock(&free_i->segmap_lock);
+       spin_lock(&free_i->segmap_lock);
        ret = find_next_bit(free_i->free_segmap, max, segno);
-       read_unlock(&free_i->segmap_lock);
+       spin_unlock(&free_i->segmap_lock);
        return ret;
 }
 
@@ -314,16 +349,17 @@ static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno)
        unsigned int start_segno = secno * sbi->segs_per_sec;
        unsigned int next;
 
-       write_lock(&free_i->segmap_lock);
+       spin_lock(&free_i->segmap_lock);
        clear_bit(segno, free_i->free_segmap);
        free_i->free_segments++;
 
-       next = find_next_bit(free_i->free_segmap, TOTAL_SEGS(sbi), start_segno);
+       next = find_next_bit(free_i->free_segmap,
+                       start_segno + sbi->segs_per_sec, start_segno);
        if (next >= start_segno + sbi->segs_per_sec) {
                clear_bit(secno, free_i->free_secmap);
                free_i->free_sections++;
        }
-       write_unlock(&free_i->segmap_lock);
+       spin_unlock(&free_i->segmap_lock);
 }
 
 static inline void __set_inuse(struct f2fs_sb_info *sbi,
@@ -345,18 +381,18 @@ static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
        unsigned int start_segno = secno * sbi->segs_per_sec;
        unsigned int next;
 
-       write_lock(&free_i->segmap_lock);
+       spin_lock(&free_i->segmap_lock);
        if (test_and_clear_bit(segno, free_i->free_segmap)) {
                free_i->free_segments++;
 
-               next = find_next_bit(free_i->free_segmap, TOTAL_SEGS(sbi),
-                                                               start_segno);
+               next = find_next_bit(free_i->free_segmap,
+                               start_segno + sbi->segs_per_sec, start_segno);
                if (next >= start_segno + sbi->segs_per_sec) {
                        if (test_and_clear_bit(secno, free_i->free_secmap))
                                free_i->free_sections++;
                }
        }
-       write_unlock(&free_i->segmap_lock);
+       spin_unlock(&free_i->segmap_lock);
 }
 
 static inline void __set_test_and_inuse(struct f2fs_sb_info *sbi,
@@ -364,13 +400,13 @@ static inline void __set_test_and_inuse(struct f2fs_sb_info *sbi,
 {
        struct free_segmap_info *free_i = FREE_I(sbi);
        unsigned int secno = segno / sbi->segs_per_sec;
-       write_lock(&free_i->segmap_lock);
+       spin_lock(&free_i->segmap_lock);
        if (!test_and_set_bit(segno, free_i->free_segmap)) {
                free_i->free_segments--;
                if (!test_and_set_bit(secno, free_i->free_secmap))
                        free_i->free_sections--;
        }
-       write_unlock(&free_i->segmap_lock);
+       spin_unlock(&free_i->segmap_lock);
 }
 
 static inline void get_sit_bitmap(struct f2fs_sb_info *sbi,
@@ -382,26 +418,12 @@ static inline void get_sit_bitmap(struct f2fs_sb_info *sbi,
 
 static inline block_t written_block_count(struct f2fs_sb_info *sbi)
 {
-       struct sit_info *sit_i = SIT_I(sbi);
-       block_t vblocks;
-
-       mutex_lock(&sit_i->sentry_lock);
-       vblocks = sit_i->written_valid_blocks;
-       mutex_unlock(&sit_i->sentry_lock);
-
-       return vblocks;
+       return SIT_I(sbi)->written_valid_blocks;
 }
 
 static inline unsigned int free_segments(struct f2fs_sb_info *sbi)
 {
-       struct free_segmap_info *free_i = FREE_I(sbi);
-       unsigned int free_segs;
-
-       read_lock(&free_i->segmap_lock);
-       free_segs = free_i->free_segments;
-       read_unlock(&free_i->segmap_lock);
-
-       return free_segs;
+       return FREE_I(sbi)->free_segments;
 }
 
 static inline int reserved_segments(struct f2fs_sb_info *sbi)
@@ -411,14 +433,7 @@ static inline int reserved_segments(struct f2fs_sb_info *sbi)
 
 static inline unsigned int free_sections(struct f2fs_sb_info *sbi)
 {
-       struct free_segmap_info *free_i = FREE_I(sbi);
-       unsigned int free_secs;
-
-       read_lock(&free_i->segmap_lock);
-       free_secs = free_i->free_sections;
-       read_unlock(&free_i->segmap_lock);
-
-       return free_secs;
+       return FREE_I(sbi)->free_sections;
 }
 
 static inline unsigned int prefree_segments(struct f2fs_sb_info *sbi)
@@ -453,41 +468,96 @@ static inline int reserved_sections(struct f2fs_sb_info *sbi)
 
 static inline bool need_SSR(struct f2fs_sb_info *sbi)
 {
-       return (free_sections(sbi) < overprovision_sections(sbi));
+       int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
+       int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
+       int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);
+
+       if (test_opt(sbi, LFS))
+               return false;
+
+       return free_sections(sbi) <= (node_secs + 2 * dent_secs + imeta_secs +
+                                               reserved_sections(sbi) + 1);
 }
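
need_SSR() now reserves headroom for the sections that dirty node, dentry and inode-meta pages will still consume (dentry sections counted twice as a safety margin). A worked example with assumed counts:

    /* assumed: node_secs = 3, dent_secs = 2, imeta_secs = 1,
     * reserved_sections(sbi) = 6, LFS mount option off.
     * threshold = 3 + 2*2 + 1 + 6 + 1 = 15
     * => SSR is requested once free_sections(sbi) <= 15 */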
 
-static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi, int freed)
+static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi,
+                                       int freed, int needed)
 {
        int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
        int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
+       int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);
 
-       if (sbi->por_doing)
+       if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
                return false;
 
-       return ((free_sections(sbi) + freed) <= (node_secs + 2 * dent_secs +
-                                               reserved_sections(sbi)));
+       return (free_sections(sbi) + freed) <=
+               (node_secs + 2 * dent_secs + imeta_secs +
+               reserved_sections(sbi) + needed);
+}
+
+static inline bool excess_prefree_segs(struct f2fs_sb_info *sbi)
+{
+       return prefree_segments(sbi) > SM_I(sbi)->rec_prefree_segments;
 }
 
 static inline int utilization(struct f2fs_sb_info *sbi)
 {
-       return div_u64(valid_user_blocks(sbi) * 100, sbi->user_block_count);
+       return div_u64((u64)valid_user_blocks(sbi) * 100,
+                                       sbi->user_block_count);
 }
 
 /*
  * Sometimes f2fs may be better to drop out-of-place update policy.
- * So, if fs utilization is over MIN_IPU_UTIL, then f2fs tries to write
- * data in the original place likewise other traditional file systems.
- * But, currently set 100 in percentage, which means it is disabled.
- * See below need_inplace_update().
+ * And, users can control the policy through sysfs entries.
+ * There are five policies with triggering conditions as follows.
+ * F2FS_IPU_FORCE - all the time,
+ * F2FS_IPU_SSR - if SSR mode is activated,
+ * F2FS_IPU_UTIL - if FS utilization is over the threshold,
+ * F2FS_IPU_SSR_UTIL - if SSR mode is activated and FS utilization is over
+ *                     the threshold,
+ * F2FS_IPU_FSYNC - activated in fsync path only for high performance flash
+ *                     storages. IPU will be triggered only if the # of dirty
+ *                     pages exceeds min_fsync_blocks.
+ * F2FS_IPU_DISABLE - disable IPU. (=default option)
  */
-#define MIN_IPU_UTIL           100
+#define DEF_MIN_IPU_UTIL       70
+#define DEF_MIN_FSYNC_BLOCKS   8
+
+enum {
+       F2FS_IPU_FORCE,
+       F2FS_IPU_SSR,
+       F2FS_IPU_UTIL,
+       F2FS_IPU_SSR_UTIL,
+       F2FS_IPU_FSYNC,
+};
+
 static inline bool need_inplace_update(struct inode *inode)
 {
-       struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
-       if (S_ISDIR(inode->i_mode))
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       unsigned int policy = SM_I(sbi)->ipu_policy;
+
+       /* IPU can be done only for user data */
+       if (S_ISDIR(inode->i_mode) || f2fs_is_atomic_file(inode))
                return false;
-       if (need_SSR(sbi) && utilization(sbi) > MIN_IPU_UTIL)
+
+       if (test_opt(sbi, LFS))
+               return false;
+
+       if (policy & (0x1 << F2FS_IPU_FORCE))
+               return true;
+       if (policy & (0x1 << F2FS_IPU_SSR) && need_SSR(sbi))
+               return true;
+       if (policy & (0x1 << F2FS_IPU_UTIL) &&
+                       utilization(sbi) > SM_I(sbi)->min_ipu_util)
                return true;
+       if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && need_SSR(sbi) &&
+                       utilization(sbi) > SM_I(sbi)->min_ipu_util)
+               return true;
+
+       /* this is only set during fdatasync */
+       if (policy & (0x1 << F2FS_IPU_FSYNC) &&
+                       is_inode_flag_set(inode, FI_NEED_IPU))
+               return true;
+
        return false;
 }
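
Since ipu_policy is a bitmask, several triggers can be armed at once (tunable through the ipu_policy and min_ipu_util sysfs entries described at the top of this patch). A hypothetical combination:

    /* hypothetical: allow IPU under SSR pressure or at high utilization */
    SM_I(sbi)->ipu_policy = (1 << F2FS_IPU_SSR) | (1 << F2FS_IPU_UTIL);
    SM_I(sbi)->min_ipu_util = 80;   /* percent */

    /* need_inplace_update() then returns true when need_SSR(sbi)
     * holds, or when utilization(sbi) > 80 */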
 
@@ -513,53 +583,52 @@ static inline unsigned short curseg_blkoff(struct f2fs_sb_info *sbi, int type)
 
 static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
 {
-       unsigned int end_segno = SM_I(sbi)->segment_count - 1;
-       BUG_ON(segno > end_segno);
+       f2fs_bug_on(sbi, segno > TOTAL_SEGS(sbi) - 1);
 }
 
-/*
- * This function is used for only debugging.
- * NOTE: In future, we have to remove this function.
- */
 static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr)
 {
-       struct f2fs_sm_info *sm_info = SM_I(sbi);
-       block_t total_blks = sm_info->segment_count << sbi->log_blocks_per_seg;
-       block_t start_addr = sm_info->seg0_blkaddr;
-       block_t end_addr = start_addr + total_blks - 1;
-       BUG_ON(blk_addr < start_addr);
-       BUG_ON(blk_addr > end_addr);
+       BUG_ON(blk_addr < SEG0_BLKADDR(sbi)
+                       || blk_addr >= MAX_BLKADDR(sbi));
 }
 
 /*
- * Summary block is always treated as invalid block
+ * Summary block is always treated as an invalid block
  */
 static inline void check_block_count(struct f2fs_sb_info *sbi,
                int segno, struct f2fs_sit_entry *raw_sit)
 {
-       struct f2fs_sm_info *sm_info = SM_I(sbi);
-       unsigned int end_segno = sm_info->segment_count - 1;
+#ifdef CONFIG_F2FS_CHECK_FS
+       bool is_valid = test_bit_le(0, raw_sit->valid_map) ? true : false;
        int valid_blocks = 0;
-       int i;
-
-       /* check segment usage */
-       BUG_ON(GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg);
-
-       /* check boundary of a given segment number */
-       BUG_ON(segno > end_segno);
+       int cur_pos = 0, next_pos;
 
        /* check bitmap with valid block count */
-       for (i = 0; i < sbi->blocks_per_seg; i++)
-               if (f2fs_test_bit(i, raw_sit->valid_map))
-                       valid_blocks++;
+       do {
+               if (is_valid) {
+                       next_pos = find_next_zero_bit_le(&raw_sit->valid_map,
+                                       sbi->blocks_per_seg,
+                                       cur_pos);
+                       valid_blocks += next_pos - cur_pos;
+               } else
+                       next_pos = find_next_bit_le(&raw_sit->valid_map,
+                                       sbi->blocks_per_seg,
+                                       cur_pos);
+               cur_pos = next_pos;
+               is_valid = !is_valid;
+       } while (cur_pos < sbi->blocks_per_seg);
        BUG_ON(GET_SIT_VBLOCKS(raw_sit) != valid_blocks);
+#endif
+       /* check segment usage, and check boundary of a given segment number */
+       f2fs_bug_on(sbi, GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg
+                                       || segno > TOTAL_SEGS(sbi) - 1);
 }
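
With CONFIG_F2FS_CHECK_FS, check_block_count() verifies the SIT valid-block count by run-length scanning instead of testing every bit: it alternates find_next_zero_bit_le()/find_next_bit_le() and sums only the lengths of the valid runs. The same logic as a standalone helper (hypothetical name):

    static int count_valid_le(const void *map, int nbits)
    {
            int cur = 0, next, valid = 0;
            bool is_valid = test_bit_le(0, map);    /* kind of first run */

            do {
                    if (is_valid) {
                            next = find_next_zero_bit_le(map, nbits, cur);
                            valid += next - cur;    /* valid-run length */
                    } else {
                            next = find_next_bit_le(map, nbits, cur);
                    }
                    cur = next;
                    is_valid = !is_valid;
            } while (cur < nbits);

            return valid;   /* equals the bitmap's population count */
    }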
 
 static inline pgoff_t current_sit_addr(struct f2fs_sb_info *sbi,
                                                unsigned int start)
 {
        struct sit_info *sit_i = SIT_I(sbi);
-       unsigned int offset = SIT_BLOCK_OFFSET(sit_i, start);
+       unsigned int offset = SIT_BLOCK_OFFSET(start);
        block_t blk_addr = sit_i->sit_base_addr + offset;
 
        check_seg_range(sbi, start);
@@ -586,12 +655,9 @@ static inline pgoff_t next_sit_addr(struct f2fs_sb_info *sbi,
 
 static inline void set_to_next_sit(struct sit_info *sit_i, unsigned int start)
 {
-       unsigned int block_off = SIT_BLOCK_OFFSET(sit_i, start);
+       unsigned int block_off = SIT_BLOCK_OFFSET(start);
 
-       if (f2fs_test_bit(block_off, sit_i->sit_bitmap))
-               f2fs_clear_bit(block_off, sit_i->sit_bitmap);
-       else
-               f2fs_set_bit(block_off, sit_i->sit_bitmap);
+       f2fs_change_bit(block_off, sit_i->sit_bitmap);
 }
 
 static inline unsigned long long get_mtime(struct f2fs_sb_info *sbi)
@@ -629,9 +695,44 @@ static inline bool sec_usage_check(struct f2fs_sb_info *sbi, unsigned int secno)
        return false;
 }
 
-static inline unsigned int max_hw_blocks(struct f2fs_sb_info *sbi)
+/*
+ * It is very important to gather dirty pages and write them at once, so that
+ * we can submit a big bio without interfering with other data writes.
+ * By default, one segment's worth of pages is used for data,
+ * eight segments' worth for nodes, and
+ * 8 * BIO_MAX_PAGES for meta, as computed in nr_pages_to_skip() below.
+ */
+static inline int nr_pages_to_skip(struct f2fs_sb_info *sbi, int type)
 {
-       struct block_device *bdev = sbi->sb->s_bdev;
-       struct request_queue *q = bdev_get_queue(bdev);
-       return SECTOR_TO_BLOCK(sbi, queue_max_sectors(q));
+       if (sbi->sb->s_bdi->dirty_exceeded)
+               return 0;
+
+       if (type == DATA)
+               return sbi->blocks_per_seg;
+       else if (type == NODE)
+               return 8 * sbi->blocks_per_seg;
+       else if (type == META)
+               return 8 * BIO_MAX_PAGES;
+       else
+               return 0;
+}
+
+/*
+ * When writing pages, it is better to align nr_to_write to the segment size.
+ */
+static inline long nr_pages_to_write(struct f2fs_sb_info *sbi, int type,
+                                       struct writeback_control *wbc)
+{
+       long nr_to_write, desired;
+
+       if (wbc->sync_mode != WB_SYNC_NONE)
+               return 0;
+
+       nr_to_write = wbc->nr_to_write;
+       desired = BIO_MAX_PAGES;
+       if (type == NODE)
+               desired <<= 1;
+
+       wbc->nr_to_write = desired;
+       return desired - nr_to_write;
 }
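
nr_pages_to_write() inflates wbc->nr_to_write to a bio-friendly quantum and returns the surplus so the caller can settle the accounting after writeback. A worked example, assuming BIO_MAX_PAGES is 256:

    /* caller wants 64 DATA pages written, sync_mode == WB_SYNC_NONE */
    wbc->nr_to_write = 64;
    diff = nr_pages_to_write(sbi, DATA, wbc);
    /* wbc->nr_to_write is now 256 and diff == 192; after writeback the
     * caller subtracts diff again to honor the original 64-page budget */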
diff --git a/fs/f2fs/shrinker.c b/fs/f2fs/shrinker.c
new file mode 100644 (file)
index 0000000..cb303f9
--- /dev/null
@@ -0,0 +1,143 @@
+/*
+ * f2fs shrinker support
+ *   the basic infra was copied from fs/ubifs/shrinker.c
+ *
+ * Copyright (c) 2015 Motorola Mobility
+ * Copyright (c) 2015 Jaegeuk Kim <jaegeuk@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/fs.h>
+#include <linux/f2fs_fs.h>
+
+#include "f2fs.h"
+#include "node.h"
+
+static LIST_HEAD(f2fs_list);
+static DEFINE_SPINLOCK(f2fs_list_lock);
+static unsigned int shrinker_run_no;
+
+static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi)
+{
+       long count = NM_I(sbi)->nat_cnt - NM_I(sbi)->dirty_nat_cnt;
+
+       return count > 0 ? count : 0;
+}
+
+static unsigned long __count_free_nids(struct f2fs_sb_info *sbi)
+{
+       long count = NM_I(sbi)->nid_cnt[FREE_NID_LIST] - MAX_FREE_NIDS;
+
+       return count > 0 ? count : 0;
+}
+
+static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi)
+{
+       return atomic_read(&sbi->total_zombie_tree) +
+                               atomic_read(&sbi->total_ext_node);
+}
+
+int f2fs_shrink_count(struct shrinker *shrink,
+                               struct shrink_control *sc)
+{
+       struct f2fs_sb_info *sbi;
+       struct list_head *p;
+       unsigned long count = 0;
+
+       spin_lock(&f2fs_list_lock);
+       p = f2fs_list.next;
+       while (p != &f2fs_list) {
+               sbi = list_entry(p, struct f2fs_sb_info, s_list);
+
+               /* stop f2fs_put_super */
+               if (!mutex_trylock(&sbi->umount_mutex)) {
+                       p = p->next;
+                       continue;
+               }
+               spin_unlock(&f2fs_list_lock);
+
+               /* count extent cache entries */
+               count += __count_extent_cache(sbi);
+
+               /* shrink clean nat cache entries */
+               count += __count_nat_entries(sbi);
+
+               /* count free nids cache entries */
+               count += __count_free_nids(sbi);
+
+               spin_lock(&f2fs_list_lock);
+               p = p->next;
+               mutex_unlock(&sbi->umount_mutex);
+       }
+       spin_unlock(&f2fs_list_lock);
+       return count;
+}
+
+int f2fs_shrink_scan(struct shrinker *shrink,
+                               struct shrink_control *sc)
+{
+       unsigned long nr = sc->nr_to_scan;
+       struct f2fs_sb_info *sbi;
+       struct list_head *p;
+       unsigned int run_no;
+       unsigned long freed = 0;
+
+       spin_lock(&f2fs_list_lock);
+       do {
+               run_no = ++shrinker_run_no;
+       } while (run_no == 0);
+       p = f2fs_list.next;
+       while (p != &f2fs_list) {
+               sbi = list_entry(p, struct f2fs_sb_info, s_list);
+
+               if (sbi->shrinker_run_no == run_no)
+                       break;
+
+               /* stop f2fs_put_super */
+               if (!mutex_trylock(&sbi->umount_mutex)) {
+                       p = p->next;
+                       continue;
+               }
+               spin_unlock(&f2fs_list_lock);
+
+               sbi->shrinker_run_no = run_no;
+
+               /* shrink extent cache entries */
+               freed += f2fs_shrink_extent_tree(sbi, nr >> 1);
+
+               /* shrink clean nat cache entries */
+               if (freed < nr)
+                       freed += try_to_free_nats(sbi, nr - freed);
+
+               /* shrink free nids cache entries */
+               if (freed < nr)
+                       freed += try_to_free_nids(sbi, nr - freed);
+
+               spin_lock(&f2fs_list_lock);
+               p = p->next;
+               list_move_tail(&sbi->s_list, &f2fs_list);
+               mutex_unlock(&sbi->umount_mutex);
+               if (freed >= nr)
+                       break;
+       }
+       spin_unlock(&f2fs_list_lock);
+       return f2fs_shrink_count(NULL, NULL);
+}
+
+void f2fs_join_shrinker(struct f2fs_sb_info *sbi)
+{
+       spin_lock(&f2fs_list_lock);
+       list_add_tail(&sbi->s_list, &f2fs_list);
+       spin_unlock(&f2fs_list_lock);
+}
+
+void f2fs_leave_shrinker(struct f2fs_sb_info *sbi)
+{
+       f2fs_shrink_extent_tree(sbi, __count_extent_cache(sbi));
+
+       spin_lock(&f2fs_list_lock);
+       list_del(&sbi->s_list);
+       spin_unlock(&f2fs_list_lock);
+}
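For orientation, a minimal sketch of how these entry points are expected to be wired up under the linux-3.10 shrinker API, where a single ->shrink() callback serves both counting (sc->nr_to_scan == 0) and scanning. register_shrinker()/unregister_shrinker() are the stock mm/vmscan.c helpers; the init/exit function names follow super.c below, and the body here is illustrative, not part of the patch:

	/* sketch: registration of f2fs_shrinker_info (defined in super.c below) */
	static int __init init_f2fs_fs(void)
	{
		/* ... slab caches, sysfs kset, register_filesystem() ... */
		register_shrinker(&f2fs_shrinker_info);
		return 0;
	}

	static void __exit exit_f2fs_fs(void)
	{
		unregister_shrinker(&f2fs_shrinker_info);
		/* ... teardown in reverse order ... */
	}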
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 8555f7df82c796720c2c85ce0bd0af6248efa1c2..f0105b697fc32f724a240b75efafe63d22ffb42b 100644 (file)
 #include <linux/parser.h>
 #include <linux/mount.h>
 #include <linux/seq_file.h>
+#include <linux/proc_fs.h>
 #include <linux/random.h>
 #include <linux/exportfs.h>
 #include <linux/blkdev.h>
 #include <linux/f2fs_fs.h>
+#include <linux/sysfs.h>
 
 #include "f2fs.h"
 #include "node.h"
 #include "segment.h"
 #include "xattr.h"
+#include "gc.h"
+#include "trace.h"
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/f2fs.h>
 
+static struct proc_dir_entry *f2fs_proc_root;
 static struct kmem_cache *f2fs_inode_cachep;
+static struct kset *f2fs_kset;
+
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+
+char *fault_name[FAULT_MAX] = {
+       [FAULT_KMALLOC]         = "kmalloc",
+       [FAULT_PAGE_ALLOC]      = "page alloc",
+       [FAULT_ALLOC_NID]       = "alloc nid",
+       [FAULT_ORPHAN]          = "orphan",
+       [FAULT_BLOCK]           = "no more block",
+       [FAULT_DIR_DEPTH]       = "too big dir depth",
+       [FAULT_EVICT_INODE]     = "evict_inode fail",
+       [FAULT_IO]              = "IO error",
+       [FAULT_CHECKPOINT]      = "checkpoint error",
+};
+
+static void f2fs_build_fault_attr(struct f2fs_sb_info *sbi,
+                                               unsigned int rate)
+{
+       struct f2fs_fault_info *ffi = &sbi->fault_info;
+
+       if (rate) {
+               atomic_set(&ffi->inject_ops, 0);
+               ffi->inject_rate = rate;
+               ffi->inject_type = (1 << FAULT_MAX) - 1;
+       } else {
+               memset(ffi, 0, sizeof(struct f2fs_fault_info));
+       }
+}
+#endif
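/*
 * Editorial note (sketch, not part of the patch): mounting with
 * fault_injection=<rate> arms every fault type at once
 * (inject_type = (1 << FAULT_MAX) - 1); by the convention of the
 * time_to_inject() helper in f2fs.h, roughly one fault fires per
 * <rate> instrumented operations. Individual knobs can then be tuned
 * through the inject_rate/inject_type sysfs attributes added below.
 */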
+
+/* f2fs-wide shrinker description */
+static struct shrinker f2fs_shrinker_info = {
+       .shrink = f2fs_shrink_scan,
+       .seeks = DEFAULT_SEEKS,
+};
 
 enum {
-       Opt_gc_background_off,
+       Opt_gc_background,
        Opt_disable_roll_forward,
+       Opt_norecovery,
        Opt_discard,
+       Opt_nodiscard,
        Opt_noheap,
+       Opt_user_xattr,
        Opt_nouser_xattr,
+       Opt_acl,
        Opt_noacl,
        Opt_active_logs,
        Opt_disable_ext_identify,
+       Opt_inline_xattr,
+       Opt_inline_data,
+       Opt_inline_dentry,
+       Opt_noinline_dentry,
+       Opt_flush_merge,
+       Opt_noflush_merge,
+       Opt_nobarrier,
+       Opt_fastboot,
+       Opt_extent_cache,
+       Opt_noextent_cache,
+       Opt_noinline_data,
+       Opt_data_flush,
+       Opt_mode,
+       Opt_fault_injection,
        Opt_err,
 };
 
 static match_table_t f2fs_tokens = {
-       {Opt_gc_background_off, "background_gc_off"},
+       {Opt_gc_background, "background_gc=%s"},
        {Opt_disable_roll_forward, "disable_roll_forward"},
+       {Opt_norecovery, "norecovery"},
        {Opt_discard, "discard"},
+       {Opt_nodiscard, "nodiscard"},
        {Opt_noheap, "no_heap"},
+       {Opt_user_xattr, "user_xattr"},
        {Opt_nouser_xattr, "nouser_xattr"},
+       {Opt_acl, "acl"},
        {Opt_noacl, "noacl"},
        {Opt_active_logs, "active_logs=%u"},
        {Opt_disable_ext_identify, "disable_ext_identify"},
+       {Opt_inline_xattr, "inline_xattr"},
+       {Opt_inline_data, "inline_data"},
+       {Opt_inline_dentry, "inline_dentry"},
+       {Opt_noinline_dentry, "noinline_dentry"},
+       {Opt_flush_merge, "flush_merge"},
+       {Opt_noflush_merge, "noflush_merge"},
+       {Opt_nobarrier, "nobarrier"},
+       {Opt_fastboot, "fastboot"},
+       {Opt_extent_cache, "extent_cache"},
+       {Opt_noextent_cache, "noextent_cache"},
+       {Opt_noinline_data, "noinline_data"},
+       {Opt_data_flush, "data_flush"},
+       {Opt_mode, "mode=%s"},
+       {Opt_fault_injection, "fault_injection=%u"},
        {Opt_err, NULL},
 };
 
+/* Sysfs support for f2fs */
+enum {
+       GC_THREAD,      /* struct f2fs_gc_thread */
+       SM_INFO,        /* struct f2fs_sm_info */
+       NM_INFO,        /* struct f2fs_nm_info */
+       F2FS_SBI,       /* struct f2fs_sb_info */
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+       FAULT_INFO_RATE,        /* struct f2fs_fault_info */
+       FAULT_INFO_TYPE,        /* struct f2fs_fault_info */
+#endif
+};
+
+struct f2fs_attr {
+       struct attribute attr;
+       ssize_t (*show)(struct f2fs_attr *, struct f2fs_sb_info *, char *);
+       ssize_t (*store)(struct f2fs_attr *, struct f2fs_sb_info *,
+                        const char *, size_t);
+       int struct_type;
+       int offset;
+};
+
+static unsigned char *__struct_ptr(struct f2fs_sb_info *sbi, int struct_type)
+{
+       if (struct_type == GC_THREAD)
+               return (unsigned char *)sbi->gc_thread;
+       else if (struct_type == SM_INFO)
+               return (unsigned char *)SM_I(sbi);
+       else if (struct_type == NM_INFO)
+               return (unsigned char *)NM_I(sbi);
+       else if (struct_type == F2FS_SBI)
+               return (unsigned char *)sbi;
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+       else if (struct_type == FAULT_INFO_RATE ||
+                                       struct_type == FAULT_INFO_TYPE)
+               return (unsigned char *)&sbi->fault_info;
+#endif
+       return NULL;
+}
+
+static ssize_t lifetime_write_kbytes_show(struct f2fs_attr *a,
+               struct f2fs_sb_info *sbi, char *buf)
+{
+       struct super_block *sb = sbi->sb;
+
+       if (!sb->s_bdev->bd_part)
+               return snprintf(buf, PAGE_SIZE, "0\n");
+
+       return snprintf(buf, PAGE_SIZE, "%llu\n",
+               (unsigned long long)(sbi->kbytes_written +
+                       BD_PART_WRITTEN(sbi)));
+}
+
+static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
+                       struct f2fs_sb_info *sbi, char *buf)
+{
+       unsigned char *ptr = NULL;
+       unsigned int *ui;
+
+       ptr = __struct_ptr(sbi, a->struct_type);
+       if (!ptr)
+               return -EINVAL;
+
+       ui = (unsigned int *)(ptr + a->offset);
+
+       return snprintf(buf, PAGE_SIZE, "%u\n", *ui);
+}
+
+static ssize_t f2fs_sbi_store(struct f2fs_attr *a,
+                       struct f2fs_sb_info *sbi,
+                       const char *buf, size_t count)
+{
+       unsigned char *ptr;
+       unsigned long t;
+       unsigned int *ui;
+       ssize_t ret;
+
+       ptr = __struct_ptr(sbi, a->struct_type);
+       if (!ptr)
+               return -EINVAL;
+
+       ui = (unsigned int *)(ptr + a->offset);
+
+       ret = kstrtoul(skip_spaces(buf), 0, &t);
+       if (ret < 0)
+               return ret;
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+       if (a->struct_type == FAULT_INFO_TYPE && t >= (1 << FAULT_MAX))
+               return -EINVAL;
+#endif
+       *ui = t;
+       return count;
+}
+
+static ssize_t f2fs_attr_show(struct kobject *kobj,
+                               struct attribute *attr, char *buf)
+{
+       struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
+                                                               s_kobj);
+       struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr);
+
+       return a->show ? a->show(a, sbi, buf) : 0;
+}
+
+static ssize_t f2fs_attr_store(struct kobject *kobj, struct attribute *attr,
+                                               const char *buf, size_t len)
+{
+       struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
+                                                                       s_kobj);
+       struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr);
+
+       return a->store ? a->store(a, sbi, buf, len) : 0;
+}
+
+static void f2fs_sb_release(struct kobject *kobj)
+{
+       struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
+                                                               s_kobj);
+       complete(&sbi->s_kobj_unregister);
+}
+
+#define F2FS_ATTR_OFFSET(_struct_type, _name, _mode, _show, _store, _offset) \
+static struct f2fs_attr f2fs_attr_##_name = {                  \
+       .attr = {.name = __stringify(_name), .mode = _mode },   \
+       .show   = _show,                                        \
+       .store  = _store,                                       \
+       .struct_type = _struct_type,                            \
+       .offset = _offset                                       \
+}
+
+#define F2FS_RW_ATTR(struct_type, struct_name, name, elname)   \
+       F2FS_ATTR_OFFSET(struct_type, name, 0644,               \
+               f2fs_sbi_show, f2fs_sbi_store,                  \
+               offsetof(struct struct_name, elname))
+
+#define F2FS_GENERAL_RO_ATTR(name) \
+static struct f2fs_attr f2fs_attr_##name = __ATTR(name, 0444, name##_show, NULL)
+
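/*
 * Editorial note (illustration, not part of the patch): an invocation
 * such as F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_idle, gc_idle)
 * expands to roughly:
 *
 *	static struct f2fs_attr f2fs_attr_gc_idle = {
 *		.attr = { .name = "gc_idle", .mode = 0644 },
 *		.show = f2fs_sbi_show,
 *		.store = f2fs_sbi_store,
 *		.struct_type = GC_THREAD,
 *		.offset = offsetof(struct f2fs_gc_kthread, gc_idle),
 *	};
 *
 * so reads and writes of /sys/fs/f2fs/<disk>/gc_idle land in
 * f2fs_sbi_show()/f2fs_sbi_store(), which combine __struct_ptr() with
 * .offset to reach the unsigned int inside sbi->gc_thread.
 */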
+F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_min_sleep_time, min_sleep_time);
+F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_max_sleep_time, max_sleep_time);
+F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_no_gc_sleep_time, no_gc_sleep_time);
+F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_idle, gc_idle);
+F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, reclaim_segments, rec_prefree_segments);
+F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, max_small_discards, max_discards);
+F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, batched_trim_sections, trim_sections);
+F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, ipu_policy, ipu_policy);
+F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ipu_util, min_ipu_util);
+F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_fsync_blocks, min_fsync_blocks);
+F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ram_thresh, ram_thresh);
+F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ra_nid_pages, ra_nid_pages);
+F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, dirty_nats_ratio, dirty_nats_ratio);
+F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, max_victim_search, max_victim_search);
+F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, dir_level, dir_level);
+F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, cp_interval, interval_time[CP_TIME]);
+F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, idle_interval, interval_time[REQ_TIME]);
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+F2FS_RW_ATTR(FAULT_INFO_RATE, f2fs_fault_info, inject_rate, inject_rate);
+F2FS_RW_ATTR(FAULT_INFO_TYPE, f2fs_fault_info, inject_type, inject_type);
+#endif
+F2FS_GENERAL_RO_ATTR(lifetime_write_kbytes);
+
+#define ATTR_LIST(name) (&f2fs_attr_##name.attr)
+static struct attribute *f2fs_attrs[] = {
+       ATTR_LIST(gc_min_sleep_time),
+       ATTR_LIST(gc_max_sleep_time),
+       ATTR_LIST(gc_no_gc_sleep_time),
+       ATTR_LIST(gc_idle),
+       ATTR_LIST(reclaim_segments),
+       ATTR_LIST(max_small_discards),
+       ATTR_LIST(batched_trim_sections),
+       ATTR_LIST(ipu_policy),
+       ATTR_LIST(min_ipu_util),
+       ATTR_LIST(min_fsync_blocks),
+       ATTR_LIST(max_victim_search),
+       ATTR_LIST(dir_level),
+       ATTR_LIST(ram_thresh),
+       ATTR_LIST(ra_nid_pages),
+       ATTR_LIST(dirty_nats_ratio),
+       ATTR_LIST(cp_interval),
+       ATTR_LIST(idle_interval),
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+       ATTR_LIST(inject_rate),
+       ATTR_LIST(inject_type),
+#endif
+       ATTR_LIST(lifetime_write_kbytes),
+       NULL,
+};
+
+static const struct sysfs_ops f2fs_attr_ops = {
+       .show   = f2fs_attr_show,
+       .store  = f2fs_attr_store,
+};
+
+static struct kobj_type f2fs_ktype = {
+       .default_attrs  = f2fs_attrs,
+       .sysfs_ops      = &f2fs_attr_ops,
+       .release        = f2fs_sb_release,
+};
+
 void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
 {
        struct va_format vaf;
@@ -76,25 +351,234 @@ static void init_once(void *foo)
        inode_init_once(&fi->vfs_inode);
 }
 
+static int parse_options(struct super_block *sb, char *options)
+{
+       struct f2fs_sb_info *sbi = F2FS_SB(sb);
+       struct request_queue *q;
+       substring_t args[MAX_OPT_ARGS];
+       char *p, *name;
+       int arg = 0;
+
+       if (!options)
+               return 0;
+
+       while ((p = strsep(&options, ",")) != NULL) {
+               int token;
+               if (!*p)
+                       continue;
+               /*
+                * Initialize args struct so we know whether arg was
+                * found; some options take optional arguments.
+                */
+               args[0].to = args[0].from = NULL;
+               token = match_token(p, f2fs_tokens, args);
+
+               switch (token) {
+               case Opt_gc_background:
+                       name = match_strdup(&args[0]);
+
+                       if (!name)
+                               return -ENOMEM;
+                       if (strlen(name) == 2 && !strncmp(name, "on", 2)) {
+                               set_opt(sbi, BG_GC);
+                               clear_opt(sbi, FORCE_FG_GC);
+                       } else if (strlen(name) == 3 && !strncmp(name, "off", 3)) {
+                               clear_opt(sbi, BG_GC);
+                               clear_opt(sbi, FORCE_FG_GC);
+                       } else if (strlen(name) == 4 && !strncmp(name, "sync", 4)) {
+                               set_opt(sbi, BG_GC);
+                               set_opt(sbi, FORCE_FG_GC);
+                       } else {
+                               kfree(name);
+                               return -EINVAL;
+                       }
+                       kfree(name);
+                       break;
+               case Opt_disable_roll_forward:
+                       set_opt(sbi, DISABLE_ROLL_FORWARD);
+                       break;
+               case Opt_norecovery:
+                       /* this option mounts f2fs with ro */
+                       set_opt(sbi, DISABLE_ROLL_FORWARD);
+                       if (!f2fs_readonly(sb))
+                               return -EINVAL;
+                       break;
+               case Opt_discard:
+                       q = bdev_get_queue(sb->s_bdev);
+                       if (blk_queue_discard(q)) {
+                               set_opt(sbi, DISCARD);
+                       } else if (!f2fs_sb_mounted_blkzoned(sb)) {
+                               f2fs_msg(sb, KERN_WARNING,
+                                       "mounting with \"discard\" option, but "
+                                       "the device does not support discard");
+                       }
+                       break;
+               case Opt_nodiscard:
+                       if (f2fs_sb_mounted_blkzoned(sb)) {
+                               f2fs_msg(sb, KERN_WARNING,
+                                       "discard is required for zoned block devices");
+                               return -EINVAL;
+                       }
+                       clear_opt(sbi, DISCARD);
+                       break;
+               case Opt_noheap:
+                       set_opt(sbi, NOHEAP);
+                       break;
+#ifdef CONFIG_F2FS_FS_XATTR
+               case Opt_user_xattr:
+                       set_opt(sbi, XATTR_USER);
+                       break;
+               case Opt_nouser_xattr:
+                       clear_opt(sbi, XATTR_USER);
+                       break;
+               case Opt_inline_xattr:
+                       set_opt(sbi, INLINE_XATTR);
+                       break;
+#else
+               case Opt_user_xattr:
+                       f2fs_msg(sb, KERN_INFO,
+                               "user_xattr options not supported");
+                       break;
+               case Opt_nouser_xattr:
+                       f2fs_msg(sb, KERN_INFO,
+                               "nouser_xattr options not supported");
+                       break;
+               case Opt_inline_xattr:
+                       f2fs_msg(sb, KERN_INFO,
+                               "inline_xattr options not supported");
+                       break;
+#endif
+#ifdef CONFIG_F2FS_FS_POSIX_ACL
+               case Opt_acl:
+                       set_opt(sbi, POSIX_ACL);
+                       break;
+               case Opt_noacl:
+                       clear_opt(sbi, POSIX_ACL);
+                       break;
+#else
+               case Opt_acl:
+                       f2fs_msg(sb, KERN_INFO, "acl options not supported");
+                       break;
+               case Opt_noacl:
+                       f2fs_msg(sb, KERN_INFO, "noacl options not supported");
+                       break;
+#endif
+               case Opt_active_logs:
+                       if (args->from && match_int(args, &arg))
+                               return -EINVAL;
+                       if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE)
+                               return -EINVAL;
+                       sbi->active_logs = arg;
+                       break;
+               case Opt_disable_ext_identify:
+                       set_opt(sbi, DISABLE_EXT_IDENTIFY);
+                       break;
+               case Opt_inline_data:
+                       set_opt(sbi, INLINE_DATA);
+                       break;
+               case Opt_inline_dentry:
+                       set_opt(sbi, INLINE_DENTRY);
+                       break;
+               case Opt_noinline_dentry:
+                       clear_opt(sbi, INLINE_DENTRY);
+                       break;
+               case Opt_flush_merge:
+                       set_opt(sbi, FLUSH_MERGE);
+                       break;
+               case Opt_noflush_merge:
+                       clear_opt(sbi, FLUSH_MERGE);
+                       break;
+               case Opt_nobarrier:
+                       set_opt(sbi, NOBARRIER);
+                       break;
+               case Opt_fastboot:
+                       set_opt(sbi, FASTBOOT);
+                       break;
+               case Opt_extent_cache:
+                       set_opt(sbi, EXTENT_CACHE);
+                       break;
+               case Opt_noextent_cache:
+                       clear_opt(sbi, EXTENT_CACHE);
+                       break;
+               case Opt_noinline_data:
+                       clear_opt(sbi, INLINE_DATA);
+                       break;
+               case Opt_data_flush:
+                       set_opt(sbi, DATA_FLUSH);
+                       break;
+               case Opt_mode:
+                       name = match_strdup(&args[0]);
+
+                       if (!name)
+                               return -ENOMEM;
+                       if (strlen(name) == 8 &&
+                                       !strncmp(name, "adaptive", 8)) {
+                               if (f2fs_sb_mounted_blkzoned(sb)) {
+                                       f2fs_msg(sb, KERN_WARNING,
+                                                "adaptive mode is not allowed with "
+                                                "zoned block device feature");
+                                       kfree(name);
+                                       return -EINVAL;
+                               }
+                               set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
+                       } else if (strlen(name) == 3 &&
+                                       !strncmp(name, "lfs", 3)) {
+                               set_opt_mode(sbi, F2FS_MOUNT_LFS);
+                       } else {
+                               kfree(name);
+                               return -EINVAL;
+                       }
+                       kfree(name);
+                       break;
+               case Opt_fault_injection:
+                       if (args->from && match_int(args, &arg))
+                               return -EINVAL;
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+                       f2fs_build_fault_attr(sbi, arg);
+#else
+                       f2fs_msg(sb, KERN_INFO,
+                               "FAULT_INJECTION was not selected");
+#endif
+                       break;
+               default:
+                       f2fs_msg(sb, KERN_ERR,
+                               "Unrecognized mount option \"%s\" or missing value",
+                               p);
+                       return -EINVAL;
+               }
+       }
+       return 0;
+}
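/*
 * Editorial note (paraphrased from f2fs.h, not part of the patch):
 * parse_options() relies on set_opt()/clear_opt()/test_opt(), which
 * are plain bitmask helpers over sbi->mount_opt:
 *
 *	#define set_opt(sbi, option)	((sbi)->mount_opt.opt |= F2FS_MOUNT_##option)
 *	#define clear_opt(sbi, option)	((sbi)->mount_opt.opt &= ~F2FS_MOUNT_##option)
 *	#define test_opt(sbi, option)	((sbi)->mount_opt.opt & F2FS_MOUNT_##option)
 */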
+
 static struct inode *f2fs_alloc_inode(struct super_block *sb)
 {
        struct f2fs_inode_info *fi;
 
-       fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_NOFS | __GFP_ZERO);
+       fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_F2FS_ZERO);
        if (!fi)
                return NULL;
 
        init_once((void *) fi);
 
+       if (percpu_counter_init(&fi->dirty_pages, 0)) {
+               kmem_cache_free(f2fs_inode_cachep, fi);
+               return NULL;
+       }
+
        /* Initialize f2fs-specific inode info */
        fi->vfs_inode.i_version = 1;
-       atomic_set(&fi->dirty_dents, 0);
        fi->i_current_depth = 1;
        fi->i_advise = 0;
-       rwlock_init(&fi->ext.ext_lock);
-
-       set_inode_flag(fi, FI_NEW_INODE);
-
+       init_rwsem(&fi->i_sem);
+       INIT_LIST_HEAD(&fi->dirty_list);
+       INIT_LIST_HEAD(&fi->gdirty_list);
+       INIT_LIST_HEAD(&fi->inmem_pages);
+       mutex_init(&fi->inmem_lock);
+       init_rwsem(&fi->dio_rwsem[READ]);
+       init_rwsem(&fi->dio_rwsem[WRITE]);
+
+       /* Will be used by directory only */
+       fi->i_dir_level = F2FS_SB(sb)->dir_level;
        return &fi->vfs_inode;
 }
 
@@ -107,11 +591,96 @@ static int f2fs_drop_inode(struct inode *inode)
         *    - f2fs_gc -> iput -> evict
         *       - inode_wait_for_writeback(inode)
         */
-       if (!inode_unhashed(inode) && inode->i_state & I_SYNC)
+       if ((!inode_unhashed(inode) && inode->i_state & I_SYNC)) {
+               if (!inode->i_nlink && !is_bad_inode(inode)) {
+                       /* avoid evict_inode being called concurrently */
+                       atomic_inc(&inode->i_count);
+                       spin_unlock(&inode->i_lock);
+
+                       /* any remaining atomic pages should be discarded */
+                       if (f2fs_is_atomic_file(inode))
+                               drop_inmem_pages(inode);
+
+                       /* fi->extent_tree should remain for writepage */
+                       f2fs_destroy_extent_node(inode);
+
+                       sb_start_intwrite(inode->i_sb);
+                       f2fs_i_size_write(inode, 0);
+
+                       if (F2FS_HAS_BLOCKS(inode))
+                               f2fs_truncate(inode);
+
+                       sb_end_intwrite(inode->i_sb);
+
+                       fscrypt_put_encryption_info(inode, NULL);
+                       spin_lock(&inode->i_lock);
+                       atomic_dec(&inode->i_count);
+               }
                return 0;
+       }
+
        return generic_drop_inode(inode);
 }
 
+int f2fs_inode_dirtied(struct inode *inode, bool sync)
+{
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       int ret = 0;
+
+       spin_lock(&sbi->inode_lock[DIRTY_META]);
+       if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
+               ret = 1;
+       } else {
+               set_inode_flag(inode, FI_DIRTY_INODE);
+               stat_inc_dirty_inode(sbi, DIRTY_META);
+       }
+       if (sync && list_empty(&F2FS_I(inode)->gdirty_list)) {
+               list_add_tail(&F2FS_I(inode)->gdirty_list,
+                               &sbi->inode_list[DIRTY_META]);
+               inc_page_count(sbi, F2FS_DIRTY_IMETA);
+       }
+       spin_unlock(&sbi->inode_lock[DIRTY_META]);
+       return ret;
+}
+
+void f2fs_inode_synced(struct inode *inode)
+{
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+
+       spin_lock(&sbi->inode_lock[DIRTY_META]);
+       if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) {
+               spin_unlock(&sbi->inode_lock[DIRTY_META]);
+               return;
+       }
+       if (!list_empty(&F2FS_I(inode)->gdirty_list)) {
+               list_del_init(&F2FS_I(inode)->gdirty_list);
+               dec_page_count(sbi, F2FS_DIRTY_IMETA);
+       }
+       clear_inode_flag(inode, FI_DIRTY_INODE);
+       clear_inode_flag(inode, FI_AUTO_RECOVER);
+       stat_dec_dirty_inode(F2FS_I_SB(inode), DIRTY_META);
+       spin_unlock(&sbi->inode_lock[DIRTY_META]);
+}
+
+/*
+ * f2fs_dirty_inode() is called from __mark_inode_dirty()
+ *
+ * We should mark the inode dirty here so that it gets written back
+ * through write_inode.
+ */
+static void f2fs_dirty_inode(struct inode *inode, int flags)
+{
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+
+       if (inode->i_ino == F2FS_NODE_INO(sbi) ||
+                       inode->i_ino == F2FS_META_INO(sbi))
+               return;
+
+       if (is_inode_flag_set(inode, FI_AUTO_RECOVER))
+               clear_inode_flag(inode, FI_AUTO_RECOVER);
+
+       f2fs_inode_dirtied(inode, false);
+}
+
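/*
 * Editorial note (sketch, helper names illustrative, not part of the
 * patch): f2fs_inode_dirtied() and f2fs_inode_synced() bracket an
 * inode's stay on the DIRTY_META list, roughly:
 *
 *	f2fs_inode_dirtied(inode, false);  // on any in-memory update
 *	...
 *	update_inode_page(inode);          // write-out copies fi state
 *	f2fs_inode_synced(inode);          // clears FI_DIRTY_INODE
 */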
 static void f2fs_i_callback(struct rcu_head *head)
 {
        struct inode *inode = container_of(head, struct inode, i_rcu);
@@ -120,17 +689,72 @@ static void f2fs_i_callback(struct rcu_head *head)
 
 static void f2fs_destroy_inode(struct inode *inode)
 {
+       percpu_counter_destroy(&F2FS_I(inode)->dirty_pages);
        call_rcu(&inode->i_rcu, f2fs_i_callback);
 }
 
+static void destroy_percpu_info(struct f2fs_sb_info *sbi)
+{
+       percpu_counter_destroy(&sbi->alloc_valid_block_count);
+       percpu_counter_destroy(&sbi->total_valid_inode_count);
+}
+
+static void destroy_device_list(struct f2fs_sb_info *sbi)
+{
+       int i;
+
+       for (i = 0; i < sbi->s_ndevs; i++) {
+               blkdev_put(FDEV(i).bdev, FMODE_EXCL);
+#ifdef CONFIG_BLK_DEV_ZONED
+               kfree(FDEV(i).blkz_type);
+#endif
+       }
+       kfree(sbi->devs);
+}
+
 static void f2fs_put_super(struct super_block *sb)
 {
        struct f2fs_sb_info *sbi = F2FS_SB(sb);
 
-       f2fs_destroy_stats(sbi);
+       if (sbi->s_proc) {
+               remove_proc_entry("segment_info", sbi->s_proc);
+               remove_proc_entry("segment_bits", sbi->s_proc);
+               remove_proc_entry(sb->s_id, f2fs_proc_root);
+       }
+       kobject_del(&sbi->s_kobj);
+
        stop_gc_thread(sbi);
 
-       write_checkpoint(sbi, true);
+       /* prevent remaining shrinker jobs */
+       mutex_lock(&sbi->umount_mutex);
+
+       /*
+        * We don't need to do a checkpoint when the superblock is clean.
+        * But if the previous checkpoint was not done by umount, we need
+        * to do a clean checkpoint again.
+        */
+       if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
+                       !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
+               struct cp_control cpc = {
+                       .reason = CP_UMOUNT,
+               };
+               write_checkpoint(sbi, &cpc);
+       }
+
+       /* write_checkpoint can update stat information */
+       f2fs_destroy_stats(sbi);
+
+       /*
+        * Normally the superblock is clean, so we need to release these
+        * ino entries here. In addition, an EIO skips the checkpoint, so
+        * we need this as well.
+        */
+       release_ino_entry(sbi, true);
+
+       f2fs_leave_shrinker(sbi);
+       mutex_unlock(&sbi->umount_mutex);
+
+       /* in our cp_error case, we can wait for any writeback page */
+       f2fs_flush_merged_bios(sbi);
 
        iput(sbi->node_inode);
        iput(sbi->meta_inode);
@@ -140,41 +764,54 @@ static void f2fs_put_super(struct super_block *sb)
        destroy_segment_manager(sbi);
 
        kfree(sbi->ckpt);
+       kobject_put(&sbi->s_kobj);
+       wait_for_completion(&sbi->s_kobj_unregister);
 
        sb->s_fs_info = NULL;
-       brelse(sbi->raw_super_buf);
+       if (sbi->s_chksum_driver)
+               crypto_free_shash(sbi->s_chksum_driver);
+       kfree(sbi->raw_super);
+
+       destroy_device_list(sbi);
+
+       destroy_percpu_info(sbi);
        kfree(sbi);
 }
 
 int f2fs_sync_fs(struct super_block *sb, int sync)
 {
        struct f2fs_sb_info *sbi = F2FS_SB(sb);
+       int err = 0;
 
        trace_f2fs_sync_fs(sb, sync);
 
-       if (!sbi->s_dirty && !get_pages(sbi, F2FS_DIRTY_NODES))
-               return 0;
-
        if (sync) {
+               struct cp_control cpc;
+
+               cpc.reason = __get_cp_reason(sbi);
+
                mutex_lock(&sbi->gc_mutex);
-               write_checkpoint(sbi, false);
+               err = write_checkpoint(sbi, &cpc);
                mutex_unlock(&sbi->gc_mutex);
-       } else {
-               f2fs_balance_fs(sbi);
        }
+       f2fs_trace_ios(NULL, 1);
 
-       return 0;
+       return err;
 }
 
 static int f2fs_freeze(struct super_block *sb)
 {
-       int err;
-
-       if (sb->s_flags & MS_RDONLY)
+       if (f2fs_readonly(sb))
                return 0;
 
-       err = f2fs_sync_fs(sb, 1);
-       return err;
+       /* IO error happened before */
+       if (unlikely(f2fs_cp_error(F2FS_SB(sb))))
+               return -EIO;
+
+       /* must be clean, since sync_filesystem() was already called */
+       if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY))
+               return -EINVAL;
+       return 0;
 }
 
 static int f2fs_unfreeze(struct super_block *sb)
@@ -197,11 +834,12 @@ static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
        buf->f_bsize = sbi->blocksize;
 
        buf->f_blocks = total_count - start_count;
-       buf->f_bfree = buf->f_blocks - valid_user_blocks(sbi) - ovp_count;
+       buf->f_bfree = user_block_count - valid_user_blocks(sbi) + ovp_count;
        buf->f_bavail = user_block_count - valid_user_blocks(sbi);
 
-       buf->f_files = sbi->total_node_count;
-       buf->f_ffree = sbi->total_node_count - valid_inode_count(sbi);
+       buf->f_files = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
+       buf->f_ffree = min(buf->f_files - valid_node_count(sbi),
+                                                       buf->f_bavail);
 
        buf->f_namelen = F2FS_NAME_LEN;
        buf->f_fsid.val[0] = (u32)id;
@@ -214,10 +852,14 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
 {
        struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);
 
-       if (test_opt(sbi, BG_GC))
-               seq_puts(seq, ",background_gc_on");
-       else
-               seq_puts(seq, ",background_gc_off");
+       if (!f2fs_readonly(sbi->sb) && test_opt(sbi, BG_GC)) {
+               if (test_opt(sbi, FORCE_FG_GC))
+                       seq_printf(seq, ",background_gc=%s", "sync");
+               else
+                       seq_printf(seq, ",background_gc=%s", "on");
+       } else {
+               seq_printf(seq, ",background_gc=%s", "off");
+       }
        if (test_opt(sbi, DISABLE_ROLL_FORWARD))
                seq_puts(seq, ",disable_roll_forward");
        if (test_opt(sbi, DISCARD))
@@ -229,6 +871,8 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
                seq_puts(seq, ",user_xattr");
        else
                seq_puts(seq, ",nouser_xattr");
+       if (test_opt(sbi, INLINE_XATTR))
+               seq_puts(seq, ",inline_xattr");
 #endif
 #ifdef CONFIG_F2FS_FS_POSIX_ACL
        if (test_opt(sbi, POSIX_ACL))
@@ -238,17 +882,253 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
 #endif
        if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
                seq_puts(seq, ",disable_ext_identify");
-
+       if (test_opt(sbi, INLINE_DATA))
+               seq_puts(seq, ",inline_data");
+       else
+               seq_puts(seq, ",noinline_data");
+       if (test_opt(sbi, INLINE_DENTRY))
+               seq_puts(seq, ",inline_dentry");
+       else
+               seq_puts(seq, ",noinline_dentry");
+       if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE))
+               seq_puts(seq, ",flush_merge");
+       if (test_opt(sbi, NOBARRIER))
+               seq_puts(seq, ",nobarrier");
+       if (test_opt(sbi, FASTBOOT))
+               seq_puts(seq, ",fastboot");
+       if (test_opt(sbi, EXTENT_CACHE))
+               seq_puts(seq, ",extent_cache");
+       else
+               seq_puts(seq, ",noextent_cache");
+       if (test_opt(sbi, DATA_FLUSH))
+               seq_puts(seq, ",data_flush");
+
+       seq_puts(seq, ",mode=");
+       if (test_opt(sbi, ADAPTIVE))
+               seq_puts(seq, "adaptive");
+       else if (test_opt(sbi, LFS))
+               seq_puts(seq, "lfs");
        seq_printf(seq, ",active_logs=%u", sbi->active_logs);
 
        return 0;
 }
 
+static int segment_info_seq_show(struct seq_file *seq, void *offset)
+{
+       struct super_block *sb = seq->private;
+       struct f2fs_sb_info *sbi = F2FS_SB(sb);
+       unsigned int total_segs =
+                       le32_to_cpu(sbi->raw_super->segment_count_main);
+       int i;
+
+       seq_puts(seq, "format: segment_type|valid_blocks\n"
+               "segment_type(0:HD, 1:WD, 2:CD, 3:HN, 4:WN, 5:CN)\n");
+
+       for (i = 0; i < total_segs; i++) {
+               struct seg_entry *se = get_seg_entry(sbi, i);
+
+               if ((i % 10) == 0)
+                       seq_printf(seq, "%-10d", i);
+               seq_printf(seq, "%d|%-3u", se->type,
+                                       get_valid_blocks(sbi, i, 1));
+               if ((i % 10) == 9 || i == (total_segs - 1))
+                       seq_putc(seq, '\n');
+               else
+                       seq_putc(seq, ' ');
+       }
+
+       return 0;
+}
+
+static int segment_bits_seq_show(struct seq_file *seq, void *offset)
+{
+       struct super_block *sb = seq->private;
+       struct f2fs_sb_info *sbi = F2FS_SB(sb);
+       unsigned int total_segs =
+                       le32_to_cpu(sbi->raw_super->segment_count_main);
+       int i, j;
+
+       seq_puts(seq, "format: segment_type|valid_blocks|bitmaps\n"
+               "segment_type(0:HD, 1:WD, 2:CD, 3:HN, 4:WN, 5:CN)\n");
+
+       for (i = 0; i < total_segs; i++) {
+               struct seg_entry *se = get_seg_entry(sbi, i);
+
+               seq_printf(seq, "%-10d", i);
+               seq_printf(seq, "%d|%-3u|", se->type,
+                                       get_valid_blocks(sbi, i, 1));
+               for (j = 0; j < SIT_VBLOCK_MAP_SIZE; j++)
+                       seq_printf(seq, " %.2x", se->cur_valid_map[j]);
+               seq_putc(seq, '\n');
+       }
+       return 0;
+}
+
+#define F2FS_PROC_FILE_DEF(_name)                                      \
+static int _name##_open_fs(struct inode *inode, struct file *file)     \
+{                                                                      \
+       return single_open(file, _name##_seq_show, PDE_DATA(inode));    \
+}                                                                      \
+                                                                       \
+static const struct file_operations f2fs_seq_##_name##_fops = {                \
+       .owner = THIS_MODULE,                                           \
+       .open = _name##_open_fs,                                        \
+       .read = seq_read,                                               \
+       .llseek = seq_lseek,                                            \
+       .release = single_release,                                      \
+};
+
+F2FS_PROC_FILE_DEF(segment_info);
+F2FS_PROC_FILE_DEF(segment_bits);
+
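/*
 * Editorial note (illustration, not part of the patch):
 * F2FS_PROC_FILE_DEF(segment_info) expands to:
 *
 *	static int segment_info_open_fs(struct inode *inode, struct file *file)
 *	{
 *		return single_open(file, segment_info_seq_show, PDE_DATA(inode));
 *	}
 *
 *	static const struct file_operations f2fs_seq_segment_info_fops = {
 *		.owner = THIS_MODULE,
 *		.open = segment_info_open_fs,
 *		.read = seq_read,
 *		.llseek = seq_lseek,
 *		.release = single_release,
 *	};
 */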
+static void default_options(struct f2fs_sb_info *sbi)
+{
+       /* init some FS parameters */
+       sbi->active_logs = NR_CURSEG_TYPE;
+
+       set_opt(sbi, BG_GC);
+       set_opt(sbi, INLINE_DATA);
+       set_opt(sbi, INLINE_DENTRY);
+       set_opt(sbi, EXTENT_CACHE);
+       set_opt(sbi, FLUSH_MERGE);
+       if (f2fs_sb_mounted_blkzoned(sbi->sb)) {
+               set_opt_mode(sbi, F2FS_MOUNT_LFS);
+               set_opt(sbi, DISCARD);
+       } else {
+               set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
+       }
+
+#ifdef CONFIG_F2FS_FS_XATTR
+       set_opt(sbi, XATTR_USER);
+#endif
+#ifdef CONFIG_F2FS_FS_POSIX_ACL
+       set_opt(sbi, POSIX_ACL);
+#endif
+
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+       f2fs_build_fault_attr(sbi, 0);
+#endif
+}
+
+static int f2fs_remount(struct super_block *sb, int *flags, char *data)
+{
+       struct f2fs_sb_info *sbi = F2FS_SB(sb);
+       struct f2fs_mount_info org_mount_opt;
+       int err, active_logs;
+       bool need_restart_gc = false;
+       bool need_stop_gc = false;
+       bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+       struct f2fs_fault_info ffi = sbi->fault_info;
+#endif
+
+       /*
+        * Save the old mount options in case we
+        * need to restore them.
+        */
+       org_mount_opt = sbi->mount_opt;
+       active_logs = sbi->active_logs;
+
+       /* recover superblocks we couldn't write due to previous RO mount */
+       if (!(*flags & MS_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
+               err = f2fs_commit_super(sbi, false);
+               f2fs_msg(sb, KERN_INFO,
+                       "Try to recover all the superblocks, ret: %d", err);
+               if (!err)
+                       clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
+       }
+
+       sbi->mount_opt.opt = 0;
+       default_options(sbi);
+
+       /* parse mount options */
+       err = parse_options(sb, data);
+       if (err)
+               goto restore_opts;
+
+       /*
+        * Both the previous and the new state of the filesystem are RO,
+        * so skip checking the GC and FLUSH_MERGE conditions.
+        */
+       if (f2fs_readonly(sb) && (*flags & MS_RDONLY))
+               goto skip;
+
+       /* disallow enable/disable extent_cache dynamically */
+       if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) {
+               err = -EINVAL;
+               f2fs_msg(sbi->sb, KERN_WARNING,
+                               "switch extent_cache option is not allowed");
+               goto restore_opts;
+       }
+
+       /*
+        * We stop the GC thread if the FS is mounted read-only or if
+        * background_gc=off is passed as a mount option. Also sync the
+        * filesystem.
+        */
+       if ((*flags & MS_RDONLY) || !test_opt(sbi, BG_GC)) {
+               if (sbi->gc_thread) {
+                       stop_gc_thread(sbi);
+                       need_restart_gc = true;
+               }
+       } else if (!sbi->gc_thread) {
+               err = start_gc_thread(sbi);
+               if (err)
+                       goto restore_opts;
+               need_stop_gc = true;
+       }
+
+       if (*flags & MS_RDONLY) {
+               writeback_inodes_sb(sb, WB_REASON_SYNC);
+               sync_inodes_sb(sb);
+
+               set_sbi_flag(sbi, SBI_IS_DIRTY);
+               set_sbi_flag(sbi, SBI_IS_CLOSE);
+               f2fs_sync_fs(sb, 1);
+               clear_sbi_flag(sbi, SBI_IS_CLOSE);
+       }
+
+       /*
+        * We stop the issue-flush thread if the FS is mounted read-only
+        * or if flush_merge is not passed as a mount option.
+        */
+       if ((*flags & MS_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
+               clear_opt(sbi, FLUSH_MERGE);
+               destroy_flush_cmd_control(sbi, false);
+       } else {
+               err = create_flush_cmd_control(sbi);
+               if (err)
+                       goto restore_gc;
+       }
+skip:
+       /* Update the POSIXACL Flag */
+       sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
+               (test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
+
+       return 0;
+restore_gc:
+       if (need_restart_gc) {
+               if (start_gc_thread(sbi))
+                       f2fs_msg(sbi->sb, KERN_WARNING,
+                               "background gc thread has stopped");
+       } else if (need_stop_gc) {
+               stop_gc_thread(sbi);
+       }
+restore_opts:
+       sbi->mount_opt = org_mount_opt;
+       sbi->active_logs = active_logs;
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+       sbi->fault_info = ffi;
+#endif
+       return err;
+}
+
 static struct super_operations f2fs_sops = {
        .alloc_inode    = f2fs_alloc_inode,
        .drop_inode     = f2fs_drop_inode,
        .destroy_inode  = f2fs_destroy_inode,
        .write_inode    = f2fs_write_inode,
+       .dirty_inode    = f2fs_dirty_inode,
        .show_options   = f2fs_show_options,
        .evict_inode    = f2fs_evict_inode,
        .put_super      = f2fs_put_super,
@@ -256,7 +1136,50 @@ static struct super_operations f2fs_sops = {
        .freeze_fs      = f2fs_freeze,
        .unfreeze_fs    = f2fs_unfreeze,
        .statfs         = f2fs_statfs,
+       .remount_fs     = f2fs_remount,
+};
+
+#ifdef CONFIG_F2FS_FS_ENCRYPTION
+static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
+{
+       return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
+                               F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
+                               ctx, len, NULL);
+}
+
+static int f2fs_key_prefix(struct inode *inode, u8 **key)
+{
+       *key = F2FS_I_SB(inode)->key_prefix;
+       return F2FS_I_SB(inode)->key_prefix_size;
+}
+
+static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
+                                                       void *fs_data)
+{
+       return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
+                               F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
+                               ctx, len, fs_data, XATTR_CREATE);
+}
+
+static unsigned f2fs_max_namelen(struct inode *inode)
+{
+       return S_ISLNK(inode->i_mode) ?
+                       inode->i_sb->s_blocksize : F2FS_NAME_LEN;
+}
+
+static struct fscrypt_operations f2fs_cryptops = {
+       .get_context    = f2fs_get_context,
+       .key_prefix     = f2fs_key_prefix,
+       .set_context    = f2fs_set_context,
+       .is_encrypted   = f2fs_encrypted_inode,
+       .empty_dir      = f2fs_empty_dir,
+       .max_namelen    = f2fs_max_namelen,
+};
+#else
+static struct fscrypt_operations f2fs_cryptops = {
+       .is_encrypted   = f2fs_encrypted_inode,
 };
+#endif
 
 static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
                u64 ino, u32 generation)
@@ -264,7 +1187,7 @@ static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
        struct f2fs_sb_info *sbi = F2FS_SB(sb);
        struct inode *inode;
 
-       if (ino < F2FS_ROOT_INO(sbi))
+       if (check_nid_range(sbi, ino))
                return ERR_PTR(-ESTALE);
 
        /*
@@ -275,7 +1198,7 @@ static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
        inode = f2fs_iget(sb, ino);
        if (IS_ERR(inode))
                return ERR_CAST(inode);
-       if (generation && inode->i_generation != generation) {
+       if (unlikely(generation && inode->i_generation != generation)) {
                /* we didn't find the right inode.. */
                iput(inode);
                return ERR_PTR(-ESTALE);
@@ -289,96 +1212,23 @@ static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
        return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
                                    f2fs_nfs_get_inode);
 }
-
-static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
-               int fh_len, int fh_type)
-{
-       return generic_fh_to_parent(sb, fid, fh_len, fh_type,
-                                   f2fs_nfs_get_inode);
-}
-
-static const struct export_operations f2fs_export_ops = {
-       .fh_to_dentry = f2fs_fh_to_dentry,
-       .fh_to_parent = f2fs_fh_to_parent,
-       .get_parent = f2fs_get_parent,
-};
-
-static int parse_options(struct super_block *sb, struct f2fs_sb_info *sbi,
-                               char *options)
-{
-       substring_t args[MAX_OPT_ARGS];
-       char *p;
-       int arg = 0;
-
-       if (!options)
-               return 0;
-
-       while ((p = strsep(&options, ",")) != NULL) {
-               int token;
-               if (!*p)
-                       continue;
-               /*
-                * Initialize args struct so we know whether arg was
-                * found; some options take optional arguments.
-                */
-               args[0].to = args[0].from = NULL;
-               token = match_token(p, f2fs_tokens, args);
-
-               switch (token) {
-               case Opt_gc_background_off:
-                       clear_opt(sbi, BG_GC);
-                       break;
-               case Opt_disable_roll_forward:
-                       set_opt(sbi, DISABLE_ROLL_FORWARD);
-                       break;
-               case Opt_discard:
-                       set_opt(sbi, DISCARD);
-                       break;
-               case Opt_noheap:
-                       set_opt(sbi, NOHEAP);
-                       break;
-#ifdef CONFIG_F2FS_FS_XATTR
-               case Opt_nouser_xattr:
-                       clear_opt(sbi, XATTR_USER);
-                       break;
-#else
-               case Opt_nouser_xattr:
-                       f2fs_msg(sb, KERN_INFO,
-                               "nouser_xattr options not supported");
-                       break;
-#endif
-#ifdef CONFIG_F2FS_FS_POSIX_ACL
-               case Opt_noacl:
-                       clear_opt(sbi, POSIX_ACL);
-                       break;
-#else
-               case Opt_noacl:
-                       f2fs_msg(sb, KERN_INFO, "noacl options not supported");
-                       break;
-#endif
-               case Opt_active_logs:
-                       if (args->from && match_int(args, &arg))
-                               return -EINVAL;
-                       if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE)
-                               return -EINVAL;
-                       sbi->active_logs = arg;
-                       break;
-               case Opt_disable_ext_identify:
-                       set_opt(sbi, DISABLE_EXT_IDENTIFY);
-                       break;
-               default:
-                       f2fs_msg(sb, KERN_ERR,
-                               "Unrecognized mount option \"%s\" or missing value",
-                               p);
-                       return -EINVAL;
-               }
-       }
-       return 0;
+
+static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
+               int fh_len, int fh_type)
+{
+       return generic_fh_to_parent(sb, fid, fh_len, fh_type,
+                                   f2fs_nfs_get_inode);
 }
 
-static loff_t max_file_size(unsigned bits)
+static const struct export_operations f2fs_export_ops = {
+       .fh_to_dentry = f2fs_fh_to_dentry,
+       .fh_to_parent = f2fs_fh_to_parent,
+       .get_parent = f2fs_get_parent,
+};
+
+static loff_t max_file_blocks(void)
 {
-       loff_t result = ADDRS_PER_INODE;
+       loff_t result = (DEF_ADDRS_PER_INODE - F2FS_INLINE_XATTR_ADDRS);
        loff_t leaf_count = ADDRS_PER_BLOCK;
 
        /* two direct node blocks */
@@ -392,13 +1242,131 @@ static loff_t max_file_size(unsigned bits)
        leaf_count *= NIDS_PER_BLOCK;
        result += leaf_count;
 
-       result <<= bits;
        return result;
 }
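/*
 * Editorial note (constants assumed from f2fs_fs.h, not part of the
 * patch): with DEF_ADDRS_PER_INODE = 923, F2FS_INLINE_XATTR_ADDRS = 50
 * and ADDRS_PER_BLOCK = NIDS_PER_BLOCK = 1018, this evaluates to
 *
 *	873 + 2*1018 + 2*1018^2 + 1018^3 = 1,057,053,389 blocks,
 *
 * roughly 3.94 TiB per file at a 4 KiB block size; the caller now
 * shifts the result by the block-size bits instead of this function.
 */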
 
-static int sanity_check_raw_super(struct super_block *sb,
-                       struct f2fs_super_block *raw_super)
+static int __f2fs_commit_super(struct buffer_head *bh,
+                       struct f2fs_super_block *super)
+{
+       lock_buffer(bh);
+       if (super)
+               memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
+       set_buffer_uptodate(bh);
+       set_buffer_dirty(bh);
+       unlock_buffer(bh);
+
+       /* it's a rare case, so we can do FUA all the time */
+       return __sync_dirty_buffer(bh, WRITE_FLUSH_FUA);
+}
+
+static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
+                                       struct buffer_head *bh)
+{
+       struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
+                                       (bh->b_data + F2FS_SUPER_OFFSET);
+       struct super_block *sb = sbi->sb;
+       u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
+       u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
+       u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
+       u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr);
+       u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
+       u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
+       u32 segment_count_ckpt = le32_to_cpu(raw_super->segment_count_ckpt);
+       u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit);
+       u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat);
+       u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa);
+       u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
+       u32 segment_count = le32_to_cpu(raw_super->segment_count);
+       u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
+       u64 main_end_blkaddr = main_blkaddr +
+                               (segment_count_main << log_blocks_per_seg);
+       u64 seg_end_blkaddr = segment0_blkaddr +
+                               (segment_count << log_blocks_per_seg);
+
+       if (segment0_blkaddr != cp_blkaddr) {
+               f2fs_msg(sb, KERN_INFO,
+                       "Mismatch start address, segment0(%u) cp_blkaddr(%u)",
+                       segment0_blkaddr, cp_blkaddr);
+               return true;
+       }
+
+       if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
+                                                       sit_blkaddr) {
+               f2fs_msg(sb, KERN_INFO,
+                       "Wrong CP boundary, start(%u) end(%u) blocks(%u)",
+                       cp_blkaddr, sit_blkaddr,
+                       segment_count_ckpt << log_blocks_per_seg);
+               return true;
+       }
+
+       if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
+                                                       nat_blkaddr) {
+               f2fs_msg(sb, KERN_INFO,
+                       "Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
+                       sit_blkaddr, nat_blkaddr,
+                       segment_count_sit << log_blocks_per_seg);
+               return true;
+       }
+
+       if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
+                                                       ssa_blkaddr) {
+               f2fs_msg(sb, KERN_INFO,
+                       "Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
+                       nat_blkaddr, ssa_blkaddr,
+                       segment_count_nat << log_blocks_per_seg);
+               return true;
+       }
+
+       if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
+                                                       main_blkaddr) {
+               f2fs_msg(sb, KERN_INFO,
+                       "Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
+                       ssa_blkaddr, main_blkaddr,
+                       segment_count_ssa << log_blocks_per_seg);
+               return true;
+       }
+
+       if (main_end_blkaddr > seg_end_blkaddr) {
+               f2fs_msg(sb, KERN_INFO,
+                       "Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)",
+                       main_blkaddr,
+                       segment0_blkaddr +
+                               (segment_count << log_blocks_per_seg),
+                       segment_count_main << log_blocks_per_seg);
+               return true;
+       } else if (main_end_blkaddr < seg_end_blkaddr) {
+               int err = 0;
+               char *res;
+
+               /* fix in-memory information all the time */
+               raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
+                               segment0_blkaddr) >> log_blocks_per_seg);
+
+               if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
+                       set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
+                       res = "internally";
+               } else {
+                       err = __f2fs_commit_super(bh, NULL);
+                       res = err ? "failed" : "done";
+               }
+               f2fs_msg(sb, KERN_INFO,
+                       "Fix alignment : %s, start(%u) end(%u) block(%u)",
+                       res, main_blkaddr,
+                       segment0_blkaddr +
+                               (segment_count << log_blocks_per_seg),
+                       segment_count_main << log_blocks_per_seg);
+               if (err)
+                       return true;
+       }
+       return false;
+}
+
+static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
+                               struct buffer_head *bh)
 {
+       struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
+                                       (bh->b_data + F2FS_SUPER_OFFSET);
+       struct super_block *sb = sbi->sb;
        unsigned int blocksize;
 
        if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
@@ -409,10 +1377,10 @@ static int sanity_check_raw_super(struct super_block *sb,
        }
 
        /* Currently, support only 4KB page cache size */
-       if (F2FS_BLKSIZE != PAGE_CACHE_SIZE) {
+       if (F2FS_BLKSIZE != PAGE_SIZE) {
                f2fs_msg(sb, KERN_INFO,
                        "Invalid page_cache_size (%lu), supports only 4KB\n",
-                       PAGE_CACHE_SIZE);
+                       PAGE_SIZE);
                return 1;
        }
 
@@ -425,24 +1393,58 @@ static int sanity_check_raw_super(struct super_block *sb,
                return 1;
        }
 
-       if (le32_to_cpu(raw_super->log_sectorsize) !=
-                                       F2FS_LOG_SECTOR_SIZE) {
-               f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize");
+       /* check log blocks per segment */
+       if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
+               f2fs_msg(sb, KERN_INFO,
+                       "Invalid log blocks per segment (%u)\n",
+                       le32_to_cpu(raw_super->log_blocks_per_seg));
+               return 1;
+       }
+
+       /* Currently, support 512/1024/2048/4096 bytes sector size */
+       if (le32_to_cpu(raw_super->log_sectorsize) >
+                               F2FS_MAX_LOG_SECTOR_SIZE ||
+               le32_to_cpu(raw_super->log_sectorsize) <
+                               F2FS_MIN_LOG_SECTOR_SIZE) {
+               f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize (%u)",
+                       le32_to_cpu(raw_super->log_sectorsize));
+               return 1;
+       }
+       if (le32_to_cpu(raw_super->log_sectors_per_block) +
+               le32_to_cpu(raw_super->log_sectorsize) !=
+                       F2FS_MAX_LOG_SECTOR_SIZE) {
+               f2fs_msg(sb, KERN_INFO,
+                       "Invalid log sectors per block(%u) log sectorsize(%u)",
+                       le32_to_cpu(raw_super->log_sectors_per_block),
+                       le32_to_cpu(raw_super->log_sectorsize));
                return 1;
        }
-       if (le32_to_cpu(raw_super->log_sectors_per_block) !=
-                                       F2FS_LOG_SECTORS_PER_BLOCK) {
-               f2fs_msg(sb, KERN_INFO, "Invalid log sectors per block");
+
+       /* check reserved ino info */
+       if (le32_to_cpu(raw_super->node_ino) != 1 ||
+               le32_to_cpu(raw_super->meta_ino) != 2 ||
+               le32_to_cpu(raw_super->root_ino) != 3) {
+               f2fs_msg(sb, KERN_INFO,
+                       "Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
+                       le32_to_cpu(raw_super->node_ino),
+                       le32_to_cpu(raw_super->meta_ino),
+                       le32_to_cpu(raw_super->root_ino));
                return 1;
        }
+
+       /* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
+       if (sanity_check_area_boundary(sbi, bh))
+               return 1;
+
        return 0;
 }
 
-static int sanity_check_ckpt(struct f2fs_sb_info *sbi)
+int sanity_check_ckpt(struct f2fs_sb_info *sbi)
 {
        unsigned int total, fsmeta;
        struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
        struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
+       unsigned int ovp_segments, reserved_segments;
 
        total = le32_to_cpu(raw_super->segment_count);
        fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
@@ -451,10 +1453,20 @@ static int sanity_check_ckpt(struct f2fs_sb_info *sbi)
        fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
        fsmeta += le32_to_cpu(raw_super->segment_count_ssa);
 
-       if (fsmeta >= total)
+       if (unlikely(fsmeta >= total))
+               return 1;
+
+       ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
+       reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
+
+       if (unlikely(fsmeta < F2FS_MIN_SEGMENTS ||
+                       ovp_segments == 0 || reserved_segments == 0)) {
+               f2fs_msg(sbi->sb, KERN_ERR,
+                       "Wrong layout: check mkfs.f2fs version");
                return 1;
+       }
 
-       if (is_set_ckpt_flags(ckpt, CP_ERROR_FLAG)) {
+       if (unlikely(f2fs_cp_error(sbi))) {
                f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
                return 1;
        }
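
To see the checkpoint accounting above with concrete numbers, a worked
example (all values invented; F2FS_MIN_SEGMENTS is taken to be 9, as in the
f2fs headers):

    /* fsmeta sums the checkpoint, SIT, NAT, reserved and SSA areas:
     *   segment_count_ckpt = 2     segment_count_sit  = 2
     *   segment_count_nat  = 4     rsvd_segment_count = 48
     *   segment_count_ssa  = 2
     *   fsmeta = 2 + 2 + 4 + 48 + 2 = 58
     * With segment_count = 1024: 58 < 1024 and 58 >= 9, and both
     * overprov_segment_count and rsvd_segment_count are non-zero,
     * so this layout passes. */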
@@ -482,116 +1494,388 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
        sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
        sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);
        sbi->cur_victim_sec = NULL_SECNO;
+       sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;
+
+       sbi->dir_level = DEF_DIR_LEVEL;
+       sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
+       sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
+       clear_sbi_flag(sbi, SBI_NEED_FSCK);
 
        for (i = 0; i < NR_COUNT_TYPE; i++)
                atomic_set(&sbi->nr_pages[i], 0);
+
+       INIT_LIST_HEAD(&sbi->s_list);
+       mutex_init(&sbi->umount_mutex);
+       mutex_init(&sbi->wio_mutex[NODE]);
+       mutex_init(&sbi->wio_mutex[DATA]);
+       spin_lock_init(&sbi->cp_lock);
+
+#ifdef CONFIG_F2FS_FS_ENCRYPTION
+       memcpy(sbi->key_prefix, F2FS_KEY_DESC_PREFIX,
+                               F2FS_KEY_DESC_PREFIX_SIZE);
+       sbi->key_prefix_size = F2FS_KEY_DESC_PREFIX_SIZE;
+#endif
 }
 
-static int validate_superblock(struct super_block *sb,
-               struct f2fs_super_block **raw_super,
-               struct buffer_head **raw_super_buf, sector_t block)
+static int init_percpu_info(struct f2fs_sb_info *sbi)
 {
-       const char *super = (block == 0 ? "first" : "second");
+       int err;
 
-       /* read f2fs raw super block */
-       *raw_super_buf = sb_bread(sb, block);
-       if (!*raw_super_buf) {
-               f2fs_msg(sb, KERN_ERR, "unable to read %s superblock",
-                               super);
-               return -EIO;
-       }
+       err = percpu_counter_init(&sbi->alloc_valid_block_count, 0);
+       if (err)
+               return err;
 
-       *raw_super = (struct f2fs_super_block *)
-               ((char *)(*raw_super_buf)->b_data + F2FS_SUPER_OFFSET);
+       return percpu_counter_init(&sbi->total_valid_inode_count, 0);
+}
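
The counters initialised above replace plain integers that were updated
under stat_lock; per-cpu counters let hot paths account without contention.
A hedged sketch of the usual pattern (call sites abridged, not verbatim from
this series):

    /* hot path: per-cpu add, no global lock taken */
    percpu_counter_add(&sbi->alloc_valid_block_count, (s64)count);

    /* slow path (e.g. at checkpoint): fold per-cpu deltas into an exact sum */
    s64 allocated = percpu_counter_sum_positive(&sbi->alloc_valid_block_count);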
 
-       /* sanity checking of raw super */
-       if (!sanity_check_raw_super(sb, *raw_super))
+#ifdef CONFIG_BLK_DEV_ZONED
+static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
+{
+       struct block_device *bdev = FDEV(devi).bdev;
+       sector_t nr_sectors = bdev->bd_part->nr_sects;
+       sector_t sector = 0;
+       struct blk_zone *zones;
+       unsigned int i, nr_zones;
+       unsigned int n = 0;
+       int err = -EIO;
+
+       if (!f2fs_sb_mounted_blkzoned(sbi->sb))
                return 0;
 
-       f2fs_msg(sb, KERN_ERR, "Can't find a valid F2FS filesystem "
-                               "in %s superblock", super);
-       return -EINVAL;
+       if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
+                               SECTOR_TO_BLOCK(bdev_zone_size(bdev)))
+               return -EINVAL;
+       sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_size(bdev));
+       if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz !=
+                               __ilog2_u32(sbi->blocks_per_blkz))
+               return -EINVAL;
+       sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz);
+       FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >>
+                                       sbi->log_blocks_per_blkz;
+       if (nr_sectors & (bdev_zone_size(bdev) - 1))
+               FDEV(devi).nr_blkz++;
+
+       FDEV(devi).blkz_type = kmalloc(FDEV(devi).nr_blkz, GFP_KERNEL);
+       if (!FDEV(devi).blkz_type)
+               return -ENOMEM;
+
+#define F2FS_REPORT_NR_ZONES   4096
+
+       zones = kcalloc(F2FS_REPORT_NR_ZONES, sizeof(struct blk_zone),
+                       GFP_KERNEL);
+       if (!zones)
+               return -ENOMEM;
+
+       /* Get block zones type */
+       while (zones && sector < nr_sectors) {
+
+               nr_zones = F2FS_REPORT_NR_ZONES;
+               err = blkdev_report_zones(bdev, sector,
+                                         zones, &nr_zones,
+                                         GFP_KERNEL);
+               if (err)
+                       break;
+               if (!nr_zones) {
+                       err = -EIO;
+                       break;
+               }
+
+               for (i = 0; i < nr_zones; i++) {
+                       FDEV(devi).blkz_type[n] = zones[i].type;
+                       sector += zones[i].len;
+                       n++;
+               }
+       }
+
+       kfree(zones);
+
+       return err;
+}
+#endif
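
To make the zone bookkeeping above concrete, a worked example with invented
geometry: a host-managed drive with 256MB zones has blocks_per_blkz =
256MB / 4KB = 65536, hence log_blocks_per_blkz = 16; on a 1TB drive that
gives nr_blkz = 4096, so blkz_type is a 4096-byte array, filled by
blkdev_report_zones() in batches of up to F2FS_REPORT_NR_ZONES zones.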
+
+/*
+ * Read the f2fs raw super block.
+ * Because we keep two copies of the super block, read both of them
+ * to get the first valid one. If either is broken, we pass a
+ * recovery flag back to the caller.
+ */
+static int read_raw_super_block(struct f2fs_sb_info *sbi,
+                       struct f2fs_super_block **raw_super,
+                       int *valid_super_block, int *recovery)
+{
+       struct super_block *sb = sbi->sb;
+       int block;
+       struct buffer_head *bh;
+       struct f2fs_super_block *super;
+       int err = 0;
+
+       super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
+       if (!super)
+               return -ENOMEM;
+
+       for (block = 0; block < 2; block++) {
+               bh = sb_bread(sb, block);
+               if (!bh) {
+                       f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
+                               block + 1);
+                       err = -EIO;
+                       continue;
+               }
+
+               /* sanity checking of raw super */
+               if (sanity_check_raw_super(sbi, bh)) {
+                       f2fs_msg(sb, KERN_ERR,
+                               "Can't find valid F2FS filesystem in %dth superblock",
+                               block + 1);
+                       err = -EINVAL;
+                       brelse(bh);
+                       continue;
+               }
+
+               if (!*raw_super) {
+                       memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
+                                                       sizeof(*super));
+                       *valid_super_block = block;
+                       *raw_super = super;
+               }
+               brelse(bh);
+       }
+
+       /* Failed to read any one of the superblocks */
+       if (err < 0)
+               *recovery = 1;
+
+       /* No valid superblock */
+       if (!*raw_super)
+               kfree(super);
+       else
+               err = 0;
+
+       return err;
+}
+
+int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
+{
+       struct buffer_head *bh;
+       int err;
+
+       if ((recover && f2fs_readonly(sbi->sb)) ||
+                               bdev_read_only(sbi->sb->s_bdev)) {
+               set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
+               return -EROFS;
+       }
+
+       /* write back-up superblock first */
+       bh = sb_getblk(sbi->sb, sbi->valid_super_block ? 0 : 1);
+       if (!bh)
+               return -EIO;
+       err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
+       brelse(bh);
+
+       /* if we are in recovery path, skip writing valid superblock */
+       if (recover || err)
+               return err;
+
+       /* write current valid superblock */
+       bh = sb_getblk(sbi->sb, sbi->valid_super_block);
+       if (!bh)
+               return -EIO;
+       err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
+       brelse(bh);
+       return err;
+}
+
+static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
+{
+       struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
+       int i;
+
+       for (i = 0; i < MAX_DEVICES; i++) {
+               if (!RDEV(i).path[0])
+                       return 0;
+
+               if (i == 0) {
+                       sbi->devs = kzalloc(sizeof(struct f2fs_dev_info) *
+                                               MAX_DEVICES, GFP_KERNEL);
+                       if (!sbi->devs)
+                               return -ENOMEM;
+               }
+
+               memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
+               FDEV(i).total_segments = le32_to_cpu(RDEV(i).total_segments);
+               if (i == 0) {
+                       FDEV(i).start_blk = 0;
+                       FDEV(i).end_blk = FDEV(i).start_blk +
+                               (FDEV(i).total_segments <<
+                               sbi->log_blocks_per_seg) - 1 +
+                               le32_to_cpu(raw_super->segment0_blkaddr);
+               } else {
+                       FDEV(i).start_blk = FDEV(i - 1).end_blk + 1;
+                       FDEV(i).end_blk = FDEV(i).start_blk +
+                               (FDEV(i).total_segments <<
+                               sbi->log_blocks_per_seg) - 1;
+               }
+
+               FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path,
+                                       sbi->sb->s_mode, sbi->sb->s_type);
+               if (IS_ERR(FDEV(i).bdev))
+                       return PTR_ERR(FDEV(i).bdev);
+
+               /* to release errored devices */
+               sbi->s_ndevs = i + 1;
+
+#ifdef CONFIG_BLK_DEV_ZONED
+               if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
+                               !f2fs_sb_mounted_blkzoned(sbi->sb)) {
+                       f2fs_msg(sbi->sb, KERN_ERR,
+                               "Zoned block device feature not enabled\n");
+                       return -EINVAL;
+               }
+               if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) {
+                       if (init_blkz_info(sbi, i)) {
+                               f2fs_msg(sbi->sb, KERN_ERR,
+                                       "Failed to initialize F2FS blkzone information");
+                               return -EINVAL;
+                       }
+                       f2fs_msg(sbi->sb, KERN_INFO,
+                               "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)",
+                               i, FDEV(i).path,
+                               FDEV(i).total_segments,
+                               FDEV(i).start_blk, FDEV(i).end_blk,
+                               bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ?
+                               "Host-aware" : "Host-managed");
+                       continue;
+               }
+#endif
+               f2fs_msg(sbi->sb, KERN_INFO,
+                       "Mount Device [%2d]: %20s, %8u, %8x - %8x",
+                               i, FDEV(i).path,
+                               FDEV(i).total_segments,
+                               FDEV(i).start_blk, FDEV(i).end_blk);
+       }
+       return 0;
 }
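
The start_blk/end_blk arithmetic above is easiest to follow with numbers. A
worked example (all values invented; one segment is 512 blocks, i.e.
log_blocks_per_seg == 9):

    /* Device 0: 1000 segments, segment0_blkaddr = 512
     *   start_blk = 0
     *   end_blk   = 0 + (1000 << 9) - 1 + 512 = 512511
     * Device 1: 500 segments
     *   start_blk = 512511 + 1 = 512512
     *   end_blk   = 512512 + (500 << 9) - 1 = 768511 */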
 
 static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
 {
        struct f2fs_sb_info *sbi;
        struct f2fs_super_block *raw_super;
-       struct buffer_head *raw_super_buf;
        struct inode *root;
-       long err = -EINVAL;
-       int i;
+       int err;
+       bool retry = true, need_fsck = false;
+       char *options = NULL;
+       int recovery, i, valid_super_block;
+       struct curseg_info *seg_i;
+
+try_onemore:
+       err = -EINVAL;
+       raw_super = NULL;
+       valid_super_block = -1;
+       recovery = 0;
 
        /* allocate memory for f2fs-specific super block info */
        sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
        if (!sbi)
                return -ENOMEM;
 
+       sbi->sb = sb;
+
+       /* Load the checksum driver */
+       sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
+       if (IS_ERR(sbi->s_chksum_driver)) {
+               f2fs_msg(sb, KERN_ERR, "Cannot load crc32 driver.");
+               err = PTR_ERR(sbi->s_chksum_driver);
+               sbi->s_chksum_driver = NULL;
+               goto free_sbi;
+       }
+
        /* set a block size */
-       if (!sb_set_blocksize(sb, F2FS_BLKSIZE)) {
+       if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
                f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
                goto free_sbi;
        }
 
-       err = validate_superblock(sb, &raw_super, &raw_super_buf, 0);
-       if (err) {
-               brelse(raw_super_buf);
-               /* check secondary superblock when primary failed */
-               err = validate_superblock(sb, &raw_super, &raw_super_buf, 1);
-               if (err)
-                       goto free_sb_buf;
-       }
-       /* init some FS parameters */
-       sbi->active_logs = NR_CURSEG_TYPE;
+       err = read_raw_super_block(sbi, &raw_super, &valid_super_block,
+                                                               &recovery);
+       if (err)
+               goto free_sbi;
 
-       set_opt(sbi, BG_GC);
+       sb->s_fs_info = sbi;
+       sbi->raw_super = raw_super;
 
-#ifdef CONFIG_F2FS_FS_XATTR
-       set_opt(sbi, XATTR_USER);
-#endif
-#ifdef CONFIG_F2FS_FS_POSIX_ACL
-       set_opt(sbi, POSIX_ACL);
+       /*
+        * The BLKZONED feature indicates that the drive was formatted with
+        * zone alignment optimization. This is optional for host-aware
+        * devices, but mandatory for host-managed zoned block devices.
+        */
+#ifndef CONFIG_BLK_DEV_ZONED
+       if (f2fs_sb_mounted_blkzoned(sb)) {
+               f2fs_msg(sb, KERN_ERR,
+                        "Zoned block device support is not enabled\n");
+               goto free_sb_buf;
+       }
 #endif
+       default_options(sbi);
        /* parse mount options */
-       err = parse_options(sb, sbi, (char *)data);
-       if (err)
+       options = kstrdup((const char *)data, GFP_KERNEL);
+       if (data && !options) {
+               err = -ENOMEM;
                goto free_sb_buf;
+       }
+
+       err = parse_options(sb, options);
+       if (err)
+               goto free_options;
 
-       sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize));
+       sbi->max_file_blocks = max_file_blocks();
+       sb->s_maxbytes = sbi->max_file_blocks <<
+                               le32_to_cpu(raw_super->log_blocksize);
        sb->s_max_links = F2FS_LINK_MAX;
        get_random_bytes(&sbi->s_next_generation, sizeof(u32));
 
        sb->s_op = &f2fs_sops;
+       sb->s_cop = &f2fs_cryptops;
        sb->s_xattr = f2fs_xattr_handlers;
        sb->s_export_op = &f2fs_export_ops;
        sb->s_magic = F2FS_SUPER_MAGIC;
-       sb->s_fs_info = sbi;
        sb->s_time_gran = 1;
        sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
                (test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
        memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
 
        /* init f2fs-specific super block info */
-       sbi->sb = sb;
-       sbi->raw_super = raw_super;
-       sbi->raw_super_buf = raw_super_buf;
+       sbi->valid_super_block = valid_super_block;
        mutex_init(&sbi->gc_mutex);
-       mutex_init(&sbi->writepages);
        mutex_init(&sbi->cp_mutex);
-       for (i = 0; i < NR_GLOBAL_LOCKS; i++)
-               mutex_init(&sbi->fs_lock[i]);
-       mutex_init(&sbi->node_write);
-       sbi->por_doing = 0;
+       init_rwsem(&sbi->node_write);
+
+       /* disallow all the data/node/meta page writes */
+       set_sbi_flag(sbi, SBI_POR_DOING);
        spin_lock_init(&sbi->stat_lock);
-       init_rwsem(&sbi->bio_sem);
+
+       init_rwsem(&sbi->read_io.io_rwsem);
+       sbi->read_io.sbi = sbi;
+       sbi->read_io.bio = NULL;
+       for (i = 0; i < NR_PAGE_TYPE; i++) {
+               init_rwsem(&sbi->write_io[i].io_rwsem);
+               sbi->write_io[i].sbi = sbi;
+               sbi->write_io[i].bio = NULL;
+       }
+
+       init_rwsem(&sbi->cp_rwsem);
+       init_waitqueue_head(&sbi->cp_wait);
        init_sb_info(sbi);
 
+       err = init_percpu_info(sbi);
+       if (err)
+               goto free_options;
+
        /* get an inode for meta space */
        sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
        if (IS_ERR(sbi->meta_inode)) {
                f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
                err = PTR_ERR(sbi->meta_inode);
-               goto free_sb_buf;
+               goto free_options;
        }
 
        err = get_valid_checkpoint(sbi);
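
The crc32 shash loaded above backs f2fs's metadata checksumming. A minimal
sketch of driving such a transform, modeled on the f2fs_crc32() helper this
series adds to f2fs.h (details illustrative, not verbatim):

    static u32 crc32_sketch(struct crypto_shash *tfm, u32 seed,
                            const void *addr, unsigned int len)
    {
            struct {
                    struct shash_desc shash;
                    char ctx[4];            /* crc32 keeps a 4-byte state */
            } desc;

            desc.shash.tfm = tfm;
            desc.shash.flags = 0;
            *(u32 *)desc.ctx = seed;        /* e.g. F2FS_SUPER_MAGIC */

            if (crypto_shash_update(&desc.shash, addr, len))
                    return 0;               /* sketch only; real code asserts */
            return *(u32 *)desc.ctx;
    }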
@@ -600,26 +1884,30 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
                goto free_meta_inode;
        }
 
-       /* sanity checking of checkpoint */
-       err = -EINVAL;
-       if (sanity_check_ckpt(sbi)) {
-               f2fs_msg(sb, KERN_ERR, "Invalid F2FS checkpoint");
-               goto free_cp;
+       /* Initialize device list */
+       err = f2fs_scan_devices(sbi);
+       if (err) {
+               f2fs_msg(sb, KERN_ERR, "Failed to find devices");
+               goto free_devices;
        }
 
        sbi->total_valid_node_count =
                                le32_to_cpu(sbi->ckpt->valid_node_count);
-       sbi->total_valid_inode_count =
-                               le32_to_cpu(sbi->ckpt->valid_inode_count);
+       percpu_counter_set(&sbi->total_valid_inode_count,
+                               le32_to_cpu(sbi->ckpt->valid_inode_count));
        sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
        sbi->total_valid_block_count =
                                le64_to_cpu(sbi->ckpt->valid_block_count);
        sbi->last_valid_block_count = sbi->total_valid_block_count;
-       sbi->alloc_valid_block_count = 0;
-       INIT_LIST_HEAD(&sbi->dir_inode_list);
-       spin_lock_init(&sbi->dir_inode_lock);
 
-       init_orphan_info(sbi);
+       for (i = 0; i < NR_INODE_TYPE; i++) {
+               INIT_LIST_HEAD(&sbi->inode_list[i]);
+               spin_lock_init(&sbi->inode_lock[i]);
+       }
+
+       init_extent_cache_info(sbi);
+
+       init_ino_entry_info(sbi);
 
        /* setup f2fs internal modules */
        err = build_segment_manager(sbi);
@@ -635,6 +1923,17 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
                goto free_nm;
        }
 
+       /* For write statistics */
+       if (sb->s_bdev->bd_part)
+               sbi->sectors_written_start =
+                       (u64)part_stat_read(sb->s_bdev->bd_part, sectors[1]);
+
+       /* Read accumulated write IO statistics if they exist */
+       seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
+       if (__exist_node_summaries(sbi))
+               sbi->kbytes_written =
+                       le64_to_cpu(seg_i->journal->info.kbytes_written);
+
        build_gc_manager(sbi);
 
        /* get an inode for node space */
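
The two figures captured above combine into the lifetime-write statistic
f2fs exports; a hedged sketch of the arithmetic (the sysfs attribute name,
lifetime_write_kbytes, is assumed from mainline f2fs):

    static u64 lifetime_write_kb_sketch(struct f2fs_sb_info *sbi)
    {
            u64 now = (u64)part_stat_read(sbi->sb->s_bdev->bd_part, sectors[1]);

            /* sectors are 512 bytes, so >> 1 converts sectors to kilobytes */
            return sbi->kbytes_written +
                    ((now - sbi->sectors_written_start) >> 1);
    }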
@@ -645,9 +1944,11 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
                goto free_nm;
        }
 
+       f2fs_join_shrinker(sbi);
+
        /* if there are any orphan nodes, free them */
-       err = -EINVAL;
-       if (recover_orphan_inodes(sbi))
+       err = recover_orphan_inodes(sbi);
+       if (err)
                goto free_node_inode;
 
        /* read root inode and dentry */
@@ -657,8 +1958,11 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
                err = PTR_ERR(root);
                goto free_node_inode;
        }
-       if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size)
-               goto free_root_inode;
+       if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
+               iput(root);
+               err = -EINVAL;
+               goto free_node_inode;
+       }
 
        sb->s_root = d_make_root(root); /* allocate root dentry */
        if (!sb->s_root) {
@@ -666,52 +1970,145 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
                goto free_root_inode;
        }
 
+       err = f2fs_build_stats(sbi);
+       if (err)
+               goto free_root_inode;
+
+       if (f2fs_proc_root)
+               sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);
+
+       if (sbi->s_proc) {
+               proc_create_data("segment_info", S_IRUGO, sbi->s_proc,
+                                &f2fs_seq_segment_info_fops, sb);
+               proc_create_data("segment_bits", S_IRUGO, sbi->s_proc,
+                                &f2fs_seq_segment_bits_fops, sb);
+       }
+
+       sbi->s_kobj.kset = f2fs_kset;
+       init_completion(&sbi->s_kobj_unregister);
+       err = kobject_init_and_add(&sbi->s_kobj, &f2fs_ktype, NULL,
+                                                       "%s", sb->s_id);
+       if (err)
+               goto free_proc;
+
        /* recover fsynced data */
        if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
-               err = recover_fsync_data(sbi);
-               if (err)
+               /*
+                * the mount should fail when the device is read-only and the
+                * previous checkpoint was not taken by a clean system shutdown.
+                */
+               if (bdev_read_only(sb->s_bdev) &&
+                               !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
+                       err = -EROFS;
+                       goto free_kobj;
+               }
+
+               if (need_fsck)
+                       set_sbi_flag(sbi, SBI_NEED_FSCK);
+
+               if (!retry)
+                       goto skip_recovery;
+
+               err = recover_fsync_data(sbi, false);
+               if (err < 0) {
+                       need_fsck = true;
                        f2fs_msg(sb, KERN_ERR,
-                               "Cannot recover all fsync data errno=%ld", err);
-       }
+                               "Cannot recover all fsync data errno=%d", err);
+                       goto free_kobj;
+               }
+       } else {
+               err = recover_fsync_data(sbi, true);
 
-       /* After POR, we can run background GC thread */
-       err = start_gc_thread(sbi);
-       if (err)
-               goto fail;
+               if (!f2fs_readonly(sb) && err > 0) {
+                       err = -EINVAL;
+                       f2fs_msg(sb, KERN_ERR,
+                               "Need to recover fsync data");
+                       goto free_kobj;
+               }
+       }
+skip_recovery:
+       /* recover_fsync_data() cleared this already */
+       clear_sbi_flag(sbi, SBI_POR_DOING);
 
-       err = f2fs_build_stats(sbi);
-       if (err)
-               goto fail;
+       /*
+        * If the filesystem is not mounted read-only,
+        * start the background GC thread.
+        */
+       if (test_opt(sbi, BG_GC) && !f2fs_readonly(sb)) {
+               /* After POR, we can run the background GC thread. */
+               err = start_gc_thread(sbi);
+               if (err)
+                       goto free_kobj;
+       }
+       kfree(options);
 
-       if (test_opt(sbi, DISCARD)) {
-               struct request_queue *q = bdev_get_queue(sb->s_bdev);
-               if (!blk_queue_discard(q))
-                       f2fs_msg(sb, KERN_WARNING,
-                                       "mounting with \"discard\" option, but "
-                                       "the device does not support discard");
+       /* recover broken superblock */
+       if (recovery) {
+               err = f2fs_commit_super(sbi, true);
+               f2fs_msg(sb, KERN_INFO,
+                       "Try to recover %dth superblock, ret: %d",
+                       sbi->valid_super_block ? 1 : 2, err);
        }
 
+       f2fs_update_time(sbi, CP_TIME);
+       f2fs_update_time(sbi, REQ_TIME);
        return 0;
-fail:
-       stop_gc_thread(sbi);
+
+free_kobj:
+       f2fs_sync_inode_meta(sbi);
+       kobject_del(&sbi->s_kobj);
+       kobject_put(&sbi->s_kobj);
+       wait_for_completion(&sbi->s_kobj_unregister);
+free_proc:
+       if (sbi->s_proc) {
+               remove_proc_entry("segment_info", sbi->s_proc);
+               remove_proc_entry("segment_bits", sbi->s_proc);
+               remove_proc_entry(sb->s_id, f2fs_proc_root);
+       }
+       f2fs_destroy_stats(sbi);
 free_root_inode:
        dput(sb->s_root);
        sb->s_root = NULL;
 free_node_inode:
+       truncate_inode_pages(NODE_MAPPING(sbi), 0);
+       mutex_lock(&sbi->umount_mutex);
+       release_ino_entry(sbi, true);
+       f2fs_leave_shrinker(sbi);
+       /*
+        * Some dirty meta pages can be produced when recover_orphan_inodes()
+        * fails with EIO. Then iput(node_inode) can trigger balance_fs_bg()
+        * followed by write_checkpoint() through f2fs_write_node_pages(), which
+        * falls into an infinite loop in sync_meta_pages().
+        */
+       truncate_inode_pages(META_MAPPING(sbi), 0);
        iput(sbi->node_inode);
+       mutex_unlock(&sbi->umount_mutex);
 free_nm:
        destroy_node_manager(sbi);
 free_sm:
        destroy_segment_manager(sbi);
-free_cp:
+free_devices:
+       destroy_device_list(sbi);
        kfree(sbi->ckpt);
 free_meta_inode:
        make_bad_inode(sbi->meta_inode);
        iput(sbi->meta_inode);
+free_options:
+       destroy_percpu_info(sbi);
+       kfree(options);
 free_sb_buf:
-       brelse(raw_super_buf);
+       kfree(raw_super);
 free_sbi:
+       if (sbi->s_chksum_driver)
+               crypto_free_shash(sbi->s_chksum_driver);
        kfree(sbi);
+
+       /* give only one more chance */
+       if (retry) {
+               retry = false;
+               shrink_dcache_sb(sb);
+               goto try_onemore;
+       }
        return err;
 }
 
@@ -721,11 +2118,18 @@ static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
        return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
 }
 
+static void kill_f2fs_super(struct super_block *sb)
+{
+       if (sb->s_root)
+               set_sbi_flag(F2FS_SB(sb), SBI_IS_CLOSE);
+       kill_block_super(sb);
+}
+
 static struct file_system_type f2fs_fs_type = {
        .owner          = THIS_MODULE,
        .name           = "f2fs",
        .mount          = f2fs_mount,
-       .kill_sb        = kill_block_super,
+       .kill_sb        = kill_f2fs_super,
        .fs_flags       = FS_REQUIRES_DEV,
 };
 MODULE_ALIAS_FS("f2fs");
@@ -733,8 +2137,8 @@ MODULE_ALIAS_FS("f2fs");
 static int __init init_inodecache(void)
 {
        f2fs_inode_cachep = f2fs_kmem_cache_create("f2fs_inode_cache",
-                       sizeof(struct f2fs_inode_info), NULL);
-       if (f2fs_inode_cachep == NULL)
+                       sizeof(struct f2fs_inode_info));
+       if (!f2fs_inode_cachep)
                return -ENOMEM;
        return 0;
 }
@@ -753,34 +2157,72 @@ static int __init init_f2fs_fs(void)
 {
        int err;
 
+       f2fs_build_trace_ios();
+
        err = init_inodecache();
        if (err)
                goto fail;
        err = create_node_manager_caches();
        if (err)
-               goto fail;
-       err = create_gc_caches();
+               goto free_inodecache;
+       err = create_segment_manager_caches();
        if (err)
-               goto fail;
+               goto free_node_manager_caches;
        err = create_checkpoint_caches();
        if (err)
-               goto fail;
+               goto free_segment_manager_caches;
+       err = create_extent_cache();
+       if (err)
+               goto free_checkpoint_caches;
+       f2fs_kset = kset_create_and_add("f2fs", NULL, fs_kobj);
+       if (!f2fs_kset) {
+               err = -ENOMEM;
+               goto free_extent_cache;
+       }
+
+       register_shrinker(&f2fs_shrinker_info);
+
        err = register_filesystem(&f2fs_fs_type);
        if (err)
-               goto fail;
-       f2fs_create_root_stats();
+               goto free_shrinker;
+       err = f2fs_create_root_stats();
+       if (err)
+               goto free_filesystem;
+       f2fs_proc_root = proc_mkdir("fs/f2fs", NULL);
+       return 0;
+
+free_filesystem:
+       unregister_filesystem(&f2fs_fs_type);
+free_shrinker:
+       unregister_shrinker(&f2fs_shrinker_info);
+       kset_unregister(f2fs_kset);
+free_extent_cache:
+       destroy_extent_cache();
+free_checkpoint_caches:
+       destroy_checkpoint_caches();
+free_segment_manager_caches:
+       destroy_segment_manager_caches();
+free_node_manager_caches:
+       destroy_node_manager_caches();
+free_inodecache:
+       destroy_inodecache();
 fail:
        return err;
 }
 
 static void __exit exit_f2fs_fs(void)
 {
+       remove_proc_entry("fs/f2fs", NULL);
        f2fs_destroy_root_stats();
        unregister_filesystem(&f2fs_fs_type);
+       unregister_shrinker(&f2fs_shrinker_info);
+       kset_unregister(f2fs_kset);
+       destroy_extent_cache();
        destroy_checkpoint_caches();
-       destroy_gc_caches();
+       destroy_segment_manager_caches();
        destroy_node_manager_caches();
        destroy_inodecache();
+       f2fs_destroy_trace_ios();
 }
 
 module_init(init_f2fs_fs)
@@ -789,3 +2231,4 @@ module_exit(exit_f2fs_fs)
 MODULE_AUTHOR("Samsung Electronics's Praesto Team");
 MODULE_DESCRIPTION("Flash Friendly File System");
 MODULE_LICENSE("GPL");
+
diff --git a/fs/f2fs/trace.c b/fs/f2fs/trace.c
new file mode 100644 (file)
index 0000000..562ce08
--- /dev/null
@@ -0,0 +1,161 @@
+/*
+ * f2fs IO tracer
+ *
+ * Copyright (c) 2014 Motorola Mobility
+ * Copyright (c) 2014 Jaegeuk Kim <jaegeuk@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/fs.h>
+#include <linux/f2fs_fs.h>
+#include <linux/sched.h>
+#include <linux/radix-tree.h>
+
+#include "f2fs.h"
+#include "trace.h"
+
+static RADIX_TREE(pids, GFP_ATOMIC);
+static spinlock_t pids_lock;
+static struct last_io_info last_io;
+
+static inline void __print_last_io(void)
+{
+       if (!last_io.len)
+               return;
+
+       trace_printk("%3x:%3x %4x %-16s %2x %5x %12x %4x\n",
+                       last_io.major, last_io.minor,
+                       last_io.pid, "----------------",
+                       last_io.type,
+                       last_io.fio.rw,
+                       last_io.fio.new_blkaddr,
+                       last_io.len);
+       memset(&last_io, 0, sizeof(last_io));
+}
+
+static int __file_type(struct inode *inode, pid_t pid)
+{
+       if (f2fs_is_atomic_file(inode))
+               return __ATOMIC_FILE;
+       else if (f2fs_is_volatile_file(inode))
+               return __VOLATILE_FILE;
+       else if (S_ISDIR(inode->i_mode))
+               return __DIR_FILE;
+       else if (inode->i_ino == F2FS_NODE_INO(F2FS_I_SB(inode)))
+               return __NODE_FILE;
+       else if (inode->i_ino == F2FS_META_INO(F2FS_I_SB(inode)))
+               return __META_FILE;
+       else if (pid)
+               return __NORMAL_FILE;
+       else
+               return __MISC_FILE;
+}
+
+void f2fs_trace_pid(struct page *page)
+{
+       struct inode *inode = page->mapping->host;
+       pid_t pid = task_pid_nr(current);
+       void *p;
+
+       page->private = pid;
+
+       if (radix_tree_preload(GFP_NOFS))
+               return;
+
+       spin_lock(&pids_lock);
+       p = radix_tree_lookup(&pids, pid);
+       if (p == current)
+               goto out;
+       if (p)
+               radix_tree_delete(&pids, pid);
+
+       f2fs_radix_tree_insert(&pids, pid, current);
+
+       trace_printk("%3x:%3x %4x %-16s\n",
+                       MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev),
+                       pid, current->comm);
+out:
+       spin_unlock(&pids_lock);
+       radix_tree_preload_end();
+}
+
+void f2fs_trace_ios(struct f2fs_io_info *fio, int flush)
+{
+       struct inode *inode;
+       pid_t pid;
+       int major, minor;
+
+       if (flush) {
+               __print_last_io();
+               return;
+       }
+
+       inode = fio->page->mapping->host;
+       pid = page_private(fio->page);
+
+       major = MAJOR(inode->i_sb->s_dev);
+       minor = MINOR(inode->i_sb->s_dev);
+
+       if (last_io.major == major && last_io.minor == minor &&
+                       last_io.pid == pid &&
+                       last_io.type == __file_type(inode, pid) &&
+                       last_io.fio.rw == fio->rw &&
+                       last_io.fio.new_blkaddr + last_io.len ==
+                                                       fio->new_blkaddr) {
+               last_io.len++;
+               return;
+       }
+
+       __print_last_io();
+
+       last_io.major = major;
+       last_io.minor = minor;
+       last_io.pid = pid;
+       last_io.type = __file_type(inode, pid);
+       last_io.fio = *fio;
+       last_io.len = 1;
+       return;
+}
+
+void f2fs_build_trace_ios(void)
+{
+       spin_lock_init(&pids_lock);
+}
+
+#define PIDVEC_SIZE    128
+static unsigned int gang_lookup_pids(pid_t *results, unsigned long first_index,
+                                                       unsigned int max_items)
+{
+       struct radix_tree_iter iter;
+       void **slot;
+       unsigned int ret = 0;
+
+       if (unlikely(!max_items))
+               return 0;
+
+       radix_tree_for_each_slot(slot, &pids, &iter, first_index) {
+               results[ret] = iter.index;
+               if (++ret == PIDVEC_SIZE)
+                       break;
+       }
+       return ret;
+}
+
+void f2fs_destroy_trace_ios(void)
+{
+       pid_t pid[PIDVEC_SIZE];
+       pid_t next_pid = 0;
+       unsigned int found;
+
+       spin_lock(&pids_lock);
+       while ((found = gang_lookup_pids(pid, next_pid, PIDVEC_SIZE))) {
+               unsigned idx;
+
+               next_pid = pid[found - 1] + 1;
+               for (idx = 0; idx < found; idx++)
+                       radix_tree_delete(&pids, pid[idx]);
+       }
+       spin_unlock(&pids_lock);
+}
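
Taken together, f2fs_trace_pid() and f2fs_trace_ios() emit one line per
newly seen pid and one line per merged run of contiguous, same-type I/O. A
hedged example of the resulting ftrace output (all field values invented;
the columns follow the two trace_printk() formats above):

      8:  3  4d2 fsstress
      8:  3  4d2 ----------------  1   501         3c00   20

The first line maps pid 0x4d2 to its comm; the second records major:minor,
pid, file type, rw flags, the starting block address 0x3c00 and a run of
0x20 blocks.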
diff --git a/fs/f2fs/trace.h b/fs/f2fs/trace.h
new file mode 100644 (file)
index 0000000..67db24a
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * f2fs IO tracer
+ *
+ * Copyright (c) 2014 Motorola Mobility
+ * Copyright (c) 2014 Jaegeuk Kim <jaegeuk@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __F2FS_TRACE_H__
+#define __F2FS_TRACE_H__
+
+#ifdef CONFIG_F2FS_IO_TRACE
+#include <trace/events/f2fs.h>
+
+enum file_type {
+       __NORMAL_FILE,
+       __DIR_FILE,
+       __NODE_FILE,
+       __META_FILE,
+       __ATOMIC_FILE,
+       __VOLATILE_FILE,
+       __MISC_FILE,
+};
+
+struct last_io_info {
+       int major, minor;
+       pid_t pid;
+       enum file_type type;
+       struct f2fs_io_info fio;
+       block_t len;
+};
+
+extern void f2fs_trace_pid(struct page *);
+extern void f2fs_trace_ios(struct f2fs_io_info *, int);
+extern void f2fs_build_trace_ios(void);
+extern void f2fs_destroy_trace_ios(void);
+#else
+#define f2fs_trace_pid(p)
+#define f2fs_trace_ios(i, n)
+#define f2fs_build_trace_ios()
+#define f2fs_destroy_trace_ios()
+
+#endif
+#endif /* __F2FS_TRACE_H__ */
index 0b02dce313565d5f287b96f464efca6cad0ea2cd..9a8a7d8b7bdfc0d3ffe59c93849408d1604a50f4 100644 (file)
  */
 #include <linux/rwsem.h>
 #include <linux/f2fs_fs.h>
+#include <linux/security.h>
 #include "f2fs.h"
 #include "xattr.h"
 
 static size_t f2fs_xattr_generic_list(struct dentry *dentry, char *list,
-               size_t list_size, const char *name, size_t name_len, int type)
+               size_t list_size, const char *name, size_t len, int type)
 {
        struct f2fs_sb_info *sbi = F2FS_SB(dentry->d_sb);
        int total_len, prefix_len = 0;
@@ -43,15 +44,19 @@ static size_t f2fs_xattr_generic_list(struct dentry *dentry, char *list,
                prefix = XATTR_TRUSTED_PREFIX;
                prefix_len = XATTR_TRUSTED_PREFIX_LEN;
                break;
+       case F2FS_XATTR_INDEX_SECURITY:
+               prefix = XATTR_SECURITY_PREFIX;
+               prefix_len = XATTR_SECURITY_PREFIX_LEN;
+               break;
        default:
                return -EINVAL;
        }
 
-       total_len = prefix_len + name_len + 1;
+       total_len = prefix_len + len + 1;
        if (list && total_len <= list_size) {
                memcpy(list, prefix, prefix_len);
-               memcpy(list+prefix_len, name, name_len);
-               list[prefix_len + name_len] = '\0';
+               memcpy(list + prefix_len, name, len);
+               list[prefix_len + len] = '\0';
        }
        return total_len;
 }
@@ -70,13 +75,14 @@ static int f2fs_xattr_generic_get(struct dentry *dentry, const char *name,
                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;
                break;
+       case F2FS_XATTR_INDEX_SECURITY:
+               break;
        default:
                return -EINVAL;
        }
        if (strcmp(name, "") == 0)
                return -EINVAL;
-       return f2fs_getxattr(dentry->d_inode, type, name,
-                       buffer, size);
+       return f2fs_getxattr(d_inode(dentry), type, name, buffer, size, NULL);
 }
 
 static int f2fs_xattr_generic_set(struct dentry *dentry, const char *name,
@@ -93,17 +99,20 @@ static int f2fs_xattr_generic_set(struct dentry *dentry, const char *name,
                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;
                break;
+       case F2FS_XATTR_INDEX_SECURITY:
+               break;
        default:
                return -EINVAL;
        }
        if (strcmp(name, "") == 0)
                return -EINVAL;
 
-       return f2fs_setxattr(dentry->d_inode, type, name, value, size);
+       return f2fs_setxattr(d_inode(dentry), type, name,
+                                       value, size, NULL, flags);
 }
 
 static size_t f2fs_xattr_advise_list(struct dentry *dentry, char *list,
-               size_t list_size, const char *name, size_t name_len, int type)
+               size_t list_size, const char *name, size_t len, int type)
 {
        const char *xname = F2FS_SYSTEM_ADVISE_PREFIX;
        size_t size;
@@ -120,19 +129,20 @@ static size_t f2fs_xattr_advise_list(struct dentry *dentry, char *list,
 static int f2fs_xattr_advise_get(struct dentry *dentry, const char *name,
                void *buffer, size_t size, int type)
 {
-       struct inode *inode = dentry->d_inode;
+       struct inode *inode = d_inode(dentry);
 
        if (strcmp(name, "") != 0)
                return -EINVAL;
 
-       *((char *)buffer) = F2FS_I(inode)->i_advise;
+       if (buffer)
+               *((char *)buffer) = F2FS_I(inode)->i_advise;
        return sizeof(char);
 }
 
 static int f2fs_xattr_advise_set(struct dentry *dentry, const char *name,
                const void *value, size_t size, int flags, int type)
 {
-       struct inode *inode = dentry->d_inode;
+       struct inode *inode = d_inode(dentry);
 
        if (strcmp(name, "") != 0)
                return -EINVAL;
@@ -142,9 +152,35 @@ static int f2fs_xattr_advise_set(struct dentry *dentry, const char *name,
                return -EINVAL;
 
        F2FS_I(inode)->i_advise |= *(char *)value;
+       f2fs_mark_inode_dirty_sync(inode, true);
        return 0;
 }
 
+#ifdef CONFIG_F2FS_FS_SECURITY
+static int f2fs_initxattrs(struct inode *inode, const struct xattr *xattr_array,
+               void *page)
+{
+       const struct xattr *xattr;
+       int err = 0;
+
+       for (xattr = xattr_array; xattr->name != NULL; xattr++) {
+               err = f2fs_setxattr(inode, F2FS_XATTR_INDEX_SECURITY,
+                               xattr->name, xattr->value,
+                               xattr->value_len, (struct page *)page, 0);
+               if (err < 0)
+                       break;
+       }
+       return err;
+}
+
+int f2fs_init_security(struct inode *inode, struct inode *dir,
+                               const struct qstr *qstr, struct page *ipage)
+{
+       return security_inode_init_security(inode, dir, qstr,
+                               &f2fs_initxattrs, ipage);
+}
+#endif
+
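
f2fs_init_security() above is meant to run once at inode creation, before
the new inode is published. A hedged sketch of the call site (in this series
it sits in dir.c's init_inode_metadata(); abridged, not verbatim):

    err = f2fs_init_security(inode, dir, qstr, ipage);
    if (err)
            goto put_error;         /* unwind the half-built inode */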
 const struct xattr_handler f2fs_xattr_user_handler = {
        .prefix = XATTR_USER_PREFIX,
        .flags  = F2FS_XATTR_INDEX_USER,
@@ -169,6 +205,14 @@ const struct xattr_handler f2fs_xattr_advise_handler = {
        .set    = f2fs_xattr_advise_set,
 };
 
+const struct xattr_handler f2fs_xattr_security_handler = {
+       .prefix = XATTR_SECURITY_PREFIX,
+       .flags  = F2FS_XATTR_INDEX_SECURITY,
+       .list   = f2fs_xattr_generic_list,
+       .get    = f2fs_xattr_generic_get,
+       .set    = f2fs_xattr_generic_set,
+};
+
 static const struct xattr_handler *f2fs_xattr_handler_map[] = {
        [F2FS_XATTR_INDEX_USER] = &f2fs_xattr_user_handler,
 #ifdef CONFIG_F2FS_FS_POSIX_ACL
@@ -176,6 +220,9 @@ static const struct xattr_handler *f2fs_xattr_handler_map[] = {
        [F2FS_XATTR_INDEX_POSIX_ACL_DEFAULT] = &f2fs_xattr_acl_default_handler,
 #endif
        [F2FS_XATTR_INDEX_TRUSTED] = &f2fs_xattr_trusted_handler,
+#ifdef CONFIG_F2FS_FS_SECURITY
+       [F2FS_XATTR_INDEX_SECURITY] = &f2fs_xattr_security_handler,
+#endif
        [F2FS_XATTR_INDEX_ADVISE] = &f2fs_xattr_advise_handler,
 };
 
@@ -186,89 +233,233 @@ const struct xattr_handler *f2fs_xattr_handlers[] = {
        &f2fs_xattr_acl_default_handler,
 #endif
        &f2fs_xattr_trusted_handler,
+#ifdef CONFIG_F2FS_FS_SECURITY
+       &f2fs_xattr_security_handler,
+#endif
        &f2fs_xattr_advise_handler,
        NULL,
 };
 
-static inline const struct xattr_handler *f2fs_xattr_handler(int name_index)
+static inline const struct xattr_handler *f2fs_xattr_handler(int index)
 {
        const struct xattr_handler *handler = NULL;
 
-       if (name_index > 0 && name_index < ARRAY_SIZE(f2fs_xattr_handler_map))
-               handler = f2fs_xattr_handler_map[name_index];
+       if (index > 0 && index < ARRAY_SIZE(f2fs_xattr_handler_map))
+               handler = f2fs_xattr_handler_map[index];
        return handler;
 }
 
-int f2fs_getxattr(struct inode *inode, int name_index, const char *name,
-               void *buffer, size_t buffer_size)
+static struct f2fs_xattr_entry *__find_xattr(void *base_addr, int index,
+                                       size_t len, const char *name)
 {
-       struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
-       struct f2fs_inode_info *fi = F2FS_I(inode);
        struct f2fs_xattr_entry *entry;
-       struct page *page;
-       void *base_addr;
-       int error = 0, found = 0;
-       size_t value_len, name_len;
-
-       if (name == NULL)
-               return -EINVAL;
-       name_len = strlen(name);
-
-       if (!fi->i_xattr_nid)
-               return -ENODATA;
-
-       page = get_node_page(sbi, fi->i_xattr_nid);
-       base_addr = page_address(page);
 
        list_for_each_xattr(entry, base_addr) {
-               if (entry->e_name_index != name_index)
+               if (entry->e_name_index != index)
                        continue;
-               if (entry->e_name_len != name_len)
+               if (entry->e_name_len != len)
                        continue;
-               if (!memcmp(entry->e_name, name, name_len)) {
-                       found = 1;
+               if (!memcmp(entry->e_name, name, len))
                        break;
+       }
+       return entry;
+}
+
+static int read_all_xattrs(struct inode *inode, struct page *ipage,
+                                                       void **base_addr)
+{
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       struct f2fs_xattr_header *header;
+       size_t size = PAGE_SIZE, inline_size = 0;
+       void *txattr_addr;
+       int err;
+
+       inline_size = inline_xattr_size(inode);
+
+       txattr_addr = kzalloc(inline_size + size, GFP_F2FS_ZERO);
+       if (!txattr_addr)
+               return -ENOMEM;
+
+       /* read from inline xattr */
+       if (inline_size) {
+               struct page *page = NULL;
+               void *inline_addr;
+
+               if (ipage) {
+                       inline_addr = inline_xattr_addr(ipage);
+               } else {
+                       page = get_node_page(sbi, inode->i_ino);
+                       if (IS_ERR(page)) {
+                               err = PTR_ERR(page);
+                               goto fail;
+                       }
+                       inline_addr = inline_xattr_addr(page);
                }
+               memcpy(txattr_addr, inline_addr, inline_size);
+               f2fs_put_page(page, 1);
+       }
+
+       /* read from xattr node block */
+       if (F2FS_I(inode)->i_xattr_nid) {
+               struct page *xpage;
+               void *xattr_addr;
+
+               /* The inode already has an extended attribute block. */
+               xpage = get_node_page(sbi, F2FS_I(inode)->i_xattr_nid);
+               if (IS_ERR(xpage)) {
+                       err = PTR_ERR(xpage);
+                       goto fail;
+               }
+
+               xattr_addr = page_address(xpage);
+               memcpy(txattr_addr + inline_size, xattr_addr, PAGE_SIZE);
+               f2fs_put_page(xpage, 1);
        }
-       if (!found) {
+
+       header = XATTR_HDR(txattr_addr);
+
+       /* xattrs have never been allocated for this inode */
+       if (le32_to_cpu(header->h_magic) != F2FS_XATTR_MAGIC) {
+               header->h_magic = cpu_to_le32(F2FS_XATTR_MAGIC);
+               header->h_refcount = cpu_to_le32(1);
+       }
+       *base_addr = txattr_addr;
+       return 0;
+fail:
+       kzfree(txattr_addr);
+       return err;
+}
+
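
read_all_xattrs() flattens both backing stores into one contiguous buffer,
which is what lets __find_xattr() above walk every entry with a single loop.
The layout it builds (sizes as used in the code; sketch only):

    /*
     * txattr_addr:
     *   [0, inline_size)                   inline xattrs from the inode block
     *   [inline_size, inline_size + 4096)  copy of the separate xattr node
     * Entries are packed back to back; the walk stops at the zeroed
     * terminator that IS_XATTR_LAST_ENTRY() detects.
     */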
+static inline int write_all_xattrs(struct inode *inode, __u32 hsize,
+                               void *txattr_addr, struct page *ipage)
+{
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       size_t inline_size = 0;
+       void *xattr_addr;
+       struct page *xpage;
+       nid_t new_nid = 0;
+       int err;
+
+       inline_size = inline_xattr_size(inode);
+
+       if (hsize > inline_size && !F2FS_I(inode)->i_xattr_nid)
+               if (!alloc_nid(sbi, &new_nid))
+                       return -ENOSPC;
+
+       /* write to inline xattr */
+       if (inline_size) {
+               struct page *page = NULL;
+               void *inline_addr;
+
+               if (ipage) {
+                       inline_addr = inline_xattr_addr(ipage);
+                       f2fs_wait_on_page_writeback(ipage, NODE, true);
+                       set_page_dirty(ipage);
+               } else {
+                       page = get_node_page(sbi, inode->i_ino);
+                       if (IS_ERR(page)) {
+                               alloc_nid_failed(sbi, new_nid);
+                               return PTR_ERR(page);
+                       }
+                       inline_addr = inline_xattr_addr(page);
+                       f2fs_wait_on_page_writeback(page, NODE, true);
+               }
+               memcpy(inline_addr, txattr_addr, inline_size);
+               f2fs_put_page(page, 1);
+
+               /* no need to use xattr node block */
+               if (hsize <= inline_size) {
+                       err = truncate_xattr_node(inode, ipage);
+                       alloc_nid_failed(sbi, new_nid);
+                       return err;
+               }
+       }
+
+       /* write to xattr node block */
+       if (F2FS_I(inode)->i_xattr_nid) {
+               xpage = get_node_page(sbi, F2FS_I(inode)->i_xattr_nid);
+               if (IS_ERR(xpage)) {
+                       alloc_nid_failed(sbi, new_nid);
+                       return PTR_ERR(xpage);
+               }
+               f2fs_bug_on(sbi, new_nid);
+               f2fs_wait_on_page_writeback(xpage, NODE, true);
+       } else {
+               struct dnode_of_data dn;
+               set_new_dnode(&dn, inode, NULL, NULL, new_nid);
+               xpage = new_node_page(&dn, XATTR_NODE_OFFSET, ipage);
+               if (IS_ERR(xpage)) {
+                       alloc_nid_failed(sbi, new_nid);
+                       return PTR_ERR(xpage);
+               }
+               alloc_nid_done(sbi, new_nid);
+       }
+
+       xattr_addr = page_address(xpage);
+       memcpy(xattr_addr, txattr_addr + inline_size, PAGE_SIZE -
+                                               sizeof(struct node_footer));
+       set_page_dirty(xpage);
+       f2fs_put_page(xpage, 1);
+
+       /* need to checkpoint during fsync */
+       F2FS_I(inode)->xattr_ver = cur_cp_version(F2FS_CKPT(sbi));
+       return 0;
+}
+
+int f2fs_getxattr(struct inode *inode, int index, const char *name,
+               void *buffer, size_t buffer_size, struct page *ipage)
+{
+       struct f2fs_xattr_entry *entry;
+       void *base_addr;
+       int error = 0;
+       size_t size, len;
+
+       if (name == NULL)
+               return -EINVAL;
+
+       len = strlen(name);
+       if (len > F2FS_NAME_LEN)
+               return -ERANGE;
+
+       error = read_all_xattrs(inode, ipage, &base_addr);
+       if (error)
+               return error;
+
+       entry = __find_xattr(base_addr, index, len, name);
+       if (IS_XATTR_LAST_ENTRY(entry)) {
                error = -ENODATA;
                goto cleanup;
        }
 
-       value_len = le16_to_cpu(entry->e_value_size);
+       size = le16_to_cpu(entry->e_value_size);
 
-       if (buffer && value_len > buffer_size) {
+       if (buffer && size > buffer_size) {
                error = -ERANGE;
                goto cleanup;
        }
 
        if (buffer) {
                char *pval = entry->e_name + entry->e_name_len;
-               memcpy(buffer, pval, value_len);
+               memcpy(buffer, pval, size);
        }
-       error = value_len;
+       error = size;
 
 cleanup:
-       f2fs_put_page(page, 1);
+       kzfree(base_addr);
        return error;
 }
 
 ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
 {
-       struct inode *inode = dentry->d_inode;
-       struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
-       struct f2fs_inode_info *fi = F2FS_I(inode);
+       struct inode *inode = d_inode(dentry);
        struct f2fs_xattr_entry *entry;
-       struct page *page;
        void *base_addr;
        int error = 0;
        size_t rest = buffer_size;
 
-       if (!fi->i_xattr_nid)
-               return 0;
-
-       page = get_node_page(sbi, fi->i_xattr_nid);
-       base_addr = page_address(page);
+       error = read_all_xattrs(inode, NULL, &base_addr);
+       if (error)
+               return error;
 
        list_for_each_xattr(entry, base_addr) {
                const struct xattr_handler *handler =
@@ -291,119 +482,79 @@ ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
        }
        error = buffer_size - rest;
 cleanup:
-       f2fs_put_page(page, 1);
+       kzfree(base_addr);
        return error;
 }
 
-int f2fs_setxattr(struct inode *inode, int name_index, const char *name,
-                                       const void *value, size_t value_len)
+static int __f2fs_setxattr(struct inode *inode, int index,
+                       const char *name, const void *value, size_t size,
+                       struct page *ipage, int flags)
 {
-       struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
-       struct f2fs_inode_info *fi = F2FS_I(inode);
-       struct f2fs_xattr_header *header = NULL;
        struct f2fs_xattr_entry *here, *last;
-       struct page *page;
        void *base_addr;
-       int error, found, free, newsize;
-       size_t name_len;
-       char *pval;
-       int ilock;
+       int found, newsize;
+       size_t len;
+       __u32 new_hsize;
+       int error = 0;
 
        if (name == NULL)
                return -EINVAL;
 
        if (value == NULL)
-               value_len = 0;
+               size = 0;
 
-       name_len = strlen(name);
+       len = strlen(name);
 
-       if (name_len > F2FS_NAME_LEN || value_len > MAX_VALUE_LEN)
+       if (len > F2FS_NAME_LEN)
                return -ERANGE;
 
-       f2fs_balance_fs(sbi);
-
-       ilock = mutex_lock_op(sbi);
-
-       if (!fi->i_xattr_nid) {
-               /* Allocate new attribute block */
-               struct dnode_of_data dn;
+       if (size > MAX_VALUE_LEN(inode))
+               return -E2BIG;
 
-               if (!alloc_nid(sbi, &fi->i_xattr_nid)) {
-                       error = -ENOSPC;
-                       goto exit;
-               }
-               set_new_dnode(&dn, inode, NULL, NULL, fi->i_xattr_nid);
-               mark_inode_dirty(inode);
-
-               page = new_node_page(&dn, XATTR_NODE_OFFSET);
-               if (IS_ERR(page)) {
-                       alloc_nid_failed(sbi, fi->i_xattr_nid);
-                       fi->i_xattr_nid = 0;
-                       error = PTR_ERR(page);
-                       goto exit;
-               }
-
-               alloc_nid_done(sbi, fi->i_xattr_nid);
-               base_addr = page_address(page);
-               header = XATTR_HDR(base_addr);
-               header->h_magic = cpu_to_le32(F2FS_XATTR_MAGIC);
-               header->h_refcount = cpu_to_le32(1);
-       } else {
-               /* The inode already has an extended attribute block. */
-               page = get_node_page(sbi, fi->i_xattr_nid);
-               if (IS_ERR(page)) {
-                       error = PTR_ERR(page);
-                       goto exit;
-               }
+       error = read_all_xattrs(inode, ipage, &base_addr);
+       if (error)
+               return error;
 
-               base_addr = page_address(page);
-               header = XATTR_HDR(base_addr);
-       }
+       /* find entry with wanted name. */
+       here = __find_xattr(base_addr, index, len, name);
 
-       if (le32_to_cpu(header->h_magic) != F2FS_XATTR_MAGIC) {
-               error = -EIO;
-               goto cleanup;
-       }
+       found = IS_XATTR_LAST_ENTRY(here) ? 0 : 1;
 
-       /* find entry with wanted name. */
-       found = 0;
-       list_for_each_xattr(here, base_addr) {
-               if (here->e_name_index != name_index)
-                       continue;
-               if (here->e_name_len != name_len)
-                       continue;
-               if (!memcmp(here->e_name, name, name_len)) {
-                       found = 1;
-                       break;
-               }
+       if ((flags & XATTR_REPLACE) && !found) {
+               error = -ENODATA;
+               goto exit;
+       } else if ((flags & XATTR_CREATE) && found) {
+               error = -EEXIST;
+               goto exit;
        }
 
        last = here;
-
        while (!IS_XATTR_LAST_ENTRY(last))
                last = XATTR_NEXT_ENTRY(last);
 
-       newsize = XATTR_ALIGN(sizeof(struct f2fs_xattr_entry) +
-                       name_len + value_len);
+       newsize = XATTR_ALIGN(sizeof(struct f2fs_xattr_entry) + len + size);
 
        /* 1. Check space */
        if (value) {
-               /* If value is NULL, it is remove operation.
-                * In case of update operation, we caculate free.
+               int free;
+               /*
+                * If value is NULL, it is remove operation.
+                * In case of update operation, we calculate free.
                 */
-               free = MIN_OFFSET - ((char *)last - (char *)header);
+               free = MIN_OFFSET(inode) - ((char *)last - (char *)base_addr);
                if (found)
-                       free = free - ENTRY_SIZE(here);
+                       free = free + ENTRY_SIZE(here);
 
-               if (free < newsize) {
-                       error = -ENOSPC;
-                       goto cleanup;
+               if (unlikely(free < newsize)) {
+                       error = -E2BIG;
+                       goto exit;
                }
        }
 
        /* 2. Remove old entry */
        if (found) {
-               /* If entry is found, remove old entry.
+               /*
+                * If entry is found, remove old entry.
                 * If not found, remove operation is not needed.
                 */
                struct f2fs_xattr_entry *next = XATTR_NEXT_ENTRY(here);
@@ -414,34 +565,64 @@ int f2fs_setxattr(struct inode *inode, int name_index, const char *name,
                memset(last, 0, oldsize);
        }
 
+       new_hsize = (char *)last - (char *)base_addr;
+
        /* 3. Write new entry */
        if (value) {
-               /* Before we come here, old entry is removed.
-                * We just write new entry. */
-               memset(last, 0, newsize);
-               last->e_name_index = name_index;
-               last->e_name_len = name_len;
-               memcpy(last->e_name, name, name_len);
-               pval = last->e_name + name_len;
-               memcpy(pval, value, value_len);
-               last->e_value_size = cpu_to_le16(value_len);
+               char *pval;
+               /*
+                * Before we come here, old entry is removed.
+                * We just write new entry.
+                */
+               last->e_name_index = index;
+               last->e_name_len = len;
+               memcpy(last->e_name, name, len);
+               pval = last->e_name + len;
+               memcpy(pval, value, size);
+               last->e_value_size = cpu_to_le16(size);
+               new_hsize += newsize;
        }
 
-       set_page_dirty(page);
-       f2fs_put_page(page, 1);
+       error = write_all_xattrs(inode, new_hsize, base_addr, ipage);
+       if (error)
+               goto exit;
 
-       if (is_inode_flag_set(fi, FI_ACL_MODE)) {
-               inode->i_mode = fi->i_acl_mode;
-               inode->i_ctime = CURRENT_TIME;
-               clear_inode_flag(fi, FI_ACL_MODE);
+       if (is_inode_flag_set(inode, FI_ACL_MODE)) {
+               inode->i_mode = F2FS_I(inode)->i_acl_mode;
+               inode->i_ctime = current_time(inode);
+               clear_inode_flag(inode, FI_ACL_MODE);
        }
-       update_inode_page(inode);
-       mutex_unlock_op(sbi, ilock);
-
-       return 0;
-cleanup:
-       f2fs_put_page(page, 1);
+       if (index == F2FS_XATTR_INDEX_ENCRYPTION &&
+                       !strcmp(name, F2FS_XATTR_NAME_ENCRYPTION_CONTEXT))
+               f2fs_set_encrypted_inode(inode);
+       f2fs_mark_inode_dirty_sync(inode, true);
+       if (!error && S_ISDIR(inode->i_mode))
+               set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_CP);
 exit:
-       mutex_unlock_op(sbi, ilock);
+       kzfree(base_addr);
        return error;
 }
+
+int f2fs_setxattr(struct inode *inode, int index, const char *name,
+                               const void *value, size_t size,
+                               struct page *ipage, int flags)
+{
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       int err;
+
+       /* this case is only from init_inode_metadata */
+       if (ipage)
+               return __f2fs_setxattr(inode, index, name, value,
+                                               size, ipage, flags);
+       f2fs_balance_fs(sbi, true);
+
+       f2fs_lock_op(sbi);
+       /* protect xattr_ver */
+       down_write(&F2FS_I(inode)->i_sem);
+       err = __f2fs_setxattr(inode, index, name, value, size, ipage, flags);
+       up_write(&F2FS_I(inode)->i_sem);
+       f2fs_unlock_op(sbi);
+
+       f2fs_update_time(sbi, REQ_TIME);
+       return err;
+}
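
For context, the encryption xattr written above (index F2FS_XATTR_INDEX_ENCRYPTION, name "c") is produced by the fscrypt set_context hook. A minimal sketch of that hook, assuming the f2fs_set_context() helper this series adds elsewhere (it is not part of this hunk):

	static int f2fs_set_context(struct inode *inode, const void *ctx,
					size_t len, void *fs_data)
	{
		/* fs_data carries the inode page from init_inode_metadata,
		 * so the context is written in the same transaction. */
		return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
					F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
					ctx, len, fs_data, XATTR_CREATE);
	}

Passing XATTR_CREATE makes the second context write fail with -EEXIST, matching the flags handling at the top of __f2fs_setxattr().
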
diff --git a/fs/f2fs/xattr.h b/fs/f2fs/xattr.h
index 49c9558305e3b4dae06e16733f150af76dbfe954..f3f8181b547439bfefc1e55a347f86cf52b2aece 100644 (file)
 #define F2FS_XATTR_INDEX_LUSTRE                        5
 #define F2FS_XATTR_INDEX_SECURITY              6
 #define F2FS_XATTR_INDEX_ADVISE                        7
+/* Should be same as EXT4_XATTR_INDEX_ENCRYPTION */
+#define F2FS_XATTR_INDEX_ENCRYPTION            9
+
+#define F2FS_XATTR_NAME_ENCRYPTION_CONTEXT     "c"
 
 struct f2fs_xattr_header {
        __le32  h_magic;        /* magic number for identification */
@@ -51,7 +55,7 @@ struct f2fs_xattr_entry {
 
 #define XATTR_HDR(ptr)         ((struct f2fs_xattr_header *)(ptr))
 #define XATTR_ENTRY(ptr)       ((struct f2fs_xattr_entry *)(ptr))
-#define XATTR_FIRST_ENTRY(ptr) (XATTR_ENTRY(XATTR_HDR(ptr)+1))
+#define XATTR_FIRST_ENTRY(ptr) (XATTR_ENTRY(XATTR_HDR(ptr) + 1))
 #define XATTR_ROUND            (3)
 
 #define XATTR_ALIGN(size)      ((size + XATTR_ROUND) & ~XATTR_ROUND)
@@ -69,17 +73,16 @@ struct f2fs_xattr_entry {
                                !IS_XATTR_LAST_ENTRY(entry);\
                                entry = XATTR_NEXT_ENTRY(entry))
 
+#define MIN_OFFSET(i)  XATTR_ALIGN(inline_xattr_size(i) + PAGE_SIZE -  \
+                               sizeof(struct node_footer) - sizeof(__u32))
 
-#define MIN_OFFSET     XATTR_ALIGN(PAGE_SIZE - \
-                       sizeof(struct node_footer) - \
-                       sizeof(__u32))
-
-#define MAX_VALUE_LEN  (MIN_OFFSET - sizeof(struct f2fs_xattr_header) - \
-                       sizeof(struct f2fs_xattr_entry))
+#define MAX_VALUE_LEN(i)       (MIN_OFFSET(i) -                        \
+                               sizeof(struct f2fs_xattr_header) -      \
+                               sizeof(struct f2fs_xattr_entry))
 
 /*
  * On-disk structure of f2fs_xattr
- * We use only 1 block for xattr.
+ * We use inline xattrs space + 1 block for xattr.
  *
  * +--------------------+
  * | f2fs_xattr_header  |
@@ -112,26 +115,27 @@ extern const struct xattr_handler f2fs_xattr_trusted_handler;
 extern const struct xattr_handler f2fs_xattr_acl_access_handler;
 extern const struct xattr_handler f2fs_xattr_acl_default_handler;
 extern const struct xattr_handler f2fs_xattr_advise_handler;
+extern const struct xattr_handler f2fs_xattr_security_handler;
 
 extern const struct xattr_handler *f2fs_xattr_handlers[];
 
-extern int f2fs_setxattr(struct inode *inode, int name_index, const char *name,
-               const void *value, size_t value_len);
-extern int f2fs_getxattr(struct inode *inode, int name_index, const char *name,
-               void *buffer, size_t buffer_size);
-extern ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer,
-               size_t buffer_size);
-
+extern int f2fs_setxattr(struct inode *, int, const char *,
+                               const void *, size_t, struct page *, int);
+extern int f2fs_getxattr(struct inode *, int, const char *, void *,
+                                               size_t, struct page *);
+extern ssize_t f2fs_listxattr(struct dentry *, char *, size_t);
 #else
 
 #define f2fs_xattr_handlers    NULL
-static inline int f2fs_setxattr(struct inode *inode, int name_index,
-       const char *name, const void *value, size_t value_len)
+static inline int f2fs_setxattr(struct inode *inode, int index,
+               const char *name, const void *value, size_t size,
+               struct page *page, int flags)
 {
        return -EOPNOTSUPP;
 }
-static inline int f2fs_getxattr(struct inode *inode, int name_index,
-               const char *name, void *buffer, size_t buffer_size)
+static inline int f2fs_getxattr(struct inode *inode, int index,
+                       const char *name, void *buffer,
+                       size_t buffer_size, struct page *dpage)
 {
        return -EOPNOTSUPP;
 }
@@ -142,4 +146,14 @@ static inline ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer,
 }
 #endif
 
+#ifdef CONFIG_F2FS_FS_SECURITY
+extern int f2fs_init_security(struct inode *, struct inode *,
+                               const struct qstr *, struct page *);
+#else
+static inline int f2fs_init_security(struct inode *inode, struct inode *dir,
+                               const struct qstr *qstr, struct page *ipage)
+{
+       return 0;
+}
+#endif
 #endif /* __F2FS_XATTR_H__ */
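
The f2fs_init_security() declaration above feeds LSM-provided security xattrs through the same setxattr path at inode creation. A sketch of the usual implementation, assuming the CONFIG_F2FS_FS_SECURITY code in fs/f2fs/xattr.c (added by this series but not shown here):

	#include <linux/security.h>

	static int f2fs_initxattrs(struct inode *inode,
			const struct xattr *xattr_array, void *page)
	{
		const struct xattr *xattr;
		int err = 0;

		/* Store each xattr the LSM hands us; "page" is the inode
		 * page forwarded from init_inode_metadata, so this takes
		 * the ipage fast path in f2fs_setxattr(). */
		for (xattr = xattr_array; xattr->name != NULL; xattr++) {
			err = f2fs_setxattr(inode, F2FS_XATTR_INDEX_SECURITY,
					xattr->name, xattr->value,
					xattr->value_len,
					(struct page *)page, 0);
			if (err < 0)
				break;
		}
		return err;
	}

	int f2fs_init_security(struct inode *inode, struct inode *dir,
				const struct qstr *qstr, struct page *ipage)
	{
		return security_inode_init_security(inode, dir, qstr,
					&f2fs_initxattrs, ipage);
	}
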
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 37caf183e7dc994c54999307559df88279f809e3..0ab0ed8bdaa2d186d018ba9952623a65e2f7a8b2 100644 (file)
@@ -212,6 +212,8 @@ struct dentry_operations {
 #define DCACHE_DENTRY_KILLED   0x100000
 #define DCACHE_WILL_INVALIDATE         0x80000000 /* will be invalidated */
 
+#define DCACHE_ENCRYPTED_WITH_KEY      0x04000000 /* dir is encrypted with a valid key */
+
 extern seqlock_t rename_lock;
 
 static inline int dname_external(struct dentry *dentry)
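
DCACHE_ENCRYPTED_WITH_KEY records whether a dentry was instantiated while the directory's key was available. A condensed sketch of how the new fs/crypto d_revalidate hook consumes the flag (illustrative only; the exact hunk lives in fs/crypto/crypto.c elsewhere in this series):

	static int fscrypt_d_revalidate(struct dentry *dentry,
					unsigned int flags)
	{
		struct dentry *dir;
		int dir_has_key, cached_with_key;

		if (flags & LOOKUP_RCU)
			return -ECHILD;

		dir = dget_parent(dentry);
		if (!dir->d_inode->i_sb->s_cop->is_encrypted(dir->d_inode)) {
			dput(dir);
			return 0;
		}

		spin_lock(&dentry->d_lock);
		cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY;
		spin_unlock(&dentry->d_lock);
		dir_has_key = (dir->d_inode->i_crypt_info != NULL);
		dput(dir);

		/* Force a fresh lookup whenever key availability changed
		 * after the dentry was cached. */
		if ((!cached_with_key && !dentry->d_inode) ||
				(!cached_with_key && dir_has_key) ||
				(cached_with_key && !dir_has_key))
			return 0;
		return 1;
	}
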
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index df6fab82f87e7650bafaa247523713fc3cc4be2f..083172fed2b7397c1a36151790a15a71eec23883 100644 (file)
 #include <linux/types.h>
 
 #define F2FS_SUPER_OFFSET              1024    /* byte-size offset */
-#define F2FS_LOG_SECTOR_SIZE           9       /* 9 bits for 512 byte */
-#define F2FS_LOG_SECTORS_PER_BLOCK     3       /* 4KB: F2FS_BLKSIZE */
+#define F2FS_MIN_LOG_SECTOR_SIZE       9       /* 9 bits for 512 bytes */
+#define F2FS_MAX_LOG_SECTOR_SIZE       12      /* 12 bits for 4096 bytes */
+#define F2FS_LOG_SECTORS_PER_BLOCK     3       /* log number for sector/blk */
 #define F2FS_BLKSIZE                   4096    /* support only 4KB block */
+#define F2FS_BLKSIZE_BITS              12      /* bits for F2FS_BLKSIZE */
 #define F2FS_MAX_EXTENSION             64      /* # of extension entries */
+#define F2FS_BLK_ALIGN(x)      (((x) + F2FS_BLKSIZE - 1) >> F2FS_BLKSIZE_BITS)
 
-#define NULL_ADDR              0x0U
-#define NEW_ADDR               -1U
+#define NULL_ADDR              ((block_t)0)    /* used as block_t addresses */
+#define NEW_ADDR               ((block_t)-1)   /* used as block_t addresses */
+
+#define F2FS_BYTES_TO_BLK(bytes)       ((bytes) >> F2FS_BLKSIZE_BITS)
+#define F2FS_BLK_TO_BYTES(blk)         ((blk) << F2FS_BLKSIZE_BITS)
+
+/* 0, 1(node nid), 2(meta nid) are reserved node ids */
+#define F2FS_RESERVED_NODE_NUM         3
 
 #define F2FS_ROOT_INO(sbi)     (sbi->root_ino_num)
 #define F2FS_NODE_INO(sbi)     (sbi->node_ino_num)
 #define F2FS_META_INO(sbi)     (sbi->meta_ino_num)
 
 /* This flag is used by node and meta inodes, and by recovery */
-#define GFP_F2FS_ZERO  (GFP_NOFS | __GFP_ZERO)
+#define GFP_F2FS_ZERO          (GFP_NOFS | __GFP_ZERO)
+#define GFP_F2FS_HIGH_ZERO     (GFP_NOFS | __GFP_ZERO | __GFP_HIGHMEM)
 
 /*
  * For further optimization on multi-head logs, on-disk layout supports maximum
 #define MAX_ACTIVE_NODE_LOGS   8
 #define MAX_ACTIVE_DATA_LOGS   8
 
+#define VERSION_LEN    256
+#define MAX_VOLUME_NAME                512
+#define MAX_PATH_LEN           64
+#define MAX_DEVICES            8
+
 /*
  * For superblock
  */
+struct f2fs_device {
+       __u8 path[MAX_PATH_LEN];
+       __le32 total_segments;
+} __packed;
+
 struct f2fs_super_block {
        __le32 magic;                   /* Magic Number */
        __le16 major_ver;               /* Major Version */
@@ -72,19 +92,32 @@ struct f2fs_super_block {
        __le32 node_ino;                /* node inode number */
        __le32 meta_ino;                /* meta inode number */
        __u8 uuid[16];                  /* 128-bit uuid for volume */
-       __le16 volume_name[512];        /* volume name */
+       __le16 volume_name[MAX_VOLUME_NAME];    /* volume name */
        __le32 extension_count;         /* # of extensions below */
        __u8 extension_list[F2FS_MAX_EXTENSION][8];     /* extension array */
+       __le32 cp_payload;
+       __u8 version[VERSION_LEN];      /* the kernel version */
+       __u8 init_version[VERSION_LEN]; /* the initial kernel version */
+       __le32 feature;                 /* defined features */
+       __u8 encryption_level;          /* versioning level for encryption */
+       __u8 encrypt_pw_salt[16];       /* Salt used for string2key algorithm */
+       struct f2fs_device devs[MAX_DEVICES];   /* device list */
+       __u8 reserved[327];             /* valid reserved region */
 } __packed;
 
 /*
  * For checkpoint
  */
+#define CP_CRC_RECOVERY_FLAG   0x00000040
+#define CP_FASTBOOT_FLAG       0x00000020
+#define CP_FSCK_FLAG           0x00000010
 #define CP_ERROR_FLAG          0x00000008
 #define CP_COMPACT_SUM_FLAG    0x00000004
 #define CP_ORPHAN_PRESENT_FLAG 0x00000002
 #define CP_UMOUNT_FLAG         0x00000001
 
+#define F2FS_CP_PACKS          2       /* # of checkpoint packs */
+
 struct f2fs_checkpoint {
        __le64 checkpoint_ver;          /* checkpoint block version number */
        __le64 user_block_count;        /* # of user blocks */
@@ -121,6 +154,9 @@ struct f2fs_checkpoint {
  */
 #define F2FS_ORPHANS_PER_BLOCK 1020
 
+#define GET_ORPHAN_BLOCKS(n)   ((n + F2FS_ORPHANS_PER_BLOCK - 1) / \
+                                       F2FS_ORPHANS_PER_BLOCK)
+
 struct f2fs_orphan_block {
        __le32 ino[F2FS_ORPHANS_PER_BLOCK];     /* inode numbers */
        __le32 reserved;        /* reserved */
@@ -135,19 +171,40 @@ struct f2fs_orphan_block {
  */
 struct f2fs_extent {
        __le32 fofs;            /* start file offset of the extent */
-       __le32 blk_addr;        /* start block address of the extent */
+       __le32 blk;             /* start block address of the extent */
        __le32 len;             /* length of the extent */
 } __packed;
 
 #define F2FS_NAME_LEN          255
-#define ADDRS_PER_INODE         923    /* Address Pointers in an Inode */
-#define ADDRS_PER_BLOCK         1018   /* Address Pointers in a Direct Block */
-#define NIDS_PER_BLOCK          1018   /* Node IDs in an Indirect Block */
+#define F2FS_INLINE_XATTR_ADDRS        50      /* 200 bytes for inline xattrs */
+#define DEF_ADDRS_PER_INODE    923     /* Address Pointers in an Inode */
+#define DEF_NIDS_PER_INODE     5       /* Node IDs in an Inode */
+#define ADDRS_PER_INODE(inode) addrs_per_inode(inode)
+#define ADDRS_PER_BLOCK                1018    /* Address Pointers in a Direct Block */
+#define NIDS_PER_BLOCK         1018    /* Node IDs in an Indirect Block */
+
+#define ADDRS_PER_PAGE(page, inode)    \
+       (IS_INODE(page) ? ADDRS_PER_INODE(inode) : ADDRS_PER_BLOCK)
+
+#define        NODE_DIR1_BLOCK         (DEF_ADDRS_PER_INODE + 1)
+#define        NODE_DIR2_BLOCK         (DEF_ADDRS_PER_INODE + 2)
+#define        NODE_IND1_BLOCK         (DEF_ADDRS_PER_INODE + 3)
+#define        NODE_IND2_BLOCK         (DEF_ADDRS_PER_INODE + 4)
+#define        NODE_DIND_BLOCK         (DEF_ADDRS_PER_INODE + 5)
+
+#define F2FS_INLINE_XATTR      0x01    /* file inline xattr flag */
+#define F2FS_INLINE_DATA       0x02    /* file inline data flag */
+#define F2FS_INLINE_DENTRY     0x04    /* file inline dentry flag */
+#define F2FS_DATA_EXIST                0x08    /* file inline data exist flag */
+#define F2FS_INLINE_DOTS       0x10    /* file having implicit dot dentries */
+
+#define MAX_INLINE_DATA                (sizeof(__le32) * (DEF_ADDRS_PER_INODE - \
+                                               F2FS_INLINE_XATTR_ADDRS - 1))
 
 struct f2fs_inode {
        __le16 i_mode;                  /* file mode */
        __u8 i_advise;                  /* file hints */
-       __u8 i_reserved;                /* reserved */
+       __u8 i_inline;                  /* file inline flags */
        __le32 i_uid;                   /* user ID */
        __le32 i_gid;                   /* group ID */
        __le32 i_links;                 /* links count */
@@ -166,13 +223,13 @@ struct f2fs_inode {
        __le32 i_pino;                  /* parent inode number */
        __le32 i_namelen;               /* file name length */
        __u8 i_name[F2FS_NAME_LEN];     /* file name for SPOR */
-       __u8 i_reserved2;               /* for backward compatibility */
+       __u8 i_dir_level;               /* dentry_level for large dir */
 
        struct f2fs_extent i_ext;       /* caching a largest extent */
 
-       __le32 i_addr[ADDRS_PER_INODE]; /* Pointers to data blocks */
+       __le32 i_addr[DEF_ADDRS_PER_INODE];     /* Pointers to data blocks */
 
-       __le32 i_nid[5];                /* direct(2), indirect(2),
+       __le32 i_nid[DEF_NIDS_PER_INODE];       /* direct(2), indirect(2),
                                                double_indirect(1) node id */
 } __packed;
 
@@ -191,6 +248,8 @@ enum {
        OFFSET_BIT_SHIFT
 };
 
+#define OFFSET_BIT_MASK                (0x07)  /* (0x01 << OFFSET_BIT_SHIFT) - 1 */
+
 struct node_footer {
        __le32 nid;             /* node id */
        __le32 ino;             /* inode number */
@@ -295,7 +354,7 @@ struct f2fs_summary {
 
 struct summary_footer {
        unsigned char entry_type;       /* SUM_TYPE_XXX */
-       __u32 check_sum;                /* summary checksum */
+       __le32 check_sum;               /* summary checksum */
 } __packed;
 
 #define SUM_JOURNAL_SIZE       (F2FS_BLKSIZE - SUM_FOOTER_SIZE -\
@@ -308,6 +367,12 @@ struct summary_footer {
                                sizeof(struct sit_journal_entry))
 #define SIT_JOURNAL_RESERVED   ((SUM_JOURNAL_SIZE - 2) %\
                                sizeof(struct sit_journal_entry))
+
+/* Reserved area should make the size of f2fs_extra_info equal to
+ * that of nat_journal and sit_journal.
+ */
+#define EXTRA_INFO_RESERVED    (SUM_JOURNAL_SIZE - 2 - 8)
+
 /*
  * frequently updated NAT/SIT entries can be stored in the spare area in
  * summary blocks
@@ -337,18 +402,28 @@ struct sit_journal {
        __u8 reserved[SIT_JOURNAL_RESERVED];
 } __packed;
 
-/* 4KB-sized summary block structure */
-struct f2fs_summary_block {
-       struct f2fs_summary entries[ENTRIES_IN_SUM];
+struct f2fs_extra_info {
+       __le64 kbytes_written;
+       __u8 reserved[EXTRA_INFO_RESERVED];
+} __packed;
+
+struct f2fs_journal {
        union {
                __le16 n_nats;
                __le16 n_sits;
        };
-       /* spare area is used by NAT or SIT journals */
+       /* spare area is used by NAT or SIT journals or extra info */
        union {
                struct nat_journal nat_j;
                struct sit_journal sit_j;
+               struct f2fs_extra_info info;
        };
+} __packed;
+
+/* 4KB-sized summary block structure */
+struct f2fs_summary_block {
+       struct f2fs_summary entries[ENTRIES_IN_SUM];
+       struct f2fs_journal journal;
        struct summary_footer footer;
 } __packed;
 
@@ -368,12 +443,25 @@ typedef __le32    f2fs_hash_t;
 
 #define GET_DENTRY_SLOTS(x)    ((x + F2FS_SLOT_LEN - 1) >> F2FS_SLOT_LEN_BITS)
 
-/* the number of dentry in a block */
-#define NR_DENTRY_IN_BLOCK     214
-
 /* MAX level for dir lookup */
 #define MAX_DIR_HASH_DEPTH     63
 
+/* MAX buckets in one level of dir */
+#define MAX_DIR_BUCKETS                (1 << ((MAX_DIR_HASH_DEPTH / 2) - 1))
+
+/*
+ * space utilization of regular dentry and inline dentry
+ *             regular dentry                  inline dentry
+ * bitmap      1 * 27 = 27                     1 * 23 = 23
+ * reserved    1 * 3 = 3                       1 * 7 = 7
+ * dentry      11 * 214 = 2354                 11 * 182 = 2002
+ * filename    8 * 214 = 1712                  8 * 182 = 1456
+ * total       4096                            3488
+ *
+ * Note: there is more reserved space in an inline dentry than in a regular
+ * dentry; when converting an inline dentry we should handle this carefully.
+ */
+#define NR_DENTRY_IN_BLOCK     214     /* the number of dentry in a block */
 #define SIZE_OF_DIR_ENTRY      11      /* by byte */
 #define SIZE_OF_DENTRY_BITMAP  ((NR_DENTRY_IN_BLOCK + BITS_PER_BYTE - 1) / \
                                        BITS_PER_BYTE)
@@ -398,6 +486,24 @@ struct f2fs_dentry_block {
        __u8 filename[NR_DENTRY_IN_BLOCK][F2FS_SLOT_LEN];
 } __packed;
 
+/* for inline dir */
+#define NR_INLINE_DENTRY       (MAX_INLINE_DATA * BITS_PER_BYTE / \
+                               ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
+                               BITS_PER_BYTE + 1))
+#define INLINE_DENTRY_BITMAP_SIZE      ((NR_INLINE_DENTRY + \
+                                       BITS_PER_BYTE - 1) / BITS_PER_BYTE)
+#define INLINE_RESERVED_SIZE   (MAX_INLINE_DATA - \
+                               ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
+                               NR_INLINE_DENTRY + INLINE_DENTRY_BITMAP_SIZE))
+
+/* inline directory entry structure */
+struct f2fs_inline_dentry {
+       __u8 dentry_bitmap[INLINE_DENTRY_BITMAP_SIZE];
+       __u8 reserved[INLINE_RESERVED_SIZE];
+       struct f2fs_dir_entry dentry[NR_INLINE_DENTRY];
+       __u8 filename[NR_INLINE_DENTRY][F2FS_SLOT_LEN];
+} __packed;
+
 /* file types used in inode_info->flags */
 enum {
        F2FS_FT_UNKNOWN,
@@ -411,4 +517,6 @@ enum {
        F2FS_FT_MAX
 };
 
+#define S_SHIFT 12
+
 #endif  /* _LINUX_F2FS_FS_H */
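
As a sanity check, the inline dentry constants above reduce to exactly the space utilization table quoted earlier. The arithmetic follows from the definitions; the BUILD_BUG_ON lines are merely one way to assert it (they must sit inside a function, e.g. an init routine):

	/*
	 * MAX_INLINE_DATA           = 4 * (923 - 50 - 1)           = 3488
	 * NR_INLINE_DENTRY          = 3488 * 8 / ((11 + 8) * 8 + 1)
	 *                           = 27904 / 153                   = 182
	 * INLINE_DENTRY_BITMAP_SIZE = (182 + 7) / 8                 = 23
	 * INLINE_RESERVED_SIZE      = 3488 - ((11 + 8) * 182 + 23)  = 7
	 *
	 * i.e. bitmap 23 + reserved 7 + dentry 2002 + filename 1456 = 3488.
	 */
	BUILD_BUG_ON(MAX_INLINE_DATA != 3488);
	BUILD_BUG_ON(NR_INLINE_DENTRY != 182);
	BUILD_BUG_ON(INLINE_RESERVED_SIZE != 7);
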
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 87229a3950f4dccbc72ed3353a25ac7a39d419b7..949eeb77d12fc01cae16b99a369f804f8da55fb3 100644 (file)
@@ -45,6 +45,8 @@ struct vfsmount;
 struct cred;
 struct swap_info_struct;
 struct seq_file;
+struct fscrypt_info;
+struct fscrypt_operations;
 
 extern void __init inode_init(void);
 extern void __init inode_init_early(void);
@@ -626,6 +628,11 @@ struct inode {
 #ifdef CONFIG_IMA
        atomic_t                i_readcount; /* struct files open RO */
 #endif
+
+#ifdef CONFIG_FS_ENCRYPTION
+       struct fscrypt_info     *i_crypt_info;
+#endif
+
        void                    *i_private; /* fs or device private pointer */
 };
 
@@ -1275,6 +1282,8 @@ struct super_block {
 #endif
        const struct xattr_handler **s_xattr;
 
+       const struct fscrypt_operations *s_cop;
+
        struct list_head        s_inodes;       /* all inodes */
        struct hlist_bl_head    s_anon;         /* anonymous dentries for (nfs) exporting */
 #ifdef CONFIG_SMP
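
The new s_cop pointer is how a filesystem opts into fscrypto at mount time. A minimal sketch mirroring what fs/f2fs/super.c does in this series (the handler names are the f2fs ones and are not part of this hunk):

	static struct fscrypt_operations f2fs_cryptops = {
		.get_context	= f2fs_get_context,
		.set_context	= f2fs_set_context,
		.is_encrypted	= f2fs_encrypted_inode,
		.empty_dir	= f2fs_empty_dir,
		.max_namelen	= f2fs_max_namelen,
	};

	/* in f2fs_fill_super(): */
	sb->s_cop = &f2fs_cryptops;
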
diff --git a/include/linux/fscrypto.h b/include/linux/fscrypto.h
new file mode 100644 (file)
index 0000000..770e5a5
--- /dev/null
@@ -0,0 +1,435 @@
+/*
+ * General per-file encryption definition
+ *
+ * Copyright (C) 2015, Google, Inc.
+ *
+ * Written by Michael Halcrow, 2015.
+ * Modified by Jaegeuk Kim, 2015.
+ */
+
+#ifndef _LINUX_FSCRYPTO_H
+#define _LINUX_FSCRYPTO_H
+
+#include <linux/key.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/bio.h>
+#include <linux/dcache.h>
+#include <uapi/linux/fs.h>
+
+#define FS_KEY_DERIVATION_NONCE_SIZE           16
+#define FS_ENCRYPTION_CONTEXT_FORMAT_V1                1
+
+#define FS_POLICY_FLAGS_PAD_4          0x00
+#define FS_POLICY_FLAGS_PAD_8          0x01
+#define FS_POLICY_FLAGS_PAD_16         0x02
+#define FS_POLICY_FLAGS_PAD_32         0x03
+#define FS_POLICY_FLAGS_PAD_MASK       0x03
+#define FS_POLICY_FLAGS_VALID          0x03
+
+/* Encryption algorithms */
+#define FS_ENCRYPTION_MODE_INVALID             0
+#define FS_ENCRYPTION_MODE_AES_256_XTS         1
+#define FS_ENCRYPTION_MODE_AES_256_GCM         2
+#define FS_ENCRYPTION_MODE_AES_256_CBC         3
+#define FS_ENCRYPTION_MODE_AES_256_CTS         4
+
+/**
+ * Encryption context for inode
+ *
+ * Protector format:
+ *  1 byte: Protector format (1 = this version)
+ *  1 byte: File contents encryption mode
+ *  1 byte: File names encryption mode
+ *  1 byte: Flags
+ *  8 bytes: Master Key descriptor
+ *  16 bytes: Encryption Key derivation nonce
+ */
+struct fscrypt_context {
+       u8 format;
+       u8 contents_encryption_mode;
+       u8 filenames_encryption_mode;
+       u8 flags;
+       u8 master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE];
+       u8 nonce[FS_KEY_DERIVATION_NONCE_SIZE];
+} __packed;
+
+/* Encryption parameters */
+#define FS_XTS_TWEAK_SIZE              16
+#define FS_AES_128_ECB_KEY_SIZE                16
+#define FS_AES_256_GCM_KEY_SIZE                32
+#define FS_AES_256_CBC_KEY_SIZE                32
+#define FS_AES_256_CTS_KEY_SIZE                32
+#define FS_AES_256_XTS_KEY_SIZE                64
+#define FS_MAX_KEY_SIZE                        64
+
+#define FS_KEY_DESC_PREFIX             "fscrypt:"
+#define FS_KEY_DESC_PREFIX_SIZE                8
+
+/* This is passed in from userspace into the kernel keyring */
+struct fscrypt_key {
+       u32 mode;
+       u8 raw[FS_MAX_KEY_SIZE];
+       u32 size;
+} __packed;
+
+struct fscrypt_info {
+       u8 ci_data_mode;
+       u8 ci_filename_mode;
+       u8 ci_flags;
+       struct crypto_ablkcipher *ci_ctfm;
+       struct key *ci_keyring_key;
+       u8 ci_master_key[FS_KEY_DESCRIPTOR_SIZE];
+};
+
+#define FS_CTX_REQUIRES_FREE_ENCRYPT_FL                0x00000001
+#define FS_WRITE_PATH_FL                       0x00000002
+
+struct fscrypt_ctx {
+       union {
+               struct {
+                       struct page *bounce_page;       /* Ciphertext page */
+                       struct page *control_page;      /* Original page  */
+               } w;
+               struct {
+                       struct bio *bio;
+                       struct work_struct work;
+               } r;
+               struct list_head free_list;     /* Free list */
+       };
+       u8 flags;                               /* Flags */
+       u8 mode;                                /* Encryption mode for tfm */
+};
+
+struct fscrypt_completion_result {
+       struct completion completion;
+       int res;
+};
+
+#define DECLARE_FS_COMPLETION_RESULT(ecr) \
+       struct fscrypt_completion_result ecr = { \
+               COMPLETION_INITIALIZER((ecr).completion), 0 }
+
+static inline int fscrypt_key_size(int mode)
+{
+       switch (mode) {
+       case FS_ENCRYPTION_MODE_AES_256_XTS:
+               return FS_AES_256_XTS_KEY_SIZE;
+       case FS_ENCRYPTION_MODE_AES_256_GCM:
+               return FS_AES_256_GCM_KEY_SIZE;
+       case FS_ENCRYPTION_MODE_AES_256_CBC:
+               return FS_AES_256_CBC_KEY_SIZE;
+       case FS_ENCRYPTION_MODE_AES_256_CTS:
+               return FS_AES_256_CTS_KEY_SIZE;
+       default:
+               BUG();
+       }
+       return 0;
+}
+
+#define FS_FNAME_NUM_SCATTER_ENTRIES   4
+#define FS_CRYPTO_BLOCK_SIZE           16
+#define FS_FNAME_CRYPTO_DIGEST_SIZE    32
+
+/**
+ * For encrypted symlinks, the ciphertext length is stored at the beginning
+ * of the string in little-endian format.
+ */
+struct fscrypt_symlink_data {
+       __le16 len;
+       char encrypted_path[1];
+} __packed;
+
+/**
+ * This function is used to calculate the disk space required to
+ * store a filename of length l in encrypted symlink format.
+ */
+static inline u32 fscrypt_symlink_data_len(u32 l)
+{
+       if (l < FS_CRYPTO_BLOCK_SIZE)
+               l = FS_CRYPTO_BLOCK_SIZE;
+       return (l + sizeof(struct fscrypt_symlink_data) - 1);
+}
+
+struct fscrypt_str {
+       unsigned char *name;
+       u32 len;
+};
+
+struct fscrypt_name {
+       const struct qstr *usr_fname;
+       struct fscrypt_str disk_name;
+       u32 hash;
+       u32 minor_hash;
+       struct fscrypt_str crypto_buf;
+};
+
+#define FSTR_INIT(n, l)                { .name = n, .len = l }
+#define FSTR_TO_QSTR(f)                QSTR_INIT((f)->name, (f)->len)
+#define fname_name(p)          ((p)->disk_name.name)
+#define fname_len(p)           ((p)->disk_name.len)
+
+/*
+ * crypto operations for filesystems
+ */
+struct fscrypt_operations {
+       int (*get_context)(struct inode *, void *, size_t);
+       int (*key_prefix)(struct inode *, u8 **);
+       int (*prepare_context)(struct inode *);
+       int (*set_context)(struct inode *, const void *, size_t, void *);
+       int (*dummy_context)(struct inode *);
+       bool (*is_encrypted)(struct inode *);
+       bool (*empty_dir)(struct inode *);
+       unsigned (*max_namelen)(struct inode *);
+};
+
+static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
+{
+       if (inode->i_sb->s_cop->dummy_context &&
+                               inode->i_sb->s_cop->dummy_context(inode))
+               return true;
+       return false;
+}
+
+static inline bool fscrypt_valid_contents_enc_mode(u32 mode)
+{
+       return (mode == FS_ENCRYPTION_MODE_AES_256_XTS);
+}
+
+static inline bool fscrypt_valid_filenames_enc_mode(u32 mode)
+{
+       return (mode == FS_ENCRYPTION_MODE_AES_256_CTS);
+}
+
+static inline u32 fscrypt_validate_encryption_key_size(u32 mode, u32 size)
+{
+       if (size == fscrypt_key_size(mode))
+               return size;
+       return 0;
+}
+
+static inline bool fscrypt_is_dot_dotdot(const struct qstr *str)
+{
+       if (str->len == 1 && str->name[0] == '.')
+               return true;
+
+       if (str->len == 2 && str->name[0] == '.' && str->name[1] == '.')
+               return true;
+
+       return false;
+}
+
+static inline struct page *fscrypt_control_page(struct page *page)
+{
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+       return ((struct fscrypt_ctx *)page_private(page))->w.control_page;
+#else
+       WARN_ON_ONCE(1);
+       return ERR_PTR(-EINVAL);
+#endif
+}
+
+static inline int fscrypt_has_encryption_key(struct inode *inode)
+{
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+       return (inode->i_crypt_info != NULL);
+#else
+       return 0;
+#endif
+}
+
+static inline void fscrypt_set_encrypted_dentry(struct dentry *dentry)
+{
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+       spin_lock(&dentry->d_lock);
+       dentry->d_flags |= DCACHE_ENCRYPTED_WITH_KEY;
+       spin_unlock(&dentry->d_lock);
+#endif
+}
+
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+extern const struct dentry_operations fscrypt_d_ops;
+#endif
+
+static inline void fscrypt_set_d_op(struct dentry *dentry)
+{
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+       d_set_d_op(dentry, &fscrypt_d_ops);
+#endif
+}
+
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+/* crypto.c */
+extern struct kmem_cache *fscrypt_info_cachep;
+int fscrypt_initialize(void);
+
+extern struct fscrypt_ctx *fscrypt_get_ctx(struct inode *, gfp_t);
+extern void fscrypt_release_ctx(struct fscrypt_ctx *);
+extern struct page *fscrypt_encrypt_page(struct inode *, struct page *, gfp_t);
+extern int fscrypt_decrypt_page(struct page *);
+extern void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *, struct bio *);
+extern void fscrypt_pullback_bio_page(struct page **, bool);
+extern void fscrypt_restore_control_page(struct page *);
+extern int fscrypt_zeroout_range(struct inode *, pgoff_t, sector_t,
+                                               unsigned int);
+/* policy.c */
+extern int fscrypt_process_policy(struct file *,
+                                       const struct fscrypt_policy *);
+extern int fscrypt_get_policy(struct inode *, struct fscrypt_policy *);
+extern int fscrypt_has_permitted_context(struct inode *, struct inode *);
+extern int fscrypt_inherit_context(struct inode *, struct inode *,
+                                       void *, bool);
+/* keyinfo.c */
+extern int get_crypt_info(struct inode *);
+extern int fscrypt_get_encryption_info(struct inode *);
+extern void fscrypt_put_encryption_info(struct inode *, struct fscrypt_info *);
+
+/* fname.c */
+extern int fscrypt_setup_filename(struct inode *, const struct qstr *,
+                               int lookup, struct fscrypt_name *);
+extern void fscrypt_free_filename(struct fscrypt_name *);
+extern u32 fscrypt_fname_encrypted_size(struct inode *, u32);
+extern int fscrypt_fname_alloc_buffer(struct inode *, u32,
+                               struct fscrypt_str *);
+extern void fscrypt_fname_free_buffer(struct fscrypt_str *);
+extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32,
+                       const struct fscrypt_str *, struct fscrypt_str *);
+extern int fscrypt_fname_usr_to_disk(struct inode *, const struct qstr *,
+                       struct fscrypt_str *);
+#endif
+
+/* crypto.c */
+static inline struct fscrypt_ctx *fscrypt_notsupp_get_ctx(struct inode *i,
+                                                       gfp_t f)
+{
+       return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void fscrypt_notsupp_release_ctx(struct fscrypt_ctx *c)
+{
+       return;
+}
+
+static inline struct page *fscrypt_notsupp_encrypt_page(struct inode *i,
+                                               struct page *p, gfp_t f)
+{
+       return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline int fscrypt_notsupp_decrypt_page(struct page *p)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline void fscrypt_notsupp_decrypt_bio_pages(struct fscrypt_ctx *c,
+                                               struct bio *b)
+{
+       return;
+}
+
+static inline void fscrypt_notsupp_pullback_bio_page(struct page **p, bool b)
+{
+       return;
+}
+
+static inline void fscrypt_notsupp_restore_control_page(struct page *p)
+{
+       return;
+}
+
+static inline int fscrypt_notsupp_zeroout_range(struct inode *i, pgoff_t p,
+                                       sector_t s, unsigned int f)
+{
+       return -EOPNOTSUPP;
+}
+
+/* policy.c */
+static inline int fscrypt_notsupp_process_policy(struct file *f,
+                               const struct fscrypt_policy *p)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_notsupp_get_policy(struct inode *i,
+                               struct fscrypt_policy *p)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_notsupp_has_permitted_context(struct inode *p,
+                               struct inode *i)
+{
+       return 0;
+}
+
+static inline int fscrypt_notsupp_inherit_context(struct inode *p,
+                               struct inode *i, void *v, bool b)
+{
+       return -EOPNOTSUPP;
+}
+
+/* keyinfo.c */
+static inline int fscrypt_notsupp_get_encryption_info(struct inode *i)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline void fscrypt_notsupp_put_encryption_info(struct inode *i,
+                                       struct fscrypt_info *f)
+{
+       return;
+}
+
+ /* fname.c */
+static inline int fscrypt_notsupp_setup_filename(struct inode *dir,
+                       const struct qstr *iname,
+                       int lookup, struct fscrypt_name *fname)
+{
+       if (dir->i_sb->s_cop->is_encrypted(dir))
+               return -EOPNOTSUPP;
+
+       memset(fname, 0, sizeof(struct fscrypt_name));
+       fname->usr_fname = iname;
+       fname->disk_name.name = (unsigned char *)iname->name;
+       fname->disk_name.len = iname->len;
+       return 0;
+}
+
+static inline void fscrypt_notsupp_free_filename(struct fscrypt_name *fname)
+{
+       return;
+}
+
+static inline u32 fscrypt_notsupp_fname_encrypted_size(struct inode *i, u32 s)
+{
+       /* never happens */
+       WARN_ON(1);
+       return 0;
+}
+
+static inline int fscrypt_notsupp_fname_alloc_buffer(struct inode *inode,
+                               u32 ilen, struct fscrypt_str *crypto_str)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline void fscrypt_notsupp_fname_free_buffer(struct fscrypt_str *c)
+{
+       return;
+}
+
+static inline int fscrypt_notsupp_fname_disk_to_usr(struct inode *inode,
+                       u32 hash, u32 minor_hash,
+                       const struct fscrypt_str *iname,
+                       struct fscrypt_str *oname)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_notsupp_fname_usr_to_disk(struct inode *inode,
+                       const struct qstr *iname,
+                       struct fscrypt_str *oname)
+{
+       return -EOPNOTSUPP;
+}
+#endif /* _LINUX_FSCRYPTO_H */
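
The *_notsupp stubs above exist so that a filesystem can alias every fscrypt entry point to either the real routine or the stub at build time, keeping call sites free of #ifdef CONFIG_F2FS_FS_ENCRYPTION. A sketch of the pattern used in fs/f2fs/f2fs.h (only a few of the aliases shown):

	#ifdef CONFIG_F2FS_FS_ENCRYPTION
	#define fscrypt_get_ctx			fscrypt_get_ctx
	#define fscrypt_decrypt_page		fscrypt_decrypt_page
	#define fscrypt_setup_filename		fscrypt_setup_filename
	#else
	#define fscrypt_get_ctx			fscrypt_notsupp_get_ctx
	#define fscrypt_decrypt_page		fscrypt_notsupp_decrypt_page
	#define fscrypt_setup_filename		fscrypt_notsupp_setup_filename
	#endif
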
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 52ae54828eda4eb905023124a9b2d4c5cb500226..0543b6424312545cfd3da254a2392bbadb1d524b 100644 (file)
                { NODE,         "NODE" },                               \
                { DATA,         "DATA" },                               \
                { META,         "META" },                               \
-               { META_FLUSH,   "META_FLUSH" })
-
-#define show_bio_type(type)                                            \
-       __print_symbolic(type,                                          \
-               { READ,         "READ" },                               \
-               { READA,        "READAHEAD" },                          \
-               { READ_SYNC,    "READ_SYNC" },                          \
-               { WRITE,        "WRITE" },                              \
-               { WRITE_SYNC,   "WRITE_SYNC" },                         \
-               { WRITE_FLUSH,  "WRITE_FLUSH" },                        \
-               { WRITE_FUA,    "WRITE_FUA" })
+               { META_FLUSH,   "META_FLUSH" },                         \
+               { INMEM,        "INMEM" },                              \
+               { INMEM_DROP,   "INMEM_DROP" },                         \
+               { INMEM_REVOKE, "INMEM_REVOKE" },                       \
+               { IPU,          "IN-PLACE" },                           \
+               { OPU,          "OUT-OF-PLACE" })
+
+#define F2FS_BIO_MASK(t)       (t & (READA | WRITE_FLUSH_FUA))
+#define F2FS_BIO_EXTRA_MASK(t) (t & (REQ_META | REQ_PRIO))
+
+#define show_bio_type(type)    show_bio_base(type), show_bio_extra(type)
+
+#define show_bio_base(type)                                            \
+       __print_symbolic(F2FS_BIO_MASK(type),                           \
+               { READ,                 "READ" },                       \
+               { READA,                "READAHEAD" },                  \
+               { READ_SYNC,            "READ_SYNC" },                  \
+               { WRITE,                "WRITE" },                      \
+               { WRITE_SYNC,           "WRITE_SYNC" },                 \
+               { WRITE_FLUSH,          "WRITE_FLUSH" },                \
+               { WRITE_FUA,            "WRITE_FUA" },                  \
+               { WRITE_FLUSH_FUA,      "WRITE_FLUSH_FUA" })
+
+#define show_bio_extra(type)                                           \
+       __print_symbolic(F2FS_BIO_EXTRA_MASK(type),                     \
+               { REQ_META,             "(M)" },                        \
+               { REQ_PRIO,             "(P)" },                        \
+               { REQ_META | REQ_PRIO,  "(MP)" },                       \
+               { 0, " \b" })
 
 #define show_data_type(type)                                           \
        __print_symbolic(type,                                          \
                { CURSEG_COLD_NODE,     "Cold NODE" },                  \
                { NO_CHECK_TYPE,        "No TYPE" })
 
+#define show_file_type(type)                                           \
+       __print_symbolic(type,                                          \
+               { 0,            "FILE" },                               \
+               { 1,            "DIR" })
+
 #define show_gc_type(type)                                             \
        __print_symbolic(type,                                          \
                { FG_GC,        "Foreground GC" },                      \
                { GC_GREEDY,    "Greedy" },                             \
                { GC_CB,        "Cost-Benefit" })
 
+#define show_cpreason(type)                                            \
+       __print_symbolic(type,                                          \
+               { CP_UMOUNT,    "Umount" },                             \
+               { CP_FASTBOOT,  "Fastboot" },                           \
+               { CP_SYNC,      "Sync" },                               \
+               { CP_RECOVERY,  "Recovery" },                           \
+               { CP_DISCARD,   "Discard" })
+
 struct victim_sel_policy;
+struct f2fs_map_blocks;
 
 DECLARE_EVENT_CLASS(f2fs__inode,
 
@@ -124,14 +156,14 @@ DEFINE_EVENT(f2fs__inode, f2fs_sync_file_enter,
 
 TRACE_EVENT(f2fs_sync_file_exit,
 
-       TP_PROTO(struct inode *inode, bool need_cp, int datasync, int ret),
+       TP_PROTO(struct inode *inode, int need_cp, int datasync, int ret),
 
        TP_ARGS(inode, need_cp, datasync, ret),
 
        TP_STRUCT__entry(
                __field(dev_t,  dev)
                __field(ino_t,  ino)
-               __field(bool,   need_cp)
+               __field(int,    need_cp)
                __field(int,    datasync)
                __field(int,    ret)
        ),
@@ -166,7 +198,7 @@ TRACE_EVENT(f2fs_sync_fs,
 
        TP_fast_assign(
                __entry->dev    = sb->s_dev;
-               __entry->dirty  = F2FS_SB(sb)->s_dirty;
+               __entry->dirty  = is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY);
                __entry->wait   = wait;
        ),
 
@@ -416,69 +448,64 @@ TRACE_EVENT(f2fs_truncate_partial_nodes,
                __entry->err)
 );
 
-TRACE_EVENT_CONDITION(f2fs_readpage,
+TRACE_EVENT(f2fs_map_blocks,
+       TP_PROTO(struct inode *inode, struct f2fs_map_blocks *map, int ret),
 
-       TP_PROTO(struct page *page, sector_t blkaddr, int type),
-
-       TP_ARGS(page, blkaddr, type),
-
-       TP_CONDITION(page->mapping),
+       TP_ARGS(inode, map, ret),
 
        TP_STRUCT__entry(
                __field(dev_t,  dev)
                __field(ino_t,  ino)
-               __field(pgoff_t,        index)
-               __field(sector_t,       blkaddr)
-               __field(int,    type)
+               __field(block_t,        m_lblk)
+               __field(block_t,        m_pblk)
+               __field(unsigned int,   m_len)
+               __field(int,    ret)
        ),
 
        TP_fast_assign(
-               __entry->dev            = page->mapping->host->i_sb->s_dev;
-               __entry->ino            = page->mapping->host->i_ino;
-               __entry->index          = page->index;
-               __entry->blkaddr        = blkaddr;
-               __entry->type           = type;
+               __entry->dev            = inode->i_sb->s_dev;
+               __entry->ino            = inode->i_ino;
+               __entry->m_lblk         = map->m_lblk;
+               __entry->m_pblk         = map->m_pblk;
+               __entry->m_len          = map->m_len;
+               __entry->ret            = ret;
        ),
 
-       TP_printk("dev = (%d,%d), ino = %lu, page_index = 0x%lx, "
-               "blkaddr = 0x%llx, bio_type = %s",
+       TP_printk("dev = (%d,%d), ino = %lu, file offset = %llu, "
+               "start blkaddr = 0x%llx, len = 0x%llx, err = %d",
                show_dev_ino(__entry),
-               (unsigned long)__entry->index,
-               (unsigned long long)__entry->blkaddr,
-               show_bio_type(__entry->type))
+               (unsigned long long)__entry->m_lblk,
+               (unsigned long long)__entry->m_pblk,
+               (unsigned long long)__entry->m_len,
+               __entry->ret)
 );
 
-TRACE_EVENT(f2fs_get_data_block,
-       TP_PROTO(struct inode *inode, sector_t iblock,
-                               struct buffer_head *bh, int ret),
+TRACE_EVENT(f2fs_background_gc,
+
+       TP_PROTO(struct super_block *sb, long wait_ms,
+                       unsigned int prefree, unsigned int free),
 
-       TP_ARGS(inode, iblock, bh, ret),
+       TP_ARGS(sb, wait_ms, prefree, free),
 
        TP_STRUCT__entry(
                __field(dev_t,  dev)
-               __field(ino_t,  ino)
-               __field(sector_t,       iblock)
-               __field(sector_t,       bh_start)
-               __field(size_t, bh_size)
-               __field(int,    ret)
+               __field(long,   wait_ms)
+               __field(unsigned int,   prefree)
+               __field(unsigned int,   free)
        ),
 
        TP_fast_assign(
-               __entry->dev            = inode->i_sb->s_dev;
-               __entry->ino            = inode->i_ino;
-               __entry->iblock         = iblock;
-               __entry->bh_start       = bh->b_blocknr;
-               __entry->bh_size        = bh->b_size;
-               __entry->ret            = ret;
+               __entry->dev            = sb->s_dev;
+               __entry->wait_ms        = wait_ms;
+               __entry->prefree        = prefree;
+               __entry->free           = free;
        ),
 
-       TP_printk("dev = (%d,%d), ino = %lu, file offset = %llu, "
-               "start blkaddr = 0x%llx, len = 0x%llx bytes, err = %d",
-               show_dev_ino(__entry),
-               (unsigned long long)__entry->iblock,
-               (unsigned long long)__entry->bh_start,
-               (unsigned long long)__entry->bh_size,
-               __entry->ret)
+       TP_printk("dev = (%d,%d), wait_ms = %ld, prefree = %u, free = %u",
+               show_dev(__entry),
+               __entry->wait_ms,
+               __entry->prefree,
+               __entry->free)
 );
 
 TRACE_EVENT(f2fs_get_victim,
@@ -569,113 +596,704 @@ TRACE_EVENT(f2fs_fallocate,
                __entry->ret)
 );
 
-TRACE_EVENT(f2fs_reserve_new_block,
+TRACE_EVENT(f2fs_direct_IO_enter,
+
+       TP_PROTO(struct inode *inode, loff_t offset, unsigned long len, int rw),
+
+       TP_ARGS(inode, offset, len, rw),
+
+       TP_STRUCT__entry(
+               __field(dev_t,  dev)
+               __field(ino_t,  ino)
+               __field(loff_t, pos)
+               __field(unsigned long,  len)
+               __field(int,    rw)
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = inode->i_sb->s_dev;
+               __entry->ino    = inode->i_ino;
+               __entry->pos    = offset;
+               __entry->len    = len;
+               __entry->rw     = rw;
+       ),
+
+       TP_printk("dev = (%d,%d), ino = %lu pos = %lld len = %lu rw = %d",
+               show_dev_ino(__entry),
+               __entry->pos,
+               __entry->len,
+               __entry->rw)
+);
+
+TRACE_EVENT(f2fs_direct_IO_exit,
+
+       TP_PROTO(struct inode *inode, loff_t offset, unsigned long len,
+                int rw, int ret),
+
+       TP_ARGS(inode, offset, len, rw, ret),
+
+       TP_STRUCT__entry(
+               __field(dev_t,  dev)
+               __field(ino_t,  ino)
+               __field(loff_t, pos)
+               __field(unsigned long,  len)
+               __field(int,    rw)
+               __field(int,    ret)
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = inode->i_sb->s_dev;
+               __entry->ino    = inode->i_ino;
+               __entry->pos    = offset;
+               __entry->len    = len;
+               __entry->rw     = rw;
+               __entry->ret    = ret;
+       ),
+
+       TP_printk("dev = (%d,%d), ino = %lu pos = %lld len = %lu "
+               "rw = %d ret = %d",
+               show_dev_ino(__entry),
+               __entry->pos,
+               __entry->len,
+               __entry->rw,
+               __entry->ret)
+);
+
+TRACE_EVENT(f2fs_reserve_new_blocks,
 
-       TP_PROTO(struct inode *inode, nid_t nid, unsigned int ofs_in_node),
+       TP_PROTO(struct inode *inode, nid_t nid, unsigned int ofs_in_node,
+                                                       blkcnt_t count),
 
-       TP_ARGS(inode, nid, ofs_in_node),
+       TP_ARGS(inode, nid, ofs_in_node, count),
 
        TP_STRUCT__entry(
                __field(dev_t,  dev)
                __field(nid_t, nid)
                __field(unsigned int, ofs_in_node)
+               __field(blkcnt_t, count)
        ),
 
        TP_fast_assign(
                __entry->dev    = inode->i_sb->s_dev;
                __entry->nid    = nid;
                __entry->ofs_in_node = ofs_in_node;
+               __entry->count = count;
        ),
 
-       TP_printk("dev = (%d,%d), nid = %u, ofs_in_node = %u",
+       TP_printk("dev = (%d,%d), nid = %u, ofs_in_node = %u, count = %llu",
                show_dev(__entry),
                (unsigned int)__entry->nid,
-               __entry->ofs_in_node)
+               __entry->ofs_in_node,
+               (unsigned long long)__entry->count)
 );
 
-TRACE_EVENT(f2fs_do_submit_bio,
+DECLARE_EVENT_CLASS(f2fs__submit_page_bio,
 
-       TP_PROTO(struct super_block *sb, int btype, bool sync, struct bio *bio),
+       TP_PROTO(struct page *page, struct f2fs_io_info *fio),
 
-       TP_ARGS(sb, btype, sync, bio),
+       TP_ARGS(page, fio),
+
+       TP_STRUCT__entry(
+               __field(dev_t, dev)
+               __field(ino_t, ino)
+               __field(pgoff_t, index)
+               __field(block_t, old_blkaddr)
+               __field(block_t, new_blkaddr)
+               __field(int, rw)
+               __field(int, type)
+       ),
+
+       TP_fast_assign(
+               __entry->dev            = page->mapping->host->i_sb->s_dev;
+               __entry->ino            = page->mapping->host->i_ino;
+               __entry->index          = page->index;
+               __entry->old_blkaddr    = fio->old_blkaddr;
+               __entry->new_blkaddr    = fio->new_blkaddr;
+               __entry->rw             = fio->rw;
+               __entry->type           = fio->type;
+       ),
+
+       TP_printk("dev = (%d,%d), ino = %lu, page_index = 0x%lx, "
+               "oldaddr = 0x%llx, newaddr = 0x%llx rw = %s%s, type = %s",
+               show_dev_ino(__entry),
+               (unsigned long)__entry->index,
+               (unsigned long long)__entry->old_blkaddr,
+               (unsigned long long)__entry->new_blkaddr,
+               show_bio_type(__entry->rw),
+               show_block_type(__entry->type))
+);
+
+DEFINE_EVENT_CONDITION(f2fs__submit_page_bio, f2fs_submit_page_bio,
+
+       TP_PROTO(struct page *page, struct f2fs_io_info *fio),
+
+       TP_ARGS(page, fio),
+
+       TP_CONDITION(page->mapping)
+);
+
+DEFINE_EVENT_CONDITION(f2fs__submit_page_bio, f2fs_submit_page_mbio,
+
+       TP_PROTO(struct page *page, struct f2fs_io_info *fio),
+
+       TP_ARGS(page, fio),
+
+       TP_CONDITION(page->mapping)
+);
+
+DECLARE_EVENT_CLASS(f2fs__submit_bio,
+
+       TP_PROTO(struct super_block *sb, struct f2fs_io_info *fio,
+                                               struct bio *bio),
+
+       TP_ARGS(sb, fio, bio),
 
        TP_STRUCT__entry(
                __field(dev_t,  dev)
-               __field(int,    btype)
-               __field(bool,   sync)
+               __field(int,    rw)
+               __field(int,    type)
                __field(sector_t,       sector)
                __field(unsigned int,   size)
        ),
 
        TP_fast_assign(
                __entry->dev            = sb->s_dev;
-               __entry->btype          = btype;
-               __entry->sync           = sync;
+               __entry->rw             = fio->rw;
+               __entry->type           = fio->type;
                __entry->sector         = bio->bi_sector;
                __entry->size           = bio->bi_size;
        ),
 
-       TP_printk("dev = (%d,%d), type = %s, io = %s, sector = %lld, size = %u",
+       TP_printk("dev = (%d,%d), %s%s, %s, sector = %lld, size = %u",
                show_dev(__entry),
-               show_block_type(__entry->btype),
-               __entry->sync ? "sync" : "no sync",
+               show_bio_type(__entry->rw),
+               show_block_type(__entry->type),
                (unsigned long long)__entry->sector,
                __entry->size)
 );
 
-TRACE_EVENT(f2fs_submit_write_page,
+DEFINE_EVENT_CONDITION(f2fs__submit_bio, f2fs_submit_write_bio,
+
+       TP_PROTO(struct super_block *sb, struct f2fs_io_info *fio,
+                                                       struct bio *bio),
+
+       TP_ARGS(sb, fio, bio),
+
+       TP_CONDITION(bio)
+);
+
+DEFINE_EVENT_CONDITION(f2fs__submit_bio, f2fs_submit_read_bio,
+
+       TP_PROTO(struct super_block *sb, struct f2fs_io_info *fio,
+                                                       struct bio *bio),
+
+       TP_ARGS(sb, fio, bio),
+
+       TP_CONDITION(bio)
+);
 
-       TP_PROTO(struct page *page, block_t blk_addr, int type),
+TRACE_EVENT(f2fs_write_begin,
 
-       TP_ARGS(page, blk_addr, type),
+       TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
+                               unsigned int flags),
+
+       TP_ARGS(inode, pos, len, flags),
+
+       TP_STRUCT__entry(
+               __field(dev_t,  dev)
+               __field(ino_t,  ino)
+               __field(loff_t, pos)
+               __field(unsigned int, len)
+               __field(unsigned int, flags)
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = inode->i_sb->s_dev;
+               __entry->ino    = inode->i_ino;
+               __entry->pos    = pos;
+               __entry->len    = len;
+               __entry->flags  = flags;
+       ),
+
+       TP_printk("dev = (%d,%d), ino = %lu, pos = %llu, len = %u, flags = %u",
+               show_dev_ino(__entry),
+               (unsigned long long)__entry->pos,
+               __entry->len,
+               __entry->flags)
+);
+
+TRACE_EVENT(f2fs_write_end,
+
+       TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
+                               unsigned int copied),
+
+       TP_ARGS(inode, pos, len, copied),
+
+       TP_STRUCT__entry(
+               __field(dev_t,  dev)
+               __field(ino_t,  ino)
+               __field(loff_t, pos)
+               __field(unsigned int, len)
+               __field(unsigned int, copied)
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = inode->i_sb->s_dev;
+               __entry->ino    = inode->i_ino;
+               __entry->pos    = pos;
+               __entry->len    = len;
+               __entry->copied = copied;
+       ),
+
+       TP_printk("dev = (%d,%d), ino = %lu, pos = %llu, len = %u, copied = %u",
+               show_dev_ino(__entry),
+               (unsigned long long)__entry->pos,
+               __entry->len,
+               __entry->copied)
+);
+
+DECLARE_EVENT_CLASS(f2fs__page,
+
+       TP_PROTO(struct page *page, int type),
+
+       TP_ARGS(page, type),
 
        TP_STRUCT__entry(
                __field(dev_t,  dev)
                __field(ino_t,  ino)
                __field(int, type)
+               __field(int, dir)
                __field(pgoff_t, index)
-               __field(block_t, block)
+               __field(int, dirty)
+               __field(int, uptodate)
        ),
 
        TP_fast_assign(
                __entry->dev    = page->mapping->host->i_sb->s_dev;
                __entry->ino    = page->mapping->host->i_ino;
                __entry->type   = type;
+               __entry->dir    = S_ISDIR(page->mapping->host->i_mode);
                __entry->index  = page->index;
-               __entry->block  = blk_addr;
+               __entry->dirty  = PageDirty(page);
+               __entry->uptodate = PageUptodate(page);
        ),
 
-       TP_printk("dev = (%d,%d), ino = %lu, %s, index = %lu, blkaddr = 0x%llx",
+       TP_printk("dev = (%d,%d), ino = %lu, %s, %s, index = %lu, "
+               "dirty = %d, uptodate = %d",
                show_dev_ino(__entry),
                show_block_type(__entry->type),
+               show_file_type(__entry->dir),
                (unsigned long)__entry->index,
-               (unsigned long long)__entry->block)
+               __entry->dirty,
+               __entry->uptodate)
+);
+
+DEFINE_EVENT(f2fs__page, f2fs_writepage,
+
+       TP_PROTO(struct page *page, int type),
+
+       TP_ARGS(page, type)
+);
+
+DEFINE_EVENT(f2fs__page, f2fs_do_write_data_page,
+
+       TP_PROTO(struct page *page, int type),
+
+       TP_ARGS(page, type)
+);
+
+DEFINE_EVENT(f2fs__page, f2fs_readpage,
+
+       TP_PROTO(struct page *page, int type),
+
+       TP_ARGS(page, type)
+);
+
+DEFINE_EVENT(f2fs__page, f2fs_set_page_dirty,
+
+       TP_PROTO(struct page *page, int type),
+
+       TP_ARGS(page, type)
+);
+
+DEFINE_EVENT(f2fs__page, f2fs_vm_page_mkwrite,
+
+       TP_PROTO(struct page *page, int type),
+
+       TP_ARGS(page, type)
+);
+
+DEFINE_EVENT(f2fs__page, f2fs_register_inmem_page,
+
+       TP_PROTO(struct page *page, int type),
+
+       TP_ARGS(page, type)
+);
+
+DEFINE_EVENT(f2fs__page, f2fs_commit_inmem_page,
+
+       TP_PROTO(struct page *page, int type),
+
+       TP_ARGS(page, type)
+);
+
+TRACE_EVENT(f2fs_writepages,
+
+       TP_PROTO(struct inode *inode, struct writeback_control *wbc, int type),
+
+       TP_ARGS(inode, wbc, type),
+
+       TP_STRUCT__entry(
+               __field(dev_t,  dev)
+               __field(ino_t,  ino)
+               __field(int,    type)
+               __field(int,    dir)
+               __field(long,   nr_to_write)
+               __field(long,   pages_skipped)
+               __field(loff_t, range_start)
+               __field(loff_t, range_end)
+               __field(pgoff_t, writeback_index)
+               __field(int,    sync_mode)
+               __field(char,   for_kupdate)
+               __field(char,   for_background)
+               __field(char,   tagged_writepages)
+               __field(char,   for_reclaim)
+               __field(char,   range_cyclic)
+       ),
+
+       TP_fast_assign(
+               __entry->dev            = inode->i_sb->s_dev;
+               __entry->ino            = inode->i_ino;
+               __entry->type           = type;
+               __entry->dir            = S_ISDIR(inode->i_mode);
+               __entry->nr_to_write    = wbc->nr_to_write;
+               __entry->pages_skipped  = wbc->pages_skipped;
+               __entry->range_start    = wbc->range_start;
+               __entry->range_end      = wbc->range_end;
+               __entry->writeback_index = inode->i_mapping->writeback_index;
+               __entry->sync_mode      = wbc->sync_mode;
+               __entry->for_kupdate    = wbc->for_kupdate;
+               __entry->for_background = wbc->for_background;
+               __entry->tagged_writepages      = wbc->tagged_writepages;
+               __entry->for_reclaim    = wbc->for_reclaim;
+               __entry->range_cyclic   = wbc->range_cyclic;
+       ),
+
+       TP_printk("dev = (%d,%d), ino = %lu, %s, %s, nr_to_write %ld, "
+               "skipped %ld, start %lld, end %lld, wb_idx %lu, sync_mode %d, "
+               "kupdate %u background %u tagged %u reclaim %u cyclic %u",
+               show_dev_ino(__entry),
+               show_block_type(__entry->type),
+               show_file_type(__entry->dir),
+               __entry->nr_to_write,
+               __entry->pages_skipped,
+               __entry->range_start,
+               __entry->range_end,
+               (unsigned long)__entry->writeback_index,
+               __entry->sync_mode,
+               __entry->for_kupdate,
+               __entry->for_background,
+               __entry->tagged_writepages,
+               __entry->for_reclaim,
+               __entry->range_cyclic)
+);
+
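f2fs_writepages snapshots the whole writeback_control alongside the inode,
which makes it possible to tell kupdate, background, and reclaim writeback
apart from a single trace line. A plausible call site, assuming it sits at the
top of the f2fs ->writepages handler where mapping->host is the inode:

        /* sketch: trace the writeback request before building any bios */
        trace_f2fs_writepages(mapping->host, wbc, DATA);
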
+TRACE_EVENT(f2fs_readpages,
+
+       TP_PROTO(struct inode *inode, struct page *page, unsigned int nrpage),
+
+       TP_ARGS(inode, page, nrpage),
+
+       TP_STRUCT__entry(
+               __field(dev_t,  dev)
+               __field(ino_t,  ino)
+               __field(pgoff_t,        start)
+               __field(unsigned int,   nrpage)
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = inode->i_sb->s_dev;
+               __entry->ino    = inode->i_ino;
+               __entry->start  = page->index;
+               __entry->nrpage = nrpage;
+       ),
+
+       TP_printk("dev = (%d,%d), ino = %lu, start = %lu nrpage = %u",
+               show_dev_ino(__entry),
+               (unsigned long)__entry->start,
+               __entry->nrpage)
 );
 
 TRACE_EVENT(f2fs_write_checkpoint,
 
-       TP_PROTO(struct super_block *sb, bool is_umount, char *msg),
+       TP_PROTO(struct super_block *sb, int reason, char *msg),
 
-       TP_ARGS(sb, is_umount, msg),
+       TP_ARGS(sb, reason, msg),
 
        TP_STRUCT__entry(
                __field(dev_t,  dev)
-               __field(bool,   is_umount)
+               __field(int,    reason)
                __field(char *, msg)
        ),
 
        TP_fast_assign(
                __entry->dev            = sb->s_dev;
-               __entry->is_umount      = is_umount;
+               __entry->reason         = reason;
                __entry->msg            = msg;
        ),
 
        TP_printk("dev = (%d,%d), checkpoint for %s, state = %s",
                show_dev(__entry),
-               __entry->is_umount ? "clean umount" : "consistency",
+               show_cpreason(__entry->reason),
                __entry->msg)
 );
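show_cpreason() decodes the new integer reason into a human-readable string.
Its definition is not part of this hunk; the sketch below shows one plausible
__print_symbolic() form for it, with the CP_* reason names as assumptions:

        #define show_cpreason(type)                                     \
                __print_symbolic(type,                                  \
                        { CP_UMOUNT,    "Umount" },                     \
                        { CP_FASTBOOT,  "Fastboot" },                   \
                        { CP_SYNC,      "Sync" },                       \
                        { CP_RECOVERY,  "Recovery" },                   \
                        { CP_DISCARD,   "Discard" })
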
 
+TRACE_EVENT(f2fs_issue_discard,
+
+       TP_PROTO(struct super_block *sb, block_t blkstart, block_t blklen),
+
+       TP_ARGS(sb, blkstart, blklen),
+
+       TP_STRUCT__entry(
+               __field(dev_t,  dev)
+               __field(block_t, blkstart)
+               __field(block_t, blklen)
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = sb->s_dev;
+               __entry->blkstart = blkstart;
+               __entry->blklen = blklen;
+       ),
+
+       TP_printk("dev = (%d,%d), blkstart = 0x%llx, blklen = 0x%llx",
+               show_dev(__entry),
+               (unsigned long long)__entry->blkstart,
+               (unsigned long long)__entry->blklen)
+);
+
+TRACE_EVENT(f2fs_issue_reset_zone,
+
+       TP_PROTO(struct super_block *sb, block_t blkstart),
+
+       TP_ARGS(sb, blkstart),
+
+       TP_STRUCT__entry(
+               __field(dev_t,  dev)
+               __field(block_t, blkstart)
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = sb->s_dev;
+               __entry->blkstart = blkstart;
+       ),
+
+       TP_printk("dev = (%d,%d), reset zone at block = 0x%llx",
+               show_dev(__entry),
+               (unsigned long long)__entry->blkstart)
+);
+
+TRACE_EVENT(f2fs_issue_flush,
+
+       TP_PROTO(struct super_block *sb, unsigned int nobarrier,
+                                       unsigned int flush_merge),
+
+       TP_ARGS(sb, nobarrier, flush_merge),
+
+       TP_STRUCT__entry(
+               __field(dev_t,  dev)
+               __field(unsigned int, nobarrier)
+               __field(unsigned int, flush_merge)
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = sb->s_dev;
+               __entry->nobarrier = nobarrier;
+               __entry->flush_merge = flush_merge;
+       ),
+
+       TP_printk("dev = (%d,%d), %s %s",
+               show_dev(__entry),
+               __entry->nobarrier ? "skip (nobarrier)" : "issue",
+               __entry->flush_merge ? " with flush_merge" : "")
+);
+
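Both arguments of f2fs_issue_flush are mount-time booleans, so the event shows
whether a flush was actually issued or skipped, and whether flush merging was
in effect. A plausible call site, assuming a test_opt() helper that reflects
the nobarrier and flush_merge mount options:

        /* sketch: record how this flush request is being handled */
        trace_f2fs_issue_flush(sbi->sb, test_opt(sbi, NOBARRIER),
                                        test_opt(sbi, FLUSH_MERGE));
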
+TRACE_EVENT(f2fs_lookup_extent_tree_start,
+
+       TP_PROTO(struct inode *inode, unsigned int pgofs),
+
+       TP_ARGS(inode, pgofs),
+
+       TP_STRUCT__entry(
+               __field(dev_t,  dev)
+               __field(ino_t,  ino)
+               __field(unsigned int, pgofs)
+       ),
+
+       TP_fast_assign(
+               __entry->dev = inode->i_sb->s_dev;
+               __entry->ino = inode->i_ino;
+               __entry->pgofs = pgofs;
+       ),
+
+       TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u",
+               show_dev_ino(__entry),
+               __entry->pgofs)
+);
+
+TRACE_EVENT_CONDITION(f2fs_lookup_extent_tree_end,
+
+       TP_PROTO(struct inode *inode, unsigned int pgofs,
+                                               struct extent_info *ei),
+
+       TP_ARGS(inode, pgofs, ei),
+
+       TP_CONDITION(ei),
+
+       TP_STRUCT__entry(
+               __field(dev_t,  dev)
+               __field(ino_t,  ino)
+               __field(unsigned int, pgofs)
+               __field(unsigned int, fofs)
+               __field(u32, blk)
+               __field(unsigned int, len)
+       ),
+
+       TP_fast_assign(
+               __entry->dev = inode->i_sb->s_dev;
+               __entry->ino = inode->i_ino;
+               __entry->pgofs = pgofs;
+               __entry->fofs = ei->fofs;
+               __entry->blk = ei->blk;
+               __entry->len = ei->len;
+       ),
+
+       TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, "
+               "ext_info(fofs: %u, blk: %u, len: %u)",
+               show_dev_ino(__entry),
+               __entry->pgofs,
+               __entry->fofs,
+               __entry->blk,
+               __entry->len)
+);
+
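TRACE_EVENT_CONDITION() with TP_CONDITION(ei) means the exit event is emitted
only on an extent-cache hit; callers can trace unconditionally and misses stay
silent. A sketch of the lookup path, with the surrounding names assumed:

        /* sketch: ei may be NULL on a miss; the event then does nothing */
        trace_f2fs_lookup_extent_tree_end(inode, pgofs, ei);
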
+TRACE_EVENT(f2fs_update_extent_tree_range,
+
+       TP_PROTO(struct inode *inode, unsigned int pgofs, block_t blkaddr,
+                                               unsigned int len),
+
+       TP_ARGS(inode, pgofs, blkaddr, len),
+
+       TP_STRUCT__entry(
+               __field(dev_t,  dev)
+               __field(ino_t,  ino)
+               __field(unsigned int, pgofs)
+               __field(u32, blk)
+               __field(unsigned int, len)
+       ),
+
+       TP_fast_assign(
+               __entry->dev = inode->i_sb->s_dev;
+               __entry->ino = inode->i_ino;
+               __entry->pgofs = pgofs;
+               __entry->blk = blkaddr;
+               __entry->len = len;
+       ),
+
+       TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, "
+                                       "blkaddr = %u, len = %u",
+               show_dev_ino(__entry),
+               __entry->pgofs,
+               __entry->blk,
+               __entry->len)
+);
+
+TRACE_EVENT(f2fs_shrink_extent_tree,
+
+       TP_PROTO(struct f2fs_sb_info *sbi, unsigned int node_cnt,
+                                               unsigned int tree_cnt),
+
+       TP_ARGS(sbi, node_cnt, tree_cnt),
+
+       TP_STRUCT__entry(
+               __field(dev_t,  dev)
+               __field(unsigned int, node_cnt)
+               __field(unsigned int, tree_cnt)
+       ),
+
+       TP_fast_assign(
+               __entry->dev = sbi->sb->s_dev;
+               __entry->node_cnt = node_cnt;
+               __entry->tree_cnt = tree_cnt;
+       ),
+
+       TP_printk("dev = (%d,%d), shrunk: node_cnt = %u, tree_cnt = %u",
+               show_dev(__entry),
+               __entry->node_cnt,
+               __entry->tree_cnt)
+);
+
+TRACE_EVENT(f2fs_destroy_extent_tree,
+
+       TP_PROTO(struct inode *inode, unsigned int node_cnt),
+
+       TP_ARGS(inode, node_cnt),
+
+       TP_STRUCT__entry(
+               __field(dev_t,  dev)
+               __field(ino_t,  ino)
+               __field(unsigned int, node_cnt)
+       ),
+
+       TP_fast_assign(
+               __entry->dev = inode->i_sb->s_dev;
+               __entry->ino = inode->i_ino;
+               __entry->node_cnt = node_cnt;
+       ),
+
+       TP_printk("dev = (%d,%d), ino = %lu, destroyed: node_cnt = %u",
+               show_dev_ino(__entry),
+               __entry->node_cnt)
+);
+
+DECLARE_EVENT_CLASS(f2fs_sync_dirty_inodes,
+
+       TP_PROTO(struct super_block *sb, int type, s64 count),
+
+       TP_ARGS(sb, type, count),
+
+       TP_STRUCT__entry(
+               __field(dev_t, dev)
+               __field(int, type)
+               __field(s64, count)
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = sb->s_dev;
+               __entry->type   = type;
+               __entry->count  = count;
+       ),
+
+       TP_printk("dev = (%d,%d), %s, dirty count = %lld",
+               show_dev(__entry),
+               show_file_type(__entry->type),
+               __entry->count)
+);
+
+DEFINE_EVENT(f2fs_sync_dirty_inodes, f2fs_sync_dirty_inodes_enter,
+
+       TP_PROTO(struct super_block *sb, int type, s64 count),
+
+       TP_ARGS(sb, type, count)
+);
+
+DEFINE_EVENT(f2fs_sync_dirty_inodes, f2fs_sync_dirty_inodes_exit,
+
+       TP_PROTO(struct super_block *sb, int type, s64 count),
+
+       TP_ARGS(sb, type, count)
+);
+
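The enter/exit pair shares one event class, so the dirty count can be compared
before and after a flush. A hypothetical pairing, assuming DIR_INODE tags the
inode list being flushed and nr_dirty is the caller's dirty-page count:

        /* sketch: bracket the flush so the change in count is visible */
        trace_f2fs_sync_dirty_inodes_enter(sb, DIR_INODE, nr_dirty);
        /* ... write back the dirty dentry pages ... */
        trace_f2fs_sync_dirty_inodes_exit(sb, DIR_INODE, nr_dirty_after);
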
 #endif /* _TRACE_F2FS_H */
 
  /* This part must be outside protection */
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index 5ea3238303e5694a8ee1ebe22707a92e31a9ab1b..2782ca7f22c536c1b2991825b73f1609eb9a373a 100644 (file)
@@ -167,6 +167,24 @@ struct inodes_stat_t {
 #define FS_IOC32_SETVERSION            _IOW('v', 2, int)
 
 #define FS_IOC_INVAL_MAPPING           _IO('f', 13)
+/*
+ * File system encryption support
+ */
+/* Policy provided via an ioctl on the topmost directory */
+#define FS_KEY_DESCRIPTOR_SIZE 8
+
+struct fscrypt_policy {
+       char version;
+       char contents_encryption_mode;
+       char filenames_encryption_mode;
+       char flags;
+       char master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE];
+} __packed;
+
+#define FS_IOC_SET_ENCRYPTION_POLICY   _IOR('f', 19, struct fscrypt_policy)
+#define FS_IOC_GET_ENCRYPTION_PWSALT   _IOW('f', 20, __u8[16])
+#define FS_IOC_GET_ENCRYPTION_POLICY   _IOW('f', 21, struct fscrypt_policy)
+
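A hypothetical userspace sketch (not part of this patch) showing how the new
ioctl would be used to apply a v0 policy to an empty directory. The
FS_ENCRYPTION_MODE_* constants are assumed to come from the same header;
key_desc is the 8-byte descriptor of a key already in the session keyring.

        #include <string.h>
        #include <sys/ioctl.h>
        #include <linux/fs.h>

        static int set_policy(int dirfd,
                              const char key_desc[FS_KEY_DESCRIPTOR_SIZE])
        {
                struct fscrypt_policy policy = {
                        .version = 0,
                        .contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS,
                        .filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS,
                        .flags = 0,
                };

                memcpy(policy.master_key_descriptor, key_desc,
                       FS_KEY_DESCRIPTOR_SIZE);
                return ioctl(dirfd, FS_IOC_SET_ENCRYPTION_POLICY, &policy);
        }
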
 /*
  * Inode flags (FS_IOC_GETFLAGS / FS_IOC_SETFLAGS)
  */
diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
index 9e1e005c75967d497831fb67146020aeed4bc4a8..c4c8df4b214d9f0bc2f941e0144526b3260a7a1f 100644 (file)
@@ -1018,10 +1018,13 @@ static int __init init_encrypted(void)
        ret = encrypted_shash_alloc();
        if (ret < 0)
                return ret;
+       ret = aes_get_sizes();
+       if (ret < 0)
+               goto out;
        ret = register_key_type(&key_type_encrypted);
        if (ret < 0)
                goto out;
-       return aes_get_sizes();
+       return 0;
 out:
        encrypted_shash_release();
        return ret;
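
After this hunk, init_encrypted() probes the AES cipher before registering the
key type, so a missing cipher no longer leaves key_type_encrypted registered;
both failure paths now unwind through encrypted_shash_release(). Reconstructed
from the context lines above, the function reads:

        static int __init init_encrypted(void)
        {
                int ret;

                ret = encrypted_shash_alloc();
                if (ret < 0)
                        return ret;
                ret = aes_get_sizes();
                if (ret < 0)
                        goto out;
                ret = register_key_type(&key_type_encrypted);
                if (ret < 0)
                        goto out;
                return 0;
        out:
                encrypted_shash_release();
                return ret;
        }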