exynos8895/android_kernel_samsung_universal8895.git: block/blk-merge.c
index e01405a3e8b3f51ce0424a844fadb3304bda5e44..09df6133e19b24b7f1527e8c847c1b6ec1eb8e60 100644
@@ -68,6 +68,18 @@ static struct bio *blk_bio_write_same_split(struct request_queue *q,
        return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
 }
 
+static inline unsigned get_max_io_size(struct request_queue *q,
+                                      struct bio *bio)
+{
+       unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
+       unsigned mask = queue_logical_block_size(q) - 1;
+
+       /* aligned to logical block size */
+       sectors &= ~(mask >> 9);
+
+       return sectors;
+}
+
 static struct bio *blk_bio_segment_split(struct request_queue *q,
                                         struct bio *bio,
                                         struct bio_set *bs,
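
A quick illustration of the alignment arithmetic in get_max_io_size() above (not part of the patch; the 4096-byte logical block size and the sector count are assumed example values): with a 4096-byte logical block, mask >> 9 is 7, so the low three bits of the sector count are cleared and the limit is rounded down to a multiple of 8 sectors, i.e. to a whole logical block.

#include <stdio.h>

int main(void)
{
        unsigned sectors = 2563;        /* e.g. a blk_max_size_offset() result */
        unsigned mask = 4096 - 1;       /* queue_logical_block_size(q) - 1 */

        /* clear the sub-logical-block part: 2563 & ~7 == 2560 */
        sectors &= ~(mask >> 9);

        printf("%u\n", sectors);        /* prints 2560 */
        return 0;
}
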
@@ -79,9 +91,29 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
        unsigned front_seg_size = bio->bi_seg_front_size;
        bool do_split = true;
        struct bio *new = NULL;
+       const unsigned max_sectors = get_max_io_size(q, bio);
+       unsigned bvecs = 0;
 
        bio_for_each_segment(bv, bio, iter) {
-               if (sectors + (bv.bv_len >> 9) > queue_max_sectors(q))
+               /*
+                * With arbitrary bio size, the incoming bio may be very
+                * big. We have to split the bio into small bios so that
+                * each holds at most BIO_MAX_PAGES bvecs because
+                * bio_clone() can fail to allocate big bvecs.
+                *
+                * It would be better to apply this limit per request
+                * queue in which bio_clone() is involved, instead of
+                * globally. The biggest blocker is the bio_clone()
+                * used in bio bounce.
+                *
+                * If a bio is split for this reason, merging of the
+                * resulting bios should still be allowed, but we don't
+                * do that for now, to keep the change simple.
+                *
+                * TODO: deal with bio bounce's bio_clone() gracefully
+                * and convert the global limit into per-queue limit.
+                */
+               if (bvecs++ >= BIO_MAX_PAGES)
                        goto split;
 
                /*
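
A rough sketch of the effect of the BIO_MAX_PAGES cap introduced above (illustration only; BIO_MAX_PAGES is 256 in this kernel generation, and the bvec count is a made-up example): an oversized bio is cut every 256 bvecs, so each resulting piece stays within what bio_clone() can reliably allocate.

#include <stdio.h>

#define BIO_MAX_PAGES 256       /* value in this kernel generation */

int main(void)
{
        unsigned nr_bvecs = 1000;       /* hypothetical oversized bio */
        unsigned nr_bios = (nr_bvecs + BIO_MAX_PAGES - 1) / BIO_MAX_PAGES;

        /* 1000 bvecs -> 4 bios (256 + 256 + 256 + 232) */
        printf("%u bios after splitting\n", nr_bios);
        return 0;
}
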
@@ -91,6 +123,21 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
                if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
                        goto split;
 
+               if (sectors + (bv.bv_len >> 9) > max_sectors) {
+                       /*
+                        * Consider this a new segment if we're splitting in
+                        * the middle of this vector.
+                        */
+                       if (nsegs < queue_max_segments(q) &&
+                           sectors < max_sectors) {
+                               nsegs++;
+                               sectors = max_sectors;
+                       }
+                       if (sectors)
+                               goto split;
+                       /* Make this single bvec the first segment */
+               }
+
                if (bvprvp && blk_queue_cluster(q)) {
                        if (seg_size + bv.bv_len > queue_max_segment_size(q))
                                goto new_segment;
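
A worked example of the max_sectors branch above (illustration only; the numbers are assumptions, not taken from the patch): suppose get_max_io_size() returned 128 sectors, 120 sectors have already been accumulated, and the current bvec is 8 KiB (16 sectors). 120 + 16 exceeds 128, but sectors is still below the limit, so the partial bvec is counted as one more segment and the split lands exactly at 128 sectors; the remaining 8 sectors of the bvec start the next bio.

#include <stdio.h>

int main(void)
{
        unsigned max_sectors = 128;     /* assumed get_max_io_size() result */
        unsigned sectors = 120;         /* accumulated before this bvec */
        unsigned bv_len = 8192;         /* current bvec: 16 sectors */
        unsigned nsegs = 15;            /* segments counted so far */

        if (sectors + (bv_len >> 9) > max_sectors) {
                if (sectors < max_sectors) {
                        nsegs++;                /* partial bvec becomes a new segment */
                        sectors = max_sectors;  /* cut exactly at the limit */
                }
        }

        printf("split at %u sectors, %u segments\n", sectors, nsegs);  /* 128, 16 */
        return 0;
}
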
@@ -616,6 +663,51 @@ static void blk_account_io_merge(struct request *req)
        }
 }
 
+static struct inode *get_inode_from_bio(struct bio *bio)
+{
+       if (!bio)
+               return NULL;
+       if (!bio_has_data(bio))
+               return NULL;
+       if (!bio->bi_io_vec)
+               return NULL;
+       if (!bio->bi_io_vec->bv_page)
+               return NULL;
+       if (PageAnon(bio->bi_io_vec->bv_page)) {
+               struct inode *inode;
+
+               /* Using direct-io (O_DIRECT) without page cache */
+               inode = bio->bi_dio_inode;
+               return inode;
+       }
+
+       if (!bio->bi_io_vec->bv_page->mapping)
+               return NULL;
+       if (!bio->bi_io_vec->bv_page->mapping->host)
+               return NULL;
+       return bio->bi_io_vec->bv_page->mapping->host;
+}
+
+static bool inode_is_data_equal(void *data1, void *data2)
+{
+       /* pointer comparison */
+       return data1 == data2;
+}
+
+static bool allow_merge_bio_for_encryption(struct bio *bio1, struct bio *bio2)
+{
+       struct inode *inode1 = NULL;
+       struct inode *inode2 = NULL;
+
+       inode1 = get_inode_from_bio(bio1);
+       inode2 = get_inode_from_bio(bio2);
+
+       if (!inode_is_data_equal(inode1, inode2))
+               return false;
+
+       return true;
+}
+
 /*
  * Has to be called with the request spinlock acquired
  */
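
A note on the allow_merge_bio_for_encryption() helpers added above: the check reduces to comparing the inode pointers resolved from the first page of each bio, so a merged request never mixes data from two different files, whose per-file encryption contexts may differ. The sketch below is an illustration only, not kernel code; note in particular that two bios for which no inode can be resolved both yield NULL and therefore still merge.

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the veto: merging is refused only when the two bios
 * resolve to different inode pointers. */
static bool allow_merge(const void *inode1, const void *inode2)
{
        return inode1 == inode2;
}

int main(void)
{
        int file_a, file_b;     /* stand-ins for two distinct inodes */

        printf("%d\n", allow_merge(&file_a, &file_a)); /* 1: same file       */
        printf("%d\n", allow_merge(&file_a, &file_b)); /* 0: different files */
        printf("%d\n", allow_merge(NULL, NULL));       /* 1: no inode found  */
        return 0;
}
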
@@ -643,6 +735,8 @@ static int attempt_merge(struct request_queue *q, struct request *req,
            !blk_write_same_mergeable(req->bio, next->bio))
                return 0;
 
+       if (!allow_merge_bio_for_encryption(req->bio, next->bio))
+               return 0;
        /*
         * If we are allowed to merge, then append bio list
         * from next to rq and release next. merge_requests_fn
@@ -747,6 +841,16 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
            !blk_write_same_mergeable(rq->bio, bio))
                return false;
 
+#ifdef CONFIG_JOURNAL_DATA_TAG
+       /* a journal-tagged bio can only be merged into a REQ_META request */
+       if ((bio_flagged(bio, BIO_JMETA) || bio_flagged(bio, BIO_JOURNAL)) &&
+                       !(rq->cmd_flags & REQ_META))
+               return false;
+#endif
+
+       if (!allow_merge_bio_for_encryption(rq->bio, bio))
+               return false;
+
        return true;
 }
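
For completeness, a toy model of the CONFIG_JOURNAL_DATA_TAG gate added to blk_rq_merge_ok() above (illustration only; the flag macros below are local stand-ins, not the kernel's BIO_JMETA/BIO_JOURNAL or REQ_META bits): a journal-tagged bio is allowed into a request only if that request is already a metadata request.

#include <stdbool.h>
#include <stdio.h>

#define REQ_META_EX     (1u << 0)       /* stand-in for REQ_META */
#define BIO_JMETA_EX    (1u << 1)       /* stand-in for BIO_JMETA/BIO_JOURNAL */

/* Toy model: a journal-tagged bio may only be merged into a request
 * that already carries the metadata flag. */
static bool journal_merge_ok(unsigned bio_flags, unsigned rq_flags)
{
        if ((bio_flags & BIO_JMETA_EX) && !(rq_flags & REQ_META_EX))
                return false;
        return true;
}

int main(void)
{
        printf("%d\n", journal_merge_ok(BIO_JMETA_EX, 0));            /* 0: vetoed   */
        printf("%d\n", journal_merge_ok(BIO_JMETA_EX, REQ_META_EX));  /* 1: allowed  */
        printf("%d\n", journal_merge_ok(0, 0));                       /* 1: untagged */
        return 0;
}
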