dm cache: submit writethrough writes in parallel to origin and cache
author Mike Snitzer <snitzer@redhat.com>
Thu, 19 Oct 2017 21:16:54 +0000 (17:16 -0400)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 21 Aug 2020 07:48:23 +0000 (09:48 +0200)
commit 2df3bae9a6543e90042291707b8db0cbfbae9ee9 upstream.

Discontinue issuing writethrough write IO in series to the origin and
then cache.

Use bio_clone_fast() to create a new origin clone bio that will be
mapped to the origin device and then bio_chain() it to the bio that gets
remapped to the cache device.  The origin clone bio does _not_ have a
copy of the per_bio_data -- as such check_if_tick_bio_needed() will not
be called.

The cache bio (parent bio) will not complete until the origin bio has
completed -- this fulfills bio_clone_fast()'s requirements as well as
the requirement to not complete the original IO until the write IO has
completed to both the origin and cache device.
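
For readers unfamiliar with the chaining semantics: bio_chain(child, parent)
bumps the parent's remaining-completion count and redirects the child's
end_io, so the parent's end_io runs only after the child has completed.
Below is a minimal sketch of the pattern used by this patch.
bio_clone_fast(), bio_chain(), bio_set_dev() and submit_bio() are the real
block-layer APIs of this era; the function and device names are
illustrative only, and in the actual target the original bio is handed
back to the dm core as DM_MAPIO_REMAPPED rather than submitted directly:

	/*
	 * Sketch: send one write to two devices in parallel, completing
	 * the original bio only after both writes finish.  The clone
	 * shares the original's data pages but carries no per_bio_data
	 * of its own -- hence the bio_has_pbd flag in the diff below.
	 */
	static void write_to_both(struct bio *bio, struct block_device *dev_a,
				  struct block_device *dev_b, struct bio_set *bs)
	{
		struct bio *clone = bio_clone_fast(bio, GFP_NOIO, bs);

		bio_set_dev(clone, dev_a);	/* clone targets device A */
		bio_chain(clone, bio);		/* bio waits on clone's completion */
		submit_bio(clone);

		bio_set_dev(bio, dev_b);	/* original targets device B */
		submit_bio(bio);		/* both writes now in flight */
	}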

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/md/dm-cache-target.c

index 36b43a484e56feb5c1eeb51de9b4b15c76878ba4..2b107b579c2a509f793f2fb4406be96d2e84e9c9 100644
@@ -450,6 +450,7 @@ struct cache {
        struct work_struct migration_worker;
        struct delayed_work waker;
        struct dm_bio_prison_v2 *prison;
+       struct bio_set *bs;
 
        mempool_t *migration_pool;
 
@@ -868,16 +869,23 @@ static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
        spin_unlock_irqrestore(&cache->lock, flags);
 }
 
-static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
-                                         dm_oblock_t oblock)
+static void __remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
+                                           dm_oblock_t oblock, bool bio_has_pbd)
 {
-       // FIXME: this is called way too much.
-       check_if_tick_bio_needed(cache, bio);
+       if (bio_has_pbd)
+               check_if_tick_bio_needed(cache, bio);
        remap_to_origin(cache, bio);
        if (bio_data_dir(bio) == WRITE)
                clear_discard(cache, oblock_to_dblock(cache, oblock));
 }
 
+static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
+                                         dm_oblock_t oblock)
+{
+       // FIXME: check_if_tick_bio_needed() is called way too much through this interface
+       __remap_to_origin_clear_discard(cache, bio, oblock, true);
+}
+
 static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
                                 dm_oblock_t oblock, dm_cblock_t cblock)
 {
@@ -971,23 +979,25 @@ static void writethrough_endio(struct bio *bio)
 }
 
 /*
- * FIXME: send in parallel, huge latency as is.
  * When running in writethrough mode we need to send writes to clean blocks
- * to both the cache and origin devices.  In future we'd like to clone the
- * bio and send them in parallel, but for now we're doing them in
- * series as this is easier.
+ * to both the cache and origin devices.  Clone the bio and send them in parallel.
  */
-static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
-                                      dm_oblock_t oblock, dm_cblock_t cblock)
+static void remap_to_origin_and_cache(struct cache *cache, struct bio *bio,
+                                     dm_oblock_t oblock, dm_cblock_t cblock)
 {
-       struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
+       struct bio *origin_bio = bio_clone_fast(bio, GFP_NOIO, cache->bs);
 
-       pb->cache = cache;
-       pb->cblock = cblock;
-       dm_hook_bio(&pb->hook_info, bio, writethrough_endio, NULL);
-       dm_bio_record(&pb->bio_details, bio);
+       BUG_ON(!origin_bio);
 
-       remap_to_origin_clear_discard(pb->cache, bio, oblock);
+       bio_chain(origin_bio, bio);
+       /*
+        * Passing false to __remap_to_origin_clear_discard() skips
+        * all code that might use per_bio_data (since clone doesn't have it)
+        */
+       __remap_to_origin_clear_discard(cache, origin_bio, oblock, false);
+       submit_bio(origin_bio);
+
+       remap_to_cache(cache, bio, cblock);
 }
 
 /*----------------------------------------------------------------
@@ -1873,7 +1883,7 @@ static int map_bio(struct cache *cache, struct bio *bio, dm_oblock_t block,
                } else {
                        if (bio_data_dir(bio) == WRITE && writethrough_mode(cache) &&
                            !is_dirty(cache, cblock)) {
-                               remap_to_origin_then_cache(cache, bio, block, cblock);
+                               remap_to_origin_and_cache(cache, bio, block, cblock);
                                accounted_begin(cache, bio);
                        } else
                                remap_to_cache_dirty(cache, bio, block, cblock);
@@ -2132,6 +2142,9 @@ static void destroy(struct cache *cache)
                kfree(cache->ctr_args[i]);
        kfree(cache->ctr_args);
 
+       if (cache->bs)
+               bioset_free(cache->bs);
+
        kfree(cache);
 }
 
@@ -2589,6 +2602,13 @@ static int cache_create(struct cache_args *ca, struct cache **result)
        cache->features = ca->features;
        ti->per_io_data_size = get_per_bio_data_size(cache);
 
+       if (writethrough_mode(cache)) {
+               /* Create bioset for writethrough bios issued to origin */
+               cache->bs = bioset_create(BIO_POOL_SIZE, 0, 0);
+               if (!cache->bs)
+                       goto bad;
+       }
+
        cache->callbacks.congested_fn = cache_is_congested;
        dm_table_add_target_callbacks(ti->table, &cache->callbacks);