dm cache: pass cache structure to mode functions
author     Mike Snitzer <snitzer@redhat.com>
           Fri, 20 Oct 2017 01:01:04 +0000 (21:01 -0400)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Fri, 21 Aug 2020 07:48:23 +0000 (09:48 +0200)
commit 8e3c3827776fc93728c0c8d7c7b731226dc6ee23 upstream.

No functional changes, just a bit cleaner than passing the
cache_features structure around.
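For context, a minimal userspace sketch of the pattern this change applies.
The enum values, field names, and writethrough_mode() match the driver; the
stripped-down structs, the "_old" suffix on the pre-change variant, and the
main() harness are illustrative only:

  #include <stdbool.h>
  #include <stdio.h>

  enum cache_io_mode {
          CM_IO_WRITEBACK,
          CM_IO_WRITETHROUGH,
          CM_IO_PASSTHROUGH,
  };

  struct cache_features {
          enum cache_io_mode io_mode;
  };

  struct cache {
          struct cache_features features;
          /* the real struct cache carries many more fields */
  };

  /* Before: every call site had to reach into cache->features. */
  static bool writethrough_mode_old(struct cache_features *f)
  {
          return f->io_mode == CM_IO_WRITETHROUGH;
  }

  /* After: the predicate takes the cache itself, keeping call sites terse. */
  static bool writethrough_mode(struct cache *cache)
  {
          return cache->features.io_mode == CM_IO_WRITETHROUGH;
  }

  int main(void)
  {
          struct cache cache = {
                  .features = { .io_mode = CM_IO_WRITETHROUGH },
          };

          printf("old: %d\n", writethrough_mode_old(&cache.features));
          printf("new: %d\n", writethrough_mode(&cache));
          return 0;
  }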

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/md/dm-cache-target.c

diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 69cdb29ef6be96b49bd096c3c9e58a4a2e22c763..36b43a484e56feb5c1eeb51de9b4b15c76878ba4 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -515,19 +515,19 @@ struct dm_cache_migration {
 
 /*----------------------------------------------------------------*/
 
-static bool writethrough_mode(struct cache_features *f)
+static bool writethrough_mode(struct cache *cache)
 {
-       return f->io_mode == CM_IO_WRITETHROUGH;
+       return cache->features.io_mode == CM_IO_WRITETHROUGH;
 }
 
-static bool writeback_mode(struct cache_features *f)
+static bool writeback_mode(struct cache *cache)
 {
-       return f->io_mode == CM_IO_WRITEBACK;
+       return cache->features.io_mode == CM_IO_WRITEBACK;
 }
 
-static inline bool passthrough_mode(struct cache_features *f)
+static inline bool passthrough_mode(struct cache *cache)
 {
-       return unlikely(f->io_mode == CM_IO_PASSTHROUGH);
+       return unlikely(cache->features.io_mode == CM_IO_PASSTHROUGH);
 }
 
 /*----------------------------------------------------------------*/
@@ -544,7 +544,7 @@ static void wake_deferred_writethrough_worker(struct cache *cache)
 
 static void wake_migration_worker(struct cache *cache)
 {
-       if (passthrough_mode(&cache->features))
+       if (passthrough_mode(cache))
                return;
 
        queue_work(cache->wq, &cache->migration_worker);
@@ -626,7 +626,7 @@ static unsigned lock_level(struct bio *bio)
 
 static size_t get_per_bio_data_size(struct cache *cache)
 {
-       return writethrough_mode(&cache->features) ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
+       return writethrough_mode(cache) ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
 }
 
 static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size)
@@ -1209,7 +1209,7 @@ static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
 
 static bool optimisable_bio(struct cache *cache, struct bio *bio, dm_oblock_t block)
 {
-       return writeback_mode(&cache->features) &&
+       return writeback_mode(cache) &&
                (is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio));
 }
 
@@ -1862,7 +1862,7 @@ static int map_bio(struct cache *cache, struct bio *bio, dm_oblock_t block,
                 * Passthrough always maps to the origin, invalidating any
                 * cache blocks that are written to.
                 */
-               if (passthrough_mode(&cache->features)) {
+               if (passthrough_mode(cache)) {
                        if (bio_data_dir(bio) == WRITE) {
                                bio_drop_shared_lock(cache, bio);
                                atomic_inc(&cache->stats.demotion);
@@ -1871,7 +1871,7 @@ static int map_bio(struct cache *cache, struct bio *bio, dm_oblock_t block,
                                remap_to_origin_clear_discard(cache, bio, block);
 
                } else {
-                       if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) &&
+                       if (bio_data_dir(bio) == WRITE && writethrough_mode(cache) &&
                            !is_dirty(cache, cblock)) {
                                remap_to_origin_then_cache(cache, bio, block, cblock);
                                accounted_begin(cache, bio);
@@ -2649,7 +2649,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
                goto bad;
        }
 
-       if (passthrough_mode(&cache->features)) {
+       if (passthrough_mode(cache)) {
                bool all_clean;
 
                r = dm_cache_metadata_all_clean(cache->cmd, &all_clean);
@@ -3279,13 +3279,13 @@ static void cache_status(struct dm_target *ti, status_type_t type,
                else
                        DMEMIT("1 ");
 
-               if (writethrough_mode(&cache->features))
+               if (writethrough_mode(cache))
                        DMEMIT("writethrough ");
 
-               else if (passthrough_mode(&cache->features))
+               else if (passthrough_mode(cache))
                        DMEMIT("passthrough ");
 
-               else if (writeback_mode(&cache->features))
+               else if (writeback_mode(cache))
                        DMEMIT("writeback ");
 
                else {
@@ -3451,7 +3451,7 @@ static int process_invalidate_cblocks_message(struct cache *cache, unsigned coun
        unsigned i;
        struct cblock_range range;
 
-       if (!passthrough_mode(&cache->features)) {
+       if (!passthrough_mode(cache)) {
                DMERR("%s: cache has to be in passthrough mode for invalidation",
                      cache_device_name(cache));
                return -EPERM;