/*----------------------------------------------------------------*/
-static bool writethrough_mode(struct cache_features *f)
+static bool writethrough_mode(struct cache *cache)
{
- return f->io_mode == CM_IO_WRITETHROUGH;
+ return cache->features.io_mode == CM_IO_WRITETHROUGH;
}
-static bool writeback_mode(struct cache_features *f)
+static bool writeback_mode(struct cache *cache)
{
- return f->io_mode == CM_IO_WRITEBACK;
+ return cache->features.io_mode == CM_IO_WRITEBACK;
}
-static inline bool passthrough_mode(struct cache_features *f)
+static inline bool passthrough_mode(struct cache *cache)
{
- return unlikely(f->io_mode == CM_IO_PASSTHROUGH);
+ return unlikely(cache->features.io_mode == CM_IO_PASSTHROUGH);
}
/*----------------------------------------------------------------*/
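For orientation, a minimal sketch of the types the predicates above walk. The field names mirror dm-cache-target.c, but the real structures carry many more members:

enum cache_io_mode {
	CM_IO_WRITEBACK,
	CM_IO_WRITETHROUGH,
	CM_IO_PASSTHROUGH,
};

struct cache_features {
	enum cache_io_mode io_mode;
	/* ... */
};

struct cache {
	struct cache_features features;
	/* ... */
};

Taking struct cache directly lets each predicate locate the features itself, so callers no longer spell out &cache->features at every call site.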
static void wake_migration_worker(struct cache *cache)
{
- if (passthrough_mode(&cache->features))
+ if (passthrough_mode(cache))
return;
queue_work(cache->wq, &cache->migration_worker);
}

static size_t get_per_bio_data_size(struct cache *cache)
{
- return writethrough_mode(&cache->features) ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
+ return writethrough_mode(cache) ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
}
static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size)
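This size feeds get_per_bio_data(), whose prototype closes the hunk above. As a hedged sketch, that accessor is typically a thin wrapper over DM core's dm_per_bio_data() helper (body paraphrased, not taken from this patch):

static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size)
{
	struct per_bio_data *pb = dm_per_bio_data(bio, data_size);	/* DM-core per-bio scratch area */

	BUG_ON(!pb);
	return pb;
}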
static bool optimisable_bio(struct cache *cache, struct bio *bio, dm_oblock_t block)
{
- return writeback_mode(&cache->features) &&
+ return writeback_mode(cache) &&
(is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio));
}
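A bio is "optimisable" when the policy can service it without first copying data: either the origin block is wholly discarded or the write covers a complete cache block. For the latter, bio_writes_complete_block() reduces to a size comparison; a sketch assuming the sectors_per_block field from dm-cache-target.c:

static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
{
	return (bio_data_dir(bio) == WRITE) &&
	       (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
}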
/*
 * Passthrough always maps to the origin, invalidating any
* cache blocks that are written to.
*/
- if (passthrough_mode(&cache->features)) {
+ if (passthrough_mode(cache)) {
if (bio_data_dir(bio) == WRITE) {
bio_drop_shared_lock(cache, bio);
atomic_inc(&cache->stats.demotion);
invalidate_start(cache, cblock, block, bio);
} else
remap_to_origin_clear_discard(cache, bio, block);
} else {
- if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) &&
+ if (bio_data_dir(bio) == WRITE && writethrough_mode(cache) &&
!is_dirty(cache, cblock)) {
remap_to_origin_then_cache(cache, bio, block, cblock);
accounted_begin(cache, bio);
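Pulling those branches together, the remap choice reduces to roughly the helper below. This is a hypothetical condensation: the real map_bio() also manages cell locks, statistics and discard bits inline.

enum remap_target { REMAP_ORIGIN, REMAP_ORIGIN_THEN_CACHE, REMAP_CACHE };

static enum remap_target remap_decision(struct cache *cache, struct bio *bio,
					dm_cblock_t cblock)
{
	if (passthrough_mode(cache))
		return REMAP_ORIGIN;		/* a write also invalidates cblock */

	if (bio_data_dir(bio) == WRITE && writethrough_mode(cache) &&
	    !is_dirty(cache, cblock))
		return REMAP_ORIGIN_THEN_CACHE;	/* mirror the write, block stays clean */

	return REMAP_CACHE;			/* writeback or dirty block: cache copy wins */
}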
goto bad;
}
- if (passthrough_mode(&cache->features)) {
+ if (passthrough_mode(cache)) {
bool all_clean;
r = dm_cache_metadata_all_clean(cache->cmd, &all_clean);
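For reference, the metadata query used here has the following shape in dm-cache-metadata.h (paraphrased from the kernel headers; treat the comment as an assumption):

/*
 * *result is only meaningful when the call returns 0.
 */
int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result);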
else
DMEMIT("1 ");
- if (writethrough_mode(&cache->features))
+ if (writethrough_mode(cache))
DMEMIT("writethrough ");
- else if (passthrough_mode(&cache->features))
+ else if (passthrough_mode(cache))
DMEMIT("passthrough ");
- else if (writeback_mode(&cache->features))
+ else if (writeback_mode(cache))
DMEMIT("writeback ");
else {
DMERR("%s: internal error: unknown io mode: %d",
cache_device_name(cache), (int) cache->features.io_mode);
goto err;
}
unsigned i;
struct cblock_range range;
- if (!passthrough_mode(&cache->features)) {
+ if (!passthrough_mode(cache)) {
DMERR("%s: cache has to be in passthrough mode for invalidation",
cache_device_name(cache));
return -EPERM;
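This guard is what userspace trips over when it sends the invalidation message while the cache is still in writeback or writethrough mode. A typical invocation, with a hypothetical device name, looks like:

dmsetup message my_cache 0 invalidate_cblocks 2345 3456-7890

The table must first be reloaded with the passthrough feature before such a message is accepted.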