bcache: Kill sequential_merge option
author     Kent Overstreet <kmo@daterainc.com>
           Wed, 31 Jul 2013 05:34:40 +0000 (22:34 -0700)
committer  Kent Overstreet <kmo@daterainc.com>
           Mon, 11 Nov 2013 05:56:39 +0000 (21:56 -0800)
It never really made sense to expose this, so just kill it.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
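
Background, as the diff below shows: sequential_merge gated the multi-bio tracking in check_should_bypass(). With it set (the default, per cached_dev_init()), bcache matched each bio's start sector against a hash of recent IO tails and accumulated consecutive bios into one stream before comparing against sequential_cutoff; with it cleared, only the current bio's size fed task->sequential_io. This commit makes the merged tracking unconditional and drops the sysfs knob.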
drivers/md/bcache/bcache.h
drivers/md/bcache/request.c
drivers/md/bcache/super.c
drivers/md/bcache/sysfs.c

diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index d6970a651e42858136c050d4c5d9bd659aa69681..322735547eab01e108f869155402c2527f138e40 100644
@@ -364,7 +364,6 @@ struct cached_dev {
        unsigned                sequential_cutoff;
        unsigned                readahead;
 
-       unsigned                sequential_merge:1;
        unsigned                verify:1;
 
        unsigned                partial_stripes_expensive:1;
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 932300f189730e0ed06b68ffa4c74464e433f55a..f645da61189aef79ac9f38c8d9a16051669fe1a2 100644
@@ -510,6 +510,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
        unsigned mode = cache_mode(dc, bio);
        unsigned sectors, congested = bch_get_congested(c);
        struct task_struct *task = current;
+       struct io *i;
 
        if (atomic_read(&dc->disk.detaching) ||
            c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
@@ -536,38 +537,30 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
            (bio->bi_rw & REQ_SYNC))
                goto rescale;
 
-       if (dc->sequential_merge) {
-               struct io *i;
+       spin_lock(&dc->io_lock);
 
-               spin_lock(&dc->io_lock);
+       hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash)
+               if (i->last == bio->bi_sector &&
+                   time_before(jiffies, i->jiffies))
+                       goto found;
 
-               hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash)
-                       if (i->last == bio->bi_sector &&
-                           time_before(jiffies, i->jiffies))
-                               goto found;
+       i = list_first_entry(&dc->io_lru, struct io, lru);
 
-               i = list_first_entry(&dc->io_lru, struct io, lru);
-
-               add_sequential(task);
-               i->sequential = 0;
+       add_sequential(task);
+       i->sequential = 0;
 found:
-               if (i->sequential + bio->bi_size > i->sequential)
-                       i->sequential   += bio->bi_size;
-
-               i->last                  = bio_end_sector(bio);
-               i->jiffies               = jiffies + msecs_to_jiffies(5000);
-               task->sequential_io      = i->sequential;
+       if (i->sequential + bio->bi_size > i->sequential)
+               i->sequential   += bio->bi_size;
 
-               hlist_del(&i->hash);
-               hlist_add_head(&i->hash, iohash(dc, i->last));
-               list_move_tail(&i->lru, &dc->io_lru);
+       i->last                  = bio_end_sector(bio);
+       i->jiffies               = jiffies + msecs_to_jiffies(5000);
+       task->sequential_io      = i->sequential;
 
-               spin_unlock(&dc->io_lock);
-       } else {
-               task->sequential_io = bio->bi_size;
+       hlist_del(&i->hash);
+       hlist_add_head(&i->hash, iohash(dc, i->last));
+       list_move_tail(&i->lru, &dc->io_lru);
 
-               add_sequential(task);
-       }
+       spin_unlock(&dc->io_lock);
 
        sectors = max(task->sequential_io,
                      task->sequential_io_avg) >> 9;
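
For reference, here is a minimal standalone sketch of the now-unconditional detection loop. The names struct stream, track_sequential() and RECENT are hypothetical stand-ins for bcache's struct io, check_should_bypass()/iohash() and RECENT_IO, and the sketch drops the spinlock, the hash buckets and the 5-second jiffies expiry of the real code:

#include <stdint.h>
#include <stddef.h>

#define RECENT 8			/* illustrative; bcache sizes this with RECENT_IO */

struct stream {
	uint64_t last;			/* sector just past the previous bio in the stream */
	unsigned sequential;		/* bytes accumulated in this stream so far */
};

static struct stream recent[RECENT];
static size_t next_victim;		/* crude round-robin eviction; bcache uses an LRU list */

/* Return the running sequential byte count for the stream this bio extends. */
static unsigned track_sequential(uint64_t sector, unsigned bytes)
{
	struct stream *s = NULL;
	size_t j;

	for (j = 0; j < RECENT; j++)
		if (recent[j].last == sector) {	/* bio starts where a known stream ended */
			s = &recent[j];
			break;
		}

	if (!s) {				/* no match: recycle a slot for a new stream */
		s = &recent[next_victim++ % RECENT];
		s->sequential = 0;
	}

	if (s->sequential + bytes > s->sequential)	/* same overflow guard as the hunk above */
		s->sequential += bytes;
	s->last = sector + (bytes >> 9);	/* advance the tail by the bio's sector count */

	return s->sequential;
}

A caller would shift the returned byte count down by 9 and compare it (or a task-level maximum, as the closing context lines do) against sequential_cutoff to decide whether to bypass the cache.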
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index e21200e98da61df3f01b779bfc1bbdd6c0a69f72..041dd9d1d882083b6a301069edc82e43c7825c26 100644
@@ -1079,7 +1079,6 @@ static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
        spin_lock_init(&dc->io_lock);
        bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);
 
-       dc->sequential_merge            = true;
        dc->sequential_cutoff           = 4 << 20;
 
        for (io = dc->io; io < dc->io + RECENT_IO; io++) {
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index c5f73e34d01605569d6509adc07aa3bb052a8839..4b672449ffafa960de6276f28aca5d42bda67023 100644
@@ -72,7 +72,6 @@ rw_attribute(congested_read_threshold_us);
 rw_attribute(congested_write_threshold_us);
 
 rw_attribute(sequential_cutoff);
-rw_attribute(sequential_merge);
 rw_attribute(data_csum);
 rw_attribute(cache_mode);
 rw_attribute(writeback_metadata);
@@ -161,7 +160,6 @@ SHOW(__bch_cached_dev)
        sysfs_hprint(stripe_size,       dc->disk.stripe_size << 9);
        var_printf(partial_stripes_expensive,   "%u");
 
-       var_printf(sequential_merge,    "%i");
        var_hprint(sequential_cutoff);
        var_hprint(readahead);
 
@@ -207,7 +205,6 @@ STORE(__cached_dev)
                            dc->writeback_rate_p_term_inverse, 1, INT_MAX);
        d_strtoul(writeback_rate_d_smooth);
 
-       d_strtoul(sequential_merge);
        d_strtoi_h(sequential_cutoff);
        d_strtoi_h(readahead);
 
@@ -319,7 +316,6 @@ static struct attribute *bch_cached_dev_files[] = {
        &sysfs_stripe_size,
        &sysfs_partial_stripes_expensive,
        &sysfs_sequential_cutoff,
-       &sysfs_sequential_merge,
        &sysfs_clear_stats,
        &sysfs_running,
        &sysfs_state,
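
After this change, the cached device's sysfs directory (typically /sys/block/<bdev>/bcache/) no longer contains a sequential_merge file, so any script that wrote to it will fail at open(); sequential_cutoff (defaulting to 4 MiB, per cached_dev_init() above) remains the single knob governing the bypass heuristic.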