dm: introduce num_flush_requests
authorMikulas Patocka <mpatocka@redhat.com>
Mon, 22 Jun 2009 09:12:20 +0000 (10:12 +0100)
committerAlasdair G Kergon <agk@redhat.com>
Mon, 22 Jun 2009 09:12:20 +0000 (10:12 +0100)
Introduce num_flush_requests for a target to set to say how many flush
instructions (empty barriers) it wants to receive.  These are sent by
__clone_and_map_empty_barrier with map_info->flush_request going from 0
to (num_flush_requests - 1).

Old targets without flush support won't receive any flush requests.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
drivers/md/dm.c
include/linux/device-mapper.h

index 7d9ca70943376e8330b958d7eb2b338abf91b2e9..badb7519cccbfaa463e674e30a8936dab61eaebb 100644 (file)
@@ -750,6 +750,40 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
        return clone;
 }
 
+static void __flush_target(struct clone_info *ci, struct dm_target *ti,
+                         unsigned flush_nr)
+{
+       struct dm_target_io *tio = alloc_tio(ci->md);
+       struct bio *clone;
+
+       tio->io = ci->io;
+       tio->ti = ti;
+
+       memset(&tio->info, 0, sizeof(tio->info));
+       tio->info.flush_request = flush_nr;
+
+       clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs);
+       __bio_clone(clone, ci->bio);
+       clone->bi_destructor = dm_bio_destructor;
+
+       __map_bio(ti, clone, tio);
+}
+
+static int __clone_and_map_empty_barrier(struct clone_info *ci)
+{
+       unsigned target_nr = 0, flush_nr;
+       struct dm_target *ti;
+
+       while ((ti = dm_table_get_target(ci->map, target_nr++)))
+               for (flush_nr = 0; flush_nr < ti->num_flush_requests;
+                    flush_nr++)
+                       __flush_target(ci, ti, flush_nr);
+
+       ci->sector_count = 0;
+
+       return 0;
+}
+
 static int __clone_and_map(struct clone_info *ci)
 {
        struct bio *clone, *bio = ci->bio;
@@ -757,6 +791,9 @@ static int __clone_and_map(struct clone_info *ci)
        sector_t len = 0, max;
        struct dm_target_io *tio;
 
+       if (unlikely(bio_empty_barrier(bio)))
+               return __clone_and_map_empty_barrier(ci);
+
        ti = dm_table_find_target(ci->map, ci->sector);
        if (!dm_target_is_valid(ti))
                return -EIO;
@@ -877,6 +914,8 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
        ci.io->md = md;
        ci.sector = bio->bi_sector;
        ci.sector_count = bio_sectors(bio);
+       if (unlikely(bio_empty_barrier(bio)))
+               ci.sector_count = 1;
        ci.idx = bio->bi_idx;
 
        start_io_acct(ci.io);
index 49c2362977fde10457c72d00e3672ba71142cd42..fc36a4d07723d642f0da8ee0a42617bd01d60d99 100644 (file)
@@ -21,6 +21,7 @@ typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;
 union map_info {
        void *ptr;
        unsigned long long ll;
+       unsigned flush_request;
 };
 
 /*
@@ -167,6 +168,16 @@ struct dm_target {
        /* Always a power of 2 */
        sector_t split_io;
 
+       /*
+        * The number of zero-length barrier requests that will be submitted
+        * to the target for the purpose of flushing the cache.
+        *
+        * The request number will be placed in union map_info->flush_request.
+        * It is the responsibility of the target driver to remap these requests
+        * to the real underlying devices.
+        */
+       unsigned num_flush_requests;
+
        /*
         * These are automatically filled in by
         * dm_table_get_device.