xen/blkback: Add support for BLKIF_OP_FLUSH_DISKCACHE and drop BLKIF_OP_WRITE_BARRIER.
authorKonrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Wed, 4 May 2011 21:07:27 +0000 (17:07 -0400)
committerKonrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Thu, 5 May 2011 17:43:24 +0000 (13:43 -0400)
We drop support for 'feature-barrier' and instead advertise
'feature-flush-cache' when the underlying backend storage supports
cache flushing.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
drivers/block/xen-blkback/blkback.c
drivers/block/xen-blkback/common.h
drivers/block/xen-blkback/xenbus.c

index eb068d0b47ea2042af6d30056e3438993a8a88a9..72ede0bf26977b590ee56b74fd78aadbb7b9cd2d 100644 (file)
@@ -46,8 +46,6 @@
 #include <asm/xen/hypercall.h>
 #include "common.h"
 
-#define WRITE_BARRIER  (REQ_WRITE | REQ_FLUSH | REQ_FUA)
-
 /*
  * These are rather arbitrary. They are fairly large because adjacent requests
  * pulled from a communication ring are quite likely to end up being part of
@@ -256,9 +254,9 @@ irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
 
 static void print_stats(struct blkif_st *blkif)
 {
-       printk(KERN_DEBUG "%s: oo %3d  |  rd %4d  |  wr %4d  |  br %4d\n",
+       printk(KERN_DEBUG "%s: oo %3d  |  rd %4d  |  wr %4d  |  f %4d\n",
               current->comm, blkif->st_oo_req,
-              blkif->st_rd_req, blkif->st_wr_req, blkif->st_br_req);
+              blkif->st_rd_req, blkif->st_wr_req, blkif->st_f_req);
        blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
        blkif->st_rd_req = 0;
        blkif->st_wr_req = 0;
@@ -414,10 +412,10 @@ static int xen_blkbk_map(struct blkif_request *req, struct pending_req *pending_
 static void __end_block_io_op(struct pending_req *pending_req, int error)
 {
        /* An error fails the entire request. */
-       if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
+       if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) &&
            (error == -EOPNOTSUPP)) {
-               DPRINTK("blkback: write barrier op failed, not supported\n");
-               xen_blkbk_barrier(XBT_NIL, pending_req->blkif->be, 0);
+               DPRINTK("blkback: flush diskcache op failed, not supported\n");
+               xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0);
                pending_req->status = BLKIF_RSP_EOPNOTSUPP;
        } else if (error) {
                DPRINTK("Buffer not up-to-date at end of operation, "
@@ -506,13 +504,14 @@ static int do_block_io_op(struct blkif_st *blkif)
                        blkif->st_rd_req++;
                        dispatch_rw_block_io(blkif, &req, pending_req);
                        break;
-               case BLKIF_OP_WRITE_BARRIER:
-                       blkif->st_br_req++;
+               case BLKIF_OP_FLUSH_DISKCACHE:
+                       blkif->st_f_req++;
                        /* fall through */
                case BLKIF_OP_WRITE:
                        blkif->st_wr_req++;
                        dispatch_rw_block_io(blkif, &req, pending_req);
                        break;
+               case BLKIF_OP_WRITE_BARRIER:
                default:
                        /* A good sign something is wrong: sleep for a while to
                         * avoid excessive CPU consumption by a bad guest. */
@@ -556,9 +555,14 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
        case BLKIF_OP_WRITE:
                operation = WRITE_ODIRECT;
                break;
-       case BLKIF_OP_WRITE_BARRIER:
-               operation = WRITE_BARRIER;
+       case BLKIF_OP_FLUSH_DISKCACHE:
+               operation = WRITE_FLUSH;
+               /* The frontend likes to set this to -1, which vbd_translate
+                * is alergic too. */
+               req->u.rw.sector_number = 0;
                break;
+       case BLKIF_OP_WRITE_BARRIER:
+               /* Should never get here. */
        default:
                operation = 0; /* make gcc happy */
                BUG();
@@ -566,7 +570,7 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
 
        /* Check that the number of segments is sane. */
        nseg = req->nr_segments;
-       if (unlikely(nseg == 0 && operation != WRITE_BARRIER) ||
+       if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
            unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
                DPRINTK("Bad number of segments in request (%d)\n", nseg);
                /* Haven't submitted any bio's yet. */
@@ -643,7 +647,7 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
 
        /* This will be hit if the operation was a barrier. */
        if (!bio) {
-               BUG_ON(operation != WRITE_BARRIER);
+               BUG_ON(operation != WRITE_FLUSH);
                bio = biolist[nbio++] = bio_alloc(GFP_KERNEL, 0);
                if (unlikely(bio == NULL))
                        goto fail_put_bio;
@@ -651,7 +655,6 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
                bio->bi_bdev    = preq.bdev;
                bio->bi_private = pending_req;
                bio->bi_end_io  = end_block_io_op;
-               bio->bi_sector  = -1;
        }
 
 
@@ -671,7 +674,7 @@ static void dispatch_rw_block_io(struct blkif_st *blkif,
 
        if (operation == READ)
                blkif->st_rd_sect += preq.nr_sects;
-       else if (operation == WRITE || operation == WRITE_BARRIER)
+       else if (operation == WRITE || operation == WRITE_FLUSH)
                blkif->st_wr_sect += preq.nr_sects;
 
        return;
index 16af388268e7f18613d53caced5cd550007ebc8c..af93837e129548c9a757675ce60c241962d90d18 100644 (file)
@@ -53,6 +53,7 @@ struct vbd {
        u32            pdevice;     /* phys device that this vbd maps to */
        struct block_device *bdev;
        sector_t       size;        /* Cached size parameter */
+       bool           flush_support;
 };
 
 struct backend_info;
@@ -85,7 +86,7 @@ struct blkif_st {
        int                 st_rd_req;
        int                 st_wr_req;
        int                 st_oo_req;
-       int                 st_br_req;
+       int                 st_f_req;
        int                 st_rd_sect;
        int                 st_wr_sect;
 
@@ -120,8 +121,8 @@ int xen_blkif_xenbus_init(void);
 irqreturn_t xen_blkif_be_int(int irq, void *dev_id);
 int xen_blkif_schedule(void *arg);
 
-int xen_blkbk_barrier(struct xenbus_transaction xbt,
-                       struct backend_info *be, int state);
+int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
+                             struct backend_info *be, int state);
 
 struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);
 
index 64b0a1c760fb2484d1b9bb423c8d11f683079250..9adcf806f83f49d577b6d5f62809859add1f8f47 100644 (file)
@@ -276,7 +276,7 @@ int __init xen_blkif_interface_init(void)
 VBD_SHOW(oo_req,  "%d\n", be->blkif->st_oo_req);
 VBD_SHOW(rd_req,  "%d\n", be->blkif->st_rd_req);
 VBD_SHOW(wr_req,  "%d\n", be->blkif->st_wr_req);
-VBD_SHOW(br_req,  "%d\n", be->blkif->st_br_req);
+VBD_SHOW(f_req,  "%d\n", be->blkif->st_f_req);
 VBD_SHOW(rd_sect, "%d\n", be->blkif->st_rd_sect);
 VBD_SHOW(wr_sect, "%d\n", be->blkif->st_wr_sect);
 
@@ -284,7 +284,7 @@ static struct attribute *vbdstat_attrs[] = {
        &dev_attr_oo_req.attr,
        &dev_attr_rd_req.attr,
        &dev_attr_wr_req.attr,
-       &dev_attr_br_req.attr,
+       &dev_attr_f_req.attr,
        &dev_attr_rd_sect.attr,
        &dev_attr_wr_sect.attr,
        NULL
@@ -343,6 +343,7 @@ static int vbd_create(struct blkif_st *blkif, blkif_vdev_t handle,
 {
        struct vbd *vbd;
        struct block_device *bdev;
+       struct request_queue *q;
 
        vbd = &blkif->vbd;
        vbd->handle   = handle;
@@ -375,6 +376,10 @@ static int vbd_create(struct blkif_st *blkif, blkif_vdev_t handle,
        if (vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE)
                vbd->type |= VDISK_REMOVABLE;
 
+       q = bdev_get_queue(bdev);
+       if (q && q->flush_flags)
+               vbd->flush_support = true;
+
        DPRINTK("Successful creation of handle=%04x (dom=%u)\n",
                handle, blkif->domid);
        return 0;
@@ -406,16 +411,16 @@ static int xen_blkbk_remove(struct xenbus_device *dev)
        return 0;
 }
 
-int xen_blkbk_barrier(struct xenbus_transaction xbt,
-                     struct backend_info *be, int state)
+int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
+                             struct backend_info *be, int state)
 {
        struct xenbus_device *dev = be->dev;
        int err;
 
-       err = xenbus_printf(xbt, dev->nodename, "feature-barrier",
+       err = xenbus_printf(xbt, dev->nodename, "feature-flush-cache",
                            "%d", state);
        if (err)
-               xenbus_dev_fatal(dev, err, "writing feature-barrier");
+               xenbus_dev_fatal(dev, err, "writing feature-flush-cache");
 
        return err;
 }
@@ -642,7 +647,7 @@ again:
                return;
        }
 
-       err = xen_blkbk_barrier(xbt, be, 1);
+       err = xen_blkbk_flush_diskcache(xbt, be, be->blkif->vbd.flush_support);
        if (err)
                goto abort;