libnvdimm, btt: add support for blk integrity
author Vishal Verma <vishal.l.verma@intel.com>
Thu, 25 Jun 2015 08:21:52 +0000 (04:21 -0400)
committer Dan Williams <dan.j.williams@intel.com>
Fri, 26 Jun 2015 15:23:38 +0000 (11:23 -0400)
Support multiple block sizes (sector plus metadata) using the block layer
integrity framework. This registers a new integrity template that sets the
protection information tuple size to the configured metadata size, and
simply acts as a passthrough for protection information generated by
another layer. The metadata is written to storage as-is, and read back
with each sector.
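
As a concrete illustration of the sizing, each supported lbasize carries
lbasize - sector_size bytes of metadata per block: with 512-byte sectors,
520 and 528 leave 8 and 16 bytes of protection information, and with
4096-byte sectors, 4104, 4160 and 4224 leave 8, 64 and 128 bytes (plain
512 and 4096 carry none). The standalone user-space sketch below is not
part of the patch; the lbasize-to-sector pairing is an assumption taken
from the supported-size list added to btt_devs.c, and it simply prints
that mapping:

/*
 * Illustrative sketch only: mirrors btt_meta_size(), i.e.
 * metadata bytes per block = lbasize - sector_size.  The pairing of
 * sub-4K lbasizes with 512-byte sectors (and the rest with 4096-byte
 * sectors) is assumed from the supported-size list in this patch.
 */
#include <stdio.h>

int main(void)
{
	static const unsigned long lbasizes[] = {
		512, 520, 528, 4096, 4104, 4160, 4224
	};
	unsigned int i;

	for (i = 0; i < sizeof(lbasizes) / sizeof(lbasizes[0]); i++) {
		unsigned long lbasize = lbasizes[i];
		unsigned long sector_size = (lbasize >= 4096) ? 4096 : 512;

		printf("lbasize %4lu = sector %4lu + metadata %3lu bytes\n",
		       lbasize, sector_size, lbasize - sector_size);
	}
	return 0;
}

An 8-byte tuple per sector, for instance, matches the standard T10 DIF
layout that an upper layer might generate and pass through unchanged.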

Signed-off-by: Vishal Verma <vishal.l.verma@linux.intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
drivers/nvdimm/btt.c
drivers/nvdimm/btt.h
drivers/nvdimm/btt_devs.c
drivers/nvdimm/core.c
drivers/nvdimm/nd.h

drivers/nvdimm/btt.c
index 7ae38aac2c25eda10753d78a75ecc5d4acbe6977..18a2463c230069af7bf89cc51d80f83bacdd480d 100644 (file)
@@ -837,6 +837,11 @@ static int btt_meta_init(struct btt *btt)
        return ret;
 }
 
+static u32 btt_meta_size(struct btt *btt)
+{
+       return btt->lbasize - btt->sector_size;
+}
+
 /*
  * This function calculates the arena in which the given LBA lies
  * by doing a linear walk. This is acceptable since we expect only
@@ -921,8 +926,63 @@ static void zero_fill_data(struct page *page, unsigned int off, u32 len)
        kunmap_atomic(mem);
 }
 
-static int btt_read_pg(struct btt *btt, struct page *page, unsigned int off,
-                       sector_t sector, unsigned int len)
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
+                       struct arena_info *arena, u32 postmap, int rw)
+{
+       unsigned int len = btt_meta_size(btt);
+       u64 meta_nsoff;
+       int ret = 0;
+
+       if (bip == NULL)
+               return 0;
+
+       meta_nsoff = to_namespace_offset(arena, postmap) + btt->sector_size;
+
+       while (len) {
+               unsigned int cur_len;
+               struct bio_vec bv;
+               void *mem;
+
+               bv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
+               /*
+                * The 'bv' obtained from bvec_iter_bvec has its .bv_len and
+                * .bv_offset already adjusted for iter->bi_bvec_done, and we
+                * can use those directly
+                */
+
+               cur_len = min(len, bv.bv_len);
+               mem = kmap_atomic(bv.bv_page);
+               if (rw)
+                       ret = arena_write_bytes(arena, meta_nsoff,
+                                       mem + bv.bv_offset, cur_len);
+               else
+                       ret = arena_read_bytes(arena, meta_nsoff,
+                                       mem + bv.bv_offset, cur_len);
+
+               kunmap_atomic(mem);
+               if (ret)
+                       return ret;
+
+               len -= cur_len;
+               meta_nsoff += cur_len;
+               bvec_iter_advance(bip->bip_vec, &bip->bip_iter, cur_len);
+       }
+
+       return ret;
+}
+
+#else /* CONFIG_BLK_DEV_INTEGRITY */
+static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
+                       struct arena_info *arena, u32 postmap, int rw)
+{
+       return 0;
+}
+#endif
+
+static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
+                       struct page *page, unsigned int off, sector_t sector,
+                       unsigned int len)
 {
        int ret = 0;
        int t_flag, e_flag;
@@ -984,6 +1044,12 @@ static int btt_read_pg(struct btt *btt, struct page *page, unsigned int off,
                if (ret)
                        goto out_rtt;
 
+               if (bip) {
+                       ret = btt_rw_integrity(btt, bip, arena, postmap, READ);
+                       if (ret)
+                               goto out_rtt;
+               }
+
                arena->rtt[lane] = RTT_INVALID;
                nd_region_release_lane(btt->nd_region, lane);
 
@@ -1001,8 +1067,9 @@ static int btt_read_pg(struct btt *btt, struct page *page, unsigned int off,
        return ret;
 }
 
-static int btt_write_pg(struct btt *btt, sector_t sector, struct page *page,
-               unsigned int off, unsigned int len)
+static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
+                       sector_t sector, struct page *page, unsigned int off,
+                       unsigned int len)
 {
        int ret = 0;
        struct arena_info *arena = NULL;
@@ -1036,12 +1103,19 @@ static int btt_write_pg(struct btt *btt, sector_t sector, struct page *page,
                if (new_postmap >= arena->internal_nlba) {
                        ret = -EIO;
                        goto out_lane;
-               } else
-                       ret = btt_data_write(arena, new_postmap, page,
-                                               off, cur_len);
+               }
+
+               ret = btt_data_write(arena, new_postmap, page, off, cur_len);
                if (ret)
                        goto out_lane;
 
+               if (bip) {
+                       ret = btt_rw_integrity(btt, bip, arena, new_postmap,
+                                               WRITE);
+                       if (ret)
+                               goto out_lane;
+               }
+
                lock_map(arena, premap);
                ret = btt_map_read(arena, premap, &old_postmap, NULL, NULL);
                if (ret)
@@ -1081,18 +1155,18 @@ static int btt_write_pg(struct btt *btt, sector_t sector, struct page *page,
        return ret;
 }
 
-static int btt_do_bvec(struct btt *btt, struct page *page,
-                       unsigned int len, unsigned int off, int rw,
-                       sector_t sector)
+static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
+                       struct page *page, unsigned int len, unsigned int off,
+                       int rw, sector_t sector)
 {
        int ret;
 
        if (rw == READ) {
-               ret = btt_read_pg(btt, page, off, sector, len);
+               ret = btt_read_pg(btt, bip, page, off, sector, len);
                flush_dcache_page(page);
        } else {
                flush_dcache_page(page);
-               ret = btt_write_pg(btt, sector, page, off, len);
+               ret = btt_write_pg(btt, bip, sector, page, off, len);
        }
 
        return ret;
@@ -1100,11 +1174,23 @@ static int btt_do_bvec(struct btt *btt, struct page *page,
 
 static void btt_make_request(struct request_queue *q, struct bio *bio)
 {
+       struct bio_integrity_payload *bip = bio_integrity(bio);
        struct btt *btt = q->queuedata;
        struct bvec_iter iter;
        struct bio_vec bvec;
        int err = 0, rw;
 
+       /*
+        * bio_integrity_enabled also checks if the bio already has an
+        * integrity payload attached. If it does, we *don't* do a
+        * bio_integrity_prep here - the payload has been generated by
+        * another kernel subsystem, and we just pass it through.
+        */
+       if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
+               err = -EIO;
+               goto out;
+       }
+
        rw = bio_data_dir(bio);
        bio_for_each_segment(bvec, bio, iter) {
                unsigned int len = bvec.bv_len;
@@ -1115,7 +1201,7 @@ static void btt_make_request(struct request_queue *q, struct bio *bio)
                BUG_ON(len < btt->sector_size);
                BUG_ON(len % btt->sector_size);
 
-               err = btt_do_bvec(btt, bvec.bv_page, len, bvec.bv_offset,
+               err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
                                rw, iter.bi_sector);
                if (err) {
                        dev_info(&btt->nd_btt->dev,
@@ -1135,7 +1221,7 @@ static int btt_rw_page(struct block_device *bdev, sector_t sector,
 {
        struct btt *btt = bdev->bd_disk->private_data;
 
-       btt_do_bvec(btt, page, PAGE_CACHE_SIZE, 0, rw, sector);
+       btt_do_bvec(btt, NULL, page, PAGE_CACHE_SIZE, 0, rw, sector);
        page_endio(page, rw & WRITE, 0);
        return 0;
 }
@@ -1188,15 +1274,26 @@ static int btt_blk_init(struct btt *btt)
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, btt->btt_queue);
        btt->btt_queue->queuedata = btt;
 
-       set_capacity(btt->btt_disk,
-                       btt->nlba * btt->sector_size >> SECTOR_SHIFT);
+       set_capacity(btt->btt_disk, 0);
        add_disk(btt->btt_disk);
+       if (btt_meta_size(btt)) {
+               int rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt));
+
+               if (rc) {
+                       del_gendisk(btt->btt_disk);
+                       put_disk(btt->btt_disk);
+                       blk_cleanup_queue(btt->btt_queue);
+                       return rc;
+               }
+       }
+       set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
 
        return 0;
 }
 
 static void btt_blk_cleanup(struct btt *btt)
 {
+       blk_integrity_unregister(btt->btt_disk);
        del_gendisk(btt->btt_disk);
        put_disk(btt->btt_disk);
        blk_cleanup_queue(btt->btt_queue);
drivers/nvdimm/btt.h
index 8c95a7792c3ebc861b98e4ed0e50204df088238a..2caa0ef7e67a29af7b6e025379d77d84fb76cce9 100644 (file)
@@ -31,7 +31,7 @@
 #define ARENA_MAX_SIZE (1ULL << 39)    /* 512 GB */
 #define RTT_VALID (1UL << 31)
 #define RTT_INVALID 0
-#define INT_LBASIZE_ALIGNMENT 256
+#define INT_LBASIZE_ALIGNMENT 64
 #define BTT_PG_SIZE 4096
 #define BTT_DEFAULT_NFREE ND_MAX_LANES
 #define LOG_SEQ_INIT 1
drivers/nvdimm/btt_devs.c
index 470fbdccd0acede02141938052c53095887302e0..661aacedc14007c7cebbe150d2248fdd17d08626 100644 (file)
@@ -103,7 +103,8 @@ struct nd_btt *to_nd_btt(struct device *dev)
 }
 EXPORT_SYMBOL(to_nd_btt);
 
-static const unsigned long btt_lbasize_supported[] = { 512, 4096, 0 };
+static const unsigned long btt_lbasize_supported[] = { 512, 520, 528,
+       4096, 4104, 4160, 4224, 0 };
 
 static ssize_t sector_size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
drivers/nvdimm/core.c
index dd824d7c266986c6a3883f1a8865abc87f4c2da8..1d96b9a6e4cc0fb0274c9403ea6925095f04afc2 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/libnvdimm.h>
 #include <linux/export.h>
 #include <linux/module.h>
+#include <linux/blkdev.h>
 #include <linux/device.h>
 #include <linux/ctype.h>
 #include <linux/ndctl.h>
@@ -361,6 +362,42 @@ void nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus)
 }
 EXPORT_SYMBOL_GPL(nvdimm_bus_unregister);
 
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+static int nd_pi_nop_generate_verify(struct blk_integrity_iter *iter)
+{
+       return 0;
+}
+
+int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
+{
+       struct blk_integrity integrity = {
+               .name = "ND-PI-NOP",
+               .generate_fn = nd_pi_nop_generate_verify,
+               .verify_fn = nd_pi_nop_generate_verify,
+               .tuple_size = meta_size,
+               .tag_size = meta_size,
+       };
+       int ret;
+
+       ret = blk_integrity_register(disk, &integrity);
+       if (ret)
+               return ret;
+
+       blk_queue_max_integrity_segments(disk->queue, 1);
+
+       return 0;
+}
+EXPORT_SYMBOL(nd_integrity_init);
+
+#else /* CONFIG_BLK_DEV_INTEGRITY */
+int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
+{
+       return 0;
+}
+EXPORT_SYMBOL(nd_integrity_init);
+
+#endif
+
 static __init int libnvdimm_init(void)
 {
        int rc;
drivers/nvdimm/nd.h
index f153f43ca3d6959363cf59de77c866e000ae3ca4..f4459faa456cff0fb6ee413c0f46d4089bf8e73a 100644 (file)
@@ -136,6 +136,7 @@ enum nd_async_mode {
        ND_ASYNC,
 };
 
+int nd_integrity_init(struct gendisk *disk, unsigned long meta_size);
 void wait_nvdimm_bus_probe_idle(struct device *dev);
 void nd_device_register(struct device *dev);
 void nd_device_unregister(struct device *dev, enum nd_async_mode mode);