return ret;
}
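+/* per-sector metadata: bytes in each external LBA beyond the data payload */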
+static u32 btt_meta_size(struct btt *btt)
+{
+ return btt->lbasize - btt->sector_size;
+}
+
/*
* This function calculates the arena in which the given LBA lies
* by doing a linear walk. This is acceptable since we expect only
kunmap_atomic(mem);
}
-static int btt_read_pg(struct btt *btt, struct page *page, unsigned int off,
- sector_t sector, unsigned int len)
+#ifdef CONFIG_BLK_DEV_INTEGRITY
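+/*
+ * Transfer the integrity metadata for one block: walk the bio's integrity
+ * bvecs and read or write btt_meta_size() bytes located immediately after
+ * the data portion of the post-map block in the namespace.
+ */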
+static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
+ struct arena_info *arena, u32 postmap, int rw)
+{
+ unsigned int len = btt_meta_size(btt);
+ u64 meta_nsoff;
+ int ret = 0;
+
+ if (bip == NULL)
+ return 0;
+
+ meta_nsoff = to_namespace_offset(arena, postmap) + btt->sector_size;
+
+ while (len) {
+ unsigned int cur_len;
+ struct bio_vec bv;
+ void *mem;
+
+ bv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
+ /*
+ * The 'bv' obtained from bvec_iter_bvec has its .bv_len and
+ * .bv_offset already adjusted for iter->bi_bvec_done, and we
+ * can use those directly
+ */
+
+ cur_len = min(len, bv.bv_len);
+ mem = kmap_atomic(bv.bv_page);
+ if (rw)
+ ret = arena_write_bytes(arena, meta_nsoff,
+ mem + bv.bv_offset, cur_len);
+ else
+ ret = arena_read_bytes(arena, meta_nsoff,
+ mem + bv.bv_offset, cur_len);
+
+ kunmap_atomic(mem);
+ if (ret)
+ return ret;
+
+ len -= cur_len;
+ meta_nsoff += cur_len;
+ bvec_iter_advance(bip->bip_vec, &bip->bip_iter, cur_len);
+ }
+
+ return ret;
+}
+
+#else /* CONFIG_BLK_DEV_INTEGRITY */
+static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
+ struct arena_info *arena, u32 postmap, int rw)
+{
+ return 0;
+}
+#endif
+
+static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
+ struct page *page, unsigned int off, sector_t sector,
+ unsigned int len)
{
int ret = 0;
int t_flag, e_flag;
if (ret)
goto out_rtt;
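+ /* copy the per-sector metadata for this block into the integrity payload */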
+ if (bip) {
+ ret = btt_rw_integrity(btt, bip, arena, postmap, READ);
+ if (ret)
+ goto out_rtt;
+ }
+
arena->rtt[lane] = RTT_INVALID;
nd_region_release_lane(btt->nd_region, lane);
return ret;
}
-static int btt_write_pg(struct btt *btt, sector_t sector, struct page *page,
- unsigned int off, unsigned int len)
+static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
+ sector_t sector, struct page *page, unsigned int off,
+ unsigned int len)
{
int ret = 0;
struct arena_info *arena = NULL;
if (new_postmap >= arena->internal_nlba) {
ret = -EIO;
goto out_lane;
- } else
- ret = btt_data_write(arena, new_postmap, page,
- off, cur_len);
+ }
+
+ ret = btt_data_write(arena, new_postmap, page, off, cur_len);
if (ret)
goto out_lane;
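+ /* write the per-sector metadata for the new block from the integrity payload */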
+ if (bip) {
+ ret = btt_rw_integrity(btt, bip, arena, new_postmap,
+ WRITE);
+ if (ret)
+ goto out_lane;
+ }
+
lock_map(arena, premap);
ret = btt_map_read(arena, premap, &old_postmap, NULL, NULL);
if (ret)
return ret;
}
-static int btt_do_bvec(struct btt *btt, struct page *page,
- unsigned int len, unsigned int off, int rw,
- sector_t sector)
+static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
+ struct page *page, unsigned int len, unsigned int off,
+ int rw, sector_t sector)
{
int ret;
if (rw == READ) {
- ret = btt_read_pg(btt, page, off, sector, len);
+ ret = btt_read_pg(btt, bip, page, off, sector, len);
flush_dcache_page(page);
} else {
flush_dcache_page(page);
- ret = btt_write_pg(btt, sector, page, off, len);
+ ret = btt_write_pg(btt, bip, sector, page, off, len);
}
return ret;
static void btt_make_request(struct request_queue *q, struct bio *bio)
{
+ struct bio_integrity_payload *bip = bio_integrity(bio);
struct btt *btt = q->queuedata;
struct bvec_iter iter;
struct bio_vec bvec;
int err = 0, rw;
+ /*
+ * bio_integrity_enabled also checks if the bio already has an
+ * integrity payload attached. If it does, we *don't* do a
+ * bio_integrity_prep here - the payload has been generated by
+ * another kernel subsystem, and we just pass it through.
+ */
+ if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
+ err = -EIO;
+ goto out;
+ }
+
rw = bio_data_dir(bio);
bio_for_each_segment(bvec, bio, iter) {
unsigned int len = bvec.bv_len;
BUG_ON(len < btt->sector_size);
BUG_ON(len % btt->sector_size);
- err = btt_do_bvec(btt, bvec.bv_page, len, bvec.bv_offset,
+ err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
rw, iter.bi_sector);
if (err) {
dev_info(&btt->nd_btt->dev,
{
struct btt *btt = bdev->bd_disk->private_data;
- btt_do_bvec(btt, page, PAGE_CACHE_SIZE, 0, rw, sector);
+ btt_do_bvec(btt, NULL, page, PAGE_CACHE_SIZE, 0, rw, sector);
page_endio(page, rw & WRITE, 0);
return 0;
}
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, btt->btt_queue);
btt->btt_queue->queuedata = btt;
- set_capacity(btt->btt_disk,
- btt->nlba * btt->sector_size >> SECTOR_SHIFT);
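+ /*
+ * Keep the capacity at zero across add_disk() so the disk cannot be
+ * used before the integrity profile, if any, is registered below.
+ */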
+ set_capacity(btt->btt_disk, 0);
add_disk(btt->btt_disk);
+ if (btt_meta_size(btt)) {
+ int rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt));
+
+ if (rc) {
+ del_gendisk(btt->btt_disk);
+ put_disk(btt->btt_disk);
+ blk_cleanup_queue(btt->btt_queue);
+ return rc;
+ }
+ }
+ set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> SECTOR_SHIFT);
return 0;
}
static void btt_blk_cleanup(struct btt *btt)
{
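+ /* tear down the integrity profile, if one was registered in btt_blk_init() */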
+ blk_integrity_unregister(btt->btt_disk);
del_gendisk(btt->btt_disk);
put_disk(btt->btt_disk);
blk_cleanup_queue(btt->btt_queue);