lightnvm: export set bad block table
author Javier González <jg@lightnvm.io>
Mon, 28 Nov 2016 21:38:56 +0000 (22:38 +0100)
committer Jens Axboe <axboe@fb.com>
Tue, 29 Nov 2016 19:12:51 +0000 (12:12 -0700)
Bad blocks should be managed by their block owners: targets for data
blocks, and sysblk for system blocks.

In order to support this, export two functions: one to mark a block as
a specific type (e.g., bad block) and another to update the bad block
table on the device.

Move bad block management to rrpc.
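
As a minimal sketch, a block owner (hypothetical caller, not part of this
patch) that detects a grown bad block can use the two exports like rrpc
does in the diff below:

        /* ppa identifies the failed block (generic ch/lun/blk format) */
        nvm_mark_blk(dev, ppa, NVM_BLK_ST_BAD);           /* update in-memory block state */
        nvm_set_bb_tbl(dev, &ppa, 1, NVM_BLK_T_GRWN_BAD); /* persist to the device bad block table */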

Signed-off-by: Javier González <javier@cnexlabs.com>
Signed-off-by: Matias Bjørling <m@bjorling.me>
Signed-off-by: Jens Axboe <axboe@fb.com>
drivers/lightnvm/core.c
drivers/lightnvm/gennvm.c
drivers/lightnvm/rrpc.c
drivers/lightnvm/sysblk.c
include/linux/lightnvm.h

index 8664fe09cc82c9b8a80457dfb2f96f0bd3f0ff2b..6527cf6862fa09436a46211f87dcd9d408fc8753 100644 (file)
@@ -196,6 +196,33 @@ void nvm_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type)
 }
 EXPORT_SYMBOL(nvm_mark_blk);
 
+int nvm_set_bb_tbl(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas,
+                                                               int type)
+{
+       struct nvm_rq rqd;
+       int ret;
+
+       if (nr_ppas > dev->ops->max_phys_sect) {
+               pr_err("nvm: unable to update all sysblocks atomically\n");
+               return -EINVAL;
+       }
+
+       memset(&rqd, 0, sizeof(struct nvm_rq));
+
+       nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
+       nvm_generic_to_addr_mode(dev, &rqd);
+
+       ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
+       nvm_free_rqd_ppalist(dev, &rqd);
+       if (ret) {
+               pr_err("nvm: sysblk failed bb mark\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(nvm_set_bb_tbl);
+
 int nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
 {
        return dev->mt->submit_io(dev, rqd);
index a7e17faea2cd6e114d2253b350f5f30ca2c4c3b2..e969e3a801c47fd2485dee7f36f11d793b69b267 100644 (file)
@@ -543,34 +543,10 @@ static void gen_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type)
        blk->state = type;
 }
 
-/*
- * mark block bad in gen. It is expected that the target recovers separately
- */
-static void gen_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
-{
-       int bit = -1;
-       int max_secs = dev->ops->max_phys_sect;
-       void *comp_bits = &rqd->ppa_status;
-
-       nvm_addr_to_generic_mode(dev, rqd);
-
-       /* look up blocks and mark them as bad */
-       if (rqd->nr_ppas == 1) {
-               gen_mark_blk(dev, rqd->ppa_addr, NVM_BLK_ST_BAD);
-               return;
-       }
-
-       while ((bit = find_next_bit(comp_bits, max_secs, bit + 1)) < max_secs)
-               gen_mark_blk(dev, rqd->ppa_list[bit], NVM_BLK_ST_BAD);
-}
-
 static void gen_end_io(struct nvm_rq *rqd)
 {
        struct nvm_tgt_instance *ins = rqd->ins;
 
-       if (rqd->error == NVM_RSP_ERR_FAILWRITE)
-               gen_mark_blk_bad(rqd->dev, rqd);
-
        ins->tt->end_io(rqd);
 }
 
index 067e890ae2bf6563492825c014ccc7e68a6f4e5d..2b71b7e59dac5aa95ed9414034300a09f4987226 100644 (file)
@@ -675,6 +675,34 @@ static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
        queue_work(rrpc->kgc_wq, &gcb->ws_gc);
 }
 
+static void __rrpc_mark_bad_block(struct nvm_dev *dev, struct ppa_addr *ppa)
+{
+               nvm_mark_blk(dev, *ppa, NVM_BLK_ST_BAD);
+               nvm_set_bb_tbl(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
+}
+
+static void rrpc_mark_bad_block(struct rrpc *rrpc, struct nvm_rq *rqd)
+{
+       struct nvm_dev *dev = rrpc->dev;
+       void *comp_bits = &rqd->ppa_status;
+       struct ppa_addr ppa, prev_ppa;
+       int nr_ppas = rqd->nr_ppas;
+       int bit;
+
+       if (rqd->nr_ppas == 1)
+               __rrpc_mark_bad_block(dev, &rqd->ppa_addr);
+
+       ppa_set_empty(&prev_ppa);
+       bit = -1;
+       while ((bit = find_next_bit(comp_bits, nr_ppas, bit + 1)) < nr_ppas) {
+               ppa = rqd->ppa_list[bit];
+               if (ppa_cmp_blk(ppa, prev_ppa))
+                       continue;
+
+               __rrpc_mark_bad_block(dev, &ppa);
+       }
+}
+
 static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
                                                sector_t laddr, uint8_t npages)
 {
@@ -701,8 +729,12 @@ static void rrpc_end_io(struct nvm_rq *rqd)
        uint8_t npages = rqd->nr_ppas;
        sector_t laddr = rrpc_get_laddr(rqd->bio) - npages;
 
-       if (bio_data_dir(rqd->bio) == WRITE)
+       if (bio_data_dir(rqd->bio) == WRITE) {
+               if (rqd->error == NVM_RSP_ERR_FAILWRITE)
+                       rrpc_mark_bad_block(rrpc, rqd);
+
                rrpc_end_io_write(rrpc, rrqd, laddr, npages);
+       }
 
        bio_put(rqd->bio);
 
index d229067574159a622564d878f8de0907666bbd6f..fa644afb25de91eeff59b1017863e9d382d8122b 100644 (file)
@@ -267,29 +267,10 @@ static int nvm_scan_block(struct nvm_dev *dev, struct ppa_addr *ppa,
        return found;
 }
 
-static int nvm_set_bb_tbl(struct nvm_dev *dev, struct sysblk_scan *s, int type)
+static int nvm_sysblk_set_bb_tbl(struct nvm_dev *dev, struct sysblk_scan *s,
+                                                               int type)
 {
-       struct nvm_rq rqd;
-       int ret;
-
-       if (s->nr_ppas > dev->ops->max_phys_sect) {
-               pr_err("nvm: unable to update all sysblocks atomically\n");
-               return -EINVAL;
-       }
-
-       memset(&rqd, 0, sizeof(struct nvm_rq));
-
-       nvm_set_rqd_ppalist(dev, &rqd, s->ppas, s->nr_ppas, 1);
-       nvm_generic_to_addr_mode(dev, &rqd);
-
-       ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
-       nvm_free_rqd_ppalist(dev, &rqd);
-       if (ret) {
-               pr_err("nvm: sysblk failed bb mark\n");
-               return -EINVAL;
-       }
-
-       return 0;
+       return nvm_set_bb_tbl(dev, s->ppas, s->nr_ppas, type);
 }
 
 static int nvm_write_and_verify(struct nvm_dev *dev, struct nvm_sb_info *info,
@@ -573,7 +554,7 @@ int nvm_init_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info)
        if (ret)
                goto err_mark;
 
-       ret = nvm_set_bb_tbl(dev, &s, NVM_BLK_T_HOST);
+       ret = nvm_sysblk_set_bb_tbl(dev, &s, NVM_BLK_T_HOST);
        if (ret)
                goto err_mark;
 
@@ -733,7 +714,7 @@ int nvm_dev_factory(struct nvm_dev *dev, int flags)
                mutex_lock(&dev->mlock);
                ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 0);
                if (!ret)
-                       ret = nvm_set_bb_tbl(dev, &s, NVM_BLK_T_FREE);
+                       ret = nvm_sysblk_set_bb_tbl(dev, &s, NVM_BLK_T_FREE);
                mutex_unlock(&dev->mlock);
        }
 err_ppas:
index d87be02edc39c13720a374fb3e1574bf1fba72f0..4480d1c6a1a5c34e6105d851c9b6b72fc0e4ef73 100644 (file)
@@ -423,6 +423,15 @@ static inline struct ppa_addr block_to_ppa(struct nvm_dev *dev,
        return ppa;
 }
 
+static inline int ppa_cmp_blk(struct ppa_addr ppa1, struct ppa_addr ppa2)
+{
+       if (ppa_empty(ppa1) || ppa_empty(ppa2))
+               return 0;
+
+       return ((ppa1.g.ch == ppa2.g.ch) && (ppa1.g.lun == ppa2.g.lun) &&
+                                       (ppa1.g.blk == ppa2.g.blk));
+}
+
 static inline int ppa_to_slc(struct nvm_dev *dev, int slc_pg)
 {
        return dev->lptbl[slc_pg];
@@ -528,7 +537,9 @@ extern struct nvm_dev *nvm_alloc_dev(int);
 extern int nvm_register(struct nvm_dev *);
 extern void nvm_unregister(struct nvm_dev *);
 
-void nvm_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type);
+extern void nvm_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type);
+extern int nvm_set_bb_tbl(struct nvm_dev *dev, struct ppa_addr *ppas,
+                                                       int nr_ppas, int type);
 
 extern int nvm_submit_io(struct nvm_dev *, struct nvm_rq *);
 extern void nvm_generic_to_addr_mode(struct nvm_dev *, struct nvm_rq *);