lightnvm: pblk: issue multiplane reads if possible
author		Javier González <jg@lightnvm.io>
		Mon, 26 Jun 2017 09:57:20 +0000 (11:57 +0200)
committer	Jens Axboe <axboe@kernel.dk>
		Mon, 26 Jun 2017 22:27:39 +0000 (16:27 -0600)
If a read request is sequential and its size aligns with a
multi-plane page size, use the multi-plane hint to process the I/O in
parallel in the controller.
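
For illustration, a compile-and-run sketch of the resulting read
policy. The NVM_IO_*/NVM_PLANE_* values are assumed from
include/linux/lightnvm.h of this period, and min_write_pgs stands in
for pblk->min_write_pgs; this is a minimal sketch, not the in-kernel
code:

#include <stdio.h>

/* Assumed values, mirroring include/linux/lightnvm.h of this era. */
enum {
	NVM_IO_SNGL_ACCESS	= 0x0,
	NVM_IO_DUAL_ACCESS	= 0x1,
	NVM_IO_QUAD_ACCESS	= 0x2,
	NVM_IO_SUSPEND		= 0x80,
	NVM_IO_SCRAMBLE_ENABLE	= 0x200,
};
enum { NVM_PLANE_SINGLE = 1, NVM_PLANE_DOUBLE = 2, NVM_PLANE_QUAD = 4 };

/* A read is aligned when it covers whole multi-plane pages; this
 * mirrors pblk_io_aligned() below. */
static int io_aligned(int nr_secs, int min_write_pgs)
{
	return !(nr_secs % min_write_pgs);
}

/* Aligned reads get the multi-plane access hint; this mirrors
 * pblk_set_read_mode() below. */
static int read_flags(int plane_mode, int nr_secs, int min_write_pgs)
{
	int flags = NVM_IO_SUSPEND | NVM_IO_SCRAMBLE_ENABLE;

	if (io_aligned(nr_secs, min_write_pgs))
		flags |= plane_mode >> 1;

	return flags;
}

int main(void)
{
	/* Example geometry: quad-plane flash, 8-sector minimal write unit. */
	printf("aligned:   0x%x\n", read_flags(NVM_PLANE_QUAD, 16, 8));
	printf("unaligned: 0x%x\n", read_flags(NVM_PLANE_QUAD, 5, 8));
	return 0;
}

Under these assumed values, an aligned read carries NVM_IO_QUAD_ACCESS
on a quad-plane device, while an unaligned one stays single-plane.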

Signed-off-by: Javier González <javier@cnexlabs.com>
Signed-off-by: Matias Bjørling <matias@cnexlabs.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
drivers/lightnvm/pblk-core.c
drivers/lightnvm/pblk-read.c
drivers/lightnvm/pblk-recovery.c
drivers/lightnvm/pblk.h

diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
index beae1618483fd71fc82a4c85be36e1dff644802d..29565f89a85e0a48e06d4b9dcd9c95d2821d2200 100644
--- a/drivers/lightnvm/pblk-core.c
+++ b/drivers/lightnvm/pblk-core.c
@@ -564,7 +564,6 @@ static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
        int id = line->id;
        int rq_ppas, rq_len;
        int cmd_op, bio_op;
-       int flags;
        int i, j;
        int ret;
        DECLARE_COMPLETION_ONSTACK(wait);
@@ -572,11 +571,9 @@ static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
        if (dir == WRITE) {
                bio_op = REQ_OP_WRITE;
                cmd_op = NVM_OP_PWRITE;
-               flags = pblk_set_progr_mode(pblk, WRITE);
        } else if (dir == READ) {
                bio_op = REQ_OP_READ;
                cmd_op = NVM_OP_PREAD;
-               flags = pblk_set_read_mode(pblk);
        } else
                return -EINVAL;
 
@@ -601,7 +598,6 @@ next_rq:
 
        rqd.bio = bio;
        rqd.opcode = cmd_op;
-       rqd.flags = flags;
        rqd.nr_ppas = rq_ppas;
        rqd.ppa_list = ppa_list;
        rqd.dma_ppa_list = dma_ppa_list;
@@ -609,6 +605,7 @@ next_rq:
        rqd.private = &wait;
 
        if (dir == WRITE) {
+               rqd.flags = pblk_set_progr_mode(pblk, WRITE);
                for (i = 0; i < rqd.nr_ppas; ) {
                        spin_lock(&line->lock);
                        paddr = __pblk_alloc_page(pblk, line, min);
@@ -621,6 +618,11 @@ next_rq:
                for (i = 0; i < rqd.nr_ppas; ) {
                        struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, id);
                        int pos = pblk_dev_ppa_to_pos(geo, ppa);
+                       int read_type = PBLK_READ_RANDOM;
+
+                       if (pblk_io_aligned(pblk, rq_ppas))
+                               read_type = PBLK_READ_SEQUENTIAL;
+                       rqd.flags = pblk_set_read_mode(pblk, read_type);
 
                        while (test_bit(pos, line->blk_bitmap)) {
                                paddr += min;
@@ -717,7 +719,7 @@ static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
        } else if (dir == READ) {
                bio_op = REQ_OP_READ;
                cmd_op = NVM_OP_PREAD;
-               flags = pblk_set_read_mode(pblk);
+               flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
        } else
                return -EINVAL;
 
diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c
index 9c4d89cdd32f0c0200388e1560599a6b4706052b..1e7e98961821606fba0be5d3f98e7ad7f30b3674 100644
--- a/drivers/lightnvm/pblk-read.c
+++ b/drivers/lightnvm/pblk-read.c
@@ -88,6 +88,11 @@ retry:
                        bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
        }
 
+       if (pblk_io_aligned(pblk, nr_secs))
+               rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
+       else
+               rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
+
 #ifdef CONFIG_NVM_DEBUG
        atomic_long_add(nr_secs, &pblk->inflight_reads);
 #endif
@@ -97,8 +102,6 @@ static int pblk_submit_read_io(struct pblk *pblk, struct nvm_rq *rqd)
 {
        int err;
 
-       rqd->flags = pblk_set_read_mode(pblk);
-
        err = pblk_submit_io(pblk, rqd);
        if (err)
                return NVM_IO_ERR;
@@ -177,6 +180,7 @@ static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
 
        rqd->bio = new_bio;
        rqd->nr_ppas = nr_holes;
+       rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
        rqd->end_io = NULL;
 
        if (unlikely(nr_secs > 1 && nr_holes == 1)) {
@@ -290,6 +294,8 @@ retry:
        } else {
                rqd->ppa_addr = ppa;
        }
+
+       rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
 }
 
 int pblk_submit_read(struct pblk *pblk, struct bio *bio)
@@ -497,6 +503,7 @@ int pblk_submit_read_gc(struct pblk *pblk, u64 *lba_list, void *data,
        rqd.end_io = pblk_end_io_sync;
        rqd.private = &wait;
        rqd.nr_ppas = *secs_to_gc;
+       rqd.flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
        rqd.bio = bio;
 
        ret = pblk_submit_read_io(pblk, &rqd);
diff --git a/drivers/lightnvm/pblk-recovery.c b/drivers/lightnvm/pblk-recovery.c
index 7b0ace2f49574b718b4ecb4873fb64fd49660603..b9f2b40bd5a7282c051bd96e11b0cad63530c685 100644
--- a/drivers/lightnvm/pblk-recovery.c
+++ b/drivers/lightnvm/pblk-recovery.c
@@ -257,7 +257,6 @@ next_read_rq:
 
        rqd->bio = bio;
        rqd->opcode = NVM_OP_PREAD;
-       rqd->flags = pblk_set_read_mode(pblk);
        rqd->meta_list = meta_list;
        rqd->nr_ppas = rq_ppas;
        rqd->ppa_list = ppa_list;
@@ -266,6 +265,11 @@ next_read_rq:
        rqd->end_io = pblk_end_io_sync;
        rqd->private = &wait;
 
+       if (pblk_io_aligned(pblk, rq_ppas))
+               rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
+       else
+               rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
+
        for (i = 0; i < rqd->nr_ppas; ) {
                struct ppa_addr ppa;
                int pos;
@@ -473,7 +477,6 @@ next_rq:
 
        rqd->bio = bio;
        rqd->opcode = NVM_OP_PREAD;
-       rqd->flags = pblk_set_read_mode(pblk);
        rqd->meta_list = meta_list;
        rqd->nr_ppas = rq_ppas;
        rqd->ppa_list = ppa_list;
@@ -482,6 +485,11 @@ next_rq:
        rqd->end_io = pblk_end_io_sync;
        rqd->private = &wait;
 
+       if (pblk_io_aligned(pblk, rq_ppas))
+               rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
+       else
+               rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
+
        for (i = 0; i < rqd->nr_ppas; ) {
                struct ppa_addr ppa;
                int pos;
@@ -607,7 +615,6 @@ next_rq:
 
        rqd->bio = bio;
        rqd->opcode = NVM_OP_PREAD;
-       rqd->flags = pblk_set_read_mode(pblk);
        rqd->meta_list = meta_list;
        rqd->nr_ppas = rq_ppas;
        rqd->ppa_list = ppa_list;
@@ -616,6 +623,11 @@ next_rq:
        rqd->end_io = pblk_end_io_sync;
        rqd->private = &wait;
 
+       if (pblk_io_aligned(pblk, rq_ppas))
+               rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
+       else
+               rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
+
        for (i = 0; i < rqd->nr_ppas; ) {
                struct ppa_addr ppa;
                int pos;
diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h
index 50f30434718f7bd5dedeb096db39bcc0c5472fe9..6dc58d360077e62106d22dbf4d16b3478f0858e0 100644
--- a/drivers/lightnvm/pblk.h
+++ b/drivers/lightnvm/pblk.h
@@ -1075,9 +1075,27 @@ static inline int pblk_set_progr_mode(struct pblk *pblk, int type)
        return flags;
 }
 
-static inline int pblk_set_read_mode(struct pblk *pblk)
+enum {
+       PBLK_READ_RANDOM        = 0,
+       PBLK_READ_SEQUENTIAL    = 1,
+};
+
+static inline int pblk_set_read_mode(struct pblk *pblk, int type)
+{
+       struct nvm_tgt_dev *dev = pblk->dev;
+       struct nvm_geo *geo = &dev->geo;
+       int flags;
+
+       flags = NVM_IO_SUSPEND | NVM_IO_SCRAMBLE_ENABLE;
+       if (type == PBLK_READ_SEQUENTIAL)
+               flags |= geo->plane_mode >> 1;
+
+       return flags;
+}
+
+static inline int pblk_io_aligned(struct pblk *pblk, int nr_secs)
 {
-       return NVM_IO_SNGL_ACCESS | NVM_IO_SUSPEND | NVM_IO_SCRAMBLE_ENABLE;
+       return !(nr_secs % pblk->min_write_pgs);
 }
 
 #ifdef CONFIG_NVM_DEBUG
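
A note on the geo->plane_mode >> 1 shift in pblk_set_read_mode()
(pblk_set_progr_mode() uses the same trick): assuming the usual
lightnvm encodings, where plane modes count planes (1/2/4) and the
access hints are 0x0/0x1/0x2, a single right shift maps one onto the
other. A standalone check, with the enum values assumed from
include/linux/lightnvm.h:

#include <assert.h>

/* Assumed encodings from include/linux/lightnvm.h. */
enum { NVM_IO_SNGL_ACCESS = 0x0, NVM_IO_DUAL_ACCESS = 0x1,
       NVM_IO_QUAD_ACCESS = 0x2 };
enum { NVM_PLANE_SINGLE = 1, NVM_PLANE_DOUBLE = 2, NVM_PLANE_QUAD = 4 };

int main(void)
{
	/* The shift halves the plane count: 1 -> 0, 2 -> 1, 4 -> 2. */
	assert((NVM_PLANE_SINGLE >> 1) == NVM_IO_SNGL_ACCESS);
	assert((NVM_PLANE_DOUBLE >> 1) == NVM_IO_DUAL_ACCESS);
	assert((NVM_PLANE_QUAD >> 1) == NVM_IO_QUAD_ACCESS);
	return 0;
}

Since NVM_IO_SNGL_ACCESS is 0x0 under these encodings, dropping it
from the random-read flags (the old return value above) leaves the
single-plane path's behavior unchanged.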