the same bi_io_vec array, but with the index and size accordingly modified)
- A linked list of bios is used as before for unrelated merges (*) - this
avoids reallocs and makes independent completions easier to handle.
-- Code that traverses the req list needs to make a distinction between
- segments of a request (bio_for_each_segment) and the distinct completion
- units/bios (rq_for_each_bio).
+- Code that traverses the req list can find all the segments of a
+  request by using rq_for_each_segment(). This handles the fact that
+  a request has multiple bios, each of which can have multiple segments.
- Drivers which can't process a large bio in one shot can use the bi_idx
field to keep track of the next bio_vec entry to process.
  (e.g. a 1MB bio needs to be handled in max 128kB chunks for IDE)
3.2.1 Traversing segments and completion units in a request
-The macros bio_for_each_segment() and rq_for_each_bio() should be used for
-traversing the bios in the request list (drivers should avoid directly
-trying to do it themselves). Using these helpers should also make it easier
-to cope with block changes in the future.
+The macro rq_for_each_segment() should be used for traversing the
+segments of the bios in the request list (drivers should avoid trying
+to do it directly themselves). Using this helper should also make it
+easier to cope with block layer changes in the future.
- rq_for_each_bio(bio, rq)
- bio_for_each_segment(bio_vec, bio, i)
- /* bio_vec is now current segment */
+	struct req_iterator iter;
+	struct bio_vec *bvec;
+
+	rq_for_each_segment(bvec, rq, iter)
+		/* bvec is now the current segment */
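+
+For illustration, a minimal sketch of a fuller driver loop built on
+the new iterator (driver_xfer() is a made-up helper standing in for
+the device-specific transfer; bvec_kmap_irq() would be needed instead
+of page_address() if the pages may live in highmem):
+
+	struct req_iterator iter;
+	struct bio_vec *bvec;
+
+	rq_for_each_segment(bvec, rq, iter) {
+		char *buf = page_address(bvec->bv_page) + bvec->bv_offset;
+
+		/* transfer bvec->bv_len bytes starting at buf */
+		driver_xfer(buf, bvec->bv_len);
+	}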
I/O completion callbacks are per-bio rather than per-segment, so drivers
that traverse bio chains on completion need to keep that in mind. Drivers
int seg_size;
int hw_seg_size;
int cluster;
- struct bio *bio;
- int i;
+ struct req_iterator iter;
int high, highprv = 1;
struct request_queue *q = rq->q;
cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
hw_seg_size = seg_size = 0;
phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
- rq_for_each_bio(bio, rq)
- bio_for_each_segment(bv, bio, i) {
+ rq_for_each_segment(bv, rq, iter) {
/*
* the trick here is making sure that a high page is never
* considered part of another segment, since that might
struct scatterlist *sg)
{
struct bio_vec *bvec, *bvprv;
- struct bio *bio;
- int nsegs, i, cluster;
+ struct req_iterator iter;
+ int nsegs, cluster;
nsegs = 0;
cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
-	 * for each bio in rq
+	 * for each segment in rq
*/
bvprv = NULL;
- rq_for_each_bio(bio, rq) {
- /*
- * for each segment in bio
- */
- bio_for_each_segment(bvec, bio, i) {
+ rq_for_each_segment(bvec, rq, iter) {
int nbytes = bvec->bv_len;
if (bvprv && cluster) {
nsegs++;
}
bvprv = bvec;
- } /* segments in bio */
- } /* bios in rq */
+ } /* segments in rq */
return nsegs;
}
/* Compute maximal contiguous buffer size. */
static int buffer_chain_size(void)
{
- struct bio *bio;
struct bio_vec *bv;
- int size, i;
+ int size;
+ struct req_iterator iter;
char *base;
base = bio_data(current_req->bio);
size = 0;
- rq_for_each_bio(bio, current_req) {
- bio_for_each_segment(bv, bio, i) {
- if (page_address(bv->bv_page) + bv->bv_offset !=
- base + size)
- break;
+ rq_for_each_segment(bv, current_req, iter) {
+ if (page_address(bv->bv_page) + bv->bv_offset != base + size)
+ break;
- size += bv->bv_len;
- }
+ size += bv->bv_len;
}
return size >> 9;
{
int remaining; /* number of transferred 512-byte sectors */
struct bio_vec *bv;
- struct bio *bio;
char *buffer, *dma_buffer;
- int size, i;
+ int size;
+ struct req_iterator iter;
max_sector = transfer_size(ssize,
min(max_sector, max_sector_2),
size = current_req->current_nr_sectors << 9;
- rq_for_each_bio(bio, current_req) {
- bio_for_each_segment(bv, bio, i) {
- if (!remaining)
- break;
+ rq_for_each_segment(bv, current_req, iter) {
+ if (!remaining)
+ break;
- size = bv->bv_len;
- SUPBOUND(size, remaining);
+ size = bv->bv_len;
+ SUPBOUND(size, remaining);
- buffer = page_address(bv->bv_page) + bv->bv_offset;
+ buffer = page_address(bv->bv_page) + bv->bv_offset;
#ifdef FLOPPY_SANITY_CHECK
- if (dma_buffer + size >
- floppy_track_buffer + (max_buffer_sectors << 10) ||
- dma_buffer < floppy_track_buffer) {
- DPRINT("buffer overrun in copy buffer %d\n",
- (int)((floppy_track_buffer -
- dma_buffer) >> 9));
- printk("fsector_t=%d buffer_min=%d\n",
- fsector_t, buffer_min);
- printk("current_count_sectors=%ld\n",
- current_count_sectors);
- if (CT(COMMAND) == FD_READ)
- printk("read\n");
- if (CT(COMMAND) == FD_WRITE)
- printk("write\n");
- break;
- }
- if (((unsigned long)buffer) % 512)
- DPRINT("%p buffer not aligned\n", buffer);
-#endif
+ if (dma_buffer + size >
+ floppy_track_buffer + (max_buffer_sectors << 10) ||
+ dma_buffer < floppy_track_buffer) {
+ DPRINT("buffer overrun in copy buffer %d\n",
+ (int)((floppy_track_buffer -
+ dma_buffer) >> 9));
+ printk("fsector_t=%d buffer_min=%d\n",
+ fsector_t, buffer_min);
+ printk("current_count_sectors=%ld\n",
+ current_count_sectors);
if (CT(COMMAND) == FD_READ)
- memcpy(buffer, dma_buffer, size);
- else
- memcpy(dma_buffer, buffer, size);
-
- remaining -= size;
- dma_buffer += size;
+ printk("read\n");
+ if (CT(COMMAND) == FD_WRITE)
+ printk("write\n");
+ break;
}
+ if (((unsigned long)buffer) % 512)
+ DPRINT("%p buffer not aligned\n", buffer);
+#endif
+ if (CT(COMMAND) == FD_READ)
+ memcpy(buffer, dma_buffer, size);
+ else
+ memcpy(dma_buffer, buffer, size);
+
+ remaining -= size;
+ dma_buffer += size;
}
#ifdef FLOPPY_SANITY_CHECK
if (remaining) {
* return the total length. */
static unsigned int req_to_dma(struct request *req, struct lguest_dma *dma)
{
- unsigned int i = 0, idx, len = 0;
- struct bio *bio;
+ unsigned int i = 0, len = 0;
+ struct req_iterator iter;
+ struct bio_vec *bvec;
- rq_for_each_bio(bio, req) {
- struct bio_vec *bvec;
- bio_for_each_segment(bvec, bio, idx) {
+ rq_for_each_segment(bvec, req, iter) {
/* We told the block layer not to give us too many. */
BUG_ON(i == LGUEST_MAX_DMA_SECTIONS);
/* If we had a zero-length segment, it would look like
dma->len[i] = bvec->bv_len;
len += bvec->bv_len;
i++;
- }
}
/* If the array isn't full, we mark the end with a 0 length */
if (i < LGUEST_MAX_DMA_SECTIONS)
static int nbd_send_req(struct nbd_device *lo, struct request *req)
{
- int result, i, flags;
+ int result, flags;
struct nbd_request request;
unsigned long size = req->nr_sectors << 9;
struct socket *sock = lo->sock;
}
if (nbd_cmd(req) == NBD_CMD_WRITE) {
- struct bio *bio;
+ struct req_iterator iter;
+ struct bio_vec *bvec;
/*
* we are really probing at internals to determine
* whether to set MSG_MORE or not...
*/
- rq_for_each_bio(bio, req) {
- struct bio_vec *bvec;
- bio_for_each_segment(bvec, bio, i) {
+ rq_for_each_segment(bvec, req, iter) {
flags = 0;
- if ((i < (bio->bi_vcnt - 1)) || bio->bi_next)
+ if (!rq_iter_last(req, iter))
flags = MSG_MORE;
dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n",
lo->disk->disk_name, req,
result);
goto error_out;
}
- }
}
}
return 0;
dprintk(DBG_RX, "%s: request %p: got reply\n",
lo->disk->disk_name, req);
if (nbd_cmd(req) == NBD_CMD_READ) {
- int i;
- struct bio *bio;
- rq_for_each_bio(bio, req) {
- struct bio_vec *bvec;
- bio_for_each_segment(bvec, bio, i) {
+ struct req_iterator iter;
+ struct bio_vec *bvec;
+
+ rq_for_each_segment(bvec, req, iter) {
result = sock_recv_bvec(sock, bvec);
if (result <= 0) {
printk(KERN_ERR "%s: Receive data failed (result %d)\n",
}
dprintk(DBG_RX, "%s: request %p: got %d bytes data\n",
lo->disk->disk_name, req, bvec->bv_len);
- }
}
}
return req;
struct request *req, int gather)
{
unsigned int offset = 0;
- struct bio *bio;
- sector_t sector;
+ struct req_iterator iter;
struct bio_vec *bvec;
- unsigned int i = 0, j;
+ unsigned int i = 0;
size_t size;
void *buf;
- rq_for_each_bio(bio, req) {
- sector = bio->bi_sector;
+ rq_for_each_segment(bvec, req, iter) {
+ unsigned long flags;
dev_dbg(&dev->sbd.core,
"%s:%u: bio %u: %u segs %u sectors from %lu\n",
- __func__, __LINE__, i, bio_segments(bio),
- bio_sectors(bio), sector);
- bio_for_each_segment(bvec, bio, j) {
+ __func__, __LINE__, i, bio_segments(iter.bio),
+ bio_sectors(iter.bio),
+ (unsigned long)iter.bio->bi_sector);
+
size = bvec->bv_len;
- buf = __bio_kmap_atomic(bio, j, KM_IRQ0);
+ buf = bvec_kmap_irq(bvec, &flags);
if (gather)
memcpy(dev->bounce_buf+offset, buf, size);
else
memcpy(buf, dev->bounce_buf+offset, size);
offset += size;
- flush_kernel_dcache_page(bio_iovec_idx(bio, j)->bv_page);
- __bio_kunmap_atomic(bio, KM_IRQ0);
- }
+ flush_kernel_dcache_page(bvec->bv_page);
+ bvec_kunmap_irq(bvec, &flags);
+
i++;
}
}
#ifdef DEBUG
unsigned int n = 0;
- struct bio *bio;
+ struct bio_vec *bv;
+ struct req_iterator iter;
- rq_for_each_bio(bio, req)
+ rq_for_each_segment(bv, req, iter)
n++;
dev_dbg(&dev->sbd.core,
- "%s:%u: %s req has %u bios for %lu sectors %lu hard sectors\n",
+ "%s:%u: %s req has %u bvecs for %lu sectors %lu hard sectors\n",
__func__, __LINE__, op, n, req->nr_sectors,
req->hard_nr_sectors);
#endif
struct blkfront_info *info = req->rq_disk->private_data;
unsigned long buffer_mfn;
struct blkif_request *ring_req;
- struct bio *bio;
+ struct req_iterator iter;
struct bio_vec *bvec;
- int idx;
unsigned long id;
unsigned int fsect, lsect;
int ref;
ring_req->operation = BLKIF_OP_WRITE_BARRIER;
ring_req->nr_segments = 0;
- rq_for_each_bio (bio, req) {
- bio_for_each_segment (bvec, bio, idx) {
+ rq_for_each_segment(bvec, req, iter) {
BUG_ON(ring_req->nr_segments
== BLKIF_MAX_SEGMENTS_PER_REQUEST);
buffer_mfn = pfn_to_mfn(page_to_pfn(bvec->bv_page));
.last_sect = lsect };
ring_req->nr_segments++;
- }
}
info->ring.req_prod_pvt++;
{
struct request *rq = pc->rq;
struct bio_vec *bvec;
- struct bio *bio;
+ struct req_iterator iter;
unsigned long flags;
char *data;
- int count, i, done = 0;
+ int count, done = 0;
- rq_for_each_bio(bio, rq) {
- bio_for_each_segment(bvec, bio, i) {
+ rq_for_each_segment(bvec, rq, iter) {
if (!bcount)
break;
bcount -= count;
pc->b_count += count;
done += count;
- }
}
idefloppy_do_end_request(drive, 1, done >> 9);
static void idefloppy_output_buffers (ide_drive_t *drive, idefloppy_pc_t *pc, unsigned int bcount)
{
struct request *rq = pc->rq;
- struct bio *bio;
+ struct req_iterator iter;
struct bio_vec *bvec;
unsigned long flags;
- int count, i, done = 0;
+ int count, done = 0;
char *data;
- rq_for_each_bio(bio, rq) {
- bio_for_each_segment(bvec, bio, i) {
+ rq_for_each_segment(bvec, rq, iter) {
if (!bcount)
break;
bcount -= count;
pc->b_count += count;
done += count;
- }
}
idefloppy_do_end_request(drive, 1, done >> 9);
struct dasd_ccw_req *cqr;
struct dasd_diag_req *dreq;
struct dasd_diag_bio *dbio;
- struct bio *bio;
+ struct req_iterator iter;
struct bio_vec *bv;
char *dst;
unsigned int count, datasize;
sector_t recid, first_rec, last_rec;
unsigned int blksize, off;
unsigned char rw_cmd;
- int i;
if (rq_data_dir(req) == READ)
rw_cmd = MDSK_READ_REQ;
last_rec = (req->sector + req->nr_sectors - 1) >> device->s2b_shift;
/* Check struct bio and count the number of blocks for the request. */
count = 0;
- rq_for_each_bio(bio, req) {
- bio_for_each_segment(bv, bio, i) {
+ rq_for_each_segment(bv, req, iter) {
if (bv->bv_len & (blksize - 1))
/* Fba can only do full blocks. */
return ERR_PTR(-EINVAL);
count += bv->bv_len >> (device->s2b_shift + 9);
- }
}
/* Paranoia. */
if (count != last_rec - first_rec + 1)
dreq->block_count = count;
dbio = dreq->bio;
recid = first_rec;
- rq_for_each_bio(bio, req) {
- bio_for_each_segment(bv, bio, i) {
+ rq_for_each_segment(bv, req, iter) {
dst = page_address(bv->bv_page) + bv->bv_offset;
for (off = 0; off < bv->bv_len; off += blksize) {
memset(dbio, 0, sizeof (struct dasd_diag_bio));
dst += blksize;
recid++;
}
- }
}
cqr->retries = DIAG_MAX_RETRIES;
cqr->buildclk = get_clock();
struct LO_eckd_data *LO_data;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
- struct bio *bio;
+ struct req_iterator iter;
struct bio_vec *bv;
char *dst;
unsigned int blksize, blk_per_trk, off;
sector_t first_trk, last_trk;
unsigned int first_offs, last_offs;
unsigned char cmd, rcmd;
- int i;
private = (struct dasd_eckd_private *) device->private;
if (rq_data_dir(req) == READ)
/* Check struct bio and count the number of blocks for the request. */
count = 0;
cidaw = 0;
- rq_for_each_bio(bio, req) {
- bio_for_each_segment(bv, bio, i) {
+ rq_for_each_segment(bv, req, iter) {
if (bv->bv_len & (blksize - 1))
/* Eckd can only do full blocks. */
return ERR_PTR(-EINVAL);
bv->bv_len))
cidaw += bv->bv_len >> (device->s2b_shift + 9);
#endif
- }
}
/* Paranoia. */
if (count != last_rec - first_rec + 1)
locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
last_rec - recid + 1, cmd, device, blksize);
}
- rq_for_each_bio(bio, req) bio_for_each_segment(bv, bio, i) {
+ rq_for_each_segment(bv, req, iter) {
dst = page_address(bv->bv_page) + bv->bv_offset;
if (dasd_page_cache) {
char *copy = kmem_cache_alloc(dasd_page_cache,
{
struct dasd_eckd_private *private;
struct ccw1 *ccw;
- struct bio *bio;
+ struct req_iterator iter;
struct bio_vec *bv;
char *dst, *cda;
unsigned int blksize, blk_per_trk, off;
sector_t recid;
- int i, status;
+ int status;
if (!dasd_page_cache)
goto out;
ccw++;
if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
ccw++;
- rq_for_each_bio(bio, req) bio_for_each_segment(bv, bio, i) {
+ rq_for_each_segment(bv, req, iter) {
dst = page_address(bv->bv_page) + bv->bv_offset;
for (off = 0; off < bv->bv_len; off += blksize) {
/* Skip locate record. */
struct LO_fba_data *LO_data;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
- struct bio *bio;
+ struct req_iterator iter;
struct bio_vec *bv;
char *dst;
int count, cidaw, cplength, datasize;
sector_t recid, first_rec, last_rec;
unsigned int blksize, off;
unsigned char cmd;
- int i;
private = (struct dasd_fba_private *) device->private;
if (rq_data_dir(req) == READ) {
/* Check struct bio and count the number of blocks for the request. */
count = 0;
cidaw = 0;
- rq_for_each_bio(bio, req) {
- bio_for_each_segment(bv, bio, i) {
+ rq_for_each_segment(bv, req, iter) {
if (bv->bv_len & (blksize - 1))
/* Fba can only do full blocks. */
return ERR_PTR(-EINVAL);
bv->bv_len))
cidaw += bv->bv_len / blksize;
#endif
- }
}
/* Paranoia. */
if (count != last_rec - first_rec + 1)
locate_record(ccw++, LO_data++, rq_data_dir(req), 0, count);
}
recid = first_rec;
- rq_for_each_bio(bio, req) bio_for_each_segment(bv, bio, i) {
+ rq_for_each_segment(bv, req, iter) {
dst = page_address(bv->bv_page) + bv->bv_offset;
if (dasd_page_cache) {
char *copy = kmem_cache_alloc(dasd_page_cache,
{
struct dasd_fba_private *private;
struct ccw1 *ccw;
- struct bio *bio;
+ struct req_iterator iter;
struct bio_vec *bv;
char *dst, *cda;
unsigned int blksize, off;
- int i, status;
+ int status;
if (!dasd_page_cache)
goto out;
ccw++;
if (private->rdc_data.mode.bits.data_chain != 0)
ccw++;
- rq_for_each_bio(bio, req) bio_for_each_segment(bv, bio, i) {
+ rq_for_each_segment(bv, req, iter) {
dst = page_address(bv->bv_page) + bv->bv_offset;
for (off = 0; off < bv->bv_len; off += blksize) {
/* Skip locate record. */
{
struct tape_request *request;
struct ccw1 *ccw;
- int count = 0, i;
+ int count = 0;
unsigned off;
char *dst;
struct bio_vec *bv;
- struct bio *bio;
+ struct req_iterator iter;
struct tape_34xx_block_id * start_block;
DBF_EVENT(6, "xBREDid:");
/* Count the number of blocks for the request. */
- rq_for_each_bio(bio, req) {
- bio_for_each_segment(bv, bio, i) {
- count += bv->bv_len >> (TAPEBLOCK_HSEC_S2B + 9);
- }
- }
+ rq_for_each_segment(bv, req, iter)
+ count += bv->bv_len >> (TAPEBLOCK_HSEC_S2B + 9);
/* Allocate the ccw request. */
request = tape_alloc_request(3+count+1, 8);
ccw = tape_ccw_cc(ccw, NOP, 0, NULL);
ccw = tape_ccw_cc(ccw, NOP, 0, NULL);
- rq_for_each_bio(bio, req) {
- bio_for_each_segment(bv, bio, i) {
+ rq_for_each_segment(bv, req, iter) {
dst = kmap(bv->bv_page) + bv->bv_offset;
for (off = 0; off < bv->bv_len;
off += TAPEBLOCK_HSEC_SIZE) {
ccw++;
dst += TAPEBLOCK_HSEC_SIZE;
}
- }
}
ccw = tape_ccw_end(ccw, NOP, 0, NULL);
{
struct tape_request *request;
struct ccw1 *ccw;
- int count = 0, start_block, i;
+ int count = 0, start_block;
unsigned off;
char *dst;
struct bio_vec *bv;
- struct bio *bio;
+ struct req_iterator iter;
DBF_EVENT(6, "xBREDid:");
start_block = req->sector >> TAPEBLOCK_HSEC_S2B;
DBF_EVENT(6, "start_block = %i\n", start_block);
- rq_for_each_bio(bio, req) {
- bio_for_each_segment(bv, bio, i) {
- count += bv->bv_len >> (TAPEBLOCK_HSEC_S2B + 9);
- }
- }
+ rq_for_each_segment(bv, req, iter)
+ count += bv->bv_len >> (TAPEBLOCK_HSEC_S2B + 9);
+
request = tape_alloc_request(2 + count + 1, 4);
if (IS_ERR(request))
return request;
*/
ccw = tape_ccw_cc(ccw, NOP, 0, NULL);
- rq_for_each_bio(bio, req) {
- bio_for_each_segment(bv, bio, i) {
+ rq_for_each_segment(bv, req, iter) {
dst = page_address(bv->bv_page) + bv->bv_offset;
for (off = 0; off < bv->bv_len;
off += TAPEBLOCK_HSEC_SIZE) {
}
if (off > bv->bv_len)
BUG();
- }
}
ccw = tape_ccw_end(ccw, NOP, 0, NULL);
DBF_EVENT(6, "xBREDccwg\n");
}
#endif /* CONFIG_MMU */
-#define rq_for_each_bio(_bio, rq) \
+struct req_iterator {
+ int i;
+ struct bio *bio;
+};
+
+/* This should not be used directly - use rq_for_each_segment */
+#define __rq_for_each_bio(_bio, rq) \
if ((rq->bio)) \
for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
+#define rq_for_each_segment(bvl, _rq, _iter) \
+ __rq_for_each_bio(_iter.bio, _rq) \
+ bio_for_each_segment(bvl, _iter.bio, _iter.i)
+
+#define rq_iter_last(rq, _iter) \
+ (_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1)
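+
+/*
+ * Example (sketch): rq_iter_last() lets a driver detect the final
+ * segment of a request, e.g. to stop hinting that more data follows.
+ * send_segment() below is a hypothetical driver helper:
+ *
+ *	rq_for_each_segment(bvec, rq, iter) {
+ *		int last = rq_iter_last(rq, iter);
+ *		send_segment(bvec, last);
+ *	}
+ */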
+
extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern void register_disk(struct gendisk *dev);