Remove one level of nesting where appropriate, now that these drivers iterate with a single rq_for_each_segment() loop.
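
For context, rq_for_each_segment() hides the old two-level walk (per bio, then per bio_vec) behind one macro, so loop bodies that used to sit inside bio_for_each_segment() can lose an indentation level. A rough sketch of the helper, paraphrased from the include/linux/blkdev.h of this series and shown here only for illustration (it is not part of the diff below):

	struct req_iterator {
		int i;			/* current bio_vec index within the bio */
		struct bio *bio;	/* current bio within the request */
	};

	/* Visit every bio_vec of every bio in the request with a single loop. */
	#define rq_for_each_segment(bvl, rq, _iter)			\
		__rq_for_each_bio(_iter.bio, rq)			\
			bio_for_each_segment(bvl, _iter.bio, _iter.i)

Because the bio loop now lives inside the helper, the per-segment code in the hunks below moves up one indentation level; apart from re-wrapping a few lines that no longer need to be split, the statements themselves are unchanged.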
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
*/
bvprv = NULL;
rq_for_each_segment(bvec, rq, iter) {
- int nbytes = bvec->bv_len;
+ int nbytes = bvec->bv_len;
- if (bvprv && cluster) {
- if (sg[nsegs - 1].length + nbytes > q->max_segment_size)
- goto new_segment;
+ if (bvprv && cluster) {
+ if (sg[nsegs - 1].length + nbytes > q->max_segment_size)
+ goto new_segment;
- if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
- goto new_segment;
- if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
- goto new_segment;
+ if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
+ goto new_segment;
+ if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
+ goto new_segment;
- sg[nsegs - 1].length += nbytes;
- } else {
+ sg[nsegs - 1].length += nbytes;
+ } else {
new_segment:
- memset(&sg[nsegs],0,sizeof(struct scatterlist));
- sg[nsegs].page = bvec->bv_page;
- sg[nsegs].length = nbytes;
- sg[nsegs].offset = bvec->bv_offset;
+ memset(&sg[nsegs],0,sizeof(struct scatterlist));
+ sg[nsegs].page = bvec->bv_page;
+ sg[nsegs].length = nbytes;
+ sg[nsegs].offset = bvec->bv_offset;
- nsegs++;
- }
- bvprv = bvec;
+ nsegs++;
+ }
+ bvprv = bvec;
} /* segments in rq */
return nsegs;
struct bio_vec *bvec;
rq_for_each_segment(bvec, req, iter) {
- /* We told the block layer not to give us too many. */
- BUG_ON(i == LGUEST_MAX_DMA_SECTIONS);
- /* If we had a zero-length segment, it would look like
- * the end of the data referred to by the "struct
- * lguest_dma", so make sure that doesn't happen. */
- BUG_ON(!bvec->bv_len);
- /* Convert page & offset to a physical address */
- dma->addr[i] = page_to_phys(bvec->bv_page)
- + bvec->bv_offset;
- dma->len[i] = bvec->bv_len;
- len += bvec->bv_len;
- i++;
+ /* We told the block layer not to give us too many. */
+ BUG_ON(i == LGUEST_MAX_DMA_SECTIONS);
+ /* If we had a zero-length segment, it would look like
+ * the end of the data referred to by the "struct
+ * lguest_dma", so make sure that doesn't happen. */
+ BUG_ON(!bvec->bv_len);
+ /* Convert page & offset to a physical address */
+ dma->addr[i] = page_to_phys(bvec->bv_page)
+ + bvec->bv_offset;
+ dma->len[i] = bvec->bv_len;
+ len += bvec->bv_len;
+ i++;
}
/* If the array isn't full, we mark the end with a 0 length */
if (i < LGUEST_MAX_DMA_SECTIONS)
* whether to set MSG_MORE or not...
*/
rq_for_each_segment(bvec, req, iter) {
- flags = 0;
- if (!rq_iter_last(req, iter))
- flags = MSG_MORE;
- dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n",
- lo->disk->disk_name, req,
- bvec->bv_len);
- result = sock_send_bvec(sock, bvec, flags);
- if (result <= 0) {
- printk(KERN_ERR "%s: Send data failed (result %d)\n",
- lo->disk->disk_name,
- result);
- goto error_out;
- }
+ flags = 0;
+ if (!rq_iter_last(req, iter))
+ flags = MSG_MORE;
+ dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n",
+ lo->disk->disk_name, req, bvec->bv_len);
+ result = sock_send_bvec(sock, bvec, flags);
+ if (result <= 0) {
+ printk(KERN_ERR "%s: Send data failed (result %d)\n",
+ lo->disk->disk_name, result);
+ goto error_out;
+ }
}
}
return 0;
struct bio_vec *bvec;
rq_for_each_segment(bvec, req, iter) {
- result = sock_recv_bvec(sock, bvec);
- if (result <= 0) {
- printk(KERN_ERR "%s: Receive data failed (result %d)\n",
- lo->disk->disk_name,
- result);
- req->errors++;
- return req;
- }
- dprintk(DBG_RX, "%s: request %p: got %d bytes data\n",
- lo->disk->disk_name, req, bvec->bv_len);
+ result = sock_recv_bvec(sock, bvec);
+ if (result <= 0) {
+ printk(KERN_ERR "%s: Receive data failed (result %d)\n",
+ lo->disk->disk_name, result);
+ req->errors++;
+ return req;
+ }
+ dprintk(DBG_RX, "%s: request %p: got %d bytes data\n",
+ lo->disk->disk_name, req, bvec->bv_len);
}
}
return req;
bio_sectors(iter.bio),
(unsigned long)iter.bio->bi_sector);
- size = bvec->bv_len;
- buf = bvec_kmap_irq(bvec, &flags);
- if (gather)
- memcpy(dev->bounce_buf+offset, buf, size);
- else
- memcpy(buf, dev->bounce_buf+offset, size);
- offset += size;
- flush_kernel_dcache_page(bvec->bv_page);
- bvec_kunmap_irq(bvec, &flags);
-
+ size = bvec->bv_len;
+ buf = bvec_kmap_irq(bvec, &flags);
+ if (gather)
+ memcpy(dev->bounce_buf+offset, buf, size);
+ else
+ memcpy(buf, dev->bounce_buf+offset, size);
+ offset += size;
+ flush_kernel_dcache_page(bvec->bv_page);
+ bvec_kunmap_irq(bvec, &flags);
i++;
}
}
ring_req->nr_segments = 0;
rq_for_each_segment(bvec, req, iter) {
- BUG_ON(ring_req->nr_segments
- == BLKIF_MAX_SEGMENTS_PER_REQUEST);
- buffer_mfn = pfn_to_mfn(page_to_pfn(bvec->bv_page));
- fsect = bvec->bv_offset >> 9;
- lsect = fsect + (bvec->bv_len >> 9) - 1;
- /* install a grant reference. */
- ref = gnttab_claim_grant_reference(&gref_head);
- BUG_ON(ref == -ENOSPC);
-
- gnttab_grant_foreign_access_ref(
+ BUG_ON(ring_req->nr_segments == BLKIF_MAX_SEGMENTS_PER_REQUEST);
+ buffer_mfn = pfn_to_mfn(page_to_pfn(bvec->bv_page));
+ fsect = bvec->bv_offset >> 9;
+ lsect = fsect + (bvec->bv_len >> 9) - 1;
+ /* install a grant reference. */
+ ref = gnttab_claim_grant_reference(&gref_head);
+ BUG_ON(ref == -ENOSPC);
+
+ gnttab_grant_foreign_access_ref(
ref,
info->xbdev->otherend_id,
buffer_mfn,
rq_data_dir(req) );
- info->shadow[id].frame[ring_req->nr_segments] =
+ info->shadow[id].frame[ring_req->nr_segments] =
mfn_to_pfn(buffer_mfn);
- ring_req->seg[ring_req->nr_segments] =
+ ring_req->seg[ring_req->nr_segments] =
(struct blkif_request_segment) {
.gref = ref,
.first_sect = fsect,
.last_sect = lsect };
- ring_req->nr_segments++;
+ ring_req->nr_segments++;
}
info->ring.req_prod_pvt++;
int count, done = 0;
rq_for_each_segment(bvec, rq, iter) {
- if (!bcount)
- break;
+ if (!bcount)
+ break;
- count = min(bvec->bv_len, bcount);
+ count = min(bvec->bv_len, bcount);
- data = bvec_kmap_irq(bvec, &flags);
- drive->hwif->atapi_input_bytes(drive, data, count);
- bvec_kunmap_irq(data, &flags);
+ data = bvec_kmap_irq(bvec, &flags);
+ drive->hwif->atapi_input_bytes(drive, data, count);
+ bvec_kunmap_irq(data, &flags);
- bcount -= count;
- pc->b_count += count;
- done += count;
+ bcount -= count;
+ pc->b_count += count;
+ done += count;
}
idefloppy_do_end_request(drive, 1, done >> 9);
char *data;
rq_for_each_segment(bvec, rq, iter) {
- if (!bcount)
- break;
+ if (!bcount)
+ break;
- count = min(bvec->bv_len, bcount);
+ count = min(bvec->bv_len, bcount);
- data = bvec_kmap_irq(bvec, &flags);
- drive->hwif->atapi_output_bytes(drive, data, count);
- bvec_kunmap_irq(data, &flags);
+ data = bvec_kmap_irq(bvec, &flags);
+ drive->hwif->atapi_output_bytes(drive, data, count);
+ bvec_kunmap_irq(data, &flags);
- bcount -= count;
- pc->b_count += count;
- done += count;
+ bcount -= count;
+ pc->b_count += count;
+ done += count;
}
idefloppy_do_end_request(drive, 1, done >> 9);
/* Check struct bio and count the number of blocks for the request. */
count = 0;
rq_for_each_segment(bv, req, iter) {
- if (bv->bv_len & (blksize - 1))
- /* Fba can only do full blocks. */
- return ERR_PTR(-EINVAL);
- count += bv->bv_len >> (device->s2b_shift + 9);
+ if (bv->bv_len & (blksize - 1))
+ /* Fba can only do full blocks. */
+ return ERR_PTR(-EINVAL);
+ count += bv->bv_len >> (device->s2b_shift + 9);
}
/* Paranoia. */
if (count != last_rec - first_rec + 1)
dbio = dreq->bio;
recid = first_rec;
rq_for_each_segment(bv, req, iter) {
- dst = page_address(bv->bv_page) + bv->bv_offset;
- for (off = 0; off < bv->bv_len; off += blksize) {
- memset(dbio, 0, sizeof (struct dasd_diag_bio));
- dbio->type = rw_cmd;
- dbio->block_number = recid + 1;
- dbio->buffer = dst;
- dbio++;
- dst += blksize;
- recid++;
- }
+ dst = page_address(bv->bv_page) + bv->bv_offset;
+ for (off = 0; off < bv->bv_len; off += blksize) {
+ memset(dbio, 0, sizeof (struct dasd_diag_bio));
+ dbio->type = rw_cmd;
+ dbio->block_number = recid + 1;
+ dbio->buffer = dst;
+ dbio++;
+ dst += blksize;
+ recid++;
+ }
}
cqr->retries = DIAG_MAX_RETRIES;
cqr->buildclk = get_clock();
count = 0;
cidaw = 0;
rq_for_each_segment(bv, req, iter) {
- if (bv->bv_len & (blksize - 1))
- /* Eckd can only do full blocks. */
- return ERR_PTR(-EINVAL);
- count += bv->bv_len >> (device->s2b_shift + 9);
+ if (bv->bv_len & (blksize - 1))
+ /* Eckd can only do full blocks. */
+ return ERR_PTR(-EINVAL);
+ count += bv->bv_len >> (device->s2b_shift + 9);
#if defined(CONFIG_64BIT)
- if (idal_is_needed (page_address(bv->bv_page),
- bv->bv_len))
- cidaw += bv->bv_len >> (device->s2b_shift + 9);
+ if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
+ cidaw += bv->bv_len >> (device->s2b_shift + 9);
#endif
}
/* Paranoia. */
count = 0;
cidaw = 0;
rq_for_each_segment(bv, req, iter) {
- if (bv->bv_len & (blksize - 1))
- /* Fba can only do full blocks. */
- return ERR_PTR(-EINVAL);
- count += bv->bv_len >> (device->s2b_shift + 9);
+ if (bv->bv_len & (blksize - 1))
+ /* Fba can only do full blocks. */
+ return ERR_PTR(-EINVAL);
+ count += bv->bv_len >> (device->s2b_shift + 9);
#if defined(CONFIG_64BIT)
- if (idal_is_needed (page_address(bv->bv_page),
- bv->bv_len))
- cidaw += bv->bv_len / blksize;
+ if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
+ cidaw += bv->bv_len / blksize;
#endif
}
/* Paranoia. */
ccw = tape_ccw_cc(ccw, NOP, 0, NULL);
rq_for_each_segment(bv, req, iter) {
- dst = kmap(bv->bv_page) + bv->bv_offset;
- for (off = 0; off < bv->bv_len;
- off += TAPEBLOCK_HSEC_SIZE) {
- ccw->flags = CCW_FLAG_CC;
- ccw->cmd_code = READ_FORWARD;
- ccw->count = TAPEBLOCK_HSEC_SIZE;
- set_normalized_cda(ccw, (void*) __pa(dst));
- ccw++;
- dst += TAPEBLOCK_HSEC_SIZE;
- }
+ dst = kmap(bv->bv_page) + bv->bv_offset;
+ for (off = 0; off < bv->bv_len; off += TAPEBLOCK_HSEC_SIZE) {
+ ccw->flags = CCW_FLAG_CC;
+ ccw->cmd_code = READ_FORWARD;
+ ccw->count = TAPEBLOCK_HSEC_SIZE;
+ set_normalized_cda(ccw, (void*) __pa(dst));
+ ccw++;
+ dst += TAPEBLOCK_HSEC_SIZE;
+ }
}
ccw = tape_ccw_end(ccw, NOP, 0, NULL);
ccw = tape_ccw_cc(ccw, NOP, 0, NULL);
rq_for_each_segment(bv, req, iter) {
- dst = page_address(bv->bv_page) + bv->bv_offset;
- for (off = 0; off < bv->bv_len;
- off += TAPEBLOCK_HSEC_SIZE) {
- ccw->flags = CCW_FLAG_CC;
- ccw->cmd_code = READ_FORWARD;
- ccw->count = TAPEBLOCK_HSEC_SIZE;
- set_normalized_cda(ccw, (void *) __pa(dst));
- ccw++;
- dst += TAPEBLOCK_HSEC_SIZE;
- }
- if (off > bv->bv_len)
- BUG();
+ dst = page_address(bv->bv_page) + bv->bv_offset;
+ for (off = 0; off < bv->bv_len; off += TAPEBLOCK_HSEC_SIZE) {
+ ccw->flags = CCW_FLAG_CC;
+ ccw->cmd_code = READ_FORWARD;
+ ccw->count = TAPEBLOCK_HSEC_SIZE;
+ set_normalized_cda(ccw, (void *) __pa(dst));
+ ccw++;
+ dst += TAPEBLOCK_HSEC_SIZE;
+ }
+ if (off > bv->bv_len)
+ BUG();
}
ccw = tape_ccw_end(ccw, NOP, 0, NULL);
DBF_EVENT(6, "xBREDccwg\n");