}
EXPORT_SYMBOL(blk_get_request);
-/**
- * blk_make_request - given a bio, allocate a corresponding struct request.
- * @q: target request queue
- * @bio: The bio describing the memory mappings that will be submitted for IO.
- * It may be a chained-bio properly constructed by block/bio layer.
- * @gfp_mask: gfp flags to be used for memory allocation
- *
- * blk_make_request is the parallel of generic_make_request for BLOCK_PC
- * type commands. Where the struct request needs to be farther initialized by
- * the caller. It is passed a &struct bio, which describes the memory info of
- * the I/O transfer.
- *
- * The caller of blk_make_request must make sure that bi_io_vec
- * are set to describe the memory buffers. That bio_data_dir() will return
- * the needed direction of the request. (And all bio's in the passed bio-chain
- * are properly set accordingly)
- *
- * If called under none-sleepable conditions, mapped bio buffers must not
- * need bouncing, by calling the appropriate masked or flagged allocator,
- * suitable for the target device. Otherwise the call to blk_queue_bounce will
- * BUG.
- *
- * WARNING: When allocating/cloning a bio-chain, careful consideration should be
- * given to how you allocate bios. In particular, you cannot use
- * __GFP_DIRECT_RECLAIM for anything but the first bio in the chain. Otherwise
- * you risk waiting for IO completion of a bio that hasn't been submitted yet,
- * thus resulting in a deadlock. Alternatively bios should be allocated using
- * bio_kmalloc() instead of bio_alloc(), as that avoids the mempool deadlock.
- * If possible a big IO should be split into smaller parts when allocation
- * fails. Partial allocation should not be an error, or you risk a live-lock.
- */
-struct request *blk_make_request(struct request_queue *q, struct bio *bio,
- gfp_t gfp_mask)
-{
- struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask);
-
- if (IS_ERR(rq))
- return rq;
-
- blk_rq_set_block_pc(rq);
-
- for_each_bio(bio) {
- struct bio *bounce_bio = bio;
- int ret;
-
- blk_queue_bounce(q, &bounce_bio);
- ret = blk_rq_append_bio(rq, bounce_bio);
- if (unlikely(ret)) {
- blk_put_request(rq);
- return ERR_PTR(ret);
- }
- }
-
- return rq;
-}
-EXPORT_SYMBOL(blk_make_request);
-
/**
* blk_rq_set_block_pc - initialize a request to type BLOCK_PC
* @rq: request to be initialized
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
static struct request *_make_request(struct request_queue *q, bool has_write,
struct _osd_io_info *oii, gfp_t flags)
{
- if (oii->bio)
- return blk_make_request(q, oii->bio, flags);
- else {
- struct request *req;
-
- req = blk_get_request(q, has_write ? WRITE : READ, flags);
- if (IS_ERR(req))
- return req;
+ struct request *req;
+ struct bio *bio = oii->bio;
+ int ret;
- blk_rq_set_block_pc(req);
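+	/*
+	 * The data direction is taken from has_write; any bios attached
+	 * below must have been set up so that bio_data_dir() agrees with
+	 * it, as the old blk_make_request() documentation required.
+	 */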
+ req = blk_get_request(q, has_write ? WRITE : READ, flags);
+ if (IS_ERR(req))
return req;
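+	/*
+	 * This is a BLOCK_PC request; as the removed blk_make_request()
+	 * kerneldoc noted, the caller must further initialize it
+	 * (e.g. the CDB and timeout) before submission.
+	 */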
+ blk_rq_set_block_pc(req);
+
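+	/*
+	 * Open-coded replacement for the removed blk_make_request(): walk
+	 * the (possibly chained) bio list and append each bio to the
+	 * request, bouncing buffers the device cannot address directly.
+	 * for_each_bio() does nothing for a NULL bio, which covers the old
+	 * oii->bio == NULL branch.  Two warnings carried over from the old
+	 * kerneldoc: blk_queue_bounce() will BUG if the buffers need
+	 * bouncing in a non-sleepable context, and only the first bio in a
+	 * chain may be allocated with __GFP_DIRECT_RECLAIM, otherwise a
+	 * mempool deadlock is possible (use bio_kmalloc() to avoid it).
+	 */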
+ for_each_bio(bio) {
+ struct bio *bounce_bio = bio;
+
+ blk_queue_bounce(req->q, &bounce_bio);
+ ret = blk_rq_append_bio(req, bounce_bio);
+		if (ret) {
+			/* Match the old blk_make_request(): don't leak the request. */
+			blk_put_request(req);
+			return ERR_PTR(ret);
+		}
}
+
+ return req;
}
static int _init_blk_request(struct osd_request *or,
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
-extern struct request *blk_make_request(struct request_queue *, struct bio *,
- gfp_t);
extern void blk_rq_set_block_pc(struct request *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern void blk_add_request_payload(struct request *rq, struct page *page,