Commit 79eb63e9 authored by Boaz Harrosh, committed by Jens Axboe

block: Add blk_make_request(), takes bio, returns a request

New block API:
given a struct bio, allocate a new request. This is the parallel of
generic_make_request for BLOCK_PC command users.

The passed bio may be a chained bio. The bio is bounced, if needed,
inside the call to this function.

This is part of the effort to un-export blk_rq_append_bio().
Signed-off-by: Boaz Harrosh <bharrosh@panasas.com>
CC: Jeff Garzik <jeff@garzik.org>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent bc38bf10
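
To show how the new call is meant to be used, here is a rough caller sketch; it is illustrative only and not part of this commit. The helper name my_make_pc_request() is hypothetical, and the fields filled in after the allocation (cmd_type, cmd, cmd_len, timeout) follow the usual BLOCK_PC request setup of this kernel era.

#include <linux/blkdev.h>
#include <linux/string.h>

/*
 * Hypothetical usage sketch (not part of this commit): turn an
 * already-built bio into a BLOCK_PC request and finish initializing
 * it, as the kernel-doc of blk_make_request() requires the caller to.
 * cdb_len must not exceed BLK_MAX_CDB.
 */
static struct request *my_make_pc_request(struct request_queue *q,
					  struct bio *bio,
					  const u8 *cdb, unsigned int cdb_len)
{
	struct request *rq;

	/* May sleep with GFP_KERNEL; the bio is bounced inside if needed. */
	rq = blk_make_request(q, bio, GFP_KERNEL);
	if (IS_ERR(rq))
		return rq;

	/* The caller further initializes the request for BLOCK_PC use. */
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->cmd_len = cdb_len;
	memcpy(rq->cmd, cdb, cdb_len);
	rq->timeout = 10 * HZ;

	return rq;
}
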
...@@ -890,6 +890,51 @@ struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
}
EXPORT_SYMBOL(blk_get_request);
/**
 * blk_make_request - given a bio, allocate a corresponding struct request.
 * @q: target request queue
 * @bio: The bio describing the memory mappings that will be submitted for IO.
 *       It may be a chained bio properly constructed by the block/bio layer.
 * @gfp_mask: gfp flags to be used for the request allocation
 *
 * blk_make_request is the parallel of generic_make_request for BLOCK_PC
 * type commands, where the struct request needs to be further initialized
 * by the caller. It is passed a &struct bio, which describes the memory
 * info of the I/O transfer.
 *
 * The caller of blk_make_request must make sure that bi_io_vec is set to
 * describe the memory buffers, and that bio_data_dir() returns the needed
 * direction of the request (all bios in the passed bio-chain must be set
 * up accordingly).
 *
 * If called under non-sleepable conditions, the mapped bio buffers must not
 * need bouncing, which the caller ensures by allocating them with a mask or
 * flag suitable for the target device. Otherwise the call to
 * blk_queue_bounce() will BUG.
 */
struct request *blk_make_request(struct request_queue *q, struct bio *bio,
				 gfp_t gfp_mask)
{
	struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask);

	if (unlikely(!rq))
		return ERR_PTR(-ENOMEM);

	for_each_bio(bio) {
		struct bio *bounce_bio = bio;
		int ret;

		blk_queue_bounce(q, &bounce_bio);
		ret = blk_rq_append_bio(q, rq, bounce_bio);
		if (unlikely(ret)) {
			blk_put_request(rq);
			return ERR_PTR(ret);
		}
	}

	return rq;
}
EXPORT_SYMBOL(blk_make_request);
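
The non-sleepable caveat in the kernel-doc above can be illustrated with a short sketch; this is an assumption about intended usage, not code from this commit, and my_make_request_atomic() is a hypothetical helper.

/*
 * Sketch of the non-sleepable caveat (illustrative, not from this
 * commit): from atomic context the bio's pages must already satisfy
 * the queue's bounce limit, because blk_queue_bounce() cannot build
 * bounce buffers without sleeping and would BUG.
 */
static struct request *my_make_request_atomic(struct request_queue *q,
					      struct bio *bio)
{
	/*
	 * Assumes the bio's data pages were allocated with a suitable
	 * mask up front, e.g. GFP_ATOMIC | GFP_DMA32 on a queue limited
	 * to 32-bit DMA, so blk_queue_bounce() finds nothing to do.
	 */
	return blk_make_request(q, bio, GFP_ATOMIC);
}
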
/**
 * blk_requeue_request - put a request back on queue
 * @q: request queue where request should be inserted
...
...@@ -740,6 +740,8 @@ extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern struct request *blk_make_request(struct request_queue *, struct bio *,
					gfp_t);
extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
...