Commit 710027a4 authored by Randy Dunlap, committed by Jens Axboe

Add some block/ source files to the kernel-api docbook. Fix kernel-doc notation in them as needed. Fix changed function parameter names. Fix typos/spellos. In comments, change REQ_SPECIAL to REQ_TYPE_SPECIAL and REQ_BLOCK_PC to REQ_TYPE_BLOCK_PC.
Signed-off-by: Randy Dunlap <randy.dunlap@oracle.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent 5b99c2ff
Documentation/DocBook/kernel-api.tmpl
@@ -364,6 +364,10 @@ X!Edrivers/pnp/system.c
 !Eblock/blk-barrier.c
 !Eblock/blk-tag.c
 !Iblock/blk-tag.c
+!Eblock/blk-integrity.c
+!Iblock/blktrace.c
+!Iblock/genhd.c
+!Eblock/genhd.c
 </chapter>
 <chapter id="chrdev">
...
This diff is collapsed.
block/blk-exec.c
@@ -16,7 +16,7 @@
 /**
  * blk_end_sync_rq - executes a completion event on a request
  * @rq: request to complete
- * @error: end io status of the request
+ * @error: end I/O status of the request
  */
 static void blk_end_sync_rq(struct request *rq, int error)
 {
@@ -41,7 +41,7 @@ static void blk_end_sync_rq(struct request *rq, int error)
  * @done: I/O completion handler
  *
  * Description:
- * Insert a fully prepared request at the back of the io scheduler queue
+ * Insert a fully prepared request at the back of the I/O scheduler queue
  * for execution. Don't wait for completion.
  */
 void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
@@ -72,7 +72,7 @@ EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
  * @at_head: insert request at head or tail of queue
  *
  * Description:
- * Insert a fully prepared request at the back of the io scheduler queue
+ * Insert a fully prepared request at the back of the I/O scheduler queue
  * for execution and wait for completion.
  */
 int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
...
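
As a reader's note on the interface documented above: a minimal sketch of issuing a fully prepared request synchronously, against the 2.6.27-era block API. The helper name issue_prepared_rq is hypothetical, and a real caller must still fill in the command bytes:

```c
#include <linux/blkdev.h>

/* Hypothetical helper: build one request and issue it synchronously. */
static int issue_prepared_rq(struct request_queue *q, struct gendisk *bd_disk)
{
	struct request *rq;
	int err;

	rq = blk_get_request(q, READ, __GFP_WAIT);
	if (!rq)
		return -ENOMEM;

	rq->cmd_type = REQ_TYPE_BLOCK_PC;	/* the name this commit's comments adopt */
	/* ... fill in rq->cmd[], rq->cmd_len and rq->timeout here ... */

	err = blk_execute_rq(q, bd_disk, rq, 0);	/* 0: insert at tail, then wait */
	blk_put_request(rq);
	return err;
}
```

For fire-and-forget submission, blk_execute_rq_nowait() takes the same queue/disk/request plus an at_head flag and a completion callback, per its kernel-doc above.
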
block/blk-integrity.c
@@ -109,8 +109,8 @@ EXPORT_SYMBOL(blk_rq_map_integrity_sg);
 /**
  * blk_integrity_compare - Compare integrity profile of two block devices
- * @b1: Device to compare
- * @b2: Device to compare
+ * @bd1: Device to compare
+ * @bd2: Device to compare
  *
  * Description: Meta-devices like DM and MD need to verify that all
  * sub-devices use the same integrity format before advertising to
...
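
A hedged sketch of the stacking-time check the description above refers to. The helper name is hypothetical, and this assumes the era's signature taking two struct block_device pointers and the convention that a negative return means the profiles differ:

```c
#include <linux/blkdev.h>

/* Hypothetical check a DM/MD-style meta-device might run per sub-device pair. */
static int check_integrity_match(struct block_device *bd1,
				 struct block_device *bd2)
{
	if (blk_integrity_compare(bd1, bd2) < 0)
		return -EINVAL;	/* sub-devices disagree on integrity format */

	return 0;	/* profiles match (or no integrity metadata in play) */
}
```
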
block/blk-map.c
@@ -85,17 +85,17 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 }
 
 /**
- * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
+ * blk_rq_map_user - map user data to a request, for REQ_TYPE_BLOCK_PC usage
  * @q: request queue where request should be inserted
  * @rq: request structure to fill
  * @ubuf: the user buffer
  * @len: length of user data
  *
  * Description:
- * Data will be mapped directly for zero copy io, if possible. Otherwise
+ * Data will be mapped directly for zero copy I/O, if possible. Otherwise
  * a kernel bounce buffer is used.
  *
- * A matching blk_rq_unmap_user() must be issued at the end of io, while
+ * A matching blk_rq_unmap_user() must be issued at the end of I/O, while
  * still in process context.
  *
  * Note: The mapped bio may need to be bounced through blk_queue_bounce()
@@ -154,7 +154,7 @@ unmap_rq:
 EXPORT_SYMBOL(blk_rq_map_user);
 
 /**
- * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage
+ * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
  * @q: request queue where request should be inserted
  * @rq: request to map data to
  * @iov: pointer to the iovec
@@ -162,10 +162,10 @@ EXPORT_SYMBOL(blk_rq_map_user);
  * @len: I/O byte count
  *
  * Description:
- * Data will be mapped directly for zero copy io, if possible. Otherwise
+ * Data will be mapped directly for zero copy I/O, if possible. Otherwise
  * a kernel bounce buffer is used.
  *
- * A matching blk_rq_unmap_user() must be issued at the end of io, while
+ * A matching blk_rq_unmap_user() must be issued at the end of I/O, while
  * still in process context.
  *
  * Note: The mapped bio may need to be bounced through blk_queue_bounce()
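
A sketch of the iovec variant documented above, assuming the five-argument signature of this era and struct sg_iovec from scsi/sg.h; map_two_segments is a hypothetical name:

```c
#include <linux/blkdev.h>
#include <scsi/sg.h>

/* Map a user buffer scattered over two segments into a single request. */
static int map_two_segments(struct request_queue *q, struct request *rq,
			    void __user *a, size_t alen,
			    void __user *b, size_t blen)
{
	struct sg_iovec iov[2] = {
		{ .iov_base = a, .iov_len = alen },
		{ .iov_base = b, .iov_len = blen },
	};

	return blk_rq_map_user_iov(q, rq, iov, 2, alen + blen);
}
```
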
@@ -224,7 +224,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
  * Description:
  * Unmap a rq previously mapped by blk_rq_map_user(). The caller must
  * supply the original rq->bio from the blk_rq_map_user() return, since
- * the io completion may have changed rq->bio.
+ * the I/O completion may have changed rq->bio.
  */
 int blk_rq_unmap_user(struct bio *bio)
 {
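
Putting the two halves together: a minimal sketch of the map/execute/unmap pattern these comments describe, saving the original rq->bio exactly as the description above requires. The helper name do_user_pc_io is hypothetical, and the command setup is elided:

```c
#include <linux/blkdev.h>

static int do_user_pc_io(struct request_queue *q, struct gendisk *bd_disk,
			 void __user *ubuf, unsigned long len)
{
	struct request *rq;
	struct bio *bio;
	int err;

	rq = blk_get_request(q, READ, __GFP_WAIT);
	if (!rq)
		return -ENOMEM;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	/* ... set up rq->cmd[] and rq->timeout ... */

	err = blk_rq_map_user(q, rq, ubuf, len);
	if (err)
		goto out;

	bio = rq->bio;			/* save: completion may change rq->bio */
	err = blk_execute_rq(q, bd_disk, rq, 0);
	blk_rq_unmap_user(bio);		/* must be the original bio */
out:
	blk_put_request(rq);
	return err;
}
```
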
@@ -250,7 +250,7 @@ int blk_rq_unmap_user(struct bio *bio)
 EXPORT_SYMBOL(blk_rq_unmap_user);
 
 /**
- * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage
+ * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
  * @q: request queue where request should be inserted
  * @rq: request to fill
  * @kbuf: the kernel buffer
...
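
For kernel buffers the pattern is simpler, since there is no unmap step; a sketch assuming the era's five-argument blk_rq_map_kern() (do_kern_pc_io is a hypothetical name):

```c
#include <linux/blkdev.h>

static int do_kern_pc_io(struct request_queue *q, struct gendisk *bd_disk,
			 void *kbuf, unsigned int len)
{
	struct request *rq = blk_get_request(q, READ, __GFP_WAIT);
	int err;

	if (!rq)
		return -ENOMEM;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	err = blk_rq_map_kern(q, rq, kbuf, len, GFP_KERNEL);
	if (!err)
		err = blk_execute_rq(q, bd_disk, rq, 0);

	blk_put_request(rq);
	return err;
}
```
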
block/blk-settings.c
@@ -144,7 +144,7 @@ EXPORT_SYMBOL(blk_queue_make_request);
  * Different hardware can have different requirements as to what pages
  * it can do I/O directly to. A low level driver can call
  * blk_queue_bounce_limit to have lower memory pages allocated as bounce
- * buffers for doing I/O to pages residing above @page.
+ * buffers for doing I/O to pages residing above @dma_addr.
  **/
 void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
 {
@@ -229,7 +229,7 @@ EXPORT_SYMBOL(blk_queue_max_phys_segments);
  * Description:
  * Enables a low level driver to set an upper limit on the number of
  * hw data segments in a request. This would be the largest number of
- * address/length pairs the host adapter can actually give as once
+ * address/length pairs the host adapter can actually give at once
  * to the device.
  **/
 void blk_queue_max_hw_segments(struct request_queue *q,
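
The two settings hunks above are typically exercised together at probe time. A hedged sketch for an imaginary adapter limited to 32-bit DMA and 64 scatter/gather entries; mydrv_init_queue and the numbers are illustrative:

```c
#include <linux/blkdev.h>
#include <linux/dma-mapping.h>

static void mydrv_init_queue(struct request_queue *q)
{
	/* Bounce any page above 4GB, per blk_queue_bounce_limit() above. */
	blk_queue_bounce_limit(q, DMA_32BIT_MASK);

	/* Largest number of address/length pairs the HBA accepts at once. */
	blk_queue_max_hw_segments(q, 64);
	blk_queue_max_phys_segments(q, 64);
}
```
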
@@ -410,7 +410,7 @@ EXPORT_SYMBOL(blk_queue_segment_boundary);
  * @mask: alignment mask
  *
  * description:
- * set required memory and length aligment for direct dma transactions.
+ * set required memory and length alignment for direct dma transactions.
  * this is used when buiding direct io requests for the queue.
  *
  **/
@@ -426,7 +426,7 @@ EXPORT_SYMBOL(blk_queue_dma_alignment);
  * @mask: alignment mask
  *
  * description:
- * update required memory and length aligment for direct dma transactions.
+ * update required memory and length alignment for direct dma transactions.
  * If the requested alignment is larger than the current alignment, then
  * the current queue alignment is updated to the new value, otherwise it
  * is left alone. The design of this is to allow multiple objects
...
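
A short sketch of the set/update distinction the two descriptions above draw; mydrv_set_alignment and the masks are illustrative:

```c
#include <linux/blkdev.h>

static void mydrv_set_alignment(struct request_queue *q)
{
	blk_queue_dma_alignment(q, 511);	 /* require 512-byte alignment */

	blk_queue_update_dma_alignment(q, 4095); /* larger: raised to 4095 */
	blk_queue_update_dma_alignment(q, 511);	 /* smaller: left alone */
}
```
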
block/blk-tag.c
@@ -29,7 +29,7 @@ EXPORT_SYMBOL(blk_queue_find_tag);
  * __blk_free_tags - release a given set of tag maintenance info
  * @bqt: the tag map to free
  *
- * Tries to free the specified @bqt@. Returns true if it was
+ * Tries to free the specified @bqt. Returns true if it was
  * actually freed and false if there are still references using it
  */
 static int __blk_free_tags(struct blk_queue_tag *bqt)
@@ -78,7 +78,7 @@ void __blk_queue_free_tags(struct request_queue *q)
  * blk_free_tags - release a given set of tag maintenance info
  * @bqt: the tag map to free
  *
- * For externally managed @bqt@ frees the map. Callers of this
+ * For externally managed @bqt frees the map. Callers of this
  * function must guarantee to have released all the queues that
  * might have been using this tag map.
  */
@@ -94,7 +94,7 @@ EXPORT_SYMBOL(blk_free_tags);
  * @q: the request queue for the device
  *
  * Notes:
- * This is used to disabled tagged queuing to a device, yet leave
+ * This is used to disable tagged queuing to a device, yet leave
  * queue in function.
  **/
 void blk_queue_free_tags(struct request_queue *q)
@@ -271,7 +271,7 @@ EXPORT_SYMBOL(blk_queue_resize_tags);
  * @rq: the request that has completed
  *
  * Description:
- * Typically called when end_that_request_first() returns 0, meaning
+ * Typically called when end_that_request_first() returns %0, meaning
  * all transfers have been done for a request. It's important to call
  * this function before end_that_request_last(), as that will put the
  * request back on the free list thus corrupting the internal tag list.
...
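
A hedged sketch of the tag lifecycle these comments document, against the era's API. Names and depth are hypothetical; blk_queue_end_tag() expects the queue lock to be held:

```c
#include <linux/blkdev.h>

/* Init time: create an internally managed tag map of depth 64. */
static int mydrv_setup_tags(struct request_queue *q)
{
	return blk_queue_init_tags(q, 64, NULL);
}

/* Called with the queue lock held, once all transfers are done. */
static void mydrv_finish_rq(struct request_queue *q, struct request *rq)
{
	blk_queue_end_tag(q, rq);	/* release the tag before freeing rq */
}
```
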
block/genhd.c
@@ -211,10 +211,11 @@ void unlink_gendisk(struct gendisk *disk)
 /**
  * get_gendisk - get partitioning information for a given device
- * @dev: device to get partitioning information for
+ * @devt: device to get partitioning information for
+ * @part: returned partition index
  *
  * This function gets the structure containing partitioning
- * information for the given device @dev.
+ * information for the given device @devt.
  */
 struct gendisk *get_gendisk(dev_t devt, int *part)
 {
...
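
Finally, a sketch of a get_gendisk() call matching the renamed @devt parameter. lookup_disk is a hypothetical name, and this assumes the lookup pins the disk, so a real caller would eventually balance it with put_disk():

```c
#include <linux/kernel.h>
#include <linux/genhd.h>

static struct gendisk *lookup_disk(dev_t devt)
{
	int part;
	struct gendisk *disk = get_gendisk(devt, &part);

	if (disk)
		printk(KERN_INFO "%s: partition index %d\n",
		       disk->disk_name, part);
	return disk;	/* caller drops the reference with put_disk() */
}
```
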