Commit a05c0205 authored by Martin K. Petersen's avatar Martin K. Petersen Committed by Jens Axboe

block: Fix bounce limit setting in DM

blk_queue_bounce_limit() is more than a wrapper around the request queue
limits.bounce_pfn variable.  Introduce blk_queue_bounce_pfn() which can
be called by stacking drivers that wish to set the bounce limit
explicitly.
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent dbdc9dd3
@@ -193,6 +193,23 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
 }
 EXPORT_SYMBOL(blk_queue_bounce_limit);
/**
 * blk_queue_bounce_pfn - set the bounce buffer limit for queue
 * @q: the request queue for the device
 * @pfn: maximum page frame number reachable without bouncing
 *
 * Description:
 *    This function is similar to blk_queue_bounce_limit except it
 *    neither changes allocation flags, nor does it set up the ISA DMA
 *    pool. This function should only be used by stacking drivers.
 *    Hardware drivers should use blk_queue_bounce_limit instead.
 */
void blk_queue_bounce_pfn(struct request_queue *q, u64 pfn)
{
	/* Store the limit verbatim; no GFP flag or ISA pool setup here. */
	q->limits.bounce_pfn = pfn;
}
EXPORT_SYMBOL(blk_queue_bounce_pfn);
/**
 * blk_queue_max_sectors - set max sectors for a request for this queue
 * @q: the request queue for the device
...
@@ -920,7 +920,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
 	blk_queue_max_segment_size(q, t->limits.max_segment_size);
 	blk_queue_max_hw_sectors(q, t->limits.max_hw_sectors);
 	blk_queue_segment_boundary(q, t->limits.seg_boundary_mask);
-	blk_queue_bounce_limit(q, t->limits.bounce_pfn);
+	blk_queue_bounce_pfn(q, t->limits.bounce_pfn);
 	if (t->limits.no_cluster)
 		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
...
@@ -910,6 +910,7 @@ extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
 extern void blk_cleanup_queue(struct request_queue *);
 extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
 extern void blk_queue_bounce_limit(struct request_queue *, u64);
+extern void blk_queue_bounce_pfn(struct request_queue *, u64);
 extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment