Commit 86adf8ad authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block

* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  block: add text file detailing queue/ sysfs files
  bio.h: If they MUST be inlined, then use __always_inline
  Fix misleading comment in bio.h
  block: fix inconsistent parenthesisation of QUEUE_FLAG_DEFAULT
  block: fix oops in blk_queue_io_stat()
parents 3fff0179 cbb5901b
Queue sysfs files
=================
This text file details the queue files that are located in the sysfs tree
for each block device. Note that stacked devices typically do not export
any settings, since their queue merely functions as a remapping target.
These files are found in the /sys/block/xxx/queue/ directory.
Files denoted with a RO postfix are read-only, while the RW postfix means
read-write.
hw_sector_size (RO)
-------------------
This is the hardware sector size of the device, in bytes.
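For illustration only (not part of this file), reading one of these
attributes from user space is an ordinary file read. A minimal C sketch,
assuming a device named "sda" and a made-up helper read_queue_attr():

/* Illustrative only: read a queue sysfs attribute from user space. */
#include <stdio.h>
#include <string.h>

static int read_queue_attr(const char *dev, const char *attr,
			   char *buf, size_t len)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/block/%s/queue/%s", dev, attr);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (!fgets(buf, (int)len, f)) {
		fclose(f);
		return -1;
	}
	fclose(f);
	buf[strcspn(buf, "\n")] = '\0';	/* strip trailing newline */
	return 0;
}

int main(void)
{
	char buf[64];

	if (read_queue_attr("sda", "hw_sector_size", buf, sizeof(buf)) == 0)
		printf("hw_sector_size: %s bytes\n", buf);
	return 0;
}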
max_hw_sectors_kb (RO)
----------------------
This is the maximum number of kilobytes supported in a single data transfer.
max_sectors_kb (RW)
-------------------
This is the maximum number of kilobytes that the block layer will allow
for a filesystem request. Must be smaller than or equal to the maximum
size allowed by the hardware.
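A hedged C sketch of honouring that constraint from user space: read the
RO hardware limit first, clamp the value, then write it. The device name
"sda" and the target value of 512 are assumptions, and the write needs root:

/* Illustrative only: clamp the request size to the hardware limit. */
#include <stdio.h>

int main(void)
{
	unsigned int hw_max, wanted = 512;	/* 512 KB is an assumed target */
	FILE *f;

	f = fopen("/sys/block/sda/queue/max_hw_sectors_kb", "r");
	if (!f)
		return 1;
	if (fscanf(f, "%u", &hw_max) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);

	if (wanted > hw_max)	/* must not exceed the hardware maximum */
		wanted = hw_max;

	f = fopen("/sys/block/sda/queue/max_sectors_kb", "w");
	if (!f)
		return 1;
	fprintf(f, "%u\n", wanted);
	fclose(f);
	return 0;
}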
nomerges (RW)
-------------
This enables the user to disable the lookup logic involved with IO merging
requests in the block layer. Merging may still occur through a direct
1-hit cache, since that comes for (almost) free. The IO scheduler will not
waste cycles doing tree/hash lookups for merges if nomerges is 1. Defaults
to 0, enabling all merges.
nr_requests (RW)
----------------
This controls how many requests may be allocated in the block layer for
read or write requests. Note that the total allocated number may be twice
this amount, since it applies only to reads or writes (not the accumulated
sum).
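As a worked example of that accounting (illustrative, not from this
commit), the sketch below reads nr_requests for an assumed device "sda"
and prints the worst-case total of allocated requests:

/* Illustrative only: nr_requests counts reads and writes separately. */
#include <stdio.h>

int main(void)
{
	unsigned int nr;
	FILE *f = fopen("/sys/block/sda/queue/nr_requests", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%u", &nr) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("nr_requests = %u, worst-case total = %u\n", nr, 2 * nr);
	return 0;
}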
read_ahead_kb (RW)
------------------
Maximum number of kilobytes to read-ahead for filesystems on this block
device.
rq_affinity (RW)
----------------
If this option is enabled, the block layer will migrate request completions
to the CPU that originally submitted the request. For some workloads
this provides a significant reduction in CPU cycles due to caching effects.
scheduler (RW)
--------------
When read, this file will display the current and available IO schedulers
for this block device. The currently active IO scheduler will be enclosed
in [] brackets. Writing an IO scheduler name to this file will switch
control of this block device to that new IO scheduler. Note that writing
an IO scheduler name to this file will attempt to load that IO scheduler
module, if it isn't already present in the system.
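A small illustrative C sketch of both operations; "sda" and "deadline"
are assumptions, and the write requires root privileges:

/* Illustrative only: list the schedulers, then switch the active one. */
#include <stdio.h>

int main(void)
{
	char line[128];
	FILE *f;

	f = fopen("/sys/block/sda/queue/scheduler", "r");
	if (!f)
		return 1;
	if (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* e.g. "noop [cfq]", active one in [] */
	fclose(f);

	f = fopen("/sys/block/sda/queue/scheduler", "w");
	if (!f)
		return 1;
	fputs("deadline\n", f);	/* the kernel loads the module if needed */
	fclose(f);
	return 0;
}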
Jens Axboe <jens.axboe@oracle.com>, February 2009
diff --git a/block/blk-core.c b/block/blk-core.c
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -69,7 +69,7 @@ static void drive_stat_acct(struct request *rq, int new_io)
 	int rw = rq_data_dir(rq);
 	int cpu;
 
-	if (!blk_fs_request(rq) || !disk || !blk_queue_io_stat(disk->queue))
+	if (!blk_fs_request(rq) || !disk || !blk_do_io_stat(disk->queue))
 		return;
 
 	cpu = part_stat_lock();
@@ -1667,7 +1667,7 @@ static void blk_account_io_completion(struct request *req, unsigned int bytes)
 {
 	struct gendisk *disk = req->rq_disk;
 
-	if (!disk || !blk_queue_io_stat(disk->queue))
+	if (!disk || !blk_do_io_stat(disk->queue))
 		return;
 
 	if (blk_fs_request(req)) {
@@ -1686,7 +1686,7 @@ static void blk_account_io_done(struct request *req)
 {
 	struct gendisk *disk = req->rq_disk;
 
-	if (!disk || !blk_queue_io_stat(disk->queue))
+	if (!disk || !blk_do_io_stat(disk->queue))
 		return;
 
 	/*
diff --git a/block/blk.h b/block/blk.h
--- a/block/blk.h
+++ b/block/blk.h
@@ -108,4 +108,12 @@ static inline int blk_cpu_to_group(int cpu)
 #endif
 }
 
+static inline int blk_do_io_stat(struct request_queue *q)
+{
+	if (q)
+		return blk_queue_io_stat(q);
+
+	return 0;
+}
+
 #endif
diff --git a/include/linux/bio.h b/include/linux/bio.h
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -451,12 +451,13 @@ extern struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly;
 #ifdef CONFIG_HIGHMEM
 /*
- * remember to add offset! and never ever reenable interrupts between a
- * bvec_kmap_irq and bvec_kunmap_irq!!
+ * remember never ever reenable interrupts between a bvec_kmap_irq and
+ * bvec_kunmap_irq!
  *
  * This function MUST be inlined - it plays with the CPU interrupt flags.
  */
-static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
+static __always_inline char *bvec_kmap_irq(struct bio_vec *bvec,
+		unsigned long *flags)
 {
 	unsigned long addr;
@@ -472,7 +473,8 @@ static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
 	return (char *) addr + bvec->bv_offset;
 }
 
-static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
+static __always_inline void bvec_kunmap_irq(char *buffer,
+		unsigned long *flags)
 {
 	unsigned long ptr = (unsigned long) buffer & PAGE_MASK;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -455,7 +455,7 @@ struct request_queue
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_CLUSTER) |		\
-				 1 << QUEUE_FLAG_STACKABLE)
+				 (1 << QUEUE_FLAG_STACKABLE))
 
 static inline int queue_is_locked(struct request_queue *q)
 {