Commit 42dad764 authored by Jerome Marchand, committed by Jens Axboe

block: simplify I/O stat accounting

This simplifies I/O stat accounting switching code and separates it
completely from I/O scheduler switch code.

Requests are accounted according to the state of their request queue
at the time of the request allocation. There is no need anymore to
flush the request queue when switching I/O accounting state.
Signed-off-by: Jerome Marchand <jmarchan@redhat.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent 097102c2
...@@ -643,7 +643,7 @@ static inline void blk_free_request(struct request_queue *q, struct request *rq) ...@@ -643,7 +643,7 @@ static inline void blk_free_request(struct request_queue *q, struct request *rq)
} }
static struct request * static struct request *
blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask) blk_alloc_request(struct request_queue *q, int flags, int priv, gfp_t gfp_mask)
{ {
struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask); struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
...@@ -652,7 +652,7 @@ blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask) ...@@ -652,7 +652,7 @@ blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask)
blk_rq_init(q, rq); blk_rq_init(q, rq);
rq->cmd_flags = rw | REQ_ALLOCED; rq->cmd_flags = flags | REQ_ALLOCED;
if (priv) { if (priv) {
if (unlikely(elv_set_request(q, rq, gfp_mask))) { if (unlikely(elv_set_request(q, rq, gfp_mask))) {
...@@ -792,6 +792,8 @@ static struct request *get_request(struct request_queue *q, int rw_flags, ...@@ -792,6 +792,8 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
if (priv) if (priv)
rl->elvpriv++; rl->elvpriv++;
if (blk_queue_io_stat(q))
rw_flags |= REQ_IO_STAT;
spin_unlock_irq(q->queue_lock); spin_unlock_irq(q->queue_lock);
rq = blk_alloc_request(q, rw_flags, priv, gfp_mask); rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
......
...@@ -402,7 +402,10 @@ static int attempt_merge(struct request_queue *q, struct request *req, ...@@ -402,7 +402,10 @@ static int attempt_merge(struct request_queue *q, struct request *req,
elv_merge_requests(q, req, next); elv_merge_requests(q, req, next);
blk_account_io_merge(req); /*
* 'next' is going away, so update stats accordingly
*/
blk_account_io_merge(next);
req->ioprio = ioprio_best(req->ioprio, next->ioprio); req->ioprio = ioprio_best(req->ioprio, next->ioprio);
if (blk_rq_cpu_valid(next)) if (blk_rq_cpu_valid(next))
......
...@@ -209,14 +209,10 @@ static ssize_t queue_iostats_store(struct request_queue *q, const char *page, ...@@ -209,14 +209,10 @@ static ssize_t queue_iostats_store(struct request_queue *q, const char *page,
ssize_t ret = queue_var_store(&stats, page, count); ssize_t ret = queue_var_store(&stats, page, count);
spin_lock_irq(q->queue_lock); spin_lock_irq(q->queue_lock);
elv_quiesce_start(q);
if (stats) if (stats)
queue_flag_set(QUEUE_FLAG_IO_STAT, q); queue_flag_set(QUEUE_FLAG_IO_STAT, q);
else else
queue_flag_clear(QUEUE_FLAG_IO_STAT, q); queue_flag_clear(QUEUE_FLAG_IO_STAT, q);
elv_quiesce_end(q);
spin_unlock_irq(q->queue_lock); spin_unlock_irq(q->queue_lock);
return ret; return ret;
......
...@@ -114,12 +114,7 @@ static inline int blk_cpu_to_group(int cpu) ...@@ -114,12 +114,7 @@ static inline int blk_cpu_to_group(int cpu)
static inline int blk_do_io_stat(struct request *rq) static inline int blk_do_io_stat(struct request *rq)
{ {
struct gendisk *disk = rq->rq_disk; return rq->rq_disk && blk_rq_io_stat(rq);
if (!disk || !disk->queue)
return 0;
return blk_queue_io_stat(disk->queue) && (rq->cmd_flags & REQ_ELVPRIV);
} }
#endif #endif
...@@ -118,6 +118,7 @@ enum rq_flag_bits { ...@@ -118,6 +118,7 @@ enum rq_flag_bits {
__REQ_COPY_USER, /* contains copies of user pages */ __REQ_COPY_USER, /* contains copies of user pages */
__REQ_INTEGRITY, /* integrity metadata has been remapped */ __REQ_INTEGRITY, /* integrity metadata has been remapped */
__REQ_NOIDLE, /* Don't anticipate more IO after this one */ __REQ_NOIDLE, /* Don't anticipate more IO after this one */
__REQ_IO_STAT, /* account I/O stat */
__REQ_NR_BITS, /* stops here */ __REQ_NR_BITS, /* stops here */
}; };
...@@ -145,6 +146,7 @@ enum rq_flag_bits { ...@@ -145,6 +146,7 @@ enum rq_flag_bits {
#define REQ_COPY_USER (1 << __REQ_COPY_USER) #define REQ_COPY_USER (1 << __REQ_COPY_USER)
#define REQ_INTEGRITY (1 << __REQ_INTEGRITY) #define REQ_INTEGRITY (1 << __REQ_INTEGRITY)
#define REQ_NOIDLE (1 << __REQ_NOIDLE) #define REQ_NOIDLE (1 << __REQ_NOIDLE)
#define REQ_IO_STAT (1 << __REQ_IO_STAT)
#define BLK_MAX_CDB 16 #define BLK_MAX_CDB 16
...@@ -598,6 +600,7 @@ enum { ...@@ -598,6 +600,7 @@ enum {
blk_failfast_transport(rq) || \ blk_failfast_transport(rq) || \
blk_failfast_driver(rq)) blk_failfast_driver(rq))
#define blk_rq_started(rq) ((rq)->cmd_flags & REQ_STARTED) #define blk_rq_started(rq) ((rq)->cmd_flags & REQ_STARTED)
#define blk_rq_io_stat(rq) ((rq)->cmd_flags & REQ_IO_STAT)
#define blk_account_rq(rq) (blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq))) #define blk_account_rq(rq) (blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq)))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment