Commit 0d99519e authored by Wu Fengguang, committed by Jens Axboe

writeback: remove unused nonblocking and congestion checks

- no one calls wb_writeback() or write_cache_pages() with
  wbc.nonblocking=1 any more
- lumpy pageout will want to do nonblocking writeback without the
  congestion wait

So remove the congestion checks as suggested by Chris.
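
For reference, the check being removed follows the pattern below (copied from
the hunks in this diff, shown here purely as an illustration); with no caller
setting wbc.nonblocking=1 on these paths, the branch no longer triggers:

	if (wbc->nonblocking && bdi_write_congested(bdi)) {
		wbc->encountered_congestion = 1;
		return 0;	/* some callers set done = 1 instead */
	}

Throttling in these paths now relies on wbc->nr_to_write alone, as the
xfs_convert_page() hunk shows.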
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Cc: Chris Mason <chris.mason@oracle.com>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Evgeniy Polyakov <zbr@ioremap.net>
Cc: Alex Elder <aelder@sgi.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent b17621fe
@@ -143,7 +143,6 @@ static int pohmelfs_writepages(struct address_space *mapping, struct writeback_c
 	struct inode *inode = mapping->host;
 	struct pohmelfs_inode *pi = POHMELFS_I(inode);
 	struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
-	struct backing_dev_info *bdi = mapping->backing_dev_info;
 	int err = 0;
 	int done = 0;
 	int nr_pages;
@@ -152,11 +151,6 @@ static int pohmelfs_writepages(struct address_space *mapping, struct writeback_c
 	int scanned = 0;
 	int range_whole = 0;
 
-	if (wbc->nonblocking && bdi_write_congested(bdi)) {
-		wbc->encountered_congestion = 1;
-		return 0;
-	}
-
 	if (wbc->range_cyclic) {
 		index = mapping->writeback_index; /* Start from prev offset */
 		end = -1;
@@ -248,10 +242,6 @@ retry:
 			if (wbc->nr_to_write <= 0)
 				done = 1;
 
-			if (wbc->nonblocking && bdi_write_congested(bdi)) {
-				wbc->encountered_congestion = 1;
-				done = 1;
-			}
 
 			continue;
out_continue:
......
@@ -639,14 +639,6 @@ static void writeback_inodes_wb(struct bdi_writeback *wb,
 			continue;
 		}
 
-		if (wbc->nonblocking && bdi_write_congested(wb->bdi)) {
-			wbc->encountered_congestion = 1;
-			if (!is_blkdev_sb)
-				break;		/* Skip a congested fs */
-			requeue_io(inode);
-			continue;	/* Skip a congested blockdev */
-		}
-
 		/*
 		 * Was this inode dirtied after sync_sb_inodes was called?
 		 * This keeps sync from extra jobs and livelock.
@@ -770,7 +762,6 @@ static long wb_writeback(struct bdi_writeback *wb,
 			break;
 
 		wbc.more_io = 0;
-		wbc.encountered_congestion = 0;
 		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
 		wbc.pages_skipped = 0;
 		writeback_inodes_wb(wb, &wbc);
......
@@ -904,16 +904,9 @@ xfs_convert_page(
 
 	if (startio) {
 		if (count) {
-			struct backing_dev_info *bdi;
-
-			bdi = inode->i_mapping->backing_dev_info;
 			wbc->nr_to_write--;
-			if (bdi_write_congested(bdi)) {
-				wbc->encountered_congestion = 1;
-				done = 1;
-			} else if (wbc->nr_to_write <= 0) {
+			if (wbc->nr_to_write <= 0)
 				done = 1;
-			}
 		}
 		xfs_start_page_writeback(page, !page_dirty, count);
 	}
......
@@ -821,7 +821,6 @@ int write_cache_pages(struct address_space *mapping,
 		      struct writeback_control *wbc, writepage_t writepage,
 		      void *data)
 {
-	struct backing_dev_info *bdi = mapping->backing_dev_info;
 	int ret = 0;
 	int done = 0;
 	struct pagevec pvec;
@@ -834,11 +833,6 @@ int write_cache_pages(struct address_space *mapping,
 	int range_whole = 0;
 	long nr_to_write = wbc->nr_to_write;
 
-	if (wbc->nonblocking && bdi_write_congested(bdi)) {
-		wbc->encountered_congestion = 1;
-		return 0;
-	}
-
 	pagevec_init(&pvec, 0);
 	if (wbc->range_cyclic) {
 		writeback_index = mapping->writeback_index; /* prev offset */
@@ -957,12 +951,6 @@ continue_unlock:
 				break;
 			}
 		}
-
-		if (wbc->nonblocking && bdi_write_congested(bdi)) {
-			wbc->encountered_congestion = 1;
-			done = 1;
-			break;
-		}
 	}
 	pagevec_release(&pvec);
 	cond_resched();
......