Commit d2c3f4f6 authored by Chris Mason

Btrfs: Avoid writeback stalls

While building large bios in writepages, btrfs may end up waiting
for other page writeback to finish if WB_SYNC_ALL is used.

While it is waiting, the bio it is building has a number of pages with the
writeback bit set and they aren't getting to the disk any time soon.  This
change lowers writeback latency in general by sending down the bio being
built before waiting for other pages.
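As a rough illustration of that ordering, here is a minimal userspace
sketch (a toy model, not the kernel code: BATCH, bio_builder and
flush_bio are invented for the example).  The point is simply that the
partially built bio must be flushed before sleeping on someone else's
writeback, which is what the new flush_fn hook in
extent_write_cache_pages does in the diff below:

#include <stdio.h>

#define BATCH 4	/* toy bio size, made up for the example */

/* Toy model of the writepages loop: pages are collected into a bio;
 * the bio must be flushed before any sleep that waits on other
 * writeback, or its pages sit with the writeback bit set while no
 * I/O is actually in flight. */
struct bio_builder {
	int npages;
};

static void flush_bio(struct bio_builder *bio)
{
	if (bio->npages) {
		printf("submit bio: %d page(s) -> disk\n", bio->npages);
		bio->npages = 0;	/* mirrors "epd->bio = NULL" */
	}
}

int main(void)
{
	struct bio_builder bio = { 0 };
	int page;

	for (page = 0; page < 10; page++) {
		if (page == 5) {	/* pretend page 5 is already under I/O */
			flush_bio(&bio);	/* send the partial bio first ... */
			printf("wait_on_page_writeback(page %d)\n", page);
		}
		if (++bio.npages == BATCH)	/* keep building large bios */
			flush_bio(&bio);
	}
	flush_bio(&bio);	/* submit whatever is left over */
	return 0;
}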

The bio submission code tries to limit the total number of async bios in
flight by waiting when we're over a certain number of async bios.  But,
the waits are happening while writepages is building bios, and this can easily
lead to stalls and other problems for people calling wait_on_page_writeback.

The current fix is to let the congestion tests take care of waiting.
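A sketch of that policy swap (illustrative only: write_congested() is a
stand-in for a congestion test like bdi_write_congested(), and
ASYNC_LIMIT and nr_async_bios are made-up values for the example).
Instead of sleeping in the submission path, the writeback caller polls
for congestion and backs off on its own:

#include <stdbool.h>
#include <stdio.h>

#define ASYNC_LIMIT 256	/* made-up threshold for the example */

static int nr_async_bios = 300;	/* pretend many bios are in flight */

/* Stand-in for a congestion test such as bdi_write_congested(). */
static bool write_congested(void)
{
	return nr_async_bios > ASYNC_LIMIT;
}

int main(void)
{
	if (write_congested())
		printf("congested: back off and retry writeback later\n");
	else
		printf("not congested: keep submitting bios\n");
	return 0;
}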

sync() and others drain the current async requests to make sure that
everything that was pending when the sync was started really gets to
disk.  The code used to drain pending requests both before and after
submitting a new request.

But, if one of the requests is waiting for page writeback to finish,
the draining waits might block that page writeback.  This changes the
draining code to only wait after submitting the bio being processed.
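A minimal sketch of the reordered drain, using C11 atomics as a toy
model (submit_one, draining and nr_async_submits are invented here;
they loosely mirror the fs_info counters in the diff below).  The
submitter bumps the counter and queues its request before it joins the
drain wait, so the drain can never block a request that has not been
submitted yet:

#include <stdatomic.h>
#include <stdio.h>

/* Toy model: a sync()-style drainer sets "draining" and waits for
 * nr_async_submits to hit zero; a submitter must queue its request
 * BEFORE joining the drain wait, otherwise a request the drain
 * depends on may never be queued at all. */
static atomic_int draining;
static atomic_int nr_async_submits;

static void submit_one(int id)
{
	atomic_fetch_add(&nr_async_submits, 1);
	printf("queued async request %d\n", id);	/* submit first ... */

	/* ... and only then wait out an in-progress drain. */
	if (atomic_load(&draining))
		printf("request %d now helps wait for the drain\n", id);
}

int main(void)
{
	atomic_store(&draining, 1);	/* pretend sync() is mid-drain */
	submit_one(42);
	return 0;
}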
Signed-off-by: Chris Mason <chris.mason@oracle.com>
parent 105d931d
fs/btrfs/disk-io.c

@@ -538,15 +538,9 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
 	async->work.flags = 0;
 	async->bio_flags = bio_flags;
 
-	while(atomic_read(&fs_info->async_submit_draining) &&
-	      atomic_read(&fs_info->nr_async_submits)) {
-		wait_event(fs_info->async_submit_wait,
-			   (atomic_read(&fs_info->nr_async_submits) == 0));
-	}
-
 	atomic_inc(&fs_info->nr_async_submits);
 	btrfs_queue_worker(&fs_info->workers, &async->work);
-
+#if 0
 	if (atomic_read(&fs_info->nr_async_submits) > limit) {
 		wait_event_timeout(fs_info->async_submit_wait,
 			   (atomic_read(&fs_info->nr_async_submits) < limit),
@@ -556,7 +550,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
 			   (atomic_read(&fs_info->nr_async_bios) < limit),
 			   HZ/10);
 	}
-
+#endif
 	while(atomic_read(&fs_info->async_submit_draining) &&
 	      atomic_read(&fs_info->nr_async_submits)) {
 		wait_event(fs_info->async_submit_wait,
@@ -1765,11 +1759,11 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 	ret = btrfs_cleanup_reloc_trees(tree_root);
 	BUG_ON(ret);
 
-read_fs_root:
 	location.objectid = BTRFS_FS_TREE_OBJECTID;
 	location.type = BTRFS_ROOT_ITEM_KEY;
 	location.offset = (u64)-1;
 
+read_fs_root:
 	fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
 	if (!fs_info->fs_root)
 		goto fail_cleaner;
fs/btrfs/extent_io.c

@@ -2398,7 +2398,8 @@ update_nr_written:
 int extent_write_cache_pages(struct extent_io_tree *tree,
 			     struct address_space *mapping,
 			     struct writeback_control *wbc,
-			     writepage_t writepage, void *data)
+			     writepage_t writepage, void *data,
+			     void (*flush_fn)(void *))
 {
 	struct backing_dev_info *bdi = mapping->backing_dev_info;
 	int ret = 0;
@@ -2460,8 +2461,10 @@ retry:
 				continue;
 			}
 
-			if (wbc->sync_mode != WB_SYNC_NONE)
+			if (wbc->sync_mode != WB_SYNC_NONE) {
+				flush_fn(data);
 				wait_on_page_writeback(page);
+			}
 
 			if (PageWriteback(page) ||
 			    !clear_page_dirty_for_io(page)) {
@@ -2498,6 +2501,15 @@ retry:
 }
 EXPORT_SYMBOL(extent_write_cache_pages);
 
+static noinline void flush_write_bio(void *data)
+{
+	struct extent_page_data *epd = data;
+	if (epd->bio) {
+		submit_one_bio(WRITE, epd->bio, 0, 0);
+		epd->bio = NULL;
+	}
+}
+
 int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
 			  get_extent_t *get_extent,
 			  struct writeback_control *wbc)
@@ -2523,7 +2535,7 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
 	ret = __extent_writepage(page, wbc, &epd);
 
 	extent_write_cache_pages(tree, mapping, &wbc_writepages,
-				 __extent_writepage, &epd);
+				 __extent_writepage, &epd, flush_write_bio);
 	if (epd.bio) {
 		submit_one_bio(WRITE, epd.bio, 0, 0);
 	}
@@ -2592,7 +2604,8 @@ int extent_writepages(struct extent_io_tree *tree,
 	};
 
 	ret = extent_write_cache_pages(tree, mapping, wbc,
-				       __extent_writepage, &epd);
+				       __extent_writepage, &epd,
+				       flush_write_bio);
 	if (epd.bio) {
 		submit_one_bio(WRITE, epd.bio, 0, 0);
 	}
@@ -3087,6 +3100,9 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree,
 
 	for (i = 0; i < num_pages; i++) {
 		page = extent_buffer_page(eb, i);
+		if (!set && !PageDirty(page))
+			continue;
+
 		lock_page(page);
 		if (i == 0)
 			set_page_extent_head(page, eb->len);
fs/btrfs/inode.c

@@ -883,13 +883,6 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
 	async_cow->work.ordered_free = async_cow_free;
 	async_cow->work.flags = 0;
 
-	while(atomic_read(&root->fs_info->async_submit_draining) &&
-	      atomic_read(&root->fs_info->async_delalloc_pages)) {
-		wait_event(root->fs_info->async_submit_wait,
-			   (atomic_read(&root->fs_info->async_delalloc_pages)
-			    == 0));
-	}
-
 	nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
 		PAGE_CACHE_SHIFT;
 	atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);