Commit 771ed689 authored by Chris Mason

Btrfs: Optimize compressed writeback and reads

When reading compressed extents, try to add pages to the page cache
for any part of the compressed extent that readpages didn't already
preload.
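
As a rough illustration, each page offset covered by the compressed extent
is checked against the page cache first, and the readahead gives up once a
few offsets in a row turn out to be cached already; the full loop is
add_ra_bio_pages() in the diff below, this excerpt only annotates the
decision (note that "misses" actually counts cache hits the readahead had
to skip):

        rcu_read_lock();
        page = radix_tree_lookup(&mapping->page_tree, page_index);
        rcu_read_unlock();
        if (page) {
                /* already cached, most likely preloaded by readpages */
                misses++;
                if (misses > 4)
                        break;          /* area is mostly cached, stop readahead */
                goto next;
        }
        /* not cached: allocate a page, add it to the page cache and LRU,
         * and try to append it to the original compressed bio */
        page = alloc_page(mapping_gfp_mask(mapping) | GFP_NOFS);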

Add an async work queue to handle transformations at delayed allocation processing
time.  Right now this is just compression.  The workflow is:

1) Find offsets in the file marked for delayed allocation
2) Lock the pages
3) Lock the state bits
4) Call the async delalloc code

The async delalloc code clears the state lock bits and delalloc bits.  It is
important this happens before the range goes into the work queue because
otherwise it might deadlock with other work queue items that try to lock
those extent bits.
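
A minimal sketch of that ordering constraint (the actual call site is in
the collapsed diff further down): the extent lock and delalloc bits are
cleared on the range first, and only then is the range handed to the
delalloc work queue, as shown in the next sketch.

        /* drop EXTENT_LOCKED and EXTENT_DELALLOC up front so that no other
         * work-queue item can block forever trying to lock this range */
        clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end,
                         EXTENT_LOCKED | EXTENT_DELALLOC, 1, 0, GFP_NOFS);
        /* only after this does the range go onto fs_info->delalloc_workers */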

The file pages are compressed, and if the compression doesn't work the
pages are written back directly.

An ordered work queue is used to make sure the inodes are written in the same
order that pdflush or writepages sent them down.
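
A sketch of how that ordering is achieved with the worker API visible in
the disk-io.c hunks below, assuming the ordered-queue callbacks
(func / ordered_func / ordered_free) introduced by the parent commit; the
real work item and its callbacks live in the collapsed diff, so the
async_cow_* names here are illustrative:

        /* the delalloc pool runs ->func concurrently, but calls
         * ->ordered_func strictly in the order items were queued */
        btrfs_init_workers(&fs_info->delalloc_workers, "delalloc",
                           fs_info->thread_pool_size);
        fs_info->delalloc_workers.ordered = 1;

        /* async_cow stands in for the commit's per-range work item */
        async_cow->work.func = async_cow_start;          /* compress the range   */
        async_cow->work.ordered_func = async_cow_submit; /* submit bios in order */
        async_cow->work.ordered_free = async_cow_free;
        btrfs_queue_worker(&fs_info->delalloc_workers, &async_cow->work);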

This changes extent_write_cache_pages to let the writepage function
update the wbc nr_written count.
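
Concretely (see the __extent_writepage and extent_write_cache_pages hunks
below), the per-page decrement in the caller is replaced by one charge of
everything the writepage call actually wrote:

        /* before: extent_write_cache_pages charged exactly one page per call */
        if (ret || (--(wbc->nr_to_write) <= 0))
                done = 1;

        /* after: __extent_writepage subtracts nr_written, which can cover a
         * whole delalloc range it started IO on, and the loop only checks
         * the remaining budget */
        wbc->nr_to_write -= nr_written;         /* in __extent_writepage        */
        if (ret || wbc->nr_to_write <= 0)       /* in extent_write_cache_pages  */
                done = 1;
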
Signed-off-by: Chris Mason <chris.mason@oracle.com>
parent 4a69a410
......@@ -33,6 +33,7 @@
#include <linux/writeback.h>
#include <linux/bit_spinlock.h>
#include <linux/version.h>
#include <linux/pagevec.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
......@@ -145,9 +146,9 @@ static void end_compressed_bio_read(struct bio *bio, int err)
}
/* do io completion on the original bio */
if (cb->errors)
if (cb->errors) {
bio_io_error(cb->orig_bio);
else
} else
bio_endio(cb->orig_bio, 0);
/* finally free the cb struct */
......@@ -333,6 +334,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
}
bytes_left -= PAGE_CACHE_SIZE;
first_byte += PAGE_CACHE_SIZE;
cond_resched();
}
bio_get(bio);
......@@ -346,6 +348,130 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
return 0;
}
static noinline int add_ra_bio_pages(struct inode *inode,
u64 compressed_end,
struct compressed_bio *cb)
{
unsigned long end_index;
unsigned long page_index;
u64 last_offset;
u64 isize = i_size_read(inode);
int ret;
struct page *page;
unsigned long nr_pages = 0;
struct extent_map *em;
struct address_space *mapping = inode->i_mapping;
struct pagevec pvec;
struct extent_map_tree *em_tree;
struct extent_io_tree *tree;
u64 end;
int misses = 0;
page = cb->orig_bio->bi_io_vec[cb->orig_bio->bi_vcnt - 1].bv_page;
last_offset = (page_offset(page) + PAGE_CACHE_SIZE);
em_tree = &BTRFS_I(inode)->extent_tree;
tree = &BTRFS_I(inode)->io_tree;
if (isize == 0)
return 0;
end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
pagevec_init(&pvec, 0);
while(last_offset < compressed_end) {
page_index = last_offset >> PAGE_CACHE_SHIFT;
if (page_index > end_index)
break;
rcu_read_lock();
page = radix_tree_lookup(&mapping->page_tree, page_index);
rcu_read_unlock();
if (page) {
misses++;
if (misses > 4)
break;
goto next;
}
page = alloc_page(mapping_gfp_mask(mapping) | GFP_NOFS);
if (!page)
break;
page->index = page_index;
/*
* what we want to do here is call add_to_page_cache_lru,
* but that isn't exported, so we reproduce it here
*/
if (add_to_page_cache(page, mapping,
page->index, GFP_NOFS)) {
page_cache_release(page);
goto next;
}
/* open coding of lru_cache_add, also not exported */
page_cache_get(page);
if (!pagevec_add(&pvec, page))
__pagevec_lru_add(&pvec);
end = last_offset + PAGE_CACHE_SIZE - 1;
/*
* at this point, we have a locked page in the page cache
* for these bytes in the file. But, we have to make
* sure they map to this compressed extent on disk.
*/
set_page_extent_mapped(page);
lock_extent(tree, last_offset, end, GFP_NOFS);
spin_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, last_offset,
PAGE_CACHE_SIZE);
spin_unlock(&em_tree->lock);
if (!em || last_offset < em->start ||
(last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) ||
(em->block_start >> 9) != cb->orig_bio->bi_sector) {
free_extent_map(em);
unlock_extent(tree, last_offset, end, GFP_NOFS);
unlock_page(page);
page_cache_release(page);
break;
}
free_extent_map(em);
if (page->index == end_index) {
char *userpage;
size_t zero_offset = isize & (PAGE_CACHE_SIZE - 1);
if (zero_offset) {
int zeros;
zeros = PAGE_CACHE_SIZE - zero_offset;
userpage = kmap_atomic(page, KM_USER0);
memset(userpage + zero_offset, 0, zeros);
flush_dcache_page(page);
kunmap_atomic(userpage, KM_USER0);
}
}
ret = bio_add_page(cb->orig_bio, page,
PAGE_CACHE_SIZE, 0);
if (ret == PAGE_CACHE_SIZE) {
nr_pages++;
page_cache_release(page);
} else {
unlock_extent(tree, last_offset, end, GFP_NOFS);
unlock_page(page);
page_cache_release(page);
break;
}
next:
last_offset += PAGE_CACHE_SIZE;
}
if (pagevec_count(&pvec))
__pagevec_lru_add(&pvec);
return 0;
}
/*
* for a compressed read, the bio we get passed has all the inode pages
* in it. We don't actually do IO on those pages but allocate new ones
......@@ -373,6 +499,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
struct block_device *bdev;
struct bio *comp_bio;
u64 cur_disk_byte = (u64)bio->bi_sector << 9;
u64 em_len;
struct extent_map *em;
int ret;
......@@ -393,6 +520,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
cb->start = em->start;
compressed_len = em->block_len;
em_len = em->len;
free_extent_map(em);
cb->len = uncompressed_len;
......@@ -411,6 +539,17 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
}
cb->nr_pages = nr_pages;
add_ra_bio_pages(inode, cb->start + em_len, cb);
if (!btrfs_test_opt(root, NODATASUM) &&
!btrfs_test_flag(inode, NODATASUM)) {
btrfs_lookup_bio_sums(root, inode, cb->orig_bio);
}
/* include any pages we added in add_ra_bio_pages */
uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE;
cb->len = uncompressed_len;
comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS);
comp_bio->bi_private = cb;
comp_bio->bi_end_io = end_compressed_bio_read;
......@@ -442,9 +581,10 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
comp_bio = compressed_bio_alloc(bdev, cur_disk_byte,
GFP_NOFS);
atomic_inc(&cb->pending_bios);
bio->bi_private = cb;
bio->bi_end_io = end_compressed_bio_write;
bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
comp_bio->bi_private = cb;
comp_bio->bi_end_io = end_compressed_bio_read;
bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0);
}
cur_disk_byte += PAGE_CACHE_SIZE;
}
......
......@@ -625,8 +625,8 @@ struct btrfs_fs_info {
struct btrfs_transaction *running_transaction;
wait_queue_head_t transaction_throttle;
wait_queue_head_t transaction_wait;
wait_queue_head_t async_submit_wait;
wait_queue_head_t async_submit_wait;
wait_queue_head_t tree_log_wait;
struct btrfs_super_block super_copy;
......@@ -653,6 +653,7 @@ struct btrfs_fs_info {
atomic_t nr_async_submits;
atomic_t async_submit_draining;
atomic_t nr_async_bios;
atomic_t async_delalloc_pages;
atomic_t tree_log_writers;
atomic_t tree_log_commit;
unsigned long tree_log_batch;
......@@ -677,6 +678,7 @@ struct btrfs_fs_info {
* two
*/
struct btrfs_workers workers;
struct btrfs_workers delalloc_workers;
struct btrfs_workers endio_workers;
struct btrfs_workers endio_write_workers;
struct btrfs_workers submit_workers;
......
......@@ -539,6 +539,13 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
(atomic_read(&fs_info->nr_async_bios) < limit),
HZ/10);
}
while(atomic_read(&fs_info->async_submit_draining) &&
atomic_read(&fs_info->nr_async_submits)) {
wait_event(fs_info->async_submit_wait,
(atomic_read(&fs_info->nr_async_submits) == 0));
}
return 0;
}
......@@ -1437,6 +1444,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
INIT_LIST_HEAD(&fs_info->space_info);
btrfs_mapping_init(&fs_info->mapping_tree);
atomic_set(&fs_info->nr_async_submits, 0);
atomic_set(&fs_info->async_delalloc_pages, 0);
atomic_set(&fs_info->async_submit_draining, 0);
atomic_set(&fs_info->nr_async_bios, 0);
atomic_set(&fs_info->throttles, 0);
......@@ -1550,6 +1558,9 @@ struct btrfs_root *open_ctree(struct super_block *sb,
btrfs_init_workers(&fs_info->workers, "worker",
fs_info->thread_pool_size);
btrfs_init_workers(&fs_info->delalloc_workers, "delalloc",
fs_info->thread_pool_size);
btrfs_init_workers(&fs_info->submit_workers, "submit",
min_t(u64, fs_devices->num_devices,
fs_info->thread_pool_size));
......@@ -1560,15 +1571,12 @@ struct btrfs_root *open_ctree(struct super_block *sb,
*/
fs_info->submit_workers.idle_thresh = 64;
/* fs_info->workers is responsible for checksumming file data
* blocks and metadata. Using a larger idle thresh allows each
* worker thread to operate on things in roughly the order they
* were sent by the writeback daemons, improving overall locality
* of the IO going down the pipe.
*/
fs_info->workers.idle_thresh = 8;
fs_info->workers.idle_thresh = 16;
fs_info->workers.ordered = 1;
fs_info->delalloc_workers.idle_thresh = 2;
fs_info->delalloc_workers.ordered = 1;
btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1);
btrfs_init_workers(&fs_info->endio_workers, "endio",
fs_info->thread_pool_size);
......@@ -1584,6 +1592,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
btrfs_start_workers(&fs_info->workers, 1);
btrfs_start_workers(&fs_info->submit_workers, 1);
btrfs_start_workers(&fs_info->delalloc_workers, 1);
btrfs_start_workers(&fs_info->fixup_workers, 1);
btrfs_start_workers(&fs_info->endio_workers, fs_info->thread_pool_size);
btrfs_start_workers(&fs_info->endio_write_workers,
......@@ -1732,6 +1741,7 @@ fail_tree_root:
fail_sys_array:
fail_sb_buffer:
btrfs_stop_workers(&fs_info->fixup_workers);
btrfs_stop_workers(&fs_info->delalloc_workers);
btrfs_stop_workers(&fs_info->workers);
btrfs_stop_workers(&fs_info->endio_workers);
btrfs_stop_workers(&fs_info->endio_write_workers);
......@@ -1988,6 +1998,7 @@ int close_ctree(struct btrfs_root *root)
truncate_inode_pages(fs_info->btree_inode->i_mapping, 0);
btrfs_stop_workers(&fs_info->fixup_workers);
btrfs_stop_workers(&fs_info->delalloc_workers);
btrfs_stop_workers(&fs_info->workers);
btrfs_stop_workers(&fs_info->endio_workers);
btrfs_stop_workers(&fs_info->endio_write_workers);
......@@ -2062,7 +2073,7 @@ void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
struct extent_io_tree *tree;
u64 num_dirty;
u64 start = 0;
unsigned long thresh = 96 * 1024 * 1024;
unsigned long thresh = 32 * 1024 * 1024;
tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
if (current_is_pdflush() || current->flags & PF_MEMALLOC)
......
......@@ -768,7 +768,11 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
l = path->nodes[0];
btrfs_item_key_to_cpu(l, &key, path->slots[0]);
BUG_ON(key.objectid != bytenr);
if (key.objectid != bytenr) {
btrfs_print_leaf(root->fs_info->extent_root, path->nodes[0]);
printk("wanted %Lu found %Lu\n", bytenr, key.objectid);
BUG();
}
BUG_ON(key.type != BTRFS_EXTENT_ITEM_KEY);
item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
......
......@@ -47,6 +47,11 @@ struct extent_page_data {
struct bio *bio;
struct extent_io_tree *tree;
get_extent_t *get_extent;
/* tells writepage not to lock the state bits for this range
* it still does the unlocking
*/
int extent_locked;
};
int __init extent_io_init(void)
......@@ -1198,11 +1203,18 @@ static noinline int lock_delalloc_pages(struct inode *inode,
* the caller is taking responsibility for
* locked_page
*/
if (pages[i] != locked_page)
if (pages[i] != locked_page) {
lock_page(pages[i]);
if (pages[i]->mapping != inode->i_mapping) {
ret = -EAGAIN;
unlock_page(pages[i]);
page_cache_release(pages[i]);
goto done;
}
}
page_cache_release(pages[i]);
pages_locked++;
}
pages_locked += ret;
nrpages -= ret;
index += ret;
cond_resched();
......@@ -1262,8 +1274,7 @@ again:
* if we're looping.
*/
if (delalloc_end + 1 - delalloc_start > max_bytes && loops) {
delalloc_end = (delalloc_start + PAGE_CACHE_SIZE - 1) &
~((u64)PAGE_CACHE_SIZE - 1);
delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1;
}
/* step two, lock all the pages after the page that has start */
ret = lock_delalloc_pages(inode, locked_page,
......@@ -1306,7 +1317,10 @@ out_failed:
int extent_clear_unlock_delalloc(struct inode *inode,
struct extent_io_tree *tree,
u64 start, u64 end, struct page *locked_page,
int clear_dirty, int set_writeback,
int unlock_pages,
int clear_unlock,
int clear_delalloc, int clear_dirty,
int set_writeback,
int end_writeback)
{
int ret;
......@@ -1315,12 +1329,19 @@ int extent_clear_unlock_delalloc(struct inode *inode,
unsigned long end_index = end >> PAGE_CACHE_SHIFT;
unsigned long nr_pages = end_index - index + 1;
int i;
int clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC;
int clear_bits = 0;
if (clear_unlock)
clear_bits |= EXTENT_LOCKED;
if (clear_dirty)
clear_bits |= EXTENT_DIRTY;
if (clear_delalloc)
clear_bits |= EXTENT_DELALLOC;
clear_extent_bit(tree, start, end, clear_bits, 1, 0, GFP_NOFS);
if (!(unlock_pages || clear_dirty || set_writeback || end_writeback))
return 0;
while(nr_pages > 0) {
ret = find_get_pages_contig(inode->i_mapping, index,
......@@ -1336,7 +1357,8 @@ int extent_clear_unlock_delalloc(struct inode *inode,
set_page_writeback(pages[i]);
if (end_writeback)
end_page_writeback(pages[i]);
unlock_page(pages[i]);
if (unlock_pages)
unlock_page(pages[i]);
page_cache_release(pages[i]);
}
nr_pages -= ret;
......@@ -1741,9 +1763,10 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
}
}
if (uptodate)
if (uptodate) {
set_extent_uptodate(tree, start, end,
GFP_ATOMIC);
}
unlock_extent(tree, start, end, GFP_ATOMIC);
if (whole_page) {
......@@ -1925,6 +1948,7 @@ void set_page_extent_mapped(struct page *page)
set_page_private(page, EXTENT_PAGE_PRIVATE);
}
}
EXPORT_SYMBOL(set_page_extent_mapped);
void set_page_extent_head(struct page *page, unsigned long len)
{
......@@ -2143,12 +2167,17 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
u64 delalloc_end;
int page_started;
int compressed;
unsigned long nr_written = 0;
WARN_ON(!PageLocked(page));
pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
if (page->index > end_index ||
(page->index == end_index && !pg_offset)) {
page->mapping->a_ops->invalidatepage(page, 0);
if (epd->extent_locked) {
if (tree->ops && tree->ops->writepage_end_io_hook)
tree->ops->writepage_end_io_hook(page, start,
page_end, NULL, 1);
}
unlock_page(page);
return 0;
}
......@@ -2169,27 +2198,33 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
delalloc_start = start;
delalloc_end = 0;
page_started = 0;
while(delalloc_end < page_end) {
nr_delalloc = find_lock_delalloc_range(inode, tree,
if (!epd->extent_locked) {
while(delalloc_end < page_end) {
nr_delalloc = find_lock_delalloc_range(inode, tree,
page,
&delalloc_start,
&delalloc_end,
128 * 1024 * 1024);
if (nr_delalloc == 0) {
if (nr_delalloc == 0) {
delalloc_start = delalloc_end + 1;
continue;
}
tree->ops->fill_delalloc(inode, page, delalloc_start,
delalloc_end, &page_started,
&nr_written);
delalloc_start = delalloc_end + 1;
continue;
}
tree->ops->fill_delalloc(inode, page, delalloc_start,
delalloc_end, &page_started);
delalloc_start = delalloc_end + 1;
}
/* did the fill delalloc function already unlock and start the IO? */
if (page_started) {
return 0;
/* did the fill delalloc function already unlock and start
* the IO?
*/
if (page_started) {
ret = 0;
goto update_nr_written;
}
}
lock_extent(tree, start, page_end, GFP_NOFS);
unlock_start = start;
if (tree->ops && tree->ops->writepage_start_hook) {
......@@ -2199,10 +2234,13 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
unlock_extent(tree, start, page_end, GFP_NOFS);
redirty_page_for_writepage(wbc, page);
unlock_page(page);
return 0;
ret = 0;
goto update_nr_written;
}
}
nr_written++;
end = page_end;
if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
printk("found delalloc bits after lock_extent\n");
......@@ -2333,6 +2371,12 @@ done:
if (unlock_start <= page_end)
unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
unlock_page(page);
update_nr_written:
wbc->nr_to_write -= nr_written;
if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
page->mapping->writeback_index = page->index + nr_written;
return 0;
}
......@@ -2431,7 +2475,7 @@ retry:
unlock_page(page);
ret = 0;
}
if (ret || (--(wbc->nr_to_write) <= 0))
if (ret || wbc->nr_to_write <= 0)
done = 1;
if (wbc->nonblocking && bdi_write_congested(bdi)) {
wbc->encountered_congestion = 1;
......@@ -2452,6 +2496,8 @@ retry:
}
if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
mapping->writeback_index = index;
if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
range_whole = 1;
if (wbc->range_cont)
wbc->range_start = index << PAGE_CACHE_SHIFT;
......@@ -2469,6 +2515,7 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
.bio = NULL,
.tree = tree,
.get_extent = get_extent,
.extent_locked = 0,
};
struct writeback_control wbc_writepages = {
.bdi = wbc->bdi,
......@@ -2491,6 +2538,52 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
}
EXPORT_SYMBOL(extent_write_full_page);
int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
u64 start, u64 end, get_extent_t *get_extent,
int mode)
{
int ret = 0;
struct address_space *mapping = inode->i_mapping;
struct page *page;
unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
PAGE_CACHE_SHIFT;
struct extent_page_data epd = {
.bio = NULL,
.tree = tree,
.get_extent = get_extent,
.extent_locked = 1,
};
struct writeback_control wbc_writepages = {
.bdi = inode->i_mapping->backing_dev_info,
.sync_mode = mode,
.older_than_this = NULL,
.nr_to_write = nr_pages * 2,
.range_start = start,
.range_end = end + 1,
};
while(start <= end) {
page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
if (clear_page_dirty_for_io(page))
ret = __extent_writepage(page, &wbc_writepages, &epd);
else {
if (tree->ops && tree->ops->writepage_end_io_hook)
tree->ops->writepage_end_io_hook(page, start,
start + PAGE_CACHE_SIZE - 1,
NULL, 1);
unlock_page(page);
}
page_cache_release(page);
start += PAGE_CACHE_SIZE;
}
if (epd.bio)
submit_one_bio(WRITE, epd.bio, 0, 0);
return ret;
}
EXPORT_SYMBOL(extent_write_locked_range);
int extent_writepages(struct extent_io_tree *tree,
struct address_space *mapping,
......@@ -2502,6 +2595,7 @@ int extent_writepages(struct extent_io_tree *tree,
.bio = NULL,
.tree = tree,
.get_extent = get_extent,
.extent_locked = 0,
};
ret = extent_write_cache_pages(tree, mapping, wbc,
......
......@@ -35,7 +35,8 @@ typedef int (extent_submit_bio_hook_t)(struct inode *inode, int rw,
unsigned long bio_flags);
struct extent_io_ops {
int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
u64 start, u64 end, int *page_started);
u64 start, u64 end, int *page_started,
unsigned long *nr_written);
int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
extent_submit_bio_hook_t *submit_bio_hook;
......@@ -172,6 +173,9 @@ int extent_invalidatepage(struct extent_io_tree *tree,
int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
get_extent_t *get_extent,
struct writeback_control *wbc);
int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
u64 start, u64 end, get_extent_t *get_extent,
int mode);
int extent_writepages(struct extent_io_tree *tree,
struct address_space *mapping,
get_extent_t *get_extent,
......@@ -256,6 +260,9 @@ int extent_range_uptodate(struct extent_io_tree *tree,
int extent_clear_unlock_delalloc(struct inode *inode,
struct extent_io_tree *tree,
u64 start, u64 end, struct page *locked_page,
int clear_dirty, int set_writeback,
int clear_writeback);
int unlock_page,
int clear_unlock,
int clear_delalloc, int clear_dirty,
int set_writeback,
int end_writeback);
#endif
......@@ -368,6 +368,8 @@ int noinline btrfs_drop_extents(struct btrfs_trans_handle *trans,
u64 search_start = start;
u64 leaf_start;
u64 ram_bytes = 0;
u64 orig_parent = 0;
u64 disk_bytenr = 0;
u8 compression;
u8 encryption;
u16 other_encoding = 0;
......@@ -500,17 +502,31 @@ next_slot:
keep = 1;
}
if (bookend && found_extent && locked_end < extent_end) {
ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
locked_end, extent_end - 1, GFP_NOFS);
if (!ret) {
btrfs_release_path(root, path);
lock_extent(&BTRFS_I(inode)->io_tree,
locked_end, extent_end - 1, GFP_NOFS);
if (bookend && found_extent) {
if (locked_end < extent_end) {
ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
locked_end, extent_end - 1,
GFP_NOFS);
if (!ret) {
btrfs_release_path(root, path);
lock_extent(&BTRFS_I(inode)->io_tree,
locked_end, extent_end - 1,
GFP_NOFS);
locked_end = extent_end;
continue;
}
locked_end = extent_end;
continue;
}
locked_end = extent_end;
orig_parent = path->nodes[0]->start;
disk_bytenr = le64_to_cpu(old.disk_bytenr);
if (disk_bytenr != 0) {
ret = btrfs_inc_extent_ref(trans, root,
disk_bytenr,
le64_to_cpu(old.disk_num_bytes),
orig_parent, root->root_key.objectid,
trans->transid, inode->i_ino);
BUG_ON(ret);
}
}
if (found_inline) {
......@@ -537,8 +553,12 @@ next_slot:
inode_sub_bytes(inode, old_num -
new_num);
}
btrfs_set_file_extent_num_bytes(leaf, extent,
new_num);
if (!compression && !encryption) {
btrfs_set_file_extent_ram_bytes(leaf,
extent, new_num);
}
btrfs_set_file_extent_num_bytes(leaf,
extent, new_num);
btrfs_mark_buffer_dirty(leaf);
} else if (key.offset < inline_limit &&
(end > extent_end) &&
......@@ -582,11 +602,11 @@ next_slot:
}
/* create bookend, splitting the extent in two */
if (bookend && found_extent) {
u64 disk_bytenr;
struct btrfs_key ins;
ins.objectid = inode->i_ino;
ins.offset = end;
btrfs_set_key_type(&ins, BTRFS_EXTENT_DATA_KEY);
btrfs_release_path(root, path);
ret = btrfs_insert_empty_item(trans, root, path, &ins,
sizeof(*extent));
......@@ -623,14 +643,13 @@ next_slot:
btrfs_mark_buffer_dirty(path->nodes[0]);
disk_bytenr = le64_to_cpu(old.disk_bytenr);
if (disk_bytenr != 0) {
ret = btrfs_inc_extent_ref(trans, root,
disk_bytenr,
le64_to_cpu(old.disk_num_bytes),
leaf->start,
ret = btrfs_update_extent_ref(trans, root,
disk_bytenr, orig_parent,
leaf->start,
root->root_key.objectid,
trans->transid, ins.objectid);
BUG_ON(ret);
}
btrfs_release_path(root, path);
......
This diff is collapsed.
......@@ -390,7 +390,7 @@ void btrfs_start_ordered_extent(struct inode *inode,
* start IO on any dirty ones so the wait doesn't stall waiting
* for pdflush to find them
*/
btrfs_fdatawrite_range(inode->i_mapping, start, end, WB_SYNC_NONE);
btrfs_fdatawrite_range(inode->i_mapping, start, end, WB_SYNC_ALL);
if (wait) {
wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
&entry->flags));
......@@ -421,6 +421,12 @@ again:
*/
btrfs_fdatawrite_range(inode->i_mapping, start, orig_end, WB_SYNC_NONE);
/* The compression code will leave pages locked but return from
* writepage without setting the page writeback. Starting again
* with WB_SYNC_ALL will end up waiting for the IO to actually start.
*/
btrfs_fdatawrite_range(inode->i_mapping, start, orig_end, WB_SYNC_ALL);
btrfs_wait_on_page_writeback_range(inode->i_mapping,
start >> PAGE_CACHE_SHIFT,
orig_end >> PAGE_CACHE_SHIFT);
......@@ -448,10 +454,7 @@ again:
}
if (test_range_bit(&BTRFS_I(inode)->io_tree, start, orig_end,
EXTENT_ORDERED | EXTENT_DELALLOC, 0)) {
printk("inode %lu still ordered or delalloc after wait "
"%llu %llu\n", inode->i_ino,
(unsigned long long)start,
(unsigned long long)orig_end);
schedule_timeout(1);
goto again;
}
return 0;
......
......@@ -375,6 +375,10 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
filemap_flush(root->fs_info->btree_inode->i_mapping);
return 0;
}
btrfs_start_delalloc_inodes(root);
btrfs_wait_ordered_extents(root, 0);
btrfs_clean_old_snapshots(root);
trans = btrfs_start_transaction(root, 1);
ret = btrfs_commit_transaction(trans, root);
......
......@@ -423,8 +423,9 @@ int btrfs_zlib_decompress_biovec(struct page **pages_in,
/* we didn't make progress in this inflate
* call, we're done
*/
if (ret != Z_STREAM_END)
if (ret != Z_STREAM_END) {
ret = -1;
}
break;
}
......