Commit 35c80d5f authored by Chris Mason, committed by Linus Torvalds

Add block_write_full_page_endio for passing endio handler

block_write_full_page doesn't allow the caller to control what happens
when the IO is over.  This adds a new call named block_write_full_page_endio
so the buffer head end_io handler can be provided by the caller.

This will be used by the ext3 data=guarded mode to do i_size updates in
a workqueue-based end_io handler.  end_buffer_async_write is also
exported so it can be called to do the dirty work of managing page
writeback for the higher-level end_io handler.
Signed-off-by: Chris Mason <chris.mason@oracle.com>
Acked-by: Theodore Tso <tytso@mit.edu>
Acked-by: Jan Kara <jack@suse.cz>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent f6995585
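
For context, a filesystem that wanted this hook might use it along the
following lines.  This is only an illustrative sketch, not code from this
patch or from ext3: guarded_io_work, guarded_io_workfn, guarded_end_io,
guarded_writepage, and my_get_block are hypothetical names, and a real
handler would need to handle allocation failure and ordering far more
carefully.  The idea is that the bh_end_io_t handler runs at I/O completion
(possibly in interrupt context), punts the filesystem-specific work such as
the i_size update to a workqueue, and lets the newly exported
end_buffer_async_write() do the dirty work of finishing page writeback:

#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/writeback.h>

/* The filesystem's usual get_block_t, assumed to be defined elsewhere. */
static get_block_t my_get_block;

/* Hypothetical work item carrying a completed buffer to process context. */
struct guarded_io_work {
        struct work_struct work;
        struct buffer_head *bh;
        int uptodate;
};

static void guarded_io_workfn(struct work_struct *work)
{
        struct guarded_io_work *gw =
                container_of(work, struct guarded_io_work, work);

        /* ... filesystem-specific completion, e.g. the i_size update ... */

        /* Let the exported helper manage buffer and page writeback state. */
        end_buffer_async_write(gw->bh, gw->uptodate);
        kfree(gw);
}

/* bh_end_io_t handler handed to block_write_full_page_endio(). */
static void guarded_end_io(struct buffer_head *bh, int uptodate)
{
        struct guarded_io_work *gw = kmalloc(sizeof(*gw), GFP_ATOMIC);

        if (!gw) {
                /* No memory: finish the page inline, skipping the deferral. */
                end_buffer_async_write(bh, uptodate);
                return;
        }
        gw->bh = bh;
        gw->uptodate = uptodate;
        INIT_WORK(&gw->work, guarded_io_workfn);
        schedule_work(&gw->work);
}

/* ->writepage: same as block_write_full_page(), except that the custom
 * handler ends up in bh->b_end_io for every buffer written. */
static int guarded_writepage(struct page *page, struct writeback_control *wbc)
{
        return block_write_full_page_endio(page, my_get_block, wbc,
                                           guarded_end_io);
}

As the hunks below show, block_write_full_page() itself becomes exactly this
pattern with end_buffer_async_write passed as the handler.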
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -360,7 +360,7 @@ still_busy:
  * Completion handler for block_write_full_page() - pages which are unlocked
  * during I/O, and which have PageWriteback cleared upon I/O completion.
  */
-static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
+void end_buffer_async_write(struct buffer_head *bh, int uptodate)
 {
        char b[BDEVNAME_SIZE];
        unsigned long flags;
@@ -438,11 +438,17 @@ static void mark_buffer_async_read(struct buffer_head *bh)
        set_buffer_async_read(bh);
 }
 
-void mark_buffer_async_write(struct buffer_head *bh)
+void mark_buffer_async_write_endio(struct buffer_head *bh,
+                                  bh_end_io_t *handler)
 {
-       bh->b_end_io = end_buffer_async_write;
+       bh->b_end_io = handler;
        set_buffer_async_write(bh);
 }
+
+void mark_buffer_async_write(struct buffer_head *bh)
+{
+       mark_buffer_async_write_endio(bh, end_buffer_async_write);
+}
 EXPORT_SYMBOL(mark_buffer_async_write);
@@ -1615,7 +1621,8 @@ EXPORT_SYMBOL(unmap_underlying_metadata);
  * unplugging the device queue.
  */
 static int __block_write_full_page(struct inode *inode, struct page *page,
-                       get_block_t *get_block, struct writeback_control *wbc)
+                       get_block_t *get_block, struct writeback_control *wbc,
+                       bh_end_io_t *handler)
 {
        int err;
        sector_t block;
@@ -1700,7 +1707,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
                        continue;
                }
                if (test_clear_buffer_dirty(bh)) {
-                       mark_buffer_async_write(bh);
+                       mark_buffer_async_write_endio(bh, handler);
                } else {
                        unlock_buffer(bh);
                }
@@ -1753,7 +1760,7 @@ recover:
                if (buffer_mapped(bh) && buffer_dirty(bh) &&
                    !buffer_delay(bh)) {
                        lock_buffer(bh);
-                       mark_buffer_async_write(bh);
+                       mark_buffer_async_write_endio(bh, handler);
                } else {
                        /*
                         * The buffer may have been set dirty during
@@ -2679,7 +2686,8 @@ int nobh_writepage(struct page *page, get_block_t *get_block,
 out:
        ret = mpage_writepage(page, get_block, wbc);
        if (ret == -EAGAIN)
-               ret = __block_write_full_page(inode, page, get_block, wbc);
+               ret = __block_write_full_page(inode, page, get_block, wbc,
+                                             end_buffer_async_write);
        return ret;
 }
 EXPORT_SYMBOL(nobh_writepage);
@@ -2837,9 +2845,10 @@ out:
 /*
  * The generic ->writepage function for buffer-backed address_spaces
+ * this form passes in the end_io handler used to finish the IO.
  */
-int block_write_full_page(struct page *page, get_block_t *get_block,
-                       struct writeback_control *wbc)
+int block_write_full_page_endio(struct page *page, get_block_t *get_block,
+                       struct writeback_control *wbc, bh_end_io_t *handler)
 {
        struct inode * const inode = page->mapping->host;
        loff_t i_size = i_size_read(inode);
@@ -2848,7 +2857,8 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
        /* Is the page fully inside i_size? */
        if (page->index < end_index)
-               return __block_write_full_page(inode, page, get_block, wbc);
+               return __block_write_full_page(inode, page, get_block, wbc,
+                                              handler);
 
        /* Is the page fully outside i_size? (truncate in progress) */
        offset = i_size & (PAGE_CACHE_SIZE-1);
@@ -2871,9 +2881,20 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
         * writes to that region are not written out to the file."
         */
        zero_user_segment(page, offset, PAGE_CACHE_SIZE);
-       return __block_write_full_page(inode, page, get_block, wbc);
+       return __block_write_full_page(inode, page, get_block, wbc, handler);
+}
+
+/*
+ * The generic ->writepage function for buffer-backed address_spaces
+ */
+int block_write_full_page(struct page *page, get_block_t *get_block,
+                       struct writeback_control *wbc)
+{
+       return block_write_full_page_endio(page, get_block, wbc,
+                                          end_buffer_async_write);
 }
 
 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
                            get_block_t *get_block)
 {
@@ -3342,9 +3363,11 @@ EXPORT_SYMBOL(block_read_full_page);
 EXPORT_SYMBOL(block_sync_page);
 EXPORT_SYMBOL(block_truncate_page);
 EXPORT_SYMBOL(block_write_full_page);
+EXPORT_SYMBOL(block_write_full_page_endio);
 EXPORT_SYMBOL(cont_write_begin);
 EXPORT_SYMBOL(end_buffer_read_sync);
 EXPORT_SYMBOL(end_buffer_write_sync);
+EXPORT_SYMBOL(end_buffer_async_write);
 EXPORT_SYMBOL(file_fsync);
 EXPORT_SYMBOL(generic_block_bmap);
 EXPORT_SYMBOL(generic_cont_expand_simple);
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -155,6 +155,7 @@ void create_empty_buffers(struct page *, unsigned long,
                        unsigned long b_state);
 void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
 void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
+void end_buffer_async_write(struct buffer_head *bh, int uptodate);
 
 /* Things to do with buffers at mapping->private_list */
 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
@@ -197,6 +198,8 @@ extern int buffer_heads_over_limit;
 void block_invalidatepage(struct page *page, unsigned long offset);
 int block_write_full_page(struct page *page, get_block_t *get_block,
                        struct writeback_control *wbc);
+int block_write_full_page_endio(struct page *page, get_block_t *get_block,
+                       struct writeback_control *wbc, bh_end_io_t *handler);
 int block_read_full_page(struct page*, get_block_t*);
 int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
                                unsigned long from);