Commit 5431bf97 authored by Evgeniy Dushistov, committed by Linus Torvalds

[PATCH] ufs: prepare write + change blocks on the fly

This fixes "change blocks numbers on the fly" in case when "prepare
write page" is in the call chain, in this case some buffers may be not
uptodate and not mapped, we should care to map them and load from disk.

This patch was tested with:
 - ufs regressions simple tests
 - fsx-linux
 - ltp(20060306)
 - untar and build kernel
Signed-off-by: Evgeniy Dushistov <dushistov@mail.ru>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 2189850f
@@ -244,62 +244,87 @@ failed:
  * We can come here from ufs_writepage or ufs_prepare_write,
  * locked_page is argument of these functions, so we already lock it.
  */
-static void ufs_change_blocknr(struct inode *inode, unsigned int beg,
-			       unsigned int count, unsigned int oldb,
-			       unsigned int newb, struct page *locked_page)
+static void ufs_change_blocknr(struct inode *inode, sector_t beg,
+			       unsigned int count, sector_t oldb,
+			       sector_t newb, struct page *locked_page)
 {
-	const unsigned mask = (1 << (PAGE_CACHE_SHIFT - inode->i_blkbits)) - 1;
+	const unsigned blks_per_page =
+		1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+	const unsigned mask = blks_per_page - 1;
 	struct address_space * const mapping = inode->i_mapping;
-	pgoff_t index, cur_index;
-	unsigned end, pos, j;
+	pgoff_t index, cur_index, last_index;
+	unsigned pos, j, lblock;
+	sector_t end, i;
 	struct page *page;
 	struct buffer_head *head, *bh;
 
-	UFSD("ENTER, ino %lu, count %u, oldb %u, newb %u\n",
-	     inode->i_ino, count, oldb, newb);
+	UFSD("ENTER, ino %lu, count %u, oldb %llu, newb %llu\n",
+	     inode->i_ino, count,
+	     (unsigned long long)oldb, (unsigned long long)newb);
 
 	BUG_ON(!locked_page);
 	BUG_ON(!PageLocked(locked_page));
 
 	cur_index = locked_page->index;
-
-	for (end = count + beg; beg < end; beg = (beg | mask) + 1) {
-		index = beg >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
+	end = count + beg;
+	last_index = end >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
+	for (i = beg; i < end; i = (i | mask) + 1) {
+		index = i >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
 
 		if (likely(cur_index != index)) {
 			page = ufs_get_locked_page(mapping, index);
-			if (!page || IS_ERR(page)) /* it was truncated or EIO */
+			if (!page)/* it was truncated */
+				continue;
+			if (IS_ERR(page)) {/* or EIO */
+				ufs_error(inode->i_sb, __FUNCTION__,
+					  "read of page %llu failed\n",
+					  (unsigned long long)index);
 				continue;
+			}
 		} else
 			page = locked_page;
 
 		head = page_buffers(page);
 		bh = head;
-		pos = beg & mask;
+		pos = i & mask;
 		for (j = 0; j < pos; ++j)
 			bh = bh->b_this_page;
+		j = 0;
+		if (unlikely(index == last_index))
+			lblock = end & mask;
+		else
+			lblock = blks_per_page;
 		do {
-			if (buffer_mapped(bh)) {
-				pos = bh->b_blocknr - oldb;
-				if (pos < count) {
-					UFSD(" change from %llu to %llu\n",
-					     (unsigned long long)pos + oldb,
-					     (unsigned long long)pos + newb);
-					bh->b_blocknr = newb + pos;
-					unmap_underlying_metadata(bh->b_bdev,
-								  bh->b_blocknr);
-					mark_buffer_dirty(bh);
-					++j;
+			if (j >= lblock)
+				break;
+			pos = (i - beg) + j;
+
+			if (!buffer_mapped(bh))
+				map_bh(bh, inode->i_sb, oldb + pos);
+			if (!buffer_uptodate(bh)) {
+				ll_rw_block(READ, 1, &bh);
+				wait_on_buffer(bh);
+				if (!buffer_uptodate(bh)) {
+					ufs_error(inode->i_sb, __FUNCTION__,
+						  "read of block failed\n");
+					break;
 				}
 			}
+
+			UFSD(" change from %llu to %llu, pos %u\n",
+			     (unsigned long long)pos + oldb,
+			     (unsigned long long)pos + newb, pos);
+			bh->b_blocknr = newb + pos;
+			unmap_underlying_metadata(bh->b_bdev,
+						  bh->b_blocknr);
+			mark_buffer_dirty(bh);
+			++j;
 
 			bh = bh->b_this_page;
 		} while (bh != head);
 
+		if (j)
+			set_page_dirty(page);
+
 		if (likely(cur_index != index))
 			ufs_put_locked_page(page);
 	}
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment