Commit 89747d36 authored by Mingming Cao, committed by Linus Torvalds

[PATCH] ext3_get_blocks: Mapping multiple blocks at once

Currently ext3_get_block() only maps or allocates one block at a time.  This
is quite inefficient for sequential IO workloads.

I have posted an early implementation of simple multiple-block mapping and
allocation for the current ext3.  The basic idea is to allocate the first
block in the existing way, and then attempt to allocate the next adjacent
blocks on a best-effort basis.  More details about the implementation can be
found here:
http://marc.theaimsgroup.com/?l=ext2-devel&m=112162230003522&w=2
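
To make the idea concrete, here is a minimal user-space sketch of the
best-effort loop (illustrative names only; map_contiguous() and the plain
array stand in for the ext3 indirect-block chain walked by the real code):

#include <stdio.h>

/*
 * Hypothetical stand-in for the best-effort loop: `ptrs' plays the role of
 * the block numbers stored in the last indirect block, `maxblocks' is how
 * many blocks the caller asked for, and `blocks_to_boundary' is how many
 * more entries remain before the end of that indirect block.
 */
static unsigned long map_contiguous(const unsigned long *ptrs,
				    unsigned long maxblocks,
				    unsigned long blocks_to_boundary)
{
	unsigned long first_block = ptrs[0];
	unsigned long count = 1;	/* the first block is always mapped */

	while (count < maxblocks && count <= blocks_to_boundary) {
		if (ptrs[count] != first_block + count)
			break;		/* next block is not adjacent on disk */
		count++;
	}
	return count;
}

int main(void)
{
	/* blocks 100..103 are contiguous on disk; 200 breaks the run */
	unsigned long ptrs[] = { 100, 101, 102, 103, 200 };

	printf("%lu blocks mapped\n", map_contiguous(ptrs, 8, 4));	/* prints 4 */
	return 0;
}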

The following is the latest version of the patch series: the original patch is
broken up into 5 patches, some of the logic is reworked, and some bugs are
fixed.  The breakdown is:

 [patch 1] Add mapping of multiple blocks at a time in ext3_get_blocks()
 [patch 2] Extend ext3_get_blocks() to support multiple block allocation
 [patch 3] Implement multiple block allocation in ext3_try_to_allocate()
 (called via ext3_new_block()).
 [patch 4] Proper accounting updates in ext3_new_blocks()
 [patch 5] Adjust the reservation window size properly (by the given number
 of blocks to allocate) before block allocation, to increase the
 chance of allocating multiple blocks in a single call.

Tests done so far include fsx, tiobench and dbench.  The following numbers,
collected from Direct IO tests (1G file creation/read), show that system time
is greatly reduced (by more than 50% on my 8-CPU system) with the patches.

 1G file DIO write:
 	2.6.15		2.6.15+patches
 real    0m31.275s	0m31.161s
 user    0m0.000s	0m0.000s
 sys     0m3.384s	0m0.564s

 1G file DIO read:
 	2.6.15		2.6.15+patches
 real    0m30.733s	0m30.624s
 user    0m0.000s	0m0.004s
 sys     0m0.748s	0m0.380s

Some previous tests we did on buffered IO, using multiple block allocation
together with delayed allocation, showed a noticeable improvement in
throughput and system time.

This patch:

Add support for mapping multiple blocks in one call.

This is useful for DIO reads and re-writes (where blocks are already
allocated), and is also in line with Christoph's proposal of using getblocks()
in mpage_readpage() or mpage_readpages().
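
As a rough illustration of the new calling convention (return > 0 is the
number of blocks mapped, 0 means the lookup found nothing, < 0 is an error),
here is a small user-space sketch; fake_get_blocks() and struct map_result
are hypothetical stand-ins for ext3_get_blocks_handle() and the buffer_head
the caller passes in:

#include <stdio.h>
#include <stddef.h>

/* hypothetical stand-in for the mapping a get_blocks() call fills in */
struct map_result {
	unsigned long b_blocknr;	/* first physical block mapped */
	size_t b_size;			/* bytes covered by the mapping */
};

/* stand-in for ext3_get_blocks_handle(): pretend 4 contiguous blocks exist */
static int fake_get_blocks(unsigned long iblock, unsigned long maxblocks,
			   struct map_result *res, unsigned int blkbits)
{
	unsigned long mapped = maxblocks < 4 ? maxblocks : 4;

	res->b_blocknr = 1000 + iblock;
	res->b_size = (size_t)mapped << blkbits;	/* as the DIO path does */
	return (int)mapped;	/* > 0: blocks mapped, 0: hole, < 0: error */
}

int main(void)
{
	struct map_result res;
	int ret = fake_get_blocks(0, 8, &res, 12);	/* 4KB block size */

	if (ret > 0)
		printf("mapped %d blocks: %zu bytes at block %lu\n",
		       ret, res.b_size, res.b_blocknr);
	else if (ret == 0)
		printf("hole: nothing mapped\n");
	else
		printf("error %d\n", ret);
	return 0;
}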
Signed-off-by: Mingming Cao <cmm@us.ibm.com>
Cc: Badari Pulavarty <pbadari@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent e2d53f95
@@ -131,8 +131,9 @@ static int ext3_readdir(struct file * filp,
 		struct buffer_head *bh = NULL;
 
 		map_bh.b_state = 0;
-		err = ext3_get_block_handle(NULL, inode, blk, &map_bh, 0, 0);
-		if (!err) {
+		err = ext3_get_blocks_handle(NULL, inode, blk, 1,
+						&map_bh, 0, 0);
+		if (err > 0) {
 			page_cache_readahead(sb->s_bdev->bd_inode->i_mapping,
 				&filp->f_ra,
 				filp,
@@ -330,7 +330,7 @@ static int ext3_block_to_path(struct inode *inode,
 		ext3_warning (inode->i_sb, "ext3_block_to_path", "block > big");
 	}
 	if (boundary)
-		*boundary = (i_block & (ptrs - 1)) == (final - 1);
+		*boundary = final - 1 - (i_block & (ptrs - 1));
 	return n;
 }
@@ -669,11 +669,15 @@ err_out:
  * akpm: `handle' can be NULL if create == 0.
  *
  * The BKL may not be held on entry here.  Be sure to take it early.
+ * return > 0, # of blocks mapped or allocated.
+ * return = 0, if plain lookup failed.
+ * return < 0, error case.
  */
 int
-ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
-		struct buffer_head *bh_result, int create, int extend_disksize)
+ext3_get_blocks_handle(handle_t *handle, struct inode *inode, sector_t iblock,
+		unsigned long maxblocks, struct buffer_head *bh_result,
+		int create, int extend_disksize)
 {
 	int err = -EIO;
 	int offsets[4];
@@ -681,11 +685,15 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
 	Indirect *partial;
 	unsigned long goal;
 	int left;
-	int boundary = 0;
-	const int depth = ext3_block_to_path(inode, iblock, offsets, &boundary);
+	int blocks_to_boundary = 0;
+	int depth;
 	struct ext3_inode_info *ei = EXT3_I(inode);
+	int count = 0;
+	unsigned long first_block = 0;
 
 	J_ASSERT(handle != NULL || create == 0);
 
+	depth = ext3_block_to_path(inode, iblock, offsets, &blocks_to_boundary);
+
 	if (depth == 0)
 		goto out;
@@ -694,7 +702,30 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
 
 	/* Simplest case - block found, no allocation needed */
 	if (!partial) {
+		first_block = chain[depth - 1].key;
 		clear_buffer_new(bh_result);
-		goto got_it;
+		count++;
+		/*map more blocks*/
+		while (count < maxblocks && count <= blocks_to_boundary) {
+			if (!verify_chain(chain, partial)) {
+				/*
+				 * Indirect block might be removed by
+				 * truncate while we were reading it.
+				 * Handling of that case: forget what we've
+				 * got now. Flag the err as EAGAIN, so it
+				 * will reread.
+				 */
+				err = -EAGAIN;
+				count = 0;
+				break;
+			}
+			if (le32_to_cpu(*(chain[depth-1].p+count) ==
+					(first_block + count)))
+				count++;
+			else
+				break;
+		}
+		if (err != -EAGAIN)
+			goto got_it;
 	}
@@ -723,6 +754,7 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
 	}
 	partial = ext3_get_branch(inode, depth, offsets, chain, &err);
 	if (!partial) {
+		count++;
 		mutex_unlock(&ei->truncate_mutex);
 		if (err)
 			goto cleanup;
@@ -772,8 +804,9 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
 		set_buffer_new(bh_result);
 got_it:
 	map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
-	if (boundary)
+	if (blocks_to_boundary == 0)
 		set_buffer_boundary(bh_result);
+	err = count;
 	/* Clean up and exit */
 	partial = chain + depth - 1;	/* the whole chain */
 cleanup:
@@ -787,21 +820,6 @@ out:
 	return err;
 }
 
-static int ext3_get_block(struct inode *inode, sector_t iblock,
-			struct buffer_head *bh_result, int create)
-{
-	handle_t *handle = NULL;
-	int ret;
-
-	if (create) {
-		handle = ext3_journal_current_handle();
-		J_ASSERT(handle != 0);
-	}
-	ret = ext3_get_block_handle(handle, inode, iblock,
-				bh_result, create, 1);
-	return ret;
-}
-
 #define DIO_CREDITS (EXT3_RESERVE_TRANS_BLOCKS + 32)
 
 static int
@@ -812,9 +830,12 @@ ext3_direct_io_get_blocks(struct inode *inode, sector_t iblock,
 	handle_t *handle = journal_current_handle();
 	int ret = 0;
 
-	if (!handle)
+	if (!create)
 		goto get_block;		/* A read */
 
+	if (max_blocks == 1)
+		goto get_block;		/* A single block get */
+
 	if (handle->h_transaction->t_state == T_LOCKED) {
 		/*
 		 * Huge direct-io writes can hold off commits for long
@@ -841,13 +862,31 @@ ext3_direct_io_get_blocks(struct inode *inode, sector_t iblock,
 	}
 
 get_block:
-	if (ret == 0)
-		ret = ext3_get_block_handle(handle, inode, iblock,
-					bh_result, create, 0);
-	bh_result->b_size = (1 << inode->i_blkbits);
+	if (ret == 0) {
+		ret = ext3_get_blocks_handle(handle, inode, iblock,
+					max_blocks, bh_result, create, 0);
+		if (ret > 0) {
+			bh_result->b_size = (ret << inode->i_blkbits);
+			ret = 0;
+		}
+	}
 	return ret;
 }
 
+static int ext3_get_blocks(struct inode *inode, sector_t iblock,
+		unsigned long maxblocks, struct buffer_head *bh_result,
+		int create)
+{
+	return ext3_direct_io_get_blocks(inode, iblock, maxblocks,
+					bh_result, create);
+}
+
+static int ext3_get_block(struct inode *inode, sector_t iblock,
+			struct buffer_head *bh_result, int create)
+{
+	return ext3_get_blocks(inode, iblock, 1, bh_result, create);
+}
+
 /*
  * `handle' can be NULL if create is zero
  */
@@ -862,8 +901,16 @@ struct buffer_head *ext3_getblk(handle_t *handle, struct inode * inode,
 	dummy.b_state = 0;
 	dummy.b_blocknr = -1000;
 	buffer_trace_init(&dummy.b_history);
-	*errp = ext3_get_block_handle(handle, inode, block, &dummy, create, 1);
-	if (!*errp && buffer_mapped(&dummy)) {
+	err = ext3_get_blocks_handle(handle, inode, block, 1,
+					&dummy, create, 1);
+	if (err == 1) {
+		err = 0;
+	} else if (err >= 0) {
+		WARN_ON(1);
+		err = -EIO;
+	}
+	*errp = err;
+	if (!err && buffer_mapped(&dummy)) {
 		struct buffer_head *bh;
 		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
 		if (!bh) {
@@ -775,9 +775,9 @@ extern unsigned long ext3_count_free (struct buffer_head *, unsigned);
 int ext3_forget(handle_t *, int, struct inode *, struct buffer_head *, int);
 struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *);
 struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *);
-int ext3_get_block_handle(handle_t *handle, struct inode *inode,
-	sector_t iblock, struct buffer_head *bh_result, int create,
-	int extend_disksize);
+int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
+	sector_t iblock, unsigned long maxblocks, struct buffer_head *bh_result,
+	int create, int extend_disksize);
 extern void ext3_read_inode (struct inode *);
 extern int ext3_write_inode (struct inode *, int);