Commit f198dbb9 authored by Ryusuke Konishi

nilfs2: move get block functions in bmap.c into btree codes

The two get block functions for btree nodes, nilfs_bmap_get_block() and
nilfs_bmap_get_new_block(), are called only from the btree code.
This relocation increases the opportunities for compiler optimization.
Signed-off-by: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
parent 9f098900
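For context only: the optimization the commit message alludes to comes from the two helpers becoming static functions inside the btree code, so the compiler sees every caller in the same translation unit and may inline or specialize them. The sketch below is illustrative, not part of the patch, and all names in it are made up.

/*
 * Hypothetical sketch: an extern helper defined in another file must stay
 * an out-of-line call, while a static helper in the same file may be
 * inlined away entirely by the compiler.
 */
int get_block_extern(unsigned long ptr);        /* defined elsewhere */

static int get_block_local(unsigned long ptr)   /* visible only in this file */
{
        return ptr != 0 ? 0 : -1;               /* stand-in for a real lookup */
}

int lookup(unsigned long ptr)
{
        /*
         * With the static helper this call can vanish after inlining;
         * the extern variant above would always remain an actual call.
         */
        return get_block_local(ptr);
}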
@@ -417,26 +417,6 @@ void nilfs_bmap_sub_blocks(const struct nilfs_bmap *bmap, int n)
                 mark_inode_dirty(bmap->b_inode);
 }
 
-int nilfs_bmap_get_block(const struct nilfs_bmap *bmap, __u64 ptr,
-                         struct buffer_head **bhp)
-{
-        return nilfs_btnode_get(&NILFS_BMAP_I(bmap)->i_btnode_cache,
-                                ptr, 0, bhp, 0);
-}
-
-int nilfs_bmap_get_new_block(const struct nilfs_bmap *bmap, __u64 ptr,
-                             struct buffer_head **bhp)
-{
-        int ret;
-
-        ret = nilfs_btnode_get(&NILFS_BMAP_I(bmap)->i_btnode_cache,
-                                ptr, 0, bhp, 1);
-        if (ret < 0)
-                return ret;
-        set_buffer_nilfs_volatile(*bhp);
-        return 0;
-}
-
 __u64 nilfs_bmap_data_get_key(const struct nilfs_bmap *bmap,
                               const struct buffer_head *bh)
 {
...
@@ -202,12 +202,6 @@ void nilfs_bmap_add_blocks(const struct nilfs_bmap *, int);
 void nilfs_bmap_sub_blocks(const struct nilfs_bmap *, int);
 
 
-int nilfs_bmap_get_block(const struct nilfs_bmap *, __u64,
-                         struct buffer_head **);
-int nilfs_bmap_get_new_block(const struct nilfs_bmap *, __u64,
-                             struct buffer_head **);
-
-
 /* Assume that bmap semaphore is locked. */
 static inline int nilfs_bmap_dirty(const struct nilfs_bmap *bmap)
 {
...
@@ -122,10 +122,29 @@ static void nilfs_btree_clear_path(const struct nilfs_btree *btree,
         }
 }
 
 /*
  * B-tree node operations
  */
+static int nilfs_btree_get_block(const struct nilfs_btree *btree, __u64 ptr,
+                                 struct buffer_head **bhp)
+{
+        struct address_space *btnc =
+                &NILFS_BMAP_I((struct nilfs_bmap *)btree)->i_btnode_cache;
+        return nilfs_btnode_get(btnc, ptr, 0, bhp, 0);
+}
+
+static int nilfs_btree_get_new_block(const struct nilfs_btree *btree,
+                                     __u64 ptr, struct buffer_head **bhp)
+{
+        struct address_space *btnc =
+                &NILFS_BMAP_I((struct nilfs_bmap *)btree)->i_btnode_cache;
+        int ret;
+
+        ret = nilfs_btnode_get(btnc, ptr, 0, bhp, 1);
+        if (!ret)
+                set_buffer_nilfs_volatile(*bhp);
+        return ret;
+}
+
 static inline int
 nilfs_btree_node_get_flags(const struct nilfs_btree *btree,
@@ -487,8 +506,7 @@ static int nilfs_btree_do_lookup(const struct nilfs_btree *btree,
         path[level].bp_index = index;
 
         for (level--; level >= minlevel; level--) {
-                ret = nilfs_bmap_get_block(&btree->bt_bmap, ptr,
-                                           &path[level].bp_bh);
+                ret = nilfs_btree_get_block(btree, ptr, &path[level].bp_bh);
                 if (ret < 0)
                         return ret;
                 node = nilfs_btree_get_nonroot_node(btree, path, level);
@@ -534,8 +552,7 @@ static int nilfs_btree_do_lookup_last(const struct nilfs_btree *btree,
         path[level].bp_index = index;
 
         for (level--; level > 0; level--) {
-                ret = nilfs_bmap_get_block(&btree->bt_bmap, ptr,
-                                           &path[level].bp_bh);
+                ret = nilfs_btree_get_block(btree, ptr, &path[level].bp_bh);
                 if (ret < 0)
                         return ret;
                 node = nilfs_btree_get_nonroot_node(btree, path, level);
@@ -923,8 +940,7 @@ static int nilfs_btree_prepare_insert(struct nilfs_btree *btree,
                 if (pindex > 0) {
                         sibptr = nilfs_btree_node_get_ptr(btree, parent,
                                                           pindex - 1);
-                        ret = nilfs_bmap_get_block(&btree->bt_bmap, sibptr,
-                                                   &bh);
+                        ret = nilfs_btree_get_block(btree, sibptr, &bh);
                         if (ret < 0)
                                 goto err_out_child_node;
                         sib = (struct nilfs_btree_node *)bh->b_data;
@@ -943,8 +959,7 @@ static int nilfs_btree_prepare_insert(struct nilfs_btree *btree,
                     nilfs_btree_node_get_nchildren(btree, parent) - 1) {
                         sibptr = nilfs_btree_node_get_ptr(btree, parent,
                                                           pindex + 1);
-                        ret = nilfs_bmap_get_block(&btree->bt_bmap, sibptr,
-                                                   &bh);
+                        ret = nilfs_btree_get_block(btree, sibptr, &bh);
                         if (ret < 0)
                                 goto err_out_child_node;
                         sib = (struct nilfs_btree_node *)bh->b_data;
@@ -965,7 +980,7 @@ static int nilfs_btree_prepare_insert(struct nilfs_btree *btree,
                         &btree->bt_bmap, &path[level].bp_newreq);
                 if (ret < 0)
                         goto err_out_child_node;
-                ret = nilfs_bmap_get_new_block(&btree->bt_bmap,
-                                               path[level].bp_newreq.bpr_ptr,
-                                               &bh);
+                ret = nilfs_btree_get_new_block(btree,
+                                                path[level].bp_newreq.bpr_ptr,
+                                                &bh);
                 if (ret < 0)
@@ -997,8 +1012,8 @@ static int nilfs_btree_prepare_insert(struct nilfs_btree *btree,
                 &btree->bt_bmap, &path[level].bp_newreq);
         if (ret < 0)
                 goto err_out_child_node;
-        ret = nilfs_bmap_get_new_block(&btree->bt_bmap,
-                                       path[level].bp_newreq.bpr_ptr, &bh);
+        ret = nilfs_btree_get_new_block(btree, path[level].bp_newreq.bpr_ptr,
+                                        &bh);
         if (ret < 0)
                 goto err_out_curr_node;
@@ -1320,8 +1335,7 @@ static int nilfs_btree_prepare_delete(struct nilfs_btree *btree,
                         /* left sibling */
                         sibptr = nilfs_btree_node_get_ptr(btree, parent,
                                                           pindex - 1);
-                        ret = nilfs_bmap_get_block(&btree->bt_bmap, sibptr,
-                                                   &bh);
+                        ret = nilfs_btree_get_block(btree, sibptr, &bh);
                         if (ret < 0)
                                 goto err_out_curr_node;
                         sib = (struct nilfs_btree_node *)bh->b_data;
@@ -1342,8 +1356,7 @@ static int nilfs_btree_prepare_delete(struct nilfs_btree *btree,
                         /* right sibling */
                         sibptr = nilfs_btree_node_get_ptr(btree, parent,
                                                           pindex + 1);
-                        ret = nilfs_bmap_get_block(&btree->bt_bmap, sibptr,
-                                                   &bh);
+                        ret = nilfs_btree_get_block(btree, sibptr, &bh);
                         if (ret < 0)
                                 goto err_out_curr_node;
                         sib = (struct nilfs_btree_node *)bh->b_data;
@@ -1500,7 +1513,7 @@ static int nilfs_btree_check_delete(struct nilfs_bmap *bmap, __u64 key)
         if (nchildren > 1)
                 return 0;
         ptr = nilfs_btree_node_get_ptr(btree, root, nchildren - 1);
-        ret = nilfs_bmap_get_block(bmap, ptr, &bh);
+        ret = nilfs_btree_get_block(btree, ptr, &bh);
         if (ret < 0)
                 return ret;
         node = (struct nilfs_btree_node *)bh->b_data;
@@ -1541,7 +1554,7 @@ static int nilfs_btree_gather_data(struct nilfs_bmap *bmap,
         nchildren = nilfs_btree_node_get_nchildren(btree, root);
         WARN_ON(nchildren > 1);
         ptr = nilfs_btree_node_get_ptr(btree, root, nchildren - 1);
-        ret = nilfs_bmap_get_block(bmap, ptr, &bh);
+        ret = nilfs_btree_get_block(btree, ptr, &bh);
         if (ret < 0)
                 return ret;
         node = (struct nilfs_btree_node *)bh->b_data;
@@ -1598,7 +1611,7 @@ nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *bmap, __u64 key,
                 if (ret < 0)
                         goto err_out_dreq;
 
-                ret = nilfs_bmap_get_new_block(bmap, nreq->bpr_ptr, &bh);
+                ret = nilfs_btree_get_new_block(btree, nreq->bpr_ptr, &bh);
                 if (ret < 0)
                         goto err_out_nreq;
@@ -2167,7 +2180,7 @@ static int nilfs_btree_mark(struct nilfs_bmap *bmap, __u64 key, int level)
                 WARN_ON(ret == -ENOENT);
                 goto out;
         }
-        ret = nilfs_bmap_get_block(&btree->bt_bmap, ptr, &bh);
+        ret = nilfs_btree_get_block(btree, ptr, &bh);
         if (ret < 0) {
                 WARN_ON(ret == -ENOENT);
                 goto out;
...