Commit 8e73f275 authored by Chris Mason

Btrfs: Optimize locking in btrfs_next_leaf()

btrfs_next_leaf was using blocking locks when it could have been using
faster spinning ones instead.  This adds a few extra checks around
the pieces that block and switches over to spinning locks.
Signed-off-by: Chris Mason <chris.mason@oracle.com>
parent c8c42864
...@@ -4127,28 +4127,44 @@ next: ...@@ -4127,28 +4127,44 @@ next:
int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path) int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
{ {
int slot; int slot;
int level = 1; int level;
struct extent_buffer *c; struct extent_buffer *c;
struct extent_buffer *next = NULL; struct extent_buffer *next;
struct btrfs_key key; struct btrfs_key key;
u32 nritems; u32 nritems;
int ret; int ret;
int old_spinning = path->leave_spinning;
int force_blocking = 0;
nritems = btrfs_header_nritems(path->nodes[0]); nritems = btrfs_header_nritems(path->nodes[0]);
if (nritems == 0) if (nritems == 0)
return 1; return 1;
btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1); /*
* we take the blocks in an order that upsets lockdep. Using
* blocking mode is the only way around it.
*/
#ifdef CONFIG_DEBUG_LOCK_ALLOC
force_blocking = 1;
#endif
btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
again:
level = 1;
next = NULL;
btrfs_release_path(root, path); btrfs_release_path(root, path);
path->keep_locks = 1; path->keep_locks = 1;
if (!force_blocking)
path->leave_spinning = 1;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
path->keep_locks = 0; path->keep_locks = 0;
if (ret < 0) if (ret < 0)
return ret; return ret;
btrfs_set_path_blocking(path);
nritems = btrfs_header_nritems(path->nodes[0]); nritems = btrfs_header_nritems(path->nodes[0]);
/* /*
* by releasing the path above we dropped all our locks. A balance * by releasing the path above we dropped all our locks. A balance
...@@ -4158,19 +4174,24 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path) ...@@ -4158,19 +4174,24 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
*/ */
if (nritems > 0 && path->slots[0] < nritems - 1) { if (nritems > 0 && path->slots[0] < nritems - 1) {
path->slots[0]++; path->slots[0]++;
ret = 0;
goto done; goto done;
} }
while (level < BTRFS_MAX_LEVEL) { while (level < BTRFS_MAX_LEVEL) {
if (!path->nodes[level]) if (!path->nodes[level]) {
return 1; ret = 1;
goto done;
}
slot = path->slots[level] + 1; slot = path->slots[level] + 1;
c = path->nodes[level]; c = path->nodes[level];
if (slot >= btrfs_header_nritems(c)) { if (slot >= btrfs_header_nritems(c)) {
level++; level++;
if (level == BTRFS_MAX_LEVEL) if (level == BTRFS_MAX_LEVEL) {
return 1; ret = 1;
goto done;
}
continue; continue;
} }
...@@ -4179,16 +4200,22 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path) ...@@ -4179,16 +4200,22 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
free_extent_buffer(next); free_extent_buffer(next);
} }
/* the path was set to blocking above */ next = c;
if (level == 1 && (path->locks[1] || path->skip_locking) && ret = read_block_for_search(NULL, root, path, &next, level,
path->reada) slot, &key);
reada_for_search(root, path, level, slot, 0); if (ret == -EAGAIN)
goto again;
next = read_node_slot(root, c, slot);
if (!path->skip_locking) { if (!path->skip_locking) {
btrfs_assert_tree_locked(c); ret = btrfs_try_spin_lock(next);
btrfs_tree_lock(next); if (!ret) {
btrfs_set_lock_blocking(next); btrfs_set_path_blocking(path);
btrfs_tree_lock(next);
if (!force_blocking)
btrfs_clear_path_blocking(path, next);
}
if (force_blocking)
btrfs_set_lock_blocking(next);
} }
break; break;
} }
...@@ -4198,27 +4225,42 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path) ...@@ -4198,27 +4225,42 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
c = path->nodes[level]; c = path->nodes[level];
if (path->locks[level]) if (path->locks[level])
btrfs_tree_unlock(c); btrfs_tree_unlock(c);
free_extent_buffer(c); free_extent_buffer(c);
path->nodes[level] = next; path->nodes[level] = next;
path->slots[level] = 0; path->slots[level] = 0;
if (!path->skip_locking) if (!path->skip_locking)
path->locks[level] = 1; path->locks[level] = 1;
if (!level) if (!level)
break; break;
btrfs_set_path_blocking(path); ret = read_block_for_search(NULL, root, path, &next, level,
if (level == 1 && path->locks[1] && path->reada) 0, &key);
reada_for_search(root, path, level, slot, 0); if (ret == -EAGAIN)
next = read_node_slot(root, next, 0); goto again;
if (!path->skip_locking) { if (!path->skip_locking) {
btrfs_assert_tree_locked(path->nodes[level]); btrfs_assert_tree_locked(path->nodes[level]);
btrfs_tree_lock(next); ret = btrfs_try_spin_lock(next);
btrfs_set_lock_blocking(next); if (!ret) {
btrfs_set_path_blocking(path);
btrfs_tree_lock(next);
if (!force_blocking)
btrfs_clear_path_blocking(path, next);
}
if (force_blocking)
btrfs_set_lock_blocking(next);
} }
} }
ret = 0;
done: done:
unlock_up(path, 0, 1); unlock_up(path, 0, 1);
return 0; path->leave_spinning = old_spinning;
if (!old_spinning)
btrfs_set_path_blocking(path);
return ret;
} }
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment