Commit c0dd527e (parent da29da14) authored by Steven Rostedt, committed by Thomas Gleixner

fs: jbd: replace bh_state lock

I was compiling a kernel in a shell that I set to a priority of 20,
and it locked up on the bit_spin_lock crap of jbd.

This patch adds another spinlock to the buffer head and uses that
instead of the bit_spins.

From: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

---
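A minimal sketch (not part of the patch) of the before/after locking pattern, to show what the jbd helpers changed below reduce to:

	/* Before: the "lock" is the BH_State bit inside bh->b_state.
	 * bit_spin_lock() spins with preemption disabled and cannot be
	 * substituted by a preemptible lock on -rt. */
	bit_spin_lock(BH_State, &bh->b_state);
	/* ... touch journal-related buffer state ... */
	bit_spin_unlock(BH_State, &bh->b_state);

	/* After: a dedicated spinlock_t embedded in struct buffer_head and
	 * initialized in alloc_buffer_head(). */
	spin_lock(&bh->b_state_lock);
	/* ... touch journal-related buffer state ... */
	spin_unlock(&bh->b_state_lock);
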

 fs/buffer.c                 |    3 ++-
 include/linux/buffer_head.h |    1 +
 include/linux/jbd.h         |   12 ++++++------
 3 files changed, 9 insertions(+), 7 deletions(-)
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -40,7 +40,6 @@
 #include <linux/cpu.h>
 #include <linux/bitops.h>
 #include <linux/mpage.h>
-#include <linux/bit_spinlock.h>
 
 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
@@ -3243,6 +3242,7 @@ struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
 	if (ret) {
 		INIT_LIST_HEAD(&ret->b_assoc_buffers);
 		spin_lock_init(&ret->b_uptodate_lock);
+		spin_lock_init(&ret->b_state_lock);
 		get_cpu_var(bh_accounting).nr++;
 		recalc_bh_state();
 		put_cpu_var(bh_accounting);
@@ -3255,6 +3255,7 @@ void free_buffer_head(struct buffer_head *bh)
 {
 	BUG_ON(!list_empty(&bh->b_assoc_buffers));
 	BUG_ON(spin_is_locked(&bh->b_uptodate_lock));
+	BUG_ON(spin_is_locked(&bh->b_state_lock));
 	kmem_cache_free(bh_cachep, bh);
 	get_cpu_var(bh_accounting).nr--;
 	recalc_bh_state();
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -71,6 +71,7 @@ struct buffer_head {
 						   associated with */
 	atomic_t b_count;		/* users using this buffer_head */
 	spinlock_t b_uptodate_lock;
+	spinlock_t b_state_lock;
 };
 
 /*
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -315,32 +315,32 @@ static inline struct journal_head *bh2jh(struct buffer_head *bh)
 
 static inline void jbd_lock_bh_state(struct buffer_head *bh)
 {
-	bit_spin_lock(BH_State, &bh->b_state);
+	spin_lock(&bh->b_state_lock);
 }
 
 static inline int jbd_trylock_bh_state(struct buffer_head *bh)
 {
-	return bit_spin_trylock(BH_State, &bh->b_state);
+	return spin_trylock(&bh->b_state_lock);
 }
 
 static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
 {
-	return bit_spin_is_locked(BH_State, &bh->b_state);
+	return spin_is_locked(&bh->b_state_lock);
 }
 
 static inline void jbd_unlock_bh_state(struct buffer_head *bh)
 {
-	bit_spin_unlock(BH_State, &bh->b_state);
+	spin_unlock(&bh->b_state_lock);
 }
 
 static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
 {
-	bit_spin_lock(BH_JournalHead, &bh->b_state);
+	spin_lock_irq(&bh->b_uptodate_lock);
 }
 
 static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
 {
-	bit_spin_unlock(BH_JournalHead, &bh->b_state);
+	spin_unlock_irq(&bh->b_uptodate_lock);
 }
 
 struct jbd_revoke_table_s;
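As a usage illustration, a hypothetical jbd-style caller (the function name and body are illustrative only, not from this patch); with this change the BH_State helpers boil down to plain spin_lock/spin_unlock on b_state_lock:

	static void example_touch_journal_state(struct buffer_head *bh)
	{
		jbd_lock_bh_state(bh);		/* now spin_lock(&bh->b_state_lock) */
		if (buffer_jbd(bh)) {
			struct journal_head *jh = bh2jh(bh);
			/* ... inspect or update jh while the state is locked ... */
			(void)jh;
		}
		jbd_unlock_bh_state(bh);	/* now spin_unlock(&bh->b_state_lock) */
	}

Note that jbd_lock_bh_journal_head()/jbd_unlock_bh_journal_head() do not get their own spinlock in this patch; they reuse b_uptodate_lock with interrupts disabled.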