Commit 12132933 authored by Steven Whitehouse

[GFS2] Remove queue_empty() function

This function is no longer required since we do not do recursive
locking in the glock layer. As a result, all of its callers can be
replaced with list_empty() calls.
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
parent bd44e2b0
@@ -210,30 +210,6 @@ out:
 	return rv;
 }
 
-/**
- * queue_empty - check to see if a glock's queue is empty
- * @gl: the glock
- * @head: the head of the queue to check
- *
- * This function protects the list in the event that a process already
- * has a holder on the list and is adding a second holder for itself.
- * The glmutex lock is what generally prevents processes from working
- * on the same glock at once, but the special case of adding a second
- * holder for yourself ("recursive" locking) doesn't involve locking
- * glmutex, making the spin lock necessary.
- *
- * Returns: 1 if the queue is empty
- */
-
-static inline int queue_empty(struct gfs2_glock *gl, struct list_head *head)
-{
-	int empty;
-	spin_lock(&gl->gl_spin);
-	empty = list_empty(head);
-	spin_unlock(&gl->gl_spin);
-	return empty;
-}
-
 /**
  * search_bucket() - Find struct gfs2_glock by lock number
  * @bucket: the bucket to search
@@ -814,7 +790,7 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
 	int op_done = 1;
 
 	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
-	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
+	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
 	gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));
 
 	state_change(gl, ret & LM_OUT_ST_MASK);
@@ -925,7 +901,7 @@ void gfs2_glock_xmote_th(struct gfs2_holder *gh)
 	glops->go_xmote_th(gl);
 
 	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
-	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
+	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
 	gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
 	gfs2_assert_warn(sdp, state != gl->gl_state);
@@ -960,7 +936,7 @@ static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
 	struct gfs2_holder *gh = gl->gl_req_gh;
 
 	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
-	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
+	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
 	gfs2_assert_warn(sdp, !ret);
 
 	state_change(gl, LM_ST_UNLOCKED);
@@ -1007,7 +983,7 @@ static void gfs2_glock_drop_th(struct gfs2_glock *gl)
 	glops->go_drop_th(gl);
 
 	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
-	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
+	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
 	gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);
 
 	gfs2_glock_hold(gl);
@@ -1697,7 +1673,7 @@ void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
 	atomic_inc(&sdp->sd_reclaimed);
 
 	if (gfs2_glmutex_trylock(gl)) {
-		if (queue_empty(gl, &gl->gl_holders) &&
+		if (list_empty(&gl->gl_holders) &&
 		    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
 			handle_callback(gl, LM_ST_UNLOCKED);
 		gfs2_glmutex_unlock(gl);
@@ -1761,7 +1737,7 @@ static void scan_glock(struct gfs2_glock *gl)
 		return;
 
 	if (gfs2_glmutex_trylock(gl)) {
-		if (queue_empty(gl, &gl->gl_holders) &&
+		if (list_empty(&gl->gl_holders) &&
 		    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
 			goto out_schedule;
 		gfs2_glmutex_unlock(gl);
...@@ -1810,7 +1786,7 @@ static void clear_glock(struct gfs2_glock *gl) ...@@ -1810,7 +1786,7 @@ static void clear_glock(struct gfs2_glock *gl)
} }
if (gfs2_glmutex_trylock(gl)) { if (gfs2_glmutex_trylock(gl)) {
if (queue_empty(gl, &gl->gl_holders) && if (list_empty(gl, &gl->gl_holders) &&
gl->gl_state != LM_ST_UNLOCKED) gl->gl_state != LM_ST_UNLOCKED)
handle_callback(gl, LM_ST_UNLOCKED); handle_callback(gl, LM_ST_UNLOCKED);
gfs2_glmutex_unlock(gl); gfs2_glmutex_unlock(gl);
......
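For readers following the reasoning in the removed queue_empty() comment: the gl_spin spin lock was only needed because a "recursive" holder could be queued without taking glmutex; once that pattern is gone, a bare list_empty() check made under glmutex suffices. The standalone sketch below (userspace C with pthreads; fake_glock, old_queue_empty() and new_holders_empty() are illustrative stand-ins, not the real fs/gfs2/glock.c code) contrasts the two styles of check.

/*
 * Minimal sketch, not kernel code: contrasts the old spin-lock-protected
 * emptiness check with the plain list_empty() check the callers use now.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

/* Tiny stand-in for the kernel's circular doubly-linked list head. */
struct list_head {
	struct list_head *next, *prev;
};

static inline int list_empty(const struct list_head *head)
{
	return head->next == head;
}

/* Simplified stand-in for struct gfs2_glock. */
struct fake_glock {
	pthread_mutex_t glmutex;     /* stands in for the GLF_LOCK "glmutex" */
	pthread_spinlock_t gl_spin;  /* stands in for gl->gl_spin */
	struct list_head gl_holders;
};

/*
 * Old style: take gl_spin around the check, because a holder could be
 * queued by the same process without holding glmutex (recursive locking).
 */
static int old_queue_empty(struct fake_glock *gl, struct list_head *head)
{
	int empty;

	pthread_spin_lock(&gl->gl_spin);
	empty = list_empty(head);
	pthread_spin_unlock(&gl->gl_spin);
	return empty;
}

/*
 * New style: the caller already holds glmutex and nothing queues holders
 * behind its back any more, so a bare list_empty() is sufficient.
 */
static int new_holders_empty(struct fake_glock *gl)
{
	return list_empty(&gl->gl_holders);
}

int main(void)
{
	struct fake_glock gl = { .glmutex = PTHREAD_MUTEX_INITIALIZER };

	pthread_spin_init(&gl.gl_spin, PTHREAD_PROCESS_PRIVATE);
	gl.gl_holders.next = gl.gl_holders.prev = &gl.gl_holders;

	pthread_mutex_lock(&gl.glmutex);      /* analogous to gfs2_glmutex_trylock() succeeding */
	printf("old check: %d, new check: %d\n",
	       old_queue_empty(&gl, &gl.gl_holders),
	       new_holders_empty(&gl));
	pthread_mutex_unlock(&gl.glmutex);

	pthread_spin_destroy(&gl.gl_spin);
	return 0;
}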