Commit 81f2094a authored by Mark Fasheh

[PATCH] ocfs2: use hlists for lockres hash

Switch from list_head to hlist_head. Make the size of the hash dependent
upon the allocated area, rather than a constant.
Signed-off-by: Mark Fasheh <mark.fasheh@oracle.com>
parent b7668c72
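
The payoff here is size: a struct hlist_head is a single pointer, half the size of the two-pointer struct list_head, so sizing the table as PAGE_SIZE / sizeof(struct hlist_head) doubles the number of buckets that fit in the one page the hash table already occupies, and the bucket count now tracks the allocation automatically. A minimal user-space sketch of that arithmetic, using simplified stand-ins for the kernel's <linux/list.h> types and an assumed 4096-byte page:

#include <stdio.h>

/* Simplified stand-ins for the kernel's <linux/list.h> types:
 * a list_head carries two pointers, an hlist_head only one. */
struct list_head  { struct list_head *next, *prev; };
struct hlist_node { struct hlist_node *next, **pprev; };
struct hlist_head { struct hlist_node *first; };

#define PAGE_SIZE 4096UL	/* assumed page size for the example */

/* Old scheme: a fixed, power-of-two bucket count. */
#define DLM_HASH_BITS		7
#define DLM_HASH_SIZE		(1 << DLM_HASH_BITS)

/* New scheme: as many buckets as fit in the allocated page. */
#define DLM_HASH_BUCKETS	(PAGE_SIZE / sizeof(struct hlist_head))

int main(void)
{
	printf("old: %d buckets, %zu of %lu bytes used\n",
	       DLM_HASH_SIZE,
	       DLM_HASH_SIZE * sizeof(struct list_head), PAGE_SIZE);
	printf("new: %lu buckets, %lu of %lu bytes used\n",
	       DLM_HASH_BUCKETS,
	       DLM_HASH_BUCKETS * sizeof(struct hlist_head), PAGE_SIZE);
	return 0;
}

With 64-bit pointers this prints 128 buckets (2048 of 4096 bytes used) for the old fixed-size scheme versus 512 buckets filling the whole page. Because the derived bucket count is no longer guaranteed to be a power of two, the patch also switches the bucket index from a mask (hash & DLM_HASH_MASK) to a modulo (hash % DLM_HASH_BUCKETS).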
@@ -37,9 +37,7 @@
 #define DLM_THREAD_SHUFFLE_INTERVAL   5     // flush everything every 5 passes
 #define DLM_THREAD_MS                 200   // flush at least every 200 ms
 
-#define DLM_HASH_BITS     7
-#define DLM_HASH_SIZE     (1 << DLM_HASH_BITS)
-#define DLM_HASH_MASK     (DLM_HASH_SIZE - 1)
+#define DLM_HASH_BUCKETS     (PAGE_SIZE / sizeof(struct hlist_head))
 
 enum dlm_ast_type {
 	DLM_AST = 0,
@@ -87,7 +85,7 @@ enum dlm_ctxt_state {
 struct dlm_ctxt
 {
 	struct list_head list;
-	struct list_head *resources;
+	struct hlist_head *lockres_hash;
 	struct list_head dirty_list;
 	struct list_head purge_list;
 	struct list_head pending_asts;
@@ -217,7 +215,7 @@ struct dlm_lock_resource
 {
 	/* WARNING: Please see the comment in dlm_init_lockres before
 	 * adding fields here. */
-	struct list_head list;
+	struct hlist_node hash_node;
 	struct kref refs;
 
 	/* please keep these next 3 in this order
...
@@ -117,8 +117,8 @@ EXPORT_SYMBOL_GPL(dlm_print_one_lock);
 void dlm_dump_lock_resources(struct dlm_ctxt *dlm)
 {
 	struct dlm_lock_resource *res;
-	struct list_head *iter;
-	struct list_head *bucket;
+	struct hlist_node *iter;
+	struct hlist_head *bucket;
 	int i;
 
 	mlog(ML_NOTICE, "struct dlm_ctxt: %s, node=%u, key=%u\n",
@@ -129,12 +129,10 @@ void dlm_dump_lock_resources(struct dlm_ctxt *dlm)
 	}
 
 	spin_lock(&dlm->spinlock);
-	for (i=0; i<DLM_HASH_SIZE; i++) {
-		bucket = &(dlm->resources[i]);
-		list_for_each(iter, bucket) {
-			res = list_entry(iter, struct dlm_lock_resource, list);
+	for (i=0; i<DLM_HASH_BUCKETS; i++) {
+		bucket = &(dlm->lockres_hash[i]);
+		hlist_for_each_entry(res, iter, bucket, hash_node)
 			dlm_print_one_lock_resource(res);
-		}
 	}
 	spin_unlock(&dlm->spinlock);
 }
...
@@ -77,26 +77,26 @@ static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm);
 void __dlm_unhash_lockres(struct dlm_lock_resource *lockres)
 {
-	list_del_init(&lockres->list);
+	hlist_del_init(&lockres->hash_node);
 	dlm_lockres_put(lockres);
 }
 
 void __dlm_insert_lockres(struct dlm_ctxt *dlm,
 			  struct dlm_lock_resource *res)
 {
-	struct list_head *bucket;
+	struct hlist_head *bucket;
 	struct qstr *q;
 
 	assert_spin_locked(&dlm->spinlock);
 
 	q = &res->lockname;
 	q->hash = full_name_hash(q->name, q->len);
-	bucket = &(dlm->resources[q->hash & DLM_HASH_MASK]);
+	bucket = &(dlm->lockres_hash[q->hash % DLM_HASH_BUCKETS]);
 
 	/* get a reference for our hashtable */
 	dlm_lockres_get(res);
 
-	list_add_tail(&res->list, bucket);
+	hlist_add_head(&res->hash_node, bucket);
 }
 
 struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm,
@@ -104,9 +104,9 @@ struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm,
 			   unsigned int len)
 {
 	unsigned int hash;
-	struct list_head *iter;
+	struct hlist_node *iter;
 	struct dlm_lock_resource *tmpres=NULL;
-	struct list_head *bucket;
+	struct hlist_head *bucket;
 
 	mlog_entry("%.*s\n", len, name);
@@ -114,11 +114,11 @@ struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm,
 	hash = full_name_hash(name, len);
 
-	bucket = &(dlm->resources[hash & DLM_HASH_MASK]);
+	bucket = &(dlm->lockres_hash[hash % DLM_HASH_BUCKETS]);
 
 	/* check for pre-existing lock */
-	list_for_each(iter, bucket) {
-		tmpres = list_entry(iter, struct dlm_lock_resource, list);
+	hlist_for_each(iter, bucket) {
+		tmpres = hlist_entry(iter, struct dlm_lock_resource, hash_node);
 		if (tmpres->lockname.len == len &&
 		    memcmp(tmpres->lockname.name, name, len) == 0) {
 			dlm_lockres_get(tmpres);
@@ -193,8 +193,8 @@ static int dlm_wait_on_domain_helper(const char *domain)
 static void dlm_free_ctxt_mem(struct dlm_ctxt *dlm)
 {
-	if (dlm->resources)
-		free_page((unsigned long) dlm->resources);
+	if (dlm->lockres_hash)
+		free_page((unsigned long) dlm->lockres_hash);
 
 	if (dlm->name)
 		kfree(dlm->name);
@@ -303,10 +303,10 @@ static void dlm_migrate_all_locks(struct dlm_ctxt *dlm)
 	mlog(0, "Migrating locks from domain %s\n", dlm->name);
 restart:
 	spin_lock(&dlm->spinlock);
-	for (i=0; i<DLM_HASH_SIZE; i++) {
-		while (!list_empty(&dlm->resources[i])) {
-			res = list_entry(dlm->resources[i].next,
-					 struct dlm_lock_resource, list);
+	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
+		while (!hlist_empty(&dlm->lockres_hash[i])) {
+			res = hlist_entry(dlm->lockres_hash[i].first,
+					  struct dlm_lock_resource, hash_node);
 			/* need reference when manually grabbing lockres */
 			dlm_lockres_get(res);
 			/* this should unhash the lockres
@@ -1191,18 +1191,17 @@ static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain,
 		goto leave;
 	}
 
-	dlm->resources = (struct list_head *) __get_free_page(GFP_KERNEL);
-	if (!dlm->resources) {
+	dlm->lockres_hash = (struct hlist_head *) __get_free_page(GFP_KERNEL);
+	if (!dlm->lockres_hash) {
 		mlog_errno(-ENOMEM);
 		kfree(dlm->name);
 		kfree(dlm);
 		dlm = NULL;
 		goto leave;
 	}
-	memset(dlm->resources, 0, PAGE_SIZE);
-	for (i=0; i<DLM_HASH_SIZE; i++)
-		INIT_LIST_HEAD(&dlm->resources[i]);
+
+	for (i=0; i<DLM_HASH_BUCKETS; i++)
+		INIT_HLIST_HEAD(&dlm->lockres_hash[i]);
 
 	strcpy(dlm->name, domain);
 	dlm->key = key;
...
@@ -564,7 +564,7 @@ static void dlm_lockres_release(struct kref *kref)
 	/* By the time we're ready to blow this guy away, we shouldn't
 	 * be on any lists. */
-	BUG_ON(!list_empty(&res->list));
+	BUG_ON(!hlist_unhashed(&res->hash_node));
 	BUG_ON(!list_empty(&res->granted));
 	BUG_ON(!list_empty(&res->converting));
 	BUG_ON(!list_empty(&res->blocked));
@@ -605,7 +605,7 @@ static void dlm_init_lockres(struct dlm_ctxt *dlm,
 	init_waitqueue_head(&res->wq);
 	spin_lock_init(&res->spinlock);
-	INIT_LIST_HEAD(&res->list);
+	INIT_HLIST_NODE(&res->hash_node);
 	INIT_LIST_HEAD(&res->granted);
 	INIT_LIST_HEAD(&res->converting);
 	INIT_LIST_HEAD(&res->blocked);
...
@@ -1693,7 +1693,10 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
 					      u8 dead_node, u8 new_master)
 {
 	int i;
-	struct list_head *iter, *iter2, *bucket;
+	struct list_head *iter, *iter2;
+	struct hlist_node *hash_iter;
+	struct hlist_head *bucket;
 	struct dlm_lock_resource *res;
 
 	mlog_entry_void();
@@ -1717,10 +1720,9 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
 	 * for now we need to run the whole hash, clear
 	 * the RECOVERING state and set the owner
 	 * if necessary */
-	for (i=0; i<DLM_HASH_SIZE; i++) {
-		bucket = &(dlm->resources[i]);
-		list_for_each(iter, bucket) {
-			res = list_entry (iter, struct dlm_lock_resource, list);
+	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
+		bucket = &(dlm->lockres_hash[i]);
+		hlist_for_each_entry(res, hash_iter, bucket, hash_node) {
 			if (res->state & DLM_LOCK_RES_RECOVERING) {
 				if (res->owner == dead_node) {
 					mlog(0, "(this=%u) res %.*s owner=%u "
@@ -1852,10 +1854,10 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
 static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
 {
-	struct list_head *iter;
+	struct hlist_node *iter;
 	struct dlm_lock_resource *res;
 	int i;
-	struct list_head *bucket;
+	struct hlist_head *bucket;
 	struct dlm_lock *lock;
@@ -1876,10 +1878,9 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
 	 * can be kicked again to see if any ASTs or BASTs
 	 * need to be fired as a result.
 	 */
-	for (i=0; i<DLM_HASH_SIZE; i++) {
-		bucket = &(dlm->resources[i]);
-		list_for_each(iter, bucket) {
-			res = list_entry (iter, struct dlm_lock_resource, list);
+	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
+		bucket = &(dlm->lockres_hash[i]);
+		hlist_for_each_entry(res, iter, bucket, hash_node) {
 			/* always prune any $RECOVERY entries for dead nodes,
 			 * otherwise hangs can occur during later recovery */
 			if (dlm_is_recovery_lock(res->lockname.name,
...