author    David Teigland <teigland@redhat.com>  2011-07-07 14:05:03 -0500
committer David Teigland <teigland@redhat.com>  2011-07-12 16:02:09 -0500
commit    3881ac04ebf94268ba3d6e486aa524fd41f893a9 (patch)
tree      2ddb5c099f60f541d7e50f669f84fd58c3920bec /fs/dlm/lockspace.c
parent    3d6aa675fff9eee5a6339d67b355b63a6d69565f (diff)
dlm: improve rsb searches
By pre-allocating rsb structs before searching the hash table, they can
be inserted immediately.  This avoids always having to repeat the search
when adding the struct to the hash list.  This also adds space to the
rsb struct for a max resource name, so an rsb allocation can be used by
any request.  The constant size also allows us to finally use a slab
for the rsb structs.

Signed-off-by: David Teigland <teigland@redhat.com>
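The pattern the message describes can be sketched roughly as below.  This is
an illustrative sketch only, not the code this patch adds to fs/dlm/lock.c:
the helper names pre_rsb_struct()/get_rsb_struct(), the kzalloc() allocation,
and the -EAGAIN retry convention are assumptions; only the ls_new_rsb list,
the ls_new_rsb_spin lock, and the res_hashchain field come from the diff
below.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include "dlm_internal.h"	/* struct dlm_ls, struct dlm_rsb */

/* Refill ls_new_rsb outside the hash bucket lock, where a sleeping
 * allocation is allowed. */
static int pre_rsb_struct(struct dlm_ls *ls)
{
	struct dlm_rsb *r;

	spin_lock(&ls->ls_new_rsb_spin);
	if (!list_empty(&ls->ls_new_rsb)) {
		spin_unlock(&ls->ls_new_rsb_spin);
		return 0;
	}
	spin_unlock(&ls->ls_new_rsb_spin);

	/* the real code would take this from the rsb slab; kzalloc keeps
	 * the sketch self-contained */
	r = kzalloc(sizeof(struct dlm_rsb), GFP_NOFS);
	if (!r)
		return -ENOMEM;

	spin_lock(&ls->ls_new_rsb_spin);
	list_add(&r->res_hashchain, &ls->ls_new_rsb);
	spin_unlock(&ls->ls_new_rsb_spin);
	return 0;
}

/* Take one pre-allocated rsb while the bucket lock is held, so a failed
 * search can insert the new rsb immediately instead of dropping the
 * lock, allocating, and repeating the search. */
static int get_rsb_struct(struct dlm_ls *ls, struct dlm_rsb **r_ret)
{
	struct dlm_rsb *r;

	spin_lock(&ls->ls_new_rsb_spin);
	if (list_empty(&ls->ls_new_rsb)) {
		spin_unlock(&ls->ls_new_rsb_spin);
		return -EAGAIN;	/* caller refills via pre_rsb_struct() and retries */
	}
	r = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb, res_hashchain);
	list_del(&r->res_hashchain);
	spin_unlock(&ls->ls_new_rsb_spin);

	*r_ret = r;
	return 0;
}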
Diffstat (limited to 'fs/dlm/lockspace.c')
-rw-r--r--   fs/dlm/lockspace.c | 10 ++++++++++
1 file changed, 10 insertions(+), 0 deletions(-)
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index 871fe6deb5fa..98a97762c893 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -493,6 +493,9 @@ static int new_lockspace(const char *name, int namelen, void **lockspace,
 	INIT_LIST_HEAD(&ls->ls_timeout);
 	mutex_init(&ls->ls_timeout_mutex);
 
+	INIT_LIST_HEAD(&ls->ls_new_rsb);
+	spin_lock_init(&ls->ls_new_rsb_spin);
+
 	INIT_LIST_HEAD(&ls->ls_nodes);
 	INIT_LIST_HEAD(&ls->ls_nodes_gone);
 	ls->ls_num_nodes = 0;
@@ -764,6 +767,13 @@ static int release_lockspace(struct dlm_ls *ls, int force)
 	vfree(ls->ls_rsbtbl);
 
+	while (!list_empty(&ls->ls_new_rsb)) {
+		rsb = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb,
+				       res_hashchain);
+		list_del(&rsb->res_hashchain);
+		dlm_free_rsb(rsb);
+	}
+
 	/*
 	 * Free structures on any other lists
 	 */
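The commit message also notes that the now-constant rsb size finally allows a
slab cache, and the teardown loop above then simply returns each unused
pre-allocated rsb.  A rough sketch of that pairing is shown below; the cache
name and the *_sketch function names are hypothetical (dlm_free_rsb() in the
hunk above is the function the patch actually calls), so this is not the code
in fs/dlm/memory.c.

#include <linux/errno.h>
#include <linux/slab.h>
#include "dlm_internal.h"	/* struct dlm_rsb */

static struct kmem_cache *rsb_cache;	/* hypothetical name for the rsb slab */

int dlm_memory_init_sketch(void)
{
	/* One fixed object size: struct dlm_rsb now reserves room for a
	 * maximum-length resource name, so any request can reuse it. */
	rsb_cache = kmem_cache_create("dlm_rsb", sizeof(struct dlm_rsb),
				      __alignof__(struct dlm_rsb), 0, NULL);
	return rsb_cache ? 0 : -ENOMEM;
}

void dlm_free_rsb_sketch(struct dlm_rsb *r)
{
	/* Called for each rsb drained from ls_new_rsb in the
	 * release_lockspace() hunk above. */
	kmem_cache_free(rsb_cache, r);
}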