summaryrefslogtreecommitdiff
path: root/lib
diff options
context:
space:
mode:
authorThomas Graf <tgraf@suug.ch>2015-01-12 23:58:21 +0000
committerDavid S. Miller <davem@davemloft.net>2015-01-14 00:21:44 -0500
commit80ca8c3a84c74a87977558861bb8eef650732912 (patch)
treee275dc10e62e28ea8b7f4c25e6a30664974e233f /lib
parentdf8a39defad46b83694ea6dd868d332976d62cc0 (diff)
rhashtable: Lower/upper bucket may map to same lock while shrinking
Each per bucket lock covers a configurable number of buckets. While shrinking, two buckets in the old table contain entries for a single bucket in the new table. We need to lock down both while linking. Check if they are protected by different locks to avoid a recursive lock. Fixes: 97defe1e ("rhashtable: Per bucket locks & deferred expansion/shrinking") Reported-by: Fengguang Wu <fengguang.wu@intel.com> Signed-off-by: Thomas Graf <tgraf@suug.ch> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'lib')
-rw-r--r--lib/rhashtable.c | 15
1 file changed, 12 insertions, 3 deletions
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index ed6ae1ad304c..aca699813ba9 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -443,8 +443,16 @@ int rhashtable_shrink(struct rhashtable *ht)
new_bucket_lock = bucket_lock(new_tbl, new_hash);
spin_lock_bh(old_bucket_lock1);
- spin_lock_bh_nested(old_bucket_lock2, RHT_LOCK_NESTED);
- spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED2);
+
+ /* Depending on the lock per buckets mapping, the bucket in
+ * the lower and upper region may map to the same lock.
+ */
+ if (old_bucket_lock1 != old_bucket_lock2) {
+ spin_lock_bh_nested(old_bucket_lock2, RHT_LOCK_NESTED);
+ spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED2);
+ } else {
+ spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);
+ }
rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
tbl->buckets[new_hash]);
@@ -452,7 +460,8 @@ int rhashtable_shrink(struct rhashtable *ht)
tbl->buckets[new_hash + new_tbl->size]);
spin_unlock_bh(new_bucket_lock);
- spin_unlock_bh(old_bucket_lock2);
+ if (old_bucket_lock1 != old_bucket_lock2)
+ spin_unlock_bh(old_bucket_lock2);
spin_unlock_bh(old_bucket_lock1);
}