summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorHerbert Xu <herbert@gondor.apana.org.au>2015-03-14 13:57:21 +1100
committerDavid S. Miller <davem@davemloft.net>2015-03-15 01:35:34 -0400
commit8f2484bdb55daa53ecaddb5fa4c298e3d262b69e (patch)
tree4d27176d889d22df4587fd4e94ff85d406409b8a
parenteddee5ba34eb6c9890ef106f19ead2b370e5342f (diff)
rhashtable: Use SINGLE_DEPTH_NESTING
We only nest one level deep, so there is no need to roll our own subclasses. Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au> Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--lib/rhashtable.c9
1 file changed, 2 insertions, 7 deletions
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index f7c76079f8f1..5d06cc2b1e4a 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -33,11 +33,6 @@
/* Base bits plus 1 bit for nulls marker */
#define HASH_RESERVED_SPACE (RHT_BASE_BITS + 1)
-enum {
- RHT_LOCK_NORMAL,
- RHT_LOCK_NESTED,
-};
-
/* The bucket lock is selected based on the hash and protects mutations
* on a group of hash buckets.
*
@@ -231,7 +226,7 @@ static int rhashtable_rehash_one(struct rhashtable *ht, unsigned old_hash)
new_bucket_lock = bucket_lock(new_tbl, new_hash);
- spin_lock_nested(new_bucket_lock, RHT_LOCK_NESTED);
+ spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
head = rht_dereference_bucket(new_tbl->buckets[new_hash],
new_tbl, new_hash);
@@ -405,7 +400,7 @@ static bool __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
tbl = rht_dereference_rcu(ht->future_tbl, ht);
if (tbl != old_tbl) {
hash = head_hashfn(ht, tbl, obj);
- spin_lock_nested(bucket_lock(tbl, hash), RHT_LOCK_NESTED);
+ spin_lock_nested(bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING);
}
if (compare &&