rhashtable: Lower/upper bucket may map to same lock while shrinking
Each per bucket lock covers a configurable number of buckets. While
shrinking, two buckets in the old table contain entries for a single
bucket in the new table. We need to lock down both while linking.
Check if they are protected by different locks to avoid a recursive
lock.
Fixes: 97defe1e ("rhashtable: Per bucket locks & deferred expansion/shrinking")
Reported-by: Fengguang Wu <fengguang.wu@intel.com>
Signed-off-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
df8a39defa
commit
80ca8c3a84
1 changed file with 12 additions and 3 deletions
|
@@ -443,8 +443,16 @@ int rhashtable_shrink(struct rhashtable *ht)
 	new_bucket_lock = bucket_lock(new_tbl, new_hash);
 
 	spin_lock_bh(old_bucket_lock1);
-	spin_lock_bh_nested(old_bucket_lock2, RHT_LOCK_NESTED);
-	spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED2);
+
+	/* Depending on the lock per buckets mapping, the bucket in
+	 * the lower and upper region may map to the same lock.
+	 */
+	if (old_bucket_lock1 != old_bucket_lock2) {
+		spin_lock_bh_nested(old_bucket_lock2, RHT_LOCK_NESTED);
+		spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED2);
+	} else {
+		spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);
+	}
 
 	rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
 			   tbl->buckets[new_hash]);
@@ -452,7 +460,8 @@ int rhashtable_shrink(struct rhashtable *ht)
 			   tbl->buckets[new_hash + new_tbl->size]);
 
 	spin_unlock_bh(new_bucket_lock);
-	spin_unlock_bh(old_bucket_lock2);
+	if (old_bucket_lock1 != old_bucket_lock2)
+		spin_unlock_bh(old_bucket_lock2);
 	spin_unlock_bh(old_bucket_lock1);
 }
|
Loading…
Reference in a new issue