IB/core: use RCU for uverbs id lookup
The current implementation gets a spin_lock, and at any scale with qib and hfi1 post send, the lock contention grows exponentially with the number of QPs. idr_find() is RCU compatible, so read doesn't need the lock. Change to use rcu_read_lock() and rcu_read_unlock() in __idr_get_uobj(). kfree_rcu() is used to ensure a grace period between the idr removal and actual free. Reviewed-by: Ira Weiny <ira.weiny@intel.com> Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com> Reviewed-by: Jason Gunthorpe <jgunthorpe@obsidianresearch.com> Signed-off-by: Doug Ledford <dledford@redhat.com>
This commit is contained in:
parent
57ab251213
commit
d144da8c6f
2 changed files with 8 additions and 5 deletions
|
@ -62,9 +62,11 @@ static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };
|
|||
* The ib_uobject locking scheme is as follows:
|
||||
*
|
||||
* - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
|
||||
* needs to be held during all idr operations. When an object is
|
||||
* needs to be held during all idr write operations. When an object is
|
||||
* looked up, a reference must be taken on the object's kref before
|
||||
* dropping this lock.
|
||||
* dropping this lock. For read operations, the rcu_read_lock()
|
||||
* and rcu_read_unlock() are used, but similarly the kref reference is grabbed
|
||||
* before the rcu_read_unlock().
|
||||
*
|
||||
* - Each object also has an rwsem. This rwsem must be held for
|
||||
* reading while an operation that uses the object is performed.
|
||||
|
@ -96,7 +98,7 @@ static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
|
|||
|
||||
static void release_uobj(struct kref *kref)
|
||||
{
|
||||
kfree(container_of(kref, struct ib_uobject, ref));
|
||||
kfree_rcu(container_of(kref, struct ib_uobject, ref), rcu);
|
||||
}
|
||||
|
||||
static void put_uobj(struct ib_uobject *uobj)
|
||||
|
@ -145,7 +147,7 @@ static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
|
|||
{
|
||||
struct ib_uobject *uobj;
|
||||
|
||||
spin_lock(&ib_uverbs_idr_lock);
|
||||
rcu_read_lock();
|
||||
uobj = idr_find(idr, id);
|
||||
if (uobj) {
|
||||
if (uobj->context == context)
|
||||
|
@ -153,7 +155,7 @@ static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
|
|||
else
|
||||
uobj = NULL;
|
||||
}
|
||||
spin_unlock(&ib_uverbs_idr_lock);
|
||||
rcu_read_unlock();
|
||||
|
||||
return uobj;
|
||||
}
|
||||
|
|
|
@ -1271,6 +1271,7 @@ struct ib_uobject {
|
|||
int id; /* index into kernel idr */
|
||||
struct kref ref;
|
||||
struct rw_semaphore mutex; /* protects .live */
|
||||
struct rcu_head rcu; /* kfree_rcu() overhead */
|
||||
int live;
|
||||
};
|
||||
|
||||
|
|
Loading…
Reference in a new issue