locking/qspinlock: Use atomic_sub_return_release() in queued_spin_unlock()
The existing version uses a heavy barrier while only release semantics
is required. So use atomic_sub_return_release() instead.

Suggested-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Pan Xinhui <xinhui.pan@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: arnd@arndb.de
Cc: waiman.long@hp.com
Link: http://lkml.kernel.org/r/1464943094-3129-1-git-send-email-xinhui.pan@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 6428671bae
commit ca50e426f9

1 changed file with 2 additions and 3 deletions
@@ -111,10 +111,9 @@ static __always_inline void queued_spin_lock(struct qspinlock *lock)
 static __always_inline void queued_spin_unlock(struct qspinlock *lock)
 {
 	/*
-	 * smp_mb__before_atomic() in order to guarantee release semantics
+	 * unlock() needs release semantics:
 	 */
-	smp_mb__before_atomic();
-	atomic_sub(_Q_LOCKED_VAL, &lock->val);
+	(void)atomic_sub_return_release(_Q_LOCKED_VAL, &lock->val);
 }
 
 #endif
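
To make the ordering argument concrete for readers outside the kernel tree, here is a
minimal userspace sketch using C11 atomics. This is not the kernel implementation:
the toy_qspinlock type and the toy_* names are hypothetical, and the seq_cst fence is
only an approximation of the kernel's smp_mb__before_atomic(). The point it
illustrates is the one the commit message makes: because the lock side takes ACQUIRE
ordering, the unlock side only needs a RELEASE operation, not a full barrier.

/*
 * Minimal userspace sketch (C11 atomics), not the kernel implementation.
 * The toy_* names are hypothetical; the seq_cst fence only approximates
 * the kernel's smp_mb__before_atomic().
 */
#include <stdatomic.h>

#define TOY_LOCKED_VAL 1	/* stands in for _Q_LOCKED_VAL */

struct toy_qspinlock {
	atomic_int val;
};

static inline void toy_spin_lock(struct toy_qspinlock *lock)
{
	int expected = 0;

	/*
	 * ACQUIRE on success: accesses inside the critical section cannot
	 * be reordered before the lock acquisition.
	 */
	while (!atomic_compare_exchange_weak_explicit(&lock->val, &expected,
						      TOY_LOCKED_VAL,
						      memory_order_acquire,
						      memory_order_relaxed))
		expected = 0;	/* a failed CAS overwrites 'expected' */
}

/* Old scheme: full fence plus relaxed sub, stronger than unlock needs. */
static inline void toy_spin_unlock_heavy(struct toy_qspinlock *lock)
{
	atomic_thread_fence(memory_order_seq_cst);
	atomic_fetch_sub_explicit(&lock->val, TOY_LOCKED_VAL,
				  memory_order_relaxed);
}

/* New scheme: the subtraction itself carries RELEASE ordering. */
static inline void toy_spin_unlock_release(struct toy_qspinlock *lock)
{
	atomic_fetch_sub_explicit(&lock->val, TOY_LOCKED_VAL,
				  memory_order_release);
}

The RELEASE in the unlock pairs with the ACQUIRE in the lock: everything the old
holder wrote before unlocking is guaranteed to be visible to the next holder once it
acquires the lock, which is all the unlock path has to provide.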