ipc/sem.c: update/correct memory barriers
sem_lock() did not properly pair memory barriers: !spin_is_locked() and
spin_unlock_wait() are both only control barriers.  The code needs an
acquire barrier, otherwise the cpu might perform read operations before
the lock test.

As no primitive exists inside <include/spinlock.h> and since it seems
no one wants another primitive, the code creates a local primitive
within ipc/sem.c.

With regards to -stable: the change of sem_wait_array() is a bugfix,
the change to sem_lock() is a nop (just a preprocessor redefinition to
improve the readability).  The bugfix is necessary for all kernels that
use sem_wait_array() (i.e.: starting from 3.10).

Signed-off-by: Manfred Spraul <manfred@colorfullife.com>
Reported-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Kirill Tkhai <ktkhai@parallels.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: <stable@vger.kernel.org>	[3.10+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
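To make the distinction between a control barrier and an acquire barrier
concrete, here is a minimal userspace sketch using C11 atomics.  This is
not kernel code: lock_looks_free() and acquire_after_lock_test() are
hypothetical names, and atomic_thread_fence(memory_order_acquire) stands
in for smp_rmb() behind the new primitive.  A relaxed load followed by an
acquire fence behaves like a load-acquire, which is the ordering the lock
test needs before later reads can be trusted.

#include <stdatomic.h>
#include <stdbool.h>

/* Hypothetical analogue of ipc_smp_acquire__after_spin_is_unlocked():
 * an acquire fence that orders the preceding lock test against all
 * subsequent loads. */
static inline void acquire_after_lock_test(void)
{
	atomic_thread_fence(memory_order_acquire);
}

/* Hypothetical analogue of the fixed sem_lock() fast-path test.  The
 * if() alone is only a control barrier: the CPU may satisfy reads
 * placed after it before the lock test completes. */
static inline bool lock_looks_free(atomic_int *lock_word)
{
	if (atomic_load_explicit(lock_word, memory_order_relaxed) == 0) {
		acquire_after_lock_test();
		return true;	/* later reads now see the unlocker's writes */
	}
	return false;
}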
This commit is contained in:
parent 7f6bf39bbd
commit 3ed1f8a99d
1 changed file with 14 additions and 4 deletions

ipc/sem.c
@@ -252,6 +252,16 @@ static void sem_rcu_free(struct rcu_head *head)
 	ipc_rcu_free(head);
 }
 
+/*
+ * spin_unlock_wait() and !spin_is_locked() are not memory barriers, they
+ * are only control barriers.
+ * The code must pair with spin_unlock(&sem->lock) or
+ * spin_unlock(&sem_perm.lock), thus just the control barrier is insufficient.
+ *
+ * smp_rmb() is sufficient, as writes cannot pass the control barrier.
+ */
+#define ipc_smp_acquire__after_spin_is_unlocked()	smp_rmb()
+
 /*
  * Wait until all currently ongoing simple ops have completed.
  * Caller must own sem_perm.lock.
@@ -275,6 +285,7 @@ static void sem_wait_array(struct sem_array *sma)
 		sem = sma->sem_base + i;
 		spin_unlock_wait(&sem->lock);
 	}
+	ipc_smp_acquire__after_spin_is_unlocked();
 }
 
 /*
@@ -327,13 +338,12 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
 		/* Then check that the global lock is free */
 		if (!spin_is_locked(&sma->sem_perm.lock)) {
 			/*
-			 * The ipc object lock check must be visible on all
-			 * cores before rechecking the complex count.  Otherwise
-			 * we can race with another thread that does:
+			 * We need a memory barrier with acquire semantics,
+			 * otherwise we can race with another thread that does:
 			 *	complex_count++;
 			 *	spin_unlock(sem_perm.lock);
 			 */
-			smp_rmb();
+			ipc_smp_acquire__after_spin_is_unlocked();
 
 			/*
 			 * Now repeat the test of complex_count:
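The race that the updated sem_lock() comment describes can be reproduced
in miniature outside the kernel.  The sketch below (C11 atomics plus
pthreads; perm_locked and complex_count are illustrative stand-ins, not
the real ipc/sem.c fields) models spin_unlock(&sem_perm.lock) as a
release store and the new primitive as an acquire fence.  With the fence,
the reader is guaranteed to print 1; remove the fence and only the
control dependency remains, so a weakly ordered CPU may legitimately
return a stale 0.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int perm_locked = 1;	/* "sem_perm.lock", held by the writer */
static atomic_int complex_count;	/* data published under that lock */

static void *writer(void *arg)
{
	atomic_fetch_add_explicit(&complex_count, 1, memory_order_relaxed);
	/* spin_unlock(): a release store, pairing with the reader's fence */
	atomic_store_explicit(&perm_locked, 0, memory_order_release);
	return arg;
}

static void *reader(void *arg)
{
	/* !spin_is_locked(): a relaxed load, i.e. only a control barrier */
	while (atomic_load_explicit(&perm_locked, memory_order_relaxed))
		;
	/* ipc_smp_acquire__after_spin_is_unlocked(): without this fence the
	 * complex_count load below may be satisfied before the lock test. */
	atomic_thread_fence(memory_order_acquire);
	printf("complex_count = %d\n",
	       atomic_load_explicit(&complex_count, memory_order_relaxed));
	return arg;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&r, NULL, reader, NULL);
	pthread_create(&w, NULL, writer, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}

Build with something like cc -pthread race.c.  The fence-after-load form
is used instead of a plain load-acquire because it mirrors the kernel
pattern: the lock test already exists, and the barrier is added after it.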