[MIPS] Make support for weakly ordered LL/SC a config option.
None of the weakly ordered processors supported in-tree need this, but it seems like this could change ...

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent ed203dadcd
commit 17099b1142

7 changed files with 59 additions and 38 deletions
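An overview before the per-file hunks: the patch introduces smp_llsc_mb(), a barrier that emits a real SYNC only when the new CONFIG_WEAK_REORDERING_BEYOND_LLSC option is enabled on an SMP build and degenerates to a compiler barrier otherwise. The sketch below condenses that selection logic into a single macro for readability; it is not the verbatim header (the real patch splits it across __WEAK_LLSC_MB and smp_llsc_mb(), see the barrier.h hunks further down).

	/* Condensed sketch of the new barrier selection (simplified, not verbatim). */
	#if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
	/* The CPU may reorder accesses past LL/SC: emit a real SYNC. */
	#define smp_llsc_mb()	__asm__ __volatile__("	sync	\n" : : : "memory")
	#else
	/* No reordering beyond LL/SC (all in-tree CPUs today): compiler barrier only. */
	#define smp_llsc_mb()	__asm__ __volatile__("		\n" : : : "memory")
	#endif
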
@@ -1190,8 +1190,19 @@ config SYS_HAS_CPU_RM9000
 config SYS_HAS_CPU_SB1
 	bool
 
+#
+# CPU may reorder R->R, R->W, W->R, W->W
+# Reordering beyond LL and SC is handled in WEAK_REORDERING_BEYOND_LLSC
+#
 config WEAK_ORDERING
 	bool
+
+#
+# CPU may reorder reads and writes beyond LL/SC
+# CPU may reorder R->LL, R->LL, W->LL, W->LL, R->SC, R->SC, W->SC, W->SC
+#
+config WEAK_REORDERING_BEYOND_LLSC
+	bool
 endmenu
 
 #

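To illustrate the distinction the new Kconfig comments draw, the schematic below (not taken from the patch; the function name and operand constraints are illustrative only) shows the kind of LL/SC retry loop that the atomic_*_return hunks below bracket with smp_llsc_mb(). On a CPU with plain WEAK_ORDERING the LL/SC pair itself is not reordered against, so the macro costs nothing at run time; with WEAK_REORDERING_BEYOND_LLSC it emits the SYNC that keeps surrounding accesses on the correct side of the sequence.

	/* Schematic MIPS LL/SC increment, mirroring the barrier placement of
	 * the atomic_*_return hunks below; illustrative only. */
	static inline void schematic_atomic_inc(volatile int *p)
	{
		int tmp;

		smp_llsc_mb();		/* order earlier accesses before the LL */
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noreorder				\n"
		"	.set	mips3					\n"
		"1:	ll	%0, %1					\n"
		"	addiu	%0, %0, 1				\n"
		"	sc	%0, %1					\n"
		"	beqz	%0, 1b					\n"
		"	 nop						\n"
		"	.set	pop					\n"
		: "=&r" (tmp), "=m" (*p)
		: "m" (*p));
		smp_llsc_mb();		/* order the update before later accesses */
	}
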
@@ -138,7 +138,7 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 {
 	unsigned long result;
 
-	smp_mb();
+	smp_llsc_mb();
 
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
@@ -181,7 +181,7 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 		raw_local_irq_restore(flags);
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return result;
 }
@@ -190,7 +190,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 {
 	unsigned long result;
 
-	smp_mb();
+	smp_llsc_mb();
 
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
@@ -233,7 +233,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 		raw_local_irq_restore(flags);
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return result;
 }
@@ -250,7 +250,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 {
 	unsigned long result;
 
-	smp_mb();
+	smp_llsc_mb();
 
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
@@ -302,7 +302,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 		raw_local_irq_restore(flags);
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return result;
 }
@@ -519,7 +519,7 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 {
 	unsigned long result;
 
-	smp_mb();
+	smp_llsc_mb();
 
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
@@ -562,7 +562,7 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 		raw_local_irq_restore(flags);
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return result;
 }
@@ -571,7 +571,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 {
 	unsigned long result;
 
-	smp_mb();
+	smp_llsc_mb();
 
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
@@ -614,7 +614,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 		raw_local_irq_restore(flags);
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return result;
 }
@@ -631,7 +631,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 {
 	unsigned long result;
 
-	smp_mb();
+	smp_llsc_mb();
 
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long temp;
@@ -683,7 +683,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 		raw_local_irq_restore(flags);
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return result;
 }
@@ -791,10 +791,11 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
  * atomic*_return operations are serializing but not the non-*_return
  * versions.
  */
-#define smp_mb__before_atomic_dec()	smp_mb()
-#define smp_mb__after_atomic_dec()	smp_mb()
-#define smp_mb__before_atomic_inc()	smp_mb()
-#define smp_mb__after_atomic_inc()	smp_mb()
+#define smp_mb__before_atomic_dec()	smp_llsc_mb()
+#define smp_mb__after_atomic_dec()	smp_llsc_mb()
+#define smp_mb__before_atomic_inc()	smp_llsc_mb()
+#define smp_mb__after_atomic_inc()	smp_llsc_mb()
 
 #include <asm-generic/atomic.h>
 
 #endif /* _ASM_ATOMIC_H */

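The last atomic.h hunk only retargets the smp_mb__{before,after}_atomic_{dec,inc}() helpers; their caller-side contract is unchanged. A usage sketch (the structure and function names are hypothetical, not from the patch):

	#include <asm/atomic.h>

	struct obj {				/* hypothetical object with a refcount */
		atomic_t refcount;
	};

	static void put_obj(struct obj *o)
	{
		/* The plain (non-*_return) atomics are not serializing, so callers
		 * that need ordering bracket them with these macros, which the hunk
		 * above now maps to smp_llsc_mb() instead of smp_mb(). */
		smp_mb__before_atomic_dec();
		atomic_dec(&o->refcount);
		smp_mb__after_atomic_dec();
	}
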
@@ -121,6 +121,11 @@
 #else
 #define __WEAK_ORDERING_MB	"		\n"
 #endif
+#if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
+#define __WEAK_LLSC_MB		"	sync	\n"
+#else
+#define __WEAK_LLSC_MB		"		\n"
+#endif
 
 #define smp_mb()	__asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")
 #define smp_rmb()	__asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")
@@ -129,4 +134,8 @@
 #define set_mb(var, value) \
 	do { var = value; smp_mb(); } while (0)
 
+#define smp_llsc_mb()	__asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
+#define smp_llsc_rmb()	__asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
+#define smp_llsc_wmb()	__asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
+
 #endif /* __ASM_BARRIER_H */

@@ -38,8 +38,8 @@
 /*
  * clear_bit() doesn't provide any barrier for the compiler.
  */
-#define smp_mb__before_clear_bit()	smp_mb()
-#define smp_mb__after_clear_bit()	smp_mb()
+#define smp_mb__before_clear_bit()	smp_llsc_mb()
+#define smp_mb__after_clear_bit()	smp_llsc_mb()
 
 /*
  * set_bit - Atomically set a bit in memory
@@ -289,7 +289,7 @@ static inline int test_and_set_bit(unsigned long nr,
 		raw_local_irq_restore(flags);
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return res != 0;
 }
@@ -377,7 +377,7 @@ static inline int test_and_clear_bit(unsigned long nr,
 		raw_local_irq_restore(flags);
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return res != 0;
 }
@@ -445,7 +445,7 @@ static inline int test_and_change_bit(unsigned long nr,
 		raw_local_irq_restore(flags);
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return res != 0;
 }

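The test_and_* hunks all make the same substitution for the trailing barrier these operations are documented to imply. A caller-side sketch (the flag word and function name are hypothetical):

	#include <linux/bitops.h>
	#include <linux/errno.h>

	static unsigned long demo_state;	/* hypothetical flag word, bit 0 = busy */

	static int demo_try_start(void)
	{
		/* test_and_set_bit() implies a memory barrier after the operation;
		 * on MIPS that barrier is now smp_llsc_mb() rather than smp_mb(). */
		if (test_and_set_bit(0, &demo_state))
			return -EBUSY;
		return 0;
	}
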
@@ -29,7 +29,7 @@
 	"	.set	mips3				\n"	\
 	"2:	sc	$1, %2				\n"	\
 	"	beqzl	$1, 1b				\n"	\
-	__WEAK_ORDERING_MB					\
+	__WEAK_LLSC_MB						\
 	"3:						\n"	\
 	"	.set	pop				\n"	\
 	"	.set	mips0				\n"	\
@@ -55,7 +55,7 @@
 	"	.set	mips3				\n"	\
 	"2:	sc	$1, %2				\n"	\
 	"	beqz	$1, 1b				\n"	\
-	__WEAK_ORDERING_MB					\
+	__WEAK_LLSC_MB						\
 	"3:						\n"	\
 	"	.set	pop				\n"	\
 	"	.set	mips0				\n"	\
@@ -152,7 +152,7 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
 	"	.set	mips3				\n"
 	"2:	sc	$1, %1				\n"
 	"	beqzl	$1, 1b				\n"
-	__WEAK_ORDERING_MB
+	__WEAK_LLSC_MB
 	"3:						\n"
 	"	.set	pop				\n"
 	"	.section .fixup,\"ax\"			\n"
@@ -179,7 +179,7 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
 	"	.set	mips3				\n"
 	"2:	sc	$1, %1				\n"
 	"	beqz	$1, 1b				\n"
-	__WEAK_ORDERING_MB
+	__WEAK_LLSC_MB
 	"3:						\n"
 	"	.set	pop				\n"
 	"	.section .fixup,\"ax\"			\n"

@@ -67,7 +67,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 		: "memory");
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 }
 
 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
@@ -118,7 +118,7 @@ static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
 		: "memory");
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return res == 0;
 }
@@ -183,7 +183,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
 		: "memory");
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 }
 
 /* Note the use of sub, not subu which will make the kernel die with an
@@ -193,7 +193,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
 {
 	unsigned int tmp;
 
-	smp_mb();
+	smp_llsc_mb();
 
 	if (R10000_LLSC_WAR) {
 		__asm__ __volatile__(
@@ -262,7 +262,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
 		: "memory");
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 }
 
 static inline void __raw_write_unlock(raw_rwlock_t *rw)
@@ -293,7 +293,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
 	"	.set	reorder					\n"
 	"	beqzl	%1, 1b					\n"
 	"	 nop						\n"
-	__WEAK_ORDERING_MB
+	__WEAK_LLSC_MB
 	"	li	%2, 1					\n"
 	"2:							\n"
 	: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
@@ -310,7 +310,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
 	"	beqz	%1, 1b					\n"
 	"	 nop						\n"
 	"	.set	reorder					\n"
-	__WEAK_ORDERING_MB
+	__WEAK_LLSC_MB
 	"	li	%2, 1					\n"
 	"2:							\n"
 	: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
@@ -336,7 +336,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
 	"	sc	%1, %0					\n"
 	"	beqzl	%1, 1b					\n"
 	"	 nop						\n"
-	__WEAK_ORDERING_MB
+	__WEAK_LLSC_MB
 	"	li	%2, 1					\n"
 	"	.set	reorder					\n"
 	"2:							\n"
@@ -354,7 +354,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
 	"	beqz	%1, 3f					\n"
 	"	 li	%2, 1					\n"
 	"2:							\n"
-	__WEAK_ORDERING_MB
+	__WEAK_LLSC_MB
 	"	.subsection 2					\n"
 	"3:	b	1b					\n"
 	"	 li	%2, 0					\n"

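All the spinlock and rwlock hunks follow the same pattern: the barrier that closes the LL/SC acquisition loop (or, for __raw_read_unlock(), precedes the releasing LL/SC) becomes smp_llsc_mb(). A minimal caller-side sketch, with a hypothetical lock and counter:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(demo_lock);	/* hypothetical lock */
	static int demo_counter;

	static void demo_bump(void)
	{
		/* On CPUs that reorder beyond LL/SC, the smp_llsc_mb() placed after
		 * __raw_spin_lock()'s LL/SC loop is what keeps this critical section
		 * from drifting ahead of the lock acquisition. */
		spin_lock(&demo_lock);
		demo_counter++;
		spin_unlock(&demo_lock);
	}
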
@@ -117,7 +117,7 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 		raw_local_irq_restore(flags);	/* implies memory barrier */
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return retval;
 }
@@ -165,7 +165,7 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
 		raw_local_irq_restore(flags);	/* implies memory barrier */
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return retval;
 }
@@ -246,7 +246,7 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
 		raw_local_irq_restore(flags);	/* implies memory barrier */
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return retval;
 }
@@ -352,7 +352,7 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
 		raw_local_irq_restore(flags);	/* implies memory barrier */
 	}
 
-	smp_mb();
+	smp_llsc_mb();
 
 	return retval;
 }

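Finally, the __xchg/__cmpxchg helpers get the same treatment for the barrier after their LL/SC loop. A caller-side sketch; the ownership word and function are hypothetical, and the include line assumes the cmpxchg() placement used by kernels of this vintage:

	#include <asm/system.h>		/* cmpxchg() in kernels of this era */

	static unsigned long demo_owner;	/* hypothetical ownership word, 0 = free */

	static int demo_try_claim(unsigned long me)
	{
		/* cmpxchg() still acts as a barrier around the exchange; the trailing
		 * barrier is now smp_llsc_mb(), which only emits SYNC when
		 * CONFIG_WEAK_REORDERING_BEYOND_LLSC is set. */
		return cmpxchg(&demo_owner, 0UL, me) == 0UL;
	}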