locking/rwsem: Remove rwsem_atomic_add() and rwsem_atomic_update()
The rwsem-xadd count has been converted to an atomic variable and the rwsem code now directly uses atomic_long_add() and atomic_long_add_return(), so we can remove the arch implementations of rwsem_atomic_add() and rwsem_atomic_update().

Signed-off-by: Jason Low <jason.low2@hpe.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Christoph Lameter <cl@linux.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Jason Low <jason.low2@hp.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Hurley <peter@hurleysoftware.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Terry Rudd <terry.rudd@hpe.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Waiman Long <Waiman.Long@hpe.com>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 8ee62b1870
commit d157bd860f
5 changed files with 0 additions and 120 deletions
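For context before the per-arch removals below: the helpers are superseded by direct calls into the generic atomic API on the count field, which the parent commit turned into an atomic_long_t. A minimal sketch of that replacement pattern, assuming the parent commit's type change; the rwsem_count_*() names here are purely illustrative, not upstream identifiers:

	/* sem->count is an atomic_long_t after the parent commit */
	static inline void rwsem_count_add(long delta, struct rw_semaphore *sem)
	{
		atomic_long_add(delta, &sem->count);	/* what rwsem_atomic_add() provided */
	}

	static inline long rwsem_count_add_return(long delta, struct rw_semaphore *sem)
	{
		return atomic_long_add_return(delta, &sem->count);	/* what rwsem_atomic_update() provided */
	}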
arch/alpha/include/asm/rwsem.h
@@ -191,47 +191,5 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
 		rwsem_downgrade_wake(sem);
 }
 
-static inline void rwsem_atomic_add(long val, struct rw_semaphore *sem)
-{
-#ifndef CONFIG_SMP
-	sem->count += val;
-#else
-	long temp;
-	__asm__ __volatile__(
-	"1:	ldq_l	%0,%1\n"
-	"	addq	%0,%2,%0\n"
-	"	stq_c	%0,%1\n"
-	"	beq	%0,2f\n"
-	".subsection 2\n"
-	"2:	br	1b\n"
-	".previous"
-	:"=&r" (temp), "=m" (sem->count)
-	:"Ir" (val), "m" (sem->count));
-#endif
-}
-
-static inline long rwsem_atomic_update(long val, struct rw_semaphore *sem)
-{
-#ifndef CONFIG_SMP
-	sem->count += val;
-	return sem->count;
-#else
-	long	ret, temp;
-	__asm__ __volatile__(
-	"1:	ldq_l	%0,%1\n"
-	"	addq	%0,%3,%2\n"
-	"	addq	%0,%3,%0\n"
-	"	stq_c	%2,%1\n"
-	"	beq	%2,2f\n"
-	".subsection 2\n"
-	"2:	br	1b\n"
-	".previous"
-	:"=&r" (ret), "=m" (sem->count), "=&r" (temp)
-	:"Ir" (val), "m" (sem->count));
-
-	return ret;
-#endif
-}
-
 #endif /* __KERNEL__ */
 #endif /* _ALPHA_RWSEM_H */
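The removed Alpha helpers are load-locked/store-conditional retry loops (ldq_l/stq_c, branching back if the conditional store fails); the s390 csg loop further down in this diff is the same pattern built on compare-and-swap. A hedged C sketch, purely for illustration, of what such a loop computes:

	/* illustrative only: fetch-and-add built from a compare-and-swap retry loop */
	static inline long rwsem_add_return_sketch(long val, long *count)
	{
		long old, new;

		do {
			old = READ_ONCE(*count);	/* like ldq_l: read the current value */
			new = old + val;		/* like addq: compute the updated value */
		} while (cmpxchg(count, old, new) != old);	/* retry if another CPU raced us */

		return new;	/* rwsem_atomic_update() returned the post-add value */
	}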
arch/ia64/include/asm/rwsem.h
@@ -151,11 +151,4 @@ __downgrade_write (struct rw_semaphore *sem)
 		rwsem_downgrade_wake(sem);
 }
 
-/*
- * Implement atomic add functionality.  These used to be "inline" functions, but GCC v3.1
- * doesn't quite optimize this stuff right and ends up with bad calls to fetchandadd.
- */
-#define rwsem_atomic_add(delta, sem)	atomic64_add(delta, (atomic64_t *)(&(sem)->count))
-#define rwsem_atomic_update(delta, sem)	atomic64_add_return(delta, (atomic64_t *)(&(sem)->count))
-
 #endif /* _ASM_IA64_RWSEM_H */
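The ia64 macros (like the asm-generic wrappers at the end of this diff) only worked by casting the plain long count to an atomic type. The parent commit removes the need for that by changing the field itself; a simplified sketch of the idea, not the verbatim structure definition:

	/* After the parent commit the count field is natively atomic (simplified;
	 * the real struct rw_semaphore also carries wait_lock, wait_list, etc.): */
	struct rw_semaphore {
		atomic_long_t count;	/* was: long count -- hence the old (atomic64_t *) casts */
	};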
arch/s390/include/asm/rwsem.h
@@ -207,41 +207,4 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
 		rwsem_downgrade_wake(sem);
 }
 
-/*
- * implement atomic add functionality
- */
-static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
-{
-	signed long old, new;
-
-	asm volatile(
-		"	lg	%0,%2\n"
-		"0:	lgr	%1,%0\n"
-		"	agr	%1,%4\n"
-		"	csg	%0,%1,%2\n"
-		"	jl	0b"
-		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
-		: "Q" (sem->count), "d" (delta)
-		: "cc", "memory");
-}
-
-/*
- * implement exchange and add functionality
- */
-static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
-{
-	signed long old, new;
-
-	asm volatile(
-		"	lg	%0,%2\n"
-		"0:	lgr	%1,%0\n"
-		"	agr	%1,%4\n"
-		"	csg	%0,%1,%2\n"
-		"	jl	0b"
-		: "=&d" (old), "=&d" (new), "=Q" (sem->count)
-		: "Q" (sem->count), "d" (delta)
-		: "cc", "memory");
-	return new;
-}
-
 #endif /* _S390_RWSEM_H */
arch/x86/include/asm/rwsem.h
@@ -213,23 +213,5 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
 		     : "memory", "cc");
 }
 
-/*
- * implement atomic add functionality
- */
-static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
-{
-	asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
-		     : "+m" (sem->count)
-		     : "er" (delta));
-}
-
-/*
- * implement exchange and add functionality
- */
-static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
-{
-	return delta + xadd(&sem->count, delta);
-}
-
 #endif /* __KERNEL__ */
 #endif /* _ASM_X86_RWSEM_H */
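The removed x86 rwsem_atomic_update() is built on xadd(), which returns the value the count held before the addition, hence the "delta +" to produce the post-add value. That is exactly the difference between the fetch_add and add_return flavours of the generic API; a small illustrative sketch, assuming the atomic_long_fetch_add() variant is available:

	/* Illustrative: add_return() yields the post-add value directly, while
	 * xadd()/fetch_add() yield the pre-add value, so the caller adds delta. */
	static inline long rwsem_update_sketch(long delta, struct rw_semaphore *sem)
	{
		return delta + atomic_long_fetch_add(delta, &sem->count);
		/* equivalent to: atomic_long_add_return(delta, &sem->count) */
	}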
include/asm-generic/rwsem.h
@@ -106,14 +106,6 @@ static inline void __up_write(struct rw_semaphore *sem)
 	rwsem_wake(sem);
 }
 
-/*
- * implement atomic add functionality
- */
-static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
-{
-	atomic_long_add(delta, (atomic_long_t *)&sem->count);
-}
-
 /*
  * downgrade write lock to read lock
  */
@@ -134,13 +126,5 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
 	rwsem_downgrade_wake(sem);
 }
 
-/*
- * implement exchange and add functionality
- */
-static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
-{
-	return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
-}
-
 #endif /* __KERNEL__ */
 #endif /* _ASM_GENERIC_RWSEM_H */
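Note that the asm-generic wrappers above were already just casts around atomic_long_add()/atomic_long_add_return(); with the count natively atomic_long_t they add nothing, which is why the core rwsem-xadd code now calls the atomic API directly. A hedged sketch of what a wakeup-path call site looks like after this series (simplified, not the verbatim upstream code):

	/* simplified sketch: grant the lock to queued readers by adding the
	 * accumulated reader bias, recovering the pre-add value for checks */
	static long rwsem_grant_readers_sketch(struct rw_semaphore *sem, long adjustment)
	{
		return atomic_long_add_return(adjustment, &sem->count) - adjustment;
	}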