[PATCH] powerpc: native atomic_add_unless

Do atomic_add_unless natively instead of using cmpxchg.
Improved register allocation idea from Joel Schopp.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Author: Nick Piggin, 2006-02-20 10:41:40 +01:00 (committed by Paul Mackerras)
parent 4f629d7db3
commit f055affb89

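For context before reading the diff: the old cmpxchg()-based macro and the new lwarx/stwcx. sequence implement the same operation. The plain C sketch below is only an illustration of that behaviour; the function name is made up and, unlike the real code, it is not atomic.

/*
 * Illustrative sketch only -- NOT kernel code and NOT atomic.  It shows
 * what atomic_add_unless() is meant to do: add 'a' to the counter unless
 * the counter currently equals 'u', and report whether the add happened.
 */
static int sketch_add_unless(int *counter, int a, int u)
{
        int old = *counter;

        if (old == u)
                return 0;       /* counter was 'u': no change made */

        *counter = old + a;
        return 1;               /* counter was not 'u': value added */
}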
--- a/include/asm-powerpc/atomic.h
+++ b/include/asm-powerpc/atomic.h
@@ -8,6 +8,7 @@
 typedef struct { volatile int counter; } atomic_t;
 
 #ifdef __KERNEL__
+#include <linux/compiler.h>
 #include <asm/synch.h>
 #include <asm/asm-compat.h>
 
@@ -176,20 +177,29 @@ static __inline__ int atomic_dec_return(atomic_t *v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-#define atomic_add_unless(v, a, u)                      \
-({                                                      \
-        int c, old;                                     \
-        c = atomic_read(v);                             \
-        for (;;) {                                      \
-                if (unlikely(c == (u)))                 \
-                        break;                          \
-                old = atomic_cmpxchg((v), c, c + (a));  \
-                if (likely(old == c))                   \
-                        break;                          \
-                c = old;                                \
-        }                                               \
-        c != (u);                                       \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+        int t;
+
+        __asm__ __volatile__ (
+        LWSYNC_ON_SMP
+"1:     lwarx   %0,0,%1         # atomic_add_unless\n\
+        cmpw    0,%0,%3 \n\
+        beq-    2f \n\
+        add     %0,%2,%0 \n"
+        PPC405_ERR77(0,%2)
+"       stwcx.  %0,0,%1 \n\
+        bne-    1b \n"
+        ISYNC_ON_SMP
+"       subf    %0,%2,%0 \n\
+2:"
+        : "=&r" (t)
+        : "r" (&v->counter), "r" (a), "r" (u)
+        : "cc", "memory");
+
+        return t != u;
+}
 
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 #define atomic_sub_and_test(a, v)       (atomic_sub_return((a), (v)) == 0)
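
A usage note (not part of the patch): atomic_inc_not_zero(), defined above in terms of atomic_add_unless(), is commonly used to take a reference only while an object's reference count is still non-zero. A minimal sketch of that pattern, with a hypothetical object type and function name:

#include <asm/atomic.h>         /* atomic_t, atomic_inc_not_zero() */

/* Hypothetical structure, for illustration only. */
struct my_object {
        atomic_t refcount;      /* drops to 0 once the object starts dying */
        /* ... payload ... */
};

/*
 * Take a reference only if the object is still live.  Returns non-zero
 * on success; returns 0 if the refcount had already reached zero, in
 * which case the caller must not touch the object.
 */
static int my_object_get(struct my_object *obj)
{
        return atomic_inc_not_zero(&obj->refcount);
}

As with the other value-returning powerpc atomics of this era, the LWSYNC_ON_SMP and ISYNC_ON_SMP macros in the new function expand to the appropriate barrier instructions on SMP kernels and to nothing on UP, so the native version keeps the same ordering guarantees as the cmpxchg()-based loop it replaces.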