kernel-fxtec-pro1x/arch/alpha/include/asm/atomic.h
Mark Rutland fd2efaa4eb locking/atomics: Rework ordering barriers
Currently architectures can override __atomic_op_*() to define the barriers
used before/after a relaxed atomic when used to build acquire/release/fence
variants.

This has the unfortunate property of requiring the architecture to define the
full wrapper for the atomics, rather than just the barriers they care about,
and gets in the way of generating atomics which can be easily read.

Instead, this patch has architectures define an optional set of barriers:

* __atomic_acquire_fence()
* __atomic_release_fence()
* __atomic_pre_full_fence()
* __atomic_post_full_fence()

... which <linux/atomic.h> uses to build the wrappers.
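
For reference, the acquire/release/fence wrappers that <linux/atomic.h>
builds from these barriers have roughly the following shape (a sketch of
the generic side of this change, not the verbatim generic code):

  #define __atomic_op_acquire(op, args...) \
  ({ \
          typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \
          __atomic_acquire_fence(); \
          __ret; \
  })

  #define __atomic_op_release(op, args...) \
  ({ \
          __atomic_release_fence(); \
          op##_relaxed(args); \
  })

  #define __atomic_op_fence(op, args...) \
  ({ \
          typeof(op##_relaxed(args)) __ret; \
          __atomic_pre_full_fence(); \
          __ret = op##_relaxed(args); \
          __atomic_post_full_fence(); \
          __ret; \
  })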

It would be nice if we could undef these, along with the __atomic_op_*()
wrappers, but that would break the cmpxchg() wrappers, which are written
in the preprocessor.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Cc: Andrea Parri <parri.andrea@gmail.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: andy.shevchenko@gmail.com
Cc: arnd@arndb.de
Cc: aryabinin@virtuozzo.com
Cc: catalin.marinas@arm.com
Cc: dvyukov@google.com
Cc: glider@google.com
Cc: linux-arm-kernel@lists.infradead.org
Cc: peter@hurleysoftware.com
Link: http://lkml.kernel.org/r/20180716113017.3909-7-mark.rutland@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2018-07-25 11:53:59 +02:00


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc...
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 */

/*
 * To ensure dependency ordering is preserved for the _relaxed and
 * _release atomics, an smp_read_barrier_depends() is unconditionally
 * inserted into the _relaxed variants, which are used to build the
 * barriered versions. Avoid redundant back-to-back fences in the
 * _acquire and _fence versions.
 */
#define __atomic_acquire_fence()
#define __atomic_post_full_fence()
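
/*
 * For example, the generic atomic_add_return_acquire(i, v) is built
 * roughly as
 *
 *        atomic_add_return_relaxed(i, v);
 *        __atomic_acquire_fence();
 *
 * and since the _relaxed variant below already ends in
 * smp_read_barrier_depends() (a full barrier on Alpha), leaving the
 * acquire fence empty does not weaken the ordering.
 */
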
#define ATOMIC_INIT(i) { (i) }
#define ATOMIC64_INIT(i) { (i) }
#define atomic_read(v) READ_ONCE((v)->counter)
#define atomic64_read(v) READ_ONCE((v)->counter)
#define atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
#define atomic64_set(v,i) WRITE_ONCE((v)->counter, (i))

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */
#define ATOMIC_OP(op, asm_op) \
static __inline__ void atomic_##op(int i, atomic_t * v) \
{ \
        unsigned long temp; \
        __asm__ __volatile__( \
        "1: ldl_l %0,%1\n" \
        " " #asm_op " %0,%2,%0\n" \
        " stl_c %0,%1\n" \
        " beq %0,2f\n" \
        ".subsection 2\n" \
        "2: br 1b\n" \
        ".previous" \
        :"=&r" (temp), "=m" (v->counter) \
        :"Ir" (i), "m" (v->counter)); \
}

#define ATOMIC_OP_RETURN(op, asm_op) \
static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
{ \
        long temp, result; \
        __asm__ __volatile__( \
        "1: ldl_l %0,%1\n" \
        " " #asm_op " %0,%3,%2\n" \
        " " #asm_op " %0,%3,%0\n" \
        " stl_c %0,%1\n" \
        " beq %0,2f\n" \
        ".subsection 2\n" \
        "2: br 1b\n" \
        ".previous" \
        :"=&r" (temp), "=m" (v->counter), "=&r" (result) \
        :"Ir" (i), "m" (v->counter) : "memory"); \
        smp_read_barrier_depends(); \
        return result; \
}

#define ATOMIC_FETCH_OP(op, asm_op) \
static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
{ \
        long temp, result; \
        __asm__ __volatile__( \
        "1: ldl_l %2,%1\n" \
        " " #asm_op " %2,%3,%0\n" \
        " stl_c %0,%1\n" \
        " beq %0,2f\n" \
        ".subsection 2\n" \
        "2: br 1b\n" \
        ".previous" \
        :"=&r" (temp), "=m" (v->counter), "=&r" (result) \
        :"Ir" (i), "m" (v->counter) : "memory"); \
        smp_read_barrier_depends(); \
        return result; \
}

#define ATOMIC64_OP(op, asm_op) \
static __inline__ void atomic64_##op(long i, atomic64_t * v) \
{ \
        unsigned long temp; \
        __asm__ __volatile__( \
        "1: ldq_l %0,%1\n" \
        " " #asm_op " %0,%2,%0\n" \
        " stq_c %0,%1\n" \
        " beq %0,2f\n" \
        ".subsection 2\n" \
        "2: br 1b\n" \
        ".previous" \
        :"=&r" (temp), "=m" (v->counter) \
        :"Ir" (i), "m" (v->counter)); \
}

#define ATOMIC64_OP_RETURN(op, asm_op) \
static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
{ \
        long temp, result; \
        __asm__ __volatile__( \
        "1: ldq_l %0,%1\n" \
        " " #asm_op " %0,%3,%2\n" \
        " " #asm_op " %0,%3,%0\n" \
        " stq_c %0,%1\n" \
        " beq %0,2f\n" \
        ".subsection 2\n" \
        "2: br 1b\n" \
        ".previous" \
        :"=&r" (temp), "=m" (v->counter), "=&r" (result) \
        :"Ir" (i), "m" (v->counter) : "memory"); \
        smp_read_barrier_depends(); \
        return result; \
}

#define ATOMIC64_FETCH_OP(op, asm_op) \
static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
{ \
        long temp, result; \
        __asm__ __volatile__( \
        "1: ldq_l %2,%1\n" \
        " " #asm_op " %2,%3,%0\n" \
        " stq_c %0,%1\n" \
        " beq %0,2f\n" \
        ".subsection 2\n" \
        "2: br 1b\n" \
        ".previous" \
        :"=&r" (temp), "=m" (v->counter), "=&r" (result) \
        :"Ir" (i), "m" (v->counter) : "memory"); \
        smp_read_barrier_depends(); \
        return result; \
}

#define ATOMIC_OPS(op) \
        ATOMIC_OP(op, op##l) \
        ATOMIC_OP_RETURN(op, op##l) \
        ATOMIC_FETCH_OP(op, op##l) \
        ATOMIC64_OP(op, op##q) \
        ATOMIC64_OP_RETURN(op, op##q) \
        ATOMIC64_FETCH_OP(op, op##q)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
#define atomic_andnot atomic_andnot
#define atomic64_andnot atomic64_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm) \
        ATOMIC_OP(op, asm) \
        ATOMIC_FETCH_OP(op, asm) \
        ATOMIC64_OP(op, asm) \
        ATOMIC64_FETCH_OP(op, asm)

ATOMIC_OPS(and, and)
ATOMIC_OPS(andnot, bic)
ATOMIC_OPS(or, bis)
ATOMIC_OPS(xor, xor)

#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
#undef ATOMIC_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
        int c, new, old;
        smp_mb();
        __asm__ __volatile__(
        "1: ldl_l %[old],%[mem]\n"
        " cmpeq %[old],%[u],%[c]\n"
        " addl %[old],%[a],%[new]\n"
        " bne %[c],2f\n"
        " stl_c %[new],%[mem]\n"
        " beq %[new],3f\n"
        "2:\n"
        ".subsection 2\n"
        "3: br 1b\n"
        ".previous"
        : [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
        : [mem] "m"(*v), [a] "rI"(a), [u] "rI"((long)u)
        : "memory");
        smp_mb();
        return old;
}
#define atomic_fetch_add_unless atomic_fetch_add_unless
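
/*
 * Typical use (illustrative): the "increment unless zero" pattern for
 * taking a reference only while the object is still live, e.g.
 *
 *        if (atomic_fetch_add_unless(&obj->refcnt, 1, 0) == 0)
 *                return NULL;
 *
 * where obj->refcnt is a hypothetical atomic_t reference count.
 */
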
/**
 * atomic64_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
{
        long c, new, old;
        smp_mb();
        __asm__ __volatile__(
        "1: ldq_l %[old],%[mem]\n"
        " cmpeq %[old],%[u],%[c]\n"
        " addq %[old],%[a],%[new]\n"
        " bne %[c],2f\n"
        " stq_c %[new],%[mem]\n"
        " beq %[new],3f\n"
        "2:\n"
        ".subsection 2\n"
        "3: br 1b\n"
        ".previous"
        : [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
        : [mem] "m"(*v), [a] "rI"(a), [u] "rI"(u)
        : "memory");
        smp_mb();
        return old;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless

/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
        long old, tmp;
        smp_mb();
        __asm__ __volatile__(
        "1: ldq_l %[old],%[mem]\n"
        " subq %[old],1,%[tmp]\n"
        " ble %[old],2f\n"
        " stq_c %[tmp],%[mem]\n"
        " beq %[tmp],3f\n"
        "2:\n"
        ".subsection 2\n"
        "3: br 1b\n"
        ".previous"
        : [old] "=&r"(old), [tmp] "=&r"(tmp)
        : [mem] "m"(*v)
        : "memory");
        smp_mb();
        return old - 1;
}
#define atomic64_dec_if_positive atomic64_dec_if_positive

#endif /* _ALPHA_ATOMIC_H */