kernel-fxtec-pro1x/arch/sh/include/asm/mutex-llsc.h
Maarten Lankhorst a41b56efa7 arch: Make __mutex_fastpath_lock_retval return whether fastpath succeeded or not
This will allow me to call functions that take multiple
arguments if the fastpath fails. This is required to support ticket
mutexes, because they need to be able to pass an extra argument
to the fail function.

Originally I duplicated the functions by adding
__mutex_fastpath_lock_retval_arg. That ended up being just a
duplicate of the existing function, so a way to test whether the
fastpath succeeded turned out to be better.

This also cleaned up the reservation mutex patch somewhat, by
making it possible to call atomic_set() instead of atomic_xchg(),
and by making it easier to detect if the wrong unlock function was
previously used.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: dri-devel@lists.freedesktop.org
Cc: linaro-mm-sig@lists.linaro.org
Cc: robclark@gmail.com
Cc: rostedt@goodmis.org
Cc: daniel@ffwll.ch
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20130620113105.4001.83929.stgit@patser
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2013-06-26 12:10:55 +02:00
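
To illustrate the change described above (editor's sketch; only
__mutex_fastpath_lock_retval comes from this patch, and the slowpath
names below are placeholders):

    /* old: the fastpath itself invokes a fixed fail_fn(count) */
    __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);

    /* new: the caller branches on the result and may pass extra args */
    if (__mutex_fastpath_lock_retval(&lock->count))
        ret = my_slowpath(lock, extra_argument);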

/*
 * arch/sh/include/asm/mutex-llsc.h
 *
 * SH-4A optimized mutex locking primitives
 *
 * Please look into asm-generic/mutex-xchg.h for a formal definition.
 */
#ifndef __ASM_SH_MUTEX_LLSC_H
#define __ASM_SH_MUTEX_LLSC_H

/*
 * Attempting to lock a mutex on SH-4A is done like in the ARMv6+
 * architecture, with a bastardized atomic decrement (it is not a
 * reliable atomic decrement, but it satisfies the defined semantics
 * for our purpose while being smaller and faster than a real atomic
 * decrement or atomic swap).
 *
 * The idea is to attempt decrementing the lock value only once. If,
 * once decremented, it isn't zero, or if its store-back fails due to
 * a dispute on the exclusive store, we simply bail out immediately
 * through the slow path, where the lock will be reattempted until it
 * succeeds.
 */
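
/*
 * Rough sketch of that fastpath (editor's illustration only; the
 * movli.l/movco.l pair has no direct C equivalent, and the
 * store_exclusive() helper below is hypothetical):
 *
 *      res  = count->counter - 1;                     // movli.l + add
 *      done = store_exclusive(&count->counter, res);  // movco.l -> T bit
 *      if (!done || res != 0)
 *              fail_fn(count);                        // contended: slowpath
 */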

static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
        int __done, __res;

        /*
         * movli.l/movco.l form an LL/SC pair: the conditional store only
         * succeeds (T bit set) if the reservation was not lost in between.
         */
        __asm__ __volatile__ (
                "movli.l @%2, %0 \n"    /* load count, set reservation */
                "add     #-1, %0 \n"    /* decrement it */
                "movco.l %0, @%2 \n"    /* store back, T = success */
                "movt    %1      \n"    /* __done = T */
                : "=&z" (__res), "=&r" (__done)
                : "r" (&(count)->counter)
                : "t");

        /* Store lost the reservation, or the mutex was already held. */
        if (unlikely(!__done || __res != 0))
                fail_fn(count);
}

/*
 * Returns 0 if the fastpath took the lock, or -1 if the caller must
 * fall back to the slowpath.
 */
static inline int
__mutex_fastpath_lock_retval(atomic_t *count)
{
        int __done, __res;

        __asm__ __volatile__ (
                "movli.l @%2, %0 \n"
                "add     #-1, %0 \n"
                "movco.l %0, @%2 \n"
                "movt    %1      \n"
                : "=&z" (__res), "=&r" (__done)
                : "r" (&(count)->counter)
                : "t");

        if (unlikely(!__done || __res != 0))
                __res = -1;

        return __res;
}
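
/*
 * Usage sketch (editor's illustration, mirroring the generic mutex code
 * rather than anything defined in this header; the slowpath name is the
 * caller's business):
 *
 *      ret = __mutex_fastpath_lock_retval(&lock->count);
 *      if (unlikely(ret))
 *              ret = __mutex_lock_interruptible_slowpath(lock);
 *
 * That is, 0 means the lock was taken on the fastpath, and a nonzero
 * result is replaced by whatever the slowpath returns.
 */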

static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
        int __done, __res;

        __asm__ __volatile__ (
                "movli.l @%2, %0 \n\t"
                "add     #1,  %0 \n\t"
                "movco.l %0, @%2 \n\t"
                "movt    %1      \n\t"
                : "=&z" (__res), "=&r" (__done)
                : "r" (&(count)->counter)
                : "t");

        /*
         * Take the slowpath if the store failed, or if the new count is
         * still <= 0 (i.e. there are waiters queued on the mutex).
         */
        if (unlikely(!__done || __res <= 0))
                fail_fn(count);
}

/*
 * If the unlock was done on a contended lock, or if the unlock simply
 * fails, then the mutex remains locked.
 */
#define __mutex_slowpath_needs_to_unlock() 1

/*
 * For __mutex_fastpath_trylock we do an atomic decrement, check whether
 * the result hit zero, and put the outcome (1 if the lock was acquired,
 * 0 otherwise) in the __res variable.
 */
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
        int __res, __orig;

        __asm__ __volatile__ (
                "1: movli.l @%2, %0 \n\t"  /* load count, set reservation */
                "dt         %0      \n\t"  /* decrement; T = (result == 0) */
                "movco.l    %0, @%2 \n\t"  /* conditional store, T = success */
                "bf         1b      \n\t"  /* store failed: retry */
                "cmp/eq     #0, %0  \n\t"  /* did the count hit zero? */
                "bt         2f      \n\t"
                "mov        #0, %1  \n\t"  /* no: __res = 0, lock not taken */
                "bf         3f      \n\t"  /* T is clear here, always taken */
                "2: mov     #1, %1  \n\t"  /* yes: __res = 1, lock acquired */
                "3:                     "
                : "=&z" (__orig), "=&r" (__res)
                : "r" (&count->counter)
                : "t");

        return __res;
}
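
/*
 * Usage sketch (editor's illustration): the generic trylock wrapper can
 * propagate the return value directly, since 1 means the lock was taken
 * and 0 means it was not:
 *
 *      if (__mutex_fastpath_trylock(&lock->count, fail_fn))
 *              ... the mutex is now owned by this task ...
 *
 * Note that fail_fn is accepted here for interface compatibility but is
 * never called by this implementation.
 */
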
#endif /* __ASM_SH_MUTEX_LLSC_H */