ia64: Rewrite atomic_add and atomic_sub

Force __builtin_constant_p to evaluate whether the argument to atomic_add
& atomic_sub is constant in the front-end, before the optimisation passes
that can otherwise leave GCC emitting a call to
__bad_increment_for_ia64_fetch_and_add().

See GCC bugzilla 83653.
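The fix leans on a C front-end rule: the initialiser of an object with
static storage duration must be a constant expression, so GCC has to fold
the __builtin_constant_p() conditional while parsing, before any
optimisation pass can get involved. Below is a minimal, self-contained
sketch of that idiom under stated assumptions: it is plain GCC C (statement
expressions and __builtin_constant_p are GNU extensions), not kernel code,
and fast_add()/slow_add() are hypothetical stand-ins for
ia64_fetch_and_add()/ia64_atomic_add().

/* build: gcc -O2 sketch.c */
#include <stdio.h>

static int fast_add(int i, int *v) { return *v += i; }	/* "fetchadd" path stand-in */
static int slow_add(int i, int *v) { return *v += i; }	/* cmpxchg-loop path stand-in */

/* 1 only when i is a front-end constant from the supported set */
#define __my_atomic_const(i)	(__builtin_constant_p(i) ?		\
		((i) == 1 || (i) == 4 || (i) == 8 || (i) == 16 ||	\
		 (i) == -1 || (i) == -4 || (i) == -8 || (i) == -16) : 0)

#define my_add_return(i, v)						\
({									\
	int __i = (i);							\
	/* static initialiser => the front-end folds the test here */	\
	static const int __p = __my_atomic_const(i);			\
	__p ? fast_add(__i, (v)) : slow_add(__i, (v));			\
})

int main(void)
{
	int counter = 0, n = 3;
	printf("%d\n", my_add_return(4, &counter));	/* constant: fast path */
	printf("%d\n", my_add_return(n, &counter));	/* variable: slow path */
	return 0;
}

Because the static initialiser is evaluated at parse time, a variable
argument like n above yields 0 immediately; no later pass can flip the
choice of branch.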

Signed-off-by: Jakub Jelinek <jakub@redhat.com>
Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Author:    Matthew Wilcox <mawilcox@microsoft.com>
Date:      2018-01-18 13:52:17 -08:00
Committer: Linus Torvalds <torvalds@linux-foundation.org>
Parent:    726ba84b50
Commit:    4b664e739f

--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -65,29 +65,30 @@ ia64_atomic_fetch_##op (int i, atomic_t *v)			\
 ATOMIC_OPS(add, +)
 ATOMIC_OPS(sub, -)
 
-#define atomic_add_return(i,v)						\
+#ifdef __OPTIMIZE__
+#define __ia64_atomic_const(i)	__builtin_constant_p(i) ?		\
+		((i) == 1 || (i) == 4 || (i) == 8 || (i) == 16 ||	\
+		 (i) == -1 || (i) == -4 || (i) == -8 || (i) == -16) : 0
+
+#define atomic_add_return(i, v)						\
 ({									\
-	int __ia64_aar_i = (i);						\
-	(__builtin_constant_p(i)					\
-	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
-	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
-	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
-	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))	\
-		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
-		: ia64_atomic_add(__ia64_aar_i, v);			\
+	int __i = (i);							\
+	static const int __ia64_atomic_p = __ia64_atomic_const(i);	\
+	__ia64_atomic_p ? ia64_fetch_and_add(__i, &(v)->counter) :	\
+				ia64_atomic_add(__i, v);		\
 })
 
-#define atomic_sub_return(i,v)						\
+#define atomic_sub_return(i, v)						\
 ({									\
-	int __ia64_asr_i = (i);						\
-	(__builtin_constant_p(i)					\
-	 && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)		\
-	     || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)		\
-	     || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)		\
-	     || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))	\
-		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
-		: ia64_atomic_sub(__ia64_asr_i, v);			\
+	int __i = (i);							\
+	static const int __ia64_atomic_p = __ia64_atomic_const(i);	\
+	__ia64_atomic_p ? ia64_fetch_and_add(-__i, &(v)->counter) :	\
+				ia64_atomic_sub(__i, v);		\
 })
+#else
+#define atomic_add_return(i, v)	ia64_atomic_add(i, v)
+#define atomic_sub_return(i, v)	ia64_atomic_sub(i, v)
+#endif
 
 #define atomic_fetch_add(i,v)						\
 ({									\
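For context on why a stray call is fatal, and why the #else branch avoids
ia64_fetch_and_add() entirely when not optimising: the "bad increment"
symbol is a link-time assertion, an extern function that is never defined
anywhere, so the kernel only links if every call to it is eliminated as
dead code. A simplified, hypothetical sketch of that pattern follows;
my_fetch_and_add() and __bad_increment_for_my_fetch_and_add() are
stand-ins, and __sync_fetch_and_add() substitutes for the ia64 fetchadd
instruction, which only accepts this small set of immediates.

#include <stdio.h>

extern int __bad_increment_for_my_fetch_and_add(void);	/* no definition, on purpose */

#define my_fetch_and_add(i, p)						\
({									\
	int __v;							\
	switch (i) {							\
	case -16: case -8: case -4: case -1:				\
	case 1: case 4: case 8: case 16:				\
		/* stand-in for the fetchadd instruction */		\
		__v = __sync_fetch_and_add((p), (i)) + (i);		\
		break;							\
	default:							\
		/* must be optimised away, or linking fails */		\
		__v = __bad_increment_for_my_fetch_and_add();		\
	}								\
	__v;								\
})

int main(void)
{
	int counter = 0;
	/* links with gcc -O2: the dead default branch is removed;	*/
	/* at -O0 the call can survive and the link fails, which is	*/
	/* what the patch's #ifdef __OPTIMIZE__ fallback avoids		*/
	printf("%d\n", my_fetch_and_add(4, &counter));
	return 0;
}

With the old macros, an unfolded __builtin_constant_p() could leave the
default branch reachable (GCC bugzilla 83653); the static const
initialiser settles the test before code generation, and the __OPTIMIZE__
guard keeps unoptimised builds off the fetchadd path altogether.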