sh: Fix up R0 dependence in __arch_swab16/32.

There is nothing in these routines that inherently depends on R0 use.
Given that these routines are inlined, it is rather easy to blow up the
compiler by exhausting the spill class when performing a 64-bit swab.

This presently manifests itself as the following:

CC      fs/ocfs2/suballoc.o
fs/ocfs2/suballoc.c: In function 'ocfs2_reserve_suballoc_bits':
fs/ocfs2/suballoc.c:638: error: unrecognizable insn:
(insn 2793 1230 1231 103 arch/sh/include/asm/swab.h:33 (set (reg:HI 853)
        (subreg:HI (reg:SI 149 macl) 2)) -1 (expr_list:REG_DEAD (reg:SI 149 macl)
        (nil)))
fs/ocfs2/suballoc.c:638: internal compiler error: in extract_insn, at recog.c:1991

This patch switches over to using an arbitrarily assigned register instead.

While the same issue does not exist in the SH-5 case, there is likewise no harm
in having an alternate register used for the byterev/shari pair.
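
For reference, the SH-4 side of __arch_swab32 reads roughly as follows once the change below is applied (reconstructed from the diff; the SH-5 byterev/shari branch is elided here). GCC treats "0" as a matching constraint, i.e. the input must live in the same register as output operand 0, whereas the plain "r" now used lets the allocator assign any general-purpose register, so the first instruction reads from a separately chosen source operand (%1).

static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
{
	__asm__(
		"swap.b	%1, %0\n\t"	/* swap the two bytes of the low 16-bit word */
		"swap.w	%0, %0\n\t"	/* swap the high and low 16-bit words */
		"swap.b	%0, %0"		/* swap the bytes of the new low word */
		: "=r" (x)		/* output: any GPR */
		: "r" (x));		/* input: any GPR (was "0", tied to the output) */

	return x;
}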

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Paul Mundt 2009-05-10 14:25:39 +09:00
parent 457daa2b66
commit 567bb8fd47

--- a/arch/sh/include/asm/swab.h
+++ b/arch/sh/include/asm/swab.h
@@ -14,15 +14,15 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
 {
 	__asm__(
 #ifdef __SH5__
-		"byterev	%0, %0\n\t"
+		"byterev	%1, %0\n\t"
 		"shari		%0, 32, %0"
 #else
-		"swap.b		%0, %0\n\t"
+		"swap.b		%1, %0\n\t"
 		"swap.w		%0, %0\n\t"
 		"swap.b		%0, %0"
 #endif
 		: "=r" (x)
-		: "0" (x));
+		: "r" (x));
 
 	return x;
 }
@@ -32,13 +32,13 @@ static inline __attribute_const__ __u16 __arch_swab16(__u16 x)
 {
 	__asm__(
 #ifdef __SH5__
-		"byterev	%0, %0\n\t"
+		"byterev	%1, %0\n\t"
 		"shari		%0, 32, %0"
 #else
-		"swap.b		%0, %0"
+		"swap.b		%1, %0"
 #endif
 		: "=r" (x)
-		: "0" (x));
+		: "r" (x));
 
 	return x;
 }
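
As a quick reference for the semantics these routines implement, the plain-C snippet below (host-buildable, no SH inline assembly; the ref_* names are made up for the example) produces the expected results: swab16(0x1234) == 0x3412 and swab32(0x12345678) == 0x78563412.

#include <stdint.h>
#include <stdio.h>

/* Host-side reference for the byte swaps implemented above. */
static uint16_t ref_swab16(uint16_t x)
{
	return (uint16_t)((x << 8) | (x >> 8));
}

static uint32_t ref_swab32(uint32_t x)
{
	return (x << 24) | ((x & 0x0000ff00u) << 8) |
	       ((x & 0x00ff0000u) >> 8) | (x >> 24);
}

int main(void)
{
	printf("0x%x\n", (unsigned int)ref_swab16(0x1234));	/* 0x3412 */
	printf("0x%x\n", (unsigned int)ref_swab32(0x12345678));	/* 0x78563412 */
	return 0;
}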