kernel-fxtec-pro1x/arch/m68k/include/asm/bitops.h


#ifndef _M68K_BITOPS_H
#define _M68K_BITOPS_H
/*
* Copyright 1992, Linus Torvalds.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif
#include <linux/compiler.h>
/*
* Bit access functions vary across the ColdFire and 68k families.
* So we will break them out here, and then macro in the ones we want.
*
* ColdFire - supports standard bset/bclr/bchg with register operand only
* 68000 - supports standard bset/bclr/bchg with memory operand
* >= 68020 - also supports the bfset/bfclr/bfchg instructions
*
* Although it is possible to use only the bset/bclr/bchg forms with register
* operands on all platforms, doing so results in larger generated code.
* So we use the best form possible on a given platform.
*/
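/*
* How the addressing arithmetic below works: bit numbers follow the usual
* little-endian convention (bit 0 is the LSB of the first longword), but
* the 68k is big-endian and its single-bit instructions address bits
* within a byte. So "(nr ^ 31) / 8" selects the byte holding bit nr
* (e.g. nr = 0 gives byte offset 3, the lowest-order byte of the first
* longword) and "nr & 7" is the bit within that byte, while the
* bfset/bfclr/bfchg forms take "nr ^ 31", the bit-field offset counted
* from the most significant bit.
*/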
static inline void bset_reg_set_bit(int nr, volatile unsigned long *vaddr)
{
char *p = (char *)vaddr + (nr ^ 31) / 8;
__asm__ __volatile__ ("bset %1,(%0)"
:
: "a" (p), "di" (nr & 7)
: "memory");
}
static inline void bset_mem_set_bit(int nr, volatile unsigned long *vaddr)
{
char *p = (char *)vaddr + (nr ^ 31) / 8;
__asm__ __volatile__ ("bset %1,%0"
: "+m" (*p)
: "di" (nr & 7));
}
static inline void bfset_mem_set_bit(int nr, volatile unsigned long *vaddr)
{
__asm__ __volatile__ ("bfset %1{%0:#1}"
:
: "d" (nr ^ 31), "o" (*vaddr)
: "memory");
}
#if defined(CONFIG_COLDFIRE)
#define set_bit(nr, vaddr) bset_reg_set_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define set_bit(nr, vaddr) bset_mem_set_bit(nr, vaddr)
#else
#define set_bit(nr, vaddr) (__builtin_constant_p(nr) ? \
bset_mem_set_bit(nr, vaddr) : \
bfset_mem_set_bit(nr, vaddr))
#endif
#define __set_bit(nr, vaddr) set_bit(nr, vaddr)
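/*
* The "non-atomic" __set_bit() simply maps to set_bit(): bset/bfset are
* single read-modify-write instructions, so (on this non-SMP architecture)
* there is presumably nothing cheaper for the relaxed variant to use.
*/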
/*
* clear_bit() doesn't provide any barrier for the compiler.
*/
#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()
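/*
* Callers that need ordering around clear_bit() must therefore add it
* themselves, roughly along these lines (SOME_FLAG and bitmap are just
* placeholder names):
*
*	smp_mb__before_clear_bit();
*	clear_bit(SOME_FLAG, &bitmap);
*	smp_mb__after_clear_bit();
*/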
static inline void bclr_reg_clear_bit(int nr, volatile unsigned long *vaddr)
{
char *p = (char *)vaddr + (nr ^ 31) / 8;
__asm__ __volatile__ ("bclr %1,(%0)"
:
: "a" (p), "di" (nr & 7)
: "memory");
}
static inline void bclr_mem_clear_bit(int nr, volatile unsigned long *vaddr)
{
char *p = (char *)vaddr + (nr ^ 31) / 8;
__asm__ __volatile__ ("bclr %1,%0"
: "+m" (*p)
: "di" (nr & 7));
}
static inline void bfclr_mem_clear_bit(int nr, volatile unsigned long *vaddr)
{
__asm__ __volatile__ ("bfclr %1{%0:#1}"
:
: "d" (nr ^ 31), "o" (*vaddr)
: "memory");
}
#if defined(CONFIG_COLDFIRE)
#define clear_bit(nr, vaddr) bclr_reg_clear_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define clear_bit(nr, vaddr) bclr_mem_clear_bit(nr, vaddr)
#else
#define clear_bit(nr, vaddr) (__builtin_constant_p(nr) ? \
bclr_mem_clear_bit(nr, vaddr) : \
bfclr_mem_clear_bit(nr, vaddr))
#endif
#define __clear_bit(nr, vaddr) clear_bit(nr, vaddr)
static inline void bchg_reg_change_bit(int nr, volatile unsigned long *vaddr)
{
char *p = (char *)vaddr + (nr ^ 31) / 8;
__asm__ __volatile__ ("bchg %1,(%0)"
:
: "a" (p), "di" (nr & 7)
: "memory");
}
static inline void bchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
{
char *p = (char *)vaddr + (nr ^ 31) / 8;
__asm__ __volatile__ ("bchg %1,%0"
: "+m" (*p)
: "di" (nr & 7));
}
static inline void bfchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
{
__asm__ __volatile__ ("bfchg %1{%0:#1}"
:
: "d" (nr ^ 31), "o" (*vaddr)
: "memory");
}
#if defined(CONFIG_COLDFIRE)
#define change_bit(nr, vaddr) bchg_reg_change_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define change_bit(nr, vaddr) bchg_mem_change_bit(nr, vaddr)
#else
#define change_bit(nr, vaddr) (__builtin_constant_p(nr) ? \
bchg_mem_change_bit(nr, vaddr) : \
bfchg_mem_change_bit(nr, vaddr))
#endif
#define __change_bit(nr, vaddr) change_bit(nr, vaddr)
static inline int test_bit(int nr, const unsigned long *vaddr)
{
return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
}
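/*
* Plain C: "nr >> 5" selects the longword and "nr & 31" the bit within it,
* so e.g. nr = 37 tests bit 5 of vaddr[1].
*/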
static inline int bset_reg_test_and_set_bit(int nr,
volatile unsigned long *vaddr)
{
char *p = (char *)vaddr + (nr ^ 31) / 8;
char retval;
__asm__ __volatile__ ("bset %2,(%1); sne %0"
: "=d" (retval)
: "a" (p), "di" (nr & 7)
: "memory");
return retval;
}
static inline int bset_mem_test_and_set_bit(int nr,
volatile unsigned long *vaddr)
{
char *p = (char *)vaddr + (nr ^ 31) / 8;
char retval;
__asm__ __volatile__ ("bset %2,%1; sne %0"
: "=d" (retval), "+m" (*p)
: "di" (nr & 7));
return retval;
}
static inline int bfset_mem_test_and_set_bit(int nr,
volatile unsigned long *vaddr)
{
char retval;
__asm__ __volatile__ ("bfset %2{%1:#1}; sne %0"
: "=d" (retval)
: "d" (nr ^ 31), "o" (*vaddr)
: "memory");
return retval;
}
#if defined(CONFIG_COLDFIRE)
#define test_and_set_bit(nr, vaddr) bset_reg_test_and_set_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define test_and_set_bit(nr, vaddr) bset_mem_test_and_set_bit(nr, vaddr)
#else
#define test_and_set_bit(nr, vaddr) (__builtin_constant_p(nr) ? \
bset_mem_test_and_set_bit(nr, vaddr) : \
bfset_mem_test_and_set_bit(nr, vaddr))
#endif
#define __test_and_set_bit(nr, vaddr) test_and_set_bit(nr, vaddr)
static inline int bclr_reg_test_and_clear_bit(int nr,
volatile unsigned long *vaddr)
{
char *p = (char *)vaddr + (nr ^ 31) / 8;
char retval;
__asm__ __volatile__ ("bclr %2,(%1); sne %0"
: "=d" (retval)
: "a" (p), "di" (nr & 7)
: "memory");
return retval;
}
static inline int bclr_mem_test_and_clear_bit(int nr,
volatile unsigned long *vaddr)
{
char *p = (char *)vaddr + (nr ^ 31) / 8;
char retval;
__asm__ __volatile__ ("bclr %2,%1; sne %0"
: "=d" (retval), "+m" (*p)
: "di" (nr & 7));
return retval;
}
static inline int bfclr_mem_test_and_clear_bit(int nr,
volatile unsigned long *vaddr)
{
char retval;
__asm__ __volatile__ ("bfclr %2{%1:#1}; sne %0"
: "=d" (retval)
: "d" (nr ^ 31), "o" (*vaddr)
: "memory");
return retval;
}
#if defined(CONFIG_COLDFIRE)
#define test_and_clear_bit(nr, vaddr) bclr_reg_test_and_clear_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define test_and_clear_bit(nr, vaddr) bclr_mem_test_and_clear_bit(nr, vaddr)
#else
#define test_and_clear_bit(nr, vaddr) (__builtin_constant_p(nr) ? \
bclr_mem_test_and_clear_bit(nr, vaddr) : \
bfclr_mem_test_and_clear_bit(nr, vaddr))
#endif
#define __test_and_clear_bit(nr, vaddr) test_and_clear_bit(nr, vaddr)
static inline int bchg_reg_test_and_change_bit(int nr,
volatile unsigned long *vaddr)
{
char *p = (char *)vaddr + (nr ^ 31) / 8;
char retval;
__asm__ __volatile__ ("bchg %2,(%1); sne %0"
: "=d" (retval)
: "a" (p), "di" (nr & 7)
: "memory");
return retval;
}
static inline int bchg_mem_test_and_change_bit(int nr,
volatile unsigned long *vaddr)
{
char *p = (char *)vaddr + (nr ^ 31) / 8;
char retval;
__asm__ __volatile__ ("bchg %2,%1; sne %0"
: "=d" (retval), "+m" (*p)
: "di" (nr & 7));
return retval;
}
static inline int bfchg_mem_test_and_change_bit(int nr,
volatile unsigned long *vaddr)
{
char retval;
__asm__ __volatile__ ("bfchg %2{%1:#1}; sne %0"
: "=d" (retval)
: "d" (nr ^ 31), "o" (*vaddr)
: "memory");
return retval;
}
#if defined(CONFIG_COLDFIRE)
#define test_and_change_bit(nr, vaddr) bchg_reg_test_and_change_bit(nr, vaddr)
#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#define test_and_change_bit(nr, vaddr) bchg_mem_test_and_change_bit(nr, vaddr)
#else
#define test_and_change_bit(nr, vaddr) (__builtin_constant_p(nr) ? \
bchg_mem_test_and_change_bit(nr, vaddr) : \
bfchg_mem_test_and_change_bit(nr, vaddr))
#endif
#define __test_and_change_bit(nr, vaddr) test_and_change_bit(nr, vaddr)
/*
* The true 68020 and more advanced processors support the "bfffo"
* instruction for finding bits. ColdFire and simple 68000 parts
* (including CPU32) do not support this. They simply use the generic
* functions.
*/
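/*
* The bfffo-based helpers below all follow the same pattern: "num & -num"
* isolates the lowest set bit, bfffo reports its offset counted from the
* most significant bit, and xor-ing with 31 converts that back to a
* conventional (LSB = 0) bit number.
*/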
#if defined(CONFIG_CPU_HAS_NO_BITFIELDS)
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/ffz.h>
#else
static inline int find_first_zero_bit(const unsigned long *vaddr,
unsigned size)
{
const unsigned long *p = vaddr;
int res = 32;
unsigned int words;
unsigned long num;
if (!size)
return 0;
words = (size + 31) >> 5;
while (!(num = ~*p++)) {
if (!--words)
goto out;
}
__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
: "=d" (res) : "d" (num & -num));
res ^= 31;
out:
res += ((long)p - (long)vaddr - 4) * 8;
return res < size ? res : size;
}
#define find_first_zero_bit find_first_zero_bit
static inline int find_next_zero_bit(const unsigned long *vaddr, int size,
int offset)
{
const unsigned long *p = vaddr + (offset >> 5);
int bit = offset & 31UL, res;
if (offset >= size)
return size;
if (bit) {
unsigned long num = ~*p++ & (~0UL << bit);
offset -= bit;
/* Look for zero in first longword */
__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
: "=d" (res) : "d" (num & -num));
if (res < 32) {
offset += res ^ 31;
return offset < size ? offset : size;
}
offset += 32;
if (offset >= size)
return size;
}
/* No zero yet, search the remaining full longwords for a zero */
return offset + find_first_zero_bit(p, size - offset);
}
#define find_next_zero_bit find_next_zero_bit
static inline int find_first_bit(const unsigned long *vaddr, unsigned size)
{
const unsigned long *p = vaddr;
int res = 32;
unsigned int words;
unsigned long num;
if (!size)
return 0;
words = (size + 31) >> 5;
while (!(num = *p++)) {
if (!--words)
goto out;
}
__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
: "=d" (res) : "d" (num & -num));
res ^= 31;
out:
res += ((long)p - (long)vaddr - 4) * 8;
return res < size ? res : size;
}
#define find_first_bit find_first_bit
static inline int find_next_bit(const unsigned long *vaddr, int size,
int offset)
{
const unsigned long *p = vaddr + (offset >> 5);
int bit = offset & 31UL, res;
if (offset >= size)
return size;
if (bit) {
unsigned long num = *p++ & (~0UL << bit);
offset -= bit;
/* Look for one in first longword */
__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
: "=d" (res) : "d" (num & -num));
if (res < 32) {
offset += res ^ 31;
return offset < size ? offset : size;
}
offset += 32;
if (offset >= size)
return size;
}
/* No one yet, search the remaining full longwords for a one */
return offset + find_first_bit(p, size - offset);
}
#define find_next_bit find_next_bit
/*
* ffz = Find First Zero in word. Undefined if no zero exists,
* so callers should check against ~0UL first.
*/
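/*
* The implementation relies on the same bfffo trick: "~word & -~word"
* isolates the lowest clear bit of word, and "res ^ 31" converts the
* MSB-relative offset back to an ordinary bit number.
*/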
static inline unsigned long ffz(unsigned long word)
{
int res;
__asm__ __volatile__ ("bfffo %1{#0,#0},%0"
: "=d" (res) : "d" (~word & -~word));
return res ^ 31;
}
#endif
#ifdef __KERNEL__
#if defined(CONFIG_CPU_HAS_NO_BITFIELDS)
/*
* The newer ColdFire family members support a "bitrev" instruction
* and we can use that to implement a fast ffs. Older ColdFire parts
* and normal 68000 parts don't have anything special, so we use the
* generic functions for those.
*/
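/*
* The trick: "bitrev" reverses the 32 bits, so the lowest set bit of x
* becomes the highest, and "ff1" then returns its distance from bit 31,
* which is exactly the original bit number. E.g. x = 0x8 becomes
* 0x10000000, and ff1 returns 3.
*/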
#if (defined(__mcfisaaplus__) || defined(__mcfisac__)) && \
!defined(CONFIG_M68000) && !defined(CONFIG_MCPU32)
static inline int __ffs(int x)
{
__asm__ __volatile__ ("bitrev %0; ff1 %0"
: "=d" (x)
: "0" (x));
return x;
}
static inline int ffs(int x)
{
if (!x)
return 0;
return __ffs(x) + 1;
}
#else
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/__ffs.h>
#endif
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#else
/*
* ffs: find first bit set. This is defined the same way as
* the libc and compiler builtin ffs routines, therefore
* differs in spirit from the above ffz (man ffs).
*/
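/*
* "x & -x" isolates the lowest set bit, bfffo yields its offset from the
* MSB (31 - n for bit n), and "32 - cnt" turns that into the 1-based
* result ffs() must return, e.g. ffs(0x8) = 32 - 28 = 4. With x == 0 the
* bfffo result is 32, so ffs(0) is 0 as required.
*/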
static inline int ffs(int x)
{
int cnt;
__asm__ ("bfffo %1{#0:#0},%0"
: "=d" (cnt)
: "dm" (x & -x));
return 32 - cnt;
}
#define __ffs(x) (ffs(x) - 1)
/*
* fls: find last bit set.
*/
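/*
* Same idea without isolating a bit first: bfffo on x itself finds the
* highest set bit, so e.g. fls(0x10) = 32 - 27 = 5 and fls(0) = 32 - 32 = 0.
*/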
static inline int fls(int x)
{
int cnt;
__asm__ ("bfffo %1{#0,#0},%0"
: "=d" (cnt)
: "dm" (x));
return 32 - cnt;
}
static inline int __fls(int x)
{
return fls(x) - 1;
}
#endif
#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>
#endif /* __KERNEL__ */
#endif /* _M68K_BITOPS_H */