kernel-fxtec-pro1x/include/asm-m68knommu/processor.h
Chase Venters f6dc8c5b8e [PATCH] Make cpu_relax() imply barrier() on all arches
During the recent discussion of taking 'volatile' off of the spinlock, I
noticed that while most arches #define cpu_relax() such that it implies
barrier(), some arches define cpu_relax() to be empty.

This patch changes the definition of cpu_relax() for frv, h8300, m68knommu,
sh, sh64, v850 and xtensa from an empty while(0) to the compiler barrier().

Signed-off-by: Chase Venters <chase.venters@clientec.com>
Acked-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-07-12 12:56:45 -07:00
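
The failure mode this guards against is easy to show in code: in a
spin-wait loop, an empty cpu_relax() leaves the compiler free to hoist
the load of the polled variable out of the loop. A minimal sketch
('flag' and wait_for_flag() are illustrative names, not from the patch):

	static int flag;	/* hypothetical flag, set elsewhere */

	static void wait_for_flag(void)
	{
		/*
		 * If cpu_relax() expands to nothing, the compiler may
		 * load 'flag' into a register once and spin on that
		 * stale copy forever.  Defined as barrier(), i.e.
		 * __asm__ __volatile__("" : : : "memory"), it forces
		 * 'flag' to be re-read from memory on each iteration.
		 */
		while (!flag)
			cpu_relax();
	}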

/*
 * include/asm-m68knommu/processor.h
 *
 * Copyright (C) 1995 Hamish Macdonald
 */

#ifndef __ASM_M68K_PROCESSOR_H
#define __ASM_M68K_PROCESSOR_H

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").  Relies on the GCC
 * statement-expression and labels-as-values (&&label) extensions.
 */
#define current_text_addr() ({ __label__ _l; _l: &&_l; })

#include <linux/compiler.h>
#include <linux/threads.h>
#include <asm/types.h>
#include <asm/segment.h>
#include <asm/fpu.h>
#include <asm/ptrace.h>
#include <asm/current.h>
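
/*
 * ColdFire has no separate user stack pointer register, so the kernel
 * tracks the user stack pointer in software (sw_usp); rdusp()/wrusp()
 * below hide that difference from their callers.
 */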
static inline unsigned long rdusp(void)
{
#ifdef CONFIG_COLDFIRE
	extern unsigned int sw_usp;
	return sw_usp;
#else
	unsigned long usp;
	__asm__ __volatile__("move %/usp,%0" : "=a" (usp));
	return usp;
#endif
}

static inline void wrusp(unsigned long usp)
{
#ifdef CONFIG_COLDFIRE
	extern unsigned int sw_usp;
	sw_usp = usp;
#else
	__asm__ __volatile__("move %0,%/usp" : : "a" (usp));
#endif
}

/*
 * User space process size: 3.75GB. This is hardcoded into a few places,
 * so don't change it unless you know what you are doing.
 */
#define TASK_SIZE	(0xF0000000UL)

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's. We won't be using it.
 */
#define TASK_UNMAPPED_BASE	0

/*
 * If you change this structure, you must change the code and offsets
 * in m68k/machasm.S
 */
struct thread_struct {
	unsigned long	ksp;		/* kernel stack pointer */
	unsigned long	usp;		/* user stack pointer */
	unsigned short	sr;		/* saved status register */
	unsigned short	fs;		/* saved fs (sfc, dfc) */
	unsigned long	crp[2];		/* cpu root pointer */
	unsigned long	esp0;		/* points to SR of stack frame */
	unsigned long	fp[8*3];	/* fp registers, 3 longs each */
	unsigned long	fpcntl[3];	/* fp control regs */
	unsigned char	fpstate[FPSTATESIZE];	/* floating point state */
};

#define INIT_THREAD  { \
	sizeof(init_stack) + (unsigned long) init_stack, 0,	/* ksp, usp */ \
	PS_S, __KERNEL_DS,					/* sr, fs */ \
	{0, 0}, 0,						/* crp, esp0 */ \
	{0,}, {0, 0, 0}, {0,},					/* fp, fpcntl, fpstate */ \
}

/*
 * ColdFire stacks need to be re-aligned on trap exit; conventional
 * 68k can handle this case cleanly.  Forcing the frame format to 0x4
 * marks the frame as aligned, so RTE applies no re-alignment fixup.
 */
#if defined(CONFIG_COLDFIRE)
#define reformat(_regs)		do { (_regs)->format = 0x4; } while (0)
#else
#define reformat(_regs)		do { } while (0)
#endif

/*
 * Do necessary setup to start up a newly executed thread.
 *
 * Pass the data segment into user programs if it exists;
 * it can't hurt anything as far as I can tell.
 */
#define start_thread(_regs, _pc, _usp)				\
	do {							\
		set_fs(USER_DS); /* reads from user space */	\
		(_regs)->pc = (_pc);				\
		/* zero the saved frame pointer (a6) */		\
		((struct switch_stack *)(_regs))[-1].a6 = 0;	\
		reformat(_regs);				\
		if (current->mm)				\
			(_regs)->d5 = current->mm->start_data;	\
		/* clear the supervisor bit in the saved SR */	\
		(_regs)->sr &= ~0x2000;				\
		wrusp(_usp);					\
	} while (0)

/* Forward declaration, a strange C thing */
struct task_struct;

/* Free all resources held by a thread. */
static inline void release_thread(struct task_struct *dead_task)
{
}

/* Prepare to copy thread state - unlazy all lazy status */
#define prepare_to_copy(tsk)	do { } while (0)

extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

/*
 * Free current thread data structures etc..
 */
static inline void exit_thread(void)
{
}

unsigned long thread_saved_pc(struct task_struct *tsk);
unsigned long get_wchan(struct task_struct *p);

#define KSTK_EIP(tsk)						\
	({							\
		unsigned long eip = 0;				\
		if ((tsk)->thread.esp0 > PAGE_SIZE &&		\
		    virt_addr_valid((tsk)->thread.esp0))	\
			eip = ((struct pt_regs *)(tsk)->thread.esp0)->pc; \
		eip;						\
	})

#define KSTK_ESP(tsk)	((tsk) == current ? rdusp() : (tsk)->thread.usp)

#define cpu_relax()	barrier()

#endif /* __ASM_M68K_PROCESSOR_H */