4c75f84f2c
On 32-bit non-Book E, local_irq_restore() turns into just mtmsr(), which doesn't currently have a compiler memory barrier. This means that accesses to memory inside a local_irq_save/restore section, or a spin_lock_irqsave/spin_unlock_irqrestore section on UP, can be reordered by the compiler to occur outside that section. To fix this, this adds a compiler memory barrier to mtmsr for both 32-bit and 64-bit. Having a compiler memory barrier in mtmsr makes sense because it will almost always be changing something about the context in which memory accesses are done, so in general we don't want memory accesses getting moved from one side of an mtmsr to the other. With the barrier in mtmsr(), some of the explicit barriers in hw_irq.h are now redundant, so this removes them. Signed-off-by: Paul Mackerras <paulus@samba.org> Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
170 lines · 3.7 KiB · C
/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
|
|
#ifndef _ASM_POWERPC_HW_IRQ_H
|
|
#define _ASM_POWERPC_HW_IRQ_H
|
|
|
|
#ifdef __KERNEL__
|
|
|
|
#include <linux/errno.h>
|
|
#include <linux/compiler.h>
|
|
#include <asm/ptrace.h>
|
|
#include <asm/processor.h>
|
|
|
|
extern void timer_interrupt(struct pt_regs *);
|
|
|
|
#ifdef CONFIG_PPC64
|
|
#include <asm/paca.h>
|
|
|
|
/*
 * Return the current soft-enable state from the PACA (register 13 is
 * used as the PACA pointer here).  Non-zero means interrupts are
 * soft-enabled; 0 means soft-disabled (see raw_irqs_disabled() below).
 */
static inline unsigned long local_get_flags(void)
{
	unsigned long flags;

	/* Load the soft_enabled byte directly from the per-CPU paca. */
	__asm__ __volatile__("lbz %0,%1(13)"
	: "=r" (flags)
	: "i" (offsetof(struct paca_struct, soft_enabled)));

	return flags;
}
|
|
|
|
/*
 * Soft-disable interrupts: fetch the previous soft_enabled byte from
 * the PACA and store 0 in its place, returning the old value so the
 * caller can restore it later.  The "memory" clobber is a compiler
 * barrier so memory accesses cannot be moved out of the disabled
 * section.
 */
static inline unsigned long raw_local_irq_disable(void)
{
	unsigned long flags, zero;

	__asm__ __volatile__("li %1,0; lbz %0,%2(13); stb %1,%2(13)"
	: "=r" (flags), "=&r" (zero)
	: "i" (offsetof(struct paca_struct, soft_enabled))
	: "memory");

	return flags;
}
|
|
|
|
extern void raw_local_irq_restore(unsigned long);
|
|
extern void iseries_handle_interrupts(void);
|
|
|
|
/* Re-enabling is a restore to the "enabled" (1) state. */
#define raw_local_irq_enable()		raw_local_irq_restore(1)
#define raw_local_save_flags(flags)	((flags) = local_get_flags())
#define raw_local_irq_save(flags)	((flags) = raw_local_irq_disable())

/* A soft_enabled value of 0 means interrupts are soft-disabled. */
#define raw_irqs_disabled()		(local_get_flags() == 0)
#define raw_irqs_disabled_flags(flags)	((flags) == 0)

/* Hard-enable/disable: actually flip MSR[EE] (L=1 form of mtmsrd). */
#define __hard_irq_enable()	__mtmsrd(mfmsr() | MSR_EE, 1)
#define __hard_irq_disable()	__mtmsrd(mfmsr() & ~MSR_EE, 1)

/*
 * Fully disable: clear MSR[EE] and record in the PACA that we are both
 * soft- and hard-disabled, so the lazy-disable logic doesn't think an
 * interrupt can still be delivered.
 */
#define hard_irq_disable()			\
	do {					\
		__hard_irq_disable();		\
		get_paca()->soft_enabled = 0;	\
		get_paca()->hard_enabled = 0;	\
	} while(0)
|
|
|
|
/*
 * 64-bit: a saved soft-enable value of 0 means interrupts were
 * soft-disabled when the flags were captured.
 */
static inline int irqs_disabled_flags(unsigned long flags)
{
	return !flags;
}
|
|
|
|
#else
|
|
|
|
#if defined(CONFIG_BOOKE)
#define SET_MSR_EE(x)	mtmsr(x)
/* Book E has a dedicated instruction (wrtee) to write just MSR[EE]. */
#define local_irq_restore(flags)	__asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory")
#else
#define SET_MSR_EE(x)	mtmsr(x)
/*
 * mtmsr() itself includes a compiler memory barrier (see the commit
 * text above), so no explicit barrier is needed here.
 */
#define local_irq_restore(flags)	mtmsr(flags)
#endif
|
|
|
|
/* Disable external interrupts by clearing MSR[EE] on the current CPU. */
static inline void local_irq_disable(void)
{
#ifdef CONFIG_BOOKE
	/* Book E: write EE=0 directly, with a compiler barrier. */
	__asm__ __volatile__("wrteei 0": : :"memory");
#else
	unsigned long msr;

	/* Classic: read-modify-write the whole MSR. */
	msr = mfmsr();
	SET_MSR_EE(msr & ~MSR_EE);
#endif
}
|
|
|
|
/* Enable external interrupts by setting MSR[EE] on the current CPU. */
static inline void local_irq_enable(void)
{
#ifdef CONFIG_BOOKE
	/* Book E: write EE=1 directly, with a compiler barrier. */
	__asm__ __volatile__("wrteei 1": : :"memory");
#else
	unsigned long msr;

	/* Classic: read-modify-write the whole MSR. */
	msr = mfmsr();
	SET_MSR_EE(msr | MSR_EE);
#endif
}
|
|
|
|
/*
 * Save the current MSR into *flags, then disable interrupts.  The
 * saved value is suitable for a later local_irq_restore().
 */
static inline void local_irq_save_ptr(unsigned long *flags)
{
	unsigned long msr;
	msr = mfmsr();
	*flags = msr;
#ifdef CONFIG_BOOKE
	__asm__ __volatile__("wrteei 0": : :"memory");
#else
	SET_MSR_EE(msr & ~MSR_EE);
#endif
}
|
|
|
|
/* 32-bit flavours: the flags word is simply the MSR value. */
#define local_save_flags(flags)	((flags) = mfmsr())
#define local_irq_save(flags)	local_irq_save_ptr(&flags)
#define irqs_disabled()		((mfmsr() & MSR_EE) == 0)

/* No lazy/soft disable on 32-bit: hard == normal disable. */
#define hard_irq_enable()	local_irq_enable()
#define hard_irq_disable()	local_irq_disable()
|
|
|
|
static inline int irqs_disabled_flags(unsigned long flags)
|
|
{
|
|
return (flags & MSR_EE) == 0;
|
|
}
|
|
|
|
#endif /* CONFIG_PPC64 */
|
|
|
|
/*
 * interrupt-retrigger: should we handle this via lost interrupts and IPIs
 * or should we not care like we do now ? --BenH.
 */
|
|
struct irq_chip;
|
|
|
|
#ifdef CONFIG_PERF_COUNTERS
|
|
/*
 * Read the perf_counter_pending byte from the PACA (register 13 is
 * the PACA pointer); non-zero means a perf counter interrupt is
 * pending for this CPU.
 */
static inline unsigned long test_perf_counter_pending(void)
{
	unsigned long x;

	asm volatile("lbz %0,%1(13)"
	: "=r" (x)
	: "i" (offsetof(struct paca_struct, perf_counter_pending)));
	return x;
}
|
|
|
|
/* Mark a perf counter interrupt pending in this CPU's PACA. */
static inline void set_perf_counter_pending(void)
{
	asm volatile("stb %0,%1(13)" : :
	"r" (1),
	"i" (offsetof(struct paca_struct, perf_counter_pending)));
}
|
|
|
|
/* Clear the pending-perf-counter-interrupt flag in this CPU's PACA. */
static inline void clear_perf_counter_pending(void)
{
	asm volatile("stb %0,%1(13)" : :
	"r" (0),
	"i" (offsetof(struct paca_struct, perf_counter_pending)));
}
|
|
|
|
#else
|
|
|
|
/* No perf counter support: nothing can ever be pending. */
static inline unsigned long test_perf_counter_pending(void) { return 0; }
|
|
|
|
/* No perf counter support: these are no-ops. */
static inline void set_perf_counter_pending(void) {}
static inline void clear_perf_counter_pending(void) {}
|
|
#endif /* CONFIG_PERF_COUNTERS */
|
|
|
|
#endif /* __KERNEL__ */
|
|
#endif /* _ASM_POWERPC_HW_IRQ_H */
|