925d519ab8
While going over the wakeup code I noticed delayed wakeups only work for hardware counters but basically all software counters rely on them. This patch unifies and generalizes the delayed wakeup to fix this issue. Since we're dealing with NMI context bits here, use a cmpxchg() based single link list implementation to track counters that have pending wakeups. [ This should really be generic code for delayed wakeups, but since we cannot use cmpxchg()/xchg() in generic code, I've let it live in the perf_counter code. -- Eric Dumazet could use it to aggregate the network wakeups. ] Furthermore, the x86 method of using TIF flags was flawed in that it's quite possible to end up setting the bit on the idle task, losing the wakeup. The powerpc method uses per-cpu storage and does appear to be sufficient. Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Acked-by: Paul Mackerras <paulus@samba.org> Orig-LKML-Reference: <20090330171023.153932974@chello.nl> Signed-off-by: Ingo Molnar <mingo@elte.hu>
174 lines
3.9 KiB
C
174 lines
3.9 KiB
C
/*
|
|
* Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
|
|
*/
|
|
#ifndef _ASM_POWERPC_HW_IRQ_H
|
|
#define _ASM_POWERPC_HW_IRQ_H
|
|
|
|
#ifdef __KERNEL__
|
|
|
|
#include <linux/errno.h>
|
|
#include <linux/compiler.h>
|
|
#include <asm/ptrace.h>
|
|
#include <asm/processor.h>
|
|
|
|
extern void timer_interrupt(struct pt_regs *);
|
|
|
|
#ifdef CONFIG_PPC64
|
|
#include <asm/paca.h>
|
|
|
|
/*
 * Read this CPU's soft-interrupt-enable state.
 *
 * On 64-bit powerpc, register r13 holds the per-CPU paca pointer, so a
 * single byte load at offsetof(paca_struct, soft_enabled) from r13
 * fetches the flag with no extra address arithmetic.
 *
 * Returns non-zero when interrupts are soft-enabled, 0 when they are
 * soft-disabled (see raw_irqs_disabled_flags() below, which tests == 0).
 */
static inline unsigned long local_get_flags(void)
{
	unsigned long flags;

	__asm__ __volatile__("lbz %0,%1(13)"
	: "=r" (flags)
	: "i" (offsetof(struct paca_struct, soft_enabled)));

	return flags;
}
|
|
|
|
/*
 * Soft-disable interrupts and return the previous soft_enabled value.
 *
 * The asm loads the old paca->soft_enabled byte and stores 0 to it
 * (r13 is the paca pointer); doing both in one asm statement keeps the
 * read-modify-write sequence together.  Only the soft flag is touched
 * here — the hardware MSR[EE] bit is left alone.  The "memory" clobber
 * acts as a compiler barrier so memory accesses are not reordered
 * across the disable.
 */
static inline unsigned long raw_local_irq_disable(void)
{
	unsigned long flags, zero;

	__asm__ __volatile__("li %1,0; lbz %0,%2(13); stb %1,%2(13)"
	: "=r" (flags), "=&r" (zero)
	: "i" (offsetof(struct paca_struct, soft_enabled))
	: "memory");

	return flags;
}
|
|
|
|
extern void raw_local_irq_restore(unsigned long);
|
|
extern void iseries_handle_interrupts(void);
|
|
|
|
#define raw_local_irq_enable() raw_local_irq_restore(1)
|
|
#define raw_local_save_flags(flags) ((flags) = local_get_flags())
|
|
#define raw_local_irq_save(flags) ((flags) = raw_local_irq_disable())
|
|
|
|
#define raw_irqs_disabled() (local_get_flags() == 0)
|
|
#define raw_irqs_disabled_flags(flags) ((flags) == 0)
|
|
|
|
#define __hard_irq_enable() __mtmsrd(mfmsr() | MSR_EE, 1)
|
|
#define __hard_irq_disable() __mtmsrd(mfmsr() & ~MSR_EE, 1)
|
|
|
|
#define hard_irq_disable() \
|
|
do { \
|
|
__hard_irq_disable(); \
|
|
get_paca()->soft_enabled = 0; \
|
|
get_paca()->hard_enabled = 0; \
|
|
} while(0)
|
|
|
|
/*
 * Test a flags word previously saved by raw_local_save_flags() /
 * raw_local_irq_save(): soft_enabled == 0 means interrupts were
 * soft-disabled when the flags were captured.
 */
static inline int irqs_disabled_flags(unsigned long flags)
{
	return !flags;
}
|
|
|
|
#else
|
|
|
|
#if defined(CONFIG_BOOKE)
|
|
#define SET_MSR_EE(x) mtmsr(x)
|
|
#define local_irq_restore(flags) __asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory")
|
|
#else
|
|
#define SET_MSR_EE(x) mtmsr(x)
|
|
#define local_irq_restore(flags) mtmsr(flags)
|
|
#endif
|
|
|
|
/*
 * Hard-disable external interrupts on 32-bit powerpc by clearing
 * MSR[EE].
 */
static inline void local_irq_disable(void)
{
#ifdef CONFIG_BOOKE
	/* Book E has a dedicated instruction to write MSR[EE] directly. */
	__asm__ __volatile__("wrteei 0": : :"memory");
#else
	unsigned long msr;

	/* Compiler barrier: keep earlier accesses before the disable. */
	__asm__ __volatile__("": : :"memory");
	msr = mfmsr();
	SET_MSR_EE(msr & ~MSR_EE);
#endif
}
|
|
|
|
/*
 * Hard-enable external interrupts on 32-bit powerpc by setting
 * MSR[EE].
 */
static inline void local_irq_enable(void)
{
#ifdef CONFIG_BOOKE
	/* Book E has a dedicated instruction to write MSR[EE] directly. */
	__asm__ __volatile__("wrteei 1": : :"memory");
#else
	unsigned long msr;

	/* Compiler barrier: keep earlier accesses before the enable. */
	__asm__ __volatile__("": : :"memory");
	msr = mfmsr();
	SET_MSR_EE(msr | MSR_EE);
#endif
}
|
|
|
|
/*
 * Save the current MSR into *flags, then hard-disable external
 * interrupts.  Backend for the local_irq_save() macro below; the saved
 * value is later handed to local_irq_restore().
 */
static inline void local_irq_save_ptr(unsigned long *flags)
{
	unsigned long msr;

	msr = mfmsr();
	*flags = msr;
#ifdef CONFIG_BOOKE
	__asm__ __volatile__("wrteei 0": : :"memory");
#else
	SET_MSR_EE(msr & ~MSR_EE);
#endif
	/* Compiler barrier: later accesses stay after the disable. */
	__asm__ __volatile__("": : :"memory");
}
|
|
|
|
#define local_save_flags(flags) ((flags) = mfmsr())
|
|
#define local_irq_save(flags) local_irq_save_ptr(&flags)
|
|
#define irqs_disabled() ((mfmsr() & MSR_EE) == 0)
|
|
|
|
#define hard_irq_enable() local_irq_enable()
|
|
#define hard_irq_disable() local_irq_disable()
|
|
|
|
static inline int irqs_disabled_flags(unsigned long flags)
|
|
{
|
|
return (flags & MSR_EE) == 0;
|
|
}
|
|
|
|
#endif /* CONFIG_PPC64 */
|
|
|
|
/*
|
|
* interrupt-retrigger: should we handle this via lost interrupts and IPIs
|
|
* or should we not care like we do now ? --BenH.
|
|
*/
|
|
struct irq_chip;
|
|
|
|
#ifdef CONFIG_PERF_COUNTERS
|
|
/*
 * Read this CPU's perf_counter_pending flag from the paca (r13 is the
 * paca pointer).  Non-zero means a perf-counter wakeup was deferred
 * (e.g. raised from NMI context) and perf_counter_do_pending() should
 * be run once it is safe to do so.
 */
static inline unsigned long test_perf_counter_pending(void)
{
	unsigned long x;

	asm volatile("lbz %0,%1(13)"
	: "=r" (x)
	: "i" (offsetof(struct paca_struct, perf_counter_pending)));
	return x;
}
|
|
|
|
/*
 * Mark a deferred perf-counter wakeup for this CPU by storing 1 into
 * paca->perf_counter_pending.  Per-CPU paca storage (rather than a
 * task TIF flag) is used so the request cannot be lost across a
 * context switch.
 */
static inline void set_perf_counter_pending(void)
{
	asm volatile("stb %0,%1(13)" : :
	"r" (1),
	"i" (offsetof(struct paca_struct, perf_counter_pending)));
}
|
|
|
|
/*
 * Clear this CPU's deferred perf-counter wakeup request by storing 0
 * into paca->perf_counter_pending.
 */
static inline void clear_perf_counter_pending(void)
{
	asm volatile("stb %0,%1(13)" : :
	"r" (0),
	"i" (offsetof(struct paca_struct, perf_counter_pending)));
}
|
|
|
|
extern void perf_counter_do_pending(void);
|
|
|
|
#else
|
|
|
|
/* !CONFIG_PERF_COUNTERS stub: there is never deferred perf work. */
static inline unsigned long test_perf_counter_pending(void)
{
	return 0UL;
}
|
|
|
|
/* !CONFIG_PERF_COUNTERS stubs: pending-flag operations compile away. */
static inline void set_perf_counter_pending(void) {}
static inline void clear_perf_counter_pending(void) {}
static inline void perf_counter_do_pending(void) {}
|
|
#endif /* CONFIG_PERF_COUNTERS */
|
|
|
|
#endif /* __KERNEL__ */
|
|
#endif /* _ASM_POWERPC_HW_IRQ_H */
|