Merge branch 'irq-davinci' of git://gitorious.org/linux-davinci/linux-davinci into devel-stable

commit 805499a6d5
Author: Russell King
Date:   2011-05-11 18:49:31 +01:00

10 changed files with 605 additions and 128 deletions


@ -191,8 +191,8 @@
<para>
Whenever an interrupt triggers, the lowlevel arch code calls into
the generic interrupt code by calling desc->handle_irq().
This highlevel IRQ handling function only uses desc->chip primitives
referenced by the assigned chip descriptor structure.
This highlevel IRQ handling function only uses desc->irq_data.chip
primitives referenced by the assigned chip descriptor structure.
</para>
</sect1>
<sect1 id="Highlevel_Driver_API">
@ -206,11 +206,11 @@
<listitem><para>enable_irq()</para></listitem>
<listitem><para>disable_irq_nosync() (SMP only)</para></listitem>
<listitem><para>synchronize_irq() (SMP only)</para></listitem>
<listitem><para>set_irq_type()</para></listitem>
<listitem><para>set_irq_wake()</para></listitem>
<listitem><para>set_irq_data()</para></listitem>
<listitem><para>set_irq_chip()</para></listitem>
<listitem><para>set_irq_chip_data()</para></listitem>
<listitem><para>irq_set_irq_type()</para></listitem>
<listitem><para>irq_set_irq_wake()</para></listitem>
<listitem><para>irq_set_handler_data()</para></listitem>
<listitem><para>irq_set_chip()</para></listitem>
<listitem><para>irq_set_chip_data()</para></listitem>
</itemizedlist>
See the autogenerated function documentation for details.
</para>
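As a hedged editorial sketch (not part of this patch), driver code using the renamed calls from the list above might look as follows; the IRQ number, trigger type and the foo_chip_data structure are invented for illustration:

#include <linux/irq.h>
#include <linux/interrupt.h>

/* Editor's sketch: hypothetical per-line data for a made-up device */
struct foo_chip_data {
        void __iomem *base;
};

static int foo_setup_irq(unsigned int irq, struct foo_chip_data *cd)
{
        int ret;

        /* formerly set_irq_type() / set_irq_chip_data() / set_irq_wake() */
        ret = irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
        if (ret)
                return ret;

        ret = irq_set_chip_data(irq, cd);
        if (ret)
                return ret;

        return irq_set_irq_wake(irq, 1);
}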
@ -225,6 +225,8 @@
<listitem><para>handle_fasteoi_irq</para></listitem>
<listitem><para>handle_simple_irq</para></listitem>
<listitem><para>handle_percpu_irq</para></listitem>
<listitem><para>handle_edge_eoi_irq</para></listitem>
<listitem><para>handle_bad_irq</para></listitem>
</itemizedlist>
The interrupt flow handlers (either predefined or architecture
specific) are assigned to specific interrupts by the architecture
@ -241,13 +243,13 @@
<programlisting>
default_enable(struct irq_data *data)
{
desc->chip->irq_unmask(data);
desc->irq_data.chip->irq_unmask(data);
}
default_disable(struct irq_data *data)
{
if (!delay_disable(data))
desc->chip->irq_mask(data);
desc->irq_data.chip->irq_mask(data);
}
default_ack(struct irq_data *data)
@ -284,9 +286,9 @@ noop(struct irq_data *data))
<para>
The following control flow is implemented (simplified excerpt):
<programlisting>
desc->chip->irq_mask();
handle_IRQ_event(desc->action);
desc->chip->irq_unmask();
desc->irq_data.chip->irq_mask_ack();
handle_irq_event(desc->action);
desc->irq_data.chip->irq_unmask();
</programlisting>
</para>
</sect3>
@ -300,8 +302,8 @@ desc->chip->irq_unmask();
<para>
The following control flow is implemented (simplified excerpt):
<programlisting>
handle_IRQ_event(desc->action);
desc->chip->irq_eoi();
handle_irq_event(desc->action);
desc->irq_data.chip->irq_eoi();
</programlisting>
</para>
</sect3>
@ -315,17 +317,17 @@ desc->chip->irq_eoi();
The following control flow is implemented (simplified excerpt):
<programlisting>
if (desc->status &amp; running) {
desc->chip->irq_mask();
desc->irq_data.chip->irq_mask_ack();
desc->status |= pending | masked;
return;
}
desc->chip->irq_ack();
desc->irq_data.chip->irq_ack();
desc->status |= running;
do {
if (desc->status &amp; masked)
desc->chip->irq_unmask();
desc->irq_data.chip->irq_unmask();
desc->status &amp;= ~pending;
handle_IRQ_event(desc->action);
handle_irq_event(desc->action);
} while (status &amp; pending);
desc->status &amp;= ~running;
</programlisting>
@ -344,7 +346,7 @@ desc->status &amp;= ~running;
<para>
The following control flow is implemented (simplified excerpt):
<programlisting>
handle_IRQ_event(desc->action);
handle_irq_event(desc->action);
</programlisting>
</para>
</sect3>
@ -362,12 +364,29 @@ handle_IRQ_event(desc->action);
<para>
The following control flow is implemented (simplified excerpt):
<programlisting>
handle_IRQ_event(desc->action);
if (desc->chip->irq_eoi)
desc->chip->irq_eoi();
if (desc->irq_data.chip->irq_ack)
desc->irq_data.chip->irq_ack();
handle_irq_event(desc->action);
if (desc->irq_data.chip->irq_eoi)
desc->irq_data.chip->irq_eoi();
</programlisting>
</para>
</sect3>
<sect3 id="EOI_Edge_IRQ_flow_handler">
<title>EOI Edge IRQ flow handler</title>
<para>
handle_edge_eoi_irq provides an abomination of the edge
handler which is solely used to tame a badly wrecked
irq controller on powerpc/cell.
</para>
</sect3>
<sect3 id="BAD_IRQ_flow_handler">
<title>Bad IRQ flow handler</title>
<para>
handle_bad_irq is used for spurious interrupts which
have no real handler assigned.
</para>
</sect3>
</sect2>
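For orientation only (editor's sketch, not part of this patch): the flow handlers listed in this section are attached per interrupt line by platform code, typically together with the chip. The foo_irq_chip and the irq range are hypothetical:

#include <linux/irq.h>

/* Editor's sketch: pairing a hypothetical chip with flow handlers */
extern struct irq_chip foo_irq_chip;    /* assumed to be defined elsewhere */

static void __init foo_init_irqs(unsigned int irq_base, unsigned int nr_irqs,
                                 unsigned int level_irq)
{
        unsigned int i;

        for (i = irq_base; i < irq_base + nr_irqs; i++) {
                if (i == level_irq)
                        irq_set_chip_and_handler(i, &foo_irq_chip,
                                                 handle_level_irq);
                else
                        irq_set_chip_and_handler(i, &foo_irq_chip,
                                                 handle_edge_irq);
        }
}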
<sect2 id="Quirks_and_optimizations">
<title>Quirks and optimizations</title>
@ -410,6 +429,7 @@ if (desc->chip->irq_eoi)
<listitem><para>irq_mask_ack() - Optional, recommended for performance</para></listitem>
<listitem><para>irq_mask()</para></listitem>
<listitem><para>irq_unmask()</para></listitem>
<listitem><para>irq_eoi() - Optional, required for eoi flow handlers</para></listitem>
<listitem><para>irq_retrigger() - Optional</para></listitem>
<listitem><para>irq_set_type() - Optional</para></listitem>
<listitem><para>irq_set_wake() - Optional</para></listitem>
@ -424,32 +444,24 @@ if (desc->chip->irq_eoi)
<chapter id="doirq">
<title>__do_IRQ entry point</title>
<para>
The original implementation __do_IRQ() is an alternative entry
point for all types of interrupts.
The original implementation __do_IRQ() was an alternative entry
point for all types of interrupts. It no longer exists.
</para>
<para>
This handler turned out to be not suitable for all
interrupt hardware and was therefore reimplemented with split
functionality for egde/level/simple/percpu interrupts. This is not
functionality for edge/level/simple/percpu interrupts. This is not
only a functional optimization. It also shortens code paths for
interrupts.
</para>
<para>
To make use of the split implementation, replace the call to
__do_IRQ by a call to desc->handle_irq() and associate
the appropriate handler function to desc->handle_irq().
In most cases the generic handler implementations should
be sufficient.
</para>
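A minimal sketch of the resulting dispatch path (editor's illustration, not from this patch): low-level code resolves the pending source and calls generic_handle_irq(), which runs whatever flow handler was installed on the descriptor. The foo_ name and the status decoding are assumptions:

#include <linux/bitops.h>
#include <linux/irq.h>

/* Editor's sketch: dispatch pending sources through desc->handle_irq() */
static void foo_handle_pending(unsigned long pending)
{
        unsigned int irq;

        while (pending) {
                irq = __ffs(pending);           /* lowest pending source */
                pending &= ~BIT(irq);
                generic_handle_irq(irq);        /* runs e.g. handle_level_irq */
        }
}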
</chapter>
<chapter id="locking">
<title>Locking on SMP</title>
<para>
The locking of chip registers is up to the architecture that
defines the chip primitives. There is a chip->lock field that can be used
for serialization, but the generic layer does not touch it. The per-irq
structure is protected via desc->lock, by the generic layer.
defines the chip primitives. The per-irq structure is
protected via desc->lock, by the generic layer.
</para>
</chapter>
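A hedged sketch of the split described above (editor's illustration, not part of this patch): the chip serializes access to its own shared registers with a lock it owns, while the generic layer already holds desc->lock around the callback. All foo_ names and the register layout are invented:

#include <linux/io.h>
#include <linux/irq.h>
#include <linux/spinlock.h>

/* Editor's sketch: chip-private lock for a mask register shared by 32 lines */
struct foo_intc {
        raw_spinlock_t  lock;
        void __iomem    *mask_reg;
        unsigned int    irq_base;
};

static void foo_irq_mask(struct irq_data *d)
{
        struct foo_intc *intc = irq_data_get_irq_chip_data(d);
        u32 val;

        raw_spin_lock(&intc->lock);             /* chip register serialization */
        val = readl(intc->mask_reg);
        val &= ~(1U << (d->irq - intc->irq_base));
        writel(val, intc->mask_reg);
        raw_spin_unlock(&intc->lock);
        /* desc->lock is held by the generic layer around this callback */
}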
<chapter id="structs">


@ -29,8 +29,6 @@
#include <mach/common.h>
#include <asm/mach/irq.h>
#define IRQ_BIT(irq) ((irq) & 0x1f)
#define FIQ_REG0_OFFSET 0x0000
#define FIQ_REG1_OFFSET 0x0004
#define IRQ_REG0_OFFSET 0x0008
@ -42,78 +40,33 @@
#define IRQ_INTPRI0_REG_OFFSET 0x0030
#define IRQ_INTPRI7_REG_OFFSET 0x004C
static inline unsigned int davinci_irq_readl(int offset)
{
return __raw_readl(davinci_intc_base + offset);
}
static inline void davinci_irq_writel(unsigned long value, int offset)
{
__raw_writel(value, davinci_intc_base + offset);
}
/* Disable interrupt */
static void davinci_mask_irq(struct irq_data *d)
static __init void
davinci_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num)
{
unsigned int mask;
u32 l;
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
mask = 1 << IRQ_BIT(d->irq);
gc = irq_alloc_generic_chip("AINTC", 1, irq_start, base, handle_edge_irq);
ct = gc->chip_types;
ct->chip.irq_ack = irq_gc_ack;
ct->chip.irq_mask = irq_gc_mask_clr_bit;
ct->chip.irq_unmask = irq_gc_mask_set_bit;
if (d->irq > 31) {
l = davinci_irq_readl(IRQ_ENT_REG1_OFFSET);
l &= ~mask;
davinci_irq_writel(l, IRQ_ENT_REG1_OFFSET);
} else {
l = davinci_irq_readl(IRQ_ENT_REG0_OFFSET);
l &= ~mask;
davinci_irq_writel(l, IRQ_ENT_REG0_OFFSET);
}
ct->regs.ack = IRQ_REG0_OFFSET;
ct->regs.mask = IRQ_ENT_REG0_OFFSET;
irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
IRQ_NOREQUEST | IRQ_NOPROBE, 0);
}
/* Enable interrupt */
static void davinci_unmask_irq(struct irq_data *d)
{
unsigned int mask;
u32 l;
mask = 1 << IRQ_BIT(d->irq);
if (d->irq > 31) {
l = davinci_irq_readl(IRQ_ENT_REG1_OFFSET);
l |= mask;
davinci_irq_writel(l, IRQ_ENT_REG1_OFFSET);
} else {
l = davinci_irq_readl(IRQ_ENT_REG0_OFFSET);
l |= mask;
davinci_irq_writel(l, IRQ_ENT_REG0_OFFSET);
}
}
/* EOI interrupt */
static void davinci_ack_irq(struct irq_data *d)
{
unsigned int mask;
mask = 1 << IRQ_BIT(d->irq);
if (d->irq > 31)
davinci_irq_writel(mask, IRQ_REG1_OFFSET);
else
davinci_irq_writel(mask, IRQ_REG0_OFFSET);
}
static struct irq_chip davinci_irq_chip_0 = {
.name = "AINTC",
.irq_ack = davinci_ack_irq,
.irq_mask = davinci_mask_irq,
.irq_unmask = davinci_unmask_irq,
};
/* ARM Interrupt Controller Initialization */
void __init davinci_irq_init(void)
{
unsigned i;
unsigned i, j;
const u8 *davinci_def_priorities = davinci_soc_info.intc_irq_prios;
davinci_intc_type = DAVINCI_INTC_TYPE_AINTC;
@ -144,7 +97,6 @@ void __init davinci_irq_init(void)
davinci_irq_writel(~0x0, IRQ_REG1_OFFSET);
for (i = IRQ_INTPRI0_REG_OFFSET; i <= IRQ_INTPRI7_REG_OFFSET; i += 4) {
unsigned j;
u32 pri;
for (j = 0, pri = 0; j < 32; j += 4, davinci_def_priorities++)
@ -152,13 +104,8 @@ void __init davinci_irq_init(void)
davinci_irq_writel(pri, i);
}
/* set up genirq dispatch for ARM INTC */
for (i = 0; i < davinci_soc_info.intc_irq_num; i++) {
irq_set_chip(i, &davinci_irq_chip_0);
set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
if (i != IRQ_TINT1_TINT34)
irq_set_handler(i, handle_edge_irq);
else
irq_set_handler(i, handle_level_irq);
}
for (i = 0, j = 0; i < davinci_soc_info.intc_irq_num; i += 32, j += 0x04)
davinci_alloc_gc(davinci_intc_base + j, i, 32);
irq_set_handler(IRQ_TINT1_TINT34, handle_level_irq);
}


@ -53,12 +53,13 @@ typedef void (*irq_preflow_handler_t)(struct irq_data *data);
* Bits which can be modified via irq_set/clear/modify_status_flags()
* IRQ_LEVEL - Interrupt is level type. Will be also
* updated in the code when the above trigger
* bits are modified via set_irq_type()
* bits are modified via irq_set_irq_type()
* IRQ_PER_CPU - Mark an interrupt PER_CPU. Will protect
* it from affinity setting
* IRQ_NOPROBE - Interrupt cannot be probed by autoprobing
* IRQ_NOREQUEST - Interrupt cannot be requested via
* request_irq()
* IRQ_NOTHREAD - Interrupt cannot be threaded
* IRQ_NOAUTOEN - Interrupt is not automatically enabled in
* request/setup_irq()
* IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set)
@ -85,6 +86,7 @@ enum {
IRQ_NO_BALANCING = (1 << 13),
IRQ_MOVE_PCNTXT = (1 << 14),
IRQ_NESTED_THREAD = (1 << 15),
IRQ_NOTHREAD = (1 << 16),
};
#define IRQF_MODIFY_MASK \
@ -261,23 +263,6 @@ static inline void irqd_clr_chained_irq_inprogress(struct irq_data *d)
* struct irq_chip - hardware interrupt chip descriptor
*
* @name: name for /proc/interrupts
* @startup: deprecated, replaced by irq_startup
* @shutdown: deprecated, replaced by irq_shutdown
* @enable: deprecated, replaced by irq_enable
* @disable: deprecated, replaced by irq_disable
* @ack: deprecated, replaced by irq_ack
* @mask: deprecated, replaced by irq_mask
* @mask_ack: deprecated, replaced by irq_mask_ack
* @unmask: deprecated, replaced by irq_unmask
* @eoi: deprecated, replaced by irq_eoi
* @end: deprecated, will go away with __do_IRQ()
* @set_affinity: deprecated, replaced by irq_set_affinity
* @retrigger: deprecated, replaced by irq_retrigger
* @set_type: deprecated, replaced by irq_set_type
* @set_wake: deprecated, replaced by irq_wake
* @bus_lock: deprecated, replaced by irq_bus_lock
* @bus_sync_unlock: deprecated, replaced by irq_bus_sync_unlock
*
* @irq_startup: start up the interrupt (defaults to ->enable if NULL)
* @irq_shutdown: shut down the interrupt (defaults to ->disable if NULL)
* @irq_enable: enable the interrupt (defaults to chip->unmask if NULL)
@ -295,6 +280,9 @@ static inline void irqd_clr_chained_irq_inprogress(struct irq_data *d)
* @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips
* @irq_cpu_online: configure an interrupt source for a secondary CPU
* @irq_cpu_offline: un-configure an interrupt source for a secondary CPU
* @irq_suspend: function called from core code on suspend once per chip
* @irq_resume: function called from core code on resume once per chip
* @irq_pm_shutdown: function called from core code on shutdown once per chip
* @irq_print_chip: optional to print special chip info in show_interrupts
* @flags: chip specific flags
*
@ -324,6 +312,10 @@ struct irq_chip {
void (*irq_cpu_online)(struct irq_data *data);
void (*irq_cpu_offline)(struct irq_data *data);
void (*irq_suspend)(struct irq_data *data);
void (*irq_resume)(struct irq_data *data);
void (*irq_pm_shutdown)(struct irq_data *data);
void (*irq_print_chip)(struct irq_data *data, struct seq_file *p);
unsigned long flags;
@ -439,7 +431,7 @@ irq_set_handler(unsigned int irq, irq_flow_handler_t handle)
/*
* Set a highlevel chained flow handler for a given IRQ.
* (a chained handler is automatically enabled and set to
* IRQ_NOREQUEST and IRQ_NOPROBE)
* IRQ_NOREQUEST, IRQ_NOPROBE, and IRQ_NOTHREAD)
*/
static inline void
irq_set_chained_handler(unsigned int irq, irq_flow_handler_t handle)
@ -469,6 +461,16 @@ static inline void irq_set_probe(unsigned int irq)
irq_modify_status(irq, IRQ_NOPROBE, 0);
}
static inline void irq_set_nothread(unsigned int irq)
{
irq_modify_status(irq, 0, IRQ_NOTHREAD);
}
static inline void irq_set_thread(unsigned int irq)
{
irq_modify_status(irq, IRQ_NOTHREAD, 0);
}
static inline void irq_set_nested_thread(unsigned int irq, bool nest)
{
if (nest)
@ -573,6 +575,145 @@ static inline int irq_reserve_irq(unsigned int irq)
return irq_reserve_irqs(irq, 1);
}
#ifndef irq_reg_writel
# define irq_reg_writel(val, addr) writel(val, addr)
#endif
#ifndef irq_reg_readl
# define irq_reg_readl(addr) readl(addr)
#endif
/**
* struct irq_chip_regs - register offsets for struct irq_chip_type
* @enable: Enable register offset to reg_base
* @disable: Disable register offset to reg_base
* @mask: Mask register offset to reg_base
* @ack: Ack register offset to reg_base
* @eoi: Eoi register offset to reg_base
* @type: Type configuration register offset to reg_base
* @polarity: Polarity configuration register offset to reg_base
*/
struct irq_chip_regs {
unsigned long enable;
unsigned long disable;
unsigned long mask;
unsigned long ack;
unsigned long eoi;
unsigned long type;
unsigned long polarity;
};
/**
* struct irq_chip_type - Generic interrupt chip instance for a flow type
* @chip: The real interrupt chip which provides the callbacks
* @regs: Register offsets for this chip
* @handler: Flow handler associated with this chip
* @type: Chip can handle these flow types
*
* An irq_chip_generic can have several instances of irq_chip_type when
* it requires different functions and register offsets for different
* flow types.
*/
struct irq_chip_type {
struct irq_chip chip;
struct irq_chip_regs regs;
irq_flow_handler_t handler;
u32 type;
};
/**
* struct irq_chip_generic - Generic irq chip data structure
* @lock: Lock to protect register and cache data access
* @reg_base: Register base address (virtual)
* @irq_base: Interrupt base nr for this chip
* @irq_cnt: Number of interrupts handled by this chip
* @mask_cache: Cached mask register
* @type_cache: Cached type register
* @polarity_cache: Cached polarity register
* @wake_enabled: Interrupt can wakeup from suspend
* @wake_active: Interrupt is marked as a wakeup from suspend source
* @num_ct: Number of available irq_chip_type instances (usually 1)
* @private: Private data for non generic chip callbacks
* @list: List head for keeping track of instances
* @chip_types: Array of interrupt irq_chip_types
*
* Note that irq_chip_generic can have multiple irq_chip_type
* implementations which can be associated with a particular irq line of
* an irq_chip_generic instance. That allows state in an irq_chip_generic
* instance to be shared and protected when different flow mechanisms
* (level/edge) need to be implemented for it.
*/
struct irq_chip_generic {
raw_spinlock_t lock;
void __iomem *reg_base;
unsigned int irq_base;
unsigned int irq_cnt;
u32 mask_cache;
u32 type_cache;
u32 polarity_cache;
u32 wake_enabled;
u32 wake_active;
unsigned int num_ct;
void *private;
struct list_head list;
struct irq_chip_type chip_types[0];
};
/**
* enum irq_gc_flags - Initialization flags for generic irq chips
* @IRQ_GC_INIT_MASK_CACHE: Initialize the mask_cache by reading mask reg
* @IRQ_GC_INIT_NESTED_LOCK: Set the lock class of the irqs to nested for
* irq chips which need to call irq_set_wake() on
* the parent irq. Usually used by GPIO implementations.
*/
enum irq_gc_flags {
IRQ_GC_INIT_MASK_CACHE = 1 << 0,
IRQ_GC_INIT_NESTED_LOCK = 1 << 1,
};
/* Generic chip callback functions */
void irq_gc_noop(struct irq_data *d);
void irq_gc_mask_disable_reg(struct irq_data *d);
void irq_gc_mask_set_bit(struct irq_data *d);
void irq_gc_mask_clr_bit(struct irq_data *d);
void irq_gc_unmask_enable_reg(struct irq_data *d);
void irq_gc_ack(struct irq_data *d);
void irq_gc_mask_disable_reg_and_ack(struct irq_data *d);
void irq_gc_eoi(struct irq_data *d);
int irq_gc_set_wake(struct irq_data *d, unsigned int on);
/* Setup functions for irq_chip_generic */
struct irq_chip_generic *
irq_alloc_generic_chip(const char *name, int nr_ct, unsigned int irq_base,
void __iomem *reg_base, irq_flow_handler_t handler);
void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk,
enum irq_gc_flags flags, unsigned int clr,
unsigned int set);
int irq_setup_alt_chip(struct irq_data *d, unsigned int type);
void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk,
unsigned int clr, unsigned int set);
static inline struct irq_chip_type *irq_data_get_chip_type(struct irq_data *d)
{
return container_of(d->chip, struct irq_chip_type, chip);
}
#define IRQ_MSK(n) (u32)((n) < 32 ? ((1 << (n)) - 1) : UINT_MAX)
#ifdef CONFIG_SMP
static inline void irq_gc_lock(struct irq_chip_generic *gc)
{
raw_spin_lock(&gc->lock);
}
static inline void irq_gc_unlock(struct irq_chip_generic *gc)
{
raw_spin_unlock(&gc->lock);
}
#else
static inline void irq_gc_lock(struct irq_chip_generic *gc) { }
static inline void irq_gc_unlock(struct irq_chip_generic *gc) { }
#endif
#endif /* CONFIG_GENERIC_HARDIRQS */
#endif /* !CONFIG_S390 */
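The declarations above are what the DaVinci conversion earlier in this merge builds on. As a rough sketch of the same pattern for a hypothetical memory-mapped controller (the FOO_* names, offsets and irq numbers are assumptions, not part of this patch):

#include <linux/irq.h>

#define FOO_REG_ACK     0x00    /* hypothetical register offsets */
#define FOO_REG_MASK    0x04

static void __init foo_init_gc(void __iomem *base, unsigned int irq_base)
{
        struct irq_chip_generic *gc;
        struct irq_chip_type *ct;

        gc = irq_alloc_generic_chip("FOO", 1, irq_base, base, handle_level_irq);
        ct = gc->chip_types;
        ct->chip.irq_ack = irq_gc_ack;
        ct->chip.irq_mask = irq_gc_mask_clr_bit;
        ct->chip.irq_unmask = irq_gc_mask_set_bit;
        ct->regs.ack = FOO_REG_ACK;
        ct->regs.mask = FOO_REG_MASK;

        irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_MASK_CACHE,
                               IRQ_NOREQUEST | IRQ_NOPROBE, 0);
}

This mirrors davinci_alloc_gc() above; a second irq_chip_type entry (gc->chip_types[1]) plus irq_setup_alt_chip() would cover controllers that need different callbacks per trigger type.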


@ -16,16 +16,18 @@ struct timer_rand_state;
* @irq_data: per irq and chip data passed down to chip functions
* @timer_rand_state: pointer to timer rand state struct
* @kstat_irqs: irq stats per cpu
* @handle_irq: highlevel irq-events handler [if NULL, __do_IRQ()]
* @handle_irq: highlevel irq-events handler
* @preflow_handler: handler called before the flow handler (currently used by sparc)
* @action: the irq action chain
* @status: status information
* @core_internal_state__do_not_mess_with_it: core internal status information
* @depth: disable-depth, for nested irq_disable() calls
* @wake_depth: enable depth, for multiple set_irq_wake() callers
* @wake_depth: enable depth, for multiple irq_set_irq_wake() callers
* @irq_count: stats field to detect stalled irqs
* @last_unhandled: aging timer for unhandled count
* @irqs_unhandled: stats field for spurious unhandled interrupts
* @lock: locking for SMP
* @affinity_hint: hint to user space for preferred irq affinity
* @affinity_notify: context for notification of affinity changes
* @pending_mask: pending rebalanced interrupts
* @threads_oneshot: bitfield to handle shared oneshot threads


@ -1,5 +1,6 @@
obj-y := irqdesc.o handle.o manage.o spurious.o resend.o chip.o dummychip.o devres.o
obj-y += generic-chip.o
obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o


@ -573,6 +573,7 @@ __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
if (handle != handle_bad_irq && is_chained) {
irq_settings_set_noprobe(desc);
irq_settings_set_norequest(desc);
irq_settings_set_nothread(desc);
irq_startup(desc);
}
out:


@ -27,6 +27,7 @@ static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
P(IRQ_PER_CPU);
P(IRQ_NOPROBE);
P(IRQ_NOREQUEST);
P(IRQ_NOTHREAD);
P(IRQ_NOAUTOEN);
PS(IRQS_AUTODETECT);

kernel/irq/generic-chip.c (new file, 354 lines)

@ -0,0 +1,354 @@
/*
* Library implementing the most common irq chip callback functions
*
* Copyright (C) 2011, Thomas Gleixner
*/
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/syscore_ops.h>
#include "internals.h"
static LIST_HEAD(gc_list);
static DEFINE_RAW_SPINLOCK(gc_lock);
static inline struct irq_chip_regs *cur_regs(struct irq_data *d)
{
return &container_of(d->chip, struct irq_chip_type, chip)->regs;
}
/**
* irq_gc_noop - NOOP function
* @d: irq_data
*/
void irq_gc_noop(struct irq_data *d)
{
}
/**
* irq_gc_mask_disable_reg - Mask chip via disable register
* @d: irq_data
*
* Chip has separate enable/disable registers instead of a single mask
* register.
*/
void irq_gc_mask_disable_reg(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
u32 mask = 1 << (d->irq - gc->irq_base);
irq_gc_lock(gc);
irq_reg_writel(mask, gc->reg_base + cur_regs(d)->disable);
gc->mask_cache &= ~mask;
irq_gc_unlock(gc);
}
/**
* irq_gc_mask_set_bit - Mask chip via setting bit in mask register
* @d: irq_data
*
* Chip has a single mask register. Values of this register are cached
* and protected by gc->lock
*/
void irq_gc_mask_set_bit(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
u32 mask = 1 << (d->irq - gc->irq_base);
irq_gc_lock(gc);
gc->mask_cache |= mask;
irq_reg_writel(gc->mask_cache, gc->reg_base + cur_regs(d)->mask);
irq_gc_unlock(gc);
}
/**
* irq_gc_mask_clr_bit - Mask chip via clearing bit in mask register
* @d: irq_data
*
* Chip has a single mask register. Values of this register are cached
* and protected by gc->lock
*/
void irq_gc_mask_clr_bit(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
u32 mask = 1 << (d->irq - gc->irq_base);
irq_gc_lock(gc);
gc->mask_cache &= ~mask;
irq_reg_writel(gc->mask_cache, gc->reg_base + cur_regs(d)->mask);
irq_gc_unlock(gc);
}
/**
* irq_gc_unmask_enable_reg - Unmask chip via enable register
* @d: irq_data
*
* Chip has separate enable/disable registers instead of a single mask
* register.
*/
void irq_gc_unmask_enable_reg(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
u32 mask = 1 << (d->irq - gc->irq_base);
irq_gc_lock(gc);
irq_reg_writel(mask, gc->reg_base + cur_regs(d)->enable);
gc->mask_cache |= mask;
irq_gc_unlock(gc);
}
/**
* irq_gc_ack - Ack pending interrupt
* @d: irq_data
*/
void irq_gc_ack(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
u32 mask = 1 << (d->irq - gc->irq_base);
irq_gc_lock(gc);
irq_reg_writel(mask, gc->reg_base + cur_regs(d)->ack);
irq_gc_unlock(gc);
}
/**
* irq_gc_mask_disable_reg_and_ack - Mask and ack pending interrupt
* @d: irq_data
*/
void irq_gc_mask_disable_reg_and_ack(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
u32 mask = 1 << (d->irq - gc->irq_base);
irq_gc_lock(gc);
irq_reg_writel(mask, gc->reg_base + cur_regs(d)->mask);
irq_reg_writel(mask, gc->reg_base + cur_regs(d)->ack);
irq_gc_unlock(gc);
}
/**
* irq_gc_eoi - EOI interrupt
* @d: irq_data
*/
void irq_gc_eoi(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
u32 mask = 1 << (d->irq - gc->irq_base);
irq_gc_lock(gc);
irq_reg_writel(mask, gc->reg_base + cur_regs(d)->eoi);
irq_gc_unlock(gc);
}
/**
* irq_gc_set_wake - Set/clr wake bit for an interrupt
* @d: irq_data
*
* For chips where the wake from suspend functionality is not
* configured in a separate register and the wakeup active state is
* just stored in a bitmask.
*/
int irq_gc_set_wake(struct irq_data *d, unsigned int on)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
u32 mask = 1 << (d->irq - gc->irq_base);
if (!(mask & gc->wake_enabled))
return -EINVAL;
irq_gc_lock(gc);
if (on)
gc->wake_active |= mask;
else
gc->wake_active &= ~mask;
irq_gc_unlock(gc);
return 0;
}
/**
* irq_alloc_generic_chip - Allocate a generic chip and initialize it
* @name: Name of the irq chip
* @num_ct: Number of irq_chip_type instances associated with this chip
* @irq_base: Interrupt base nr for this chip
* @reg_base: Register base address (virtual)
* @handler: Default flow handler associated with this chip
*
* Returns an initialized irq_chip_generic structure. The chip defaults
* to the primary (index 0) irq_chip_type and @handler
*/
struct irq_chip_generic *
irq_alloc_generic_chip(const char *name, int num_ct, unsigned int irq_base,
void __iomem *reg_base, irq_flow_handler_t handler)
{
struct irq_chip_generic *gc;
unsigned long sz = sizeof(*gc) + num_ct * sizeof(struct irq_chip_type);
gc = kzalloc(sz, GFP_KERNEL);
if (gc) {
raw_spin_lock_init(&gc->lock);
gc->num_ct = num_ct;
gc->irq_base = irq_base;
gc->reg_base = reg_base;
gc->chip_types->chip.name = name;
gc->chip_types->handler = handler;
}
return gc;
}
/*
* Separate lockdep class for interrupt chip which can nest irq_desc
* lock.
*/
static struct lock_class_key irq_nested_lock_class;
/**
* irq_setup_generic_chip - Setup a range of interrupts with a generic chip
* @gc: Generic irq chip holding all data
* @msk: Bitmask holding the irqs to initialize relative to gc->irq_base
* @flags: Flags for initialization
* @clr: IRQ_* bits to clear
* @set: IRQ_* bits to set
*
* Set up max. 32 interrupts starting from gc->irq_base. Note, this
* initializes all interrupts to the primary irq_chip_type and its
* associated handler.
*/
void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk,
enum irq_gc_flags flags, unsigned int clr,
unsigned int set)
{
struct irq_chip_type *ct = gc->chip_types;
unsigned int i;
raw_spin_lock(&gc_lock);
list_add_tail(&gc->list, &gc_list);
raw_spin_unlock(&gc_lock);
/* Init mask cache ? */
if (flags & IRQ_GC_INIT_MASK_CACHE)
gc->mask_cache = irq_reg_readl(gc->reg_base + ct->regs.mask);
for (i = gc->irq_base; msk; msk >>= 1, i++) {
if (!(msk & 0x01))
continue;
if (flags & IRQ_GC_INIT_NESTED_LOCK)
irq_set_lockdep_class(i, &irq_nested_lock_class);
irq_set_chip_and_handler(i, &ct->chip, ct->handler);
irq_set_chip_data(i, gc);
irq_modify_status(i, clr, set);
}
gc->irq_cnt = i - gc->irq_base;
}
/**
* irq_setup_alt_chip - Switch to alternative chip
* @d: irq_data for this interrupt
* @type: Flow type to be initialized
*
* Only to be called from chip->irq_set_type() callbacks.
*/
int irq_setup_alt_chip(struct irq_data *d, unsigned int type)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct irq_chip_type *ct = gc->chip_types;
unsigned int i;
for (i = 0; i < gc->num_ct; i++, ct++) {
if (ct->type & type) {
d->chip = &ct->chip;
irq_data_to_desc(d)->handle_irq = ct->handler;
return 0;
}
}
return -EINVAL;
}
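/*
 * Editor's sketch, not part of this patch: a chip with separate edge and
 * level irq_chip_type instances can switch between them from its
 * irq_set_type() callback. The foo_ name is hypothetical.
 */
static int foo_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
        /* updates d->chip and desc->handle_irq to the matching chip_type */
        return irq_setup_alt_chip(d, flow_type);
}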
/**
* irq_remove_generic_chip - Remove a chip
* @gc: Generic irq chip holding all data
* @msk: Bitmask holding the irqs to remove relative to gc->irq_base
* @clr: IRQ_* bits to clear
* @set: IRQ_* bits to set
*
* Remove up to 32 interrupts starting from gc->irq_base.
*/
void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk,
unsigned int clr, unsigned int set)
{
unsigned int i = gc->irq_base;
raw_spin_lock(&gc_lock);
list_del(&gc->list);
raw_spin_unlock(&gc_lock);
for (; msk; msk >>= 1, i++) {
if (!(msk & 0x01))
continue;
/* Remove handler first. That will mask the irq line */
irq_set_handler(i, NULL);
irq_set_chip(i, &no_irq_chip);
irq_set_chip_data(i, NULL);
irq_modify_status(i, clr, set);
}
}
#ifdef CONFIG_PM
static int irq_gc_suspend(void)
{
struct irq_chip_generic *gc;
list_for_each_entry(gc, &gc_list, list) {
struct irq_chip_type *ct = gc->chip_types;
if (ct->chip.irq_suspend)
ct->chip.irq_suspend(irq_get_irq_data(gc->irq_base));
}
return 0;
}
static void irq_gc_resume(void)
{
struct irq_chip_generic *gc;
list_for_each_entry(gc, &gc_list, list) {
struct irq_chip_type *ct = gc->chip_types;
if (ct->chip.irq_resume)
ct->chip.irq_resume(irq_get_irq_data(gc->irq_base));
}
}
#else
#define irq_gc_suspend NULL
#define irq_gc_resume NULL
#endif
static void irq_gc_shutdown(void)
{
struct irq_chip_generic *gc;
list_for_each_entry(gc, &gc_list, list) {
struct irq_chip_type *ct = gc->chip_types;
if (ct->chip.irq_pm_shutdown)
ct->chip.irq_pm_shutdown(irq_get_irq_data(gc->irq_base));
}
}
static struct syscore_ops irq_gc_syscore_ops = {
.suspend = irq_gc_suspend,
.resume = irq_gc_resume,
.shutdown = irq_gc_shutdown,
};
static int __init irq_gc_init_ops(void)
{
register_syscore_ops(&irq_gc_syscore_ops);
return 0;
}
device_initcall(irq_gc_init_ops);
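For illustration (editor's sketch, not part of this patch): the syscore hooks above invoke the irq_suspend/irq_resume/irq_pm_shutdown callbacks of the primary irq_chip_type once per registered generic chip, so a driver only has to supply them, for example to re-program lost hardware state on resume. The foo_ name is invented:

#include <linux/irq.h>

/* Editor's sketch: restore the cached mask register after resume */
static void foo_gc_resume(struct irq_data *d)
{
        struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
        struct irq_chip_type *ct = irq_data_get_chip_type(d);

        irq_reg_writel(gc->mask_cache, gc->reg_base + ct->regs.mask);
}

/* wired up at init time, after irq_alloc_generic_chip():
 *      ct->chip.irq_resume = foo_gc_resume;
 */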


@ -900,7 +900,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
*/
new->handler = irq_nested_primary_handler;
} else {
irq_setup_forced_threading(new);
if (irq_settings_can_thread(desc))
irq_setup_forced_threading(new);
}
/*


@ -8,6 +8,7 @@ enum {
_IRQ_LEVEL = IRQ_LEVEL,
_IRQ_NOPROBE = IRQ_NOPROBE,
_IRQ_NOREQUEST = IRQ_NOREQUEST,
_IRQ_NOTHREAD = IRQ_NOTHREAD,
_IRQ_NOAUTOEN = IRQ_NOAUTOEN,
_IRQ_MOVE_PCNTXT = IRQ_MOVE_PCNTXT,
_IRQ_NO_BALANCING = IRQ_NO_BALANCING,
@ -20,6 +21,7 @@ enum {
#define IRQ_LEVEL GOT_YOU_MORON
#define IRQ_NOPROBE GOT_YOU_MORON
#define IRQ_NOREQUEST GOT_YOU_MORON
#define IRQ_NOTHREAD GOT_YOU_MORON
#define IRQ_NOAUTOEN GOT_YOU_MORON
#define IRQ_NESTED_THREAD GOT_YOU_MORON
#undef IRQF_MODIFY_MASK
@ -94,6 +96,21 @@ static inline void irq_settings_set_norequest(struct irq_desc *desc)
desc->status_use_accessors |= _IRQ_NOREQUEST;
}
static inline bool irq_settings_can_thread(struct irq_desc *desc)
{
return !(desc->status_use_accessors & _IRQ_NOTHREAD);
}
static inline void irq_settings_clr_nothread(struct irq_desc *desc)
{
desc->status_use_accessors &= ~_IRQ_NOTHREAD;
}
static inline void irq_settings_set_nothread(struct irq_desc *desc)
{
desc->status_use_accessors |= _IRQ_NOTHREAD;
}
static inline bool irq_settings_can_probe(struct irq_desc *desc)
{
return !(desc->status_use_accessors & _IRQ_NOPROBE);