Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (96 commits)
  apic, x86: Use BIOS settings for IBS and MCE threshold interrupt LVT offsets
  apic, x86: Check if EILVT APIC registers are available (AMD only)
  x86: ioapic: Call free_irte only if interrupt remapping enabled
  arm: Use ARCH_IRQ_INIT_FLAGS
  genirq, ARM: Fix boot on ARM platforms
  genirq: Fix CONFIG_GENIRQ_NO_DEPRECATED=y build
  x86: Switch sparse_irq allocations to GFP_KERNEL
  genirq: Switch sparse_irq allocator to GFP_KERNEL
  genirq: Make sparse_lock a mutex
  x86: lguest: Use new irq allocator
  genirq: Remove the now unused sparse irq leftovers
  genirq: Sanitize dynamic irq handling
  genirq: Remove arch_init_chip_data()
  x86: xen: Sanitise sparse_irq handling
  x86: Use sane enumeration
  x86: uv: Clean up the direct access to irq_desc
  x86: Make io_apic.c local functions static
  genirq: Remove irq_2_iommu
  x86: Speed up the irq_remapped check in hot pathes
  intr_remap: Simplify the code further
  ...

Fix up trivial conflicts in arch/x86/Kconfig
commit 4a60cfa945
83 changed files with 2177 additions and 2268 deletions
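The common thread in the diff below is the genirq migration from irq-number-based chip callbacks (.mask, .ack, .typename, ...) to irq_data-based ones (.irq_mask, .irq_ack, .name, ...). As an illustrative sketch only — a hypothetical "foo" driver, not code from this merge — the conversion pattern looks like this:

```c
#include <linux/irq.h>
#include <linux/io.h>

/* Hypothetical per-controller state, for illustration. */
struct foo_pic {
	void __iomem *mask_reg;
	void __iomem *unmask_reg;
};

/* Old style: chip callbacks received the raw irq number, e.g.
 *	static void foo_mask(unsigned int irq);
 *
 * New style: callbacks receive struct irq_data, which carries the irq
 * number plus chip/handler private data, so no global lookup is needed. */
static void foo_irq_mask(struct irq_data *data)
{
	struct foo_pic *pic = data->chip_data;	/* driver state */

	writel(1 << data->irq, pic->mask_reg);	/* mask the line */
}

static void foo_irq_unmask(struct irq_data *data)
{
	struct foo_pic *pic = data->chip_data;

	writel(1 << data->irq, pic->unmask_reg);
}

static struct irq_chip foo_chip = {
	.name		= "FOO",		/* .typename is replaced by .name */
	.irq_mask	= foo_irq_mask,		/* was .mask */
	.irq_unmask	= foo_irq_unmask,	/* was .unmask */
};
```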
@@ -28,7 +28,7 @@
 	</authorgroup>

 	<copyright>
-	<year>2005-2006</year>
+	<year>2005-2010</year>
 	<holder>Thomas Gleixner</holder>
 	</copyright>
 	<copyright>
@@ -100,6 +100,10 @@
 	<listitem><para>Edge type</para></listitem>
 	<listitem><para>Simple type</para></listitem>
 	</itemizedlist>
+	During the implementation we identified another type:
+	<itemizedlist>
+	<listitem><para>Fast EOI type</para></listitem>
+	</itemizedlist>
 	In the SMP world of the __do_IRQ() super-handler another type
 	was identified:
 	<itemizedlist>
@@ -153,6 +157,7 @@
 	is still available. This leads to a kind of duality for the time
 	being. Over time the new model should be used in more and more
 	architectures, as it enables smaller and cleaner IRQ subsystems.
+	It's deprecated for three years now and about to be removed.
 	</para>
 	</chapter>
 	<chapter id="bugs">
@@ -217,6 +222,7 @@
 	<itemizedlist>
 	<listitem><para>handle_level_irq</para></listitem>
 	<listitem><para>handle_edge_irq</para></listitem>
+	<listitem><para>handle_fasteoi_irq</para></listitem>
 	<listitem><para>handle_simple_irq</para></listitem>
 	<listitem><para>handle_percpu_irq</para></listitem>
 	</itemizedlist>
@@ -233,33 +239,33 @@
 	are used by the default flow implementations.
 	The following helper functions are implemented (simplified excerpt):
 	<programlisting>
-default_enable(irq)
+default_enable(struct irq_data *data)
 {
-	desc->chip->unmask(irq);
+	desc->chip->irq_unmask(data);
 }

-default_disable(irq)
+default_disable(struct irq_data *data)
 {
-	if (!delay_disable(irq))
-		desc->chip->mask(irq);
+	if (!delay_disable(data))
+		desc->chip->irq_mask(data);
 }

-default_ack(irq)
+default_ack(struct irq_data *data)
 {
-	chip->ack(irq);
+	chip->irq_ack(data);
 }

-default_mask_ack(irq)
+default_mask_ack(struct irq_data *data)
 {
-	if (chip->mask_ack) {
-		chip->mask_ack(irq);
+	if (chip->irq_mask_ack) {
+		chip->irq_mask_ack(data);
 	} else {
-		chip->mask(irq);
-		chip->ack(irq);
+		chip->irq_mask(data);
+		chip->irq_ack(data);
 	}
 }

-noop(irq)
+noop(struct irq_data *data))
 {
 }

@@ -278,12 +284,27 @@ noop(irq)
 	<para>
 	The following control flow is implemented (simplified excerpt):
 	<programlisting>
-desc->chip->start();
+desc->chip->irq_mask();
 handle_IRQ_event(desc->action);
-desc->chip->end();
+desc->chip->irq_unmask();
 	</programlisting>
 	</para>
 	</sect3>
+	<sect3 id="Default_FASTEOI_IRQ_flow_handler">
+	<title>Default Fast EOI IRQ flow handler</title>
+	<para>
+	handle_fasteoi_irq provides a generic implementation
+	for interrupts, which only need an EOI at the end of
+	the handler
+	</para>
+	<para>
+	The following control flow is implemented (simplified excerpt):
+	<programlisting>
+handle_IRQ_event(desc->action);
+desc->chip->irq_eoi();
+	</programlisting>
+	</para>
+	</sect3>
 	<sect3 id="Default_Edge_IRQ_flow_handler">
 	<title>Default Edge IRQ flow handler</title>
 	<para>
@@ -294,20 +315,19 @@ desc->chip->end();
 	The following control flow is implemented (simplified excerpt):
 	<programlisting>
 if (desc->status & running) {
-	desc->chip->hold();
+	desc->chip->irq_mask();
 	desc->status |= pending | masked;
 	return;
 }
-desc->chip->start();
+desc->chip->irq_ack();
 desc->status |= running;
 do {
 	if (desc->status & masked)
-		desc->chip->enable();
+		desc->chip->irq_unmask();
 	desc->status &= ~pending;
 	handle_IRQ_event(desc->action);
 } while (status & pending);
 desc->status &= ~running;
-desc->chip->end();
 	</programlisting>
 	</para>
 	</sect3>
@@ -342,9 +362,9 @@ handle_IRQ_event(desc->action);
 	<para>
 	The following control flow is implemented (simplified excerpt):
 	<programlisting>
-desc->chip->start();
 handle_IRQ_event(desc->action);
-desc->chip->end();
+if (desc->chip->irq_eoi)
+	desc->chip->irq_eoi();
 	</programlisting>
 	</para>
 	</sect3>
@@ -375,8 +395,7 @@ desc->chip->end();
 	mechanism. (It's necessary to enable CONFIG_HARDIRQS_SW_RESEND when
 	you want to use the delayed interrupt disable feature and your
 	hardware is not capable of retriggering an interrupt.)
-	The delayed interrupt disable can be runtime enabled, per interrupt,
-	by setting the IRQ_DELAYED_DISABLE flag in the irq_desc status field.
+	The delayed interrupt disable is not configurable.
 	</para>
 	</sect2>
 	</sect1>
@@ -387,13 +406,13 @@ desc->chip->end();
 	contains all the direct chip relevant functions, which
 	can be utilized by the irq flow implementations.
 	<itemizedlist>
-	<listitem><para>ack()</para></listitem>
-	<listitem><para>mask_ack() - Optional, recommended for performance</para></listitem>
-	<listitem><para>mask()</para></listitem>
-	<listitem><para>unmask()</para></listitem>
-	<listitem><para>retrigger() - Optional</para></listitem>
-	<listitem><para>set_type() - Optional</para></listitem>
-	<listitem><para>set_wake() - Optional</para></listitem>
+	<listitem><para>irq_ack()</para></listitem>
+	<listitem><para>irq_mask_ack() - Optional, recommended for performance</para></listitem>
+	<listitem><para>irq_mask()</para></listitem>
+	<listitem><para>irq_unmask()</para></listitem>
+	<listitem><para>irq_retrigger() - Optional</para></listitem>
+	<listitem><para>irq_set_type() - Optional</para></listitem>
+	<listitem><para>irq_set_wake() - Optional</para></listitem>
 	</itemizedlist>
 	These primitives are strictly intended to mean what they say: ack means
 	ACK, masking means masking of an IRQ line, etc. It is up to the flow
@@ -458,6 +477,7 @@ desc->chip->end();
 	<para>
 	This chapter contains the autogenerated documentation of the internal functions.
 	</para>
+!Ikernel/irq/irqdesc.c
 !Ikernel/irq/handle.c
 !Ikernel/irq/chip.c
 </chapter>
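The DocBook text above pairs a chip with a high-level flow handler. A minimal sketch of that registration step, assuming the hypothetical foo_chip and FOO_IRQ from the earlier sketch, and using the same call the x86 code later in this diff uses for the 8259:

```c
#include <linux/irq.h>

/* handle_level_irq is the default level-type flow handler documented
 * above; foo_chip and FOO_IRQ are placeholders, not kernel symbols. */
static void __init foo_init_irq(void)
{
	set_irq_chip_and_handler_name(FOO_IRQ, &foo_chip,
				      handle_level_irq, foo_chip.name);
}
```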
@@ -3241,6 +3241,12 @@ F: drivers/net/irda/
 F:	include/net/irda/
 F:	net/irda/

+IRQ SUBSYSTEM
+M:	Thomas Gleixner <tglx@linutronix.de>
+S:	Maintained
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip.git irq/core
+F:	kernel/irq/
+
 ISAPNP
 M:	Jaroslav Kysela <perex@perex.cz>
 S:	Maintained
@@ -24,4 +24,6 @@ void set_irq_flags(unsigned int irq, unsigned int flags);
 #define IRQF_PROBE	(1 << 1)
 #define IRQF_NOAUTOEN	(1 << 2)

+#define ARCH_IRQ_INIT_FLAGS	(IRQ_NOREQUEST | IRQ_NOPROBE)
+
 #endif
@@ -154,14 +154,6 @@ void set_irq_flags(unsigned int irq, unsigned int iflags)

 void __init init_IRQ(void)
 {
-	struct irq_desc *desc;
-	int irq;
-
-	for (irq = 0; irq < nr_irqs; irq++) {
-		desc = irq_to_desc_alloc_node(irq, 0);
-		desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE;
-	}
-
 	init_arch_irq();
 }

@@ -169,7 +161,7 @@ void __init init_IRQ(void)
 int __init arch_probe_nr_irqs(void)
 {
 	nr_irqs = arch_nr_irqs ? arch_nr_irqs : NR_IRQS;
-	return 0;
+	return nr_irqs;
 }
 #endif
@@ -67,21 +67,21 @@ static void bcmring_unmask_irq2(unsigned int irq)
 }

 static struct irq_chip bcmring_irq0_chip = {
-	.typename = "ARM-INTC0",
+	.name = "ARM-INTC0",
 	.ack = bcmring_mask_irq0,
 	.mask = bcmring_mask_irq0,	/* mask a specific interrupt, blocking its delivery. */
 	.unmask = bcmring_unmask_irq0,	/* unmaks an interrupt */
 };

 static struct irq_chip bcmring_irq1_chip = {
-	.typename = "ARM-INTC1",
+	.name = "ARM-INTC1",
 	.ack = bcmring_mask_irq1,
 	.mask = bcmring_mask_irq1,
 	.unmask = bcmring_unmask_irq1,
 };

 static struct irq_chip bcmring_irq2_chip = {
-	.typename = "ARM-SINTC",
+	.name = "ARM-SINTC",
 	.ack = bcmring_mask_irq2,
 	.mask = bcmring_mask_irq2,
 	.unmask = bcmring_unmask_irq2,
@@ -164,10 +164,10 @@ static void iop13xx_msi_nop(unsigned int irq)
 static struct irq_chip iop13xx_msi_chip = {
 	.name = "PCI-MSI",
 	.ack = iop13xx_msi_nop,
-	.enable = unmask_msi_irq,
-	.disable = mask_msi_irq,
-	.mask = mask_msi_irq,
-	.unmask = unmask_msi_irq,
+	.irq_enable = unmask_msi_irq,
+	.irq_disable = mask_msi_irq,
+	.irq_mask = mask_msi_irq,
+	.irq_unmask = unmask_msi_irq,
 };

 int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
@@ -104,8 +104,8 @@ static int ia64_msi_retrigger_irq(unsigned int irq)
  */
 static struct irq_chip ia64_msi_chip = {
 	.name		= "PCI-MSI",
-	.mask		= mask_msi_irq,
-	.unmask		= unmask_msi_irq,
+	.irq_mask	= mask_msi_irq,
+	.irq_unmask	= unmask_msi_irq,
 	.ack		= ia64_ack_msi_irq,
 #ifdef CONFIG_SMP
 	.set_affinity	= ia64_set_msi_irq_affinity,
@@ -160,8 +160,8 @@ static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)

 static struct irq_chip dmar_msi_type = {
 	.name = "DMAR_MSI",
-	.unmask = dmar_msi_unmask,
-	.mask = dmar_msi_mask,
+	.irq_unmask = dmar_msi_unmask,
+	.irq_mask = dmar_msi_mask,
 	.ack = ia64_ack_msi_irq,
 #ifdef CONFIG_SMP
 	.set_affinity = dmar_msi_set_affinity,
@@ -228,8 +228,8 @@ static int sn_msi_retrigger_irq(unsigned int irq)

 static struct irq_chip sn_msi_chip = {
 	.name		= "PCI-MSI",
-	.mask		= mask_msi_irq,
-	.unmask		= unmask_msi_irq,
+	.irq_mask	= mask_msi_irq,
+	.irq_unmask	= unmask_msi_irq,
 	.ack		= sn_ack_msi_irq,
 #ifdef CONFIG_SMP
 	.set_affinity	= sn_set_msi_irq_affinity,
@@ -51,7 +51,7 @@ int show_interrupts(struct seq_file *p, void *v)
 		for_each_online_cpu(j)
 			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
 #endif
-		seq_printf(p, " %14s", irq_desc[i].chip->typename);
+		seq_printf(p, " %14s", irq_desc[i].chip->name);
 		seq_printf(p, " %s", action->name);

 		for (action=action->next; action; action = action->next)
@@ -65,7 +65,7 @@ static void shutdown_m32104ut_irq(unsigned int irq)

 static struct irq_chip m32104ut_irq_type =
 {
-	.typename = "M32104UT-IRQ",
+	.name = "M32104UT-IRQ",
 	.startup = startup_m32104ut_irq,
 	.shutdown = shutdown_m32104ut_irq,
 	.enable = enable_m32104ut_irq,
@@ -71,7 +71,7 @@ static void shutdown_m32700ut_irq(unsigned int irq)

 static struct irq_chip m32700ut_irq_type =
 {
-	.typename = "M32700UT-IRQ",
+	.name = "M32700UT-IRQ",
 	.startup = startup_m32700ut_irq,
 	.shutdown = shutdown_m32700ut_irq,
 	.enable = enable_m32700ut_irq,
@@ -148,7 +148,7 @@ static void shutdown_m32700ut_pld_irq(unsigned int irq)

 static struct irq_chip m32700ut_pld_irq_type =
 {
-	.typename = "M32700UT-PLD-IRQ",
+	.name = "M32700UT-PLD-IRQ",
 	.startup = startup_m32700ut_pld_irq,
 	.shutdown = shutdown_m32700ut_pld_irq,
 	.enable = enable_m32700ut_pld_irq,
@@ -217,7 +217,7 @@ static void shutdown_m32700ut_lanpld_irq(unsigned int irq)

 static struct irq_chip m32700ut_lanpld_irq_type =
 {
-	.typename = "M32700UT-PLD-LAN-IRQ",
+	.name = "M32700UT-PLD-LAN-IRQ",
 	.startup = startup_m32700ut_lanpld_irq,
 	.shutdown = shutdown_m32700ut_lanpld_irq,
 	.enable = enable_m32700ut_lanpld_irq,
@@ -286,7 +286,7 @@ static void shutdown_m32700ut_lcdpld_irq(unsigned int irq)

 static struct irq_chip m32700ut_lcdpld_irq_type =
 {
-	.typename = "M32700UT-PLD-LCD-IRQ",
+	.name = "M32700UT-PLD-LCD-IRQ",
 	.startup = startup_m32700ut_lcdpld_irq,
 	.shutdown = shutdown_m32700ut_lcdpld_irq,
 	.enable = enable_m32700ut_lcdpld_irq,
@@ -65,7 +65,7 @@ static void shutdown_mappi_irq(unsigned int irq)

 static struct irq_chip mappi_irq_type =
 {
-	.typename = "MAPPI-IRQ",
+	.name = "MAPPI-IRQ",
 	.startup = startup_mappi_irq,
 	.shutdown = shutdown_mappi_irq,
 	.enable = enable_mappi_irq,
@@ -72,7 +72,7 @@ static void shutdown_mappi2_irq(unsigned int irq)

 static struct irq_chip mappi2_irq_type =
 {
-	.typename = "MAPPI2-IRQ",
+	.name = "MAPPI2-IRQ",
 	.startup = startup_mappi2_irq,
 	.shutdown = shutdown_mappi2_irq,
 	.enable = enable_mappi2_irq,
@@ -72,7 +72,7 @@ static void shutdown_mappi3_irq(unsigned int irq)

 static struct irq_chip mappi3_irq_type =
 {
-	.typename = "MAPPI3-IRQ",
+	.name = "MAPPI3-IRQ",
 	.startup = startup_mappi3_irq,
 	.shutdown = shutdown_mappi3_irq,
 	.enable = enable_mappi3_irq,
@@ -63,7 +63,7 @@ static void shutdown_oaks32r_irq(unsigned int irq)

 static struct irq_chip oaks32r_irq_type =
 {
-	.typename = "OAKS32R-IRQ",
+	.name = "OAKS32R-IRQ",
 	.startup = startup_oaks32r_irq,
 	.shutdown = shutdown_oaks32r_irq,
 	.enable = enable_oaks32r_irq,
@@ -72,7 +72,7 @@ static void shutdown_opsput_irq(unsigned int irq)

 static struct irq_chip opsput_irq_type =
 {
-	.typename = "OPSPUT-IRQ",
+	.name = "OPSPUT-IRQ",
 	.startup = startup_opsput_irq,
 	.shutdown = shutdown_opsput_irq,
 	.enable = enable_opsput_irq,
@@ -149,7 +149,7 @@ static void shutdown_opsput_pld_irq(unsigned int irq)

 static struct irq_chip opsput_pld_irq_type =
 {
-	.typename = "OPSPUT-PLD-IRQ",
+	.name = "OPSPUT-PLD-IRQ",
 	.startup = startup_opsput_pld_irq,
 	.shutdown = shutdown_opsput_pld_irq,
 	.enable = enable_opsput_pld_irq,
@@ -218,7 +218,7 @@ static void shutdown_opsput_lanpld_irq(unsigned int irq)

 static struct irq_chip opsput_lanpld_irq_type =
 {
-	.typename = "OPSPUT-PLD-LAN-IRQ",
+	.name = "OPSPUT-PLD-LAN-IRQ",
 	.startup = startup_opsput_lanpld_irq,
 	.shutdown = shutdown_opsput_lanpld_irq,
 	.enable = enable_opsput_lanpld_irq,
@@ -63,7 +63,7 @@ static void shutdown_mappi_irq(unsigned int irq)

 static struct irq_chip mappi_irq_type =
 {
-	.typename = "M32700-IRQ",
+	.name = "M32700-IRQ",
 	.startup = startup_mappi_irq,
 	.shutdown = shutdown_mappi_irq,
 	.enable = enable_mappi_irq,
@@ -136,7 +136,7 @@ static void shutdown_m32700ut_pld_irq(unsigned int irq)

 static struct irq_chip m32700ut_pld_irq_type =
 {
-	.typename = "USRV-PLD-IRQ",
+	.name = "USRV-PLD-IRQ",
 	.startup = startup_m32700ut_pld_irq,
 	.shutdown = shutdown_m32700ut_pld_irq,
 	.enable = enable_m32700ut_pld_irq,
@@ -310,9 +310,9 @@ static void axon_msi_teardown_msi_irqs(struct pci_dev *dev)
 }

 static struct irq_chip msic_irq_chip = {
-	.mask		= mask_msi_irq,
-	.unmask		= unmask_msi_irq,
-	.shutdown	= unmask_msi_irq,
+	.irq_mask	= mask_msi_irq,
+	.irq_unmask	= unmask_msi_irq,
+	.irq_shutdown	= mask_msi_irq,
 	.name		= "AXON-MSI",
 };
@@ -243,7 +243,7 @@ static unsigned int xics_startup(unsigned int virq)
 	 * at that level, so we do it here by hand.
 	 */
 	if (irq_to_desc(virq)->msi_desc)
-		unmask_msi_irq(virq);
+		unmask_msi_irq(irq_get_irq_data(virq));

 	/* unmask it */
 	xics_unmask_irq(virq);
@@ -51,8 +51,8 @@ static void fsl_msi_end_irq(unsigned int virq)
 }

 static struct irq_chip fsl_msi_chip = {
-	.mask		= mask_msi_irq,
-	.unmask		= unmask_msi_irq,
+	.irq_mask	= mask_msi_irq,
+	.irq_unmask	= unmask_msi_irq,
 	.ack		= fsl_msi_end_irq,
 	.name		= "FSL-MSI",
 };
@@ -39,24 +39,24 @@
 static struct mpic *msi_mpic;


-static void mpic_pasemi_msi_mask_irq(unsigned int irq)
+static void mpic_pasemi_msi_mask_irq(struct irq_data *data)
 {
-	pr_debug("mpic_pasemi_msi_mask_irq %d\n", irq);
-	mask_msi_irq(irq);
-	mpic_mask_irq(irq);
+	pr_debug("mpic_pasemi_msi_mask_irq %d\n", data->irq);
+	mask_msi_irq(data);
+	mpic_mask_irq(data->irq);
 }

-static void mpic_pasemi_msi_unmask_irq(unsigned int irq)
+static void mpic_pasemi_msi_unmask_irq(struct irq_data *data)
 {
-	pr_debug("mpic_pasemi_msi_unmask_irq %d\n", irq);
-	mpic_unmask_irq(irq);
-	unmask_msi_irq(irq);
+	pr_debug("mpic_pasemi_msi_unmask_irq %d\n", data->irq);
+	mpic_unmask_irq(data->irq);
+	unmask_msi_irq(data);
 }

 static struct irq_chip mpic_pasemi_msi_chip = {
-	.shutdown	= mpic_pasemi_msi_mask_irq,
-	.mask		= mpic_pasemi_msi_mask_irq,
-	.unmask		= mpic_pasemi_msi_unmask_irq,
+	.irq_shutdown	= mpic_pasemi_msi_mask_irq,
+	.irq_mask	= mpic_pasemi_msi_mask_irq,
+	.irq_unmask	= mpic_pasemi_msi_unmask_irq,
 	.eoi		= mpic_end_irq,
 	.set_type	= mpic_set_irq_type,
 	.set_affinity	= mpic_set_affinity,
@@ -23,22 +23,22 @@
 /* A bit ugly, can we get this from the pci_dev somehow? */
 static struct mpic *msi_mpic;

-static void mpic_u3msi_mask_irq(unsigned int irq)
+static void mpic_u3msi_mask_irq(struct irq_data *data)
 {
-	mask_msi_irq(irq);
-	mpic_mask_irq(irq);
+	mask_msi_irq(data);
+	mpic_mask_irq(data->irq);
 }

-static void mpic_u3msi_unmask_irq(unsigned int irq)
+static void mpic_u3msi_unmask_irq(struct irq_data *data)
 {
-	mpic_unmask_irq(irq);
-	unmask_msi_irq(irq);
+	mpic_unmask_irq(data->irq);
+	unmask_msi_irq(data);
 }

 static struct irq_chip mpic_u3msi_chip = {
-	.shutdown	= mpic_u3msi_mask_irq,
-	.mask		= mpic_u3msi_mask_irq,
-	.unmask		= mpic_u3msi_unmask_irq,
+	.irq_shutdown	= mpic_u3msi_mask_irq,
+	.irq_mask	= mpic_u3msi_mask_irq,
+	.irq_unmask	= mpic_u3msi_unmask_irq,
 	.eoi		= mpic_end_irq,
 	.set_type	= mpic_set_irq_type,
 	.set_affinity	= mpic_set_affinity,
@@ -290,7 +290,7 @@ void __init init_IRQ(void)
 int __init arch_probe_nr_irqs(void)
 {
 	nr_irqs = sh_mv.mv_nr_irqs;
-	return 0;
+	return NR_IRQS_LEGACY;
 }
 #endif
@@ -114,10 +114,10 @@ static void free_msi(struct pci_pbm_info *pbm, int msi_num)

 static struct irq_chip msi_irq = {
 	.name		= "PCI-MSI",
-	.mask		= mask_msi_irq,
-	.unmask		= unmask_msi_irq,
-	.enable		= unmask_msi_irq,
-	.disable	= mask_msi_irq,
+	.irq_mask	= mask_msi_irq,
+	.irq_unmask	= unmask_msi_irq,
+	.irq_enable	= unmask_msi_irq,
+	.irq_disable	= mask_msi_irq,
 	/* XXX affinity XXX */
 };
@@ -208,7 +208,7 @@ static void tile_irq_chip_eoi(unsigned int irq)
 }

 static struct irq_chip tile_irq_chip = {
-	.typename = "tile_irq_chip",
+	.name = "tile_irq_chip",
 	.ack = tile_irq_chip_ack,
 	.eoi = tile_irq_chip_eoi,
 	.mask = tile_irq_chip_mask,
@@ -288,7 +288,7 @@ int show_interrupts(struct seq_file *p, void *v)
 		for_each_online_cpu(j)
 			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
 #endif
-		seq_printf(p, " %14s", irq_desc[i].chip->typename);
+		seq_printf(p, " %14s", irq_desc[i].chip->name);
 		seq_printf(p, " %s", action->name);

 		for (action = action->next; action; action = action->next)
@@ -46,7 +46,7 @@ int show_interrupts(struct seq_file *p, void *v)
 		for_each_online_cpu(j)
 			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
 #endif
-		seq_printf(p, " %14s", irq_desc[i].chip->typename);
+		seq_printf(p, " %14s", irq_desc[i].chip->name);
 		seq_printf(p, " %s", action->name);

 		for (action=action->next; action; action = action->next)
@@ -369,7 +369,7 @@ static void dummy(unsigned int irq)

 /* This is used for everything else than the timer. */
 static struct irq_chip normal_irq_type = {
-	.typename = "SIGIO",
+	.name = "SIGIO",
 	.release = free_irq_by_irq_and_dev,
 	.disable = dummy,
 	.enable = dummy,
@@ -378,7 +378,7 @@ static struct irq_chip normal_irq_type = {
 };

 static struct irq_chip SIGVTALRM_irq_type = {
-	.typename = "SIGVTALRM",
+	.name = "SIGVTALRM",
 	.release = free_irq_by_irq_and_dev,
 	.shutdown = dummy, /* never called */
 	.disable = dummy,
@@ -63,6 +63,10 @@ config X86
 	select HAVE_USER_RETURN_NOTIFIER
 	select HAVE_ARCH_JUMP_LABEL
 	select HAVE_TEXT_POKE_SMP
+	select HAVE_GENERIC_HARDIRQS
+	select HAVE_SPARSE_IRQ
+	select GENERIC_IRQ_PROBE
+	select GENERIC_PENDING_IRQ if SMP

 config INSTRUCTION_DECODER
 	def_bool (KPROBES || PERF_EVENTS)
@@ -204,20 +208,6 @@ config HAVE_INTEL_TXT
 	def_bool y
 	depends on EXPERIMENTAL && DMAR && ACPI

-# Use the generic interrupt handling code in kernel/irq/:
-config GENERIC_HARDIRQS
-	def_bool y
-
-config GENERIC_HARDIRQS_NO__DO_IRQ
-	def_bool y
-
-config GENERIC_IRQ_PROBE
-	def_bool y
-
-config GENERIC_PENDING_IRQ
-	def_bool y
-	depends on GENERIC_HARDIRQS && SMP
-
 config USE_GENERIC_SMP_HELPERS
 	def_bool y
 	depends on SMP
@@ -300,23 +290,6 @@ config X86_X2APIC

 	  If you don't know what to do here, say N.

-config SPARSE_IRQ
-	bool "Support sparse irq numbering"
-	depends on PCI_MSI || HT_IRQ
-	---help---
-	  This enables support for sparse irqs. This is useful for distro
-	  kernels that want to define a high CONFIG_NR_CPUS value but still
-	  want to have low kernel memory footprint on smaller machines.
-
-	  ( Sparse IRQs can also be beneficial on NUMA boxes, as they spread
-	    out the irq_desc[] array in a more NUMA-friendly way. )
-
-	  If you don't know what to do here, say N.
-
-config NUMA_IRQ_DESC
-	def_bool y
-	depends on SPARSE_IRQ && NUMA
-
 config X86_MPPARSE
 	bool "Enable MPS table" if ACPI
 	default y
@ -252,9 +252,7 @@ static inline int apic_is_clustered_box(void)
|
|||
}
|
||||
#endif
|
||||
|
||||
extern u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask);
|
||||
extern u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask);
|
||||
|
||||
extern int setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask);
|
||||
|
||||
#else /* !CONFIG_X86_LOCAL_APIC */
|
||||
static inline void lapic_shutdown(void) { }
|
||||
|
|
|
@@ -131,6 +131,7 @@
 #define	APIC_EILVTn(n)	(0x500 + 0x10 * n)
 #define		APIC_EILVT_NR_AMD_K8	1	/* # of extended interrupts */
 #define		APIC_EILVT_NR_AMD_10H	4
+#define		APIC_EILVT_NR_MAX	APIC_EILVT_NR_AMD_10H
 #define		APIC_EILVT_LVTOFF(x)	(((x) >> 4) & 0xF)
 #define		APIC_EILVT_MSG_FIX	0x0
 #define		APIC_EILVT_MSG_SMI	0x2
@@ -74,10 +74,12 @@ extern void hpet_disable(void);
 extern unsigned int hpet_readl(unsigned int a);
 extern void force_hpet_resume(void);

-extern void hpet_msi_unmask(unsigned int irq);
-extern void hpet_msi_mask(unsigned int irq);
-extern void hpet_msi_write(unsigned int irq, struct msi_msg *msg);
-extern void hpet_msi_read(unsigned int irq, struct msi_msg *msg);
+struct irq_data;
+extern void hpet_msi_unmask(struct irq_data *data);
+extern void hpet_msi_mask(struct irq_data *data);
+struct hpet_dev;
+extern void hpet_msi_write(struct hpet_dev *hdev, struct msi_msg *msg);
+extern void hpet_msi_read(struct hpet_dev *hdev, struct msi_msg *msg);

 #ifdef CONFIG_PCI_MSI
 extern int arch_setup_hpet_msi(unsigned int irq, unsigned int id);
@@ -78,6 +78,13 @@ static inline void set_io_apic_irq_attr(struct io_apic_irq_attr *irq_attr,
 	irq_attr->polarity	= polarity;
 }

+struct irq_2_iommu {
+	struct intel_iommu *iommu;
+	u16 irte_index;
+	u16 sub_handle;
+	u8  irte_mask;
+};
+
 /*
  * This is performance-critical, we want to do it O(1)
  *
@@ -89,15 +96,17 @@ struct irq_cfg {
 	cpumask_var_t		old_domain;
 	u8			vector;
 	u8			move_in_progress : 1;
+#ifdef CONFIG_INTR_REMAP
+	struct irq_2_iommu	irq_2_iommu;
+#endif
 };

 extern struct irq_cfg *irq_cfg(unsigned int);
 extern int assign_irq_vector(int, struct irq_cfg *, const struct cpumask *);
 extern void send_cleanup_vector(struct irq_cfg *);

-struct irq_desc;
-extern unsigned int set_desc_affinity(struct irq_desc *, const struct cpumask *,
-				      unsigned int *dest_id);
+struct irq_data;
+int __ioapic_set_affinity(struct irq_data *, const struct cpumask *,
+			  unsigned int *dest_id);
 extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin, struct io_apic_irq_attr *irq_attr);
 extern void setup_ioapic_dest(void);
@@ -55,6 +55,8 @@ extern struct irq_chip i8259A_chip;
 struct legacy_pic {
 	int nr_legacy_irqs;
 	struct irq_chip *chip;
+	void (*mask)(unsigned int irq);
+	void (*unmask)(unsigned int irq);
 	void (*mask_all)(void);
 	void (*restore_mask)(void);
 	void (*init)(int auto_eoi);
@@ -170,12 +170,6 @@ extern int restore_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries);

 extern void probe_nr_irqs_gsi(void);

-extern int setup_ioapic_entry(int apic, int irq,
-			      struct IO_APIC_route_entry *entry,
-			      unsigned int destination, int trigger,
-			      int polarity, int vector, int pin);
-extern void ioapic_write_entry(int apic, int pin,
-			       struct IO_APIC_route_entry e);
 extern void setup_ioapic_ids_from_mpc(void);

 struct mp_ioapic_gsi{
@@ -24,10 +24,18 @@ static inline void prepare_irte(struct irte *irte, int vector,
 	irte->dest_id = IRTE_DEST(dest);
 	irte->redir_hint = 1;
 }
+static inline bool irq_remapped(struct irq_cfg *cfg)
+{
+	return cfg->irq_2_iommu.iommu != NULL;
+}
 #else
 static void prepare_irte(struct irte *irte, int vector, unsigned int dest)
 {
 }
+static inline bool irq_remapped(struct irq_cfg *cfg)
+{
+	return false;
+}
 #endif

 #endif /* _ASM_X86_IRQ_REMAPPING_H */
@@ -231,34 +231,6 @@ static void apbt_restart_clocksource(struct clocksource *cs)
 	apbt_start_counter(phy_cs_timer_id);
 }

-/* Setup IRQ routing via IOAPIC */
-#ifdef CONFIG_SMP
-static void apbt_setup_irq(struct apbt_dev *adev)
-{
-	struct irq_chip *chip;
-	struct irq_desc *desc;
-
-	/* timer0 irq has been setup early */
-	if (adev->irq == 0)
-		return;
-	desc = irq_to_desc(adev->irq);
-	chip = get_irq_chip(adev->irq);
-	disable_irq(adev->irq);
-	desc->status |= IRQ_MOVE_PCNTXT;
-	irq_set_affinity(adev->irq, cpumask_of(adev->cpu));
-	/* APB timer irqs are set up as mp_irqs, timer is edge triggerred */
-	set_irq_chip_and_handler_name(adev->irq, chip, handle_edge_irq, "edge");
-	enable_irq(adev->irq);
-	if (system_state == SYSTEM_BOOTING)
-		if (request_irq(adev->irq, apbt_interrupt_handler,
-				IRQF_TIMER | IRQF_DISABLED | IRQF_NOBALANCING,
-				adev->name, adev)) {
-			printk(KERN_ERR "Failed request IRQ for APBT%d\n",
-			       adev->num);
-		}
-}
-#endif
-
 static void apbt_enable_int(int n)
 {
 	unsigned long ctrl = apbt_readl(n, APBTMR_N_CONTROL);
@@ -334,6 +306,27 @@ static int __init apbt_clockevent_register(void)
 }

 #ifdef CONFIG_SMP
+
+static void apbt_setup_irq(struct apbt_dev *adev)
+{
+	/* timer0 irq has been setup early */
+	if (adev->irq == 0)
+		return;
+
+	if (system_state == SYSTEM_BOOTING) {
+		irq_modify_status(adev->irq, 0, IRQ_MOVE_PCNTXT);
+		/* APB timer irqs are set up as mp_irqs, timer is edge type */
+		__set_irq_handler(adev->irq, handle_edge_irq, 0, "edge");
+		if (request_irq(adev->irq, apbt_interrupt_handler,
+				IRQF_TIMER | IRQF_DISABLED | IRQF_NOBALANCING,
+				adev->name, adev)) {
+			printk(KERN_ERR "Failed request IRQ for APBT%d\n",
+			       adev->num);
+		}
+	} else
+		enable_irq(adev->irq);
+}
+
 /* Should be called with per cpu */
 void apbt_setup_secondary_clock(void)
 {
@@ -389,10 +382,11 @@ static int apbt_cpuhp_notify(struct notifier_block *n,

 	switch (action & 0xf) {
 	case CPU_DEAD:
+		disable_irq(adev->irq);
 		apbt_disable_int(cpu);
-		if (system_state == SYSTEM_RUNNING)
+		if (system_state == SYSTEM_RUNNING) {
 			pr_debug("skipping APBT CPU %lu offline\n", cpu);
-		else if (adev) {
+		} else if (adev) {
 			pr_debug("APBT clockevent for cpu %lu offline\n", cpu);
 			free_irq(adev->irq, adev);
 		}
@@ -52,6 +52,7 @@
 #include <asm/mce.h>
 #include <asm/kvm_para.h>
 #include <asm/tsc.h>
+#include <asm/atomic.h>

 unsigned int num_processors;

@@ -370,38 +371,87 @@ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
 }

 /*
- * Setup extended LVT, AMD specific (K8, family 10h)
+ * Setup extended LVT, AMD specific
  *
- * Vector mappings are hard coded. On K8 only offset 0 (APIC500) and
- * MCE interrupts are supported. Thus MCE offset must be set to 0.
+ * Software should use the LVT offsets the BIOS provides. The offsets
+ * are determined by the subsystems using it like those for MCE
+ * threshold or IBS. On K8 only offset 0 (APIC500) and MCE interrupts
+ * are supported. Beginning with family 10h at least 4 offsets are
+ * available.
+ *
+ * Since the offsets must be consistent for all cores, we keep track
+ * of the LVT offsets in software and reserve the offset for the same
+ * vector also to be used on other cores. An offset is freed by
+ * setting the entry to APIC_EILVT_MASKED.
+ *
+ * If the BIOS is right, there should be no conflicts. Otherwise a
+ * "[Firmware Bug]: ..." error message is generated. However, if
+ * software does not properly determines the offsets, it is not
+ * necessarily a BIOS bug.
  */
+
+static atomic_t eilvt_offsets[APIC_EILVT_NR_MAX];
+
+static inline int eilvt_entry_is_changeable(unsigned int old, unsigned int new)
+{
+	return (old & APIC_EILVT_MASKED)
+		|| (new == APIC_EILVT_MASKED)
+		|| ((new & ~APIC_EILVT_MASKED) == old);
+}
+
+static unsigned int reserve_eilvt_offset(int offset, unsigned int new)
+{
+	unsigned int rsvd;			/* 0: uninitialized */
+
+	if (offset >= APIC_EILVT_NR_MAX)
+		return ~0;
+
+	rsvd = atomic_read(&eilvt_offsets[offset]) & ~APIC_EILVT_MASKED;
+	do {
+		if (rsvd &&
+		    !eilvt_entry_is_changeable(rsvd, new))
+			/* may not change if vectors are different */
+			return rsvd;
+		rsvd = atomic_cmpxchg(&eilvt_offsets[offset], rsvd, new);
+	} while (rsvd != new);
+
+	return new;
+}
+
 /*
  * If mask=1, the LVT entry does not generate interrupts while mask=0
  * enables the vector. See also the BKDGs.
  */
-
-#define APIC_EILVT_LVTOFF_MCE 0
-#define APIC_EILVT_LVTOFF_IBS 1
-
-static void setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask)
+int setup_APIC_eilvt(u8 offset, u8 vector, u8 msg_type, u8 mask)
 {
-	unsigned long reg = (lvt_off << 4) + APIC_EILVTn(0);
-	unsigned int  v   = (mask << 16) | (msg_type << 8) | vector;
-
-	apic_write(reg, v);
-}
+	unsigned long reg = APIC_EILVTn(offset);
+	unsigned int new, old, reserved;

-u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask)
-{
-	setup_APIC_eilvt(APIC_EILVT_LVTOFF_MCE, vector, msg_type, mask);
-	return APIC_EILVT_LVTOFF_MCE;
-}
+	new = (mask << 16) | (msg_type << 8) | vector;
+	old = apic_read(reg);
+	reserved = reserve_eilvt_offset(offset, new);

-u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask)
-{
-	setup_APIC_eilvt(APIC_EILVT_LVTOFF_IBS, vector, msg_type, mask);
-	return APIC_EILVT_LVTOFF_IBS;
+	if (reserved != new) {
+		pr_err(FW_BUG "cpu %d, try to setup vector 0x%x, but "
+		       "vector 0x%x was already reserved by another core, "
+		       "APIC%lX=0x%x\n",
+		       smp_processor_id(), new, reserved, reg, old);
+		return -EINVAL;
+	}
+
+	if (!eilvt_entry_is_changeable(old, new)) {
+		pr_err(FW_BUG "cpu %d, try to setup vector 0x%x but "
+		       "register already in use, APIC%lX=0x%x\n",
+		       smp_processor_id(), new, reg, old);
+		return -EBUSY;
+	}
+
+	apic_write(reg, new);
+
+	return 0;
 }
-EXPORT_SYMBOL_GPL(setup_APIC_eilvt_ibs);
+EXPORT_SYMBOL_GPL(setup_APIC_eilvt);

 /*
  * Program the next event, relative to now
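A hedged sketch of how a subsystem is expected to consume the reworked setup_APIC_eilvt(): read the BIOS-programmed offset, try to reserve it, and back off on conflict. The mce_amd.c hunk below does exactly this for the threshold interrupt; the helper name and vector here are hypothetical.

```c
/* my_read_lvt_offset() and MY_VECTOR are placeholders, not kernel symbols. */
static void my_setup_eilvt(void)
{
	u8 offset = my_read_lvt_offset();	/* BIOS-provided LVT offset */

	/* Returns 0 on success, -EINVAL if another core already reserved
	 * a different vector at this offset, -EBUSY if the register is in
	 * use with an incompatible setting. */
	if (setup_APIC_eilvt(offset, MY_VECTOR, APIC_EILVT_MSG_FIX, 0))
		pr_err("EILVT offset %u unusable\n", offset);
}
```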
(File diff suppressed because it is too large.)
@@ -178,7 +178,7 @@ int __init check_nmi_watchdog(void)
 error:
 	if (nmi_watchdog == NMI_IO_APIC) {
 		if (!timer_through_8259)
-			legacy_pic->chip->mask(0);
+			legacy_pic->mask(0);
 		on_each_cpu(__acpi_nmi_disable, NULL, 1);
 	}
@@ -131,7 +131,8 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
 	u32 low = 0, high = 0, address = 0;
 	unsigned int bank, block;
 	struct thresh_restart tr;
-	u8 lvt_off;
+	int lvt_off = -1;
+	u8 offset;

 	for (bank = 0; bank < NR_BANKS; ++bank) {
 		for (block = 0; block < NR_BLOCKS; ++block) {
@@ -162,8 +163,28 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
 			if (shared_bank[bank] && c->cpu_core_id)
 				break;
 #endif
-			lvt_off = setup_APIC_eilvt_mce(THRESHOLD_APIC_VECTOR,
-						       APIC_EILVT_MSG_FIX, 0);
+			offset = (high & MASK_LVTOFF_HI) >> 20;
+			if (lvt_off < 0) {
+				if (setup_APIC_eilvt(offset,
+						     THRESHOLD_APIC_VECTOR,
+						     APIC_EILVT_MSG_FIX, 0)) {
+					pr_err(FW_BUG "cpu %d, failed to "
+					       "setup threshold interrupt "
+					       "for bank %d, block %d "
+					       "(MSR%08X=0x%x%08x)",
+					       smp_processor_id(), bank, block,
+					       address, high, low);
+					continue;
+				}
+				lvt_off = offset;
+			} else if (lvt_off != offset) {
+				pr_err(FW_BUG "cpu %d, invalid threshold "
+				       "interrupt offset %d for bank %d,"
+				       "block %d (MSR%08X=0x%x%08x)",
+				       smp_processor_id(), lvt_off, bank,
+				       block, address, high, low);
+				continue;
+			}

 			high &= ~MASK_LVTOFF_HI;
 			high |= lvt_off << 20;
@@ -440,9 +440,9 @@ static int hpet_legacy_next_event(unsigned long delta,
 static DEFINE_PER_CPU(struct hpet_dev *, cpu_hpet_dev);
 static struct hpet_dev	*hpet_devs;

-void hpet_msi_unmask(unsigned int irq)
+void hpet_msi_unmask(struct irq_data *data)
 {
-	struct hpet_dev *hdev = get_irq_data(irq);
+	struct hpet_dev *hdev = data->handler_data;
 	unsigned int cfg;

 	/* unmask it */
@@ -451,10 +451,10 @@ void hpet_msi_unmask(unsigned int irq)
 	hpet_writel(cfg, HPET_Tn_CFG(hdev->num));
 }

-void hpet_msi_mask(unsigned int irq)
+void hpet_msi_mask(struct irq_data *data)
 {
+	struct hpet_dev *hdev = data->handler_data;
 	unsigned int cfg;
-	struct hpet_dev *hdev = get_irq_data(irq);

 	/* mask it */
 	cfg = hpet_readl(HPET_Tn_CFG(hdev->num));
@@ -462,18 +462,14 @@ void hpet_msi_mask(unsigned int irq)
 	hpet_writel(cfg, HPET_Tn_CFG(hdev->num));
 }

-void hpet_msi_write(unsigned int irq, struct msi_msg *msg)
+void hpet_msi_write(struct hpet_dev *hdev, struct msi_msg *msg)
 {
-	struct hpet_dev *hdev = get_irq_data(irq);
-
 	hpet_writel(msg->data, HPET_Tn_ROUTE(hdev->num));
 	hpet_writel(msg->address_lo, HPET_Tn_ROUTE(hdev->num) + 4);
 }

-void hpet_msi_read(unsigned int irq, struct msi_msg *msg)
+void hpet_msi_read(struct hpet_dev *hdev, struct msi_msg *msg)
 {
-	struct hpet_dev *hdev = get_irq_data(irq);
-
 	msg->data = hpet_readl(HPET_Tn_ROUTE(hdev->num));
 	msg->address_lo = hpet_readl(HPET_Tn_ROUTE(hdev->num) + 4);
 	msg->address_hi = 0;
@@ -29,24 +29,10 @@
  * plus some generic x86 specific things if generic specifics makes
  * any sense at all.
  */
+static void init_8259A(int auto_eoi);

 static int i8259A_auto_eoi;
 DEFINE_RAW_SPINLOCK(i8259A_lock);
-static void mask_and_ack_8259A(unsigned int);
-static void mask_8259A(void);
-static void unmask_8259A(void);
-static void disable_8259A_irq(unsigned int irq);
-static void enable_8259A_irq(unsigned int irq);
-static void init_8259A(int auto_eoi);
 static int i8259A_irq_pending(unsigned int irq);

-struct irq_chip i8259A_chip = {
-	.name		= "XT-PIC",
-	.mask		= disable_8259A_irq,
-	.disable	= disable_8259A_irq,
-	.unmask		= enable_8259A_irq,
-	.mask_ack	= mask_and_ack_8259A,
-};
-
 /*
  * 8259A PIC functions to handle ISA devices:
@@ -68,7 +54,7 @@ unsigned int cached_irq_mask = 0xffff;
  */
 unsigned long io_apic_irqs;

-static void disable_8259A_irq(unsigned int irq)
+static void mask_8259A_irq(unsigned int irq)
 {
 	unsigned int mask = 1 << irq;
 	unsigned long flags;
@@ -82,7 +68,12 @@ static void disable_8259A_irq(unsigned int irq)
 	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
 }

-static void enable_8259A_irq(unsigned int irq)
+static void disable_8259A_irq(struct irq_data *data)
+{
+	mask_8259A_irq(data->irq);
+}
+
+static void unmask_8259A_irq(unsigned int irq)
 {
 	unsigned int mask = ~(1 << irq);
 	unsigned long flags;
@@ -96,6 +87,11 @@ static void enable_8259A_irq(unsigned int irq)
 	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
 }

+static void enable_8259A_irq(struct irq_data *data)
+{
+	unmask_8259A_irq(data->irq);
+}
+
 static int i8259A_irq_pending(unsigned int irq)
 {
 	unsigned int mask = 1<<irq;
@@ -117,7 +113,7 @@ static void make_8259A_irq(unsigned int irq)
 	disable_irq_nosync(irq);
 	io_apic_irqs &= ~(1<<irq);
 	set_irq_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
-				      "XT");
+				      i8259A_chip.name);
 	enable_irq(irq);
 }

@@ -150,8 +146,9 @@ static inline int i8259A_irq_real(unsigned int irq)
  * first, _then_ send the EOI, and the order of EOI
  * to the two 8259s is important!
  */
-static void mask_and_ack_8259A(unsigned int irq)
+static void mask_and_ack_8259A(struct irq_data *data)
 {
+	unsigned int irq = data->irq;
 	unsigned int irqmask = 1 << irq;
 	unsigned long flags;

@@ -223,6 +220,14 @@ static void mask_and_ack_8259A(unsigned int irq)
 	}
 }

+struct irq_chip i8259A_chip = {
+	.name		= "XT-PIC",
+	.irq_mask	= disable_8259A_irq,
+	.irq_disable	= disable_8259A_irq,
+	.irq_unmask	= enable_8259A_irq,
+	.irq_mask_ack	= mask_and_ack_8259A,
+};
+
 static char irq_trigger[2];
 /**
  * ELCR registers (0x4d0, 0x4d1) control edge/level of IRQ
@@ -342,9 +347,9 @@ static void init_8259A(int auto_eoi)
 		 * In AEOI mode we just have to mask the interrupt
 		 * when acking.
 		 */
-		i8259A_chip.mask_ack = disable_8259A_irq;
+		i8259A_chip.irq_mask_ack = disable_8259A_irq;
 	else
-		i8259A_chip.mask_ack = mask_and_ack_8259A;
+		i8259A_chip.irq_mask_ack = mask_and_ack_8259A;

 	udelay(100);		/* wait for 8259A to initialize */

@@ -363,14 +368,6 @@ static void init_8259A(int auto_eoi)
 static void legacy_pic_noop(void) { };
 static void legacy_pic_uint_noop(unsigned int unused) { };
 static void legacy_pic_int_noop(int unused) { };
-
-static struct irq_chip dummy_pic_chip  = {
-	.name = "dummy pic",
-	.mask = legacy_pic_uint_noop,
-	.unmask = legacy_pic_uint_noop,
-	.disable = legacy_pic_uint_noop,
-	.mask_ack = legacy_pic_uint_noop,
-};
 static int legacy_pic_irq_pending_noop(unsigned int irq)
 {
 	return 0;
@@ -378,7 +375,9 @@ static int legacy_pic_irq_pending_noop(unsigned int irq)

 struct legacy_pic null_legacy_pic = {
 	.nr_legacy_irqs = 0,
-	.chip = &dummy_pic_chip,
+	.chip = &dummy_irq_chip,
+	.mask = legacy_pic_uint_noop,
+	.unmask = legacy_pic_uint_noop,
 	.mask_all = legacy_pic_noop,
 	.restore_mask = legacy_pic_noop,
 	.init = legacy_pic_int_noop,
@@ -389,7 +388,9 @@ struct legacy_pic null_legacy_pic = {
 struct legacy_pic default_legacy_pic = {
 	.nr_legacy_irqs = NR_IRQS_LEGACY,
 	.chip  = &i8259A_chip,
-	.mask_all  = mask_8259A,
+	.mask = mask_8259A_irq,
+	.unmask = unmask_8259A_irq,
+	.mask_all = mask_8259A,
 	.restore_mask = unmask_8259A,
 	.init = init_8259A,
 	.irq_pending = i8259A_irq_pending,
@@ -159,7 +159,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	seq_printf(p, "%*d: ", prec, i);
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
-	seq_printf(p, " %8s", desc->chip->name);
+	seq_printf(p, " %8s", desc->irq_data.chip->name);
 	seq_printf(p, "-%-8s", desc->name);

 	if (action) {
@@ -282,6 +282,7 @@ void fixup_irqs(void)
 	unsigned int irq, vector;
 	static int warned;
 	struct irq_desc *desc;
+	struct irq_data *data;

 	for_each_irq_desc(irq, desc) {
 		int break_affinity = 0;
@@ -296,7 +297,8 @@ void fixup_irqs(void)
 		/* interrupt's are disabled at this point */
 		raw_spin_lock(&desc->lock);

-		affinity = desc->affinity;
+		data = &desc->irq_data;
+		affinity = data->affinity;
 		if (!irq_has_action(irq) ||
 		    cpumask_equal(affinity, cpu_online_mask)) {
 			raw_spin_unlock(&desc->lock);
@@ -315,16 +317,16 @@ void fixup_irqs(void)
 			affinity = cpu_all_mask;
 		}

-		if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->mask)
-			desc->chip->mask(irq);
+		if (!(desc->status & IRQ_MOVE_PCNTXT) && data->chip->irq_mask)
+			data->chip->irq_mask(data);

-		if (desc->chip->set_affinity)
-			desc->chip->set_affinity(irq, affinity);
+		if (data->chip->irq_set_affinity)
+			data->chip->irq_set_affinity(data, affinity, true);
 		else if (!(warned++))
 			set_affinity = 0;

-		if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->unmask)
-			desc->chip->unmask(irq);
+		if (!(desc->status & IRQ_MOVE_PCNTXT) && data->chip->irq_unmask)
+			data->chip->irq_unmask(data);

 		raw_spin_unlock(&desc->lock);

@@ -355,10 +357,10 @@ void fixup_irqs(void)
 		if (irr & (1 << (vector % 32))) {
 			irq = __get_cpu_var(vector_irq)[vector];

-			desc = irq_to_desc(irq);
+			data = irq_get_irq_data(irq);
 			raw_spin_lock(&desc->lock);
-			if (desc->chip->retrigger)
-				desc->chip->retrigger(irq);
+			if (data->chip->irq_retrigger)
+				data->chip->irq_retrigger(data);
 			raw_spin_unlock(&desc->lock);
 		}
 	}
@@ -100,6 +100,8 @@ int vector_used_by_percpu_irq(unsigned int vector)

 void __init init_ISA_irqs(void)
 {
+	struct irq_chip *chip = legacy_pic->chip;
+	const char *name = chip->name;
 	int i;

 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
@@ -107,19 +109,8 @@ void __init init_ISA_irqs(void)
 #endif
 	legacy_pic->init(0);

-	/*
-	 * 16 old-style INTA-cycle interrupts:
-	 */
-	for (i = 0; i < legacy_pic->nr_legacy_irqs; i++) {
-		struct irq_desc *desc = irq_to_desc(i);
-
-		desc->status = IRQ_DISABLED;
-		desc->action = NULL;
-		desc->depth = 1;
-
-		set_irq_chip_and_handler_name(i, &i8259A_chip,
-					      handle_level_irq, "XT");
-	}
+	for (i = 0; i < legacy_pic->nr_legacy_irqs; i++)
+		set_irq_chip_and_handler_name(i, chip, handle_level_irq, name);
 }

 void __init init_IRQ(void)
@@ -323,9 +323,9 @@ notrace static void __cpuinit start_secondary(void *unused)
 	check_tsc_sync_target();

 	if (nmi_watchdog == NMI_IO_APIC) {
-		legacy_pic->chip->mask(0);
+		legacy_pic->mask(0);
 		enable_NMI_through_LVT0();
-		legacy_pic->chip->unmask(0);
+		legacy_pic->unmask(0);
 	}

 	/* This must be done before setting cpu_online_mask */
@@ -28,34 +28,21 @@ struct uv_irq_2_mmr_pnode{
 static spinlock_t		uv_irq_lock;
 static struct rb_root		uv_irq_root;

-static int uv_set_irq_affinity(unsigned int, const struct cpumask *);
+static int uv_set_irq_affinity(struct irq_data *, const struct cpumask *, bool);

-static void uv_noop(unsigned int irq)
-{
-}
-
-static unsigned int uv_noop_ret(unsigned int irq)
-{
-	return 0;
-}
+static void uv_noop(struct irq_data *data) { }

-static void uv_ack_apic(unsigned int irq)
+static void uv_ack_apic(struct irq_data *data)
 {
 	ack_APIC_irq();
 }

 static struct irq_chip uv_irq_chip = {
-	.name		= "UV-CORE",
-	.startup	= uv_noop_ret,
-	.shutdown	= uv_noop,
-	.enable		= uv_noop,
-	.disable	= uv_noop,
-	.ack		= uv_noop,
-	.mask		= uv_noop,
-	.unmask		= uv_noop,
-	.eoi		= uv_ack_apic,
-	.end		= uv_noop,
-	.set_affinity	= uv_set_irq_affinity,
+	.name			= "UV-CORE",
+	.irq_mask		= uv_noop,
+	.irq_unmask		= uv_noop,
+	.irq_eoi		= uv_ack_apic,
+	.irq_set_affinity	= uv_set_irq_affinity,
 };

 /*
@@ -144,26 +131,22 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
 		       unsigned long mmr_offset, int limit)
 {
 	const struct cpumask *eligible_cpu = cpumask_of(cpu);
-	struct irq_desc *desc = irq_to_desc(irq);
-	struct irq_cfg *cfg;
-	int mmr_pnode;
+	struct irq_cfg *cfg = get_irq_chip_data(irq);
 	unsigned long mmr_value;
 	struct uv_IO_APIC_route_entry *entry;
-	int err;
+	int mmr_pnode, err;

 	BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
 			sizeof(unsigned long));

-	cfg = irq_cfg(irq);
-
 	err = assign_irq_vector(irq, cfg, eligible_cpu);
 	if (err != 0)
 		return err;

 	if (limit == UV_AFFINITY_CPU)
-		desc->status |= IRQ_NO_BALANCING;
+		irq_set_status_flags(irq, IRQ_NO_BALANCING);
 	else
-		desc->status |= IRQ_MOVE_PCNTXT;
+		irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);

 	set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq,
 				      irq_name);
@@ -206,17 +189,17 @@ static void arch_disable_uv_irq(int mmr_pnode, unsigned long mmr_offset)
 	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
 }

-static int uv_set_irq_affinity(unsigned int irq, const struct cpumask *mask)
+static int
+uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask,
+		    bool force)
 {
-	struct irq_desc *desc = irq_to_desc(irq);
-	struct irq_cfg *cfg = desc->chip_data;
+	struct irq_cfg *cfg = data->chip_data;
 	unsigned int dest;
-	unsigned long mmr_value;
+	unsigned long mmr_value, mmr_offset;
 	struct uv_IO_APIC_route_entry *entry;
-	unsigned long mmr_offset;
 	int mmr_pnode;

-	if (set_desc_affinity(desc, mask, &dest))
+	if (__ioapic_set_affinity(data, mask, &dest))
 		return -1;

 	mmr_value = 0;
@@ -231,7 +214,7 @@ static int uv_set_irq_affinity(unsigned int irq, const struct cpumask *mask)
 	entry->dest = dest;

 	/* Get previously stored MMR and pnode of hub sourcing interrupts */
-	if (uv_irq_2_mmr_info(irq, &mmr_offset, &mmr_pnode))
+	if (uv_irq_2_mmr_info(data->irq, &mmr_offset, &mmr_pnode))
 		return -1;

 	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
@ -66,10 +66,7 @@ static void __init visws_time_init(void)
|
|||
}
|
||||
|
||||
/* Replaces the default init_ISA_irqs in the generic setup */
|
||||
static void __init visws_pre_intr_init(void)
|
||||
{
|
||||
init_VISWS_APIC_irqs();
|
||||
}
|
||||
static void __init visws_pre_intr_init(void);
|
||||
|
||||
/* Quirk for machine specific memory setup. */
|
||||
|
||||
|
@ -429,67 +426,34 @@ static int is_co_apic(unsigned int irq)
|
|||
/*
|
||||
* This is the SGI Cobalt (IO-)APIC:
|
||||
*/
|
||||
|
||||
static void enable_cobalt_irq(unsigned int irq)
|
||||
static void enable_cobalt_irq(struct irq_data *data)
|
||||
{
|
||||
co_apic_set(is_co_apic(irq), irq);
|
||||
co_apic_set(is_co_apic(data->irq), data->irq);
|
||||
}
|
||||
|
||||
static void disable_cobalt_irq(unsigned int irq)
|
||||
static void disable_cobalt_irq(struct irq_data *data)
|
||||
{
|
||||
int entry = is_co_apic(irq);
|
||||
int entry = is_co_apic(data->irq);
|
||||
|
||||
co_apic_write(CO_APIC_LO(entry), CO_APIC_MASK);
|
||||
co_apic_read(CO_APIC_LO(entry));
|
||||
}
|
||||
|
||||
/*
|
||||
* "irq" really just serves to identify the device. Here is where we
|
||||
* map this to the Cobalt APIC entry where it's physically wired.
|
||||
* This is called via request_irq -> setup_irq -> irq_desc->startup()
|
||||
*/
|
||||
static unsigned int startup_cobalt_irq(unsigned int irq)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct irq_desc *desc = irq_to_desc(irq);
|
||||
|
||||
spin_lock_irqsave(&cobalt_lock, flags);
|
||||
if ((desc->status & (IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING)))
|
||||
desc->status &= ~(IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING);
|
||||
enable_cobalt_irq(irq);
|
||||
spin_unlock_irqrestore(&cobalt_lock, flags);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void ack_cobalt_irq(unsigned int irq)
|
||||
static void ack_cobalt_irq(struct irq_data *data)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&cobalt_lock, flags);
|
||||
disable_cobalt_irq(irq);
|
||||
disable_cobalt_irq(data);
|
||||
apic_write(APIC_EOI, APIC_EIO_ACK);
|
||||
spin_unlock_irqrestore(&cobalt_lock, flags);
|
||||
}
|
||||
|
||||
static void end_cobalt_irq(unsigned int irq)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct irq_desc *desc = irq_to_desc(irq);
|
||||
|
||||
spin_lock_irqsave(&cobalt_lock, flags);
|
||||
if (!(desc->status & (IRQ_DISABLED | IRQ_INPROGRESS)))
|
||||
enable_cobalt_irq(irq);
|
||||
spin_unlock_irqrestore(&cobalt_lock, flags);
|
||||
}
|
||||
|
||||
static struct irq_chip cobalt_irq_type = {
|
||||
.name = "Cobalt-APIC",
|
||||
.startup = startup_cobalt_irq,
|
||||
.shutdown = disable_cobalt_irq,
|
||||
.enable = enable_cobalt_irq,
|
||||
.disable = disable_cobalt_irq,
|
||||
.ack = ack_cobalt_irq,
|
||||
.end = end_cobalt_irq,
|
||||
.name = "Cobalt-APIC",
|
||||
.irq_enable = enable_cobalt_irq,
|
||||
.irq_disable = disable_cobalt_irq,
|
||||
.irq_ack = ack_cobalt_irq,
|
||||
};
|
||||
|
||||
|
||||
|
@ -503,35 +467,34 @@ static struct irq_chip cobalt_irq_type = {
|
|||
* interrupt controller type, and through a special virtual interrupt-
|
||||
* controller. Device drivers only see the virtual interrupt sources.
|
||||
*/
|
||||
static unsigned int startup_piix4_master_irq(unsigned int irq)
|
||||
static unsigned int startup_piix4_master_irq(struct irq_data *data)
|
||||
{
|
||||
legacy_pic->init(0);
|
||||
|
||||
return startup_cobalt_irq(irq);
|
||||
enable_cobalt_irq(data);
|
||||
}
|
||||
|
||||
static void end_piix4_master_irq(unsigned int irq)
|
||||
static void end_piix4_master_irq(struct irq_data *data)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&cobalt_lock, flags);
|
||||
enable_cobalt_irq(irq);
|
||||
enable_cobalt_irq(data);
|
||||
spin_unlock_irqrestore(&cobalt_lock, flags);
|
||||
}
|
||||
|
||||
static struct irq_chip piix4_master_irq_type = {
|
||||
.name = "PIIX4-master",
|
||||
.startup = startup_piix4_master_irq,
|
||||
.ack = ack_cobalt_irq,
|
||||
.end = end_piix4_master_irq,
|
||||
.name = "PIIX4-master",
|
||||
.irq_startup = startup_piix4_master_irq,
|
||||
.irq_ack = ack_cobalt_irq,
|
||||
};
|
||||
|
||||
static void pii4_mask(struct irq_data *data) { }
|
||||
|
||||
static struct irq_chip piix4_virtual_irq_type = {
|
||||
.name = "PIIX4-virtual",
|
||||
.name = "PIIX4-virtual",
|
||||
.mask = pii4_mask,
|
||||
};
|
||||
|
||||
|
||||
/*
|
||||
* PIIX4-8259 master/virtual functions to handle interrupt requests
|
||||
* from legacy devices: floppy, parallel, serial, rtc.
|
||||
|
@ -549,9 +512,8 @@ static struct irq_chip piix4_virtual_irq_type = {
|
|||
*/
|
||||
static irqreturn_t piix4_master_intr(int irq, void *dev_id)
|
||||
{
|
||||
int realirq;
|
||||
struct irq_desc *desc;
|
||||
unsigned long flags;
|
||||
int realirq;
|
||||
|
||||
raw_spin_lock_irqsave(&i8259A_lock, flags);
|
||||
|
||||
|
@@ -592,18 +554,10 @@ static irqreturn_t piix4_master_intr(int irq, void *dev_id)
 
 	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
 
-	desc = irq_to_desc(realirq);
-
 	/*
 	 * handle this 'virtual interrupt' as a Cobalt one now.
 	 */
-	kstat_incr_irqs_this_cpu(realirq, desc);
-
-	if (likely(desc->action != NULL))
-		handle_IRQ_event(realirq, desc->action);
-
-	if (!(desc->status & IRQ_DISABLED))
-		legacy_pic->chip->unmask(realirq);
+	generic_handle_irq(realirq);
 
 	return IRQ_HANDLED;
@@ -624,41 +578,35 @@ static struct irqaction cascade_action = {
 
 static inline void set_piix4_virtual_irq_type(void)
 {
-	piix4_virtual_irq_type.shutdown = i8259A_chip.mask;
 	piix4_virtual_irq_type.enable = i8259A_chip.unmask;
 	piix4_virtual_irq_type.disable = i8259A_chip.mask;
+	piix4_virtual_irq_type.unmask = i8259A_chip.unmask;
 }
 
-void init_VISWS_APIC_irqs(void)
+static void __init visws_pre_intr_init(void)
 {
 	int i;
 
+	set_piix4_virtual_irq_type();
+
 	for (i = 0; i < CO_IRQ_APIC0 + CO_APIC_LAST + 1; i++) {
-		struct irq_desc *desc = irq_to_desc(i);
-
-		desc->status = IRQ_DISABLED;
-		desc->action = 0;
-		desc->depth = 1;
+		struct irq_chip *chip = NULL;
+
+		if (i == 0)
+			chip = &cobalt_irq_type;
+		else if (i == CO_IRQ_IDE0)
+			chip = &cobalt_irq_type;
+		else if (i == CO_IRQ_IDE1)
+			chip = &cobalt_irq_type;
+		else if (i == CO_IRQ_8259)
+			chip = &piix4_master_irq_type;
+		else if (i < CO_IRQ_APIC0)
+			chip = &piix4_virtual_irq_type;
+		else if (IS_CO_APIC(i))
+			chip = &cobalt_irq_type;
 
-		if (i == 0) {
-			desc->chip = &cobalt_irq_type;
-		}
-		else if (i == CO_IRQ_IDE0) {
-			desc->chip = &cobalt_irq_type;
-		}
-		else if (i == CO_IRQ_IDE1) {
-			desc->chip = &cobalt_irq_type;
-		}
-		else if (i == CO_IRQ_8259) {
-			desc->chip = &piix4_master_irq_type;
-		}
-		else if (i < CO_IRQ_APIC0) {
-			set_piix4_virtual_irq_type();
-			desc->chip = &piix4_virtual_irq_type;
-		}
-		else if (IS_CO_APIC(i)) {
-			desc->chip = &cobalt_irq_type;
-		}
+		if (chip)
+			set_irq_chip(i, chip);
 	}
 
 	setup_irq(CO_IRQ_8259, &master_action);
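The visws hunks above show the mechanical pattern that repeats through this whole series: chip callbacks stop taking a bare IRQ number and receive a struct irq_data cookie, so per-irq state travels with the argument instead of being fetched through global lookups. Below is a minimal standalone sketch of that conversion, not kernel code; the struct layout is deliberately reduced and the names are only illustrative.

#include <stdio.h>

struct irq_data {			/* reduced model of the new per-irq cookie */
	unsigned int irq;
	void *chip_data;
};

/* Old style: only the number is passed, state needs a global lookup. */
static void old_ack(unsigned int irq)
{
	printf("ack irq %u via global lookup\n", irq);
}

/* New style: everything the callback needs arrives in the argument. */
static void new_ack(struct irq_data *data)
{
	printf("ack irq %u, chip_data %p\n", data->irq, data->chip_data);
}

int main(void)
{
	struct irq_data d = { .irq = 9, .chip_data = NULL };

	old_ack(9);
	new_ack(&d);
	return 0;
}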
@@ -791,22 +791,22 @@ static void lguest_flush_tlb_kernel(void)
  * simple as setting a bit. We don't actually "ack" interrupts as such, we
  * just mask and unmask them. I wonder if we should be cleverer?
  */
-static void disable_lguest_irq(unsigned int irq)
+static void disable_lguest_irq(struct irq_data *data)
 {
-	set_bit(irq, lguest_data.blocked_interrupts);
+	set_bit(data->irq, lguest_data.blocked_interrupts);
 }
 
-static void enable_lguest_irq(unsigned int irq)
+static void enable_lguest_irq(struct irq_data *data)
 {
-	clear_bit(irq, lguest_data.blocked_interrupts);
+	clear_bit(data->irq, lguest_data.blocked_interrupts);
 }
 
 /* This structure describes the lguest IRQ controller. */
 static struct irq_chip lguest_irq_controller = {
 	.name		= "lguest",
-	.mask		= disable_lguest_irq,
-	.mask_ack	= disable_lguest_irq,
-	.unmask		= enable_lguest_irq,
+	.irq_mask	= disable_lguest_irq,
+	.irq_mask_ack	= disable_lguest_irq,
+	.irq_unmask	= enable_lguest_irq,
 };
 
 /*
@@ -838,12 +838,12 @@ static void __init lguest_init_IRQ(void)
  * rather than set them in lguest_init_IRQ we are called here every time an
  * lguest device needs an interrupt.
  *
- * FIXME: irq_to_desc_alloc_node() can fail due to lack of memory, we should
+ * FIXME: irq_alloc_desc_at() can fail due to lack of memory, we should
  * pass that up!
  */
 void lguest_setup_irq(unsigned int irq)
 {
-	irq_to_desc_alloc_node(irq, 0);
+	irq_alloc_desc_at(irq, 0);
 	set_irq_chip_and_handler_name(irq, &lguest_irq_controller,
 				      handle_level_irq, "level");
 }
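The lguest hunk above switches to the new descriptor allocator. A minimal standalone sketch of its contract follows: irq_alloc_desc_at(irq, node) reserves one specific descriptor and hands back the irq number on success, or a negative value when the slot is already taken or memory is short, which is exactly the failure the FIXME wants propagated. The toy table below stands in for the kernel's sparse descriptor storage; names prefixed toy_ are hypothetical.

#include <stdio.h>

#define NR_TOY_IRQS 32
static int allocated[NR_TOY_IRQS];

static int toy_irq_alloc_desc_at(unsigned int at, int node)
{
	(void)node;			/* NUMA hint, unused in the toy */
	if (at >= NR_TOY_IRQS || allocated[at])
		return -1;		/* the kernel returns a negative errno */
	allocated[at] = 1;
	return (int)at;
}

int main(void)
{
	printf("first:  %d\n", toy_irq_alloc_desc_at(5, 0));	/* 5 */
	printf("second: %d\n", toy_irq_alloc_desc_at(5, 0));	/* -1, slot taken */
	return 0;
}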
@@ -64,15 +64,22 @@ static u64 ibs_op_ctl;
  * IBS cpuid feature detection
  */
 
-#define IBS_CPUID_FEATURES	0x8000001b
+#define IBS_CPUID_FEATURES		0x8000001b
 
 /*
  * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
  * bit 0 is used to indicate the existence of IBS.
  */
-#define IBS_CAPS_AVAIL			(1LL<<0)
-#define IBS_CAPS_RDWROPCNT		(1LL<<3)
-#define IBS_CAPS_OPCNT			(1LL<<4)
+#define IBS_CAPS_AVAIL			(1U<<0)
+#define IBS_CAPS_RDWROPCNT		(1U<<3)
+#define IBS_CAPS_OPCNT			(1U<<4)
+
+/*
+ * IBS APIC setup
+ */
+#define IBSCTL				0x1cc
+#define IBSCTL_LVT_OFFSET_VALID		(1ULL<<8)
+#define IBSCTL_LVT_OFFSET_MASK		0x0F
 
 /*
  * IBS randomization macros
@@ -266,6 +273,74 @@ static void op_amd_stop_ibs(void)
 	wrmsrl(MSR_AMD64_IBSOPCTL, 0);
 }
 
+static inline int eilvt_is_available(int offset)
+{
+	/* check if we may assign a vector */
+	return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1);
+}
+
+static inline int ibs_eilvt_valid(void)
+{
+	u64 val;
+	int offset;
+
+	rdmsrl(MSR_AMD64_IBSCTL, val);
+	if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
+		pr_err(FW_BUG "cpu %d, invalid IBS "
+		       "interrupt offset %d (MSR%08X=0x%016llx)",
+		       smp_processor_id(), offset,
+		       MSR_AMD64_IBSCTL, val);
+		return 0;
+	}
+
+	offset = val & IBSCTL_LVT_OFFSET_MASK;
+
+	if (eilvt_is_available(offset))
+		return !0;
+
+	pr_err(FW_BUG "cpu %d, IBS interrupt offset %d "
+	       "not available (MSR%08X=0x%016llx)",
+	       smp_processor_id(), offset,
+	       MSR_AMD64_IBSCTL, val);
+
+	return 0;
+}
+
+static inline int get_ibs_offset(void)
+{
+	u64 val;
+
+	rdmsrl(MSR_AMD64_IBSCTL, val);
+	if (!(val & IBSCTL_LVT_OFFSET_VALID))
+		return -EINVAL;
+
+	return val & IBSCTL_LVT_OFFSET_MASK;
+}
+
+static void setup_APIC_ibs(void)
+{
+	int offset;
+
+	offset = get_ibs_offset();
+	if (offset < 0)
+		goto failed;
+
+	if (!setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 0))
+		return;
+failed:
+	pr_warn("oprofile: IBS APIC setup failed on cpu #%d\n",
+		smp_processor_id());
+}
+
+static void clear_APIC_ibs(void)
+{
+	int offset;
+
+	offset = get_ibs_offset();
+	if (offset >= 0)
+		setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
+}
+
 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
 
 static void op_mux_switch_ctrl(struct op_x86_model_spec const *model,
@@ -376,13 +451,13 @@ static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
 	}
 
 	if (ibs_caps)
-		setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_NMI, 0);
+		setup_APIC_ibs();
 }
 
 static void op_amd_cpu_shutdown(void)
 {
 	if (ibs_caps)
-		setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1);
+		clear_APIC_ibs();
 }
 
 static int op_amd_check_ctrs(struct pt_regs * const regs,
@@ -445,16 +520,11 @@ static void op_amd_stop(struct op_msrs const * const msrs)
 	op_amd_stop_ibs();
 }
 
-static int __init_ibs_nmi(void)
+static int setup_ibs_ctl(int ibs_eilvt_off)
 {
-#define IBSCTL_LVTOFFSETVAL		(1 << 8)
-#define IBSCTL				0x1cc
 	struct pci_dev *cpu_cfg;
 	int nodes;
 	u32 value = 0;
-	u8 ibs_eilvt_off;
-
-	ibs_eilvt_off = setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1);
 
 	nodes = 0;
 	cpu_cfg = NULL;
@@ -466,24 +536,63 @@ static int __init_ibs_nmi(void)
 			break;
 		++nodes;
 		pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off
-				       | IBSCTL_LVTOFFSETVAL);
+				       | IBSCTL_LVT_OFFSET_VALID);
 		pci_read_config_dword(cpu_cfg, IBSCTL, &value);
-		if (value != (ibs_eilvt_off | IBSCTL_LVTOFFSETVAL)) {
+		if (value != (ibs_eilvt_off | IBSCTL_LVT_OFFSET_VALID)) {
 			pci_dev_put(cpu_cfg);
 			printk(KERN_DEBUG "Failed to setup IBS LVT offset, "
-			       "IBSCTL = 0x%08x", value);
-			return 1;
+			       "IBSCTL = 0x%08x\n", value);
+			return -EINVAL;
 		}
 	} while (1);
 
 	if (!nodes) {
-		printk(KERN_DEBUG "No CPU node configured for IBS");
-		return 1;
+		printk(KERN_DEBUG "No CPU node configured for IBS\n");
+		return -ENODEV;
 	}
 
 	return 0;
 }
 
+static int force_ibs_eilvt_setup(void)
+{
+	int i;
+	int ret;
+
+	/* find the next free available EILVT entry */
+	for (i = 1; i < 4; i++) {
+		if (!eilvt_is_available(i))
+			continue;
+		ret = setup_ibs_ctl(i);
+		if (ret)
+			return ret;
+		return 0;
+	}
+
+	printk(KERN_DEBUG "No EILVT entry available\n");
+
+	return -EBUSY;
+}
+
+static int __init_ibs_nmi(void)
+{
+	int ret;
+
+	if (ibs_eilvt_valid())
+		return 0;
+
+	ret = force_ibs_eilvt_setup();
+	if (ret)
+		return ret;
+
+	if (!ibs_eilvt_valid())
+		return -EFAULT;
+
+	pr_err(FW_BUG "workaround enabled for IBS LVT offset\n");
+
+	return 0;
+}
+
 /* initialize the APIC for the IBS interrupts if available */
 static void init_ibs(void)
 {
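The new IBS helpers above all hinge on one MSR decode: bit 8 of IBSCTL says the LVT offset field is valid, bits 3:0 carry the offset itself. Here is a minimal standalone sketch of that decode, mirroring what ibs_eilvt_valid() and get_ibs_offset() do; the sample MSR values are made up and the function name is only illustrative.

#include <stdio.h>
#include <stdint.h>

#define IBSCTL_LVT_OFFSET_VALID	(1ULL << 8)
#define IBSCTL_LVT_OFFSET_MASK	0x0F

static int decode_ibs_offset(uint64_t val)
{
	if (!(val & IBSCTL_LVT_OFFSET_VALID))
		return -1;		/* kernel code returns -EINVAL here */
	return (int)(val & IBSCTL_LVT_OFFSET_MASK);
}

int main(void)
{
	printf("offset: %d\n", decode_ibs_offset(0x101));	/* 1 */
	printf("offset: %d\n", decode_ibs_offset(0x001));	/* -1, valid bit clear */
	return 0;
}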
@@ -92,7 +92,7 @@ int show_interrupts(struct seq_file *p, void *v)
 		for_each_online_cpu(j)
 			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
 #endif
-		seq_printf(p, " %14s", irq_desc[i].chip->typename);
+		seq_printf(p, " %14s", irq_desc[i].chip->name);
 		seq_printf(p, " %s", action->name);
 
 		for (action=action->next; action; action = action->next)
@@ -141,9 +141,9 @@ typedef struct irq_data_isa {
 	__u8            rcvhdr[8];
 } irq_data_isa;
 
-typedef union irq_data {
+typedef union act2000_irq_data {
 	irq_data_isa isa;
-} irq_data;
+} act2000_irq_data;
 
 /*
  * Per card driver data
@@ -176,7 +176,7 @@ typedef struct act2000_card {
 	char   *status_buf_read;
 	char   *status_buf_write;
 	char   *status_buf_end;
-	irq_data idat;			/* Data used for IRQ handler */
+	act2000_irq_data idat;		/* Data used for IRQ handler */
 	isdn_if interface;		/* Interface to upper layer */
 	char regname[35];		/* Name used for request_region */
 } act2000_card;
@@ -801,6 +801,16 @@ static void closecard(int cardnr)
 	ll_unload(csta);
 }
 
+static irqreturn_t card_irq(int intno, void *dev_id)
+{
+	struct IsdnCardState *cs = dev_id;
+	irqreturn_t ret = cs->irq_func(intno, cs);
+
+	if (ret == IRQ_HANDLED)
+		cs->irq_cnt++;
+	return ret;
+}
+
 static int init_card(struct IsdnCardState *cs)
 {
 	int irq_cnt, cnt = 3, ret;
@@ -809,10 +819,10 @@ static int init_card(struct IsdnCardState *cs)
 		ret = cs->cardmsg(cs, CARD_INIT, NULL);
 		return(ret);
 	}
-	irq_cnt = kstat_irqs(cs->irq);
+	irq_cnt = cs->irq_cnt = 0;
 	printk(KERN_INFO "%s: IRQ %d count %d\n", CardType[cs->typ],
 	       cs->irq, irq_cnt);
-	if (request_irq(cs->irq, cs->irq_func, cs->irq_flags, "HiSax", cs)) {
+	if (request_irq(cs->irq, card_irq, cs->irq_flags, "HiSax", cs)) {
 		printk(KERN_WARNING "HiSax: couldn't get interrupt %d\n",
 		       cs->irq);
 		return 1;
@@ -822,8 +832,8 @@ static int init_card(struct IsdnCardState *cs)
 		/* Timeout 10ms */
 		msleep(10);
 		printk(KERN_INFO "%s: IRQ %d count %d\n",
-		       CardType[cs->typ], cs->irq, kstat_irqs(cs->irq));
-		if (kstat_irqs(cs->irq) == irq_cnt) {
+		       CardType[cs->typ], cs->irq, cs->irq_cnt);
+		if (cs->irq_cnt == irq_cnt) {
 			printk(KERN_WARNING
 			       "%s: IRQ(%d) getting no interrupts during init %d\n",
 			       CardType[cs->typ], cs->irq, 4 - cnt);
@@ -959,6 +959,7 @@ struct IsdnCardState {
 	u_long		event;
 	struct work_struct tqueue;
 	struct timer_list dbusytimer;
+	unsigned int	irq_cnt;
 #ifdef ERROR_STATISTIC
 	int		err_crc;
 	int		err_tx;
@@ -78,7 +78,7 @@ struct sih {
 	u8	irq_lines;	/* number of supported irq lines */
 
 	/* SIR ignored -- set interrupt, for testing only */
-	struct irq_data {
+	struct sih_irq_data {
 		u8	isr_offset;
 		u8	imr_offset;
 	} mask[2];
@@ -810,7 +810,7 @@ int twl4030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
 	twl4030_irq_chip = dummy_irq_chip;
 	twl4030_irq_chip.name = "twl4030";
 
-	twl4030_sih_irq_chip.ack = dummy_irq_chip.ack;
+	twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
 
 	for (i = irq_base; i < irq_end; i++) {
 		set_irq_chip_and_handler(i, &twl4030_irq_chip,
@@ -1221,9 +1221,9 @@ const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
 	}
 }
 
-void dmar_msi_unmask(unsigned int irq)
+void dmar_msi_unmask(struct irq_data *data)
 {
-	struct intel_iommu *iommu = get_irq_data(irq);
+	struct intel_iommu *iommu = irq_data_get_irq_data(data);
 	unsigned long flag;
 
 	/* unmask it */
@@ -1234,10 +1234,10 @@ void dmar_msi_unmask(unsigned int irq)
 	spin_unlock_irqrestore(&iommu->register_lock, flag);
 }
 
-void dmar_msi_mask(unsigned int irq)
+void dmar_msi_mask(struct irq_data *data)
 {
 	unsigned long flag;
-	struct intel_iommu *iommu = get_irq_data(irq);
+	struct intel_iommu *iommu = irq_data_get_irq_data(data);
 
 	/* mask it */
 	spin_lock_irqsave(&iommu->register_lock, flag);
@@ -57,28 +57,22 @@ void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg)
 	*msg = cfg->msg;
 }
 
-void mask_ht_irq(unsigned int irq)
+void mask_ht_irq(struct irq_data *data)
 {
-	struct ht_irq_cfg *cfg;
-	struct ht_irq_msg msg;
-
-	cfg = get_irq_data(irq);
+	struct ht_irq_cfg *cfg = irq_data_get_irq_data(data);
+	struct ht_irq_msg msg = cfg->msg;
 
-	msg = cfg->msg;
 	msg.address_lo |= 1;
-	write_ht_irq_msg(irq, &msg);
+	write_ht_irq_msg(data->irq, &msg);
 }
 
-void unmask_ht_irq(unsigned int irq)
+void unmask_ht_irq(struct irq_data *data)
 {
-	struct ht_irq_cfg *cfg;
-	struct ht_irq_msg msg;
-
-	cfg = get_irq_data(irq);
+	struct ht_irq_cfg *cfg = irq_data_get_irq_data(data);
+	struct ht_irq_msg msg = cfg->msg;
 
-	msg = cfg->msg;
 	msg.address_lo &= ~1;
-	write_ht_irq_msg(irq, &msg);
+	write_ht_irq_msg(data->irq, &msg);
 }
 
 /**
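The dmar and ht-irq hunks above both replace a by-number lookup (get_irq_data(irq)) with a direct dereference of the argument (irq_data_get_irq_data(data)). A minimal standalone sketch of that accessor pattern follows; the types are reduced models of the kernel's and the toy_ names are hypothetical.

#include <stdio.h>

struct irq_data {
	unsigned int irq;
	void *handler_data;
};

static inline void *irq_data_get_irq_data(struct irq_data *d)
{
	return d->handler_data;
}

struct ht_cfg { unsigned int msg_lo; };

static void toy_mask_ht_irq(struct irq_data *data)
{
	struct ht_cfg *cfg = irq_data_get_irq_data(data);

	cfg->msg_lo |= 1;	/* bit 0 masks the interrupt in the HT message */
	printf("irq %u masked, msg_lo=0x%x\n", data->irq, cfg->msg_lo);
}

int main(void)
{
	struct ht_cfg cfg = { .msg_lo = 0x10 };
	struct irq_data d = { .irq = 42, .handler_data = &cfg };

	toy_mask_ht_irq(&d);
	return 0;
}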
@@ -46,109 +46,24 @@ static __init int setup_intremap(char *str)
 }
 early_param("intremap", setup_intremap);
 
-struct irq_2_iommu {
-	struct intel_iommu *iommu;
-	u16 irte_index;
-	u16 sub_handle;
-	u8  irte_mask;
-};
-
-#ifdef CONFIG_GENERIC_HARDIRQS
-static struct irq_2_iommu *get_one_free_irq_2_iommu(int node)
-{
-	struct irq_2_iommu *iommu;
-
-	iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node);
-	printk(KERN_DEBUG "alloc irq_2_iommu on node %d\n", node);
-
-	return iommu;
-}
-
-static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
-{
-	struct irq_desc *desc;
-
-	desc = irq_to_desc(irq);
-
-	if (WARN_ON_ONCE(!desc))
-		return NULL;
-
-	return desc->irq_2_iommu;
-}
-
-static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
-{
-	struct irq_desc *desc;
-	struct irq_2_iommu *irq_iommu;
-
-	desc = irq_to_desc(irq);
-	if (!desc) {
-		printk(KERN_INFO "can not get irq_desc for %d\n", irq);
-		return NULL;
-	}
-
-	irq_iommu = desc->irq_2_iommu;
-
-	if (!irq_iommu)
-		desc->irq_2_iommu = get_one_free_irq_2_iommu(irq_node(irq));
-
-	return desc->irq_2_iommu;
-}
-
-#else /* !CONFIG_SPARSE_IRQ */
-
-static struct irq_2_iommu irq_2_iommuX[NR_IRQS];
-
-static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
-{
-	if (irq < nr_irqs)
-		return &irq_2_iommuX[irq];
-
-	return NULL;
-}
-static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
-{
-	return irq_2_iommu(irq);
-}
-#endif
-
 static DEFINE_SPINLOCK(irq_2_ir_lock);
 
-static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq)
-{
-	struct irq_2_iommu *irq_iommu;
-
-	irq_iommu = irq_2_iommu(irq);
-
-	if (!irq_iommu)
-		return NULL;
-
-	if (!irq_iommu->iommu)
-		return NULL;
-
-	return irq_iommu;
-}
-
-int irq_remapped(int irq)
+static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
 {
-	return valid_irq_2_iommu(irq) != NULL;
+	struct irq_cfg *cfg = get_irq_chip_data(irq);
+	return cfg ? &cfg->irq_2_iommu : NULL;
 }
 
 int get_irte(int irq, struct irte *entry)
 {
-	int index;
-	struct irq_2_iommu *irq_iommu;
+	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
 	unsigned long flags;
+	int index;
 
-	if (!entry)
+	if (!entry || !irq_iommu)
 		return -1;
 
 	spin_lock_irqsave(&irq_2_ir_lock, flags);
-	irq_iommu = valid_irq_2_iommu(irq);
-	if (!irq_iommu) {
-		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-		return -1;
-	}
 
 	index = irq_iommu->irte_index + irq_iommu->sub_handle;
 	*entry = *(irq_iommu->iommu->ir_table->base + index);
@@ -160,21 +75,15 @@ int get_irte(int irq, struct irte *entry)
 int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 {
 	struct ir_table *table = iommu->ir_table;
-	struct irq_2_iommu *irq_iommu;
+	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
 	u16 index, start_index;
 	unsigned int mask = 0;
 	unsigned long flags;
 	int i;
 
-	if (!count)
+	if (!count || !irq_iommu)
 		return -1;
 
-#ifndef CONFIG_SPARSE_IRQ
-	/* protect irq_2_iommu_alloc later */
-	if (irq >= nr_irqs)
-		return -1;
-#endif
-
 	/*
 	 * start the IRTE search from index 0.
 	 */
@@ -214,13 +123,6 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 	for (i = index; i < index + count; i++)
 		table->base[i].present = 1;
 
-	irq_iommu = irq_2_iommu_alloc(irq);
-	if (!irq_iommu) {
-		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-		printk(KERN_ERR "can't allocate irq_2_iommu\n");
-		return -1;
-	}
-
 	irq_iommu->iommu = iommu;
 	irq_iommu->irte_index = index;
 	irq_iommu->sub_handle = 0;
@@ -244,17 +146,14 @@ static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
 
 int map_irq_to_irte_handle(int irq, u16 *sub_handle)
 {
-	int index;
-	struct irq_2_iommu *irq_iommu;
+	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
 	unsigned long flags;
+	int index;
+
+	if (!irq_iommu)
+		return -1;
 
 	spin_lock_irqsave(&irq_2_ir_lock, flags);
-	irq_iommu = valid_irq_2_iommu(irq);
-	if (!irq_iommu) {
-		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-		return -1;
-	}
-
 	*sub_handle = irq_iommu->sub_handle;
 	index = irq_iommu->irte_index;
 	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
@@ -263,18 +162,13 @@ int map_irq_to_irte_handle(int irq, u16 *sub_handle)
 
 int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
 {
-	struct irq_2_iommu *irq_iommu;
+	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
 	unsigned long flags;
 
-	spin_lock_irqsave(&irq_2_ir_lock, flags);
-
-	irq_iommu = irq_2_iommu_alloc(irq);
-
-	if (!irq_iommu) {
-		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-		printk(KERN_ERR "can't allocate irq_2_iommu\n");
+	if (!irq_iommu)
 		return -1;
-	}
+
+	spin_lock_irqsave(&irq_2_ir_lock, flags);
 
 	irq_iommu->iommu = iommu;
 	irq_iommu->irte_index = index;
@@ -286,43 +180,18 @@ int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
 	return 0;
 }
 
-int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
-{
-	struct irq_2_iommu *irq_iommu;
-	unsigned long flags;
-
-	spin_lock_irqsave(&irq_2_ir_lock, flags);
-	irq_iommu = valid_irq_2_iommu(irq);
-	if (!irq_iommu) {
-		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-		return -1;
-	}
-
-	irq_iommu->iommu = NULL;
-	irq_iommu->irte_index = 0;
-	irq_iommu->sub_handle = 0;
-	irq_2_iommu(irq)->irte_mask = 0;
-
-	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-
-	return 0;
-}
-
 int modify_irte(int irq, struct irte *irte_modified)
 {
-	int rc;
-	int index;
-	struct irte *irte;
+	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
 	struct intel_iommu *iommu;
-	struct irq_2_iommu *irq_iommu;
 	unsigned long flags;
+	struct irte *irte;
+	int rc, index;
+
+	if (!irq_iommu)
+		return -1;
 
 	spin_lock_irqsave(&irq_2_ir_lock, flags);
-	irq_iommu = valid_irq_2_iommu(irq);
-	if (!irq_iommu) {
-		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-		return -1;
-	}
 
 	iommu = irq_iommu->iommu;
@@ -339,31 +208,6 @@ int modify_irte(int irq, struct irte *irte_modified)
 	return rc;
 }
 
-int flush_irte(int irq)
-{
-	int rc;
-	int index;
-	struct intel_iommu *iommu;
-	struct irq_2_iommu *irq_iommu;
-	unsigned long flags;
-
-	spin_lock_irqsave(&irq_2_ir_lock, flags);
-	irq_iommu = valid_irq_2_iommu(irq);
-	if (!irq_iommu) {
-		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-		return -1;
-	}
-
-	iommu = irq_iommu->iommu;
-
-	index = irq_iommu->irte_index + irq_iommu->sub_handle;
-
-	rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
-	spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-
-	return rc;
-}
-
 struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
 {
 	int i;
@@ -420,16 +264,14 @@ static int clear_entries(struct irq_2_iommu *irq_iommu)
 
 int free_irte(int irq)
 {
-	int rc = 0;
-	struct irq_2_iommu *irq_iommu;
+	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
 	unsigned long flags;
+	int rc;
+
+	if (!irq_iommu)
+		return -1;
 
 	spin_lock_irqsave(&irq_2_ir_lock, flags);
-	irq_iommu = valid_irq_2_iommu(irq);
-	if (!irq_iommu) {
-		spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-		return -1;
-	}
 
 	rc = clear_entries(irq_iommu);
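The interrupt-remapping hunks above all follow from one structural change: irq_2_iommu stops being a separately kzalloc'ed object hung off irq_desc and becomes embedded in the per-irq chip data (irq_cfg), so the lookup can only fail when the irq itself is bogus and every "can't allocate irq_2_iommu" branch disappears. A minimal standalone sketch of that embedding, with reduced struct layouts and a hypothetical toy_ lookup standing in for get_irq_chip_data():

#include <stdio.h>

struct irq_2_iommu {
	void *iommu;
	unsigned short irte_index;
	unsigned short sub_handle;
	unsigned char irte_mask;
};

struct irq_cfg {			/* per-irq chip data */
	struct irq_2_iommu irq_2_iommu;	/* embedded, no kzalloc needed */
};

static struct irq_cfg *toy_get_irq_chip_data(unsigned int irq)
{
	static struct irq_cfg cfg;	/* stands in for the real lookup */
	return irq == 0 ? NULL : &cfg;
}

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	struct irq_cfg *cfg = toy_get_irq_chip_data(irq);
	return cfg ? &cfg->irq_2_iommu : NULL;
}

int main(void)
{
	printf("irq 0: %p\n", (void *)irq_2_iommu(0));	/* NULL, bogus irq */
	printf("irq 5: %p\n", (void *)irq_2_iommu(5));	/* embedded slot */
	return 0;
}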
@@ -170,33 +170,31 @@ static void msix_mask_irq(struct msi_desc *desc, u32 flag)
 	desc->masked = __msix_mask_irq(desc, flag);
 }
 
-static void msi_set_mask_bit(unsigned irq, u32 flag)
+static void msi_set_mask_bit(struct irq_data *data, u32 flag)
 {
-	struct msi_desc *desc = get_irq_msi(irq);
+	struct msi_desc *desc = irq_data_get_msi(data);
 
 	if (desc->msi_attrib.is_msix) {
 		msix_mask_irq(desc, flag);
 		readl(desc->mask_base);		/* Flush write to device */
 	} else {
-		unsigned offset = irq - desc->dev->irq;
+		unsigned offset = data->irq - desc->dev->irq;
 		msi_mask_irq(desc, 1 << offset, flag << offset);
 	}
 }
 
-void mask_msi_irq(unsigned int irq)
+void mask_msi_irq(struct irq_data *data)
 {
-	msi_set_mask_bit(irq, 1);
+	msi_set_mask_bit(data, 1);
 }
 
-void unmask_msi_irq(unsigned int irq)
+void unmask_msi_irq(struct irq_data *data)
 {
-	msi_set_mask_bit(irq, 0);
+	msi_set_mask_bit(data, 0);
 }
 
-void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
+void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
 {
-	struct msi_desc *entry = get_irq_desc_msi(desc);
-
 	BUG_ON(entry->dev->current_state != PCI_D0);
 
 	if (entry->msi_attrib.is_msix) {
@@ -227,15 +225,13 @@ void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
 
 void read_msi_msg(unsigned int irq, struct msi_msg *msg)
 {
-	struct irq_desc *desc = irq_to_desc(irq);
+	struct msi_desc *entry = get_irq_msi(irq);
 
-	read_msi_msg_desc(desc, msg);
+	__read_msi_msg(entry, msg);
 }
 
-void get_cached_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
+void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
 {
-	struct msi_desc *entry = get_irq_desc_msi(desc);
-
 	/* Assert that the cache is valid, assuming that
 	 * valid messages are not all-zeroes. */
 	BUG_ON(!(entry->msg.address_hi | entry->msg.address_lo |
@@ -246,15 +242,13 @@ void get_cached_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
 
 void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
 {
-	struct irq_desc *desc = irq_to_desc(irq);
+	struct msi_desc *entry = get_irq_msi(irq);
 
-	get_cached_msi_msg_desc(desc, msg);
+	__get_cached_msi_msg(entry, msg);
 }
 
-void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
+void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
 {
-	struct msi_desc *entry = get_irq_desc_msi(desc);
-
 	if (entry->dev->current_state != PCI_D0) {
 		/* Don't touch the hardware now */
 	} else if (entry->msi_attrib.is_msix) {
@@ -292,9 +286,9 @@ void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
 
 void write_msi_msg(unsigned int irq, struct msi_msg *msg)
 {
-	struct irq_desc *desc = irq_to_desc(irq);
+	struct msi_desc *entry = get_irq_msi(irq);
 
-	write_msi_msg_desc(desc, msg);
+	__write_msi_msg(entry, msg);
 }
 
 static void free_msi_irqs(struct pci_dev *dev)
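The msi.c hunks above re-split each helper into a worker that takes the msi_desc directly (the double-underscore variant) and an irq-number entry point that shrinks to a lookup plus a call. A minimal standalone sketch of that split, with reduced types and a hypothetical toy_ lookup in place of the real get_irq_msi():

#include <stdio.h>

struct msi_msg  { unsigned int address_lo, address_hi, data; };
struct msi_desc { struct msi_msg msg; };

static void __toy_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
	*msg = entry->msg;		/* worker only sees the descriptor */
}

static struct msi_desc *toy_get_irq_msi(unsigned int irq)
{
	static struct msi_desc desc = { .msg = { 0xfee00000, 0, 0x4021 } };
	(void)irq;
	return &desc;			/* stands in for the irq_data lookup */
}

static void toy_read_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	__toy_read_msi_msg(toy_get_irq_msi(irq), msg);	/* thin wrapper */
}

int main(void)
{
	struct msi_msg m;

	toy_read_msi_msg(17, &m);
	printf("addr 0x%x data 0x%x\n", m.address_lo, m.data);
	return 0;
}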
@@ -338,30 +338,29 @@ static void unmask_evtchn(int port)
 
 static int find_unbound_irq(void)
 {
-	int irq;
-	struct irq_desc *desc;
+	struct irq_data *data;
+	int irq, res;
 
 	for (irq = 0; irq < nr_irqs; irq++) {
-		desc = irq_to_desc(irq);
+		data = irq_get_irq_data(irq);
 		/* only 0->15 have init'd desc; handle irq > 16 */
-		if (desc == NULL)
+		if (!data)
 			break;
-		if (desc->chip == &no_irq_chip)
+		if (data->chip == &no_irq_chip)
 			break;
-		if (desc->chip != &xen_dynamic_chip)
+		if (data->chip != &xen_dynamic_chip)
 			continue;
 		if (irq_info[irq].type == IRQT_UNBOUND)
-			break;
+			return irq;
 	}
 
 	if (irq == nr_irqs)
 		panic("No available IRQ to bind to: increase nr_irqs!\n");
 
-	desc = irq_to_desc_alloc_node(irq, 0);
-	if (WARN_ON(desc == NULL))
-		return -1;
+	res = irq_alloc_desc_at(irq, 0);
 
-	dynamic_irq_init_keep_chip_data(irq);
+	if (WARN_ON(res != irq))
+		return -1;
 
 	return irq;
 }
@@ -495,7 +494,7 @@ static void unbind_from_irq(unsigned int irq)
 	if (irq_info[irq].type != IRQT_UNBOUND) {
 		irq_info[irq] = mk_unbound_info();
 
-		dynamic_irq_cleanup(irq);
+		irq_free_desc(irq);
 	}
 
 	spin_unlock(&irq_mapping_update_lock);
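The Xen hunk above also shows the new allocator's error convention: irq_alloc_desc_at() returns the irq it actually reserved, so the caller compares the result against the requested number instead of testing a descriptor pointer. A minimal standalone sketch of that check, reusing the toy allocator idea from earlier; all names here are hypothetical.

#include <stdio.h>

static int toy_irq_alloc_desc_at(unsigned int at, int node)
{
	(void)node;
	return at == 3 ? -1 : (int)at;	/* pretend slot 3 is taken */
}

static int toy_bind(unsigned int irq)
{
	int res = toy_irq_alloc_desc_at(irq, 0);

	if (res != (int)irq)		/* mirrors WARN_ON(res != irq) */
		return -1;
	return (int)irq;
}

int main(void)
{
	printf("bind 7: %d\n", toy_bind(7));	/* 7 */
	printf("bind 3: %d\n", toy_bind(3));	/* -1 */
	return 0;
}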
@@ -106,6 +106,7 @@ struct irte {
 		__u64 high;
 	};
 };
+
 #ifdef CONFIG_INTR_REMAP
 extern int intr_remapping_enabled;
 extern int intr_remapping_supported(void);
@@ -119,11 +120,8 @@ extern int alloc_irte(struct intel_iommu *iommu, int irq, u16 count);
 extern int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index,
 			u16 sub_handle);
 extern int map_irq_to_irte_handle(int irq, u16 *sub_handle);
-extern int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index);
-extern int flush_irte(int irq);
 extern int free_irte(int irq);
 
-extern int irq_remapped(int irq);
 extern struct intel_iommu *map_dev_to_ir(struct pci_dev *dev);
 extern struct intel_iommu *map_ioapic_to_ir(int apic);
 extern struct intel_iommu *map_hpet_to_ir(u8 id);
@@ -177,7 +175,6 @@ static inline int set_msi_sid(struct irte *irte, struct pci_dev *dev)
 	return 0;
 }
 
-#define irq_remapped(irq)		(0)
 #define enable_intr_remapping(mode)	(-1)
 #define disable_intr_remapping()	(0)
 #define reenable_intr_remapping(mode)	(0)
@@ -187,8 +184,9 @@ static inline int set_msi_sid(struct irte *irte, struct pci_dev *dev)
 /* Can't use the common MSI interrupt functions
  * since DMAR is not a pci device
  */
-extern void dmar_msi_unmask(unsigned int irq);
-extern void dmar_msi_mask(unsigned int irq);
+struct irq_data;
+extern void dmar_msi_unmask(struct irq_data *data);
+extern void dmar_msi_mask(struct irq_data *data);
 extern void dmar_msi_read(int irq, struct msi_msg *msg);
 extern void dmar_msi_write(int irq, struct msi_msg *msg);
 extern int dmar_set_interrupt(struct intel_iommu *iommu);
@@ -9,8 +9,9 @@ struct ht_irq_msg {
 /* Helper functions.. */
 void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg);
 void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg);
-void mask_ht_irq(unsigned int irq);
-void unmask_ht_irq(unsigned int irq);
+struct irq_data;
+void mask_ht_irq(struct irq_data *data);
+void unmask_ht_irq(struct irq_data *data);
 
 /* The arch hook for getting things started */
 int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev);
@@ -647,11 +647,8 @@ static inline void init_irq_proc(void)
 struct seq_file;
 int show_interrupts(struct seq_file *p, void *v);
 
-struct irq_desc;
-
 extern int early_irq_init(void);
 extern int arch_probe_nr_irqs(void);
 extern int arch_early_irq_init(void);
-extern int arch_init_chip_data(struct irq_desc *desc, int node);
 
 #endif
@@ -72,6 +72,10 @@ typedef void (*irq_flow_handler_t)(unsigned int irq,
 #define IRQ_ONESHOT		0x08000000	/* IRQ is not unmasked after hardirq */
 #define IRQ_NESTED_THREAD	0x10000000	/* IRQ is nested into another, no own handler thread */
 
+#define IRQF_MODIFY_MASK	\
+	(IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
+	 IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL)
+
 #ifdef CONFIG_IRQ_PER_CPU
 # define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)
 # define IRQ_NO_BALANCING_MASK	(IRQ_PER_CPU | IRQ_NO_BALANCING)
@@ -80,36 +84,77 @@ typedef void (*irq_flow_handler_t)(unsigned int irq,
 # define IRQ_NO_BALANCING_MASK	IRQ_NO_BALANCING
 #endif
 
 struct proc_dir_entry;
 struct msi_desc;
 
 /**
+ * struct irq_data - per irq and irq chip data passed down to chip functions
+ * @irq:		interrupt number
+ * @node:		node index useful for balancing
+ * @chip:		low level interrupt hardware access
+ * @handler_data:	per-IRQ data for the irq_chip methods
+ * @chip_data:		platform-specific per-chip private data for the chip
+ *			methods, to allow shared chip implementations
+ * @msi_desc:		MSI descriptor
+ * @affinity:		IRQ affinity on SMP
+ *
+ * The fields here need to overlay the ones in irq_desc until we
+ * cleaned up the direct references and switched everything over to
+ * irq_data.
+ */
+struct irq_data {
+	unsigned int		irq;
+	unsigned int		node;
+	struct irq_chip		*chip;
+	void			*handler_data;
+	void			*chip_data;
+	struct msi_desc		*msi_desc;
+#ifdef CONFIG_SMP
+	cpumask_var_t		affinity;
+#endif
+};
+
+/**
  * struct irq_chip - hardware interrupt chip descriptor
  *
  * @name:		name for /proc/interrupts
- * @startup:		start up the interrupt (defaults to ->enable if NULL)
- * @shutdown:		shut down the interrupt (defaults to ->disable if NULL)
- * @enable:		enable the interrupt (defaults to chip->unmask if NULL)
- * @disable:		disable the interrupt
- * @ack:		start of a new interrupt
- * @mask:		mask an interrupt source
- * @mask_ack:		ack and mask an interrupt source
- * @unmask:		unmask an interrupt source
- * @eoi:		end of interrupt - chip level
- * @end:		end of interrupt - flow level
- * @set_affinity:	set the CPU affinity on SMP machines
- * @retrigger:		resend an IRQ to the CPU
- * @set_type:		set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ
- * @set_wake:		enable/disable power-management wake-on of an IRQ
- * @bus_lock:		function to lock access to slow bus (i2c) chips
- * @bus_sync_unlock:	function to sync and unlock slow bus (i2c) chips
+ * @startup:		deprecated, replaced by irq_startup
+ * @shutdown:		deprecated, replaced by irq_shutdown
+ * @enable:		deprecated, replaced by irq_enable
+ * @disable:		deprecated, replaced by irq_disable
+ * @ack:		deprecated, replaced by irq_ack
+ * @mask:		deprecated, replaced by irq_mask
+ * @mask_ack:		deprecated, replaced by irq_mask_ack
+ * @unmask:		deprecated, replaced by irq_unmask
+ * @eoi:		deprecated, replaced by irq_eoi
+ * @end:		deprecated, will go away with __do_IRQ()
+ * @set_affinity:	deprecated, replaced by irq_set_affinity
+ * @retrigger:		deprecated, replaced by irq_retrigger
+ * @set_type:		deprecated, replaced by irq_set_type
+ * @set_wake:		deprecated, replaced by irq_wake
+ * @bus_lock:		deprecated, replaced by irq_bus_lock
+ * @bus_sync_unlock:	deprecated, replaced by irq_bus_sync_unlock
+ *
+ * @irq_startup:	start up the interrupt (defaults to ->enable if NULL)
+ * @irq_shutdown:	shut down the interrupt (defaults to ->disable if NULL)
+ * @irq_enable:		enable the interrupt (defaults to chip->unmask if NULL)
+ * @irq_disable:	disable the interrupt
+ * @irq_ack:		start of a new interrupt
+ * @irq_mask:		mask an interrupt source
+ * @irq_mask_ack:	ack and mask an interrupt source
+ * @irq_unmask:		unmask an interrupt source
+ * @irq_eoi:		end of interrupt
+ * @irq_set_affinity:	set the CPU affinity on SMP machines
+ * @irq_retrigger:	resend an IRQ to the CPU
+ * @irq_set_type:	set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ
+ * @irq_set_wake:	enable/disable power-management wake-on of an IRQ
+ * @irq_bus_lock:	function to lock access to slow bus (i2c) chips
+ * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips
  *
  * @release:		release function solely used by UML
  * @typename:		obsoleted by name, kept as migration helper
  */
 struct irq_chip {
 	const char	*name;
+#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
 	unsigned int	(*startup)(unsigned int irq);
 	void		(*shutdown)(unsigned int irq);
 	void		(*enable)(unsigned int irq);
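With struct irq_data in place, a chip written against the new half of the structure fills in only the irq_* members and reaches all state through its argument. A minimal standalone sketch follows; the struct is cut down to the members the example uses and the demo_ names are purely illustrative, not the kernel's.

#include <stdio.h>

struct irq_data {
	unsigned int irq;
	void *chip_data;
};

struct irq_chip {
	const char *name;
	void (*irq_mask)(struct irq_data *data);
	void (*irq_unmask)(struct irq_data *data);
	void (*irq_ack)(struct irq_data *data);
};

static void demo_mask(struct irq_data *d)   { printf("mask %u\n", d->irq); }
static void demo_unmask(struct irq_data *d) { printf("unmask %u\n", d->irq); }
static void demo_ack(struct irq_data *d)    { printf("ack %u\n", d->irq); }

static struct irq_chip demo_chip = {
	.name		= "demo",
	.irq_mask	= demo_mask,
	.irq_unmask	= demo_unmask,
	.irq_ack	= demo_ack,
};

int main(void)
{
	struct irq_data d = { .irq = 11, .chip_data = NULL };

	demo_chip.irq_ack(&d);		/* the core invokes these through */
	demo_chip.irq_mask(&d);		/* the chip, passing irq_data down */
	demo_chip.irq_unmask(&d);
	return 0;
}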
@@ -130,154 +175,66 @@ struct irq_chip {
 
 	void		(*bus_lock)(unsigned int irq);
 	void		(*bus_sync_unlock)(unsigned int irq);
+#endif
+	unsigned int	(*irq_startup)(struct irq_data *data);
+	void		(*irq_shutdown)(struct irq_data *data);
+	void		(*irq_enable)(struct irq_data *data);
+	void		(*irq_disable)(struct irq_data *data);
+
+	void		(*irq_ack)(struct irq_data *data);
+	void		(*irq_mask)(struct irq_data *data);
+	void		(*irq_mask_ack)(struct irq_data *data);
+	void		(*irq_unmask)(struct irq_data *data);
+	void		(*irq_eoi)(struct irq_data *data);
+
+	int		(*irq_set_affinity)(struct irq_data *data, const struct cpumask *dest, bool force);
+	int		(*irq_retrigger)(struct irq_data *data);
+	int		(*irq_set_type)(struct irq_data *data, unsigned int flow_type);
+	int		(*irq_set_wake)(struct irq_data *data, unsigned int on);
+
+	void		(*irq_bus_lock)(struct irq_data *data);
+	void		(*irq_bus_sync_unlock)(struct irq_data *data);
 
 	/* Currently used only by UML, might disappear one day.*/
 #ifdef CONFIG_IRQ_RELEASE_METHOD
 	void		(*release)(unsigned int irq, void *dev_id);
 #endif
 	/*
 	 * For compatibility, ->typename is copied into ->name.
 	 * Will disappear.
 	 */
 	const char	*typename;
 };
 
-struct timer_rand_state;
-struct irq_2_iommu;
-/**
- * struct irq_desc - interrupt descriptor
- * @irq:		interrupt number for this descriptor
- * @timer_rand_state:	pointer to timer rand state struct
- * @kstat_irqs:		irq stats per cpu
- * @irq_2_iommu:	iommu with this irq
- * @handle_irq:		highlevel irq-events handler [if NULL, __do_IRQ()]
- * @chip:		low level interrupt hardware access
- * @msi_desc:		MSI descriptor
- * @handler_data:	per-IRQ data for the irq_chip methods
- * @chip_data:		platform-specific per-chip private data for the chip
- *			methods, to allow shared chip implementations
- * @action:		the irq action chain
- * @status:		status information
- * @depth:		disable-depth, for nested irq_disable() calls
- * @wake_depth:		enable depth, for multiple set_irq_wake() callers
- * @irq_count:		stats field to detect stalled irqs
- * @last_unhandled:	aging timer for unhandled count
- * @irqs_unhandled:	stats field for spurious unhandled interrupts
- * @lock:		locking for SMP
- * @affinity:		IRQ affinity on SMP
- * @node:		node index useful for balancing
- * @pending_mask:	pending rebalanced interrupts
- * @threads_active:	number of irqaction threads currently running
- * @wait_for_threads:	wait queue for sync_irq to wait for threaded handlers
- * @dir:		/proc/irq/ procfs entry
- * @name:		flow handler name for /proc/interrupts output
- */
-struct irq_desc {
-	unsigned int		irq;
-	struct timer_rand_state *timer_rand_state;
-	unsigned int		*kstat_irqs;
-#ifdef CONFIG_INTR_REMAP
-	struct irq_2_iommu	*irq_2_iommu;
-#endif
-	irq_flow_handler_t	handle_irq;
-	struct irq_chip		*chip;
-	struct msi_desc		*msi_desc;
-	void			*handler_data;
-	void			*chip_data;
-	struct irqaction	*action;	/* IRQ action list */
-	unsigned int		status;		/* IRQ status */
-
-	unsigned int		depth;		/* nested irq disables */
-	unsigned int		wake_depth;	/* nested wake enables */
-	unsigned int		irq_count;	/* For detecting broken IRQs */
-	unsigned long		last_unhandled;	/* Aging timer for unhandled count */
-	unsigned int		irqs_unhandled;
-	raw_spinlock_t		lock;
-#ifdef CONFIG_SMP
-	cpumask_var_t		affinity;
-	const struct cpumask	*affinity_hint;
-	unsigned int		node;
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-	cpumask_var_t		pending_mask;
-#endif
-#endif
-	atomic_t		threads_active;
-	wait_queue_head_t       wait_for_threads;
-#ifdef CONFIG_PROC_FS
-	struct proc_dir_entry	*dir;
-#endif
-	const char		*name;
-} ____cacheline_internodealigned_in_smp;
-
-extern void arch_init_copy_chip_data(struct irq_desc *old_desc,
-					struct irq_desc *desc, int node);
-extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc);
-
-#ifndef CONFIG_SPARSE_IRQ
-extern struct irq_desc irq_desc[NR_IRQS];
-#endif
-
-#ifdef CONFIG_NUMA_IRQ_DESC
-extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int node);
-#else
-static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
-{
-	return desc;
-}
-#endif
-
-extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node);
+/* This include will go away once we isolated irq_desc usage to core code */
+#include <linux/irqdesc.h>
 
 /*
  * Pick up the arch-dependent methods:
  */
 #include <asm/hw_irq.h>
 
+#ifndef NR_IRQS_LEGACY
+# define NR_IRQS_LEGACY 0
+#endif
+
+#ifndef ARCH_IRQ_INIT_FLAGS
+# define ARCH_IRQ_INIT_FLAGS	0
+#endif
+
+#define IRQ_DEFAULT_INIT_FLAGS	(IRQ_DISABLED | ARCH_IRQ_INIT_FLAGS)
+
 struct irqaction;
 extern int setup_irq(unsigned int irq, struct irqaction *new);
 extern void remove_irq(unsigned int irq, struct irqaction *act);
 
 #ifdef CONFIG_GENERIC_HARDIRQS
 
-#ifdef CONFIG_SMP
-
-#ifdef CONFIG_GENERIC_PENDING_IRQ
+#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
 
 void move_native_irq(int irq);
 void move_masked_irq(int irq);
 
-#else /* CONFIG_GENERIC_PENDING_IRQ */
-
-static inline void move_irq(int irq)
-{
-}
-
-static inline void move_native_irq(int irq)
-{
-}
-
-static inline void move_masked_irq(int irq)
-{
-}
-
-#endif /* CONFIG_GENERIC_PENDING_IRQ */
-
-#else /* CONFIG_SMP */
-
-#define move_native_irq(x)
-#define move_masked_irq(x)
-
-#endif /* CONFIG_SMP */
+#else
+static inline void move_native_irq(int irq) { }
+static inline void move_masked_irq(int irq) { }
+#endif
 
 extern int no_irq_affinity;
 
-static inline int irq_balancing_disabled(unsigned int irq)
-{
-	struct irq_desc *desc;
-
-	desc = irq_to_desc(irq);
-	return desc->status & IRQ_NO_BALANCING_MASK;
-}
-
 /* Handle irq action chains: */
 extern irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action);
@@ -293,42 +250,10 @@ extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
 extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
 extern void handle_nested_irq(unsigned int irq);
 
-/*
- * Monolithic do_IRQ implementation.
- */
-#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
-extern unsigned int __do_IRQ(unsigned int irq);
-#endif
-
-/*
- * Architectures call this to let the generic IRQ layer
- * handle an interrupt. If the descriptor is attached to an
- * irqchip-style controller then we call the ->handle_irq() handler,
- * and it calls __do_IRQ() if it's attached to an irqtype-style controller.
- */
-static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
-{
-#ifdef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
-	desc->handle_irq(irq, desc);
-#else
-	if (likely(desc->handle_irq))
-		desc->handle_irq(irq, desc);
-	else
-		__do_IRQ(irq);
-#endif
-}
-
-static inline void generic_handle_irq(unsigned int irq)
-{
-	generic_handle_irq_desc(irq, irq_to_desc(irq));
-}
-
 /* Handling of unhandled and spurious interrupts: */
 extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
 			   irqreturn_t action_ret);
 
 /* Resending of interrupts :*/
 void check_irq_resend(struct irq_desc *desc, unsigned int irq);
 
 /* Enable/disable irq debugging output: */
 extern int noirqdebug_setup(char *str);
@@ -351,16 +276,6 @@ extern void
 __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
 		  const char *name);
 
-/* caller has locked the irq_desc and both params are valid */
-static inline void __set_irq_handler_unlocked(int irq,
-					      irq_flow_handler_t handler)
-{
-	struct irq_desc *desc;
-
-	desc = irq_to_desc(irq);
-	desc->handle_irq = handler;
-}
-
 /*
  * Set a highlevel flow handler for a given IRQ:
  */
@@ -384,26 +299,42 @@ set_irq_chained_handler(unsigned int irq,
 
 extern void set_irq_nested_thread(unsigned int irq, int nest);
 
-extern void set_irq_noprobe(unsigned int irq);
-extern void set_irq_probe(unsigned int irq);
+void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set);
+
+static inline void irq_set_status_flags(unsigned int irq, unsigned long set)
+{
+	irq_modify_status(irq, 0, set);
+}
+
+static inline void irq_clear_status_flags(unsigned int irq, unsigned long clr)
+{
+	irq_modify_status(irq, clr, 0);
+}
+
+static inline void set_irq_noprobe(unsigned int irq)
+{
+	irq_modify_status(irq, 0, IRQ_NOPROBE);
+}
+
+static inline void set_irq_probe(unsigned int irq)
+{
+	irq_modify_status(irq, IRQ_NOPROBE, 0);
+}
 
 /* Handle dynamic irq creation and destruction */
 extern unsigned int create_irq_nr(unsigned int irq_want, int node);
 extern int create_irq(void);
 extern void destroy_irq(unsigned int irq);
 
-/* Test to see if a driver has successfully requested an irq */
-static inline int irq_has_action(unsigned int irq)
-{
-	struct irq_desc *desc = irq_to_desc(irq);
-	return desc->action != NULL;
-}
-
-/* Dynamic irq helper functions */
-extern void dynamic_irq_init(unsigned int irq);
-void dynamic_irq_init_keep_chip_data(unsigned int irq);
+/*
+ * Dynamic irq helper functions. Obsolete. Use irq_alloc_desc* and
+ * irq_free_desc instead.
+ */
 extern void dynamic_irq_cleanup(unsigned int irq);
-void dynamic_irq_cleanup_keep_chip_data(unsigned int irq);
+static inline void dynamic_irq_init(unsigned int irq)
+{
+	dynamic_irq_cleanup(irq);
+}
 
 /* Set/get chip/data for an IRQ: */
 extern int set_irq_chip(unsigned int irq, struct irq_chip *chip);
@@ -411,114 +342,78 @@ extern int set_irq_data(unsigned int irq, void *data);
 extern int set_irq_chip_data(unsigned int irq, void *data);
 extern int set_irq_type(unsigned int irq, unsigned int type);
 extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
+extern struct irq_data *irq_get_irq_data(unsigned int irq);
 
-#define get_irq_chip(irq)	(irq_to_desc(irq)->chip)
-#define get_irq_chip_data(irq)	(irq_to_desc(irq)->chip_data)
-#define get_irq_data(irq)	(irq_to_desc(irq)->handler_data)
-#define get_irq_msi(irq)	(irq_to_desc(irq)->msi_desc)
-
-#define get_irq_desc_chip(desc)		((desc)->chip)
-#define get_irq_desc_chip_data(desc)	((desc)->chip_data)
-#define get_irq_desc_data(desc)		((desc)->handler_data)
-#define get_irq_desc_msi(desc)		((desc)->msi_desc)
+static inline struct irq_chip *get_irq_chip(unsigned int irq)
+{
+	struct irq_data *d = irq_get_irq_data(irq);
+	return d ? d->chip : NULL;
+}
+
+static inline struct irq_chip *irq_data_get_irq_chip(struct irq_data *d)
+{
+	return d->chip;
+}
+
+static inline void *get_irq_chip_data(unsigned int irq)
+{
+	struct irq_data *d = irq_get_irq_data(irq);
+	return d ? d->chip_data : NULL;
+}
+
+static inline void *irq_data_get_irq_chip_data(struct irq_data *d)
+{
+	return d->chip_data;
+}
+
+static inline void *get_irq_data(unsigned int irq)
+{
+	struct irq_data *d = irq_get_irq_data(irq);
+	return d ? d->handler_data : NULL;
+}
+
+static inline void *irq_data_get_irq_data(struct irq_data *d)
+{
+	return d->handler_data;
+}
+
+static inline struct msi_desc *get_irq_msi(unsigned int irq)
+{
+	struct irq_data *d = irq_get_irq_data(irq);
+	return d ? d->msi_desc : NULL;
+}
+
+static inline struct msi_desc *irq_data_get_msi(struct irq_data *d)
+{
+	return d->msi_desc;
+}
+
+int irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node);
+void irq_free_descs(unsigned int irq, unsigned int cnt);
+int irq_reserve_irqs(unsigned int from, unsigned int cnt);
+
+static inline int irq_alloc_desc(int node)
+{
+	return irq_alloc_descs(-1, 0, 1, node);
+}
+
+static inline int irq_alloc_desc_at(unsigned int at, int node)
+{
+	return irq_alloc_descs(at, at, 1, node);
+}
+
+static inline int irq_alloc_desc_from(unsigned int from, int node)
+{
+	return irq_alloc_descs(-1, from, 1, node);
+}
+
+static inline void irq_free_desc(unsigned int irq)
+{
+	irq_free_descs(irq, 1);
+}
 
 #endif /* CONFIG_GENERIC_HARDIRQS */
 
 #endif /* !CONFIG_S390 */
 
-#ifdef CONFIG_SMP
-/**
- * alloc_desc_masks - allocate cpumasks for irq_desc
- * @desc:	pointer to irq_desc struct
- * @node:	node which will be handling the cpumasks
- * @boot:	true if need bootmem
- *
- * Allocates affinity and pending_mask cpumask if required.
- * Returns true if successful (or not required).
- */
-static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
-				    bool boot)
-{
-	gfp_t gfp = GFP_ATOMIC;
-
-	if (boot)
-		gfp = GFP_NOWAIT;
-
-#ifdef CONFIG_CPUMASK_OFFSTACK
-	if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
-		return false;
-
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-	if (!alloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
-		free_cpumask_var(desc->affinity);
-		return false;
-	}
-#endif
-#endif
-	return true;
-}
-
-static inline void init_desc_masks(struct irq_desc *desc)
-{
-	cpumask_setall(desc->affinity);
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-	cpumask_clear(desc->pending_mask);
-#endif
-}
-
-/**
- * init_copy_desc_masks - copy cpumasks for irq_desc
- * @old_desc:	pointer to old irq_desc struct
- * @new_desc:	pointer to new irq_desc struct
- *
- * Insures affinity and pending_masks are copied to new irq_desc.
- * If !CONFIG_CPUMASKS_OFFSTACK the cpumasks are embedded in the
- * irq_desc struct so the copy is redundant.
- */
-static inline void init_copy_desc_masks(struct irq_desc *old_desc,
-					struct irq_desc *new_desc)
-{
-#ifdef CONFIG_CPUMASK_OFFSTACK
-	cpumask_copy(new_desc->affinity, old_desc->affinity);
-
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-	cpumask_copy(new_desc->pending_mask, old_desc->pending_mask);
-#endif
-#endif
-}
-
-static inline void free_desc_masks(struct irq_desc *old_desc,
-				   struct irq_desc *new_desc)
-{
-	free_cpumask_var(old_desc->affinity);
-
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-	free_cpumask_var(old_desc->pending_mask);
-#endif
-}
-
-#else /* !CONFIG_SMP */
-
-static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
-				    bool boot)
-{
-	return true;
-}
-
-static inline void init_desc_masks(struct irq_desc *desc)
-{
-}
-
-static inline void init_copy_desc_masks(struct irq_desc *old_desc,
-					struct irq_desc *new_desc)
-{
-}
-
-static inline void free_desc_masks(struct irq_desc *old_desc,
-				   struct irq_desc *new_desc)
-{
-}
-#endif	/* CONFIG_SMP */
-
 #endif /* _LINUX_IRQ_H */
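The irq.h hunk above also introduces irq_modify_status(), which collapses the zoo of per-flag setters into one masked read-modify-write, with IRQF_MODIFY_MASK limiting which status bits callers may touch. A minimal standalone sketch of those semantics; the flag values are illustrative and the toy_ names hypothetical, and the real kernel helper additionally takes the irq number and locks the descriptor.

#include <stdio.h>

#define IRQ_NOPROBE		0x1
#define IRQ_NOREQUEST		0x2
#define IRQF_MODIFY_MASK	(IRQ_NOPROBE | IRQ_NOREQUEST)

static unsigned long toy_status;	/* stands in for desc->status */

static void toy_irq_modify_status(unsigned long clr, unsigned long set)
{
	toy_status &= ~(clr & IRQF_MODIFY_MASK);	/* clear first... */
	toy_status |= (set & IRQF_MODIFY_MASK);		/* ...then set */
}

int main(void)
{
	toy_irq_modify_status(0, IRQ_NOPROBE);		/* set_irq_noprobe() */
	printf("status 0x%lx\n", toy_status);		/* 0x1 */
	toy_irq_modify_status(IRQ_NOPROBE, 0);		/* set_irq_probe() */
	printf("status 0x%lx\n", toy_status);		/* 0x0 */
	return 0;
}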
include/linux/irqdesc.h (new file, 159 lines)
@ -0,0 +1,159 @@
|
|||
#ifndef _LINUX_IRQDESC_H
|
||||
#define _LINUX_IRQDESC_H
|
||||
|
||||
/*
|
||||
* Core internal functions to deal with irq descriptors
|
||||
*
|
||||
* This include will move to kernel/irq once we cleaned up the tree.
|
||||
* For now it's included from <linux/irq.h>
|
||||
*/
|
||||
|
||||
struct proc_dir_entry;
|
||||
struct timer_rand_state;
|
||||
/**
|
||||
* struct irq_desc - interrupt descriptor
|
||||
* @irq_data: per irq and chip data passed down to chip functions
|
||||
* @timer_rand_state: pointer to timer rand state struct
|
||||
* @kstat_irqs: irq stats per cpu
|
||||
* @handle_irq: highlevel irq-events handler [if NULL, __do_IRQ()]
|
||||
* @action: the irq action chain
|
||||
* @status: status information
|
||||
 * @depth:		disable-depth, for nested irq_disable() calls
 * @wake_depth:		enable depth, for multiple set_irq_wake() callers
 * @irq_count:		stats field to detect stalled irqs
 * @last_unhandled:	aging timer for unhandled count
 * @irqs_unhandled:	stats field for spurious unhandled interrupts
 * @lock:		locking for SMP
 * @pending_mask:	pending rebalanced interrupts
 * @threads_active:	number of irqaction threads currently running
 * @wait_for_threads:	wait queue for sync_irq to wait for threaded handlers
 * @dir:		/proc/irq/ procfs entry
 * @name:		flow handler name for /proc/interrupts output
 */
struct irq_desc {

#ifdef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
	struct irq_data		irq_data;
#else
	/*
	 * This union will go away, once we fixed the direct access to
	 * irq_desc all over the place. The direct fields are a 1:1
	 * overlay of irq_data.
	 */
	union {
		struct irq_data		irq_data;
		struct {
			unsigned int		irq;
			unsigned int		node;
			struct irq_chip		*chip;
			void			*handler_data;
			void			*chip_data;
			struct msi_desc		*msi_desc;
#ifdef CONFIG_SMP
			cpumask_var_t		affinity;
#endif
		};
	};
#endif

	struct timer_rand_state *timer_rand_state;
	unsigned int		*kstat_irqs;
	irq_flow_handler_t	handle_irq;
	struct irqaction	*action;	/* IRQ action list */
	unsigned int		status;		/* IRQ status */

	unsigned int		depth;		/* nested irq disables */
	unsigned int		wake_depth;	/* nested wake enables */
	unsigned int		irq_count;	/* For detecting broken IRQs */
	unsigned long		last_unhandled;	/* Aging timer for unhandled count */
	unsigned int		irqs_unhandled;
	raw_spinlock_t		lock;
#ifdef CONFIG_SMP
	const struct cpumask	*affinity_hint;
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_var_t		pending_mask;
#endif
#endif
	atomic_t		threads_active;
	wait_queue_head_t	wait_for_threads;
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry	*dir;
#endif
	const char		*name;
} ____cacheline_internodealigned_in_smp;

#ifndef CONFIG_SPARSE_IRQ
extern struct irq_desc irq_desc[NR_IRQS];
#endif

/* Will be removed once the last users in power and sh are gone */
extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node);
static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
{
	return desc;
}

#ifdef CONFIG_GENERIC_HARDIRQS

#define get_irq_desc_chip(desc)		((desc)->irq_data.chip)
#define get_irq_desc_chip_data(desc)	((desc)->irq_data.chip_data)
#define get_irq_desc_data(desc)		((desc)->irq_data.handler_data)
#define get_irq_desc_msi(desc)		((desc)->irq_data.msi_desc)

/*
 * Monolithic do_IRQ implementation.
 */
#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
extern unsigned int __do_IRQ(unsigned int irq);
#endif

/*
 * Architectures call this to let the generic IRQ layer
 * handle an interrupt. If the descriptor is attached to an
 * irqchip-style controller then we call the ->handle_irq() handler,
 * and it calls __do_IRQ() if it's attached to an irqtype-style controller.
 */
static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
	desc->handle_irq(irq, desc);
#else
	if (likely(desc->handle_irq))
		desc->handle_irq(irq, desc);
	else
		__do_IRQ(irq);
#endif
}

static inline void generic_handle_irq(unsigned int irq)
{
	generic_handle_irq_desc(irq, irq_to_desc(irq));
}

/* Test to see if a driver has successfully requested an irq */
static inline int irq_has_action(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	return desc->action != NULL;
}

static inline int irq_balancing_disabled(unsigned int irq)
{
	struct irq_desc *desc;

	desc = irq_to_desc(irq);
	return desc->status & IRQ_NO_BALANCING_MASK;
}

/* caller has locked the irq_desc and both params are valid */
static inline void __set_irq_handler_unlocked(int irq,
					      irq_flow_handler_t handler)
{
	struct irq_desc *desc;

	desc = irq_to_desc(irq);
	desc->handle_irq = handler;
}
#endif

#endif
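The accessor macros and inlines above are the sanctioned way to reach the transitional irq_data fields. A minimal sketch of new-style access from driver-ish code, assuming irq_get_irq_data() as added by this series in kernel/irq/chip.c below; my_report_chip() itself is purely illustrative:

	/* Illustrative only: look up an irq's chip through the new
	 * irq_data interface instead of dereferencing irq_desc directly. */
	static void my_report_chip(unsigned int irq)
	{
		struct irq_data *data = irq_get_irq_data(irq);

		if (data && data->chip)
			pr_info("irq %u: chip '%s'\n", irq, data->chip->name);
	}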
include/linux/irqnr.h:

@@ -25,6 +25,7 @@
 extern int nr_irqs;
 extern struct irq_desc *irq_to_desc(unsigned int irq);
+unsigned int irq_get_next_irq(unsigned int offset);

 # define for_each_irq_desc(irq, desc)					\
 	for (irq = 0, desc = irq_to_desc(irq); irq < nr_irqs;		\

@@ -47,6 +48,10 @@ extern struct irq_desc *irq_to_desc(unsigned int irq);
 #define irq_node(irq)	0
 #endif

+# define for_each_active_irq(irq)			\
+	for (irq = irq_get_next_irq(0); irq < nr_irqs;	\
+	     irq = irq_get_next_irq(irq + 1))
+
 #endif /* CONFIG_GENERIC_HARDIRQS */

 #define for_each_irq_nr(irq)			\
include/linux/interrupt.h:

@@ -435,14 +435,6 @@ do { \

 #endif /* CONFIG_LOCKDEP */

-#ifdef CONFIG_GENERIC_HARDIRQS
-extern void early_init_irq_lock_class(void);
-#else
-static inline void early_init_irq_lock_class(void)
-{
-}
-#endif
-
 #ifdef CONFIG_TRACE_IRQFLAGS
 extern void early_boot_irqs_off(void);
 extern void early_boot_irqs_on(void);
include/linux/msi.h:

@@ -10,12 +10,13 @@ struct msi_msg {
 };

 /* Helper functions */
-struct irq_desc;
-extern void mask_msi_irq(unsigned int irq);
-extern void unmask_msi_irq(unsigned int irq);
-extern void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg);
-extern void get_cached_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg);
-extern void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg);
+struct irq_data;
+struct msi_desc;
+extern void mask_msi_irq(struct irq_data *data);
+extern void unmask_msi_irq(struct irq_data *data);
+extern void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
+extern void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
+extern void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
 extern void read_msi_msg(unsigned int irq, struct msi_msg *msg);
 extern void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
 extern void write_msi_msg(unsigned int irq, struct msi_msg *msg);
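The irq-number based helpers at the end stay as the driver-facing API; they can be layered on the new msi_desc based primitives. A minimal sketch of that layering, assuming the pre-existing get_irq_msi() lookup from linux/irq.h (the real bodies live in drivers/pci/msi.c and may differ):

	/* Sketch only: bridge the irq-number API to the msi_desc API
	 * declared above. get_irq_msi() is assumed, not part of this hunk. */
	void write_msi_msg(unsigned int irq, struct msi_msg *msg)
	{
		struct msi_desc *entry = get_irq_msi(irq);

		__write_msi_msg(entry, msg);
	}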
init/Kconfig:

@@ -339,6 +339,8 @@ config AUDIT_TREE
 	depends on AUDITSYSCALL
 	select FSNOTIFY

+source "kernel/irq/Kconfig"
+
 menu "RCU Subsystem"

 choice
init/main.c:

@@ -556,7 +556,6 @@ asmlinkage void __init start_kernel(void)

 	local_irq_disable();
 	early_boot_irqs_off();
-	early_init_irq_lock_class();

 	/*
 	 * Interrupts are still disabled. Do necessary setups, then
kernel/irq/Kconfig (new file, 53 lines):

config HAVE_GENERIC_HARDIRQS
	def_bool n

if HAVE_GENERIC_HARDIRQS
menu "IRQ subsystem"
#
# Interrupt subsystem related configuration options
#
config GENERIC_HARDIRQS
	def_bool y

config GENERIC_HARDIRQS_NO__DO_IRQ
	def_bool y

# Select this to disable the deprecated stuff
config GENERIC_HARDIRQS_NO_DEPRECATED
	def_bool n

# Options selectable by the architecture code
config HAVE_SPARSE_IRQ
	def_bool n

config GENERIC_IRQ_PROBE
	def_bool n

config GENERIC_PENDING_IRQ
	def_bool n

config AUTO_IRQ_AFFINITY
	def_bool n

config IRQ_PER_CPU
	def_bool n

config HARDIRQS_SW_RESEND
	def_bool n

config SPARSE_IRQ
	bool "Support sparse irq numbering"
	depends on HAVE_SPARSE_IRQ
	---help---

	  Sparse irq numbering is useful for distro kernels that want
	  to define a high CONFIG_NR_CPUS value but still want to have
	  low kernel memory footprint on smaller machines.

	  ( Sparse irqs can also be beneficial on NUMA boxes, as they spread
	    out the interrupt descriptors in a more NUMA-friendly way. )

	  If you don't know what to do here, say N.

endmenu
endif
kernel/irq/Makefile:

@@ -1,7 +1,6 @@

-obj-y := handle.o manage.o spurious.o resend.o chip.o devres.o
+obj-y := irqdesc.o handle.o manage.o spurious.o resend.o chip.o dummychip.o devres.o
 obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
 obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
-obj-$(CONFIG_NUMA_IRQ_DESC) += numa_migrate.o
 obj-$(CONFIG_PM_SLEEP) += pm.o
kernel/irq/autoprobe.c:

@@ -57,9 +57,10 @@ unsigned long probe_irq_on(void)
			 * Some chips need to know about probing in
			 * progress:
			 */
-			if (desc->chip->set_type)
-				desc->chip->set_type(i, IRQ_TYPE_PROBE);
-			desc->chip->startup(i);
+			if (desc->irq_data.chip->irq_set_type)
+				desc->irq_data.chip->irq_set_type(&desc->irq_data,
+							 IRQ_TYPE_PROBE);
+			desc->irq_data.chip->irq_startup(&desc->irq_data);
		}
		raw_spin_unlock_irq(&desc->lock);
	}

@@ -76,7 +77,7 @@ unsigned long probe_irq_on(void)
		raw_spin_lock_irq(&desc->lock);
		if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
			desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
-			if (desc->chip->startup(i))
+			if (desc->irq_data.chip->irq_startup(&desc->irq_data))
				desc->status |= IRQ_PENDING;
		}
		raw_spin_unlock_irq(&desc->lock);

@@ -98,7 +99,7 @@ unsigned long probe_irq_on(void)
			/* It triggered already - consider it spurious. */
			if (!(status & IRQ_WAITING)) {
				desc->status = status & ~IRQ_AUTODETECT;
-				desc->chip->shutdown(i);
+				desc->irq_data.chip->irq_shutdown(&desc->irq_data);
			} else
				if (i < 32)
					mask |= 1 << i;

@@ -137,7 +138,7 @@ unsigned int probe_irq_mask(unsigned long val)
				mask |= 1 << i;

			desc->status = status & ~IRQ_AUTODETECT;
-			desc->chip->shutdown(i);
+			desc->irq_data.chip->irq_shutdown(&desc->irq_data);
		}
		raw_spin_unlock_irq(&desc->lock);
	}

@@ -181,7 +182,7 @@ int probe_irq_off(unsigned long val)
				nr_of_irqs++;
			}
			desc->status = status & ~IRQ_AUTODETECT;
-			desc->chip->shutdown(i);
+			desc->irq_data.chip->irq_shutdown(&desc->irq_data);
		}
		raw_spin_unlock_irq(&desc->lock);
	}
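For context, the probing API these hunks touch is used by legacy drivers in an on/trigger/off sequence; a compact sketch, where my_device_trigger_interrupt() is hypothetical:

	/* Sketch: classic irq autoprobe sequence from a driver. */
	static int my_probe_irq(void)
	{
		unsigned long mask = probe_irq_on();

		my_device_trigger_interrupt();	/* make the hardware raise its irq */
		return probe_irq_off(mask);	/* >0: probed irq, <=0: none or ambiguous */
	}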
kernel/irq/chip.c:

@@ -18,108 +18,6 @@

 #include "internals.h"

-static void dynamic_irq_init_x(unsigned int irq, bool keep_chip_data)
-{
-	struct irq_desc *desc;
-	unsigned long flags;
-
-	desc = irq_to_desc(irq);
-	if (!desc) {
-		WARN(1, KERN_ERR "Trying to initialize invalid IRQ%d\n", irq);
-		return;
-	}
-
-	/* Ensure we don't have left over values from a previous use of this irq */
-	raw_spin_lock_irqsave(&desc->lock, flags);
-	desc->status = IRQ_DISABLED;
-	desc->chip = &no_irq_chip;
-	desc->handle_irq = handle_bad_irq;
-	desc->depth = 1;
-	desc->msi_desc = NULL;
-	desc->handler_data = NULL;
-	if (!keep_chip_data)
-		desc->chip_data = NULL;
-	desc->action = NULL;
-	desc->irq_count = 0;
-	desc->irqs_unhandled = 0;
-#ifdef CONFIG_SMP
-	cpumask_setall(desc->affinity);
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-	cpumask_clear(desc->pending_mask);
-#endif
-#endif
-	raw_spin_unlock_irqrestore(&desc->lock, flags);
-}
-
-/**
- *	dynamic_irq_init - initialize a dynamically allocated irq
- *	@irq:	irq number to initialize
- */
-void dynamic_irq_init(unsigned int irq)
-{
-	dynamic_irq_init_x(irq, false);
-}
-
-/**
- *	dynamic_irq_init_keep_chip_data - initialize a dynamically allocated irq
- *	@irq:	irq number to initialize
- *
- *	does not set irq_to_desc(irq)->chip_data to NULL
- */
-void dynamic_irq_init_keep_chip_data(unsigned int irq)
-{
-	dynamic_irq_init_x(irq, true);
-}
-
-static void dynamic_irq_cleanup_x(unsigned int irq, bool keep_chip_data)
-{
-	struct irq_desc *desc = irq_to_desc(irq);
-	unsigned long flags;
-
-	if (!desc) {
-		WARN(1, KERN_ERR "Trying to cleanup invalid IRQ%d\n", irq);
-		return;
-	}
-
-	raw_spin_lock_irqsave(&desc->lock, flags);
-	if (desc->action) {
-		raw_spin_unlock_irqrestore(&desc->lock, flags);
-		WARN(1, KERN_ERR "Destroying IRQ%d without calling free_irq\n",
-			irq);
-		return;
-	}
-	desc->msi_desc = NULL;
-	desc->handler_data = NULL;
-	if (!keep_chip_data)
-		desc->chip_data = NULL;
-	desc->handle_irq = handle_bad_irq;
-	desc->chip = &no_irq_chip;
-	desc->name = NULL;
-	clear_kstat_irqs(desc);
-	raw_spin_unlock_irqrestore(&desc->lock, flags);
-}
-
-/**
- *	dynamic_irq_cleanup - cleanup a dynamically allocated irq
- *	@irq:	irq number to initialize
- */
-void dynamic_irq_cleanup(unsigned int irq)
-{
-	dynamic_irq_cleanup_x(irq, false);
-}
-
-/**
- *	dynamic_irq_cleanup_keep_chip_data - cleanup a dynamically allocated irq
- *	@irq:	irq number to initialize
- *
- *	does not set irq_to_desc(irq)->chip_data to NULL
- */
-void dynamic_irq_cleanup_keep_chip_data(unsigned int irq)
-{
-	dynamic_irq_cleanup_x(irq, true);
-}
-
-
 /**
  *	set_irq_chip - set the irq chip for an irq
  *	@irq:	irq number

@@ -140,7 +38,7 @@ int set_irq_chip(unsigned int irq, struct irq_chip *chip)

	raw_spin_lock_irqsave(&desc->lock, flags);
	irq_chip_set_defaults(chip);
-	desc->chip = chip;
+	desc->irq_data.chip = chip;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return 0;

@@ -193,7 +91,7 @@ int set_irq_data(unsigned int irq, void *data)
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
-	desc->handler_data = data;
+	desc->irq_data.handler_data = data;
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
 }

@@ -218,7 +116,7 @@ int set_irq_msi(unsigned int irq, struct msi_desc *entry)
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
-	desc->msi_desc = entry;
+	desc->irq_data.msi_desc = entry;
	if (entry)
		entry->irq = irq;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

@@ -243,19 +141,27 @@ int set_irq_chip_data(unsigned int irq, void *data)
		return -EINVAL;
	}

-	if (!desc->chip) {
+	if (!desc->irq_data.chip) {
		printk(KERN_ERR "BUG: bad set_irq_chip_data(IRQ#%d)\n", irq);
		return -EINVAL;
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
-	desc->chip_data = data;
+	desc->irq_data.chip_data = data;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return 0;
 }
 EXPORT_SYMBOL(set_irq_chip_data);

+struct irq_data *irq_get_irq_data(unsigned int irq)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	return desc ? &desc->irq_data : NULL;
+}
+EXPORT_SYMBOL_GPL(irq_get_irq_data);
+
 /**
  *	set_irq_nested_thread - Set/Reset the IRQ_NESTED_THREAD flag of an irq
  *

@@ -287,93 +193,216 @@ EXPORT_SYMBOL_GPL(set_irq_nested_thread);
 /*
  * default enable function
  */
-static void default_enable(unsigned int irq)
+static void default_enable(struct irq_data *data)
 {
-	struct irq_desc *desc = irq_to_desc(irq);
+	struct irq_desc *desc = irq_data_to_desc(data);

-	desc->chip->unmask(irq);
+	desc->irq_data.chip->irq_unmask(&desc->irq_data);
	desc->status &= ~IRQ_MASKED;
 }

 /*
  * default disable function
  */
-static void default_disable(unsigned int irq)
+static void default_disable(struct irq_data *data)
 {
 }

 /*
  * default startup function
  */
-static unsigned int default_startup(unsigned int irq)
+static unsigned int default_startup(struct irq_data *data)
 {
-	struct irq_desc *desc = irq_to_desc(irq);
+	struct irq_desc *desc = irq_data_to_desc(data);

-	desc->chip->enable(irq);
+	desc->irq_data.chip->irq_enable(data);
	return 0;
 }

 /*
  * default shutdown function
  */
-static void default_shutdown(unsigned int irq)
+static void default_shutdown(struct irq_data *data)
 {
-	struct irq_desc *desc = irq_to_desc(irq);
+	struct irq_desc *desc = irq_data_to_desc(data);

-	desc->chip->mask(irq);
+	desc->irq_data.chip->irq_mask(&desc->irq_data);
	desc->status |= IRQ_MASKED;
 }

+#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
+/* Temporary migration helpers */
+static void compat_irq_mask(struct irq_data *data)
+{
+	data->chip->mask(data->irq);
+}
+
+static void compat_irq_unmask(struct irq_data *data)
+{
+	data->chip->unmask(data->irq);
+}
+
+static void compat_irq_ack(struct irq_data *data)
+{
+	data->chip->ack(data->irq);
+}
+
+static void compat_irq_mask_ack(struct irq_data *data)
+{
+	data->chip->mask_ack(data->irq);
+}
+
+static void compat_irq_eoi(struct irq_data *data)
+{
+	data->chip->eoi(data->irq);
+}
+
+static void compat_irq_enable(struct irq_data *data)
+{
+	data->chip->enable(data->irq);
+}
+
+static void compat_irq_disable(struct irq_data *data)
+{
+	data->chip->disable(data->irq);
+}
+
+static void compat_irq_shutdown(struct irq_data *data)
+{
+	data->chip->shutdown(data->irq);
+}
+
+static unsigned int compat_irq_startup(struct irq_data *data)
+{
+	return data->chip->startup(data->irq);
+}
+
+static int compat_irq_set_affinity(struct irq_data *data,
+				   const struct cpumask *dest, bool force)
+{
+	return data->chip->set_affinity(data->irq, dest);
+}
+
+static int compat_irq_set_type(struct irq_data *data, unsigned int type)
+{
+	return data->chip->set_type(data->irq, type);
+}
+
+static int compat_irq_set_wake(struct irq_data *data, unsigned int on)
+{
+	return data->chip->set_wake(data->irq, on);
+}
+
+static int compat_irq_retrigger(struct irq_data *data)
+{
+	return data->chip->retrigger(data->irq);
+}
+
+static void compat_bus_lock(struct irq_data *data)
+{
+	data->chip->bus_lock(data->irq);
+}
+
+static void compat_bus_sync_unlock(struct irq_data *data)
+{
+	data->chip->bus_sync_unlock(data->irq);
+}
+#endif
+
 /*
  * Fixup enable/disable function pointers
  */
 void irq_chip_set_defaults(struct irq_chip *chip)
 {
-	if (!chip->enable)
-		chip->enable = default_enable;
-	if (!chip->disable)
-		chip->disable = default_disable;
-	if (!chip->startup)
-		chip->startup = default_startup;
+#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
	/*
-	 * We use chip->disable, when the user provided its own. When
-	 * we have default_disable set for chip->disable, then we need
+	 * Compat fixup functions need to be before we set the
+	 * defaults for enable/disable/startup/shutdown
	 */
+	if (chip->enable)
+		chip->irq_enable = compat_irq_enable;
+	if (chip->disable)
+		chip->irq_disable = compat_irq_disable;
+	if (chip->shutdown)
+		chip->irq_shutdown = compat_irq_shutdown;
+	if (chip->startup)
+		chip->irq_startup = compat_irq_startup;
+#endif
+	/*
+	 * The real defaults
+	 */
+	if (!chip->irq_enable)
+		chip->irq_enable = default_enable;
+	if (!chip->irq_disable)
+		chip->irq_disable = default_disable;
+	if (!chip->irq_startup)
+		chip->irq_startup = default_startup;
+	/*
+	 * We use chip->irq_disable, when the user provided its own. When
+	 * we have default_disable set for chip->irq_disable, then we need
+	 * to use default_shutdown, otherwise the irq line is not
+	 * disabled on free_irq():
+	 */
-	if (!chip->shutdown)
-		chip->shutdown = chip->disable != default_disable ?
-			chip->disable : default_shutdown;
-	if (!chip->name)
-		chip->name = chip->typename;
+	if (!chip->irq_shutdown)
+		chip->irq_shutdown = chip->irq_disable != default_disable ?
+			chip->irq_disable : default_shutdown;
+
+#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
	if (!chip->end)
		chip->end = dummy_irq_chip.end;
+
+	/*
+	 * Now fix up the remaining compat handlers
+	 */
+	if (chip->bus_lock)
+		chip->irq_bus_lock = compat_bus_lock;
+	if (chip->bus_sync_unlock)
+		chip->irq_bus_sync_unlock = compat_bus_sync_unlock;
+	if (chip->mask)
+		chip->irq_mask = compat_irq_mask;
+	if (chip->unmask)
+		chip->irq_unmask = compat_irq_unmask;
+	if (chip->ack)
+		chip->irq_ack = compat_irq_ack;
+	if (chip->mask_ack)
+		chip->irq_mask_ack = compat_irq_mask_ack;
+	if (chip->eoi)
+		chip->irq_eoi = compat_irq_eoi;
+	if (chip->set_affinity)
+		chip->irq_set_affinity = compat_irq_set_affinity;
+	if (chip->set_type)
+		chip->irq_set_type = compat_irq_set_type;
+	if (chip->set_wake)
+		chip->irq_set_wake = compat_irq_set_wake;
+	if (chip->retrigger)
+		chip->irq_retrigger = compat_irq_retrigger;
+#endif
 }

-static inline void mask_ack_irq(struct irq_desc *desc, int irq)
+static inline void mask_ack_irq(struct irq_desc *desc)
 {
-	if (desc->chip->mask_ack)
-		desc->chip->mask_ack(irq);
+	if (desc->irq_data.chip->irq_mask_ack)
+		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
	else {
-		desc->chip->mask(irq);
-		if (desc->chip->ack)
-			desc->chip->ack(irq);
+		desc->irq_data.chip->irq_mask(&desc->irq_data);
+		if (desc->irq_data.chip->irq_ack)
+			desc->irq_data.chip->irq_ack(&desc->irq_data);
	}
	desc->status |= IRQ_MASKED;
 }

-static inline void mask_irq(struct irq_desc *desc, int irq)
+static inline void mask_irq(struct irq_desc *desc)
 {
-	if (desc->chip->mask) {
-		desc->chip->mask(irq);
+	if (desc->irq_data.chip->irq_mask) {
+		desc->irq_data.chip->irq_mask(&desc->irq_data);
		desc->status |= IRQ_MASKED;
	}
 }

-static inline void unmask_irq(struct irq_desc *desc, int irq)
+static inline void unmask_irq(struct irq_desc *desc)
 {
-	if (desc->chip->unmask) {
-		desc->chip->unmask(irq);
+	if (desc->irq_data.chip->irq_unmask) {
+		desc->irq_data.chip->irq_unmask(&desc->irq_data);
		desc->status &= ~IRQ_MASKED;
	}
 }

@@ -476,7 +505,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
	irqreturn_t action_ret;

	raw_spin_lock(&desc->lock);
-	mask_ack_irq(desc, irq);
+	mask_ack_irq(desc);

	if (unlikely(desc->status & IRQ_INPROGRESS))
		goto out_unlock;

@@ -502,7 +531,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
	desc->status &= ~IRQ_INPROGRESS;

	if (!(desc->status & (IRQ_DISABLED | IRQ_ONESHOT)))
-		unmask_irq(desc, irq);
+		unmask_irq(desc);
 out_unlock:
	raw_spin_unlock(&desc->lock);
 }

@@ -539,7 +568,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
	action = desc->action;
	if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
		desc->status |= IRQ_PENDING;
-		mask_irq(desc, irq);
+		mask_irq(desc);
		goto out;
	}

@@ -554,7 +583,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
	raw_spin_lock(&desc->lock);
	desc->status &= ~IRQ_INPROGRESS;
 out:
-	desc->chip->eoi(irq);
+	desc->irq_data.chip->irq_eoi(&desc->irq_data);

	raw_spin_unlock(&desc->lock);
 }

@@ -590,14 +619,13 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
	if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) ||
		    !desc->action)) {
		desc->status |= (IRQ_PENDING | IRQ_MASKED);
-		mask_ack_irq(desc, irq);
+		mask_ack_irq(desc);
		goto out_unlock;
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	/* Start handling the irq */
-	if (desc->chip->ack)
-		desc->chip->ack(irq);
+	desc->irq_data.chip->irq_ack(&desc->irq_data);

	/* Mark the IRQ currently in progress.*/
	desc->status |= IRQ_INPROGRESS;

@@ -607,7 +635,7 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
		irqreturn_t action_ret;

		if (unlikely(!action)) {
-			mask_irq(desc, irq);
+			mask_irq(desc);
			goto out_unlock;
		}

@@ -619,7 +647,7 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
		if (unlikely((desc->status &
			       (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) ==
			      (IRQ_PENDING | IRQ_MASKED))) {
-			unmask_irq(desc, irq);
+			unmask_irq(desc);
		}

		desc->status &= ~IRQ_PENDING;

@@ -650,15 +678,15 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc)

	kstat_incr_irqs_this_cpu(irq, desc);

-	if (desc->chip->ack)
-		desc->chip->ack(irq);
+	if (desc->irq_data.chip->irq_ack)
+		desc->irq_data.chip->irq_ack(&desc->irq_data);

	action_ret = handle_IRQ_event(irq, desc->action);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

-	if (desc->chip->eoi)
-		desc->chip->eoi(irq);
+	if (desc->irq_data.chip->irq_eoi)
+		desc->irq_data.chip->irq_eoi(&desc->irq_data);
 }

 void

@@ -676,7 +704,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,

	if (!handle)
		handle = handle_bad_irq;
-	else if (desc->chip == &no_irq_chip) {
+	else if (desc->irq_data.chip == &no_irq_chip) {
		printk(KERN_WARNING "Trying to install %sinterrupt handler "
		       "for IRQ%d\n", is_chained ? "chained " : "", irq);
		/*

@@ -686,16 +714,16 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		 * prevent us to setup the interrupt at all. Switch it to
		 * dummy_irq_chip for easy transition.
		 */
-		desc->chip = &dummy_irq_chip;
+		desc->irq_data.chip = &dummy_irq_chip;
	}

-	chip_bus_lock(irq, desc);
+	chip_bus_lock(desc);
	raw_spin_lock_irqsave(&desc->lock, flags);

	/* Uninstall? */
	if (handle == handle_bad_irq) {
-		if (desc->chip != &no_irq_chip)
-			mask_ack_irq(desc, irq);
+		if (desc->irq_data.chip != &no_irq_chip)
+			mask_ack_irq(desc);
		desc->status |= IRQ_DISABLED;
		desc->depth = 1;
	}

@@ -706,10 +734,10 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		desc->status &= ~IRQ_DISABLED;
		desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE;
		desc->depth = 0;
-		desc->chip->startup(irq);
+		desc->irq_data.chip->irq_startup(&desc->irq_data);
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
-	chip_bus_sync_unlock(irq, desc);
+	chip_bus_sync_unlock(desc);
 }
 EXPORT_SYMBOL_GPL(__set_irq_handler);

@@ -729,32 +757,20 @@ set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
	__set_irq_handler(irq, handle, 0, name);
 }

-void set_irq_noprobe(unsigned int irq)
+void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
 {
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

-	if (!desc) {
-		printk(KERN_ERR "Trying to mark IRQ%d non-probeable\n", irq);
+	if (!desc)
		return;
-	}

-	raw_spin_lock_irqsave(&desc->lock, flags);
-	desc->status |= IRQ_NOPROBE;
-	raw_spin_unlock_irqrestore(&desc->lock, flags);
-}
-
-void set_irq_probe(unsigned int irq)
-{
-	struct irq_desc *desc = irq_to_desc(irq);
-	unsigned long flags;
-
-	if (!desc) {
-		printk(KERN_ERR "Trying to mark IRQ%d probeable\n", irq);
-		return;
-	}
+	/* Sanitize flags */
+	set &= IRQF_MODIFY_MASK;
+	clr &= IRQF_MODIFY_MASK;

	raw_spin_lock_irqsave(&desc->lock, flags);
-	desc->status &= ~IRQ_NOPROBE;
+	desc->status &= ~clr;
+	desc->status |= set;
	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
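The compat_* functions above are a plain adapter layer: each new-style callback recovers the irq number from its irq_data argument and forwards to the matching deprecated hook. Until a chip is converted, it can keep filling only the old members; a sketch of such an unconverted chip, with all names illustrative and register accesses elided:

	/* Sketch: an unconverted chip that only provides the deprecated
	 * hooks. After set_irq_chip() runs irq_chip_set_defaults(), the
	 * compat_* adapters make it reachable through the new irq_*
	 * methods as well. */
	static void my_old_mask(unsigned int irq)	{ /* mask in hw */ }
	static void my_old_unmask(unsigned int irq)	{ /* unmask in hw */ }
	static void my_old_ack(unsigned int irq)	{ /* clear pending bit */ }

	static struct irq_chip my_old_chip = {
		.name	= "my-old",
		.mask	= my_old_mask,
		.unmask	= my_old_unmask,
		.ack	= my_old_ack,
	};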
kernel/irq/dummychip.c (new file, 68 lines):

/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the dummy interrupt chip implementation
 */
#include <linux/interrupt.h>
#include <linux/irq.h>

#include "internals.h"

/*
 * What should we do if we get a hw irq event on an illegal vector?
 * Each architecture has to answer this themself.
 */
static void ack_bad(struct irq_data *data)
{
	struct irq_desc *desc = irq_data_to_desc(data);

	print_irq_desc(data->irq, desc);
	ack_bad_irq(data->irq);
}

/*
 * NOP functions
 */
static void noop(struct irq_data *data) { }

static unsigned int noop_ret(struct irq_data *data)
{
	return 0;
}

#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
static void compat_noop(unsigned int irq) { }
#define END_INIT .end = compat_noop
#else
#define END_INIT
#endif

/*
 * Generic no controller implementation
 */
struct irq_chip no_irq_chip = {
	.name		= "none",
	.irq_startup	= noop_ret,
	.irq_shutdown	= noop,
	.irq_enable	= noop,
	.irq_disable	= noop,
	.irq_ack	= ack_bad,
	END_INIT
};

/*
 * Generic dummy implementation which can be used for
 * real dumb interrupt sources
 */
struct irq_chip dummy_irq_chip = {
	.name		= "dummy",
	.irq_startup	= noop_ret,
	.irq_shutdown	= noop,
	.irq_enable	= noop,
	.irq_disable	= noop,
	.irq_ack	= noop,
	.irq_mask	= noop,
	.irq_unmask	= noop,
	END_INIT
};
kernel/irq/handle.c:

@@ -11,24 +11,15 @@
  */

 #include <linux/irq.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/module.h>
 #include <linux/random.h>
+#include <linux/sched.h>
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
-#include <linux/rculist.h>
-#include <linux/hash.h>
-#include <linux/radix-tree.h>

 #include <trace/events/irq.h>

 #include "internals.h"

-/*
- * lockdep: we want to handle all irq_desc locks as a single lock-class:
- */
-struct lock_class_key irq_desc_lock_class;
-
 /**
  * handle_bad_irq - handle spurious and unhandled irqs
  * @irq:       the interrupt number

@@ -43,304 +34,6 @@ void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
	ack_bad_irq(irq);
 }

-#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
-static void __init init_irq_default_affinity(void)
-{
-	alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
-	cpumask_setall(irq_default_affinity);
-}
-#else
-static void __init init_irq_default_affinity(void)
-{
-}
-#endif
-
-/*
- * Linux has a controller-independent interrupt architecture.
- * Every controller has a 'controller-template', that is used
- * by the main code to do the right thing. Each driver-visible
- * interrupt source is transparently wired to the appropriate
- * controller. Thus drivers need not be aware of the
- * interrupt-controller.
- *
- * The code is designed to be easily extended with new/different
- * interrupt controllers, without having to do assembly magic or
- * having to touch the generic code.
- *
- * Controller mappings for all interrupt sources:
- */
-int nr_irqs = NR_IRQS;
-EXPORT_SYMBOL_GPL(nr_irqs);
-
-#ifdef CONFIG_SPARSE_IRQ
-
-static struct irq_desc irq_desc_init = {
-	.irq	    = -1,
-	.status	    = IRQ_DISABLED,
-	.chip	    = &no_irq_chip,
-	.handle_irq = handle_bad_irq,
-	.depth      = 1,
-	.lock       = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-};
-
-void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
-{
-	void *ptr;
-
-	ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
-			   GFP_ATOMIC, node);
-
-	/*
-	 * don't overwite if can not get new one
-	 * init_copy_kstat_irqs() could still use old one
-	 */
-	if (ptr) {
-		printk(KERN_DEBUG "  alloc kstat_irqs on node %d\n", node);
-		desc->kstat_irqs = ptr;
-	}
-}
-
-static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
-{
-	memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
-
-	raw_spin_lock_init(&desc->lock);
-	desc->irq = irq;
-#ifdef CONFIG_SMP
-	desc->node = node;
-#endif
-	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
-	init_kstat_irqs(desc, node, nr_cpu_ids);
-	if (!desc->kstat_irqs) {
-		printk(KERN_ERR "can not alloc kstat_irqs\n");
-		BUG_ON(1);
-	}
-	if (!alloc_desc_masks(desc, node, false)) {
-		printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
-		BUG_ON(1);
-	}
-	init_desc_masks(desc);
-	arch_init_chip_data(desc, node);
-}
-
-/*
- * Protect the sparse_irqs:
- */
-DEFINE_RAW_SPINLOCK(sparse_irq_lock);
-
-static RADIX_TREE(irq_desc_tree, GFP_ATOMIC);
-
-static void set_irq_desc(unsigned int irq, struct irq_desc *desc)
-{
-	radix_tree_insert(&irq_desc_tree, irq, desc);
-}
-
-struct irq_desc *irq_to_desc(unsigned int irq)
-{
-	return radix_tree_lookup(&irq_desc_tree, irq);
-}
-
-void replace_irq_desc(unsigned int irq, struct irq_desc *desc)
-{
-	void **ptr;
-
-	ptr = radix_tree_lookup_slot(&irq_desc_tree, irq);
-	if (ptr)
-		radix_tree_replace_slot(ptr, desc);
-}
-
-static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
-	[0 ... NR_IRQS_LEGACY-1] = {
-		.irq	    = -1,
-		.status	    = IRQ_DISABLED,
-		.chip	    = &no_irq_chip,
-		.handle_irq = handle_bad_irq,
-		.depth	    = 1,
-		.lock	    = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-	}
-};
-
-static unsigned int *kstat_irqs_legacy;
-
-int __init early_irq_init(void)
-{
-	struct irq_desc *desc;
-	int legacy_count;
-	int node;
-	int i;
-
-	init_irq_default_affinity();
-
-	/* initialize nr_irqs based on nr_cpu_ids */
-	arch_probe_nr_irqs();
-	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);
-
-	desc = irq_desc_legacy;
-	legacy_count = ARRAY_SIZE(irq_desc_legacy);
-	node = first_online_node;
-
-	/* allocate based on nr_cpu_ids */
-	kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids *
-					  sizeof(int), GFP_NOWAIT, node);
-
-	for (i = 0; i < legacy_count; i++) {
-		desc[i].irq = i;
-#ifdef CONFIG_SMP
-		desc[i].node = node;
-#endif
-		desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
-		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
-		alloc_desc_masks(&desc[i], node, true);
-		init_desc_masks(&desc[i]);
-		set_irq_desc(i, &desc[i]);
-	}
-
-	return arch_early_irq_init();
-}
-
-struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
-{
-	struct irq_desc *desc;
-	unsigned long flags;
-
-	if (irq >= nr_irqs) {
-		WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
-			irq, nr_irqs);
-		return NULL;
-	}
-
-	desc = irq_to_desc(irq);
-	if (desc)
-		return desc;
-
-	raw_spin_lock_irqsave(&sparse_irq_lock, flags);
-
-	/* We have to check it to avoid races with another CPU */
-	desc = irq_to_desc(irq);
-	if (desc)
-		goto out_unlock;
-
-	desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
-
-	printk(KERN_DEBUG "  alloc irq_desc for %d on node %d\n", irq, node);
-	if (!desc) {
-		printk(KERN_ERR "can not alloc irq_desc\n");
-		BUG_ON(1);
-	}
-	init_one_irq_desc(irq, desc, node);
-
-	set_irq_desc(irq, desc);
-
-out_unlock:
-	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
-
-	return desc;
-}
-
-#else /* !CONFIG_SPARSE_IRQ */
-
-struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
-	[0 ... NR_IRQS-1] = {
-		.status = IRQ_DISABLED,
-		.chip = &no_irq_chip,
-		.handle_irq = handle_bad_irq,
-		.depth = 1,
-		.lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
-	}
-};
-
-static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
-int __init early_irq_init(void)
-{
-	struct irq_desc *desc;
-	int count;
-	int i;
-
-	init_irq_default_affinity();
-
-	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);
-
-	desc = irq_desc;
-	count = ARRAY_SIZE(irq_desc);
-
-	for (i = 0; i < count; i++) {
-		desc[i].irq = i;
-		alloc_desc_masks(&desc[i], 0, true);
-		init_desc_masks(&desc[i]);
-		desc[i].kstat_irqs = kstat_irqs_all[i];
-	}
-	return arch_early_irq_init();
-}
-
-struct irq_desc *irq_to_desc(unsigned int irq)
-{
-	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
-}
-
-struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
-{
-	return irq_to_desc(irq);
-}
-#endif /* !CONFIG_SPARSE_IRQ */
-
-void clear_kstat_irqs(struct irq_desc *desc)
-{
-	memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
-}
-
-/*
- * What should we do if we get a hw irq event on an illegal vector?
- * Each architecture has to answer this themself.
- */
-static void ack_bad(unsigned int irq)
-{
-	struct irq_desc *desc = irq_to_desc(irq);
-
-	print_irq_desc(irq, desc);
-	ack_bad_irq(irq);
-}
-
-/*
- * NOP functions
- */
-static void noop(unsigned int irq)
-{
-}
-
-static unsigned int noop_ret(unsigned int irq)
-{
-	return 0;
-}
-
-/*
- * Generic no controller implementation
- */
-struct irq_chip no_irq_chip = {
-	.name		= "none",
-	.startup	= noop_ret,
-	.shutdown	= noop,
-	.enable		= noop,
-	.disable	= noop,
-	.ack		= ack_bad,
-	.end		= noop,
-};
-
-/*
- * Generic dummy implementation which can be used for
- * real dumb interrupt sources
- */
-struct irq_chip dummy_irq_chip = {
-	.name		= "dummy",
-	.startup	= noop_ret,
-	.shutdown	= noop,
-	.enable		= noop,
-	.disable	= noop,
-	.ack		= noop,
-	.mask		= noop,
-	.unmask		= noop,
-	.end		= noop,
-};
-
 /*
  * Special, empty irq handler:
  */

@@ -457,20 +150,20 @@ unsigned int __do_IRQ(unsigned int irq)
		/*
		 * No locking required for CPU-local interrupts:
		 */
-		if (desc->chip->ack)
-			desc->chip->ack(irq);
+		if (desc->irq_data.chip->ack)
+			desc->irq_data.chip->ack(irq);
		if (likely(!(desc->status & IRQ_DISABLED))) {
			action_ret = handle_IRQ_event(irq, desc->action);
			if (!noirqdebug)
				note_interrupt(irq, desc, action_ret);
		}
-		desc->chip->end(irq);
+		desc->irq_data.chip->end(irq);
		return 1;
	}

	raw_spin_lock(&desc->lock);
-	if (desc->chip->ack)
-		desc->chip->ack(irq);
+	if (desc->irq_data.chip->ack)
+		desc->irq_data.chip->ack(irq);
	/*
	 * REPLAY is when Linux resends an IRQ that was dropped earlier
	 * WAITING is used by probe to mark irqs that are being tested

@@ -530,27 +223,9 @@ unsigned int __do_IRQ(unsigned int irq)
	 * The ->end() handler has to deal with interrupts which got
	 * disabled while the handler was running.
	 */
-	desc->chip->end(irq);
+	desc->irq_data.chip->end(irq);
	raw_spin_unlock(&desc->lock);

	return 1;
 }
 #endif

-void early_init_irq_lock_class(void)
-{
-	struct irq_desc *desc;
-	int i;
-
-	for_each_irq_desc(i, desc) {
-		lockdep_set_class(&desc->lock, &irq_desc_lock_class);
-	}
-}
-
-unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
-{
-	struct irq_desc *desc = irq_to_desc(irq);
-	return desc ? desc->kstat_irqs[cpu] : 0;
-}
-EXPORT_SYMBOL(kstat_irqs_cpu);
kernel/irq/internals.h:

@@ -1,9 +1,12 @@
 /*
  * IRQ subsystem internal functions and variables:
  */
+#include <linux/irqdesc.h>

 extern int noirqdebug;

+#define irq_data_to_desc(data)	container_of(data, struct irq_desc, irq_data)
+
 /* Set default functions for irq_chip structures: */
 extern void irq_chip_set_defaults(struct irq_chip *chip);

@@ -15,21 +18,19 @@ extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp);
 extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume);

-extern struct lock_class_key irq_desc_lock_class;
-extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
-extern void clear_kstat_irqs(struct irq_desc *desc);
-extern raw_spinlock_t sparse_irq_lock;
-
-#ifdef CONFIG_SPARSE_IRQ
-void replace_irq_desc(unsigned int irq, struct irq_desc *desc);
-#endif
 /* Resending of interrupts :*/
 void check_irq_resend(struct irq_desc *desc, unsigned int irq);

 #ifdef CONFIG_PROC_FS
 extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
 extern void unregister_irq_proc(unsigned int irq, struct irq_desc *desc);
 extern void register_handler_proc(unsigned int irq, struct irqaction *action);
 extern void unregister_handler_proc(unsigned int irq, struct irqaction *action);
 #else
 static inline void register_irq_proc(unsigned int irq, struct irq_desc *desc) { }
 static inline void unregister_irq_proc(unsigned int irq, struct irq_desc *desc) { }
 static inline void register_handler_proc(unsigned int irq,
					  struct irqaction *action) { }
 static inline void unregister_handler_proc(unsigned int irq,

@@ -40,17 +41,27 @@ extern int irq_select_affinity_usr(unsigned int irq);

 extern void irq_set_thread_affinity(struct irq_desc *desc);

-/* Inline functions for support of irq chips on slow busses */
-static inline void chip_bus_lock(unsigned int irq, struct irq_desc *desc)
+#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED
+static inline void irq_end(unsigned int irq, struct irq_desc *desc)
 {
-	if (unlikely(desc->chip->bus_lock))
-		desc->chip->bus_lock(irq);
+	if (desc->irq_data.chip && desc->irq_data.chip->end)
+		desc->irq_data.chip->end(irq);
 }
+#else
+static inline void irq_end(unsigned int irq, struct irq_desc *desc) { }
+#endif

+/* Inline functions for support of irq chips on slow busses */
+static inline void chip_bus_lock(struct irq_desc *desc)
+{
+	if (unlikely(desc->irq_data.chip->irq_bus_lock))
+		desc->irq_data.chip->irq_bus_lock(&desc->irq_data);
+}

-static inline void chip_bus_sync_unlock(unsigned int irq, struct irq_desc *desc)
+static inline void chip_bus_sync_unlock(struct irq_desc *desc)
 {
-	if (unlikely(desc->chip->bus_sync_unlock))
-		desc->chip->bus_sync_unlock(irq);
+	if (unlikely(desc->irq_data.chip->irq_bus_sync_unlock))
+		desc->irq_data.chip->irq_bus_sync_unlock(&desc->irq_data);
 }

 /*

@@ -67,8 +78,8 @@ static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
		irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled);
	printk("->handle_irq():  %p, ", desc->handle_irq);
	print_symbol("%s\n", (unsigned long)desc->handle_irq);
-	printk("->chip(): %p, ", desc->chip);
-	print_symbol("%s\n", (unsigned long)desc->chip);
+	printk("->irq_data.chip(): %p, ", desc->irq_data.chip);
+	print_symbol("%s\n", (unsigned long)desc->irq_data.chip);
	printk("->action(): %p\n", desc->action);
	if (desc->action) {
		printk("->action->handler(): %p, ", desc->action->handler);
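chip_bus_lock()/chip_bus_sync_unlock() bracket operations on chips that sit behind slow, possibly sleeping buses, and they now take only the descriptor. The pattern the callers in kernel/irq/manage.c follow looks roughly like this sketch; my_irq_op() is illustrative:

	/* Sketch of the bus-lock pattern supported by the inlines above;
	 * compare disable_irq_nosync() in kernel/irq/manage.c. */
	static void my_irq_op(unsigned int irq, struct irq_desc *desc)
	{
		unsigned long flags;

		chip_bus_lock(desc);			/* may sleep: slow bus */
		raw_spin_lock_irqsave(&desc->lock, flags);
		/* ... update desc state, call irq_data.chip methods ... */
		raw_spin_unlock_irqrestore(&desc->lock, flags);
		chip_bus_sync_unlock(desc);		/* push changes to hw */
	}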
kernel/irq/irqdesc.c (new file, 395 lines):

/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
static void __init init_irq_default_affinity(void)
{
	alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
{
	if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
		return -ENOMEM;

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
		free_cpumask_var(desc->irq_data.affinity);
		return -ENOMEM;
	}
#endif
	return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node)
{
	desc->irq_data.node = node;
	cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
}

static inline int desc_node(struct irq_desc *desc)
{
	return desc->irq_data.node;
}

#else
static inline int
alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
static inline void desc_smp_init(struct irq_desc *desc, int node) { }
static inline int desc_node(struct irq_desc *desc) { return 0; }
#endif

static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node)
{
	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	desc->irq_data.handler_data = NULL;
	desc->irq_data.msi_desc = NULL;
	desc->status = IRQ_DEFAULT_INIT_FLAGS;
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	desc->name = NULL;
	memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
	desc_smp_init(desc, node);
}

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, NR_IRQS);

#ifdef CONFIG_SPARSE_IRQ

static RADIX_TREE(irq_desc_tree, GFP_KERNEL);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
	radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return radix_tree_lookup(&irq_desc_tree, irq);
}

static void delete_irq_desc(unsigned int irq)
{
	radix_tree_delete(&irq_desc_tree, irq);
}

#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(desc->pending_mask);
#endif
	free_cpumask_var(desc->irq_data.affinity);
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif

static struct irq_desc *alloc_desc(int irq, int node)
{
	struct irq_desc *desc;
	gfp_t gfp = GFP_KERNEL;

	desc = kzalloc_node(sizeof(*desc), gfp, node);
	if (!desc)
		return NULL;
	/* allocate based on nr_cpu_ids */
	desc->kstat_irqs = kzalloc_node(nr_cpu_ids * sizeof(*desc->kstat_irqs),
					 gfp, node);
	if (!desc->kstat_irqs)
		goto err_desc;

	if (alloc_masks(desc, gfp, node))
		goto err_kstat;

	raw_spin_lock_init(&desc->lock);
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);

	desc_set_defaults(irq, desc, node);

	return desc;

err_kstat:
	kfree(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	unregister_irq_proc(irq, desc);

	mutex_lock(&sparse_irq_lock);
	delete_irq_desc(irq);
	mutex_unlock(&sparse_irq_lock);

	free_masks(desc);
	kfree(desc->kstat_irqs);
	kfree(desc);
}

static int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
	struct irq_desc *desc;
	int i;

	for (i = 0; i < cnt; i++) {
		desc = alloc_desc(start + i, node);
		if (!desc)
			goto err;
		mutex_lock(&sparse_irq_lock);
		irq_insert_desc(start + i, desc);
		mutex_unlock(&sparse_irq_lock);
	}
	return start;

err:
	for (i--; i >= 0; i--)
		free_desc(start + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return -ENOMEM;
}

struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
{
	int res = irq_alloc_descs(irq, irq, 1, node);

	if (res == -EEXIST || res == irq)
		return irq_to_desc(irq);
	return NULL;
}

int __init early_irq_init(void)
{
	int i, initcnt, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	/* Let arch update nr_irqs and return the nr of preallocated irqs */
	initcnt = arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);

	for (i = 0; i < initcnt; i++) {
		desc = alloc_desc(i, node);
		set_bit(i, allocated_irqs);
		irq_insert_desc(i, desc);
	}
	return arch_early_irq_init();
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.status		= IRQ_DEFAULT_INIT_FLAGS,
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
int __init early_irq_init(void)
{
	int count, i, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].irq_data.irq = i;
		desc[i].irq_data.chip = &no_irq_chip;
		desc[i].kstat_irqs = kstat_irqs_all[i];
		alloc_masks(desc + i, GFP_KERNEL, node);
		desc_smp_init(desc + i, node);
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
	}
	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}

struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
{
	return irq_to_desc(irq);
}

static void free_desc(unsigned int irq)
{
	dynamic_irq_cleanup(irq);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
	return start;
}
#endif /* !CONFIG_SPARSE_IRQ */

/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from:	Start of descriptor range
 * @cnt:	Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
	int i;

	if (from >= nr_irqs || (from + cnt) > nr_irqs)
		return;

	for (i = 0; i < cnt; i++)
		free_desc(from + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, from, cnt);
	mutex_unlock(&sparse_irq_lock);
}

/**
 * irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:	Allocate for specific irq number if irq >= 0
 * @from:	Start the search from this irq number
 * @cnt:	Number of consecutive irqs to allocate.
 * @node:	Preferred node on which the irq descriptor should be allocated
 *
 * Returns the first irq number or error code
 */
int __ref
irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node)
{
	int start, ret;

	if (!cnt)
		return -EINVAL;

	mutex_lock(&sparse_irq_lock);

	start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
	ret = -EEXIST;
	if (irq >= 0 && start != irq)
		goto err;

	ret = -ENOMEM;
	if (start >= nr_irqs)
		goto err;

	bitmap_set(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return alloc_descs(start, cnt, node);

err:
	mutex_unlock(&sparse_irq_lock);
	return ret;
}

/**
 * irq_reserve_irqs - mark irqs allocated
 * @from:	mark from irq number
 * @cnt:	number of irqs to mark
 *
 * Returns 0 on success or an appropriate error code
 */
int irq_reserve_irqs(unsigned int from, unsigned int cnt)
{
	unsigned int start;
	int ret = 0;

	if (!cnt || (from + cnt) > nr_irqs)
		return -EINVAL;

	mutex_lock(&sparse_irq_lock);
	start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
	if (start == from)
		bitmap_set(allocated_irqs, start, cnt);
	else
		ret = -EEXIST;
	mutex_unlock(&sparse_irq_lock);
	return ret;
}

/**
 * irq_get_next_irq - get next allocated irq number
 * @offset:	where to start the search
 *
 * Returns next irq number after offset or nr_irqs if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
	return find_next_bit(allocated_irqs, nr_irqs, offset);
}

/**
 * dynamic_irq_cleanup - cleanup a dynamically allocated irq
 * @irq:	irq number to initialize
 */
void dynamic_irq_cleanup(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, desc_node(desc));
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}

unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);
	return desc ? desc->kstat_irqs[cpu] : 0;
}
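irq_alloc_descs()/irq_free_descs() give callers a race-free way to claim a range of interrupt numbers against the allocated_irqs bitmap. A rough usage sketch, with my_setup_irq_block() illustrative and error handling trimmed:

	/* Sketch: claim 4 consecutive irq numbers at or above 16 on the
	 * local node, then release them again. Passing irq = -1 means
	 * "any free range starting the search at 'from'". */
	static int my_setup_irq_block(void)
	{
		int base = irq_alloc_descs(-1, 16, 4, numa_node_id());

		if (base < 0)
			return base;	/* -EEXIST or -ENOMEM per the code above */
		/* ... wire up handlers for base..base+3 ... */
		irq_free_descs(base, 4);
		return 0;
	}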
@ -73,8 +73,8 @@ int irq_can_set_affinity(unsigned int irq)
|
|||
{
|
||||
struct irq_desc *desc = irq_to_desc(irq);
|
||||
|
||||
if (CHECK_IRQ_PER_CPU(desc->status) || !desc->chip ||
|
||||
!desc->chip->set_affinity)
|
||||
if (CHECK_IRQ_PER_CPU(desc->status) || !desc->irq_data.chip ||
|
||||
!desc->irq_data.chip->irq_set_affinity)
|
||||
return 0;
|
||||
|
||||
return 1;
|
||||
|
@ -109,17 +109,18 @@ void irq_set_thread_affinity(struct irq_desc *desc)
|
|||
int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
|
||||
{
|
||||
struct irq_desc *desc = irq_to_desc(irq);
|
||||
struct irq_chip *chip = desc->irq_data.chip;
|
||||
unsigned long flags;
|
||||
|
||||
if (!desc->chip->set_affinity)
|
||||
if (!chip->irq_set_affinity)
|
||||
return -EINVAL;
|
||||
|
||||
raw_spin_lock_irqsave(&desc->lock, flags);
|
||||
|
||||
#ifdef CONFIG_GENERIC_PENDING_IRQ
|
||||
if (desc->status & IRQ_MOVE_PCNTXT) {
|
||||
if (!desc->chip->set_affinity(irq, cpumask)) {
|
||||
cpumask_copy(desc->affinity, cpumask);
|
||||
if (!chip->irq_set_affinity(&desc->irq_data, cpumask, false)) {
|
||||
cpumask_copy(desc->irq_data.affinity, cpumask);
|
||||
irq_set_thread_affinity(desc);
|
||||
}
|
||||
}
|
||||
|
@ -128,8 +129,8 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
|
|||
cpumask_copy(desc->pending_mask, cpumask);
|
||||
}
|
||||
#else
|
||||
if (!desc->chip->set_affinity(irq, cpumask)) {
|
||||
cpumask_copy(desc->affinity, cpumask);
|
||||
if (!chip->irq_set_affinity(&desc->irq_data, cpumask, false)) {
|
||||
cpumask_copy(desc->irq_data.affinity, cpumask);
|
||||
irq_set_thread_affinity(desc);
|
||||
}
|
||||
#endif
|
||||
|

@@ -168,16 +169,16 @@ static int setup_affinity(unsigned int irq, struct irq_desc *desc)
 	 * one of the targets is online.
 	 */
 	if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
-		if (cpumask_any_and(desc->affinity, cpu_online_mask)
+		if (cpumask_any_and(desc->irq_data.affinity, cpu_online_mask)
 		    < nr_cpu_ids)
 			goto set_affinity;
 		else
 			desc->status &= ~IRQ_AFFINITY_SET;
 	}

-	cpumask_and(desc->affinity, cpu_online_mask, irq_default_affinity);
+	cpumask_and(desc->irq_data.affinity, cpu_online_mask, irq_default_affinity);
 set_affinity:
-	desc->chip->set_affinity(irq, desc->affinity);
+	desc->irq_data.chip->irq_set_affinity(&desc->irq_data, desc->irq_data.affinity, false);

 	return 0;
 }

@@ -223,7 +224,7 @@ void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)

 	if (!desc->depth++) {
 		desc->status |= IRQ_DISABLED;
-		desc->chip->disable(irq);
+		desc->irq_data.chip->irq_disable(&desc->irq_data);
 	}
 }

@@ -246,11 +247,11 @@ void disable_irq_nosync(unsigned int irq)
 	if (!desc)
 		return;

-	chip_bus_lock(irq, desc);
+	chip_bus_lock(desc);
 	raw_spin_lock_irqsave(&desc->lock, flags);
 	__disable_irq(desc, irq, false);
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
-	chip_bus_sync_unlock(irq, desc);
+	chip_bus_sync_unlock(desc);
 }
 EXPORT_SYMBOL(disable_irq_nosync);
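chip_bus_lock()/chip_bus_sync_unlock() drop the redundant irq argument because the descriptor already carries the irq_data cookie. The helpers presumably reduce to forwarding that cookie to the chip's slow-bus callbacks, roughly (a sketch, not lines from this patch; the actual bodies live in kernel/irq/internals.h):

static inline void chip_bus_lock(struct irq_desc *desc)
{
	/* only chips behind a slow bus provide these callbacks */
	if (unlikely(desc->irq_data.chip->irq_bus_lock))
		desc->irq_data.chip->irq_bus_lock(&desc->irq_data);
}

static inline void chip_bus_sync_unlock(struct irq_desc *desc)
{
	if (unlikely(desc->irq_data.chip->irq_bus_sync_unlock))
		desc->irq_data.chip->irq_bus_sync_unlock(&desc->irq_data);
}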

@@ -313,7 +314,7 @@ void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
 *	IRQ line is re-enabled.
 *
 *	This function may be called from IRQ context only when
- *	desc->chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
+ *	desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 */
 void enable_irq(unsigned int irq)
 {

@@ -323,11 +324,11 @@ void enable_irq(unsigned int irq)
 	if (!desc)
 		return;

-	chip_bus_lock(irq, desc);
+	chip_bus_lock(desc);
 	raw_spin_lock_irqsave(&desc->lock, flags);
 	__enable_irq(desc, irq, false);
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
-	chip_bus_sync_unlock(irq, desc);
+	chip_bus_sync_unlock(desc);
 }
 EXPORT_SYMBOL(enable_irq);

@@ -336,8 +337,8 @@ static int set_irq_wake_real(unsigned int irq, unsigned int on)
 	struct irq_desc *desc = irq_to_desc(irq);
 	int ret = -ENXIO;

-	if (desc->chip->set_wake)
-		ret = desc->chip->set_wake(irq, on);
+	if (desc->irq_data.chip->irq_set_wake)
+		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

 	return ret;
 }

@@ -429,12 +430,12 @@ void compat_irq_chip_set_default_handler(struct irq_desc *desc)
 }

 int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
-		unsigned long flags)
+		      unsigned long flags)
 {
 	int ret;
-	struct irq_chip *chip = desc->chip;
+	struct irq_chip *chip = desc->irq_data.chip;

-	if (!chip || !chip->set_type) {
+	if (!chip || !chip->irq_set_type) {
 		/*
 		 * IRQF_TRIGGER_* but the PIC does not support multiple
 		 * flow-types?

@@ -445,11 +446,11 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 	}

 	/* caller masked out all except trigger mode flags */
-	ret = chip->set_type(irq, flags);
+	ret = chip->irq_set_type(&desc->irq_data, flags);

 	if (ret)
-		pr_err("setting trigger mode %d for irq %u failed (%pF)\n",
-		       (int)flags, irq, chip->set_type);
+		pr_err("setting trigger mode %lu for irq %u failed (%pF)\n",
+		       flags, irq, chip->irq_set_type);
 	else {
 		if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
 			flags |= IRQ_LEVEL;

@@ -457,8 +458,8 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 		desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
 		desc->status |= flags;

-		if (chip != desc->chip)
-			irq_chip_set_defaults(desc->chip);
+		if (chip != desc->irq_data.chip)
+			irq_chip_set_defaults(desc->irq_data.chip);
 	}

 	return ret;
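The format-string fix matches the type: flags is unsigned long, so %lu replaces the old %d plus cast. On the callback side, chip->irq_set_type() now receives the irq_data cookie along with the pre-masked trigger bits; a hedged sketch of an implementation, where my_hw_set_polarity() is hypothetical:

#include <linux/irq.h>
#include <linux/errno.h>

/* Hypothetical polarity register write. */
static void my_hw_set_polarity(unsigned int irq, int rising) { }

/* Sketch of a trigger-type callback under the new prototype; the
 * core has already masked flow_type down to the IRQ_TYPE_* bits. */
static int my_chip_irq_set_type(struct irq_data *data, unsigned int flow_type)
{
	switch (flow_type) {
	case IRQ_TYPE_EDGE_RISING:
		my_hw_set_polarity(data->irq, 1);
		return 0;
	case IRQ_TYPE_EDGE_FALLING:
		my_hw_set_polarity(data->irq, 0);
		return 0;
	default:
		return -EINVAL;		/* unsupported trigger type */
	}
}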

@@ -507,7 +508,7 @@ static int irq_wait_for_interrupt(struct irqaction *action)
 static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
 {
 again:
-	chip_bus_lock(irq, desc);
+	chip_bus_lock(desc);
 	raw_spin_lock_irq(&desc->lock);

 	/*

@@ -521,17 +522,17 @@ static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
 	 */
 	if (unlikely(desc->status & IRQ_INPROGRESS)) {
 		raw_spin_unlock_irq(&desc->lock);
-		chip_bus_sync_unlock(irq, desc);
+		chip_bus_sync_unlock(desc);
 		cpu_relax();
 		goto again;
 	}

 	if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) {
 		desc->status &= ~IRQ_MASKED;
-		desc->chip->unmask(irq);
+		desc->irq_data.chip->irq_unmask(&desc->irq_data);
 	}
 	raw_spin_unlock_irq(&desc->lock);
-	chip_bus_sync_unlock(irq, desc);
+	chip_bus_sync_unlock(desc);
 }

 #ifdef CONFIG_SMP

@@ -556,7 +557,7 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
 	}

 	raw_spin_lock_irq(&desc->lock);
-	cpumask_copy(mask, desc->affinity);
+	cpumask_copy(mask, desc->irq_data.affinity);
 	raw_spin_unlock_irq(&desc->lock);

 	set_cpus_allowed_ptr(current, mask);

@@ -657,7 +658,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 	if (!desc)
 		return -EINVAL;

-	if (desc->chip == &no_irq_chip)
+	if (desc->irq_data.chip == &no_irq_chip)
 		return -ENOSYS;
 	/*
 	 * Some drivers like serial.c use request_irq() heavily,

@@ -752,7 +753,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 	}

 	if (!shared) {
-		irq_chip_set_defaults(desc->chip);
+		irq_chip_set_defaults(desc->irq_data.chip);

 		init_waitqueue_head(&desc->wait_for_threads);

@@ -779,7 +780,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		if (!(desc->status & IRQ_NOAUTOEN)) {
 			desc->depth = 0;
 			desc->status &= ~IRQ_DISABLED;
-			desc->chip->startup(irq);
+			desc->irq_data.chip->irq_startup(&desc->irq_data);
 		} else
 			/* Undo nested disables: */
 			desc->depth = 1;

@@ -912,17 +913,17 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)

 	/* Currently used only by UML, might disappear one day: */
 #ifdef CONFIG_IRQ_RELEASE_METHOD
-	if (desc->chip->release)
-		desc->chip->release(irq, dev_id);
+	if (desc->irq_data.chip->release)
+		desc->irq_data.chip->release(irq, dev_id);
 #endif

 	/* If this was the last handler, shut down the IRQ line: */
 	if (!desc->action) {
 		desc->status |= IRQ_DISABLED;
-		if (desc->chip->shutdown)
-			desc->chip->shutdown(irq);
+		if (desc->irq_data.chip->irq_shutdown)
+			desc->irq_data.chip->irq_shutdown(&desc->irq_data);
 		else
-			desc->chip->disable(irq);
+			desc->irq_data.chip->irq_disable(&desc->irq_data);
 	}

 #ifdef CONFIG_SMP

@@ -997,9 +998,9 @@ void free_irq(unsigned int irq, void *dev_id)
 	if (!desc)
 		return;

-	chip_bus_lock(irq, desc);
+	chip_bus_lock(desc);
 	kfree(__free_irq(irq, dev_id));
-	chip_bus_sync_unlock(irq, desc);
+	chip_bus_sync_unlock(desc);
 }
 EXPORT_SYMBOL(free_irq);

@@ -1086,9 +1087,9 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
 	action->name = devname;
 	action->dev_id = dev_id;

-	chip_bus_lock(irq, desc);
+	chip_bus_lock(desc);
 	retval = __setup_irq(irq, desc, action);
-	chip_bus_sync_unlock(irq, desc);
+	chip_bus_sync_unlock(desc);

 	if (retval)
 		kfree(action);
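The registration path is unchanged for drivers; only the internal bus-lock bracketing around __setup_irq() differs. For reference, a hedged sketch of the driver-facing API this code serves; the device, handlers, and flags are illustrative:

#include <linux/interrupt.h>

/* hardirq context: just check/acknowledge and defer the real work */
static irqreturn_t my_quick_check(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}

/* thread context: may sleep, e.g. talk to a device behind I2C */
static irqreturn_t my_slow_work(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int my_probe_example(unsigned int irq, void *my_dev)
{
	return request_threaded_irq(irq, my_quick_check, my_slow_work,
				    IRQF_ONESHOT, "my-device", my_dev);
}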

kernel/irq/migration.c

@@ -7,6 +7,7 @@
 void move_masked_irq(int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
+	struct irq_chip *chip = desc->irq_data.chip;

 	if (likely(!(desc->status & IRQ_MOVE_PENDING)))
 		return;

@@ -24,7 +25,7 @@ void move_masked_irq(int irq)
 	if (unlikely(cpumask_empty(desc->pending_mask)))
 		return;

-	if (!desc->chip->set_affinity)
+	if (!chip->irq_set_affinity)
 		return;

 	assert_raw_spin_locked(&desc->lock);

@@ -43,8 +44,9 @@ void move_masked_irq(int irq)
 	 */
 	if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
 		   < nr_cpu_ids))
-		if (!desc->chip->set_affinity(irq, desc->pending_mask)) {
-			cpumask_copy(desc->affinity, desc->pending_mask);
+		if (!chip->irq_set_affinity(&desc->irq_data,
+					    desc->pending_mask, false)) {
+			cpumask_copy(desc->irq_data.affinity, desc->pending_mask);
 			irq_set_thread_affinity(desc);
 		}

@@ -61,8 +63,8 @@ void move_native_irq(int irq)
 	if (unlikely(desc->status & IRQ_DISABLED))
 		return;

-	desc->chip->mask(irq);
+	desc->irq_data.chip->irq_mask(&desc->irq_data);
 	move_masked_irq(irq);
-	desc->chip->unmask(irq);
+	desc->irq_data.chip->irq_unmask(&desc->irq_data);
 }

kernel/irq/numa_migrate.c (deleted)

@@ -1,120 +0,0 @@
-/*
- * NUMA irq-desc migration code
- *
- * Migrate IRQ data structures (irq_desc, chip_data, etc.) over to
- * the new "home node" of the IRQ.
- */
-
-#include <linux/irq.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/random.h>
-#include <linux/interrupt.h>
-#include <linux/kernel_stat.h>
-
-#include "internals.h"
-
-static void init_copy_kstat_irqs(struct irq_desc *old_desc,
-				 struct irq_desc *desc,
-				 int node, int nr)
-{
-	init_kstat_irqs(desc, node, nr);
-
-	if (desc->kstat_irqs != old_desc->kstat_irqs)
-		memcpy(desc->kstat_irqs, old_desc->kstat_irqs,
-		       nr * sizeof(*desc->kstat_irqs));
-}
-
-static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc)
-{
-	if (old_desc->kstat_irqs == desc->kstat_irqs)
-		return;
-
-	kfree(old_desc->kstat_irqs);
-	old_desc->kstat_irqs = NULL;
-}
-
-static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
-				   struct irq_desc *desc, int node)
-{
-	memcpy(desc, old_desc, sizeof(struct irq_desc));
-	if (!alloc_desc_masks(desc, node, false)) {
-		printk(KERN_ERR "irq %d: can not get new irq_desc cpumask "
-		       "for migration.\n", irq);
-		return false;
-	}
-	raw_spin_lock_init(&desc->lock);
-	desc->node = node;
-	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
-	init_copy_kstat_irqs(old_desc, desc, node, nr_cpu_ids);
-	init_copy_desc_masks(old_desc, desc);
-	arch_init_copy_chip_data(old_desc, desc, node);
-	return true;
-}
-
-static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc)
-{
-	free_kstat_irqs(old_desc, desc);
-	free_desc_masks(old_desc, desc);
-	arch_free_chip_data(old_desc, desc);
-}
-
-static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
-					     int node)
-{
-	struct irq_desc *desc;
-	unsigned int irq;
-	unsigned long flags;
-
-	irq = old_desc->irq;
-
-	raw_spin_lock_irqsave(&sparse_irq_lock, flags);
-
-	/* We have to check it to avoid races with another CPU */
-	desc = irq_to_desc(irq);
-
-	if (desc && old_desc != desc)
-		goto out_unlock;
-
-	desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
-	if (!desc) {
-		printk(KERN_ERR "irq %d: can not get new irq_desc "
-		       "for migration.\n", irq);
-		/* still use old one */
-		desc = old_desc;
-		goto out_unlock;
-	}
-	if (!init_copy_one_irq_desc(irq, old_desc, desc, node)) {
-		/* still use old one */
-		kfree(desc);
-		desc = old_desc;
-		goto out_unlock;
-	}
-
-	replace_irq_desc(irq, desc);
-	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
-
-	/* free the old one */
-	free_one_irq_desc(old_desc, desc);
-	kfree(old_desc);
-
-	return desc;
-
-out_unlock:
-	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
-
-	return desc;
-}
-
-struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
-{
-	/* those static or target node is -1, do not move them */
-	if (desc->irq < NR_IRQS_LEGACY || node == -1)
-		return desc;
-
-	if (desc->node != node)
-		desc = __real_move_irq_desc(desc, node);
-
-	return desc;
-}

kernel/irq/proc.c

@@ -21,7 +21,7 @@ static struct proc_dir_entry *root_irq_dir;
 static int irq_affinity_proc_show(struct seq_file *m, void *v)
 {
 	struct irq_desc *desc = irq_to_desc((long)m->private);
-	const struct cpumask *mask = desc->affinity;
+	const struct cpumask *mask = desc->irq_data.affinity;

 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	if (desc->status & IRQ_MOVE_PENDING)

@@ -65,7 +65,7 @@ static ssize_t irq_affinity_proc_write(struct file *file,
 	cpumask_var_t new_value;
 	int err;

-	if (!irq_to_desc(irq)->chip->set_affinity || no_irq_affinity ||
+	if (!irq_to_desc(irq)->irq_data.chip->irq_set_affinity || no_irq_affinity ||
 	    irq_balancing_disabled(irq))
 		return -EIO;
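This handler backs /proc/irq/<n>/smp_affinity and rejects writes with -EIO when the chip cannot set affinity or balancing is disabled for the line. A small userspace sketch of exercising it; the irq number and mask are examples, and the file expects a hex cpumask:

#include <stdio.h>

int main(void)
{
	/* pin irq 19 to CPUs {0,2}: hex mask 0x5 */
	FILE *f = fopen("/proc/irq/19/smp_affinity", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fprintf(f, "5\n") < 0)
		perror("fprintf");
	/* a rejected write (e.g. -EIO above) surfaces at flush/close */
	return fclose(f) ? 1 : 0;
}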

@@ -185,7 +185,7 @@ static int irq_node_proc_show(struct seq_file *m, void *v)
 {
 	struct irq_desc *desc = irq_to_desc((long) m->private);

-	seq_printf(m, "%d\n", desc->node);
+	seq_printf(m, "%d\n", desc->irq_data.node);
 	return 0;
 }

@@ -269,7 +269,7 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
 {
 	char name [MAX_NAMELEN];

-	if (!root_irq_dir || (desc->chip == &no_irq_chip) || desc->dir)
+	if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir)
 		return;

 	memset(name, 0, MAX_NAMELEN);

@@ -297,6 +297,24 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
 		   &irq_spurious_proc_fops, (void *)(long)irq);
 }

+void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
+{
+	char name [MAX_NAMELEN];
+
+	if (!root_irq_dir || !desc->dir)
+		return;
+#ifdef CONFIG_SMP
+	remove_proc_entry("smp_affinity", desc->dir);
+	remove_proc_entry("affinity_hint", desc->dir);
+	remove_proc_entry("node", desc->dir);
+#endif
+	remove_proc_entry("spurious", desc->dir);
+
+	memset(name, 0, MAX_NAMELEN);
+	sprintf(name, "%u", irq);
+	remove_proc_entry(name, root_irq_dir);
+}
+
 #undef MAX_NAMELEN

 void unregister_handler_proc(unsigned int irq, struct irqaction *action)

kernel/irq/resend.c

@@ -60,7 +60,7 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq)
 		/*
 		 * Make sure the interrupt is enabled, before resending it:
 		 */
-		desc->chip->enable(irq);
+		desc->irq_data.chip->irq_enable(&desc->irq_data);

 		/*
 		 * We do not resend level type interrupts. Level type

@@ -70,7 +70,8 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq)
 	if ((status & (IRQ_LEVEL | IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
 		desc->status = (status & ~IRQ_PENDING) | IRQ_REPLAY;

-		if (!desc->chip->retrigger || !desc->chip->retrigger(irq)) {
+		if (!desc->irq_data.chip->irq_retrigger ||
+		    !desc->irq_data.chip->irq_retrigger(&desc->irq_data)) {
 #ifdef CONFIG_HARDIRQS_SW_RESEND
 			/* Set it pending and activate the softirq: */
 			set_bit(irq, irqs_resend);

kernel/irq/spurious.c

@@ -14,6 +14,8 @@
 #include <linux/moduleparam.h>
 #include <linux/timer.h>

+#include "internals.h"
+
 static int irqfixup __read_mostly;

 #define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10)

@@ -78,8 +80,8 @@ static int try_one_irq(int irq, struct irq_desc *desc)
 	 * If we did actual work for the real IRQ line we must let the
 	 * IRQ controller clean up too
 	 */
-	if (work && desc->chip && desc->chip->end)
-		desc->chip->end(irq);
+	if (work)
+		irq_end(irq, desc);
 	raw_spin_unlock(&desc->lock);

 	return ok;

@@ -254,7 +256,7 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
 		printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
 		desc->status |= IRQ_DISABLED | IRQ_SPURIOUS_DISABLED;
 		desc->depth++;
-		desc->chip->disable(irq);
+		desc->irq_data.chip->irq_disable(&desc->irq_data);

 		mod_timer(&poll_spurious_irq_timer,
 			  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);

kernel/softirq.c

@@ -910,17 +910,14 @@ int __init __weak early_irq_init(void)
 	return 0;
 }

+#ifdef CONFIG_GENERIC_HARDIRQS
 int __init __weak arch_probe_nr_irqs(void)
 {
-	return 0;
+	return NR_IRQS_LEGACY;
 }

 int __init __weak arch_early_irq_init(void)
 {
 	return 0;
 }
-
-int __weak arch_init_chip_data(struct irq_desc *desc, int node)
-{
-	return 0;
-}
+#endif
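These are __weak defaults, so an architecture overrides one simply by providing a strong definition of the same symbol; a hedged sketch, where the constant is illustrative:

#include <linux/init.h>

/* Illustrative per-machine irq count, not a real kernel constant. */
#define MY_MACH_NR_IRQS	256

/* A strong definition in arch code wins over the __weak stub above. */
int __init arch_probe_nr_irqs(void)
{
	return MY_MACH_NR_IRQS;
}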