Merge branch 'ppi-irq-core-for-rmk' of git://github.com/mzyngier/arm-platforms into devel-stable

commit 34471a9168
46 changed files with 829 additions and 307 deletions

@@ -1420,6 +1420,31 @@ config SMP_ON_UP
 	  If you don't know what to do here, say Y.
 
+config ARM_CPU_TOPOLOGY
+	bool "Support cpu topology definition"
+	depends on SMP && CPU_V7
+	default y
+	help
+	  Support ARM cpu topology definition. The MPIDR register defines
+	  affinity between processors which is then used to describe the cpu
+	  topology of an ARM System.
+
+config SCHED_MC
+	bool "Multi-core scheduler support"
+	depends on ARM_CPU_TOPOLOGY
+	help
+	  Multi-core scheduler support improves the CPU scheduler's decision
+	  making when dealing with multi-core CPU chips at a cost of slightly
+	  increased overhead in some places. If unsure say N here.
+
+config SCHED_SMT
+	bool "SMT scheduler support"
+	depends on ARM_CPU_TOPOLOGY
+	help
+	  Improves the CPU scheduler's decision making when dealing with
+	  MultiThreading at a cost of slightly increased overhead in some
+	  places. If unsure say N here.
+
 config HAVE_ARM_SCU
 	bool
 	help

@@ -29,6 +29,9 @@
 #include <linux/cpu_pm.h>
 #include <linux/cpumask.h>
 #include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
 
 #include <asm/irq.h>
 #include <asm/mach/irq.h>

@@ -181,7 +184,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
 		return -EINVAL;
 
 	mask = 0xff << shift;
-	bit = 1 << (cpu + shift);
+	bit = 1 << (cpu_logical_map(cpu) + shift);
 
 	spin_lock(&irq_controller_lock);
 	val = readl_relaxed(reg) & ~mask;

@@ -260,9 +263,16 @@ static void __init gic_dist_init(struct gic_chip_data *gic,
 			  unsigned int irq_start)
 {
 	unsigned int gic_irqs, irq_limit, i;
+	u32 cpumask;
 	void __iomem *base = gic->dist_base;
-	u32 cpumask = 1 << smp_processor_id();
+	u32 cpu = 0;
+	u32 nrppis = 0, ppi_base = 0;
 
+#ifdef CONFIG_SMP
+	cpu = cpu_logical_map(smp_processor_id());
+#endif
+
+	cpumask = 1 << cpu;
 	cpumask |= cpumask << 8;
 	cpumask |= cpumask << 16;
 

@@ -279,6 +289,23 @@ static void __init gic_dist_init(struct gic_chip_data *gic,
 
 	gic->gic_irqs = gic_irqs;
 
+	/*
+	 * Nobody would be insane enough to use PPIs on a secondary
+	 * GIC, right?
+	 */
+	if (gic == &gic_data[0]) {
+		nrppis = (32 - irq_start) & 31;
+
+		/* The GIC only supports up to 16 PPIs. */
+		if (nrppis > 16)
+			BUG();
+
+		ppi_base = gic->irq_offset + 32 - nrppis;
+	}
+
+	pr_info("Configuring GIC with %d sources (%d PPIs)\n",
+		gic_irqs, (gic == &gic_data[0]) ? nrppis : 0);
+
 	/*
 	 * Set all global interrupts to be level triggered, active low.
 	 */

@@ -314,7 +341,17 @@ static void __init gic_dist_init(struct gic_chip_data *gic,
 	/*
 	 * Setup the Linux IRQ subsystem.
 	 */
-	for (i = irq_start; i < irq_limit; i++) {
+	for (i = 0; i < nrppis; i++) {
+		int ppi = i + ppi_base;
+
+		irq_set_percpu_devid(ppi);
+		irq_set_chip_and_handler(ppi, &gic_chip,
+					 handle_percpu_devid_irq);
+		irq_set_chip_data(ppi, gic);
+		set_irq_flags(ppi, IRQF_VALID | IRQF_NOAUTOEN);
+	}
+
+	for (i = irq_start + nrppis; i < irq_limit; i++) {
 		irq_set_chip_and_handler(i, &gic_chip, handle_fasteoi_irq);
 		irq_set_chip_data(i, gic);
 		set_irq_flags(i, IRQF_VALID | IRQF_PROBE);

@@ -557,20 +594,15 @@ void __cpuinit gic_secondary_init(unsigned int gic_nr)
 	gic_cpu_init(&gic_data[gic_nr]);
 }
 
-void __cpuinit gic_enable_ppi(unsigned int irq)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	irq_set_status_flags(irq, IRQ_NOPROBE);
-	gic_unmask_irq(irq_get_irq_data(irq));
-	local_irq_restore(flags);
-}
-
 #ifdef CONFIG_SMP
 void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
 {
-	unsigned long map = *cpus_addr(*mask);
+	int cpu;
+	unsigned long map = 0;
+
+	/* Convert our logical CPU mask into a physical one. */
+	for_each_cpu(cpu, mask)
+		map |= 1 << cpu_logical_map(cpu);
 
 	/*
 	 * Ensure that stores to Normal memory are visible to the

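The gic_raise_softirq() rewrite above is where the new logical-to-physical CPU numbering pays off: the bitmap written to the distributor must name physical GIC CPU interfaces, so the logical cpumask is translated through cpu_logical_map() first. A minimal standalone sketch of that translation (illustrative only, not part of the commit; cpu_logical_map is modelled as a plain array with hypothetical values):

	#include <stdio.h>

	/* Hypothetical map: the boot CPU is physical CPU 2. */
	static const int cpu_logical_map[4] = { 2, 1, 0, 3 };

	/* Build the physical bitmap from a logical CPU mask. */
	static unsigned long logical_to_physical(unsigned long logical_mask)
	{
		unsigned long map = 0;
		int cpu;

		for (cpu = 0; cpu < 4; cpu++)
			if (logical_mask & (1UL << cpu))
				map |= 1UL << cpu_logical_map[cpu];
		return map;
	}

	int main(void)
	{
		/* Logical CPUs {0,1} -> physical CPUs {2,1} -> 0x6. */
		printf("0x%lx\n", logical_to_physical(0x3));
		return 0;
	}
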
@@ -8,6 +8,7 @@
 #define CPUID_CACHETYPE	1
 #define CPUID_TCM	2
 #define CPUID_TLBTYPE	3
+#define CPUID_MPIDR	5
 
 #define CPUID_EXT_PFR0	"c1, 0"
 #define CPUID_EXT_PFR1	"c1, 1"

@@ -70,6 +71,11 @@ static inline unsigned int __attribute_const__ read_cpuid_tcmstatus(void)
 	return read_cpuid(CPUID_TCM);
 }
 
+static inline unsigned int __attribute_const__ read_cpuid_mpidr(void)
+{
+	return read_cpuid(CPUID_MPIDR);
+}
+
 /*
  * Intel's XScale3 core supports some v6 features (supersections, L2)
  * but advertises itself as v5 as it does not support the v6 ISA. For

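read_cpuid_mpidr() boils down to a CP15 read of the Multiprocessor Affinity Register. For reference, a rough standalone equivalent, assuming an ARMv7 target and GCC-style inline assembly (c0, c0, 5 is the architected MPIDR encoding):

	static inline unsigned int read_mpidr(void)
	{
		unsigned int mpidr;

		/* MRC p15, 0, <Rt>, c0, c0, 5 reads MPIDR on ARMv7. */
		asm volatile("mrc p15, 0, %0, c0, c0, 5" : "=r" (mpidr));
		return mpidr;
	}
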
@@ -25,13 +25,6 @@
 	movne	r1, sp
 	adrne	lr, BSYM(1b)
 	bne	do_IPI
-
-#ifdef CONFIG_LOCAL_TIMERS
-	test_for_ltirq r0, r2, r6, lr
-	movne	r0, sp
-	adrne	lr, BSYM(1b)
-	bne	do_local_timer
-#endif
 #endif
 9997:
 	.endm

arch/arm/include/asm/exception.h (new file, 19 lines)

@@ -0,0 +1,19 @@
+/*
+ * Annotations for marking C functions as exception handlers.
+ *
+ * These should only be used for C functions that are called from the low
+ * level exception entry code and not any intervening C code.
+ */
+#ifndef __ASM_ARM_EXCEPTION_H
+#define __ASM_ARM_EXCEPTION_H
+
+#include <linux/ftrace.h>
+
+#define __exception	__attribute__((section(".exception.text")))
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#define __exception_irq_entry	__irq_entry
+#else
+#define __exception_irq_entry	__exception
+#endif
+
+#endif /* __ASM_ARM_EXCEPTION_H */

@@ -9,9 +9,6 @@
 
 typedef struct {
 	unsigned int __softirq_pending;
-#ifdef CONFIG_LOCAL_TIMERS
-	unsigned int local_timer_irqs;
-#endif
 #ifdef CONFIG_SMP
 	unsigned int ipi_irqs[NR_IPI];
 #endif

@@ -22,15 +22,11 @@
  * interrupt controller spec. To wit:
  *
  * Interrupts 0-15 are IPI
- * 16-28 are reserved
- * 29-31 are local.  We allow 30 to be used for the watchdog.
+ * 16-31 are local.  We allow 30 to be used for the watchdog.
  * 32-1020 are global
 * 1021-1022 are reserved
 * 1023 is "spurious" (no interrupt)
 *
- * For now, we ignore all local interrupts so only return an interrupt if it's
- * between 30 and 1020.  The test_for_ipi routine below will pick up on IPIs.
- *
 * A simple read from the controller will tell us the number of the highest
 * priority enabled interrupt.  We then just need to check whether it is in the
 * valid range for an IRQ (30-1020 inclusive).

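The conditional-compare sequence that follows this comment implements a simple range test. Rendered as C for clarity (a sketch of the logic only, assuming the source-CPU bits 12-10 have already been masked off the acknowledge value):

	/* Nonzero if irqnr is a normal interrupt for this handler. */
	static int gic_irqnr_is_valid(unsigned int irqnr)
	{
		/* 0-15 are IPIs (handled by test_for_ipi); 1021 and up
		 * are reserved or spurious. */
		return irqnr > 15 && irqnr < 1021;
	}
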
@@ -43,7 +39,7 @@
 
 	ldr	\tmp, =1021
 	bic	\irqnr, \irqstat, #0x1c00
-	cmp	\irqnr, #29
+	cmp	\irqnr, #15
 	cmpcc	\irqnr, \irqnr
 	cmpne	\irqnr, \tmp
 	cmpcs	\irqnr, \irqnr

@@ -62,14 +58,3 @@
 	strcc	\irqstat, [\base, #GIC_CPU_EOI]
 	cmpcs	\irqnr, \irqnr
 	.endm
-
-/* As above, this assumes that irqstat and base are preserved.. */
-
-	.macro test_for_ltirq, irqnr, irqstat, base, tmp
-	bic	\irqnr, \irqstat, #0x1c00
-	mov	\tmp, #0
-	cmp	\irqnr, #29
-	moveq	\tmp, #1
-	streq	\irqstat, [\base, #GIC_CPU_EOI]
-	cmp	\tmp, #0
-	.endm

@@ -40,7 +40,6 @@ void gic_init(unsigned int, unsigned int, void __iomem *, void __iomem *);
 void gic_secondary_init(unsigned int);
 void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
 void gic_raise_softirq(const struct cpumask *mask, unsigned int irq);
-void gic_enable_ppi(unsigned int);
 
 struct gic_chip_data {
 	unsigned int irq_offset;

@@ -10,6 +10,8 @@
 #ifndef __ASM_ARM_LOCALTIMER_H
 #define __ASM_ARM_LOCALTIMER_H
 
+#include <linux/interrupt.h>
+
 struct clock_event_device;
 
 /*

@@ -17,27 +19,20 @@ struct clock_event_device;
  */
 void percpu_timer_setup(void);
 
-/*
- * Called from assembly, this is the local timer IRQ handler
- */
-asmlinkage void do_local_timer(struct pt_regs *);
-
-
 #ifdef CONFIG_LOCAL_TIMERS
 
 #ifdef CONFIG_HAVE_ARM_TWD
 
 #include "smp_twd.h"
 
-#define local_timer_ack()	twd_timer_ack()
+#define local_timer_stop(c)	twd_timer_stop((c))
 
 #else
 
 /*
- * Platform provides this to acknowledge a local timer IRQ.
- * Returns true if the local timer IRQ is to be processed.
+ * Stop the local timer
  */
-int local_timer_ack(void);
+void local_timer_stop(struct clock_event_device *);
 
 #endif
 

@@ -52,6 +47,10 @@ static inline int local_timer_setup(struct clock_event_device *evt)
 {
 	return -ENXIO;
 }
+
+static inline void local_timer_stop(struct clock_event_device *evt)
+{
+}
 #endif
 
 #endif

@@ -32,6 +32,11 @@ extern void show_ipi_list(struct seq_file *, int);
  */
 asmlinkage void do_IPI(int ipinr, struct pt_regs *regs);
 
+/*
+ * Called from C code, this handles an IPI.
+ */
+void handle_IPI(int ipinr, struct pt_regs *regs);
+
 /*
  * Setup the set of possible CPUs (via set_cpu_possible)
  */

@@ -65,6 +70,12 @@ extern void platform_secondary_init(unsigned int cpu);
  */
 extern void platform_smp_prepare_cpus(unsigned int);
 
+/*
+ * Logical CPU mapping.
+ */
+extern int __cpu_logical_map[NR_CPUS];
+#define cpu_logical_map(cpu)	__cpu_logical_map[cpu]
+
 /*
  * Initial data for bringing up a secondary CPU.
  */

@@ -88,9 +99,4 @@ extern void platform_cpu_enable(unsigned int cpu);
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
-/*
- * show local interrupt info
- */
-extern void show_local_irqs(struct seq_file *, int);
-
 #endif /* ifndef __ASM_ARM_SMP_H */

@@ -22,7 +22,7 @@ struct clock_event_device;
 
 extern void __iomem *twd_base;
 
-int twd_timer_ack(void);
 void twd_timer_setup(struct clock_event_device *);
+void twd_timer_stop(struct clock_event_device *);
 
 #endif

@@ -62,13 +62,6 @@
 
 #include <asm/outercache.h>
 
-#define __exception	__attribute__((section(".exception.text")))
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-#define __exception_irq_entry	__irq_entry
-#else
-#define __exception_irq_entry	__exception
-#endif
-
 struct thread_info;
 struct task_struct;

@@ -1,6 +1,39 @@
 #ifndef _ASM_ARM_TOPOLOGY_H
 #define _ASM_ARM_TOPOLOGY_H
 
+#ifdef CONFIG_ARM_CPU_TOPOLOGY
+
+#include <linux/cpumask.h>
+
+struct cputopo_arm {
+	int thread_id;
+	int core_id;
+	int socket_id;
+	cpumask_t thread_sibling;
+	cpumask_t core_sibling;
+};
+
+extern struct cputopo_arm cpu_topology[NR_CPUS];
+
+#define topology_physical_package_id(cpu)	(cpu_topology[cpu].socket_id)
+#define topology_core_id(cpu)		(cpu_topology[cpu].core_id)
+#define topology_core_cpumask(cpu)	(&cpu_topology[cpu].core_sibling)
+#define topology_thread_cpumask(cpu)	(&cpu_topology[cpu].thread_sibling)
+
+#define mc_capable()	(cpu_topology[0].socket_id != -1)
+#define smt_capable()	(cpu_topology[0].thread_id != -1)
+
+void init_cpu_topology(void);
+void store_cpu_topology(unsigned int cpuid);
+const struct cpumask *cpu_coregroup_mask(unsigned int cpu);
+
+#else
+
+static inline void init_cpu_topology(void) { }
+static inline void store_cpu_topology(unsigned int cpuid) { }
+
+#endif
+
 #include <asm-generic/topology.h>
 
 #endif /* _ASM_ARM_TOPOLOGY_H */

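Note how the -1 sentinel in thread_id/socket_id doubles as the capability probe: mc_capable() and smt_capable() only inspect CPU0's fields. A small standalone illustration of that convention (struct and macros mirrored from the header; the topology values are made up):

	#include <stdio.h>

	struct cputopo_arm {
		int thread_id;
		int core_id;
		int socket_id;
	};

	static struct cputopo_arm cpu_topology[2];

	#define mc_capable()	(cpu_topology[0].socket_id != -1)
	#define smt_capable()	(cpu_topology[0].thread_id != -1)

	int main(void)
	{
		/* A dual-core part without hardware threads, socket 0. */
		cpu_topology[0] = (struct cputopo_arm){ -1, 0, 0 };
		cpu_topology[1] = (struct cputopo_arm){ -1, 1, 0 };

		printf("mc=%d smt=%d\n", mc_capable(), smt_capable());
		return 0;
	}
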
@@ -73,6 +73,7 @@ obj-$(CONFIG_IWMMXT)	+= iwmmxt.o
 obj-$(CONFIG_CPU_HAS_PMU)	+= pmu.o
 obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o
 AFLAGS_iwmmxt.o			:= -Wa,-mcpu=iwmmxt
+obj-$(CONFIG_ARM_CPU_TOPOLOGY)	+= topology.o
 
 ifneq ($(CONFIG_ARCH_EBSA110),y)
   obj-y		+= io.o

@@ -35,8 +35,8 @@
 #include <linux/list.h>
 #include <linux/kallsyms.h>
 #include <linux/proc_fs.h>
-#include <linux/ftrace.h>
 
+#include <asm/exception.h>
 #include <asm/system.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/irq.h>

@@ -58,9 +58,6 @@ int arch_show_interrupts(struct seq_file *p, int prec)
 #endif
 #ifdef CONFIG_SMP
 	show_ipi_list(p, prec);
-#endif
-#ifdef CONFIG_LOCAL_TIMERS
-	show_local_irqs(p, prec);
 #endif
 	seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
 	return 0;

@@ -16,7 +16,6 @@
 #include <linux/cache.h>
 #include <linux/profile.h>
 #include <linux/errno.h>
-#include <linux/ftrace.h>
 #include <linux/mm.h>
 #include <linux/err.h>
 #include <linux/cpu.h>

@@ -31,6 +30,8 @@
 #include <asm/cacheflush.h>
 #include <asm/cpu.h>
 #include <asm/cputype.h>
+#include <asm/exception.h>
+#include <asm/topology.h>
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>

@@ -39,6 +40,7 @@
 #include <asm/tlbflush.h>
 #include <asm/ptrace.h>
 #include <asm/localtimer.h>
+#include <asm/smp_plat.h>
 
 /*
  * as from 2.5, kernels no longer have an init_tasks structure

@@ -259,6 +261,20 @@ void __ref cpu_die(void)
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
+int __cpu_logical_map[NR_CPUS];
+
+void __init smp_setup_processor_id(void)
+{
+	int i;
+	u32 cpu = is_smp() ? read_cpuid_mpidr() & 0xff : 0;
+
+	cpu_logical_map(0) = cpu;
+	for (i = 1; i < NR_CPUS; ++i)
+		cpu_logical_map(i) = i == cpu ? 0 : i;
+
+	printk(KERN_INFO "Booting Linux on physical CPU %d\n", cpu);
+}
+
 /*
  * Called by both boot and secondaries to move global data into
  * per-processor storage.

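The loop in smp_setup_processor_id() builds a permutation rather than an identity map: slot 0 gets the booting CPU's physical id, and the slot that id would normally occupy gets 0. A worked example, assuming NR_CPUS = 4 and a boot on physical CPU 2:

	cpu_logical_map(0) = 2;		/* boot CPU */
	cpu_logical_map(1) = 1;		/* i != cpu, keep i */
	cpu_logical_map(2) = 0;		/* i == cpu, take the freed 0 */
	cpu_logical_map(3) = 3;

i.e. the map becomes {2, 1, 0, 3}, which is exactly slots 0 and 2 swapped.
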
@@ -268,6 +284,8 @@ static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
 	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);
 
 	cpu_info->loops_per_jiffy = loops_per_jiffy;
+
+	store_cpu_topology(cpuid);
 }
 
 /*

@@ -358,6 +376,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 {
 	unsigned int ncores = num_possible_cpus();
 
+	init_cpu_topology();
+
 	smp_store_cpu_info(smp_processor_id());
 
 	/*

@@ -437,10 +457,6 @@ u64 smp_irq_stat_cpu(unsigned int cpu)
 	for (i = 0; i < NR_IPI; i++)
 		sum += __get_irq_stat(cpu, ipi_irqs[i]);
 
-#ifdef CONFIG_LOCAL_TIMERS
-	sum += __get_irq_stat(cpu, local_timer_irqs);
-#endif
-
 	return sum;
 }
 

@@ -457,33 +473,6 @@ static void ipi_timer(void)
 	irq_exit();
 }
 
-#ifdef CONFIG_LOCAL_TIMERS
-asmlinkage void __exception_irq_entry do_local_timer(struct pt_regs *regs)
-{
-	struct pt_regs *old_regs = set_irq_regs(regs);
-	int cpu = smp_processor_id();
-
-	if (local_timer_ack()) {
-		__inc_irq_stat(cpu, local_timer_irqs);
-		ipi_timer();
-	}
-
-	set_irq_regs(old_regs);
-}
-
-void show_local_irqs(struct seq_file *p, int prec)
-{
-	unsigned int cpu;
-
-	seq_printf(p, "%*s: ", prec, "LOC");
-
-	for_each_present_cpu(cpu)
-		seq_printf(p, "%10u ", __get_irq_stat(cpu, local_timer_irqs));
-
-	seq_printf(p, " Local timer interrupts\n");
-}
-#endif
-
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 static void smp_timer_broadcast(const struct cpumask *mask)
 {

@@ -534,7 +523,7 @@ static void percpu_timer_stop(void)
 	unsigned int cpu = smp_processor_id();
 	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
 
-	evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
+	local_timer_stop(evt);
 }
 #endif
 

@@ -566,6 +555,11 @@ static void ipi_cpu_stop(unsigned int cpu)
  * Main handler for inter-processor interrupts
  */
 asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
+{
+	handle_IPI(ipinr, regs);
+}
+
+void handle_IPI(int ipinr, struct pt_regs *regs)
 {
 	unsigned int cpu = smp_processor_id();
 	struct pt_regs *old_regs = set_irq_regs(regs);

@@ -33,7 +33,7 @@ unsigned int __init scu_get_core_count(void __iomem *scu_base)
 /*
  * Enable the SCU
  */
-void __init scu_enable(void __iomem *scu_base)
+void scu_enable(void __iomem *scu_base)
 {
 	u32 scu_ctrl;
 

@@ -19,6 +19,7 @@
 #include <linux/io.h>
 
 #include <asm/smp_twd.h>
+#include <asm/localtimer.h>
 #include <asm/hardware/gic.h>
 
 /* set up by the platform code */

@@ -26,6 +27,8 @@ void __iomem *twd_base;
 
 static unsigned long twd_timer_rate;
 
+static struct clock_event_device __percpu **twd_evt;
+
 static void twd_set_mode(enum clock_event_mode mode,
 			struct clock_event_device *clk)
 {

@@ -80,6 +83,12 @@ int twd_timer_ack(void)
 	return 0;
 }
 
+void twd_timer_stop(struct clock_event_device *clk)
+{
+	twd_set_mode(CLOCK_EVT_MODE_UNUSED, clk);
+	disable_percpu_irq(clk->irq);
+}
+
 static void __cpuinit twd_calibrate_rate(void)
 {
 	unsigned long count;

@@ -119,11 +128,43 @@ static void __cpuinit twd_calibrate_rate(void)
 	}
 }
 
+static irqreturn_t twd_handler(int irq, void *dev_id)
+{
+	struct clock_event_device *evt = *(struct clock_event_device **)dev_id;
+
+	if (twd_timer_ack()) {
+		evt->event_handler(evt);
+		return IRQ_HANDLED;
+	}
+
+	return IRQ_NONE;
+}
+
 /*
  * Setup the local clock events for a CPU.
  */
 void __cpuinit twd_timer_setup(struct clock_event_device *clk)
 {
+	struct clock_event_device **this_cpu_clk;
+
+	if (!twd_evt) {
+		int err;
+
+		twd_evt = alloc_percpu(struct clock_event_device *);
+		if (!twd_evt) {
+			pr_err("twd: can't allocate memory\n");
+			return;
+		}
+
+		err = request_percpu_irq(clk->irq, twd_handler,
+					 "twd", twd_evt);
+		if (err) {
+			pr_err("twd: can't register interrupt %d (%d)\n",
+			       clk->irq, err);
+			return;
+		}
+	}
+
 	twd_calibrate_rate();
 
 	clk->name = "local_timer";
@@ -137,8 +178,10 @@ void __cpuinit twd_timer_setup(struct clock_event_device *clk)
 	clk->max_delta_ns = clockevent_delta2ns(0xffffffff, clk);
 	clk->min_delta_ns = clockevent_delta2ns(0xf, clk);
 
+	this_cpu_clk = __this_cpu_ptr(twd_evt);
+	*this_cpu_clk = clk;
+
 	clockevents_register_device(clk);
 
-	/* Make sure our local interrupt controller has this enabled */
-	gic_enable_ppi(clk->irq);
+	enable_percpu_irq(clk->irq, 0);
 }

arch/arm/kernel/topology.c (new file, 148 lines)

@@ -0,0 +1,148 @@
+/*
+ * arch/arm/kernel/topology.c
+ *
+ * Copyright (C) 2011 Linaro Limited.
+ * Written by: Vincent Guittot
+ *
+ * based on arch/sh/kernel/topology.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/init.h>
+#include <linux/percpu.h>
+#include <linux/node.h>
+#include <linux/nodemask.h>
+#include <linux/sched.h>
+
+#include <asm/cputype.h>
+#include <asm/topology.h>
+
+#define MPIDR_SMP_BITMASK (0x3 << 30)
+#define MPIDR_SMP_VALUE (0x2 << 30)
+
+#define MPIDR_MT_BITMASK (0x1 << 24)
+
+/*
+ * These masks reflect the current use of the affinity levels.
+ * The affinity level can be up to 16 bits according to ARM ARM
+ */
+
+#define MPIDR_LEVEL0_MASK 0x3
+#define MPIDR_LEVEL0_SHIFT 0
+
+#define MPIDR_LEVEL1_MASK 0xF
+#define MPIDR_LEVEL1_SHIFT 8
+
+#define MPIDR_LEVEL2_MASK 0xFF
+#define MPIDR_LEVEL2_SHIFT 16
+
+struct cputopo_arm cpu_topology[NR_CPUS];
+
+const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
+{
+	return &cpu_topology[cpu].core_sibling;
+}
+
+/*
+ * store_cpu_topology is called at boot when only one cpu is running
+ * and with the mutex cpu_hotplug.lock locked, when several cpus have booted,
+ * which prevents simultaneous write access to cpu_topology array
+ */
+void store_cpu_topology(unsigned int cpuid)
+{
+	struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid];
+	unsigned int mpidr;
+	unsigned int cpu;
+
+	/* If the cpu topology has been already set, just return */
+	if (cpuid_topo->core_id != -1)
+		return;
+
+	mpidr = read_cpuid_mpidr();
+
+	/* create cpu topology mapping */
+	if ((mpidr & MPIDR_SMP_BITMASK) == MPIDR_SMP_VALUE) {
+		/*
+		 * This is a multiprocessor system
+		 * multiprocessor format & multiprocessor mode field are set
+		 */
+
+		if (mpidr & MPIDR_MT_BITMASK) {
+			/* core performance interdependency */
+			cpuid_topo->thread_id = (mpidr >> MPIDR_LEVEL0_SHIFT)
+				& MPIDR_LEVEL0_MASK;
+			cpuid_topo->core_id = (mpidr >> MPIDR_LEVEL1_SHIFT)
+				& MPIDR_LEVEL1_MASK;
+			cpuid_topo->socket_id = (mpidr >> MPIDR_LEVEL2_SHIFT)
+				& MPIDR_LEVEL2_MASK;
+		} else {
+			/* largely independent cores */
+			cpuid_topo->thread_id = -1;
+			cpuid_topo->core_id = (mpidr >> MPIDR_LEVEL0_SHIFT)
+				& MPIDR_LEVEL0_MASK;
+			cpuid_topo->socket_id = (mpidr >> MPIDR_LEVEL1_SHIFT)
+				& MPIDR_LEVEL1_MASK;
+		}
+	} else {
+		/*
+		 * This is an uniprocessor system
+		 * we are in multiprocessor format but uniprocessor system
+		 * or in the old uniprocessor format
+		 */
+		cpuid_topo->thread_id = -1;
+		cpuid_topo->core_id = 0;
+		cpuid_topo->socket_id = -1;
+	}
+
+	/* update core and thread sibling masks */
+	for_each_possible_cpu(cpu) {
+		struct cputopo_arm *cpu_topo = &cpu_topology[cpu];
+
+		if (cpuid_topo->socket_id == cpu_topo->socket_id) {
+			cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
+			if (cpu != cpuid)
+				cpumask_set_cpu(cpu,
+					&cpuid_topo->core_sibling);
+
+			if (cpuid_topo->core_id == cpu_topo->core_id) {
+				cpumask_set_cpu(cpuid,
+					&cpu_topo->thread_sibling);
+				if (cpu != cpuid)
+					cpumask_set_cpu(cpu,
+						&cpuid_topo->thread_sibling);
+			}
+		}
+	}
+	smp_wmb();
+
+	printk(KERN_INFO "CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
+		cpuid, cpu_topology[cpuid].thread_id,
+		cpu_topology[cpuid].core_id,
+		cpu_topology[cpuid].socket_id, mpidr);
+}
+
+/*
+ * init_cpu_topology is called at boot when only one cpu is running
+ * which prevent simultaneous write access to cpu_topology array
+ */
+void init_cpu_topology(void)
+{
+	unsigned int cpu;
+
+	/* init core mask */
+	for_each_possible_cpu(cpu) {
+		struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]);
+
+		cpu_topo->thread_id = -1;
+		cpu_topo->core_id = -1;
+		cpu_topo->socket_id = -1;
+		cpumask_clear(&cpu_topo->core_sibling);
+		cpumask_clear(&cpu_topo->thread_sibling);
+	}
+	smp_wmb();
+}

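A worked decode against the masks above, for a hypothetical value mpidr = 0x80000102: bits [31:30] are 0b10, so the multiprocessor-format check passes; bit 24 (MT) is clear, so the "largely independent cores" branch runs and core_id = (0x80000102 >> 0) & 0x3 = 2, socket_id = (0x80000102 >> 8) & 0xF = 1, and thread_id stays -1.
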
@@ -27,6 +27,7 @@
 
 #include <linux/atomic.h>
 #include <asm/cacheflush.h>
+#include <asm/exception.h>
 #include <asm/system.h>
 #include <asm/unistd.h>
 #include <asm/traps.h>

@@ -55,7 +55,7 @@
 
 	bic	\irqnr, \irqstat, #0x1c00
 
-	cmp	\irqnr, #29
+	cmp	\irqnr, #15
 	cmpcc	\irqnr, \irqnr
 	cmpne	\irqnr, \tmp
 	cmpcs	\irqnr, \irqnr

@@ -76,8 +76,3 @@
 	strcc	\irqstat, [\base, #GIC_CPU_EOI]
 	cmpcs	\irqnr, \irqnr
 	.endm
-
-/* As above, this assumes that irqstat and base are preserved.. */
-
-	.macro test_for_ltirq, irqnr, irqstat, base, tmp
-	.endm

@@ -380,9 +380,11 @@ static void exynos4_mct_tick_init(struct clock_event_device *evt)
 
 	if (cpu == 0) {
 		mct_tick0_event_irq.dev_id = &mct_tick[cpu];
+		evt->irq = IRQ_MCT_L0;
 		setup_irq(IRQ_MCT_L0, &mct_tick0_event_irq);
 	} else {
 		mct_tick1_event_irq.dev_id = &mct_tick[cpu];
+		evt->irq = IRQ_MCT_L1;
 		setup_irq(IRQ_MCT_L1, &mct_tick1_event_irq);
 		irq_set_affinity(IRQ_MCT_L1, cpumask_of(1));
 	}

|
||||||
exynos4_mct_tick_init(evt);
|
exynos4_mct_tick_init(evt);
|
||||||
}
|
}
|
||||||
|
|
||||||
int local_timer_ack(void)
|
void local_timer_stop(struct clock_event_device *evt)
|
||||||
{
|
{
|
||||||
return 0;
|
evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
|
||||||
|
disable_irq(evt->irq);
|
||||||
}
|
}
|
||||||
|
|
||||||
#endif /* CONFIG_LOCAL_TIMERS */
|
#endif /* CONFIG_LOCAL_TIMERS */
|
||||||
|
|
|
@@ -191,12 +191,10 @@ void __init smp_init_cpus(void)
 	ncores = scu_base ? scu_get_core_count(scu_base) : 1;
 
 	/* sanity check */
-	if (ncores > NR_CPUS) {
-		printk(KERN_WARNING
-		       "EXYNOS4: no. of cores (%d) greater than configured "
-		       "maximum of %d - clipping\n",
-		       ncores, NR_CPUS);
-		ncores = NR_CPUS;
+	if (ncores > nr_cpu_ids) {
+		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
+			ncores, nr_cpu_ids);
+		ncores = nr_cpu_ids;
 	}
 
 	for (i = 0; i < ncores; i++)

@@ -53,8 +53,6 @@ static void __init msm8x60_map_io(void)
 
 static void __init msm8x60_init_irq(void)
 {
-	unsigned int i;
-
 	gic_init(0, GIC_PPI_START, MSM_QGIC_DIST_BASE,
 		 (void *)MSM_QGIC_CPU_BASE);
 

@@ -66,15 +64,6 @@ static void __init msm8x60_init_irq(void)
 	 */
 	if (!machine_is_msm8x60_sim())
 		writel(0x0000FFFF, MSM_QGIC_DIST_BASE + GIC_DIST_ENABLE_SET);
-
-	/* FIXME: Not installing AVS_SVICINT and AVS_SVICINTSWDONE yet
-	 * as they are configured as level, which does not play nice with
-	 * handle_percpu_irq.
-	 */
-	for (i = GIC_PPI_START; i < GIC_SPI_START; i++) {
-		if (i != AVS_SVICINT && i != AVS_SVICINTSWDONE)
-			irq_set_handler(i, handle_percpu_irq);
-	}
 }
 
 static void __init msm8x60_init(void)

@@ -8,81 +8,10 @@
  * warranty of any kind, whether express or implied.
  */
 
-#include <mach/hardware.h>
-#include <asm/hardware/gic.h>
+#include <asm/hardware/entry-macro-gic.S>
 
 	.macro	disable_fiq
 	.endm
 
-	.macro	get_irqnr_preamble, base, tmp
-	ldr	\base, =gic_cpu_base_addr
-	ldr	\base, [\base]
-	.endm
-
 	.macro	arch_ret_to_user, tmp1, tmp2
 	.endm
-
-/*
- * The interrupt numbering scheme is defined in the
- * interrupt controller spec. To wit:
- *
- * Migrated the code from ARM MP port to be more consistent
- * with interrupt processing , the following still holds true
- * however, all interrupts are treated the same regardless of
- * if they are local IPI or PPI
- *
- * Interrupts 0-15 are IPI
- * 16-31 are PPI
- *   (16-18 are the timers)
- * 32-1020 are global
- * 1021-1022 are reserved
- * 1023 is "spurious" (no interrupt)
- *
- * A simple read from the controller will tell us the number of the
- * highest priority enabled interrupt.  We then just need to check
- * whether it is in the valid range for an IRQ (0-1020 inclusive).
- *
- * Base ARM code assumes that the local (private) peripheral interrupts
- * are not valid, we treat them differently, in that the privates are
- * handled like normal shared interrupts with the exception that only
- * one processor can register the interrupt and the handler must be
- * the same for all processors.
- */
-
-	.macro	get_irqnr_and_base, irqnr, irqstat, base, tmp
-
-	ldr	\irqstat, [\base, #GIC_CPU_INTACK] /* bits 12-10 = srcCPU,
-						      9-0 = int # */
-
-	bic	\irqnr, \irqstat, #0x1c00	@mask src
-	cmp	\irqnr, #15
-	ldr	\tmp, =1021
-	cmpcc	\irqnr, \irqnr
-	cmpne	\irqnr, \tmp
-	cmpcs	\irqnr, \irqnr
-
-	.endm
-
-	/* We assume that irqstat (the raw value of the IRQ acknowledge
-	 * register) is preserved from the macro above.
-	 * If there is an IPI, we immediately signal end of interrupt on the
-	 * controller, since this requires the original irqstat value which
-	 * we won't easily be able to recreate later.
-	 */
-	.macro test_for_ipi, irqnr, irqstat, base, tmp
-	bic	\irqnr, \irqstat, #0x1c00
-	cmp	\irqnr, #16
-	strcc	\irqstat, [\base, #GIC_CPU_EOI]
-	cmpcs	\irqnr, \irqnr
-	.endm
-
-	/* As above, this assumes that irqstat and base are preserved.. */
-
-	.macro test_for_ltirq, irqnr, irqstat, base, tmp
-	bic	\irqnr, \irqstat, #0x1c00
-	mov	\tmp, #0
-	cmp	\irqnr, #16
-	moveq	\tmp, #1
-	streq	\irqstat, [\base, #GIC_CPU_EOI]
-	cmp	\tmp, #0
-	.endm

@@ -156,6 +156,12 @@ void __init smp_init_cpus(void)
 {
 	unsigned int i, ncores = get_core_count();
 
+	if (ncores > nr_cpu_ids) {
+		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
+			ncores, nr_cpu_ids);
+		ncores = nr_cpu_ids;
+	}
+
 	for (i = 0; i < ncores; i++)
 		set_cpu_possible(i, true);
 

@@ -71,12 +71,16 @@ enum timer_location {
 struct msm_clock {
 	struct clock_event_device   clockevent;
 	struct clocksource          clocksource;
-	struct irqaction            irq;
+	unsigned int		    irq;
 	void __iomem                *regbase;
 	uint32_t                    freq;
 	uint32_t                    shift;
 	void __iomem                *global_counter;
 	void __iomem                *local_counter;
+	union {
+		struct clock_event_device		*evt;
+		struct clock_event_device __percpu	**percpu_evt;
+	};
 };
 
 enum {

@@ -87,13 +91,10 @@
 
 
 static struct msm_clock msm_clocks[];
-static struct clock_event_device *local_clock_event;
 
 static irqreturn_t msm_timer_interrupt(int irq, void *dev_id)
 {
-	struct clock_event_device *evt = dev_id;
-	if (smp_processor_id() != 0)
-		evt = local_clock_event;
+	struct clock_event_device *evt = *(struct clock_event_device **)dev_id;
 	if (evt->event_handler == NULL)
 		return IRQ_HANDLED;
 	evt->event_handler(evt);

@@ -171,13 +172,7 @@ static struct msm_clock msm_clocks[] = {
 			.mask           = CLOCKSOURCE_MASK(32),
 			.flags          = CLOCK_SOURCE_IS_CONTINUOUS,
 		},
-		.irq = {
-			.name    = "gp_timer",
-			.flags   = IRQF_DISABLED | IRQF_TIMER | IRQF_TRIGGER_RISING,
-			.handler = msm_timer_interrupt,
-			.dev_id  = &msm_clocks[0].clockevent,
-			.irq     = INT_GP_TIMER_EXP
-		},
+		.irq = INT_GP_TIMER_EXP,
 		.freq = GPT_HZ,
 	},
 	[MSM_CLOCK_DGT] = {

@@ -196,13 +191,7 @@ static struct msm_clock msm_clocks[] = {
 			.mask           = CLOCKSOURCE_MASK((32 - MSM_DGT_SHIFT)),
 			.flags          = CLOCK_SOURCE_IS_CONTINUOUS,
 		},
-		.irq = {
-			.name    = "dg_timer",
-			.flags   = IRQF_DISABLED | IRQF_TIMER | IRQF_TRIGGER_RISING,
-			.handler = msm_timer_interrupt,
-			.dev_id  = &msm_clocks[1].clockevent,
-			.irq     = INT_DEBUG_TIMER_EXP
-		},
+		.irq = INT_DEBUG_TIMER_EXP,
 		.freq = DGT_HZ >> MSM_DGT_SHIFT,
 		.shift = MSM_DGT_SHIFT,
 	}

@@ -261,10 +250,30 @@ static void __init msm_timer_init(void)
 			printk(KERN_ERR "msm_timer_init: clocksource_register "
 			       "failed for %s\n", cs->name);
 
-		res = setup_irq(clock->irq.irq, &clock->irq);
+		ce->irq = clock->irq;
+		if (cpu_is_msm8x60() || cpu_is_msm8960()) {
+			clock->percpu_evt = alloc_percpu(struct clock_event_device *);
+			if (!clock->percpu_evt) {
+				pr_err("msm_timer_init: memory allocation "
+				       "failed for %s\n", ce->name);
+				continue;
+			}
+
+			*__this_cpu_ptr(clock->percpu_evt) = ce;
+			res = request_percpu_irq(ce->irq, msm_timer_interrupt,
+						 ce->name, clock->percpu_evt);
+			if (!res)
+				enable_percpu_irq(ce->irq, 0);
+		} else {
+			clock->evt = ce;
+			res = request_irq(ce->irq, msm_timer_interrupt,
+					  IRQF_TIMER | IRQF_NOBALANCING | IRQF_TRIGGER_RISING,
+					  ce->name, &clock->evt);
+		}
+
 		if (res)
-			printk(KERN_ERR "msm_timer_init: setup_irq "
-			       "failed for %s\n", cs->name);
+			pr_err("msm_timer_init: request_irq failed for %s\n",
+			       ce->name);
 
 		clockevents_register_device(ce);
 	}

@@ -273,6 +282,7 @@ static void __init msm_timer_init(void)
 #ifdef CONFIG_SMP
 int __cpuinit local_timer_setup(struct clock_event_device *evt)
 {
+	static bool local_timer_inited;
 	struct msm_clock *clock = &msm_clocks[MSM_GLOBAL_TIMER];
 
 	/* Use existing clock_event for cpu 0 */
@@ -281,12 +291,13 @@ int __cpuinit local_timer_setup(struct clock_event_device *evt)
 
 	writel(DGT_CLK_CTL_DIV_4, MSM_TMR_BASE + DGT_CLK_CTL);
 
-	if (!local_clock_event) {
+	if (!local_timer_inited) {
 		writel(0, clock->regbase + TIMER_ENABLE);
 		writel(0, clock->regbase + TIMER_CLEAR);
 		writel(~0, clock->regbase + TIMER_MATCH_VAL);
+		local_timer_inited = true;
 	}
-	evt->irq = clock->irq.irq;
+	evt->irq = clock->irq;
 	evt->name = "local_timer";
 	evt->features = CLOCK_EVT_FEAT_ONESHOT;
 	evt->rating = clock->clockevent.rating;
@@ -298,17 +309,17 @@ int __cpuinit local_timer_setup(struct clock_event_device *evt)
 		clockevent_delta2ns(0xf0000000 >> clock->shift, evt);
 	evt->min_delta_ns = clockevent_delta2ns(4, evt);
 
-	local_clock_event = evt;
-	gic_enable_ppi(clock->irq.irq);
+	*__this_cpu_ptr(clock->percpu_evt) = evt;
+	enable_percpu_irq(evt->irq, 0);
 
 	clockevents_register_device(evt);
 	return 0;
 }
 
-inline int local_timer_ack(void)
+void local_timer_stop(struct clock_event_device *evt)
 {
-	return 1;
+	evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
+	disable_percpu_irq(evt->irq);
 }
 
 #endif

@@ -78,7 +78,7 @@
 4401:	ldr	\irqstat, [\base, #GIC_CPU_INTACK]
 	ldr	\tmp, =1021
 	bic	\irqnr, \irqstat, #0x1c00
-	cmp	\irqnr, #29
+	cmp	\irqnr, #15
 	cmpcc	\irqnr, \irqnr
 	cmpne	\irqnr, \tmp
 	cmpcs	\irqnr, \irqnr

@@ -101,18 +101,6 @@
 	it	cs
 	cmpcs	\irqnr, \irqnr
 	.endm
-
-	/* As above, this assumes that irqstat and base are preserved */
-
-	.macro test_for_ltirq, irqnr, irqstat, base, tmp
-	bic	\irqnr, \irqstat, #0x1c00
-	mov	\tmp, #0
-	cmp	\irqnr, #29
-	itt	eq
-	moveq	\tmp, #1
-	streq	\irqstat, [\base, #GIC_CPU_EOI]
-	cmp	\tmp, #0
-	.endm
 #endif	/* CONFIG_SMP */
 
 #else	/* MULTI_OMAP2 */

@@ -109,12 +109,10 @@ void __init smp_init_cpus(void)
 	ncores = scu_get_core_count(scu_base);
 
 	/* sanity check */
-	if (ncores > NR_CPUS) {
-		printk(KERN_WARNING
-		       "OMAP4: no. of cores (%d) greater than configured "
-		       "maximum of %d - clipping\n",
-		       ncores, NR_CPUS);
-		ncores = NR_CPUS;
+	if (ncores > nr_cpu_ids) {
+		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
+			ncores, nr_cpu_ids);
+		ncores = nr_cpu_ids;
 	}
 
 	for (i = 0; i < ncores; i++)

@@ -19,6 +19,8 @@
 #include <linux/io.h>
 #include <linux/irq.h>
 
+#include <asm/exception.h>
+
 #include <mach/hardware.h>
 #include <mach/irqs.h>
 #include <mach/gpio.h>

@@ -52,12 +52,10 @@ void __init smp_init_cpus(void)
 	ncores = scu_base ? scu_get_core_count(scu_base) : 1;
 
 	/* sanity check */
-	if (ncores > NR_CPUS) {
-		printk(KERN_WARNING
-		       "Realview: no. of cores (%d) greater than configured "
-		       "maximum of %d - clipping\n",
-		       ncores, NR_CPUS);
-		ncores = NR_CPUS;
+	if (ncores > nr_cpu_ids) {
+		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
+			ncores, nr_cpu_ids);
+		ncores = nr_cpu_ids;
 	}
 
 	for (i = 0; i < ncores; i++)

@@ -51,7 +51,4 @@
 	.macro	test_for_ipi, irqnr, irqstat, base, tmp
 	.endm
 
-	.macro	test_for_ltirq, irqnr, irqstat, base, tmp
-	.endm
-
 	arch_irq_handler shmobile_handle_irq_intc

@@ -27,8 +27,5 @@
 	.macro	test_for_ipi, irqnr, irqstat, base, tmp
 	.endm
 
-	.macro	test_for_ltirq, irqnr, irqstat, base, tmp
-	.endm
-
 	.macro	arch_ret_to_user, tmp1, tmp2
 	.endm

@@ -56,6 +56,12 @@ void __init smp_init_cpus(void)
 	unsigned int ncores = shmobile_smp_get_core_count();
 	unsigned int i;
 
+	if (ncores > nr_cpu_ids) {
+		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
+			ncores, nr_cpu_ids);
+		ncores = nr_cpu_ids;
+	}
+
 	for (i = 0; i < ncores; i++)
 		set_cpu_possible(i, true);
 

@@ -114,10 +114,10 @@ void __init smp_init_cpus(void)
 {
 	unsigned int i, ncores = scu_get_core_count(scu_base);
 
-	if (ncores > NR_CPUS) {
-		printk(KERN_ERR "Tegra: no. of cores (%u) greater than configured (%u), clipping\n",
-			ncores, NR_CPUS);
-		ncores = NR_CPUS;
+	if (ncores > nr_cpu_ids) {
+		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
+			ncores, nr_cpu_ids);
+		ncores = nr_cpu_ids;
 	}
 
 	for (i = 0; i < ncores; i++)

@@ -156,12 +156,10 @@ void __init smp_init_cpus(void)
 	ncores = scu_base ? scu_get_core_count(scu_base) : 1;
 
 	/* sanity check */
-	if (ncores > NR_CPUS) {
-		printk(KERN_WARNING
-		       "U8500: no. of cores (%d) greater than configured "
-		       "maximum of %d - clipping\n",
-		       ncores, NR_CPUS);
-		ncores = NR_CPUS;
+	if (ncores > nr_cpu_ids) {
+		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
+			ncores, nr_cpu_ids);
+		ncores = nr_cpu_ids;
 	}
 
 	for (i = 0; i < ncores; i++)

@@ -221,6 +221,12 @@ static void ct_ca9x4_init_cpu_map(void)
 {
 	int i, ncores = scu_get_core_count(MMIO_P2V(A9_MPCORE_SCU));
 
+	if (ncores > nr_cpu_ids) {
+		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
+			ncores, nr_cpu_ids);
+		ncores = nr_cpu_ids;
+	}
+
 	for (i = 0; i < ncores; ++i)
 		set_cpu_possible(i, true);
 

@@ -20,6 +20,7 @@
 #include <linux/highmem.h>
 #include <linux/perf_event.h>
 
+#include <asm/exception.h>
 #include <asm/system.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>

@@ -95,6 +95,7 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
  * @flags:	flags (see IRQF_* above)
  * @name:	name of the device
  * @dev_id:	cookie to identify the device
+ * @percpu_dev_id:	cookie to identify the device
  * @next:	pointer to the next irqaction for shared interrupts
  * @irq:	interrupt number
  * @dir:	pointer to the proc/irq/NN/name entry


@@ -104,17 +105,18 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
  * @thread_mask: bitmask for keeping track of @thread activity
  */
 struct irqaction {
 	irq_handler_t handler;
 	unsigned long flags;
 	void *dev_id;
+	void __percpu *percpu_dev_id;
 	struct irqaction *next;
 	int irq;
 	irq_handler_t thread_fn;
 	struct task_struct *thread;
 	unsigned long thread_flags;
 	unsigned long thread_mask;
 	const char *name;
 	struct proc_dir_entry *dir;
 } ____cacheline_internodealigned_in_smp;
 
 extern irqreturn_t no_action(int cpl, void *dev_id);
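
The only structural change to struct irqaction is the new percpu_dev_id member: for a percpu-devid interrupt the cookie is a __percpu pointer, and each CPU's handler invocation is given that CPU's instance rather than a shared pointer. A hypothetical driver-side cookie might look like this (all names here are illustrative, not part of the patch):

#include <linux/percpu.h>

/* Hypothetical per-cpu device state; one instance exists per CPU. */
struct example_timer_data {
	unsigned long ticks;
};

static DEFINE_PER_CPU(struct example_timer_data, example_timer_cookie);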

@@ -136,6 +138,10 @@ extern int __must_check
 request_any_context_irq(unsigned int irq, irq_handler_t handler,
 			unsigned long flags, const char *name, void *dev_id);
 
+extern int __must_check
+request_percpu_irq(unsigned int irq, irq_handler_t handler,
+		   const char *devname, void __percpu *percpu_dev_id);
+
 extern void exit_irq_thread(void);
 #else
 
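
The new request_percpu_irq() entry point takes the address of the per-cpu variable itself and, unlike request_irq(), no IRQF_* flags argument: percpu-devid lines are never shared or threaded. A sketch using the hypothetical cookie declared above (the IRQ number 29 is an assumption, chosen because ARM private peripheral interrupts live in that range):

static irqreturn_t example_timer_handler(int irq, void *dev_id);

static int __init example_timer_request(void)
{
	return request_percpu_irq(29, example_timer_handler,
				  "example-timer", &example_timer_cookie);
}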

@@ -164,10 +170,18 @@ request_any_context_irq(unsigned int irq, irq_handler_t handler,
 	return request_irq(irq, handler, flags, name, dev_id);
 }
 
+static inline int __must_check
+request_percpu_irq(unsigned int irq, irq_handler_t handler,
+		   const char *devname, void __percpu *percpu_dev_id)
+{
+	return request_irq(irq, handler, 0, devname, percpu_dev_id);
+}
+
 static inline void exit_irq_thread(void) { }
 #endif
 
 extern void free_irq(unsigned int, void *);
+extern void free_percpu_irq(unsigned int, void __percpu *);
 
 struct device;

@@ -207,7 +221,9 @@ extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
 
 extern void disable_irq_nosync(unsigned int irq);
 extern void disable_irq(unsigned int irq);
+extern void disable_percpu_irq(unsigned int irq);
 extern void enable_irq(unsigned int irq);
+extern void enable_percpu_irq(unsigned int irq, unsigned int type);
 
 /* The following three functions are for the core kernel use only. */
 #ifdef CONFIG_GENERIC_HARDIRQS

@@ -66,6 +66,7 @@ typedef void (*irq_preflow_handler_t)(struct irq_data *data);
 * IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set)
 * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context
 * IRQ_NESTED_TRHEAD - Interrupt nests into another thread
+ * IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable
 */
 enum {
 	IRQ_TYPE_NONE = 0x00000000,

@@ -88,12 +89,13 @@ enum {
 	IRQ_MOVE_PCNTXT = (1 << 14),
 	IRQ_NESTED_THREAD = (1 << 15),
 	IRQ_NOTHREAD = (1 << 16),
+	IRQ_PER_CPU_DEVID = (1 << 17),
 };
 
 #define IRQF_MODIFY_MASK \
 	(IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
 	 IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
-	 IRQ_PER_CPU | IRQ_NESTED_THREAD)
+	 IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID)
 
 #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)

@@ -336,12 +338,14 @@ struct irq_chip {
 * IRQCHIP_MASK_ON_SUSPEND: Mask non wake irqs in the suspend path
 * IRQCHIP_ONOFFLINE_ENABLED: Only call irq_on/off_line callbacks
 *                            when irq enabled
+ * IRQCHIP_SKIP_SET_WAKE: Skip chip.irq_set_wake(), for this irq chip
 */
 enum {
 	IRQCHIP_SET_TYPE_MASKED = (1 << 0),
 	IRQCHIP_EOI_IF_HANDLED = (1 << 1),
 	IRQCHIP_MASK_ON_SUSPEND = (1 << 2),
 	IRQCHIP_ONOFFLINE_ENABLED = (1 << 3),
+	IRQCHIP_SKIP_SET_WAKE = (1 << 4),
 };
 
 /* This include will go away once we isolated irq_desc usage to core code */
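
IRQCHIP_SKIP_SET_WAKE lets a chip whose lines are always wake-capable (or whose wake state is managed elsewhere) opt out of the irq_set_wake() callback entirely; set_irq_wake_real() further down in this series returns 0 early for such chips instead of failing with -ENXIO. A minimal sketch, with a hypothetical chip and callbacks:

static void example_mask_irq(struct irq_data *d);
static void example_unmask_irq(struct irq_data *d);

static struct irq_chip example_chip = {
	.name		= "EXAMPLE",
	.irq_mask	= example_mask_irq,
	.irq_unmask	= example_unmask_irq,
	/* No .irq_set_wake: wake requests now succeed without one. */
	.flags		= IRQCHIP_SKIP_SET_WAKE,
};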

@@ -365,6 +369,8 @@ enum {
 struct irqaction;
 extern int setup_irq(unsigned int irq, struct irqaction *new);
 extern void remove_irq(unsigned int irq, struct irqaction *act);
+extern int setup_percpu_irq(unsigned int irq, struct irqaction *new);
+extern void remove_percpu_irq(unsigned int irq, struct irqaction *act);
 
 extern void irq_cpu_online(void);
 extern void irq_cpu_offline(void);

@@ -392,6 +398,7 @@ extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc);
 extern void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc);
 extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc);
 extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
+extern void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc);
 extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
 extern void handle_nested_irq(unsigned int irq);
 

@@ -420,6 +427,8 @@ static inline void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *c
 	irq_set_chip_and_handler_name(irq, chip, handle, NULL);
 }
 
+extern int irq_set_percpu_devid(unsigned int irq);
+
 extern void
 __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
 		  const char *name);

@@ -481,6 +490,13 @@ static inline void irq_set_nested_thread(unsigned int irq, bool nest)
 		irq_clear_status_flags(irq, IRQ_NESTED_THREAD);
 }
 
+static inline void irq_set_percpu_devid_flags(unsigned int irq)
+{
+	irq_set_status_flags(irq,
+			     IRQ_NOAUTOEN | IRQ_PER_CPU | IRQ_NOTHREAD |
+			     IRQ_NOPROBE | IRQ_PER_CPU_DEVID);
+}
+
 /* Handle dynamic irq creation and destruction */
 extern unsigned int create_irq_nr(unsigned int irq_want, int node);
 extern int create_irq(void);

@@ -53,6 +53,7 @@ struct irq_desc {
 	unsigned long last_unhandled;	/* Aging timer for unhandled count */
 	unsigned int irqs_unhandled;
 	raw_spinlock_t lock;
+	struct cpumask *percpu_enabled;
 #ifdef CONFIG_SMP
 	const struct cpumask *affinity_hint;
 	struct irq_affinity_notify *affinity_notify;

@@ -26,7 +26,7 @@
 int irq_set_chip(unsigned int irq, struct irq_chip *chip)
 {
 	unsigned long flags;
-	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 
 	if (!desc)
 		return -EINVAL;

@@ -54,7 +54,7 @@ EXPORT_SYMBOL(irq_set_chip);
 int irq_set_irq_type(unsigned int irq, unsigned int type)
 {
 	unsigned long flags;
-	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
+	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 	int ret = 0;
 
 	if (!desc)

@@ -78,7 +78,7 @@ EXPORT_SYMBOL(irq_set_irq_type);
 int irq_set_handler_data(unsigned int irq, void *data)
 {
 	unsigned long flags;
-	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 
 	if (!desc)
 		return -EINVAL;

@@ -98,7 +98,7 @@ EXPORT_SYMBOL(irq_set_handler_data);
 int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
 {
 	unsigned long flags;
-	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 
 	if (!desc)
 		return -EINVAL;

@@ -119,7 +119,7 @@ int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
 int irq_set_chip_data(unsigned int irq, void *data)
 {
 	unsigned long flags;
-	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 
 	if (!desc)
 		return -EINVAL;

@@ -178,7 +178,7 @@ void irq_shutdown(struct irq_desc *desc)
 	desc->depth = 1;
 	if (desc->irq_data.chip->irq_shutdown)
 		desc->irq_data.chip->irq_shutdown(&desc->irq_data);
-	if (desc->irq_data.chip->irq_disable)
+	else if (desc->irq_data.chip->irq_disable)
 		desc->irq_data.chip->irq_disable(&desc->irq_data);
 	else
 		desc->irq_data.chip->irq_mask(&desc->irq_data);

@@ -204,6 +204,24 @@ void irq_disable(struct irq_desc *desc)
 	}
 }
 
+void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
+{
+	if (desc->irq_data.chip->irq_enable)
+		desc->irq_data.chip->irq_enable(&desc->irq_data);
+	else
+		desc->irq_data.chip->irq_unmask(&desc->irq_data);
+	cpumask_set_cpu(cpu, desc->percpu_enabled);
+}
+
+void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
+{
+	if (desc->irq_data.chip->irq_disable)
+		desc->irq_data.chip->irq_disable(&desc->irq_data);
+	else
+		desc->irq_data.chip->irq_mask(&desc->irq_data);
+	cpumask_clear_cpu(cpu, desc->percpu_enabled);
+}
+
 static inline void mask_ack_irq(struct irq_desc *desc)
 {
 	if (desc->irq_data.chip->irq_mask_ack)

@@ -544,12 +562,44 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
 		chip->irq_eoi(&desc->irq_data);
 }
 
+/**
+ * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
+ * @irq: the interrupt number
+ * @desc: the interrupt description structure for this irq
+ *
+ * Per CPU interrupts on SMP machines without locking requirements. Same as
+ * handle_percpu_irq() above but with the following extras:
+ *
+ * action->percpu_dev_id is a pointer to percpu variables which
+ * contain the real device id for the cpu on which this handler is
+ * called
+ */
+void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc)
+{
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct irqaction *action = desc->action;
+	void *dev_id = __this_cpu_ptr(action->percpu_dev_id);
+	irqreturn_t res;
+
+	kstat_incr_irqs_this_cpu(irq, desc);
+
+	if (chip->irq_ack)
+		chip->irq_ack(&desc->irq_data);
+
+	trace_irq_handler_entry(irq, action);
+	res = action->handler(irq, dev_id);
+	trace_irq_handler_exit(irq, action, res);
+
+	if (chip->irq_eoi)
+		chip->irq_eoi(&desc->irq_data);
+}
+
 void
 __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
 		  const char *name)
 {
 	unsigned long flags;
-	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
+	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);
 
 	if (!desc)
 		return;
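
From the driver's side, the flow handler above means dev_id arrives already resolved to the executing CPU's slot of the percpu cookie via __this_cpu_ptr(). A sketch of the hypothetical handler requested earlier:

static irqreturn_t example_timer_handler(int irq, void *dev_id)
{
	/* dev_id is this CPU's instance of example_timer_cookie. */
	struct example_timer_data *data = dev_id;

	data->ticks++;
	return IRQ_HANDLED;
}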

@@ -593,7 +643,7 @@ irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
 void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
 {
 	unsigned long flags;
-	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 
 	if (!desc)
 		return;

@@ -71,6 +71,8 @@ extern int irq_startup(struct irq_desc *desc);
 extern void irq_shutdown(struct irq_desc *desc);
 extern void irq_enable(struct irq_desc *desc);
 extern void irq_disable(struct irq_desc *desc);
+extern void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu);
+extern void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu);
 extern void mask_irq(struct irq_desc *desc);
 extern void unmask_irq(struct irq_desc *desc);
 

@@ -114,14 +116,21 @@ static inline void chip_bus_sync_unlock(struct irq_desc *desc)
 		desc->irq_data.chip->irq_bus_sync_unlock(&desc->irq_data);
 }
 
+#define _IRQ_DESC_CHECK		(1 << 0)
+#define _IRQ_DESC_PERCPU	(1 << 1)
+
+#define IRQ_GET_DESC_CHECK_GLOBAL	(_IRQ_DESC_CHECK)
+#define IRQ_GET_DESC_CHECK_PERCPU	(_IRQ_DESC_CHECK | _IRQ_DESC_PERCPU)
+
 struct irq_desc *
-__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus);
+__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
+		    unsigned int check);
 void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus);
 
 static inline struct irq_desc *
-irq_get_desc_buslock(unsigned int irq, unsigned long *flags)
+irq_get_desc_buslock(unsigned int irq, unsigned long *flags, unsigned int check)
 {
-	return __irq_get_desc_lock(irq, flags, true);
+	return __irq_get_desc_lock(irq, flags, true, check);
 }
 
 static inline void
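
The new check argument threads a small validity protocol through every descriptor lookup: 0 performs no check, IRQ_GET_DESC_CHECK_GLOBAL rejects percpu-devid descriptors, and IRQ_GET_DESC_CHECK_PERCPU requires one, so the global and percpu halves of the API cannot be crossed. A condensed restatement of the decision, for illustration only (the authoritative logic is in __irq_get_desc_lock() below):

static bool example_desc_matches(unsigned int check, bool is_percpu_devid)
{
	if (!(check & _IRQ_DESC_CHECK))
		return true;			/* caller does not care */
	if (check & _IRQ_DESC_PERCPU)
		return is_percpu_devid;		/* percpu API, percpu irq */
	return !is_percpu_devid;		/* global API, global irq */
}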

@@ -131,9 +140,9 @@ irq_put_desc_busunlock(struct irq_desc *desc, unsigned long flags)
 }
 
 static inline struct irq_desc *
-irq_get_desc_lock(unsigned int irq, unsigned long *flags)
+irq_get_desc_lock(unsigned int irq, unsigned long *flags, unsigned int check)
 {
-	return __irq_get_desc_lock(irq, flags, false);
+	return __irq_get_desc_lock(irq, flags, false, check);
 }
 
 static inline void

@@ -424,11 +424,22 @@ unsigned int irq_get_next_irq(unsigned int offset)
 }
 
 struct irq_desc *
-__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus)
+__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
+		    unsigned int check)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 
 	if (desc) {
+		if (check & _IRQ_DESC_CHECK) {
+			if ((check & _IRQ_DESC_PERCPU) &&
+			    !irq_settings_is_per_cpu_devid(desc))
+				return NULL;
+
+			if (!(check & _IRQ_DESC_PERCPU) &&
+			    irq_settings_is_per_cpu_devid(desc))
+				return NULL;
+		}
+
 		if (bus)
 			chip_bus_lock(desc);
 		raw_spin_lock_irqsave(&desc->lock, *flags);

@@ -443,6 +454,25 @@ void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
 		chip_bus_sync_unlock(desc);
 }
 
+int irq_set_percpu_devid(unsigned int irq)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	if (!desc)
+		return -EINVAL;
+
+	if (desc->percpu_enabled)
+		return -EINVAL;
+
+	desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);
+
+	if (!desc->percpu_enabled)
+		return -ENOMEM;
+
+	irq_set_percpu_devid_flags(irq);
+	return 0;
+}
+
 /**
  * dynamic_irq_cleanup - cleanup a dynamically allocated irq
  * @irq: irq number to initialize
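
irq_set_percpu_devid() is meant to be called once by the interrupt controller at setup time: it allocates the percpu_enabled mask and applies irq_set_percpu_devid_flags(), which includes IRQ_NOAUTOEN. A sketch of the expected controller-side sequence, reusing the hypothetical example_chip from the earlier sketch:

static void __init example_controller_setup(unsigned int irq)
{
	irq_set_percpu_devid(irq);
	irq_set_chip_and_handler(irq, &example_chip, handle_percpu_devid_irq);
	/* IRQ_NOAUTOEN: each CPU must call enable_percpu_irq() itself. */
}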

@@ -195,7 +195,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
 int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
 {
 	unsigned long flags;
-	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 
 	if (!desc)
 		return -EINVAL;

@@ -356,7 +356,7 @@ void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
 static int __disable_irq_nosync(unsigned int irq)
 {
 	unsigned long flags;
-	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
+	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 
 	if (!desc)
 		return -EINVAL;

@@ -448,7 +448,7 @@ void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
 void enable_irq(unsigned int irq)
 {
 	unsigned long flags;
-	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
+	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 
 	if (!desc)
 		return;

@@ -467,6 +467,9 @@ static int set_irq_wake_real(unsigned int irq, unsigned int on)
 	struct irq_desc *desc = irq_to_desc(irq);
 	int ret = -ENXIO;
 
+	if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
+		return 0;
+
 	if (desc->irq_data.chip->irq_set_wake)
 		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
 

@@ -488,7 +491,7 @@ static int set_irq_wake_real(unsigned int irq, unsigned int on)
 int irq_set_irq_wake(unsigned int irq, unsigned int on)
 {
 	unsigned long flags;
-	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
+	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 	int ret = 0;
 
 	if (!desc)

@@ -529,7 +532,7 @@ EXPORT_SYMBOL(irq_set_irq_wake);
 int can_request_irq(unsigned int irq, unsigned long irqflags)
 {
 	unsigned long flags;
-	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 	int canrequest = 0;
 
 	if (!desc)

@@ -1118,6 +1121,8 @@ int setup_irq(unsigned int irq, struct irqaction *act)
 	int retval;
 	struct irq_desc *desc = irq_to_desc(irq);
 
+	if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
+		return -EINVAL;
 	chip_bus_lock(desc);
 	retval = __setup_irq(irq, desc, act);
 	chip_bus_sync_unlock(desc);

@@ -1126,7 +1131,7 @@ int setup_irq(unsigned int irq, struct irqaction *act)
 }
 EXPORT_SYMBOL_GPL(setup_irq);
 
 /*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */

@@ -1224,7 +1229,10 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 */
 void remove_irq(unsigned int irq, struct irqaction *act)
 {
-	__free_irq(irq, act->dev_id);
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
+		__free_irq(irq, act->dev_id);
 }
 EXPORT_SYMBOL_GPL(remove_irq);
 

@@ -1246,7 +1254,7 @@ void free_irq(unsigned int irq, void *dev_id)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 
-	if (!desc)
+	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
 		return;
 
 #ifdef CONFIG_SMP

@@ -1324,7 +1332,8 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
 	if (!desc)
 		return -EINVAL;
 
-	if (!irq_settings_can_request(desc))
+	if (!irq_settings_can_request(desc) ||
+	    WARN_ON(irq_settings_is_per_cpu_devid(desc)))
 		return -EINVAL;
 
 	if (!handler) {

@@ -1409,3 +1418,194 @@ int request_any_context_irq(unsigned int irq, irq_handler_t handler,
 	return !ret ? IRQC_IS_HARDIRQ : ret;
 }
 EXPORT_SYMBOL_GPL(request_any_context_irq);
+
+void enable_percpu_irq(unsigned int irq, unsigned int type)
+{
+	unsigned int cpu = smp_processor_id();
+	unsigned long flags;
+	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
+
+	if (!desc)
+		return;
+
+	type &= IRQ_TYPE_SENSE_MASK;
+	if (type != IRQ_TYPE_NONE) {
+		int ret;
+
+		ret = __irq_set_trigger(desc, irq, type);
+
+		if (ret) {
+			WARN(1, "failed to set type for IRQ%d\n", irq);
+			goto out;
+		}
+	}
+
+	irq_percpu_enable(desc, cpu);
+out:
+	irq_put_desc_unlock(desc, flags);
+}
+
+void disable_percpu_irq(unsigned int irq)
+{
+	unsigned int cpu = smp_processor_id();
+	unsigned long flags;
+	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
+
+	if (!desc)
+		return;
+
+	irq_percpu_disable(desc, cpu);
+	irq_put_desc_unlock(desc, flags);
+}
+
+/*
+ * Internal function to unregister a percpu irqaction.
+ */
+static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	struct irqaction *action;
+	unsigned long flags;
+
+	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
+
+	if (!desc)
+		return NULL;
+
+	raw_spin_lock_irqsave(&desc->lock, flags);
+
+	action = desc->action;
+	if (!action || action->percpu_dev_id != dev_id) {
+		WARN(1, "Trying to free already-free IRQ %d\n", irq);
+		goto bad;
+	}
+
+	if (!cpumask_empty(desc->percpu_enabled)) {
+		WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
+		     irq, cpumask_first(desc->percpu_enabled));
+		goto bad;
+	}
+
+	/* Found it - now remove it from the list of entries: */
+	desc->action = NULL;
+
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+	unregister_handler_proc(irq, action);
+
+	module_put(desc->owner);
+	return action;
+
+bad:
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
+	return NULL;
+}
+
+/**
+ * remove_percpu_irq - free a per-cpu interrupt
+ * @irq: Interrupt line to free
+ * @act: irqaction for the interrupt
+ *
+ * Used to remove interrupts statically setup by the early boot process.
+ */
+void remove_percpu_irq(unsigned int irq, struct irqaction *act)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	if (desc && irq_settings_is_per_cpu_devid(desc))
+		__free_percpu_irq(irq, act->percpu_dev_id);
+}
+
+/**
+ * free_percpu_irq - free an interrupt allocated with request_percpu_irq
+ * @irq: Interrupt line to free
+ * @dev_id: Device identity to free
+ *
+ * Remove a percpu interrupt handler. The handler is removed, but
+ * the interrupt line is not disabled. This must be done on each
+ * CPU before calling this function. The function does not return
+ * until any executing interrupts for this IRQ have completed.
+ *
+ * This function must not be called from interrupt context.
+ */
+void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	if (!desc || !irq_settings_is_per_cpu_devid(desc))
+		return;
+
+	chip_bus_lock(desc);
+	kfree(__free_percpu_irq(irq, dev_id));
+	chip_bus_sync_unlock(desc);
+}
+
+/**
+ * setup_percpu_irq - setup a per-cpu interrupt
+ * @irq: Interrupt line to setup
+ * @act: irqaction for the interrupt
+ *
+ * Used to statically setup per-cpu interrupts in the early boot process.
+ */
+int setup_percpu_irq(unsigned int irq, struct irqaction *act)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	int retval;
+
+	if (!desc || !irq_settings_is_per_cpu_devid(desc))
+		return -EINVAL;
+	chip_bus_lock(desc);
+	retval = __setup_irq(irq, desc, act);
+	chip_bus_sync_unlock(desc);
+
+	return retval;
+}
+
+/**
+ * request_percpu_irq - allocate a percpu interrupt line
+ * @irq: Interrupt line to allocate
+ * @handler: Function to be called when the IRQ occurs.
+ * @devname: An ascii name for the claiming device
+ * @dev_id: A percpu cookie passed back to the handler function
+ *
+ * This call allocates interrupt resources, but doesn't
+ * automatically enable the interrupt. It has to be done on each
+ * CPU using enable_percpu_irq().
+ *
+ * Dev_id must be globally unique. It is a per-cpu variable, and
+ * the handler gets called with the interrupted CPU's instance of
+ * that variable.
+ */
+int request_percpu_irq(unsigned int irq, irq_handler_t handler,
+		       const char *devname, void __percpu *dev_id)
+{
+	struct irqaction *action;
+	struct irq_desc *desc;
+	int retval;
+
+	if (!dev_id)
+		return -EINVAL;
+
+	desc = irq_to_desc(irq);
+	if (!desc || !irq_settings_can_request(desc) ||
+	    !irq_settings_is_per_cpu_devid(desc))
+		return -EINVAL;
+
+	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
+	if (!action)
+		return -ENOMEM;
+
+	action->handler = handler;
+	action->flags = IRQF_PERCPU;
+	action->name = devname;
+	action->percpu_dev_id = dev_id;
+
+	chip_bus_lock(desc);
+	retval = __setup_irq(irq, desc, action);
+	chip_bus_sync_unlock(desc);
+
+	if (retval)
+		kfree(action);
+
+	return retval;
+}
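
Taken together, the driver lifecycle for a percpu-devid line differs from the request_irq() flow in that enabling, and disabling before the free, are per-CPU operations. A hypothetical end-to-end sketch, reusing the example_timer names from the earlier sketches:

static int example_timer_start(unsigned int irq)
{
	int err;

	err = request_percpu_irq(irq, example_timer_handler,
				 "example-timer", &example_timer_cookie);
	if (err)
		return err;

	/* Affects only the calling CPU; every other CPU must do the
	 * same, e.g. from its secondary-startup path. */
	enable_percpu_irq(irq, IRQ_TYPE_NONE);
	return 0;
}

static void example_timer_stop(unsigned int irq)
{
	/* Must run on each CPU first; free_percpu_irq() warns if the
	 * line is still enabled anywhere. */
	disable_percpu_irq(irq);

	/* Single, global free, matching the request. */
	free_percpu_irq(irq, &example_timer_cookie);
}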

@@ -13,6 +13,7 @@ enum {
 	_IRQ_MOVE_PCNTXT = IRQ_MOVE_PCNTXT,
 	_IRQ_NO_BALANCING = IRQ_NO_BALANCING,
 	_IRQ_NESTED_THREAD = IRQ_NESTED_THREAD,
+	_IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID,
 	_IRQF_MODIFY_MASK = IRQF_MODIFY_MASK,
 };
 

@@ -24,6 +25,7 @@ enum {
 #define IRQ_NOTHREAD		GOT_YOU_MORON
 #define IRQ_NOAUTOEN		GOT_YOU_MORON
 #define IRQ_NESTED_THREAD	GOT_YOU_MORON
+#define IRQ_PER_CPU_DEVID	GOT_YOU_MORON
 #undef IRQF_MODIFY_MASK
 #define IRQF_MODIFY_MASK	GOT_YOU_MORON
 

@@ -39,6 +41,11 @@ static inline bool irq_settings_is_per_cpu(struct irq_desc *desc)
 	return desc->status_use_accessors & _IRQ_PER_CPU;
 }
 
+static inline bool irq_settings_is_per_cpu_devid(struct irq_desc *desc)
+{
+	return desc->status_use_accessors & _IRQ_PER_CPU_DEVID;
+}
+
 static inline void irq_settings_set_per_cpu(struct irq_desc *desc)
 {
 	desc->status_use_accessors |= _IRQ_PER_CPU;