genirq: Allow the affinity of a percpu interrupt to be set/retrieved
In order to prepare the genirq layer for the concept of partitioned
percpu interrupts, let's allow an affinity to be associated with such
an interrupt. We introduce:

- irq_set_percpu_devid_partition: flag an interrupt as a percpu-devid
  interrupt, and associate it with an affinity
- irq_get_percpu_devid_partition: allow the affinity of that interrupt
  to be retrieved.

This will allow a driver to discover which CPUs the per-cpu interrupt
can actually fire on.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: devicetree@vger.kernel.org
Cc: Jason Cooper <jason@lakedaemon.net>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Rob Herring <robh+dt@kernel.org>
Link: http://lkml.kernel.org/r/1460365075-7316-3-git-send-email-marc.zyngier@arm.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 651e8b54ab
commit 222df54fd8
3 changed files with 30 additions and 1 deletion
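For orientation before the diff, here is a minimal sketch of the setup side: an irqchip driver flagging a percpu interrupt as percpu-devid and attaching its partition's CPU mask via the new irq_set_percpu_devid_partition(). The names example_map_partitioned_ppi, virq and partition_mask are illustrative only and not part of this commit; note that the core stores the mask by pointer, so the caller must keep it alive.

#include <linux/cpumask.h>
#include <linux/irq.h>

/* Hypothetical irqchip mapping path (names are illustrative). */
static int example_map_partitioned_ppi(unsigned int virq,
				       const struct cpumask *partition_mask)
{
	/*
	 * Associate the percpu interrupt with the CPUs of its partition.
	 * The mask is stored by reference, so it must outlive the IRQ.
	 * Passing NULL falls back to cpu_possible_mask, which is what
	 * the unchanged irq_set_percpu_devid() now does internally.
	 */
	return irq_set_percpu_devid_partition(virq, partition_mask);
}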
include/linux/irq.h
@@ -530,6 +530,10 @@ static inline void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *c
 }
 
 extern int irq_set_percpu_devid(unsigned int irq);
+extern int irq_set_percpu_devid_partition(unsigned int irq,
+					   const struct cpumask *affinity);
+extern int irq_get_percpu_devid_partition(unsigned int irq,
+					   struct cpumask *affinity);
 
 extern void
 __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
include/linux/irqdesc.h
@@ -66,6 +66,7 @@ struct irq_desc {
 	int			threads_handled_last;
 	raw_spinlock_t		lock;
 	struct cpumask		*percpu_enabled;
+	const struct cpumask	*percpu_affinity;
 #ifdef CONFIG_SMP
 	const struct cpumask	*affinity_hint;
 	struct irq_affinity_notify *affinity_notify;
kernel/irq/irqdesc.c
@@ -595,7 +595,8 @@ void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
 		chip_bus_sync_unlock(desc);
 }
 
-int irq_set_percpu_devid(unsigned int irq)
+int irq_set_percpu_devid_partition(unsigned int irq,
+				   const struct cpumask *affinity)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 
@@ -610,10 +611,33 @@ int irq_set_percpu_devid(unsigned int irq)
 	if (!desc->percpu_enabled)
 		return -ENOMEM;
 
+	if (affinity)
+		desc->percpu_affinity = affinity;
+	else
+		desc->percpu_affinity = cpu_possible_mask;
+
 	irq_set_percpu_devid_flags(irq);
 	return 0;
 }
 
+int irq_set_percpu_devid(unsigned int irq)
+{
+	return irq_set_percpu_devid_partition(irq, NULL);
+}
+
+int irq_get_percpu_devid_partition(unsigned int irq, struct cpumask *affinity)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	if (!desc || !desc->percpu_enabled)
+		return -EINVAL;
+
+	if (affinity)
+		cpumask_copy(affinity, desc->percpu_affinity);
+
+	return 0;
+}
+
 void kstat_incr_irq_this_cpu(unsigned int irq)
 {
 	kstat_incr_irqs_this_cpu(irq_to_desc(irq));
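To complement the diff, a hedged sketch of the retrieval side described in the changelog: a driver querying which CPUs a percpu-devid interrupt can actually fire on. example_report_ppi_affinity() and virq are illustrative names; the IRQ is assumed to have already been set up with irq_set_percpu_devid_partition() by its irqchip.

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/irq.h>
#include <linux/printk.h>

/* Hypothetical consumer (names are illustrative). */
static void example_report_ppi_affinity(unsigned int virq)
{
	cpumask_var_t mask;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return;

	/* Returns -EINVAL if virq is not a percpu-devid interrupt. */
	if (!irq_get_percpu_devid_partition(virq, mask))
		pr_info("IRQ%u can fire on CPUs %*pbl\n",
			virq, cpumask_pr_args(mask));

	free_cpumask_var(mask);
}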