[PATCH] genirq: cleanup: merge pending_irq_cpumask[] into irq_desc[]
Consolidation: remove the pending_irq_cpumask[NR_IRQS] array and move it
into the irq_desc[NR_IRQS].pending_mask field.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit cd916d31cc
parent 4a733ee126

4 changed files with 6 additions and 10 deletions
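For orientation, a minimal sketch of what the consolidation looks like in the
source: the pending_mask and move_irq fields and the removed array are taken
from the hunks below, while the surrounding declarations are abbreviated for
illustration.

	/* before: a parallel, NR_IRQS-sized global array */
	cpumask_t __cacheline_aligned pending_irq_cpumask[NR_IRQS];

	/* after: the pending target lives in the per-IRQ descriptor itself */
	struct irq_desc {
		/* ... other per-IRQ state ... */
	#if defined(CONFIG_GENERIC_PENDING_IRQ) || defined(CONFIG_IRQBALANCE)
		cpumask_t pending_mask;		/* pending re-target destination */
		unsigned int move_irq;		/* need to re-target IRQ dest */
	#endif
		/* ... */
	};
	extern struct irq_desc irq_desc[NR_IRQS];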
@@ -581,7 +581,7 @@ static int balanced_irq(void *unused)
 
 	/* push everything to CPU 0 to give us a starting point. */
 	for (i = 0 ; i < NR_IRQS ; i++) {
-		pending_irq_cpumask[i] = cpumask_of_cpu(0);
+		irq_desc[i].pending_mask = cpumask_of_cpu(0);
 		set_pending_irq(i, cpumask_of_cpu(0));
 	}
 
@@ -83,6 +83,7 @@ struct irq_desc {
 	cpumask_t affinity;
 #endif
 #if defined(CONFIG_GENERIC_PENDING_IRQ) || defined(CONFIG_IRQBALANCE)
+	cpumask_t pending_mask;
 	unsigned int move_irq;		/* need to re-target IRQ dest */
 #endif
 #ifdef CONFIG_PROC_FS
@@ -120,7 +121,6 @@ static inline void set_native_irq_info(int irq, cpumask_t mask)
 #ifdef CONFIG_SMP
 
 #if defined(CONFIG_GENERIC_PENDING_IRQ) || defined(CONFIG_IRQBALANCE)
-extern cpumask_t pending_irq_cpumask[NR_IRQS];
 
 void set_pending_irq(unsigned int irq, cpumask_t mask);
 void move_native_irq(int irq);
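The two prototypes kept above are the whole delayed-migration interface: one
side records the desired destination, and the interrupt path applies it later.
A hedged usage sketch follows; target_cpu and the calling context are
illustrative, only the two calls come from this header.

	/* e.g. from a balancing pass, as in the balanced_irq() hunk above */
	set_pending_irq(irq, cpumask_of_cpu(target_cpu));

	/* later, from the handling path of that irq; move_native_irq()
	 * expects desc->lock to be held (see the assert in the hunks below) */
	move_native_irq(irq);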
@@ -16,10 +16,6 @@
 
 #ifdef CONFIG_SMP
 
-#if defined (CONFIG_GENERIC_PENDING_IRQ) || defined (CONFIG_IRQBALANCE)
-cpumask_t __cacheline_aligned pending_irq_cpumask[NR_IRQS];
-#endif
-
 /**
  *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
  *	@irq: interrupt number to wait for
@@ -8,7 +8,7 @@ void set_pending_irq(unsigned int irq, cpumask_t mask)
 
 	spin_lock_irqsave(&desc->lock, flags);
 	desc->move_irq = 1;
-	pending_irq_cpumask[irq] = mask;
+	irq_desc[irq].pending_mask = mask;
 	spin_unlock_irqrestore(&desc->lock, flags);
 }
 
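For readability, here is how the whole of set_pending_irq() reads with the
hunk above applied; the two local-variable lines are context not shown in the
hunk and are assumed.

	void set_pending_irq(unsigned int irq, cpumask_t mask)
	{
		struct irq_desc *desc = irq_desc + irq;	/* assumed context */
		unsigned long flags;			/* assumed context */

		spin_lock_irqsave(&desc->lock, flags);
		desc->move_irq = 1;			/* mark a move as pending */
		irq_desc[irq].pending_mask = mask;	/* record the destination */
		spin_unlock_irqrestore(&desc->lock, flags);
	}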
@@ -30,7 +30,7 @@ void move_native_irq(int irq)
 
 	desc->move_irq = 0;
 
-	if (unlikely(cpus_empty(pending_irq_cpumask[irq])))
+	if (unlikely(cpus_empty(irq_desc[irq].pending_mask)))
 		return;
 
 	if (!desc->chip->set_affinity)
@@ -38,7 +38,7 @@ void move_native_irq(int irq)
 
 	assert_spin_locked(&desc->lock);
 
-	cpus_and(tmp, pending_irq_cpumask[irq], cpu_online_map);
+	cpus_and(tmp, irq_desc[irq].pending_mask, cpu_online_map);
 
 	/*
 	 * If there was a valid mask to work with, please
@@ -58,5 +58,5 @@ void move_native_irq(int irq)
 		if (likely(!(desc->status & IRQ_DISABLED)))
 			desc->chip->enable(irq);
 	}
-	cpus_clear(pending_irq_cpumask[irq]);
+	cpus_clear(irq_desc[irq].pending_mask);
 }
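Pieced together, the migration.c hunks give pending_mask the same lifecycle
the removed array had. A compact summary of the flow after this patch; the
->set_affinity() step sits between the hunks and is paraphrased, not patch
text.

	/* 1. record the request (set_pending_irq(), under desc->lock) */
	irq_desc[irq].pending_mask = mask;
	desc->move_irq = 1;

	/* 2. consume it from the interrupt path (move_native_irq()):
	 *    reduce the request to online CPUs, then retarget the line,
	 *    presumably via desc->chip->set_affinity(irq, tmp) */
	cpus_and(tmp, irq_desc[irq].pending_mask, cpu_online_map);

	/* 3. drop the request once it has been applied */
	cpus_clear(irq_desc[irq].pending_mask);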