genirq: use iterators for irq_desc loops

Use for_each_irq_desc[_reverse] and for_each_irq_nr for all the iteration loops. (A sketch of these helpers follows the commit metadata below.)

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Authored by Thomas Gleixner on 2008-10-16 14:19:04 +02:00; committed by Ingo Molnar
parent 2be3b52a57
commit 10e580842e
3 changed files with 21 additions and 43 deletions
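
Annotation: the helpers this commit relies on are easy to model outside the kernel. The block below is a toy userspace sketch of the 2008-era iterator macros (the real definitions live in include/linux/irqnr.h and differ in detail); the macro bodies and the toy descriptor table are illustrative assumptions, not the kernel's exact code.

/*
 * Toy userspace model of the iterators (a sketch, not the kernel's
 * exact macros). Compile with: cc toy.c && ./a.out
 */
#include <stdio.h>

#define NR_TOY_IRQS 8

struct irq_desc {
	int status;	/* stand-in for the real descriptor fields */
};

static struct irq_desc irq_desc[NR_TOY_IRQS];
static int nr_irqs = NR_TOY_IRQS;

/* Walk all descriptors forward, yielding the irq number and desc. */
#define for_each_irq_desc(irq, desc) \
	for (irq = 0, desc = irq_desc; irq < nr_irqs; irq++, desc++)

/* Walk backward, as the probe_irq_on() flush passes do; note the
 * loop variable must be signed so that "irq >= 0" can terminate. */
#define for_each_irq_desc_reverse(irq, desc) \
	for (irq = nr_irqs - 1, desc = irq_desc + nr_irqs - 1; \
	     irq >= 0; irq--, desc--)

/* Walk plain irq numbers when no descriptor is needed. */
#define for_each_irq_nr(irq) \
	for (irq = 0; irq < nr_irqs; irq++)

int main(void)
{
	struct irq_desc *desc;
	int irq;

	for_each_irq_desc(irq, desc)
		desc->status = irq;

	/* Prints the descriptors from 7 down to 0. */
	for_each_irq_desc_reverse(irq, desc)
		printf("irq %d status %d\n", irq, desc->status);

	return 0;
}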

drivers/xen/events.c

@@ -137,14 +137,12 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
 static void init_evtchn_cpu_bindings(void)
 {
 #ifdef CONFIG_SMP
+	struct irq_desc *desc;
 	int i;
+
 	/* By default all event channels notify CPU#0. */
-	for (i = 0; i < nr_irqs; i++) {
-		struct irq_desc *desc = irq_to_desc(i);
-		if (!desc)
-			continue;
+	for_each_irq_desc(i, desc)
 		desc->affinity = cpumask_of_cpu(0);
-	}
 #endif

 	memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
@@ -233,7 +231,7 @@ static int find_unbound_irq(void)
 	int irq;

 	/* Only allocate from dynirq range */
-	for (irq = 0; irq < nr_irqs; irq++)
+	for_each_irq_nr(irq)
 		if (irq_bindcount[irq] == 0)
 			break;
@@ -794,7 +792,7 @@ void xen_irq_resume(void)
 		mask_evtchn(evtchn);

 	/* No IRQ <-> event-channel mappings. */
-	for (irq = 0; irq < nr_irqs; irq++)
+	for_each_irq_nr(irq)
 		irq_info[irq].evtchn = 0; /* zap event-channel binding */

 	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
@@ -826,7 +824,7 @@ void __init xen_init_IRQ(void)
 		mask_evtchn(i);

 	/* Dynamic IRQ space is currently unbound. Zero the refcnts. */
-	for (i = 0; i < nr_irqs; i++)
+	for_each_irq_nr(i)
 		irq_bindcount[i] = 0;

 	irq_ctx_init(smp_processor_id());
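
Annotation: all three hunks in this file have the same shape. The loop bodies only need the irq number, never the descriptor, so the open-coded bound check against the global nr_irqs collapses into for_each_irq_nr. Side by side, excerpted from the find_unbound_irq() hunk above:

/* Before: every call site open-codes the walk over irq numbers. */
for (irq = 0; irq < nr_irqs; irq++)
	if (irq_bindcount[irq] == 0)
		break;

/* After: the walk lives behind one helper, so a later change to how
 * irq numbers are allocated only has to touch the macro. */
for_each_irq_nr(irq)
	if (irq_bindcount[irq] == 0)
		break;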

kernel/irq/autoprobe.c

@@ -30,19 +30,16 @@ static DEFINE_MUTEX(probing_active);
 unsigned long probe_irq_on(void)
 {
 	struct irq_desc *desc;
-	unsigned long mask;
-	unsigned int i;
+	unsigned long mask = 0;
+	unsigned int status;
+	int i;

 	mutex_lock(&probing_active);
 	/*
 	 * something may have generated an irq long ago and we want to
 	 * flush such a longstanding irq before considering it as spurious.
 	 */
-	for (i = nr_irqs-1; i > 0; i--) {
-		desc = irq_to_desc(i);
-		if (!desc)
-			continue;
-
+	for_each_irq_desc_reverse(i, desc) {
 		spin_lock_irq(&desc->lock);
 		if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
 			/*
@@ -70,11 +67,7 @@ unsigned long probe_irq_on(void)
 	 * (we must startup again here because if a longstanding irq
 	 * happened in the previous stage, it may have masked itself)
 	 */
-	for (i = nr_irqs-1; i > 0; i--) {
-		desc = irq_to_desc(i);
-		if (!desc)
-			continue;
-
+	for_each_irq_desc_reverse(i, desc) {
 		spin_lock_irq(&desc->lock);
 		if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
 			desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
@@ -92,13 +85,7 @@ unsigned long probe_irq_on(void)
 	/*
 	 * Now filter out any obviously spurious interrupts
 	 */
-	mask = 0;
-	for (i = 0; i < nr_irqs; i++) {
-		unsigned int status;
-
-		desc = irq_to_desc(i);
-		if (!desc)
-			continue;
-
+	for_each_irq_desc(i, desc) {
 		spin_lock_irq(&desc->lock);
 		status = desc->status;
@@ -132,16 +119,11 @@ EXPORT_SYMBOL(probe_irq_on);
  */
 unsigned int probe_irq_mask(unsigned long val)
 {
-	unsigned int mask;
+	unsigned int status, mask = 0;
+	struct irq_desc *desc;
 	int i;

-	mask = 0;
-	for (i = 0; i < nr_irqs; i++) {
-		struct irq_desc *desc = irq_to_desc(i);
-		unsigned int status;
-
-		if (!desc)
-			continue;
-
+	for_each_irq_desc(i, desc) {
 		spin_lock_irq(&desc->lock);
 		status = desc->status;
@@ -180,13 +162,10 @@ EXPORT_SYMBOL(probe_irq_mask);
 int probe_irq_off(unsigned long val)
 {
 	int i, irq_found = 0, nr_irqs = 0;
+	struct irq_desc *desc;
+	unsigned int status;

-	for (i = 0; i < nr_irqs; i++) {
-		struct irq_desc *desc = irq_to_desc(i);
-		unsigned int status;
-
-		if (!desc)
-			continue;
-
+	for_each_irq_desc(i, desc) {
 		spin_lock_irq(&desc->lock);
 		status = desc->status;
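
Annotation: the autoprobe loops gain the most, because the old code also had to re-derive the descriptor and skip the holes where irq_to_desc() returns NULL. Two subtleties are worth flagging. First, the index changes from unsigned int to int, which the reverse helper presumably needs for an "irq >= 0" termination test; and if that helper does run down to zero, IRQ 0 (which the old "i > 0" condition skipped) is now included in the probe walk. Second, probe_irq_off() keeps a local counter named nr_irqs, which now shadows the global nr_irqs the iterator macro appears to read; a follow-up rename of that local was needed. The recurring transformation, excerpted from the hunks above:

/* Before: each loop re-derives the descriptor and skips the holes. */
for (i = nr_irqs - 1; i > 0; i--) {
	desc = irq_to_desc(i);
	if (!desc)
		continue;
	/* ... probe logic ... */
}

/* After: one line, and only valid descriptors are handed back. */
for_each_irq_desc_reverse(i, desc) {
	/* ... probe logic ... */
}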

kernel/irq/handle.c

@@ -268,9 +268,10 @@ static struct lock_class_key irq_desc_lock_class;
 void early_init_irq_lock_class(void)
 {
+	struct irq_desc *desc;
 	int i;

-	for (i = 0; i < nr_irqs; i++)
-		lockdep_set_class(&irq_desc[i].lock, &irq_desc_lock_class);
+	for_each_irq_desc(i, desc)
+		lockdep_set_class(&desc->lock, &irq_desc_lock_class);
 }
 #endif
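
Annotation: the handle.c hunk shows the second payoff. The old loop indexed the static irq_desc[] array directly, wiring the flat-array layout into the call site; with the iterator, only a descriptor pointer is visible, which presumably prepares for descriptors that no longer live in one flat array, as in the later sparse-irq work. Excerpted from the hunk above:

/* Before: hard-wires the flat irq_desc[] array into the call site. */
for (i = 0; i < nr_irqs; i++)
	lockdep_set_class(&irq_desc[i].lock, &irq_desc_lock_class);

/* After: only a descriptor pointer is visible, so descriptors are
 * free to move out of a flat array later. */
for_each_irq_desc(i, desc)
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);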