[IA64] wider use of for_each_cpu_mask() in arch/ia64
In arch/ia64 change the explicit use of for-loops and NR_CPUS into the
general for_each_cpu() or for_each_online_cpu() constructs, as appropriate.
This widens the scope of potential future optimizations of the general
constructs, as well as takes advantage of the existing optimizations of
first_cpu() and next_cpu().

Signed-off-by: John Hawkes <hawkes@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
parent 444d1d9bb5
commit dc565b525d
5 changed files with 20 additions and 19 deletions
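For background, the generic iterators used by this patch walk a cpumask with first_cpu()/next_cpu() rather than scanning every index up to NR_CPUS. Below is a rough, illustrative sketch of how they are layered in kernels of this era (an approximation, not code from this commit; the actual definitions live in include/linux/cpumask.h):

/* Hypothetical sketch: visit only the CPUs whose bits are set in 'mask'. */
#define for_each_cpu_mask(cpu, mask)				\
	for ((cpu) = first_cpu(mask);				\
	     (cpu) < NR_CPUS;					\
	     (cpu) = next_cpu((cpu), (mask)))

#define for_each_cpu(cpu)        for_each_cpu_mask((cpu), cpu_possible_map)
#define for_each_online_cpu(cpu) for_each_cpu_mask((cpu), cpu_online_map)

Because iteration jumps directly from one set bit to the next, the converted loops skip offline (or impossible) CPUs entirely, and any future optimization of first_cpu()/next_cpu() automatically benefits every caller.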
@@ -57,9 +57,9 @@ int show_interrupts(struct seq_file *p, void *v)
 	if (i == 0) {
 		seq_printf(p, " ");
-		for (j=0; j<NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "CPU%d ",j);
+		for_each_online_cpu(j) {
+			seq_printf(p, "CPU%d ",j);
+		}
 		seq_putc(p, '\n');
 	}
@@ -72,9 +72,9 @@ int show_interrupts(struct seq_file *p, void *v)
 #ifndef CONFIG_SMP
 	seq_printf(p, "%10u ", kstat_irqs(i));
 #else
-	for (j = 0; j < NR_CPUS; j++)
-		if (cpu_online(j))
-			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+	for_each_online_cpu(j) {
+		seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+	}
 #endif
 	seq_printf(p, " %14s", irq_desc[i].handler->typename);
 	seq_printf(p, " %s", action->name);
@@ -947,8 +947,8 @@ void
 percpu_modcopy (void *pcpudst, const void *src, unsigned long size)
 {
 	unsigned int i;
-	for (i = 0; i < NR_CPUS; i++)
-		if (cpu_possible(i))
-			memcpy(pcpudst + __per_cpu_offset[i], src, size);
+	for_each_cpu(i) {
+		memcpy(pcpudst + __per_cpu_offset[i], src, size);
+	}
 }
 #endif /* CONFIG_SMP */
@@ -185,8 +185,8 @@ send_IPI_allbutself (int op)
 {
 	unsigned int i;
 
-	for (i = 0; i < NR_CPUS; i++) {
-		if (cpu_online(i) && i != smp_processor_id())
+	for_each_online_cpu(i) {
+		if (i != smp_processor_id())
 			send_IPI_single(i, op);
 	}
 }
@@ -199,9 +199,9 @@ send_IPI_all (int op)
 {
 	int i;
 
-	for (i = 0; i < NR_CPUS; i++)
-		if (cpu_online(i))
-			send_IPI_single(i, op);
+	for_each_online_cpu(i) {
+		send_IPI_single(i, op);
+	}
 }
 
 /*
@@ -694,9 +694,9 @@ smp_cpus_done (unsigned int dummy)
 	 * Allow the user to impress friends.
 	 */
 
-	for (cpu = 0; cpu < NR_CPUS; cpu++)
-		if (cpu_online(cpu))
-			bogosum += cpu_data(cpu)->loops_per_jiffy;
+	for_each_online_cpu(cpu) {
+		bogosum += cpu_data(cpu)->loops_per_jiffy;
+	}
 
 	printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
 	       (int)num_online_cpus(), bogosum/(500000/HZ), (bogosum/(5000/HZ))%100);
@@ -77,9 +77,10 @@ wrap_mmu_context (struct mm_struct *mm)
 	/* can't call flush_tlb_all() here because of race condition with O(1) scheduler [EF] */
 	{
 		int cpu = get_cpu(); /* prevent preemption/migration */
-		for (i = 0; i < NR_CPUS; ++i)
-			if (cpu_online(i) && (i != cpu))
+		for_each_online_cpu(i) {
+			if (i != cpu)
 				per_cpu(ia64_need_tlb_flush, i) = 1;
+		}
 		put_cpu();
 	}
 	local_flush_tlb_all();