[PATCH] for_each_possible_cpu: i386
This patch replaces for_each_cpu with for_each_possible_cpu under arch/i386.

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit c8912599c6
parent fe449f4836
4 changed files with 7 additions and 7 deletions
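For context, for_each_possible_cpu() iterates over every CPU that could ever be present in the system (cpu_possible_map), not only the CPUs currently online, so teardown paths like the ones in this patch can free per-CPU state even for CPUs that are offline at the time. The following is a minimal sketch of that pattern, not taken from this patch; the my_percpu_buf array and free_my_percpu_bufs() helper are invented names used for illustration only:

	#include <linux/cpumask.h>
	#include <linux/slab.h>
	#include <linux/threads.h>

	static void *my_percpu_buf[NR_CPUS];	/* hypothetical per-CPU buffers */

	static void free_my_percpu_bufs(void)
	{
		int cpu;

		/* Walk every possible CPU, online or not, since a buffer
		 * may have been allocated for a CPU that is now offline. */
		for_each_possible_cpu(cpu) {
			kfree(my_percpu_buf[cpu]);
			my_percpu_buf[cpu] = NULL;
		}
	}

Work that only makes sense for running CPUs still filters on online state, which is why do_irq_balance() below keeps its explicit cpu_online(i) check inside the loop.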
@@ -381,7 +381,7 @@ static void do_irq_balance(void)
 	unsigned long imbalance = 0;
 	cpumask_t allowed_mask, target_cpu_mask, tmp;
 
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		int package_index;
 		CPU_IRQ(i) = 0;
 		if (!cpu_online(i))
@@ -632,7 +632,7 @@ static int __init balanced_irq_init(void)
 	else
 		printk(KERN_ERR "balanced_irq_init: failed to spawn balanced_irq");
 failed:
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		kfree(irq_cpu_data[i].irq_delta);
 		irq_cpu_data[i].irq_delta = NULL;
 		kfree(irq_cpu_data[i].last_irq);
@@ -138,12 +138,12 @@ static int __init check_nmi_watchdog(void)
 	if (nmi_watchdog == NMI_LOCAL_APIC)
 		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
 
-	for_each_cpu(cpu)
+	for_each_possible_cpu(cpu)
 		prev_nmi_count[cpu] = per_cpu(irq_stat, cpu).__nmi_count;
 	local_irq_enable();
 	mdelay((10*1000)/nmi_hz); // wait 10 ticks
 
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 #ifdef CONFIG_SMP
 		/* Check cpu_callin_map here because that is set
 		   after the timer is started. */
@@ -510,7 +510,7 @@ void touch_nmi_watchdog (void)
 	 * Just reset the alert counters, (other CPUs might be
 	 * spinning on locks we hold):
 	 */
-	for_each_cpu(i)
+	for_each_possible_cpu(i)
 		alert_counter[i] = 0;
 
 	/*
@@ -1700,7 +1700,7 @@ after_handle_vic_irq(unsigned int irq)
 
 			printk("VOYAGER SMP: CPU%d lost interrupt %d\n",
 			       cpu, irq);
-			for_each_cpu(real_cpu, mask) {
+			for_each_possible_cpu(real_cpu, mask) {
 
 				outb(VIC_CPU_MASQUERADE_ENABLE | real_cpu,
 				     VIC_PROCESSOR_ID);
@@ -122,7 +122,7 @@ static void nmi_save_registers(void * dummy)
 static void free_msrs(void)
 {
 	int i;
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		kfree(cpu_msrs[i].counters);
 		cpu_msrs[i].counters = NULL;
 		kfree(cpu_msrs[i].controls);