Reduce cpuset.c write_lock_irq() to read_lock()
cpuset.c:update_nodemask() uses a write_lock_irq() on tasklist_lock to block concurrent forks; a read_lock() suffices and is less intrusive.

Signed-off-by: Paul Menage <menage@google.com>
Acked-by: Paul Jackson <pj@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
b2ff457b09
commit
c2aef333c9
1 changed file with 3 additions and 3 deletions
|
@ -981,10 +981,10 @@ static int update_nodemask(struct cpuset *cs, char *buf)
|
|||
mmarray = kmalloc(ntasks * sizeof(*mmarray), GFP_KERNEL);
|
||||
if (!mmarray)
|
||||
goto done;
|
||||
write_lock_irq(&tasklist_lock); /* block fork */
|
||||
read_lock(&tasklist_lock); /* block fork */
|
||||
if (atomic_read(&cs->count) <= ntasks)
|
||||
break; /* got enough */
|
||||
write_unlock_irq(&tasklist_lock); /* try again */
|
||||
read_unlock(&tasklist_lock); /* try again */
|
||||
kfree(mmarray);
|
||||
}
|
||||
|
||||
|
@ -1006,7 +1006,7 @@ static int update_nodemask(struct cpuset *cs, char *buf)
|
|||
continue;
|
||||
mmarray[n++] = mm;
|
||||
} while_each_thread(g, p);
|
||||
write_unlock_irq(&tasklist_lock);
|
||||
read_unlock(&tasklist_lock);
|
||||
|
||||
/*
|
||||
* Now that we've dropped the tasklist spinlock, we can
|
||||
|
|
Loading…
Reference in a new issue