workqueue: fix race condition in schedule_on_each_cpu()
Commit 65a6446434 ("HWPOISON: Allow schedule_on_each_cpu() from
keventd"), which allows schedule_on_each_cpu() to be called from
keventd, added a race condition: schedule_on_each_cpu() may race with
cpu hotplug and end up executing the function twice on a cpu.
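
To make the window concrete, here is an abridged sketch of the pre-fix
flow (taken from the old code in the diff below); the direct-execution
path runs before get_online_cpus(), so nothing serializes it against a
concurrent cpu hotplug operation:

	if (current_is_keventd()) {
		/* cpu id is read and func() is called directly
		 * without any hotplug protection */
		orig = raw_smp_processor_id();
		INIT_WORK(per_cpu_ptr(works, orig), func);
		func(per_cpu_ptr(works, orig));
	}
	/* <-- cpu hotplug can run here, before the online set is pinned */
	get_online_cpus();
	/* ... schedule works on every online cpu != orig ... */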
Fix it by moving direct execution into the section protected with
get/put_online_cpus(). While at it, update the code so that direct
execution is done after works have been scheduled for all other cpus,
and drop the unnecessary cpu != orig test from the flush loop.
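
For context, a minimal caller sketch (hypothetical, not part of this
patch): schedule_on_each_cpu() runs the given function once on every
online cpu from keventd context and waits for all invocations to
finish, returning -ENOMEM if the per-cpu work items cannot be
allocated:

	#include <linux/workqueue.h>
	#include <linux/smp.h>

	/* report_cpu() is a made-up example callback */
	static void report_cpu(struct work_struct *unused)
	{
		printk(KERN_INFO "running on cpu %d\n", smp_processor_id());
	}

	static int report_all_cpus(void)
	{
		return schedule_on_each_cpu(report_cpu);	/* blocks */
	}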
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Andi Kleen <ak@linux.intel.com>
Acked-by: Oleg Nesterov <oleg@redhat.com>
Cc: Ingo Molnar <mingo@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent e13193319d
commit 9398180097
1 changed file with 16 additions and 18 deletions
kernel/workqueue.c
@@ -692,31 +692,29 @@ int schedule_on_each_cpu(work_func_t func)
 	if (!works)
 		return -ENOMEM;
 
-	/*
-	 * when running in keventd don't schedule a work item on itself.
-	 * Can just call directly because the work queue is already bound.
-	 * This also is faster.
-	 * Make this a generic parameter for other workqueues?
-	 */
-	if (current_is_keventd()) {
-		orig = raw_smp_processor_id();
-		INIT_WORK(per_cpu_ptr(works, orig), func);
-		func(per_cpu_ptr(works, orig));
-	}
-
 	get_online_cpus();
+
+	/*
+	 * When running in keventd don't schedule a work item on
+	 * itself.  Can just call directly because the work queue is
+	 * already bound.  This also is faster.
+	 */
+	if (current_is_keventd())
+		orig = raw_smp_processor_id();
+
 	for_each_online_cpu(cpu) {
 		struct work_struct *work = per_cpu_ptr(works, cpu);
 
-		if (cpu == orig)
-			continue;
 		INIT_WORK(work, func);
-		schedule_work_on(cpu, work);
-	}
-	for_each_online_cpu(cpu) {
 		if (cpu != orig)
-			flush_work(per_cpu_ptr(works, cpu));
+			schedule_work_on(cpu, work);
 	}
+	if (orig >= 0)
+		func(per_cpu_ptr(works, orig));
+
+	for_each_online_cpu(cpu)
+		flush_work(per_cpu_ptr(works, cpu));
+
 	put_online_cpus();
 	free_percpu(works);
 	return 0;