workqueues: shrink cpu_populated_map when CPU dies
When cpu_populated_map was introduced, it was assumed that cwq->thread could survive after CPU_DEAD; that is why we never shrink cpu_populated_map.

This is not very nice; we can safely remove the already dead CPU from the map. The only required change is that destroy_workqueue() must hold the hotplug lock until it has destroyed all cwq->thread's, to protect cpu_populated_map. We could make a local copy of the cpu mask and drop the lock, but sizeof(cpumask_t) may be very large.

Also, fix the comment near queue_work(). Unless _cpu_down() happens we do guarantee the cpu-affinity of the work_struct, and we have users which rely on this.

[akpm@linux-foundation.org: repair comment]

Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
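To make the locking requirement concrete, here is a simplified sketch of the destroy_workqueue() ordering this patch establishes. It is reconstructed from the hunk further below; the function signature, the declarations and the wq_cpu_map() helper are taken from the surrounding kernel source of that era rather than from this diff, and the comments are added here for explanation.

        void destroy_workqueue(struct workqueue_struct *wq)
        {
                const cpumask_t *cpu_map = wq_cpu_map(wq);
                struct cpu_workqueue_struct *cwq;
                int cpu;

                get_online_cpus();                      /* block CPU hotplug */
                spin_lock(&workqueue_lock);
                list_del(&wq->list);                    /* unlink from the global workqueue list */
                spin_unlock(&workqueue_lock);

                /*
                 * Keep the hotplug lock held while walking cpu_populated_map,
                 * so that CPU_DEAD cannot clear bits from the map under us.
                 */
                for_each_cpu_mask(cpu, *cpu_map) {
                        cwq = per_cpu_ptr(wq->cpu_wq, cpu);
                        cleanup_workqueue_thread(cwq, cpu);
                }
                put_online_cpus();                      /* only now allow hotplug again */

                free_percpu(wq->cpu_wq);
                kfree(wq);
        }

The alternative mentioned in the changelog, copying *cpu_map onto the stack and dropping the lock early, is rejected because a cpumask_t can be very large on big-NR_CPUS configurations.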
commit 00dfcaf748
parent 786083667e

1 changed file with 9 additions and 4 deletions:
kernel/workqueue.c
@@ -158,8 +158,8 @@ static void __queue_work(struct cpu_workqueue_struct *cwq,
  *
  * Returns 0 if @work was already on a queue, non-zero otherwise.
  *
- * We queue the work to the CPU it was submitted, but there is no
- * guarantee that it will be processed by that CPU.
+ * We queue the work to the CPU on which it was submitted, but if the CPU dies
+ * it can be processed by another CPU.
  */
 int queue_work(struct workqueue_struct *wq, struct work_struct *work)
 {
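The affinity guarantee referred to in the changelog matters to callers. As a purely hypothetical illustration (not part of this patch; the per-CPU counter and the function names below are made up), a caller may rely on the work function running on the CPU that queued it:

        #include <linux/workqueue.h>
        #include <linux/percpu.h>

        /* Hypothetical per-CPU counter, touched without locking because the
         * handler runs on the CPU that queued the work, barring _cpu_down(). */
        static DEFINE_PER_CPU(unsigned long, stats_counter);

        static void stats_work_fn(struct work_struct *work)
        {
                __get_cpu_var(stats_counter)++;         /* same CPU as the submitter */
        }

        static DECLARE_WORK(stats_work, stats_work_fn);

        static void kick_stats(void)
        {
                schedule_work(&stats_work);             /* queue_work() on the current CPU's cwq */
        }

If the submitting CPU is unplugged, the work migrates to another CPU, which is exactly what the rewritten comment now says.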
@@ -815,12 +815,12 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	spin_lock(&workqueue_lock);
 	list_del(&wq->list);
 	spin_unlock(&workqueue_lock);
-	put_online_cpus();
 
 	for_each_cpu_mask(cpu, *cpu_map) {
 		cwq = per_cpu_ptr(wq->cpu_wq, cpu);
 		cleanup_workqueue_thread(cwq, cpu);
 	}
+	put_online_cpus();
 
 	free_percpu(wq->cpu_wq);
 	kfree(wq);
@@ -838,7 +838,6 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 	action &= ~CPU_TASKS_FROZEN;
 
 	switch (action) {
-
 	case CPU_UP_PREPARE:
 		cpu_set(cpu, cpu_populated_map);
 	}
@@ -866,6 +865,12 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 			break;
 		}
 	}
 
+	switch (action) {
+	case CPU_UP_CANCELED:
+	case CPU_DEAD:
+		cpu_clear(cpu, cpu_populated_map);
+	}
+
 	return NOTIFY_OK;
 }
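Taken together, the hotplug notifier now both populates and shrinks the map. Below is a condensed sketch of the resulting workqueue_cpu_callback() flow; the per-workqueue thread handling in the middle is elided, so this is not the verbatim function from kernel/workqueue.c.

        static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
                                                    unsigned long action, void *hcpu)
        {
                unsigned int cpu = (unsigned long)hcpu;

                action &= ~CPU_TASKS_FROZEN;

                switch (action) {
                case CPU_UP_PREPARE:
                        cpu_set(cpu, cpu_populated_map);        /* CPU may get cwq->thread's */
                }

                /* ... create/start/clean up cwq->thread for each workqueue ... */

                switch (action) {
                case CPU_UP_CANCELED:
                case CPU_DEAD:
                        cpu_clear(cpu, cpu_populated_map);      /* new: shrink the map again */
                }

                return NOTIFY_OK;
        }

Because destroy_workqueue() now holds the hotplug lock across its cleanup loop, this cpu_clear() cannot race with a concurrent walk of cpu_populated_map.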