Merge branch 'for-3.9-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq
Pull workqueue fix from Tejun Heo: "Lai's patch to fix a highly unlikely but still possible workqueue stall during CPU hot-unplug." * 'for-3.9-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq: workqueue: fix possible pool stall bug in wq_unbind_fn()
This commit is contained in:
commit
b63dc123b2
1 changed file with 25 additions and 19 deletions
|
@ -3447,28 +3447,34 @@ static void wq_unbind_fn(struct work_struct *work)
|
|||
|
||||
spin_unlock_irq(&pool->lock);
|
||||
mutex_unlock(&pool->assoc_mutex);
|
||||
}
|
||||
|
||||
/*
|
||||
* Call schedule() so that we cross rq->lock and thus can guarantee
|
||||
* sched callbacks see the %WORKER_UNBOUND flag. This is necessary
|
||||
* as scheduler callbacks may be invoked from other cpus.
|
||||
*/
|
||||
schedule();
|
||||
/*
|
||||
* Call schedule() so that we cross rq->lock and thus can
|
||||
* guarantee sched callbacks see the %WORKER_UNBOUND flag.
|
||||
* This is necessary as scheduler callbacks may be invoked
|
||||
* from other cpus.
|
||||
*/
|
||||
schedule();
|
||||
|
||||
/*
|
||||
* Sched callbacks are disabled now. Zap nr_running. After this,
|
||||
* nr_running stays zero and need_more_worker() and keep_working()
|
||||
* are always true as long as the worklist is not empty. Pools on
|
||||
* @cpu now behave as unbound (in terms of concurrency management)
|
||||
* pools which are served by workers tied to the CPU.
|
||||
*
|
||||
* On return from this function, the current worker would trigger
|
||||
* unbound chain execution of pending work items if other workers
|
||||
* didn't already.
|
||||
*/
|
||||
for_each_std_worker_pool(pool, cpu)
|
||||
/*
|
||||
* Sched callbacks are disabled now. Zap nr_running.
|
||||
* After this, nr_running stays zero and need_more_worker()
|
||||
* and keep_working() are always true as long as the
|
||||
* worklist is not empty. This pool now behaves as an
|
||||
* unbound (in terms of concurrency management) pool which
|
||||
* are served by workers tied to the pool.
|
||||
*/
|
||||
atomic_set(&pool->nr_running, 0);
|
||||
|
||||
/*
|
||||
* With concurrency management just turned off, a busy
|
||||
* worker blocking could lead to lengthy stalls. Kick off
|
||||
* unbound chain execution of currently pending work items.
|
||||
*/
|
||||
spin_lock_irq(&pool->lock);
|
||||
wake_up_worker(pool);
|
||||
spin_unlock_irq(&pool->lock);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
Loading…
Reference in a new issue