workqueue: perform non-reentrancy test when queueing to unbound workqueues too
Because per-cpu workqueues have multiple pwqs (pool_workqueues) to serve the CPUs, __queue_work() must guarantee that a single work item isn't queued on one pwq while still executing on another: it looks at the previous pool the target work item was on and, if the item is still executing there, queues it on that pool.

To support changing workqueue_attrs on the fly, unbound workqueues too will have multiple pwqs and thus need the non-reentrancy test when queueing. This patch modifies __queue_work() such that the reentrancy test is performed regardless of the workqueue type.

per_cpu_ptr(wq->cpu_pwqs, cpu) used to be used to determine the matching pwq for the last pool. This can't be used for unbound workqueues and is replaced with worker->current_pwq, which also happens to be simpler.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
This commit is contained in:
parent 75ccf5950f
commit c9178087ac
1 changed file with 19 additions and 23 deletions
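The queueing rule the patch makes uniform can be sketched in isolation. Below is a minimal userspace model of that decision, not kernel code: every name in it (fake_pool, fake_pwq, fake_worker, pick_pwq, find_executing) is a hypothetical stand-in, and the kernel's workqueue-match test (worker->current_pwq->wq == wq) is omitted for brevity.

/*
 * Userspace sketch of the non-reentrancy decision: follow a
 * still-running worker back to its pwq, otherwise use the
 * default pwq chosen by CPU (bound) or first_pwq() (unbound).
 */
#include <stdio.h>
#include <stddef.h>

struct fake_pool { const char *name; };
struct fake_pwq { const char *name; struct fake_pool *pool; };
struct fake_worker { struct fake_pwq *current_pwq; };

/* stand-in for find_worker_executing_work(): is @running still
 * executing our work item on @pool? */
static struct fake_worker *find_executing(struct fake_pool *pool,
                                          struct fake_worker *running)
{
        if (running && running->current_pwq->pool == pool)
                return running;
        return NULL;
}

static struct fake_pwq *pick_pwq(struct fake_pwq *dfl_pwq,
                                 struct fake_pool *last_pool,
                                 struct fake_worker *running)
{
        struct fake_worker *worker;

        if (!last_pool || last_pool == dfl_pwq->pool)
                return dfl_pwq;         /* same pool: no hazard */

        worker = find_executing(last_pool, running);
        if (worker)                     /* still running there: follow it */
                return worker->current_pwq;
        return dfl_pwq;                 /* not running there, queue here */
}

int main(void)
{
        struct fake_pool pool_a = { "pool A" }, pool_b = { "pool B" };
        struct fake_pwq pwq_a = { "pwq A", &pool_a };
        struct fake_pwq pwq_b = { "pwq B", &pool_b };
        struct fake_worker busy = { &pwq_a };

        /* still running on pool A: re-queue lands on pwq A */
        printf("%s\n", pick_pwq(&pwq_b, &pool_a, &busy)->name);
        /* finished on pool A: default pwq B is used */
        printf("%s\n", pick_pwq(&pwq_b, &pool_a, NULL)->name);
        return 0;
}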
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1209,6 +1209,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
 			 struct work_struct *work)
 {
 	struct pool_workqueue *pwq;
+	struct worker_pool *last_pool;
 	struct list_head *worklist;
 	unsigned int work_flags;
 	unsigned int req_cpu = cpu;
@@ -1228,41 +1229,36 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
 	    WARN_ON_ONCE(!is_chained_work(wq)))
 		return;
 
-	/* determine the pwq to use */
+	/* pwq which will be used unless @work is executing elsewhere */
 	if (!(wq->flags & WQ_UNBOUND)) {
-		struct worker_pool *last_pool;
-
 		if (cpu == WORK_CPU_UNBOUND)
 			cpu = raw_smp_processor_id();
-
-		/*
-		 * It's multi cpu.  If @work was previously on a different
-		 * cpu, it might still be running there, in which case the
-		 * work needs to be queued on that cpu to guarantee
-		 * non-reentrancy.
-		 */
 		pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
-		last_pool = get_work_pool(work);
+	} else {
+		pwq = first_pwq(wq);
+	}
 
-		if (last_pool && last_pool != pwq->pool) {
-			struct worker *worker;
+	/*
+	 * If @work was previously on a different pool, it might still be
+	 * running there, in which case the work needs to be queued on that
+	 * pool to guarantee non-reentrancy.
+	 */
+	last_pool = get_work_pool(work);
+	if (last_pool && last_pool != pwq->pool) {
+		struct worker *worker;
 
-			spin_lock(&last_pool->lock);
+		spin_lock(&last_pool->lock);
 
-			worker = find_worker_executing_work(last_pool, work);
+		worker = find_worker_executing_work(last_pool, work);
 
-			if (worker && worker->current_pwq->wq == wq) {
-				pwq = per_cpu_ptr(wq->cpu_pwqs, last_pool->cpu);
-			} else {
-				/* meh... not running there, queue here */
-				spin_unlock(&last_pool->lock);
-				spin_lock(&pwq->pool->lock);
-			}
+		if (worker && worker->current_pwq->wq == wq) {
+			pwq = worker->current_pwq;
 		} else {
+			/* meh... not running there, queue here */
+			spin_unlock(&last_pool->lock);
 			spin_lock(&pwq->pool->lock);
 		}
 	} else {
-		pwq = first_pwq(wq);
 		spin_lock(&pwq->pool->lock);
 	}
 
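Seen from the caller's side, the guarantee now covers unbound workqueues as well: re-queueing a work item that is still running steers it back to the pool executing it, so the handler never runs concurrently with itself on the same workqueue. A hedged illustration of that guarantee in use (my_work, my_func, and requeue_example are made-up names; DECLARE_WORK(), queue_work(), and system_unbound_wq are the real kernel API):

#include <linux/workqueue.h>

static void my_func(struct work_struct *work)
{
        /* potentially long-running handler */
}
static DECLARE_WORK(my_work, my_func);

static void requeue_example(void)
{
        /* my_func() starts executing on some unbound pool's worker */
        queue_work(system_unbound_wq, &my_work);

        /* if my_func() is still running when this re-queue happens,
         * __queue_work() now follows worker->current_pwq back to the
         * same pool, so my_func() never runs twice in parallel */
        queue_work(system_unbound_wq, &my_work);
}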