workqueue: remove workqueue_struct->pool_wq.single

workqueue->pool_wq is a union which points either to the percpu pwqs
(pool_workqueues) or to the single unbound pwq.  As the first pwq can
be accessed via the workqueue->pwqs list, there's no reason to keep
the dedicated single pointer anymore.

Use list_first_entry(&workqueue->pwqs, ...) to access the unbound pwq
and drop the workqueue->pool_wq.single pointer along with the pool_wq
union itself.  This simplifies the code and eases implementing
multiple unbound pools with custom attributes.
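
For reference, the access pattern for the unbound pwq changes as
follows (a minimal before/after sketch, not a verbatim excerpt of the
patched code):

	/* before: dedicated pointer kept in the pool_wq union */
	pwq = wq->pool_wq.single;

	/* after: the unbound pwq is the first (and only) entry on wq->pwqs */
	pwq = list_first_entry(&wq->pwqs, struct pool_workqueue, pwqs_node);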

This patch doesn't introduce any visible behavior changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
commit 420c0ddb1f
parent d84ff0512f
Author: Tejun Heo
Date:   2013-03-12 11:29:59 -07:00

@@ -188,11 +188,7 @@ struct wq_flusher {
  */
 struct workqueue_struct {
 	unsigned int		flags;		/* W: WQ_* flags */
-	union {
-		struct pool_workqueue __percpu		*pcpu;
-		struct pool_workqueue			*single;
-		unsigned long				v;
-	} pool_wq;				/* I: pwq's */
+	struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwq's */
 	struct list_head	pwqs;		/* I: all pwqs of this wq */
 	struct list_head	list;		/* W: list of all workqueues */
@@ -471,9 +467,11 @@ static struct pool_workqueue *get_pwq(int cpu, struct workqueue_struct *wq)
 {
 	if (!(wq->flags & WQ_UNBOUND)) {
 		if (likely(cpu < nr_cpu_ids))
-			return per_cpu_ptr(wq->pool_wq.pcpu, cpu);
-	} else if (likely(cpu == WORK_CPU_UNBOUND))
-		return wq->pool_wq.single;
+			return per_cpu_ptr(wq->cpu_pwqs, cpu);
+	} else if (likely(cpu == WORK_CPU_UNBOUND)) {
+		return list_first_entry(&wq->pwqs, struct pool_workqueue,
+					pwqs_node);
+	}
 	return NULL;
 }
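
The new lookup is still O(1): list_first_entry() is simply
container_of() applied to the list head's ->next pointer.  From
include/linux/list.h:

	#define list_first_entry(ptr, type, member) \
		list_entry((ptr)->next, type, member)

	#define list_entry(ptr, type, member) \
		container_of(ptr, type, member)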
@@ -3085,8 +3083,8 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
 	int cpu;
 
 	if (!(wq->flags & WQ_UNBOUND)) {
-		wq->pool_wq.pcpu = alloc_percpu(struct pool_workqueue);
-		if (!wq->pool_wq.pcpu)
+		wq->cpu_pwqs = alloc_percpu(struct pool_workqueue);
+		if (!wq->cpu_pwqs)
 			return -ENOMEM;
 
 		for_each_possible_cpu(cpu) {
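
The bound (percpu) side is structurally unchanged: one alloc_percpu()
allocation provides a pwq per possible CPU, which get_pwq() later
resolves with per_cpu_ptr().  A minimal sketch of that pairing (the
init_pwq() helper is hypothetical, shown only to mark where per-pwq
setup happens):

	struct pool_workqueue __percpu *cpu_pwqs;
	int cpu;

	cpu_pwqs = alloc_percpu(struct pool_workqueue);	/* one pwq per possible CPU */
	if (!cpu_pwqs)
		return -ENOMEM;
	for_each_possible_cpu(cpu)
		init_pwq(per_cpu_ptr(cpu_pwqs, cpu));	/* hypothetical per-pwq setup */
	...
	free_percpu(cpu_pwqs);				/* paired teardown, cf. free_pwqs() */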
@@ -3102,7 +3100,6 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
 		if (!pwq)
 			return -ENOMEM;
 
-		wq->pool_wq.single = pwq;
 		pwq->pool = get_std_worker_pool(WORK_CPU_UNBOUND, highpri);
 		list_add_tail(&pwq->pwqs_node, &wq->pwqs);
 	}
@@ -3113,9 +3110,10 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
 static void free_pwqs(struct workqueue_struct *wq)
 {
 	if (!(wq->flags & WQ_UNBOUND))
-		free_percpu(wq->pool_wq.pcpu);
-	else
-		kmem_cache_free(pwq_cache, wq->pool_wq.single);
+		free_percpu(wq->cpu_pwqs);
+	else if (!list_empty(&wq->pwqs))
+		kmem_cache_free(pwq_cache, list_first_entry(&wq->pwqs,
+					struct pool_workqueue, pwqs_node));
 }
 
 static int wq_clamp_max_active(int max_active, unsigned int flags,
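
The new list_empty() check matters on the error path: for an unbound
workqueue the pwq must now be fished out of wq->pwqs, and if
alloc_and_link_pwqs() failed before linking it, the list is still
empty and there is nothing to free.  A sketch of the unwinding this
guards, assuming a caller that cleans up via free_pwqs():

	if (alloc_and_link_pwqs(wq) < 0)
		goto err;
	...
err:
	free_pwqs(wq);	/* safe even if no pwq was ever linked */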