workqueues: s/ON_STACK/ONSTACK/
Silly though it is, completions and wait_queue_heads use foo_ONSTACK
(COMPLETION_INITIALIZER_ONSTACK, DECLARE_COMPLETION_ONSTACK,
__WAIT_QUEUE_HEAD_INIT_ONSTACK and DECLARE_WAIT_QUEUE_HEAD_ONSTACK), so I
guess workqueues should do the same thing.

s/INIT_WORK_ON_STACK/INIT_WORK_ONSTACK/
s/INIT_DELAYED_WORK_ON_STACK/INIT_DELAYED_WORK_ONSTACK/

Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
99dc829256
commit
ca1cab37d9
5 changed files with 7 additions and 7 deletions
|
@@ -713,7 +713,7 @@ static int hpet_cpuhp_notify(struct notifier_block *n,
 	switch (action & 0xf) {
 	case CPU_ONLINE:
-		INIT_DELAYED_WORK_ON_STACK(&work.work, hpet_work);
+		INIT_DELAYED_WORK_ONSTACK(&work.work, hpet_work);
 		init_completion(&work.complete);
 		/* FIXME: add schedule_work_on() */
 		schedule_delayed_work_on(cpu, &work.work, 0);
@@ -747,7 +747,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
 		.done	= COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
 	};

-	INIT_WORK_ON_STACK(&c_idle.work, do_fork_idle);
+	INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);

 	alternatives_smp_switch(1);
@@ -254,7 +254,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
 	 * Issue the synchronous I/O from a different thread
 	 * to avoid generic_make_request recursion.
 	 */
-	INIT_WORK_ON_STACK(&req.work, do_metadata);
+	INIT_WORK_ONSTACK(&req.work, do_metadata);
 	queue_work(ps->metadata_wq, &req.work);
 	flush_workqueue(ps->metadata_wq);
@@ -190,7 +190,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
 		__INIT_WORK((_work), (_func), 0);	\
 	} while (0)

-#define INIT_WORK_ON_STACK(_work, _func)	\
+#define INIT_WORK_ONSTACK(_work, _func)		\
 	do {					\
 		__INIT_WORK((_work), (_func), 1);	\
 	} while (0)
@@ -201,9 +201,9 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
 		init_timer(&(_work)->timer);	\
 	} while (0)

-#define INIT_DELAYED_WORK_ON_STACK(_work, _func)	\
+#define INIT_DELAYED_WORK_ONSTACK(_work, _func)		\
 	do {						\
-		INIT_WORK_ON_STACK(&(_work)->work, (_func));	\
+		INIT_WORK_ONSTACK(&(_work)->work, (_func));	\
 		init_timer_on_stack(&(_work)->timer);	\
 	} while (0)
@@ -2064,7 +2064,7 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
 	 * checks and call back into the fixup functions where we
 	 * might deadlock.
 	 */
-	INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
+	INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
 	__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
 	init_completion(&barr->done);
Loading…
Reference in a new issue