workqueues: s/ON_STACK/ONSTACK/
author Andrew Morton <akpm@linux-foundation.org>
Tue, 26 Oct 2010 21:22:34 +0000 (14:22 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 26 Oct 2010 23:52:14 +0000 (16:52 -0700)
Silly though it is, completions and wait_queue_heads use foo_ONSTACK
(COMPLETION_INITIALIZER_ONSTACK, DECLARE_COMPLETION_ONSTACK,
__WAIT_QUEUE_HEAD_INIT_ONSTACK and DECLARE_WAIT_QUEUE_HEAD_ONSTACK) so I
guess workqueues should do the same thing.

s/INIT_WORK_ON_STACK/INIT_WORK_ONSTACK/
s/INIT_DELAYED_WORK_ON_STACK/INIT_DELAYED_WORK_ONSTACK/

Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
arch/x86/kernel/hpet.c
arch/x86/kernel/smpboot.c
drivers/md/dm-snap-persistent.c
include/linux/workqueue.h
kernel/workqueue.c

index aff0b3c2750929aff9f36ccf946f4a7ae31b1afa..ae03cab4352e8535946a835787c83e5c8563079b 100644 (file)
@@ -713,7 +713,7 @@ static int hpet_cpuhp_notify(struct notifier_block *n,
 
        switch (action & 0xf) {
        case CPU_ONLINE:
-               INIT_DELAYED_WORK_ON_STACK(&work.work, hpet_work);
+               INIT_DELAYED_WORK_ONSTACK(&work.work, hpet_work);
                init_completion(&work.complete);
                /* FIXME: add schedule_work_on() */
                schedule_delayed_work_on(cpu, &work.work, 0);
index 6af118511b4a56fde1ee915162d2013c61e3d7b2..6c7faecd9e4aba5fa26919714a696f215d7d97d3 100644 (file)
@@ -747,7 +747,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
                .done   = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
        };
 
-       INIT_WORK_ON_STACK(&c_idle.work, do_fork_idle);
+       INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
 
        alternatives_smp_switch(1);
 
index 0b61792a278041bc6bedc9c04800457f93db1f6d..2129cdb115dc0e72caced734068deee43f10aa1e 100644 (file)
@@ -254,7 +254,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
         * Issue the synchronous I/O from a different thread
         * to avoid generic_make_request recursion.
         */
-       INIT_WORK_ON_STACK(&req.work, do_metadata);
+       INIT_WORK_ONSTACK(&req.work, do_metadata);
        queue_work(ps->metadata_wq, &req.work);
        flush_workqueue(ps->metadata_wq);
 
index 070bb7a8893646418b8b4ddcfbe9a3436e514711..0c0771f06bfa745e8e4e5add4ec4823cf52eb813 100644 (file)
@@ -190,7 +190,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
                __INIT_WORK((_work), (_func), 0);               \
        } while (0)
 
-#define INIT_WORK_ON_STACK(_work, _func)                       \
+#define INIT_WORK_ONSTACK(_work, _func)                                \
        do {                                                    \
                __INIT_WORK((_work), (_func), 1);               \
        } while (0)
@@ -201,9 +201,9 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
                init_timer(&(_work)->timer);                    \
        } while (0)
 
-#define INIT_DELAYED_WORK_ON_STACK(_work, _func)               \
+#define INIT_DELAYED_WORK_ONSTACK(_work, _func)                        \
        do {                                                    \
-               INIT_WORK_ON_STACK(&(_work)->work, (_func));    \
+               INIT_WORK_ONSTACK(&(_work)->work, (_func));     \
                init_timer_on_stack(&(_work)->timer);           \
        } while (0)
 
index e5ff2cbaadc21d8e138b24c63dda236c4acb0350..90db1bd1a97852e4c547c43d840b2ca624c9a773 100644 (file)
@@ -2064,7 +2064,7 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
         * checks and call back into the fixup functions where we
         * might deadlock.
         */
-       INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
+       INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
        __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
        init_completion(&barr->done);