[PATCH] fix kernel-doc warnings in 2.6.20-rc1
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 17c2f03d2c27f9928cf175db8b01a929426d130c..a3da07c5af2835f57f094cddd20e731a8a77bdb5 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -29,6 +29,9 @@
 #include <linux/kthread.h>
 #include <linux/hardirq.h>
 #include <linux/mempolicy.h>
+#include <linux/freezer.h>
+#include <linux/kallsyms.h>
+#include <linux/debug_locks.h>
 
 /*
  * The per-CPU workqueue (if single thread, we always use the first
@@ -55,6 +58,8 @@ struct cpu_workqueue_struct {
        struct task_struct *thread;
 
        int run_depth;          /* Detect run_workqueue() recursion depth */
+
+       int freezeable;         /* Freeze the thread during suspend */
 } ____cacheline_aligned;
 
 /*
@@ -80,6 +85,99 @@ static inline int is_single_threaded(struct workqueue_struct *wq)
        return list_empty(&wq->list);
 }
 
+/*
+ * Set the workqueue on which a work item is to be run
+ * - Must *only* be called if the pending flag is set
+ */
+static inline void set_wq_data(struct work_struct *work, void *wq)
+{
+       unsigned long new;
+
+       BUG_ON(!work_pending(work));
+
+       new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
+       new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
+       atomic_long_set(&work->data, new);
+}
+
+static inline void *get_wq_data(struct work_struct *work)
+{
+       return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
+}
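
The two helpers above pack the cpu_workqueue pointer and the WORK_STRUCT_* flag bits into the single atomic word work->data, relying on the pointer's alignment to keep the low bits free. A minimal stand-alone sketch of that packing trick, using hypothetical names rather than the kernel's:

	#include <assert.h>
	#include <stdio.h>

	#define FLAG_PENDING	(1UL << 0)	/* mirrors WORK_STRUCT_PENDING */
	#define FLAG_MASK	(3UL)		/* mirrors WORK_STRUCT_FLAG_MASK */
	#define PTR_MASK	(~FLAG_MASK)	/* mirrors WORK_STRUCT_WQ_DATA_MASK */

	struct fake_cwq { int dummy; } __attribute__((aligned(4)));

	int main(void)
	{
		static struct fake_cwq cwq;
		struct fake_cwq *q;
		unsigned long data;

		/* queueing: store the target queue and mark the work pending */
		data = (unsigned long)&cwq | FLAG_PENDING;

		/* later: recover both pieces independently */
		q = (struct fake_cwq *)(data & PTR_MASK);

		assert(q == &cwq && (data & FLAG_PENDING));
		printf("queue=%p pending=%lu\n", (void *)q, data & FLAG_PENDING);
		return 0;
	}
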
+
+static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work)
+{
+       int ret = 0;
+       unsigned long flags;
+
+       spin_lock_irqsave(&cwq->lock, flags);
+       /*
+        * We need to re-validate the work info after we've gotten
+        * the cpu_workqueue lock. We can run the work now iff:
+        *
+        *  - the wq_data still matches the cpu_workqueue_struct
+        *  - AND the work is still marked pending
+        *  - AND the work is still on a list (which will be this
+        *    workqueue_struct list)
+        *
+        * All these conditions are important, because we
+        * need to protect against the work being run right
+        * now on another CPU (all but the last one might be
+        * true if it's currently running and has not been
+        * released yet, for example).
+        */
+       if (get_wq_data(work) == cwq
+           && work_pending(work)
+           && !list_empty(&work->entry)) {
+               work_func_t f = work->func;
+               list_del_init(&work->entry);
+               spin_unlock_irqrestore(&cwq->lock, flags);
+
+               if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
+                       work_release(work);
+               f(work);
+
+               spin_lock_irqsave(&cwq->lock, flags);
+               cwq->remove_sequence++;
+               wake_up(&cwq->work_done);
+               ret = 1;
+       }
+       spin_unlock_irqrestore(&cwq->lock, flags);
+       return ret;
+}
+
+/**
+ * run_scheduled_work - run scheduled work synchronously
+ * @work: work to run
+ *
+ * This checks if the work was pending, and runs it
+ * synchronously if so. It returns a boolean to indicate
+ * whether it had any scheduled work to run or not.
+ *
+ * NOTE! This _only_ works for normal work_structs. You
+ * CANNOT use this for delayed work, because the wq data
+ * for delayed work will not point properly to the per-
+ * CPU workqueue struct, but will change!
+ */
+int fastcall run_scheduled_work(struct work_struct *work)
+{
+       for (;;) {
+               struct cpu_workqueue_struct *cwq;
+
+               if (!work_pending(work))
+                       return 0;
+               if (list_empty(&work->entry))
+                       return 0;
+               /* NOTE! This depends intimately on __queue_work! */
+               cwq = get_wq_data(work);
+               if (!cwq)
+                       return 0;
+               if (__run_work(cwq, work))
+                       return 1;
+       }
+}
+EXPORT_SYMBOL(run_scheduled_work);
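
run_scheduled_work() lets a caller that has already queued a plain (non-delayed) work item pull it off the queue and run it synchronously in the current context instead of waiting for keventd. A hypothetical caller might look like this (names below are illustrative, not part of the patch):

	#include <linux/kernel.h>
	#include <linux/workqueue.h>

	static void my_handler(struct work_struct *work)
	{
		/* ... */
	}

	static DECLARE_WORK(my_work, my_handler);

	static void kick_and_maybe_run(void)
	{
		schedule_work(&my_work);

		/*
		 * If keventd has not picked it up yet, run the handler here;
		 * returns 0 if there was no pending work left to run.
		 */
		if (!run_scheduled_work(&my_work))
			printk(KERN_DEBUG "my_work already ran or was never queued\n");
	}

As the kernel-doc above stresses, this applies only to plain work_structs, never to delayed work.
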
+
 /* Preempt must be disabled. */
 static void __queue_work(struct cpu_workqueue_struct *cwq,
                         struct work_struct *work)
@@ -87,7 +185,7 @@ static void __queue_work(struct cpu_workqueue_struct *cwq,
        unsigned long flags;
 
        spin_lock_irqsave(&cwq->lock, flags);
-       work->wq_data = cwq;
+       set_wq_data(work, cwq);
        list_add_tail(&work->entry, &cwq->worklist);
        cwq->insert_sequence++;
        wake_up(&cwq->more_work);
@@ -108,7 +206,7 @@ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
 {
        int ret = 0, cpu = get_cpu();
 
-       if (!test_and_set_bit(0, &work->pending)) {
+       if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                if (unlikely(is_single_threaded(wq)))
                        cpu = singlethread_cpu;
                BUG_ON(!list_empty(&work->entry));
@@ -122,38 +220,42 @@ EXPORT_SYMBOL_GPL(queue_work);
 
 static void delayed_work_timer_fn(unsigned long __data)
 {
-       struct work_struct *work = (struct work_struct *)__data;
-       struct workqueue_struct *wq = work->wq_data;
+       struct delayed_work *dwork = (struct delayed_work *)__data;
+       struct workqueue_struct *wq = get_wq_data(&dwork->work);
        int cpu = smp_processor_id();
 
        if (unlikely(is_single_threaded(wq)))
                cpu = singlethread_cpu;
 
-       __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
+       __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
 }
 
 /**
  * queue_delayed_work - queue work on a workqueue after delay
  * @wq: workqueue to use
- * @work: work to queue
+ * @dwork: delayable work to queue
  * @delay: number of jiffies to wait before queueing
  *
  * Returns 0 if @work was already on a queue, non-zero otherwise.
  */
 int fastcall queue_delayed_work(struct workqueue_struct *wq,
-                       struct work_struct *work, unsigned long delay)
+                       struct delayed_work *dwork, unsigned long delay)
 {
        int ret = 0;
-       struct timer_list *timer = &work->timer;
+       struct timer_list *timer = &dwork->timer;
+       struct work_struct *work = &dwork->work;
 
-       if (!test_and_set_bit(0, &work->pending)) {
+       if (delay == 0)
+               return queue_work(wq, work);
+
+       if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));
 
                /* This stores wq for the moment, for the timer_fn */
-               work->wq_data = wq;
+               set_wq_data(work, wq);
                timer->expires = jiffies + delay;
-               timer->data = (unsigned long)work;
+               timer->data = (unsigned long)dwork;
                timer->function = delayed_work_timer_fn;
                add_timer(timer);
                ret = 1;
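
With work_struct no longer carrying its own timer, delayed submissions go through struct delayed_work, which embeds both the work_struct and the timer_list; the timer callback above receives the delayed_work and queues its embedded work. Under the reworked API a driver would use it roughly like this (a sketch with hypothetical names):

	#include <linux/kernel.h>
	#include <linux/workqueue.h>

	struct my_dev {
		struct delayed_work poll_work;
		/* ... */
	};

	static void my_poll(struct work_struct *work)
	{
		/* recover the containing structure through the embedded work */
		struct my_dev *dev = container_of(work, struct my_dev, poll_work.work);

		/* ... poll the hardware ..., then rearm for one second later */
		schedule_delayed_work(&dev->poll_work, HZ);
	}

	static void my_start(struct my_dev *dev)
	{
		INIT_DELAYED_WORK(&dev->poll_work, my_poll);
		schedule_delayed_work(&dev->poll_work, HZ);
	}
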
@@ -166,25 +268,26 @@ EXPORT_SYMBOL_GPL(queue_delayed_work);
  * queue_delayed_work_on - queue work on specific CPU after delay
  * @cpu: CPU number to execute work on
  * @wq: workqueue to use
- * @work: work to queue
+ * @dwork: work to queue
  * @delay: number of jiffies to wait before queueing
  *
  * Returns 0 if @work was already on a queue, non-zero otherwise.
  */
 int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
-                       struct work_struct *work, unsigned long delay)
+                       struct delayed_work *dwork, unsigned long delay)
 {
        int ret = 0;
-       struct timer_list *timer = &work->timer;
+       struct timer_list *timer = &dwork->timer;
+       struct work_struct *work = &dwork->work;
 
-       if (!test_and_set_bit(0, &work->pending)) {
+       if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));
 
                /* This stores wq for the moment, for the timer_fn */
-               work->wq_data = wq;
+               set_wq_data(work, wq);
                timer->expires = jiffies + delay;
-               timer->data = (unsigned long)work;
+               timer->data = (unsigned long)dwork;
                timer->function = delayed_work_timer_fn;
                add_timer_on(timer, cpu);
                ret = 1;
@@ -212,15 +315,26 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
        while (!list_empty(&cwq->worklist)) {
                struct work_struct *work = list_entry(cwq->worklist.next,
                                                struct work_struct, entry);
-               void (*f) (void *) = work->func;
-               void *data = work->data;
+               work_func_t f = work->func;
 
                list_del_init(cwq->worklist.next);
                spin_unlock_irqrestore(&cwq->lock, flags);
 
-               BUG_ON(work->wq_data != cwq);
-               clear_bit(0, &work->pending);
-               f(data);
+               BUG_ON(get_wq_data(work) != cwq);
+               if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
+                       work_release(work);
+               f(work);
+
+               if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
+                       printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
+                                       "%s/0x%08x/%d\n",
+                                       current->comm, preempt_count(),
+                                       current->pid);
+                       printk(KERN_ERR "    last function: ");
+                       print_symbol("%s\n", (unsigned long)f);
+                       debug_show_held_locks(current);
+                       dump_stack();
+               }
 
                spin_lock_irqsave(&cwq->lock, flags);
                cwq->remove_sequence++;
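
Because handlers now receive the work_struct itself and are released before the call, run_workqueue() can also sanity-check each handler on return: one that comes back with a spinlock held or preemption disabled leaves preempt_count() or lockdep state elevated, which the in_atomic()/lockdep_depth() test above reports. A deliberately buggy handler that the check would flag (hypothetical, for illustration only):

	#include <linux/spinlock.h>
	#include <linux/workqueue.h>

	static DEFINE_SPINLOCK(my_lock);

	static void leaky_handler(struct work_struct *work)
	{
		spin_lock(&my_lock);
		/*
		 * Missing spin_unlock(): preempt_count() stays elevated after
		 * this handler returns, so the check above prints the
		 * "workqueue leaked lock or atomic" report naming this function.
		 */
	}
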
@@ -237,7 +351,8 @@ static int worker_thread(void *__cwq)
        struct k_sigaction sa;
        sigset_t blocked;
 
-       current->flags |= PF_NOFREEZE;
+       if (!cwq->freezeable)
+               current->flags |= PF_NOFREEZE;
 
        set_user_nice(current, -5);
 
@@ -260,6 +375,9 @@ static int worker_thread(void *__cwq)
 
        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
+               if (cwq->freezeable)
+                       try_to_freeze();
+
                add_wait_queue(&cwq->more_work, &wait);
                if (list_empty(&cwq->worklist))
                        schedule();
@@ -336,7 +454,7 @@ void fastcall flush_workqueue(struct workqueue_struct *wq)
 EXPORT_SYMBOL_GPL(flush_workqueue);
 
 static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
-                                                  int cpu)
+                                                  int cpu, int freezeable)
 {
        struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
        struct task_struct *p;
@@ -346,6 +464,7 @@ static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
        cwq->thread = NULL;
        cwq->insert_sequence = 0;
        cwq->remove_sequence = 0;
+       cwq->freezeable = freezeable;
        INIT_LIST_HEAD(&cwq->worklist);
        init_waitqueue_head(&cwq->more_work);
        init_waitqueue_head(&cwq->work_done);
@@ -361,7 +480,7 @@ static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
 }
 
 struct workqueue_struct *__create_workqueue(const char *name,
-                                           int singlethread)
+                                           int singlethread, int freezeable)
 {
        int cpu, destroy = 0;
        struct workqueue_struct *wq;
@@ -381,7 +500,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
        mutex_lock(&workqueue_mutex);
        if (singlethread) {
                INIT_LIST_HEAD(&wq->list);
-               p = create_workqueue_thread(wq, singlethread_cpu);
+               p = create_workqueue_thread(wq, singlethread_cpu, freezeable);
                if (!p)
                        destroy = 1;
                else
@@ -389,7 +508,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
        } else {
                list_add(&wq->list, &workqueues);
                for_each_online_cpu(cpu) {
-                       p = create_workqueue_thread(wq, cpu);
+                       p = create_workqueue_thread(wq, cpu, freezeable);
                        if (p) {
                                kthread_bind(p, cpu);
                                wake_up_process(p);
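
__create_workqueue() now takes a freezeable flag in addition to singlethread; the worker threads of a freezeable queue keep PF_NOFREEZE clear and call try_to_freeze(), so they park in the freezer across suspend. A sketch of creating such a queue directly (drivers would normally go through the wrapper macros in workqueue.h, whose exact arguments are not shown in this patch):

	#include <linux/errno.h>
	#include <linux/init.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *my_wq;

	static int __init my_init(void)
	{
		/* singlethread = 0, freezeable = 1: one worker per CPU, all freezeable */
		my_wq = __create_workqueue("mydrv", 0, 1);
		if (!my_wq)
			return -ENOMEM;
		return 0;
	}
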
@@ -468,38 +587,37 @@ EXPORT_SYMBOL(schedule_work);
 
 /**
  * schedule_delayed_work - put work task in global workqueue after delay
- * @work: job to be done
- * @delay: number of jiffies to wait
+ * @dwork: job to be done
+ * @delay: number of jiffies to wait or 0 for immediate execution
  *
  * After waiting for a given time this puts a job in the kernel-global
  * workqueue.
  */
-int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay)
+int fastcall schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
 {
-       return queue_delayed_work(keventd_wq, work, delay);
+       return queue_delayed_work(keventd_wq, dwork, delay);
 }
 EXPORT_SYMBOL(schedule_delayed_work);
 
 /**
  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
  * @cpu: cpu to use
- * @work: job to be done
+ * @dwork: job to be done
  * @delay: number of jiffies to wait
  *
  * After waiting for a given time this puts a job in the kernel-global
  * workqueue on the specified CPU.
  */
 int schedule_delayed_work_on(int cpu,
-                       struct work_struct *work, unsigned long delay)
+                       struct delayed_work *dwork, unsigned long delay)
 {
-       return queue_delayed_work_on(cpu, keventd_wq, work, delay);
+       return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
 }
 EXPORT_SYMBOL(schedule_delayed_work_on);
 
 /**
  * schedule_on_each_cpu - call a function on each online CPU from keventd
  * @func: the function to call
- * @info: a pointer to pass to func()
  *
  * Returns zero on success.
  * Returns -ve errno on failure.
@@ -508,7 +626,7 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
  *
  * schedule_on_each_cpu() is very slow.
  */
-int schedule_on_each_cpu(void (*func)(void *info), void *info)
+int schedule_on_each_cpu(work_func_t func)
 {
        int cpu;
        struct work_struct *works;
@@ -519,9 +637,11 @@ int schedule_on_each_cpu(void (*func)(void *info), void *info)
 
        mutex_lock(&workqueue_mutex);
        for_each_online_cpu(cpu) {
-               INIT_WORK(per_cpu_ptr(works, cpu), func, info);
-               __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
-                               per_cpu_ptr(works, cpu));
+               struct work_struct *work = per_cpu_ptr(works, cpu);
+
+               INIT_WORK(work, func);
+               set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
+               __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
        }
        mutex_unlock(&workqueue_mutex);
        flush_workqueue(keventd_wq);
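
schedule_on_each_cpu() loses its separate info pointer along with the rest of the API: the callback is a plain work_func_t, so any state has to be reachable from the work item or from per-CPU data. A hypothetical caller:

	#include <linux/percpu.h>
	#include <linux/workqueue.h>

	static DEFINE_PER_CPU(unsigned long, my_hits);

	static void count_here(struct work_struct *unused)
	{
		/* keventd runs this once on each online CPU */
		get_cpu_var(my_hits)++;
		put_cpu_var(my_hits);
	}

	static int count_everywhere(void)
	{
		/* returns 0 on success, -ve errno otherwise; blocks until all CPUs are done */
		return schedule_on_each_cpu(count_here);
	}
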
@@ -539,12 +659,12 @@ EXPORT_SYMBOL(flush_scheduled_work);
  * cancel_rearming_delayed_workqueue - reliably kill off a delayed
  *                     work whose handler rearms the delayed work.
  * @wq:   the controlling workqueue structure
- * @work: the delayed work struct
+ * @dwork: the delayed work struct
  */
 void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
-                                      struct work_struct *work)
+                                      struct delayed_work *dwork)
 {
-       while (!cancel_delayed_work(work))
+       while (!cancel_delayed_work(dwork))
                flush_workqueue(wq);
 }
 EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
@@ -552,18 +672,17 @@ EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
 /**
  * cancel_rearming_delayed_work - reliably kill off a delayed keventd
  *                     work whose handler rearms the delayed work.
- * @work: the delayed work struct
+ * @dwork: the delayed work struct
  */
-void cancel_rearming_delayed_work(struct work_struct *work)
+void cancel_rearming_delayed_work(struct delayed_work *dwork)
 {
-       cancel_rearming_delayed_workqueue(keventd_wq, work);
+       cancel_rearming_delayed_workqueue(keventd_wq, dwork);
 }
 EXPORT_SYMBOL(cancel_rearming_delayed_work);
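
For a handler that rearms itself (like the hypothetical my_poll() sketched earlier), plain cancel_delayed_work() can race with an execution that is about to requeue; these helpers keep cancelling and flushing until the work is truly gone. Teardown would look like:

	static void my_stop(struct my_dev *dev)
	{
		/* loops cancel + flush until no requeue slips through */
		cancel_rearming_delayed_work(&dev->poll_work);
	}
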
 
 /**
  * execute_in_process_context - reliably execute the routine with user context
  * @fn:                the function to execute
- * @data:      data to pass to the function
  * @ew:                guaranteed storage for the execute work structure (must
  *             be available when the work executes)
  *
@@ -573,15 +692,14 @@ EXPORT_SYMBOL(cancel_rearming_delayed_work);
  * Returns:    0 - function was executed
  *             1 - function was scheduled for execution
  */
-int execute_in_process_context(void (*fn)(void *data), void *data,
-                              struct execute_work *ew)
+int execute_in_process_context(work_func_t fn, struct execute_work *ew)
 {
        if (!in_interrupt()) {
-               fn(data);
+               fn(&ew->work);
                return 0;
        }
 
-       INIT_WORK(&ew->work, fn, data);
+       INIT_WORK(&ew->work, fn);
        schedule_work(&ew->work);
 
        return 1;
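
execute_in_process_context() likewise drops its data argument: the caller embeds a struct execute_work and the handler recovers its context with container_of(), whether it was called directly or deferred through keventd. A hypothetical user (names are illustrative only):

	#include <linux/slab.h>
	#include <linux/workqueue.h>

	struct my_release_ctx {
		struct execute_work ew;
		/* ... resources to tear down ... */
	};

	static void my_release(struct work_struct *work)
	{
		struct my_release_ctx *ctx =
			container_of(work, struct my_release_ctx, ew.work);

		/* may sleep: runs either in the caller or in keventd */
		kfree(ctx);
	}

	static void my_drop(struct my_release_ctx *ctx)
	{
		/* runs my_release() immediately unless called from interrupt context */
		execute_in_process_context(my_release, &ctx->ew);
	}
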
@@ -609,7 +727,6 @@ int current_is_keventd(void)
 
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 /* Take the work from this (downed) CPU. */
 static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
 {
@@ -642,7 +759,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
                mutex_lock(&workqueue_mutex);
                /* Create a new workqueue thread for it. */
                list_for_each_entry(wq, &workqueues, list) {
-                       if (!create_workqueue_thread(wq, hotcpu)) {
+                       if (!create_workqueue_thread(wq, hotcpu, 0)) {
                                printk("workqueue for %i failed\n", hotcpu);
                                return NOTIFY_BAD;
                        }
@@ -692,7 +809,6 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 
        return NOTIFY_OK;
 }
-#endif
 
 void init_workqueues(void)
 {