Merge branch 'akpm' (patches from Andrew)
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 087d18d771b537972b4bacf8c5528a3c40fc91e5..5942eeafb9acfd577768c9e737c91547680449ae 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -20,6 +20,7 @@
 #include <linux/freezer.h>
 #include <linux/ptrace.h>
 #include <linux/uaccess.h>
+#include <linux/numa.h>
 #include <trace/events/sched.h>
 
 static DEFINE_SPINLOCK(kthread_create_lock);
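/*
 * Annotation: <linux/numa.h> is pulled in for NUMA_NO_NODE, which a hunk
 * further down uses in place of an open-coded -1. In the kernel headers:
 *
 *        #define NUMA_NO_NODE    (-1)
 */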
@@ -101,6 +102,12 @@ bool kthread_should_stop(void)
 }
 EXPORT_SYMBOL(kthread_should_stop);
 
+bool __kthread_should_park(struct task_struct *k)
+{
+       return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
+}
+EXPORT_SYMBOL_GPL(__kthread_should_park);
+
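/*
 * Annotation, a hedged sketch (the drv/task names are hypothetical, not
 * part of this diff): unlike kthread_should_park() below, the new
 * __kthread_should_park() takes an explicit task, so a wakeup path can
 * query the park state of a kthread other than current:
 *
 *        if (!__kthread_should_park(drv->task))
 *                wake_up_process(drv->task);
 */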
 /**
  * kthread_should_park - should this kthread park now?
  *
@@ -114,7 +121,7 @@ EXPORT_SYMBOL(kthread_should_stop);
  */
 bool kthread_should_park(void)
 {
-       return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
+       return __kthread_should_park(current);
 }
 EXPORT_SYMBOL_GPL(kthread_should_park);
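/*
 * Annotation: for reference, the usual consumer pattern (sketch only,
 * my_thread_fn()/my_work() are hypothetical) is a kthread main loop that
 * honours both stop and park requests:
 *
 *        static int my_thread_fn(void *data)
 *        {
 *                while (!kthread_should_stop()) {
 *                        if (kthread_should_park())
 *                                kthread_parkme();
 *                        my_work(data);
 *                }
 *                return 0;
 *        }
 */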
 
@@ -599,7 +606,7 @@ void __kthread_init_worker(struct kthread_worker *worker,
                                struct lock_class_key *key)
 {
        memset(worker, 0, sizeof(struct kthread_worker));
-       spin_lock_init(&worker->lock);
+       raw_spin_lock_init(&worker->lock);
        lockdep_set_class_and_name(&worker->lock, key, name);
        INIT_LIST_HEAD(&worker->work_list);
        INIT_LIST_HEAD(&worker->delayed_work_list);
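/*
 * Annotation: worker->lock becomes a raw_spinlock_t here. On PREEMPT_RT a
 * plain spinlock_t turns into a sleeping lock, so converting to a raw lock
 * is what makes it legal to take the lock from hard-IRQ context (see the
 * kthread_queue_work() hunk below).
 */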
@@ -641,21 +648,21 @@ repeat:
 
        if (kthread_should_stop()) {
                __set_current_state(TASK_RUNNING);
-               spin_lock_irq(&worker->lock);
+               raw_spin_lock_irq(&worker->lock);
                worker->task = NULL;
-               spin_unlock_irq(&worker->lock);
+               raw_spin_unlock_irq(&worker->lock);
                return 0;
        }
 
        work = NULL;
-       spin_lock_irq(&worker->lock);
+       raw_spin_lock_irq(&worker->lock);
        if (!list_empty(&worker->work_list)) {
                work = list_first_entry(&worker->work_list,
                                        struct kthread_work, node);
                list_del_init(&work->node);
        }
        worker->current_work = work;
-       spin_unlock_irq(&worker->lock);
+       raw_spin_unlock_irq(&worker->lock);
 
        if (work) {
                __set_current_state(TASK_RUNNING);
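/*
 * Annotation: kthread_worker_fn() above is the thread function behind every
 * kthread_worker. A sketch of the manual setup path (worker name
 * hypothetical), as opposed to using kthread_create_worker():
 *
 *        struct kthread_worker worker;
 *
 *        kthread_init_worker(&worker);
 *        task = kthread_run(kthread_worker_fn, &worker, "my_worker");
 */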
@@ -675,7 +682,7 @@ __kthread_create_worker(int cpu, unsigned int flags,
 {
        struct kthread_worker *worker;
        struct task_struct *task;
-       int node = -1;
+       int node = NUMA_NO_NODE;
 
        worker = kzalloc(sizeof(*worker), GFP_KERNEL);
        if (!worker)
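/*
 * Annotation: the value is unchanged (NUMA_NO_NODE is -1); the name just
 * states the intent that a worker not bound to a CPU has no NUMA
 * preference. A caller sketch ("my_worker" is hypothetical):
 *
 *        struct kthread_worker *w;
 *
 *        w = kthread_create_worker(0, "my_worker");
 */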
@@ -812,12 +819,12 @@ bool kthread_queue_work(struct kthread_worker *worker,
        bool ret = false;
        unsigned long flags;
 
-       spin_lock_irqsave(&worker->lock, flags);
+       raw_spin_lock_irqsave(&worker->lock, flags);
        if (!queuing_blocked(worker, work)) {
                kthread_insert_work(worker, work, &worker->work_list);
                ret = true;
        }
-       spin_unlock_irqrestore(&worker->lock, flags);
+       raw_spin_unlock_irqrestore(&worker->lock, flags);
        return ret;
 }
 EXPORT_SYMBOL_GPL(kthread_queue_work);
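/*
 * Annotation, hedged sketch (handler and variable names hypothetical):
 * with the raw lock taken irqsave, queuing is safe from hard-IRQ context
 * even on PREEMPT_RT, which appears to be the point of this conversion:
 *
 *        static irqreturn_t my_irq_handler(int irq, void *data)
 *        {
 *                kthread_queue_work(&my_worker, &my_work);
 *                return IRQ_HANDLED;
 *        }
 */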
@@ -835,6 +842,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
        struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
        struct kthread_work *work = &dwork->work;
        struct kthread_worker *worker = work->worker;
+       unsigned long flags;
 
        /*
         * This might happen when a pending work is reinitialized.
@@ -843,7 +851,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
        if (WARN_ON_ONCE(!worker))
                return;
 
-       spin_lock(&worker->lock);
+       raw_spin_lock_irqsave(&worker->lock, flags);
        /* Work must not be used with >1 worker, see kthread_queue_work(). */
        WARN_ON_ONCE(work->worker != worker);
 
@@ -852,7 +860,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
        list_del_init(&work->node);
        kthread_insert_work(worker, work, &worker->work_list);
 
-       spin_unlock(&worker->lock);
+       raw_spin_unlock_irqrestore(&worker->lock, flags);
 }
 EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
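/*
 * Annotation: the callback previously used plain spin_lock(). Now that the
 * raw lock may also be taken from hard-IRQ queuing paths, the timer
 * callback saves and restores IRQ state itself (hence the new 'flags'
 * local) so it cannot be interrupted by a queuer while holding the lock.
 */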
 
@@ -908,14 +916,14 @@ bool kthread_queue_delayed_work(struct kthread_worker *worker,
        unsigned long flags;
        bool ret = false;
 
-       spin_lock_irqsave(&worker->lock, flags);
+       raw_spin_lock_irqsave(&worker->lock, flags);
 
        if (!queuing_blocked(worker, work)) {
                __kthread_queue_delayed_work(worker, dwork, delay);
                ret = true;
        }
 
-       spin_unlock_irqrestore(&worker->lock, flags);
+       raw_spin_unlock_irqrestore(&worker->lock, flags);
        return ret;
 }
 EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
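/*
 * Annotation, usage sketch (names hypothetical):
 *
 *        static struct kthread_delayed_work my_dwork;
 *
 *        kthread_init_delayed_work(&my_dwork, my_work_fn);
 *        kthread_queue_delayed_work(&my_worker, &my_dwork,
 *                                   msecs_to_jiffies(100));
 */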
@@ -951,7 +959,7 @@ void kthread_flush_work(struct kthread_work *work)
        if (!worker)
                return;
 
-       spin_lock_irq(&worker->lock);
+       raw_spin_lock_irq(&worker->lock);
        /* Work must not be used with >1 worker, see kthread_queue_work(). */
        WARN_ON_ONCE(work->worker != worker);
 
@@ -963,7 +971,7 @@ void kthread_flush_work(struct kthread_work *work)
        else
                noop = true;
 
-       spin_unlock_irq(&worker->lock);
+       raw_spin_unlock_irq(&worker->lock);
 
        if (!noop)
                wait_for_completion(&fwork.done);
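/*
 * Annotation: kthread_flush_work() queues a barrier work right behind the
 * target and waits for its completion, e.g.
 *
 *        kthread_flush_work(&my_work);
 *
 * which returns once a queued or running my_work has finished (a no-op if
 * it was idle).
 */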
@@ -996,9 +1004,9 @@ static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
                 * any queuing is blocked by setting the canceling counter.
                 */
                work->canceling++;
-               spin_unlock_irqrestore(&worker->lock, *flags);
+               raw_spin_unlock_irqrestore(&worker->lock, *flags);
                del_timer_sync(&dwork->timer);
-               spin_lock_irqsave(&worker->lock, *flags);
+               raw_spin_lock_irqsave(&worker->lock, *flags);
                work->canceling--;
        }
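/*
 * Annotation: the unlock/relock around del_timer_sync() above is
 * deliberate. kthread_delayed_work_timer_fn() takes worker->lock itself,
 * so waiting for a running timer callback while holding the lock would
 * deadlock; the canceling counter keeps re-queuing blocked while the lock
 * is dropped.
 */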
 
@@ -1045,7 +1053,7 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
        unsigned long flags;
        int ret = false;
 
-       spin_lock_irqsave(&worker->lock, flags);
+       raw_spin_lock_irqsave(&worker->lock, flags);
 
        /* Do not bother with canceling when never queued. */
        if (!work->worker)
@@ -1062,7 +1070,7 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
 fast_queue:
        __kthread_queue_delayed_work(worker, dwork, delay);
 out:
-       spin_unlock_irqrestore(&worker->lock, flags);
+       raw_spin_unlock_irqrestore(&worker->lock, flags);
        return ret;
 }
 EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
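/*
 * Annotation, usage sketch (names hypothetical): re-arm a delayed work
 * whether or not it is already pending:
 *
 *        kthread_mod_delayed_work(&my_worker, &my_dwork,
 *                                 msecs_to_jiffies(50));
 */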
@@ -1076,7 +1084,7 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
        if (!worker)
                goto out;
 
-       spin_lock_irqsave(&worker->lock, flags);
+       raw_spin_lock_irqsave(&worker->lock, flags);
        /* Work must not be used with >1 worker, see kthread_queue_work(). */
        WARN_ON_ONCE(work->worker != worker);
 
@@ -1090,13 +1098,13 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
         * In the meantime, block any queuing by setting the canceling counter.
         */
        work->canceling++;
-       spin_unlock_irqrestore(&worker->lock, flags);
+       raw_spin_unlock_irqrestore(&worker->lock, flags);
        kthread_flush_work(work);
-       spin_lock_irqsave(&worker->lock, flags);
+       raw_spin_lock_irqsave(&worker->lock, flags);
        work->canceling--;
 
 out_fast:
-       spin_unlock_irqrestore(&worker->lock, flags);
+       raw_spin_unlock_irqrestore(&worker->lock, flags);
 out:
        return ret;
 }
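/*
 * Annotation: __kthread_cancel_work_sync() backs the two public helpers;
 * caller sketch (names hypothetical):
 *
 *        kthread_cancel_work_sync(&my_work);
 *        kthread_cancel_delayed_work_sync(&my_dwork);
 */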