Merge branch 'for-4.16' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq
author		Linus Torvalds <torvalds@linux-foundation.org>
		Tue, 30 Jan 2018 22:45:39 +0000 (14:45 -0800)
committer	Linus Torvalds <torvalds@linux-foundation.org>
		Tue, 30 Jan 2018 22:45:39 +0000 (14:45 -0800)
Pull workqueue updates from Tejun Heo:
 "Workqueue has an early init trick where workqueues can be created and
  work items queued on them before the workqueue subsystem is online.
  This helps simplify early init and the operation of low-level
  subsystems which use workqueues for managerial things that aren't
  depended upon early during boot.

  Out of laziness, the early init didn't cover workqueues with
  WQ_MEM_RECLAIM, which is inconsistent and confusing because adding the
  flag simply makes the system fail to boot. Cover WQ_MEM_RECLAIM too.

  This was originally brought up for RCU, but RCU didn't actually need
  it. I still think it's a good idea to cover it"
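
[ For illustration, the pattern this enables; a minimal sketch with
  hypothetical names (foo_wq, foo_work_fn, foo_early_init), not code
  from this series:

      #include <linux/init.h>
      #include <linux/workqueue.h>

      static struct workqueue_struct *foo_wq;

      static void foo_work_fn(struct work_struct *work)
      {
              /* executes only once workqueue_init() has brought workers online */
      }
      static DECLARE_WORK(foo_work, foo_work_fn);

      /* hypothetical caller that runs before workqueue_init() */
      void __init foo_early_init(void)
      {
              /*
               * Before this series, WQ_MEM_RECLAIM here made the system
               * fail to boot: __alloc_workqueue_key() spawned the rescuer
               * kthread at allocation time, too early in boot. Now the
               * rescuer is attached later, from workqueue_init().
               */
              foo_wq = alloc_workqueue("foo", WQ_MEM_RECLAIM, 0);
              if (WARN_ON(!foo_wq))
                      return;

              /* queueing already works; execution is deferred */
              queue_work(foo_wq, &foo_work);
      }
]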

* 'for-4.16' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  workqueue: allow WQ_MEM_RECLAIM on early init workqueues
  workqueue: separate out init_rescuer()

kernel/workqueue.c

diff --combined kernel/workqueue.c
index 8c34981d90ad8dc615452dd11aeb88ef834da830,c86cc1ed678b728dabdb85cceb5ee1e1795558f7..8dd2e66e838323f07eb291b8f4ea88fc19a9352b
@@@ -48,7 -48,6 +48,7 @@@
  #include <linux/moduleparam.h>
  #include <linux/uaccess.h>
  #include <linux/sched/isolation.h>
 +#include <linux/nmi.h>
  
  #include "workqueue_internal.h"
  
@@@ -2136,7 -2135,7 +2136,7 @@@ __acquires(&pool->lock
         * stop_machine. At the same time, report a quiescent RCU state so
         * the same condition doesn't freeze RCU.
         */
 -      cond_resched_rcu_qs();
 +      cond_resched();
  
        spin_lock_irq(&pool->lock);
  
@@@ -3940,6 -3939,37 +3940,37 @@@ static int wq_clamp_max_active(int max_
        return clamp_val(max_active, 1, lim);
  }
  
+ /*
+  * Workqueues which may be used during memory reclaim should have a rescuer
+  * to guarantee forward progress.
+  */
+ static int init_rescuer(struct workqueue_struct *wq)
+ {
+       struct worker *rescuer;
+       int ret;
+
+       if (!(wq->flags & WQ_MEM_RECLAIM))
+               return 0;
+
+       rescuer = alloc_worker(NUMA_NO_NODE);
+       if (!rescuer)
+               return -ENOMEM;
+
+       rescuer->rescue_wq = wq;
+       rescuer->task = kthread_create(rescuer_thread, rescuer, "%s", wq->name);
+       ret = PTR_ERR_OR_ZERO(rescuer->task);
+       if (ret) {
+               kfree(rescuer);
+               return ret;
+       }
+
+       wq->rescuer = rescuer;
+       kthread_bind_mask(rescuer->task, cpu_possible_mask);
+       wake_up_process(rescuer->task);
+
+       return 0;
+ }
+
  struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
                                               unsigned int flags,
                                               int max_active,
        if (alloc_and_link_pwqs(wq) < 0)
                goto err_free_wq;
  
-       /*
-        * Workqueues which may be used during memory reclaim should
-        * have a rescuer to guarantee forward progress.
-        */
-       if (flags & WQ_MEM_RECLAIM) {
-               struct worker *rescuer;
-               rescuer = alloc_worker(NUMA_NO_NODE);
-               if (!rescuer)
-                       goto err_destroy;
-               rescuer->rescue_wq = wq;
-               rescuer->task = kthread_create(rescuer_thread, rescuer, "%s",
-                                              wq->name);
-               if (IS_ERR(rescuer->task)) {
-                       kfree(rescuer);
-                       goto err_destroy;
-               }
-               wq->rescuer = rescuer;
-               kthread_bind_mask(rescuer->task, cpu_possible_mask);
-               wake_up_process(rescuer->task);
-       }
+       if (wq_online && init_rescuer(wq) < 0)
+               goto err_destroy;
  
        if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq))
                goto err_destroy;
@@@ -4464,12 -4473,6 +4474,12 @@@ void show_workqueue_state(void
                        if (pwq->nr_active || !list_empty(&pwq->delayed_works))
                                show_pwq(pwq);
                        spin_unlock_irqrestore(&pwq->pool->lock, flags);
 +                      /*
 +                       * We could be printing a lot from atomic context, e.g.
 +                       * sysrq-t -> show_workqueue_state(). Avoid triggering
 +                       * hard lockup.
 +                       */
 +                      touch_nmi_watchdog();
                }
        }
  
                pr_cont("\n");
        next_pool:
                spin_unlock_irqrestore(&pool->lock, flags);
 +              /*
 +               * We could be printing a lot from atomic context, e.g.
 +               * sysrq-t -> show_workqueue_state(). Avoid triggering
 +               * hard lockup.
 +               */
 +              touch_nmi_watchdog();
        }
  
        rcu_read_unlock_sched();
@@@ -5642,6 -5639,8 +5652,8 @@@ int __init workqueue_init(void
         * archs such as power and arm64.  As per-cpu pools created
         * previously could be missing node hint and unbound pools NUMA
         * affinity, fix them up.
+        *
+        * Also, while iterating workqueues, create rescuers if requested.
         */
        wq_numa_init();
  
                }
        }
  
-       list_for_each_entry(wq, &workqueues, list)
+       list_for_each_entry(wq, &workqueues, list) {
                wq_update_unbound_numa(wq, smp_processor_id(), true);
+               WARN(init_rescuer(wq),
+                    "workqueue: failed to create early rescuer for %s",
+                    wq->name);
+       }
  
        mutex_unlock(&wq_pool_mutex);
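
[ Condensed from the two hunks above, how the call sites interact; a
  sketch, not verbatim kernel code:

      /* __alloc_workqueue_key(): once workqueues are online, the
       * rescuer for a WQ_MEM_RECLAIM workqueue is created right at
       * allocation time. */
      if (wq_online && init_rescuer(wq) < 0)
              goto err_destroy;

      /* workqueue_init(): runs before wq_online is set, so this loop
       * picks up the early-allocated workqueues skipped above;
       * init_rescuer() returns 0 for workqueues without WQ_MEM_RECLAIM. */
      list_for_each_entry(wq, &workqueues, list) {
              wq_update_unbound_numa(wq, smp_processor_id(), true);
              WARN(init_rescuer(wq),
                   "workqueue: failed to create early rescuer for %s",
                   wq->name);
      }
]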