Linux 6.9-rc5: [sfrench/cifs-2.6.git] kernel/workqueue.c
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 95aea04ff722fc2b17e45dec34c81eb3b7cc481b..0066c8f6c15442641889c87995410ac82d078423 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -29,6 +29,7 @@
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/signal.h>
 #include <linux/completion.h>
 #include <linux/workqueue.h>
 #include <linux/moduleparam.h>
 #include <linux/uaccess.h>
 #include <linux/sched/isolation.h>
+#include <linux/sched/debug.h>
 #include <linux/nmi.h>
+#include <linux/kvm_para.h>
+#include <linux/delay.h>
+#include <linux/irq_work.h>
 
 #include "workqueue_internal.h"
 
-enum {
+enum worker_pool_flags {
        /*
         * worker_pool flags
         *
@@ -69,10 +74,17 @@ enum {
         * Note that DISASSOCIATED should be flipped only while holding
         * wq_pool_attach_mutex to avoid changing binding state while
         * worker_attach_to_pool() is in progress.
+        *
+        * As there can only be one concurrent BH execution context per CPU, a
+        * BH pool is per-CPU and always DISASSOCIATED.
         */
-       POOL_MANAGER_ACTIVE     = 1 << 0,       /* being managed */
+       POOL_BH                 = 1 << 0,       /* is a BH pool */
+       POOL_MANAGER_ACTIVE     = 1 << 1,       /* being managed */
        POOL_DISASSOCIATED      = 1 << 2,       /* cpu can't serve workers */
+       POOL_BH_DRAINING        = 1 << 3,       /* draining after CPU offline */
+};
 
+enum worker_flags {
        /* worker flags */
        WORKER_DIE              = 1 << 1,       /* die die die */
        WORKER_IDLE             = 1 << 2,       /* is idle */
@@ -83,7 +95,13 @@ enum {
 
        WORKER_NOT_RUNNING      = WORKER_PREP | WORKER_CPU_INTENSIVE |
                                  WORKER_UNBOUND | WORKER_REBOUND,
+};
+
+enum work_cancel_flags {
+       WORK_CANCEL_DELAYED     = 1 << 0,       /* canceling a delayed_work */
+};
 
+enum wq_internal_consts {
        NR_STD_WORKER_POOLS     = 2,            /* # standard pools per cpu */
 
        UNBOUND_POOL_HASH_ORDER = 6,            /* hashed by pool->attrs */
@@ -105,9 +123,17 @@ enum {
        RESCUER_NICE_LEVEL      = MIN_NICE,
        HIGHPRI_NICE_LEVEL      = MIN_NICE,
 
-       WQ_NAME_LEN             = 24,
+       WQ_NAME_LEN             = 32,
 };
 
+/*
+ * We don't want to trap softirq for too long. See MAX_SOFTIRQ_TIME and
+ * MAX_SOFTIRQ_RESTART in kernel/softirq.c. These are macros because
+ * msecs_to_jiffies() can't be an initializer.
+ */
+#define BH_WORKER_JIFFIES      msecs_to_jiffies(2)
+#define BH_WORKER_RESTARTS     10
+
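/*
 * A minimal sketch (not part of this diff) of how a BH execution loop can
 * bound itself with the limits above: run batches of work items until either
 * BH_WORKER_RESTARTS batches have executed or BH_WORKER_JIFFIES has elapsed,
 * then return so the softirq can be re-raised and other BH users get a turn.
 * bh_worker_loop_sketch() is a hypothetical name.
 */
static void bh_worker_loop_sketch(struct worker_pool *pool)
{
        int nr_restarts = BH_WORKER_RESTARTS;
        unsigned long end = jiffies + BH_WORKER_JIFFIES;

        do {
                /* ... pop and execute work items from pool->worklist ... */
        } while (!list_empty(&pool->worklist) &&
                 --nr_restarts && time_before(jiffies, end));
}
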
 /*
  * Structure fields follow one of the following exclusion rules.
  *
@@ -119,10 +145,14 @@ enum {
  *
  * L: pool->lock protected.  Access with pool->lock held.
  *
- * X: During normal operation, modification requires pool->lock and should
- *    be done only from local cpu.  Either disabling preemption on local
- *    cpu or grabbing pool->lock is enough for read access.  If
- *    POOL_DISASSOCIATED is set, it's identical to L.
+ * LN: pool->lock and wq_node_nr_active->lock protected for writes. Either
+ *     lock is enough for reads.
+ *
+ * K: Only modified by worker while holding pool->lock. Can be safely read by
+ *    self, while holding pool->lock or from IRQ context if %current is the
+ *    kworker.
+ *
+ * S: Only modified by worker self.
  *
  * A: wq_pool_attach_mutex protected.
  *
@@ -139,28 +169,44 @@ enum {
  *
  * WR: wq->mutex protected for writes.  RCU protected for reads.
  *
+ * WO: wq->mutex protected for writes. Updated with WRITE_ONCE() and can be read
+ *     with READ_ONCE() without locking.
+ *
  * MD: wq_mayday_lock protected.
+ *
+ * WD: Used internally by the watchdog.
  */
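
/*
 * A sketch of the WO rule above, using wq->max_active (declared WO further
 * below): writers update under wq->mutex with WRITE_ONCE() and lockless
 * readers pair with READ_ONCE(). The *_sketch() helpers are hypothetical.
 */
static void wq_set_max_active_sketch(struct workqueue_struct *wq, int max)
{
        lockdep_assert_held(&wq->mutex);        /* WO: writes take wq->mutex */
        WRITE_ONCE(wq->max_active, max);
}

static int wq_read_max_active_sketch(struct workqueue_struct *wq)
{
        return READ_ONCE(wq->max_active);       /* WO: lockless read is fine */
}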
 
 /* struct worker is defined in workqueue_internal.h */
 
 struct worker_pool {
-       spinlock_t              lock;           /* the pool lock */
+       raw_spinlock_t          lock;           /* the pool lock */
        int                     cpu;            /* I: the associated cpu */
        int                     node;           /* I: the associated node ID */
        int                     id;             /* I: pool ID */
-       unsigned int            flags;          /* X: flags */
+       unsigned int            flags;          /* L: flags */
 
        unsigned long           watchdog_ts;    /* L: watchdog timestamp */
+       bool                    cpu_stall;      /* WD: stalled cpu bound pool */
+
+       /*
+        * The counter is incremented in a process context on the associated CPU
+        * w/ preemption disabled, and decremented or reset in the same context
+        * but w/ pool->lock held. Readers grab pool->lock and are thus
+        * guaranteed to observe whether the counter has reached zero.
+        */
+       int                     nr_running;
 
        struct list_head        worklist;       /* L: list of pending works */
 
        int                     nr_workers;     /* L: total number of workers */
        int                     nr_idle;        /* L: currently idle workers */
 
-       struct list_head        idle_list;      /* X: list of idle workers */
+       struct list_head        idle_list;      /* L: list of idle workers */
        struct timer_list       idle_timer;     /* L: worker idle timeout */
-       struct timer_list       mayday_timer;   /* L: SOS timer for workers */
+       struct work_struct      idle_cull_work; /* L: worker idle cleanup */
+
+       struct timer_list       mayday_timer;     /* L: SOS timer for workers */
 
        /* a worker is either on busy_hash or idle_list, or the manager */
        DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
@@ -168,6 +214,7 @@ struct worker_pool {
 
        struct worker           *manager;       /* L: purely informational */
        struct list_head        workers;        /* A: attached workers */
+       struct list_head        dying_workers;  /* A: workers about to die */
        struct completion       *detach_completion; /* all workers detached */
 
        struct ida              worker_ida;     /* worker IDs for task name */
@@ -176,22 +223,32 @@ struct worker_pool {
        struct hlist_node       hash_node;      /* PL: unbound_pool_hash node */
        int                     refcnt;         /* PL: refcnt for unbound pools */
 
-       /*
-        * The current concurrency level.  As it's likely to be accessed
-        * from other CPUs during try_to_wake_up(), put it in a separate
-        * cacheline.
-        */
-       atomic_t                nr_running ____cacheline_aligned_in_smp;
-
        /*
         * Destruction of pool is RCU protected to allow dereferences
         * from get_work_pool().
         */
        struct rcu_head         rcu;
-} ____cacheline_aligned_in_smp;
+};
+
+/*
+ * Per-pool_workqueue statistics. These can be monitored using
+ * tools/workqueue/wq_monitor.py.
+ */
+enum pool_workqueue_stats {
+       PWQ_STAT_STARTED,       /* work items started execution */
+       PWQ_STAT_COMPLETED,     /* work items completed execution */
+       PWQ_STAT_CPU_TIME,      /* total CPU time consumed */
+       PWQ_STAT_CPU_INTENSIVE, /* wq_cpu_intensive_thresh_us violations */
+       PWQ_STAT_CM_WAKEUP,     /* concurrency-management worker wakeups */
+       PWQ_STAT_REPATRIATED,   /* unbound workers brought back into scope */
+       PWQ_STAT_MAYDAY,        /* maydays to rescuer */
+       PWQ_STAT_RESCUED,       /* linked work items executed by rescuer */
+
+       PWQ_NR_STATS,
+};
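
/*
 * Usage note: these counters are reported by tools/workqueue/wq_monitor.py.
 * For example, "tools/workqueue/wq_monitor.py events" prints the running
 * totals for the "events" workqueues about once a second (see
 * Documentation/core-api/workqueue.rst for sample output).
 */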
 
 /*
- * The per-pool workqueue.  While queued, the lower WORK_STRUCT_FLAG_BITS
+ * The per-pool workqueue.  While queued, bits below WORK_PWQ_SHIFT
  * of work_struct->data are used for flags and the remaining high bits
  * point to the pwq; thus, pwqs need to be aligned at two's power of the
  * number of flag bits.
@@ -204,21 +261,41 @@ struct pool_workqueue {
        int                     refcnt;         /* L: reference count */
        int                     nr_in_flight[WORK_NR_COLORS];
                                                /* L: nr of in_flight works */
+       bool                    plugged;        /* L: execution suspended */
+
+       /*
+        * nr_active management and WORK_STRUCT_INACTIVE:
+        *
+        * When pwq->nr_active >= max_active, new work item is queued to
+        * pwq->inactive_works instead of pool->worklist and marked with
+        * WORK_STRUCT_INACTIVE.
+        *
+        * All work items marked with WORK_STRUCT_INACTIVE do not participate in
+        * nr_active and all work items in pwq->inactive_works are marked with
+        * WORK_STRUCT_INACTIVE. But not all WORK_STRUCT_INACTIVE work items are
+        * in pwq->inactive_works. Some of them are ready to run in
+        * pool->worklist or worker->scheduled. Those work items are only struct
+        * wq_barrier which is used for flush_work() and should not participate
+        * in nr_active. A non-barrier work item is marked with
+        * WORK_STRUCT_INACTIVE iff it is in pwq->inactive_works. (A sketch of
+        * this queueing decision follows the struct definition below.)
+        */
        int                     nr_active;      /* L: nr of active works */
-       int                     max_active;     /* L: max active works */
-       struct list_head        delayed_works;  /* L: delayed works */
+       struct list_head        inactive_works; /* L: inactive works */
+       struct list_head        pending_node;   /* LN: node on wq_node_nr_active->pending_pwqs */
        struct list_head        pwqs_node;      /* WR: node on wq->pwqs */
        struct list_head        mayday_node;    /* MD: node on wq->maydays */
 
+       u64                     stats[PWQ_NR_STATS];
+
        /*
-        * Release of unbound pwq is punted to system_wq.  See put_pwq()
-        * and pwq_unbound_release_workfn() for details.  pool_workqueue
-        * itself is also RCU protected so that the first pwq can be
-        * determined without grabbing wq->mutex.
+        * Release of unbound pwq is punted to a kthread_worker. See put_pwq()
+        * and pwq_release_workfn() for details. pool_workqueue itself is also
+        * RCU protected so that the first pwq can be determined without
+        * grabbing wq->mutex.
         */
-       struct work_struct      unbound_release_work;
+       struct kthread_work     release_work;
        struct rcu_head         rcu;
-} __aligned(1 << WORK_STRUCT_FLAG_BITS);
+} __aligned(1 << WORK_STRUCT_PWQ_SHIFT);
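
/*
 * A simplified sketch (not the code added by this diff) of the queueing
 * decision described in the nr_active comment above: a new work item either
 * becomes active right away or is parked on pwq->inactive_works with
 * WORK_STRUCT_INACTIVE set. The real path also consults the per-node
 * nr_active counts introduced below.
 */
static void queue_work_sketch(struct pool_workqueue *pwq,
                              struct work_struct *work)
{
        if (pwq->nr_active < READ_ONCE(pwq->wq->max_active)) {
                pwq->nr_active++;
                list_add_tail(&work->entry, &pwq->pool->worklist);
        } else {
                *work_data_bits(work) |= WORK_STRUCT_INACTIVE;
                list_add_tail(&work->entry, &pwq->inactive_works);
        }
}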
 
 /*
  * Structure used to wait for workqueue flush.
@@ -231,6 +308,26 @@ struct wq_flusher {
 
 struct wq_device;
 
+/*
+ * Unlike in a per-cpu workqueue where max_active limits its concurrency level
+ * on each CPU, in an unbound workqueue, max_active applies to the whole system.
+ * As sharing a single nr_active across multiple sockets can be very expensive,
+ * the counting and enforcement is per NUMA node.
+ *
+ * The following struct is used to enforce per-node max_active. When a pwq wants
+ * to start executing a work item, it should increment ->nr using
+ * tryinc_node_nr_active(). If acquisition fails due to ->nr already being over
+ * ->max, the pwq is queued on ->pending_pwqs. As in-flight work items finish
+ * and decrement ->nr, node_activate_pending_pwq() activates the pending pwqs in
+ * round-robin order.
+ */
+struct wq_node_nr_active {
+       int                     max;            /* per-node max_active */
+       atomic_t                nr;             /* per-node nr_active */
+       raw_spinlock_t          lock;           /* nests inside pool locks */
+       struct list_head        pending_pwqs;   /* LN: pwqs with inactive works */
+};
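
/*
 * A sketch of the acquisition step described above; the actual
 * tryinc_node_nr_active() appears later in this file. ->nr is bumped only if
 * it is still below ->max; otherwise the caller queues its pwq on
 * ->pending_pwqs and retries as in-flight work items retire.
 */
static bool tryinc_node_nr_active_sketch(struct wq_node_nr_active *nna)
{
        int max = READ_ONCE(nna->max);

        while (true) {
                int old = atomic_read(&nna->nr);

                if (old >= max)
                        return false;
                if (atomic_cmpxchg_relaxed(&nna->nr, old, old + 1) == old)
                        return true;
        }
}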
+
 /*
  * The externally visible workqueue.  It relays the issued work items to
  * the appropriate worker_pool through its pool_workqueues.
@@ -248,13 +345,18 @@ struct workqueue_struct {
        struct list_head        flusher_overflow; /* WQ: flush overflow list */
 
        struct list_head        maydays;        /* MD: pwqs requesting rescue */
-       struct worker           *rescuer;       /* I: rescue worker */
+       struct worker           *rescuer;       /* MD: rescue worker */
 
        int                     nr_drainers;    /* WQ: drain in progress */
-       int                     saved_max_active; /* WQ: saved pwq max_active */
+
+       /* See alloc_workqueue() function comment for info on min/max_active */
+       int                     max_active;     /* WO: max active works */
+       int                     min_active;     /* WO: min active works */
+       int                     saved_max_active; /* WQ: saved max_active */
+       int                     saved_min_active; /* WQ: saved min_active */
 
        struct workqueue_attrs  *unbound_attrs; /* PW: only for unbound wqs */
-       struct pool_workqueue   *dfl_pwq;       /* PW: only for unbound wqs */
+       struct pool_workqueue __rcu *dfl_pwq;   /* PW: only for unbound wqs */
 
 #ifdef CONFIG_SYSFS
        struct wq_device        *wq_dev;        /* I: for sysfs interface */
@@ -275,40 +377,80 @@ struct workqueue_struct {
 
        /* hot fields used during command issue, aligned to cacheline */
        unsigned int            flags ____cacheline_aligned; /* WQ: WQ_* flags */
-       struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwqs */
-       struct pool_workqueue __rcu *numa_pwq_tbl[]; /* PWR: unbound pwqs indexed by node */
+       struct pool_workqueue __percpu __rcu **cpu_pwq; /* I: per-cpu pwqs */
+       struct wq_node_nr_active *node_nr_active[]; /* I: per-node nr_active */
 };
 
-static struct kmem_cache *pwq_cache;
+/*
+ * Each pod type describes how CPUs should be grouped for unbound workqueues.
+ * See the comment above workqueue_attrs->affn_scope.
+ */
+struct wq_pod_type {
+       int                     nr_pods;        /* number of pods */
+       cpumask_var_t           *pod_cpus;      /* pod -> cpus */
+       int                     *pod_node;      /* pod -> node */
+       int                     *cpu_pod;       /* cpu -> pod */
+};
 
-static cpumask_var_t *wq_numa_possible_cpumask;
-                                       /* possible CPUs of each node */
+static const char *wq_affn_names[WQ_AFFN_NR_TYPES] = {
+       [WQ_AFFN_DFL]           = "default",
+       [WQ_AFFN_CPU]           = "cpu",
+       [WQ_AFFN_SMT]           = "smt",
+       [WQ_AFFN_CACHE]         = "cache",
+       [WQ_AFFN_NUMA]          = "numa",
+       [WQ_AFFN_SYSTEM]        = "system",
+};
 
-static bool wq_disable_numa;
-module_param_named(disable_numa, wq_disable_numa, bool, 0444);
+/*
+ * Per-cpu work items which run for longer than the following threshold are
+ * automatically considered CPU intensive and excluded from concurrency
+ * management to prevent them from noticeably delaying other per-cpu work items.
+ * ULONG_MAX indicates that the user hasn't overridden it with a boot parameter.
+ * The actual value is initialized in wq_cpu_intensive_thresh_init().
+ */
+static unsigned long wq_cpu_intensive_thresh_us = ULONG_MAX;
+module_param_named(cpu_intensive_thresh_us, wq_cpu_intensive_thresh_us, ulong, 0644);
+#ifdef CONFIG_WQ_CPU_INTENSIVE_REPORT
+static unsigned int wq_cpu_intensive_warning_thresh = 4;
+module_param_named(cpu_intensive_warning_thresh, wq_cpu_intensive_warning_thresh, uint, 0644);
+#endif
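
/*
 * Usage note: via the module params above, the threshold can be set at boot
 * with "workqueue.cpu_intensive_thresh_us=<usecs>" or adjusted at runtime
 * through /sys/module/workqueue/parameters/cpu_intensive_thresh_us; setting
 * it to 0 disables the detection (see wq_worker_tick() below).
 */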
 
 /* see the comment above the definition of WQ_POWER_EFFICIENT */
 static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
 module_param_named(power_efficient, wq_power_efficient, bool, 0444);
 
 static bool wq_online;                 /* can kworkers be created yet? */
+static bool wq_topo_initialized __read_mostly = false;
+
+static struct kmem_cache *pwq_cache;
 
-static bool wq_numa_enabled;           /* unbound NUMA affinity enabled */
+static struct wq_pod_type wq_pod_types[WQ_AFFN_NR_TYPES];
+static enum wq_affn_scope wq_affn_dfl = WQ_AFFN_CACHE;
 
-/* buf for wq_update_unbound_numa_attrs(), protected by CPU hotplug exclusion */
-static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
+/* buf for wq_update_unbound_pod_attrs(), protected by CPU hotplug exclusion */
+static struct workqueue_attrs *wq_update_pod_attrs_buf;
 
 static DEFINE_MUTEX(wq_pool_mutex);    /* protects pools and workqueues list */
 static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
-static DEFINE_SPINLOCK(wq_mayday_lock);        /* protects wq->maydays list */
-static DECLARE_WAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */
+static DEFINE_RAW_SPINLOCK(wq_mayday_lock);    /* protects wq->maydays list */
+/* wait for manager to go away */
+static struct rcuwait manager_wait = __RCUWAIT_INITIALIZER(manager_wait);
 
 static LIST_HEAD(workqueues);          /* PR: list of all workqueues */
 static bool workqueue_freezing;                /* PL: have wqs started freezing? */
 
-/* PL: allowable cpus for unbound wqs and work items */
+/* PL&A: allowable cpus for unbound wqs and work items */
 static cpumask_var_t wq_unbound_cpumask;
 
+/* PL: user requested unbound cpumask via sysfs */
+static cpumask_var_t wq_requested_unbound_cpumask;
+
+/* PL: isolated cpumask to be excluded from unbound cpumask */
+static cpumask_var_t wq_isolated_cpumask;
+
+/* to further constrain wq_unbound_cpumask by a cmdline parameter */
+static struct cpumask wq_cmdline_cpumask __initdata;
+
 /* CPU where unbound work was last round robin scheduled from this CPU */
 static DEFINE_PER_CPU(int, wq_rr_cpu_last);
 
@@ -324,8 +466,17 @@ static bool wq_debug_force_rr_cpu = false;
 #endif
 module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);
 
+/* to raise softirq for the BH worker pools on other CPUs */
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_work [NR_STD_WORKER_POOLS],
+                                    bh_pool_irq_works);
+
+/* the BH worker pools */
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
+                                    bh_worker_pools);
+
 /* the per-cpu worker pools */
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], cpu_worker_pools);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
+                                    cpu_worker_pools);
 
 static DEFINE_IDR(worker_pool_idr);    /* PR: idr of all pools */
 
@@ -338,43 +489,62 @@ static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
 /* I: attributes used when instantiating ordered pools on demand */
 static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];
 
-struct workqueue_struct *system_wq __read_mostly;
+/*
+ * Used to synchronize multiple cancel_sync attempts on the same work item. See
+ * work_grab_pending() and __cancel_work_sync().
+ */
+static DECLARE_WAIT_QUEUE_HEAD(wq_cancel_waitq);
+
+/*
+ * I: kthread_worker to release pwq's. pwq release is initiated while holding
+ * a pool lock, so it must be bounced to a process context. Bounce to a
+ * dedicated kthread worker to avoid A-A deadlocks.
+ */
+static struct kthread_worker *pwq_release_worker __ro_after_init;
+
+struct workqueue_struct *system_wq __ro_after_init;
 EXPORT_SYMBOL(system_wq);
-struct workqueue_struct *system_highpri_wq __read_mostly;
+struct workqueue_struct *system_highpri_wq __ro_after_init;
 EXPORT_SYMBOL_GPL(system_highpri_wq);
-struct workqueue_struct *system_long_wq __read_mostly;
+struct workqueue_struct *system_long_wq __ro_after_init;
 EXPORT_SYMBOL_GPL(system_long_wq);
-struct workqueue_struct *system_unbound_wq __read_mostly;
+struct workqueue_struct *system_unbound_wq __ro_after_init;
 EXPORT_SYMBOL_GPL(system_unbound_wq);
-struct workqueue_struct *system_freezable_wq __read_mostly;
+struct workqueue_struct *system_freezable_wq __ro_after_init;
 EXPORT_SYMBOL_GPL(system_freezable_wq);
-struct workqueue_struct *system_power_efficient_wq __read_mostly;
+struct workqueue_struct *system_power_efficient_wq __ro_after_init;
 EXPORT_SYMBOL_GPL(system_power_efficient_wq);
-struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
+struct workqueue_struct *system_freezable_power_efficient_wq __ro_after_init;
 EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
+struct workqueue_struct *system_bh_wq;
+EXPORT_SYMBOL_GPL(system_bh_wq);
+struct workqueue_struct *system_bh_highpri_wq;
+EXPORT_SYMBOL_GPL(system_bh_highpri_wq);
 
 static int worker_thread(void *__worker);
 static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
+static void show_pwq(struct pool_workqueue *pwq);
+static void show_one_worker_pool(struct worker_pool *pool);
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/workqueue.h>
 
 #define assert_rcu_or_pool_mutex()                                     \
-       RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&                       \
+       RCU_LOCKDEP_WARN(!rcu_read_lock_any_held() &&                   \
                         !lockdep_is_held(&wq_pool_mutex),              \
                         "RCU or wq_pool_mutex should be held")
 
-#define assert_rcu_or_wq_mutex(wq)                                     \
-       RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&                       \
-                        !lockdep_is_held(&wq->mutex),                  \
-                        "RCU or wq->mutex should be held")
-
 #define assert_rcu_or_wq_mutex_or_pool_mutex(wq)                       \
-       RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&                       \
+       RCU_LOCKDEP_WARN(!rcu_read_lock_any_held() &&                   \
                         !lockdep_is_held(&wq->mutex) &&                \
                         !lockdep_is_held(&wq_pool_mutex),              \
                         "RCU, wq->mutex or wq_pool_mutex should be held")
 
+#define for_each_bh_worker_pool(pool, cpu)                             \
+       for ((pool) = &per_cpu(bh_worker_pools, cpu)[0];                \
+            (pool) < &per_cpu(bh_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
+            (pool)++)
+
 #define for_each_cpu_worker_pool(pool, cpu)                            \
        for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];               \
             (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
@@ -425,13 +595,12 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
  * ignored.
  */
 #define for_each_pwq(pwq, wq)                                          \
-       list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node)          \
-               if (({ assert_rcu_or_wq_mutex(wq); false; })) { }       \
-               else
+       list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node,          \
+                                lockdep_is_held(&(wq->mutex)))
 
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
 
-static struct debug_obj_descr work_debug_descr;
+static const struct debug_obj_descr work_debug_descr;
 
 static void *work_debug_hint(void *addr)
 {
@@ -481,7 +650,7 @@ static bool work_fixup_free(void *addr, enum debug_obj_state state)
        }
 }
 
-static struct debug_obj_descr work_debug_descr = {
+static const struct debug_obj_descr work_debug_descr = {
        .name           = "work_struct",
        .debug_hint     = work_debug_hint,
        .is_static_object = work_is_static_object,
@@ -527,7 +696,7 @@ static inline void debug_work_deactivate(struct work_struct *work) { }
 #endif
 
 /**
- * worker_pool_assign_id - allocate ID and assing it to @pool
+ * worker_pool_assign_id - allocate ID and assign it to @pool
  * @pool: the pool pointer of interest
  *
  * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
@@ -548,33 +717,34 @@ static int worker_pool_assign_id(struct worker_pool *pool)
        return ret;
 }
 
+static struct pool_workqueue __rcu **
+unbound_pwq_slot(struct workqueue_struct *wq, int cpu)
+{
+       if (cpu >= 0)
+               return per_cpu_ptr(wq->cpu_pwq, cpu);
+       else
+               return &wq->dfl_pwq;
+}
+
+/* @cpu < 0 for dfl_pwq */
+static struct pool_workqueue *unbound_pwq(struct workqueue_struct *wq, int cpu)
+{
+       return rcu_dereference_check(*unbound_pwq_slot(wq, cpu),
+                                    lockdep_is_held(&wq_pool_mutex) ||
+                                    lockdep_is_held(&wq->mutex));
+}
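
/*
 * A sketch of the slot/accessor pair above in use: a new pwq is published
 * into its slot with RCU so that lockless readers see either the old or the
 * new pointer. install_unbound_pwq_sketch() is hypothetical; the real
 * install path appears later in this file.
 */
static void install_unbound_pwq_sketch(struct workqueue_struct *wq, int cpu,
                                       struct pool_workqueue *pwq)
{
        lockdep_assert_held(&wq->mutex);
        rcu_assign_pointer(*unbound_pwq_slot(wq, cpu), pwq);
}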
+
 /**
- * unbound_pwq_by_node - return the unbound pool_workqueue for the given node
- * @wq: the target workqueue
- * @node: the node ID
- *
- * This must be called with any of wq_pool_mutex, wq->mutex or RCU
- * read locked.
- * If the pwq needs to be used beyond the locking in effect, the caller is
- * responsible for guaranteeing that the pwq stays online.
+ * unbound_effective_cpumask - effective cpumask of an unbound workqueue
+ * @wq: workqueue of interest
  *
- * Return: The unbound pool_workqueue for @node.
+ * @wq->unbound_attrs->cpumask contains the cpumask requested by the user which
+ * is masked with wq_unbound_cpumask to determine the effective cpumask. The
+ * default pwq is always mapped to the pool with the current effective cpumask.
  */
-static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
-                                                 int node)
+static struct cpumask *unbound_effective_cpumask(struct workqueue_struct *wq)
 {
-       assert_rcu_or_wq_mutex_or_pool_mutex(wq);
-
-       /*
-        * XXX: @node can be NUMA_NO_NODE if CPU goes offline while a
-        * delayed item is pending.  The plan is to keep CPU -> NODE
-        * mapping valid and stable across CPU on/offlines.  Once that
-        * happens, this workaround can be removed.
-        */
-       if (unlikely(node == NUMA_NO_NODE))
-               return wq->dfl_pwq;
-
-       return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
+       return unbound_pwq(wq, -1)->pool->attrs->__pod_cpumask;
 }
 
 static unsigned int work_color_to_flags(int color)
@@ -582,9 +752,9 @@ static unsigned int work_color_to_flags(int color)
        return color << WORK_STRUCT_COLOR_SHIFT;
 }
 
-static int get_work_color(struct work_struct *work)
+static int get_work_color(unsigned long work_data)
 {
-       return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
+       return (work_data >> WORK_STRUCT_COLOR_SHIFT) &
                ((1 << WORK_STRUCT_COLOR_BITS) - 1);
 }
 
@@ -598,10 +768,9 @@ static int work_next_color(int color)
  * contain the pointer to the queued pwq.  Once execution starts, the flag
  * is cleared and the high bits contain OFFQ flags and pool ID.
  *
- * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
- * and clear_work_data() can be used to set the pwq, pool or clear
- * work->data.  These functions should only be called while the work is
- * owned - ie. while the PENDING bit is set.
+ * set_work_pwq(), set_work_pool_and_clear_pending() and mark_work_canceling()
+ * can be used to set the pwq, pool or clear work->data. These functions should
+ * only be called while the work is owned - ie. while the PENDING bit is set.
  *
  * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
  * corresponding to a work.  Pool is available once the work has been
@@ -613,29 +782,28 @@ static int work_next_color(int color)
  * but stay off timer and worklist for arbitrarily long and nobody should
  * try to steal the PENDING bit.
  */
-static inline void set_work_data(struct work_struct *work, unsigned long data,
-                                unsigned long flags)
+static inline void set_work_data(struct work_struct *work, unsigned long data)
 {
        WARN_ON_ONCE(!work_pending(work));
-       atomic_long_set(&work->data, data | flags | work_static(work));
+       atomic_long_set(&work->data, data | work_static(work));
 }
 
 static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
-                        unsigned long extra_flags)
+                        unsigned long flags)
 {
-       set_work_data(work, (unsigned long)pwq,
-                     WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
+       set_work_data(work, (unsigned long)pwq | WORK_STRUCT_PENDING |
+                     WORK_STRUCT_PWQ | flags);
 }
 
 static void set_work_pool_and_keep_pending(struct work_struct *work,
-                                          int pool_id)
+                                          int pool_id, unsigned long flags)
 {
-       set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT,
-                     WORK_STRUCT_PENDING);
+       set_work_data(work, ((unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT) |
+                     WORK_STRUCT_PENDING | flags);
 }
 
 static void set_work_pool_and_clear_pending(struct work_struct *work,
-                                           int pool_id)
+                                           int pool_id, unsigned long flags)
 {
        /*
         * The following wmb is paired with the implied mb in
@@ -644,7 +812,8 @@ static void set_work_pool_and_clear_pending(struct work_struct *work,
         * owner.
         */
        smp_wmb();
-       set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
+       set_work_data(work, ((unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT) |
+                     flags);
        /*
         * The following mb guarantees that previous clear of a PENDING bit
         * will not be reordered with any speculative LOADS or STORES from
@@ -676,10 +845,9 @@ static void set_work_pool_and_clear_pending(struct work_struct *work,
        smp_mb();
 }
 
-static void clear_work_data(struct work_struct *work)
+static inline struct pool_workqueue *work_struct_pwq(unsigned long data)
 {
-       smp_wmb();      /* see set_work_pool_and_clear_pending() */
-       set_work_data(work, WORK_STRUCT_NO_POOL, 0);
+       return (struct pool_workqueue *)(data & WORK_STRUCT_PWQ_MASK);
 }
 
 static struct pool_workqueue *get_work_pwq(struct work_struct *work)
@@ -687,7 +855,7 @@ static struct pool_workqueue *get_work_pwq(struct work_struct *work)
        unsigned long data = atomic_long_read(&work->data);
 
        if (data & WORK_STRUCT_PWQ)
-               return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
+               return work_struct_pwq(data);
        else
                return NULL;
 }
@@ -715,8 +883,7 @@ static struct worker_pool *get_work_pool(struct work_struct *work)
        assert_rcu_or_pool_mutex();
 
        if (data & WORK_STRUCT_PWQ)
-               return ((struct pool_workqueue *)
-                       (data & WORK_STRUCT_WQ_DATA_MASK))->pool;
+               return work_struct_pwq(data)->pool;
 
        pool_id = data >> WORK_OFFQ_POOL_SHIFT;
        if (pool_id == WORK_OFFQ_POOL_NONE)
@@ -737,8 +904,7 @@ static int get_work_pool_id(struct work_struct *work)
        unsigned long data = atomic_long_read(&work->data);
 
        if (data & WORK_STRUCT_PWQ)
-               return ((struct pool_workqueue *)
-                       (data & WORK_STRUCT_WQ_DATA_MASK))->pool->id;
+               return work_struct_pwq(data)->pool->id;
 
        return data >> WORK_OFFQ_POOL_SHIFT;
 }
@@ -748,7 +914,7 @@ static void mark_work_canceling(struct work_struct *work)
        unsigned long pool_id = get_work_pool_id(work);
 
        pool_id <<= WORK_OFFQ_POOL_SHIFT;
-       set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING);
+       set_work_data(work, pool_id | WORK_STRUCT_PENDING | WORK_OFFQ_CANCELING);
 }
 
 static bool work_is_canceling(struct work_struct *work)
@@ -764,11 +930,6 @@ static bool work_is_canceling(struct work_struct *work)
  * they're being called with pool->lock held.
  */
 
-static bool __need_more_worker(struct worker_pool *pool)
-{
-       return !atomic_read(&pool->nr_running);
-}
-
 /*
  * Need to wake up a worker?  Called from anything but currently
  * running workers.
@@ -779,7 +940,7 @@ static bool __need_more_worker(struct worker_pool *pool)
  */
 static bool need_more_worker(struct worker_pool *pool)
 {
-       return !list_empty(&pool->worklist) && __need_more_worker(pool);
+       return !list_empty(&pool->worklist) && !pool->nr_running;
 }
 
 /* Can I start working?  Called from busy but !running workers. */
@@ -791,8 +952,7 @@ static bool may_start_working(struct worker_pool *pool)
 /* Do I need to keep working?  Called from currently running workers. */
 static bool keep_working(struct worker_pool *pool)
 {
-       return !list_empty(&pool->worklist) &&
-               atomic_read(&pool->nr_running) <= 1;
+       return !list_empty(&pool->worklist) && (pool->nr_running <= 1);
 }
 
 /* Do we need a new worker?  Called from manager. */
@@ -811,152 +971,23 @@ static bool too_many_workers(struct worker_pool *pool)
        return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
 }
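
/*
 * Worked example: MAX_IDLE_WORKERS_RATIO is 4 elsewhere in this file, so
 * beyond the 2 idle workers that are always kept, at most 1 idle worker is
 * tolerated per 4 busy ones. With nr_busy == 8, the pool counts as
 * over-populated once nr_idle reaches 4: (4 - 2) * 4 >= 8.
 */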
 
-/*
- * Wake up functions.
- */
-
-/* Return the first idle worker.  Safe with preemption disabled */
-static struct worker *first_idle_worker(struct worker_pool *pool)
-{
-       if (unlikely(list_empty(&pool->idle_list)))
-               return NULL;
-
-       return list_first_entry(&pool->idle_list, struct worker, entry);
-}
-
-/**
- * wake_up_worker - wake up an idle worker
- * @pool: worker pool to wake worker from
- *
- * Wake up the first idle worker of @pool.
- *
- * CONTEXT:
- * spin_lock_irq(pool->lock).
- */
-static void wake_up_worker(struct worker_pool *pool)
-{
-       struct worker *worker = first_idle_worker(pool);
-
-       if (likely(worker))
-               wake_up_process(worker->task);
-}
-
-/**
- * wq_worker_running - a worker is running again
- * @task: task waking up
- *
- * This function is called when a worker returns from schedule()
- */
-void wq_worker_running(struct task_struct *task)
-{
-       struct worker *worker = kthread_data(task);
-
-       if (!worker->sleeping)
-               return;
-       if (!(worker->flags & WORKER_NOT_RUNNING))
-               atomic_inc(&worker->pool->nr_running);
-       worker->sleeping = 0;
-}
-
-/**
- * wq_worker_sleeping - a worker is going to sleep
- * @task: task going to sleep
- *
- * This function is called from schedule() when a busy worker is
- * going to sleep.
- */
-void wq_worker_sleeping(struct task_struct *task)
-{
-       struct worker *next, *worker = kthread_data(task);
-       struct worker_pool *pool;
-
-       /*
-        * Rescuers, which may not have all the fields set up like normal
-        * workers, also reach here, let's not access anything before
-        * checking NOT_RUNNING.
-        */
-       if (worker->flags & WORKER_NOT_RUNNING)
-               return;
-
-       pool = worker->pool;
-
-       if (WARN_ON_ONCE(worker->sleeping))
-               return;
-
-       worker->sleeping = 1;
-       spin_lock_irq(&pool->lock);
-
-       /*
-        * The counterpart of the following dec_and_test, implied mb,
-        * worklist not empty test sequence is in insert_work().
-        * Please read comment there.
-        *
-        * NOT_RUNNING is clear.  This means that we're bound to and
-        * running on the local cpu w/ rq lock held and preemption
-        * disabled, which in turn means that none else could be
-        * manipulating idle_list, so dereferencing idle_list without pool
-        * lock is safe.
-        */
-       if (atomic_dec_and_test(&pool->nr_running) &&
-           !list_empty(&pool->worklist)) {
-               next = first_idle_worker(pool);
-               if (next)
-                       wake_up_process(next->task);
-       }
-       spin_unlock_irq(&pool->lock);
-}
-
-/**
- * wq_worker_last_func - retrieve worker's last work function
- * @task: Task to retrieve last work function of.
- *
- * Determine the last function a worker executed. This is called from
- * the scheduler to get a worker's last known identity.
- *
- * CONTEXT:
- * spin_lock_irq(rq->lock)
- *
- * This function is called during schedule() when a kworker is going
- * to sleep. It's used by psi to identify aggregation workers during
- * dequeuing, to allow periodic aggregation to shut-off when that
- * worker is the last task in the system or cgroup to go to sleep.
- *
- * As this function doesn't involve any workqueue-related locking, it
- * only returns stable values when called from inside the scheduler's
- * queuing and dequeuing paths, when @task, which must be a kworker,
- * is guaranteed to not be processing any works.
- *
- * Return:
- * The last work function %current executed as a worker, NULL if it
- * hasn't executed any work yet.
- */
-work_func_t wq_worker_last_func(struct task_struct *task)
-{
-       struct worker *worker = kthread_data(task);
-
-       return worker->last_func;
-}
-
 /**
  * worker_set_flags - set worker flags and adjust nr_running accordingly
  * @worker: self
  * @flags: flags to set
  *
  * Set @flags in @worker->flags and adjust nr_running accordingly.
- *
- * CONTEXT:
- * spin_lock_irq(pool->lock)
  */
 static inline void worker_set_flags(struct worker *worker, unsigned int flags)
 {
        struct worker_pool *pool = worker->pool;
 
-       WARN_ON_ONCE(worker->task != current);
+       lockdep_assert_held(&pool->lock);
 
        /* If transitioning into NOT_RUNNING, adjust nr_running. */
        if ((flags & WORKER_NOT_RUNNING) &&
            !(worker->flags & WORKER_NOT_RUNNING)) {
-               atomic_dec(&pool->nr_running);
+               pool->nr_running--;
        }
 
        worker->flags |= flags;
@@ -968,16 +999,13 @@ static inline void worker_set_flags(struct worker *worker, unsigned int flags)
  * @flags: flags to clear
  *
  * Clear @flags in @worker->flags and adjust nr_running accordingly.
- *
- * CONTEXT:
- * spin_lock_irq(pool->lock)
  */
 static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
 {
        struct worker_pool *pool = worker->pool;
        unsigned int oflags = worker->flags;
 
-       WARN_ON_ONCE(worker->task != current);
+       lockdep_assert_held(&pool->lock);
 
        worker->flags &= ~flags;
 
@@ -988,7 +1016,70 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
         */
        if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
                if (!(worker->flags & WORKER_NOT_RUNNING))
-                       atomic_inc(&pool->nr_running);
+                       pool->nr_running++;
+}
+
+/* Return the first idle worker.  Called with pool->lock held. */
+static struct worker *first_idle_worker(struct worker_pool *pool)
+{
+       if (unlikely(list_empty(&pool->idle_list)))
+               return NULL;
+
+       return list_first_entry(&pool->idle_list, struct worker, entry);
+}
+
+/**
+ * worker_enter_idle - enter idle state
+ * @worker: worker which is entering idle state
+ *
+ * @worker is entering idle state.  Update stats and idle timer if
+ * necessary.
+ *
+ * LOCKING:
+ * raw_spin_lock_irq(pool->lock).
+ */
+static void worker_enter_idle(struct worker *worker)
+{
+       struct worker_pool *pool = worker->pool;
+
+       if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
+           WARN_ON_ONCE(!list_empty(&worker->entry) &&
+                        (worker->hentry.next || worker->hentry.pprev)))
+               return;
+
+       /* can't use worker_set_flags(), also called from create_worker() */
+       worker->flags |= WORKER_IDLE;
+       pool->nr_idle++;
+       worker->last_active = jiffies;
+
+       /* idle_list is LIFO */
+       list_add(&worker->entry, &pool->idle_list);
+
+       if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
+               mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
+
+       /* Sanity check nr_running. */
+       WARN_ON_ONCE(pool->nr_workers == pool->nr_idle && pool->nr_running);
+}
+
+/**
+ * worker_leave_idle - leave idle state
+ * @worker: worker which is leaving idle state
+ *
+ * @worker is leaving idle state.  Update stats.
+ *
+ * LOCKING:
+ * raw_spin_lock_irq(pool->lock).
+ */
+static void worker_leave_idle(struct worker *worker)
+{
+       struct worker_pool *pool = worker->pool;
+
+       if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
+               return;
+       worker_clr_flags(worker, WORKER_IDLE);
+       pool->nr_idle--;
+       list_del_init(&worker->entry);
 }
 
 /**
@@ -1018,7 +1109,7 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
  * actually occurs, it should be easy to locate the culprit work function.
  *
  * CONTEXT:
- * spin_lock_irq(pool->lock).
+ * raw_spin_lock_irq(pool->lock).
  *
  * Return:
  * Pointer to worker which is executing @work if found, %NULL
@@ -1044,16 +1135,13 @@ static struct worker *find_worker_executing_work(struct worker_pool *pool,
  * @head: target list to append @work to
  * @nextp: out parameter for nested worklist walking
  *
- * Schedule linked works starting from @work to @head.  Work series to
- * be scheduled starts at @work and includes any consecutive work with
- * WORK_STRUCT_LINKED set in its predecessor.
- *
- * If @nextp is not NULL, it's updated to point to the next work of
- * the last scheduled work.  This allows move_linked_works() to be
- * nested inside outer list_for_each_entry_safe().
+ * Schedule linked works starting from @work to @head. Work series to be
+ * scheduled starts at @work and includes any consecutive work with
+ * WORK_STRUCT_LINKED set in its predecessor. See assign_work() for details on
+ * @nextp.
  *
  * CONTEXT:
- * spin_lock_irq(pool->lock).
+ * raw_spin_lock_irq(pool->lock).
  */
 static void move_linked_works(struct work_struct *work, struct list_head *head,
                              struct work_struct **nextp)
@@ -1080,108 +1168,855 @@ static void move_linked_works(struct work_struct *work, struct list_head *head,
 }
 
 /**
- * get_pwq - get an extra reference on the specified pool_workqueue
- * @pwq: pool_workqueue to get
+ * assign_work - assign a work item and its linked work items to a worker
+ * @work: work to assign
+ * @worker: worker to assign to
+ * @nextp: out parameter for nested worklist walking
  *
- * Obtain an extra reference on @pwq.  The caller should guarantee that
- * @pwq has positive refcnt and be holding the matching pool->lock.
+ * Assign @work and its linked work items to @worker. If @work is already being
+ * executed by another worker in the same pool, it'll be punted there.
+ *
+ * If @nextp is not NULL, it's updated to point to the next work of the last
+ * scheduled work. This allows assign_work() to be nested inside
+ * list_for_each_entry_safe().
+ *
+ * Returns %true if @work was successfully assigned to @worker. %false if @work
+ * was punted to another worker already executing it.
  */
-static void get_pwq(struct pool_workqueue *pwq)
+static bool assign_work(struct work_struct *work, struct worker *worker,
+                       struct work_struct **nextp)
 {
-       lockdep_assert_held(&pwq->pool->lock);
-       WARN_ON_ONCE(pwq->refcnt <= 0);
-       pwq->refcnt++;
-}
+       struct worker_pool *pool = worker->pool;
+       struct worker *collision;
 
-/**
- * put_pwq - put a pool_workqueue reference
- * @pwq: pool_workqueue to put
- *
+       lockdep_assert_held(&pool->lock);
+
+       /*
+        * A single work shouldn't be executed concurrently by multiple workers.
+        * __queue_work() ensures that @work doesn't jump to a different pool
+        * while still running in the previous pool. Here, we should ensure that
+        * @work is not executed concurrently by multiple workers from the same
+        * pool. Check whether anyone is already processing the work. If so,
+        * defer the work to the currently executing one.
+        */
+       collision = find_worker_executing_work(pool, work);
+       if (unlikely(collision)) {
+               move_linked_works(work, &collision->scheduled, nextp);
+               return false;
+       }
+
+       move_linked_works(work, &worker->scheduled, nextp);
+       return true;
+}
+
+static struct irq_work *bh_pool_irq_work(struct worker_pool *pool)
+{
+       int high = pool->attrs->nice == HIGHPRI_NICE_LEVEL ? 1 : 0;
+
+       return &per_cpu(bh_pool_irq_works, pool->cpu)[high];
+}
+
+static void kick_bh_pool(struct worker_pool *pool)
+{
+#ifdef CONFIG_SMP
+       /* see drain_dead_softirq_workfn() for BH_DRAINING */
+       if (unlikely(pool->cpu != smp_processor_id() &&
+                    !(pool->flags & POOL_BH_DRAINING))) {
+               irq_work_queue_on(bh_pool_irq_work(pool), pool->cpu);
+               return;
+       }
+#endif
+       if (pool->attrs->nice == HIGHPRI_NICE_LEVEL)
+               raise_softirq_irqoff(HI_SOFTIRQ);
+       else
+               raise_softirq_irqoff(TASKLET_SOFTIRQ);
+}
+
+/**
+ * kick_pool - wake up an idle worker if necessary
+ * @pool: pool to kick
+ *
+ * @pool may have pending work items. Wake up worker if necessary. Returns
+ * whether a worker was woken up.
+ */
+static bool kick_pool(struct worker_pool *pool)
+{
+       struct worker *worker = first_idle_worker(pool);
+       struct task_struct *p;
+
+       lockdep_assert_held(&pool->lock);
+
+       if (!need_more_worker(pool) || !worker)
+               return false;
+
+       if (pool->flags & POOL_BH) {
+               kick_bh_pool(pool);
+               return true;
+       }
+
+       p = worker->task;
+
+#ifdef CONFIG_SMP
+       /*
+        * Idle @worker is about to execute @work and waking up provides an
+        * opportunity to migrate @worker at a lower cost by setting the task's
+        * wake_cpu field. Let's see if we want to move @worker to improve
+        * execution locality.
+        *
+        * We're waking the worker that went idle the latest and there's some
+        * chance that @worker is marked idle but hasn't gone off CPU yet. If
+        * so, setting the wake_cpu won't do anything. As this is a best-effort
+        * optimization and the race window is narrow, let's leave as-is for
+        * now. If this becomes pronounced, we can skip over workers which are
+        * still on cpu when picking an idle worker.
+        *
+        * If @pool has non-strict affinity, @worker might have ended up outside
+        * its affinity scope. Repatriate.
+        */
+       if (!pool->attrs->affn_strict &&
+           !cpumask_test_cpu(p->wake_cpu, pool->attrs->__pod_cpumask)) {
+               struct work_struct *work = list_first_entry(&pool->worklist,
+                                               struct work_struct, entry);
+               p->wake_cpu = cpumask_any_distribute(pool->attrs->__pod_cpumask);
+               get_work_pwq(work)->stats[PWQ_STAT_REPATRIATED]++;
+       }
+#endif
+       wake_up_process(p);
+       return true;
+}
+
+#ifdef CONFIG_WQ_CPU_INTENSIVE_REPORT
+
+/*
+ * Concurrency-managed per-cpu work items that hog CPU for longer than
+ * wq_cpu_intensive_thresh_us trigger the automatic CPU_INTENSIVE mechanism,
+ * which prevents them from stalling other concurrency-managed work items. If a
+ * work function keeps triggering this mechanism, it's likely that the work item
+ * should be using an unbound workqueue instead.
+ *
+ * wq_cpu_intensive_report() tracks work functions which trigger such conditions
+ * and report them so that they can be examined and converted to use unbound
+ * workqueues as appropriate. To avoid flooding the console, each violating work
+ * function is tracked and reported with exponential backoff.
+ */
+#define WCI_MAX_ENTS 128
+
+struct wci_ent {
+       work_func_t             func;
+       atomic64_t              cnt;
+       struct hlist_node       hash_node;
+};
+
+static struct wci_ent wci_ents[WCI_MAX_ENTS];
+static int wci_nr_ents;
+static DEFINE_RAW_SPINLOCK(wci_lock);
+static DEFINE_HASHTABLE(wci_hash, ilog2(WCI_MAX_ENTS));
+
+static struct wci_ent *wci_find_ent(work_func_t func)
+{
+       struct wci_ent *ent;
+
+       hash_for_each_possible_rcu(wci_hash, ent, hash_node,
+                                  (unsigned long)func) {
+               if (ent->func == func)
+                       return ent;
+       }
+       return NULL;
+}
+
+static void wq_cpu_intensive_report(work_func_t func)
+{
+       struct wci_ent *ent;
+
+restart:
+       ent = wci_find_ent(func);
+       if (ent) {
+               u64 cnt;
+
+               /*
+                * Start reporting from the warning_thresh and back off
+                * exponentially.
+                */
+               cnt = atomic64_inc_return_relaxed(&ent->cnt);
+               if (wq_cpu_intensive_warning_thresh &&
+                   cnt >= wq_cpu_intensive_warning_thresh &&
+                   is_power_of_2(cnt + 1 - wq_cpu_intensive_warning_thresh))
+                       printk_deferred(KERN_WARNING "workqueue: %ps hogged CPU for >%luus %llu times, consider switching to WQ_UNBOUND\n",
+                                       ent->func, wq_cpu_intensive_thresh_us,
+                                       atomic64_read(&ent->cnt));
+               return;
+       }
+
+       /*
+        * @func is a new violation. Allocate a new entry for it. If wci_ents[]
+        * is exhausted, something went really wrong and we probably made enough
+        * noise already.
+        */
+       if (wci_nr_ents >= WCI_MAX_ENTS)
+               return;
+
+       raw_spin_lock(&wci_lock);
+
+       if (wci_nr_ents >= WCI_MAX_ENTS) {
+               raw_spin_unlock(&wci_lock);
+               return;
+       }
+
+       if (wci_find_ent(func)) {
+               raw_spin_unlock(&wci_lock);
+               goto restart;
+       }
+
+       ent = &wci_ents[wci_nr_ents++];
+       ent->func = func;
+       atomic64_set(&ent->cnt, 0);
+       hash_add_rcu(wci_hash, &ent->hash_node, (unsigned long)func);
+
+       raw_spin_unlock(&wci_lock);
+
+       goto restart;
+}
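
/*
 * Worked example of the backoff above: with the default
 * wq_cpu_intensive_warning_thresh of 4, is_power_of_2(cnt + 1 - 4) makes a
 * violating function get reported at cnt = 4, 5, 7, 11, 19, 35, ... so the
 * gap between consecutive reports doubles each time.
 */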
+
+#else  /* CONFIG_WQ_CPU_INTENSIVE_REPORT */
+static void wq_cpu_intensive_report(work_func_t func) {}
+#endif /* CONFIG_WQ_CPU_INTENSIVE_REPORT */
+
+/**
+ * wq_worker_running - a worker is running again
+ * @task: task waking up
+ *
+ * This function is called when a worker returns from schedule()
+ */
+void wq_worker_running(struct task_struct *task)
+{
+       struct worker *worker = kthread_data(task);
+
+       if (!READ_ONCE(worker->sleeping))
+               return;
+
+       /*
+        * If preempted by unbind_workers() between the WORKER_NOT_RUNNING check
+        * and the nr_running increment below, we may ruin the nr_running reset
+        * and leave with an unexpected pool->nr_running == 1 on the newly unbound
+        * pool. Protect against such race.
+        */
+       preempt_disable();
+       if (!(worker->flags & WORKER_NOT_RUNNING))
+               worker->pool->nr_running++;
+       preempt_enable();
+
+       /*
+        * CPU intensive auto-detection cares about how long a work item hogged
+        * CPU without sleeping. Reset the starting timestamp on wakeup.
+        */
+       worker->current_at = worker->task->se.sum_exec_runtime;
+
+       WRITE_ONCE(worker->sleeping, 0);
+}
+
+/**
+ * wq_worker_sleeping - a worker is going to sleep
+ * @task: task going to sleep
+ *
+ * This function is called from schedule() when a busy worker is
+ * going to sleep.
+ */
+void wq_worker_sleeping(struct task_struct *task)
+{
+       struct worker *worker = kthread_data(task);
+       struct worker_pool *pool;
+
+       /*
+        * Rescuers, which may not have all the fields set up like normal
+        * workers, also reach here, let's not access anything before
+        * checking NOT_RUNNING.
+        */
+       if (worker->flags & WORKER_NOT_RUNNING)
+               return;
+
+       pool = worker->pool;
+
+       /* Return if preempted before wq_worker_running() was reached */
+       if (READ_ONCE(worker->sleeping))
+               return;
+
+       WRITE_ONCE(worker->sleeping, 1);
+       raw_spin_lock_irq(&pool->lock);
+
+       /*
+        * Recheck in case unbind_workers() preempted us. We don't
+        * want to decrement nr_running after the worker is unbound
+        * and nr_running has been reset.
+        */
+       if (worker->flags & WORKER_NOT_RUNNING) {
+               raw_spin_unlock_irq(&pool->lock);
+               return;
+       }
+
+       pool->nr_running--;
+       if (kick_pool(pool))
+               worker->current_pwq->stats[PWQ_STAT_CM_WAKEUP]++;
+
+       raw_spin_unlock_irq(&pool->lock);
+}
+
+/**
+ * wq_worker_tick - a scheduler tick occurred while a kworker is running
+ * @task: task currently running
+ *
+ * Called from scheduler_tick(). We're in the IRQ context and the current
+ * worker's fields which follow the 'K' locking rule can be accessed safely.
+ */
+void wq_worker_tick(struct task_struct *task)
+{
+       struct worker *worker = kthread_data(task);
+       struct pool_workqueue *pwq = worker->current_pwq;
+       struct worker_pool *pool = worker->pool;
+
+       if (!pwq)
+               return;
+
+       pwq->stats[PWQ_STAT_CPU_TIME] += TICK_USEC;
+
+       if (!wq_cpu_intensive_thresh_us)
+               return;
+
+       /*
+        * If the current worker is concurrency managed and hogged the CPU for
+        * longer than wq_cpu_intensive_thresh_us, it's automatically marked
+        * CPU_INTENSIVE to avoid stalling other concurrency-managed work items.
+        *
+        * @worker->sleeping being set means that @worker is in the process of
+        * switching out voluntarily and won't be contributing to
+        * @pool->nr_running until it wakes up. As wq_worker_sleeping() also
+        * decrements ->nr_running, setting CPU_INTENSIVE here can lead to
+        * double decrements. The task is releasing the CPU anyway. Let's skip.
+        * We probably want to make this prettier in the future.
+        */
+       if ((worker->flags & WORKER_NOT_RUNNING) || READ_ONCE(worker->sleeping) ||
+           worker->task->se.sum_exec_runtime - worker->current_at <
+           wq_cpu_intensive_thresh_us * NSEC_PER_USEC)
+               return;
+
+       raw_spin_lock(&pool->lock);
+
+       worker_set_flags(worker, WORKER_CPU_INTENSIVE);
+       wq_cpu_intensive_report(worker->current_func);
+       pwq->stats[PWQ_STAT_CPU_INTENSIVE]++;
+
+       if (kick_pool(pool))
+               pwq->stats[PWQ_STAT_CM_WAKEUP]++;
+
+       raw_spin_unlock(&pool->lock);
+}
+
+/**
+ * wq_worker_last_func - retrieve worker's last work function
+ * @task: Task to retrieve last work function of.
+ *
+ * Determine the last function a worker executed. This is called from
+ * the scheduler to get a worker's last known identity.
+ *
+ * CONTEXT:
+ * raw_spin_lock_irq(rq->lock)
+ *
+ * This function is called during schedule() when a kworker is going
+ * to sleep. It's used by psi to identify aggregation workers during
+ * dequeuing, to allow periodic aggregation to shut-off when that
+ * worker is the last task in the system or cgroup to go to sleep.
+ *
+ * As this function doesn't involve any workqueue-related locking, it
+ * only returns stable values when called from inside the scheduler's
+ * queuing and dequeuing paths, when @task, which must be a kworker,
+ * is guaranteed to not be processing any works.
+ *
+ * Return:
+ * The last work function %current executed as a worker, NULL if it
+ * hasn't executed any work yet.
+ */
+work_func_t wq_worker_last_func(struct task_struct *task)
+{
+       struct worker *worker = kthread_data(task);
+
+       return worker->last_func;
+}
+
+/**
+ * wq_node_nr_active - Determine wq_node_nr_active to use
+ * @wq: workqueue of interest
+ * @node: NUMA node, can be %NUMA_NO_NODE
+ *
+ * Determine wq_node_nr_active to use for @wq on @node. Returns:
+ *
+ * - %NULL for per-cpu workqueues as they don't need to use shared nr_active.
+ *
+ * - node_nr_active[nr_node_ids] if @node is %NUMA_NO_NODE.
+ *
+ * - Otherwise, node_nr_active[@node].
+ */
+static struct wq_node_nr_active *wq_node_nr_active(struct workqueue_struct *wq,
+                                                  int node)
+{
+       if (!(wq->flags & WQ_UNBOUND))
+               return NULL;
+
+       if (node == NUMA_NO_NODE)
+               node = nr_node_ids;
+
+       return wq->node_nr_active[node];
+}
+
+/**
+ * wq_update_node_max_active - Update per-node max_actives to use
+ * @wq: workqueue to update
+ * @off_cpu: CPU that's going down, -1 if a CPU is not going down
+ *
+ * Update @wq->node_nr_active[]->max. @wq must be unbound. max_active is
+ * distributed among nodes according to the proportions of numbers of online
+ * cpus. The result is always between @wq->min_active and max_active.
+ */
+static void wq_update_node_max_active(struct workqueue_struct *wq, int off_cpu)
+{
+       struct cpumask *effective = unbound_effective_cpumask(wq);
+       int min_active = READ_ONCE(wq->min_active);
+       int max_active = READ_ONCE(wq->max_active);
+       int total_cpus, node;
+
+       lockdep_assert_held(&wq->mutex);
+
+       if (!wq_topo_initialized)
+               return;
+
+       if (off_cpu >= 0 && !cpumask_test_cpu(off_cpu, effective))
+               off_cpu = -1;
+
+       total_cpus = cpumask_weight_and(effective, cpu_online_mask);
+       if (off_cpu >= 0)
+               total_cpus--;
+
+       for_each_node(node) {
+               int node_cpus;
+
+               node_cpus = cpumask_weight_and(effective, cpumask_of_node(node));
+               if (off_cpu >= 0 && cpu_to_node(off_cpu) == node)
+                       node_cpus--;
+
+               wq_node_nr_active(wq, node)->max =
+                       clamp(DIV_ROUND_UP(max_active * node_cpus, total_cpus),
+                             min_active, max_active);
+       }
+
+       wq_node_nr_active(wq, NUMA_NO_NODE)->max = min_active;
+}
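
A worked example of the proportional split (hypothetical numbers, not taken
from the patch): with max_active = 16, min_active = 4, and an effective
cpumask spanning two nodes with 3 and 1 online CPUs:

	/*
	 * total_cpus = 4
	 * node0: 3 CPUs -> clamp(DIV_ROUND_UP(16 * 3, 4), 4, 16) = 12
	 * node1: 1 CPU  -> clamp(DIV_ROUND_UP(16 * 1, 4), 4, 16) = 4
	 * NUMA_NO_NODE slot (pools not bound to a node) -> min_active = 4
	 */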
+
+/**
+ * get_pwq - get an extra reference on the specified pool_workqueue
+ * @pwq: pool_workqueue to get
+ *
+ * Obtain an extra reference on @pwq.  The caller should guarantee that
+ * @pwq has positive refcnt and be holding the matching pool->lock.
+ */
+static void get_pwq(struct pool_workqueue *pwq)
+{
+       lockdep_assert_held(&pwq->pool->lock);
+       WARN_ON_ONCE(pwq->refcnt <= 0);
+       pwq->refcnt++;
+}
+
+/**
+ * put_pwq - put a pool_workqueue reference
+ * @pwq: pool_workqueue to put
+ *
  * Drop a reference of @pwq.  If its refcnt reaches zero, schedule its
  * destruction.  The caller should be holding the matching pool->lock.
  */
-static void put_pwq(struct pool_workqueue *pwq)
+static void put_pwq(struct pool_workqueue *pwq)
+{
+       lockdep_assert_held(&pwq->pool->lock);
+       if (likely(--pwq->refcnt))
+               return;
+       /*
+        * @pwq can't be released under pool->lock, bounce to a dedicated
+        * kthread_worker to avoid A-A deadlocks.
+        */
+       kthread_queue_work(pwq_release_worker, &pwq->release_work);
+}
+
+/**
+ * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock
+ * @pwq: pool_workqueue to put (can be %NULL)
+ *
+ * put_pwq() with locking.  This function also allows %NULL @pwq.
+ */
+static void put_pwq_unlocked(struct pool_workqueue *pwq)
+{
+       if (pwq) {
+               /*
+                * As both pwqs and pools are RCU protected, the
+                * following lock operations are safe.
+                */
+               raw_spin_lock_irq(&pwq->pool->lock);
+               put_pwq(pwq);
+               raw_spin_unlock_irq(&pwq->pool->lock);
+       }
+}
+
+static bool pwq_is_empty(struct pool_workqueue *pwq)
+{
+       return !pwq->nr_active && list_empty(&pwq->inactive_works);
+}
+
+static void __pwq_activate_work(struct pool_workqueue *pwq,
+                               struct work_struct *work)
+{
+       unsigned long *wdb = work_data_bits(work);
+
+       WARN_ON_ONCE(!(*wdb & WORK_STRUCT_INACTIVE));
+       trace_workqueue_activate_work(work);
+       if (list_empty(&pwq->pool->worklist))
+               pwq->pool->watchdog_ts = jiffies;
+       move_linked_works(work, &pwq->pool->worklist, NULL);
+       __clear_bit(WORK_STRUCT_INACTIVE_BIT, wdb);
+}
+
+/**
+ * pwq_activate_work - Activate a work item if inactive
+ * @pwq: pool_workqueue @work belongs to
+ * @work: work item to activate
+ *
+ * Returns %true if activated. %false if already active.
+ */
+static bool pwq_activate_work(struct pool_workqueue *pwq,
+                             struct work_struct *work)
+{
+       struct worker_pool *pool = pwq->pool;
+       struct wq_node_nr_active *nna;
+
+       lockdep_assert_held(&pool->lock);
+
+       if (!(*work_data_bits(work) & WORK_STRUCT_INACTIVE))
+               return false;
+
+       nna = wq_node_nr_active(pwq->wq, pool->node);
+       if (nna)
+               atomic_inc(&nna->nr);
+
+       pwq->nr_active++;
+       __pwq_activate_work(pwq, work);
+       return true;
+}
+
+static bool tryinc_node_nr_active(struct wq_node_nr_active *nna)
+{
+       int max = READ_ONCE(nna->max);
+
+       while (true) {
+               int old, tmp;
+
+               old = atomic_read(&nna->nr);
+               if (old >= max)
+                       return false;
+               tmp = atomic_cmpxchg_relaxed(&nna->nr, old, old + 1);
+               if (tmp == old)
+                       return true;
+       }
+}
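
The loop above is a standard bounded atomic increment. A sketch of an
equivalent body using atomic_try_cmpxchg_relaxed(), which reloads the old
value on failure instead of re-reading it explicitly, might look like:

	int old = atomic_read(&nna->nr);

	do {
		if (old >= max)
			return false;
	} while (!atomic_try_cmpxchg_relaxed(&nna->nr, &old, old + 1));
	return true;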
+
+/**
+ * pwq_tryinc_nr_active - Try to increment nr_active for a pwq
+ * @pwq: pool_workqueue of interest
+ * @fill: max_active may have increased, try to increase concurrency level
+ *
+ * Try to increment nr_active for @pwq. Returns %true if an nr_active count is
+ * successfully obtained. %false otherwise.
+ */
+static bool pwq_tryinc_nr_active(struct pool_workqueue *pwq, bool fill)
+{
+       struct workqueue_struct *wq = pwq->wq;
+       struct worker_pool *pool = pwq->pool;
+       struct wq_node_nr_active *nna = wq_node_nr_active(wq, pool->node);
+       bool obtained = false;
+
+       lockdep_assert_held(&pool->lock);
+
+       if (!nna) {
+               /* BH or per-cpu workqueue, pwq->nr_active is sufficient */
+               obtained = pwq->nr_active < READ_ONCE(wq->max_active);
+               goto out;
+       }
+
+       if (unlikely(pwq->plugged))
+               return false;
+
+       /*
+        * Unbound workqueue uses per-node shared nr_active $nna. If @pwq is
+        * already waiting on $nna, pwq_dec_nr_active() will maintain the
+        * concurrency level. Don't jump the line.
+        *
+        * We need to ignore the pending test after max_active has increased as
+        * pwq_dec_nr_active() can only maintain the concurrency level but not
+        * increase it. This is indicated by @fill.
+        */
+       if (!list_empty(&pwq->pending_node) && likely(!fill))
+               goto out;
+
+       obtained = tryinc_node_nr_active(nna);
+       if (obtained)
+               goto out;
+
+       /*
+        * Lockless acquisition failed. Lock, add ourself to $nna->pending_pwqs
+        * and try again. The smp_mb() is paired with the implied memory barrier
+        * of atomic_dec_return() in pwq_dec_nr_active() to ensure that either
+        * we see the decremented $nna->nr or they see non-empty
+        * $nna->pending_pwqs.
+        */
+       raw_spin_lock(&nna->lock);
+
+       if (list_empty(&pwq->pending_node))
+               list_add_tail(&pwq->pending_node, &nna->pending_pwqs);
+       else if (likely(!fill))
+               goto out_unlock;
+
+       smp_mb();
+
+       obtained = tryinc_node_nr_active(nna);
+
+       /*
+        * If @fill, @pwq might have already been pending. Being spuriously
+        * pending in cold paths doesn't affect anything. Let's leave it be.
+        */
+       if (obtained && likely(!fill))
+               list_del_init(&pwq->pending_node);
+
+out_unlock:
+       raw_spin_unlock(&nna->lock);
+out:
+       if (obtained)
+               pwq->nr_active++;
+       return obtained;
+}
+
+/**
+ * pwq_activate_first_inactive - Activate the first inactive work item on a pwq
+ * @pwq: pool_workqueue of interest
+ * @fill: max_active may have increased, try to increase concurrency level
+ *
+ * Activate the first inactive work item of @pwq if available and allowed by
+ * max_active limit.
+ *
+ * Returns %true if an inactive work item has been activated. %false if no
+ * inactive work item is found or max_active limit is reached.
+ */
+static bool pwq_activate_first_inactive(struct pool_workqueue *pwq, bool fill)
+{
+       struct work_struct *work =
+               list_first_entry_or_null(&pwq->inactive_works,
+                                        struct work_struct, entry);
+
+       if (work && pwq_tryinc_nr_active(pwq, fill)) {
+               __pwq_activate_work(pwq, work);
+               return true;
+       } else {
+               return false;
+       }
+}
+
+/**
+ * unplug_oldest_pwq - unplug the oldest pool_workqueue
+ * @wq: workqueue_struct where its oldest pwq is to be unplugged
+ *
+ * This function should only be called for ordered workqueues where only the
+ * oldest pwq is unplugged, the others are plugged to suspend execution to
+ * ensure proper work item ordering::
+ *
+ *    dfl_pwq --------------+     [P] - plugged
+ *                          |
+ *                          v
+ *    pwqs -> A -> B [P] -> C [P] (newest)
+ *            |    |        |
+ *            1    3        5
+ *            |    |        |
+ *            2    4        6
+ *
+ * When the oldest pwq is drained and removed, this function should be called
+ * to unplug the next oldest one to start its work item execution. Note that
+ * pwq's are linked into wq->pwqs with the oldest first, so the first one in
+ * the list is the oldest.
+ */
+static void unplug_oldest_pwq(struct workqueue_struct *wq)
 {
-       lockdep_assert_held(&pwq->pool->lock);
-       if (likely(--pwq->refcnt))
-               return;
-       if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND)))
-               return;
-       /*
-        * @pwq can't be released under pool->lock, bounce to
-        * pwq_unbound_release_workfn().  This never recurses on the same
-        * pool->lock as this path is taken only for unbound workqueues and
-        * the release work item is scheduled on a per-cpu workqueue.  To
-        * avoid lockdep warning, unbound pool->locks are given lockdep
-        * subclass of 1 in get_unbound_pool().
-        */
-       schedule_work(&pwq->unbound_release_work);
+       struct pool_workqueue *pwq;
+
+       lockdep_assert_held(&wq->mutex);
+
+       /* The caller must ensure that wq->pwqs isn't empty before calling. */
+       pwq = list_first_entry_or_null(&wq->pwqs, struct pool_workqueue,
+                                      pwqs_node);
+       raw_spin_lock_irq(&pwq->pool->lock);
+       if (pwq->plugged) {
+               pwq->plugged = false;
+               if (pwq_activate_first_inactive(pwq, true))
+                       kick_pool(pwq->pool);
+       }
+       raw_spin_unlock_irq(&pwq->pool->lock);
 }
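
For context, plugged pwqs only exist on ordered workqueues. A minimal sketch
of creating one with the standard API (illustrative, not part of this hunk):

	struct workqueue_struct *wq;

	/* an ordered wq executes at most one work item at any given time */
	wq = alloc_ordered_workqueue("example_ordered", 0);
	if (!wq)
		return -ENOMEM;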
 
 /**
- * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock
- * @pwq: pool_workqueue to put (can be %NULL)
+ * node_activate_pending_pwq - Activate a pending pwq on a wq_node_nr_active
+ * @nna: wq_node_nr_active to activate a pending pwq for
+ * @caller_pool: worker_pool the caller is locking
  *
- * put_pwq() with locking.  This function also allows %NULL @pwq.
+ * Activate a pwq in @nna->pending_pwqs. Called with @caller_pool locked.
+ * @caller_pool may be unlocked and relocked to lock other worker_pools.
  */
-static void put_pwq_unlocked(struct pool_workqueue *pwq)
+static void node_activate_pending_pwq(struct wq_node_nr_active *nna,
+                                     struct worker_pool *caller_pool)
 {
-       if (pwq) {
-               /*
-                * As both pwqs and pools are RCU protected, the
-                * following lock operations are safe.
-                */
-               spin_lock_irq(&pwq->pool->lock);
-               put_pwq(pwq);
-               spin_unlock_irq(&pwq->pool->lock);
+       struct worker_pool *locked_pool = caller_pool;
+       struct pool_workqueue *pwq;
+       struct work_struct *work;
+
+       lockdep_assert_held(&caller_pool->lock);
+
+       raw_spin_lock(&nna->lock);
+retry:
+       pwq = list_first_entry_or_null(&nna->pending_pwqs,
+                                      struct pool_workqueue, pending_node);
+       if (!pwq)
+               goto out_unlock;
+
+       /*
+        * If @pwq is for a different pool than @locked_pool, we need to lock
+        * @pwq->pool->lock. Let's trylock first. If unsuccessful, do the unlock
+        * / lock dance. For that, we also need to release @nna->lock as it's
+        * nested inside pool locks.
+        */
+       if (pwq->pool != locked_pool) {
+               raw_spin_unlock(&locked_pool->lock);
+               locked_pool = pwq->pool;
+               if (!raw_spin_trylock(&locked_pool->lock)) {
+                       raw_spin_unlock(&nna->lock);
+                       raw_spin_lock(&locked_pool->lock);
+                       raw_spin_lock(&nna->lock);
+                       goto retry;
+               }
        }
-}
 
-static void pwq_activate_delayed_work(struct work_struct *work)
-{
-       struct pool_workqueue *pwq = get_work_pwq(work);
+       /*
+        * $pwq may not have any inactive work items due to e.g. cancellations.
+        * Drop it from pending_pwqs and see if there's another one.
+        */
+       work = list_first_entry_or_null(&pwq->inactive_works,
+                                       struct work_struct, entry);
+       if (!work) {
+               list_del_init(&pwq->pending_node);
+               goto retry;
+       }
 
-       trace_workqueue_activate_work(work);
-       if (list_empty(&pwq->pool->worklist))
-               pwq->pool->watchdog_ts = jiffies;
-       move_linked_works(work, &pwq->pool->worklist, NULL);
-       __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
-       pwq->nr_active++;
+       /*
+        * Acquire an nr_active count and activate the inactive work item. If
+        * $pwq still has inactive work items, rotate it to the end of the
+        * pending_pwqs so that we round-robin through them. This means that
+        * inactive work items are not activated in queueing order, which is fine
+        * given that there has never been any ordering across different pwqs.
+        */
+       if (likely(tryinc_node_nr_active(nna))) {
+               pwq->nr_active++;
+               __pwq_activate_work(pwq, work);
+
+               if (list_empty(&pwq->inactive_works))
+                       list_del_init(&pwq->pending_node);
+               else
+                       list_move_tail(&pwq->pending_node, &nna->pending_pwqs);
+
+               /* if activating a foreign pool, make sure it's running */
+               if (pwq->pool != caller_pool)
+                       kick_pool(pwq->pool);
+       }
+
+out_unlock:
+       raw_spin_unlock(&nna->lock);
+       if (locked_pool != caller_pool) {
+               raw_spin_unlock(&locked_pool->lock);
+               raw_spin_lock(&caller_pool->lock);
+       }
 }
 
-static void pwq_activate_first_delayed(struct pool_workqueue *pwq)
+/**
+ * pwq_dec_nr_active - Retire an active count
+ * @pwq: pool_workqueue of interest
+ *
+ * Decrement @pwq's nr_active and try to activate the first inactive work item.
+ * For unbound workqueues, this function may temporarily drop @pwq->pool->lock.
+ */
+static void pwq_dec_nr_active(struct pool_workqueue *pwq)
 {
-       struct work_struct *work = list_first_entry(&pwq->delayed_works,
-                                                   struct work_struct, entry);
+       struct worker_pool *pool = pwq->pool;
+       struct wq_node_nr_active *nna = wq_node_nr_active(pwq->wq, pool->node);
+
+       lockdep_assert_held(&pool->lock);
+
+       /*
+        * @pwq->nr_active should be decremented for both percpu and unbound
+        * workqueues.
+        */
+       pwq->nr_active--;
+
+       /*
+        * For a percpu workqueue, this is simple: just activate the first
+        * inactive work item on @pwq itself.
+        */
+       if (!nna) {
+               pwq_activate_first_inactive(pwq, false);
+               return;
+       }
+
+       /*
+        * If @pwq is for an unbound workqueue, it's more complicated because
+        * multiple pwqs and pools may be sharing the nr_active count. When a
+        * pwq needs to wait for an nr_active count, it puts itself on
+        * $nna->pending_pwqs. The following atomic_dec_return()'s implied
+        * memory barrier is paired with smp_mb() in pwq_tryinc_nr_active() to
+        * guarantee that either we see non-empty pending_pwqs or they see
+        * decremented $nna->nr.
+        *
+        * $nna->max may change as CPUs come online/offline and @pwq->wq's
+        * max_active gets updated. However, it is guaranteed to be equal to or
+        * larger than @pwq->wq->min_active which is above zero unless freezing.
+        * This maintains the forward progress guarantee.
+        */
+       if (atomic_dec_return(&nna->nr) >= READ_ONCE(nna->max))
+               return;
 
-       pwq_activate_delayed_work(work);
+       if (!list_empty(&nna->pending_pwqs))
+               node_activate_pending_pwq(nna, pool);
 }
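
Schematically, the pairing between the two sides can be summarized as follows
(a restatement of the comments above, not new code in the patch):

	/*
	 *   pwq_tryinc_nr_active()           pwq_dec_nr_active()
	 *   ----------------------           -------------------
	 *   add pwq to nna->pending_pwqs     atomic_dec_return(&nna->nr)
	 *   smp_mb()                         ...implied full barrier...
	 *   retry tryinc_node_nr_active()    check nna->pending_pwqs
	 *
	 * At least one side must observe the other's write: either the tryinc
	 * side sees the decremented counter and succeeds, or the dec side sees
	 * the non-empty pending_pwqs and activates a waiter.
	 */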
 
 /**
  * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight
  * @pwq: pwq of interest
- * @color: color of work which left the queue
+ * @work_data: work_data of work which left the queue
  *
  * A work either has completed or is removed from pending queue,
  * decrement nr_in_flight of its pwq and handle workqueue flushing.
  *
+ * NOTE:
+ * For unbound workqueues, this function may temporarily drop @pwq->pool->lock
+ * and thus should be called after all other state updates for the in-flight
+ * work item are complete.
+ *
  * CONTEXT:
- * spin_lock_irq(pool->lock).
+ * raw_spin_lock_irq(pool->lock).
  */
-static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
+static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, unsigned long work_data)
 {
-       /* uncolored work items don't participate in flushing or nr_active */
-       if (color == WORK_NO_COLOR)
-               goto out_put;
+       int color = get_work_color(work_data);
 
-       pwq->nr_in_flight[color]--;
+       if (!(work_data & WORK_STRUCT_INACTIVE))
+               pwq_dec_nr_active(pwq);
 
-       pwq->nr_active--;
-       if (!list_empty(&pwq->delayed_works)) {
-               /* one down, submit a delayed one */
-               if (pwq->nr_active < pwq->max_active)
-                       pwq_activate_first_delayed(pwq);
-       }
+       pwq->nr_in_flight[color]--;
 
        /* is flush in progress and are we at the flushing tip? */
        if (likely(pwq->flush_color != color))
@@ -1207,18 +2042,21 @@ out_put:
 /**
  * try_to_grab_pending - steal work item from worklist and disable irq
  * @work: work item to steal
- * @is_dwork: @work is a delayed_work
- * @flags: place to store irq state
+ * @cflags: %WORK_CANCEL_ flags
+ * @irq_flags: place to store irq state
  *
  * Try to grab PENDING bit of @work.  This function can handle @work in any
  * stable state - idle, on timer or on worklist.
  *
  * Return:
+ *
+ *  ========   ================================================================
  *  1          if @work was pending and we successfully stole PENDING
  *  0          if @work was idle and we claimed PENDING
  *  -EAGAIN    if PENDING couldn't be grabbed at the moment, safe to busy-retry
  *  -ENOENT    if someone else is canceling @work, this state may persist
  *             for arbitrarily long
+ *  ========   ================================================================
  *
  * Note:
  * On >= 0 return, the caller owns @work's PENDING bit.  To avoid getting
@@ -1227,20 +2065,20 @@ out_put:
  * irqsafe, ensures that we return -EAGAIN for finite short period of time.
  *
  * On successful return, >= 0, irq is disabled and the caller is
- * responsible for releasing it using local_irq_restore(*@flags).
+ * responsible for releasing it using local_irq_restore(*@irq_flags).
  *
  * This function is safe to call from any context including IRQ handler.
  */
-static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
-                              unsigned long *flags)
+static int try_to_grab_pending(struct work_struct *work, u32 cflags,
+                              unsigned long *irq_flags)
 {
        struct worker_pool *pool;
        struct pool_workqueue *pwq;
 
-       local_irq_save(*flags);
+       local_irq_save(*irq_flags);
 
        /* try to steal the timer if it exists */
-       if (is_dwork) {
+       if (cflags & WORK_CANCEL_DELAYED) {
                struct delayed_work *dwork = to_delayed_work(work);
 
                /*
@@ -1265,7 +2103,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
        if (!pool)
                goto fail;
 
-       spin_lock(&pool->lock);
+       raw_spin_lock(&pool->lock);
        /*
         * work->data is guaranteed to point to pwq only while the work
         * item is queued on pwq->wq, and both updating work->data to point
@@ -1276,38 +2114,118 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
         */
        pwq = get_work_pwq(work);
        if (pwq && pwq->pool == pool) {
+               unsigned long work_data;
+
                debug_work_deactivate(work);
 
                /*
-                * A delayed work item cannot be grabbed directly because
-                * it might have linked NO_COLOR work items which, if left
-                * on the delayed_list, will confuse pwq->nr_active
+                * A cancelable inactive work item must be in the
+                * pwq->inactive_works since a queued barrier can't be
+                * canceled (see the comments in insert_wq_barrier()).
+                *
+                * An inactive work item cannot be grabbed directly because
+                * it might have linked barrier work items which, if left
+                * on the inactive_works list, will confuse pwq->nr_active
                 * management later on and cause stall.  Make sure the work
                 * item is activated before grabbing.
                 */
-               if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
-                       pwq_activate_delayed_work(work);
+               pwq_activate_work(pwq, work);
 
                list_del_init(&work->entry);
-               pwq_dec_nr_in_flight(pwq, get_work_color(work));
 
-               /* work->data points to pwq iff queued, point to pool */
-               set_work_pool_and_keep_pending(work, pool->id);
+               /*
+                * work->data points to pwq iff queued. Let's point to pool. As
+                * this destroys work->data needed by the next step, stash it.
+                */
+               work_data = *work_data_bits(work);
+               set_work_pool_and_keep_pending(work, pool->id, 0);
+
+               /* must be the last step, see the function comment */
+               pwq_dec_nr_in_flight(pwq, work_data);
 
-               spin_unlock(&pool->lock);
+               raw_spin_unlock(&pool->lock);
                rcu_read_unlock();
                return 1;
        }
-       spin_unlock(&pool->lock);
+       raw_spin_unlock(&pool->lock);
 fail:
        rcu_read_unlock();
-       local_irq_restore(*flags);
+       local_irq_restore(*irq_flags);
        if (work_is_canceling(work))
                return -ENOENT;
        cpu_relax();
        return -EAGAIN;
 }
 
+struct cwt_wait {
+       wait_queue_entry_t      wait;
+       struct work_struct      *work;
+};
+
+static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
+{
+       struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
+
+       if (cwait->work != key)
+               return 0;
+       return autoremove_wake_function(wait, mode, sync, key);
+}
+
+/**
+ * work_grab_pending - steal work item from worklist and disable irq
+ * @work: work item to steal
+ * @cflags: %WORK_CANCEL_ flags
+ * @irq_flags: place to store IRQ state
+ *
+ * Grab PENDING bit of @work. @work can be in any stable state - idle, on timer
+ * or on worklist.
+ *
+ * Must be called in process context. IRQ is disabled on return with IRQ state
+ * stored in *@irq_flags. The caller is responsible for re-enabling it using
+ * local_irq_restore().
+ *
+ * Returns %true if @work was pending. %false if idle.
+ */
+static bool work_grab_pending(struct work_struct *work, u32 cflags,
+                             unsigned long *irq_flags)
+{
+       struct cwt_wait cwait;
+       int ret;
+
+       might_sleep();
+repeat:
+       ret = try_to_grab_pending(work, cflags, irq_flags);
+       if (likely(ret >= 0))
+               return ret;
+       if (ret != -ENOENT)
+               goto repeat;
+
+       /*
+        * Someone is already canceling. Wait for it to finish. flush_work()
+        * doesn't work for PREEMPT_NONE because we may get woken up between
+        * @work's completion and the other canceling task resuming and clearing
+        * CANCELING - flush_work() will return false immediately as @work is no
+        * longer busy, try_to_grab_pending() will return -ENOENT as @work is
+        * still being canceled and the other canceling task won't be able to
+        * clear CANCELING as we're hogging the CPU.
+        *
+        * Let's wait for completion using a waitqueue. As this may lead to the
+        * thundering herd problem, use a custom wake function which matches
+        * @work along with exclusive wait and wakeup.
+        */
+       init_wait(&cwait.wait);
+       cwait.wait.func = cwt_wakefn;
+       cwait.work = work;
+
+       prepare_to_wait_exclusive(&wq_cancel_waitq, &cwait.wait,
+                                 TASK_UNINTERRUPTIBLE);
+       if (work_is_canceling(work))
+               schedule();
+       finish_wait(&wq_cancel_waitq, &cwait.wait);
+
+       goto repeat;
+}
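
For this exclusive wait to terminate, the canceling side has to issue a keyed
wakeup once it clears CANCELING, presumably along the lines of the following
sketch (the matching call site is not shown in this hunk):

	__wake_up(&wq_cancel_waitq, TASK_NORMAL, 1, work);

cwt_wakefn() then only wakes the waiter whose cwait.work matches the key,
avoiding a thundering herd when many cancellations are in flight.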
+
 /**
  * insert_work - insert a work into a pool
  * @pwq: pwq @work belongs to
@@ -1319,27 +2237,20 @@ fail:
  * work_struct flags.
  *
  * CONTEXT:
- * spin_lock_irq(pool->lock).
+ * raw_spin_lock_irq(pool->lock).
  */
 static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
                        struct list_head *head, unsigned int extra_flags)
 {
-       struct worker_pool *pool = pwq->pool;
+       debug_work_activate(work);
+
+       /* record the work call stack in order to print it in KASAN reports */
+       kasan_record_aux_stack_noalloc(work);
 
        /* we own @work, set data and link */
        set_work_pwq(work, pwq, extra_flags);
        list_add_tail(&work->entry, head);
        get_pwq(pwq);
-
-       /*
-        * Ensure either wq_worker_sleeping() sees the above
-        * list_add_tail() or we see zero nr_running to avoid workers lying
-        * around lazily while there are works to be processed.
-        */
-       smp_mb();
-
-       if (__need_more_worker(pool))
-               wake_up_worker(pool);
 }
 
 /*
@@ -1365,20 +2276,15 @@ static bool is_chained_work(struct workqueue_struct *wq)
  */
 static int wq_select_unbound_cpu(int cpu)
 {
-       static bool printed_dbg_warning;
        int new_cpu;
 
        if (likely(!wq_debug_force_rr_cpu)) {
                if (cpumask_test_cpu(cpu, wq_unbound_cpumask))
                        return cpu;
-       } else if (!printed_dbg_warning) {
-               pr_warn("workqueue: round-robin CPU selection forced, expect performance impact\n");
-               printed_dbg_warning = true;
+       } else {
+               pr_warn_once("workqueue: round-robin CPU selection forced, expect performance impact\n");
        }
 
-       if (cpumask_empty(wq_unbound_cpumask))
-               return cpu;
-
        new_cpu = __this_cpu_read(wq_rr_cpu_last);
        new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask);
        if (unlikely(new_cpu >= nr_cpu_ids)) {
@@ -1395,8 +2301,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
                         struct work_struct *work)
 {
        struct pool_workqueue *pwq;
-       struct worker_pool *last_pool;
-       struct list_head *worklist;
+       struct worker_pool *last_pool, *pool;
        unsigned int work_flags;
        unsigned int req_cpu = cpu;
 
@@ -1408,22 +2313,26 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
         */
        lockdep_assert_irqs_disabled();
 
-       debug_work_activate(work);
-
-       /* if draining, only works from the same workqueue are allowed */
-       if (unlikely(wq->flags & __WQ_DRAINING) &&
-           WARN_ON_ONCE(!is_chained_work(wq)))
+       /*
+        * For a draining wq, only works from the same workqueue are
+        * allowed. The __WQ_DESTROYING helps to spot the issue that
+        * queues a new work item to a wq after destroy_workqueue(wq).
+        */
+       if (unlikely(wq->flags & (__WQ_DESTROYING | __WQ_DRAINING) &&
+                    WARN_ON_ONCE(!is_chained_work(wq))))
                return;
        rcu_read_lock();
 retry:
-       if (req_cpu == WORK_CPU_UNBOUND)
-               cpu = wq_select_unbound_cpu(raw_smp_processor_id());
-
        /* pwq which will be used unless @work is executing elsewhere */
-       if (!(wq->flags & WQ_UNBOUND))
-               pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
-       else
-               pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
+       if (req_cpu == WORK_CPU_UNBOUND) {
+               if (wq->flags & WQ_UNBOUND)
+                       cpu = wq_select_unbound_cpu(raw_smp_processor_id());
+               else
+                       cpu = raw_smp_processor_id();
+       }
+
+       pwq = rcu_dereference(*per_cpu_ptr(wq->cpu_pwq, cpu));
+       pool = pwq->pool;
 
        /*
         * If @work was previously on a different pool, it might still be
@@ -1431,35 +2340,36 @@ retry:
         * pool to guarantee non-reentrancy.
         */
        last_pool = get_work_pool(work);
-       if (last_pool && last_pool != pwq->pool) {
+       if (last_pool && last_pool != pool) {
                struct worker *worker;
 
-               spin_lock(&last_pool->lock);
+               raw_spin_lock(&last_pool->lock);
 
                worker = find_worker_executing_work(last_pool, work);
 
                if (worker && worker->current_pwq->wq == wq) {
                        pwq = worker->current_pwq;
+                       pool = pwq->pool;
+                       WARN_ON_ONCE(pool != last_pool);
                } else {
                        /* meh... not running there, queue here */
-                       spin_unlock(&last_pool->lock);
-                       spin_lock(&pwq->pool->lock);
+                       raw_spin_unlock(&last_pool->lock);
+                       raw_spin_lock(&pool->lock);
                }
        } else {
-               spin_lock(&pwq->pool->lock);
+               raw_spin_lock(&pool->lock);
        }
 
        /*
-        * pwq is determined and locked.  For unbound pools, we could have
-        * raced with pwq release and it could already be dead.  If its
-        * refcnt is zero, repeat pwq selection.  Note that pwqs never die
-        * without another pwq replacing it in the numa_pwq_tbl or while
-        * work items are executing on it, so the retrying is guaranteed to
-        * make forward-progress.
+        * pwq is determined and locked. For unbound pools, we could have raced
+        * with pwq release and it could already be dead. If its refcnt is zero,
+        * repeat pwq selection. Note that unbound pwqs never die without
+        * another pwq replacing it in cpu_pwq or while work items are executing
+        * on it, so the retrying is guaranteed to make forward-progress.
         */
        if (unlikely(!pwq->refcnt)) {
                if (wq->flags & WQ_UNBOUND) {
-                       spin_unlock(&pwq->pool->lock);
+                       raw_spin_unlock(&pool->lock);
                        cpu_relax();
                        goto retry;
                }
@@ -1477,21 +2387,25 @@ retry:
        pwq->nr_in_flight[pwq->work_color]++;
        work_flags = work_color_to_flags(pwq->work_color);
 
-       if (likely(pwq->nr_active < pwq->max_active)) {
+       /*
+        * Limit the number of concurrently active work items to max_active.
+        * @work must also queue behind existing inactive work items to maintain
+        * ordering when max_active changes. See wq_adjust_max_active().
+        */
+       if (list_empty(&pwq->inactive_works) && pwq_tryinc_nr_active(pwq, false)) {
+               if (list_empty(&pool->worklist))
+                       pool->watchdog_ts = jiffies;
+
                trace_workqueue_activate_work(work);
-               pwq->nr_active++;
-               worklist = &pwq->pool->worklist;
-               if (list_empty(worklist))
-                       pwq->pool->watchdog_ts = jiffies;
+               insert_work(pwq, work, &pool->worklist, work_flags);
+               kick_pool(pool);
        } else {
-               work_flags |= WORK_STRUCT_DELAYED;
-               worklist = &pwq->delayed_works;
+               work_flags |= WORK_STRUCT_INACTIVE;
+               insert_work(pwq, work, &pwq->inactive_works, work_flags);
        }
 
-       insert_work(pwq, work, worklist, work_flags);
-
 out:
-       spin_unlock(&pwq->pool->lock);
+       raw_spin_unlock(&pool->lock);
        rcu_read_unlock();
 }
 
@@ -1502,7 +2416,10 @@ out:
  * @work: work to queue
  *
  * We queue the work to a specific CPU; the caller must ensure it
- * can't go away.
+ * can't go away.  Callers that fail to ensure that the specified
+ * CPU cannot go away will execute on a randomly chosen CPU.
+ * But note well that callers specifying a CPU that never has been
+ * online will get a splat.
  *
  * Return: %false if @work was already on a queue, %true otherwise.
  */
@@ -1510,22 +2427,22 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq,
                   struct work_struct *work)
 {
        bool ret = false;
-       unsigned long flags;
+       unsigned long irq_flags;
 
-       local_irq_save(flags);
+       local_irq_save(irq_flags);
 
        if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
                __queue_work(cpu, wq, work);
                ret = true;
        }
 
-       local_irq_restore(flags);
+       local_irq_restore(irq_flags);
        return ret;
 }
 EXPORT_SYMBOL(queue_work_on);
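
A minimal caller sketch (hypothetical driver code, not part of the patch);
taking the hotplug read lock is one way to satisfy the "CPU can't go away"
requirement:

	static void example_fn(struct work_struct *work)
	{
		pr_info("ran on CPU%d\n", raw_smp_processor_id());
	}
	static DECLARE_WORK(example_work, example_fn);

	static void example_queue(void)
	{
		/* cpus_read_lock() keeps CPU 2 from going offline under us */
		cpus_read_lock();
		if (cpu_online(2))
			queue_work_on(2, system_wq, &example_work);
		cpus_read_unlock();
	}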
 
 /**
- * workqueue_select_cpu_near - Select a CPU based on NUMA node
+ * select_numa_node_cpu - Select a CPU based on NUMA node
  * @node: NUMA node ID that we want to select a CPU from
  *
  * This function will attempt to find a "random" cpu available on a given
@@ -1533,14 +2450,10 @@ EXPORT_SYMBOL(queue_work_on);
  * WORK_CPU_UNBOUND indicating that we should just schedule to any
  * available CPU if we need to schedule this work.
  */
-static int workqueue_select_cpu_near(int node)
+static int select_numa_node_cpu(int node)
 {
        int cpu;
 
-       /* No point in doing this if NUMA isn't enabled for workqueues */
-       if (!wq_numa_enabled)
-               return WORK_CPU_UNBOUND;
-
        /* Delay binding to CPU if node is not valid or online */
        if (node < 0 || node >= MAX_NUMNODES || !node_online(node))
                return WORK_CPU_UNBOUND;
@@ -1580,7 +2493,7 @@ static int workqueue_select_cpu_near(int node)
 bool queue_work_node(int node, struct workqueue_struct *wq,
                     struct work_struct *work)
 {
-       unsigned long flags;
+       unsigned long irq_flags;
        bool ret = false;
 
        /*
@@ -1594,16 +2507,16 @@ bool queue_work_node(int node, struct workqueue_struct *wq,
         */
        WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND));
 
-       local_irq_save(flags);
+       local_irq_save(irq_flags);
 
        if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
-               int cpu = workqueue_select_cpu_near(node);
+               int cpu = select_numa_node_cpu(node);
 
                __queue_work(cpu, wq, work);
                ret = true;
        }
 
-       local_irq_restore(flags);
+       local_irq_restore(irq_flags);
        return ret;
 }
 EXPORT_SYMBOL_GPL(queue_work_node);
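
Usage differs from queue_work_on() only in naming a NUMA node instead of a
CPU. A hedged sketch, where @dev, @wq and @ctx are hypothetical caller state:

	/* @wq must be WQ_UNBOUND; execution lands near @dev's node if possible */
	queue_work_node(dev_to_node(dev), wq, &ctx->work);
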
@@ -1643,10 +2556,18 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
        dwork->cpu = cpu;
        timer->expires = jiffies + delay;
 
-       if (unlikely(cpu != WORK_CPU_UNBOUND))
+       if (housekeeping_enabled(HK_TYPE_TIMER)) {
+               /* If the current cpu is a housekeeping cpu, use it. */
+               cpu = smp_processor_id();
+               if (!housekeeping_test_cpu(cpu, HK_TYPE_TIMER))
+                       cpu = housekeeping_any_cpu(HK_TYPE_TIMER);
                add_timer_on(timer, cpu);
-       else
-               add_timer(timer);
+       } else {
+               if (likely(cpu == WORK_CPU_UNBOUND))
+                       add_timer_global(timer);
+               else
+                       add_timer_on(timer, cpu);
+       }
 }
 
 /**
@@ -1665,17 +2586,17 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 {
        struct work_struct *work = &dwork->work;
        bool ret = false;
-       unsigned long flags;
+       unsigned long irq_flags;
 
        /* read the comment in __queue_work() */
-       local_irq_save(flags);
+       local_irq_save(irq_flags);
 
        if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
                __queue_delayed_work(cpu, wq, dwork, delay);
                ret = true;
        }
 
-       local_irq_restore(flags);
+       local_irq_restore(irq_flags);
        return ret;
 }
 EXPORT_SYMBOL(queue_delayed_work_on);
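
A minimal delayed-work sketch (hypothetical names, using the plain
queue_delayed_work() wrapper, which passes WORK_CPU_UNBOUND):

	static void example_timeout_fn(struct work_struct *work)
	{
		struct delayed_work *dwork = to_delayed_work(work);

		pr_info("deferred work ran (dwork=%p)\n", dwork);
	}
	static DECLARE_DELAYED_WORK(example_dwork, example_timeout_fn);

	static void example_arm(void)
	{
		/* fire on any CPU roughly one second from now */
		queue_delayed_work(system_wq, &example_dwork, HZ);
	}
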
@@ -1701,16 +2622,17 @@ EXPORT_SYMBOL(queue_delayed_work_on);
 bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
                         struct delayed_work *dwork, unsigned long delay)
 {
-       unsigned long flags;
+       unsigned long irq_flags;
        int ret;
 
        do {
-               ret = try_to_grab_pending(&dwork->work, true, &flags);
+               ret = try_to_grab_pending(&dwork->work, WORK_CANCEL_DELAYED,
+                                         &irq_flags);
        } while (unlikely(ret == -EAGAIN));
 
        if (likely(ret >= 0)) {
                __queue_delayed_work(cpu, wq, dwork, delay);
-               local_irq_restore(flags);
+               local_irq_restore(irq_flags);
        }
 
        /* -ENOENT from try_to_grab_pending() becomes %true */
@@ -1744,7 +2666,7 @@ bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork)
 
        if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
                rwork->wq = wq;
-               call_rcu(&rwork->rcu, rcu_work_rcufn);
+               call_rcu_hurry(&rwork->rcu, rcu_work_rcufn);
                return true;
        }
 
@@ -1752,67 +2674,6 @@ bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork)
 }
 EXPORT_SYMBOL(queue_rcu_work);
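
A typical rcu_work pattern defers freeing an object until after an RCU grace
period without blocking the caller. A hedged sketch (all example_* names are
hypothetical):

	struct example_obj {
		struct rcu_work rwork;
		/* ... payload that RCU readers may still be traversing ... */
	};

	static void example_free_fn(struct work_struct *work)
	{
		struct example_obj *obj =
			container_of(to_rcu_work(work), struct example_obj, rwork);

		kfree(obj);
	}

	static void example_defer_free(struct example_obj *obj)
	{
		INIT_RCU_WORK(&obj->rwork, example_free_fn);
		queue_rcu_work(system_wq, &obj->rwork);
	}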
 
-/**
- * worker_enter_idle - enter idle state
- * @worker: worker which is entering idle state
- *
- * @worker is entering idle state.  Update stats and idle timer if
- * necessary.
- *
- * LOCKING:
- * spin_lock_irq(pool->lock).
- */
-static void worker_enter_idle(struct worker *worker)
-{
-       struct worker_pool *pool = worker->pool;
-
-       if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
-           WARN_ON_ONCE(!list_empty(&worker->entry) &&
-                        (worker->hentry.next || worker->hentry.pprev)))
-               return;
-
-       /* can't use worker_set_flags(), also called from create_worker() */
-       worker->flags |= WORKER_IDLE;
-       pool->nr_idle++;
-       worker->last_active = jiffies;
-
-       /* idle_list is LIFO */
-       list_add(&worker->entry, &pool->idle_list);
-
-       if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
-               mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
-
-       /*
-        * Sanity check nr_running.  Because unbind_workers() releases
-        * pool->lock between setting %WORKER_UNBOUND and zapping
-        * nr_running, the warning may trigger spuriously.  Check iff
-        * unbind is not in progress.
-        */
-       WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
-                    pool->nr_workers == pool->nr_idle &&
-                    atomic_read(&pool->nr_running));
-}
-
-/**
- * worker_leave_idle - leave idle state
- * @worker: worker which is leaving idle state
- *
- * @worker is leaving idle state.  Update stats.
- *
- * LOCKING:
- * spin_lock_irq(pool->lock).
- */
-static void worker_leave_idle(struct worker *worker)
-{
-       struct worker_pool *pool = worker->pool;
-
-       if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
-               return;
-       worker_clr_flags(worker, WORKER_IDLE);
-       pool->nr_idle--;
-       list_del_init(&worker->entry);
-}
-
 static struct worker *alloc_worker(int node)
 {
        struct worker *worker;
@@ -1828,6 +2689,14 @@ static struct worker *alloc_worker(int node)
        return worker;
 }
 
+static cpumask_t *pool_allowed_cpus(struct worker_pool *pool)
+{
+       if (pool->cpu < 0 && pool->attrs->affn_strict)
+               return pool->attrs->__pod_cpumask;
+       else
+               return pool->attrs->cpumask;
+}
+
 /**
  * worker_attach_to_pool() - attach a worker to a pool
  * @worker: worker to be attached
@@ -1838,23 +2707,24 @@ static struct worker *alloc_worker(int node)
  * cpu-[un]hotplugs.
  */
 static void worker_attach_to_pool(struct worker *worker,
-                                  struct worker_pool *pool)
+                                 struct worker_pool *pool)
 {
        mutex_lock(&wq_pool_attach_mutex);
 
        /*
-        * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
-        * online CPUs.  It'll be re-applied when any of the CPUs come up.
-        */
-       set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
-
-       /*
-        * The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains
-        * stable across this function.  See the comments above the flag
-        * definition for details.
+        * The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains stable
+        * across this function. See the comments above the flag definition for
+        * details. BH workers are, while per-CPU, always DISASSOCIATED.
         */
-       if (pool->flags & POOL_DISASSOCIATED)
+       if (pool->flags & POOL_DISASSOCIATED) {
                worker->flags |= WORKER_UNBOUND;
+       } else {
+               WARN_ON_ONCE(pool->flags & POOL_BH);
+               kthread_set_per_cpu(worker->task, pool->cpu);
+       }
+
+       if (worker->rescue_wq)
+               set_cpus_allowed_ptr(worker->task, pool_allowed_cpus(pool));
 
        list_add_tail(&worker->node, &pool->workers);
        worker->pool = pool;
@@ -1875,12 +2745,16 @@ static void worker_detach_from_pool(struct worker *worker)
        struct worker_pool *pool = worker->pool;
        struct completion *detach_completion = NULL;
 
+       /* there is one permanent BH worker per CPU which should never detach */
+       WARN_ON_ONCE(pool->flags & POOL_BH);
+
        mutex_lock(&wq_pool_attach_mutex);
 
+       kthread_set_per_cpu(worker->task, -1);
        list_del(&worker->node);
        worker->pool = NULL;
 
-       if (list_empty(&pool->workers))
+       if (list_empty(&pool->workers) && list_empty(&pool->dying_workers))
                detach_completion = pool->detach_completion;
        mutex_unlock(&wq_pool_attach_mutex);
 
@@ -1905,69 +2779,126 @@ static void worker_detach_from_pool(struct worker *worker)
  */
 static struct worker *create_worker(struct worker_pool *pool)
 {
-       struct worker *worker = NULL;
-       int id = -1;
-       char id_buf[16];
+       struct worker *worker;
+       int id;
+       char id_buf[23];
 
        /* ID is needed to determine kthread name */
-       id = ida_simple_get(&pool->worker_ida, 0, 0, GFP_KERNEL);
-       if (id < 0)
-               goto fail;
+       id = ida_alloc(&pool->worker_ida, GFP_KERNEL);
+       if (id < 0) {
+               pr_err_once("workqueue: Failed to allocate a worker ID: %pe\n",
+                           ERR_PTR(id));
+               return NULL;
+       }
 
        worker = alloc_worker(pool->node);
-       if (!worker)
+       if (!worker) {
+               pr_err_once("workqueue: Failed to allocate a worker\n");
                goto fail;
+       }
 
        worker->id = id;
 
-       if (pool->cpu >= 0)
-               snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id,
-                        pool->attrs->nice < 0  ? "H" : "");
-       else
-               snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);
-
-       worker->task = kthread_create_on_node(worker_thread, worker, pool->node,
-                                             "kworker/%s", id_buf);
-       if (IS_ERR(worker->task))
-               goto fail;
+       if (!(pool->flags & POOL_BH)) {
+               if (pool->cpu >= 0)
+                       snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id,
+                                pool->attrs->nice < 0  ? "H" : "");
+               else
+                       snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);
+
+               worker->task = kthread_create_on_node(worker_thread, worker,
+                                       pool->node, "kworker/%s", id_buf);
+               if (IS_ERR(worker->task)) {
+                       if (PTR_ERR(worker->task) == -EINTR) {
+                               pr_err("workqueue: Interrupted when creating a worker thread \"kworker/%s\"\n",
+                                      id_buf);
+                       } else {
+                               pr_err_once("workqueue: Failed to create a worker thread: %pe\n",
+                                           worker->task);
+                       }
+                       goto fail;
+               }
 
-       set_user_nice(worker->task, pool->attrs->nice);
-       kthread_bind_mask(worker->task, pool->attrs->cpumask);
+               set_user_nice(worker->task, pool->attrs->nice);
+               kthread_bind_mask(worker->task, pool_allowed_cpus(pool));
+       }
 
        /* successful, attach the worker to the pool */
        worker_attach_to_pool(worker, pool);
 
        /* start the newly created worker */
-       spin_lock_irq(&pool->lock);
+       raw_spin_lock_irq(&pool->lock);
+
        worker->pool->nr_workers++;
        worker_enter_idle(worker);
-       wake_up_process(worker->task);
-       spin_unlock_irq(&pool->lock);
+
+       /*
+        * @worker is waiting on a completion in kthread() and will trigger the
+        * hung task check if not woken up soon. As kick_pool() is a noop if
+        * @pool is empty, wake it up explicitly.
+        */
+       if (worker->task)
+               wake_up_process(worker->task);
+
+       raw_spin_unlock_irq(&pool->lock);
 
        return worker;
 
 fail:
-       if (id >= 0)
-               ida_simple_remove(&pool->worker_ida, id);
+       ida_free(&pool->worker_ida, id);
        kfree(worker);
        return NULL;
 }
 
+static void unbind_worker(struct worker *worker)
+{
+       lockdep_assert_held(&wq_pool_attach_mutex);
+
+       kthread_set_per_cpu(worker->task, -1);
+       if (cpumask_intersects(wq_unbound_cpumask, cpu_active_mask))
+               WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, wq_unbound_cpumask) < 0);
+       else
+               WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
+}
+
+static void wake_dying_workers(struct list_head *cull_list)
+{
+       struct worker *worker, *tmp;
+
+       list_for_each_entry_safe(worker, tmp, cull_list, entry) {
+               list_del_init(&worker->entry);
+               unbind_worker(worker);
+               /*
+                * If the worker was somehow already running, then it had to be
+                * in pool->idle_list when set_worker_dying() happened or we
+                * wouldn't have gotten here.
+                *
+                * Thus, the worker must either have observed the WORKER_DIE
+                * flag, or have set its state to TASK_IDLE. Either way, the
+                * below will be observed by the worker and is safe to do
+                * outside of pool->lock.
+                */
+               wake_up_process(worker->task);
+       }
+}
+
 /**
- * destroy_worker - destroy a workqueue worker
+ * set_worker_dying - Tag a worker for destruction
  * @worker: worker to be destroyed
+ * @list: transfer worker away from its pool->idle_list and into list
  *
- * Destroy @worker and adjust @pool stats accordingly.  The worker should
- * be idle.
+ * Tag @worker for destruction and adjust @pool stats accordingly.  The worker
+ * should be idle.
  *
  * CONTEXT:
- * spin_lock_irq(pool->lock).
+ * raw_spin_lock_irq(pool->lock).
  */
-static void destroy_worker(struct worker *worker)
+static void set_worker_dying(struct worker *worker, struct list_head *list)
 {
        struct worker_pool *pool = worker->pool;
 
        lockdep_assert_held(&pool->lock);
+       lockdep_assert_held(&wq_pool_attach_mutex);
 
        /* sanity check frenzy */
        if (WARN_ON(worker->current_work) ||
@@ -1978,22 +2909,79 @@ static void destroy_worker(struct worker *worker)
        pool->nr_workers--;
        pool->nr_idle--;
 
-       list_del_init(&worker->entry);
        worker->flags |= WORKER_DIE;
-       wake_up_process(worker->task);
+
+       list_move(&worker->entry, list);
+       list_move(&worker->node, &pool->dying_workers);
 }
 
+/**
+ * idle_worker_timeout - check if some idle workers can now be deleted.
+ * @t: The pool's idle_timer that just expired
+ *
+ * The timer is armed in worker_enter_idle(). Note that it isn't disarmed in
+ * worker_leave_idle(), as a worker flicking between idle and active while its
+ * pool is at the too_many_workers() tipping point would cause too much timer
+ * housekeeping overhead. Since IDLE_WORKER_TIMEOUT is long enough, we just let
+ * it expire and re-evaluate things from there.
+ */
 static void idle_worker_timeout(struct timer_list *t)
 {
        struct worker_pool *pool = from_timer(pool, t, idle_timer);
+       bool do_cull = false;
+
+       if (work_pending(&pool->idle_cull_work))
+               return;
 
-       spin_lock_irq(&pool->lock);
+       raw_spin_lock_irq(&pool->lock);
 
-       while (too_many_workers(pool)) {
+       if (too_many_workers(pool)) {
                struct worker *worker;
                unsigned long expires;
 
                /* idle_list is kept in LIFO order, check the last one */
+               worker = list_entry(pool->idle_list.prev, struct worker, entry);
+               expires = worker->last_active + IDLE_WORKER_TIMEOUT;
+               do_cull = !time_before(jiffies, expires);
+
+               if (!do_cull)
+                       mod_timer(&pool->idle_timer, expires);
+       }
+       raw_spin_unlock_irq(&pool->lock);
+
+       if (do_cull)
+               queue_work(system_unbound_wq, &pool->idle_cull_work);
+}
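
The timer/work split here is the usual shape for deferring sleepable cleanup
out of an atomic timer callback. A generic hedged sketch (all example_* names
are hypothetical):

	struct example_pool {
		struct timer_list timer;
		struct work_struct cleanup_work;	/* does the sleepable part */
	};

	static bool example_needs_cleanup(struct example_pool *p);

	static void example_timer_fn(struct timer_list *t)
	{
		struct example_pool *p = from_timer(p, t, timer);

		/* atomic context: only a cheap check, then defer or re-arm */
		if (example_needs_cleanup(p))
			queue_work(system_unbound_wq, &p->cleanup_work);
		else
			mod_timer(&p->timer, jiffies + HZ);
	}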
+
+/**
+ * idle_cull_fn - cull workers that have been idle for too long.
+ * @work: the pool's work for handling these idle workers
+ *
+ * This goes through a pool's idle workers and gets rid of those that have been
+ * idle for at least IDLE_WORKER_TIMEOUT.
+ *
+ * We don't want to disturb isolated CPUs because of a pcpu kworker being
+ * culled, so this also resets worker affinity. This requires a sleepable
+ * context, hence the split between timer callback and work item.
+ */
+static void idle_cull_fn(struct work_struct *work)
+{
+       struct worker_pool *pool = container_of(work, struct worker_pool, idle_cull_work);
+       LIST_HEAD(cull_list);
+
+       /*
+        * Grabbing wq_pool_attach_mutex here ensures an already-running worker
+        * cannot proceed beyond worker_detach_from_pool() in its self-destruct
+        * path. This is required as a previously-preempted worker could run
+        * after set_worker_dying() has happened but before
+        * wake_dying_workers() has been called.
+        */
+       mutex_lock(&wq_pool_attach_mutex);
+       raw_spin_lock_irq(&pool->lock);
+
+       while (too_many_workers(pool)) {
+               struct worker *worker;
+               unsigned long expires;
+
                worker = list_entry(pool->idle_list.prev, struct worker, entry);
                expires = worker->last_active + IDLE_WORKER_TIMEOUT;
 
@@ -2002,10 +2990,12 @@ static void idle_worker_timeout(struct timer_list *t)
                        break;
                }
 
-               destroy_worker(worker);
+               set_worker_dying(worker, &cull_list);
        }
 
-       spin_unlock_irq(&pool->lock);
+       raw_spin_unlock_irq(&pool->lock);
+       wake_dying_workers(&cull_list);
+       mutex_unlock(&wq_pool_attach_mutex);
 }
 
 static void send_mayday(struct work_struct *work)
@@ -2028,6 +3018,7 @@ static void send_mayday(struct work_struct *work)
                get_pwq(pwq);
                list_add_tail(&pwq->mayday_node, &wq->maydays);
                wake_up_process(wq->rescuer->task);
+               pwq->stats[PWQ_STAT_MAYDAY]++;
        }
 }
 
@@ -2036,8 +3027,8 @@ static void pool_mayday_timeout(struct timer_list *t)
        struct worker_pool *pool = from_timer(pool, t, mayday_timer);
        struct work_struct *work;
 
-       spin_lock_irq(&pool->lock);
-       spin_lock(&wq_mayday_lock);             /* for wq->maydays */
+       raw_spin_lock_irq(&pool->lock);
+       raw_spin_lock(&wq_mayday_lock);         /* for wq->maydays */
 
        if (need_to_create_worker(pool)) {
                /*
@@ -2050,8 +3041,8 @@ static void pool_mayday_timeout(struct timer_list *t)
                        send_mayday(work);
        }
 
-       spin_unlock(&wq_mayday_lock);
-       spin_unlock_irq(&pool->lock);
+       raw_spin_unlock(&wq_mayday_lock);
+       raw_spin_unlock_irq(&pool->lock);
 
        mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
 }
@@ -2070,7 +3061,7 @@ static void pool_mayday_timeout(struct timer_list *t)
  * may_start_working() %true.
  *
  * LOCKING:
- * spin_lock_irq(pool->lock) which may be released and regrabbed
+ * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
  * multiple times.  Does GFP_KERNEL allocations.  Called only from
  * manager.
  */
@@ -2079,7 +3070,7 @@ __releases(&pool->lock)
 __acquires(&pool->lock)
 {
 restart:
-       spin_unlock_irq(&pool->lock);
+       raw_spin_unlock_irq(&pool->lock);
 
        /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
        mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
@@ -2095,7 +3086,7 @@ restart:
        }
 
        del_timer_sync(&pool->mayday_timer);
-       spin_lock_irq(&pool->lock);
+       raw_spin_lock_irq(&pool->lock);
        /*
         * This is necessary even after a new worker was just successfully
         * created as @pool->lock was dropped and the new worker might have
@@ -2118,7 +3109,7 @@ restart:
  * and may_start_working() is true.
  *
  * CONTEXT:
- * spin_lock_irq(pool->lock) which may be released and regrabbed
+ * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
  * multiple times.  Does GFP_KERNEL allocations.
  *
  * Return:
@@ -2141,7 +3132,7 @@ static bool manage_workers(struct worker *worker)
 
        pool->manager = NULL;
        pool->flags &= ~POOL_MANAGER_ACTIVE;
-       wake_up(&wq_manager_wait);
+       rcuwait_wake_up(&manager_wait);
        return true;
 }
 
@@ -2157,7 +3148,7 @@ static bool manage_workers(struct worker *worker)
  * call this function to process a work.
  *
  * CONTEXT:
- * spin_lock_irq(pool->lock) which is released and regrabbed.
+ * raw_spin_lock_irq(pool->lock) which is released and regrabbed.
  */
 static void process_one_work(struct worker *worker, struct work_struct *work)
 __releases(&pool->lock)
@@ -2165,9 +3156,9 @@ __acquires(&pool->lock)
 {
        struct pool_workqueue *pwq = get_work_pwq(work);
        struct worker_pool *pool = worker->pool;
-       bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE;
-       int work_color;
-       struct worker *collision;
+       unsigned long work_data;
+       int lockdep_start_depth, rcu_start_depth;
+       bool bh_draining = pool->flags & POOL_BH_DRAINING;
 #ifdef CONFIG_LOCKDEP
        /*
         * It is permissible to free the struct work_struct from
@@ -2184,25 +3175,16 @@ __acquires(&pool->lock)
        WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
                     raw_smp_processor_id() != pool->cpu);
 
-       /*
-        * A single work shouldn't be executed concurrently by
-        * multiple workers on a single cpu.  Check whether anyone is
-        * already processing the work.  If so, defer the work to the
-        * currently executing one.
-        */
-       collision = find_worker_executing_work(pool, work);
-       if (unlikely(collision)) {
-               move_linked_works(work, &collision->scheduled, NULL);
-               return;
-       }
-
        /* claim and dequeue */
        debug_work_deactivate(work);
        hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
        worker->current_work = work;
        worker->current_func = work->func;
        worker->current_pwq = pwq;
-       work_color = get_work_color(work);
+       if (worker->task)
+               worker->current_at = worker->task->se.sum_exec_runtime;
+       work_data = *work_data_bits(work);
+       worker->current_color = get_work_color(work_data);
 
        /*
         * Record wq name for cmdline and debug reporting, may get
@@ -2218,18 +3200,16 @@ __acquires(&pool->lock)
         * of concurrency management and the next code block will chain
         * execution of the pending work items.
         */
-       if (unlikely(cpu_intensive))
+       if (unlikely(pwq->wq->flags & WQ_CPU_INTENSIVE))
                worker_set_flags(worker, WORKER_CPU_INTENSIVE);
 
        /*
-        * Wake up another worker if necessary.  The condition is always
-        * false for normal per-cpu workers since nr_running would always
-        * be >= 1 at this point.  This is used to chain execution of the
-        * pending work items for WORKER_NOT_RUNNING workers such as the
-        * UNBOUND and CPU_INTENSIVE ones.
+        * Kick @pool if necessary. It's always noop for per-cpu worker pools
+        * since nr_running would always be >= 1 at this point. This is used to
+        * chain execution of the pending work items for WORKER_NOT_RUNNING
+        * workers such as the UNBOUND and CPU_INTENSIVE ones.
         */
-       if (need_more_worker(pool))
-               wake_up_worker(pool);
+       kick_pool(pool);
 
        /*
         * Record the last pool and clear PENDING which should be the last
@@ -2237,11 +3217,16 @@ __acquires(&pool->lock)
         * PENDING and queued state changes happen together while IRQ is
         * disabled.
         */
-       set_work_pool_and_clear_pending(work, pool->id);
+       set_work_pool_and_clear_pending(work, pool->id, 0);
 
-       spin_unlock_irq(&pool->lock);
+       pwq->stats[PWQ_STAT_STARTED]++;
+       raw_spin_unlock_irq(&pool->lock);
 
-       lock_map_acquire(&pwq->wq->lockdep_map);
+       rcu_start_depth = rcu_preempt_depth();
+       lockdep_start_depth = lockdep_depth(current);
+       /* see drain_dead_softirq_workfn() */
+       if (!bh_draining)
+               lock_map_acquire(&pwq->wq->lockdep_map);
        lock_map_acquire(&lockdep_map);
        /*
         * Strictly speaking we should mark the invariant state without holding
@@ -2271,34 +3256,44 @@ __acquires(&pool->lock)
         * While we must be careful to not use "work" after this, the trace
         * point will only record its address.
         */
-       trace_workqueue_execute_end(work);
+       trace_workqueue_execute_end(work, worker->current_func);
+       pwq->stats[PWQ_STAT_COMPLETED]++;
        lock_map_release(&lockdep_map);
-       lock_map_release(&pwq->wq->lockdep_map);
+       if (!bh_draining)
+               lock_map_release(&pwq->wq->lockdep_map);
 
-       if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
-               pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
-                      "     last function: %ps\n",
-                      current->comm, preempt_count(), task_pid_nr(current),
+       if (unlikely((worker->task && in_atomic()) ||
+                    lockdep_depth(current) != lockdep_start_depth ||
+                    rcu_preempt_depth() != rcu_start_depth)) {
+               pr_err("BUG: workqueue leaked atomic, lock or RCU: %s[%d]\n"
+                      "     preempt=0x%08x lock=%d->%d RCU=%d->%d workfn=%ps\n",
+                      current->comm, task_pid_nr(current), preempt_count(),
+                      lockdep_start_depth, lockdep_depth(current),
+                      rcu_start_depth, rcu_preempt_depth(),
                       worker->current_func);
                debug_show_held_locks(current);
                dump_stack();
        }
 
        /*
-        * The following prevents a kworker from hogging CPU on !PREEMPT
+        * The following prevents a kworker from hogging CPU on !PREEMPTION
         * kernels, where a requeueing work item waiting for something to
         * happen could deadlock with stop_machine as such work item could
         * indefinitely requeue itself while all other CPUs are trapped in
         * stop_machine. At the same time, report a quiescent RCU state so
         * the same condition doesn't freeze RCU.
         */
-       cond_resched();
+       if (worker->task)
+               cond_resched();
 
-       spin_lock_irq(&pool->lock);
+       raw_spin_lock_irq(&pool->lock);
 
-       /* clear cpu intensive status */
-       if (unlikely(cpu_intensive))
-               worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
+       /*
+        * In addition to %WQ_CPU_INTENSIVE, @worker may also have been marked
+        * CPU intensive by wq_worker_tick() if @work hogged CPU longer than
+        * wq_cpu_intensive_thresh_us. Clear it.
+        */
+       worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
 
        /* tag the worker for identification in schedule() */
        worker->last_func = worker->current_func;
@@ -2308,7 +3303,10 @@ __acquires(&pool->lock)
        worker->current_work = NULL;
        worker->current_func = NULL;
        worker->current_pwq = NULL;
-       pwq_dec_nr_in_flight(pwq, work_color);
+       worker->current_color = INT_MAX;
+
+       /* must be the last step, see the function comment */
+       pwq_dec_nr_in_flight(pwq, work_data);
 }
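
Editor's note: the leak check above means every work function must leave lock,
preempt and RCU nesting exactly as it found them. A minimal sketch of a
conforming work function (my_lock, my_list and my_workfn are illustrative
names, not part of this file):

	static DEFINE_SPINLOCK(my_lock);
	static LIST_HEAD(my_list);

	static void my_workfn(struct work_struct *work)
	{
		/* every lock taken here must be dropped before returning */
		spin_lock(&my_lock);
		if (!list_empty(&my_list))
			list_del_init(my_list.next);
		spin_unlock(&my_lock);
		/*
		 * Returning with my_lock held, in_atomic() true or an RCU
		 * read-side section still open would trigger the "BUG:
		 * workqueue leaked atomic, lock or RCU" report above.
		 */
	}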
 
 /**
@@ -2320,14 +3318,20 @@ __acquires(&pool->lock)
  * fetches a work from the top and executes it.
  *
  * CONTEXT:
- * spin_lock_irq(pool->lock) which may be released and regrabbed
+ * raw_spin_lock_irq(pool->lock) which may be released and regrabbed
  * multiple times.
  */
 static void process_scheduled_works(struct worker *worker)
 {
-       while (!list_empty(&worker->scheduled)) {
-               struct work_struct *work = list_first_entry(&worker->scheduled,
-                                               struct work_struct, entry);
+       struct work_struct *work;
+       bool first = true;
+
+       while ((work = list_first_entry_or_null(&worker->scheduled,
+                                               struct work_struct, entry))) {
+               if (first) {
+                       worker->pool->watchdog_ts = jiffies;
+                       first = false;
+               }
                process_one_work(worker, work);
        }
 }
@@ -2362,17 +3366,17 @@ static int worker_thread(void *__worker)
        /* tell the scheduler that this is a workqueue worker */
        set_pf_worker(true);
 woke_up:
-       spin_lock_irq(&pool->lock);
+       raw_spin_lock_irq(&pool->lock);
 
        /* am I supposed to die? */
        if (unlikely(worker->flags & WORKER_DIE)) {
-               spin_unlock_irq(&pool->lock);
-               WARN_ON_ONCE(!list_empty(&worker->entry));
+               raw_spin_unlock_irq(&pool->lock);
                set_pf_worker(false);
 
                set_task_comm(worker->task, "kworker/dying");
-               ida_simple_remove(&pool->worker_ida, worker->id);
+               ida_free(&pool->worker_ida, worker->id);
                worker_detach_from_pool(worker);
+               WARN_ON_ONCE(!list_empty(&worker->entry));
                kfree(worker);
                return 0;
        }
@@ -2408,17 +3412,8 @@ recheck:
                        list_first_entry(&pool->worklist,
                                         struct work_struct, entry);
 
-               pool->watchdog_ts = jiffies;
-
-               if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
-                       /* optimization path, not strictly necessary */
-                       process_one_work(worker, work);
-                       if (unlikely(!list_empty(&worker->scheduled)))
-                               process_scheduled_works(worker);
-               } else {
-                       move_linked_works(work, &worker->scheduled, NULL);
+               if (assign_work(work, worker, NULL))
                        process_scheduled_works(worker);
-               }
        } while (keep_working(pool));
 
        worker_set_flags(worker, WORKER_PREP);
@@ -2432,7 +3427,7 @@ sleep:
         */
        worker_enter_idle(worker);
        __set_current_state(TASK_IDLE);
-       spin_unlock_irq(&pool->lock);
+       raw_spin_unlock_irq(&pool->lock);
        schedule();
        goto woke_up;
 }
@@ -2462,7 +3457,6 @@ static int rescuer_thread(void *__rescuer)
 {
        struct worker *rescuer = __rescuer;
        struct workqueue_struct *wq = rescuer->rescue_wq;
-       struct list_head *scheduled = &rescuer->scheduled;
        bool should_stop;
 
        set_user_nice(current, RESCUER_NICE_LEVEL);
@@ -2486,55 +3480,57 @@ repeat:
        should_stop = kthread_should_stop();
 
        /* see whether any pwq is asking for help */
-       spin_lock_irq(&wq_mayday_lock);
+       raw_spin_lock_irq(&wq_mayday_lock);
 
        while (!list_empty(&wq->maydays)) {
                struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
                                        struct pool_workqueue, mayday_node);
                struct worker_pool *pool = pwq->pool;
                struct work_struct *work, *n;
-               bool first = true;
 
                __set_current_state(TASK_RUNNING);
                list_del_init(&pwq->mayday_node);
 
-               spin_unlock_irq(&wq_mayday_lock);
+               raw_spin_unlock_irq(&wq_mayday_lock);
 
                worker_attach_to_pool(rescuer, pool);
 
-               spin_lock_irq(&pool->lock);
+               raw_spin_lock_irq(&pool->lock);
 
                /*
                 * Slurp in all works issued via this workqueue and
                 * process'em.
                 */
-               WARN_ON_ONCE(!list_empty(scheduled));
+               WARN_ON_ONCE(!list_empty(&rescuer->scheduled));
                list_for_each_entry_safe(work, n, &pool->worklist, entry) {
-                       if (get_work_pwq(work) == pwq) {
-                               if (first)
-                                       pool->watchdog_ts = jiffies;
-                               move_linked_works(work, scheduled, &n);
-                       }
-                       first = false;
+                       if (get_work_pwq(work) == pwq &&
+                           assign_work(work, rescuer, &n))
+                               pwq->stats[PWQ_STAT_RESCUED]++;
                }
 
-               if (!list_empty(scheduled)) {
+               if (!list_empty(&rescuer->scheduled)) {
                        process_scheduled_works(rescuer);
 
                        /*
                         * The above execution of rescued work items could
                         * have created more to rescue through
-                        * pwq_activate_first_delayed() or chained
+                        * pwq_activate_first_inactive() or chained
                         * queueing.  Let's put @pwq back on mayday list so
                         * that such back-to-back work items, which may be
                         * being used to relieve memory pressure, don't
+                        * incur MAYDAY_INTERVAL delay in between.
                         */
-                       if (need_to_create_worker(pool)) {
-                               spin_lock(&wq_mayday_lock);
-                               get_pwq(pwq);
-                               list_move_tail(&pwq->mayday_node, &wq->maydays);
-                               spin_unlock(&wq_mayday_lock);
+                       if (pwq->nr_active && need_to_create_worker(pool)) {
+                               raw_spin_lock(&wq_mayday_lock);
+                               /*
+                                * Queue iff we aren't racing destruction
+                                * and somebody else hasn't queued it already.
+                                */
+                               if (wq->rescuer && list_empty(&pwq->mayday_node)) {
+                                       get_pwq(pwq);
+                                       list_add_tail(&pwq->mayday_node, &wq->maydays);
+                               }
+                               raw_spin_unlock(&wq_mayday_lock);
                        }
                }
 
@@ -2545,21 +3541,19 @@ repeat:
                put_pwq(pwq);
 
                /*
-                * Leave this pool.  If need_more_worker() is %true, notify a
-                * regular worker; otherwise, we end up with 0 concurrency
-                * and stalling the execution.
+                * Leave this pool. Notify regular workers; otherwise, we end up
+                * with 0 concurrency, stalling execution.
                 */
-               if (need_more_worker(pool))
-                       wake_up_worker(pool);
+               kick_pool(pool);
 
-               spin_unlock_irq(&pool->lock);
+               raw_spin_unlock_irq(&pool->lock);
 
                worker_detach_from_pool(rescuer);
 
-               spin_lock_irq(&wq_mayday_lock);
+               raw_spin_lock_irq(&wq_mayday_lock);
        }
 
-       spin_unlock_irq(&wq_mayday_lock);
+       raw_spin_unlock_irq(&wq_mayday_lock);
 
        if (should_stop) {
                __set_current_state(TASK_RUNNING);
@@ -2573,6 +3567,139 @@ repeat:
        goto repeat;
 }
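
Editor's note: the rescuer only exists for workqueues created with
WQ_MEM_RECLAIM; without one, forward progress could stall when worker creation
itself needs memory. A hedged usage sketch (names are illustrative):

	struct workqueue_struct *reclaim_wq;

	reclaim_wq = alloc_workqueue("my_reclaim", WQ_MEM_RECLAIM, 0);
	if (!reclaim_wq)
		return -ENOMEM;

	/*
	 * Work queued here is guaranteed forward progress under memory
	 * pressure: if the pool can't create workers, the mayday path hands
	 * the items to this workqueue's rescuer thread.
	 */
	queue_work(reclaim_wq, &my_writeback_work);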
 
+static void bh_worker(struct worker *worker)
+{
+       struct worker_pool *pool = worker->pool;
+       int nr_restarts = BH_WORKER_RESTARTS;
+       unsigned long end = jiffies + BH_WORKER_JIFFIES;
+
+       raw_spin_lock_irq(&pool->lock);
+       worker_leave_idle(worker);
+
+       /*
+        * This function follows the structure of worker_thread(). See there for
+        * explanations on each step.
+        */
+       if (!need_more_worker(pool))
+               goto done;
+
+       WARN_ON_ONCE(!list_empty(&worker->scheduled));
+       worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND);
+
+       do {
+               struct work_struct *work =
+                       list_first_entry(&pool->worklist,
+                                        struct work_struct, entry);
+
+               if (assign_work(work, worker, NULL))
+                       process_scheduled_works(worker);
+       } while (keep_working(pool) &&
+                --nr_restarts && time_before(jiffies, end));
+
+       worker_set_flags(worker, WORKER_PREP);
+done:
+       worker_enter_idle(worker);
+       kick_pool(pool);
+       raw_spin_unlock_irq(&pool->lock);
+}
+
+/*
+ * TODO: Convert all tasklet users to workqueue and use softirq directly.
+ *
+ * This is currently called from tasklet[_hi]_action() and thus is also called
+ * whenever there are tasklets to run. Let's do an early exit if there's nothing
+ * queued. Once conversion from tasklet is complete, the need_more_worker() test
+ * can be dropped.
+ *
+ * After full conversion, we'll add worker->softirq_action, directly use the
+ * softirq action and obtain the worker pointer from the softirq_action pointer.
+ */
+void workqueue_softirq_action(bool highpri)
+{
+       struct worker_pool *pool =
+               &per_cpu(bh_worker_pools, smp_processor_id())[highpri];
+       if (need_more_worker(pool))
+               bh_worker(list_first_entry(&pool->workers, struct worker, node));
+}
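
Editor's note: a tasklet user converted to the BH workqueues served by the
pools above might look like this sketch (my_bh_fn and my_bh_work are assumed
names; system_bh_wq and system_bh_highpri_wq are the BH workqueues this tree
adds):

	static void my_bh_fn(struct work_struct *work)
	{
		/* executes in softirq context on the queueing CPU: no sleeping */
	}
	static DECLARE_WORK(my_bh_work, my_bh_fn);

	/* where the code previously did tasklet_schedule(&my_tasklet): */
	queue_work(system_bh_wq, &my_bh_work);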
+
+struct wq_drain_dead_softirq_work {
+       struct work_struct      work;
+       struct worker_pool      *pool;
+       struct completion       done;
+};
+
+static void drain_dead_softirq_workfn(struct work_struct *work)
+{
+       struct wq_drain_dead_softirq_work *dead_work =
+               container_of(work, struct wq_drain_dead_softirq_work, work);
+       struct worker_pool *pool = dead_work->pool;
+       bool repeat;
+
+       /*
+        * @pool's CPU is dead and we want to execute its still pending work
+        * items from this BH work item which is running on a different CPU. As
+        * its CPU is dead, @pool can't be kicked and, as work execution path
+        * will be nested, a lockdep annotation needs to be suppressed. Mark
+        * @pool with %POOL_BH_DRAINING for the special treatments.
+        */
+       raw_spin_lock_irq(&pool->lock);
+       pool->flags |= POOL_BH_DRAINING;
+       raw_spin_unlock_irq(&pool->lock);
+
+       bh_worker(list_first_entry(&pool->workers, struct worker, node));
+
+       raw_spin_lock_irq(&pool->lock);
+       pool->flags &= ~POOL_BH_DRAINING;
+       repeat = need_more_worker(pool);
+       raw_spin_unlock_irq(&pool->lock);
+
+       /*
+        * bh_worker() might hit consecutive execution limit and bail. If there
+        * still are pending work items, reschedule self and return so that we
+        * don't hog this CPU's BH.
+        */
+       if (repeat) {
+               if (pool->attrs->nice == HIGHPRI_NICE_LEVEL)
+                       queue_work(system_bh_highpri_wq, work);
+               else
+                       queue_work(system_bh_wq, work);
+       } else {
+               complete(&dead_work->done);
+       }
+}
+
+/*
+ * @cpu is dead. Drain the remaining BH work items on the current CPU. It's
+ * possible to allocate dead_work per CPU and avoid flushing. However, then we
+ * have to worry about draining overlapping with the CPU coming back online or
+ * nesting (one CPU's dead_work queued on another CPU which is also dead and so
+ * on). Let's keep it simple and drain them synchronously. These are BH work
+ * items which shouldn't be requeued on the same pool. Shouldn't take long.
+ */
+void workqueue_softirq_dead(unsigned int cpu)
+{
+       int i;
+
+       for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
+               struct worker_pool *pool = &per_cpu(bh_worker_pools, cpu)[i];
+               struct wq_drain_dead_softirq_work dead_work;
+
+               if (!need_more_worker(pool))
+                       continue;
+
+               INIT_WORK(&dead_work.work, drain_dead_softirq_workfn);
+               dead_work.pool = pool;
+               init_completion(&dead_work.done);
+
+               if (pool->attrs->nice == HIGHPRI_NICE_LEVEL)
+                       queue_work(system_bh_highpri_wq, &dead_work.work);
+               else
+                       queue_work(system_bh_wq, &dead_work.work);
+
+               wait_for_completion(&dead_work.done);
+       }
+}
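
Editor's note: the synchronous drain above is an instance of the common
on-stack work plus completion pattern; its generic shape, with illustrative
names, is roughly:

	struct my_sync_work {
		struct work_struct	work;
		struct completion	done;
	};

	static void my_sync_workfn(struct work_struct *work)
	{
		struct my_sync_work *sw =
			container_of(work, struct my_sync_work, work);

		/* ... do the actual work ... */
		complete(&sw->done);
	}

	/* caller side, waits for the handler to finish */
	struct my_sync_work sw;

	INIT_WORK_ONSTACK(&sw.work, my_sync_workfn);
	init_completion(&sw.done);
	queue_work(system_wq, &sw.work);
	wait_for_completion(&sw.done);
	destroy_work_on_stack(&sw.work);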
+
 /**
  * check_flush_dependency - check for flush dependency sanity
  * @target_wq: workqueue being flushed
@@ -2639,46 +3766,59 @@ static void wq_barrier_func(struct work_struct *work)
  * underneath us, so we can't reliably determine pwq from @target.
  *
  * CONTEXT:
- * spin_lock_irq(pool->lock).
+ * raw_spin_lock_irq(pool->lock).
  */
 static void insert_wq_barrier(struct pool_workqueue *pwq,
                              struct wq_barrier *barr,
                              struct work_struct *target, struct worker *worker)
 {
+       static __maybe_unused struct lock_class_key bh_key, thr_key;
+       unsigned int work_flags = 0;
+       unsigned int work_color;
        struct list_head *head;
-       unsigned int linked = 0;
 
        /*
         * debugobject calls are safe here even with pool->lock locked
         * as we know for sure that this will not trigger any of the
         * checks and call back into the fixup functions where we
         * might deadlock.
+        *
+        * BH and threaded workqueues need separate lockdep keys to avoid
+        * spuriously triggering "inconsistent {SOFTIRQ-ON-W} -> {IN-SOFTIRQ-W}
+        * usage".
         */
-       INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
+       INIT_WORK_ONSTACK_KEY(&barr->work, wq_barrier_func,
+                             (pwq->wq->flags & WQ_BH) ? &bh_key : &thr_key);
        __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
 
        init_completion_map(&barr->done, &target->lockdep_map);
 
        barr->task = current;
 
+       /* The barrier work item does not participate in nr_active. */
+       work_flags |= WORK_STRUCT_INACTIVE;
+
        /*
         * If @target is currently being executed, schedule the
         * barrier to the worker; otherwise, put it after @target.
         */
-       if (worker)
+       if (worker) {
                head = worker->scheduled.next;
-       else {
+               work_color = worker->current_color;
+       } else {
                unsigned long *bits = work_data_bits(target);
 
                head = target->entry.next;
                /* there can already be other linked works, inherit and set */
-               linked = *bits & WORK_STRUCT_LINKED;
+               work_flags |= *bits & WORK_STRUCT_LINKED;
+               work_color = get_work_color(*bits);
                __set_bit(WORK_STRUCT_LINKED_BIT, bits);
        }
 
-       debug_work_activate(&barr->work);
-       insert_work(pwq, &barr->work, head,
-                   work_color_to_flags(WORK_NO_COLOR) | linked);
+       pwq->nr_in_flight[work_color]++;
+       work_flags |= work_color_to_flags(work_color);
+
+       insert_work(pwq, &barr->work, head, work_flags);
 }
 
 /**
@@ -2726,7 +3866,7 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
        for_each_pwq(pwq, wq) {
                struct worker_pool *pool = pwq->pool;
 
-               spin_lock_irq(&pool->lock);
+               raw_spin_lock_irq(&pool->lock);
 
                if (flush_color >= 0) {
                        WARN_ON_ONCE(pwq->flush_color != -1);
@@ -2743,7 +3883,7 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
                        pwq->work_color = work_color;
                }
 
-               spin_unlock_irq(&pool->lock);
+               raw_spin_unlock_irq(&pool->lock);
        }
 
        if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
@@ -2752,14 +3892,43 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
        return wait;
 }
 
+static void touch_wq_lockdep_map(struct workqueue_struct *wq)
+{
+#ifdef CONFIG_LOCKDEP
+       if (wq->flags & WQ_BH)
+               local_bh_disable();
+
+       lock_map_acquire(&wq->lockdep_map);
+       lock_map_release(&wq->lockdep_map);
+
+       if (wq->flags & WQ_BH)
+               local_bh_enable();
+#endif
+}
+
+static void touch_work_lockdep_map(struct work_struct *work,
+                                  struct workqueue_struct *wq)
+{
+#ifdef CONFIG_LOCKDEP
+       if (wq->flags & WQ_BH)
+               local_bh_disable();
+
+       lock_map_acquire(&work->lockdep_map);
+       lock_map_release(&work->lockdep_map);
+
+       if (wq->flags & WQ_BH)
+               local_bh_enable();
+#endif
+}
+
 /**
- * flush_workqueue - ensure that any scheduled work has run to completion.
+ * __flush_workqueue - ensure that any scheduled work has run to completion.
  * @wq: workqueue to flush
  *
  * This function sleeps until all work items which were queued on entry
  * have finished execution, but it is not livelocked by new incoming ones.
  */
-void flush_workqueue(struct workqueue_struct *wq)
+void __flush_workqueue(struct workqueue_struct *wq)
 {
        struct wq_flusher this_flusher = {
                .list = LIST_HEAD_INIT(this_flusher.list),
@@ -2771,8 +3940,7 @@ void flush_workqueue(struct workqueue_struct *wq)
        if (WARN_ON(!wq_online))
                return;
 
-       lock_map_acquire(&wq->lockdep_map);
-       lock_map_release(&wq->lockdep_map);
+       touch_wq_lockdep_map(wq);
 
        mutex_lock(&wq->mutex);
 
@@ -2831,7 +3999,7 @@ void flush_workqueue(struct workqueue_struct *wq)
         * First flushers are responsible for cascading flushes and
         * handling overflow.  Non-first flushers can simply return.
         */
-       if (wq->first_flusher != &this_flusher)
+       if (READ_ONCE(wq->first_flusher) != &this_flusher)
                return;
 
        mutex_lock(&wq->mutex);
@@ -2840,7 +4008,7 @@ void flush_workqueue(struct workqueue_struct *wq)
        if (wq->first_flusher != &this_flusher)
                goto out_unlock;
 
-       wq->first_flusher = NULL;
+       WRITE_ONCE(wq->first_flusher, NULL);
 
        WARN_ON_ONCE(!list_empty(&this_flusher.list));
        WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
@@ -2908,7 +4076,7 @@ void flush_workqueue(struct workqueue_struct *wq)
 out_unlock:
        mutex_unlock(&wq->mutex);
 }
-EXPORT_SYMBOL(flush_workqueue);
+EXPORT_SYMBOL(__flush_workqueue);
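
Editor's note: callers still invoke flush_workqueue(); with this rename the
header wraps it around __flush_workqueue() so that flushes of system-wide
workqueues can be flagged at compile time. Typical use is unchanged
(illustrative names):

	queue_work(my_wq, &first_work);
	queue_work(my_wq, &second_work);

	/* returns only after both items above have finished executing */
	flush_workqueue(my_wq);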
 
 /**
  * drain_workqueue - drain a workqueue
@@ -2936,24 +4104,24 @@ void drain_workqueue(struct workqueue_struct *wq)
                wq->flags |= __WQ_DRAINING;
        mutex_unlock(&wq->mutex);
 reflush:
-       flush_workqueue(wq);
+       __flush_workqueue(wq);
 
        mutex_lock(&wq->mutex);
 
        for_each_pwq(pwq, wq) {
                bool drained;
 
-               spin_lock_irq(&pwq->pool->lock);
-               drained = !pwq->nr_active && list_empty(&pwq->delayed_works);
-               spin_unlock_irq(&pwq->pool->lock);
+               raw_spin_lock_irq(&pwq->pool->lock);
+               drained = pwq_is_empty(pwq);
+               raw_spin_unlock_irq(&pwq->pool->lock);
 
                if (drained)
                        continue;
 
                if (++flush_cnt == 10 ||
                    (flush_cnt % 100 == 0 && flush_cnt <= 1000))
-                       pr_warn("workqueue %s: drain_workqueue() isn't complete after %u tries\n",
-                               wq->name, flush_cnt);
+                       pr_warn("workqueue %s: %s() isn't complete after %u tries\n",
+                               wq->name, __func__, flush_cnt);
 
                mutex_unlock(&wq->mutex);
                goto reflush;
@@ -2971,6 +4139,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
        struct worker *worker = NULL;
        struct worker_pool *pool;
        struct pool_workqueue *pwq;
+       struct workqueue_struct *wq;
 
        might_sleep();
 
@@ -2981,7 +4150,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
                return false;
        }
 
-       spin_lock_irq(&pool->lock);
+       raw_spin_lock_irq(&pool->lock);
        /* see the comment in try_to_grab_pending() with the same code */
        pwq = get_work_pwq(work);
        if (pwq) {
@@ -2994,10 +4163,13 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
                pwq = worker->current_pwq;
        }
 
-       check_flush_dependency(pwq->wq, work);
+       wq = pwq->wq;
+       check_flush_dependency(wq, work);
 
        insert_wq_barrier(pwq, barr, work, worker);
-       spin_unlock_irq(&pool->lock);
+       raw_spin_unlock_irq(&pool->lock);
+
+       touch_work_lockdep_map(work, wq);
 
        /*
         * Force a lock recursion deadlock when using flush_work() inside a
@@ -3008,15 +4180,13 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
         * workqueues the deadlock happens when the rescuer stalls, blocking
         * forward progress.
         */
-       if (!from_cancel &&
-           (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) {
-               lock_map_acquire(&pwq->wq->lockdep_map);
-               lock_map_release(&pwq->wq->lockdep_map);
-       }
+       if (!from_cancel && (wq->saved_max_active == 1 || wq->rescuer))
+               touch_wq_lockdep_map(wq);
+
        rcu_read_unlock();
        return true;
 already_gone:
-       spin_unlock_irq(&pool->lock);
+       raw_spin_unlock_irq(&pool->lock);
        rcu_read_unlock();
        return false;
 }
@@ -3031,11 +4201,6 @@ static bool __flush_work(struct work_struct *work, bool from_cancel)
        if (WARN_ON(!work->func))
                return false;
 
-       if (!from_cancel) {
-               lock_map_acquire(&work->lockdep_map);
-               lock_map_release(&work->lockdep_map);
-       }
-
        if (start_flush_work(work, &barr, from_cancel)) {
                wait_for_completion(&barr.done);
                destroy_work_on_stack(&barr.work);
@@ -3062,108 +4227,6 @@ bool flush_work(struct work_struct *work)
 }
 EXPORT_SYMBOL_GPL(flush_work);
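
Editor's note: a sketch of the usual flush_work() pattern, using an assumed
dev->reset_work; the return value reports whether there was anything to wait
for:

	/* wait for the last queueing of reset_work to finish executing */
	if (flush_work(&dev->reset_work))
		pr_debug("reset_work was busy and has now completed\n");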
 
-struct cwt_wait {
-       wait_queue_entry_t              wait;
-       struct work_struct      *work;
-};
-
-static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
-{
-       struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
-
-       if (cwait->work != key)
-               return 0;
-       return autoremove_wake_function(wait, mode, sync, key);
-}
-
-static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
-{
-       static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
-       unsigned long flags;
-       int ret;
-
-       do {
-               ret = try_to_grab_pending(work, is_dwork, &flags);
-               /*
-                * If someone else is already canceling, wait for it to
-                * finish.  flush_work() doesn't work for PREEMPT_NONE
-                * because we may get scheduled between @work's completion
-                * and the other canceling task resuming and clearing
-                * CANCELING - flush_work() will return false immediately
-                * as @work is no longer busy, try_to_grab_pending() will
-                * return -ENOENT as @work is still being canceled and the
-                * other canceling task won't be able to clear CANCELING as
-                * we're hogging the CPU.
-                *
-                * Let's wait for completion using a waitqueue.  As this
-                * may lead to the thundering herd problem, use a custom
-                * wake function which matches @work along with exclusive
-                * wait and wakeup.
-                */
-               if (unlikely(ret == -ENOENT)) {
-                       struct cwt_wait cwait;
-
-                       init_wait(&cwait.wait);
-                       cwait.wait.func = cwt_wakefn;
-                       cwait.work = work;
-
-                       prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
-                                                 TASK_UNINTERRUPTIBLE);
-                       if (work_is_canceling(work))
-                               schedule();
-                       finish_wait(&cancel_waitq, &cwait.wait);
-               }
-       } while (unlikely(ret < 0));
-
-       /* tell other tasks trying to grab @work to back off */
-       mark_work_canceling(work);
-       local_irq_restore(flags);
-
-       /*
-        * This allows canceling during early boot.  We know that @work
-        * isn't executing.
-        */
-       if (wq_online)
-               __flush_work(work, true);
-
-       clear_work_data(work);
-
-       /*
-        * Paired with prepare_to_wait() above so that either
-        * waitqueue_active() is visible here or !work_is_canceling() is
-        * visible there.
-        */
-       smp_mb();
-       if (waitqueue_active(&cancel_waitq))
-               __wake_up(&cancel_waitq, TASK_NORMAL, 1, work);
-
-       return ret;
-}
-
-/**
- * cancel_work_sync - cancel a work and wait for it to finish
- * @work: the work to cancel
- *
- * Cancel @work and wait for its execution to finish.  This function
- * can be used even if the work re-queues itself or migrates to
- * another workqueue.  On return from this function, @work is
- * guaranteed to be not pending or executing on any CPU.
- *
- * cancel_work_sync(&delayed_work->work) must not be used for
- * delayed_work's.  Use cancel_delayed_work_sync() instead.
- *
- * The caller must ensure that the workqueue on which @work was last
- * queued can't be destroyed before this function returns.
- *
- * Return:
- * %true if @work was pending, %false otherwise.
- */
-bool cancel_work_sync(struct work_struct *work)
-{
-       return __cancel_work_timer(work, false);
-}
-EXPORT_SYMBOL_GPL(cancel_work_sync);
-
 /**
  * flush_delayed_work - wait for a dwork to finish executing the last queueing
  * @dwork: the delayed work to flush
@@ -3206,23 +4269,86 @@ bool flush_rcu_work(struct rcu_work *rwork)
 }
 EXPORT_SYMBOL(flush_rcu_work);
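
Editor's note: rcu_work combines an RCU grace period with process-context
execution. A hedged sketch of freeing an object after a grace period (struct
my_obj and its free_rwork member are assumed):

	static void my_free_workfn(struct work_struct *work)
	{
		struct my_obj *obj = container_of(to_rcu_work(work),
						  struct my_obj, free_rwork);
		kfree(obj);
	}

	INIT_RCU_WORK(&obj->free_rwork, my_free_workfn);
	/* my_free_workfn() runs from a worker after a full RCU grace period */
	queue_rcu_work(system_wq, &obj->free_rwork);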
 
-static bool __cancel_work(struct work_struct *work, bool is_dwork)
+static bool __cancel_work(struct work_struct *work, u32 cflags)
 {
-       unsigned long flags;
+       unsigned long irq_flags;
        int ret;
 
        do {
-               ret = try_to_grab_pending(work, is_dwork, &flags);
+               ret = try_to_grab_pending(work, cflags, &irq_flags);
        } while (unlikely(ret == -EAGAIN));
 
        if (unlikely(ret < 0))
                return false;
 
-       set_work_pool_and_clear_pending(work, get_work_pool_id(work));
-       local_irq_restore(flags);
+       set_work_pool_and_clear_pending(work, get_work_pool_id(work), 0);
+       local_irq_restore(irq_flags);
+       return ret;
+}
+
+static bool __cancel_work_sync(struct work_struct *work, u32 cflags)
+{
+       unsigned long irq_flags;
+       bool ret;
+
+       /* claim @work and tell other tasks trying to grab @work to back off */
+       ret = work_grab_pending(work, cflags, &irq_flags);
+       mark_work_canceling(work);
+       local_irq_restore(irq_flags);
+
+       /*
+        * Skip __flush_work() during early boot when we know that @work isn't
+        * executing. This allows canceling during early boot.
+        */
+       if (wq_online)
+               __flush_work(work, true);
+
+       /*
+        * smp_mb() at the end of set_work_pool_and_clear_pending() is paired
+        * with prepare_to_wait() above so that either waitqueue_active() is
+        * visible here or !work_is_canceling() is visible there.
+        */
+       set_work_pool_and_clear_pending(work, WORK_OFFQ_POOL_NONE, 0);
+
+       if (waitqueue_active(&wq_cancel_waitq))
+               __wake_up(&wq_cancel_waitq, TASK_NORMAL, 1, work);
+
        return ret;
 }
 
+/*
+ * See cancel_delayed_work()
+ */
+bool cancel_work(struct work_struct *work)
+{
+       return __cancel_work(work, 0);
+}
+EXPORT_SYMBOL(cancel_work);
+
+/**
+ * cancel_work_sync - cancel a work and wait for it to finish
+ * @work: the work to cancel
+ *
+ * Cancel @work and wait for its execution to finish.  This function
+ * can be used even if the work re-queues itself or migrates to
+ * another workqueue.  On return from this function, @work is
+ * guaranteed to be not pending or executing on any CPU.
+ *
+ * cancel_work_sync(&delayed_work->work) must not be used for
+ * delayed_work's.  Use cancel_delayed_work_sync() instead.
+ *
+ * The caller must ensure that the workqueue on which @work was last
+ * queued can't be destroyed before this function returns.
+ *
+ * Return:
+ * %true if @work was pending, %false otherwise.
+ */
+bool cancel_work_sync(struct work_struct *work)
+{
+       return __cancel_work_sync(work, 0);
+}
+EXPORT_SYMBOL_GPL(cancel_work_sync);
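
Editor's note: the usual teardown ordering, sketched with assumed names —
quiesce the source of new queueings first, then cancel synchronously:

	disable_irq(dev->irq);		/* no new queueings after this */
	cancel_work_sync(&dev->irq_work);
	/* irq_work is now neither pending nor running on any CPU */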
+
 /**
  * cancel_delayed_work - cancel a delayed work
  * @dwork: delayed_work to cancel
@@ -3241,7 +4367,7 @@ static bool __cancel_work(struct work_struct *work, bool is_dwork)
  */
 bool cancel_delayed_work(struct delayed_work *dwork)
 {
-       return __cancel_work(&dwork->work, true);
+       return __cancel_work(&dwork->work, WORK_CANCEL_DELAYED);
 }
 EXPORT_SYMBOL(cancel_delayed_work);
 
@@ -3256,7 +4382,7 @@ EXPORT_SYMBOL(cancel_delayed_work);
  */
 bool cancel_delayed_work_sync(struct delayed_work *dwork)
 {
-       return __cancel_work_timer(&dwork->work, true);
+       return __cancel_work_sync(&dwork->work, WORK_CANCEL_DELAYED);
 }
 EXPORT_SYMBOL(cancel_delayed_work_sync);
 
@@ -3280,7 +4406,7 @@ int schedule_on_each_cpu(work_func_t func)
        if (!works)
                return -ENOMEM;
 
-       get_online_cpus();
+       cpus_read_lock();
 
        for_each_online_cpu(cpu) {
                struct work_struct *work = per_cpu_ptr(works, cpu);
@@ -3292,7 +4418,7 @@ int schedule_on_each_cpu(work_func_t func)
        for_each_online_cpu(cpu)
                flush_work(per_cpu_ptr(works, cpu));
 
-       put_online_cpus();
+       cpus_read_unlock();
        free_percpu(works);
        return 0;
 }
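
Editor's note: schedule_on_each_cpu() runs @func from process context on every
online CPU and sleeps until all of them have finished. A minimal sketch
(my_counter is an assumed per-CPU variable):

	static DEFINE_PER_CPU(unsigned long, my_counter);

	static void bump_counter(struct work_struct *work)
	{
		this_cpu_inc(my_counter);	/* runs once on each online CPU */
	}

	/* sleeps until every online CPU has executed bump_counter() */
	int ret = schedule_on_each_cpu(bump_counter);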
@@ -3333,30 +4459,33 @@ void free_workqueue_attrs(struct workqueue_attrs *attrs)
 {
        if (attrs) {
                free_cpumask_var(attrs->cpumask);
+               free_cpumask_var(attrs->__pod_cpumask);
                kfree(attrs);
        }
 }
 
 /**
  * alloc_workqueue_attrs - allocate a workqueue_attrs
- * @gfp_mask: allocation mask to use
  *
  * Allocate a new workqueue_attrs, initialize with default settings and
  * return it.
  *
  * Return: The allocated new workqueue_attr on success. %NULL on failure.
  */
-struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask)
+struct workqueue_attrs *alloc_workqueue_attrs(void)
 {
        struct workqueue_attrs *attrs;
 
-       attrs = kzalloc(sizeof(*attrs), gfp_mask);
+       attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
        if (!attrs)
                goto fail;
-       if (!alloc_cpumask_var(&attrs->cpumask, gfp_mask))
+       if (!alloc_cpumask_var(&attrs->cpumask, GFP_KERNEL))
+               goto fail;
+       if (!alloc_cpumask_var(&attrs->__pod_cpumask, GFP_KERNEL))
                goto fail;
 
        cpumask_copy(attrs->cpumask, cpu_possible_mask);
+       attrs->affn_scope = WQ_AFFN_DFL;
        return attrs;
 fail:
        free_workqueue_attrs(attrs);
@@ -3368,12 +4497,26 @@ static void copy_workqueue_attrs(struct workqueue_attrs *to,
 {
        to->nice = from->nice;
        cpumask_copy(to->cpumask, from->cpumask);
+       cpumask_copy(to->__pod_cpumask, from->__pod_cpumask);
+       to->affn_strict = from->affn_strict;
+
        /*
-        * Unlike hash and equality test, this function doesn't ignore
-        * ->no_numa as it is used for both pool and wq attrs.  Instead,
-        * get_unbound_pool() explicitly clears ->no_numa after copying.
+        * Unlike hash and equality test, copying shouldn't ignore wq-only
+        * fields as copying is used for both pool and wq attrs. Instead,
+        * get_unbound_pool() explicitly clears the fields.
         */
-       to->no_numa = from->no_numa;
+       to->affn_scope = from->affn_scope;
+       to->ordered = from->ordered;
+}
+
+/*
+ * Some attrs fields are workqueue-only. Clear them for worker_pool's. See the
+ * comments in 'struct workqueue_attrs' definition.
+ */
+static void wqattrs_clear_for_pool(struct workqueue_attrs *attrs)
+{
+       attrs->affn_scope = WQ_AFFN_NR_TYPES;
+       attrs->ordered = false;
 }
 
 /* hash value of the content of @attr */
@@ -3384,6 +4527,9 @@ static u32 wqattrs_hash(const struct workqueue_attrs *attrs)
        hash = jhash_1word(attrs->nice, hash);
        hash = jhash(cpumask_bits(attrs->cpumask),
                     BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
+       hash = jhash(cpumask_bits(attrs->__pod_cpumask),
+                    BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
+       hash = jhash_1word(attrs->affn_strict, hash);
        return hash;
 }
 
@@ -3395,9 +4541,57 @@ static bool wqattrs_equal(const struct workqueue_attrs *a,
                return false;
        if (!cpumask_equal(a->cpumask, b->cpumask))
                return false;
+       if (!cpumask_equal(a->__pod_cpumask, b->__pod_cpumask))
+               return false;
+       if (a->affn_strict != b->affn_strict)
+               return false;
        return true;
 }
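
Editor's note: these attrs are what built-in users allocate and apply to
unbound workqueues. A hedged sketch (my_unbound_wq is assumed, and
apply_workqueue_attrs() is kernel-internal, not exported to modules):

	struct workqueue_attrs *attrs;
	int ret = -ENOMEM;

	attrs = alloc_workqueue_attrs();
	if (attrs) {
		attrs->nice = -10;	/* higher-priority unbound workers */
		ret = apply_workqueue_attrs(my_unbound_wq, attrs);
		free_workqueue_attrs(attrs);
	}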
 
+/* Update @attrs with actually available CPUs */
+static void wqattrs_actualize_cpumask(struct workqueue_attrs *attrs,
+                                     const cpumask_t *unbound_cpumask)
+{
+       /*
+        * Calculate the effective CPU mask of @attrs given @unbound_cpumask. If
+        * @attrs->cpumask doesn't overlap with @unbound_cpumask, we fallback to
+        * @unbound_cpumask.
+        */
+       cpumask_and(attrs->cpumask, attrs->cpumask, unbound_cpumask);
+       if (unlikely(cpumask_empty(attrs->cpumask)))
+               cpumask_copy(attrs->cpumask, unbound_cpumask);
+}
+
+/* find wq_pod_type to use for @attrs */
+static const struct wq_pod_type *
+wqattrs_pod_type(const struct workqueue_attrs *attrs)
+{
+       enum wq_affn_scope scope;
+       struct wq_pod_type *pt;
+
+       /* to synchronize access to wq_affn_dfl */
+       lockdep_assert_held(&wq_pool_mutex);
+
+       if (attrs->affn_scope == WQ_AFFN_DFL)
+               scope = wq_affn_dfl;
+       else
+               scope = attrs->affn_scope;
+
+       pt = &wq_pod_types[scope];
+
+       if (!WARN_ON_ONCE(attrs->affn_scope == WQ_AFFN_NR_TYPES) &&
+           likely(pt->nr_pods))
+               return pt;
+
+       /*
+        * Before workqueue_init_topology(), only SYSTEM is available which is
+        * initialized in workqueue_init_early().
+        */
+       pt = &wq_pod_types[WQ_AFFN_SYSTEM];
+       BUG_ON(!pt->nr_pods);
+       return pt;
+}
+
 /**
  * init_worker_pool - initialize a newly zalloc'd worker_pool
  * @pool: worker_pool to initialize
@@ -3410,7 +4604,7 @@ static bool wqattrs_equal(const struct workqueue_attrs *a,
  */
 static int init_worker_pool(struct worker_pool *pool)
 {
-       spin_lock_init(&pool->lock);
+       raw_spin_lock_init(&pool->lock);
        pool->id = -1;
        pool->cpu = -1;
        pool->node = NUMA_NO_NODE;
@@ -3421,19 +4615,24 @@ static int init_worker_pool(struct worker_pool *pool)
        hash_init(pool->busy_hash);
 
        timer_setup(&pool->idle_timer, idle_worker_timeout, TIMER_DEFERRABLE);
+       INIT_WORK(&pool->idle_cull_work, idle_cull_fn);
 
        timer_setup(&pool->mayday_timer, pool_mayday_timeout, 0);
 
        INIT_LIST_HEAD(&pool->workers);
+       INIT_LIST_HEAD(&pool->dying_workers);
 
        ida_init(&pool->worker_ida);
        INIT_HLIST_NODE(&pool->hash_node);
        pool->refcnt = 1;
 
        /* shouldn't fail above this point */
-       pool->attrs = alloc_workqueue_attrs(GFP_KERNEL);
+       pool->attrs = alloc_workqueue_attrs();
        if (!pool->attrs)
                return -ENOMEM;
+
+       wqattrs_clear_for_pool(pool->attrs);
+
        return 0;
 }
 
@@ -3475,19 +4674,69 @@ static void wq_free_lockdep(struct workqueue_struct *wq)
 }
 #endif
 
+static void free_node_nr_active(struct wq_node_nr_active **nna_ar)
+{
+       int node;
+
+       for_each_node(node) {
+               kfree(nna_ar[node]);
+               nna_ar[node] = NULL;
+       }
+
+       kfree(nna_ar[nr_node_ids]);
+       nna_ar[nr_node_ids] = NULL;
+}
+
+static void init_node_nr_active(struct wq_node_nr_active *nna)
+{
+       nna->max = WQ_DFL_MIN_ACTIVE;
+       atomic_set(&nna->nr, 0);
+       raw_spin_lock_init(&nna->lock);
+       INIT_LIST_HEAD(&nna->pending_pwqs);
+}
+
+/*
+ * Each node's nr_active counter will be accessed mostly from its own node and
+ * should be allocated in the node.
+ */
+static int alloc_node_nr_active(struct wq_node_nr_active **nna_ar)
+{
+       struct wq_node_nr_active *nna;
+       int node;
+
+       for_each_node(node) {
+               nna = kzalloc_node(sizeof(*nna), GFP_KERNEL, node);
+               if (!nna)
+                       goto err_free;
+               init_node_nr_active(nna);
+               nna_ar[node] = nna;
+       }
+
+       /* [nr_node_ids] is used as the fallback */
+       nna = kzalloc_node(sizeof(*nna), GFP_KERNEL, NUMA_NO_NODE);
+       if (!nna)
+               goto err_free;
+       init_node_nr_active(nna);
+       nna_ar[nr_node_ids] = nna;
+
+       return 0;
+
+err_free:
+       free_node_nr_active(nna_ar);
+       return -ENOMEM;
+}
+
 static void rcu_free_wq(struct rcu_head *rcu)
 {
        struct workqueue_struct *wq =
                container_of(rcu, struct workqueue_struct, rcu);
 
-       wq_free_lockdep(wq);
-
-       if (!(wq->flags & WQ_UNBOUND))
-               free_percpu(wq->cpu_pwqs);
-       else
-               free_workqueue_attrs(wq->unbound_attrs);
+       if (wq->flags & WQ_UNBOUND)
+               free_node_nr_active(wq->node_nr_active);
 
-       kfree(wq->rescuer);
+       wq_free_lockdep(wq);
+       free_percpu(wq->cpu_pwq);
+       free_workqueue_attrs(wq->unbound_attrs);
        kfree(wq);
 }
 
@@ -3515,6 +4764,7 @@ static void put_unbound_pool(struct worker_pool *pool)
 {
        DECLARE_COMPLETION_ONSTACK(detach_completion);
        struct worker *worker;
+       LIST_HEAD(cull_list);
 
        lockdep_assert_held(&wq_pool_mutex);
 
@@ -3535,19 +4785,38 @@ static void put_unbound_pool(struct worker_pool *pool)
         * Become the manager and destroy all workers.  This prevents
         * @pool's workers from blocking on attach_mutex.  We're the last
         * manager and @pool gets freed with the flag set.
+        *
+        * Having a concurrent manager is quite unlikely to happen as we can
+        * only get here with
+        *   pwq->refcnt == pool->refcnt == 0
+        * which implies no work queued to the pool, which implies no worker can
+        * become the manager. However, a worker could have taken the role of
+        * manager before the refcnts dropped to 0, since maybe_create_worker()
+        * drops pool->lock.
         */
-       spin_lock_irq(&pool->lock);
-       wait_event_lock_irq(wq_manager_wait,
-                           !(pool->flags & POOL_MANAGER_ACTIVE), pool->lock);
-       pool->flags |= POOL_MANAGER_ACTIVE;
+       while (true) {
+               rcuwait_wait_event(&manager_wait,
+                                  !(pool->flags & POOL_MANAGER_ACTIVE),
+                                  TASK_UNINTERRUPTIBLE);
+
+               mutex_lock(&wq_pool_attach_mutex);
+               raw_spin_lock_irq(&pool->lock);
+               if (!(pool->flags & POOL_MANAGER_ACTIVE)) {
+                       pool->flags |= POOL_MANAGER_ACTIVE;
+                       break;
+               }
+               raw_spin_unlock_irq(&pool->lock);
+               mutex_unlock(&wq_pool_attach_mutex);
+       }
 
        while ((worker = first_idle_worker(pool)))
-               destroy_worker(worker);
+               set_worker_dying(worker, &cull_list);
        WARN_ON(pool->nr_workers || pool->nr_idle);
-       spin_unlock_irq(&pool->lock);
+       raw_spin_unlock_irq(&pool->lock);
 
-       mutex_lock(&wq_pool_attach_mutex);
-       if (!list_empty(&pool->workers))
+       wake_dying_workers(&cull_list);
+
+       if (!list_empty(&pool->workers) || !list_empty(&pool->dying_workers))
                pool->detach_completion = &detach_completion;
        mutex_unlock(&wq_pool_attach_mutex);
 
@@ -3556,6 +4825,7 @@ static void put_unbound_pool(struct worker_pool *pool)
 
        /* shut down the timers */
        del_timer_sync(&pool->idle_timer);
+       cancel_work_sync(&pool->idle_cull_work);
        del_timer_sync(&pool->mayday_timer);
 
        /* RCU protected to allow dereferences from get_work_pool() */
@@ -3578,10 +4848,10 @@ static void put_unbound_pool(struct worker_pool *pool)
  */
 static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
 {
+       struct wq_pod_type *pt = &wq_pod_types[WQ_AFFN_NUMA];
        u32 hash = wqattrs_hash(attrs);
        struct worker_pool *pool;
-       int node;
-       int target_node = NUMA_NO_NODE;
+       int pod, node = NUMA_NO_NODE;
 
        lockdep_assert_held(&wq_pool_mutex);
 
@@ -3593,31 +4863,22 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
                }
        }
 
-       /* if cpumask is contained inside a NUMA node, we belong to that node */
-       if (wq_numa_enabled) {
-               for_each_node(node) {
-                       if (cpumask_subset(attrs->cpumask,
-                                          wq_numa_possible_cpumask[node])) {
-                               target_node = node;
-                               break;
-                       }
+       /* If __pod_cpumask is contained inside a NUMA pod, that's our node */
+       for (pod = 0; pod < pt->nr_pods; pod++) {
+               if (cpumask_subset(attrs->__pod_cpumask, pt->pod_cpus[pod])) {
+                       node = pt->pod_node[pod];
+                       break;
                }
        }
 
        /* nope, create a new one */
-       pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, target_node);
+       pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, node);
        if (!pool || init_worker_pool(pool) < 0)
                goto fail;
 
-       lockdep_set_subclass(&pool->lock, 1);   /* see put_pwq() */
+       pool->node = node;
        copy_workqueue_attrs(pool->attrs, attrs);
-       pool->node = target_node;
-
-       /*
-        * no_numa isn't a worker_pool attribute, always clear it.  See
-        * 'struct workqueue_attrs' comments for detail.
-        */
-       pool->attrs->no_numa = false;
+       wqattrs_clear_for_pool(pool->attrs);
 
        if (worker_pool_assign_id(pool) < 0)
                goto fail;
@@ -3643,28 +4904,49 @@ static void rcu_free_pwq(struct rcu_head *rcu)
 }
 
 /*
- * Scheduled on system_wq by put_pwq() when an unbound pwq hits zero refcnt
- * and needs to be destroyed.
+ * Scheduled on pwq_release_worker by put_pwq() when an unbound pwq hits zero
+ * refcnt and needs to be destroyed.
  */
-static void pwq_unbound_release_workfn(struct work_struct *work)
+static void pwq_release_workfn(struct kthread_work *work)
 {
        struct pool_workqueue *pwq = container_of(work, struct pool_workqueue,
-                                                 unbound_release_work);
+                                                 release_work);
        struct workqueue_struct *wq = pwq->wq;
        struct worker_pool *pool = pwq->pool;
-       bool is_last;
+       bool is_last = false;
 
-       if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
-               return;
+       /*
+        * When @pwq is not linked, it doesn't hold any reference to the
+        * @wq, and @wq is invalid to access.
+        */
+       if (!list_empty(&pwq->pwqs_node)) {
+               mutex_lock(&wq->mutex);
+               list_del_rcu(&pwq->pwqs_node);
+               is_last = list_empty(&wq->pwqs);
 
-       mutex_lock(&wq->mutex);
-       list_del_rcu(&pwq->pwqs_node);
-       is_last = list_empty(&wq->pwqs);
-       mutex_unlock(&wq->mutex);
+               /*
+                * For ordered workqueue with a plugged dfl_pwq, restart it now.
+                */
+               if (!is_last && (wq->flags & __WQ_ORDERED))
+                       unplug_oldest_pwq(wq);
 
-       mutex_lock(&wq_pool_mutex);
-       put_unbound_pool(pool);
-       mutex_unlock(&wq_pool_mutex);
+               mutex_unlock(&wq->mutex);
+       }
+
+       if (wq->flags & WQ_UNBOUND) {
+               mutex_lock(&wq_pool_mutex);
+               put_unbound_pool(pool);
+               mutex_unlock(&wq_pool_mutex);
+       }
+
+       if (!list_empty(&pwq->pending_node)) {
+               struct wq_node_nr_active *nna =
+                       wq_node_nr_active(pwq->wq, pwq->pool->node);
+
+               raw_spin_lock_irq(&nna->lock);
+               list_del_init(&pwq->pending_node);
+               raw_spin_unlock_irq(&nna->lock);
+       }
 
        call_rcu(&pwq->rcu, rcu_free_pwq);
 
@@ -3678,59 +4960,11 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
        }
 }
 
-/**
- * pwq_adjust_max_active - update a pwq's max_active to the current setting
- * @pwq: target pool_workqueue
- *
- * If @pwq isn't freezing, set @pwq->max_active to the associated
- * workqueue's saved_max_active and activate delayed work items
- * accordingly.  If @pwq is freezing, clear @pwq->max_active to zero.
- */
-static void pwq_adjust_max_active(struct pool_workqueue *pwq)
-{
-       struct workqueue_struct *wq = pwq->wq;
-       bool freezable = wq->flags & WQ_FREEZABLE;
-       unsigned long flags;
-
-       /* for @wq->saved_max_active */
-       lockdep_assert_held(&wq->mutex);
-
-       /* fast exit for non-freezable wqs */
-       if (!freezable && pwq->max_active == wq->saved_max_active)
-               return;
-
-       /* this function can be called during early boot w/ irq disabled */
-       spin_lock_irqsave(&pwq->pool->lock, flags);
-
-       /*
-        * During [un]freezing, the caller is responsible for ensuring that
-        * this function is called at least once after @workqueue_freezing
-        * is updated and visible.
-        */
-       if (!freezable || !workqueue_freezing) {
-               pwq->max_active = wq->saved_max_active;
-
-               while (!list_empty(&pwq->delayed_works) &&
-                      pwq->nr_active < pwq->max_active)
-                       pwq_activate_first_delayed(pwq);
-
-               /*
-                * Need to kick a worker after thawed or an unbound wq's
-                * max_active is bumped.  It's a slow path.  Do it always.
-                */
-               wake_up_worker(pwq->pool);
-       } else {
-               pwq->max_active = 0;
-       }
-
-       spin_unlock_irqrestore(&pwq->pool->lock, flags);
-}
-
-/* initialize newly alloced @pwq which is associated with @wq and @pool */
+/* initialize newly allocated @pwq which is associated with @wq and @pool */
 static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
                     struct worker_pool *pool)
 {
-       BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
+       BUG_ON((unsigned long)pwq & ~WORK_STRUCT_PWQ_MASK);
 
        memset(pwq, 0, sizeof(*pwq));
 
@@ -3738,10 +4972,11 @@ static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
        pwq->wq = wq;
        pwq->flush_color = -1;
        pwq->refcnt = 1;
-       INIT_LIST_HEAD(&pwq->delayed_works);
+       INIT_LIST_HEAD(&pwq->inactive_works);
+       INIT_LIST_HEAD(&pwq->pending_node);
        INIT_LIST_HEAD(&pwq->pwqs_node);
        INIT_LIST_HEAD(&pwq->mayday_node);
-       INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn);
+       kthread_init_work(&pwq->release_work, pwq_release_workfn);
 }
 
 /* sync @pwq with the current state of its associated wq and link it */
@@ -3758,11 +4993,8 @@ static void link_pwq(struct pool_workqueue *pwq)
        /* set the matching work_color */
        pwq->work_color = wq->work_color;
 
-       /* sync max_active to the current setting */
-       pwq_adjust_max_active(pwq);
-
        /* link in @pwq */
-       list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
+       list_add_tail_rcu(&pwq->pwqs_node, &wq->pwqs);
 }
 
 /* obtain a pool matching @attr and create a pwq associating the pool and @wq */
@@ -3789,62 +5021,51 @@ static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
 }
 
 /**
- * wq_calc_node_cpumask - calculate a wq_attrs' cpumask for the specified node
+ * wq_calc_pod_cpumask - calculate a wq_attrs' cpumask for a pod
  * @attrs: the wq_attrs of the default pwq of the target workqueue
- * @node: the target NUMA node
+ * @cpu: the target CPU
  * @cpu_going_down: if >= 0, the CPU to consider as offline
- * @cpumask: outarg, the resulting cpumask
  *
- * Calculate the cpumask a workqueue with @attrs should use on @node.  If
- * @cpu_going_down is >= 0, that cpu is considered offline during
- * calculation.  The result is stored in @cpumask.
+ * Calculate the cpumask a workqueue with @attrs should use on the pod
+ * containing @cpu. If @cpu_going_down is >= 0, that cpu is considered offline
+ * during calculation. The result is stored in @attrs->__pod_cpumask.
  *
- * If NUMA affinity is not enabled, @attrs->cpumask is always used.  If
- * enabled and @node has online CPUs requested by @attrs, the returned
- * cpumask is the intersection of the possible CPUs of @node and
- * @attrs->cpumask.
+ * If pod affinity is not enabled, @attrs->cpumask is always used. If enabled
+ * and the pod has online CPUs requested by @attrs, the returned cpumask is the
+ * intersection of the possible CPUs of the pod and @attrs->cpumask.
  *
- * The caller is responsible for ensuring that the cpumask of @node stays
- * stable.
- *
- * Return: %true if the resulting @cpumask is different from @attrs->cpumask,
- * %false if equal.
+ * The caller is responsible for ensuring that the pod's cpumask stays stable.
  */
-static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node,
-                                int cpu_going_down, cpumask_t *cpumask)
+static void wq_calc_pod_cpumask(struct workqueue_attrs *attrs, int cpu,
+                               int cpu_going_down)
 {
-       if (!wq_numa_enabled || attrs->no_numa)
-               goto use_dfl;
+       const struct wq_pod_type *pt = wqattrs_pod_type(attrs);
+       int pod = pt->cpu_pod[cpu];
 
-       /* does @node have any online CPUs @attrs wants? */
-       cpumask_and(cpumask, cpumask_of_node(node), attrs->cpumask);
+       /* does @pod have any online CPUs @attrs wants? */
+       cpumask_and(attrs->__pod_cpumask, pt->pod_cpus[pod], attrs->cpumask);
+       cpumask_and(attrs->__pod_cpumask, attrs->__pod_cpumask, cpu_online_mask);
        if (cpu_going_down >= 0)
-               cpumask_clear_cpu(cpu_going_down, cpumask);
+               cpumask_clear_cpu(cpu_going_down, attrs->__pod_cpumask);
 
-       if (cpumask_empty(cpumask))
-               goto use_dfl;
+       if (cpumask_empty(attrs->__pod_cpumask)) {
+               cpumask_copy(attrs->__pod_cpumask, attrs->cpumask);
+               return;
+       }
 
-       /* yeap, return possible CPUs in @node that @attrs wants */
-       cpumask_and(cpumask, attrs->cpumask, wq_numa_possible_cpumask[node]);
+       /* yep, return possible CPUs in @pod that @attrs wants */
+       cpumask_and(attrs->__pod_cpumask, attrs->cpumask, pt->pod_cpus[pod]);
 
-       if (cpumask_empty(cpumask)) {
+       if (cpumask_empty(attrs->__pod_cpumask))
                pr_warn_once("WARNING: workqueue cpumask: online intersect > "
                                "possible intersect\n");
-               return false;
-       }
-
-       return !cpumask_equal(cpumask, attrs->cpumask);
-
-use_dfl:
-       cpumask_copy(cpumask, attrs->cpumask);
-       return false;
 }
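
The fallback behaviour above is easiest to see with concrete masks. Below is a
minimal standalone sketch (illustrative only; the helper name and the mask
values are hypothetical, and the real function additionally recomputes the
result against the pod's possible CPUs):

	/* e.g. a pod covering CPUs 0-3 and a wq cpumask of 2-5 yield 2-3 */
	static void example_pod_cpumask(struct cpumask *out,
					const struct cpumask *pod_cpus,
					const struct cpumask *wq_mask)
	{
		cpumask_and(out, pod_cpus, wq_mask);
		cpumask_and(out, out, cpu_online_mask);

		/* no usable CPU in this pod: fall back to the full wq mask */
		if (cpumask_empty(out))
			cpumask_copy(out, wq_mask);
	}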
 
-/* install @pwq into @wq's numa_pwq_tbl[] for @node and return the old pwq */
-static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq,
-                                                  int node,
-                                                  struct pool_workqueue *pwq)
+/* install @pwq into @wq and return the old pwq, @cpu < 0 for dfl_pwq */
+static struct pool_workqueue *install_unbound_pwq(struct workqueue_struct *wq,
+                                       int cpu, struct pool_workqueue *pwq)
 {
+       struct pool_workqueue __rcu **slot = unbound_pwq_slot(wq, cpu);
        struct pool_workqueue *old_pwq;
 
        lockdep_assert_held(&wq_pool_mutex);
@@ -3853,8 +5074,8 @@ static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq,
        /* link_pwq() can handle duplicate calls */
        link_pwq(pwq);
 
-       old_pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
-       rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq);
+       old_pwq = rcu_access_pointer(*slot);
+       rcu_assign_pointer(*slot, pwq);
        return old_pwq;
 }
 
@@ -3871,10 +5092,10 @@ struct apply_wqattrs_ctx {
 static void apply_wqattrs_cleanup(struct apply_wqattrs_ctx *ctx)
 {
        if (ctx) {
-               int node;
+               int cpu;
 
-               for_each_node(node)
-                       put_pwq_unlocked(ctx->pwq_tbl[node]);
+               for_each_possible_cpu(cpu)
+                       put_pwq_unlocked(ctx->pwq_tbl[cpu]);
                put_pwq_unlocked(ctx->dfl_pwq);
 
                free_workqueue_attrs(ctx->attrs);
@@ -3886,107 +5107,98 @@ static void apply_wqattrs_cleanup(struct apply_wqattrs_ctx *ctx)
 /* allocate the attrs and pwqs for later installation */
 static struct apply_wqattrs_ctx *
 apply_wqattrs_prepare(struct workqueue_struct *wq,
-                     const struct workqueue_attrs *attrs)
+                     const struct workqueue_attrs *attrs,
+                     const cpumask_var_t unbound_cpumask)
 {
        struct apply_wqattrs_ctx *ctx;
-       struct workqueue_attrs *new_attrs, *tmp_attrs;
-       int node;
+       struct workqueue_attrs *new_attrs;
+       int cpu;
 
        lockdep_assert_held(&wq_pool_mutex);
 
-       ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_node_ids), GFP_KERNEL);
-
-       new_attrs = alloc_workqueue_attrs(GFP_KERNEL);
-       tmp_attrs = alloc_workqueue_attrs(GFP_KERNEL);
-       if (!ctx || !new_attrs || !tmp_attrs)
-               goto out_free;
+       if (WARN_ON(attrs->affn_scope < 0 ||
+                   attrs->affn_scope >= WQ_AFFN_NR_TYPES))
+               return ERR_PTR(-EINVAL);
 
-       /*
-        * Calculate the attrs of the default pwq.
-        * If the user configured cpumask doesn't overlap with the
-        * wq_unbound_cpumask, we fallback to the wq_unbound_cpumask.
-        */
-       copy_workqueue_attrs(new_attrs, attrs);
-       cpumask_and(new_attrs->cpumask, new_attrs->cpumask, wq_unbound_cpumask);
-       if (unlikely(cpumask_empty(new_attrs->cpumask)))
-               cpumask_copy(new_attrs->cpumask, wq_unbound_cpumask);
+       ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_cpu_ids), GFP_KERNEL);
 
-       /*
-        * We may create multiple pwqs with differing cpumasks.  Make a
-        * copy of @new_attrs which will be modified and used to obtain
-        * pools.
-        */
-       copy_workqueue_attrs(tmp_attrs, new_attrs);
+       new_attrs = alloc_workqueue_attrs();
+       if (!ctx || !new_attrs)
+               goto out_free;
 
        /*
         * If something goes wrong during CPU up/down, we'll fall back to
         * the default pwq covering whole @attrs->cpumask.  Always create
         * it even if we don't use it immediately.
         */
+       copy_workqueue_attrs(new_attrs, attrs);
+       wqattrs_actualize_cpumask(new_attrs, unbound_cpumask);
+       cpumask_copy(new_attrs->__pod_cpumask, new_attrs->cpumask);
        ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs);
        if (!ctx->dfl_pwq)
                goto out_free;
 
-       for_each_node(node) {
-               if (wq_calc_node_cpumask(new_attrs, node, -1, tmp_attrs->cpumask)) {
-                       ctx->pwq_tbl[node] = alloc_unbound_pwq(wq, tmp_attrs);
-                       if (!ctx->pwq_tbl[node])
-                               goto out_free;
-               } else {
+       for_each_possible_cpu(cpu) {
+               if (new_attrs->ordered) {
                        ctx->dfl_pwq->refcnt++;
-                       ctx->pwq_tbl[node] = ctx->dfl_pwq;
+                       ctx->pwq_tbl[cpu] = ctx->dfl_pwq;
+               } else {
+                       wq_calc_pod_cpumask(new_attrs, cpu, -1);
+                       ctx->pwq_tbl[cpu] = alloc_unbound_pwq(wq, new_attrs);
+                       if (!ctx->pwq_tbl[cpu])
+                               goto out_free;
                }
        }
 
        /* save the user configured attrs and sanitize it. */
        copy_workqueue_attrs(new_attrs, attrs);
        cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask);
+       cpumask_copy(new_attrs->__pod_cpumask, new_attrs->cpumask);
        ctx->attrs = new_attrs;
 
+       /*
+        * For initialized ordered workqueues, there should only be one pwq
+        * (dfl_pwq). Set the plugged flag of ctx->dfl_pwq to suspend execution
+        * of newly queued work items until execution of older work items in
+        * the old pwqs has completed.
+        */
+       if ((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs))
+               ctx->dfl_pwq->plugged = true;
+
        ctx->wq = wq;
-       free_workqueue_attrs(tmp_attrs);
        return ctx;
 
 out_free:
-       free_workqueue_attrs(tmp_attrs);
        free_workqueue_attrs(new_attrs);
        apply_wqattrs_cleanup(ctx);
-       return NULL;
+       return ERR_PTR(-ENOMEM);
 }
 
 /* set attrs and install prepared pwqs, @ctx points to old pwqs on return */
 static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx)
 {
-       int node;
+       int cpu;
 
        /* all pwqs have been created successfully, let's install'em */
        mutex_lock(&ctx->wq->mutex);
 
        copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs);
 
-       /* save the previous pwq and install the new one */
-       for_each_node(node)
-               ctx->pwq_tbl[node] = numa_pwq_tbl_install(ctx->wq, node,
-                                                         ctx->pwq_tbl[node]);
-
-       /* @dfl_pwq might not have been used, ensure it's linked */
-       link_pwq(ctx->dfl_pwq);
-       swap(ctx->wq->dfl_pwq, ctx->dfl_pwq);
+       /* save the previous pwqs and install the new ones */
+       for_each_possible_cpu(cpu)
+               ctx->pwq_tbl[cpu] = install_unbound_pwq(ctx->wq, cpu,
+                                                       ctx->pwq_tbl[cpu]);
+       ctx->dfl_pwq = install_unbound_pwq(ctx->wq, -1, ctx->dfl_pwq);
 
-       mutex_unlock(&ctx->wq->mutex);
-}
+       /* update node_nr_active->max */
+       wq_update_node_max_active(ctx->wq, -1);
 
-static void apply_wqattrs_lock(void)
-{
-       /* CPUs should stay stable across pwq creations and installations */
-       get_online_cpus();
-       mutex_lock(&wq_pool_mutex);
-}
+       /* rescuer needs to respect wq cpumask changes */
+       if (ctx->wq->rescuer)
+               set_cpus_allowed_ptr(ctx->wq->rescuer->task,
+                                    unbound_effective_cpumask(ctx->wq));
 
-static void apply_wqattrs_unlock(void)
-{
-       mutex_unlock(&wq_pool_mutex);
-       put_online_cpus();
+       mutex_unlock(&ctx->wq->mutex);
 }
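
The plugged dfl_pwq set up in apply_wqattrs_prepare() above preserves the
one-at-a-time execution guarantee of ordered workqueues while attributes
change. A minimal usage sketch (names are hypothetical, not from this diff):

	static struct workqueue_struct *example_ordered_wq;

	static int __init example_ordered_init(void)
	{
		/* executes at most one work item at a time, in queueing order */
		example_ordered_wq = alloc_ordered_workqueue("example_ordered", 0);
		return example_ordered_wq ? 0 : -ENOMEM;
	}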
 
 static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
@@ -3998,17 +5210,9 @@ static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
        if (WARN_ON(!(wq->flags & WQ_UNBOUND)))
                return -EINVAL;
 
-       /* creating multiple pwqs breaks ordering guarantee */
-       if (!list_empty(&wq->pwqs)) {
-               if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
-                       return -EINVAL;
-
-               wq->flags &= ~__WQ_ORDERED;
-       }
-
-       ctx = apply_wqattrs_prepare(wq, attrs);
-       if (!ctx)
-               return -ENOMEM;
+       ctx = apply_wqattrs_prepare(wq, attrs, wq_unbound_cpumask);
+       if (IS_ERR(ctx))
+               return PTR_ERR(ctx);
 
        /* the ctx has been prepared successfully, let's commit it */
        apply_wqattrs_commit(ctx);
@@ -4022,15 +5226,16 @@ static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
  * @wq: the target workqueue
  * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
  *
- * Apply @attrs to an unbound workqueue @wq.  Unless disabled, on NUMA
- * machines, this function maps a separate pwq to each NUMA node with
- * possibles CPUs in @attrs->cpumask so that work items are affine to the
- * NUMA node it was issued on.  Older pwqs are released as in-flight work
- * items finish.  Note that a work item which repeatedly requeues itself
- * back-to-back will stay on its current pwq.
+ * Apply @attrs to an unbound workqueue @wq. Unless disabled, this function maps
+ * a separate pwq to each CPU pod with possible CPUs in @attrs->cpumask so that
+ * work items are affine to the pod they were issued on. Older pwqs are released as
+ * in-flight work items finish. Note that a work item which repeatedly requeues
+ * itself back-to-back will stay on its current pwq.
  *
  * Performs GFP_KERNEL allocations.
  *
+ * Assumes caller has CPU hotplug read exclusion, i.e. cpus_read_lock().
+ *
  * Return: 0 on success and -errno on failure.
  */
 int apply_workqueue_attrs(struct workqueue_struct *wq,
@@ -4038,49 +5243,47 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
 {
        int ret;
 
-       apply_wqattrs_lock();
+       lockdep_assert_cpus_held();
+
+       mutex_lock(&wq_pool_mutex);
        ret = apply_workqueue_attrs_locked(wq, attrs);
-       apply_wqattrs_unlock();
+       mutex_unlock(&wq_pool_mutex);
 
        return ret;
 }
-EXPORT_SYMBOL_GPL(apply_workqueue_attrs);
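
With the export dropped, apply_workqueue_attrs() is reachable from built-in
code only. A hedged caller sketch (the function name and CPU/nice choices are
hypothetical), honoring the new requirement that the caller holds the CPU
hotplug read lock:

	static int example_pin_wq(struct workqueue_struct *wq)
	{
		struct workqueue_attrs *attrs;
		int ret;

		attrs = alloc_workqueue_attrs();
		if (!attrs)
			return -ENOMEM;

		attrs->nice = -5;
		cpumask_clear(attrs->cpumask);
		cpumask_set_cpu(0, attrs->cpumask);
		cpumask_set_cpu(1, attrs->cpumask);

		cpus_read_lock();	/* hotplug exclusion is now the caller's job */
		ret = apply_workqueue_attrs(wq, attrs);
		cpus_read_unlock();

		free_workqueue_attrs(attrs);
		return ret;
	}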
 
 /**
- * wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug
+ * wq_update_pod - update pod affinity of a wq for CPU hot[un]plug
  * @wq: the target workqueue
- * @cpu: the CPU coming up or going down
+ * @cpu: the CPU to update pool association for
+ * @hotplug_cpu: the CPU coming up or going down
  * @online: whether @cpu is coming up or going down
  *
  * This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and
- * %CPU_DOWN_FAILED.  @cpu is being hot[un]plugged, update NUMA affinity of
+ * %CPU_DOWN_FAILED.  @cpu is being hot[un]plugged, update pod affinity of
  * @wq accordingly.
  *
- * If NUMA affinity can't be adjusted due to memory allocation failure, it
- * falls back to @wq->dfl_pwq which may not be optimal but is always
- * correct.
- * Note that when the last allowed CPU of a NUMA node goes offline for a
- * workqueue with a cpumask spanning multiple nodes, the workers which were
- * already executing the work items for the workqueue will lose their CPU
- * affinity and may execute on any CPU.  This is similar to how per-cpu
- * workqueues behave on CPU_DOWN.  If a workqueue user wants strict
- * affinity, it's the user's responsibility to flush the work item from
- * CPU_DOWN_PREPARE.
+ * If pod affinity can't be adjusted due to memory allocation failure, it falls
+ * back to @wq->dfl_pwq which may not be optimal but is always correct.
+ *
+ * Note that when the last allowed CPU of a pod goes offline for a workqueue
+ * with a cpumask spanning multiple pods, the workers which were already
+ * executing the work items for the workqueue will lose their CPU affinity and
+ * may execute on any CPU. This is similar to how per-cpu workqueues behave on
+ * CPU_DOWN. If a workqueue user wants strict affinity, it's the user's
+ * responsibility to flush the work item from CPU_DOWN_PREPARE.
  */
-static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
-                                  bool online)
+static void wq_update_pod(struct workqueue_struct *wq, int cpu,
+                         int hotplug_cpu, bool online)
 {
-       int node = cpu_to_node(cpu);
-       int cpu_off = online ? -1 : cpu;
+       int off_cpu = online ? -1 : hotplug_cpu;
        struct pool_workqueue *old_pwq = NULL, *pwq;
        struct workqueue_attrs *target_attrs;
-       cpumask_t *cpumask;
 
        lockdep_assert_held(&wq_pool_mutex);
 
-       if (!wq_numa_enabled || !(wq->flags & WQ_UNBOUND) ||
-           wq->unbound_attrs->no_numa)
+       if (!(wq->flags & WQ_UNBOUND) || wq->unbound_attrs->ordered)
                return;
 
        /*
@@ -4088,44 +5291,36 @@ static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
         * Let's use a preallocated one.  The following buf is protected by
         * CPU hotplug exclusion.
         */
-       target_attrs = wq_update_unbound_numa_attrs_buf;
-       cpumask = target_attrs->cpumask;
+       target_attrs = wq_update_pod_attrs_buf;
 
        copy_workqueue_attrs(target_attrs, wq->unbound_attrs);
-       pwq = unbound_pwq_by_node(wq, node);
+       wqattrs_actualize_cpumask(target_attrs, wq_unbound_cpumask);
 
-       /*
-        * Let's determine what needs to be done.  If the target cpumask is
-        * different from the default pwq's, we need to compare it to @pwq's
-        * and create a new one if they don't match.  If the target cpumask
-        * equals the default pwq's, the default pwq should be used.
-        */
-       if (wq_calc_node_cpumask(wq->dfl_pwq->pool->attrs, node, cpu_off, cpumask)) {
-               if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask))
-                       return;
-       } else {
-               goto use_dfl_pwq;
-       }
+       /* nothing to do if the target cpumask matches the current pwq */
+       wq_calc_pod_cpumask(target_attrs, cpu, off_cpu);
+       if (wqattrs_equal(target_attrs, unbound_pwq(wq, cpu)->pool->attrs))
+               return;
 
        /* create a new pwq */
        pwq = alloc_unbound_pwq(wq, target_attrs);
        if (!pwq) {
-               pr_warn("workqueue: allocation failed while updating NUMA affinity of \"%s\"\n",
+               pr_warn("workqueue: allocation failed while updating CPU pod affinity of \"%s\"\n",
                        wq->name);
                goto use_dfl_pwq;
        }
 
        /* Install the new pwq. */
        mutex_lock(&wq->mutex);
-       old_pwq = numa_pwq_tbl_install(wq, node, pwq);
+       old_pwq = install_unbound_pwq(wq, cpu, pwq);
        goto out_unlock;
 
 use_dfl_pwq:
        mutex_lock(&wq->mutex);
-       spin_lock_irq(&wq->dfl_pwq->pool->lock);
-       get_pwq(wq->dfl_pwq);
-       spin_unlock_irq(&wq->dfl_pwq->pool->lock);
-       old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq);
+       pwq = unbound_pwq(wq, -1);
+       raw_spin_lock_irq(&pwq->pool->lock);
+       get_pwq(pwq);
+       raw_spin_unlock_irq(&pwq->pool->lock);
+       old_pwq = install_unbound_pwq(wq, cpu, pwq);
 out_unlock:
        mutex_unlock(&wq->mutex);
        put_pwq_unlocked(old_pwq);
@@ -4136,46 +5331,83 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
        bool highpri = wq->flags & WQ_HIGHPRI;
        int cpu, ret;
 
-       if (!(wq->flags & WQ_UNBOUND)) {
-               wq->cpu_pwqs = alloc_percpu(struct pool_workqueue);
-               if (!wq->cpu_pwqs)
-                       return -ENOMEM;
+       wq->cpu_pwq = alloc_percpu(struct pool_workqueue *);
+       if (!wq->cpu_pwq)
+               goto enomem;
 
+       if (!(wq->flags & WQ_UNBOUND)) {
                for_each_possible_cpu(cpu) {
-                       struct pool_workqueue *pwq =
-                               per_cpu_ptr(wq->cpu_pwqs, cpu);
-                       struct worker_pool *cpu_pools =
-                               per_cpu(cpu_worker_pools, cpu);
+                       struct pool_workqueue **pwq_p;
+                       struct worker_pool __percpu *pools;
+                       struct worker_pool *pool;
+
+                       if (wq->flags & WQ_BH)
+                               pools = bh_worker_pools;
+                       else
+                               pools = cpu_worker_pools;
+
+                       pool = &(per_cpu_ptr(pools, cpu)[highpri]);
+                       pwq_p = per_cpu_ptr(wq->cpu_pwq, cpu);
 
-                       init_pwq(pwq, wq, &cpu_pools[highpri]);
+                       *pwq_p = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL,
+                                                      pool->node);
+                       if (!*pwq_p)
+                               goto enomem;
+
+                       init_pwq(*pwq_p, wq, pool);
 
                        mutex_lock(&wq->mutex);
-                       link_pwq(pwq);
+                       link_pwq(*pwq_p);
                        mutex_unlock(&wq->mutex);
                }
                return 0;
-       } else if (wq->flags & __WQ_ORDERED) {
+       }
+
+       cpus_read_lock();
+       if (wq->flags & __WQ_ORDERED) {
+               struct pool_workqueue *dfl_pwq;
+
                ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
                /* there should only be single pwq for ordering guarantee */
-               WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
-                             wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
+               dfl_pwq = rcu_access_pointer(wq->dfl_pwq);
+               WARN(!ret && (wq->pwqs.next != &dfl_pwq->pwqs_node ||
+                             wq->pwqs.prev != &dfl_pwq->pwqs_node),
                     "ordering guarantee broken for workqueue %s\n", wq->name);
-               return ret;
        } else {
-               return apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
+               ret = apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
+       }
+       cpus_read_unlock();
+
+       /*
+        * For unbound pwqs, flushing pwq_release_worker ensures that
+        * pwq_release_workfn() completes before kfree(wq) is called.
+        */
+       if (ret)
+               kthread_flush_worker(pwq_release_worker);
+
+       return ret;
+
+enomem:
+       if (wq->cpu_pwq) {
+               for_each_possible_cpu(cpu) {
+                       struct pool_workqueue *pwq = *per_cpu_ptr(wq->cpu_pwq, cpu);
+
+                       if (pwq)
+                               kmem_cache_free(pwq_cache, pwq);
+               }
+               free_percpu(wq->cpu_pwq);
+               wq->cpu_pwq = NULL;
        }
+       return -ENOMEM;
 }
 
 static int wq_clamp_max_active(int max_active, unsigned int flags,
                               const char *name)
 {
-       int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
-
-       if (max_active < 1 || max_active > lim)
+       if (max_active < 1 || max_active > WQ_MAX_ACTIVE)
                pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n",
-                       max_active, name, 1, lim);
+                       max_active, name, 1, WQ_MAX_ACTIVE);
 
-       return clamp_val(max_active, 1, lim);
+       return clamp_val(max_active, 1, WQ_MAX_ACTIVE);
 }
 
 /*
@@ -4191,43 +5423,111 @@ static int init_rescuer(struct workqueue_struct *wq)
                return 0;
 
        rescuer = alloc_worker(NUMA_NO_NODE);
-       if (!rescuer)
+       if (!rescuer) {
+               pr_err("workqueue: Failed to allocate a rescuer for wq \"%s\"\n",
+                      wq->name);
                return -ENOMEM;
+       }
 
        rescuer->rescue_wq = wq;
-       rescuer->task = kthread_create(rescuer_thread, rescuer, "%s", wq->name);
-       ret = PTR_ERR_OR_ZERO(rescuer->task);
-       if (ret) {
+       rescuer->task = kthread_create(rescuer_thread, rescuer, "kworker/R-%s", wq->name);
+       if (IS_ERR(rescuer->task)) {
+               ret = PTR_ERR(rescuer->task);
+               pr_err("workqueue: Failed to create a rescuer kthread for wq \"%s\": %pe",
+                      wq->name, ERR_PTR(ret));
                kfree(rescuer);
                return ret;
        }
 
        wq->rescuer = rescuer;
-       kthread_bind_mask(rescuer->task, cpu_possible_mask);
+       if (wq->flags & WQ_UNBOUND)
+               kthread_bind_mask(rescuer->task, wq_unbound_cpumask);
+       else
+               kthread_bind_mask(rescuer->task, cpu_possible_mask);
        wake_up_process(rescuer->task);
 
        return 0;
 }
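
A rescuer only exists for workqueues allocated with WQ_MEM_RECLAIM; after this
change its kthread is named "kworker/R-<wqname>". A sketch of a workqueue that
needs one (names hypothetical):

	static struct workqueue_struct *example_reclaim_wq;

	static int __init example_reclaim_init(void)
	{
		/* the rescuer guarantees forward progress under memory pressure */
		example_reclaim_wq = alloc_workqueue("example_reclaim",
						     WQ_MEM_RECLAIM, 0);
		return example_reclaim_wq ? 0 : -ENOMEM;
	}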
 
+/**
+ * wq_adjust_max_active - update a wq's max_active to the current setting
+ * @wq: target workqueue
+ *
+ * If @wq isn't freezing, set @wq->max_active to the saved_max_active and
+ * activate inactive work items accordingly. If @wq is freezing, clear
+ * @wq->max_active to zero.
+ */
+static void wq_adjust_max_active(struct workqueue_struct *wq)
+{
+       bool activated;
+       int new_max, new_min;
+
+       lockdep_assert_held(&wq->mutex);
+
+       if ((wq->flags & WQ_FREEZABLE) && workqueue_freezing) {
+               new_max = 0;
+               new_min = 0;
+       } else {
+               new_max = wq->saved_max_active;
+               new_min = wq->saved_min_active;
+       }
+
+       if (wq->max_active == new_max && wq->min_active == new_min)
+               return;
+
+       /*
+        * Update @wq->max/min_active and then kick inactive work items if more
+        * active work items are allowed. This doesn't break work item ordering
+        * because new work items are always queued behind existing inactive
+        * work items if there are any.
+        */
+       WRITE_ONCE(wq->max_active, new_max);
+       WRITE_ONCE(wq->min_active, new_min);
+
+       if (wq->flags & WQ_UNBOUND)
+               wq_update_node_max_active(wq, -1);
+
+       if (new_max == 0)
+               return;
+
+       /*
+        * Round-robin through pwq's activating the first inactive work item
+        * until max_active is filled.
+        */
+       do {
+               struct pool_workqueue *pwq;
+
+               activated = false;
+               for_each_pwq(pwq, wq) {
+                       unsigned long irq_flags;
+
+                       /* can be called during early boot w/ irq disabled */
+                       raw_spin_lock_irqsave(&pwq->pool->lock, irq_flags);
+                       if (pwq_activate_first_inactive(pwq, true)) {
+                               activated = true;
+                               kick_pool(pwq->pool);
+                       }
+                       raw_spin_unlock_irqrestore(&pwq->pool->lock, irq_flags);
+               }
+       } while (activated);
+}
+
 __printf(1, 4)
 struct workqueue_struct *alloc_workqueue(const char *fmt,
                                         unsigned int flags,
                                         int max_active, ...)
 {
-       size_t tbl_size = 0;
        va_list args;
        struct workqueue_struct *wq;
-       struct pool_workqueue *pwq;
-
-       /*
-        * Unbound && max_active == 1 used to imply ordered, which is no
-        * longer the case on NUMA machines due to per-node pools.  While
-        * alloc_ordered_workqueue() is the right way to create an ordered
-        * workqueue, keep the previous behavior to avoid subtle breakages
-        * on NUMA.
-        */
-       if ((flags & WQ_UNBOUND) && max_active == 1)
-               flags |= __WQ_ORDERED;
+       size_t wq_size;
+       int name_len;
+
+       if (flags & WQ_BH) {
+               if (WARN_ON_ONCE(flags & ~__WQ_BH_ALLOWS))
+                       return NULL;
+               if (WARN_ON_ONCE(max_active))
+                       return NULL;
+       }
 
        /* see the comment above the definition of WQ_POWER_EFFICIENT */
        if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
@@ -4235,28 +5535,45 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
 
        /* allocate wq and format name */
        if (flags & WQ_UNBOUND)
-               tbl_size = nr_node_ids * sizeof(wq->numa_pwq_tbl[0]);
+               wq_size = struct_size(wq, node_nr_active, nr_node_ids + 1);
+       else
+               wq_size = sizeof(*wq);
 
-       wq = kzalloc(sizeof(*wq) + tbl_size, GFP_KERNEL);
+       wq = kzalloc(wq_size, GFP_KERNEL);
        if (!wq)
                return NULL;
 
        if (flags & WQ_UNBOUND) {
-               wq->unbound_attrs = alloc_workqueue_attrs(GFP_KERNEL);
+               wq->unbound_attrs = alloc_workqueue_attrs();
                if (!wq->unbound_attrs)
                        goto err_free_wq;
        }
 
        va_start(args, max_active);
-       vsnprintf(wq->name, sizeof(wq->name), fmt, args);
+       name_len = vsnprintf(wq->name, sizeof(wq->name), fmt, args);
        va_end(args);
 
-       max_active = max_active ?: WQ_DFL_ACTIVE;
-       max_active = wq_clamp_max_active(max_active, flags, wq->name);
+       if (name_len >= WQ_NAME_LEN)
+               pr_warn_once("workqueue: name exceeds WQ_NAME_LEN. Truncating to: %s\n",
+                            wq->name);
+
+       if (flags & WQ_BH) {
+               /*
+                * BH workqueues always share a single execution context per CPU
+                * and don't impose any max_active limit.
+                */
+               max_active = INT_MAX;
+       } else {
+               max_active = max_active ?: WQ_DFL_ACTIVE;
+               max_active = wq_clamp_max_active(max_active, flags, wq->name);
+       }
 
        /* init wq */
        wq->flags = flags;
-       wq->saved_max_active = max_active;
+       wq->max_active = max_active;
+       wq->min_active = min(max_active, WQ_DFL_MIN_ACTIVE);
+       wq->saved_max_active = wq->max_active;
+       wq->saved_min_active = wq->min_active;
        mutex_init(&wq->mutex);
        atomic_set(&wq->nr_pwqs_to_flush, 0);
        INIT_LIST_HEAD(&wq->pwqs);
@@ -4267,8 +5584,13 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
        wq_init_lockdep(wq);
        INIT_LIST_HEAD(&wq->list);
 
+       if (flags & WQ_UNBOUND) {
+               if (alloc_node_nr_active(wq->node_nr_active) < 0)
+                       goto err_unreg_lockdep;
+       }
+
        if (alloc_and_link_pwqs(wq) < 0)
-               goto err_unreg_lockdep;
+               goto err_free_node_nr_active;
 
        if (wq_online && init_rescuer(wq) < 0)
                goto err_destroy;
@@ -4284,8 +5606,7 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
        mutex_lock(&wq_pool_mutex);
 
        mutex_lock(&wq->mutex);
-       for_each_pwq(pwq, wq)
-               pwq_adjust_max_active(pwq);
+       wq_adjust_max_active(wq);
        mutex_unlock(&wq->mutex);
 
        list_add_tail_rcu(&wq->list, &workqueues);
@@ -4294,6 +5615,9 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
 
        return wq;
 
+err_free_node_nr_active:
+       if (wq->flags & WQ_UNBOUND)
+               free_node_nr_active(wq->node_nr_active);
 err_unreg_lockdep:
        wq_unregister_lockdep(wq);
        wq_free_lockdep(wq);
@@ -4305,7 +5629,23 @@ err_destroy:
        destroy_workqueue(wq);
        return NULL;
 }
 EXPORT_SYMBOL_GPL(alloc_workqueue);
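
The WQ_BH checks at the top of alloc_workqueue() enforce that BH workqueues are
allocated with max_active == 0 and only the allowed flags. A usage sketch (the
handler and names are hypothetical):

	static void example_bh_fn(struct work_struct *work)
	{
		/* runs in softirq context on the queueing CPU; must not sleep */
	}

	static DECLARE_WORK(example_bh_work, example_bh_fn);

	static int __init example_bh_init(void)
	{
		struct workqueue_struct *bh_wq;

		/* max_active must be 0 for WQ_BH, per the WARN_ON_ONCE above */
		bh_wq = alloc_workqueue("example_bh", WQ_BH, 0);
		if (!bh_wq)
			return -ENOMEM;

		queue_work(bh_wq, &example_bh_work);
		return 0;
	}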
+
+static bool pwq_busy(struct pool_workqueue *pwq)
+{
+       int i;
+
+       for (i = 0; i < WORK_NR_COLORS; i++)
+               if (pwq->nr_in_flight[i])
+                       return true;
+
+       if ((pwq != rcu_access_pointer(pwq->wq->dfl_pwq)) && (pwq->refcnt > 1))
+               return true;
+       if (!pwq_is_empty(pwq))
+               return true;
+
+       return false;
+}
 
 /**
  * destroy_workqueue - safely terminate a workqueue
@@ -4316,31 +5656,55 @@ EXPORT_SYMBOL_GPL(alloc_workqueue);
 void destroy_workqueue(struct workqueue_struct *wq)
 {
        struct pool_workqueue *pwq;
-       int node;
+       int cpu;
+
+       /*
+        * Remove it from sysfs first so that sanity check failure doesn't
+        * lead to sysfs name conflicts.
+        */
+       workqueue_sysfs_unregister(wq);
+
+       /* mark the workqueue destruction is in progress */
+       mutex_lock(&wq->mutex);
+       wq->flags |= __WQ_DESTROYING;
+       mutex_unlock(&wq->mutex);
 
        /* drain it before proceeding with destruction */
        drain_workqueue(wq);
 
-       /* sanity checks */
-       mutex_lock(&wq->mutex);
-       for_each_pwq(pwq, wq) {
-               int i;
+       /* kill rescuer, if sanity checks fail, leave it w/o rescuer */
+       if (wq->rescuer) {
+               struct worker *rescuer = wq->rescuer;
 
-               for (i = 0; i < WORK_NR_COLORS; i++) {
-                       if (WARN_ON(pwq->nr_in_flight[i])) {
-                               mutex_unlock(&wq->mutex);
-                               show_workqueue_state();
-                               return;
-                       }
-               }
+               /* this prevents new queueing */
+               raw_spin_lock_irq(&wq_mayday_lock);
+               wq->rescuer = NULL;
+               raw_spin_unlock_irq(&wq_mayday_lock);
+
+               /* rescuer will empty maydays list before exiting */
+               kthread_stop(rescuer->task);
+               kfree(rescuer);
+       }
 
-               if (WARN_ON((pwq != wq->dfl_pwq) && (pwq->refcnt > 1)) ||
-                   WARN_ON(pwq->nr_active) ||
-                   WARN_ON(!list_empty(&pwq->delayed_works))) {
+       /*
+        * Sanity checks - grab all the locks so that we wait for all
+        * in-flight operations which may do put_pwq().
+        */
+       mutex_lock(&wq_pool_mutex);
+       mutex_lock(&wq->mutex);
+       for_each_pwq(pwq, wq) {
+               raw_spin_lock_irq(&pwq->pool->lock);
+               if (WARN_ON(pwq_busy(pwq))) {
+                       pr_warn("%s: %s has the following busy pwq\n",
+                               __func__, wq->name);
+                       show_pwq(pwq);
+                       raw_spin_unlock_irq(&pwq->pool->lock);
                        mutex_unlock(&wq->mutex);
-                       show_workqueue_state();
+                       mutex_unlock(&wq_pool_mutex);
+                       show_one_workqueue(wq);
                        return;
                }
+               raw_spin_unlock_irq(&pwq->pool->lock);
        }
        mutex_unlock(&wq->mutex);
 
@@ -4348,42 +5712,25 @@ void destroy_workqueue(struct workqueue_struct *wq)
         * wq list is used to freeze wq, remove from list after
         * flushing is complete in case freeze races us.
         */
-       mutex_lock(&wq_pool_mutex);
        list_del_rcu(&wq->list);
        mutex_unlock(&wq_pool_mutex);
 
-       workqueue_sysfs_unregister(wq);
+       /*
+        * We're the sole accessor of @wq. Directly access cpu_pwq and dfl_pwq
+        * to put the base refs. @wq will be auto-destroyed from the last
+        * pwq_put. RCU read lock prevents @wq from going away from under us.
+        */
+       rcu_read_lock();
 
-       if (wq->rescuer)
-               kthread_stop(wq->rescuer->task);
+       for_each_possible_cpu(cpu) {
+               put_pwq_unlocked(unbound_pwq(wq, cpu));
+               RCU_INIT_POINTER(*unbound_pwq_slot(wq, cpu), NULL);
+       }
 
-       if (!(wq->flags & WQ_UNBOUND)) {
-               wq_unregister_lockdep(wq);
-               /*
-                * The base ref is never dropped on per-cpu pwqs.  Directly
-                * schedule RCU free.
-                */
-               call_rcu(&wq->rcu, rcu_free_wq);
-       } else {
-               /*
-                * We're the sole accessor of @wq at this point.  Directly
-                * access numa_pwq_tbl[] and dfl_pwq to put the base refs.
-                * @wq will be freed when the last pwq is released.
-                */
-               for_each_node(node) {
-                       pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
-                       RCU_INIT_POINTER(wq->numa_pwq_tbl[node], NULL);
-                       put_pwq_unlocked(pwq);
-               }
+       put_pwq_unlocked(unbound_pwq(wq, -1));
+       RCU_INIT_POINTER(*unbound_pwq_slot(wq, -1), NULL);
 
-               /*
-                * Put dfl_pwq.  @wq may be freed any time after dfl_pwq is
-                * put.  Don't access it afterwards.
-                */
-               pwq = wq->dfl_pwq;
-               wq->dfl_pwq = NULL;
-               put_pwq_unlocked(pwq);
-       }
+       rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(destroy_workqueue);
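
destroy_workqueue() drains the workqueue first, so callers must stop
self-requeueing work beforehand or drain_workqueue() cannot terminate. A
teardown sketch (names hypothetical):

	static void example_teardown(struct workqueue_struct *wq,
				     struct delayed_work *dwork)
	{
		cancel_delayed_work_sync(dwork);	/* stop requeueing first */
		destroy_workqueue(wq);			/* drain, check, free */
	}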
 
@@ -4392,33 +5739,62 @@ EXPORT_SYMBOL_GPL(destroy_workqueue);
  * @wq: target workqueue
  * @max_active: new max_active value.
  *
- * Set max_active of @wq to @max_active.
+ * Set max_active of @wq to @max_active. See the alloc_workqueue() function
+ * comment.
  *
  * CONTEXT:
  * Don't call from IRQ context.
  */
 void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
 {
-       struct pool_workqueue *pwq;
-
+       /* max_active doesn't mean anything for BH workqueues */
+       if (WARN_ON(wq->flags & WQ_BH))
+               return;
        /* disallow meddling with max_active for ordered workqueues */
-       if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
+       if (WARN_ON(wq->flags & __WQ_ORDERED))
                return;
 
        max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
 
        mutex_lock(&wq->mutex);
 
-       wq->flags &= ~__WQ_ORDERED;
        wq->saved_max_active = max_active;
+       if (wq->flags & WQ_UNBOUND)
+               wq->saved_min_active = min(wq->saved_min_active, max_active);
 
-       for_each_pwq(pwq, wq)
-               pwq_adjust_max_active(pwq);
+       wq_adjust_max_active(wq);
 
        mutex_unlock(&wq->mutex);
 }
 EXPORT_SYMBOL_GPL(workqueue_set_max_active);
 
+/**
+ * workqueue_set_min_active - adjust min_active of an unbound workqueue
+ * @wq: target unbound workqueue
+ * @min_active: new min_active value
+ *
+ * Set min_active of an unbound workqueue. Unlike other types of workqueues, an
+ * unbound workqueue is not guaranteed to be able to process max_active
+ * interdependent work items. Instead, an unbound workqueue is guaranteed to be
+ * able to process min_active number of interdependent work items which is
+ * %WQ_DFL_MIN_ACTIVE by default.
+ *
+ * Use this function to adjust the min_active value between 0 and the current
+ * max_active.
+ */
+void workqueue_set_min_active(struct workqueue_struct *wq, int min_active)
+{
+       /* min_active is only meaningful for non-ordered unbound workqueues */
+       if (WARN_ON((wq->flags & (WQ_BH | WQ_UNBOUND | __WQ_ORDERED)) !=
+                   WQ_UNBOUND))
+               return;
+
+       mutex_lock(&wq->mutex);
+       wq->saved_min_active = clamp(min_active, 0, wq->saved_max_active);
+       wq_adjust_max_active(wq);
+       mutex_unlock(&wq->mutex);
+}
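
Together, the two knobs bound an unbound workqueue's concurrency from both
sides. A tuning sketch (values and the wq name are hypothetical):

	static void example_tune(struct workqueue_struct *unbound_wq)
	{
		/* allow up to 16 concurrent work items ... */
		workqueue_set_max_active(unbound_wq, 16);
		/* ... while guaranteeing 4 interdependent ones make progress */
		workqueue_set_min_active(unbound_wq, 4);
	}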
+
 /**
  * current_work - retrieve %current task's work struct
  *
@@ -4460,10 +5836,11 @@ bool current_is_workqueue_rescuer(void)
  * unreliable and only useful as advisory hints or for debugging.
  *
  * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU.
- * Note that both per-cpu and unbound workqueues may be associated with
- * multiple pool_workqueues which have separate congested states.  A
- * workqueue being congested on one CPU doesn't mean the workqueue is also
- * contested on other CPUs / NUMA nodes.
+ *
+ * With the exception of ordered workqueues, all workqueues have per-cpu
+ * pool_workqueues, each with its own congested state. A workqueue being
+ * congested on one CPU doesn't mean that the workqueue is congested on any
+ * other CPUs.
  *
  * Return:
  * %true if congested, %false otherwise.
@@ -4479,12 +5856,9 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
        if (cpu == WORK_CPU_UNBOUND)
                cpu = smp_processor_id();
 
-       if (!(wq->flags & WQ_UNBOUND))
-               pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
-       else
-               pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
+       pwq = *per_cpu_ptr(wq->cpu_pwq, cpu);
+       ret = !list_empty(&pwq->inactive_works);
 
-       ret = !list_empty(&pwq->delayed_works);
        preempt_enable();
        rcu_read_unlock();
 
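Since the result is advisory, a typical caller only uses it to shed optional
load. A sketch (names hypothetical):

	static void example_queue_optional(struct workqueue_struct *wq,
					   struct work_struct *work)
	{
		/* advisory only: the state may change right after the check */
		if (!workqueue_congested(WORK_CPU_UNBOUND, wq))
			queue_work(wq, work);
	}
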
@@ -4506,7 +5880,7 @@ EXPORT_SYMBOL_GPL(workqueue_congested);
 unsigned int work_busy(struct work_struct *work)
 {
        struct worker_pool *pool;
-       unsigned long flags;
+       unsigned long irq_flags;
        unsigned int ret = 0;
 
        if (work_pending(work))
@@ -4515,10 +5889,10 @@ unsigned int work_busy(struct work_struct *work)
        rcu_read_lock();
        pool = get_work_pool(work);
        if (pool) {
-               spin_lock_irqsave(&pool->lock, flags);
+               raw_spin_lock_irqsave(&pool->lock, irq_flags);
                if (find_worker_executing_work(pool, work))
                        ret |= WORK_BUSY_RUNNING;
-               spin_unlock_irqrestore(&pool->lock, flags);
+               raw_spin_unlock_irqrestore(&pool->lock, irq_flags);
        }
        rcu_read_unlock();
 
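work_busy() likewise returns a racy snapshot, suitable only for debug
reporting. A sketch (name hypothetical):

	static void example_debug_work(struct work_struct *work)
	{
		unsigned int busy = work_busy(work);

		/* unreliable snapshot; for debug output only */
		pr_debug("work %ps:%s%s\n", work->func,
			 (busy & WORK_BUSY_PENDING) ? " PENDING" : "",
			 (busy & WORK_BUSY_RUNNING) ? " RUNNING" : "");
	}
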
@@ -4584,11 +5958,11 @@ void print_worker_info(const char *log_lvl, struct task_struct *task)
         * Carefully copy the associated workqueue's workfn, name and desc.
         * Keep the original last '\0' in case the original is garbage.
         */
-       probe_kernel_read(&fn, &worker->current_func, sizeof(fn));
-       probe_kernel_read(&pwq, &worker->current_pwq, sizeof(pwq));
-       probe_kernel_read(&wq, &pwq->wq, sizeof(wq));
-       probe_kernel_read(name, wq->name, sizeof(name) - 1);
-       probe_kernel_read(desc, worker->desc, sizeof(desc) - 1);
+       copy_from_kernel_nofault(&fn, &worker->current_func, sizeof(fn));
+       copy_from_kernel_nofault(&pwq, &worker->current_pwq, sizeof(pwq));
+       copy_from_kernel_nofault(&wq, &pwq->wq, sizeof(wq));
+       copy_from_kernel_nofault(name, wq->name, sizeof(name) - 1);
+       copy_from_kernel_nofault(desc, worker->desc, sizeof(desc) - 1);
 
        if (fn || name[0] || desc[0]) {
                printk("%sWorkqueue: %s %ps", log_lvl, name, fn);
@@ -4603,25 +5977,73 @@ static void pr_cont_pool_info(struct worker_pool *pool)
        pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask);
        if (pool->node != NUMA_NO_NODE)
                pr_cont(" node=%d", pool->node);
-       pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice);
+       pr_cont(" flags=0x%x", pool->flags);
+       if (pool->flags & POOL_BH)
+               pr_cont(" bh%s",
+                       pool->attrs->nice == HIGHPRI_NICE_LEVEL ? "-hi" : "");
+       else
+               pr_cont(" nice=%d", pool->attrs->nice);
+}
+
+static void pr_cont_worker_id(struct worker *worker)
+{
+       struct worker_pool *pool = worker->pool;
+
+       if (pool->flags & WQ_BH)
+               pr_cont("bh%s",
+                       pool->attrs->nice == HIGHPRI_NICE_LEVEL ? "-hi" : "");
+       else
+               pr_cont("%d%s", task_pid_nr(worker->task),
+                       worker->rescue_wq ? "(RESCUER)" : "");
+}
+
+struct pr_cont_work_struct {
+       bool comma;
+       work_func_t func;
+       long ctr;
+};
+
+static void pr_cont_work_flush(bool comma, work_func_t func, struct pr_cont_work_struct *pcwsp)
+{
+       if (!pcwsp->ctr)
+               goto out_record;
+       if (func == pcwsp->func) {
+               pcwsp->ctr++;
+               return;
+       }
+       if (pcwsp->ctr == 1)
+               pr_cont("%s %ps", pcwsp->comma ? "," : "", pcwsp->func);
+       else
+               pr_cont("%s %ld*%ps", pcwsp->comma ? "," : "", pcwsp->ctr, pcwsp->func);
+       pcwsp->ctr = 0;
+out_record:
+       if ((long)func == -1L)
+               return;
+       pcwsp->comma = comma;
+       pcwsp->func = func;
+       pcwsp->ctr = 1;
 }
 
-static void pr_cont_work(bool comma, struct work_struct *work)
+static void pr_cont_work(bool comma, struct work_struct *work, struct pr_cont_work_struct *pcwsp)
 {
        if (work->func == wq_barrier_func) {
                struct wq_barrier *barr;
 
                barr = container_of(work, struct wq_barrier, work);
 
+               pr_cont_work_flush(comma, (work_func_t)-1, pcwsp);
                pr_cont("%s BAR(%d)", comma ? "," : "",
                        task_pid_nr(barr->task));
        } else {
-               pr_cont("%s %ps", comma ? "," : "", work->func);
+               if (!comma)
+                       pr_cont_work_flush(comma, (work_func_t)-1, pcwsp);
+               pr_cont_work_flush(comma, work->func, pcwsp);
        }
 }
 
 static void show_pwq(struct pool_workqueue *pwq)
 {
+       struct pr_cont_work_struct pcws = { .ctr = 0, };
        struct worker_pool *pool = pwq->pool;
        struct work_struct *work;
        struct worker *worker;
@@ -4631,7 +6053,8 @@ static void show_pwq(struct pool_workqueue *pwq)
        pr_info("  pwq %d:", pool->id);
        pr_cont_pool_info(pool);
 
-       pr_cont(" active=%d/%d%s\n", pwq->nr_active, pwq->max_active,
+       pr_cont(" active=%d refcnt=%d%s\n",
+               pwq->nr_active, pwq->refcnt,
                !list_empty(&pwq->mayday_node) ? " MAYDAY" : "");
 
        hash_for_each(pool->busy_hash, bkt, worker, hentry) {
@@ -4648,12 +6071,12 @@ static void show_pwq(struct pool_workqueue *pwq)
                        if (worker->current_pwq != pwq)
                                continue;
 
-                       pr_cont("%s %d%s:%ps", comma ? "," : "",
-                               task_pid_nr(worker->task),
-                               worker == pwq->wq->rescuer ? "(RESCUER)" : "",
-                               worker->current_func);
+                       pr_cont(" %s", comma ? "," : "");
+                       pr_cont_worker_id(worker);
+                       pr_cont(":%ps", worker->current_func);
                        list_for_each_entry(work, &worker->scheduled, entry)
-                               pr_cont_work(false, work);
+                               pr_cont_work(false, work, &pcws);
+                       pr_cont_work_flush(comma, (work_func_t)-1L, &pcws);
                        comma = true;
                }
                pr_cont("\n");
@@ -4673,100 +6096,161 @@ static void show_pwq(struct pool_workqueue *pwq)
                        if (get_work_pwq(work) != pwq)
                                continue;
 
-                       pr_cont_work(comma, work);
+                       pr_cont_work(comma, work, &pcws);
                        comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
                }
+               pr_cont_work_flush(comma, (work_func_t)-1L, &pcws);
                pr_cont("\n");
        }
 
-       if (!list_empty(&pwq->delayed_works)) {
+       if (!list_empty(&pwq->inactive_works)) {
                bool comma = false;
 
-               pr_info("    delayed:");
-               list_for_each_entry(work, &pwq->delayed_works, entry) {
-                       pr_cont_work(comma, work);
+               pr_info("    inactive:");
+               list_for_each_entry(work, &pwq->inactive_works, entry) {
+                       pr_cont_work(comma, work, &pcws);
                        comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
                }
+               pr_cont_work_flush(comma, (work_func_t)-1L, &pcws);
                pr_cont("\n");
        }
 }
 
 /**
- * show_workqueue_state - dump workqueue state
+ * show_one_workqueue - dump state of specified workqueue
+ * @wq: workqueue whose state will be printed
+ */
+void show_one_workqueue(struct workqueue_struct *wq)
+{
+       struct pool_workqueue *pwq;
+       bool idle = true;
+       unsigned long irq_flags;
+
+       for_each_pwq(pwq, wq) {
+               if (!pwq_is_empty(pwq)) {
+                       idle = false;
+                       break;
+               }
+       }
+       if (idle) /* Nothing to print for idle workqueue */
+               return;
+
+       pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
+
+       for_each_pwq(pwq, wq) {
+               raw_spin_lock_irqsave(&pwq->pool->lock, irq_flags);
+               if (!pwq_is_empty(pwq)) {
+                       /*
+                        * Defer printing to avoid deadlocks in console
+                        * drivers that queue work while holding locks
+                        * also taken in their write paths.
+                        */
+                       printk_deferred_enter();
+                       show_pwq(pwq);
+                       printk_deferred_exit();
+               }
+               raw_spin_unlock_irqrestore(&pwq->pool->lock, irq_flags);
+               /*
+                * We could be printing a lot from atomic context, e.g.
+                * sysrq-t -> show_all_workqueues(). Avoid triggering
+                * hard lockup.
+                */
+               touch_nmi_watchdog();
+       }
+}
+
+/**
+ * show_one_worker_pool - dump state of specified worker pool
+ * @pool: worker pool whose state will be printed
+ */
+static void show_one_worker_pool(struct worker_pool *pool)
+{
+       struct worker *worker;
+       bool first = true;
+       unsigned long irq_flags;
+       unsigned long hung = 0;
+
+       raw_spin_lock_irqsave(&pool->lock, irq_flags);
+       if (pool->nr_workers == pool->nr_idle)
+               goto next_pool;
+
+       /* How long the first pending work is waiting for a worker. */
+       if (!list_empty(&pool->worklist))
+               hung = jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000;
+
+       /*
+        * Defer printing to avoid deadlocks in console drivers that
+        * queue work while holding locks also taken in their write
+        * paths.
+        */
+       printk_deferred_enter();
+       pr_info("pool %d:", pool->id);
+       pr_cont_pool_info(pool);
+       pr_cont(" hung=%lus workers=%d", hung, pool->nr_workers);
+       if (pool->manager)
+               pr_cont(" manager: %d",
+                       task_pid_nr(pool->manager->task));
+       list_for_each_entry(worker, &pool->idle_list, entry) {
+               pr_cont(" %s", first ? "idle: " : "");
+               pr_cont_worker_id(worker);
+               first = false;
+       }
+       pr_cont("\n");
+       printk_deferred_exit();
+next_pool:
+       raw_spin_unlock_irqrestore(&pool->lock, irq_flags);
+       /*
+        * We could be printing a lot from atomic context, e.g.
+        * sysrq-t -> show_all_workqueues(). Avoid triggering
+        * hard lockup.
+        */
+       touch_nmi_watchdog();
+}
+
+/**
+ * show_all_workqueues - dump workqueue state
  *
- * Called from a sysrq handler or try_to_freeze_tasks() and prints out
- * all busy workqueues and pools.
+ * Called from a sysrq handler and prints out all busy workqueues and pools.
  */
-void show_workqueue_state(void)
+void show_all_workqueues(void)
 {
        struct workqueue_struct *wq;
        struct worker_pool *pool;
-       unsigned long flags;
        int pi;
 
        rcu_read_lock();
 
        pr_info("Showing busy workqueues and worker pools:\n");
 
-       list_for_each_entry_rcu(wq, &workqueues, list) {
-               struct pool_workqueue *pwq;
-               bool idle = true;
+       list_for_each_entry_rcu(wq, &workqueues, list)
+               show_one_workqueue(wq);
 
-               for_each_pwq(pwq, wq) {
-                       if (pwq->nr_active || !list_empty(&pwq->delayed_works)) {
-                               idle = false;
-                               break;
-                       }
-               }
-               if (idle)
-                       continue;
+       for_each_pool(pool, pi)
+               show_one_worker_pool(pool);
 
-               pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
+       rcu_read_unlock();
+}
 
-               for_each_pwq(pwq, wq) {
-                       spin_lock_irqsave(&pwq->pool->lock, flags);
-                       if (pwq->nr_active || !list_empty(&pwq->delayed_works))
-                               show_pwq(pwq);
-                       spin_unlock_irqrestore(&pwq->pool->lock, flags);
-                       /*
-                        * We could be printing a lot from atomic context, e.g.
-                        * sysrq-t -> show_workqueue_state(). Avoid triggering
-                        * hard lockup.
-                        */
-                       touch_nmi_watchdog();
-               }
-       }
+/**
+ * show_freezable_workqueues - dump freezable workqueue state
+ *
+ * Called from try_to_freeze_tasks() and prints out all freezable workqueues
+ * still busy.
+ */
+void show_freezable_workqueues(void)
+{
+       struct workqueue_struct *wq;
 
-       for_each_pool(pool, pi) {
-               struct worker *worker;
-               bool first = true;
-
-               spin_lock_irqsave(&pool->lock, flags);
-               if (pool->nr_workers == pool->nr_idle)
-                       goto next_pool;
-
-               pr_info("pool %d:", pool->id);
-               pr_cont_pool_info(pool);
-               pr_cont(" hung=%us workers=%d",
-                       jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000,
-                       pool->nr_workers);
-               if (pool->manager)
-                       pr_cont(" manager: %d",
-                               task_pid_nr(pool->manager->task));
-               list_for_each_entry(worker, &pool->idle_list, entry) {
-                       pr_cont(" %s%d", first ? "idle: " : "",
-                               task_pid_nr(worker->task));
-                       first = false;
-               }
-               pr_cont("\n");
-       next_pool:
-               spin_unlock_irqrestore(&pool->lock, flags);
-               /*
-                * We could be printing a lot from atomic context, e.g.
-                * sysrq-t -> show_workqueue_state(). Avoid triggering
-                * hard lockup.
-                */
-               touch_nmi_watchdog();
+       rcu_read_lock();
+
+       pr_info("Showing freezable workqueues that are still busy:\n");
+
+       list_for_each_entry_rcu(wq, &workqueues, list) {
+               if (!(wq->flags & WQ_FREEZABLE))
+                       continue;
+               show_one_workqueue(wq);
        }
 
        rcu_read_unlock();
@@ -4790,7 +6274,7 @@ void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
                struct worker_pool *pool = worker->pool;
 
                if (pool) {
-                       spin_lock_irq(&pool->lock);
+                       raw_spin_lock_irq(&pool->lock);
                        /*
                         * ->desc tracks information (wq name or
                         * set_worker_desc()) for the latest execution.  If
@@ -4804,7 +6288,7 @@ void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
                                        scnprintf(buf + off, size - off, "-%s",
                                                  worker->desc);
                        }
-                       spin_unlock_irq(&pool->lock);
+                       raw_spin_unlock_irq(&pool->lock);
                }
        }
 
@@ -4835,49 +6319,44 @@ static void unbind_workers(int cpu)
 
        for_each_cpu_worker_pool(pool, cpu) {
                mutex_lock(&wq_pool_attach_mutex);
-               spin_lock_irq(&pool->lock);
+               raw_spin_lock_irq(&pool->lock);
 
                /*
                 * We've blocked all attach/detach operations. Make all workers
                 * unbound and set DISASSOCIATED.  Before this, all workers
-                * except for the ones which are still executing works from
-                * before the last CPU down must be on the cpu.  After
-                * this, they may become diasporas.
+                * must be on the cpu.  After this, they may become diasporas.
+                * And the preemption-disabled sections in their sched callbacks
+                * are guaranteed to see WORKER_UNBOUND since the code here
+                * runs on the same cpu.
                 */
                for_each_pool_worker(worker, pool)
                        worker->flags |= WORKER_UNBOUND;
 
                pool->flags |= POOL_DISASSOCIATED;
 
-               spin_unlock_irq(&pool->lock);
-               mutex_unlock(&wq_pool_attach_mutex);
-
-               /*
-                * Call schedule() so that we cross rq->lock and thus can
-                * guarantee sched callbacks see the %WORKER_UNBOUND flag.
-                * This is necessary as scheduler callbacks may be invoked
-                * from other cpus.
-                */
-               schedule();
-
                /*
-                * Sched callbacks are disabled now.  Zap nr_running.
-                * After this, nr_running stays zero and need_more_worker()
-                * and keep_working() are always true as long as the
-                * worklist is not empty.  This pool now behaves as an
-                * unbound (in terms of concurrency management) pool which
+                * The handling of nr_running in sched callbacks is disabled
+                * now.  Zap nr_running.  After this, nr_running stays zero and
+                * need_more_worker() and keep_working() are always true as
+                * long as the worklist is not empty.  This pool now behaves as
+                * an unbound (in terms of concurrency management) pool which
                 * is served by workers tied to the pool.
                 */
-               atomic_set(&pool->nr_running, 0);
+               pool->nr_running = 0;
 
                /*
                 * With concurrency management just turned off, a busy
                 * worker blocking could lead to lengthy stalls.  Kick off
                 * unbound chain execution of currently pending work items.
                 */
-               spin_lock_irq(&pool->lock);
-               wake_up_worker(pool);
-               spin_unlock_irq(&pool->lock);
+               kick_pool(pool);
+
+               raw_spin_unlock_irq(&pool->lock);
+
+               for_each_pool_worker(worker, pool)
+                       unbind_worker(worker);
+
+               mutex_unlock(&wq_pool_attach_mutex);
        }
 }
 
@@ -4900,28 +6379,19 @@ static void rebind_workers(struct worker_pool *pool)
         * of all workers first and then clear UNBOUND.  As we're called
         * from CPU_ONLINE, the following shouldn't fail.
         */
-       for_each_pool_worker(worker, pool)
+       for_each_pool_worker(worker, pool) {
+               kthread_set_per_cpu(worker->task, pool->cpu);
                WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
-                                                 pool->attrs->cpumask) < 0);
+                                                 pool_allowed_cpus(pool)) < 0);
+       }
 
-       spin_lock_irq(&pool->lock);
+       raw_spin_lock_irq(&pool->lock);
 
        pool->flags &= ~POOL_DISASSOCIATED;
 
        for_each_pool_worker(worker, pool) {
                unsigned int worker_flags = worker->flags;
 
-               /*
-                * A bound idle worker should actually be on the runqueue
-                * of the associated CPU for local wake-ups targeting it to
-                * work.  Kick all idle workers so that they migrate to the
-                * associated CPU.  Doing this in the same loop as
-                * replacing UNBOUND with REBOUND is safe as no worker will
-                * be bound before @pool->lock is released.
-                */
-               if (worker_flags & WORKER_IDLE)
-                       wake_up_process(worker->task);
-
                /*
                 * We want to clear UNBOUND but can't directly call
                 * worker_clr_flags() or adjust nr_running.  Atomically
@@ -4943,7 +6413,7 @@ static void rebind_workers(struct worker_pool *pool)
                WRITE_ONCE(worker->flags, worker_flags);
        }
 
-       spin_unlock_irq(&pool->lock);
+       raw_spin_unlock_irq(&pool->lock);
 }
 
 /**
@@ -4996,19 +6466,34 @@ int workqueue_online_cpu(unsigned int cpu)
        mutex_lock(&wq_pool_mutex);
 
        for_each_pool(pool, pi) {
-               mutex_lock(&wq_pool_attach_mutex);
+               /* BH pools aren't affected by hotplug */
+               if (pool->flags & POOL_BH)
+                       continue;
 
+               mutex_lock(&wq_pool_attach_mutex);
                if (pool->cpu == cpu)
                        rebind_workers(pool);
                else if (pool->cpu < 0)
                        restore_unbound_workers_cpumask(pool, cpu);
-
                mutex_unlock(&wq_pool_attach_mutex);
        }
 
-       /* update NUMA affinity of unbound workqueues */
-       list_for_each_entry(wq, &workqueues, list)
-               wq_update_unbound_numa(wq, cpu, true);
+       /* update pod affinity of unbound workqueues */
+       list_for_each_entry(wq, &workqueues, list) {
+               struct workqueue_attrs *attrs = wq->unbound_attrs;
+
+               if (attrs) {
+                       const struct wq_pod_type *pt = wqattrs_pod_type(attrs);
+                       int tcpu;
+
+                       for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]])
+                               wq_update_pod(wq, tcpu, cpu, true);
+
+                       mutex_lock(&wq->mutex);
+                       wq_update_node_max_active(wq, -1);
+                       mutex_unlock(&wq->mutex);
+               }
+       }
 
        mutex_unlock(&wq_pool_mutex);
        return 0;
@@ -5024,10 +6509,23 @@ int workqueue_offline_cpu(unsigned int cpu)
 
        unbind_workers(cpu);
 
-       /* update NUMA affinity of unbound workqueues */
+       /* update pod affinity of unbound workqueues */
        mutex_lock(&wq_pool_mutex);
-       list_for_each_entry(wq, &workqueues, list)
-               wq_update_unbound_numa(wq, cpu, false);
+       list_for_each_entry(wq, &workqueues, list) {
+               struct workqueue_attrs *attrs = wq->unbound_attrs;
+
+               if (attrs) {
+                       const struct wq_pod_type *pt = wqattrs_pod_type(attrs);
+                       int tcpu;
+
+                       for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]])
+                               wq_update_pod(wq, tcpu, cpu, false);
+
+                       mutex_lock(&wq->mutex);
+                       wq_update_node_max_active(wq, cpu);
+                       mutex_unlock(&wq->mutex);
+               }
+       }
        mutex_unlock(&wq_pool_mutex);
 
        return 0;
@@ -5048,50 +6546,54 @@ static void work_for_cpu_fn(struct work_struct *work)
 }
 
 /**
- * work_on_cpu - run a function in thread context on a particular cpu
+ * work_on_cpu_key - run a function in thread context on a particular cpu
  * @cpu: the cpu to run on
  * @fn: the function to run
  * @arg: the function arg
+ * @key: The lock class key for lock debugging purposes
  *
  * It is up to the caller to ensure that the cpu doesn't go offline.
  * The caller must not hold any locks which would prevent @fn from completing.
  *
  * Return: The value @fn returns.
  */
-long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
+long work_on_cpu_key(int cpu, long (*fn)(void *),
+                    void *arg, struct lock_class_key *key)
 {
        struct work_for_cpu wfc = { .fn = fn, .arg = arg };
 
-       INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
+       INIT_WORK_ONSTACK_KEY(&wfc.work, work_for_cpu_fn, key);
        schedule_work_on(cpu, &wfc.work);
        flush_work(&wfc.work);
        destroy_work_on_stack(&wfc.work);
        return wfc.ret;
 }
-EXPORT_SYMBOL_GPL(work_on_cpu);
+EXPORT_SYMBOL_GPL(work_on_cpu_key);
 
 /**
- * work_on_cpu_safe - run a function in thread context on a particular cpu
+ * work_on_cpu_safe_key - run a function in thread context on a particular cpu
  * @cpu: the cpu to run on
  * @fn:  the function to run
  * @arg: the function argument
+ * @key: The lock class key for lock debugging purposes
  *
  * Disables CPU hotplug and calls work_on_cpu(). The caller must not hold
  * any locks which would prevent @fn from completing.
  *
  * Return: The value @fn returns.
  */
-long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
+long work_on_cpu_safe_key(int cpu, long (*fn)(void *),
+                         void *arg, struct lock_class_key *key)
 {
        long ret = -ENODEV;
 
-       get_online_cpus();
+       cpus_read_lock();
        if (cpu_online(cpu))
-               ret = work_on_cpu(cpu, fn, arg);
-       put_online_cpus();
+               ret = work_on_cpu_key(cpu, fn, arg, key);
+       cpus_read_unlock();
        return ret;
 }
-EXPORT_SYMBOL_GPL(work_on_cpu_safe);
+EXPORT_SYMBOL_GPL(work_on_cpu_safe_key);
 #endif /* CONFIG_SMP */
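
As a usage illustration (not part of this file): the work_on_cpu() and
work_on_cpu_safe() wrappers in workqueue.h supply a static lock_class_key per
call site, so existing callers are unaffected by the _key rename. A minimal
sketch of a hypothetical caller:

#include <linux/workqueue.h>
#include <linux/smp.h>

/* runs in a kworker bound to the requested CPU */
static long read_local_cpu_id(void *arg)
{
        return raw_smp_processor_id();
}

static long query_cpu(int cpu)
{
        /* caller keeps @cpu online; otherwise use work_on_cpu_safe() */
        return work_on_cpu(cpu, read_local_cpu_id, NULL);
}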
 
 #ifdef CONFIG_FREEZER
@@ -5100,7 +6602,7 @@ EXPORT_SYMBOL_GPL(work_on_cpu_safe);
  * freeze_workqueues_begin - begin freezing workqueues
  *
  * Start freezing workqueues.  After this function returns, all freezable
- * workqueues will queue new works to their delayed_works list instead of
+ * workqueues will queue new works to their inactive_works list instead of
  * pool->worklist.
  *
  * CONTEXT:
@@ -5109,7 +6611,6 @@ EXPORT_SYMBOL_GPL(work_on_cpu_safe);
 void freeze_workqueues_begin(void)
 {
        struct workqueue_struct *wq;
-       struct pool_workqueue *pwq;
 
        mutex_lock(&wq_pool_mutex);
 
@@ -5118,8 +6619,7 @@ void freeze_workqueues_begin(void)
 
        list_for_each_entry(wq, &workqueues, list) {
                mutex_lock(&wq->mutex);
-               for_each_pwq(pwq, wq)
-                       pwq_adjust_max_active(pwq);
+               wq_adjust_max_active(wq);
                mutex_unlock(&wq->mutex);
        }
 
@@ -5184,7 +6684,6 @@ out_unlock:
 void thaw_workqueues(void)
 {
        struct workqueue_struct *wq;
-       struct pool_workqueue *pwq;
 
        mutex_lock(&wq_pool_mutex);
 
@@ -5196,8 +6695,7 @@ void thaw_workqueues(void)
        /* restore max_active and repopulate worklist */
        list_for_each_entry(wq, &workqueues, list) {
                mutex_lock(&wq->mutex);
-               for_each_pwq(pwq, wq)
-                       pwq_adjust_max_active(pwq);
+               wq_adjust_max_active(wq);
                mutex_unlock(&wq->mutex);
        }
 
@@ -5206,7 +6704,7 @@ out_unlock:
 }
 #endif /* CONFIG_FREEZER */
 
-static int workqueue_apply_unbound_cpumask(void)
+static int workqueue_apply_unbound_cpumask(const cpumask_var_t unbound_cpumask)
 {
        LIST_HEAD(ctxs);
        int ret = 0;
@@ -5216,15 +6714,12 @@ static int workqueue_apply_unbound_cpumask(void)
        lockdep_assert_held(&wq_pool_mutex);
 
        list_for_each_entry(wq, &workqueues, list) {
-               if (!(wq->flags & WQ_UNBOUND))
-                       continue;
-               /* creating multiple pwqs breaks ordering guarantee */
-               if (wq->flags & __WQ_ORDERED)
+               if (!(wq->flags & WQ_UNBOUND) || (wq->flags & __WQ_DESTROYING))
                        continue;
 
-               ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs);
-               if (!ctx) {
-                       ret = -ENOMEM;
+               ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs, unbound_cpumask);
+               if (IS_ERR(ctx)) {
+                       ret = PTR_ERR(ctx);
                        break;
                }
 
@@ -5237,70 +6732,118 @@ static int workqueue_apply_unbound_cpumask(void)
                apply_wqattrs_cleanup(ctx);
        }
 
+       if (!ret) {
+               mutex_lock(&wq_pool_attach_mutex);
+               cpumask_copy(wq_unbound_cpumask, unbound_cpumask);
+               mutex_unlock(&wq_pool_attach_mutex);
+       }
        return ret;
 }
 
 /**
- *  workqueue_set_unbound_cpumask - Set the low-level unbound cpumask
- *  @cpumask: the cpumask to set
- *
- *  The low-level workqueues cpumask is a global cpumask that limits
- *  the affinity of all unbound workqueues.  This function check the @cpumask
- *  and apply it to all unbound workqueues and updates all pwqs of them.
+ * workqueue_unbound_exclude_cpumask - Exclude given CPUs from unbound cpumask
+ * @exclude_cpumask: the cpumask to be excluded from wq_unbound_cpumask
  *
- *  Retun:     0       - Success
- *             -EINVAL - Invalid @cpumask
- *             -ENOMEM - Failed to allocate memory for attrs or pwqs.
+ * This function can be called from cpuset code to provide a set of isolated
+ * CPUs that should be excluded from wq_unbound_cpumask. The caller must hold
+ * either cpus_read_lock or cpus_write_lock.
  */
-int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
+int workqueue_unbound_exclude_cpumask(cpumask_var_t exclude_cpumask)
 {
-       int ret = -EINVAL;
-       cpumask_var_t saved_cpumask;
+       cpumask_var_t cpumask;
+       int ret = 0;
 
-       if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL))
+       if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
                return -ENOMEM;
 
+       lockdep_assert_cpus_held();
+       mutex_lock(&wq_pool_mutex);
+
+       /* Save the current isolated cpumask & export it via sysfs */
+       cpumask_copy(wq_isolated_cpumask, exclude_cpumask);
+
        /*
-        * Not excluding isolated cpus on purpose.
-        * If the user wishes to include them, we allow that.
+        * If the operation fails, it will fall back to
+        * wq_requested_unbound_cpumask, which is initially set to the
+        * (HK_TYPE_WQ ∩ HK_TYPE_DOMAIN) housekeeping mask and is rewritten
+        * by any subsequent write to the workqueue/cpumask sysfs file.
         */
-       cpumask_and(cpumask, cpumask, cpu_possible_mask);
-       if (!cpumask_empty(cpumask)) {
-               apply_wqattrs_lock();
+       if (!cpumask_andnot(cpumask, wq_requested_unbound_cpumask, exclude_cpumask))
+               cpumask_copy(cpumask, wq_requested_unbound_cpumask);
+       if (!cpumask_equal(cpumask, wq_unbound_cpumask))
+               ret = workqueue_apply_unbound_cpumask(cpumask);
+
+       mutex_unlock(&wq_pool_mutex);
+       free_cpumask_var(cpumask);
+       return ret;
+}
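
A sketch of how an isolation-aware caller such as cpuset might drive this
interface while holding cpus_read_lock, as required above; the wrapper
function and its name are illustrative assumptions:

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/workqueue.h>

static int exclude_isolated_cpus(const struct cpumask *isolated)
{
        cpumask_var_t mask;
        int ret;

        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;
        cpumask_copy(mask, isolated);

        cpus_read_lock();       /* cpus_read_lock or cpus_write_lock required */
        ret = workqueue_unbound_exclude_cpumask(mask);
        cpus_read_unlock();

        free_cpumask_var(mask);
        return ret;
}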
+
+static int parse_affn_scope(const char *val)
+{
+       int i;
 
-               /* save the old wq_unbound_cpumask. */
-               cpumask_copy(saved_cpumask, wq_unbound_cpumask);
+       for (i = 0; i < ARRAY_SIZE(wq_affn_names); i++) {
+               if (!strncasecmp(val, wq_affn_names[i], strlen(wq_affn_names[i])))
+                       return i;
+       }
+       return -EINVAL;
+}
 
-               /* update wq_unbound_cpumask at first and apply it to wqs. */
-               cpumask_copy(wq_unbound_cpumask, cpumask);
-               ret = workqueue_apply_unbound_cpumask();
+static int wq_affn_dfl_set(const char *val, const struct kernel_param *kp)
+{
+       struct workqueue_struct *wq;
+       int affn, cpu;
 
-               /* restore the wq_unbound_cpumask when failed. */
-               if (ret < 0)
-                       cpumask_copy(wq_unbound_cpumask, saved_cpumask);
+       affn = parse_affn_scope(val);
+       if (affn < 0)
+               return affn;
+       if (affn == WQ_AFFN_DFL)
+               return -EINVAL;
 
-               apply_wqattrs_unlock();
+       cpus_read_lock();
+       mutex_lock(&wq_pool_mutex);
+
+       wq_affn_dfl = affn;
+
+       list_for_each_entry(wq, &workqueues, list) {
+               for_each_online_cpu(cpu) {
+                       wq_update_pod(wq, cpu, cpu, true);
+               }
        }
 
-       free_cpumask_var(saved_cpumask);
-       return ret;
+       mutex_unlock(&wq_pool_mutex);
+       cpus_read_unlock();
+
+       return 0;
 }
 
+static int wq_affn_dfl_get(char *buffer, const struct kernel_param *kp)
+{
+       return scnprintf(buffer, PAGE_SIZE, "%s\n", wq_affn_names[wq_affn_dfl]);
+}
+
+static const struct kernel_param_ops wq_affn_dfl_ops = {
+       .set    = wq_affn_dfl_set,
+       .get    = wq_affn_dfl_get,
+};
+
+module_param_cb(default_affinity_scope, &wq_affn_dfl_ops, NULL, 0644);
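
As workqueue.c is built-in, the parameter is settable both on the kernel
command line and at runtime through sysfs; illustrative values (any entry of
wq_affn_names[] other than "default" is accepted, e.g. "cpu", "smt", "cache",
"numa" or "system"):

        workqueue.default_affinity_scope=cache
        /sys/module/workqueue/parameters/default_affinity_scope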
+
 #ifdef CONFIG_SYSFS
 /*
  * Workqueues with WQ_SYSFS flag set are visible to userland via
  * /sys/bus/workqueue/devices/WQ_NAME.  All visible workqueues have the
  * following attributes.
  *
- *  per_cpu    RO bool : whether the workqueue is per-cpu or unbound
- *  max_active RW int  : maximum number of in-flight work items
+ *  per_cpu            RO bool : whether the workqueue is per-cpu or unbound
+ *  max_active         RW int  : maximum number of in-flight work items
  *
  * Unbound workqueues have the following extra attributes.
  *
- *  pool_ids   RO int  : the associated pool IDs for each node
- *  nice       RW int  : nice value of the workers
- *  cpumask    RW mask : bitmask of allowed CPUs for the workers
- *  numa       RW bool : whether enable NUMA affinity
+ *  nice               RW int  : nice value of the workers
+ *  cpumask            RW mask : bitmask of allowed CPUs for the workers
+ *  affinity_scope     RW str  : worker CPU affinity scope (cache, numa, none)
+ *  affinity_strict    RW bool : worker CPU affinity is strict
  */
 struct wq_device {
        struct workqueue_struct         *wq;
@@ -5353,26 +6896,17 @@ static struct attribute *wq_sysfs_attrs[] = {
 };
 ATTRIBUTE_GROUPS(wq_sysfs);
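
A workqueue opts into this interface by passing WQ_SYSFS at allocation time;
a minimal sketch with an assumed name:

        /* visible as /sys/bus/workqueue/devices/my_wq once allocated */
        struct workqueue_struct *wq =
                alloc_workqueue("my_wq", WQ_UNBOUND | WQ_SYSFS, 0);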
 
-static ssize_t wq_pool_ids_show(struct device *dev,
-                               struct device_attribute *attr, char *buf)
+static void apply_wqattrs_lock(void)
 {
-       struct workqueue_struct *wq = dev_to_wq(dev);
-       const char *delim = "";
-       int node, written = 0;
-
-       get_online_cpus();
-       rcu_read_lock();
-       for_each_node(node) {
-               written += scnprintf(buf + written, PAGE_SIZE - written,
-                                    "%s%d:%d", delim, node,
-                                    unbound_pwq_by_node(wq, node)->pool->id);
-               delim = " ";
-       }
-       written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
-       rcu_read_unlock();
-       put_online_cpus();
+       /* CPUs should stay stable across pwq creations and installations */
+       cpus_read_lock();
+       mutex_lock(&wq_pool_mutex);
+}
 
-       return written;
+static void apply_wqattrs_unlock(void)
+{
+       mutex_unlock(&wq_pool_mutex);
+       cpus_read_unlock();
 }
 
 static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr,
@@ -5395,7 +6929,7 @@ static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq)
 
        lockdep_assert_held(&wq_pool_mutex);
 
-       attrs = alloc_workqueue_attrs(GFP_KERNEL);
+       attrs = alloc_workqueue_attrs();
        if (!attrs)
                return NULL;
 
@@ -5465,71 +6999,160 @@ out_unlock:
        return ret ?: count;
 }
 
-static ssize_t wq_numa_show(struct device *dev, struct device_attribute *attr,
-                           char *buf)
+static ssize_t wq_affn_scope_show(struct device *dev,
+                                 struct device_attribute *attr, char *buf)
 {
        struct workqueue_struct *wq = dev_to_wq(dev);
        int written;
 
        mutex_lock(&wq->mutex);
-       written = scnprintf(buf, PAGE_SIZE, "%d\n",
-                           !wq->unbound_attrs->no_numa);
+       if (wq->unbound_attrs->affn_scope == WQ_AFFN_DFL)
+               written = scnprintf(buf, PAGE_SIZE, "%s (%s)\n",
+                                   wq_affn_names[WQ_AFFN_DFL],
+                                   wq_affn_names[wq_affn_dfl]);
+       else
+               written = scnprintf(buf, PAGE_SIZE, "%s\n",
+                                   wq_affn_names[wq->unbound_attrs->affn_scope]);
        mutex_unlock(&wq->mutex);
 
        return written;
 }
 
-static ssize_t wq_numa_store(struct device *dev, struct device_attribute *attr,
-                            const char *buf, size_t count)
+static ssize_t wq_affn_scope_store(struct device *dev,
+                                  struct device_attribute *attr,
+                                  const char *buf, size_t count)
 {
        struct workqueue_struct *wq = dev_to_wq(dev);
        struct workqueue_attrs *attrs;
-       int v, ret = -ENOMEM;
+       int affn, ret = -ENOMEM;
 
-       apply_wqattrs_lock();
+       affn = parse_affn_scope(buf);
+       if (affn < 0)
+               return affn;
 
+       apply_wqattrs_lock();
        attrs = wq_sysfs_prep_attrs(wq);
-       if (!attrs)
-               goto out_unlock;
-
-       ret = -EINVAL;
-       if (sscanf(buf, "%d", &v) == 1) {
-               attrs->no_numa = !v;
+       if (attrs) {
+               attrs->affn_scope = affn;
                ret = apply_workqueue_attrs_locked(wq, attrs);
        }
+       apply_wqattrs_unlock();
+       free_workqueue_attrs(attrs);
+       return ret ?: count;
+}
 
-out_unlock:
+static ssize_t wq_affinity_strict_show(struct device *dev,
+                                      struct device_attribute *attr, char *buf)
+{
+       struct workqueue_struct *wq = dev_to_wq(dev);
+
+       return scnprintf(buf, PAGE_SIZE, "%d\n",
+                        wq->unbound_attrs->affn_strict);
+}
+
+static ssize_t wq_affinity_strict_store(struct device *dev,
+                                       struct device_attribute *attr,
+                                       const char *buf, size_t count)
+{
+       struct workqueue_struct *wq = dev_to_wq(dev);
+       struct workqueue_attrs *attrs;
+       int v, ret = -ENOMEM;
+
+       if (sscanf(buf, "%d", &v) != 1)
+               return -EINVAL;
+
+       apply_wqattrs_lock();
+       attrs = wq_sysfs_prep_attrs(wq);
+       if (attrs) {
+               attrs->affn_strict = (bool)v;
+               ret = apply_workqueue_attrs_locked(wq, attrs);
+       }
        apply_wqattrs_unlock();
        free_workqueue_attrs(attrs);
        return ret ?: count;
 }
 
 static struct device_attribute wq_sysfs_unbound_attrs[] = {
-       __ATTR(pool_ids, 0444, wq_pool_ids_show, NULL),
        __ATTR(nice, 0644, wq_nice_show, wq_nice_store),
        __ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store),
-       __ATTR(numa, 0644, wq_numa_show, wq_numa_store),
+       __ATTR(affinity_scope, 0644, wq_affn_scope_show, wq_affn_scope_store),
+       __ATTR(affinity_strict, 0644, wq_affinity_strict_show, wq_affinity_strict_store),
        __ATTR_NULL,
 };
 
-static struct bus_type wq_subsys = {
+static const struct bus_type wq_subsys = {
        .name                           = "workqueue",
        .dev_groups                     = wq_sysfs_groups,
 };
 
-static ssize_t wq_unbound_cpumask_show(struct device *dev,
-               struct device_attribute *attr, char *buf)
+/**
+ *  workqueue_set_unbound_cpumask - Set the low-level unbound cpumask
+ *  @cpumask: the cpumask to set
+ *
+ *  The low-level workqueues cpumask is a global cpumask that limits
+ *  the affinity of all unbound workqueues.  This function checks @cpumask
+ *  and applies it to all unbound workqueues, updating all of their pwqs.
+ *
+ *  Return:    0       - Success
+ *             -EINVAL - Invalid @cpumask
+ *             -ENOMEM - Failed to allocate memory for attrs or pwqs.
+ */
+static int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
+{
+       int ret = -EINVAL;
+
+       /*
+        * Not excluding isolated cpus on purpose.
+        * If the user wishes to include them, we allow that.
+        */
+       cpumask_and(cpumask, cpumask, cpu_possible_mask);
+       if (!cpumask_empty(cpumask)) {
+               apply_wqattrs_lock();
+               cpumask_copy(wq_requested_unbound_cpumask, cpumask);
+               if (cpumask_equal(cpumask, wq_unbound_cpumask)) {
+                       ret = 0;
+                       goto out_unlock;
+               }
+
+               ret = workqueue_apply_unbound_cpumask(cpumask);
+
+out_unlock:
+               apply_wqattrs_unlock();
+       }
+
+       return ret;
+}
+
+static ssize_t __wq_cpumask_show(struct device *dev,
+               struct device_attribute *attr, char *buf, cpumask_var_t mask)
 {
        int written;
 
        mutex_lock(&wq_pool_mutex);
-       written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
-                           cpumask_pr_args(wq_unbound_cpumask));
+       written = scnprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask));
        mutex_unlock(&wq_pool_mutex);
 
        return written;
 }
 
+static ssize_t wq_unbound_cpumask_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       return __wq_cpumask_show(dev, attr, buf, wq_unbound_cpumask);
+}
+
+static ssize_t wq_requested_cpumask_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       return __wq_cpumask_show(dev, attr, buf, wq_requested_unbound_cpumask);
+}
+
+static ssize_t wq_isolated_cpumask_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       return __wq_cpumask_show(dev, attr, buf, wq_isolated_cpumask);
+}
+
 static ssize_t wq_unbound_cpumask_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
 {
@@ -5547,19 +7170,35 @@ static ssize_t wq_unbound_cpumask_store(struct device *dev,
        return ret ? ret : count;
 }
 
-static struct device_attribute wq_sysfs_cpumask_attr =
+static struct device_attribute wq_sysfs_cpumask_attrs[] = {
        __ATTR(cpumask, 0644, wq_unbound_cpumask_show,
-              wq_unbound_cpumask_store);
+              wq_unbound_cpumask_store),
+       __ATTR(cpumask_requested, 0444, wq_requested_cpumask_show, NULL),
+       __ATTR(cpumask_isolated, 0444, wq_isolated_cpumask_show, NULL),
+       __ATTR_NULL,
+};
 
 static int __init wq_sysfs_init(void)
 {
+       struct device *dev_root;
        int err;
 
        err = subsys_virtual_register(&wq_subsys, NULL);
        if (err)
                return err;
 
-       return device_create_file(wq_subsys.dev_root, &wq_sysfs_cpumask_attr);
+       dev_root = bus_get_dev_root(&wq_subsys);
+       if (dev_root) {
+               struct device_attribute *attr;
+
+               for (attr = wq_sysfs_cpumask_attrs; attr->attr.name; attr++) {
+                       err = device_create_file(dev_root, attr);
+                       if (err)
+                               break;
+               }
+               put_device(dev_root);
+       }
+       return err;
 }
 core_initcall(wq_sysfs_init);
 
@@ -5591,11 +7230,10 @@ int workqueue_sysfs_register(struct workqueue_struct *wq)
        int ret;
 
        /*
-        * Adjusting max_active or creating new pwqs by applying
-        * attributes breaks ordering guarantee.  Disallow exposing ordered
-        * workqueues.
+        * Adjusting max_active breaks ordering guarantee.  Disallow exposing
+        * ordered workqueues.
         */
-       if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
+       if (WARN_ON(wq->flags & __WQ_ORDERED))
                return -EINVAL;
 
        wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
@@ -5683,6 +7321,57 @@ static struct timer_list wq_watchdog_timer;
 static unsigned long wq_watchdog_touched = INITIAL_JIFFIES;
 static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES;
 
+/*
+ * Show workers that might prevent the processing of pending work items.
+ * The only candidates are CPU-bound workers in the running state.
+ * Pending work items should be handled by another idle worker
+ * in all other situations.
+ */
+static void show_cpu_pool_hog(struct worker_pool *pool)
+{
+       struct worker *worker;
+       unsigned long irq_flags;
+       int bkt;
+
+       raw_spin_lock_irqsave(&pool->lock, irq_flags);
+
+       hash_for_each(pool->busy_hash, bkt, worker, hentry) {
+               if (task_is_running(worker->task)) {
+                       /*
+                        * Defer printing to avoid deadlocks in console
+                        * drivers that queue work while holding locks
+                        * also taken in their write paths.
+                        */
+                       printk_deferred_enter();
+
+                       pr_info("pool %d:\n", pool->id);
+                       sched_show_task(worker->task);
+
+                       printk_deferred_exit();
+               }
+       }
+
+       raw_spin_unlock_irqrestore(&pool->lock, irq_flags);
+}
+
+static void show_cpu_pools_hogs(void)
+{
+       struct worker_pool *pool;
+       int pi;
+
+       pr_info("Showing backtraces of running workers in stalled CPU-bound worker pools:\n");
+
+       rcu_read_lock();
+
+       for_each_pool(pool, pi) {
+               if (pool->cpu_stall)
+                       show_cpu_pool_hog(pool);
+       }
+
+       rcu_read_unlock();
+}
+
 static void wq_watchdog_reset_touched(void)
 {
        int cpu;
@@ -5696,6 +7385,8 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
 {
        unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ;
        bool lockup_detected = false;
+       bool cpu_pool_stall = false;
+       unsigned long now = jiffies;
        struct worker_pool *pool;
        int pi;
 
@@ -5707,40 +7398,51 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
        for_each_pool(pool, pi) {
                unsigned long pool_ts, touched, ts;
 
+               pool->cpu_stall = false;
                if (list_empty(&pool->worklist))
                        continue;
 
+               /*
+                * If a virtual machine is stopped by the host, it can look
+                * like a stall to the watchdog.
+                */
+               kvm_check_and_clear_guest_paused();
+
                /* get the latest of pool and touched timestamps */
+               if (pool->cpu >= 0)
+                       touched = READ_ONCE(per_cpu(wq_watchdog_touched_cpu, pool->cpu));
+               else
+                       touched = READ_ONCE(wq_watchdog_touched);
                pool_ts = READ_ONCE(pool->watchdog_ts);
-               touched = READ_ONCE(wq_watchdog_touched);
 
                if (time_after(pool_ts, touched))
                        ts = pool_ts;
                else
                        ts = touched;
 
-               if (pool->cpu >= 0) {
-                       unsigned long cpu_touched =
-                               READ_ONCE(per_cpu(wq_watchdog_touched_cpu,
-                                                 pool->cpu));
-                       if (time_after(cpu_touched, ts))
-                               ts = cpu_touched;
-               }
-
                /* did we stall? */
-               if (time_after(jiffies, ts + thresh)) {
+               if (time_after(now, ts + thresh)) {
                        lockup_detected = true;
+                       if (pool->cpu >= 0 && !(pool->flags & POOL_BH)) {
+                               pool->cpu_stall = true;
+                               cpu_pool_stall = true;
+                       }
                        pr_emerg("BUG: workqueue lockup - pool");
                        pr_cont_pool_info(pool);
                        pr_cont(" stuck for %us!\n",
-                               jiffies_to_msecs(jiffies - pool_ts) / 1000);
+                               jiffies_to_msecs(now - pool_ts) / 1000);
                }
        }
 
        rcu_read_unlock();
 
        if (lockup_detected)
-               show_workqueue_state();
+               show_all_workqueues();
+
+       if (cpu_pool_stall)
+               show_cpu_pools_hogs();
 
        wq_watchdog_reset_touched();
        mod_timer(&wq_watchdog_timer, jiffies + thresh);
@@ -5750,8 +7452,8 @@ notrace void wq_watchdog_touch(int cpu)
 {
        if (cpu >= 0)
                per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
-       else
-               wq_watchdog_touched = jiffies;
+
+       wq_watchdog_touched = jiffies;
 }
 
 static void wq_watchdog_set_thresh(unsigned long thresh)
@@ -5804,106 +7506,132 @@ static inline void wq_watchdog_init(void) { }
 
 #endif /* CONFIG_WQ_WATCHDOG */
 
-static void __init wq_numa_init(void)
+static void bh_pool_kick_normal(struct irq_work *irq_work)
 {
-       cpumask_var_t *tbl;
-       int node, cpu;
+       raise_softirq_irqoff(TASKLET_SOFTIRQ);
+}
 
-       if (num_possible_nodes() <= 1)
-               return;
+static void bh_pool_kick_highpri(struct irq_work *irq_work)
+{
+       raise_softirq_irqoff(HI_SOFTIRQ);
+}
 
-       if (wq_disable_numa) {
-               pr_info("workqueue: NUMA affinity support disabled\n");
+static void __init restrict_unbound_cpumask(const char *name, const struct cpumask *mask)
+{
+       if (!cpumask_intersects(wq_unbound_cpumask, mask)) {
+               pr_warn("workqueue: Restricting unbound_cpumask (%*pb) with %s (%*pb) leaves no CPU, ignoring\n",
+                       cpumask_pr_args(wq_unbound_cpumask), name, cpumask_pr_args(mask));
                return;
        }
 
-       wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs(GFP_KERNEL);
-       BUG_ON(!wq_update_unbound_numa_attrs_buf);
-
-       /*
-        * We want masks of possible CPUs of each node which isn't readily
-        * available.  Build one from cpu_to_node() which should have been
-        * fully initialized by now.
-        */
-       tbl = kcalloc(nr_node_ids, sizeof(tbl[0]), GFP_KERNEL);
-       BUG_ON(!tbl);
-
-       for_each_node(node)
-               BUG_ON(!zalloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
-                               node_online(node) ? node : NUMA_NO_NODE));
+       cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, mask);
+}
 
-       for_each_possible_cpu(cpu) {
-               node = cpu_to_node(cpu);
-               if (WARN_ON(node == NUMA_NO_NODE)) {
-                       pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu);
-                       /* happens iff arch is bonkers, let's just proceed */
-                       return;
-               }
-               cpumask_set_cpu(cpu, tbl[node]);
-       }
+static void __init init_cpu_worker_pool(struct worker_pool *pool, int cpu, int nice)
+{
+       BUG_ON(init_worker_pool(pool));
+       pool->cpu = cpu;
+       cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
+       cpumask_copy(pool->attrs->__pod_cpumask, cpumask_of(cpu));
+       pool->attrs->nice = nice;
+       pool->attrs->affn_strict = true;
+       pool->node = cpu_to_node(cpu);
 
-       wq_numa_possible_cpumask = tbl;
-       wq_numa_enabled = true;
+       /* alloc pool ID */
+       mutex_lock(&wq_pool_mutex);
+       BUG_ON(worker_pool_assign_id(pool));
+       mutex_unlock(&wq_pool_mutex);
 }
 
 /**
  * workqueue_init_early - early init for workqueue subsystem
  *
- * This is the first half of two-staged workqueue subsystem initialization
- * and invoked as soon as the bare basics - memory allocation, cpumasks and
- * idr are up.  It sets up all the data structures and system workqueues
- * and allows early boot code to create workqueues and queue/cancel work
- * items.  Actual work item execution starts only after kthreads can be
- * created and scheduled right before early initcalls.
+ * This is the first step of the three-stage workqueue subsystem initialization
+ * and is invoked as soon as the bare basics - memory allocation, cpumasks and
+ * idr - are up. It sets up all the data structures and system workqueues and
+ * allows early boot code to create workqueues and queue/cancel work items.
+ * Actual work item execution starts only after kthreads can be created and
+ * scheduled, right before early initcalls.
  */
-int __init workqueue_init_early(void)
+void __init workqueue_init_early(void)
 {
+       struct wq_pod_type *pt = &wq_pod_types[WQ_AFFN_SYSTEM];
        int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
-       int hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ;
+       void (*irq_work_fns[2])(struct irq_work *) = { bh_pool_kick_normal,
+                                                      bh_pool_kick_highpri };
        int i, cpu;
 
-       WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
+       BUILD_BUG_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
 
        BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
-       cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(hk_flags));
+       BUG_ON(!alloc_cpumask_var(&wq_requested_unbound_cpumask, GFP_KERNEL));
+       BUG_ON(!zalloc_cpumask_var(&wq_isolated_cpumask, GFP_KERNEL));
+
+       cpumask_copy(wq_unbound_cpumask, cpu_possible_mask);
+       restrict_unbound_cpumask("HK_TYPE_WQ", housekeeping_cpumask(HK_TYPE_WQ));
+       restrict_unbound_cpumask("HK_TYPE_DOMAIN", housekeeping_cpumask(HK_TYPE_DOMAIN));
+       if (!cpumask_empty(&wq_cmdline_cpumask))
+               restrict_unbound_cpumask("workqueue.unbound_cpus", &wq_cmdline_cpumask);
+
+       cpumask_copy(wq_requested_unbound_cpumask, wq_unbound_cpumask);
 
        pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
 
-       /* initialize CPU pools */
+       wq_update_pod_attrs_buf = alloc_workqueue_attrs();
+       BUG_ON(!wq_update_pod_attrs_buf);
+
+       /*
+        * If nohz_full is enabled, make power-efficient workqueues unbound.
+        * This allows work items to be moved to housekeeping CPUs.
+        */
+       if (housekeeping_enabled(HK_TYPE_TICK))
+               wq_power_efficient = true;
+
+       /* initialize WQ_AFFN_SYSTEM pods */
+       pt->pod_cpus = kcalloc(1, sizeof(pt->pod_cpus[0]), GFP_KERNEL);
+       pt->pod_node = kcalloc(1, sizeof(pt->pod_node[0]), GFP_KERNEL);
+       pt->cpu_pod = kcalloc(nr_cpu_ids, sizeof(pt->cpu_pod[0]), GFP_KERNEL);
+       BUG_ON(!pt->pod_cpus || !pt->pod_node || !pt->cpu_pod);
+
+       BUG_ON(!zalloc_cpumask_var_node(&pt->pod_cpus[0], GFP_KERNEL, NUMA_NO_NODE));
+
+       pt->nr_pods = 1;
+       cpumask_copy(pt->pod_cpus[0], cpu_possible_mask);
+       pt->pod_node[0] = NUMA_NO_NODE;
+       pt->cpu_pod[0] = 0;
+
+       /* initialize BH and CPU pools */
        for_each_possible_cpu(cpu) {
                struct worker_pool *pool;
 
                i = 0;
-               for_each_cpu_worker_pool(pool, cpu) {
-                       BUG_ON(init_worker_pool(pool));
-                       pool->cpu = cpu;
-                       cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
-                       pool->attrs->nice = std_nice[i++];
-                       pool->node = cpu_to_node(cpu);
-
-                       /* alloc pool ID */
-                       mutex_lock(&wq_pool_mutex);
-                       BUG_ON(worker_pool_assign_id(pool));
-                       mutex_unlock(&wq_pool_mutex);
+               for_each_bh_worker_pool(pool, cpu) {
+                       init_cpu_worker_pool(pool, cpu, std_nice[i]);
+                       pool->flags |= POOL_BH;
+                       init_irq_work(bh_pool_irq_work(pool), irq_work_fns[i]);
+                       i++;
                }
+
+               i = 0;
+               for_each_cpu_worker_pool(pool, cpu)
+                       init_cpu_worker_pool(pool, cpu, std_nice[i++]);
        }
 
        /* create default unbound and ordered wq attrs */
        for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
                struct workqueue_attrs *attrs;
 
-               BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
+               BUG_ON(!(attrs = alloc_workqueue_attrs()));
                attrs->nice = std_nice[i];
                unbound_std_wq_attrs[i] = attrs;
 
                /*
                 * An ordered wq should have only one pwq as ordering is
                 * guaranteed by max_active which is enforced by pwqs.
-                * Turn off NUMA so that dfl_pwq is used for all nodes.
                 */
-               BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
+               BUG_ON(!(attrs = alloc_workqueue_attrs()));
                attrs->nice = std_nice[i];
-               attrs->no_numa = true;
+               attrs->ordered = true;
                ordered_wq_attrs[i] = attrs;
        }
 
@@ -5911,58 +7639,94 @@ int __init workqueue_init_early(void)
        system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0);
        system_long_wq = alloc_workqueue("events_long", 0, 0);
        system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
-                                           WQ_UNBOUND_MAX_ACTIVE);
+                                           WQ_MAX_ACTIVE);
        system_freezable_wq = alloc_workqueue("events_freezable",
                                              WQ_FREEZABLE, 0);
        system_power_efficient_wq = alloc_workqueue("events_power_efficient",
                                              WQ_POWER_EFFICIENT, 0);
-       system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient",
+       system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_pwr_efficient",
                                              WQ_FREEZABLE | WQ_POWER_EFFICIENT,
                                              0);
+       system_bh_wq = alloc_workqueue("events_bh", WQ_BH, 0);
+       system_bh_highpri_wq = alloc_workqueue("events_bh_highpri",
+                                              WQ_BH | WQ_HIGHPRI, 0);
        BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
               !system_unbound_wq || !system_freezable_wq ||
               !system_power_efficient_wq ||
-              !system_freezable_power_efficient_wq);
+              !system_freezable_power_efficient_wq ||
+              !system_bh_wq || !system_bh_highpri_wq);
+}
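
The new BH workqueues execute work items in softirq context on the queueing
CPU rather than in a kworker task. A minimal usage sketch, with the callback
and work item names assumed:

#include <linux/workqueue.h>

/* runs in BH (softirq) context on the queueing CPU; must not sleep */
static void my_bh_fn(struct work_struct *work)
{
}

static DECLARE_WORK(my_bh_work, my_bh_fn);

static void kick_bh(void)
{
        queue_work(system_bh_wq, &my_bh_work);
}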
 
-       return 0;
+static void __init wq_cpu_intensive_thresh_init(void)
+{
+       unsigned long thresh;
+       unsigned long bogo;
+
+       pwq_release_worker = kthread_create_worker(0, "pool_workqueue_release");
+       BUG_ON(IS_ERR(pwq_release_worker));
+
+       /* if the user set it to a specific value, keep it */
+       if (wq_cpu_intensive_thresh_us != ULONG_MAX)
+               return;
+
+       /*
+        * The default of 10ms is derived from the fact that most modern (as of
+        * 2023) processors can do a lot in 10ms and that it's just below what
+        * most consider human-perceivable. However, the kernel also runs on
+        * much slower CPUs, including microcontrollers, where the threshold is
+        * way too low.
+        *
+        * Let's scale the threshold up to 1 second if BogoMIPS is below 4000.
+        * This is by no means accurate but it doesn't have to be. The mechanism
+        * is still useful even when the threshold is fully scaled up. Also,
+        * since such reports usually apply to everyone, a few machines
+        * operating with longer thresholds won't significantly diminish the
+        * mechanism's usefulness.
+        */
+       thresh = 10 * USEC_PER_MSEC;
+
+       /* see init/calibrate.c for lpj -> BogoMIPS calculation */
+       bogo = max_t(unsigned long, loops_per_jiffy / 500000 * HZ, 1);
+       if (bogo < 4000)
+               thresh = min_t(unsigned long, thresh * 4000 / bogo, USEC_PER_SEC);
+
+       pr_debug("wq_cpu_intensive_thresh: lpj=%lu BogoMIPS=%lu thresh_us=%lu\n",
+                loops_per_jiffy, bogo, thresh);
+
+       wq_cpu_intensive_thresh_us = thresh;
 }
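
A worked example of the clamp with assumed BogoMIPS readings:

        /*
         * bogo == 2000: thresh = min(10000 * 4000 / 2000, USEC_PER_SEC)
         *                      = 20000us (20ms)
         * bogo ==   20: 10000 * 4000 / 20 = 2000000us, clamped to
         *               USEC_PER_SEC, i.e. the 1 second ceiling
         */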
 
 /**
  * workqueue_init - bring workqueue subsystem fully online
  *
- * This is the latter half of two-staged workqueue subsystem initialization
- * and invoked as soon as kthreads can be created and scheduled.
- * Workqueues have been created and work items queued on them, but there
- * are no kworkers executing the work items yet.  Populate the worker pools
- * with the initial workers and enable future kworker creations.
+ * This is the second step of the three-stage workqueue subsystem initialization
+ * and is invoked as soon as kthreads can be created and scheduled. Workqueues
+ * have been created and work items queued on them, but there are no kworkers
+ * executing the work items yet. Populate the worker pools with the initial
+ * workers and enable future kworker creations.
  */
-int __init workqueue_init(void)
+void __init workqueue_init(void)
 {
        struct workqueue_struct *wq;
        struct worker_pool *pool;
        int cpu, bkt;
 
-       /*
-        * It'd be simpler to initialize NUMA in workqueue_init_early() but
-        * CPU to node mapping may not be available that early on some
-        * archs such as power and arm64.  As per-cpu pools created
-        * previously could be missing node hint and unbound pools NUMA
-        * affinity, fix them up.
-        *
-        * Also, while iterating workqueues, create rescuers if requested.
-        */
-       wq_numa_init();
+       wq_cpu_intensive_thresh_init();
 
        mutex_lock(&wq_pool_mutex);
 
+       /*
+        * Per-cpu pools created earlier could be missing node hint. Fix them
+        * up. Also, create a rescuer for workqueues that requested it.
+        */
        for_each_possible_cpu(cpu) {
-               for_each_cpu_worker_pool(pool, cpu) {
+               for_each_bh_worker_pool(pool, cpu)
+                       pool->node = cpu_to_node(cpu);
+               for_each_cpu_worker_pool(pool, cpu)
                        pool->node = cpu_to_node(cpu);
-               }
        }
 
        list_for_each_entry(wq, &workqueues, list) {
-               wq_update_unbound_numa(wq, smp_processor_id(), true);
                WARN(init_rescuer(wq),
                     "workqueue: failed to create early rescuer for %s",
                     wq->name);
@@ -5970,7 +7734,16 @@ int __init workqueue_init(void)
 
        mutex_unlock(&wq_pool_mutex);
 
-       /* create the initial workers */
+       /*
+        * Create the initial workers. A BH pool has one pseudo worker that
+        * represents the shared BH execution context and thus doesn't get
+        * affected by hotplug events. Create the BH pseudo workers for all
+        * possible CPUs here.
+        */
+       for_each_possible_cpu(cpu)
+               for_each_bh_worker_pool(pool, cpu)
+                       BUG_ON(!create_worker(pool));
+
        for_each_online_cpu(cpu) {
                for_each_cpu_worker_pool(pool, cpu) {
                        pool->flags &= ~POOL_DISASSOCIATED;
@@ -5983,6 +7756,123 @@ int __init workqueue_init(void)
 
        wq_online = true;
        wq_watchdog_init();
+}
 
-       return 0;
+/*
+ * Initialize @pt by first initializing @pt->cpu_pod[] with pod IDs according to
+ * @cpus_share_pod(). Each subset of CPUs that shares a pod is assigned a unique
+ * and consecutive pod ID. The rest of @pt is initialized accordingly.
+ */
+static void __init init_pod_type(struct wq_pod_type *pt,
+                                bool (*cpus_share_pod)(int, int))
+{
+       int cur, pre, cpu, pod;
+
+       pt->nr_pods = 0;
+
+       /* init @pt->cpu_pod[] according to @cpus_share_pod() */
+       pt->cpu_pod = kcalloc(nr_cpu_ids, sizeof(pt->cpu_pod[0]), GFP_KERNEL);
+       BUG_ON(!pt->cpu_pod);
+
+       for_each_possible_cpu(cur) {
+               for_each_possible_cpu(pre) {
+                       if (pre >= cur) {
+                               pt->cpu_pod[cur] = pt->nr_pods++;
+                               break;
+                       }
+                       if (cpus_share_pod(cur, pre)) {
+                               pt->cpu_pod[cur] = pt->cpu_pod[pre];
+                               break;
+                       }
+               }
+       }
+
+       /* init the rest to match @pt->cpu_pod[] */
+       pt->pod_cpus = kcalloc(pt->nr_pods, sizeof(pt->pod_cpus[0]), GFP_KERNEL);
+       pt->pod_node = kcalloc(pt->nr_pods, sizeof(pt->pod_node[0]), GFP_KERNEL);
+       BUG_ON(!pt->pod_cpus || !pt->pod_node);
+
+       for (pod = 0; pod < pt->nr_pods; pod++)
+               BUG_ON(!zalloc_cpumask_var(&pt->pod_cpus[pod], GFP_KERNEL));
+
+       for_each_possible_cpu(cpu) {
+               cpumask_set_cpu(cpu, pt->pod_cpus[pt->cpu_pod[cpu]]);
+               pt->pod_node[pt->cpu_pod[cpu]] = cpu_to_node(cpu);
+       }
+}
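
For instance, on an assumed four-CPU system where cpus_share_pod() pairs CPUs
{0,1} and {2,3}, the scan above yields:

        /*
         * pt->nr_pods    = 2
         * pt->cpu_pod[]  = { 0, 0, 1, 1 }
         * pt->pod_cpus[] = { mask(0-1), mask(2-3) }
         * pt->pod_node[] = { cpu_to_node(1), cpu_to_node(3) }  (last write wins)
         */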
+
+static bool __init cpus_dont_share(int cpu0, int cpu1)
+{
+       return false;
+}
+
+static bool __init cpus_share_smt(int cpu0, int cpu1)
+{
+#ifdef CONFIG_SCHED_SMT
+       return cpumask_test_cpu(cpu0, cpu_smt_mask(cpu1));
+#else
+       return false;
+#endif
+}
+
+static bool __init cpus_share_numa(int cpu0, int cpu1)
+{
+       return cpu_to_node(cpu0) == cpu_to_node(cpu1);
+}
+
+/**
+ * workqueue_init_topology - initialize CPU pods for unbound workqueues
+ *
+ * This is the third step of the three-stage workqueue subsystem initialization
+ * and is invoked after SMP and topology information are fully initialized. It
+ * initializes the unbound CPU pods accordingly.
+ */
+void __init workqueue_init_topology(void)
+{
+       struct workqueue_struct *wq;
+       int cpu;
+
+       init_pod_type(&wq_pod_types[WQ_AFFN_CPU], cpus_dont_share);
+       init_pod_type(&wq_pod_types[WQ_AFFN_SMT], cpus_share_smt);
+       init_pod_type(&wq_pod_types[WQ_AFFN_CACHE], cpus_share_cache);
+       init_pod_type(&wq_pod_types[WQ_AFFN_NUMA], cpus_share_numa);
+
+       wq_topo_initialized = true;
+
+       mutex_lock(&wq_pool_mutex);
+
+       /*
+        * Workqueues allocated earlier would have all CPUs sharing the default
+        * worker pool. Explicitly call wq_update_pod() on all workqueue and CPU
+        * combinations to apply per-pod sharing.
+        */
+       list_for_each_entry(wq, &workqueues, list) {
+               for_each_online_cpu(cpu)
+                       wq_update_pod(wq, cpu, cpu, true);
+               if (wq->flags & WQ_UNBOUND) {
+                       mutex_lock(&wq->mutex);
+                       wq_update_node_max_active(wq, -1);
+                       mutex_unlock(&wq->mutex);
+               }
+       }
+
+       mutex_unlock(&wq_pool_mutex);
+}
+
+void __warn_flushing_systemwide_wq(void)
+{
+       pr_warn("WARNING: Flushing system-wide workqueues will be prohibited in near future.\n");
+       dump_stack();
+}
+EXPORT_SYMBOL(__warn_flushing_systemwide_wq);
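
This helper is called from the header-side flush_workqueue() and
flush_scheduled_work() wrappers when they target a system-wide workqueue. A
sketch of the discouraged pattern and its usual replacement, with names
assumed:

static struct workqueue_struct *my_wq;  /* allocated at init, assumed */

static void my_driver_exit(void)
{
        /*
         * flush_scheduled_work() would flush system_wq for everyone and
         * print the warning above; flush only our own work items instead.
         */
        flush_workqueue(my_wq);
        destroy_workqueue(my_wq);
}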
+
+static int __init workqueue_unbound_cpus_setup(char *str)
+{
+       if (cpulist_parse(str, &wq_cmdline_cpumask) < 0) {
+               cpumask_clear(&wq_cmdline_cpumask);
+               pr_warn("workqueue.unbound_cpus: incorrect CPU range, using default\n");
+       }
+
+       return 1;
 }
+__setup("workqueue.unbound_cpus=", workqueue_unbound_cpus_setup);