workqueue: make hotplug processing per-pool
[deliverable/linux.git] / kernel / workqueue.c
index fbc6576a83c3e6837df8f9912963aadad2860a06..fd400f8c9514f8d41ddd0fa0c621f3bc2c4213fa 100644 (file)
 #include <linux/debug_locks.h>
 #include <linux/lockdep.h>
 #include <linux/idr.h>
+#include <linux/hashtable.h>
 
-#include "workqueue_sched.h"
+#include "workqueue_internal.h"
 
 enum {
        /*
-        * global_cwq flags
+        * worker_pool flags
         *
-        * A bound gcwq is either associated or disassociated with its CPU.
+        * A bound pool is either associated or disassociated with its CPU.
         * While associated (!DISASSOCIATED), all workers are bound to the
         * CPU and none has %WORKER_UNBOUND set and concurrency management
         * is in effect.
         *
         * While DISASSOCIATED, the cpu may be offline and all workers have
         * %WORKER_UNBOUND set and concurrency management disabled, and may
-        * be executing on any CPU.  The gcwq behaves as an unbound one.
+        * be executing on any CPU.  The pool behaves as an unbound one.
         *
         * Note that DISASSOCIATED can be flipped only while holding
-        * assoc_mutex of all pools on the gcwq to avoid changing binding
-        * state while create_worker() is in progress.
+        * assoc_mutex to avoid changing binding state while
+        * create_worker() is in progress.
         */
-       GCWQ_DISASSOCIATED      = 1 << 0,       /* cpu can't serve workers */
-       GCWQ_FREEZING           = 1 << 1,       /* freeze in progress */
-
-       /* pool flags */
        POOL_MANAGE_WORKERS     = 1 << 0,       /* need to manage workers */
        POOL_MANAGING_WORKERS   = 1 << 1,       /* managing workers */
+       POOL_DISASSOCIATED      = 1 << 2,       /* cpu can't serve workers */
+       POOL_FREEZING           = 1 << 3,       /* freeze in progress */
 
        /* worker flags */
        WORKER_STARTED          = 1 << 0,       /* started */
@@ -79,11 +78,9 @@ enum {
        WORKER_NOT_RUNNING      = WORKER_PREP | WORKER_UNBOUND |
                                  WORKER_CPU_INTENSIVE,
 
-       NR_WORKER_POOLS         = 2,            /* # worker pools per gcwq */
+       NR_STD_WORKER_POOLS     = 2,            /* # standard pools per cpu */
 
        BUSY_WORKER_HASH_ORDER  = 6,            /* 64 pointers */
-       BUSY_WORKER_HASH_SIZE   = 1 << BUSY_WORKER_HASH_ORDER,
-       BUSY_WORKER_HASH_MASK   = BUSY_WORKER_HASH_SIZE - 1,
 
        MAX_IDLE_WORKERS_RATIO  = 4,            /* 1/4 of busy can be idle */
        IDLE_WORKER_TIMEOUT     = 300 * HZ,     /* keep idle ones for 5 mins */
@@ -111,48 +108,25 @@ enum {
  * P: Preemption protected.  Disabling preemption is enough and should
  *    only be modified and accessed from the local cpu.
  *
- * L: gcwq->lock protected.  Access with gcwq->lock held.
+ * L: pool->lock protected.  Access with pool->lock held.
  *
- * X: During normal operation, modification requires gcwq->lock and
- *    should be done only from local cpu.  Either disabling preemption
- *    on local cpu or grabbing gcwq->lock is enough for read access.
- *    If GCWQ_DISASSOCIATED is set, it's identical to L.
+ * X: During normal operation, modification requires pool->lock and should
+ *    be done only from local cpu.  Either disabling preemption on local
+ *    cpu or grabbing pool->lock is enough for read access.  If
+ *    POOL_DISASSOCIATED is set, it's identical to L.
  *
  * F: wq->flush_mutex protected.
  *
  * W: workqueue_lock protected.
  */
 
-struct global_cwq;
-struct worker_pool;
-
-/*
- * The poor guys doing the actual heavy lifting.  All on-duty workers
- * are either serving the manager role, on idle list or on busy hash.
- */
-struct worker {
-       /* on idle list while idle, on busy hash table while busy */
-       union {
-               struct list_head        entry;  /* L: while idle */
-               struct hlist_node       hentry; /* L: while busy */
-       };
-
-       struct work_struct      *current_work;  /* L: work being processed */
-       struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
-       struct list_head        scheduled;      /* L: scheduled works */
-       struct task_struct      *task;          /* I: worker task */
-       struct worker_pool      *pool;          /* I: the associated pool */
-       /* 64 bytes boundary on 64bit, 32 on 32bit */
-       unsigned long           last_active;    /* L: last active timestamp */
-       unsigned int            flags;          /* X: flags */
-       int                     id;             /* I: worker id */
-
-       /* for rebinding worker to CPU */
-       struct work_struct      rebind_work;    /* L: for busy worker */
-};
+/* struct worker is defined in workqueue_internal.h */
 
 struct worker_pool {
        struct global_cwq       *gcwq;          /* I: the owning gcwq */
+       spinlock_t              lock;           /* the pool lock */
+       unsigned int            cpu;            /* I: the associated cpu */
+       int                     id;             /* I: pool ID */
        unsigned int            flags;          /* X: flags */
 
        struct list_head        worklist;       /* L: list of pending works */
@@ -165,7 +139,11 @@ struct worker_pool {
        struct timer_list       idle_timer;     /* L: worker idle timeout */
        struct timer_list       mayday_timer;   /* L: SOS timer for workers */
 
-       struct mutex            assoc_mutex;    /* protect GCWQ_DISASSOCIATED */
+       /* workers are chained either in busy_hash or idle_list */
+       DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
+                                               /* L: hash of busy workers */
+
+       struct mutex            assoc_mutex;    /* protect POOL_DISASSOCIATED */
        struct ida              worker_ida;     /* L: for worker IDs */
 };
 
@@ -175,15 +153,7 @@ struct worker_pool {
  * target workqueues.
  */
 struct global_cwq {
-       spinlock_t              lock;           /* the gcwq lock */
-       unsigned int            cpu;            /* I: the associated cpu */
-       unsigned int            flags;          /* L: GCWQ_* flags */
-
-       /* workers are chained either in busy_hash or pool idle_list */
-       struct hlist_head       busy_hash[BUSY_WORKER_HASH_SIZE];
-                                               /* L: hash of busy workers */
-
-       struct worker_pool      pools[NR_WORKER_POOLS];
+       struct worker_pool      pools[NR_STD_WORKER_POOLS];
                                                /* normal and highpri pools */
 } ____cacheline_aligned_in_smp;
 
@@ -282,11 +252,10 @@ EXPORT_SYMBOL_GPL(system_freezable_wq);
 
 #define for_each_worker_pool(pool, gcwq)                               \
        for ((pool) = &(gcwq)->pools[0];                                \
-            (pool) < &(gcwq)->pools[NR_WORKER_POOLS]; (pool)++)
+            (pool) < &(gcwq)->pools[NR_STD_WORKER_POOLS]; (pool)++)
 
-#define for_each_busy_worker(worker, i, pos, gcwq)                     \
-       for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)                     \
-               hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
+/* iterate over every busy worker of @pool; pool->lock must be held ("L") */
+#define for_each_busy_worker(worker, i, pos, pool)                     \
+       hash_for_each(pool->busy_hash, i, pos, worker, hentry)
 
 static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask,
                                  unsigned int sw)
@@ -464,21 +433,25 @@ static bool workqueue_freezing;           /* W: have wqs started freezing? */
  * try_to_wake_up().  Put it in a separate cacheline.
  */
 static DEFINE_PER_CPU(struct global_cwq, global_cwq);
-static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, pool_nr_running[NR_WORKER_POOLS]);
+static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, pool_nr_running[NR_STD_WORKER_POOLS]);
 
 /*
- * Global cpu workqueue and nr_running counter for unbound gcwq.  The
- * gcwq is always online, has GCWQ_DISASSOCIATED set, and all its
- * workers have WORKER_UNBOUND set.
+ * Global cpu workqueue and nr_running counter for unbound gcwq.  The pools
+ * for online CPUs have POOL_DISASSOCIATED set, and all their workers have
+ * WORKER_UNBOUND set.
  */
 static struct global_cwq unbound_global_cwq;
-static atomic_t unbound_pool_nr_running[NR_WORKER_POOLS] = {
-       [0 ... NR_WORKER_POOLS - 1]     = ATOMIC_INIT(0),       /* always 0 */
+static atomic_t unbound_pool_nr_running[NR_STD_WORKER_POOLS] = {
+       [0 ... NR_STD_WORKER_POOLS - 1] = ATOMIC_INIT(0),       /* always 0 */
 };
 
+/* idr of all pools */
+static DEFINE_MUTEX(worker_pool_idr_mutex);
+static DEFINE_IDR(worker_pool_idr);
+
 static int worker_thread(void *__worker);
 
-static int worker_pool_pri(struct worker_pool *pool)
+/* index of @pool within its gcwq's pools[]: 0 for normal, 1 for highpri */
+static int std_worker_pool_pri(struct worker_pool *pool)
 {
        return pool - pool->gcwq->pools;
 }
@@ -491,10 +464,39 @@ static struct global_cwq *get_gcwq(unsigned int cpu)
                return &unbound_global_cwq;
 }
 
+/*
+ * worker_pool_assign_id - allocate an ID for @pool and record it in pool->id
+ *
+ * Returns 0 on success, -errno on failure.  idr_pre_get() can fail on OOM
+ * in which case idr_get_new() reports -EAGAIN; retry the pre_get/get_new
+ * sequence instead of leaking the transient -EAGAIN to the caller.
+ */
+static int worker_pool_assign_id(struct worker_pool *pool)
+{
+       int ret;
+
+       mutex_lock(&worker_pool_idr_mutex);
+       do {
+               if (!idr_pre_get(&worker_pool_idr, GFP_KERNEL)) {
+                       ret = -ENOMEM;
+                       break;
+               }
+               ret = idr_get_new(&worker_pool_idr, pool, &pool->id);
+       } while (ret == -EAGAIN);
+       mutex_unlock(&worker_pool_idr_mutex);
+
+       return ret;
+}
+
+/*
+ * Lookup worker_pool by id.  The idr currently is built during boot and
+ * never modified.  Don't worry about locking for now.
+ *
+ * Returns the pool registered under @pool_id, or %NULL if none.
+ */
+static struct worker_pool *worker_pool_by_id(int pool_id)
+{
+       return idr_find(&worker_pool_idr, pool_id);
+}
+
+/*
+ * Return the standard worker pool for @cpu: @highpri indexes directly into
+ * gcwq->pools[], so pools[0] is the normal pool and pools[1] the highpri one.
+ */
+static struct worker_pool *get_std_worker_pool(int cpu, bool highpri)
+{
+       struct global_cwq *gcwq = get_gcwq(cpu);
+
+       return &gcwq->pools[highpri];
+}
+
 static atomic_t *get_pool_nr_running(struct worker_pool *pool)
 {
-       int cpu = pool->gcwq->cpu;
-       int idx = worker_pool_pri(pool);
+       int cpu = pool->cpu;
+       int idx = std_worker_pool_pri(pool);
 
        if (cpu != WORK_CPU_UNBOUND)
                return &per_cpu(pool_nr_running, cpu)[idx];
@@ -532,17 +534,17 @@ static int work_next_color(int color)
 /*
  * While queued, %WORK_STRUCT_CWQ is set and non flag bits of a work's data
  * contain the pointer to the queued cwq.  Once execution starts, the flag
- * is cleared and the high bits contain OFFQ flags and CPU number.
+ * is cleared and the high bits contain OFFQ flags and pool ID.
  *
- * set_work_cwq(), set_work_cpu_and_clear_pending(), mark_work_canceling()
- * and clear_work_data() can be used to set the cwq, cpu or clear
+ * set_work_cwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
+ * and clear_work_data() can be used to set the cwq, pool or clear
  * work->data.  These functions should only be called while the work is
  * owned - ie. while the PENDING bit is set.
  *
- * get_work_[g]cwq() can be used to obtain the gcwq or cwq corresponding to
- * a work.  gcwq is available once the work has been queued anywhere after
- * initialization until it is sync canceled.  cwq is available only while
- * the work item is queued.
+ * get_work_pool() and get_work_cwq() can be used to obtain the pool or cwq
+ * corresponding to a work.  Pool is available once the work has been
+ * queued anywhere after initialization until it is sync canceled.  cwq is
+ * available only while the work item is queued.
  *
  * %WORK_OFFQ_CANCELING is used to mark a work item which is being
  * canceled.  While being canceled, a work item may have its PENDING set
@@ -564,8 +566,8 @@ static void set_work_cwq(struct work_struct *work,
                      WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags);
 }
 
-static void set_work_cpu_and_clear_pending(struct work_struct *work,
-                                          unsigned int cpu)
+static void set_work_pool_and_clear_pending(struct work_struct *work,
+                                           int pool_id)
 {
        /*
         * The following wmb is paired with the implied mb in
@@ -574,13 +576,13 @@ static void set_work_cpu_and_clear_pending(struct work_struct *work,
         * owner.
         */
        smp_wmb();
-       set_work_data(work, (unsigned long)cpu << WORK_OFFQ_CPU_SHIFT, 0);
+       set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
 }
 
+/* reset work->data to %WORK_STRUCT_NO_POOL; caller must own @work (PENDING set) */
 static void clear_work_data(struct work_struct *work)
 {
-       smp_wmb();      /* see set_work_cpu_and_clear_pending() */
-       set_work_data(work, WORK_STRUCT_NO_CPU, 0);
+       smp_wmb();      /* see set_work_pool_and_clear_pending() */
+       set_work_data(work, WORK_STRUCT_NO_POOL, 0);
 }
 
 static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work)
@@ -593,30 +595,51 @@ static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work)
                return NULL;
 }
 
-static struct global_cwq *get_work_gcwq(struct work_struct *work)
+/**
+ * get_work_pool - return the worker_pool a given work was associated with
+ * @work: the work item of interest
+ *
+ * Return the worker_pool @work was last associated with.  %NULL if none.
+ */
+static struct worker_pool *get_work_pool(struct work_struct *work)
 {
        unsigned long data = atomic_long_read(&work->data);
-       unsigned int cpu;
+       struct worker_pool *pool;
+       int pool_id;
 
        if (data & WORK_STRUCT_CWQ)
                return ((struct cpu_workqueue_struct *)
-                       (data & WORK_STRUCT_WQ_DATA_MASK))->pool->gcwq;
+                       (data & WORK_STRUCT_WQ_DATA_MASK))->pool;
 
-       cpu = data >> WORK_OFFQ_CPU_SHIFT;
-       if (cpu == WORK_CPU_NONE)
+       pool_id = data >> WORK_OFFQ_POOL_SHIFT;
+       if (pool_id == WORK_OFFQ_POOL_NONE)
                return NULL;
 
-       BUG_ON(cpu >= nr_cpu_ids && cpu != WORK_CPU_UNBOUND);
-       return get_gcwq(cpu);
+       pool = worker_pool_by_id(pool_id);
+       /* a non-NONE pool ID stored in work->data should always resolve */
+       WARN_ON_ONCE(!pool);
+       return pool;
+}
+
+/**
+ * get_work_pool_id - return the worker pool ID a given work is associated with
+ * @work: the work item of interest
+ *
+ * Return the worker_pool ID @work was last associated with.
+ * %WORK_OFFQ_POOL_NONE if none.
+ */
+static int get_work_pool_id(struct work_struct *work)
+{
+       struct worker_pool *pool = get_work_pool(work);
+
+       /* NULL pool (never queued or cleared) maps to the NONE sentinel */
+       return pool ? pool->id : WORK_OFFQ_POOL_NONE;
+}
 
+/*
+ * Set %WORK_OFFQ_CANCELING in work->data while preserving the last pool ID
+ * and keeping %WORK_STRUCT_PENDING set; caller must own @work.
+ */
 static void mark_work_canceling(struct work_struct *work)
 {
-       struct global_cwq *gcwq = get_work_gcwq(work);
-       unsigned long cpu = gcwq ? gcwq->cpu : WORK_CPU_NONE;
+       unsigned long pool_id = get_work_pool_id(work);
 
-       set_work_data(work, (cpu << WORK_OFFQ_CPU_SHIFT) | WORK_OFFQ_CANCELING,
-                     WORK_STRUCT_PENDING);
+       pool_id <<= WORK_OFFQ_POOL_SHIFT;
+       set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING);
 }
 
 static bool work_is_canceling(struct work_struct *work)
@@ -629,7 +652,7 @@ static bool work_is_canceling(struct work_struct *work)
 /*
  * Policy functions.  These define the policies on how the global worker
  * pools are managed.  Unless noted otherwise, these functions assume that
- * they're being called with gcwq->lock held.
+ * they're being called with pool->lock held.
  */
 
 static bool __need_more_worker(struct worker_pool *pool)
@@ -714,7 +737,7 @@ static struct worker *first_worker(struct worker_pool *pool)
  * Wake up the first idle worker of @pool.
  *
  * CONTEXT:
- * spin_lock_irq(gcwq->lock).
+ * spin_lock_irq(pool->lock).
  */
 static void wake_up_worker(struct worker_pool *pool)
 {
@@ -740,7 +763,7 @@ void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
        struct worker *worker = kthread_data(task);
 
        if (!(worker->flags & WORKER_NOT_RUNNING)) {
-               WARN_ON_ONCE(worker->pool->gcwq->cpu != cpu);
+               WARN_ON_ONCE(worker->pool->cpu != cpu);
                atomic_inc(get_pool_nr_running(worker->pool));
        }
 }
@@ -764,12 +787,20 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task,
                                       unsigned int cpu)
 {
        struct worker *worker = kthread_data(task), *to_wakeup = NULL;
-       struct worker_pool *pool = worker->pool;
-       atomic_t *nr_running = get_pool_nr_running(pool);
+       struct worker_pool *pool;
+       atomic_t *nr_running;
 
+       /*
+        * Rescuers, which may not have all the fields set up like normal
+        * workers, also reach here, let's not access anything before
+        * checking NOT_RUNNING.
+        */
        if (worker->flags & WORKER_NOT_RUNNING)
                return NULL;
 
+       pool = worker->pool;
+       nr_running = get_pool_nr_running(pool);
+
        /* this can only happen on the local cpu */
        BUG_ON(cpu != raw_smp_processor_id());
 
@@ -781,7 +812,7 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task,
         * NOT_RUNNING is clear.  This means that we're bound to and
         * running on the local cpu w/ rq lock held and preemption
         * disabled, which in turn means that none else could be
-        * manipulating idle_list, so dereferencing idle_list without gcwq
+        * manipulating idle_list, so dereferencing idle_list without pool
         * lock is safe.
         */
        if (atomic_dec_and_test(nr_running) && !list_empty(&pool->worklist))
@@ -800,7 +831,7 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task,
  * woken up.
  *
  * CONTEXT:
- * spin_lock_irq(gcwq->lock)
+ * spin_lock_irq(pool->lock)
  */
 static inline void worker_set_flags(struct worker *worker, unsigned int flags,
                                    bool wakeup)
@@ -837,7 +868,7 @@ static inline void worker_set_flags(struct worker *worker, unsigned int flags,
  * Clear @flags in @worker->flags and adjust nr_running accordingly.
  *
  * CONTEXT:
- * spin_lock_irq(gcwq->lock)
+ * spin_lock_irq(pool->lock)
  */
 static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
 {
@@ -859,83 +890,52 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
 }
 
 /**
- * busy_worker_head - return the busy hash head for a work
- * @gcwq: gcwq of interest
- * @work: work to be hashed
- *
- * Return hash head of @gcwq for @work.
- *
- * CONTEXT:
- * spin_lock_irq(gcwq->lock).
- *
- * RETURNS:
- * Pointer to the hash head.
- */
-static struct hlist_head *busy_worker_head(struct global_cwq *gcwq,
-                                          struct work_struct *work)
-{
-       const int base_shift = ilog2(sizeof(struct work_struct));
-       unsigned long v = (unsigned long)work;
-
-       /* simple shift and fold hash, do we need something better? */
-       v >>= base_shift;
-       v += v >> BUSY_WORKER_HASH_ORDER;
-       v &= BUSY_WORKER_HASH_MASK;
-
-       return &gcwq->busy_hash[v];
-}
-
-/**
- * __find_worker_executing_work - find worker which is executing a work
- * @gcwq: gcwq of interest
- * @bwh: hash head as returned by busy_worker_head()
+ * find_worker_executing_work - find worker which is executing a work
+ * @pool: pool of interest
  * @work: work to find worker for
  *
- * Find a worker which is executing @work on @gcwq.  @bwh should be
- * the hash head obtained by calling busy_worker_head() with the same
- * work.
+ * Find a worker which is executing @work on @pool by searching
+ * @pool->busy_hash which is keyed by the address of @work.  For a worker
+ * to match, its current execution should match the address of @work and
+ * its work function.  This is to avoid unwanted dependency between
+ * unrelated work executions through a work item being recycled while still
+ * being executed.
+ *
+ * This is a bit tricky.  A work item may be freed once its execution
+ * starts and nothing prevents the freed area from being recycled for
+ * another work item.  If the same work item address ends up being reused
+ * before the original execution finishes, workqueue will identify the
+ * recycled work item as currently executing and make it wait until the
+ * current execution finishes, introducing an unwanted dependency.
+ *
+ * This function checks the work item address and work function to avoid
+ * false positives.  Note that this isn't complete as one may
+ * construct a work function which can introduce dependency onto itself
+ * through a recycled work item.  Well, if somebody wants to shoot oneself
+ * in the foot that badly, there's only so much we can do, and if such
+ * deadlock actually occurs, it should be easy to locate the culprit work
+ * function.
  *
  * CONTEXT:
- * spin_lock_irq(gcwq->lock).
+ * spin_lock_irq(pool->lock).
  *
  * RETURNS:
  * Pointer to worker which is executing @work if found, NULL
  * otherwise.
  */
-static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
-                                                  struct hlist_head *bwh,
-                                                  struct work_struct *work)
+static struct worker *find_worker_executing_work(struct worker_pool *pool,
+                                                struct work_struct *work)
 {
        struct worker *worker;
        struct hlist_node *tmp;
 
-       hlist_for_each_entry(worker, tmp, bwh, hentry)
-               if (worker->current_work == work)
+       hash_for_each_possible(pool->busy_hash, worker, tmp, hentry,
+                              (unsigned long)work)
+               if (worker->current_work == work &&
+                   worker->current_func == work->func)
                        return worker;
-       return NULL;
-}
 
-/**
- * find_worker_executing_work - find worker which is executing a work
- * @gcwq: gcwq of interest
- * @work: work to find worker for
- *
- * Find a worker which is executing @work on @gcwq.  This function is
- * identical to __find_worker_executing_work() except that this
- * function calculates @bwh itself.
- *
- * CONTEXT:
- * spin_lock_irq(gcwq->lock).
- *
- * RETURNS:
- * Pointer to worker which is executing @work if found, NULL
- * otherwise.
- */
-static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
-                                                struct work_struct *work)
-{
-       return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work),
-                                           work);
+       return NULL;
 }
 
 /**
@@ -953,7 +953,7 @@ static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
  * nested inside outer list_for_each_entry_safe().
  *
  * CONTEXT:
- * spin_lock_irq(gcwq->lock).
+ * spin_lock_irq(pool->lock).
  */
 static void move_linked_works(struct work_struct *work, struct list_head *head,
                              struct work_struct **nextp)
@@ -1006,7 +1006,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
  * decrement nr_in_flight of its cwq and handle workqueue flushing.
  *
  * CONTEXT:
- * spin_lock_irq(gcwq->lock).
+ * spin_lock_irq(pool->lock).
  */
 static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
 {
@@ -1070,7 +1070,7 @@ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
 static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
                               unsigned long *flags)
 {
-       struct global_cwq *gcwq;
+       struct worker_pool *pool;
 
        local_irq_save(*flags);
 
@@ -1095,19 +1095,19 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
         * The queueing is in progress, or it is already queued. Try to
         * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
         */
-       gcwq = get_work_gcwq(work);
-       if (!gcwq)
+       pool = get_work_pool(work);
+       if (!pool)
                goto fail;
 
-       spin_lock(&gcwq->lock);
+       spin_lock(&pool->lock);
        if (!list_empty(&work->entry)) {
                /*
-                * This work is queued, but perhaps we locked the wrong gcwq.
-                * In that case we must see the new value after rmb(), see
-                * insert_work()->wmb().
+                * This work is queued, but perhaps we locked the wrong
+                * pool.  In that case we must see the new value after
+                * rmb(), see insert_work()->wmb().
                 */
                smp_rmb();
-               if (gcwq == get_work_gcwq(work)) {
+               if (pool == get_work_pool(work)) {
                        debug_work_deactivate(work);
 
                        /*
@@ -1125,11 +1125,11 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
                        cwq_dec_nr_in_flight(get_work_cwq(work),
                                get_work_color(work));
 
-                       spin_unlock(&gcwq->lock);
+                       spin_unlock(&pool->lock);
                        return 1;
                }
        }
-       spin_unlock(&gcwq->lock);
+       spin_unlock(&pool->lock);
 fail:
        local_irq_restore(*flags);
        if (work_is_canceling(work))
@@ -1149,7 +1149,7 @@ fail:
  * @extra_flags is or'd to work_struct flags.
  *
  * CONTEXT:
- * spin_lock_irq(gcwq->lock).
+ * spin_lock_irq(pool->lock).
  */
 static void insert_work(struct cpu_workqueue_struct *cwq,
                        struct work_struct *work, struct list_head *head,
@@ -1190,23 +1190,24 @@ static bool is_chained_work(struct workqueue_struct *wq)
        unsigned int cpu;
 
        for_each_gcwq_cpu(cpu) {
-               struct global_cwq *gcwq = get_gcwq(cpu);
+               struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+               struct worker_pool *pool = cwq->pool;
                struct worker *worker;
                struct hlist_node *pos;
                int i;
 
-               spin_lock_irqsave(&gcwq->lock, flags);
-               for_each_busy_worker(worker, i, pos, gcwq) {
+               spin_lock_irqsave(&pool->lock, flags);
+               for_each_busy_worker(worker, i, pos, pool) {
                        if (worker->task != current)
                                continue;
-                       spin_unlock_irqrestore(&gcwq->lock, flags);
+                       spin_unlock_irqrestore(&pool->lock, flags);
                        /*
                         * I'm @worker, no locking necessary.  See if @work
                         * is headed to the same workqueue.
                         */
                        return worker->current_cwq->wq == wq;
                }
-               spin_unlock_irqrestore(&gcwq->lock, flags);
+               spin_unlock_irqrestore(&pool->lock, flags);
        }
        return false;
 }
@@ -1214,7 +1215,8 @@ static bool is_chained_work(struct workqueue_struct *wq)
 static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
                         struct work_struct *work)
 {
-       struct global_cwq *gcwq;
+       bool highpri = wq->flags & WQ_HIGHPRI;
+       struct worker_pool *pool;
        struct cpu_workqueue_struct *cwq;
        struct list_head *worklist;
        unsigned int work_flags;
@@ -1235,9 +1237,9 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
            WARN_ON_ONCE(!is_chained_work(wq)))
                return;
 
-       /* determine gcwq to use */
+       /* determine pool to use */
        if (!(wq->flags & WQ_UNBOUND)) {
-               struct global_cwq *last_gcwq;
+               struct worker_pool *last_pool;
 
                if (cpu == WORK_CPU_UNBOUND)
                        cpu = raw_smp_processor_id();
@@ -1248,37 +1250,37 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
                 * work needs to be queued on that cpu to guarantee
                 * non-reentrancy.
                 */
-               gcwq = get_gcwq(cpu);
-               last_gcwq = get_work_gcwq(work);
+               pool = get_std_worker_pool(cpu, highpri);
+               last_pool = get_work_pool(work);
 
-               if (last_gcwq && last_gcwq != gcwq) {
+               if (last_pool && last_pool != pool) {
                        struct worker *worker;
 
-                       spin_lock(&last_gcwq->lock);
+                       spin_lock(&last_pool->lock);
 
-                       worker = find_worker_executing_work(last_gcwq, work);
+                       worker = find_worker_executing_work(last_pool, work);
 
                        if (worker && worker->current_cwq->wq == wq)
-                               gcwq = last_gcwq;
+                               pool = last_pool;
                        else {
                                /* meh... not running there, queue here */
-                               spin_unlock(&last_gcwq->lock);
-                               spin_lock(&gcwq->lock);
+                               spin_unlock(&last_pool->lock);
+                               spin_lock(&pool->lock);
                        }
                } else {
-                       spin_lock(&gcwq->lock);
+                       spin_lock(&pool->lock);
                }
        } else {
-               gcwq = get_gcwq(WORK_CPU_UNBOUND);
-               spin_lock(&gcwq->lock);
+               pool = get_std_worker_pool(WORK_CPU_UNBOUND, highpri);
+               spin_lock(&pool->lock);
        }
 
-       /* gcwq determined, get cwq and queue */
-       cwq = get_cwq(gcwq->cpu, wq);
+       /* pool determined, get cwq and queue */
+       cwq = get_cwq(pool->cpu, wq);
        trace_workqueue_queue_work(req_cpu, cwq, work);
 
        if (WARN_ON(!list_empty(&work->entry))) {
-               spin_unlock(&gcwq->lock);
+               spin_unlock(&pool->lock);
                return;
        }
 
@@ -1296,7 +1298,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 
        insert_work(cwq, work, worklist, work_flags);
 
-       spin_unlock(&gcwq->lock);
+       spin_unlock(&pool->lock);
 }
 
 /**
@@ -1381,20 +1383,20 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
 
        /*
         * This stores cwq for the moment, for the timer_fn.  Note that the
-        * work's gcwq is preserved to allow reentrance detection for
+        * work's pool is preserved to allow reentrance detection for
         * delayed works.
         */
        if (!(wq->flags & WQ_UNBOUND)) {
-               struct global_cwq *gcwq = get_work_gcwq(work);
+               struct worker_pool *pool = get_work_pool(work);
 
                /*
-                * If we cannot get the last gcwq from @work directly,
+                * If we cannot get the last pool from @work directly,
                 * select the last CPU such that it avoids unnecessarily
                 * triggering non-reentrancy check in __queue_work().
                 */
                lcpu = cpu;
-               if (gcwq)
-                       lcpu = gcwq->cpu;
+               if (pool)
+                       lcpu = pool->cpu;
                if (lcpu == WORK_CPU_UNBOUND)
                        lcpu = raw_smp_processor_id();
        } else {
@@ -1519,12 +1521,11 @@ EXPORT_SYMBOL_GPL(mod_delayed_work);
  * necessary.
  *
  * LOCKING:
- * spin_lock_irq(gcwq->lock).
+ * spin_lock_irq(pool->lock).
  */
 static void worker_enter_idle(struct worker *worker)
 {
        struct worker_pool *pool = worker->pool;
-       struct global_cwq *gcwq = pool->gcwq;
 
        BUG_ON(worker->flags & WORKER_IDLE);
        BUG_ON(!list_empty(&worker->entry) &&
@@ -1543,11 +1544,11 @@ static void worker_enter_idle(struct worker *worker)
 
        /*
         * Sanity check nr_running.  Because gcwq_unbind_fn() releases
-        * gcwq->lock between setting %WORKER_UNBOUND and zapping
+        * pool->lock between setting %WORKER_UNBOUND and zapping
         * nr_running, the warning may trigger spuriously.  Check iff
         * unbind is not in progress.
         */
-       WARN_ON_ONCE(!(gcwq->flags & GCWQ_DISASSOCIATED) &&
+       WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
                     pool->nr_workers == pool->nr_idle &&
                     atomic_read(get_pool_nr_running(pool)));
 }
@@ -1559,7 +1560,7 @@ static void worker_enter_idle(struct worker *worker)
  * @worker is leaving idle state.  Update stats.
  *
  * LOCKING:
- * spin_lock_irq(gcwq->lock).
+ * spin_lock_irq(pool->lock).
  */
 static void worker_leave_idle(struct worker *worker)
 {
@@ -1588,13 +1589,13 @@ static void worker_leave_idle(struct worker *worker)
  * [dis]associated in the meantime.
  *
  * This function tries set_cpus_allowed() and locks gcwq and verifies the
- * binding against %GCWQ_DISASSOCIATED which is set during
+ * binding against %POOL_DISASSOCIATED which is set during
  * %CPU_DOWN_PREPARE and cleared during %CPU_ONLINE, so if the worker
  * enters idle state or fetches works without dropping lock, it can
  * guarantee the scheduling requirement described in the first paragraph.
  *
  * CONTEXT:
- * Might sleep.  Called without any lock but returns with gcwq->lock
+ * Might sleep.  Called without any lock but returns with pool->lock
  * held.
  *
  * RETURNS:
@@ -1602,9 +1603,9 @@ static void worker_leave_idle(struct worker *worker)
  * bound), %false if offline.
  */
 static bool worker_maybe_bind_and_lock(struct worker *worker)
-__acquires(&gcwq->lock)
+__acquires(&pool->lock)
 {
-       struct global_cwq *gcwq = worker->pool->gcwq;
+       struct worker_pool *pool = worker->pool;
        struct task_struct *task = worker->task;
 
        while (true) {
@@ -1612,19 +1613,19 @@ __acquires(&gcwq->lock)
                 * The following call may fail, succeed or succeed
                 * without actually migrating the task to the cpu if
                 * it races with cpu hotunplug operation.  Verify
-                * against GCWQ_DISASSOCIATED.
+                * against POOL_DISASSOCIATED.
                 */
-               if (!(gcwq->flags & GCWQ_DISASSOCIATED))
-                       set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu));
+               if (!(pool->flags & POOL_DISASSOCIATED))
+                       set_cpus_allowed_ptr(task, get_cpu_mask(pool->cpu));
 
-               spin_lock_irq(&gcwq->lock);
-               if (gcwq->flags & GCWQ_DISASSOCIATED)
+               spin_lock_irq(&pool->lock);
+               if (pool->flags & POOL_DISASSOCIATED)
                        return false;
-               if (task_cpu(task) == gcwq->cpu &&
+               if (task_cpu(task) == pool->cpu &&
                    cpumask_equal(&current->cpus_allowed,
-                                 get_cpu_mask(gcwq->cpu)))
+                                 get_cpu_mask(pool->cpu)))
                        return true;
-               spin_unlock_irq(&gcwq->lock);
+               spin_unlock_irq(&pool->lock);
 
                /*
                 * We've raced with CPU hot[un]plug.  Give it a breather
@@ -1643,15 +1644,13 @@ __acquires(&gcwq->lock)
  */
 static void idle_worker_rebind(struct worker *worker)
 {
-       struct global_cwq *gcwq = worker->pool->gcwq;
-
        /* CPU may go down again inbetween, clear UNBOUND only on success */
        if (worker_maybe_bind_and_lock(worker))
                worker_clr_flags(worker, WORKER_UNBOUND);
 
        /* rebind complete, become available again */
        list_add(&worker->entry, &worker->pool->idle_list);
-       spin_unlock_irq(&gcwq->lock);
+       spin_unlock_irq(&worker->pool->lock);
 }
 
 /*
@@ -1663,19 +1662,18 @@ static void idle_worker_rebind(struct worker *worker)
 static void busy_worker_rebind_fn(struct work_struct *work)
 {
        struct worker *worker = container_of(work, struct worker, rebind_work);
-       struct global_cwq *gcwq = worker->pool->gcwq;
 
        if (worker_maybe_bind_and_lock(worker))
                worker_clr_flags(worker, WORKER_UNBOUND);
 
-       spin_unlock_irq(&gcwq->lock);
+       spin_unlock_irq(&worker->pool->lock);
 }
 
 /**
- * rebind_workers - rebind all workers of a gcwq to the associated CPU
- * @gcwq: gcwq of interest
+ * rebind_workers - rebind all workers of a pool to the associated CPU
+ * @pool: pool of interest
  *
- * @gcwq->cpu is coming online.  Rebind all workers to the CPU.  Rebinding
+ * @pool->cpu is coming online.  Rebind all workers to the CPU.  Rebinding
  * is different for idle and busy ones.
  *
  * Idle ones will be removed from the idle_list and woken up.  They will
@@ -1693,38 +1691,32 @@ static void busy_worker_rebind_fn(struct work_struct *work)
  * including the manager will not appear on @idle_list until rebind is
  * complete, making local wake-ups safe.
  */
-static void rebind_workers(struct global_cwq *gcwq)
+static void rebind_workers(struct worker_pool *pool)
 {
-       struct worker_pool *pool;
        struct worker *worker, *n;
        struct hlist_node *pos;
        int i;
 
-       lockdep_assert_held(&gcwq->lock);
-
-       for_each_worker_pool(pool, gcwq)
-               lockdep_assert_held(&pool->assoc_mutex);
+       lockdep_assert_held(&pool->assoc_mutex);
+       lockdep_assert_held(&pool->lock);
 
        /* dequeue and kick idle ones */
-       for_each_worker_pool(pool, gcwq) {
-               list_for_each_entry_safe(worker, n, &pool->idle_list, entry) {
-                       /*
-                        * idle workers should be off @pool->idle_list
-                        * until rebind is complete to avoid receiving
-                        * premature local wake-ups.
-                        */
-                       list_del_init(&worker->entry);
+       list_for_each_entry_safe(worker, n, &pool->idle_list, entry) {
+               /*
+                * idle workers should be off @pool->idle_list until rebind
+                * is complete to avoid receiving premature local wake-ups.
+                */
+               list_del_init(&worker->entry);
 
-                       /*
-                        * worker_thread() will see the above dequeuing
-                        * and call idle_worker_rebind().
-                        */
-                       wake_up_process(worker->task);
-               }
+               /*
+                * worker_thread() will see the above dequeuing and call
+                * idle_worker_rebind().
+                */
+               wake_up_process(worker->task);
        }
 
        /* rebind busy workers */
-       for_each_busy_worker(worker, i, pos, gcwq) {
+       for_each_busy_worker(worker, i, pos, pool) {
                struct work_struct *rebind_work = &worker->rebind_work;
                struct workqueue_struct *wq;
 
@@ -1738,14 +1730,14 @@ static void rebind_workers(struct global_cwq *gcwq)
                 * wq doesn't really matter but let's keep @worker->pool
                 * and @cwq->pool consistent for sanity.
                 */
-               if (worker_pool_pri(worker->pool))
+               if (std_worker_pool_pri(worker->pool))
                        wq = system_highpri_wq;
                else
                        wq = system_wq;
 
-               insert_work(get_cwq(gcwq->cpu, wq), rebind_work,
-                       worker->scheduled.next,
-                       work_color_to_flags(WORK_NO_COLOR));
+               insert_work(get_cwq(pool->cpu, wq), rebind_work,
+                           worker->scheduled.next,
+                           work_color_to_flags(WORK_NO_COLOR));
        }
 }
 
@@ -1780,19 +1772,18 @@ static struct worker *alloc_worker(void)
  */
 static struct worker *create_worker(struct worker_pool *pool)
 {
-       struct global_cwq *gcwq = pool->gcwq;
-       const char *pri = worker_pool_pri(pool) ? "H" : "";
+       const char *pri = std_worker_pool_pri(pool) ? "H" : "";
        struct worker *worker = NULL;
        int id = -1;
 
-       spin_lock_irq(&gcwq->lock);
+       spin_lock_irq(&pool->lock);
        while (ida_get_new(&pool->worker_ida, &id)) {
-               spin_unlock_irq(&gcwq->lock);
+               spin_unlock_irq(&pool->lock);
                if (!ida_pre_get(&pool->worker_ida, GFP_KERNEL))
                        goto fail;
-               spin_lock_irq(&gcwq->lock);
+               spin_lock_irq(&pool->lock);
        }
-       spin_unlock_irq(&gcwq->lock);
+       spin_unlock_irq(&pool->lock);
 
        worker = alloc_worker();
        if (!worker)
@@ -1801,30 +1792,30 @@ static struct worker *create_worker(struct worker_pool *pool)
        worker->pool = pool;
        worker->id = id;
 
-       if (gcwq->cpu != WORK_CPU_UNBOUND)
+       if (pool->cpu != WORK_CPU_UNBOUND)
                worker->task = kthread_create_on_node(worker_thread,
-                                       worker, cpu_to_node(gcwq->cpu),
-                                       "kworker/%u:%d%s", gcwq->cpu, id, pri);
+                                       worker, cpu_to_node(pool->cpu),
+                                       "kworker/%u:%d%s", pool->cpu, id, pri);
        else
                worker->task = kthread_create(worker_thread, worker,
                                              "kworker/u:%d%s", id, pri);
        if (IS_ERR(worker->task))
                goto fail;
 
-       if (worker_pool_pri(pool))
+       if (std_worker_pool_pri(pool))
                set_user_nice(worker->task, HIGHPRI_NICE_LEVEL);
 
        /*
         * Determine CPU binding of the new worker depending on
-        * %GCWQ_DISASSOCIATED.  The caller is responsible for ensuring the
+        * %POOL_DISASSOCIATED.  The caller is responsible for ensuring the
         * flag remains stable across this function.  See the comments
         * above the flag definition for details.
         *
         * As an unbound worker may later become a regular one if CPU comes
         * online, make sure every worker has %PF_THREAD_BOUND set.
         */
-       if (!(gcwq->flags & GCWQ_DISASSOCIATED)) {
-               kthread_bind(worker->task, gcwq->cpu);
+       if (!(pool->flags & POOL_DISASSOCIATED)) {
+               kthread_bind(worker->task, pool->cpu);
        } else {
                worker->task->flags |= PF_THREAD_BOUND;
                worker->flags |= WORKER_UNBOUND;
@@ -1833,9 +1824,9 @@ static struct worker *create_worker(struct worker_pool *pool)
        return worker;
 fail:
        if (id >= 0) {
-               spin_lock_irq(&gcwq->lock);
+               spin_lock_irq(&pool->lock);
                ida_remove(&pool->worker_ida, id);
-               spin_unlock_irq(&gcwq->lock);
+               spin_unlock_irq(&pool->lock);
        }
        kfree(worker);
        return NULL;
@@ -1848,7 +1839,7 @@ fail:
  * Make the gcwq aware of @worker and start it.
  *
  * CONTEXT:
- * spin_lock_irq(gcwq->lock).
+ * spin_lock_irq(pool->lock).
  */
 static void start_worker(struct worker *worker)
 {
@@ -1865,12 +1856,11 @@ static void start_worker(struct worker *worker)
  * Destroy @worker and adjust @gcwq stats accordingly.
  *
  * CONTEXT:
- * spin_lock_irq(gcwq->lock) which is released and regrabbed.
+ * spin_lock_irq(pool->lock) which is released and regrabbed.
  */
 static void destroy_worker(struct worker *worker)
 {
        struct worker_pool *pool = worker->pool;
-       struct global_cwq *gcwq = pool->gcwq;
        int id = worker->id;
 
        /* sanity check frenzy */
@@ -1885,21 +1875,20 @@ static void destroy_worker(struct worker *worker)
        list_del_init(&worker->entry);
        worker->flags |= WORKER_DIE;
 
-       spin_unlock_irq(&gcwq->lock);
+       spin_unlock_irq(&pool->lock);
 
        kthread_stop(worker->task);
        kfree(worker);
 
-       spin_lock_irq(&gcwq->lock);
+       spin_lock_irq(&pool->lock);
        ida_remove(&pool->worker_ida, id);
 }
 
 static void idle_worker_timeout(unsigned long __pool)
 {
        struct worker_pool *pool = (void *)__pool;
-       struct global_cwq *gcwq = pool->gcwq;
 
-       spin_lock_irq(&gcwq->lock);
+       spin_lock_irq(&pool->lock);
 
        if (too_many_workers(pool)) {
                struct worker *worker;
@@ -1918,7 +1907,7 @@ static void idle_worker_timeout(unsigned long __pool)
                }
        }
 
-       spin_unlock_irq(&gcwq->lock);
+       spin_unlock_irq(&pool->lock);
 }
 
 static bool send_mayday(struct work_struct *work)
@@ -1931,7 +1920,7 @@ static bool send_mayday(struct work_struct *work)
                return false;
 
        /* mayday mayday mayday */
-       cpu = cwq->pool->gcwq->cpu;
+       cpu = cwq->pool->cpu;
        /* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */
        if (cpu == WORK_CPU_UNBOUND)
                cpu = 0;
@@ -1943,10 +1932,9 @@ static bool send_mayday(struct work_struct *work)
 static void gcwq_mayday_timeout(unsigned long __pool)
 {
        struct worker_pool *pool = (void *)__pool;
-       struct global_cwq *gcwq = pool->gcwq;
        struct work_struct *work;
 
-       spin_lock_irq(&gcwq->lock);
+       spin_lock_irq(&pool->lock);
 
        if (need_to_create_worker(pool)) {
                /*
@@ -1959,7 +1947,7 @@ static void gcwq_mayday_timeout(unsigned long __pool)
                        send_mayday(work);
        }
 
-       spin_unlock_irq(&gcwq->lock);
+       spin_unlock_irq(&pool->lock);
 
        mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
 }
@@ -1978,24 +1966,22 @@ static void gcwq_mayday_timeout(unsigned long __pool)
  * may_start_working() true.
  *
  * LOCKING:
- * spin_lock_irq(gcwq->lock) which may be released and regrabbed
+ * spin_lock_irq(pool->lock) which may be released and regrabbed
  * multiple times.  Does GFP_KERNEL allocations.  Called only from
  * manager.
  *
  * RETURNS:
- * false if no action was taken and gcwq->lock stayed locked, true
+ * false if no action was taken and pool->lock stayed locked, true
  * otherwise.
  */
 static bool maybe_create_worker(struct worker_pool *pool)
-__releases(&gcwq->lock)
-__acquires(&gcwq->lock)
+__releases(&pool->lock)
+__acquires(&pool->lock)
 {
-       struct global_cwq *gcwq = pool->gcwq;
-
        if (!need_to_create_worker(pool))
                return false;
 restart:
-       spin_unlock_irq(&gcwq->lock);
+       spin_unlock_irq(&pool->lock);
 
        /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
        mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
@@ -2006,7 +1992,7 @@ restart:
                worker = create_worker(pool);
                if (worker) {
                        del_timer_sync(&pool->mayday_timer);
-                       spin_lock_irq(&gcwq->lock);
+                       spin_lock_irq(&pool->lock);
                        start_worker(worker);
                        BUG_ON(need_to_create_worker(pool));
                        return true;
@@ -2023,7 +2009,7 @@ restart:
        }
 
        del_timer_sync(&pool->mayday_timer);
-       spin_lock_irq(&gcwq->lock);
+       spin_lock_irq(&pool->lock);
        if (need_to_create_worker(pool))
                goto restart;
        return true;
@@ -2037,11 +2023,11 @@ restart:
  * IDLE_WORKER_TIMEOUT.
  *
  * LOCKING:
- * spin_lock_irq(gcwq->lock) which may be released and regrabbed
+ * spin_lock_irq(pool->lock) which may be released and regrabbed
  * multiple times.  Called only from manager.
  *
  * RETURNS:
- * false if no action was taken and gcwq->lock stayed locked, true
+ * false if no action was taken and pool->lock stayed locked, true
  * otherwise.
  */
 static bool maybe_destroy_workers(struct worker_pool *pool)
@@ -2080,12 +2066,12 @@ static bool maybe_destroy_workers(struct worker_pool *pool)
  * and may_start_working() is true.
  *
  * CONTEXT:
- * spin_lock_irq(gcwq->lock) which may be released and regrabbed
+ * spin_lock_irq(pool->lock) which may be released and regrabbed
  * multiple times.  Does GFP_KERNEL allocations.
  *
  * RETURNS:
- * false if no action was taken and gcwq->lock stayed locked, true if
- * some action was taken.
+ * false if no action was taken and pool->lock stayed locked, true if
+ * some action was taken.
  */
 static bool manage_workers(struct worker *worker)
 {
@@ -2107,10 +2093,10 @@ static bool manage_workers(struct worker *worker)
         * manager against CPU hotplug.
         *
         * assoc_mutex would always be free unless CPU hotplug is in
-        * progress.  trylock first without dropping @gcwq->lock.
+        * progress.  trylock first without dropping @pool->lock.
         */
        if (unlikely(!mutex_trylock(&pool->assoc_mutex))) {
-               spin_unlock_irq(&pool->gcwq->lock);
+               spin_unlock_irq(&pool->lock);
                mutex_lock(&pool->assoc_mutex);
                /*
                 * CPU hotplug could have happened while we were waiting
@@ -2157,18 +2143,15 @@ static bool manage_workers(struct worker *worker)
  * call this function to process a work.
  *
  * CONTEXT:
- * spin_lock_irq(gcwq->lock) which is released and regrabbed.
+ * spin_lock_irq(pool->lock) which is released and regrabbed.
  */
 static void process_one_work(struct worker *worker, struct work_struct *work)
-__releases(&gcwq->lock)
-__acquires(&gcwq->lock)
+__releases(&pool->lock)
+__acquires(&pool->lock)
 {
        struct cpu_workqueue_struct *cwq = get_work_cwq(work);
        struct worker_pool *pool = worker->pool;
-       struct global_cwq *gcwq = pool->gcwq;
-       struct hlist_head *bwh = busy_worker_head(gcwq, work);
        bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
-       work_func_t f = work->func;
        int work_color;
        struct worker *collision;
 #ifdef CONFIG_LOCKDEP
@@ -2186,11 +2169,11 @@ __acquires(&gcwq->lock)
        /*
         * Ensure we're on the correct CPU.  DISASSOCIATED test is
         * necessary to avoid spurious warnings from rescuers servicing the
-        * unbound or a disassociated gcwq.
+        * unbound or a disassociated pool.
         */
        WARN_ON_ONCE(!(worker->flags & WORKER_UNBOUND) &&
-                    !(gcwq->flags & GCWQ_DISASSOCIATED) &&
-                    raw_smp_processor_id() != gcwq->cpu);
+                    !(pool->flags & POOL_DISASSOCIATED) &&
+                    raw_smp_processor_id() != pool->cpu);
 
        /*
         * A single work shouldn't be executed concurrently by
@@ -2198,7 +2181,7 @@ __acquires(&gcwq->lock)
         * already processing the work.  If so, defer the work to the
         * currently executing one.
         */
-       collision = __find_worker_executing_work(gcwq, bwh, work);
+       collision = find_worker_executing_work(pool, work);
        if (unlikely(collision)) {
                move_linked_works(work, &collision->scheduled, NULL);
                return;
@@ -2206,8 +2189,9 @@ __acquires(&gcwq->lock)
 
        /* claim and dequeue */
        debug_work_deactivate(work);
-       hlist_add_head(&worker->hentry, bwh);
+       hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
        worker->current_work = work;
+       worker->current_func = work->func;
        worker->current_cwq = cwq;
        work_color = get_work_color(work);
 
@@ -2221,26 +2205,26 @@ __acquires(&gcwq->lock)
                worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);
 
        /*
-        * Unbound gcwq isn't concurrency managed and work items should be
+        * Unbound pool isn't concurrency managed and work items should be
         * executed ASAP.  Wake up another worker if necessary.
         */
        if ((worker->flags & WORKER_UNBOUND) && need_more_worker(pool))
                wake_up_worker(pool);
 
        /*
-        * Record the last CPU and clear PENDING which should be the last
-        * update to @work.  Also, do this inside @gcwq->lock so that
+        * Record the last pool and clear PENDING which should be the last
+        * update to @work.  Also, do this inside @pool->lock so that
         * PENDING and queued state changes happen together while IRQ is
         * disabled.
         */
-       set_work_cpu_and_clear_pending(work, gcwq->cpu);
+       set_work_pool_and_clear_pending(work, pool->id);
 
-       spin_unlock_irq(&gcwq->lock);
+       spin_unlock_irq(&pool->lock);
 
        lock_map_acquire_read(&cwq->wq->lockdep_map);
        lock_map_acquire(&lockdep_map);
        trace_workqueue_execute_start(work);
-       f(work);
+       worker->current_func(work);
        /*
         * While we must be careful to not use "work" after this, the trace
         * point will only record its address.
@@ -2252,20 +2236,22 @@ __acquires(&gcwq->lock)
        if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
                pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
                       "     last function: %pf\n",
-                      current->comm, preempt_count(), task_pid_nr(current), f);
+                      current->comm, preempt_count(), task_pid_nr(current),
+                      worker->current_func);
                debug_show_held_locks(current);
                dump_stack();
        }
 
-       spin_lock_irq(&gcwq->lock);
+       spin_lock_irq(&pool->lock);
 
        /* clear cpu intensive status */
        if (unlikely(cpu_intensive))
                worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
 
        /* we're done with it, release */
-       hlist_del_init(&worker->hentry);
+       hash_del(&worker->hentry);
        worker->current_work = NULL;
+       worker->current_func = NULL;
        worker->current_cwq = NULL;
        cwq_dec_nr_in_flight(cwq, work_color);
 }
@@ -2279,7 +2265,7 @@ __acquires(&gcwq->lock)
  * fetches a work from the top and executes it.
  *
  * CONTEXT:
- * spin_lock_irq(gcwq->lock) which may be released and regrabbed
+ * spin_lock_irq(pool->lock) which may be released and regrabbed
  * multiple times.
  */
 static void process_scheduled_works(struct worker *worker)
@@ -2305,16 +2291,15 @@ static int worker_thread(void *__worker)
 {
        struct worker *worker = __worker;
        struct worker_pool *pool = worker->pool;
-       struct global_cwq *gcwq = pool->gcwq;
 
        /* tell the scheduler that this is a workqueue worker */
        worker->task->flags |= PF_WQ_WORKER;
 woke_up:
-       spin_lock_irq(&gcwq->lock);
+       spin_lock_irq(&pool->lock);
 
        /* we are off idle list if destruction or rebind is requested */
        if (unlikely(list_empty(&worker->entry))) {
-               spin_unlock_irq(&gcwq->lock);
+               spin_unlock_irq(&pool->lock);
 
                /* if DIE is set, destruction is requested */
                if (worker->flags & WORKER_DIE) {
@@ -2373,22 +2358,22 @@ sleep:
                goto recheck;
 
        /*
-        * gcwq->lock is held and there's no work to process and no
-        * need to manage, sleep.  Workers are woken up only while
-        * holding gcwq->lock or from local cpu, so setting the
-        * current state before releasing gcwq->lock is enough to
-        * prevent losing any event.
+        * pool->lock is held and there's no work to process and no need to
+        * manage, sleep.  Workers are woken up only while holding
+        * pool->lock or from local cpu, so setting the current state
+        * before releasing pool->lock is enough to prevent losing any
+        * event.
         */
        worker_enter_idle(worker);
        __set_current_state(TASK_INTERRUPTIBLE);
-       spin_unlock_irq(&gcwq->lock);
+       spin_unlock_irq(&pool->lock);
        schedule();
        goto woke_up;
 }
 
 /**
  * rescuer_thread - the rescuer thread function
- * @__wq: the associated workqueue
+ * @__rescuer: self
  *
  * Workqueue rescuer thread function.  There's one rescuer for each
  * workqueue which has WQ_RESCUER set.
@@ -2405,20 +2390,27 @@ sleep:
  *
  * This should happen rarely.
  */
-static int rescuer_thread(void *__wq)
+static int rescuer_thread(void *__rescuer)
 {
-       struct workqueue_struct *wq = __wq;
-       struct worker *rescuer = wq->rescuer;
+       struct worker *rescuer = __rescuer;
+       struct workqueue_struct *wq = rescuer->rescue_wq;
        struct list_head *scheduled = &rescuer->scheduled;
        bool is_unbound = wq->flags & WQ_UNBOUND;
        unsigned int cpu;
 
        set_user_nice(current, RESCUER_NICE_LEVEL);
+
+       /*
+        * Mark rescuer as worker too.  As WORKER_PREP is never cleared, it
+        * doesn't participate in concurrency management.
+        */
+       rescuer->task->flags |= PF_WQ_WORKER;
 repeat:
        set_current_state(TASK_INTERRUPTIBLE);
 
        if (kthread_should_stop()) {
                __set_current_state(TASK_RUNNING);
+               rescuer->task->flags &= ~PF_WQ_WORKER;
                return 0;
        }
 
@@ -2430,7 +2422,6 @@ repeat:
                unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu;
                struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq);
                struct worker_pool *pool = cwq->pool;
-               struct global_cwq *gcwq = pool->gcwq;
                struct work_struct *work, *n;
 
                __set_current_state(TASK_RUNNING);
@@ -2452,16 +2443,18 @@ repeat:
                process_scheduled_works(rescuer);
 
                /*
-                * Leave this gcwq.  If keep_working() is %true, notify a
+                * Leave this pool.  If keep_working() is %true, notify a
                 * regular worker; otherwise, we end up with 0 concurrency
                 * and stalling the execution.
                 */
                if (keep_working(pool))
                        wake_up_worker(pool);
 
-               spin_unlock_irq(&gcwq->lock);
+               spin_unlock_irq(&pool->lock);
        }
 
+       /* rescuers should never participate in concurrency management */
+       WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
        schedule();
        goto repeat;
 }
@@ -2499,7 +2492,7 @@ static void wq_barrier_func(struct work_struct *work)
  * underneath us, so we can't reliably determine cwq from @target.
  *
  * CONTEXT:
- * spin_lock_irq(gcwq->lock).
+ * spin_lock_irq(pool->lock).
  */
 static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
                              struct wq_barrier *barr,
@@ -2509,7 +2502,7 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
        unsigned int linked = 0;
 
        /*
-        * debugobject calls are safe here even with gcwq->lock locked
+        * debugobject calls are safe here even with pool->lock locked
         * as we know for sure that this will not trigger any of the
         * checks and call back into the fixup functions where we
         * might deadlock.
@@ -2582,9 +2575,9 @@ static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
 
        for_each_cwq_cpu(cpu, wq) {
                struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
-               struct global_cwq *gcwq = cwq->pool->gcwq;
+               struct worker_pool *pool = cwq->pool;
 
-               spin_lock_irq(&gcwq->lock);
+               spin_lock_irq(&pool->lock);
 
                if (flush_color >= 0) {
                        BUG_ON(cwq->flush_color != -1);
@@ -2601,7 +2594,7 @@ static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
                        cwq->work_color = work_color;
                }
 
-               spin_unlock_irq(&gcwq->lock);
+               spin_unlock_irq(&pool->lock);
        }
 
        if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush))
@@ -2798,9 +2791,9 @@ reflush:
                struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
                bool drained;
 
-               spin_lock_irq(&cwq->pool->gcwq->lock);
+               spin_lock_irq(&cwq->pool->lock);
                drained = !cwq->nr_active && list_empty(&cwq->delayed_works);
-               spin_unlock_irq(&cwq->pool->gcwq->lock);
+               spin_unlock_irq(&cwq->pool->lock);
 
                if (drained)
                        continue;
@@ -2822,34 +2815,34 @@ EXPORT_SYMBOL_GPL(drain_workqueue);
 static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
 {
        struct worker *worker = NULL;
-       struct global_cwq *gcwq;
+       struct worker_pool *pool;
        struct cpu_workqueue_struct *cwq;
 
        might_sleep();
-       gcwq = get_work_gcwq(work);
-       if (!gcwq)
+       pool = get_work_pool(work);
+       if (!pool)
                return false;
 
-       spin_lock_irq(&gcwq->lock);
+       spin_lock_irq(&pool->lock);
        if (!list_empty(&work->entry)) {
                /*
                 * See the comment near try_to_grab_pending()->smp_rmb().
-                * If it was re-queued to a different gcwq under us, we
+                * If it was re-queued to a different pool under us, we
                 * are not going to wait.
                 */
                smp_rmb();
                cwq = get_work_cwq(work);
-               if (unlikely(!cwq || gcwq != cwq->pool->gcwq))
+               if (unlikely(!cwq || pool != cwq->pool))
                        goto already_gone;
        } else {
-               worker = find_worker_executing_work(gcwq, work);
+               worker = find_worker_executing_work(pool, work);
                if (!worker)
                        goto already_gone;
                cwq = worker->current_cwq;
        }
 
        insert_wq_barrier(cwq, barr, work, worker);
-       spin_unlock_irq(&gcwq->lock);
+       spin_unlock_irq(&pool->lock);
 
        /*
         * If @max_active is 1 or rescuer is in use, flushing another work
@@ -2865,7 +2858,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
 
        return true;
 already_gone:
-       spin_unlock_irq(&gcwq->lock);
+       spin_unlock_irq(&pool->lock);
        return false;
 }
 
@@ -2992,7 +2985,8 @@ bool cancel_delayed_work(struct delayed_work *dwork)
        if (unlikely(ret < 0))
                return false;
 
-       set_work_cpu_and_clear_pending(&dwork->work, work_cpu(&dwork->work));
+       set_work_pool_and_clear_pending(&dwork->work,
+                                       get_work_pool_id(&dwork->work));
        local_irq_restore(flags);
        return ret;
 }
@@ -3297,7 +3291,8 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
                if (!rescuer)
                        goto err;
 
-               rescuer->task = kthread_create(rescuer_thread, wq, "%s",
+               rescuer->rescue_wq = wq;
+               rescuer->task = kthread_create(rescuer_thread, rescuer, "%s",
                                               wq->name);
                if (IS_ERR(rescuer->task))
                        goto err;
@@ -3385,7 +3380,7 @@ EXPORT_SYMBOL_GPL(destroy_workqueue);
  * increased.
  *
  * CONTEXT:
- * spin_lock_irq(gcwq->lock).
+ * spin_lock_irq(pool->lock).
  */
 static void cwq_set_max_active(struct cpu_workqueue_struct *cwq, int max_active)
 {
@@ -3417,15 +3412,16 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
        wq->saved_max_active = max_active;
 
        for_each_cwq_cpu(cpu, wq) {
-               struct global_cwq *gcwq = get_gcwq(cpu);
+               struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+               struct worker_pool *pool = cwq->pool;
 
-               spin_lock_irq(&gcwq->lock);
+               spin_lock_irq(&pool->lock);
 
                if (!(wq->flags & WQ_FREEZABLE) ||
-                   !(gcwq->flags & GCWQ_FREEZING))
-                       cwq_set_max_active(get_cwq(gcwq->cpu, wq), max_active);
+                   !(pool->flags & POOL_FREEZING))
+                       cwq_set_max_active(cwq, max_active);
 
-               spin_unlock_irq(&gcwq->lock);
+               spin_unlock_irq(&pool->lock);
        }
 
        spin_unlock(&workqueue_lock);
@@ -3452,21 +3448,6 @@ bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq)
 }
 EXPORT_SYMBOL_GPL(workqueue_congested);
 
-/**
- * work_cpu - return the last known associated cpu for @work
- * @work: the work of interest
- *
- * RETURNS:
- * CPU number if @work was ever queued.  WORK_CPU_NONE otherwise.
- */
-unsigned int work_cpu(struct work_struct *work)
-{
-       struct global_cwq *gcwq = get_work_gcwq(work);
-
-       return gcwq ? gcwq->cpu : WORK_CPU_NONE;
-}
-EXPORT_SYMBOL_GPL(work_cpu);
-
 /**
  * work_busy - test whether a work is currently pending or running
  * @work: the work to be tested
@@ -3482,21 +3463,21 @@ EXPORT_SYMBOL_GPL(work_cpu);
  */
 unsigned int work_busy(struct work_struct *work)
 {
-       struct global_cwq *gcwq = get_work_gcwq(work);
+       struct worker_pool *pool = get_work_pool(work);
        unsigned long flags;
        unsigned int ret = 0;
 
-       if (!gcwq)
+       if (!pool)
                return 0;
 
-       spin_lock_irqsave(&gcwq->lock, flags);
+       spin_lock_irqsave(&pool->lock, flags);
 
        if (work_pending(work))
                ret |= WORK_BUSY_PENDING;
-       if (find_worker_executing_work(gcwq, work))
+       if (find_worker_executing_work(pool, work))
                ret |= WORK_BUSY_RUNNING;
 
-       spin_unlock_irqrestore(&gcwq->lock, flags);
+       spin_unlock_irqrestore(&pool->lock, flags);
 
        return ret;
 }
@@ -3509,34 +3490,14 @@ EXPORT_SYMBOL_GPL(work_busy);
  * are a lot of assumptions on strong associations among work, cwq and
  * gcwq which make migrating pending and scheduled works very
  * difficult to implement without impacting hot paths.  Secondly,
- * gcwqs serve mix of short, long and very long running works making
+ * worker pools serve mix of short, long and very long running works making
  * blocked draining impractical.
  *
- * This is solved by allowing a gcwq to be disassociated from the CPU
+ * This is solved by allowing the pools to be disassociated from the CPU
  * running as an unbound one and allowing it to be reattached later if the
  * cpu comes back online.
  */
 
-/* claim manager positions of all pools */
-static void gcwq_claim_assoc_and_lock(struct global_cwq *gcwq)
-{
-       struct worker_pool *pool;
-
-       for_each_worker_pool(pool, gcwq)
-               mutex_lock_nested(&pool->assoc_mutex, pool - gcwq->pools);
-       spin_lock_irq(&gcwq->lock);
-}
-
-/* release manager positions */
-static void gcwq_release_assoc_and_unlock(struct global_cwq *gcwq)
-{
-       struct worker_pool *pool;
-
-       spin_unlock_irq(&gcwq->lock);
-       for_each_worker_pool(pool, gcwq)
-               mutex_unlock(&pool->assoc_mutex);
-}
-
 static void gcwq_unbind_fn(struct work_struct *work)
 {
        struct global_cwq *gcwq = get_gcwq(smp_processor_id());
@@ -3545,26 +3506,30 @@ static void gcwq_unbind_fn(struct work_struct *work)
        struct hlist_node *pos;
        int i;
 
-       BUG_ON(gcwq->cpu != smp_processor_id());
+       for_each_worker_pool(pool, gcwq) {
+               BUG_ON(pool->cpu != smp_processor_id());
 
-       gcwq_claim_assoc_and_lock(gcwq);
+               mutex_lock(&pool->assoc_mutex);
+               spin_lock_irq(&pool->lock);
 
-       /*
-        * We've claimed all manager positions.  Make all workers unbound
-        * and set DISASSOCIATED.  Before this, all workers except for the
-        * ones which are still executing works from before the last CPU
-        * down must be on the cpu.  After this, they may become diasporas.
-        */
-       for_each_worker_pool(pool, gcwq)
+               /*
+                * We've claimed all manager positions.  Make all workers
+                * unbound and set DISASSOCIATED.  Before this, all workers
+                * except for the ones which are still executing works from
+                * before the last CPU down must be on the cpu.  After
+                * this, they may become diasporas.
+                */
                list_for_each_entry(worker, &pool->idle_list, entry)
                        worker->flags |= WORKER_UNBOUND;
 
-       for_each_busy_worker(worker, i, pos, gcwq)
-               worker->flags |= WORKER_UNBOUND;
+               for_each_busy_worker(worker, i, pos, pool)
+                       worker->flags |= WORKER_UNBOUND;
 
-       gcwq->flags |= GCWQ_DISASSOCIATED;
+               pool->flags |= POOL_DISASSOCIATED;
 
-       gcwq_release_assoc_and_unlock(gcwq);
+               spin_unlock_irq(&pool->lock);
+               mutex_unlock(&pool->assoc_mutex);
+       }
 
        /*
         * Call schedule() so that we cross rq->lock and thus can guarantee
@@ -3612,18 +3577,24 @@ static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb,
                        if (!worker)
                                return NOTIFY_BAD;
 
-                       spin_lock_irq(&gcwq->lock);
+                       spin_lock_irq(&pool->lock);
                        start_worker(worker);
-                       spin_unlock_irq(&gcwq->lock);
+                       spin_unlock_irq(&pool->lock);
                }
                break;
 
        case CPU_DOWN_FAILED:
        case CPU_ONLINE:
-               gcwq_claim_assoc_and_lock(gcwq);
-               gcwq->flags &= ~GCWQ_DISASSOCIATED;
-               rebind_workers(gcwq);
-               gcwq_release_assoc_and_unlock(gcwq);
+               for_each_worker_pool(pool, gcwq) {
+                       mutex_lock(&pool->assoc_mutex);
+                       spin_lock_irq(&pool->lock);
+
+                       pool->flags &= ~POOL_DISASSOCIATED;
+                       rebind_workers(pool);
+
+                       spin_unlock_irq(&pool->lock);
+                       mutex_unlock(&pool->assoc_mutex);
+               }
                break;
        }
        return NOTIFY_OK;
@@ -3699,7 +3670,7 @@ EXPORT_SYMBOL_GPL(work_on_cpu);
  * gcwq->worklist.
  *
  * CONTEXT:
- * Grabs and releases workqueue_lock and gcwq->lock's.
+ * Grabs and releases workqueue_lock and pool->lock's.
  */
 void freeze_workqueues_begin(void)
 {
@@ -3712,12 +3683,17 @@ void freeze_workqueues_begin(void)
 
        for_each_gcwq_cpu(cpu) {
                struct global_cwq *gcwq = get_gcwq(cpu);
+               struct worker_pool *pool;
                struct workqueue_struct *wq;
 
-               spin_lock_irq(&gcwq->lock);
+               local_irq_disable();
 
-               BUG_ON(gcwq->flags & GCWQ_FREEZING);
-               gcwq->flags |= GCWQ_FREEZING;
+               for_each_worker_pool(pool, gcwq) {
+                       spin_lock_nested(&pool->lock, pool - gcwq->pools);
+
+                       WARN_ON_ONCE(pool->flags & POOL_FREEZING);
+                       pool->flags |= POOL_FREEZING;
+               }
 
                list_for_each_entry(wq, &workqueues, list) {
                        struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
@@ -3726,7 +3702,9 @@ void freeze_workqueues_begin(void)
                                cwq->max_active = 0;
                }
 
-               spin_unlock_irq(&gcwq->lock);
+               for_each_worker_pool(pool, gcwq)
+                       spin_unlock(&pool->lock);
+               local_irq_enable();
        }
 
        spin_unlock(&workqueue_lock);
@@ -3785,7 +3763,7 @@ out_unlock:
  * frozen works are transferred to their respective gcwq worklists.
  *
  * CONTEXT:
- * Grabs and releases workqueue_lock and gcwq->lock's.
+ * Grabs and releases workqueue_lock and pool->lock's.
  */
 void thaw_workqueues(void)
 {
@@ -3801,10 +3779,14 @@ void thaw_workqueues(void)
                struct worker_pool *pool;
                struct workqueue_struct *wq;
 
-               spin_lock_irq(&gcwq->lock);
+               local_irq_disable();
 
-               BUG_ON(!(gcwq->flags & GCWQ_FREEZING));
-               gcwq->flags &= ~GCWQ_FREEZING;
+               for_each_worker_pool(pool, gcwq) {
+                       spin_lock_nested(&pool->lock, pool - gcwq->pools);
+
+                       WARN_ON_ONCE(!(pool->flags & POOL_FREEZING));
+                       pool->flags &= ~POOL_FREEZING;
+               }
 
                list_for_each_entry(wq, &workqueues, list) {
                        struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
@@ -3816,10 +3798,11 @@ void thaw_workqueues(void)
                        cwq_set_max_active(cwq, wq->saved_max_active);
                }
 
-               for_each_worker_pool(pool, gcwq)
+               for_each_worker_pool(pool, gcwq) {
                        wake_up_worker(pool);
-
-               spin_unlock_irq(&gcwq->lock);
+                       spin_unlock(&pool->lock);
+               }
+               local_irq_enable();
        }
 
        workqueue_freezing = false;
@@ -3831,11 +3814,10 @@ out_unlock:
 static int __init init_workqueues(void)
 {
        unsigned int cpu;
-       int i;
 
-       /* make sure we have enough bits for OFFQ CPU number */
-       BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_CPU_SHIFT)) <
-                    WORK_CPU_LAST);
+       /* make sure we have enough bits for OFFQ pool ID */
+       BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT)) <
+                    WORK_CPU_LAST * NR_STD_WORKER_POOLS);
 
        cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
        hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
@@ -3845,17 +3827,14 @@ static int __init init_workqueues(void)
                struct global_cwq *gcwq = get_gcwq(cpu);
                struct worker_pool *pool;
 
-               spin_lock_init(&gcwq->lock);
-               gcwq->cpu = cpu;
-               gcwq->flags |= GCWQ_DISASSOCIATED;
-
-               for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
-                       INIT_HLIST_HEAD(&gcwq->busy_hash[i]);
-
                for_each_worker_pool(pool, gcwq) {
                        pool->gcwq = gcwq;
+                       spin_lock_init(&pool->lock);
+                       pool->cpu = cpu;
+                       pool->flags |= POOL_DISASSOCIATED;
                        INIT_LIST_HEAD(&pool->worklist);
                        INIT_LIST_HEAD(&pool->idle_list);
+                       hash_init(pool->busy_hash);
 
                        init_timer_deferrable(&pool->idle_timer);
                        pool->idle_timer.function = idle_worker_timeout;
@@ -3866,6 +3845,9 @@ static int __init init_workqueues(void)
 
                        mutex_init(&pool->assoc_mutex);
                        ida_init(&pool->worker_ida);
+
+                       /* alloc pool ID */
+                       BUG_ON(worker_pool_assign_id(pool));
                }
        }
 
@@ -3874,17 +3856,17 @@ static int __init init_workqueues(void)
                struct global_cwq *gcwq = get_gcwq(cpu);
                struct worker_pool *pool;
 
-               if (cpu != WORK_CPU_UNBOUND)
-                       gcwq->flags &= ~GCWQ_DISASSOCIATED;
-
                for_each_worker_pool(pool, gcwq) {
                        struct worker *worker;
 
+                       if (cpu != WORK_CPU_UNBOUND)
+                               pool->flags &= ~POOL_DISASSOCIATED;
+
                        worker = create_worker(pool);
                        BUG_ON(!worker);
-                       spin_lock_irq(&gcwq->lock);
+                       spin_lock_irq(&pool->lock);
                        start_worker(worker);
-                       spin_unlock_irq(&gcwq->lock);
+                       spin_unlock_irq(&pool->lock);
                }
        }
 