workqueue: fix locking in retry path of maybe_create_worker()
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 16ce617974d2d6e80eac52fc02ba4e69c6090c53..aca94726e20a937d2bb9d4b711da9c3fbda75bcf 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -43,6 +43,7 @@ enum {
        GCWQ_MANAGING_WORKERS   = 1 << 1,       /* managing workers */
        GCWQ_DISASSOCIATED      = 1 << 2,       /* cpu can't serve workers */
        GCWQ_FREEZING           = 1 << 3,       /* freeze in progress */
+       GCWQ_HIGHPRI_PENDING    = 1 << 4,       /* highpri works on queue */
 
        /* worker flags */
        WORKER_STARTED          = 1 << 0,       /* started */
@@ -51,8 +52,11 @@ enum {
        WORKER_PREP             = 1 << 3,       /* preparing to run works */
        WORKER_ROGUE            = 1 << 4,       /* not bound to any cpu */
        WORKER_REBIND           = 1 << 5,       /* mom is home, come back */
+       WORKER_CPU_INTENSIVE    = 1 << 6,       /* cpu intensive */
+       WORKER_UNBOUND          = 1 << 7,       /* worker is unbound */
 
-       WORKER_NOT_RUNNING      = WORKER_PREP | WORKER_ROGUE | WORKER_REBIND,
+       WORKER_NOT_RUNNING      = WORKER_PREP | WORKER_ROGUE | WORKER_REBIND |
+                                 WORKER_CPU_INTENSIVE | WORKER_UNBOUND,
 
        /* gcwq->trustee_state */
        TRUSTEE_START           = 0,            /* start */
@@ -93,7 +97,7 @@ enum {
  * X: During normal operation, modification requires gcwq->lock and
  *    should be done only from local cpu.  Either disabling preemption
  *    on local cpu or grabbing gcwq->lock is enough for read access.
- *    While trustee is in charge, it's identical to L.
+ *    If GCWQ_DISASSOCIATED is set, it's identical to L.
  *
  * F: wq->flush_mutex protected.
  *
@@ -187,7 +191,11 @@ struct wq_flusher {
  */
 struct workqueue_struct {
        unsigned int            flags;          /* I: WQ_* flags */
-       struct cpu_workqueue_struct *cpu_wq;    /* I: cwq's */
+       union {
+               struct cpu_workqueue_struct __percpu    *pcpu;
+               struct cpu_workqueue_struct             *single;
+               unsigned long                           v;
+       } cpu_wq;                               /* I: cwq's */
        struct list_head        list;           /* W: list of all workqueues */
 
        struct mutex            flush_mutex;    /* protects wq flushing */
@@ -198,12 +206,10 @@ struct workqueue_struct {
        struct list_head        flusher_queue;  /* F: flush waiters */
        struct list_head        flusher_overflow; /* F: flush overflow list */
 
-       unsigned long           single_cpu;     /* cpu for single cpu wq */
-
        cpumask_var_t           mayday_mask;    /* cpus requesting rescue */
        struct worker           *rescuer;       /* I: rescue worker */
 
-       int                     saved_max_active; /* I: saved cwq max_active */
+       int                     saved_max_active; /* W: saved cwq max_active */
        const char              *name;          /* I: workqueue name */
 #ifdef CONFIG_LOCKDEP
        struct lockdep_map      lockdep_map;
@@ -213,14 +219,52 @@ struct workqueue_struct {
 struct workqueue_struct *system_wq __read_mostly;
 struct workqueue_struct *system_long_wq __read_mostly;
 struct workqueue_struct *system_nrt_wq __read_mostly;
+struct workqueue_struct *system_unbound_wq __read_mostly;
 EXPORT_SYMBOL_GPL(system_wq);
 EXPORT_SYMBOL_GPL(system_long_wq);
 EXPORT_SYMBOL_GPL(system_nrt_wq);
+EXPORT_SYMBOL_GPL(system_unbound_wq);
 
 #define for_each_busy_worker(worker, i, pos, gcwq)                     \
        for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)                     \
                hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
 
+static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask,
+                                 unsigned int sw)
+{
+       if (cpu < nr_cpu_ids) {
+               if (sw & 1) {
+                       cpu = cpumask_next(cpu, mask);
+                       if (cpu < nr_cpu_ids)
+                               return cpu;
+               }
+               if (sw & 2)
+                       return WORK_CPU_UNBOUND;
+       }
+       return WORK_CPU_NONE;
+}
+
+static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
+                               struct workqueue_struct *wq)
+{
+       return __next_gcwq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2);
+}
+
+#define for_each_gcwq_cpu(cpu)                                         \
+       for ((cpu) = __next_gcwq_cpu(-1, cpu_possible_mask, 3);         \
+            (cpu) < WORK_CPU_NONE;                                     \
+            (cpu) = __next_gcwq_cpu((cpu), cpu_possible_mask, 3))
+
+#define for_each_online_gcwq_cpu(cpu)                                  \
+       for ((cpu) = __next_gcwq_cpu(-1, cpu_online_mask, 3);           \
+            (cpu) < WORK_CPU_NONE;                                     \
+            (cpu) = __next_gcwq_cpu((cpu), cpu_online_mask, 3))
+
+#define for_each_cwq_cpu(cpu, wq)                                      \
+       for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, (wq));        \
+            (cpu) < WORK_CPU_NONE;                                     \
+            (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq)))
+
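
As a usage sketch (not part of the patch), the iterators above walk every per-cpu gcwq and finish with the pseudo CPU WORK_CPU_UNBOUND; the helper below is made up for illustration and assumes it sits inside workqueue.c where the macros are visible.

	static void dump_gcwq_cpus(void)
	{
		unsigned int cpu;

		for_each_gcwq_cpu(cpu) {
			if (cpu == WORK_CPU_UNBOUND)
				printk(KERN_DEBUG "gcwq: unbound\n");
			else
				printk(KERN_DEBUG "gcwq: cpu %u\n", cpu);
		}
	}
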
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
 
 static struct debug_obj_descr work_debug_descr;
@@ -344,22 +388,46 @@ static bool workqueue_freezing;           /* W: have wqs started freezing? */
 static DEFINE_PER_CPU(struct global_cwq, global_cwq);
 static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, gcwq_nr_running);
 
+/*
+ * Global cpu workqueue and nr_running counter for unbound gcwq.  The
+ * gcwq is always online, has GCWQ_DISASSOCIATED set, and all its
+ * workers have WORKER_UNBOUND set.
+ */
+static struct global_cwq unbound_global_cwq;
+static atomic_t unbound_gcwq_nr_running = ATOMIC_INIT(0);      /* always 0 */
+
 static int worker_thread(void *__worker);
 
 static struct global_cwq *get_gcwq(unsigned int cpu)
 {
-       return &per_cpu(global_cwq, cpu);
+       if (cpu != WORK_CPU_UNBOUND)
+               return &per_cpu(global_cwq, cpu);
+       else
+               return &unbound_global_cwq;
 }
 
 static atomic_t *get_gcwq_nr_running(unsigned int cpu)
 {
-       return &per_cpu(gcwq_nr_running, cpu);
+       if (cpu != WORK_CPU_UNBOUND)
+               return &per_cpu(gcwq_nr_running, cpu);
+       else
+               return &unbound_gcwq_nr_running;
 }
 
 static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
                                            struct workqueue_struct *wq)
 {
-       return per_cpu_ptr(wq->cpu_wq, cpu);
+       if (!(wq->flags & WQ_UNBOUND)) {
+               if (likely(cpu < nr_cpu_ids)) {
+#ifdef CONFIG_SMP
+                       return per_cpu_ptr(wq->cpu_wq.pcpu, cpu);
+#else
+                       return wq->cpu_wq.single;
+#endif
+               }
+       } else if (likely(cpu == WORK_CPU_UNBOUND))
+               return wq->cpu_wq.single;
+       return NULL;
 }
 
 static unsigned int work_color_to_flags(int color)
@@ -439,10 +507,10 @@ static struct global_cwq *get_work_gcwq(struct work_struct *work)
                return ((struct cpu_workqueue_struct *)data)->gcwq;
 
        cpu = data >> WORK_STRUCT_FLAG_BITS;
-       if (cpu == NR_CPUS)
+       if (cpu == WORK_CPU_NONE)
                return NULL;
 
-       BUG_ON(cpu >= num_possible_cpus());
+       BUG_ON(cpu >= nr_cpu_ids && cpu != WORK_CPU_UNBOUND);
        return get_gcwq(cpu);
 }
 
@@ -452,15 +520,19 @@ static struct global_cwq *get_work_gcwq(struct work_struct *work)
  * assume that they're being called with gcwq->lock held.
  */
 
+static bool __need_more_worker(struct global_cwq *gcwq)
+{
+       return !atomic_read(get_gcwq_nr_running(gcwq->cpu)) ||
+               gcwq->flags & GCWQ_HIGHPRI_PENDING;
+}
+
 /*
  * Need to wake up a worker?  Called from anything but currently
  * running workers.
  */
 static bool need_more_worker(struct global_cwq *gcwq)
 {
-       atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);
-
-       return !list_empty(&gcwq->worklist) && !atomic_read(nr_running);
+       return !list_empty(&gcwq->worklist) && __need_more_worker(gcwq);
 }
 
 /* Can I start working?  Called from busy but !running workers. */
@@ -594,7 +666,7 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task,
 
 /**
  * worker_set_flags - set worker flags and adjust nr_running accordingly
- * @worker: worker to set flags for
+ * @worker: self
  * @flags: flags to set
  * @wakeup: wakeup an idle worker if necessary
  *
@@ -602,14 +674,16 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task,
  * nr_running becomes zero and @wakeup is %true, an idle worker is
  * woken up.
  *
- * LOCKING:
- * spin_lock_irq(gcwq->lock).
+ * CONTEXT:
+ * spin_lock_irq(gcwq->lock)
  */
 static inline void worker_set_flags(struct worker *worker, unsigned int flags,
                                    bool wakeup)
 {
        struct global_cwq *gcwq = worker->gcwq;
 
+       WARN_ON_ONCE(worker->task != current);
+
        /*
         * If transitioning into NOT_RUNNING, adjust nr_running and
         * wake up an idle worker as necessary if requested by
@@ -632,19 +706,21 @@ static inline void worker_set_flags(struct worker *worker, unsigned int flags,
 
 /**
  * worker_clr_flags - clear worker flags and adjust nr_running accordingly
- * @worker: worker to set flags for
+ * @worker: self
  * @flags: flags to clear
  *
  * Clear @flags in @worker->flags and adjust nr_running accordingly.
  *
- * LOCKING:
- * spin_lock_irq(gcwq->lock).
+ * CONTEXT:
+ * spin_lock_irq(gcwq->lock)
  */
 static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
 {
        struct global_cwq *gcwq = worker->gcwq;
        unsigned int oflags = worker->flags;
 
+       WARN_ON_ONCE(worker->task != current);
+
        worker->flags &= ~flags;
 
        /* if transitioning out of NOT_RUNNING, increment nr_running */
@@ -733,6 +809,43 @@ static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
                                            work);
 }
 
+/**
+ * gcwq_determine_ins_pos - find insertion position
+ * @gcwq: gcwq of interest
+ * @cwq: cwq a work is being queued for
+ *
+ * A work for @cwq is about to be queued on @gcwq, determine insertion
+ * position for the work.  If @cwq is for HIGHPRI wq, the work is
+ * queued at the head of the queue but in FIFO order with respect to
+ * other HIGHPRI works; otherwise, at the end of the queue.  This
+ * function also sets GCWQ_HIGHPRI_PENDING flag to hint @gcwq that
+ * there are HIGHPRI works pending.
+ *
+ * CONTEXT:
+ * spin_lock_irq(gcwq->lock).
+ *
+ * RETURNS:
+ * Pointer to insertion position.
+ */
+static inline struct list_head *gcwq_determine_ins_pos(struct global_cwq *gcwq,
+                                              struct cpu_workqueue_struct *cwq)
+{
+       struct work_struct *twork;
+
+       if (likely(!(cwq->wq->flags & WQ_HIGHPRI)))
+               return &gcwq->worklist;
+
+       list_for_each_entry(twork, &gcwq->worklist, entry) {
+               struct cpu_workqueue_struct *tcwq = get_work_cwq(twork);
+
+               if (!(tcwq->wq->flags & WQ_HIGHPRI))
+                       break;
+       }
+
+       gcwq->flags |= GCWQ_HIGHPRI_PENDING;
+       return &twork->entry;
+}
+
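
For context, a minimal sketch of how a WQ_HIGHPRI user would look from driver code (all names below are made up, not from the patch); works queued this way land at the head position computed by gcwq_determine_ins_pos() above, FIFO among themselves.

	static struct workqueue_struct *my_highpri_wq;

	static void my_work_fn(struct work_struct *work)
	{
		/* picked up ahead of normal-priority works on the same gcwq */
	}
	static DECLARE_WORK(my_work, my_work_fn);

	static int __init my_highpri_init(void)
	{
		my_highpri_wq = alloc_workqueue("my_highpri", WQ_HIGHPRI, 0);
		if (!my_highpri_wq)
			return -ENOMEM;
		queue_work(my_highpri_wq, &my_work);
		return 0;
	}
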
 /**
  * insert_work - insert a work into gcwq
  * @cwq: cwq @work belongs to
@@ -770,38 +883,10 @@ static void insert_work(struct cpu_workqueue_struct *cwq,
         */
        smp_mb();
 
-       if (!atomic_read(get_gcwq_nr_running(gcwq->cpu)))
+       if (__need_more_worker(gcwq))
                wake_up_worker(gcwq);
 }
 
-/**
- * cwq_unbind_single_cpu - unbind cwq from single cpu workqueue processing
- * @cwq: cwq to unbind
- *
- * Try to unbind @cwq from single cpu workqueue processing.  If
- * @cwq->wq is frozen, unbind is delayed till the workqueue is thawed.
- *
- * CONTEXT:
- * spin_lock_irq(gcwq->lock).
- */
-static void cwq_unbind_single_cpu(struct cpu_workqueue_struct *cwq)
-{
-       struct workqueue_struct *wq = cwq->wq;
-       struct global_cwq *gcwq = cwq->gcwq;
-
-       BUG_ON(wq->single_cpu != gcwq->cpu);
-       /*
-        * Unbind from workqueue if @cwq is not frozen.  If frozen,
-        * thaw_workqueues() will either restart processing on this
-        * cpu or unbind if empty.  This keeps works queued while
-        * frozen fully ordered and flushable.
-        */
-       if (likely(!(gcwq->flags & GCWQ_FREEZING))) {
-               smp_wmb();      /* paired with cmpxchg() in __queue_work() */
-               wq->single_cpu = NR_CPUS;
-       }
-}
-
 static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
                         struct work_struct *work)
 {
@@ -809,17 +894,16 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
        struct cpu_workqueue_struct *cwq;
        struct list_head *worklist;
        unsigned long flags;
-       bool arbitrate;
 
        debug_work_activate(work);
 
-       /*
-        * Determine gcwq to use.  SINGLE_CPU is inherently
-        * NON_REENTRANT, so test it first.
-        */
-       if (!(wq->flags & WQ_SINGLE_CPU)) {
+       /* determine gcwq to use */
+       if (!(wq->flags & WQ_UNBOUND)) {
                struct global_cwq *last_gcwq;
 
+               if (unlikely(cpu == WORK_CPU_UNBOUND))
+                       cpu = raw_smp_processor_id();
+
                /*
                 * It's multi cpu.  If @wq is non-reentrant and @work
                 * was previously on a different cpu, it might still
@@ -845,37 +929,8 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
                } else
                        spin_lock_irqsave(&gcwq->lock, flags);
        } else {
-               unsigned int req_cpu = cpu;
-
-               /*
-                * It's a bit more complex for single cpu workqueues.
-                * We first need to determine which cpu is going to be
-                * used.  If no cpu is currently serving this
-                * workqueue, arbitrate using atomic accesses to
-                * wq->single_cpu; otherwise, use the current one.
-                */
-       retry:
-               cpu = wq->single_cpu;
-               arbitrate = cpu == NR_CPUS;
-               if (arbitrate)
-                       cpu = req_cpu;
-
-               gcwq = get_gcwq(cpu);
+               gcwq = get_gcwq(WORK_CPU_UNBOUND);
                spin_lock_irqsave(&gcwq->lock, flags);
-
-               /*
-                * The following cmpxchg() is a full barrier paired
-                * with smp_wmb() in cwq_unbind_single_cpu() and
-                * guarantees that all changes to wq->st_* fields are
-                * visible on the new cpu after this point.
-                */
-               if (arbitrate)
-                       cmpxchg(&wq->single_cpu, NR_CPUS, cpu);
-
-               if (unlikely(wq->single_cpu != cpu)) {
-                       spin_unlock_irqrestore(&gcwq->lock, flags);
-                       goto retry;
-               }
        }
 
        /* gcwq determined, get cwq and queue */
@@ -887,7 +942,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 
        if (likely(cwq->nr_active < cwq->max_active)) {
                cwq->nr_active++;
-               worklist = &gcwq->worklist;
+               worklist = gcwq_determine_ins_pos(gcwq, cwq);
        } else
                worklist = &cwq->delayed_works;
 
@@ -984,19 +1039,30 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
        struct work_struct *work = &dwork->work;
 
        if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
-               struct global_cwq *gcwq = get_work_gcwq(work);
-               unsigned int lcpu = gcwq ? gcwq->cpu : raw_smp_processor_id();
+               unsigned int lcpu;
 
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));
 
                timer_stats_timer_set_start_info(&dwork->timer);
+
                /*
                 * This stores cwq for the moment, for the timer_fn.
                 * Note that the work's gcwq is preserved to allow
                 * reentrance detection for delayed works.
                 */
+               if (!(wq->flags & WQ_UNBOUND)) {
+                       struct global_cwq *gcwq = get_work_gcwq(work);
+
+                       if (gcwq && gcwq->cpu != WORK_CPU_UNBOUND)
+                               lcpu = gcwq->cpu;
+                       else
+                               lcpu = raw_smp_processor_id();
+               } else
+                       lcpu = WORK_CPU_UNBOUND;
+
                set_work_cwq(work, get_cwq(lcpu, wq), 0);
+
                timer->expires = jiffies + delay;
                timer->data = (unsigned long)dwork;
                timer->function = delayed_work_timer_fn;
@@ -1029,7 +1095,8 @@ static void worker_enter_idle(struct worker *worker)
        BUG_ON(!list_empty(&worker->entry) &&
               (worker->hentry.next || worker->hentry.pprev));
 
-       worker_set_flags(worker, WORKER_IDLE, false);
+       /* can't use worker_set_flags(), also called from start_worker() */
+       worker->flags |= WORKER_IDLE;
        gcwq->nr_idle++;
        worker->last_active = jiffies;
 
@@ -1042,6 +1109,10 @@ static void worker_enter_idle(struct worker *worker)
                                  jiffies + IDLE_WORKER_TIMEOUT);
        } else
                wake_up_all(&gcwq->trustee_wait);
+
+       /* sanity check nr_running */
+       WARN_ON_ONCE(gcwq->nr_workers == gcwq->nr_idle &&
+                    atomic_read(get_gcwq_nr_running(gcwq->cpu)));
 }
 
 /**
@@ -1105,7 +1176,8 @@ static bool worker_maybe_bind_and_lock(struct worker *worker)
                 * it races with cpu hotunplug operation.  Verify
                 * against GCWQ_DISASSOCIATED.
                 */
-               set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu));
+               if (!(gcwq->flags & GCWQ_DISASSOCIATED))
+                       set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu));
 
                spin_lock_irq(&gcwq->lock);
                if (gcwq->flags & GCWQ_DISASSOCIATED)
@@ -1170,8 +1242,9 @@ static struct worker *alloc_worker(void)
  */
 static struct worker *create_worker(struct global_cwq *gcwq, bool bind)
 {
-       int id = -1;
+       bool on_unbound_cpu = gcwq->cpu == WORK_CPU_UNBOUND;
        struct worker *worker = NULL;
+       int id = -1;
 
        spin_lock_irq(&gcwq->lock);
        while (ida_get_new(&gcwq->worker_ida, &id)) {
@@ -1189,8 +1262,12 @@ static struct worker *create_worker(struct global_cwq *gcwq, bool bind)
        worker->gcwq = gcwq;
        worker->id = id;
 
-       worker->task = kthread_create(worker_thread, worker, "kworker/%u:%d",
-                                     gcwq->cpu, id);
+       if (!on_unbound_cpu)
+               worker->task = kthread_create(worker_thread, worker,
+                                             "kworker/%u:%d", gcwq->cpu, id);
+       else
+               worker->task = kthread_create(worker_thread, worker,
+                                             "kworker/u:%d", id);
        if (IS_ERR(worker->task))
                goto fail;
 
@@ -1199,10 +1276,13 @@ static struct worker *create_worker(struct global_cwq *gcwq, bool bind)
         * online later on.  Make sure every worker has
         * PF_THREAD_BOUND set.
         */
-       if (bind)
+       if (bind && !on_unbound_cpu)
                kthread_bind(worker->task, gcwq->cpu);
-       else
+       else {
                worker->task->flags |= PF_THREAD_BOUND;
+               if (on_unbound_cpu)
+                       worker->flags |= WORKER_UNBOUND;
+       }
 
        return worker;
 fail:
@@ -1226,7 +1306,7 @@ fail:
  */
 static void start_worker(struct worker *worker)
 {
-       worker_set_flags(worker, WORKER_STARTED, false);
+       worker->flags |= WORKER_STARTED;
        worker->gcwq->nr_workers++;
        worker_enter_idle(worker);
        wake_up_process(worker->task);
@@ -1256,7 +1336,7 @@ static void destroy_worker(struct worker *worker)
                gcwq->nr_idle--;
 
        list_del_init(&worker->entry);
-       worker_set_flags(worker, WORKER_DIE, false);
+       worker->flags |= WORKER_DIE;
 
        spin_unlock_irq(&gcwq->lock);
 
@@ -1297,12 +1377,17 @@ static bool send_mayday(struct work_struct *work)
 {
        struct cpu_workqueue_struct *cwq = get_work_cwq(work);
        struct workqueue_struct *wq = cwq->wq;
+       unsigned int cpu;
 
        if (!(wq->flags & WQ_RESCUER))
                return false;
 
        /* mayday mayday mayday */
-       if (!cpumask_test_and_set_cpu(cwq->gcwq->cpu, wq->mayday_mask))
+       cpu = cwq->gcwq->cpu;
+       /* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */
+       if (cpu == WORK_CPU_UNBOUND)
+               cpu = 0;
+       if (!cpumask_test_and_set_cpu(cpu, wq->mayday_mask))
                wake_up_process(wq->rescuer->task);
        return true;
 }
@@ -1357,14 +1442,14 @@ static bool maybe_create_worker(struct global_cwq *gcwq)
        if (!need_to_create_worker(gcwq))
                return false;
 restart:
+       spin_unlock_irq(&gcwq->lock);
+
        /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
        mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
 
        while (true) {
                struct worker *worker;
 
-               spin_unlock_irq(&gcwq->lock);
-
                worker = create_worker(gcwq, true);
                if (worker) {
                        del_timer_sync(&gcwq->mayday_timer);
@@ -1377,15 +1462,13 @@ restart:
                if (!need_to_create_worker(gcwq))
                        break;
 
-               spin_unlock_irq(&gcwq->lock);
                __set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(CREATE_COOLDOWN);
-               spin_lock_irq(&gcwq->lock);
+
                if (!need_to_create_worker(gcwq))
                        break;
        }
 
-       spin_unlock_irq(&gcwq->lock);
        del_timer_sync(&gcwq->mayday_timer);
        spin_lock_irq(&gcwq->lock);
        if (need_to_create_worker(gcwq))
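
The hunk above is the locking fix named in the commit title: gcwq->lock is now dropped once at the top of each restart round and re-acquired only after the blocking create/cooldown steps, so maybe_create_worker() still enters and leaves with the lock held. A stripped-down sketch of that pattern, with made-up stand-ins for need_to_create_worker() and create_worker():

	static bool need_more(void);	/* hypothetical stand-in */
	static bool try_create(void);	/* hypothetical stand-in */

	static void create_retry_sketch(spinlock_t *lock)
	{
	restart:
		spin_unlock_irq(lock);		/* blocking section starts */

		while (!try_create()) {
			if (!need_more())
				break;
			schedule_timeout_interruptible(CREATE_COOLDOWN);
			if (!need_more())
				break;
		}

		spin_lock_irq(lock);		/* re-acquire before deciding */
		if (need_more())
			goto restart;
	}
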
@@ -1526,8 +1609,9 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
 {
        struct work_struct *work = list_first_entry(&cwq->delayed_works,
                                                    struct work_struct, entry);
+       struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);
 
-       move_linked_works(work, &cwq->gcwq->worklist, NULL);
+       move_linked_works(work, pos, NULL);
        cwq->nr_active++;
 }
 
@@ -1555,9 +1639,6 @@ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
                /* one down, submit a delayed one */
                if (cwq->nr_active < cwq->max_active)
                        cwq_activate_first_delayed(cwq);
-       } else if (!cwq->nr_active && cwq->wq->flags & WQ_SINGLE_CPU) {
-               /* this was the last work, unbind from single cpu */
-               cwq_unbind_single_cpu(cwq);
        }
 
        /* is flush in progress and are we at the flushing tip? */
@@ -1598,6 +1679,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
        struct cpu_workqueue_struct *cwq = get_work_cwq(work);
        struct global_cwq *gcwq = cwq->gcwq;
        struct hlist_head *bwh = busy_worker_head(gcwq, work);
+       bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
        work_func_t f = work->func;
        int work_color;
        struct worker *collision;
@@ -1634,6 +1716,28 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
        set_work_cpu(work, gcwq->cpu);
        list_del_init(&work->entry);
 
+       /*
+        * If HIGHPRI_PENDING, check the next work, and, if HIGHPRI,
+        * wake up another worker; otherwise, clear HIGHPRI_PENDING.
+        */
+       if (unlikely(gcwq->flags & GCWQ_HIGHPRI_PENDING)) {
+               struct work_struct *nwork = list_first_entry(&gcwq->worklist,
+                                               struct work_struct, entry);
+
+               if (!list_empty(&gcwq->worklist) &&
+                   get_work_cwq(nwork)->wq->flags & WQ_HIGHPRI)
+                       wake_up_worker(gcwq);
+               else
+                       gcwq->flags &= ~GCWQ_HIGHPRI_PENDING;
+       }
+
+       /*
+        * CPU intensive works don't participate in concurrency
+        * management.  They're the scheduler's responsibility.
+        */
+       if (unlikely(cpu_intensive))
+               worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);
+
        spin_unlock_irq(&gcwq->lock);
 
        work_clear_pending(work);
@@ -1655,6 +1759,10 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
 
        spin_lock_irq(&gcwq->lock);
 
+       /* clear cpu intensive status */
+       if (unlikely(cpu_intensive))
+               worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
+
        /* we're done with it, release */
        hlist_del_init(&worker->hentry);
        worker->current_work = NULL;
@@ -1751,10 +1859,10 @@ recheck:
        } while (keep_working(gcwq));
 
        worker_set_flags(worker, WORKER_PREP, false);
-
+sleep:
        if (unlikely(need_to_manage_workers(gcwq)) && manage_workers(worker))
                goto recheck;
-sleep:
+
        /*
         * gcwq->lock is held and there's no work to process and no
         * need to manage, sleep.  Workers are woken up only while
@@ -1793,6 +1901,7 @@ static int rescuer_thread(void *__wq)
        struct workqueue_struct *wq = __wq;
        struct worker *rescuer = wq->rescuer;
        struct list_head *scheduled = &rescuer->scheduled;
+       bool is_unbound = wq->flags & WQ_UNBOUND;
        unsigned int cpu;
 
        set_user_nice(current, RESCUER_NICE_LEVEL);
@@ -1802,8 +1911,13 @@ repeat:
        if (kthread_should_stop())
                return 0;
 
+       /*
+        * See whether any cpu is asking for help.  Unbound
+        * workqueues use cpu 0 in mayday_mask for WORK_CPU_UNBOUND.
+        */
        for_each_cpu(cpu, wq->mayday_mask) {
-               struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+               unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu;
+               struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq);
                struct global_cwq *gcwq = cwq->gcwq;
                struct work_struct *work, *n;
 
@@ -1945,7 +2059,7 @@ static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
                atomic_set(&wq->nr_cwqs_to_flush, 1);
        }
 
-       for_each_possible_cpu(cpu) {
+       for_each_cwq_cpu(cpu, wq) {
                struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
                struct global_cwq *gcwq = cwq->gcwq;
 
@@ -2057,6 +2171,10 @@ void flush_workqueue(struct workqueue_struct *wq)
 
        mutex_lock(&wq->flush_mutex);
 
+       /* we might have raced, check again with mutex held */
+       if (wq->first_flusher != &this_flusher)
+               goto out_unlock;
+
        wq->first_flusher = NULL;
 
        BUG_ON(!list_empty(&this_flusher.list));
@@ -2251,7 +2369,7 @@ static void wait_on_work(struct work_struct *work)
        lock_map_acquire(&work->lockdep_map);
        lock_map_release(&work->lockdep_map);
 
-       for_each_possible_cpu(cpu)
+       for_each_gcwq_cpu(cpu)
                wait_on_cpu_work(get_gcwq(cpu), work);
 }
 
@@ -2487,7 +2605,7 @@ int keventd_up(void)
        return system_wq != NULL;
 }
 
-static struct cpu_workqueue_struct *alloc_cwqs(void)
+static int alloc_cwqs(struct workqueue_struct *wq)
 {
        /*
         * cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
@@ -2497,51 +2615,51 @@ static struct cpu_workqueue_struct *alloc_cwqs(void)
        const size_t size = sizeof(struct cpu_workqueue_struct);
        const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
                                   __alignof__(unsigned long long));
-       struct cpu_workqueue_struct *cwqs;
-#ifndef CONFIG_SMP
-       void *ptr;
 
-       /*
-        * On UP, percpu allocator doesn't honor alignment parameter
-        * and simply uses arch-dependent default.  Allocate enough
-        * room to align cwq and put an extra pointer at the end
-        * pointing back to the originally allocated pointer which
-        * will be used for free.
-        *
-        * FIXME: This really belongs to UP percpu code.  Update UP
-        * percpu code to honor alignment and remove this ugliness.
-        */
-       ptr = __alloc_percpu(size + align + sizeof(void *), 1);
-       cwqs = PTR_ALIGN(ptr, align);
-       *(void **)per_cpu_ptr(cwqs + 1, 0) = ptr;
-#else
-       /* On SMP, percpu allocator can do it itself */
-       cwqs = __alloc_percpu(size, align);
-#endif
+#ifdef CONFIG_SMP
+       bool percpu = !(wq->flags & WQ_UNBOUND);
+#else
+       bool percpu = false;
+#endif
+
+       if (percpu) {
+               /* on SMP, percpu allocator can align itself */
+               wq->cpu_wq.pcpu = __alloc_percpu(size, align);
+       } else {
+               void *ptr;
+
+               /*
+                * Allocate enough room to align cwq and put an extra
+                * pointer at the end pointing back to the originally
+                * allocated pointer which will be used for free.
+                */
+               ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL);
+               if (ptr) {
+                       wq->cpu_wq.single = PTR_ALIGN(ptr, align);
+                       *(void **)(wq->cpu_wq.single + 1) = ptr;
+               }
+       }
+
        /* just in case, make sure it's actually aligned */
-       BUG_ON(!IS_ALIGNED((unsigned long)cwqs, align));
-       return cwqs;
+       BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align));
+       return wq->cpu_wq.v ? 0 : -ENOMEM;
 }
 
-static void free_cwqs(struct cpu_workqueue_struct *cwqs)
+static void free_cwqs(struct workqueue_struct *wq)
 {
-#ifndef CONFIG_SMP
-       /* on UP, the pointer to free is stored right after the cwq */
-       if (cwqs)
-               free_percpu(*(void **)per_cpu_ptr(cwqs + 1, 0));
-#else
-       free_percpu(cwqs);
-#endif
+#ifdef CONFIG_SMP
+       bool percpu = !(wq->flags & WQ_UNBOUND);
+#else
+       bool percpu = false;
+#endif
+
+       if (percpu)
+               free_percpu(wq->cpu_wq.pcpu);
+       else if (wq->cpu_wq.single) {
+               /* the pointer to free is stored right after the cwq */
+               kfree(*(void **)(wq->cpu_wq.single + 1));
+       }
 }
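
The non-percpu branch above relies on an over-allocate, align, and stash pattern so the single cwq still meets the 1 << WORK_STRUCT_FLAG_BITS alignment. The same trick in generic form, as an illustrative aside (the helpers below are made up):

	static void *alloc_aligned(size_t size, size_t align)
	{
		void *ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL);
		void *aligned;

		if (!ptr)
			return NULL;
		aligned = PTR_ALIGN(ptr, align);
		/* stash the original pointer right past the object for freeing */
		*(void **)(aligned + size) = ptr;
		return aligned;
	}

	static void free_aligned(void *aligned, size_t size)
	{
		if (aligned)
			kfree(*(void **)(aligned + size));
	}
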
 
-static int wq_clamp_max_active(int max_active, const char *name)
+static int wq_clamp_max_active(int max_active, unsigned int flags,
+                              const char *name)
 {
-       if (max_active < 1 || max_active > WQ_MAX_ACTIVE)
+       int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
+
+       if (max_active < 1 || max_active > lim)
                printk(KERN_WARNING "workqueue: max_active %d requested for %s "
                       "is out of range, clamping between %d and %d\n",
-                      max_active, name, 1, WQ_MAX_ACTIVE);
+                      max_active, name, 1, lim);
 
-       return clamp_val(max_active, 1, WQ_MAX_ACTIVE);
+       return clamp_val(max_active, 1, lim);
 }
 
 struct workqueue_struct *__alloc_workqueue_key(const char *name,
@@ -2553,30 +2671,35 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
        struct workqueue_struct *wq;
        unsigned int cpu;
 
+       /*
+        * Unbound workqueues aren't concurrency managed and should be
+        * dispatched to workers immediately.
+        */
+       if (flags & WQ_UNBOUND)
+               flags |= WQ_HIGHPRI;
+
        max_active = max_active ?: WQ_DFL_ACTIVE;
-       max_active = wq_clamp_max_active(max_active, name);
+       max_active = wq_clamp_max_active(max_active, flags, name);
 
        wq = kzalloc(sizeof(*wq), GFP_KERNEL);
        if (!wq)
                goto err;
 
-       wq->cpu_wq = alloc_cwqs();
-       if (!wq->cpu_wq)
-               goto err;
-
        wq->flags = flags;
        wq->saved_max_active = max_active;
        mutex_init(&wq->flush_mutex);
        atomic_set(&wq->nr_cwqs_to_flush, 0);
        INIT_LIST_HEAD(&wq->flusher_queue);
        INIT_LIST_HEAD(&wq->flusher_overflow);
-       wq->single_cpu = NR_CPUS;
 
        wq->name = name;
        lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
        INIT_LIST_HEAD(&wq->list);
 
-       for_each_possible_cpu(cpu) {
+       if (alloc_cwqs(wq) < 0)
+               goto err;
+
+       for_each_cwq_cpu(cpu, wq) {
                struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
                struct global_cwq *gcwq = get_gcwq(cpu);
 
@@ -2615,7 +2738,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
        spin_lock(&workqueue_lock);
 
        if (workqueue_freezing && wq->flags & WQ_FREEZEABLE)
-               for_each_possible_cpu(cpu)
+               for_each_cwq_cpu(cpu, wq)
                        get_cwq(cpu, wq)->max_active = 0;
 
        list_add(&wq->list, &workqueues);
@@ -2625,7 +2748,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
        return wq;
 err:
        if (wq) {
-               free_cwqs(wq->cpu_wq);
+               free_cwqs(wq);
                free_cpumask_var(wq->mayday_mask);
                kfree(wq->rescuer);
                kfree(wq);
@@ -2655,7 +2778,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
        spin_unlock(&workqueue_lock);
 
        /* sanity check */
-       for_each_possible_cpu(cpu) {
+       for_each_cwq_cpu(cpu, wq) {
                struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
                int i;
 
@@ -2670,11 +2793,117 @@ void destroy_workqueue(struct workqueue_struct *wq)
                free_cpumask_var(wq->mayday_mask);
        }
 
-       free_cwqs(wq->cpu_wq);
+       free_cwqs(wq);
        kfree(wq);
 }
 EXPORT_SYMBOL_GPL(destroy_workqueue);
 
+/**
+ * workqueue_set_max_active - adjust max_active of a workqueue
+ * @wq: target workqueue
+ * @max_active: new max_active value.
+ *
+ * Set max_active of @wq to @max_active.
+ *
+ * CONTEXT:
+ * Don't call from IRQ context.
+ */
+void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
+{
+       unsigned int cpu;
+
+       max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
+
+       spin_lock(&workqueue_lock);
+
+       wq->saved_max_active = max_active;
+
+       for_each_cwq_cpu(cpu, wq) {
+               struct global_cwq *gcwq = get_gcwq(cpu);
+
+               spin_lock_irq(&gcwq->lock);
+
+               if (!(wq->flags & WQ_FREEZEABLE) ||
+                   !(gcwq->flags & GCWQ_FREEZING))
+                       get_cwq(gcwq->cpu, wq)->max_active = max_active;
+
+               spin_unlock_irq(&gcwq->lock);
+       }
+
+       spin_unlock(&workqueue_lock);
+}
+EXPORT_SYMBOL_GPL(workqueue_set_max_active);
+
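
A brief usage sketch for the new knob (illustrative only; my_wq and my_set_turbo() are made up): a driver could drop a queue to one in-flight work per cwq under pressure and restore the default later.

	static void my_set_turbo(struct workqueue_struct *my_wq, bool turbo)
	{
		/* max_active is applied per cwq, i.e. per CPU for bound wqs */
		workqueue_set_max_active(my_wq, turbo ? WQ_DFL_ACTIVE : 1);
	}
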
+/**
+ * workqueue_congested - test whether a workqueue is congested
+ * @cpu: CPU in question
+ * @wq: target workqueue
+ *
+ * Test whether @wq's cpu workqueue for @cpu is congested.  There is
+ * no synchronization around this function and the test result is
+ * unreliable and only useful as advisory hints or for debugging.
+ *
+ * RETURNS:
+ * %true if congested, %false otherwise.
+ */
+bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq)
+{
+       struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+
+       return !list_empty(&cwq->delayed_works);
+}
+EXPORT_SYMBOL_GPL(workqueue_congested);
+
+/**
+ * work_cpu - return the last known associated cpu for @work
+ * @work: the work of interest
+ *
+ * RETURNS:
+ * CPU number if @work was ever queued.  WORK_CPU_NONE otherwise.
+ */
+unsigned int work_cpu(struct work_struct *work)
+{
+       struct global_cwq *gcwq = get_work_gcwq(work);
+
+       return gcwq ? gcwq->cpu : WORK_CPU_NONE;
+}
+EXPORT_SYMBOL_GPL(work_cpu);
+
+/**
+ * work_busy - test whether a work is currently pending or running
+ * @work: the work to be tested
+ *
+ * Test whether @work is currently pending or running.  There is no
+ * synchronization around this function and the test result is
+ * unreliable and only useful as advisory hints or for debugging.
+ * Especially for reentrant wqs, the pending state might hide the
+ * running state.
+ *
+ * RETURNS:
+ * OR'd bitmask of WORK_BUSY_* bits.
+ */
+unsigned int work_busy(struct work_struct *work)
+{
+       struct global_cwq *gcwq = get_work_gcwq(work);
+       unsigned long flags;
+       unsigned int ret = 0;
+
+       if (!gcwq)
+               return false;
+
+       spin_lock_irqsave(&gcwq->lock, flags);
+
+       if (work_pending(work))
+               ret |= WORK_BUSY_PENDING;
+       if (find_worker_executing_work(gcwq, work))
+               ret |= WORK_BUSY_RUNNING;
+
+       spin_unlock_irqrestore(&gcwq->lock, flags);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(work_busy);
+
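
The three helpers above (workqueue_congested(), work_cpu(), work_busy()) are advisory only; a caller might poll them as in the sketch below (made-up names, and my_wq is assumed to be a regular bound workqueue so the local CPU has a cwq).

	static void my_inspect(struct workqueue_struct *my_wq,
			       struct work_struct *my_work)
	{
		unsigned int busy = work_busy(my_work);

		if (busy & WORK_BUSY_PENDING)
			pr_info("work is pending\n");
		if (busy & WORK_BUSY_RUNNING)
			pr_info("work is running, last cpu %u\n",
				work_cpu(my_work));
		if (workqueue_congested(raw_smp_processor_id(), my_wq))
			pr_info("local cwq has works backed up\n");
	}
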
 /*
  * CPU hotplug.
  *
@@ -2801,10 +3030,10 @@ static int __cpuinit trustee_thread(void *__gcwq)
        gcwq->flags |= GCWQ_MANAGING_WORKERS;
 
        list_for_each_entry(worker, &gcwq->idle_list, entry)
-               worker_set_flags(worker, WORKER_ROGUE, false);
+               worker->flags |= WORKER_ROGUE;
 
        for_each_busy_worker(worker, i, pos, gcwq)
-               worker_set_flags(worker, WORKER_ROGUE, false);
+               worker->flags |= WORKER_ROGUE;
 
        /*
         * Call schedule() so that we cross rq->lock and thus can
@@ -2817,12 +3046,12 @@ static int __cpuinit trustee_thread(void *__gcwq)
        spin_lock_irq(&gcwq->lock);
 
        /*
-        * Sched callbacks are disabled now.  gcwq->nr_running should
-        * be zero and will stay that way, making need_more_worker()
-        * and keep_working() always return true as long as the
-        * worklist is not empty.
+        * Sched callbacks are disabled now.  Zap nr_running.  After
+        * this, nr_running stays zero and need_more_worker() and
+        * keep_working() are always true as long as the worklist is
+        * not empty.
         */
-       WARN_ON_ONCE(atomic_read(get_gcwq_nr_running(gcwq->cpu)) != 0);
+       atomic_set(get_gcwq_nr_running(gcwq->cpu), 0);
 
        spin_unlock_irq(&gcwq->lock);
        del_timer_sync(&gcwq->idle_timer);
@@ -2868,7 +3097,7 @@ static int __cpuinit trustee_thread(void *__gcwq)
                        worker = create_worker(gcwq, false);
                        spin_lock_irq(&gcwq->lock);
                        if (worker) {
-                               worker_set_flags(worker, WORKER_ROGUE, false);
+                               worker->flags |= WORKER_ROGUE;
                                start_worker(worker);
                        }
                }
@@ -2907,8 +3136,8 @@ static int __cpuinit trustee_thread(void *__gcwq)
                 * operations.  Use a separate flag to mark that
                 * rebinding is scheduled.
                 */
-               worker_set_flags(worker, WORKER_REBIND, false);
-               worker_clr_flags(worker, WORKER_ROGUE);
+               worker->flags |= WORKER_REBIND;
+               worker->flags &= ~WORKER_ROGUE;
 
                /* queue rebind_work, wq doesn't matter, use the default one */
                if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
@@ -3109,7 +3338,6 @@ EXPORT_SYMBOL_GPL(work_on_cpu);
  */
 void freeze_workqueues_begin(void)
 {
-       struct workqueue_struct *wq;
        unsigned int cpu;
 
        spin_lock(&workqueue_lock);
@@ -3117,8 +3345,9 @@ void freeze_workqueues_begin(void)
        BUG_ON(workqueue_freezing);
        workqueue_freezing = true;
 
-       for_each_possible_cpu(cpu) {
+       for_each_gcwq_cpu(cpu) {
                struct global_cwq *gcwq = get_gcwq(cpu);
+               struct workqueue_struct *wq;
 
                spin_lock_irq(&gcwq->lock);
 
@@ -3128,7 +3357,7 @@ void freeze_workqueues_begin(void)
                list_for_each_entry(wq, &workqueues, list) {
                        struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 
-                       if (wq->flags & WQ_FREEZEABLE)
+                       if (cwq && wq->flags & WQ_FREEZEABLE)
                                cwq->max_active = 0;
                }
 
@@ -3153,7 +3382,6 @@ void freeze_workqueues_begin(void)
  */
 bool freeze_workqueues_busy(void)
 {
-       struct workqueue_struct *wq;
        unsigned int cpu;
        bool busy = false;
 
@@ -3161,7 +3389,8 @@ bool freeze_workqueues_busy(void)
 
        BUG_ON(!workqueue_freezing);
 
-       for_each_possible_cpu(cpu) {
+       for_each_gcwq_cpu(cpu) {
+               struct workqueue_struct *wq;
                /*
                 * nr_active is monotonically decreasing.  It's safe
                 * to peek without lock.
@@ -3169,7 +3398,7 @@ bool freeze_workqueues_busy(void)
                list_for_each_entry(wq, &workqueues, list) {
                        struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 
-                       if (!(wq->flags & WQ_FREEZEABLE))
+                       if (!cwq || !(wq->flags & WQ_FREEZEABLE))
                                continue;
 
                        BUG_ON(cwq->nr_active < 0);
@@ -3195,7 +3424,6 @@ out_unlock:
  */
 void thaw_workqueues(void)
 {
-       struct workqueue_struct *wq;
        unsigned int cpu;
 
        spin_lock(&workqueue_lock);
@@ -3203,8 +3431,9 @@ void thaw_workqueues(void)
        if (!workqueue_freezing)
                goto out_unlock;
 
-       for_each_possible_cpu(cpu) {
+       for_each_gcwq_cpu(cpu) {
                struct global_cwq *gcwq = get_gcwq(cpu);
+               struct workqueue_struct *wq;
 
                spin_lock_irq(&gcwq->lock);
 
@@ -3214,7 +3443,7 @@ void thaw_workqueues(void)
                list_for_each_entry(wq, &workqueues, list) {
                        struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 
-                       if (!(wq->flags & WQ_FREEZEABLE))
+                       if (!cwq || !(wq->flags & WQ_FREEZEABLE))
                                continue;
 
                        /* restore max_active and repopulate worklist */
@@ -3223,11 +3452,6 @@ void thaw_workqueues(void)
                        while (!list_empty(&cwq->delayed_works) &&
                               cwq->nr_active < cwq->max_active)
                                cwq_activate_first_delayed(cwq);
-
-                       /* perform delayed unbind from single cpu if empty */
-                       if (wq->single_cpu == gcwq->cpu &&
-                           !cwq->nr_active && list_empty(&cwq->delayed_works))
-                               cwq_unbind_single_cpu(cwq);
                }
 
                wake_up_worker(gcwq);
@@ -3252,17 +3476,19 @@ void __init init_workqueues(void)
         * sure cpu number won't overflow into kernel pointer area so
         * that they can be distinguished.
         */
-       BUILD_BUG_ON(NR_CPUS << WORK_STRUCT_FLAG_BITS >= PAGE_OFFSET);
+       BUILD_BUG_ON(WORK_CPU_LAST << WORK_STRUCT_FLAG_BITS >= PAGE_OFFSET);
 
        hotcpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE);
 
        /* initialize gcwqs */
-       for_each_possible_cpu(cpu) {
+       for_each_gcwq_cpu(cpu) {
                struct global_cwq *gcwq = get_gcwq(cpu);
 
                spin_lock_init(&gcwq->lock);
                INIT_LIST_HEAD(&gcwq->worklist);
                gcwq->cpu = cpu;
+               if (cpu == WORK_CPU_UNBOUND)
+                       gcwq->flags |= GCWQ_DISASSOCIATED;
 
                INIT_LIST_HEAD(&gcwq->idle_list);
                for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
@@ -3282,7 +3508,7 @@ void __init init_workqueues(void)
        }
 
        /* create the initial worker */
-       for_each_online_cpu(cpu) {
+       for_each_online_gcwq_cpu(cpu) {
                struct global_cwq *gcwq = get_gcwq(cpu);
                struct worker *worker;
 
@@ -3296,5 +3522,7 @@ void __init init_workqueues(void)
        system_wq = alloc_workqueue("events", 0, 0);
        system_long_wq = alloc_workqueue("events_long", 0, 0);
        system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0);
+       system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
+                                           WQ_UNBOUND_MAX_ACTIVE);
        BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq);
 }
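
Finally, a usage sketch for the new system_unbound_wq (names below are made up): works queued on it are served by the WORK_CPU_UNBOUND gcwq, skip concurrency management, and may run on any CPU.

	static void my_unbound_fn(struct work_struct *work)
	{
		pr_info("unbound work on cpu %d\n", raw_smp_processor_id());
	}
	static DECLARE_WORK(my_unbound_work, my_unbound_fn);

	static void my_kick(void)
	{
		queue_work(system_unbound_wq, &my_unbound_work);
	}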