workqueue: add find_worker_executing_work() and track current_cwq
kernel/workqueue.c
1 /*
2 * linux/kernel/workqueue.c
3 *
4 * Generic mechanism for defining kernel helper threads for running
5 * arbitrary tasks in process context.
6 *
7 * Started by Ingo Molnar, Copyright (C) 2002
8 *
9 * Derived from the taskqueue/keventd code by:
10 *
11 * David Woodhouse <dwmw2@infradead.org>
12 * Andrew Morton
13 * Kai Petzke <wpp@marie.physik.tu-berlin.de>
14 * Theodore Ts'o <tytso@mit.edu>
15 *
16 * Made to use alloc_percpu by Christoph Lameter.
17 */
18
19 #include <linux/module.h>
20 #include <linux/kernel.h>
21 #include <linux/sched.h>
22 #include <linux/init.h>
23 #include <linux/signal.h>
24 #include <linux/completion.h>
25 #include <linux/workqueue.h>
26 #include <linux/slab.h>
27 #include <linux/cpu.h>
28 #include <linux/notifier.h>
29 #include <linux/kthread.h>
30 #include <linux/hardirq.h>
31 #include <linux/mempolicy.h>
32 #include <linux/freezer.h>
33 #include <linux/kallsyms.h>
34 #include <linux/debug_locks.h>
35 #include <linux/lockdep.h>
36 #include <linux/idr.h>
37
38 enum {
39 /* global_cwq flags */
40 GCWQ_FREEZING = 1 << 3, /* freeze in progress */
41
42 /* worker flags */
43 WORKER_STARTED = 1 << 0, /* started */
44 WORKER_DIE = 1 << 1, /* die die die */
45 WORKER_IDLE = 1 << 2, /* is idle */
46 WORKER_ROGUE = 1 << 4, /* not bound to any cpu */
47
48 /* gcwq->trustee_state */
49 TRUSTEE_START = 0, /* start */
50 TRUSTEE_IN_CHARGE = 1, /* trustee in charge of gcwq */
51 TRUSTEE_BUTCHER = 2, /* butcher workers */
52 TRUSTEE_RELEASE = 3, /* release workers */
53 TRUSTEE_DONE = 4, /* trustee is done */
54
55 BUSY_WORKER_HASH_ORDER = 6, /* 64 pointers */
56 BUSY_WORKER_HASH_SIZE = 1 << BUSY_WORKER_HASH_ORDER,
57 BUSY_WORKER_HASH_MASK = BUSY_WORKER_HASH_SIZE - 1,
58
59 TRUSTEE_COOLDOWN = HZ / 10, /* for trustee draining */
60 };
61
62 /*
63 * Structure fields follow one of the following exclusion rules.
64 *
65 * I: Set during initialization and read-only afterwards.
66 *
67 * L: gcwq->lock protected. Access with gcwq->lock held.
68 *
69 * F: wq->flush_mutex protected.
70 *
71 * W: workqueue_lock protected.
72 */
73
74 struct global_cwq;
75 struct cpu_workqueue_struct;
76
77 struct worker {
78 /* on idle list while idle, on busy hash table while busy */
79 union {
80 struct list_head entry; /* L: while idle */
81 struct hlist_node hentry; /* L: while busy */
82 };
83
84 struct work_struct *current_work; /* L: work being processed */
85 struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
86 struct list_head scheduled; /* L: scheduled works */
87 struct task_struct *task; /* I: worker task */
88 struct global_cwq *gcwq; /* I: the associated gcwq */
89 struct cpu_workqueue_struct *cwq; /* I: the associated cwq */
90 unsigned int flags; /* L: flags */
91 int id; /* I: worker id */
92 };
93
94 /*
95 * Global per-cpu workqueue.
96 */
97 struct global_cwq {
98 spinlock_t lock; /* the gcwq lock */
99 unsigned int cpu; /* I: the associated cpu */
100 unsigned int flags; /* L: GCWQ_* flags */
101
102 int nr_workers; /* L: total number of workers */
103 int nr_idle; /* L: currently idle ones */
104
105 /* workers are chained either in the idle_list or busy_hash */
106 struct list_head idle_list; /* L: list of idle workers */
107 struct hlist_head busy_hash[BUSY_WORKER_HASH_SIZE];
108 /* L: hash of busy workers */
109
110 struct ida worker_ida; /* L: for worker IDs */
111
112 struct task_struct *trustee; /* L: for gcwq shutdown */
113 unsigned int trustee_state; /* L: trustee state */
114 wait_queue_head_t trustee_wait; /* trustee wait */
115 } ____cacheline_aligned_in_smp;
116
117 /*
118 * The per-CPU workqueue. The lower WORK_STRUCT_FLAG_BITS of
119 * work_struct->data are used for flags and thus cwqs need to be
120 * aligned to a boundary of 1 << WORK_STRUCT_FLAG_BITS.
121 */
122 struct cpu_workqueue_struct {
123 struct global_cwq *gcwq; /* I: the associated gcwq */
124 struct list_head worklist;
125 struct worker *worker;
126 struct workqueue_struct *wq; /* I: the owning workqueue */
127 int work_color; /* L: current color */
128 int flush_color; /* L: flushing color */
129 int nr_in_flight[WORK_NR_COLORS];
130 /* L: nr of in_flight works */
131 int nr_active; /* L: nr of active works */
132 int max_active; /* L: max active works */
133 struct list_head delayed_works; /* L: delayed works */
134 };
135
136 /*
137 * Structure used to wait for workqueue flush.
138 */
139 struct wq_flusher {
140 struct list_head list; /* F: list of flushers */
141 int flush_color; /* F: flush color waiting for */
142 struct completion done; /* flush completion */
143 };
144
145 /*
146 * The externally visible workqueue abstraction is an array of
147 * per-CPU workqueues:
148 */
149 struct workqueue_struct {
150 unsigned int flags; /* I: WQ_* flags */
151 struct cpu_workqueue_struct *cpu_wq; /* I: cwq's */
152 struct list_head list; /* W: list of all workqueues */
153
154 struct mutex flush_mutex; /* protects wq flushing */
155 int work_color; /* F: current work color */
156 int flush_color; /* F: current flush color */
157 atomic_t nr_cwqs_to_flush; /* flush in progress */
158 struct wq_flusher *first_flusher; /* F: first flusher */
159 struct list_head flusher_queue; /* F: flush waiters */
160 struct list_head flusher_overflow; /* F: flush overflow list */
161
162 unsigned long single_cpu; /* cpu for single cpu wq */
163
164 int saved_max_active; /* I: saved cwq max_active */
165 const char *name; /* I: workqueue name */
166 #ifdef CONFIG_LOCKDEP
167 struct lockdep_map lockdep_map;
168 #endif
169 };
170
171 #define for_each_busy_worker(worker, i, pos, gcwq) \
172 for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) \
173 hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
174
175 #ifdef CONFIG_DEBUG_OBJECTS_WORK
176
177 static struct debug_obj_descr work_debug_descr;
178
179 /*
180 * fixup_init is called when:
181 * - an active object is initialized
182 */
183 static int work_fixup_init(void *addr, enum debug_obj_state state)
184 {
185 struct work_struct *work = addr;
186
187 switch (state) {
188 case ODEBUG_STATE_ACTIVE:
189 cancel_work_sync(work);
190 debug_object_init(work, &work_debug_descr);
191 return 1;
192 default:
193 return 0;
194 }
195 }
196
197 /*
198 * fixup_activate is called when:
199 * - an active object is activated
200 * - an unknown object is activated (might be a statically initialized object)
201 */
202 static int work_fixup_activate(void *addr, enum debug_obj_state state)
203 {
204 struct work_struct *work = addr;
205
206 switch (state) {
207
208 case ODEBUG_STATE_NOTAVAILABLE:
209 /*
210 * This is not really a fixup. The work struct was
211 * statically initialized. We just make sure that it
212 * is tracked in the object tracker.
213 */
214 if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
215 debug_object_init(work, &work_debug_descr);
216 debug_object_activate(work, &work_debug_descr);
217 return 0;
218 }
219 WARN_ON_ONCE(1);
220 return 0;
221
222 case ODEBUG_STATE_ACTIVE:
223 WARN_ON(1);
224
225 default:
226 return 0;
227 }
228 }
229
230 /*
231 * fixup_free is called when:
232 * - an active object is freed
233 */
234 static int work_fixup_free(void *addr, enum debug_obj_state state)
235 {
236 struct work_struct *work = addr;
237
238 switch (state) {
239 case ODEBUG_STATE_ACTIVE:
240 cancel_work_sync(work);
241 debug_object_free(work, &work_debug_descr);
242 return 1;
243 default:
244 return 0;
245 }
246 }
247
248 static struct debug_obj_descr work_debug_descr = {
249 .name = "work_struct",
250 .fixup_init = work_fixup_init,
251 .fixup_activate = work_fixup_activate,
252 .fixup_free = work_fixup_free,
253 };
254
255 static inline void debug_work_activate(struct work_struct *work)
256 {
257 debug_object_activate(work, &work_debug_descr);
258 }
259
260 static inline void debug_work_deactivate(struct work_struct *work)
261 {
262 debug_object_deactivate(work, &work_debug_descr);
263 }
264
265 void __init_work(struct work_struct *work, int onstack)
266 {
267 if (onstack)
268 debug_object_init_on_stack(work, &work_debug_descr);
269 else
270 debug_object_init(work, &work_debug_descr);
271 }
272 EXPORT_SYMBOL_GPL(__init_work);
273
274 void destroy_work_on_stack(struct work_struct *work)
275 {
276 debug_object_free(work, &work_debug_descr);
277 }
278 EXPORT_SYMBOL_GPL(destroy_work_on_stack);
279
280 #else
281 static inline void debug_work_activate(struct work_struct *work) { }
282 static inline void debug_work_deactivate(struct work_struct *work) { }
283 #endif
284
285 /* Serializes the accesses to the list of workqueues. */
286 static DEFINE_SPINLOCK(workqueue_lock);
287 static LIST_HEAD(workqueues);
288 static bool workqueue_freezing; /* W: have wqs started freezing? */
289
290 static DEFINE_PER_CPU(struct global_cwq, global_cwq);
291
292 static int worker_thread(void *__worker);
293
294 static struct global_cwq *get_gcwq(unsigned int cpu)
295 {
296 return &per_cpu(global_cwq, cpu);
297 }
298
299 static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
300 struct workqueue_struct *wq)
301 {
302 return per_cpu_ptr(wq->cpu_wq, cpu);
303 }
304
305 static unsigned int work_color_to_flags(int color)
306 {
307 return color << WORK_STRUCT_COLOR_SHIFT;
308 }
309
310 static int get_work_color(struct work_struct *work)
311 {
312 return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
313 ((1 << WORK_STRUCT_COLOR_BITS) - 1);
314 }
315
316 static int work_next_color(int color)
317 {
318 return (color + 1) % WORK_NR_COLORS;
319 }
320
321 /*
322 * Set the workqueue on which a work item is to be run
323 * - Must *only* be called if the pending flag is set
324 */
325 static inline void set_wq_data(struct work_struct *work,
326 struct cpu_workqueue_struct *cwq,
327 unsigned long extra_flags)
328 {
329 BUG_ON(!work_pending(work));
330
331 atomic_long_set(&work->data, (unsigned long)cwq | work_static(work) |
332 WORK_STRUCT_PENDING | extra_flags);
333 }
334
335 /*
336 * Clear WORK_STRUCT_PENDING and the workqueue on which it was queued.
337 */
338 static inline void clear_wq_data(struct work_struct *work)
339 {
340 atomic_long_set(&work->data, work_static(work));
341 }
342
343 static inline struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
344 {
345 return (void *)(atomic_long_read(&work->data) &
346 WORK_STRUCT_WQ_DATA_MASK);
347 }
348
349 /**
350 * busy_worker_head - return the busy hash head for a work
351 * @gcwq: gcwq of interest
352 * @work: work to be hashed
353 *
354 * Return hash head of @gcwq for @work.
355 *
356 * CONTEXT:
357 * spin_lock_irq(gcwq->lock).
358 *
359 * RETURNS:
360 * Pointer to the hash head.
361 */
362 static struct hlist_head *busy_worker_head(struct global_cwq *gcwq,
363 struct work_struct *work)
364 {
365 const int base_shift = ilog2(sizeof(struct work_struct));
366 unsigned long v = (unsigned long)work;
367
368 /* simple shift and fold hash, do we need something better? */
369 v >>= base_shift;
370 v += v >> BUSY_WORKER_HASH_ORDER;
371 v &= BUSY_WORKER_HASH_MASK;
372
373 return &gcwq->busy_hash[v];
374 }
375
376 /**
377 * __find_worker_executing_work - find worker which is executing a work
378 * @gcwq: gcwq of interest
379 * @bwh: hash head as returned by busy_worker_head()
380 * @work: work to find worker for
381 *
382 * Find a worker which is executing @work on @gcwq. @bwh should be
383 * the hash head obtained by calling busy_worker_head() with the same
384 * work.
385 *
386 * CONTEXT:
387 * spin_lock_irq(gcwq->lock).
388 *
389 * RETURNS:
390 * Pointer to worker which is executing @work if found, NULL
391 * otherwise.
392 */
393 static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
394 struct hlist_head *bwh,
395 struct work_struct *work)
396 {
397 struct worker *worker;
398 struct hlist_node *tmp;
399
400 hlist_for_each_entry(worker, tmp, bwh, hentry)
401 if (worker->current_work == work)
402 return worker;
403 return NULL;
404 }
405
406 /**
407 * find_worker_executing_work - find worker which is executing a work
408 * @gcwq: gcwq of interest
409 * @work: work to find worker for
410 *
411 * Find a worker which is executing @work on @gcwq. This function is
412 * identical to __find_worker_executing_work() except that this
413 * function calculates @bwh itself.
414 *
415 * CONTEXT:
416 * spin_lock_irq(gcwq->lock).
417 *
418 * RETURNS:
419 * Pointer to worker which is executing @work if found, NULL
420 * otherwise.
421 */
422 static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
423 struct work_struct *work)
424 {
425 return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work),
426 work);
427 }
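
/*
 * Illustrative sketch (not part of the original file): with gcwq->lock
 * held, find_worker_executing_work() tells whether @work is currently
 * being executed and by which worker.  The helper below is hypothetical.
 */
#if 0	/* example only */
static bool my_work_is_running(struct global_cwq *gcwq,
			       struct work_struct *work)
{
	struct worker *worker;
	bool running;

	spin_lock_irq(&gcwq->lock);
	worker = find_worker_executing_work(gcwq, work);
	running = worker != NULL;
	spin_unlock_irq(&gcwq->lock);

	return running;
}
#endif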
428
429 /**
430 * insert_work - insert a work into cwq
431 * @cwq: cwq @work belongs to
432 * @work: work to insert
433 * @head: insertion point
434 * @extra_flags: extra WORK_STRUCT_* flags to set
435 *
436 * Insert @work into @cwq after @head.
437 *
438 * CONTEXT:
439 * spin_lock_irq(gcwq->lock).
440 */
441 static void insert_work(struct cpu_workqueue_struct *cwq,
442 struct work_struct *work, struct list_head *head,
443 unsigned int extra_flags)
444 {
445 /* we own @work, set data and link */
446 set_wq_data(work, cwq, extra_flags);
447
448 /*
449 * Ensure that we get the right work->data if we see the
450 * result of list_add() below, see try_to_grab_pending().
451 */
452 smp_wmb();
453
454 list_add_tail(&work->entry, head);
455 wake_up_process(cwq->worker->task);
456 }
457
458 /**
459 * cwq_unbind_single_cpu - unbind cwq from single cpu workqueue processing
460 * @cwq: cwq to unbind
461 *
462 * Try to unbind @cwq from single cpu workqueue processing. If
463 * @cwq->wq is frozen, unbind is delayed till the workqueue is thawed.
464 *
465 * CONTEXT:
466 * spin_lock_irq(gcwq->lock).
467 */
468 static void cwq_unbind_single_cpu(struct cpu_workqueue_struct *cwq)
469 {
470 struct workqueue_struct *wq = cwq->wq;
471 struct global_cwq *gcwq = cwq->gcwq;
472
473 BUG_ON(wq->single_cpu != gcwq->cpu);
474 /*
475 * Unbind from workqueue if @cwq is not frozen. If frozen,
476 * thaw_workqueues() will either restart processing on this
477 * cpu or unbind if empty. This keeps works queued while
478 * frozen fully ordered and flushable.
479 */
480 if (likely(!(gcwq->flags & GCWQ_FREEZING))) {
481 smp_wmb(); /* paired with cmpxchg() in __queue_work() */
482 wq->single_cpu = NR_CPUS;
483 }
484 }
485
486 static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
487 struct work_struct *work)
488 {
489 struct global_cwq *gcwq;
490 struct cpu_workqueue_struct *cwq;
491 struct list_head *worklist;
492 unsigned long flags;
493 bool arbitrate;
494
495 debug_work_activate(work);
496
497 /* determine gcwq to use */
498 if (!(wq->flags & WQ_SINGLE_CPU)) {
499 /* just use the requested cpu for multicpu workqueues */
500 gcwq = get_gcwq(cpu);
501 spin_lock_irqsave(&gcwq->lock, flags);
502 } else {
503 unsigned int req_cpu = cpu;
504
505 /*
506 * It's a bit more complex for single cpu workqueues.
507 * We first need to determine which cpu is going to be
508 * used. If no cpu is currently serving this
509 * workqueue, arbitrate using atomic accesses to
510 * wq->single_cpu; otherwise, use the current one.
511 */
512 retry:
513 cpu = wq->single_cpu;
514 arbitrate = cpu == NR_CPUS;
515 if (arbitrate)
516 cpu = req_cpu;
517
518 gcwq = get_gcwq(cpu);
519 spin_lock_irqsave(&gcwq->lock, flags);
520
521 /*
522 * The following cmpxchg() is a full barrier paired
523 * with smp_wmb() in cwq_unbind_single_cpu() and
524 * guarantees that all changes to wq->single_cpu are
525 * visible on the new cpu after this point.
526 */
527 if (arbitrate)
528 cmpxchg(&wq->single_cpu, NR_CPUS, cpu);
529
530 if (unlikely(wq->single_cpu != cpu)) {
531 spin_unlock_irqrestore(&gcwq->lock, flags);
532 goto retry;
533 }
534 }
535
536 /* gcwq determined, get cwq and queue */
537 cwq = get_cwq(gcwq->cpu, wq);
538
539 BUG_ON(!list_empty(&work->entry));
540
541 cwq->nr_in_flight[cwq->work_color]++;
542
543 if (likely(cwq->nr_active < cwq->max_active)) {
544 cwq->nr_active++;
545 worklist = &cwq->worklist;
546 } else
547 worklist = &cwq->delayed_works;
548
549 insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color));
550
551 spin_unlock_irqrestore(&gcwq->lock, flags);
552 }
553
554 /**
555 * queue_work - queue work on a workqueue
556 * @wq: workqueue to use
557 * @work: work to queue
558 *
559 * Returns 0 if @work was already on a queue, non-zero otherwise.
560 *
561 * We queue the work to the CPU on which it was submitted, but if the CPU dies
562 * it can be processed by another CPU.
563 */
564 int queue_work(struct workqueue_struct *wq, struct work_struct *work)
565 {
566 int ret;
567
568 ret = queue_work_on(get_cpu(), wq, work);
569 put_cpu();
570
571 return ret;
572 }
573 EXPORT_SYMBOL_GPL(queue_work);
574
575 /**
576 * queue_work_on - queue work on specific cpu
577 * @cpu: CPU number to execute work on
578 * @wq: workqueue to use
579 * @work: work to queue
580 *
581 * Returns 0 if @work was already on a queue, non-zero otherwise.
582 *
583 * We queue the work to a specific CPU, the caller must ensure it
584 * can't go away.
585 */
586 int
587 queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
588 {
589 int ret = 0;
590
591 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
592 __queue_work(cpu, wq, work);
593 ret = 1;
594 }
595 return ret;
596 }
597 EXPORT_SYMBOL_GPL(queue_work_on);
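
/*
 * Illustrative sketch (not part of the original file): typical driver
 * usage of the queueing interface.  struct my_dev, my_work_fn() and
 * dev->my_wq (assumed to have been created elsewhere with
 * create_workqueue()) are hypothetical names.
 */
#if 0	/* example only */
struct my_dev {
	struct workqueue_struct	*my_wq;
	struct work_struct	irq_work;
};

static void my_work_fn(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, irq_work);

	/* runs in process context and may sleep */
	(void)dev;
}

static void my_dev_init(struct my_dev *dev)
{
	INIT_WORK(&dev->irq_work, my_work_fn);
}

static void my_dev_kick(struct my_dev *dev)
{
	/* returns non-zero only if the work wasn't already pending */
	if (!queue_work(dev->my_wq, &dev->irq_work))
		pr_debug("my_dev: work already pending\n");
}
#endif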
598
599 static void delayed_work_timer_fn(unsigned long __data)
600 {
601 struct delayed_work *dwork = (struct delayed_work *)__data;
602 struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
603
604 __queue_work(smp_processor_id(), cwq->wq, &dwork->work);
605 }
606
607 /**
608 * queue_delayed_work - queue work on a workqueue after delay
609 * @wq: workqueue to use
610 * @dwork: delayable work to queue
611 * @delay: number of jiffies to wait before queueing
612 *
613 * Returns 0 if @work was already on a queue, non-zero otherwise.
614 */
615 int queue_delayed_work(struct workqueue_struct *wq,
616 struct delayed_work *dwork, unsigned long delay)
617 {
618 if (delay == 0)
619 return queue_work(wq, &dwork->work);
620
621 return queue_delayed_work_on(-1, wq, dwork, delay);
622 }
623 EXPORT_SYMBOL_GPL(queue_delayed_work);
624
625 /**
626 * queue_delayed_work_on - queue work on specific CPU after delay
627 * @cpu: CPU number to execute work on
628 * @wq: workqueue to use
629 * @dwork: work to queue
630 * @delay: number of jiffies to wait before queueing
631 *
632 * Returns 0 if @work was already on a queue, non-zero otherwise.
633 */
634 int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
635 struct delayed_work *dwork, unsigned long delay)
636 {
637 int ret = 0;
638 struct timer_list *timer = &dwork->timer;
639 struct work_struct *work = &dwork->work;
640
641 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
642 BUG_ON(timer_pending(timer));
643 BUG_ON(!list_empty(&work->entry));
644
645 timer_stats_timer_set_start_info(&dwork->timer);
646
647 /* This stores cwq for the moment, for the timer_fn */
648 set_wq_data(work, get_cwq(raw_smp_processor_id(), wq), 0);
649 timer->expires = jiffies + delay;
650 timer->data = (unsigned long)dwork;
651 timer->function = delayed_work_timer_fn;
652
653 if (unlikely(cpu >= 0))
654 add_timer_on(timer, cpu);
655 else
656 add_timer(timer);
657 ret = 1;
658 }
659 return ret;
660 }
661 EXPORT_SYMBOL_GPL(queue_delayed_work_on);
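
/*
 * Illustrative sketch (not part of the original file): queueing delayed
 * work.  my_poll_fn() and my_poll_dwork are hypothetical;
 * schedule_delayed_work() below queues on the kernel-global workqueue
 * via queue_delayed_work().
 */
#if 0	/* example only */
static void my_poll_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(my_poll_dwork, my_poll_fn);

static void my_poll_fn(struct work_struct *work)
{
	/* runs roughly one second after my_poll_start() */
}

static void my_poll_start(void)
{
	/* returns 0 if the delayed work was already pending */
	schedule_delayed_work(&my_poll_dwork, HZ);
}
#endif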
662
663 /**
664 * worker_enter_idle - enter idle state
665 * @worker: worker which is entering idle state
666 *
667 * @worker is entering idle state. Update stats and idle timer if
668 * necessary.
669 *
670 * LOCKING:
671 * spin_lock_irq(gcwq->lock).
672 */
673 static void worker_enter_idle(struct worker *worker)
674 {
675 struct global_cwq *gcwq = worker->gcwq;
676
677 BUG_ON(worker->flags & WORKER_IDLE);
678 BUG_ON(!list_empty(&worker->entry) &&
679 (worker->hentry.next || worker->hentry.pprev));
680
681 worker->flags |= WORKER_IDLE;
682 gcwq->nr_idle++;
683
684 /* idle_list is LIFO */
685 list_add(&worker->entry, &gcwq->idle_list);
686
687 if (unlikely(worker->flags & WORKER_ROGUE))
688 wake_up_all(&gcwq->trustee_wait);
689 }
690
691 /**
692 * worker_leave_idle - leave idle state
693 * @worker: worker which is leaving idle state
694 *
695 * @worker is leaving idle state. Update stats.
696 *
697 * LOCKING:
698 * spin_lock_irq(gcwq->lock).
699 */
700 static void worker_leave_idle(struct worker *worker)
701 {
702 struct global_cwq *gcwq = worker->gcwq;
703
704 BUG_ON(!(worker->flags & WORKER_IDLE));
705 worker->flags &= ~WORKER_IDLE;
706 gcwq->nr_idle--;
707 list_del_init(&worker->entry);
708 }
709
710 static struct worker *alloc_worker(void)
711 {
712 struct worker *worker;
713
714 worker = kzalloc(sizeof(*worker), GFP_KERNEL);
715 if (worker) {
716 INIT_LIST_HEAD(&worker->entry);
717 INIT_LIST_HEAD(&worker->scheduled);
718 }
719 return worker;
720 }
721
722 /**
723 * create_worker - create a new workqueue worker
724 * @cwq: cwq the new worker will belong to
725 * @bind: whether to set affinity to the associated cpu or not
726 *
727 * Create a new worker which is bound to @cwq. The returned worker
728 * can be started by calling start_worker() or destroyed using
729 * destroy_worker().
730 *
731 * CONTEXT:
732 * Might sleep. Does GFP_KERNEL allocations.
733 *
734 * RETURNS:
735 * Pointer to the newly created worker.
736 */
737 static struct worker *create_worker(struct cpu_workqueue_struct *cwq, bool bind)
738 {
739 struct global_cwq *gcwq = cwq->gcwq;
740 int id = -1;
741 struct worker *worker = NULL;
742
743 spin_lock_irq(&gcwq->lock);
744 while (ida_get_new(&gcwq->worker_ida, &id)) {
745 spin_unlock_irq(&gcwq->lock);
746 if (!ida_pre_get(&gcwq->worker_ida, GFP_KERNEL))
747 goto fail;
748 spin_lock_irq(&gcwq->lock);
749 }
750 spin_unlock_irq(&gcwq->lock);
751
752 worker = alloc_worker();
753 if (!worker)
754 goto fail;
755
756 worker->gcwq = gcwq;
757 worker->cwq = cwq;
758 worker->id = id;
759
760 worker->task = kthread_create(worker_thread, worker, "kworker/%u:%d",
761 gcwq->cpu, id);
762 if (IS_ERR(worker->task))
763 goto fail;
764
765 /*
766 * A rogue worker will become a regular one if CPU comes
767 * online later on. Make sure every worker has
768 * PF_THREAD_BOUND set.
769 */
770 if (bind)
771 kthread_bind(worker->task, gcwq->cpu);
772 else
773 worker->task->flags |= PF_THREAD_BOUND;
774
775 return worker;
776 fail:
777 if (id >= 0) {
778 spin_lock_irq(&gcwq->lock);
779 ida_remove(&gcwq->worker_ida, id);
780 spin_unlock_irq(&gcwq->lock);
781 }
782 kfree(worker);
783 return NULL;
784 }
785
786 /**
787 * start_worker - start a newly created worker
788 * @worker: worker to start
789 *
790 * Make the gcwq aware of @worker and start it.
791 *
792 * CONTEXT:
793 * spin_lock_irq(gcwq->lock).
794 */
795 static void start_worker(struct worker *worker)
796 {
797 worker->flags |= WORKER_STARTED;
798 worker->gcwq->nr_workers++;
799 worker_enter_idle(worker);
800 wake_up_process(worker->task);
801 }
802
803 /**
804 * destroy_worker - destroy a workqueue worker
805 * @worker: worker to be destroyed
806 *
807 * Destroy @worker and adjust @gcwq stats accordingly.
808 *
809 * CONTEXT:
810 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
811 */
812 static void destroy_worker(struct worker *worker)
813 {
814 struct global_cwq *gcwq = worker->gcwq;
815 int id = worker->id;
816
817 /* sanity check frenzy */
818 BUG_ON(worker->current_work);
819 BUG_ON(!list_empty(&worker->scheduled));
820
821 if (worker->flags & WORKER_STARTED)
822 gcwq->nr_workers--;
823 if (worker->flags & WORKER_IDLE)
824 gcwq->nr_idle--;
825
826 list_del_init(&worker->entry);
827 worker->flags |= WORKER_DIE;
828
829 spin_unlock_irq(&gcwq->lock);
830
831 kthread_stop(worker->task);
832 kfree(worker);
833
834 spin_lock_irq(&gcwq->lock);
835 ida_remove(&gcwq->worker_ida, id);
836 }
837
838 /**
839 * move_linked_works - move linked works to a list
840 * @work: start of series of works to be scheduled
841 * @head: target list to append @work to
842 * @nextp: out parameter for nested worklist walking
843 *
844 * Schedule linked works starting from @work to @head. Work series to
845 * be scheduled starts at @work and includes any consecutive work with
846 * WORK_STRUCT_LINKED set in its predecessor.
847 *
848 * If @nextp is not NULL, it's updated to point to the next work of
849 * the last scheduled work. This allows move_linked_works() to be
850 * nested inside outer list_for_each_entry_safe().
851 *
852 * CONTEXT:
853 * spin_lock_irq(gcwq->lock).
854 */
855 static void move_linked_works(struct work_struct *work, struct list_head *head,
856 struct work_struct **nextp)
857 {
858 struct work_struct *n;
859
860 /*
861 * Linked worklist will always end before the end of the list,
862 * use NULL for list head.
863 */
864 list_for_each_entry_safe_from(work, n, NULL, entry) {
865 list_move_tail(&work->entry, head);
866 if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
867 break;
868 }
869
870 /*
871 * If we're already inside safe list traversal and have moved
872 * multiple works to the scheduled queue, the next position
873 * needs to be updated.
874 */
875 if (nextp)
876 *nextp = n;
877 }
878
879 static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
880 {
881 struct work_struct *work = list_first_entry(&cwq->delayed_works,
882 struct work_struct, entry);
883
884 move_linked_works(work, &cwq->worklist, NULL);
885 cwq->nr_active++;
886 }
887
888 /**
889 * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
890 * @cwq: cwq of interest
891 * @color: color of work which left the queue
892 *
893 * A work either has completed or is removed from pending queue,
894 * decrement nr_in_flight of its cwq and handle workqueue flushing.
895 *
896 * CONTEXT:
897 * spin_lock_irq(gcwq->lock).
898 */
899 static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
900 {
901 /* ignore uncolored works */
902 if (color == WORK_NO_COLOR)
903 return;
904
905 cwq->nr_in_flight[color]--;
906 cwq->nr_active--;
907
908 if (!list_empty(&cwq->delayed_works)) {
909 /* one down, submit a delayed one */
910 if (cwq->nr_active < cwq->max_active)
911 cwq_activate_first_delayed(cwq);
912 } else if (!cwq->nr_active && cwq->wq->flags & WQ_SINGLE_CPU) {
913 /* this was the last work, unbind from single cpu */
914 cwq_unbind_single_cpu(cwq);
915 }
916
917 /* is flush in progress and are we at the flushing tip? */
918 if (likely(cwq->flush_color != color))
919 return;
920
921 /* are there still in-flight works? */
922 if (cwq->nr_in_flight[color])
923 return;
924
925 /* this cwq is done, clear flush_color */
926 cwq->flush_color = -1;
927
928 /*
929 * If this was the last cwq, wake up the first flusher. It
930 * will handle the rest.
931 */
932 if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
933 complete(&cwq->wq->first_flusher->done);
934 }
935
936 /**
937 * process_one_work - process single work
938 * @worker: self
939 * @work: work to process
940 *
941 * Process @work. This function contains all the logic necessary to
942 * process a single work including synchronization against and
943 * interaction with other workers on the same cpu, queueing and
944 * flushing. As long as context requirement is met, any worker can
945 * call this function to process a work.
946 *
947 * CONTEXT:
948 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
949 */
950 static void process_one_work(struct worker *worker, struct work_struct *work)
951 {
952 struct cpu_workqueue_struct *cwq = worker->cwq;
953 struct global_cwq *gcwq = cwq->gcwq;
954 struct hlist_head *bwh = busy_worker_head(gcwq, work);
955 work_func_t f = work->func;
956 int work_color;
957 #ifdef CONFIG_LOCKDEP
958 /*
959 * It is permissible to free the struct work_struct from
960 * inside the function that is called from it, this we need to
961 * take into account for lockdep too. To avoid bogus "held
962 * lock freed" warnings as well as problems when looking into
963 * work->lockdep_map, make a copy and use that here.
964 */
965 struct lockdep_map lockdep_map = work->lockdep_map;
966 #endif
967 /* claim and process */
968 debug_work_deactivate(work);
969 hlist_add_head(&worker->hentry, bwh);
970 worker->current_work = work;
971 worker->current_cwq = cwq;
972 work_color = get_work_color(work);
973 list_del_init(&work->entry);
974
975 spin_unlock_irq(&gcwq->lock);
976
977 BUG_ON(get_wq_data(work) != cwq);
978 work_clear_pending(work);
979 lock_map_acquire(&cwq->wq->lockdep_map);
980 lock_map_acquire(&lockdep_map);
981 f(work);
982 lock_map_release(&lockdep_map);
983 lock_map_release(&cwq->wq->lockdep_map);
984
985 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
986 printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
987 "%s/0x%08x/%d\n",
988 current->comm, preempt_count(), task_pid_nr(current));
989 printk(KERN_ERR " last function: ");
990 print_symbol("%s\n", (unsigned long)f);
991 debug_show_held_locks(current);
992 dump_stack();
993 }
994
995 spin_lock_irq(&gcwq->lock);
996
997 /* we're done with it, release */
998 hlist_del_init(&worker->hentry);
999 worker->current_work = NULL;
1000 worker->current_cwq = NULL;
1001 cwq_dec_nr_in_flight(cwq, work_color);
1002 }
1003
1004 /**
1005 * process_scheduled_works - process scheduled works
1006 * @worker: self
1007 *
1008 * Process all scheduled works. Please note that the scheduled list
1009 * may change while processing a work, so this function repeatedly
1010 * fetches a work from the top and executes it.
1011 *
1012 * CONTEXT:
1013 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1014 * multiple times.
1015 */
1016 static void process_scheduled_works(struct worker *worker)
1017 {
1018 while (!list_empty(&worker->scheduled)) {
1019 struct work_struct *work = list_first_entry(&worker->scheduled,
1020 struct work_struct, entry);
1021 process_one_work(worker, work);
1022 }
1023 }
1024
1025 /**
1026 * worker_thread - the worker thread function
1027 * @__worker: self
1028 *
1029 * The cwq worker thread function.
1030 */
1031 static int worker_thread(void *__worker)
1032 {
1033 struct worker *worker = __worker;
1034 struct global_cwq *gcwq = worker->gcwq;
1035 struct cpu_workqueue_struct *cwq = worker->cwq;
1036
1037 woke_up:
1038 spin_lock_irq(&gcwq->lock);
1039
1040 /* DIE can be set only while we're idle, checking here is enough */
1041 if (worker->flags & WORKER_DIE) {
1042 spin_unlock_irq(&gcwq->lock);
1043 return 0;
1044 }
1045
1046 worker_leave_idle(worker);
1047 recheck:
1048 /*
1049 * ->scheduled list can only be filled while a worker is
1050 * preparing to process a work or actually processing it.
1051 * Make sure nobody diddled with it while I was sleeping.
1052 */
1053 BUG_ON(!list_empty(&worker->scheduled));
1054
1055 while (!list_empty(&cwq->worklist)) {
1056 struct work_struct *work =
1057 list_first_entry(&cwq->worklist,
1058 struct work_struct, entry);
1059
1060 /*
1061 * The following is a rather inefficient way to close
1062 * the race window against cpu hotplug operations. Will
1063 * be replaced soon.
1064 */
1065 if (unlikely(!(worker->flags & WORKER_ROGUE) &&
1066 !cpumask_equal(&worker->task->cpus_allowed,
1067 get_cpu_mask(gcwq->cpu)))) {
1068 spin_unlock_irq(&gcwq->lock);
1069 set_cpus_allowed_ptr(worker->task,
1070 get_cpu_mask(gcwq->cpu));
1071 cpu_relax();
1072 spin_lock_irq(&gcwq->lock);
1073 goto recheck;
1074 }
1075
1076 if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
1077 /* optimization path, not strictly necessary */
1078 process_one_work(worker, work);
1079 if (unlikely(!list_empty(&worker->scheduled)))
1080 process_scheduled_works(worker);
1081 } else {
1082 move_linked_works(work, &worker->scheduled, NULL);
1083 process_scheduled_works(worker);
1084 }
1085 }
1086
1087 /*
1088 * gcwq->lock is held and there's no work to process, sleep.
1089 * Workers are woken up only while holding gcwq->lock, so
1090 * setting the current state before releasing gcwq->lock is
1091 * enough to prevent losing any event.
1092 */
1093 worker_enter_idle(worker);
1094 __set_current_state(TASK_INTERRUPTIBLE);
1095 spin_unlock_irq(&gcwq->lock);
1096 schedule();
1097 goto woke_up;
1098 }
1099
1100 struct wq_barrier {
1101 struct work_struct work;
1102 struct completion done;
1103 };
1104
1105 static void wq_barrier_func(struct work_struct *work)
1106 {
1107 struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
1108 complete(&barr->done);
1109 }
1110
1111 /**
1112 * insert_wq_barrier - insert a barrier work
1113 * @cwq: cwq to insert barrier into
1114 * @barr: wq_barrier to insert
1115 * @target: target work to attach @barr to
1116 * @worker: worker currently executing @target, NULL if @target is not executing
1117 *
1118 * @barr is linked to @target such that @barr is completed only after
1119 * @target finishes execution. Please note that the ordering
1120 * guarantee is observed only with respect to @target and on the local
1121 * cpu.
1122 *
1123 * Currently, a queued barrier can't be canceled. This is because
1124 * try_to_grab_pending() can't determine whether the work to be
1125 * grabbed is at the head of the queue and thus can't clear LINKED
1126 * flag of the previous work while there must be a valid next work
1127 * after a work with LINKED flag set.
1128 *
1129 * Note that when @worker is non-NULL, @target may be modified
1130 * underneath us, so we can't reliably determine cwq from @target.
1131 *
1132 * CONTEXT:
1133 * spin_lock_irq(gcwq->lock).
1134 */
1135 static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
1136 struct wq_barrier *barr,
1137 struct work_struct *target, struct worker *worker)
1138 {
1139 struct list_head *head;
1140 unsigned int linked = 0;
1141
1142 /*
1143 * debugobject calls are safe here even with gcwq->lock locked
1144 * as we know for sure that this will not trigger any of the
1145 * checks and call back into the fixup functions where we
1146 * might deadlock.
1147 */
1148 INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
1149 __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
1150 init_completion(&barr->done);
1151
1152 /*
1153 * If @target is currently being executed, schedule the
1154 * barrier to the worker; otherwise, put it after @target.
1155 */
1156 if (worker)
1157 head = worker->scheduled.next;
1158 else {
1159 unsigned long *bits = work_data_bits(target);
1160
1161 head = target->entry.next;
1162 /* there can already be other linked works, inherit and set */
1163 linked = *bits & WORK_STRUCT_LINKED;
1164 __set_bit(WORK_STRUCT_LINKED_BIT, bits);
1165 }
1166
1167 debug_work_activate(&barr->work);
1168 insert_work(cwq, &barr->work, head,
1169 work_color_to_flags(WORK_NO_COLOR) | linked);
1170 }
1171
1172 /**
1173 * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing
1174 * @wq: workqueue being flushed
1175 * @flush_color: new flush color, < 0 for no-op
1176 * @work_color: new work color, < 0 for no-op
1177 *
1178 * Prepare cwqs for workqueue flushing.
1179 *
1180 * If @flush_color is non-negative, flush_color on all cwqs should be
1181 * -1. If no cwq has in-flight works at the specified color, all
1182 * cwq->flush_color's stay at -1 and %false is returned. If any cwq
1183 * has in-flight works, its cwq->flush_color is set to
1184 * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq
1185 * wakeup logic is armed and %true is returned.
1186 *
1187 * The caller should have initialized @wq->first_flusher prior to
1188 * calling this function with non-negative @flush_color. If
1189 * @flush_color is negative, no flush color update is done and %false
1190 * is returned.
1191 *
1192 * If @work_color is non-negative, all cwqs should have the same
1193 * work_color which is previous to @work_color and all will be
1194 * advanced to @work_color.
1195 *
1196 * CONTEXT:
1197 * mutex_lock(wq->flush_mutex).
1198 *
1199 * RETURNS:
1200 * %true if @flush_color >= 0 and there's something to flush. %false
1201 * otherwise.
1202 */
1203 static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
1204 int flush_color, int work_color)
1205 {
1206 bool wait = false;
1207 unsigned int cpu;
1208
1209 if (flush_color >= 0) {
1210 BUG_ON(atomic_read(&wq->nr_cwqs_to_flush));
1211 atomic_set(&wq->nr_cwqs_to_flush, 1);
1212 }
1213
1214 for_each_possible_cpu(cpu) {
1215 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
1216 struct global_cwq *gcwq = cwq->gcwq;
1217
1218 spin_lock_irq(&gcwq->lock);
1219
1220 if (flush_color >= 0) {
1221 BUG_ON(cwq->flush_color != -1);
1222
1223 if (cwq->nr_in_flight[flush_color]) {
1224 cwq->flush_color = flush_color;
1225 atomic_inc(&wq->nr_cwqs_to_flush);
1226 wait = true;
1227 }
1228 }
1229
1230 if (work_color >= 0) {
1231 BUG_ON(work_color != work_next_color(cwq->work_color));
1232 cwq->work_color = work_color;
1233 }
1234
1235 spin_unlock_irq(&gcwq->lock);
1236 }
1237
1238 if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush))
1239 complete(&wq->first_flusher->done);
1240
1241 return wait;
1242 }
1243
1244 /**
1245 * flush_workqueue - ensure that any scheduled work has run to completion.
1246 * @wq: workqueue to flush
1247 *
1248 * Forces execution of the workqueue and blocks until its completion.
1249 * This is typically used in driver shutdown handlers.
1250 *
1251 * We sleep until all works which were queued on entry have been handled,
1252 * but we are not livelocked by new incoming ones.
1253 */
1254 void flush_workqueue(struct workqueue_struct *wq)
1255 {
1256 struct wq_flusher this_flusher = {
1257 .list = LIST_HEAD_INIT(this_flusher.list),
1258 .flush_color = -1,
1259 .done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
1260 };
1261 int next_color;
1262
1263 lock_map_acquire(&wq->lockdep_map);
1264 lock_map_release(&wq->lockdep_map);
1265
1266 mutex_lock(&wq->flush_mutex);
1267
1268 /*
1269 * Start-to-wait phase
1270 */
1271 next_color = work_next_color(wq->work_color);
1272
1273 if (next_color != wq->flush_color) {
1274 /*
1275 * Color space is not full. The current work_color
1276 * becomes our flush_color and work_color is advanced
1277 * by one.
1278 */
1279 BUG_ON(!list_empty(&wq->flusher_overflow));
1280 this_flusher.flush_color = wq->work_color;
1281 wq->work_color = next_color;
1282
1283 if (!wq->first_flusher) {
1284 /* no flush in progress, become the first flusher */
1285 BUG_ON(wq->flush_color != this_flusher.flush_color);
1286
1287 wq->first_flusher = &this_flusher;
1288
1289 if (!flush_workqueue_prep_cwqs(wq, wq->flush_color,
1290 wq->work_color)) {
1291 /* nothing to flush, done */
1292 wq->flush_color = next_color;
1293 wq->first_flusher = NULL;
1294 goto out_unlock;
1295 }
1296 } else {
1297 /* wait in queue */
1298 BUG_ON(wq->flush_color == this_flusher.flush_color);
1299 list_add_tail(&this_flusher.list, &wq->flusher_queue);
1300 flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
1301 }
1302 } else {
1303 /*
1304 * Oops, color space is full, wait on overflow queue.
1305 * The next flush completion will assign us
1306 * flush_color and transfer to flusher_queue.
1307 */
1308 list_add_tail(&this_flusher.list, &wq->flusher_overflow);
1309 }
1310
1311 mutex_unlock(&wq->flush_mutex);
1312
1313 wait_for_completion(&this_flusher.done);
1314
1315 /*
1316 * Wake-up-and-cascade phase
1317 *
1318 * First flushers are responsible for cascading flushes and
1319 * handling overflow. Non-first flushers can simply return.
1320 */
1321 if (wq->first_flusher != &this_flusher)
1322 return;
1323
1324 mutex_lock(&wq->flush_mutex);
1325
1326 wq->first_flusher = NULL;
1327
1328 BUG_ON(!list_empty(&this_flusher.list));
1329 BUG_ON(wq->flush_color != this_flusher.flush_color);
1330
1331 while (true) {
1332 struct wq_flusher *next, *tmp;
1333
1334 /* complete all the flushers sharing the current flush color */
1335 list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
1336 if (next->flush_color != wq->flush_color)
1337 break;
1338 list_del_init(&next->list);
1339 complete(&next->done);
1340 }
1341
1342 BUG_ON(!list_empty(&wq->flusher_overflow) &&
1343 wq->flush_color != work_next_color(wq->work_color));
1344
1345 /* this flush_color is finished, advance by one */
1346 wq->flush_color = work_next_color(wq->flush_color);
1347
1348 /* one color has been freed, handle overflow queue */
1349 if (!list_empty(&wq->flusher_overflow)) {
1350 /*
1351 * Assign the same color to all overflowed
1352 * flushers, advance work_color and append to
1353 * flusher_queue. This is the start-to-wait
1354 * phase for these overflowed flushers.
1355 */
1356 list_for_each_entry(tmp, &wq->flusher_overflow, list)
1357 tmp->flush_color = wq->work_color;
1358
1359 wq->work_color = work_next_color(wq->work_color);
1360
1361 list_splice_tail_init(&wq->flusher_overflow,
1362 &wq->flusher_queue);
1363 flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
1364 }
1365
1366 if (list_empty(&wq->flusher_queue)) {
1367 BUG_ON(wq->flush_color != wq->work_color);
1368 break;
1369 }
1370
1371 /*
1372 * Need to flush more colors. Make the next flusher
1373 * the new first flusher and arm cwqs.
1374 */
1375 BUG_ON(wq->flush_color == wq->work_color);
1376 BUG_ON(wq->flush_color != next->flush_color);
1377
1378 list_del_init(&next->list);
1379 wq->first_flusher = next;
1380
1381 if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1))
1382 break;
1383
1384 /*
1385 * Meh... this color is already done, clear first
1386 * flusher and repeat cascading.
1387 */
1388 wq->first_flusher = NULL;
1389 }
1390
1391 out_unlock:
1392 mutex_unlock(&wq->flush_mutex);
1393 }
1394 EXPORT_SYMBOL_GPL(flush_workqueue);
1395
1396 /**
1397 * flush_work - block until a work_struct's callback has terminated
1398 * @work: the work which is to be flushed
1399 *
1400 * Returns false if @work has already terminated.
1401 *
1402 * It is expected that, prior to calling flush_work(), the caller has
1403 * arranged for the work to not be requeued, otherwise it doesn't make
1404 * sense to use this function.
1405 */
1406 int flush_work(struct work_struct *work)
1407 {
1408 struct worker *worker = NULL;
1409 struct cpu_workqueue_struct *cwq;
1410 struct global_cwq *gcwq;
1411 struct wq_barrier barr;
1412
1413 might_sleep();
1414 cwq = get_wq_data(work);
1415 if (!cwq)
1416 return 0;
1417 gcwq = cwq->gcwq;
1418
1419 lock_map_acquire(&cwq->wq->lockdep_map);
1420 lock_map_release(&cwq->wq->lockdep_map);
1421
1422 spin_lock_irq(&gcwq->lock);
1423 if (!list_empty(&work->entry)) {
1424 /*
1425 * See the comment near try_to_grab_pending()->smp_rmb().
1426 * If it was re-queued under us we are not going to wait.
1427 */
1428 smp_rmb();
1429 if (unlikely(cwq != get_wq_data(work)))
1430 goto already_gone;
1431 } else {
1432 if (cwq->worker && cwq->worker->current_work == work)
1433 worker = cwq->worker;
1434 if (!worker)
1435 goto already_gone;
1436 }
1437
1438 insert_wq_barrier(cwq, &barr, work, worker);
1439 spin_unlock_irq(&gcwq->lock);
1440 wait_for_completion(&barr.done);
1441 destroy_work_on_stack(&barr.work);
1442 return 1;
1443 already_gone:
1444 spin_unlock_irq(&gcwq->lock);
1445 return 0;
1446 }
1447 EXPORT_SYMBOL_GPL(flush_work);
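
/*
 * Illustrative sketch (not part of the original file): a teardown path
 * using flush_work() to make sure a work item embedded in an object has
 * finished before the object is freed.  struct my_obj is hypothetical.
 */
#if 0	/* example only */
struct my_obj {
	struct work_struct work;
	/* other fields */
};

static void my_obj_free(struct my_obj *obj)
{
	/*
	 * The caller must have prevented further queueing of obj->work;
	 * flush_work() then waits for any in-progress execution.
	 */
	flush_work(&obj->work);
	kfree(obj);
}
#endif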
1448
1449 /*
1450 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
1451 * so this work can't be re-armed in any way.
1452 */
1453 static int try_to_grab_pending(struct work_struct *work)
1454 {
1455 struct global_cwq *gcwq;
1456 struct cpu_workqueue_struct *cwq;
1457 int ret = -1;
1458
1459 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
1460 return 0;
1461
1462 /*
1463 * The queueing is in progress, or it is already queued. Try to
1464 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
1465 */
1466
1467 cwq = get_wq_data(work);
1468 if (!cwq)
1469 return ret;
1470 gcwq = cwq->gcwq;
1471
1472 spin_lock_irq(&gcwq->lock);
1473 if (!list_empty(&work->entry)) {
1474 /*
1475 * This work is queued, but perhaps we locked the wrong cwq.
1476 * In that case we must see the new value after rmb(), see
1477 * insert_work()->wmb().
1478 */
1479 smp_rmb();
1480 if (cwq == get_wq_data(work)) {
1481 debug_work_deactivate(work);
1482 list_del_init(&work->entry);
1483 cwq_dec_nr_in_flight(cwq, get_work_color(work));
1484 ret = 1;
1485 }
1486 }
1487 spin_unlock_irq(&gcwq->lock);
1488
1489 return ret;
1490 }
1491
1492 static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
1493 struct work_struct *work)
1494 {
1495 struct global_cwq *gcwq = cwq->gcwq;
1496 struct wq_barrier barr;
1497 struct worker *worker;
1498
1499 spin_lock_irq(&gcwq->lock);
1500
1501 worker = NULL;
1502 if (unlikely(cwq->worker && cwq->worker->current_work == work)) {
1503 worker = cwq->worker;
1504 insert_wq_barrier(cwq, &barr, work, worker);
1505 }
1506
1507 spin_unlock_irq(&gcwq->lock);
1508
1509 if (unlikely(worker)) {
1510 wait_for_completion(&barr.done);
1511 destroy_work_on_stack(&barr.work);
1512 }
1513 }
1514
1515 static void wait_on_work(struct work_struct *work)
1516 {
1517 struct cpu_workqueue_struct *cwq;
1518 struct workqueue_struct *wq;
1519 int cpu;
1520
1521 might_sleep();
1522
1523 lock_map_acquire(&work->lockdep_map);
1524 lock_map_release(&work->lockdep_map);
1525
1526 cwq = get_wq_data(work);
1527 if (!cwq)
1528 return;
1529
1530 wq = cwq->wq;
1531
1532 for_each_possible_cpu(cpu)
1533 wait_on_cpu_work(get_cwq(cpu, wq), work);
1534 }
1535
1536 static int __cancel_work_timer(struct work_struct *work,
1537 struct timer_list* timer)
1538 {
1539 int ret;
1540
1541 do {
1542 ret = (timer && likely(del_timer(timer)));
1543 if (!ret)
1544 ret = try_to_grab_pending(work);
1545 wait_on_work(work);
1546 } while (unlikely(ret < 0));
1547
1548 clear_wq_data(work);
1549 return ret;
1550 }
1551
1552 /**
1553 * cancel_work_sync - block until a work_struct's callback has terminated
1554 * @work: the work which is to be flushed
1555 *
1556 * Returns true if @work was pending.
1557 *
1558 * cancel_work_sync() will cancel the work if it is queued. If the work's
1559 * callback appears to be running, cancel_work_sync() will block until it
1560 * has completed.
1561 *
1562 * It is possible to use this function if the work re-queues itself. It can
1563 * cancel the work even if it migrates to another workqueue, however in that
1564 * case it only guarantees that work->func() has completed on the last queued
1565 * workqueue.
1566 *
1567 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
1568 * pending, otherwise it goes into a busy-wait loop until the timer expires.
1569 *
1570 * The caller must ensure that workqueue_struct on which this work was last
1571 * queued can't be destroyed before this function returns.
1572 */
1573 int cancel_work_sync(struct work_struct *work)
1574 {
1575 return __cancel_work_timer(work, NULL);
1576 }
1577 EXPORT_SYMBOL_GPL(cancel_work_sync);
1578
1579 /**
1580 * cancel_delayed_work_sync - reliably kill off a delayed work.
1581 * @dwork: the delayed work struct
1582 *
1583 * Returns true if @dwork was pending.
1584 *
1585 * It is possible to use this function if @dwork rearms itself via queue_work()
1586 * or queue_delayed_work(). See also the comment for cancel_work_sync().
1587 */
1588 int cancel_delayed_work_sync(struct delayed_work *dwork)
1589 {
1590 return __cancel_work_timer(&dwork->work, &dwork->timer);
1591 }
1592 EXPORT_SYMBOL(cancel_delayed_work_sync);
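
/*
 * Illustrative sketch (not part of the original file): a self-rearming
 * poller shut down with cancel_delayed_work_sync().  Names are
 * hypothetical.
 */
#if 0	/* example only */
static void my_timer_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(my_timer_dwork, my_timer_fn);

static void my_timer_fn(struct work_struct *work)
{
	/* periodic processing, then rearm */
	schedule_delayed_work(&my_timer_dwork, HZ);
}

static void my_timer_stop(void)
{
	/*
	 * Safe even though my_timer_fn() requeues itself: the pending
	 * timer/work is cancelled and a running callback is waited for.
	 */
	cancel_delayed_work_sync(&my_timer_dwork);
}
#endif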
1593
1594 static struct workqueue_struct *keventd_wq __read_mostly;
1595
1596 /**
1597 * schedule_work - put work task in global workqueue
1598 * @work: job to be done
1599 *
1600 * Returns zero if @work was already on the kernel-global workqueue and
1601 * non-zero otherwise.
1602 *
1603 * This puts a job in the kernel-global workqueue if it was not already
1604 * queued and leaves it in the same position on the kernel-global
1605 * workqueue otherwise.
1606 */
1607 int schedule_work(struct work_struct *work)
1608 {
1609 return queue_work(keventd_wq, work);
1610 }
1611 EXPORT_SYMBOL(schedule_work);
1612
1613 /**
1614 * schedule_work_on - put work task on a specific cpu
1615 * @cpu: cpu to put the work task on
1616 * @work: job to be done
1617 *
1618 * This puts a job on a specific cpu.
1619 */
1620 int schedule_work_on(int cpu, struct work_struct *work)
1621 {
1622 return queue_work_on(cpu, keventd_wq, work);
1623 }
1624 EXPORT_SYMBOL(schedule_work_on);
1625
1626 /**
1627 * schedule_delayed_work - put work task in global workqueue after delay
1628 * @dwork: job to be done
1629 * @delay: number of jiffies to wait or 0 for immediate execution
1630 *
1631 * After waiting for a given time this puts a job in the kernel-global
1632 * workqueue.
1633 */
1634 int schedule_delayed_work(struct delayed_work *dwork,
1635 unsigned long delay)
1636 {
1637 return queue_delayed_work(keventd_wq, dwork, delay);
1638 }
1639 EXPORT_SYMBOL(schedule_delayed_work);
1640
1641 /**
1642 * flush_delayed_work - block until a dwork_struct's callback has terminated
1643 * @dwork: the delayed work which is to be flushed
1644 *
1645 * Any timeout is cancelled, and any pending work is run immediately.
1646 */
1647 void flush_delayed_work(struct delayed_work *dwork)
1648 {
1649 if (del_timer_sync(&dwork->timer)) {
1650 __queue_work(get_cpu(), get_wq_data(&dwork->work)->wq,
1651 &dwork->work);
1652 put_cpu();
1653 }
1654 flush_work(&dwork->work);
1655 }
1656 EXPORT_SYMBOL(flush_delayed_work);
1657
1658 /**
1659 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
1660 * @cpu: cpu to use
1661 * @dwork: job to be done
1662 * @delay: number of jiffies to wait
1663 *
1664 * After waiting for a given time this puts a job in the kernel-global
1665 * workqueue on the specified CPU.
1666 */
1667 int schedule_delayed_work_on(int cpu,
1668 struct delayed_work *dwork, unsigned long delay)
1669 {
1670 return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
1671 }
1672 EXPORT_SYMBOL(schedule_delayed_work_on);
1673
1674 /**
1675 * schedule_on_each_cpu - call a function on each online CPU from keventd
1676 * @func: the function to call
1677 *
1678 * Returns zero on success.
1679 * Returns -ve errno on failure.
1680 *
1681 * schedule_on_each_cpu() is very slow.
1682 */
1683 int schedule_on_each_cpu(work_func_t func)
1684 {
1685 int cpu;
1686 int orig = -1;
1687 struct work_struct *works;
1688
1689 works = alloc_percpu(struct work_struct);
1690 if (!works)
1691 return -ENOMEM;
1692
1693 get_online_cpus();
1694
1695 /*
1696 * When running in keventd don't schedule a work item on
1697 * itself. Can just call directly because the work queue is
1698 * already bound. This also is faster.
1699 */
1700 if (current_is_keventd())
1701 orig = raw_smp_processor_id();
1702
1703 for_each_online_cpu(cpu) {
1704 struct work_struct *work = per_cpu_ptr(works, cpu);
1705
1706 INIT_WORK(work, func);
1707 if (cpu != orig)
1708 schedule_work_on(cpu, work);
1709 }
1710 if (orig >= 0)
1711 func(per_cpu_ptr(works, orig));
1712
1713 for_each_online_cpu(cpu)
1714 flush_work(per_cpu_ptr(works, cpu));
1715
1716 put_online_cpus();
1717 free_percpu(works);
1718 return 0;
1719 }
1720
1721 /**
1722 * flush_scheduled_work - ensure that any scheduled work has run to completion.
1723 *
1724 * Forces execution of the kernel-global workqueue and blocks until its
1725 * completion.
1726 *
1727 * Think twice before calling this function! It's very easy to get into
1728 * trouble if you don't take great care. Either of the following situations
1729 * will lead to deadlock:
1730 *
1731 * One of the work items currently on the workqueue needs to acquire
1732 * a lock held by your code or its caller.
1733 *
1734 * Your code is running in the context of a work routine.
1735 *
1736 * They will be detected by lockdep when they occur, but the first might not
1737 * occur very often. It depends on what work items are on the workqueue and
1738 * what locks they need, which you have no control over.
1739 *
1740 * In most situations flushing the entire workqueue is overkill; you merely
1741 * need to know that a particular work item isn't queued and isn't running.
1742 * In such cases you should use cancel_delayed_work_sync() or
1743 * cancel_work_sync() instead.
1744 */
1745 void flush_scheduled_work(void)
1746 {
1747 flush_workqueue(keventd_wq);
1748 }
1749 EXPORT_SYMBOL(flush_scheduled_work);
1750
1751 /**
1752 * execute_in_process_context - reliably execute the routine with user context
1753 * @fn: the function to execute
1754 * @ew: guaranteed storage for the execute work structure (must
1755 * be available when the work executes)
1756 *
1757 * Executes the function immediately if process context is available,
1758 * otherwise schedules the function for delayed execution.
1759 *
1760 * Returns: 0 - function was executed
1761 * 1 - function was scheduled for execution
1762 */
1763 int execute_in_process_context(work_func_t fn, struct execute_work *ew)
1764 {
1765 if (!in_interrupt()) {
1766 fn(&ew->work);
1767 return 0;
1768 }
1769
1770 INIT_WORK(&ew->work, fn);
1771 schedule_work(&ew->work);
1772
1773 return 1;
1774 }
1775 EXPORT_SYMBOL_GPL(execute_in_process_context);
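
/*
 * Illustrative sketch (not part of the original file): using
 * execute_in_process_context() to release a resource either immediately
 * or, when called from interrupt context, from the kernel-global
 * workqueue.  struct my_res is hypothetical.
 */
#if 0	/* example only */
struct my_res {
	struct execute_work ew;
	/* resources to release */
};

static void my_res_release(struct work_struct *work)
{
	struct my_res *res = container_of(work, struct my_res, ew.work);

	kfree(res);
}

static void my_res_put(struct my_res *res)
{
	/* runs my_res_release() now, or schedules it if in interrupt */
	execute_in_process_context(my_res_release, &res->ew);
}
#endif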
1776
1777 int keventd_up(void)
1778 {
1779 return keventd_wq != NULL;
1780 }
1781
1782 int current_is_keventd(void)
1783 {
1784 struct cpu_workqueue_struct *cwq;
1785 int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
1786 int ret = 0;
1787
1788 BUG_ON(!keventd_wq);
1789
1790 cwq = get_cwq(cpu, keventd_wq);
1791 if (current == cwq->worker->task)
1792 ret = 1;
1793
1794 return ret;
1795
1796 }
1797
1798 static struct cpu_workqueue_struct *alloc_cwqs(void)
1799 {
1800 /*
1801 * cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
1802 * Make sure that the alignment isn't lower than that of
1803 * unsigned long long.
1804 */
1805 const size_t size = sizeof(struct cpu_workqueue_struct);
1806 const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
1807 __alignof__(unsigned long long));
1808 struct cpu_workqueue_struct *cwqs;
1809 #ifndef CONFIG_SMP
1810 void *ptr;
1811
1812 /*
1813 * On UP, percpu allocator doesn't honor alignment parameter
1814 * and simply uses arch-dependent default. Allocate enough
1815 * room to align cwq and put an extra pointer at the end
1816 * pointing back to the originally allocated pointer which
1817 * will be used for free.
1818 *
1819 * FIXME: This really belongs to UP percpu code. Update UP
1820 * percpu code to honor alignment and remove this ugliness.
1821 */
1822 ptr = __alloc_percpu(size + align + sizeof(void *), 1);
1823 cwqs = PTR_ALIGN(ptr, align);
1824 *(void **)per_cpu_ptr(cwqs + 1, 0) = ptr;
1825 #else
1826 /* On SMP, percpu allocator can do it itself */
1827 cwqs = __alloc_percpu(size, align);
1828 #endif
1829 /* just in case, make sure it's actually aligned */
1830 BUG_ON(!IS_ALIGNED((unsigned long)cwqs, align));
1831 return cwqs;
1832 }
1833
1834 static void free_cwqs(struct cpu_workqueue_struct *cwqs)
1835 {
1836 #ifndef CONFIG_SMP
1837 /* on UP, the pointer to free is stored right after the cwq */
1838 if (cwqs)
1839 free_percpu(*(void **)per_cpu_ptr(cwqs + 1, 0));
1840 #else
1841 free_percpu(cwqs);
1842 #endif
1843 }
1844
1845 struct workqueue_struct *__create_workqueue_key(const char *name,
1846 unsigned int flags,
1847 int max_active,
1848 struct lock_class_key *key,
1849 const char *lock_name)
1850 {
1851 struct workqueue_struct *wq;
1852 bool failed = false;
1853 unsigned int cpu;
1854
1855 max_active = clamp_val(max_active, 1, INT_MAX);
1856
1857 wq = kzalloc(sizeof(*wq), GFP_KERNEL);
1858 if (!wq)
1859 goto err;
1860
1861 wq->cpu_wq = alloc_cwqs();
1862 if (!wq->cpu_wq)
1863 goto err;
1864
1865 wq->flags = flags;
1866 wq->saved_max_active = max_active;
1867 mutex_init(&wq->flush_mutex);
1868 atomic_set(&wq->nr_cwqs_to_flush, 0);
1869 INIT_LIST_HEAD(&wq->flusher_queue);
1870 INIT_LIST_HEAD(&wq->flusher_overflow);
1871 wq->single_cpu = NR_CPUS;
1872
1873 wq->name = name;
1874 lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
1875 INIT_LIST_HEAD(&wq->list);
1876
1877 cpu_maps_update_begin();
1878 /*
1879 * We must initialize cwqs for each possible cpu even if we
1880 * are going to call destroy_workqueue() finally. Otherwise
1881 * cpu_up() can hit the uninitialized cwq once we drop the
1882 * lock.
1883 */
1884 for_each_possible_cpu(cpu) {
1885 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
1886 struct global_cwq *gcwq = get_gcwq(cpu);
1887
1888 BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
1889 cwq->gcwq = gcwq;
1890 cwq->wq = wq;
1891 cwq->flush_color = -1;
1892 cwq->max_active = max_active;
1893 INIT_LIST_HEAD(&cwq->worklist);
1894 INIT_LIST_HEAD(&cwq->delayed_works);
1895
1896 if (failed)
1897 continue;
1898 cwq->worker = create_worker(cwq, cpu_online(cpu));
1899 if (cwq->worker)
1900 start_worker(cwq->worker);
1901 else
1902 failed = true;
1903 }
1904
1905 /*
1906 * workqueue_lock protects global freeze state and workqueues
1907 * list. Grab it, set max_active accordingly and add the new
1908 * workqueue to workqueues list.
1909 */
1910 spin_lock(&workqueue_lock);
1911
1912 if (workqueue_freezing && wq->flags & WQ_FREEZEABLE)
1913 for_each_possible_cpu(cpu)
1914 get_cwq(cpu, wq)->max_active = 0;
1915
1916 list_add(&wq->list, &workqueues);
1917
1918 spin_unlock(&workqueue_lock);
1919
1920 cpu_maps_update_done();
1921
1922 if (failed) {
1923 destroy_workqueue(wq);
1924 wq = NULL;
1925 }
1926 return wq;
1927 err:
1928 if (wq) {
1929 free_cwqs(wq->cpu_wq);
1930 kfree(wq);
1931 }
1932 return NULL;
1933 }
1934 EXPORT_SYMBOL_GPL(__create_workqueue_key);
1935
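/*
 * Editor's illustrative sketch, not part of the original file: typical
 * driver-side usage of the creation interface above.  create_workqueue()
 * expands to __create_workqueue_key() via the wrapper macros in
 * linux/workqueue.h; the example_* names are hypothetical.
 */
static struct workqueue_struct *example_wq;
static struct work_struct example_work;

static void example_work_fn(struct work_struct *work)
{
	/* runs in process context on one of example_wq's workers */
}

static int example_setup(void)
{
	example_wq = create_workqueue("example");
	if (!example_wq)
		return -ENOMEM;
	INIT_WORK(&example_work, example_work_fn);
	queue_work(example_wq, &example_work);
	return 0;
}

static void example_teardown(void)
{
	/* destroy_workqueue() below flushes all pending work first */
	destroy_workqueue(example_wq);
}
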
1936 /**
1937 * destroy_workqueue - safely terminate a workqueue
1938 * @wq: target workqueue
1939 *
1940 * Safely destroy a workqueue. All work currently pending will be done first.
1941 */
1942 void destroy_workqueue(struct workqueue_struct *wq)
1943 {
1944 unsigned int cpu;
1945
1946 flush_workqueue(wq);
1947
1948 /*
1949 * The wq list is used to freeze workqueues; remove @wq from the
1950 * list only after flushing is complete, in case a freeze races us.
1951 */
1952 cpu_maps_update_begin();
1953 spin_lock(&workqueue_lock);
1954 list_del(&wq->list);
1955 spin_unlock(&workqueue_lock);
1956 cpu_maps_update_done();
1957
1958 for_each_possible_cpu(cpu) {
1959 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
1960 int i;
1961
1962 if (cwq->worker) {
1963 spin_lock_irq(&cwq->gcwq->lock);
1964 destroy_worker(cwq->worker);
1965 cwq->worker = NULL;
1966 spin_unlock_irq(&cwq->gcwq->lock);
1967 }
1968
1969 for (i = 0; i < WORK_NR_COLORS; i++)
1970 BUG_ON(cwq->nr_in_flight[i]);
1971 BUG_ON(cwq->nr_active);
1972 BUG_ON(!list_empty(&cwq->delayed_works));
1973 }
1974
1975 free_cwqs(wq->cpu_wq);
1976 kfree(wq);
1977 }
1978 EXPORT_SYMBOL_GPL(destroy_workqueue);
1979
1980 /*
1981 * CPU hotplug.
1982 *
1983 * CPU hotplug is implemented by allowing cwqs to be detached from
1984 * CPU, running with unbound workers and allowing them to be
1985 * reattached later if the cpu comes back online. A separate thread
1986 * is created to govern cwqs in such state and is called the trustee.
1987 *
1988 * Trustee states and their descriptions.
1989 *
1990 * START Command state used on startup. On CPU_DOWN_PREPARE, a
1991 * new trustee is started with this state.
1992 *
1993 * IN_CHARGE Once started, trustee will enter this state after
1994 * making all existing workers rogue. DOWN_PREPARE waits
1995 * for trustee to enter this state. After reaching
1996 * IN_CHARGE, trustee tries to drain the pending
1997 * worklist until it's empty and the state is set to
1998 * BUTCHER, or the state is set to RELEASE.
1999 *
2000 * BUTCHER Command state which is set by the cpu callback after
2001 * the cpu has gone down. Once this state is set, the trustee
2002 * knows that there will be no new works on the worklist
2003 * and once the worklist is empty it can proceed to
2004 * killing idle workers.
2005 *
2006 * RELEASE Command state which is set by the cpu callback if the
2007 * cpu down has been canceled or it has come online
2008 * again. After recognizing this state, trustee stops
2009 * trying to drain or butcher and transitions to DONE.
2010 *
2011 * DONE Trustee will enter this state after BUTCHER or RELEASE
2012 * is complete.
2013 *
2014 *          trustee                 CPU                draining
2015 *          took over               down               complete
2016 * START -----------> IN_CHARGE -----------> BUTCHER -----------> DONE
2017 *                        |                     |                  ^
2018 *                        | CPU is back online  v   return workers |
2019 *                         ----------------> RELEASE --------------
2020 */
2021
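/*
 * Editor's illustrative sketch, not part of the original file: the legal
 * transitions from the diagram above, written out as a hypothetical
 * helper.  Nothing below calls this; it only restates the state machine
 * in code form.
 */
static bool example_trustee_transition_valid(unsigned int from,
					     unsigned int to)
{
	switch (from) {
	case TRUSTEE_START:
		return to == TRUSTEE_IN_CHARGE;
	case TRUSTEE_IN_CHARGE:
		/* CPU went down, or CPU came back online */
		return to == TRUSTEE_BUTCHER || to == TRUSTEE_RELEASE;
	case TRUSTEE_BUTCHER:
		/* draining complete, or CPU came back online */
		return to == TRUSTEE_DONE || to == TRUSTEE_RELEASE;
	case TRUSTEE_RELEASE:
		/* workers returned */
		return to == TRUSTEE_DONE;
	default:
		return false;
	}
}
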
2022 /**
2023 * trustee_wait_event_timeout - timed event wait for trustee
2024 * @cond: condition to wait for
2025 * @timeout: timeout in jiffies
2026 *
2027 * wait_event_timeout() for trustee to use. Handles locking and
2028 * checks for RELEASE request.
2029 *
2030 * CONTEXT:
2031 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
2032 * multiple times. To be used by trustee.
2033 *
2034 * RETURNS:
2035 * Positive value indicating the time left if @cond is satisfied,
2036 * 0 if timed out, -1 if canceled.
2037 */
2038 #define trustee_wait_event_timeout(cond, timeout) ({ \
2039 long __ret = (timeout); \
2040 while (!((cond) || (gcwq->trustee_state == TRUSTEE_RELEASE)) && \
2041 __ret) { \
2042 spin_unlock_irq(&gcwq->lock); \
2043 __wait_event_timeout(gcwq->trustee_wait, (cond) || \
2044 (gcwq->trustee_state == TRUSTEE_RELEASE), \
2045 __ret); \
2046 spin_lock_irq(&gcwq->lock); \
2047 } \
2048 gcwq->trustee_state == TRUSTEE_RELEASE ? -1 : (__ret); \
2049 })
2050
2051 /**
2052 * trustee_wait_event - event wait for trustee
2053 * @cond: condition to wait for
2054 *
2055 * wait_event() for trustee to use. Automatically handles locking and
2056 * checks for RELEASE request.
2057 *
2058 * CONTEXT:
2059 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
2060 * multiple times. To be used by trustee.
2061 *
2062 * RETURNS:
2063 * 0 if @cond is satisfied, -1 if canceled.
2064 */
2065 #define trustee_wait_event(cond) ({ \
2066 long __ret1; \
2067 __ret1 = trustee_wait_event_timeout(cond, MAX_SCHEDULE_TIMEOUT);\
2068 __ret1 < 0 ? -1 : 0; \
2069 })
2070
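/*
 * Editor's illustrative sketch, not part of the original file: intended
 * calling pattern for the two wait helpers above.  gcwq->lock must be
 * held on entry; example_trustee_drain() is a hypothetical name.
 */
static void example_trustee_drain(struct global_cwq *gcwq)
{
	/* bounded wait: give workers one cooldown period to go idle */
	if (trustee_wait_event_timeout(gcwq->nr_workers == gcwq->nr_idle,
				       TRUSTEE_COOLDOWN) < 0)
		return;		/* -1: a RELEASE request arrived */

	/* unbounded wait: returns 0 on success, -1 on RELEASE */
	if (trustee_wait_event(gcwq->nr_workers == gcwq->nr_idle) < 0)
		return;
}
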
2071 static int __cpuinit trustee_thread(void *__gcwq)
2072 {
2073 struct global_cwq *gcwq = __gcwq;
2074 struct worker *worker;
2075 struct hlist_node *pos;
2076 int i;
2077
2078 BUG_ON(gcwq->cpu != smp_processor_id());
2079
2080 spin_lock_irq(&gcwq->lock);
2081 /*
2082 * Make all workers rogue. Trustee must be bound to the
2083 * target cpu and can't be cancelled.
2084 */
2085 BUG_ON(gcwq->cpu != smp_processor_id());
2086
2087 list_for_each_entry(worker, &gcwq->idle_list, entry)
2088 worker->flags |= WORKER_ROGUE;
2089
2090 for_each_busy_worker(worker, i, pos, gcwq)
2091 worker->flags |= WORKER_ROGUE;
2092
2093 /*
2094 * We're now in charge. Notify and proceed to drain. We need
2095 * to keep the gcwq running during the whole CPU down
2096 * procedure as other cpu hotunplug callbacks may need to
2097 * flush currently running tasks.
2098 */
2099 gcwq->trustee_state = TRUSTEE_IN_CHARGE;
2100 wake_up_all(&gcwq->trustee_wait);
2101
2102 /*
2103 * The original cpu is in the process of dying and may go away
2104 * anytime now. When that happens, we and all workers would
2105 * be migrated to other cpus. Try draining any remaining work.
2106 * Note that if the gcwq is frozen, there may be frozen works
2107 * in freezeable cwqs. Don't declare completion while frozen.
2108 */
2109 while (gcwq->nr_workers != gcwq->nr_idle ||
2110 gcwq->flags & GCWQ_FREEZING ||
2111 gcwq->trustee_state == TRUSTEE_IN_CHARGE) {
2112 /* give a breather */
2113 if (trustee_wait_event_timeout(false, TRUSTEE_COOLDOWN) < 0)
2114 break;
2115 }
2116
2117 /* notify completion */
2118 gcwq->trustee = NULL;
2119 gcwq->trustee_state = TRUSTEE_DONE;
2120 wake_up_all(&gcwq->trustee_wait);
2121 spin_unlock_irq(&gcwq->lock);
2122 return 0;
2123 }
2124
2125 /**
2126 * wait_trustee_state - wait for trustee to enter the specified state
2127 * @gcwq: gcwq the trustee of interest belongs to
2128 * @state: target state to wait for
2129 *
2130 * Wait for the trustee to reach @state. DONE is also matched.
2131 *
2132 * CONTEXT:
2133 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
2134 * multiple times. To be used by cpu_callback.
2135 */
2136 static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
2137 {
2138 if (!(gcwq->trustee_state == state ||
2139 gcwq->trustee_state == TRUSTEE_DONE)) {
2140 spin_unlock_irq(&gcwq->lock);
2141 __wait_event(gcwq->trustee_wait,
2142 gcwq->trustee_state == state ||
2143 gcwq->trustee_state == TRUSTEE_DONE);
2144 spin_lock_irq(&gcwq->lock);
2145 }
2146 }
2147
2148 static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
2149 unsigned long action,
2150 void *hcpu)
2151 {
2152 unsigned int cpu = (unsigned long)hcpu;
2153 struct global_cwq *gcwq = get_gcwq(cpu);
2154 struct task_struct *new_trustee = NULL;
2155 struct worker *worker;
2156 struct hlist_node *pos;
2157 unsigned long flags;
2158 int i;
2159
2160 action &= ~CPU_TASKS_FROZEN;
2161
2162 switch (action) {
2163 case CPU_DOWN_PREPARE:
2164 new_trustee = kthread_create(trustee_thread, gcwq,
2165 "workqueue_trustee/%d\n", cpu);
2166 if (IS_ERR(new_trustee))
2167 return notifier_from_errno(PTR_ERR(new_trustee));
2168 kthread_bind(new_trustee, cpu);
2169 }
2170
2171 /* some are called w/ irq disabled, don't disturb irq status */
2172 spin_lock_irqsave(&gcwq->lock, flags);
2173
2174 switch (action) {
2175 case CPU_DOWN_PREPARE:
2176 /* initialize trustee and tell it to acquire the gcwq */
2177 BUG_ON(gcwq->trustee || gcwq->trustee_state != TRUSTEE_DONE);
2178 gcwq->trustee = new_trustee;
2179 gcwq->trustee_state = TRUSTEE_START;
2180 wake_up_process(gcwq->trustee);
2181 wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE);
2182 break;
2183
2184 case CPU_POST_DEAD:
2185 gcwq->trustee_state = TRUSTEE_BUTCHER;
2186 break;
2187
2188 case CPU_DOWN_FAILED:
2189 case CPU_ONLINE:
2190 if (gcwq->trustee_state != TRUSTEE_DONE) {
2191 gcwq->trustee_state = TRUSTEE_RELEASE;
2192 wake_up_process(gcwq->trustee);
2193 wait_trustee_state(gcwq, TRUSTEE_DONE);
2194 }
2195
2196 /* clear ROGUE from all workers */
2197 list_for_each_entry(worker, &gcwq->idle_list, entry)
2198 worker->flags &= ~WORKER_ROGUE;
2199
2200 for_each_busy_worker(worker, i, pos, gcwq)
2201 worker->flags &= ~WORKER_ROGUE;
2202 break;
2203 }
2204
2205 spin_unlock_irqrestore(&gcwq->lock, flags);
2206
2207 return notifier_from_errno(0);
2208 }
2209
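/*
 * Editor's illustrative sketch, not part of the original file: the
 * notifier return convention used by the callback above.  A zero errno
 * maps to NOTIFY_OK while a negative errno is encoded so that
 * CPU_DOWN_PREPARE failures veto the hot-unplug; notifier_to_errno()
 * performs the reverse mapping.
 */
static int example_notifier_roundtrip(int err)
{
	int nb = notifier_from_errno(err);

	/* recovers the original negative errno, or 0 for NOTIFY_OK */
	return notifier_to_errno(nb);
}
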
2210 #ifdef CONFIG_SMP
2211
2212 struct work_for_cpu {
2213 struct completion completion;
2214 long (*fn)(void *);
2215 void *arg;
2216 long ret;
2217 };
2218
2219 static int do_work_for_cpu(void *_wfc)
2220 {
2221 struct work_for_cpu *wfc = _wfc;
2222 wfc->ret = wfc->fn(wfc->arg);
2223 complete(&wfc->completion);
2224 return 0;
2225 }
2226
2227 /**
2228 * work_on_cpu - run a function in process context on a particular cpu
2229 * @cpu: the cpu to run on
2230 * @fn: the function to run
2231 * @arg: the function arg
2232 *
2233 * This will return the value @fn returns.
2234 * It is up to the caller to ensure that the cpu doesn't go offline.
2235 * The caller must not hold any locks which would prevent @fn from completing.
2236 */
2237 long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
2238 {
2239 struct task_struct *sub_thread;
2240 struct work_for_cpu wfc = {
2241 .completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
2242 .fn = fn,
2243 .arg = arg,
2244 };
2245
2246 sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
2247 if (IS_ERR(sub_thread))
2248 return PTR_ERR(sub_thread);
2249 kthread_bind(sub_thread, cpu);
2250 wake_up_process(sub_thread);
2251 wait_for_completion(&wfc.completion);
2252 return wfc.ret;
2253 }
2254 EXPORT_SYMBOL_GPL(work_on_cpu);
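
/*
 * Editor's illustrative sketch, not part of the original file: a
 * hypothetical caller of work_on_cpu().  get_online_cpus() keeps the
 * target cpu from going away, as the comment above requires.
 */
static long example_read_on_cpu(void *arg)
{
	/* executes in process context on the requested cpu */
	return (long)smp_processor_id();
}

static long example_query_cpu(unsigned int cpu)
{
	long ret;

	get_online_cpus();
	ret = work_on_cpu(cpu, example_read_on_cpu, NULL);
	put_online_cpus();
	return ret;
}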
2255 #endif /* CONFIG_SMP */
2256
2257 #ifdef CONFIG_FREEZER
2258
2259 /**
2260 * freeze_workqueues_begin - begin freezing workqueues
2261 *
2262 * Start freezing workqueues. After this function returns, all
2263 * freezeable workqueues will queue new works to their delayed_works
2264 * list instead of the cwq worklist.
2265 *
2266 * CONTEXT:
2267 * Grabs and releases workqueue_lock and gcwq->lock's.
2268 */
2269 void freeze_workqueues_begin(void)
2270 {
2271 struct workqueue_struct *wq;
2272 unsigned int cpu;
2273
2274 spin_lock(&workqueue_lock);
2275
2276 BUG_ON(workqueue_freezing);
2277 workqueue_freezing = true;
2278
2279 for_each_possible_cpu(cpu) {
2280 struct global_cwq *gcwq = get_gcwq(cpu);
2281
2282 spin_lock_irq(&gcwq->lock);
2283
2284 BUG_ON(gcwq->flags & GCWQ_FREEZING);
2285 gcwq->flags |= GCWQ_FREEZING;
2286
2287 list_for_each_entry(wq, &workqueues, list) {
2288 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2289
2290 if (wq->flags & WQ_FREEZEABLE)
2291 cwq->max_active = 0;
2292 }
2293
2294 spin_unlock_irq(&gcwq->lock);
2295 }
2296
2297 spin_unlock(&workqueue_lock);
2298 }
2299
2300 /**
2301 * freeze_workqueues_busy - are freezeable workqueues still busy?
2302 *
2303 * Check whether freezing is complete. This function must be called
2304 * between freeze_workqueues_begin() and thaw_workqueues().
2305 *
2306 * CONTEXT:
2307 * Grabs and releases workqueue_lock.
2308 *
2309 * RETURNS:
2310 * %true if some freezeable workqueues are still busy. %false if
2311 * freezing is complete.
2312 */
2313 bool freeze_workqueues_busy(void)
2314 {
2315 struct workqueue_struct *wq;
2316 unsigned int cpu;
2317 bool busy = false;
2318
2319 spin_lock(&workqueue_lock);
2320
2321 BUG_ON(!workqueue_freezing);
2322
2323 for_each_possible_cpu(cpu) {
2324 /*
2325 * nr_active is monotonically decreasing. It's safe
2326 * to peek without lock.
2327 */
2328 list_for_each_entry(wq, &workqueues, list) {
2329 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2330
2331 if (!(wq->flags & WQ_FREEZEABLE))
2332 continue;
2333
2334 BUG_ON(cwq->nr_active < 0);
2335 if (cwq->nr_active) {
2336 busy = true;
2337 goto out_unlock;
2338 }
2339 }
2340 }
2341 out_unlock:
2342 spin_unlock(&workqueue_lock);
2343 return busy;
2344 }
2345
2346 /**
2347 * thaw_workqueues - thaw workqueues
2348 *
2349 * Thaw workqueues. Normal queueing is restored and all collected
2350 * frozen works are transferred to their respective cwq worklists.
2351 *
2352 * CONTEXT:
2353 * Grabs and releases workqueue_lock and gcwq->lock's.
2354 */
2355 void thaw_workqueues(void)
2356 {
2357 struct workqueue_struct *wq;
2358 unsigned int cpu;
2359
2360 spin_lock(&workqueue_lock);
2361
2362 if (!workqueue_freezing)
2363 goto out_unlock;
2364
2365 for_each_possible_cpu(cpu) {
2366 struct global_cwq *gcwq = get_gcwq(cpu);
2367
2368 spin_lock_irq(&gcwq->lock);
2369
2370 BUG_ON(!(gcwq->flags & GCWQ_FREEZING));
2371 gcwq->flags &= ~GCWQ_FREEZING;
2372
2373 list_for_each_entry(wq, &workqueues, list) {
2374 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2375
2376 if (!(wq->flags & WQ_FREEZEABLE))
2377 continue;
2378
2379 /* restore max_active and repopulate worklist */
2380 cwq->max_active = wq->saved_max_active;
2381
2382 while (!list_empty(&cwq->delayed_works) &&
2383 cwq->nr_active < cwq->max_active)
2384 cwq_activate_first_delayed(cwq);
2385
2386 /* perform delayed unbind from single cpu if empty */
2387 if (wq->single_cpu == gcwq->cpu &&
2388 !cwq->nr_active && list_empty(&cwq->delayed_works))
2389 cwq_unbind_single_cpu(cwq);
2390
2391 wake_up_process(cwq->worker->task);
2392 }
2393
2394 spin_unlock_irq(&gcwq->lock);
2395 }
2396
2397 workqueue_freezing = false;
2398 out_unlock:
2399 spin_unlock(&workqueue_lock);
2400 }
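
/*
 * Editor's illustrative sketch, not part of the original file: the
 * begin/busy/thaw protocol the three functions above expect from the
 * suspend freezer (loosely modeled on kernel/power/process.c).  Assumes
 * linux/delay.h for msleep(); the name and 10s budget are hypothetical.
 */
static int example_freeze_wqs(void)
{
	unsigned long end = jiffies + 10 * HZ;

	freeze_workqueues_begin();
	while (freeze_workqueues_busy()) {
		if (time_after(jiffies, end)) {
			/* give up and restore normal queueing */
			thaw_workqueues();
			return -EBUSY;
		}
		msleep(10);
	}
	/* all freezeable cwqs are quiesced; thaw_workqueues() undoes this */
	return 0;
}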
2401 #endif /* CONFIG_FREEZER */
2402
2403 void __init init_workqueues(void)
2404 {
2405 unsigned int cpu;
2406 int i;
2407
2408 hotcpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE);
2409
2410 /* initialize gcwqs */
2411 for_each_possible_cpu(cpu) {
2412 struct global_cwq *gcwq = get_gcwq(cpu);
2413
2414 spin_lock_init(&gcwq->lock);
2415 gcwq->cpu = cpu;
2416
2417 INIT_LIST_HEAD(&gcwq->idle_list);
2418 for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
2419 INIT_HLIST_HEAD(&gcwq->busy_hash[i]);
2420
2421 ida_init(&gcwq->worker_ida);
2422
2423 gcwq->trustee_state = TRUSTEE_DONE;
2424 init_waitqueue_head(&gcwq->trustee_wait);
2425 }
2426
2427 keventd_wq = create_workqueue("events");
2428 BUG_ON(!keventd_wq);
2429 }
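
/*
 * Editor's illustrative sketch, not part of the original file: once
 * init_workqueues() has created keventd_wq, schedule_work() queues onto
 * it (cf. current_is_keventd() earlier in this file).  The example_*
 * names are hypothetical.
 */
static void example_event_fn(struct work_struct *work)
{
	/* executed in process context by an "events" worker */
}

static DECLARE_WORK(example_event, example_event_fn);

static void example_kick(void)
{
	schedule_work(&example_event);
}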