/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *   David Woodhouse <dwmw2@infradead.org>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>
#include <linux/delay.h>
enum {
	/* global_cwq flags */
	GCWQ_FREEZING		= 1 << 3,	/* freeze in progress */

	WORKER_STARTED		= 1 << 0,	/* started */
	WORKER_DIE		= 1 << 1,	/* die die die */
	WORKER_IDLE		= 1 << 2,	/* is idle */
	WORKER_ROGUE		= 1 << 4,	/* not bound to any cpu */

	/* gcwq->trustee_state */
	TRUSTEE_START		= 0,		/* start */
	TRUSTEE_IN_CHARGE	= 1,		/* trustee in charge of gcwq */
	TRUSTEE_BUTCHER		= 2,		/* butcher workers */
	TRUSTEE_RELEASE		= 3,		/* release workers */
	TRUSTEE_DONE		= 4,		/* trustee is done */

	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */
	BUSY_WORKER_HASH_SIZE	= 1 << BUSY_WORKER_HASH_ORDER,
	BUSY_WORKER_HASH_MASK	= BUSY_WORKER_HASH_SIZE - 1,

	TRUSTEE_COOLDOWN	= HZ / 10,	/* for trustee draining */
};
/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Set during initialization and read-only afterwards.
 *
 * L: gcwq->lock protected.  Access with gcwq->lock held.
 *
 * F: wq->flush_mutex protected.
 *
 * W: workqueue_lock protected.
 */
struct worker {
	/* on idle list while idle, on busy hash table while busy */
	union {
		struct list_head	entry;	/* L: while idle */
		struct hlist_node	hentry;	/* L: while busy */
	};

	struct work_struct	*current_work;	/* L: work being processed */
	struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
	struct list_head	scheduled;	/* L: scheduled works */
	struct task_struct	*task;		/* I: worker task */
	struct global_cwq	*gcwq;		/* I: the associated gcwq */
	unsigned int		flags;		/* L: flags */
	int			id;		/* I: worker id */
};
/*
 * Global per-cpu workqueue.
 */
struct global_cwq {
	spinlock_t		lock;		/* the gcwq lock */
	struct list_head	worklist;	/* L: list of pending works */
	unsigned int		cpu;		/* I: the associated cpu */
	unsigned int		flags;		/* L: GCWQ_* flags */

	int			nr_workers;	/* L: total number of workers */
	int			nr_idle;	/* L: currently idle ones */

	/* workers are chained either in the idle_list or busy_hash */
	struct list_head	idle_list;	/* L: list of idle workers */
	struct hlist_head	busy_hash[BUSY_WORKER_HASH_SIZE];
						/* L: hash of busy workers */

	struct ida		worker_ida;	/* L: for worker IDs */

	struct task_struct	*trustee;	/* L: for gcwq shutdown */
	unsigned int		trustee_state;	/* L: trustee state */
	wait_queue_head_t	trustee_wait;	/* trustee wait */
} ____cacheline_aligned_in_smp;
/*
 * The per-CPU workqueue.  The lower WORK_STRUCT_FLAG_BITS of
 * work_struct->data are used for flags and thus cwqs need to be
 * aligned at two's power of the number of flag bits.
 */
struct cpu_workqueue_struct {
	struct global_cwq	*gcwq;		/* I: the associated gcwq */
	struct worker		*worker;
	struct workqueue_struct *wq;		/* I: the owning workqueue */
	int			work_color;	/* L: current color */
	int			flush_color;	/* L: flushing color */
	int			nr_in_flight[WORK_NR_COLORS];
						/* L: nr of in_flight works */
	int			nr_active;	/* L: nr of active works */
	int			max_active;	/* L: max active works */
	struct list_head	delayed_works;	/* L: delayed works */
};
/*
 * Structure used to wait for workqueue flush.
 */
struct wq_flusher {
	struct list_head	list;		/* F: list of flushers */
	int			flush_color;	/* F: flush color waiting for */
	struct completion	done;		/* flush completion */
};
/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	unsigned int		flags;		/* I: WQ_* flags */
	struct cpu_workqueue_struct *cpu_wq;	/* I: cwq's */
	struct list_head	list;		/* W: list of all workqueues */

	struct mutex		flush_mutex;	/* protects wq flushing */
	int			work_color;	/* F: current work color */
	int			flush_color;	/* F: current flush color */
	atomic_t		nr_cwqs_to_flush; /* flush in progress */
	struct wq_flusher	*first_flusher;	/* F: first flusher */
	struct list_head	flusher_queue;	/* F: flush waiters */
	struct list_head	flusher_overflow; /* F: flush overflow list */

	unsigned long		single_cpu;	/* cpu for single cpu wq */

	int			saved_max_active; /* I: saved cwq max_active */
	const char		*name;		/* I: workqueue name */
#ifdef CONFIG_LOCKDEP
	struct lockdep_map	lockdep_map;
#endif
};
#define for_each_busy_worker(worker, i, pos, gcwq)			\
	for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)			\
		hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
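
/*
 * Illustrative usage sketch (editor's addition, not part of the
 * original file): busy workers are walked with the macro above, e.g.
 * the way the trustee later marks them rogue:
 *
 *	struct worker *worker;
 *	struct hlist_node *pos;
 *	int i;
 *
 *	for_each_busy_worker(worker, i, pos, gcwq)
 *		worker->flags |= WORKER_ROGUE;
 */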
#ifdef CONFIG_DEBUG_OBJECTS_WORK

static struct debug_obj_descr work_debug_descr;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int work_fixup_init(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_init(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int work_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The work struct was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
			debug_object_init(work, &work_debug_descr);
			debug_object_activate(work, &work_debug_descr);
			return 0;
		}
		WARN_ON_ONCE(1);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int work_fixup_free(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_free(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr work_debug_descr = {
	.name		= "work_struct",
	.fixup_init	= work_fixup_init,
	.fixup_activate	= work_fixup_activate,
	.fixup_free	= work_fixup_free,
};

static inline void debug_work_activate(struct work_struct *work)
{
	debug_object_activate(work, &work_debug_descr);
}

static inline void debug_work_deactivate(struct work_struct *work)
{
	debug_object_deactivate(work, &work_debug_descr);
}

void __init_work(struct work_struct *work, int onstack)
{
	if (onstack)
		debug_object_init_on_stack(work, &work_debug_descr);
	else
		debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);

void destroy_work_on_stack(struct work_struct *work)
{
	debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);

#else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif
/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);
static bool workqueue_freezing;		/* W: have wqs started freezing? */

static DEFINE_PER_CPU(struct global_cwq, global_cwq);

static int worker_thread(void *__worker);
static struct global_cwq *get_gcwq(unsigned int cpu)
{
	return &per_cpu(global_cwq, cpu);
}

static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
					    struct workqueue_struct *wq)
{
	return per_cpu_ptr(wq->cpu_wq, cpu);
}

static unsigned int work_color_to_flags(int color)
{
	return color << WORK_STRUCT_COLOR_SHIFT;
}

static int get_work_color(struct work_struct *work)
{
	return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
		((1 << WORK_STRUCT_COLOR_BITS) - 1);
}

static int work_next_color(int color)
{
	return (color + 1) % WORK_NR_COLORS;
}
/*
 * Work data points to the cwq while a work is on queue.  Once
 * execution starts, it points to the cpu the work was last on.  This
 * can be distinguished by comparing the data value against
 * PAGE_OFFSET.
 *
 * set_work_{cwq|cpu}() and clear_work_data() can be used to set the
 * cwq, cpu or clear work->data.  These functions should only be
 * called while the work is owned - ie. while the PENDING bit is set.
 *
 * get_work_[g]cwq() can be used to obtain the gcwq or cwq
 * corresponding to a work.  gcwq is available once the work has been
 * queued anywhere after initialization.  cwq is available only from
 * queueing until execution starts.
 */
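
/*
 * Illustrative sketch (editor's addition, not part of the original
 * file): while queued, the upper bits of work->data hold the cwq
 * pointer (always >= PAGE_OFFSET) and the low WORK_STRUCT_FLAG_BITS
 * hold flags; once execution starts set_work_cpu() stores the cpu
 * number instead, e.g.
 *
 *	set_work_cpu(work, 3);
 *	cpu = get_work_data(work) >> WORK_STRUCT_FLAG_BITS;	// == 3
 *
 * which is how get_work_gcwq() finds the gcwq a work last ran on.
 */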
static inline void set_work_data(struct work_struct *work, unsigned long data,
				 unsigned long flags)
{
	BUG_ON(!work_pending(work));
	atomic_long_set(&work->data, data | flags | work_static(work));
}
static void set_work_cwq(struct work_struct *work,
			 struct cpu_workqueue_struct *cwq,
			 unsigned long extra_flags)
{
	set_work_data(work, (unsigned long)cwq,
		      WORK_STRUCT_PENDING | extra_flags);
}

static void set_work_cpu(struct work_struct *work, unsigned int cpu)
{
	set_work_data(work, cpu << WORK_STRUCT_FLAG_BITS, WORK_STRUCT_PENDING);
}

static void clear_work_data(struct work_struct *work)
{
	set_work_data(work, WORK_STRUCT_NO_CPU, 0);
}
static inline unsigned long get_work_data(struct work_struct *work)
{
	return atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK;
}

static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work)
{
	unsigned long data = get_work_data(work);

	return data >= PAGE_OFFSET ? (void *)data : NULL;
}
static struct global_cwq *get_work_gcwq(struct work_struct *work)
{
	unsigned long data = get_work_data(work);
	unsigned int cpu;

	if (data >= PAGE_OFFSET)
		return ((struct cpu_workqueue_struct *)data)->gcwq;

	cpu = data >> WORK_STRUCT_FLAG_BITS;
	if (cpu == NR_CPUS)
		return NULL;

	BUG_ON(cpu >= num_possible_cpus());
	return get_gcwq(cpu);
}
/* Return the first worker.  Safe with preemption disabled */
static struct worker *first_worker(struct global_cwq *gcwq)
{
	if (unlikely(list_empty(&gcwq->idle_list)))
		return NULL;

	return list_first_entry(&gcwq->idle_list, struct worker, entry);
}
/**
 * wake_up_worker - wake up an idle worker
 * @gcwq: gcwq to wake worker for
 *
 * Wake up the first idle worker of @gcwq.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void wake_up_worker(struct global_cwq *gcwq)
{
	struct worker *worker = first_worker(gcwq);

	if (likely(worker))
		wake_up_process(worker->task);
}
/**
 * busy_worker_head - return the busy hash head for a work
 * @gcwq: gcwq of interest
 * @work: work to be hashed
 *
 * Return hash head of @gcwq for @work.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 *
 * RETURNS:
 * Pointer to the hash head.
 */
static struct hlist_head *busy_worker_head(struct global_cwq *gcwq,
					   struct work_struct *work)
{
	const int base_shift = ilog2(sizeof(struct work_struct));
	unsigned long v = (unsigned long)work;

	/* simple shift and fold hash, do we need something better? */
	v >>= base_shift;
	v += v >> BUSY_WORKER_HASH_ORDER;
	v &= BUSY_WORKER_HASH_MASK;

	return &gcwq->busy_hash[v];
}
/**
 * __find_worker_executing_work - find worker which is executing a work
 * @gcwq: gcwq of interest
 * @bwh: hash head as returned by busy_worker_head()
 * @work: work to find worker for
 *
 * Find a worker which is executing @work on @gcwq.  @bwh should be
 * the hash head obtained by calling busy_worker_head() with the same
 * work.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 *
 * RETURNS:
 * Pointer to worker which is executing @work if found, NULL
 * otherwise.
 */
static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
						   struct hlist_head *bwh,
						   struct work_struct *work)
{
	struct worker *worker;
	struct hlist_node *tmp;

	hlist_for_each_entry(worker, tmp, bwh, hentry)
		if (worker->current_work == work)
			return worker;
	return NULL;
}

/**
 * find_worker_executing_work - find worker which is executing a work
 * @gcwq: gcwq of interest
 * @work: work to find worker for
 *
 * Find a worker which is executing @work on @gcwq.  This function is
 * identical to __find_worker_executing_work() except that this
 * function calculates @bwh itself.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 *
 * RETURNS:
 * Pointer to worker which is executing @work if found, NULL
 * otherwise.
 */
static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
						 struct work_struct *work)
{
	return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work),
					    work);
}
/**
 * insert_work - insert a work into gcwq
 * @cwq: cwq @work belongs to
 * @work: work to insert
 * @head: insertion point
 * @extra_flags: extra WORK_STRUCT_* flags to set
 *
 * Insert @work which belongs to @cwq into @gcwq after @head.
 * @extra_flags is or'd to work_struct flags.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, struct list_head *head,
			unsigned int extra_flags)
{
	/* we own @work, set data and link */
	set_work_cwq(work, cwq, extra_flags);

	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();

	list_add_tail(&work->entry, head);
	wake_up_worker(cwq->gcwq);
}
/**
 * cwq_unbind_single_cpu - unbind cwq from single cpu workqueue processing
 * @cwq: cwq to unbind
 *
 * Try to unbind @cwq from single cpu workqueue processing.  If
 * @cwq->wq is frozen, unbind is delayed till the workqueue is thawed.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void cwq_unbind_single_cpu(struct cpu_workqueue_struct *cwq)
{
	struct workqueue_struct *wq = cwq->wq;
	struct global_cwq *gcwq = cwq->gcwq;

	BUG_ON(wq->single_cpu != gcwq->cpu);
	/*
	 * Unbind from workqueue if @cwq is not frozen.  If frozen,
	 * thaw_workqueues() will either restart processing on this
	 * cpu or unbind if empty.  This keeps works queued while
	 * frozen fully ordered and flushable.
	 */
	if (likely(!(gcwq->flags & GCWQ_FREEZING))) {
		smp_wmb();	/* paired with cmpxchg() in __queue_work() */
		wq->single_cpu = NR_CPUS;
	}
}
static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
			 struct work_struct *work)
{
	struct global_cwq *gcwq;
	struct cpu_workqueue_struct *cwq;
	struct list_head *worklist;
	unsigned long flags;

	debug_work_activate(work);

	/*
	 * Determine gcwq to use.  SINGLE_CPU is inherently
	 * NON_REENTRANT, so test it first.
	 */
	if (!(wq->flags & WQ_SINGLE_CPU)) {
		struct global_cwq *last_gcwq;

		/*
		 * It's multi cpu.  If @wq is non-reentrant and @work
		 * was previously on a different cpu, it might still
		 * be running there, in which case the work needs to
		 * be queued on that cpu to guarantee non-reentrance.
		 */
		gcwq = get_gcwq(cpu);
		if (wq->flags & WQ_NON_REENTRANT &&
		    (last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) {
			struct worker *worker;

			spin_lock_irqsave(&last_gcwq->lock, flags);

			worker = find_worker_executing_work(last_gcwq, work);

			if (worker && worker->current_cwq->wq == wq)
				gcwq = last_gcwq;
			else {
				/* meh... not running there, queue here */
				spin_unlock_irqrestore(&last_gcwq->lock, flags);
				spin_lock_irqsave(&gcwq->lock, flags);
			}
		} else
			spin_lock_irqsave(&gcwq->lock, flags);
	} else {
		unsigned int req_cpu = cpu;
		bool arbitrate;

		/*
		 * It's a bit more complex for single cpu workqueues.
		 * We first need to determine which cpu is going to be
		 * used.  If no cpu is currently serving this
		 * workqueue, arbitrate using atomic accesses to
		 * wq->single_cpu; otherwise, use the current one.
		 */
	retry:
		cpu = wq->single_cpu;
		arbitrate = cpu == NR_CPUS;
		if (arbitrate)
			cpu = req_cpu;

		gcwq = get_gcwq(cpu);
		spin_lock_irqsave(&gcwq->lock, flags);

		/*
		 * The following cmpxchg() is a full barrier paired
		 * with smp_wmb() in cwq_unbind_single_cpu() and
		 * guarantees that all changes to wq->st_* fields are
		 * visible on the new cpu after this point.
		 */
		if (arbitrate)
			cmpxchg(&wq->single_cpu, NR_CPUS, cpu);

		if (unlikely(wq->single_cpu != cpu)) {
			spin_unlock_irqrestore(&gcwq->lock, flags);
			goto retry;
		}
	}

	/* gcwq determined, get cwq and queue */
	cwq = get_cwq(gcwq->cpu, wq);

	BUG_ON(!list_empty(&work->entry));

	cwq->nr_in_flight[cwq->work_color]++;

	if (likely(cwq->nr_active < cwq->max_active)) {
		cwq->nr_active++;
		worklist = &gcwq->worklist;
	} else
		worklist = &cwq->delayed_works;

	insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color));

	spin_unlock_irqrestore(&gcwq->lock, flags);
}
/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret;

	ret = queue_work_on(get_cpu(), wq, work);
	put_cpu();

	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU, the caller must ensure it
 * can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		__queue_work(cpu, wq, work);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);
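
/*
 * Illustrative usage sketch (editor's addition; my_wq/my_handler are
 * made-up names):
 *
 *	static void my_handler(struct work_struct *work) { ... }
 *	static DECLARE_WORK(my_work, my_handler);
 *
 *	queue_work(my_wq, &my_work);
 *
 * A second queue_work() before my_handler() has started returns 0
 * because WORK_STRUCT_PENDING_BIT is still set.
 */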
static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work);

	__queue_work(smp_processor_id(), cwq->wq, &dwork->work);
}
/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		struct global_cwq *gcwq = get_work_gcwq(work);
		unsigned int lcpu = gcwq ? gcwq->cpu : raw_smp_processor_id();

		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		timer_stats_timer_set_start_info(&dwork->timer);
		/*
		 * This stores cwq for the moment, for the timer_fn.
		 * Note that the work's gcwq is preserved to allow
		 * reentrance detection for delayed works.
		 */
		set_work_cwq(work, get_cwq(lcpu, wq), 0);
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
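
/*
 * Illustrative usage sketch (editor's addition; my_wq/my_dwork are
 * made-up names):
 *
 *	static DECLARE_DELAYED_WORK(my_dwork, my_handler);
 *
 *	queue_delayed_work(my_wq, &my_dwork, HZ);	// run ~1s from now
 *	...
 *	cancel_delayed_work_sync(&my_dwork);		// on teardown
 */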
/**
 * worker_enter_idle - enter idle state
 * @worker: worker which is entering idle state
 *
 * @worker is entering idle state.  Update stats and idle timer if
 * necessary.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void worker_enter_idle(struct worker *worker)
{
	struct global_cwq *gcwq = worker->gcwq;

	BUG_ON(worker->flags & WORKER_IDLE);
	BUG_ON(!list_empty(&worker->entry) &&
	       (worker->hentry.next || worker->hentry.pprev));

	worker->flags |= WORKER_IDLE;
	gcwq->nr_idle++;

	/* idle_list is LIFO */
	list_add(&worker->entry, &gcwq->idle_list);

	if (unlikely(worker->flags & WORKER_ROGUE))
		wake_up_all(&gcwq->trustee_wait);
}
/**
 * worker_leave_idle - leave idle state
 * @worker: worker which is leaving idle state
 *
 * @worker is leaving idle state.  Update stats.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void worker_leave_idle(struct worker *worker)
{
	struct global_cwq *gcwq = worker->gcwq;

	BUG_ON(!(worker->flags & WORKER_IDLE));
	worker->flags &= ~WORKER_IDLE;
	gcwq->nr_idle--;
	list_del_init(&worker->entry);
}
static struct worker *alloc_worker(void)
{
	struct worker *worker;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (worker) {
		INIT_LIST_HEAD(&worker->entry);
		INIT_LIST_HEAD(&worker->scheduled);
	}
	return worker;
}
/**
 * create_worker - create a new workqueue worker
 * @gcwq: gcwq the new worker will belong to
 * @bind: whether to set affinity to @cpu or not
 *
 * Create a new worker which is bound to @gcwq.  The returned worker
 * can be started by calling start_worker() or destroyed using
 * destroy_worker().
 *
 * CONTEXT:
 * Might sleep.  Does GFP_KERNEL allocations.
 *
 * RETURNS:
 * Pointer to the newly created worker.
 */
static struct worker *create_worker(struct global_cwq *gcwq, bool bind)
{
	int id = -1;
	struct worker *worker = NULL;

	spin_lock_irq(&gcwq->lock);
	while (ida_get_new(&gcwq->worker_ida, &id)) {
		spin_unlock_irq(&gcwq->lock);
		if (!ida_pre_get(&gcwq->worker_ida, GFP_KERNEL))
			goto fail;
		spin_lock_irq(&gcwq->lock);
	}
	spin_unlock_irq(&gcwq->lock);

	worker = alloc_worker();
	if (!worker)
		goto fail;

	worker->gcwq = gcwq;
	worker->id = id;

	worker->task = kthread_create(worker_thread, worker, "kworker/%u:%d",
				      gcwq->cpu, id);
	if (IS_ERR(worker->task))
		goto fail;

	/*
	 * A rogue worker will become a regular one if CPU comes
	 * online later on.  Make sure every worker has
	 * PF_THREAD_BOUND set.
	 */
	if (bind)
		kthread_bind(worker->task, gcwq->cpu);
	else
		worker->task->flags |= PF_THREAD_BOUND;

	return worker;
fail:
	if (id >= 0) {
		spin_lock_irq(&gcwq->lock);
		ida_remove(&gcwq->worker_ida, id);
		spin_unlock_irq(&gcwq->lock);
	}
	kfree(worker);
	return NULL;
}
/**
 * start_worker - start a newly created worker
 * @worker: worker to start
 *
 * Make the gcwq aware of @worker and start it.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void start_worker(struct worker *worker)
{
	worker->flags |= WORKER_STARTED;
	worker->gcwq->nr_workers++;
	worker_enter_idle(worker);
	wake_up_process(worker->task);
}
/**
 * destroy_worker - destroy a workqueue worker
 * @worker: worker to be destroyed
 *
 * Destroy @worker and adjust @gcwq stats accordingly.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
 */
static void destroy_worker(struct worker *worker)
{
	struct global_cwq *gcwq = worker->gcwq;
	int id = worker->id;

	/* sanity check frenzy */
	BUG_ON(worker->current_work);
	BUG_ON(!list_empty(&worker->scheduled));

	if (worker->flags & WORKER_STARTED)
		gcwq->nr_workers--;
	if (worker->flags & WORKER_IDLE)
		gcwq->nr_idle--;

	list_del_init(&worker->entry);
	worker->flags |= WORKER_DIE;

	spin_unlock_irq(&gcwq->lock);

	kthread_stop(worker->task);
	kfree(worker);

	spin_lock_irq(&gcwq->lock);
	ida_remove(&gcwq->worker_ida, id);
}
/**
 * move_linked_works - move linked works to a list
 * @work: start of series of works to be scheduled
 * @head: target list to append @work to
 * @nextp: out parameter for nested worklist walking
 *
 * Schedule linked works starting from @work to @head.  Work series to
 * be scheduled starts at @work and includes any consecutive work with
 * WORK_STRUCT_LINKED set in its predecessor.
 *
 * If @nextp is not NULL, it's updated to point to the next work of
 * the last scheduled work.  This allows move_linked_works() to be
 * nested inside outer list_for_each_entry_safe().
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void move_linked_works(struct work_struct *work, struct list_head *head,
			      struct work_struct **nextp)
{
	struct work_struct *n;

	/*
	 * Linked worklist will always end before the end of the list,
	 * use NULL for list head.
	 */
	list_for_each_entry_safe_from(work, n, NULL, entry) {
		list_move_tail(&work->entry, head);
		if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
			break;
	}

	/*
	 * If we're already inside safe list traversal and have moved
	 * multiple works to the scheduled queue, the next position
	 * needs to be updated.
	 */
	if (nextp)
		*nextp = n;
}
static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
{
	struct work_struct *work = list_first_entry(&cwq->delayed_works,
						    struct work_struct, entry);

	move_linked_works(work, &cwq->gcwq->worklist, NULL);
	cwq->nr_active++;
}
/**
 * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
 * @cwq: cwq of interest
 * @color: color of work which left the queue
 *
 * A work either has completed or is removed from pending queue,
 * decrement nr_in_flight of its cwq and handle workqueue flushing.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
{
	/* ignore uncolored works */
	if (color == WORK_NO_COLOR)
		return;

	cwq->nr_in_flight[color]--;
	cwq->nr_active--;

	if (!list_empty(&cwq->delayed_works)) {
		/* one down, submit a delayed one */
		if (cwq->nr_active < cwq->max_active)
			cwq_activate_first_delayed(cwq);
	} else if (!cwq->nr_active && cwq->wq->flags & WQ_SINGLE_CPU) {
		/* this was the last work, unbind from single cpu */
		cwq_unbind_single_cpu(cwq);
	}

	/* is flush in progress and are we at the flushing tip? */
	if (likely(cwq->flush_color != color))
		return;

	/* are there still in-flight works? */
	if (cwq->nr_in_flight[color])
		return;

	/* this cwq is done, clear flush_color */
	cwq->flush_color = -1;

	/*
	 * If this was the last cwq, wake up the first flusher.  It
	 * will handle the rest.
	 */
	if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
		complete(&cwq->wq->first_flusher->done);
}
/**
 * process_one_work - process single work
 * @work: work to process
 *
 * Process @work.  This function contains all the logics necessary to
 * process a single work including synchronization against and
 * interaction with other workers on the same cpu, queueing and
 * flushing.  As long as context requirement is met, any worker can
 * call this function to process a work.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
 */
static void process_one_work(struct worker *worker, struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq = get_work_cwq(work);
	struct global_cwq *gcwq = cwq->gcwq;
	struct hlist_head *bwh = busy_worker_head(gcwq, work);
	work_func_t f = work->func;
	int work_color;
	struct worker *collision;
#ifdef CONFIG_LOCKDEP
	/*
	 * It is permissible to free the struct work_struct from
	 * inside the function that is called from it, this we need to
	 * take into account for lockdep too.  To avoid bogus "held
	 * lock freed" warnings as well as problems when looking into
	 * work->lockdep_map, make a copy and use that here.
	 */
	struct lockdep_map lockdep_map = work->lockdep_map;
#endif
	/*
	 * A single work shouldn't be executed concurrently by
	 * multiple workers on a single cpu.  Check whether anyone is
	 * already processing the work.  If so, defer the work to the
	 * currently executing one.
	 */
	collision = __find_worker_executing_work(gcwq, bwh, work);
	if (unlikely(collision)) {
		move_linked_works(work, &collision->scheduled, NULL);
		return;
	}

	/* claim and process */
	debug_work_deactivate(work);
	hlist_add_head(&worker->hentry, bwh);
	worker->current_work = work;
	worker->current_cwq = cwq;
	work_color = get_work_color(work);

	/* record the current cpu number in the work data and dequeue */
	set_work_cpu(work, gcwq->cpu);
	list_del_init(&work->entry);

	spin_unlock_irq(&gcwq->lock);

	work_clear_pending(work);
	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_acquire(&lockdep_map);
	f(work);
	lock_map_release(&lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
		printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
		       "%s/0x%08x/%d\n",
		       current->comm, preempt_count(), task_pid_nr(current));
		printk(KERN_ERR "    last function: ");
		print_symbol("%s\n", (unsigned long)f);
		debug_show_held_locks(current);
		dump_stack();
	}

	spin_lock_irq(&gcwq->lock);

	/* we're done with it, release */
	hlist_del_init(&worker->hentry);
	worker->current_work = NULL;
	worker->current_cwq = NULL;
	cwq_dec_nr_in_flight(cwq, work_color);
}
/**
 * process_scheduled_works - process scheduled works
 *
 * Process all scheduled works.  Please note that the scheduled list
 * may change while processing a work, so this function repeatedly
 * fetches a work from the top and executes it.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times.
 */
static void process_scheduled_works(struct worker *worker)
{
	while (!list_empty(&worker->scheduled)) {
		struct work_struct *work = list_first_entry(&worker->scheduled,
						struct work_struct, entry);
		process_one_work(worker, work);
	}
}
/**
 * worker_thread - the worker thread function
 *
 * The cwq worker thread function.
 */
static int worker_thread(void *__worker)
{
	struct worker *worker = __worker;
	struct global_cwq *gcwq = worker->gcwq;

woke_up:
	spin_lock_irq(&gcwq->lock);

	/* DIE can be set only while we're idle, checking here is enough */
	if (worker->flags & WORKER_DIE) {
		spin_unlock_irq(&gcwq->lock);
		return 0;
	}

	worker_leave_idle(worker);

	/*
	 * ->scheduled list can only be filled while a worker is
	 * preparing to process a work or actually processing it.
	 * Make sure nobody diddled with it while I was sleeping.
	 */
	BUG_ON(!list_empty(&worker->scheduled));

	while (!list_empty(&gcwq->worklist)) {
		struct work_struct *work =
			list_first_entry(&gcwq->worklist,
					 struct work_struct, entry);

		/*
		 * The following is a rather inefficient way to close
		 * race window against cpu hotplug operations.  Will
		 * be replaced soon.
		 */
		if (unlikely(!(worker->flags & WORKER_ROGUE) &&
			     !cpumask_equal(&worker->task->cpus_allowed,
					    get_cpu_mask(gcwq->cpu)))) {
			spin_unlock_irq(&gcwq->lock);
			set_cpus_allowed_ptr(worker->task,
					     get_cpu_mask(gcwq->cpu));
			spin_lock_irq(&gcwq->lock);
			continue;
		}

		if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
			/* optimization path, not strictly necessary */
			process_one_work(worker, work);
			if (unlikely(!list_empty(&worker->scheduled)))
				process_scheduled_works(worker);
		} else {
			move_linked_works(work, &worker->scheduled, NULL);
			process_scheduled_works(worker);
		}
	}

	/*
	 * gcwq->lock is held and there's no work to process, sleep.
	 * Workers are woken up only while holding gcwq->lock, so
	 * setting the current state before releasing gcwq->lock is
	 * enough to prevent losing any event.
	 */
	worker_enter_idle(worker);
	__set_current_state(TASK_INTERRUPTIBLE);
	spin_unlock_irq(&gcwq->lock);
	schedule();
	goto woke_up;
}
struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}
/**
 * insert_wq_barrier - insert a barrier work
 * @cwq: cwq to insert barrier into
 * @barr: wq_barrier to insert
 * @target: target work to attach @barr to
 * @worker: worker currently executing @target, NULL if @target is not executing
 *
 * @barr is linked to @target such that @barr is completed only after
 * @target finishes execution.  Please note that the ordering
 * guarantee is observed only with respect to @target and on the local
 * cpu.
 *
 * Currently, a queued barrier can't be canceled.  This is because
 * try_to_grab_pending() can't determine whether the work to be
 * grabbed is at the head of the queue and thus can't clear LINKED
 * flag of the previous work while there must be a valid next work
 * after a work with LINKED flag set.
 *
 * Note that when @worker is non-NULL, @target may be modified
 * underneath us, so we can't reliably determine cwq from @target.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
			      struct wq_barrier *barr,
			      struct work_struct *target, struct worker *worker)
{
	struct list_head *head;
	unsigned int linked = 0;

	/*
	 * debugobject calls are safe here even with gcwq->lock locked
	 * as we know for sure that this will not trigger any of the
	 * checks and call back into the fixup functions where we
	 * might deadlock.
	 */
	INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
	init_completion(&barr->done);

	/*
	 * If @target is currently being executed, schedule the
	 * barrier to the worker; otherwise, put it after @target.
	 */
	if (worker)
		head = worker->scheduled.next;
	else {
		unsigned long *bits = work_data_bits(target);

		head = target->entry.next;
		/* there can already be other linked works, inherit and set */
		linked = *bits & WORK_STRUCT_LINKED;
		__set_bit(WORK_STRUCT_LINKED_BIT, bits);
	}

	debug_work_activate(&barr->work);
	insert_work(cwq, &barr->work, head,
		    work_color_to_flags(WORK_NO_COLOR) | linked);
}
/**
 * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing
 * @wq: workqueue being flushed
 * @flush_color: new flush color, < 0 for no-op
 * @work_color: new work color, < 0 for no-op
 *
 * Prepare cwqs for workqueue flushing.
 *
 * If @flush_color is non-negative, flush_color on all cwqs should be
 * -1.  If no cwq has in-flight commands at the specified color, all
 * cwq->flush_color's stay at -1 and %false is returned.  If any cwq
 * has in flight commands, its cwq->flush_color is set to
 * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq
 * wakeup logic is armed and %true is returned.
 *
 * The caller should have initialized @wq->first_flusher prior to
 * calling this function with non-negative @flush_color.  If
 * @flush_color is negative, no flush color update is done and %false
 * is returned.
 *
 * If @work_color is non-negative, all cwqs should have the same
 * work_color which is previous to @work_color and all will be
 * advanced to @work_color.
 *
 * CONTEXT:
 * mutex_lock(wq->flush_mutex).
 *
 * RETURNS:
 * %true if @flush_color >= 0 and there's something to flush.  %false
 * otherwise.
 */
static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
				      int flush_color, int work_color)
{
	bool wait = false;
	unsigned int cpu;

	if (flush_color >= 0) {
		BUG_ON(atomic_read(&wq->nr_cwqs_to_flush));
		atomic_set(&wq->nr_cwqs_to_flush, 1);
	}

	for_each_possible_cpu(cpu) {
		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
		struct global_cwq *gcwq = cwq->gcwq;

		spin_lock_irq(&gcwq->lock);

		if (flush_color >= 0) {
			BUG_ON(cwq->flush_color != -1);

			if (cwq->nr_in_flight[flush_color]) {
				cwq->flush_color = flush_color;
				atomic_inc(&wq->nr_cwqs_to_flush);
				wait = true;
			}
		}

		if (work_color >= 0) {
			BUG_ON(work_color != work_next_color(cwq->work_color));
			cwq->work_color = work_color;
		}

		spin_unlock_irq(&gcwq->lock);
	}

	if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush))
		complete(&wq->first_flusher->done);

	return wait;
}
/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
	struct wq_flusher this_flusher = {
		.list = LIST_HEAD_INIT(this_flusher.list),
		.flush_color = -1,
		.done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
	};
	int next_color;

	lock_map_acquire(&wq->lockdep_map);
	lock_map_release(&wq->lockdep_map);

	mutex_lock(&wq->flush_mutex);

	/*
	 * Start-to-wait phase
	 */
	next_color = work_next_color(wq->work_color);

	if (next_color != wq->flush_color) {
		/*
		 * Color space is not full.  The current work_color
		 * becomes our flush_color and work_color is advanced
		 * by one.
		 */
		BUG_ON(!list_empty(&wq->flusher_overflow));
		this_flusher.flush_color = wq->work_color;
		wq->work_color = next_color;

		if (!wq->first_flusher) {
			/* no flush in progress, become the first flusher */
			BUG_ON(wq->flush_color != this_flusher.flush_color);

			wq->first_flusher = &this_flusher;

			if (!flush_workqueue_prep_cwqs(wq, wq->flush_color,
						       wq->work_color)) {
				/* nothing to flush, done */
				wq->flush_color = next_color;
				wq->first_flusher = NULL;
				goto out_unlock;
			}
		} else {
			/* wait in queue */
			BUG_ON(wq->flush_color == this_flusher.flush_color);
			list_add_tail(&this_flusher.list, &wq->flusher_queue);
			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
		}
	} else {
		/*
		 * Oops, color space is full, wait on overflow queue.
		 * The next flush completion will assign us
		 * flush_color and transfer to flusher_queue.
		 */
		list_add_tail(&this_flusher.list, &wq->flusher_overflow);
	}

	mutex_unlock(&wq->flush_mutex);

	wait_for_completion(&this_flusher.done);

	/*
	 * Wake-up-and-cascade phase
	 *
	 * First flushers are responsible for cascading flushes and
	 * handling overflow.  Non-first flushers can simply return.
	 */
	if (wq->first_flusher != &this_flusher)
		return;

	mutex_lock(&wq->flush_mutex);

	wq->first_flusher = NULL;

	BUG_ON(!list_empty(&this_flusher.list));
	BUG_ON(wq->flush_color != this_flusher.flush_color);

	while (true) {
		struct wq_flusher *next, *tmp;

		/* complete all the flushers sharing the current flush color */
		list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
			if (next->flush_color != wq->flush_color)
				break;
			list_del_init(&next->list);
			complete(&next->done);
		}

		BUG_ON(!list_empty(&wq->flusher_overflow) &&
		       wq->flush_color != work_next_color(wq->work_color));

		/* this flush_color is finished, advance by one */
		wq->flush_color = work_next_color(wq->flush_color);

		/* one color has been freed, handle overflow queue */
		if (!list_empty(&wq->flusher_overflow)) {
			/*
			 * Assign the same color to all overflowed
			 * flushers, advance work_color and append to
			 * flusher_queue.  This is the start-to-wait
			 * phase for these overflowed flushers.
			 */
			list_for_each_entry(tmp, &wq->flusher_overflow, list)
				tmp->flush_color = wq->work_color;

			wq->work_color = work_next_color(wq->work_color);

			list_splice_tail_init(&wq->flusher_overflow,
					      &wq->flusher_queue);
			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
		}

		if (list_empty(&wq->flusher_queue)) {
			BUG_ON(wq->flush_color != wq->work_color);
			break;
		}

		/*
		 * Need to flush more colors.  Make the next flusher
		 * the new first flusher and arm cwqs.
		 */
		BUG_ON(wq->flush_color == wq->work_color);
		BUG_ON(wq->flush_color != next->flush_color);

		list_del_init(&next->list);
		wq->first_flusher = next;

		if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1))
			break;

		/*
		 * Meh... this color is already done, clear first
		 * flusher and repeat cascading.
		 */
		wq->first_flusher = NULL;
	}

out_unlock:
	mutex_unlock(&wq->flush_mutex);
}
EXPORT_SYMBOL_GPL(flush_workqueue);
/**
 * flush_work - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns false if @work has already terminated.
 *
 * It is expected that, prior to calling flush_work(), the caller has
 * arranged for the work to not be requeued, otherwise it doesn't make
 * sense to use this function.
 */
int flush_work(struct work_struct *work)
{
	struct worker *worker = NULL;
	struct global_cwq *gcwq;
	struct cpu_workqueue_struct *cwq;
	struct wq_barrier barr;

	might_sleep();
	gcwq = get_work_gcwq(work);
	if (!gcwq)
		return 0;

	spin_lock_irq(&gcwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * See the comment near try_to_grab_pending()->smp_rmb().
		 * If it was re-queued to a different gcwq under us, we
		 * are not going to wait.
		 */
		smp_rmb();
		cwq = get_work_cwq(work);
		if (unlikely(!cwq || gcwq != cwq->gcwq))
			goto already_gone;
	} else {
		worker = find_worker_executing_work(gcwq, work);
		if (!worker)
			goto already_gone;
		cwq = worker->current_cwq;
	}

	insert_wq_barrier(cwq, &barr, work, worker);
	spin_unlock_irq(&gcwq->lock);

	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	wait_for_completion(&barr.done);
	destroy_work_on_stack(&barr.work);
	return 1;
already_gone:
	spin_unlock_irq(&gcwq->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(flush_work);
/*
 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
 * so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
	struct global_cwq *gcwq;
	int ret = -1;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
		return 0;

	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */
	gcwq = get_work_gcwq(work);
	if (!gcwq)
		return ret;

	spin_lock_irq(&gcwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * This work is queued, but perhaps we locked the wrong gcwq.
		 * In that case we must see the new value after rmb(), see
		 * insert_work()->wmb().
		 */
		smp_rmb();
		if (gcwq == get_work_gcwq(work)) {
			debug_work_deactivate(work);
			list_del_init(&work->entry);
			cwq_dec_nr_in_flight(get_work_cwq(work),
					     get_work_color(work));
			ret = 1;
		}
	}
	spin_unlock_irq(&gcwq->lock);

	return ret;
}
static void wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
{
	struct wq_barrier barr;
	struct worker *worker;

	spin_lock_irq(&gcwq->lock);

	worker = find_worker_executing_work(gcwq, work);
	if (unlikely(worker))
		insert_wq_barrier(worker->current_cwq, &barr, work, worker);

	spin_unlock_irq(&gcwq->lock);

	if (unlikely(worker)) {
		wait_for_completion(&barr.done);
		destroy_work_on_stack(&barr.work);
	}
}
static void wait_on_work(struct work_struct *work)
{
	int cpu;

	might_sleep();

	lock_map_acquire(&work->lockdep_map);
	lock_map_release(&work->lockdep_map);

	for_each_possible_cpu(cpu)
		wait_on_cpu_work(get_gcwq(cpu), work);
}
static int __cancel_work_timer(struct work_struct *work,
				struct timer_list *timer)
{
	int ret;

	do {
		ret = (timer && likely(del_timer(timer)));
		if (!ret)
			ret = try_to_grab_pending(work);
		wait_on_work(work);
	} while (unlikely(ret < 0));

	clear_work_data(work);
	return ret;
}
/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns true if @work was pending.
 *
 * cancel_work_sync() will cancel the work if it is queued.  If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself.  It can
 * cancel the work even if it migrates to another workqueue, however in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that workqueue_struct on which this work was last
 * queued can't be destroyed before this function returns.
 */
int cancel_work_sync(struct work_struct *work)
{
	return __cancel_work_timer(work, NULL);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);
/**
 * cancel_delayed_work_sync - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * Returns true if @dwork was pending.
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work().  See also the comment for cancel_work_sync().
 */
int cancel_delayed_work_sync(struct delayed_work *dwork)
{
	return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);
static struct workqueue_struct *keventd_wq __read_mostly;
/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns zero if @work was already on the kernel-global workqueue and
 * non-zero otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 */
int schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu
 */
int schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work_on);
/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int schedule_delayed_work(struct delayed_work *dwork,
					unsigned long delay)
{
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);
/**
 * flush_delayed_work - block until a dwork_struct's callback has terminated
 * @dwork: the delayed work which is to be flushed
 *
 * Any timeout is cancelled, and any pending work is run immediately.
 */
void flush_delayed_work(struct delayed_work *dwork)
{
	if (del_timer_sync(&dwork->timer)) {
		__queue_work(get_cpu(), get_work_cwq(&dwork->work)->wq,
			     &dwork->work);
		put_cpu();
	}
	flush_work(&dwork->work);
}
EXPORT_SYMBOL(flush_delayed_work);
/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);
/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	int orig = -1;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	get_online_cpus();

	/*
	 * When running in keventd don't schedule a work item on
	 * itself.  Can just call directly because the work queue is
	 * already bound.  This also is faster.
	 */
	if (current_is_keventd())
		orig = raw_smp_processor_id();

	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		if (cpu != orig)
			schedule_work_on(cpu, work);
	}
	if (orig >= 0)
		func(per_cpu_ptr(works, orig));

	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(works, cpu));

	put_online_cpus();
	free_percpu(works);
	return 0;
}
/**
 * flush_scheduled_work - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the kernel-global workqueue and blocks until its
 * completion.
 *
 * Think twice before calling this function!  It's very easy to get into
 * trouble if you don't take great care.  Either of the following situations
 * will lead to deadlock:
 *
 *	One of the work items currently on the workqueue needs to acquire
 *	a lock held by your code or its caller.
 *
 *	Your code is running in the context of a work routine.
 *
 * They will be detected by lockdep when they occur, but the first might not
 * occur very often.  It depends on what work items are on the workqueue and
 * what locks they need, which you have no control over.
 *
 * In most situations flushing the entire workqueue is overkill; you merely
 * need to know that a particular work item isn't queued and isn't running.
 * In such cases you should use cancel_delayed_work_sync() or
 * cancel_work_sync() instead.
 */
void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);
/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
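
/*
 * Illustrative usage sketch (editor's addition; my_release/obj are
 * made-up names): a teardown routine that may run in either process
 * or interrupt context can use this to stay safe:
 *
 *	static void my_release(struct work_struct *work) { ... }
 *
 *	execute_in_process_context(my_release, &obj->ew);
 */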
int keventd_up(void)
{
	return keventd_wq != NULL;
}
int current_is_keventd(void)
{
	bool found = false;
	unsigned int cpu;

	/*
	 * There no longer is one-to-one relation between worker and
	 * work queue and a worker task might be unbound from its cpu
	 * if the cpu was offlined.  Match all busy workers.  This
	 * function will go away once dynamic pool is implemented.
	 */
	for_each_possible_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);
		struct worker *worker;
		struct hlist_node *pos;
		unsigned long flags;
		int i;

		spin_lock_irqsave(&gcwq->lock, flags);

		for_each_busy_worker(worker, i, pos, gcwq) {
			if (worker->task == current) {
				found = true;
				break;
			}
		}

		spin_unlock_irqrestore(&gcwq->lock, flags);
		if (found)
			break;
	}

	return found;
}
static struct cpu_workqueue_struct *alloc_cwqs(void)
{
	/*
	 * cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
	 * Make sure that the alignment isn't lower than that of
	 * unsigned long long.
	 */
	const size_t size = sizeof(struct cpu_workqueue_struct);
	const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
				   __alignof__(unsigned long long));
	struct cpu_workqueue_struct *cwqs;
#ifndef CONFIG_SMP
	void *ptr;

	/*
	 * On UP, percpu allocator doesn't honor alignment parameter
	 * and simply uses arch-dependent default.  Allocate enough
	 * room to align cwq and put an extra pointer at the end
	 * pointing back to the originally allocated pointer which
	 * will be used for free.
	 *
	 * FIXME: This really belongs to UP percpu code.  Update UP
	 * percpu code to honor alignment and remove this ugliness.
	 */
	ptr = __alloc_percpu(size + align + sizeof(void *), 1);
	cwqs = PTR_ALIGN(ptr, align);
	*(void **)per_cpu_ptr(cwqs + 1, 0) = ptr;
#else
	/* On SMP, percpu allocator can do it itself */
	cwqs = __alloc_percpu(size, align);
#endif
	/* just in case, make sure it's actually aligned */
	BUG_ON(!IS_ALIGNED((unsigned long)cwqs, align));
	return cwqs;
}

static void free_cwqs(struct cpu_workqueue_struct *cwqs)
{
#ifndef CONFIG_SMP
	/* on UP, the pointer to free is stored right after the cwq */
	free_percpu(*(void **)per_cpu_ptr(cwqs + 1, 0));
#else
	free_percpu(cwqs);
#endif
}
struct workqueue_struct *__create_workqueue_key(const char *name,
						unsigned int flags,
						int max_active,
						struct lock_class_key *key,
						const char *lock_name)
{
	struct workqueue_struct *wq;
	bool failed = false;
	unsigned int cpu;

	max_active = clamp_val(max_active, 1, INT_MAX);

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		goto err;

	wq->cpu_wq = alloc_cwqs();
	if (!wq->cpu_wq)
		goto err;

	wq->flags = flags;
	wq->saved_max_active = max_active;
	mutex_init(&wq->flush_mutex);
	atomic_set(&wq->nr_cwqs_to_flush, 0);
	INIT_LIST_HEAD(&wq->flusher_queue);
	INIT_LIST_HEAD(&wq->flusher_overflow);
	wq->single_cpu = NR_CPUS;

	wq->name = name;
	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
	INIT_LIST_HEAD(&wq->list);

	cpu_maps_update_begin();
	/*
	 * We must initialize cwqs for each possible cpu even if we
	 * are going to call destroy_workqueue() finally. Otherwise
	 * cpu_up() can hit the uninitialized cwq once we drop the
	 * lock.
	 */
	for_each_possible_cpu(cpu) {
		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
		struct global_cwq *gcwq = get_gcwq(cpu);

		BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
		cwq->gcwq = gcwq;
		cwq->wq = wq;
		cwq->flush_color = -1;
		cwq->max_active = max_active;
		INIT_LIST_HEAD(&cwq->delayed_works);

		if (failed)
			continue;
		cwq->worker = create_worker(gcwq, cpu_online(cpu));
		if (cwq->worker)
			start_worker(cwq->worker);
		else
			failed = true;
	}

	/*
	 * workqueue_lock protects global freeze state and workqueues
	 * list.  Grab it, set max_active accordingly and add the new
	 * workqueue to workqueues list.
	 */
	spin_lock(&workqueue_lock);

	if (workqueue_freezing && wq->flags & WQ_FREEZEABLE)
		for_each_possible_cpu(cpu)
			get_cwq(cpu, wq)->max_active = 0;

	list_add(&wq->list, &workqueues);

	spin_unlock(&workqueue_lock);

	cpu_maps_update_done();

	if (failed) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
err:
	if (wq) {
		free_cwqs(wq->cpu_wq);
		kfree(wq);
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(__create_workqueue_key);
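
/*
 * Illustrative usage sketch (editor's addition): callers normally
 * reach __create_workqueue_key() through the wrappers in
 * linux/workqueue.h, e.g.
 *
 *	struct workqueue_struct *wq = create_workqueue("my_wq");
 *	...
 *	destroy_workqueue(wq);
 */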
/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue.  All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	unsigned int cpu;

	flush_workqueue(wq);

	/*
	 * wq list is used to freeze wq, remove from list after
	 * flushing is complete in case freeze races us.
	 */
	cpu_maps_update_begin();
	spin_lock(&workqueue_lock);
	list_del(&wq->list);
	spin_unlock(&workqueue_lock);
	cpu_maps_update_done();

	for_each_possible_cpu(cpu) {
		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
		struct global_cwq *gcwq = cwq->gcwq;
		int i;

		if (cwq->worker) {
		retry:
			spin_lock_irq(&gcwq->lock);
			/*
			 * Worker can only be destroyed while idle.
			 * Wait till it becomes idle.  This is ugly
			 * and prone to starvation.  It will go away
			 * once dynamic worker pool is implemented.
			 */
			if (!(cwq->worker->flags & WORKER_IDLE)) {
				spin_unlock_irq(&gcwq->lock);
				msleep(100);
				goto retry;
			}
			destroy_worker(cwq->worker);
			cwq->worker = NULL;
			spin_unlock_irq(&gcwq->lock);
		}

		for (i = 0; i < WORK_NR_COLORS; i++)
			BUG_ON(cwq->nr_in_flight[i]);
		BUG_ON(cwq->nr_active);
		BUG_ON(!list_empty(&cwq->delayed_works));
	}

	free_cwqs(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);
/*
 * CPU hotplug is implemented by allowing cwqs to be detached from
 * CPU, running with unbound workers and allowing them to be
 * reattached later if the cpu comes back online.  A separate thread
 * is created to govern cwqs in such state and is called the trustee.
 *
 * Trustee states and their descriptions.
 *
 * START	Command state used on startup.  On CPU_DOWN_PREPARE, a
 *		new trustee is started with this state.
 *
 * IN_CHARGE	Once started, trustee will enter this state after
 *		making all existing workers rogue.  DOWN_PREPARE waits
 *		for trustee to enter this state.  After reaching
 *		IN_CHARGE, trustee tries to execute the pending
 *		worklist until it's empty and the state is set to
 *		BUTCHER, or the state is set to RELEASE.
 *
 * BUTCHER	Command state which is set by the cpu callback after
 *		the cpu has gone down.  Once this state is set trustee
 *		knows that there will be no new works on the worklist
 *		and once the worklist is empty it can proceed to
 *		killing idle workers.
 *
 * RELEASE	Command state which is set by the cpu callback if the
 *		cpu down has been canceled or it has come online
 *		again.  After recognizing this state, trustee stops
 *		trying to drain or butcher and transits to DONE.
 *
 * DONE		Trustee will enter this state after BUTCHER or RELEASE
 *		is requested.
 *
 *          trustee                 CPU                draining
 *         took over                down               complete
 * START -----------> IN_CHARGE -----------> BUTCHER -----------> DONE
 *                        |                     |                   ^
 *                        | CPU is back online  v   return workers  |
 *                         ----------------> RELEASE ---------------
 */
/**
 * trustee_wait_event_timeout - timed event wait for trustee
 * @cond: condition to wait for
 * @timeout: timeout in jiffies
 *
 * wait_event_timeout() for trustee to use.  Handles locking and
 * checks for RELEASE request.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times.  To be used by trustee.
 *
 * RETURNS:
 * Positive indicating left time if @cond is satisfied, 0 if timed
 * out, -1 if canceled.
 */
#define trustee_wait_event_timeout(cond, timeout) ({			\
	long __ret = (timeout);						\
	while (!((cond) || (gcwq->trustee_state == TRUSTEE_RELEASE)) &&\
	       __ret) {							\
		spin_unlock_irq(&gcwq->lock);				\
		__wait_event_timeout(gcwq->trustee_wait, (cond) ||	\
			(gcwq->trustee_state == TRUSTEE_RELEASE),	\
			__ret);						\
		spin_lock_irq(&gcwq->lock);				\
	}								\
	gcwq->trustee_state == TRUSTEE_RELEASE ? -1 : (__ret);		\
})
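/*
 * Illustrative use (hypothetical caller, mirrors trustee_thread() below):
 * both trustee wait macros read the local variable 'gcwq' from the calling
 * scope and expect gcwq->lock to be held on entry, so they are only usable
 * from trustee context.
 *
 *	spin_lock_irq(&gcwq->lock);
 *	rc = trustee_wait_event_timeout(gcwq->nr_workers == gcwq->nr_idle,
 *					TRUSTEE_COOLDOWN);
 *	if (rc < 0)
 *		...	(RELEASE was requested, stop draining)
 *	spin_unlock_irq(&gcwq->lock);
 */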
/**
 * trustee_wait_event - event wait for trustee
 * @cond: condition to wait for
 *
 * wait_event() for trustee to use.  Automatically handles locking and
 * checks for CANCEL request.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times.  To be used by trustee.
 *
 * RETURNS:
 * 0 if @cond is satisfied, -1 if canceled.
 */
#define trustee_wait_event(cond) ({					\
	long __ret1;							\
	__ret1 = trustee_wait_event_timeout(cond, MAX_SCHEDULE_TIMEOUT);\
	__ret1 < 0 ? -1 : 0;						\
})
static int __cpuinit trustee_thread(void *__gcwq)
{
	struct global_cwq *gcwq = __gcwq;
	struct worker *worker;
	struct hlist_node *pos;
	int i;

	BUG_ON(gcwq->cpu != smp_processor_id());

	spin_lock_irq(&gcwq->lock);
	/*
	 * Make all workers rogue.  Trustee must be bound to the
	 * target cpu and can't be cancelled.
	 */
	BUG_ON(gcwq->cpu != smp_processor_id());

	list_for_each_entry(worker, &gcwq->idle_list, entry)
		worker->flags |= WORKER_ROGUE;

	for_each_busy_worker(worker, i, pos, gcwq)
		worker->flags |= WORKER_ROGUE;

	/*
	 * We're now in charge.  Notify and proceed to drain.  We need
	 * to keep the gcwq running during the whole CPU down
	 * procedure as other cpu hotunplug callbacks may need to
	 * flush currently running tasks.
	 */
	gcwq->trustee_state = TRUSTEE_IN_CHARGE;
	wake_up_all(&gcwq->trustee_wait);

	/*
	 * The original cpu is in the process of dying and may go away
	 * anytime now.  When that happens, we and all workers would
	 * be migrated to other cpus.  Try draining any left work.
	 * Note that if the gcwq is frozen, there may be frozen works
	 * in freezeable cwqs.  Don't declare completion while frozen.
	 */
	while (gcwq->nr_workers != gcwq->nr_idle ||
	       gcwq->flags & GCWQ_FREEZING ||
	       gcwq->trustee_state == TRUSTEE_IN_CHARGE) {
		/* give a breather */
		if (trustee_wait_event_timeout(false, TRUSTEE_COOLDOWN) < 0)
			break;
	}

	/* notify completion */
	gcwq->trustee = NULL;
	gcwq->trustee_state = TRUSTEE_DONE;
	wake_up_all(&gcwq->trustee_wait);
	spin_unlock_irq(&gcwq->lock);
	return 0;
}
/**
 * wait_trustee_state - wait for trustee to enter the specified state
 * @gcwq: gcwq the trustee of interest belongs to
 * @state: target state to wait for
 *
 * Wait for the trustee to reach @state.  DONE is already matched.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times.  To be used by cpu_callback.
 */
static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
{
	if (!(gcwq->trustee_state == state ||
	      gcwq->trustee_state == TRUSTEE_DONE)) {
		spin_unlock_irq(&gcwq->lock);
		__wait_event(gcwq->trustee_wait,
			     gcwq->trustee_state == state ||
			     gcwq->trustee_state == TRUSTEE_DONE);
		spin_lock_irq(&gcwq->lock);
	}
}
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
					    unsigned long action,
					    void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct global_cwq *gcwq = get_gcwq(cpu);
	struct task_struct *new_trustee = NULL;
	struct worker *worker;
	struct hlist_node *pos;
	unsigned long flags;
	int i;

	action &= ~CPU_TASKS_FROZEN;

	switch (action) {
	case CPU_DOWN_PREPARE:
		new_trustee = kthread_create(trustee_thread, gcwq,
					     "workqueue_trustee/%d\n", cpu);
		if (IS_ERR(new_trustee))
			return notifier_from_errno(PTR_ERR(new_trustee));
		kthread_bind(new_trustee, cpu);
	}

	/* some are called w/ irq disabled, don't disturb irq status */
	spin_lock_irqsave(&gcwq->lock, flags);

	switch (action) {
	case CPU_DOWN_PREPARE:
		/* initialize trustee and tell it to acquire the gcwq */
		BUG_ON(gcwq->trustee || gcwq->trustee_state != TRUSTEE_DONE);
		gcwq->trustee = new_trustee;
		gcwq->trustee_state = TRUSTEE_START;
		wake_up_process(gcwq->trustee);
		wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE);
		break;

	case CPU_POST_DEAD:
		gcwq->trustee_state = TRUSTEE_BUTCHER;
		break;

	case CPU_DOWN_FAILED:
	case CPU_ONLINE:
		if (gcwq->trustee_state != TRUSTEE_DONE) {
			gcwq->trustee_state = TRUSTEE_RELEASE;
			wake_up_process(gcwq->trustee);
			wait_trustee_state(gcwq, TRUSTEE_DONE);
		}

		/* clear ROGUE from all workers */
		list_for_each_entry(worker, &gcwq->idle_list, entry)
			worker->flags &= ~WORKER_ROGUE;

		for_each_busy_worker(worker, i, pos, gcwq)
			worker->flags &= ~WORKER_ROGUE;
		break;
	}

	spin_unlock_irqrestore(&gcwq->lock, flags);

	return notifier_from_errno(0);
}
#ifdef CONFIG_SMP

struct work_for_cpu {
	struct completion completion;
	long (*fn)(void *);
	void *arg;
	long ret;
};

static int do_work_for_cpu(void *_wfc)
{
	struct work_for_cpu *wfc = _wfc;

	wfc->ret = wfc->fn(wfc->arg);
	complete(&wfc->completion);
	return 0;
}
/**
 * work_on_cpu - run a function in user context on a particular cpu
 * @cpu: the cpu to run on
 * @fn: the function to run
 * @arg: the function arg
 *
 * This will return the value @fn returns.
 * It is up to the caller to ensure that the cpu doesn't go offline.
 * The caller must not hold any locks which would prevent @fn from completing.
 */
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
	struct task_struct *sub_thread;
	struct work_for_cpu wfc = {
		.completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
		.fn = fn,
		.arg = arg,
	};

	sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
	if (IS_ERR(sub_thread))
		return PTR_ERR(sub_thread);
	kthread_bind(sub_thread, cpu);
	wake_up_process(sub_thread);
	wait_for_completion(&wfc.completion);

	return wfc.ret;
}
EXPORT_SYMBOL_GPL(work_on_cpu);
#endif /* CONFIG_SMP */
#ifdef CONFIG_FREEZER

/**
 * freeze_workqueues_begin - begin freezing workqueues
 *
 * Start freezing workqueues.  After this function returns, all
 * freezeable workqueues will queue new works to their delayed_works
 * list instead of gcwq->worklist.
 *
 * CONTEXT:
 * Grabs and releases workqueue_lock and gcwq->lock's.
 */
void freeze_workqueues_begin(void)
{
	struct workqueue_struct *wq;
	unsigned int cpu;

	spin_lock(&workqueue_lock);

	BUG_ON(workqueue_freezing);
	workqueue_freezing = true;

	for_each_possible_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);

		spin_lock_irq(&gcwq->lock);

		BUG_ON(gcwq->flags & GCWQ_FREEZING);
		gcwq->flags |= GCWQ_FREEZING;

		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

			if (wq->flags & WQ_FREEZEABLE)
				cwq->max_active = 0;
		}

		spin_unlock_irq(&gcwq->lock);
	}

	spin_unlock(&workqueue_lock);
}
/**
 * freeze_workqueues_busy - are freezeable workqueues still busy?
 *
 * Check whether freezing is complete.  This function must be called
 * between freeze_workqueues_begin() and thaw_workqueues().
 *
 * CONTEXT:
 * Grabs and releases workqueue_lock.
 *
 * RETURNS:
 * %true if some freezeable workqueues are still busy.  %false if
 * freezing is complete.
 */
bool freeze_workqueues_busy(void)
{
	struct workqueue_struct *wq;
	unsigned int cpu;
	bool busy = false;

	spin_lock(&workqueue_lock);

	BUG_ON(!workqueue_freezing);

	for_each_possible_cpu(cpu) {
		/*
		 * nr_active is monotonically decreasing.  It's safe
		 * to peek without lock.
		 */
		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

			if (!(wq->flags & WQ_FREEZEABLE))
				continue;

			BUG_ON(cwq->nr_active < 0);
			if (cwq->nr_active) {
				busy = true;
				goto out_unlock;
			}
		}
	}
out_unlock:
	spin_unlock(&workqueue_lock);
	return busy;
}
/**
 * thaw_workqueues - thaw workqueues
 *
 * Thaw workqueues.  Normal queueing is restored and all collected
 * frozen works are transferred to their respective gcwq worklists.
 *
 * CONTEXT:
 * Grabs and releases workqueue_lock and gcwq->lock's.
 */
void thaw_workqueues(void)
{
	struct workqueue_struct *wq;
	unsigned int cpu;

	spin_lock(&workqueue_lock);

	if (!workqueue_freezing)
		goto out_unlock;

	for_each_possible_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);

		spin_lock_irq(&gcwq->lock);

		BUG_ON(!(gcwq->flags & GCWQ_FREEZING));
		gcwq->flags &= ~GCWQ_FREEZING;

		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

			if (!(wq->flags & WQ_FREEZEABLE))
				continue;

			/* restore max_active and repopulate worklist */
			cwq->max_active = wq->saved_max_active;

			while (!list_empty(&cwq->delayed_works) &&
			       cwq->nr_active < cwq->max_active)
				cwq_activate_first_delayed(cwq);

			/* perform delayed unbind from single cpu if empty */
			if (wq->single_cpu == gcwq->cpu &&
			    !cwq->nr_active && list_empty(&cwq->delayed_works))
				cwq_unbind_single_cpu(cwq);

			wake_up_process(cwq->worker->task);
		}

		spin_unlock_irq(&gcwq->lock);
	}

	workqueue_freezing = false;
out_unlock:
	spin_unlock(&workqueue_lock);
}
#endif /* CONFIG_FREEZER */
void __init init_workqueues(void)
{
	unsigned int cpu;
	int i;

	/*
	 * The pointer part of work->data is either pointing to the
	 * cwq or contains the cpu number the work ran last on.  Make
	 * sure cpu number won't overflow into kernel pointer area so
	 * that they can be distinguished.
	 */
	BUILD_BUG_ON(NR_CPUS << WORK_STRUCT_FLAG_BITS >= PAGE_OFFSET);
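	/*
	 * Worked example of the check above (illustrative numbers only):
	 * with NR_CPUS = 4096 and WORK_STRUCT_FLAG_BITS = 10, the largest
	 * CPU number shifted past the flag bits is 4096 << 10 = 0x400000,
	 * far below a typical PAGE_OFFSET such as 0xc0000000, so a stored
	 * CPU number can never look like a kernel cwq pointer.
	 */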
	hotcpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE);

	/* initialize gcwqs */
	for_each_possible_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);

		spin_lock_init(&gcwq->lock);
		INIT_LIST_HEAD(&gcwq->worklist);
		gcwq->cpu = cpu;

		INIT_LIST_HEAD(&gcwq->idle_list);
		for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
			INIT_HLIST_HEAD(&gcwq->busy_hash[i]);

		ida_init(&gcwq->worker_ida);

		gcwq->trustee_state = TRUSTEE_DONE;
		init_waitqueue_head(&gcwq->trustee_wait);
	}

	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}