/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *   David Woodhouse <dwmw2@infradead.org>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>
/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Set during initialization and read-only afterwards.
 *
 * L: cwq->lock protected.  Access with cwq->lock held.
 *
 * W: workqueue_lock protected.
 */
/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

        spinlock_t lock;

        struct list_head worklist;
        wait_queue_head_t more_work;
        struct work_struct *current_work;

        struct workqueue_struct *wq;            /* I: the owning workqueue */
        struct task_struct *thread;
} ____cacheline_aligned;
/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
        unsigned int flags;                     /* I: WQ_* flags */
        struct cpu_workqueue_struct *cpu_wq;    /* I: cwq's */
        struct list_head list;                  /* W: list of all workqueues */
        const char *name;                       /* I: workqueue name */
#ifdef CONFIG_LOCKDEP
        struct lockdep_map lockdep_map;
#endif
};
#ifdef CONFIG_DEBUG_OBJECTS_WORK

static struct debug_obj_descr work_debug_descr;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int work_fixup_init(void *addr, enum debug_obj_state state)
{
        struct work_struct *work = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                cancel_work_sync(work);
                debug_object_init(work, &work_debug_descr);
/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int work_fixup_activate(void *addr, enum debug_obj_state state)
{
        struct work_struct *work = addr;

        switch (state) {
        case ODEBUG_STATE_NOTAVAILABLE:
                /*
                 * This is not really a fixup. The work struct was
                 * statically initialized. We just make sure that it
                 * is tracked in the object tracker.
                 */
                if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
                        debug_object_init(work, &work_debug_descr);
                        debug_object_activate(work, &work_debug_descr);

        case ODEBUG_STATE_ACTIVE:
/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int work_fixup_free(void *addr, enum debug_obj_state state)
{
        struct work_struct *work = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                cancel_work_sync(work);
                debug_object_free(work, &work_debug_descr);
static struct debug_obj_descr work_debug_descr = {
        .name           = "work_struct",
        .fixup_init     = work_fixup_init,
        .fixup_activate = work_fixup_activate,
        .fixup_free     = work_fixup_free,
};
static inline void debug_work_activate(struct work_struct *work)
{
        debug_object_activate(work, &work_debug_descr);
}

static inline void debug_work_deactivate(struct work_struct *work)
{
        debug_object_deactivate(work, &work_debug_descr);
}
void __init_work(struct work_struct *work, int onstack)
{
        if (onstack)
                debug_object_init_on_stack(work, &work_debug_descr);
        else
                debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);
void destroy_work_on_stack(struct work_struct *work)
{
        debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);

#else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif
/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static const struct cpumask *cpu_singlethread_map __read_mostly;
/*
 * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
 * flushes cwq->worklist. This means that flush_workqueue/wait_on_work
 * which comes in between can't use for_each_online_cpu(). We could
 * use cpu_possible_map, the cpumask below is more a documentation
 * than requirement, and is used only for optimization.
 */
static cpumask_var_t cpu_populated_map __read_mostly;
/* If it's single threaded, it isn't in the list of workqueues. */
static inline bool is_wq_single_threaded(struct workqueue_struct *wq)
{
        return wq->flags & WQ_SINGLE_THREAD;
}

static const struct cpumask *wq_cpu_map(struct workqueue_struct *wq)
{
        return is_wq_single_threaded(wq)
                ? cpu_singlethread_map : cpu_populated_map;
}
static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
                                            struct workqueue_struct *wq)
{
        if (unlikely(is_wq_single_threaded(wq)))
                cpu = singlethread_cpu;
        return per_cpu_ptr(wq->cpu_wq, cpu);
}
/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
                               struct cpu_workqueue_struct *cwq,
                               unsigned long extra_flags)
{
        BUG_ON(!work_pending(work));

        atomic_long_set(&work->data, (unsigned long)cwq | work_static(work) |
                        WORK_STRUCT_PENDING | extra_flags);
}
/*
 * Clear WORK_STRUCT_PENDING and the workqueue on which it was queued.
 */
static inline void clear_wq_data(struct work_struct *work)
{
        atomic_long_set(&work->data, work_static(work));
}

struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
        return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}
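/*
 * A rough illustration of the work->data encoding used above (assuming the
 * caller already owns the PENDING bit, as set_wq_data() requires): the cwq
 * pointer and the WORK_STRUCT_* flag bits share one atomic_long_t, which is
 * possible because cpu_workqueue_struct is aligned and its low pointer bits
 * are therefore zero and free for flags.
 *
 *      set_wq_data(work, cwq, 0);
 *      BUG_ON(get_wq_data(work) != cwq);        the cwq pointer round-trips
 *      BUG_ON(!work_pending(work));             the flag bits survive the store
 */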
/**
 * insert_work - insert a work into cwq
 * @cwq: cwq @work belongs to
 * @work: work to insert
 * @head: insertion point
 * @extra_flags: extra WORK_STRUCT_* flags to set
 *
 * Insert @work into @cwq after @head.
 *
 * CONTEXT:
 * spin_lock_irq(cwq->lock).
 */
static void insert_work(struct cpu_workqueue_struct *cwq,
                        struct work_struct *work, struct list_head *head,
                        unsigned int extra_flags)
{
        trace_workqueue_insertion(cwq->thread, work);

        /* we own @work, set data and link */
        set_wq_data(work, cwq, extra_flags);

        /*
         * Ensure that we get the right work->data if we see the
         * result of list_add() below, see try_to_grab_pending().
         */
        smp_wmb();

        list_add_tail(&work->entry, head);
        wake_up(&cwq->more_work);
}
static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
                         struct work_struct *work)
{
        struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
        unsigned long flags;

        debug_work_activate(work);
        spin_lock_irqsave(&cwq->lock, flags);
        BUG_ON(!list_empty(&work->entry));
        insert_work(cwq, work, &cwq->worklist, 0);
        spin_unlock_irqrestore(&cwq->lock, flags);
}
/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
        int ret;

        ret = queue_work_on(get_cpu(), wq, work);
        put_cpu();

        return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU, the caller must ensure it
 * can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
        int ret = 0;

        if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
                __queue_work(cpu, wq, work);
                ret = 1;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);
static void delayed_work_timer_fn(unsigned long __data)
{
        struct delayed_work *dwork = (struct delayed_work *)__data;
        struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);

        __queue_work(smp_processor_id(), cwq->wq, &dwork->work);
}
/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay)
{
        if (delay == 0)
                return queue_work(wq, &dwork->work);

        return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay)
{
        int ret = 0;
        struct timer_list *timer = &dwork->timer;
        struct work_struct *work = &dwork->work;

        if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));

                timer_stats_timer_set_start_info(&dwork->timer);

                /* This stores cwq for the moment, for the timer_fn */
                set_wq_data(work, get_cwq(raw_smp_processor_id(), wq), 0);
                timer->expires = jiffies + delay;
                timer->data = (unsigned long)dwork;
                timer->function = delayed_work_timer_fn;

                if (unlikely(cpu >= 0))
                        add_timer_on(timer, cpu);
                else
                        add_timer(timer);
                ret = 1;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
        spin_lock_irq(&cwq->lock);
        while (!list_empty(&cwq->worklist)) {
                struct work_struct *work = list_entry(cwq->worklist.next,
                                                struct work_struct, entry);
                work_func_t f = work->func;
#ifdef CONFIG_LOCKDEP
                /*
                 * It is permissible to free the struct work_struct
                 * from inside the function that is called from it,
                 * this we need to take into account for lockdep too.
                 * To avoid bogus "held lock freed" warnings as well
                 * as problems when looking into work->lockdep_map,
                 * make a copy and use that here.
                 */
                struct lockdep_map lockdep_map = work->lockdep_map;
#endif
                trace_workqueue_execution(cwq->thread, work);
                debug_work_deactivate(work);
                cwq->current_work = work;
                list_del_init(cwq->worklist.next);
                spin_unlock_irq(&cwq->lock);

                BUG_ON(get_wq_data(work) != cwq);
                work_clear_pending(work);
                lock_map_acquire(&cwq->wq->lockdep_map);
                lock_map_acquire(&lockdep_map);
                f(work);
                lock_map_release(&lockdep_map);
                lock_map_release(&cwq->wq->lockdep_map);

                if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
                        printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
                                        "%s/0x%08x/%d\n",
                                        current->comm, preempt_count(),
                                        task_pid_nr(current));
                        printk(KERN_ERR "    last function: ");
                        print_symbol("%s\n", (unsigned long)f);
                        debug_show_held_locks(current);
                }

                spin_lock_irq(&cwq->lock);
                cwq->current_work = NULL;
        }
        spin_unlock_irq(&cwq->lock);
}
/**
 * worker_thread - the worker thread function
 * @__cwq: cwq to serve
 *
 * The cwq worker thread function.
 */
static int worker_thread(void *__cwq)
{
        struct cpu_workqueue_struct *cwq = __cwq;
        DEFINE_WAIT(wait);

        if (cwq->wq->flags & WQ_FREEZEABLE)
                set_freezable();

        for (;;) {
                prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
                if (!freezing(current) &&
                    !kthread_should_stop() &&
                    list_empty(&cwq->worklist))
                        schedule();
                finish_wait(&cwq->more_work, &wait);

                try_to_freeze();

                if (kthread_should_stop())
                        break;

                run_workqueue(cwq);
        }

        return 0;
}
struct wq_barrier {
        struct work_struct      work;
        struct completion       done;
};

static void wq_barrier_func(struct work_struct *work)
{
        struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
        complete(&barr->done);
}
/**
 * insert_wq_barrier - insert a barrier work
 * @cwq: cwq to insert barrier into
 * @barr: wq_barrier to insert
 * @head: insertion point
 *
 * Insert barrier @barr into @cwq before @head.
 *
 * CONTEXT:
 * spin_lock_irq(cwq->lock).
 */
static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
                        struct wq_barrier *barr, struct list_head *head)
{
        /*
         * debugobject calls are safe here even with cwq->lock locked
         * as we know for sure that this will not trigger any of the
         * checks and call back into the fixup functions where we
         * might deadlock.
         */
        INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
        __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
        init_completion(&barr->done);

        debug_work_activate(&barr->work);
        insert_work(cwq, &barr->work, head, 0);
}
static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
        int active = 0;
        struct wq_barrier barr;

        WARN_ON(cwq->thread == current);

        spin_lock_irq(&cwq->lock);
        if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
                insert_wq_barrier(cwq, &barr, &cwq->worklist);
                active = 1;
        }
        spin_unlock_irq(&cwq->lock);

        if (active) {
                wait_for_completion(&barr.done);
                destroy_work_on_stack(&barr.work);
        }

        return active;
}
/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
        const struct cpumask *cpu_map = wq_cpu_map(wq);
        int cpu;

        might_sleep();
        lock_map_acquire(&wq->lockdep_map);
        lock_map_release(&wq->lockdep_map);
        for_each_cpu(cpu, cpu_map)
                flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);
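/*
 * A minimal shutdown sketch (my_wq and my_shutdown are hypothetical): stop
 * the sources that queue new work, then flush what is already queued.
 *
 *      static void my_shutdown(void)
 *      {
 *              ...                     no more queue_work() after this point
 *              flush_workqueue(my_wq);
 *      }
 *
 * Work queued after flush_workqueue() returns is not waited for, which is why
 * requeueing must be stopped first.
 */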
/**
 * flush_work - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns false if @work has already terminated.
 *
 * It is expected that, prior to calling flush_work(), the caller has
 * arranged for the work to not be requeued, otherwise it doesn't make
 * sense to use this function.
 */
int flush_work(struct work_struct *work)
{
        struct cpu_workqueue_struct *cwq;
        struct list_head *prev;
        struct wq_barrier barr;

        cwq = get_wq_data(work);

        lock_map_acquire(&cwq->wq->lockdep_map);
        lock_map_release(&cwq->wq->lockdep_map);

        spin_lock_irq(&cwq->lock);
        if (!list_empty(&work->entry)) {
                /*
                 * See the comment near try_to_grab_pending()->smp_rmb().
                 * If it was re-queued under us we are not going to wait.
                 */
                smp_rmb();
                if (unlikely(cwq != get_wq_data(work)))
                        goto already_gone;
                prev = &work->entry;
        } else {
                if (cwq->current_work != work)
                        goto already_gone;
                prev = &cwq->worklist;
        }
        insert_wq_barrier(cwq, &barr, prev->next);

        spin_unlock_irq(&cwq->lock);
        wait_for_completion(&barr.done);
        destroy_work_on_stack(&barr.work);
        return 1;
already_gone:
        spin_unlock_irq(&cwq->lock);
        return 0;
}
EXPORT_SYMBOL_GPL(flush_work);
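/*
 * A minimal usage sketch (my_work and my_irq are hypothetical, my_work
 * already initialized with INIT_WORK): stop whatever requeues it, then flush.
 *
 *      disable_irq(my_irq);            no new queue_work() from the handler
 *      flush_work(&my_work);           wait for the queued/running instance
 *
 * If @work can still be requeued concurrently, cancel_work_sync() is the
 * appropriate helper instead; flush_work() does not wait for a re-queued
 * instance.
 */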
/*
 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
 * so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
        struct cpu_workqueue_struct *cwq;
        int ret = -1;

        if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
                return 0;

        /*
         * The queueing is in progress, or it is already queued. Try to
         * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
         */
        cwq = get_wq_data(work);

        spin_lock_irq(&cwq->lock);
        if (!list_empty(&work->entry)) {
                /*
                 * This work is queued, but perhaps we locked the wrong cwq.
                 * In that case we must see the new value after rmb(), see
                 * insert_work()->wmb().
                 */
                smp_rmb();
                if (cwq == get_wq_data(work)) {
                        debug_work_deactivate(work);
                        list_del_init(&work->entry);
                        ret = 1;
                }
        }
        spin_unlock_irq(&cwq->lock);

        return ret;
}
static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
                                struct work_struct *work)
{
        struct wq_barrier barr;
        int running = 0;

        spin_lock_irq(&cwq->lock);
        if (unlikely(cwq->current_work == work)) {
                insert_wq_barrier(cwq, &barr, cwq->worklist.next);
                running = 1;
        }
        spin_unlock_irq(&cwq->lock);

        if (unlikely(running)) {
                wait_for_completion(&barr.done);
                destroy_work_on_stack(&barr.work);
        }
}
static void wait_on_work(struct work_struct *work)
{
        struct cpu_workqueue_struct *cwq;
        struct workqueue_struct *wq;
        const struct cpumask *cpu_map;
        int cpu;

        might_sleep();

        lock_map_acquire(&work->lockdep_map);
        lock_map_release(&work->lockdep_map);

        cwq = get_wq_data(work);
        if (!cwq)
                return;

        wq = cwq->wq;
        cpu_map = wq_cpu_map(wq);

        for_each_cpu(cpu, cpu_map)
                wait_on_cpu_work(get_cwq(cpu, wq), work);
}
static int __cancel_work_timer(struct work_struct *work,
                                struct timer_list *timer)
{
        int ret;

        do {
                ret = (timer && likely(del_timer(timer)));
                if (!ret)
                        ret = try_to_grab_pending(work);
                wait_on_work(work);
        } while (unlikely(ret < 0));

        clear_wq_data(work);
        return ret;
}
/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns true if @work was pending.
 *
 * cancel_work_sync() will cancel the work if it is queued. If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself. It can
 * cancel the work even if it migrates to another workqueue, however in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that workqueue_struct on which this work was last
 * queued can't be destroyed before this function returns.
 */
int cancel_work_sync(struct work_struct *work)
{
        return __cancel_work_timer(work, NULL);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);
/**
 * cancel_delayed_work_sync - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * Returns true if @dwork was pending.
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work(). See also the comment for cancel_work_sync().
 */
int cancel_delayed_work_sync(struct delayed_work *dwork)
{
        return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);
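/*
 * A minimal teardown sketch for a self-rearming handler (my_poll, my_dwork
 * and my_wq are hypothetical):
 *
 *      static void my_poll(struct work_struct *work)
 *      {
 *              ...
 *              queue_delayed_work(my_wq, &my_dwork, HZ);       rearm
 *      }
 *
 *      cancel_delayed_work_sync(&my_dwork);    kills the timer, the queued
 *                                              work and any rearm race
 */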
static struct workqueue_struct *keventd_wq __read_mostly;
/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns zero if @work was already on the kernel-global workqueue and
 * non-zero otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 */
int schedule_work(struct work_struct *work)
{
        return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
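/*
 * A minimal usage sketch (my_func and my_work are hypothetical): for one-off
 * deferred work with no ordering or latency requirements the global
 * workqueue is enough, so no private workqueue is needed.
 *
 *      static DECLARE_WORK(my_work, my_func);
 *
 *      schedule_work(&my_work);        from irq or process context
 *      ...
 *      cancel_work_sync(&my_work);     before my_func's data goes away
 */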
/*
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu
 */
int schedule_work_on(int cpu, struct work_struct *work)
{
        return queue_work_on(cpu, keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work_on);
/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int schedule_delayed_work(struct delayed_work *dwork,
                                        unsigned long delay)
{
        return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);
/**
 * flush_delayed_work - block until a dwork_struct's callback has terminated
 * @dwork: the delayed work which is to be flushed
 *
 * Any timeout is cancelled, and any pending work is run immediately.
 */
void flush_delayed_work(struct delayed_work *dwork)
{
        if (del_timer_sync(&dwork->timer)) {
                __queue_work(get_cpu(), get_wq_data(&dwork->work)->wq,
                             &dwork->work);
                put_cpu();
        }
        flush_work(&dwork->work);
}
EXPORT_SYMBOL(flush_delayed_work);
/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
                        struct delayed_work *dwork, unsigned long delay)
{
        return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);
/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
        int cpu;
        int orig = -1;
        struct work_struct *works;

        works = alloc_percpu(struct work_struct);
        if (!works)
                return -ENOMEM;

        get_online_cpus();

        /*
         * When running in keventd don't schedule a work item on
         * itself.  Can just call directly because the work queue is
         * already bound.  This also is faster.
         */
        if (current_is_keventd())
                orig = raw_smp_processor_id();

        for_each_online_cpu(cpu) {
                struct work_struct *work = per_cpu_ptr(works, cpu);

                INIT_WORK(work, func);
                if (cpu != orig)
                        schedule_work_on(cpu, work);
        }
        if (orig >= 0)
                func(per_cpu_ptr(works, orig));

        for_each_online_cpu(cpu)
                flush_work(per_cpu_ptr(works, cpu));

        put_online_cpus();
        free_percpu(works);
        return 0;
}
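/*
 * A minimal usage sketch (drain_local_caches is hypothetical): run a short,
 * sleepable callback on every online CPU and wait for all of them.
 *
 *      static void drain_local_caches(struct work_struct *unused)
 *      {
 *              ...     runs on each online CPU, in process context
 *      }
 *
 *      ret = schedule_on_each_cpu(drain_local_caches);
 *
 * For cheap, non-sleeping work, on_each_cpu() in IPI context is usually the
 * better fit; this helper exists for callbacks that must be able to sleep.
 */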
/**
 * flush_scheduled_work - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the kernel-global workqueue and blocks until its
 * completion.
 *
 * Think twice before calling this function!  It's very easy to get into
 * trouble if you don't take great care.  Either of the following situations
 * will lead to deadlock:
 *
 *	One of the work items currently on the workqueue needs to acquire
 *	a lock held by your code or its caller.
 *
 *	Your code is running in the context of a work routine.
 *
 * They will be detected by lockdep when they occur, but the first might not
 * occur very often.  It depends on what work items are on the workqueue and
 * what locks they need, which you have no control over.
 *
 * In most situations flushing the entire workqueue is overkill; you merely
 * need to know that a particular work item isn't queued and isn't running.
 * In such cases you should use cancel_delayed_work_sync() or
 * cancel_work_sync() instead.
 */
void flush_scheduled_work(void)
{
        flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);
/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
        if (!in_interrupt()) {
                fn(&ew->work);
                return 0;
        }

        INIT_WORK(&ew->work, fn);
        schedule_work(&ew->work);

        return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
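/*
 * A minimal usage sketch (my_release and struct my_obj, which embeds a
 * struct execute_work named ew, are hypothetical): useful when a release
 * path can be entered from both interrupt and process context.
 *
 *      static void my_release(struct work_struct *work)
 *      {
 *              struct my_obj *obj = container_of(work, struct my_obj, ew.work);
 *              kfree(obj);
 *      }
 *
 *      execute_in_process_context(my_release, &obj->ew);
 *
 * The execute_work storage must stay valid until my_release() has run.
 */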
int keventd_up(void)
{
        return keventd_wq != NULL;
}

int current_is_keventd(void)
{
        struct cpu_workqueue_struct *cwq;
        int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
        int ret = 0;

        BUG_ON(!keventd_wq);

        cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
        if (current == cwq->thread)
                ret = 1;

        return ret;
}
static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
        struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

        cwq->wq = wq;
        spin_lock_init(&cwq->lock);
        INIT_LIST_HEAD(&cwq->worklist);
        init_waitqueue_head(&cwq->more_work);

        return cwq;
}
static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
        struct workqueue_struct *wq = cwq->wq;
        const char *fmt = is_wq_single_threaded(wq) ? "%s" : "%s/%d";
        struct task_struct *p;

        p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
        /*
         * Nobody can add the work_struct to this cwq,
         *	if (caller is __create_workqueue)
         *		nobody should see this wq
         *	else // caller is CPU_UP_PREPARE
         *		cpu is not on cpu_online_map
         * so we can abort safely.
         */
        if (IS_ERR(p))
                return PTR_ERR(p);
        cwq->thread = p;

        trace_workqueue_creation(cwq->thread, cpu);

        return 0;
}
static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
        struct task_struct *p = cwq->thread;

        if (p != NULL) {
                if (cpu >= 0)
                        kthread_bind(p, cpu);
                wake_up_process(p);
        }
}
struct workqueue_struct *__create_workqueue_key(const char *name,
                                                unsigned int flags,
                                                struct lock_class_key *key,
                                                const char *lock_name)
{
        struct workqueue_struct *wq;
        struct cpu_workqueue_struct *cwq;
        int err = 0, cpu;

        wq = kzalloc(sizeof(*wq), GFP_KERNEL);
        if (!wq)
                goto err;

        wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
        if (!wq->cpu_wq)
                goto err;

        wq->flags = flags;
        wq->name = name;
        lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
        INIT_LIST_HEAD(&wq->list);

        if (flags & WQ_SINGLE_THREAD) {
                cwq = init_cpu_workqueue(wq, singlethread_cpu);
                err = create_workqueue_thread(cwq, singlethread_cpu);
                start_workqueue_thread(cwq, -1);
        } else {
                cpu_maps_update_begin();
                /*
                 * We must place this wq on list even if the code below fails.
                 * cpu_down(cpu) can remove cpu from cpu_populated_map before
                 * destroy_workqueue() takes the lock, in that case we leak
                 * cwq[cpu]->thread.
                 */
                spin_lock(&workqueue_lock);
                list_add(&wq->list, &workqueues);
                spin_unlock(&workqueue_lock);
                /*
                 * We must initialize cwqs for each possible cpu even if we
                 * are going to call destroy_workqueue() finally. Otherwise
                 * cpu_up() can hit the uninitialized cwq once we drop the
                 * lock.
                 */
                for_each_possible_cpu(cpu) {
                        cwq = init_cpu_workqueue(wq, cpu);
                        if (err || !cpu_online(cpu))
                                continue;
                        err = create_workqueue_thread(cwq, cpu);
                        start_workqueue_thread(cwq, cpu);
                }
                cpu_maps_update_done();
        }

        if (err) {
                destroy_workqueue(wq);
                wq = NULL;
        }
        return wq;
err:
        if (wq) {
                free_percpu(wq->cpu_wq);
                kfree(wq);
        }
        return NULL;
}
EXPORT_SYMBOL_GPL(__create_workqueue_key);
static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
{
        /*
         * Our caller is either destroy_workqueue() or CPU_POST_DEAD,
         * cpu_add_remove_lock protects cwq->thread.
         */
        if (cwq->thread == NULL)
                return;

        lock_map_acquire(&cwq->wq->lockdep_map);
        lock_map_release(&cwq->wq->lockdep_map);

        flush_cpu_workqueue(cwq);
        /*
         * If the caller is CPU_POST_DEAD and cwq->worklist was not empty,
         * a concurrent flush_workqueue() can insert a barrier after us.
         * However, in that case run_workqueue() won't return and check
         * kthread_should_stop() until it flushes all work_struct's.
         * When ->worklist becomes empty it is safe to exit because no
         * more work_structs can be queued on this cwq: flush_workqueue
         * checks list_empty(), and a "normal" queue_work() can't use
         * a dead CPU.
         */
        trace_workqueue_destruction(cwq->thread);
        kthread_stop(cwq->thread);
        cwq->thread = NULL;
}
/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
        const struct cpumask *cpu_map = wq_cpu_map(wq);
        int cpu;

        cpu_maps_update_begin();
        spin_lock(&workqueue_lock);
        list_del(&wq->list);
        spin_unlock(&workqueue_lock);

        for_each_cpu(cpu, cpu_map)
                cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
        cpu_maps_update_done();

        free_percpu(wq->cpu_wq);
        kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);
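/*
 * A minimal lifecycle sketch (my_wq and my_work are hypothetical):
 *
 *      my_wq = create_workqueue("my_wq");      one worker thread per CPU
 *      if (!my_wq)
 *              return -ENOMEM;
 *      ...
 *      queue_work(my_wq, &my_work);
 *      ...
 *      destroy_workqueue(my_wq);       flushes pending work, stops the
 *                                      threads, then frees the structure
 *
 * create_singlethread_workqueue("my_wq") gives one thread total instead of
 * one per CPU (the WQ_SINGLE_THREAD case above).
 */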
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
                                                unsigned long action,
                                                void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;
        struct cpu_workqueue_struct *cwq;
        struct workqueue_struct *wq;
        int err = 0;

        action &= ~CPU_TASKS_FROZEN;

        switch (action) {
        case CPU_UP_PREPARE:
                cpumask_set_cpu(cpu, cpu_populated_map);
        }
undo:
        list_for_each_entry(wq, &workqueues, list) {
                cwq = per_cpu_ptr(wq->cpu_wq, cpu);

                switch (action) {
                case CPU_UP_PREPARE:
                        err = create_workqueue_thread(cwq, cpu);
                        if (!err)
                                break;
                        printk(KERN_ERR "workqueue [%s] for %i failed\n",
                                wq->name, cpu);
                        action = CPU_UP_CANCELED;
                        goto undo;

                case CPU_ONLINE:
                        start_workqueue_thread(cwq, cpu);
                        break;

                case CPU_UP_CANCELED:
                        start_workqueue_thread(cwq, -1);
                case CPU_POST_DEAD:
                        cleanup_workqueue_thread(cwq);
                        break;
                }
        }

        switch (action) {
        case CPU_UP_CANCELED:
        case CPU_POST_DEAD:
                cpumask_clear_cpu(cpu, cpu_populated_map);
        }

        return notifier_from_errno(err);
}
#ifdef CONFIG_SMP

struct work_for_cpu {
        struct completion completion;
        long (*fn)(void *);
        void *arg;
        long ret;
};

static int do_work_for_cpu(void *_wfc)
{
        struct work_for_cpu *wfc = _wfc;
        wfc->ret = wfc->fn(wfc->arg);
        complete(&wfc->completion);
        return 0;
}
/**
 * work_on_cpu - run a function in user context on a particular cpu
 * @cpu: the cpu to run on
 * @fn: the function to run
 * @arg: the function arg
 *
 * This will return the value @fn returns.
 * It is up to the caller to ensure that the cpu doesn't go offline.
 * The caller must not hold any locks which would prevent @fn from completing.
 */
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
        struct task_struct *sub_thread;
        struct work_for_cpu wfc = {
                .completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
                .fn = fn,
                .arg = arg,
        };

        sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
        if (IS_ERR(sub_thread))
                return PTR_ERR(sub_thread);
        kthread_bind(sub_thread, cpu);
        wake_up_process(sub_thread);
        wait_for_completion(&wfc.completion);
        return wfc.ret;
}
EXPORT_SYMBOL_GPL(work_on_cpu);
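/*
 * A minimal usage sketch (read_msr_on and ctx are hypothetical): run a short,
 * sleepable function on a specific CPU and collect its return value.
 *
 *      static long read_msr_on(void *arg)
 *      {
 *              ...     executes on the target CPU, may sleep
 *              return value;
 *      }
 *
 *      get_online_cpus();
 *      ret = work_on_cpu(2, read_msr_on, &ctx);
 *      put_online_cpus();
 *
 * The get/put_online_cpus() pair is one way to honor the "cpu doesn't go
 * offline" requirement stated above.
 */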
#endif /* CONFIG_SMP */
void __init init_workqueues(void)
{
        alloc_cpumask_var(&cpu_populated_map, GFP_KERNEL);

        cpumask_copy(cpu_populated_map, cpu_online_mask);
        singlethread_cpu = cpumask_first(cpu_possible_mask);
        cpu_singlethread_map = cpumask_of(singlethread_cpu);
        hotcpu_notifier(workqueue_cpu_callback, 0);
        keventd_wq = create_workqueue("events");
        BUG_ON(!keventd_wq);
}