/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	struct list_head worklist;
	wait_queue_head_t more_work;
	struct work_struct *current_work;

	struct workqueue_struct *wq;
	struct task_struct *thread;
	int should_stop;

	int run_depth;		/* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	struct list_head list;
	const char *name;
	int singlethread;
	int freezeable;		/* Freeze threads during suspend */
};

/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static DEFINE_MUTEX(workqueue_mutex);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static cpumask_t cpu_singlethread_map __read_mostly;
/* optimization, we could use cpu_possible_map */
static cpumask_t cpu_populated_map __read_mostly;
/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
	return wq->singlethread;
}

static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
{
	return is_single_threaded(wq)
		? &cpu_singlethread_map : &cpu_populated_map;
}

static
struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
{
	if (unlikely(is_single_threaded(wq)))
		cpu = singlethread_cpu;
	return per_cpu_ptr(wq->cpu_wq, cpu);
}
/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
				struct cpu_workqueue_struct *cwq)
{
	unsigned long new;

	BUG_ON(!work_pending(work));

	new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
	atomic_long_set(&work->data, new);
}

static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}
static void insert_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work, int tail)
{
	set_wq_data(work, cwq);
	if (tail)
		list_add_tail(&work->entry, &cwq->worklist);
	else
		list_add(&work->entry, &cwq->worklist);
	wake_up(&cwq->more_work);
}

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	insert_work(cwq, work, 1);
	spin_unlock_irqrestore(&cwq->lock, flags);
}
/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(!list_empty(&work->entry));
		__queue_work(wq_per_cpu(wq, get_cpu()), work);
		put_cpu();
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
	struct workqueue_struct *wq = cwq->wq;

	__queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
}
/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int fastcall queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	timer_stats_timer_set_start_info(&dwork->timer);
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	spin_lock_irq(&cwq->lock);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__FUNCTION__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;

		cwq->current_work = work;
		list_del_init(cwq->worklist.next);
		spin_unlock_irq(&cwq->lock);

		BUG_ON(get_wq_data(work) != cwq);
		work_clear_pending(work);
		f(work);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
					current->pid);
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
		}

		spin_lock_irq(&cwq->lock);
		cwq->current_work = NULL;
	}
	cwq->run_depth--;
	spin_unlock_irq(&cwq->lock);
}
/*
 * NOTE: the caller must not touch *cwq if this func returns true
 */
static int cwq_should_stop(struct cpu_workqueue_struct *cwq)
{
	int should_stop = cwq->should_stop;

	if (unlikely(should_stop)) {
		spin_lock_irq(&cwq->lock);
		should_stop = cwq->should_stop && list_empty(&cwq->worklist);
		if (should_stop)
			cwq->thread = NULL;
		spin_unlock_irq(&cwq->lock);
	}

	return should_stop;
}
static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DEFINE_WAIT(wait);
	struct k_sigaction sa;
	sigset_t blocked;

	if (!cwq->wq->freezeable)
		current->flags |= PF_NOFREEZE;

	set_user_nice(current, -5);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/*
	 * We inherited MPOL_INTERLEAVE from the booting kernel.
	 * Set MPOL_DEFAULT to insure node local allocations.
	 */
	numa_default_policy();

	/* SIG_IGN makes children autoreap: see do_notify_parent(). */
	sa.sa.sa_handler = SIG_IGN;
	sa.sa.sa_flags = 0;
	siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
	do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

	for (;;) {
		if (cwq->wq->freezeable)
			try_to_freeze();

		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!cwq->should_stop && list_empty(&cwq->worklist))
			schedule();
		finish_wait(&cwq->more_work, &wait);

		if (cwq_should_stop(cwq))
			break;

		run_workqueue(cwq);
	}

	return 0;
}
struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
					struct wq_barrier *barr, int tail)
{
	INIT_WORK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

	init_completion(&barr->done);

	insert_work(cwq, &barr->work, tail);
}
static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	if (cwq->thread == current) {
		/*
		 * Probably keventd trying to flush its own queue. So simply run
		 * it by hand rather than deadlocking.
		 */
		run_workqueue(cwq);
	} else {
		struct wq_barrier barr;
		int active = 0;

		spin_lock_irq(&cwq->lock);
		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
			insert_wq_barrier(cwq, &barr, 1);
			active = 1;
		}
		spin_unlock_irq(&cwq->lock);

		if (active)
			wait_for_completion(&barr.done);
	}
}
/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	int cpu;

	might_sleep();
	for_each_cpu_mask(cpu, *cpu_map)
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);
static void wait_on_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work)
{
	struct wq_barrier barr;
	int running = 0;

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		insert_wq_barrier(cwq, &barr, 0);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (unlikely(running))
		wait_for_completion(&barr.done);
}
/**
 * flush_work - block until a work_struct's callback has terminated
 * @wq: the workqueue on which the work is queued
 * @work: the work which is to be flushed
 *
 * flush_work() will attempt to cancel the work if it is queued.  If the work's
 * callback appears to be running, flush_work() will block until it has
 * completed.
 *
 * flush_work() is designed to be used when the caller is tearing down data
 * structures which the callback function operates upon.  It is expected that,
 * prior to calling flush_work(), the caller has arranged for the work to not
 * be requeued.
 */
void flush_work(struct workqueue_struct *wq, struct work_struct *work)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	struct cpu_workqueue_struct *cwq;
	int cpu;

	might_sleep();

	cwq = get_wq_data(work);
	/* Was it ever queued ? */
	if (!cwq)
		return;

	/*
	 * This work can't be re-queued, no need to re-check that
	 * get_wq_data() is still the same when we take cwq->lock.
	 */
	spin_lock_irq(&cwq->lock);
	list_del_init(&work->entry);
	work_clear_pending(work);
	spin_unlock_irq(&cwq->lock);

	for_each_cpu_mask(cpu, *cpu_map)
		wait_on_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}
EXPORT_SYMBOL_GPL(flush_work);
static struct workqueue_struct *keventd_wq;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int fastcall schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int fastcall schedule_delayed_work(struct delayed_work *dwork,
					unsigned long delay)
{
	timer_stats_timer_set_start_info(&dwork->timer);
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);
/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);
/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	preempt_disable();		/* CPU hotplug */
	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
	}
	preempt_enable();
	flush_workqueue(keventd_wq);
	free_percpu(works);
	return 0;
}
void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

void flush_work_keventd(struct work_struct *work)
{
	flush_work(keventd_wq, work);
}
EXPORT_SYMBOL(flush_work_keventd);
/**
 * cancel_rearming_delayed_work - kill off a delayed work whose handler rearms the delayed work.
 * @dwork: the delayed work struct
 *
 * Note that the work callback function may still be running on return from
 * cancel_delayed_work(). Run flush_workqueue() or flush_work() to wait on it.
 */
void cancel_rearming_delayed_work(struct delayed_work *dwork)
{
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);

	/* Was it ever queued ? */
	if (cwq) {
		struct workqueue_struct *wq = cwq->wq;

		while (!cancel_delayed_work(dwork))
			flush_workqueue(wq);
	}
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);
/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = smp_processor_id();	/* preempt-safe: keventd is per-cpu */
	int ret = 0;

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;
}
static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

	cwq->wq = wq;
	spin_lock_init(&cwq->lock);
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);

	return cwq;
}
static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct workqueue_struct *wq = cwq->wq;
	const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d";
	struct task_struct *p;

	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
	/*
	 * Nobody can add the work_struct to this cwq,
	 *	if (caller is __create_workqueue)
	 *		nobody should see this wq
	 *	else // caller is CPU_UP_PREPARE
	 *		cpu is not on cpu_online_map
	 * so we can abort safely.
	 */
	if (IS_ERR(p))
		return PTR_ERR(p);

	cwq->thread = p;
	cwq->should_stop = 0;

	return 0;
}
static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct task_struct *p = cwq->thread;

	if (p != NULL) {
		if (cpu >= 0)
			kthread_bind(p, cpu);
		wake_up_process(p);
	}
}
struct workqueue_struct *__create_workqueue(const char *name,
					    int singlethread, int freezeable)
{
	struct workqueue_struct *wq;
	struct cpu_workqueue_struct *cwq;
	int err = 0, cpu;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	wq->singlethread = singlethread;
	wq->freezeable = freezeable;
	INIT_LIST_HEAD(&wq->list);

	if (singlethread) {
		cwq = init_cpu_workqueue(wq, singlethread_cpu);
		err = create_workqueue_thread(cwq, singlethread_cpu);
		start_workqueue_thread(cwq, -1);
	} else {
		mutex_lock(&workqueue_mutex);
		list_add(&wq->list, &workqueues);

		for_each_possible_cpu(cpu) {
			cwq = init_cpu_workqueue(wq, cpu);
			if (err || !cpu_online(cpu))
				continue;
			err = create_workqueue_thread(cwq, cpu);
			start_workqueue_thread(cwq, cpu);
		}
		mutex_unlock(&workqueue_mutex);
	}

	if (err) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);
static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct wq_barrier barr;
	int alive = 0;

	spin_lock_irq(&cwq->lock);
	if (cwq->thread != NULL) {
		insert_wq_barrier(cwq, &barr, 1);
		cwq->should_stop = 1;
		alive = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (alive) {
		wait_for_completion(&barr.done);

		while (unlikely(cwq->thread != NULL))
			cpu_relax();
		/*
		 * Wait until cwq->thread unlocks cwq->lock,
		 * it won't touch *cwq after that.
		 */
		spin_unlock_wait(&cwq->lock);
	}
}
/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	struct cpu_workqueue_struct *cwq;
	int cpu;

	mutex_lock(&workqueue_mutex);
	list_del(&wq->list);
	mutex_unlock(&workqueue_mutex);

	for_each_cpu_mask(cpu, *cpu_map) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);
		cleanup_workqueue_thread(cwq, cpu);
	}

	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
						unsigned long action,
						void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;

	switch (action) {
	case CPU_LOCK_ACQUIRE:
		mutex_lock(&workqueue_mutex);
		return NOTIFY_OK;

	case CPU_LOCK_RELEASE:
		mutex_unlock(&workqueue_mutex);
		return NOTIFY_OK;

	case CPU_UP_PREPARE:
		cpu_set(cpu, cpu_populated_map);
	}

	list_for_each_entry(wq, &workqueues, list) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);

		switch (action) {
		case CPU_UP_PREPARE:
			if (!create_workqueue_thread(cwq, cpu))
				break;
			printk(KERN_ERR "workqueue for %i failed\n", cpu);
			return NOTIFY_BAD;

		case CPU_ONLINE:
			start_workqueue_thread(cwq, cpu);
			break;

		case CPU_UP_CANCELED:
			start_workqueue_thread(cwq, -1);
		case CPU_DEAD:
			cleanup_workqueue_thread(cwq, cpu);
			break;
		}
	}

	return NOTIFY_OK;
}
void __init init_workqueues(void)
{
	cpu_populated_map = cpu_online_map;
	singlethread_cpu = first_cpu(cpu_possible_map);
	cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}