workqueue: define masks for work flags and conditionalize STATIC flags
kernel/workqueue.c
/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>

/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Set during initialization and read-only afterwards.
 *
 * L: cwq->lock protected.  Access with cwq->lock held.
 *
 * W: workqueue_lock protected.
 */

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	struct list_head worklist;
	wait_queue_head_t more_work;
	struct work_struct *current_work;

	struct workqueue_struct *wq;		/* I: the owning workqueue */
	struct task_struct *thread;
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	unsigned int flags;			/* I: WQ_* flags */
	struct cpu_workqueue_struct *cpu_wq;	/* I: cwq's */
	struct list_head list;			/* W: list of all workqueues */
	const char *name;			/* I: workqueue name */
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

#ifdef CONFIG_DEBUG_OBJECTS_WORK

static struct debug_obj_descr work_debug_descr;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int work_fixup_init(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_init(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int work_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The work struct was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
			debug_object_init(work, &work_debug_descr);
			debug_object_activate(work, &work_debug_descr);
			return 0;
		}
		WARN_ON_ONCE(1);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int work_fixup_free(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_free(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr work_debug_descr = {
	.name		= "work_struct",
	.fixup_init	= work_fixup_init,
	.fixup_activate	= work_fixup_activate,
	.fixup_free	= work_fixup_free,
};

static inline void debug_work_activate(struct work_struct *work)
{
	debug_object_activate(work, &work_debug_descr);
}

static inline void debug_work_deactivate(struct work_struct *work)
{
	debug_object_deactivate(work, &work_debug_descr);
}

void __init_work(struct work_struct *work, int onstack)
{
	if (onstack)
		debug_object_init_on_stack(work, &work_debug_descr);
	else
		debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);

void destroy_work_on_stack(struct work_struct *work)
{
	debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);

#else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif

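/*
 * Example: pairing on-stack initialization with destroy_work_on_stack().
 * A minimal caller-side sketch (my_stack_func and my_caller are
 * illustrative names, not part of this file); with
 * CONFIG_DEBUG_OBJECTS_WORK enabled, the on-stack variants keep the
 * object tracker consistent:
 *
 *	static void my_stack_func(struct work_struct *work)
 *	{
 *		pr_info("ran in process context\n");
 *	}
 *
 *	void my_caller(void)
 *	{
 *		struct work_struct work;
 *
 *		INIT_WORK_ON_STACK(&work, my_stack_func);
 *		schedule_work(&work);
 *		flush_work(&work);
 *		destroy_work_on_stack(&work);
 *	}
 */
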
/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static const struct cpumask *cpu_singlethread_map __read_mostly;
/*
 * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
 * flushes cwq->worklist. This means that a flush_workqueue() or
 * wait_on_work() which comes in between can't use for_each_online_cpu().
 * We could use cpu_possible_map; the cpumask below is more documentation
 * than optimization.
 */
static cpumask_var_t cpu_populated_map __read_mostly;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline bool is_wq_single_threaded(struct workqueue_struct *wq)
{
	return wq->flags & WQ_SINGLE_THREAD;
}

static const struct cpumask *wq_cpu_map(struct workqueue_struct *wq)
{
	return is_wq_single_threaded(wq)
		? cpu_singlethread_map : cpu_populated_map;
}

static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
					    struct workqueue_struct *wq)
{
	if (unlikely(is_wq_single_threaded(wq)))
		cpu = singlethread_cpu;
	return per_cpu_ptr(wq->cpu_wq, cpu);
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
			       struct cpu_workqueue_struct *cwq,
			       unsigned long extra_flags)
{
	BUG_ON(!work_pending(work));

	atomic_long_set(&work->data, (unsigned long)cwq | work_static(work) |
			WORK_STRUCT_PENDING | extra_flags);
}

/*
 * Clear WORK_STRUCT_PENDING and the workqueue on which it was queued.
 */
static inline void clear_wq_data(struct work_struct *work)
{
	atomic_long_set(&work->data, work_static(work));
}

static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}

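/*
 * The data field of a work_struct thus multiplexes a cpu_workqueue_struct
 * pointer with the low WORK_STRUCT_* flag bits, and
 * WORK_STRUCT_WQ_DATA_MASK masks the flags off again.  A minimal sketch
 * of the round trip performed by set_wq_data()/get_wq_data() above
 * (cwq, data and back are illustrative names):
 *
 *	unsigned long data = (unsigned long)cwq | WORK_STRUCT_PENDING;
 *	struct cpu_workqueue_struct *back =
 *		(void *)(data & WORK_STRUCT_WQ_DATA_MASK);
 *
 * back == cwq because cwq is sufficiently aligned for its low bits,
 * which carry the flags, to be zero.
 */
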
/**
 * insert_work - insert a work into cwq
 * @cwq: cwq @work belongs to
 * @work: work to insert
 * @head: insertion point
 * @extra_flags: extra WORK_STRUCT_* flags to set
 *
 * Insert @work into @cwq after @head.
 *
 * CONTEXT:
 * spin_lock_irq(cwq->lock).
 */
static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, struct list_head *head,
			unsigned int extra_flags)
{
	trace_workqueue_insertion(cwq->thread, work);

	/* we own @work, set data and link */
	set_wq_data(work, cwq, extra_flags);

	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();

	list_add_tail(&work->entry, head);
	wake_up(&cwq->more_work);
}

static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
			 struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
	unsigned long flags;

	debug_work_activate(work);
	spin_lock_irqsave(&cwq->lock, flags);
	BUG_ON(!list_empty(&work->entry));
	insert_work(cwq, work, &cwq->worklist, 0);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret;

	ret = queue_work_on(get_cpu(), wq, work);
	put_cpu();

	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);

/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU, the caller must ensure it
 * can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		__queue_work(cpu, wq, work);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);

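/*
 * Example: queueing work onto a dedicated workqueue.  A minimal sketch
 * (my_wq, my_work and my_work_func are illustrative names, not part of
 * this file):
 *
 *	static void my_work_func(struct work_struct *work)
 *	{
 *		pr_info("executing in process context\n");
 *	}
 *
 *	static DECLARE_WORK(my_work, my_work_func);
 *	static struct workqueue_struct *my_wq;
 *
 *	my_wq = create_workqueue("my_wq");
 *	if (my_wq)
 *		queue_work(my_wq, &my_work);	// 0 if already pending
 */
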
static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);

	__queue_work(smp_processor_id(), cwq->wq, &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		timer_stats_timer_set_start_info(&dwork->timer);

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work, get_cwq(raw_smp_processor_id(), wq), 0);
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);

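/*
 * Example: deferring work by roughly one second.  A minimal sketch
 * (my_dwork and my_dwork_func are illustrative names); to_delayed_work()
 * recovers the containing delayed_work in the callback:
 *
 *	static void my_dwork_func(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork = to_delayed_work(work);
 *		// runs about HZ jiffies after queueing
 *	}
 *
 *	static DECLARE_DELAYED_WORK(my_dwork, my_dwork_func);
 *
 *	queue_delayed_work(my_wq, &my_dwork, HZ);
 */
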
static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	spin_lock_irq(&cwq->lock);
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;
#ifdef CONFIG_LOCKDEP
		/*
		 * It is permissible to free the struct work_struct from
		 * inside the function that is called from it, and we need
		 * to take that into account for lockdep too.  To avoid
		 * bogus "held lock freed" warnings as well as problems
		 * when looking into work->lockdep_map, make a copy and
		 * use that here.
		 */
		struct lockdep_map lockdep_map = work->lockdep_map;
#endif
		trace_workqueue_execution(cwq->thread, work);
		debug_work_deactivate(work);
		cwq->current_work = work;
		list_del_init(cwq->worklist.next);
		spin_unlock_irq(&cwq->lock);

		BUG_ON(get_wq_data(work) != cwq);
		work_clear_pending(work);
		lock_map_acquire(&cwq->wq->lockdep_map);
		lock_map_acquire(&lockdep_map);
		f(work);
		lock_map_release(&lockdep_map);
		lock_map_release(&cwq->wq->lockdep_map);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
					task_pid_nr(current));
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
		}

		spin_lock_irq(&cwq->lock);
		cwq->current_work = NULL;
	}
	spin_unlock_irq(&cwq->lock);
}

/**
 * worker_thread - the worker thread function
 * @__cwq: cwq to serve
 *
 * The cwq worker thread function.
 */
static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DEFINE_WAIT(wait);

	if (cwq->wq->flags & WQ_FREEZEABLE)
		set_freezable();

	for (;;) {
		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!freezing(current) &&
		    !kthread_should_stop() &&
		    list_empty(&cwq->worklist))
			schedule();
		finish_wait(&cwq->more_work, &wait);

		try_to_freeze();

		if (kthread_should_stop())
			break;

		run_workqueue(cwq);
	}

	return 0;
}

struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}

/**
 * insert_wq_barrier - insert a barrier work
 * @cwq: cwq to insert barrier into
 * @barr: wq_barrier to insert
 * @head: insertion point
 *
 * Insert barrier @barr into @cwq before @head.
 *
 * CONTEXT:
 * spin_lock_irq(cwq->lock).
 */
static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
			struct wq_barrier *barr, struct list_head *head)
{
	/*
	 * debugobject calls are safe here even with cwq->lock locked
	 * as we know for sure that this will not trigger any of the
	 * checks and call back into the fixup functions where we
	 * might deadlock.
	 */
	INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
	init_completion(&barr->done);

	debug_work_activate(&barr->work);
	insert_work(cwq, &barr->work, head, 0);
}

static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	int active = 0;
	struct wq_barrier barr;

	WARN_ON(cwq->thread == current);

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
		insert_wq_barrier(cwq, &barr, &cwq->worklist);
		active = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (active) {
		wait_for_completion(&barr.done);
		destroy_work_on_stack(&barr.work);
	}

	return active;
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
	const struct cpumask *cpu_map = wq_cpu_map(wq);
	int cpu;

	might_sleep();
	lock_map_acquire(&wq->lockdep_map);
	lock_map_release(&wq->lockdep_map);
	for_each_cpu(cpu, cpu_map)
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);

/**
 * flush_work - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns false if @work has already terminated.
 *
 * It is expected that, prior to calling flush_work(), the caller has
 * arranged for the work to not be requeued, otherwise it doesn't make
 * sense to use this function.
 */
int flush_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct list_head *prev;
	struct wq_barrier barr;

	might_sleep();
	cwq = get_wq_data(work);
	if (!cwq)
		return 0;

	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * See the comment near try_to_grab_pending()->smp_rmb().
		 * If it was re-queued under us we are not going to wait.
		 */
		smp_rmb();
		if (unlikely(cwq != get_wq_data(work)))
			goto already_gone;
		prev = &work->entry;
	} else {
		if (cwq->current_work != work)
			goto already_gone;
		prev = &cwq->worklist;
	}
	insert_wq_barrier(cwq, &barr, prev->next);

	spin_unlock_irq(&cwq->lock);
	wait_for_completion(&barr.done);
	destroy_work_on_stack(&barr.work);
	return 1;
already_gone:
	spin_unlock_irq(&cwq->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(flush_work);

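/*
 * Example: waiting for one specific work item rather than the whole
 * queue.  A minimal sketch (my_wq and my_work are illustrative names);
 * the caller must have arranged that my_work is not requeued:
 *
 *	queue_work(my_wq, &my_work);
 *	...
 *	if (flush_work(&my_work))
 *		pr_info("my_work was pending or running and has finished\n");
 */
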
/*
 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
 * so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	int ret = -1;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
		return 0;

	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */

	cwq = get_wq_data(work);
	if (!cwq)
		return ret;

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * This work is queued, but perhaps we locked the wrong cwq.
		 * In that case we must see the new value after rmb(), see
		 * insert_work()->wmb().
		 */
		smp_rmb();
		if (cwq == get_wq_data(work)) {
			debug_work_deactivate(work);
			list_del_init(&work->entry);
			ret = 1;
		}
	}
	spin_unlock_irq(&cwq->lock);

	return ret;
}

static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work)
{
	struct wq_barrier barr;
	int running = 0;

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		insert_wq_barrier(cwq, &barr, cwq->worklist.next);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (unlikely(running)) {
		wait_for_completion(&barr.done);
		destroy_work_on_stack(&barr.work);
	}
}

static void wait_on_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	const struct cpumask *cpu_map;
	int cpu;

	might_sleep();

	lock_map_acquire(&work->lockdep_map);
	lock_map_release(&work->lockdep_map);

	cwq = get_wq_data(work);
	if (!cwq)
		return;

	wq = cwq->wq;
	cpu_map = wq_cpu_map(wq);

	for_each_cpu(cpu, cpu_map)
		wait_on_cpu_work(get_cwq(cpu, wq), work);
}

static int __cancel_work_timer(struct work_struct *work,
				struct timer_list *timer)
{
	int ret;

	do {
		ret = (timer && likely(del_timer(timer)));
		if (!ret)
			ret = try_to_grab_pending(work);
		wait_on_work(work);
	} while (unlikely(ret < 0));

	clear_wq_data(work);
	return ret;
}

/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns true if @work was pending.
 *
 * cancel_work_sync() will cancel the work if it is queued. If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself. It can
 * cancel the work even if it migrates to another workqueue, however in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that the workqueue on which this work was last
 * queued can't be destroyed before this function returns.
 */
int cancel_work_sync(struct work_struct *work)
{
	return __cancel_work_timer(work, NULL);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);

/**
 * cancel_delayed_work_sync - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * Returns true if @dwork was pending.
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work(). See also the comment for cancel_work_sync().
 */
int cancel_delayed_work_sync(struct delayed_work *dwork)
{
	return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);

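/*
 * Example: tearing down a self-rearming delayed work.  A minimal sketch
 * (my_dwork and my_dwork_func are illustrative names);
 * cancel_delayed_work_sync() copes with the callback requeueing itself:
 *
 *	static void my_dwork_func(struct work_struct *work)
 *	{
 *		// ... periodic processing ...
 *		schedule_delayed_work(&my_dwork, HZ);
 *	}
 *
 *	static DECLARE_DELAYED_WORK(my_dwork, my_dwork_func);
 *
 *	// on teardown, both timer and callback are reliably stopped:
 *	cancel_delayed_work_sync(&my_dwork);
 */
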
static struct workqueue_struct *keventd_wq __read_mostly;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns zero if @work was already on the kernel-global workqueue and
 * non-zero otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 */
int schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);

/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
int schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work_on);

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int schedule_delayed_work(struct delayed_work *dwork,
			  unsigned long delay)
{
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * flush_delayed_work - block until a dwork_struct's callback has terminated
 * @dwork: the delayed work which is to be flushed
 *
 * Any timeout is cancelled, and any pending work is run immediately.
 */
void flush_delayed_work(struct delayed_work *dwork)
{
	if (del_timer_sync(&dwork->timer)) {
		__queue_work(get_cpu(), get_wq_data(&dwork->work)->wq,
			     &dwork->work);
		put_cpu();
	}
	flush_work(&dwork->work);
}
EXPORT_SYMBOL(flush_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns a negative errno on failure.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	int orig = -1;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	get_online_cpus();

	/*
	 * When running in keventd don't schedule a work item on itself.
	 * We can just call the function directly because the workqueue
	 * is already bound to this CPU.  This is also faster.
	 */
	if (current_is_keventd())
		orig = raw_smp_processor_id();

	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		if (cpu != orig)
			schedule_work_on(cpu, work);
	}
	if (orig >= 0)
		func(per_cpu_ptr(works, orig));

	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(works, cpu));

	put_online_cpus();
	free_percpu(works);
	return 0;
}

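/*
 * Example: running a function once on every online CPU and waiting for
 * all invocations to finish.  A minimal sketch (do_per_cpu_setup is an
 * illustrative name); must be called from a context that may sleep:
 *
 *	static void do_per_cpu_setup(struct work_struct *unused)
 *	{
 *		pr_info("running on cpu %d\n", smp_processor_id());
 *	}
 *
 *	int err = schedule_on_each_cpu(do_per_cpu_setup);
 */
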
/**
 * flush_scheduled_work - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the kernel-global workqueue and blocks until its
 * completion.
 *
 * Think twice before calling this function!  It's very easy to get into
 * trouble if you don't take great care.  Either of the following situations
 * will lead to deadlock:
 *
 *	One of the work items currently on the workqueue needs to acquire
 *	a lock held by your code or its caller.
 *
 *	Your code is running in the context of a work routine.
 *
 * They will be detected by lockdep when they occur, but the first might not
 * occur very often.  It depends on what work items are on the workqueue and
 * what locks they need, which you have no control over.
 *
 * In most situations flushing the entire workqueue is overkill; you merely
 * need to know that a particular work item isn't queued and isn't running.
 * In such cases you should use cancel_delayed_work_sync() or
 * cancel_work_sync() instead.
 */
void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);

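/*
 * Example: releasing an object from a caller that may be in interrupt
 * context.  A minimal sketch (my_dev and my_release are illustrative
 * names); @ew must stay valid until the work executes, so it typically
 * lives inside the object being released:
 *
 *	struct my_dev {
 *		struct execute_work ew;
 *		// ...
 *	};
 *
 *	static void my_release(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(work, struct my_dev,
 *						  ew.work);
 *		kfree(dev);
 *	}
 *
 *	execute_in_process_context(my_release, &dev->ew);
 */
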
int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;
}

static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

	cwq->wq = wq;
	spin_lock_init(&cwq->lock);
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);

	return cwq;
}

static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct workqueue_struct *wq = cwq->wq;
	const char *fmt = is_wq_single_threaded(wq) ? "%s" : "%s/%d";
	struct task_struct *p;

	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
	/*
	 * Nobody can add the work_struct to this cwq,
	 *	if (caller is __create_workqueue)
	 *		nobody should see this wq
	 *	else // caller is CPU_UP_PREPARE
	 *		cpu is not on cpu_online_map
	 * so we can abort safely.
	 */
	if (IS_ERR(p))
		return PTR_ERR(p);
	cwq->thread = p;

	trace_workqueue_creation(cwq->thread, cpu);

	return 0;
}

static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct task_struct *p = cwq->thread;

	if (p != NULL) {
		if (cpu >= 0)
			kthread_bind(p, cpu);
		wake_up_process(p);
	}
}

struct workqueue_struct *__create_workqueue_key(const char *name,
						unsigned int flags,
						struct lock_class_key *key,
						const char *lock_name)
{
	struct workqueue_struct *wq;
	struct cpu_workqueue_struct *cwq;
	int err = 0, cpu;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		goto err;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq)
		goto err;

	wq->flags = flags;
	wq->name = name;
	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
	INIT_LIST_HEAD(&wq->list);

	if (flags & WQ_SINGLE_THREAD) {
		cwq = init_cpu_workqueue(wq, singlethread_cpu);
		err = create_workqueue_thread(cwq, singlethread_cpu);
		start_workqueue_thread(cwq, -1);
	} else {
		cpu_maps_update_begin();
		/*
		 * We must place this wq on list even if the code below fails.
		 * cpu_down(cpu) can remove cpu from cpu_populated_map before
		 * destroy_workqueue() takes the lock, in that case we leak
		 * cwq[cpu]->thread.
		 */
		spin_lock(&workqueue_lock);
		list_add(&wq->list, &workqueues);
		spin_unlock(&workqueue_lock);
		/*
		 * We must initialize cwqs for each possible cpu even if we
		 * are going to call destroy_workqueue() finally. Otherwise
		 * cpu_up() can hit the uninitialized cwq once we drop the
		 * lock.
		 */
		for_each_possible_cpu(cpu) {
			cwq = init_cpu_workqueue(wq, cpu);
			if (err || !cpu_online(cpu))
				continue;
			err = create_workqueue_thread(cwq, cpu);
			start_workqueue_thread(cwq, cpu);
		}
		cpu_maps_update_done();
	}

	if (err) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
err:
	if (wq) {
		free_percpu(wq->cpu_wq);
		kfree(wq);
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(__create_workqueue_key);

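/*
 * __create_workqueue_key() is normally reached through the
 * create_workqueue(), create_freezeable_workqueue() and
 * create_singlethread_workqueue() wrappers in <linux/workqueue.h>.
 * A minimal lifecycle sketch (my_wq and my_work are illustrative names):
 *
 *	struct workqueue_struct *my_wq;
 *
 *	my_wq = create_singlethread_workqueue("my_wq");
 *	if (!my_wq)
 *		return -ENOMEM;
 *	queue_work(my_wq, &my_work);
 *	// ...
 *	destroy_workqueue(my_wq);	// flushes pending work first
 */
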
static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
{
	/*
	 * Our caller is either destroy_workqueue() or CPU_POST_DEAD,
	 * cpu_add_remove_lock protects cwq->thread.
	 */
	if (cwq->thread == NULL)
		return;

	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	flush_cpu_workqueue(cwq);
	/*
	 * If the caller is CPU_POST_DEAD and cwq->worklist was not empty,
	 * a concurrent flush_workqueue() can insert a barrier after us.
	 * However, in that case run_workqueue() won't return and check
	 * kthread_should_stop() until it flushes all work_struct's.
	 * When ->worklist becomes empty it is safe to exit because no
	 * more work_structs can be queued on this cwq: flush_workqueue
	 * checks list_empty(), and a "normal" queue_work() can't use
	 * a dead CPU.
	 */
	trace_workqueue_destruction(cwq->thread);
	kthread_stop(cwq->thread);
	cwq->thread = NULL;
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	const struct cpumask *cpu_map = wq_cpu_map(wq);
	int cpu;

	cpu_maps_update_begin();
	spin_lock(&workqueue_lock);
	list_del(&wq->list);
	spin_unlock(&workqueue_lock);

	for_each_cpu(cpu, cpu_map)
		cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
	cpu_maps_update_done();

	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
						unsigned long action,
						void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	int err = 0;

	action &= ~CPU_TASKS_FROZEN;

	switch (action) {
	case CPU_UP_PREPARE:
		cpumask_set_cpu(cpu, cpu_populated_map);
	}
undo:
	list_for_each_entry(wq, &workqueues, list) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);

		switch (action) {
		case CPU_UP_PREPARE:
			err = create_workqueue_thread(cwq, cpu);
			if (!err)
				break;
			printk(KERN_ERR "workqueue [%s] for %i failed\n",
				wq->name, cpu);
			action = CPU_UP_CANCELED;
			err = -ENOMEM;
			goto undo;

		case CPU_ONLINE:
			start_workqueue_thread(cwq, cpu);
			break;

		case CPU_UP_CANCELED:
			start_workqueue_thread(cwq, -1);
		case CPU_POST_DEAD:
			cleanup_workqueue_thread(cwq);
			break;
		}
	}

	switch (action) {
	case CPU_UP_CANCELED:
	case CPU_POST_DEAD:
		cpumask_clear_cpu(cpu, cpu_populated_map);
	}

	return notifier_from_errno(err);
}

#ifdef CONFIG_SMP

struct work_for_cpu {
	struct completion completion;
	long (*fn)(void *);
	void *arg;
	long ret;
};

static int do_work_for_cpu(void *_wfc)
{
	struct work_for_cpu *wfc = _wfc;
	wfc->ret = wfc->fn(wfc->arg);
	complete(&wfc->completion);
	return 0;
}

/**
 * work_on_cpu - run a function in user context on a particular cpu
 * @cpu: the cpu to run on
 * @fn: the function to run
 * @arg: the function arg
 *
 * This will return the value @fn returns.
 * It is up to the caller to ensure that the cpu doesn't go offline.
 * The caller must not hold any locks which would prevent @fn from completing.
 */
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
	struct task_struct *sub_thread;
	struct work_for_cpu wfc = {
		.completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
		.fn = fn,
		.arg = arg,
	};

	sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
	if (IS_ERR(sub_thread))
		return PTR_ERR(sub_thread);
	kthread_bind(sub_thread, cpu);
	wake_up_process(sub_thread);
	wait_for_completion(&wfc.completion);
	return wfc.ret;
}
EXPORT_SYMBOL_GPL(work_on_cpu);
#endif /* CONFIG_SMP */

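/*
 * Example: evaluating a function on a specific CPU and collecting its
 * return value.  A minimal sketch (read_my_state is an illustrative
 * name); the caller keeps the CPU online around the call:
 *
 *	static long read_my_state(void *arg)
 *	{
 *		return (long)smp_processor_id();
 *	}
 *
 *	get_online_cpus();
 *	if (cpu_online(2))
 *		ret = work_on_cpu(2, read_my_state, NULL);
 *	put_online_cpus();
 */
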
void __init init_workqueues(void)
{
	alloc_cpumask_var(&cpu_populated_map, GFP_KERNEL);

	cpumask_copy(cpu_populated_map, cpu_online_mask);
	singlethread_cpu = cpumask_first(cpu_possible_mask);
	cpu_singlethread_map = cpumask_of(singlethread_cpu);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}