workqueue: kill cpu_populated_map
1 /*
2 * linux/kernel/workqueue.c
3 *
4 * Generic mechanism for defining kernel helper threads for running
5 * arbitrary tasks in process context.
6 *
7 * Started by Ingo Molnar, Copyright (C) 2002
8 *
9 * Derived from the taskqueue/keventd code by:
10 *
11 * David Woodhouse <dwmw2@infradead.org>
12 * Andrew Morton
13 * Kai Petzke <wpp@marie.physik.tu-berlin.de>
14 * Theodore Ts'o <tytso@mit.edu>
15 *
16 * Made to use alloc_percpu by Christoph Lameter.
17 */
18
19 #include <linux/module.h>
20 #include <linux/kernel.h>
21 #include <linux/sched.h>
22 #include <linux/init.h>
23 #include <linux/signal.h>
24 #include <linux/completion.h>
25 #include <linux/workqueue.h>
26 #include <linux/slab.h>
27 #include <linux/cpu.h>
28 #include <linux/notifier.h>
29 #include <linux/kthread.h>
30 #include <linux/hardirq.h>
31 #include <linux/mempolicy.h>
32 #include <linux/freezer.h>
33 #include <linux/kallsyms.h>
34 #include <linux/debug_locks.h>
35 #include <linux/lockdep.h>
36
37 /*
38 * Structure fields follow one of the following exclusion rules.
39 *
40 * I: Set during initialization and read-only afterwards.
41 *
42 * L: cwq->lock protected. Access with cwq->lock held.
43 *
44 * W: workqueue_lock protected.
45 */
46
47 /*
48 * The per-CPU workqueue (if single thread, we always use the first
49 * possible cpu).
50 */
51 struct cpu_workqueue_struct {
52
53 spinlock_t lock;
54
55 struct list_head worklist;
56 wait_queue_head_t more_work;
57 struct work_struct *current_work;
58 unsigned int cpu;
59
60 struct workqueue_struct *wq; /* I: the owning workqueue */
61 struct task_struct *thread;
62 } ____cacheline_aligned;
63
64 /*
65 * The externally visible workqueue abstraction is an array of
66 * per-CPU workqueues:
67 */
68 struct workqueue_struct {
69 unsigned int flags; /* I: WQ_* flags */
70 struct cpu_workqueue_struct *cpu_wq; /* I: cwq's */
71 struct list_head list; /* W: list of all workqueues */
72 const char *name; /* I: workqueue name */
73 #ifdef CONFIG_LOCKDEP
74 struct lockdep_map lockdep_map;
75 #endif
76 };
77
78 #ifdef CONFIG_DEBUG_OBJECTS_WORK
79
80 static struct debug_obj_descr work_debug_descr;
81
82 /*
83 * fixup_init is called when:
84 * - an active object is initialized
85 */
86 static int work_fixup_init(void *addr, enum debug_obj_state state)
87 {
88 struct work_struct *work = addr;
89
90 switch (state) {
91 case ODEBUG_STATE_ACTIVE:
92 cancel_work_sync(work);
93 debug_object_init(work, &work_debug_descr);
94 return 1;
95 default:
96 return 0;
97 }
98 }
99
100 /*
101 * fixup_activate is called when:
102 * - an active object is activated
103 * - an unknown object is activated (might be a statically initialized object)
104 */
105 static int work_fixup_activate(void *addr, enum debug_obj_state state)
106 {
107 struct work_struct *work = addr;
108
109 switch (state) {
110
111 case ODEBUG_STATE_NOTAVAILABLE:
112 /*
113 * This is not really a fixup. The work struct was
114 * statically initialized. We just make sure that it
115 * is tracked in the object tracker.
116 */
117 if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
118 debug_object_init(work, &work_debug_descr);
119 debug_object_activate(work, &work_debug_descr);
120 return 0;
121 }
122 WARN_ON_ONCE(1);
123 return 0;
124
125 case ODEBUG_STATE_ACTIVE:
126 WARN_ON(1);
127
128 default:
129 return 0;
130 }
131 }
132
133 /*
134 * fixup_free is called when:
135 * - an active object is freed
136 */
137 static int work_fixup_free(void *addr, enum debug_obj_state state)
138 {
139 struct work_struct *work = addr;
140
141 switch (state) {
142 case ODEBUG_STATE_ACTIVE:
143 cancel_work_sync(work);
144 debug_object_free(work, &work_debug_descr);
145 return 1;
146 default:
147 return 0;
148 }
149 }
150
151 static struct debug_obj_descr work_debug_descr = {
152 .name = "work_struct",
153 .fixup_init = work_fixup_init,
154 .fixup_activate = work_fixup_activate,
155 .fixup_free = work_fixup_free,
156 };
157
158 static inline void debug_work_activate(struct work_struct *work)
159 {
160 debug_object_activate(work, &work_debug_descr);
161 }
162
163 static inline void debug_work_deactivate(struct work_struct *work)
164 {
165 debug_object_deactivate(work, &work_debug_descr);
166 }
167
168 void __init_work(struct work_struct *work, int onstack)
169 {
170 if (onstack)
171 debug_object_init_on_stack(work, &work_debug_descr);
172 else
173 debug_object_init(work, &work_debug_descr);
174 }
175 EXPORT_SYMBOL_GPL(__init_work);
176
177 void destroy_work_on_stack(struct work_struct *work)
178 {
179 debug_object_free(work, &work_debug_descr);
180 }
181 EXPORT_SYMBOL_GPL(destroy_work_on_stack);
182
183 #else
184 static inline void debug_work_activate(struct work_struct *work) { }
185 static inline void debug_work_deactivate(struct work_struct *work) { }
186 #endif
187
188 /* Serializes the accesses to the list of workqueues. */
189 static DEFINE_SPINLOCK(workqueue_lock);
190 static LIST_HEAD(workqueues);
191
192 static int singlethread_cpu __read_mostly;
193
194 static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
195 struct workqueue_struct *wq)
196 {
197 return per_cpu_ptr(wq->cpu_wq, cpu);
198 }
199
200 static struct cpu_workqueue_struct *target_cwq(unsigned int cpu,
201 struct workqueue_struct *wq)
202 {
203 if (unlikely(wq->flags & WQ_SINGLE_THREAD))
204 cpu = singlethread_cpu;
205 return get_cwq(cpu, wq);
206 }
207
208 /*
209 * Set the workqueue on which a work item is to be run
210 * - Must *only* be called if the pending flag is set
211 */
212 static inline void set_wq_data(struct work_struct *work,
213 struct cpu_workqueue_struct *cwq,
214 unsigned long extra_flags)
215 {
216 BUG_ON(!work_pending(work));
217
218 atomic_long_set(&work->data, (unsigned long)cwq | work_static(work) |
219 WORK_STRUCT_PENDING | extra_flags);
220 }
221
222 /*
223 * Clear WORK_STRUCT_PENDING and the workqueue on which it was queued.
224 */
225 static inline void clear_wq_data(struct work_struct *work)
226 {
227 atomic_long_set(&work->data, work_static(work));
228 }
229
230 static inline struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
231 {
232 return (void *)(atomic_long_read(&work->data) &
233 WORK_STRUCT_WQ_DATA_MASK);
234 }
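/*
 * For illustration (a sketch of the encoding used above, not a new API):
 * work->data packs the cwq pointer and the WORK_STRUCT_* flag bits into
 * a single word.  cpu_workqueue_structs are cacheline aligned, so the
 * low bits of the pointer are free to carry the flags:
 *
 *	work->data = (unsigned long)cwq | WORK_STRUCT_PENDING | extra_flags;
 *	cwq = (void *)(work->data & WORK_STRUCT_WQ_DATA_MASK);
 */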
235
236 /**
237 * insert_work - insert a work into cwq
238 * @cwq: cwq @work belongs to
239 * @work: work to insert
240 * @head: insertion point
241 * @extra_flags: extra WORK_STRUCT_* flags to set
242 *
243 * Insert @work into @cwq after @head.
244 *
245 * CONTEXT:
246 * spin_lock_irq(cwq->lock).
247 */
248 static void insert_work(struct cpu_workqueue_struct *cwq,
249 struct work_struct *work, struct list_head *head,
250 unsigned int extra_flags)
251 {
252 /* we own @work, set data and link */
253 set_wq_data(work, cwq, extra_flags);
254
255 /*
256 * Ensure that we get the right work->data if we see the
257 * result of list_add() below, see try_to_grab_pending().
258 */
259 smp_wmb();
260
261 list_add_tail(&work->entry, head);
262 wake_up(&cwq->more_work);
263 }
264
265 static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
266 struct work_struct *work)
267 {
268 struct cpu_workqueue_struct *cwq = target_cwq(cpu, wq);
269 unsigned long flags;
270
271 debug_work_activate(work);
272 spin_lock_irqsave(&cwq->lock, flags);
273 BUG_ON(!list_empty(&work->entry));
274 insert_work(cwq, work, &cwq->worklist, 0);
275 spin_unlock_irqrestore(&cwq->lock, flags);
276 }
277
278 /**
279 * queue_work - queue work on a workqueue
280 * @wq: workqueue to use
281 * @work: work to queue
282 *
283 * Returns 0 if @work was already on a queue, non-zero otherwise.
284 *
285 * We queue the work to the CPU on which it was submitted, but if the CPU dies
286 * it can be processed by another CPU.
287 */
288 int queue_work(struct workqueue_struct *wq, struct work_struct *work)
289 {
290 int ret;
291
292 ret = queue_work_on(get_cpu(), wq, work);
293 put_cpu();
294
295 return ret;
296 }
297 EXPORT_SYMBOL_GPL(queue_work);
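/*
 * Illustrative usage sketch; my_work_fn and my_wq are hypothetical
 * names, not part of this file:
 *
 *	static void my_work_fn(struct work_struct *work);
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	queue_work(my_wq, &my_work);
 *
 * my_work_fn() then runs in process context in the worker thread of the
 * CPU that queued it (or another CPU if that one goes down).
 */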
298
299 /**
300 * queue_work_on - queue work on specific cpu
301 * @cpu: CPU number to execute work on
302 * @wq: workqueue to use
303 * @work: work to queue
304 *
305 * Returns 0 if @work was already on a queue, non-zero otherwise.
306 *
307 * We queue the work to a specific CPU, the caller must ensure it
308 * can't go away.
309 */
310 int
311 queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
312 {
313 int ret = 0;
314
315 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
316 __queue_work(cpu, wq, work);
317 ret = 1;
318 }
319 return ret;
320 }
321 EXPORT_SYMBOL_GPL(queue_work_on);
322
323 static void delayed_work_timer_fn(unsigned long __data)
324 {
325 struct delayed_work *dwork = (struct delayed_work *)__data;
326 struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
327
328 __queue_work(smp_processor_id(), cwq->wq, &dwork->work);
329 }
330
331 /**
332 * queue_delayed_work - queue work on a workqueue after delay
333 * @wq: workqueue to use
334 * @dwork: delayable work to queue
335 * @delay: number of jiffies to wait before queueing
336 *
337 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
338 */
339 int queue_delayed_work(struct workqueue_struct *wq,
340 struct delayed_work *dwork, unsigned long delay)
341 {
342 if (delay == 0)
343 return queue_work(wq, &dwork->work);
344
345 return queue_delayed_work_on(-1, wq, dwork, delay);
346 }
347 EXPORT_SYMBOL_GPL(queue_delayed_work);
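/*
 * Illustrative usage sketch; my_dwork_fn and my_wq are hypothetical:
 *
 *	static DECLARE_DELAYED_WORK(my_dwork, my_dwork_fn);
 *
 *	queue_delayed_work(my_wq, &my_dwork, HZ);
 *
 * queues my_dwork_fn() to run roughly one second (HZ jiffies) later.
 */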
348
349 /**
350 * queue_delayed_work_on - queue work on specific CPU after delay
351 * @cpu: CPU number to execute work on
352 * @wq: workqueue to use
353 * @dwork: work to queue
354 * @delay: number of jiffies to wait before queueing
355 *
356 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
357 */
358 int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
359 struct delayed_work *dwork, unsigned long delay)
360 {
361 int ret = 0;
362 struct timer_list *timer = &dwork->timer;
363 struct work_struct *work = &dwork->work;
364
365 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
366 BUG_ON(timer_pending(timer));
367 BUG_ON(!list_empty(&work->entry));
368
369 timer_stats_timer_set_start_info(&dwork->timer);
370
371 /* This stores cwq for the moment, for the timer_fn */
372 set_wq_data(work, target_cwq(raw_smp_processor_id(), wq), 0);
373 timer->expires = jiffies + delay;
374 timer->data = (unsigned long)dwork;
375 timer->function = delayed_work_timer_fn;
376
377 if (unlikely(cpu >= 0))
378 add_timer_on(timer, cpu);
379 else
380 add_timer(timer);
381 ret = 1;
382 }
383 return ret;
384 }
385 EXPORT_SYMBOL_GPL(queue_delayed_work_on);
386
387 /**
388 * process_one_work - process single work
389 * @cwq: cwq to process work for
390 * @work: work to process
391 *
392 * Process @work. This function contains all the logic necessary to
393 * process a single work item, including synchronization against and
394 * interaction with other workers on the same cpu, queueing and
395 * flushing. As long as the context requirement is met, any worker can
396 * call this function to process a work item.
397 *
398 * CONTEXT:
399 * spin_lock_irq(cwq->lock) which is released and regrabbed.
400 */
401 static void process_one_work(struct cpu_workqueue_struct *cwq,
402 struct work_struct *work)
403 {
404 work_func_t f = work->func;
405 #ifdef CONFIG_LOCKDEP
406 /*
407 * It is permissible to free the struct work_struct from
408 * inside the function that is called from it; we need to take
409 * this into account for lockdep too. To avoid bogus "held
410 * lock freed" warnings as well as problems when looking into
411 * work->lockdep_map, make a copy and use that here.
412 */
413 struct lockdep_map lockdep_map = work->lockdep_map;
414 #endif
415 /* claim and process */
416 debug_work_deactivate(work);
417 cwq->current_work = work;
418 list_del_init(&work->entry);
419
420 spin_unlock_irq(&cwq->lock);
421
422 BUG_ON(get_wq_data(work) != cwq);
423 work_clear_pending(work);
424 lock_map_acquire(&cwq->wq->lockdep_map);
425 lock_map_acquire(&lockdep_map);
426 f(work);
427 lock_map_release(&lockdep_map);
428 lock_map_release(&cwq->wq->lockdep_map);
429
430 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
431 printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
432 "%s/0x%08x/%d\n",
433 current->comm, preempt_count(), task_pid_nr(current));
434 printk(KERN_ERR " last function: ");
435 print_symbol("%s\n", (unsigned long)f);
436 debug_show_held_locks(current);
437 dump_stack();
438 }
439
440 spin_lock_irq(&cwq->lock);
441
442 /* we're done with it, release */
443 cwq->current_work = NULL;
444 }
445
446 static void run_workqueue(struct cpu_workqueue_struct *cwq)
447 {
448 spin_lock_irq(&cwq->lock);
449 while (!list_empty(&cwq->worklist)) {
450 struct work_struct *work = list_entry(cwq->worklist.next,
451 struct work_struct, entry);
452 process_one_work(cwq, work);
453 }
454 spin_unlock_irq(&cwq->lock);
455 }
456
457 /**
458 * worker_thread - the worker thread function
459 * @__cwq: cwq to serve
460 *
461 * The cwq worker thread function.
462 */
463 static int worker_thread(void *__cwq)
464 {
465 struct cpu_workqueue_struct *cwq = __cwq;
466 DEFINE_WAIT(wait);
467
468 if (cwq->wq->flags & WQ_FREEZEABLE)
469 set_freezable();
470
471 for (;;) {
472 prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
473 if (!freezing(current) &&
474 !kthread_should_stop() &&
475 list_empty(&cwq->worklist))
476 schedule();
477 finish_wait(&cwq->more_work, &wait);
478
479 try_to_freeze();
480
481 if (kthread_should_stop())
482 break;
483
484 if (unlikely(!cpumask_equal(&cwq->thread->cpus_allowed,
485 get_cpu_mask(cwq->cpu))))
486 set_cpus_allowed_ptr(cwq->thread,
487 get_cpu_mask(cwq->cpu));
488 run_workqueue(cwq);
489 }
490
491 return 0;
492 }
493
494 struct wq_barrier {
495 struct work_struct work;
496 struct completion done;
497 };
498
499 static void wq_barrier_func(struct work_struct *work)
500 {
501 struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
502 complete(&barr->done);
503 }
504
505 /**
506 * insert_wq_barrier - insert a barrier work
507 * @cwq: cwq to insert barrier into
508 * @barr: wq_barrier to insert
509 * @head: insertion point
510 *
511 * Insert barrier @barr into @cwq before @head.
512 *
513 * CONTEXT:
514 * spin_lock_irq(cwq->lock).
515 */
516 static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
517 struct wq_barrier *barr, struct list_head *head)
518 {
519 /*
520 * debugobject calls are safe here even with cwq->lock locked
521 * as we know for sure that this will not trigger any of the
522 * checks and call back into the fixup functions where we
523 * might deadlock.
524 */
525 INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
526 __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
527 init_completion(&barr->done);
528
529 debug_work_activate(&barr->work);
530 insert_work(cwq, &barr->work, head, 0);
531 }
532
533 static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
534 {
535 int active = 0;
536 struct wq_barrier barr;
537
538 WARN_ON(cwq->thread == current);
539
540 spin_lock_irq(&cwq->lock);
541 if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
542 insert_wq_barrier(cwq, &barr, &cwq->worklist);
543 active = 1;
544 }
545 spin_unlock_irq(&cwq->lock);
546
547 if (active) {
548 wait_for_completion(&barr.done);
549 destroy_work_on_stack(&barr.work);
550 }
551
552 return active;
553 }
554
555 /**
556 * flush_workqueue - ensure that any scheduled work has run to completion.
557 * @wq: workqueue to flush
558 *
559 * Forces execution of the workqueue and blocks until its completion.
560 * This is typically used in driver shutdown handlers.
561 *
562 * We sleep until all work items which were queued on entry have been handled,
563 * but we are not livelocked by new incoming ones.
564 */
565 void flush_workqueue(struct workqueue_struct *wq)
566 {
567 int cpu;
568
569 might_sleep();
570 lock_map_acquire(&wq->lockdep_map);
571 lock_map_release(&wq->lockdep_map);
572 for_each_possible_cpu(cpu)
573 flush_cpu_workqueue(get_cwq(cpu, wq));
574 }
575 EXPORT_SYMBOL_GPL(flush_workqueue);
576
577 /**
578 * flush_work - block until a work_struct's callback has terminated
579 * @work: the work which is to be flushed
580 *
581 * Returns false if @work has already terminated.
582 *
583 * It is expected that, prior to calling flush_work(), the caller has
584 * arranged for the work to not be requeued, otherwise it doesn't make
585 * sense to use this function.
586 */
587 int flush_work(struct work_struct *work)
588 {
589 struct cpu_workqueue_struct *cwq;
590 struct list_head *prev;
591 struct wq_barrier barr;
592
593 might_sleep();
594 cwq = get_wq_data(work);
595 if (!cwq)
596 return 0;
597
598 lock_map_acquire(&cwq->wq->lockdep_map);
599 lock_map_release(&cwq->wq->lockdep_map);
600
601 spin_lock_irq(&cwq->lock);
602 if (!list_empty(&work->entry)) {
603 /*
604 * See the comment near try_to_grab_pending()->smp_rmb().
605 * If it was re-queued under us we are not going to wait.
606 */
607 smp_rmb();
608 if (unlikely(cwq != get_wq_data(work)))
609 goto already_gone;
610 prev = &work->entry;
611 } else {
612 if (cwq->current_work != work)
613 goto already_gone;
614 prev = &cwq->worklist;
615 }
616 insert_wq_barrier(cwq, &barr, prev->next);
617
618 spin_unlock_irq(&cwq->lock);
619 wait_for_completion(&barr.done);
620 destroy_work_on_stack(&barr.work);
621 return 1;
622 already_gone:
623 spin_unlock_irq(&cwq->lock);
624 return 0;
625 }
626 EXPORT_SYMBOL_GPL(flush_work);
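/*
 * Illustrative usage sketch (my_dev is a hypothetical name); the caller
 * first makes sure the work won't be requeued, then flushes it:
 *
 *	my_dev->stopping = true;
 *	flush_work(&my_dev->work);
 */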
627
628 /*
629 * Upon a successful return (>= 0), the caller "owns" the WORK_STRUCT_PENDING bit,
630 * so this work can't be re-armed in any way.
631 */
632 static int try_to_grab_pending(struct work_struct *work)
633 {
634 struct cpu_workqueue_struct *cwq;
635 int ret = -1;
636
637 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
638 return 0;
639
640 /*
641 * The queueing is in progress, or it is already queued. Try to
642 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
643 */
644
645 cwq = get_wq_data(work);
646 if (!cwq)
647 return ret;
648
649 spin_lock_irq(&cwq->lock);
650 if (!list_empty(&work->entry)) {
651 /*
652 * This work is queued, but perhaps we locked the wrong cwq.
653 * In that case we must see the new value after rmb(), see
654 * insert_work()->wmb().
655 */
656 smp_rmb();
657 if (cwq == get_wq_data(work)) {
658 debug_work_deactivate(work);
659 list_del_init(&work->entry);
660 ret = 1;
661 }
662 }
663 spin_unlock_irq(&cwq->lock);
664
665 return ret;
666 }
667
668 static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
669 struct work_struct *work)
670 {
671 struct wq_barrier barr;
672 int running = 0;
673
674 spin_lock_irq(&cwq->lock);
675 if (unlikely(cwq->current_work == work)) {
676 insert_wq_barrier(cwq, &barr, cwq->worklist.next);
677 running = 1;
678 }
679 spin_unlock_irq(&cwq->lock);
680
681 if (unlikely(running)) {
682 wait_for_completion(&barr.done);
683 destroy_work_on_stack(&barr.work);
684 }
685 }
686
687 static void wait_on_work(struct work_struct *work)
688 {
689 struct cpu_workqueue_struct *cwq;
690 struct workqueue_struct *wq;
691 int cpu;
692
693 might_sleep();
694
695 lock_map_acquire(&work->lockdep_map);
696 lock_map_release(&work->lockdep_map);
697
698 cwq = get_wq_data(work);
699 if (!cwq)
700 return;
701
702 wq = cwq->wq;
703
704 for_each_possible_cpu(cpu)
705 wait_on_cpu_work(get_cwq(cpu, wq), work);
706 }
707
708 static int __cancel_work_timer(struct work_struct *work,
709 struct timer_list* timer)
710 {
711 int ret;
712
713 do {
714 ret = (timer && likely(del_timer(timer)));
715 if (!ret)
716 ret = try_to_grab_pending(work);
717 wait_on_work(work);
718 } while (unlikely(ret < 0));
719
720 clear_wq_data(work);
721 return ret;
722 }
723
724 /**
725 * cancel_work_sync - block until a work_struct's callback has terminated
726 * @work: the work which is to be flushed
727 *
728 * Returns true if @work was pending.
729 *
730 * cancel_work_sync() will cancel the work if it is queued. If the work's
731 * callback appears to be running, cancel_work_sync() will block until it
732 * has completed.
733 *
734 * It is possible to use this function if the work re-queues itself. It can
735 * cancel the work even if it migrates to another workqueue, however in that
736 * case it only guarantees that work->func() has completed on the last queued
737 * workqueue.
738 *
739 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
740 * pending, otherwise it goes into a busy-wait loop until the timer expires.
741 *
742 * The caller must ensure that workqueue_struct on which this work was last
743 * queued can't be destroyed before this function returns.
744 */
745 int cancel_work_sync(struct work_struct *work)
746 {
747 return __cancel_work_timer(work, NULL);
748 }
749 EXPORT_SYMBOL_GPL(cancel_work_sync);
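/*
 * Illustrative usage sketch (my_dev is a hypothetical name): a typical
 * teardown path cancels the work before freeing the object embedding it:
 *
 *	cancel_work_sync(&my_dev->work);
 *	kfree(my_dev);
 */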
750
751 /**
752 * cancel_delayed_work_sync - reliably kill off a delayed work.
753 * @dwork: the delayed work struct
754 *
755 * Returns true if @dwork was pending.
756 *
757 * It is possible to use this function if @dwork rearms itself via queue_work()
758 * or queue_delayed_work(). See also the comment for cancel_work_sync().
759 */
760 int cancel_delayed_work_sync(struct delayed_work *dwork)
761 {
762 return __cancel_work_timer(&dwork->work, &dwork->timer);
763 }
764 EXPORT_SYMBOL(cancel_delayed_work_sync);
765
766 static struct workqueue_struct *keventd_wq __read_mostly;
767
768 /**
769 * schedule_work - put a work item in the global workqueue
770 * @work: job to be done
771 *
772 * Returns zero if @work was already on the kernel-global workqueue and
773 * non-zero otherwise.
774 *
775 * This puts a job in the kernel-global workqueue if it was not already
776 * queued and leaves it in the same position on the kernel-global
777 * workqueue otherwise.
778 */
779 int schedule_work(struct work_struct *work)
780 {
781 return queue_work(keventd_wq, work);
782 }
783 EXPORT_SYMBOL(schedule_work);
784
785 /**
786 * schedule_work_on - put a work item on a specific cpu
787 * @cpu: cpu to put the work item on
788 * @work: job to be done
789 *
790 * This puts a job on a specific cpu.
791 */
792 int schedule_work_on(int cpu, struct work_struct *work)
793 {
794 return queue_work_on(cpu, keventd_wq, work);
795 }
796 EXPORT_SYMBOL(schedule_work_on);
797
798 /**
799 * schedule_delayed_work - put work task in global workqueue after delay
800 * @dwork: job to be done
801 * @delay: number of jiffies to wait or 0 for immediate execution
802 *
803 * After waiting for a given time this puts a job in the kernel-global
804 * workqueue.
805 */
806 int schedule_delayed_work(struct delayed_work *dwork,
807 unsigned long delay)
808 {
809 return queue_delayed_work(keventd_wq, dwork, delay);
810 }
811 EXPORT_SYMBOL(schedule_delayed_work);
812
813 /**
814 * flush_delayed_work - block until a delayed_work's callback has terminated
815 * @dwork: the delayed work which is to be flushed
816 *
817 * Any timeout is cancelled, and any pending work is run immediately.
818 */
819 void flush_delayed_work(struct delayed_work *dwork)
820 {
821 if (del_timer_sync(&dwork->timer)) {
822 __queue_work(get_cpu(), get_wq_data(&dwork->work)->wq,
823 &dwork->work);
824 put_cpu();
825 }
826 flush_work(&dwork->work);
827 }
828 EXPORT_SYMBOL(flush_delayed_work);
829
830 /**
831 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
832 * @cpu: cpu to use
833 * @dwork: job to be done
834 * @delay: number of jiffies to wait
835 *
836 * After waiting for a given time this puts a job in the kernel-global
837 * workqueue on the specified CPU.
838 */
839 int schedule_delayed_work_on(int cpu,
840 struct delayed_work *dwork, unsigned long delay)
841 {
842 return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
843 }
844 EXPORT_SYMBOL(schedule_delayed_work_on);
845
846 /**
847 * schedule_on_each_cpu - call a function on each online CPU from keventd
848 * @func: the function to call
849 *
850 * Returns zero on success.
851 * Returns a negative errno on failure.
852 *
853 * schedule_on_each_cpu() is very slow.
854 */
855 int schedule_on_each_cpu(work_func_t func)
856 {
857 int cpu;
858 int orig = -1;
859 struct work_struct *works;
860
861 works = alloc_percpu(struct work_struct);
862 if (!works)
863 return -ENOMEM;
864
865 get_online_cpus();
866
867 /*
868 * When running in keventd don't schedule a work item on
869 * itself. We can call the function directly because the
870 * workqueue is already bound to this CPU. This is also faster.
871 */
872 if (current_is_keventd())
873 orig = raw_smp_processor_id();
874
875 for_each_online_cpu(cpu) {
876 struct work_struct *work = per_cpu_ptr(works, cpu);
877
878 INIT_WORK(work, func);
879 if (cpu != orig)
880 schedule_work_on(cpu, work);
881 }
882 if (orig >= 0)
883 func(per_cpu_ptr(works, orig));
884
885 for_each_online_cpu(cpu)
886 flush_work(per_cpu_ptr(works, cpu));
887
888 put_online_cpus();
889 free_percpu(works);
890 return 0;
891 }
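/*
 * Illustrative usage sketch (my_drain_fn is a hypothetical function):
 *
 *	err = schedule_on_each_cpu(my_drain_fn);
 *
 * runs my_drain_fn() once on every online CPU from keventd and waits
 * for all of the invocations to finish before returning.
 */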
892
893 /**
894 * flush_scheduled_work - ensure that any scheduled work has run to completion.
895 *
896 * Forces execution of the kernel-global workqueue and blocks until its
897 * completion.
898 *
899 * Think twice before calling this function! It's very easy to get into
900 * trouble if you don't take great care. Either of the following situations
901 * will lead to deadlock:
902 *
903 * One of the work items currently on the workqueue needs to acquire
904 * a lock held by your code or its caller.
905 *
906 * Your code is running in the context of a work routine.
907 *
908 * They will be detected by lockdep when they occur, but the first might not
909 * occur very often. It depends on what work items are on the workqueue and
910 * what locks they need, which you have no control over.
911 *
912 * In most situations flushing the entire workqueue is overkill; you merely
913 * need to know that a particular work item isn't queued and isn't running.
914 * In such cases you should use cancel_delayed_work_sync() or
915 * cancel_work_sync() instead.
916 */
917 void flush_scheduled_work(void)
918 {
919 flush_workqueue(keventd_wq);
920 }
921 EXPORT_SYMBOL(flush_scheduled_work);
922
923 /**
924 * execute_in_process_context - reliably execute the routine with user context
925 * @fn: the function to execute
926 * @ew: guaranteed storage for the execute work structure (must
927 * be available when the work executes)
928 *
929 * Executes the function immediately if process context is available,
930 * otherwise schedules the function for delayed execution.
931 *
932 * Returns: 0 - function was executed
933 * 1 - function was scheduled for execution
934 */
935 int execute_in_process_context(work_func_t fn, struct execute_work *ew)
936 {
937 if (!in_interrupt()) {
938 fn(&ew->work);
939 return 0;
940 }
941
942 INIT_WORK(&ew->work, fn);
943 schedule_work(&ew->work);
944
945 return 1;
946 }
947 EXPORT_SYMBOL_GPL(execute_in_process_context);
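/*
 * Illustrative usage sketch (my_release_fn and my_dev are hypothetical);
 * useful when the caller may be in either interrupt or process context:
 *
 *	execute_in_process_context(my_release_fn, &my_dev->ew);
 *
 * my_dev->ew (a struct execute_work) must stay valid until the function
 * has actually run.
 */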
948
949 int keventd_up(void)
950 {
951 return keventd_wq != NULL;
952 }
953
954 int current_is_keventd(void)
955 {
956 struct cpu_workqueue_struct *cwq;
957 int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
958 int ret = 0;
959
960 BUG_ON(!keventd_wq);
961
962 cwq = get_cwq(cpu, keventd_wq);
963 if (current == cwq->thread)
964 ret = 1;
965
966 return ret;
967
968 }
969
970 static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
971 {
972 struct workqueue_struct *wq = cwq->wq;
973 struct task_struct *p;
974
975 p = kthread_create(worker_thread, cwq, "%s/%d", wq->name, cpu);
976 /*
977 * Nobody can add the work_struct to this cwq,
978 * if (caller is __create_workqueue)
979 * nobody should see this wq
980 * else // caller is CPU_UP_PREPARE
981 * cpu is not on cpu_online_map
982 * so we can abort safely.
983 */
984 if (IS_ERR(p))
985 return PTR_ERR(p);
986 cwq->thread = p;
987
988 return 0;
989 }
990
991 static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
992 {
993 struct task_struct *p = cwq->thread;
994
995 if (p != NULL) {
996 if (cpu >= 0)
997 kthread_bind(p, cpu);
998 wake_up_process(p);
999 }
1000 }
1001
1002 struct workqueue_struct *__create_workqueue_key(const char *name,
1003 unsigned int flags,
1004 struct lock_class_key *key,
1005 const char *lock_name)
1006 {
1007 bool singlethread = flags & WQ_SINGLE_THREAD;
1008 struct workqueue_struct *wq;
1009 int err = 0, cpu;
1010
1011 wq = kzalloc(sizeof(*wq), GFP_KERNEL);
1012 if (!wq)
1013 goto err;
1014
1015 wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
1016 if (!wq->cpu_wq)
1017 goto err;
1018
1019 wq->flags = flags;
1020 wq->name = name;
1021 lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
1022 INIT_LIST_HEAD(&wq->list);
1023
1024 cpu_maps_update_begin();
1025 /*
1026 * We must initialize cwqs for each possible cpu even if we
1027 * are going to call destroy_workqueue() finally. Otherwise
1028 * cpu_up() can hit the uninitialized cwq once we drop the
1029 * lock.
1030 */
1031 for_each_possible_cpu(cpu) {
1032 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
1033
1034 cwq->wq = wq;
1035 cwq->cpu = cpu;
1036 spin_lock_init(&cwq->lock);
1037 INIT_LIST_HEAD(&cwq->worklist);
1038 init_waitqueue_head(&cwq->more_work);
1039
1040 if (err)
1041 continue;
1042 err = create_workqueue_thread(cwq, cpu);
1043 if (cpu_online(cpu) && !singlethread)
1044 start_workqueue_thread(cwq, cpu);
1045 else
1046 start_workqueue_thread(cwq, -1);
1047 }
1048
1049 spin_lock(&workqueue_lock);
1050 list_add(&wq->list, &workqueues);
1051 spin_unlock(&workqueue_lock);
1052
1053 cpu_maps_update_done();
1054
1055 if (err) {
1056 destroy_workqueue(wq);
1057 wq = NULL;
1058 }
1059 return wq;
1060 err:
1061 if (wq) {
1062 free_percpu(wq->cpu_wq);
1063 kfree(wq);
1064 }
1065 return NULL;
1066 }
1067 EXPORT_SYMBOL_GPL(__create_workqueue_key);
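/*
 * Illustrative usage sketch: callers normally use the create_workqueue()
 * or create_singlethread_workqueue() wrappers, which expand to this
 * function ("mywq" is a hypothetical name):
 *
 *	struct workqueue_struct *wq = create_singlethread_workqueue("mywq");
 *	...
 *	destroy_workqueue(wq);
 */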
1068
1069 static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
1070 {
1071 /*
1072 * Our caller is either destroy_workqueue() or CPU_POST_DEAD,
1073 * cpu_add_remove_lock protects cwq->thread.
1074 */
1075 if (cwq->thread == NULL)
1076 return;
1077
1078 lock_map_acquire(&cwq->wq->lockdep_map);
1079 lock_map_release(&cwq->wq->lockdep_map);
1080
1081 flush_cpu_workqueue(cwq);
1082 /*
1083 * If the caller is CPU_POST_DEAD and cwq->worklist was not empty,
1084 * a concurrent flush_workqueue() can insert a barrier after us.
1085 * However, in that case run_workqueue() won't return and check
1086 * kthread_should_stop() until it flushes all work_struct's.
1087 * When ->worklist becomes empty it is safe to exit because no
1088 * more work_structs can be queued on this cwq: flush_workqueue
1089 * checks list_empty(), and a "normal" queue_work() can't use
1090 * a dead CPU.
1091 */
1092 kthread_stop(cwq->thread);
1093 cwq->thread = NULL;
1094 }
1095
1096 /**
1097 * destroy_workqueue - safely terminate a workqueue
1098 * @wq: target workqueue
1099 *
1100 * Safely destroy a workqueue. All work currently pending will be done first.
1101 */
1102 void destroy_workqueue(struct workqueue_struct *wq)
1103 {
1104 int cpu;
1105
1106 cpu_maps_update_begin();
1107 spin_lock(&workqueue_lock);
1108 list_del(&wq->list);
1109 spin_unlock(&workqueue_lock);
1110 cpu_maps_update_done();
1111
1112 for_each_possible_cpu(cpu)
1113 cleanup_workqueue_thread(get_cwq(cpu, wq));
1114
1115 free_percpu(wq->cpu_wq);
1116 kfree(wq);
1117 }
1118 EXPORT_SYMBOL_GPL(destroy_workqueue);
1119
1120 static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
1121 unsigned long action,
1122 void *hcpu)
1123 {
1124 unsigned int cpu = (unsigned long)hcpu;
1125 struct cpu_workqueue_struct *cwq;
1126 struct workqueue_struct *wq;
1127
1128 action &= ~CPU_TASKS_FROZEN;
1129
1130 list_for_each_entry(wq, &workqueues, list) {
1131 if (wq->flags & WQ_SINGLE_THREAD)
1132 continue;
1133
1134 cwq = get_cwq(cpu, wq);
1135
1136 switch (action) {
1137 case CPU_POST_DEAD:
1138 lock_map_acquire(&cwq->wq->lockdep_map);
1139 lock_map_release(&cwq->wq->lockdep_map);
1140 flush_cpu_workqueue(cwq);
1141 break;
1142 }
1143 }
1144
1145 return notifier_from_errno(0);
1146 }
1147
1148 #ifdef CONFIG_SMP
1149
1150 struct work_for_cpu {
1151 struct completion completion;
1152 long (*fn)(void *);
1153 void *arg;
1154 long ret;
1155 };
1156
1157 static int do_work_for_cpu(void *_wfc)
1158 {
1159 struct work_for_cpu *wfc = _wfc;
1160 wfc->ret = wfc->fn(wfc->arg);
1161 complete(&wfc->completion);
1162 return 0;
1163 }
1164
1165 /**
1166 * work_on_cpu - run a function in user context on a particular cpu
1167 * @cpu: the cpu to run on
1168 * @fn: the function to run
1169 * @arg: the function arg
1170 *
1171 * This will return the value @fn returns.
1172 * It is up to the caller to ensure that the cpu doesn't go offline.
1173 * The caller must not hold any locks which would prevent @fn from completing.
1174 */
1175 long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
1176 {
1177 struct task_struct *sub_thread;
1178 struct work_for_cpu wfc = {
1179 .completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
1180 .fn = fn,
1181 .arg = arg,
1182 };
1183
1184 sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
1185 if (IS_ERR(sub_thread))
1186 return PTR_ERR(sub_thread);
1187 kthread_bind(sub_thread, cpu);
1188 wake_up_process(sub_thread);
1189 wait_for_completion(&wfc.completion);
1190 return wfc.ret;
1191 }
1192 EXPORT_SYMBOL_GPL(work_on_cpu);
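/*
 * Illustrative usage sketch (my_probe_fn and my_dev are hypothetical):
 *
 *	get_online_cpus();
 *	ret = work_on_cpu(cpu, my_probe_fn, my_dev);
 *	put_online_cpus();
 *
 * my_probe_fn(my_dev) runs in a freshly created kthread bound to @cpu
 * and its return value is passed back as ret.
 */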
1193 #endif /* CONFIG_SMP */
1194
1195 void __init init_workqueues(void)
1196 {
1197 singlethread_cpu = cpumask_first(cpu_possible_mask);
1198 hotcpu_notifier(workqueue_cpu_callback, 0);
1199 keventd_wq = create_workqueue("events");
1200 BUG_ON(!keventd_wq);
1201 }