S390 topology: don't use kthread() for arch_reinit_sched_domains()
kernel/workqueue.c

/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

        spinlock_t lock;

        struct list_head worklist;
        wait_queue_head_t more_work;
        struct work_struct *current_work;

        struct workqueue_struct *wq;
        struct task_struct *thread;

        int run_depth;          /* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
        struct cpu_workqueue_struct *cpu_wq;
        struct list_head list;
        const char *name;
        int singlethread;
        int freezeable;         /* Freeze threads during suspend */
#ifdef CONFIG_LOCKDEP
        struct lockdep_map lockdep_map;
#endif
};

/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static cpumask_t cpu_singlethread_map __read_mostly;
/*
 * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
 * flushes cwq->worklist. This means that flush_workqueue/wait_on_work
 * which comes in between can't use for_each_online_cpu(). We could
 * use cpu_possible_map, the cpumask below is more a documentation
 * than optimization.
 */
static cpumask_t cpu_populated_map __read_mostly;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
        return wq->singlethread;
}

static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
{
        return is_single_threaded(wq)
                ? &cpu_singlethread_map : &cpu_populated_map;
}

static
struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
{
        if (unlikely(is_single_threaded(wq)))
                cpu = singlethread_cpu;
        return per_cpu_ptr(wq->cpu_wq, cpu);
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
                                struct cpu_workqueue_struct *cwq)
{
        unsigned long new;

        BUG_ON(!work_pending(work));

        new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
        new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
        atomic_long_set(&work->data, new);
}

static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
        return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}
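
/*
 * Note on the encoding used above: work->data packs two things into a
 * single atomic_long_t. The low WORK_STRUCT_FLAG_MASK bits carry flags
 * such as WORK_STRUCT_PENDING, and the remaining bits carry the
 * cpu_workqueue_struct pointer. This works because a cwq is
 * ____cacheline_aligned, so its low bits are known to be zero:
 * set_wq_data() ORs the pointer with the flag bits, and get_wq_data()
 * masks the flags off again to recover the pointer.
 */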

static void insert_work(struct cpu_workqueue_struct *cwq,
                        struct work_struct *work, struct list_head *head)
{
        set_wq_data(work, cwq);
        /*
         * Ensure that we get the right work->data if we see the
         * result of list_add() below, see try_to_grab_pending().
         */
        smp_wmb();
        list_add_tail(&work->entry, head);
        wake_up(&cwq->more_work);
}

static void __queue_work(struct cpu_workqueue_struct *cwq,
                         struct work_struct *work)
{
        unsigned long flags;

        spin_lock_irqsave(&cwq->lock, flags);
        insert_work(cwq, work, &cwq->worklist);
        spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
        int ret = 0;

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                BUG_ON(!list_empty(&work->entry));
                __queue_work(wq_per_cpu(wq, get_cpu()), work);
                put_cpu();
                ret = 1;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
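
/*
 * Usage sketch (illustrative, not part of this file): a hypothetical
 * driver embeds a work_struct in its private data, initializes it with
 * INIT_WORK(), and queues it on its own workqueue. The handler recovers
 * the enclosing structure with container_of(). All names here
 * (my_device, my_wq, my_work_fn) are made up for the example.
 *
 *      struct my_device {
 *              struct workqueue_struct *my_wq;
 *              struct work_struct work;
 *      };
 *
 *      static void my_work_fn(struct work_struct *work)
 *      {
 *              struct my_device *dev =
 *                      container_of(work, struct my_device, work);
 *              // runs in process context in a my_wq worker thread
 *      }
 *
 *      // setup:      INIT_WORK(&dev->work, my_work_fn);
 *      // submission: queue_work(dev->my_wq, &dev->work);
 *      //             (safe from irq context; returns 0 if already pending)
 */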

/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU, the caller must ensure it
 * can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
        int ret = 0;

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                BUG_ON(!list_empty(&work->entry));
                __queue_work(wq_per_cpu(wq, cpu), work);
                ret = 1;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);

static void delayed_work_timer_fn(unsigned long __data)
{
        struct delayed_work *dwork = (struct delayed_work *)__data;
        struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
        struct workqueue_struct *wq = cwq->wq;

        __queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay)
{
        if (delay == 0)
                return queue_work(wq, &dwork->work);

        return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
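
/*
 * Usage sketch (illustrative, not part of this file): a delayed work
 * item is a work_struct plus a timer; queue_delayed_work() arms the
 * timer, and delayed_work_timer_fn() above moves the work onto the
 * queue when the timer fires. The names below are hypothetical.
 *
 *      static void my_poll_fn(struct work_struct *work);
 *      static DECLARE_DELAYED_WORK(my_poll, my_poll_fn);
 *
 *      // run my_poll_fn() on my_wq roughly one second from now:
 *      // queue_delayed_work(my_wq, &my_poll, HZ);
 */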

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay)
{
        int ret = 0;
        struct timer_list *timer = &dwork->timer;
        struct work_struct *work = &dwork->work;

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));

                timer_stats_timer_set_start_info(&dwork->timer);

                /* This stores cwq for the moment, for the timer_fn */
                set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
                timer->expires = jiffies + delay;
                timer->data = (unsigned long)dwork;
                timer->function = delayed_work_timer_fn;

                if (unlikely(cpu >= 0))
                        add_timer_on(timer, cpu);
                else
                        add_timer(timer);
                ret = 1;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
        spin_lock_irq(&cwq->lock);
        cwq->run_depth++;
        if (cwq->run_depth > 3) {
                /* morton gets to eat his hat */
                printk("%s: recursion depth exceeded: %d\n",
                        __func__, cwq->run_depth);
                dump_stack();
        }
        while (!list_empty(&cwq->worklist)) {
                struct work_struct *work = list_entry(cwq->worklist.next,
                                                struct work_struct, entry);
                work_func_t f = work->func;
#ifdef CONFIG_LOCKDEP
                /*
                 * It is permissible to free the struct work_struct
                 * from inside the function that is called from it,
                 * this we need to take into account for lockdep too.
                 * To avoid bogus "held lock freed" warnings as well
                 * as problems when looking into work->lockdep_map,
                 * make a copy and use that here.
                 */
                struct lockdep_map lockdep_map = work->lockdep_map;
#endif

                cwq->current_work = work;
                list_del_init(cwq->worklist.next);
                spin_unlock_irq(&cwq->lock);

                BUG_ON(get_wq_data(work) != cwq);
                work_clear_pending(work);
                lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
                lock_acquire(&lockdep_map, 0, 0, 0, 2, _THIS_IP_);
                f(work);
                lock_release(&lockdep_map, 1, _THIS_IP_);
                lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);

                if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
                        printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
                                        "%s/0x%08x/%d\n",
                                        current->comm, preempt_count(),
                                        task_pid_nr(current));
                        printk(KERN_ERR "    last function: ");
                        print_symbol("%s\n", (unsigned long)f);
                        debug_show_held_locks(current);
                        dump_stack();
                }

                spin_lock_irq(&cwq->lock);
                cwq->current_work = NULL;
        }
        cwq->run_depth--;
        spin_unlock_irq(&cwq->lock);
}

static int worker_thread(void *__cwq)
{
        struct cpu_workqueue_struct *cwq = __cwq;
        DEFINE_WAIT(wait);

        if (cwq->wq->freezeable)
                set_freezable();

        set_user_nice(current, -5);

        for (;;) {
                prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
                if (!freezing(current) &&
                    !kthread_should_stop() &&
                    list_empty(&cwq->worklist))
                        schedule();
                finish_wait(&cwq->more_work, &wait);

                try_to_freeze();

                if (kthread_should_stop())
                        break;

                run_workqueue(cwq);
        }

        return 0;
}

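/*
 * Flushing is implemented by queueing a barrier: a dummy work item whose
 * only job is to complete a completion. Whoever wants to wait inserts a
 * wq_barrier after the work(s) in question and sleeps on ->done; since a
 * cwq executes its worklist in order, the completion fires only after
 * everything queued before the barrier has finished.
 */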
struct wq_barrier {
        struct work_struct work;
        struct completion done;
};

static void wq_barrier_func(struct work_struct *work)
{
        struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
        complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
                        struct wq_barrier *barr, struct list_head *head)
{
        INIT_WORK(&barr->work, wq_barrier_func);
        __set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

        init_completion(&barr->done);

        insert_work(cwq, &barr->work, head);
}

static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
        int active;

        if (cwq->thread == current) {
                /*
                 * Probably keventd trying to flush its own queue. So simply run
                 * it by hand rather than deadlocking.
                 */
                run_workqueue(cwq);
                active = 1;
        } else {
                struct wq_barrier barr;

                active = 0;
                spin_lock_irq(&cwq->lock);
                if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
                        insert_wq_barrier(cwq, &barr, &cwq->worklist);
                        active = 1;
                }
                spin_unlock_irq(&cwq->lock);

                if (active)
                        wait_for_completion(&barr.done);
        }

        return active;
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself. Now we just wait for the
 * helper threads to do it.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
        const cpumask_t *cpu_map = wq_cpu_map(wq);
        int cpu;

        might_sleep();
        lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
        lock_release(&wq->lockdep_map, 1, _THIS_IP_);
        for_each_cpu_mask_nr(cpu, *cpu_map)
                flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);
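
/*
 * Usage sketch (illustrative, not part of this file): in a shutdown or
 * remove path, first make sure nothing re-queues the work, then flush
 * so that any already-queued instance has finished before resources are
 * torn down. Names are hypothetical.
 *
 *      // dev->stopping = 1;            stop new submissions
 *      // flush_workqueue(dev->my_wq);  wait for queued work to finish
 *      // destroy_workqueue(dev->my_wq);
 */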

/**
 * flush_work - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * It is expected that, prior to calling flush_work(), the caller has
 * arranged for the work to not be requeued, otherwise it doesn't make
 * sense to use this function.
 */
int flush_work(struct work_struct *work)
{
        struct cpu_workqueue_struct *cwq;
        struct list_head *prev;
        struct wq_barrier barr;

        might_sleep();
        cwq = get_wq_data(work);
        if (!cwq)
                return 0;

        prev = NULL;
        spin_lock_irq(&cwq->lock);
        if (!list_empty(&work->entry)) {
                /*
                 * See the comment near try_to_grab_pending()->smp_rmb().
                 * If it was re-queued under us we are not going to wait.
                 */
                smp_rmb();
                if (unlikely(cwq != get_wq_data(work)))
                        goto out;
                prev = &work->entry;
        } else {
                if (cwq->current_work != work)
                        goto out;
                prev = &cwq->worklist;
        }
        insert_wq_barrier(cwq, &barr, prev->next);
out:
        spin_unlock_irq(&cwq->lock);
        if (!prev)
                return 0;

        wait_for_completion(&barr.done);
        return 1;
}
EXPORT_SYMBOL_GPL(flush_work);

/*
 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
 * so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
        struct cpu_workqueue_struct *cwq;
        int ret = -1;

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
                return 0;

        /*
         * The queueing is in progress, or it is already queued. Try to
         * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
         */

        cwq = get_wq_data(work);
        if (!cwq)
                return ret;

        spin_lock_irq(&cwq->lock);
        if (!list_empty(&work->entry)) {
                /*
                 * This work is queued, but perhaps we locked the wrong cwq.
                 * In that case we must see the new value after rmb(), see
                 * insert_work()->wmb().
                 */
                smp_rmb();
                if (cwq == get_wq_data(work)) {
                        list_del_init(&work->entry);
                        ret = 1;
                }
        }
        spin_unlock_irq(&cwq->lock);

        return ret;
}

static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
                                struct work_struct *work)
{
        struct wq_barrier barr;
        int running = 0;

        spin_lock_irq(&cwq->lock);
        if (unlikely(cwq->current_work == work)) {
                insert_wq_barrier(cwq, &barr, cwq->worklist.next);
                running = 1;
        }
        spin_unlock_irq(&cwq->lock);

        if (unlikely(running))
                wait_for_completion(&barr.done);
}

static void wait_on_work(struct work_struct *work)
{
        struct cpu_workqueue_struct *cwq;
        struct workqueue_struct *wq;
        const cpumask_t *cpu_map;
        int cpu;

        might_sleep();

        lock_acquire(&work->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
        lock_release(&work->lockdep_map, 1, _THIS_IP_);

        cwq = get_wq_data(work);
        if (!cwq)
                return;

        wq = cwq->wq;
        cpu_map = wq_cpu_map(wq);

        for_each_cpu_mask_nr(cpu, *cpu_map)
                wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}

static int __cancel_work_timer(struct work_struct *work,
                                struct timer_list *timer)
{
        int ret;

        do {
                ret = (timer && likely(del_timer(timer)));
                if (!ret)
                        ret = try_to_grab_pending(work);
                wait_on_work(work);
        } while (unlikely(ret < 0));

        work_clear_pending(work);
        return ret;
}

/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns true if @work was pending.
 *
 * cancel_work_sync() will cancel the work if it is queued. If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself. It can
 * cancel the work even if it migrates to another workqueue, however in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that the workqueue_struct on which this work was
 * last queued can't be destroyed before this function returns.
 */
int cancel_work_sync(struct work_struct *work)
{
        return __cancel_work_timer(work, NULL);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);
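
/*
 * Usage sketch (illustrative, not part of this file): the usual teardown
 * pattern. cancel_work_sync() both dequeues a pending instance and waits
 * for a running one, so after it returns the handler can no longer touch
 * the object being freed. Names are hypothetical.
 *
 *      // in my_device_remove():
 *      //      cancel_work_sync(&dev->work);
 *      //      kfree(dev);     now safe: the handler cannot run again
 */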

/**
 * cancel_delayed_work_sync - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * Returns true if @dwork was pending.
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work(). See also the comment for cancel_work_sync().
 */
int cancel_delayed_work_sync(struct delayed_work *dwork)
{
        return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);

static struct workqueue_struct *keventd_wq __read_mostly;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int schedule_work(struct work_struct *work)
{
        return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
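
/*
 * Usage sketch (illustrative, not part of this file): the classic
 * top-half/bottom-half split. An interrupt handler cannot sleep, so it
 * just queues work on keventd and returns; the handler then runs later
 * in process context. Names are hypothetical.
 *
 *      static DECLARE_WORK(my_work, my_work_fn);
 *
 *      static irqreturn_t my_irq_handler(int irq, void *dev_id)
 *      {
 *              // ack the hardware here, no sleeping allowed...
 *              schedule_work(&my_work);        // safe in irq context
 *              return IRQ_HANDLED;
 *      }
 */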

/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
int schedule_work_on(int cpu, struct work_struct *work)
{
        return queue_work_on(cpu, keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work_on);

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int schedule_delayed_work(struct delayed_work *dwork,
                        unsigned long delay)
{
        return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
                        struct delayed_work *dwork, unsigned long delay)
{
        return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);
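
/*
 * Usage sketch (illustrative, not part of this file): a self-rearming
 * poll loop. The handler re-queues itself at the end of each run, and
 * cancel_delayed_work_sync() above is what makes shutdown reliable even
 * against the rearm. Names are hypothetical.
 *
 *      static void my_poll_fn(struct work_struct *work)
 *      {
 *              struct delayed_work *dwork = container_of(work,
 *                                      struct delayed_work, work);
 *              // poll the hardware...
 *              schedule_delayed_work(dwork, HZ);       // rearm
 *      }
 *
 *      // shutdown: cancel_delayed_work_sync(&my_poll);
 */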

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
        int cpu;
        struct work_struct *works;

        works = alloc_percpu(struct work_struct);
        if (!works)
                return -ENOMEM;

        get_online_cpus();
        for_each_online_cpu(cpu) {
                struct work_struct *work = per_cpu_ptr(works, cpu);

                INIT_WORK(work, func);
                set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
                __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
        }
        for_each_online_cpu(cpu)
                flush_work(per_cpu_ptr(works, cpu));
        put_online_cpus();
        free_percpu(works);
        return 0;
}
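
/*
 * Usage sketch (illustrative, not part of this file): running a routine
 * once on every online CPU, e.g. to refresh per-cpu state that must be
 * updated locally. The call blocks until every CPU has run the function.
 * Names are hypothetical.
 *
 *      static void my_refresh_fn(struct work_struct *unused)
 *      {
 *              // runs on each online CPU in its keventd thread
 *      }
 *
 *      // int err = schedule_on_each_cpu(my_refresh_fn);
 */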

void flush_scheduled_work(void)
{
        flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn: the function to execute
 * @ew: guaranteed storage for the execute work structure (must
 *      be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns: 0 - function was executed
 *          1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
        if (!in_interrupt()) {
                fn(&ew->work);
                return 0;
        }

        INIT_WORK(&ew->work, fn);
        schedule_work(&ew->work);

        return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
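
/*
 * Usage sketch (illustrative, not part of this file): a release routine
 * that may be called from either process or interrupt context. Because
 * @ew must outlive a deferred call, it is embedded in the object being
 * released rather than kept on the stack. Names are hypothetical.
 *
 *      struct my_object {
 *              struct execute_work ew;
 *              // ...
 *      };
 *
 *      // execute_in_process_context(my_release_fn, &obj->ew);
 *      // runs my_release_fn() immediately if possible, else via keventd
 */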

int keventd_up(void)
{
        return keventd_wq != NULL;
}

int current_is_keventd(void)
{
        struct cpu_workqueue_struct *cwq;
        int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
        int ret = 0;

        BUG_ON(!keventd_wq);

        cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
        if (current == cwq->thread)
                ret = 1;

        return ret;
}

static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
        struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

        cwq->wq = wq;
        spin_lock_init(&cwq->lock);
        INIT_LIST_HEAD(&cwq->worklist);
        init_waitqueue_head(&cwq->more_work);

        return cwq;
}

static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
        struct workqueue_struct *wq = cwq->wq;
        const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d";
        struct task_struct *p;

        p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
        /*
         * Nobody can add the work_struct to this cwq,
         *      if (caller is __create_workqueue)
         *              nobody should see this wq
         *      else // caller is CPU_UP_PREPARE
         *              cpu is not on cpu_online_map
         * so we can abort safely.
         */
        if (IS_ERR(p))
                return PTR_ERR(p);

        cwq->thread = p;

        return 0;
}

static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
        struct task_struct *p = cwq->thread;

        if (p != NULL) {
                if (cpu >= 0)
                        kthread_bind(p, cpu);
                wake_up_process(p);
        }
}

struct workqueue_struct *__create_workqueue_key(const char *name,
                                                int singlethread,
                                                int freezeable,
                                                struct lock_class_key *key,
                                                const char *lock_name)
{
        struct workqueue_struct *wq;
        struct cpu_workqueue_struct *cwq;
        int err = 0, cpu;

        wq = kzalloc(sizeof(*wq), GFP_KERNEL);
        if (!wq)
                return NULL;

        wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
        if (!wq->cpu_wq) {
                kfree(wq);
                return NULL;
        }

        wq->name = name;
        lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
        wq->singlethread = singlethread;
        wq->freezeable = freezeable;
        INIT_LIST_HEAD(&wq->list);

        if (singlethread) {
                cwq = init_cpu_workqueue(wq, singlethread_cpu);
                err = create_workqueue_thread(cwq, singlethread_cpu);
                start_workqueue_thread(cwq, -1);
        } else {
                cpu_maps_update_begin();
                spin_lock(&workqueue_lock);
                list_add(&wq->list, &workqueues);
                spin_unlock(&workqueue_lock);

                for_each_possible_cpu(cpu) {
                        cwq = init_cpu_workqueue(wq, cpu);
                        if (err || !cpu_online(cpu))
                                continue;
                        err = create_workqueue_thread(cwq, cpu);
                        start_workqueue_thread(cwq, cpu);
                }
                cpu_maps_update_done();
        }

        if (err) {
                destroy_workqueue(wq);
                wq = NULL;
        }
        return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue_key);
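
/*
 * Usage sketch (illustrative, not part of this file): drivers normally
 * reach this function through the create_workqueue() family of macros
 * in linux/workqueue.h rather than calling it directly. Names are
 * hypothetical.
 *
 *      // one worker thread per CPU, named "my_wq/0", "my_wq/1", ...:
 *      // struct workqueue_struct *wq = create_workqueue("my_wq");
 *
 *      // a single worker thread for the whole system:
 *      // struct workqueue_struct *wq =
 *      //              create_singlethread_workqueue("my_wq");
 *
 *      // both return NULL on failure; pair with destroy_workqueue(wq).
 */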

static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
{
        /*
         * Our caller is either destroy_workqueue() or CPU_POST_DEAD,
         * cpu_add_remove_lock protects cwq->thread.
         */
        if (cwq->thread == NULL)
                return;

        lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
        lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);

        flush_cpu_workqueue(cwq);
        /*
         * If the caller is CPU_POST_DEAD and cwq->worklist was not empty,
         * a concurrent flush_workqueue() can insert a barrier after us.
         * However, in that case run_workqueue() won't return and check
         * kthread_should_stop() until it flushes all work_struct's.
         * When ->worklist becomes empty it is safe to exit because no
         * more work_structs can be queued on this cwq: flush_workqueue
         * checks list_empty(), and a "normal" queue_work() can't use
         * a dead CPU.
         */
        kthread_stop(cwq->thread);
        cwq->thread = NULL;
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
        const cpumask_t *cpu_map = wq_cpu_map(wq);
        int cpu;

        cpu_maps_update_begin();
        spin_lock(&workqueue_lock);
        list_del(&wq->list);
        spin_unlock(&workqueue_lock);

        for_each_cpu_mask_nr(cpu, *cpu_map)
                cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
        cpu_maps_update_done();

        free_percpu(wq->cpu_wq);
        kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
                                                unsigned long action,
                                                void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;
        struct cpu_workqueue_struct *cwq;
        struct workqueue_struct *wq;

        action &= ~CPU_TASKS_FROZEN;

        switch (action) {
        case CPU_UP_PREPARE:
                cpu_set(cpu, cpu_populated_map);
        }

        list_for_each_entry(wq, &workqueues, list) {
                cwq = per_cpu_ptr(wq->cpu_wq, cpu);

                switch (action) {
                case CPU_UP_PREPARE:
                        if (!create_workqueue_thread(cwq, cpu))
                                break;
                        printk(KERN_ERR "workqueue [%s] for %i failed\n",
                                wq->name, cpu);
                        return NOTIFY_BAD;

                case CPU_ONLINE:
                        start_workqueue_thread(cwq, cpu);
                        break;

                case CPU_UP_CANCELED:
                        start_workqueue_thread(cwq, -1);
                case CPU_POST_DEAD:
                        cleanup_workqueue_thread(cwq);
                        break;
                }
        }

        switch (action) {
        case CPU_UP_CANCELED:
        case CPU_POST_DEAD:
                cpu_clear(cpu, cpu_populated_map);
        }

        return NOTIFY_OK;
}

void __init init_workqueues(void)
{
        cpu_populated_map = cpu_online_map;
        singlethread_cpu = first_cpu(cpu_possible_map);
        cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu);
        hotcpu_notifier(workqueue_cpu_callback, 0);
        keventd_wq = create_workqueue("events");
        BUG_ON(!keventd_wq);
}