workqueue: kill NOAUTOREL works
/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

        spinlock_t lock;

        struct list_head worklist;
        wait_queue_head_t more_work;
        struct work_struct *current_work;

        struct workqueue_struct *wq;
        struct task_struct *thread;
        int should_stop;

        int run_depth;          /* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;
/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
        struct cpu_workqueue_struct *cpu_wq;
        struct list_head list;
        const char *name;
        int singlethread;
        int freezeable;         /* Freeze threads during suspend */
};

/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static DEFINE_MUTEX(workqueue_mutex);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static cpumask_t cpu_singlethread_map __read_mostly;
/* optimization, we could use cpu_possible_map */
static cpumask_t cpu_populated_map __read_mostly;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
        return wq->singlethread;
}

static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
{
        return is_single_threaded(wq)
                ? &cpu_singlethread_map : &cpu_populated_map;
}

static
struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
{
        if (unlikely(is_single_threaded(wq)))
                cpu = singlethread_cpu;
        return per_cpu_ptr(wq->cpu_wq, cpu);
}
/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
                                struct cpu_workqueue_struct *cwq)
{
        unsigned long new;

        BUG_ON(!work_pending(work));

        new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
        new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
        atomic_long_set(&work->data, new);
}

static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
        return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}
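
/*
 * How the two helpers above pack work->data: cwq pointers are aligned
 * well past WORK_STRUCT_FLAG_MASK, so the pointer and the flag bits
 * can share the single atomic_long_t.  A sketch of the encoding,
 * using the names from the code above:
 *
 *      data = (unsigned long)cwq | flag bits (WORK_STRUCT_PENDING, ...);
 *      cwq  = (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
 */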

static void insert_work(struct cpu_workqueue_struct *cwq,
                                struct work_struct *work, int tail)
{
        set_wq_data(work, cwq);
        if (tail)
                list_add_tail(&work->entry, &cwq->worklist);
        else
                list_add(&work->entry, &cwq->worklist);
        wake_up(&cwq->more_work);
}

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
                         struct work_struct *work)
{
        unsigned long flags;

        spin_lock_irqsave(&cwq->lock, flags);
        insert_work(cwq, work, 1);
        spin_unlock_irqrestore(&cwq->lock, flags);
}
/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
        int ret = 0;

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                BUG_ON(!list_empty(&work->entry));
                __queue_work(wq_per_cpu(wq, get_cpu()), work);
                put_cpu();
                ret = 1;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
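
/*
 * Usage sketch (illustrative only; "my_dev" and "my_work_fn" are
 * hypothetical names, not part of this file).  A driver typically
 * embeds the work_struct in its own state and recovers it with
 * container_of() in the handler:
 *
 *      struct my_dev {
 *              struct workqueue_struct *wq;
 *              struct work_struct work;
 *      };
 *
 *      static void my_work_fn(struct work_struct *work)
 *      {
 *              struct my_dev *dev = container_of(work, struct my_dev, work);
 *              // runs in the worker thread: process context, may sleep
 *      }
 *
 *      INIT_WORK(&dev->work, my_work_fn);      // once, at setup time
 *      queue_work(dev->wq, &dev->work);        // e.g. from an interrupt
 */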

void delayed_work_timer_fn(unsigned long __data)
{
        struct delayed_work *dwork = (struct delayed_work *)__data;
        struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
        struct workqueue_struct *wq = cwq->wq;

        __queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int fastcall queue_delayed_work(struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay)
{
        timer_stats_timer_set_start_info(&dwork->timer);
        if (delay == 0)
                return queue_work(wq, &dwork->work);

        return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay)
{
        int ret = 0;
        struct timer_list *timer = &dwork->timer;
        struct work_struct *work = &dwork->work;

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));

                /* This stores cwq for the moment, for the timer_fn */
                set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
                timer->expires = jiffies + delay;
                timer->data = (unsigned long)dwork;
                timer->function = delayed_work_timer_fn;

                if (unlikely(cpu >= 0))
                        add_timer_on(timer, cpu);
                else
                        add_timer(timer);
                ret = 1;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
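
/*
 * Usage sketch (illustrative; "my_poll" and "my_wq" are hypothetical):
 *
 *      static void my_poll(struct work_struct *work);
 *      static DECLARE_DELAYED_WORK(my_poll_work, my_poll);
 *
 *      // run in ~100ms, queued on whichever CPU the timer fires on:
 *      queue_delayed_work(my_wq, &my_poll_work, msecs_to_jiffies(100));
 *
 *      // or force both the timer and the queueing onto CPU 0:
 *      queue_delayed_work_on(0, my_wq, &my_poll_work, msecs_to_jiffies(100));
 */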

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
        spin_lock_irq(&cwq->lock);
        cwq->run_depth++;
        if (cwq->run_depth > 3) {
                /* morton gets to eat his hat */
                printk("%s: recursion depth exceeded: %d\n",
                        __FUNCTION__, cwq->run_depth);
                dump_stack();
        }
        while (!list_empty(&cwq->worklist)) {
                struct work_struct *work = list_entry(cwq->worklist.next,
                                                struct work_struct, entry);
                work_func_t f = work->func;

                cwq->current_work = work;
                list_del_init(cwq->worklist.next);
                spin_unlock_irq(&cwq->lock);

                BUG_ON(get_wq_data(work) != cwq);
                work_clear_pending(work);
                f(work);

                if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
                        printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
                                        "%s/0x%08x/%d\n",
                                        current->comm, preempt_count(),
                                        current->pid);
                        printk(KERN_ERR "    last function: ");
                        print_symbol("%s\n", (unsigned long)f);
                        debug_show_held_locks(current);
                        dump_stack();
                }

                spin_lock_irq(&cwq->lock);
                cwq->current_work = NULL;
        }
        cwq->run_depth--;
        spin_unlock_irq(&cwq->lock);
}

/*
 * NOTE: the caller must not touch *cwq if this func returns true
 */
static int cwq_should_stop(struct cpu_workqueue_struct *cwq)
{
        int should_stop = cwq->should_stop;

        if (unlikely(should_stop)) {
                spin_lock_irq(&cwq->lock);
                should_stop = cwq->should_stop && list_empty(&cwq->worklist);
                if (should_stop)
                        cwq->thread = NULL;
                spin_unlock_irq(&cwq->lock);
        }

        return should_stop;
}

static int worker_thread(void *__cwq)
{
        struct cpu_workqueue_struct *cwq = __cwq;
        DEFINE_WAIT(wait);
        struct k_sigaction sa;
        sigset_t blocked;

        if (!cwq->wq->freezeable)
                current->flags |= PF_NOFREEZE;

        set_user_nice(current, -5);

        /* Block and flush all signals */
        sigfillset(&blocked);
        sigprocmask(SIG_BLOCK, &blocked, NULL);
        flush_signals(current);

        /*
         * We inherited MPOL_INTERLEAVE from the booting kernel.
         * Set MPOL_DEFAULT to ensure node local allocations.
         */
        numa_default_policy();

        /* SIG_IGN makes children autoreap: see do_notify_parent(). */
        sa.sa.sa_handler = SIG_IGN;
        sa.sa.sa_flags = 0;
        siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
        do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

        for (;;) {
                if (cwq->wq->freezeable)
                        try_to_freeze();

                prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
                if (!cwq->should_stop && list_empty(&cwq->worklist))
                        schedule();
                finish_wait(&cwq->more_work, &wait);

                if (cwq_should_stop(cwq))
                        break;

                run_workqueue(cwq);
        }

        return 0;
}

struct wq_barrier {
        struct work_struct work;
        struct completion done;
};

static void wq_barrier_func(struct work_struct *work)
{
        struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
        complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
                                struct wq_barrier *barr, int tail)
{
        INIT_WORK(&barr->work, wq_barrier_func);
        __set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

        init_completion(&barr->done);

        insert_work(cwq, &barr->work, tail);
}
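
/*
 * The barrier is an ordinary work item whose handler just completes
 * barr->done.  Since each worker runs its list in order, queueing a
 * barrier at the tail and sleeping on the completion waits for every
 * work queued before it; queueing at the head (tail == 0) waits only
 * for the work currently running.  The pattern, as used by
 * flush_cpu_workqueue() and wait_on_work() below:
 *
 *      struct wq_barrier barr;
 *
 *      spin_lock_irq(&cwq->lock);
 *      insert_wq_barrier(cwq, &barr, 1);
 *      spin_unlock_irq(&cwq->lock);
 *      wait_for_completion(&barr.done);
 */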

static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
        if (cwq->thread == current) {
                /*
                 * Probably keventd trying to flush its own queue. So simply run
                 * it by hand rather than deadlocking.
                 */
                run_workqueue(cwq);
        } else {
                struct wq_barrier barr;
                int active = 0;

                spin_lock_irq(&cwq->lock);
                if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
                        insert_wq_barrier(cwq, &barr, 1);
                        active = 1;
                }
                spin_unlock_irq(&cwq->lock);

                if (active)
                        wait_for_completion(&barr.done);
        }
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
        const cpumask_t *cpu_map = wq_cpu_map(wq);
        int cpu;

        might_sleep();
        for_each_cpu_mask(cpu, *cpu_map)
                flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);
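
/*
 * Shutdown sketch (hypothetical driver exit path, "dev" and its
 * members are illustrative): stop the sources that requeue work
 * first, then flush:
 *
 *      del_timer_sync(&dev->poll_timer);       // nothing requeues now
 *      flush_workqueue(dev->wq);               // wait for queued works
 */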

static void wait_on_work(struct cpu_workqueue_struct *cwq,
                                struct work_struct *work)
{
        struct wq_barrier barr;
        int running = 0;

        spin_lock_irq(&cwq->lock);
        if (unlikely(cwq->current_work == work)) {
                insert_wq_barrier(cwq, &barr, 0);
                running = 1;
        }
        spin_unlock_irq(&cwq->lock);

        if (unlikely(running))
                wait_for_completion(&barr.done);
}

/**
 * flush_work - block until a work_struct's callback has terminated
 * @wq: the workqueue on which the work is queued
 * @work: the work which is to be flushed
 *
 * flush_work() will attempt to cancel the work if it is queued. If the work's
 * callback appears to be running, flush_work() will block until it has
 * completed.
 *
 * flush_work() is designed to be used when the caller is tearing down data
 * structures which the callback function operates upon. It is expected that,
 * prior to calling flush_work(), the caller has arranged for the work to not
 * be requeued.
 */
void flush_work(struct workqueue_struct *wq, struct work_struct *work)
{
        const cpumask_t *cpu_map = wq_cpu_map(wq);
        struct cpu_workqueue_struct *cwq;
        int cpu;

        might_sleep();

        cwq = get_wq_data(work);
        /* Was it ever queued ? */
        if (!cwq)
                return;

        /*
         * This work can't be re-queued, no need to re-check that
         * get_wq_data() is still the same when we take cwq->lock.
         */
        spin_lock_irq(&cwq->lock);
        list_del_init(&work->entry);
        work_clear_pending(work);
        spin_unlock_irq(&cwq->lock);

        for_each_cpu_mask(cpu, *cpu_map)
                wait_on_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}
EXPORT_SYMBOL_GPL(flush_work);
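
/*
 * Teardown sketch ("dev" is hypothetical): the caller first
 * guarantees that nothing can requeue the work, as the comment above
 * requires, then:
 *
 *      flush_work(dev->wq, &dev->work);
 *      // the callback is neither queued nor running any more, so the
 *      // data it operates on can be freed:
 *      kfree(dev);
 */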

static struct workqueue_struct *keventd_wq;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int fastcall schedule_work(struct work_struct *work)
{
        return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int fastcall schedule_delayed_work(struct delayed_work *dwork,
                                        unsigned long delay)
{
        timer_stats_timer_set_start_info(&dwork->timer);
        return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
                        struct delayed_work *dwork, unsigned long delay)
{
        return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
        int cpu;
        struct work_struct *works;

        works = alloc_percpu(struct work_struct);
        if (!works)
                return -ENOMEM;

        preempt_disable();              /* CPU hotplug */
        for_each_online_cpu(cpu) {
                struct work_struct *work = per_cpu_ptr(works, cpu);

                INIT_WORK(work, func);
                set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
                __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
        }
        preempt_enable();
        flush_workqueue(keventd_wq);
        free_percpu(works);
        return 0;
}
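
/*
 * Usage sketch ("drain_cpu_caches" is a hypothetical handler): run a
 * function once on every online CPU and wait for all of them:
 *
 *      static void drain_cpu_caches(struct work_struct *unused)
 *      {
 *              // runs on one particular CPU, in keventd context
 *      }
 *
 *      int err = schedule_on_each_cpu(drain_cpu_caches);
 *      // err is 0 on success, -ENOMEM if the per-cpu works could
 *      // not be allocated
 */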

void flush_scheduled_work(void)
{
        flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

void flush_work_keventd(struct work_struct *work)
{
        flush_work(keventd_wq, work);
}
EXPORT_SYMBOL(flush_work_keventd);

/**
 * cancel_rearming_delayed_work - kill off a delayed work whose handler rearms the delayed work.
 * @dwork: the delayed work struct
 *
 * Note that the work callback function may still be running on return from
 * cancel_delayed_work(). Run flush_workqueue() or flush_work() to wait on it.
 */
void cancel_rearming_delayed_work(struct delayed_work *dwork)
{
        struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);

        /* Was it ever queued ? */
        if (cwq != NULL) {
                struct workqueue_struct *wq = cwq->wq;

                while (!cancel_delayed_work(dwork))
                        flush_workqueue(wq);
        }
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);
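
/*
 * Usage sketch for a self-rearming handler ("my_rearm_fn" and
 * "my_dwork" are hypothetical):
 *
 *      static void my_rearm_fn(struct work_struct *work)
 *      {
 *              struct delayed_work *dwork =
 *                      container_of(work, struct delayed_work, work);
 *              ...
 *              schedule_delayed_work(dwork, HZ);       // rearms itself
 *      }
 *
 *      // teardown: the loop above keeps cancelling and flushing
 *      // until a cancel wins the race against the handler's requeue:
 *      cancel_rearming_delayed_work(&my_dwork);
 */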

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:         the function to execute
 * @ew:         guaranteed storage for the execute work structure (must
 *              be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:     0 - function was executed
 *              1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
        if (!in_interrupt()) {
                fn(&ew->work);
                return 0;
        }

        INIT_WORK(&ew->work, fn);
        schedule_work(&ew->work);

        return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
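
/*
 * Usage sketch ("my_release_fn" and the execute_work embedded in the
 * object are hypothetical).  Handy for release paths that may be
 * entered from either process or interrupt context:
 *
 *      static void my_release_fn(struct work_struct *work)
 *      {
 *              // may sleep: runs immediately, or later from keventd
 *      }
 *
 *      execute_in_process_context(my_release_fn, &obj->ew);
 */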

int keventd_up(void)
{
        return keventd_wq != NULL;
}

int current_is_keventd(void)
{
        struct cpu_workqueue_struct *cwq;
        int cpu = smp_processor_id();   /* preempt-safe: keventd is per-cpu */
        int ret = 0;

        BUG_ON(!keventd_wq);

        cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
        if (current == cwq->thread)
                ret = 1;

        return ret;
}

static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
        struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

        cwq->wq = wq;
        spin_lock_init(&cwq->lock);
        INIT_LIST_HEAD(&cwq->worklist);
        init_waitqueue_head(&cwq->more_work);

        return cwq;
}

static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
        struct workqueue_struct *wq = cwq->wq;
        const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d";
        struct task_struct *p;

        p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
        /*
         * Nobody can add the work_struct to this cwq,
         *      if (caller is __create_workqueue)
         *              nobody should see this wq
         *      else // caller is CPU_UP_PREPARE
         *              cpu is not on cpu_online_map
         * so we can abort safely.
         */
        if (IS_ERR(p))
                return PTR_ERR(p);

        cwq->thread = p;
        cwq->should_stop = 0;

        return 0;
}

static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
        struct task_struct *p = cwq->thread;

        if (p != NULL) {
                if (cpu >= 0)
                        kthread_bind(p, cpu);
                wake_up_process(p);
        }
}

struct workqueue_struct *__create_workqueue(const char *name,
                                            int singlethread, int freezeable)
{
        struct workqueue_struct *wq;
        struct cpu_workqueue_struct *cwq;
        int err = 0, cpu;

        wq = kzalloc(sizeof(*wq), GFP_KERNEL);
        if (!wq)
                return NULL;

        wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
        if (!wq->cpu_wq) {
                kfree(wq);
                return NULL;
        }

        wq->name = name;
        wq->singlethread = singlethread;
        wq->freezeable = freezeable;
        INIT_LIST_HEAD(&wq->list);

        if (singlethread) {
                cwq = init_cpu_workqueue(wq, singlethread_cpu);
                err = create_workqueue_thread(cwq, singlethread_cpu);
                start_workqueue_thread(cwq, -1);
        } else {
                mutex_lock(&workqueue_mutex);
                list_add(&wq->list, &workqueues);

                for_each_possible_cpu(cpu) {
                        cwq = init_cpu_workqueue(wq, cpu);
                        if (err || !cpu_online(cpu))
                                continue;
                        err = create_workqueue_thread(cwq, cpu);
                        start_workqueue_thread(cwq, cpu);
                }
                mutex_unlock(&workqueue_mutex);
        }

        if (err) {
                destroy_workqueue(wq);
                wq = NULL;
        }
        return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);
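
/*
 * Callers normally reach this through the wrappers in
 * <linux/workqueue.h> rather than calling it directly; at the time of
 * this code, create_workqueue(name) expands roughly to
 * __create_workqueue(name, 0, 0) and create_singlethread_workqueue(name)
 * to __create_workqueue(name, 1, 0) (there is also a freezeable
 * variant).  A creation sketch ("mydrv" is a hypothetical name):
 *
 *      struct workqueue_struct *wq;
 *
 *      wq = create_singlethread_workqueue("mydrv");
 *      if (!wq)
 *              return -ENOMEM;
 *      ...
 *      destroy_workqueue(wq);
 */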

static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
        struct wq_barrier barr;
        int alive = 0;

        spin_lock_irq(&cwq->lock);
        if (cwq->thread != NULL) {
                insert_wq_barrier(cwq, &barr, 1);
                cwq->should_stop = 1;
                alive = 1;
        }
        spin_unlock_irq(&cwq->lock);

        if (alive) {
                wait_for_completion(&barr.done);

                while (unlikely(cwq->thread != NULL))
                        cpu_relax();
                /*
                 * Wait until cwq->thread unlocks cwq->lock,
                 * it won't touch *cwq after that.
                 */
                smp_rmb();
                spin_unlock_wait(&cwq->lock);
        }
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
        const cpumask_t *cpu_map = wq_cpu_map(wq);
        struct cpu_workqueue_struct *cwq;
        int cpu;

        mutex_lock(&workqueue_mutex);
        list_del(&wq->list);
        mutex_unlock(&workqueue_mutex);

        for_each_cpu_mask(cpu, *cpu_map) {
                cwq = per_cpu_ptr(wq->cpu_wq, cpu);
                cleanup_workqueue_thread(cwq, cpu);
        }

        free_percpu(wq->cpu_wq);
        kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
                                                unsigned long action,
                                                void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;
        struct cpu_workqueue_struct *cwq;
        struct workqueue_struct *wq;

        switch (action) {
        case CPU_LOCK_ACQUIRE:
                mutex_lock(&workqueue_mutex);
                return NOTIFY_OK;

        case CPU_LOCK_RELEASE:
                mutex_unlock(&workqueue_mutex);
                return NOTIFY_OK;

        case CPU_UP_PREPARE:
                cpu_set(cpu, cpu_populated_map);
        }

        list_for_each_entry(wq, &workqueues, list) {
                cwq = per_cpu_ptr(wq->cpu_wq, cpu);

                switch (action) {
                case CPU_UP_PREPARE:
                        if (!create_workqueue_thread(cwq, cpu))
                                break;
                        printk(KERN_ERR "workqueue for %i failed\n", cpu);
                        return NOTIFY_BAD;

                case CPU_ONLINE:
                        start_workqueue_thread(cwq, cpu);
                        break;

                case CPU_UP_CANCELED:
                        start_workqueue_thread(cwq, -1);
                case CPU_DEAD:
                        cleanup_workqueue_thread(cwq, cpu);
                        break;
                }
        }

        return NOTIFY_OK;
}

void __init init_workqueues(void)
{
        cpu_populated_map = cpu_online_map;
        singlethread_cpu = first_cpu(cpu_possible_map);
        cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu);
        hotcpu_notifier(workqueue_cpu_callback, 0);
        keventd_wq = create_workqueue("events");
        BUG_ON(!keventd_wq);
}