workqueue: don't save interrupts in run_workqueue()
kernel/workqueue.c
1 /*
2 * linux/kernel/workqueue.c
3 *
4 * Generic mechanism for defining kernel helper threads for running
5 * arbitrary tasks in process context.
6 *
7 * Started by Ingo Molnar, Copyright (C) 2002
8 *
9 * Derived from the taskqueue/keventd code by:
10 *
11 * David Woodhouse <dwmw2@infradead.org>
12 * Andrew Morton <andrewm@uow.edu.au>
13 * Kai Petzke <wpp@marie.physik.tu-berlin.de>
14 * Theodore Ts'o <tytso@mit.edu>
15 *
16 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
17 */
18
19 #include <linux/module.h>
20 #include <linux/kernel.h>
21 #include <linux/sched.h>
22 #include <linux/init.h>
23 #include <linux/signal.h>
24 #include <linux/completion.h>
25 #include <linux/workqueue.h>
26 #include <linux/slab.h>
27 #include <linux/cpu.h>
28 #include <linux/notifier.h>
29 #include <linux/kthread.h>
30 #include <linux/hardirq.h>
31 #include <linux/mempolicy.h>
32 #include <linux/freezer.h>
33 #include <linux/kallsyms.h>
34 #include <linux/debug_locks.h>
35
36 /*
37 * The per-CPU workqueue (if single thread, we always use the first
38 * possible cpu).
39 */
40 struct cpu_workqueue_struct {
41
42 spinlock_t lock;
43
44 struct list_head worklist;
45 wait_queue_head_t more_work;
46 struct work_struct *current_work;
47
48 struct workqueue_struct *wq;
49 struct task_struct *thread;
50 int should_stop;
51
52 int run_depth; /* Detect run_workqueue() recursion depth */
53 } ____cacheline_aligned;
54
55 /*
56 * The externally visible workqueue abstraction is an array of
57 * per-CPU workqueues:
58 */
59 struct workqueue_struct {
60 struct cpu_workqueue_struct *cpu_wq;
61 const char *name;
62 struct list_head list; /* Empty if single thread */
63 int freezeable; /* Freeze threads during suspend */
64 };
65
66 /* All the per-cpu workqueues on the system, so that CPU hotplug can add and
67    remove threads on each one as CPUs come and go. */
68 static DEFINE_MUTEX(workqueue_mutex);
69 static LIST_HEAD(workqueues);
70
71 static int singlethread_cpu __read_mostly;
72 /* optimization, we could use cpu_possible_map */
73 static cpumask_t cpu_populated_map __read_mostly;
74
75 /* If it's single threaded, it isn't in the list of workqueues. */
76 static inline int is_single_threaded(struct workqueue_struct *wq)
77 {
78 return list_empty(&wq->list);
79 }
80
81 /*
82 * Set the workqueue on which a work item is to be run
83 * - Must *only* be called if the pending flag is set
84 */
85 static inline void set_wq_data(struct work_struct *work, void *wq)
86 {
87 unsigned long new;
88
89 BUG_ON(!work_pending(work));
90
91 new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
92 new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
93 atomic_long_set(&work->data, new);
94 }
95
96 static inline void *get_wq_data(struct work_struct *work)
97 {
98 return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
99 }
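/*
 * Illustrative note (not part of the original file): work->data packs the
 * owning queue pointer and the low flag bits into a single atomic_long_t.
 * The structures pointed to are at least pointer-aligned, so the low two
 * bits of the address are always zero and are free to carry
 * WORK_STRUCT_PENDING and WORK_STRUCT_NOAUTOREL.  Roughly:
 *
 *	data    = (unsigned long)cwq | (flags & WORK_STRUCT_FLAG_MASK);
 *	cwq     = (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
 *	pending = data & (1UL << WORK_STRUCT_PENDING);
 */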
100
101 static void insert_work(struct cpu_workqueue_struct *cwq,
102 struct work_struct *work, int tail)
103 {
104 set_wq_data(work, cwq);
105 if (tail)
106 list_add_tail(&work->entry, &cwq->worklist);
107 else
108 list_add(&work->entry, &cwq->worklist);
109 wake_up(&cwq->more_work);
110 }
111
112 /* Preempt must be disabled. */
113 static void __queue_work(struct cpu_workqueue_struct *cwq,
114 struct work_struct *work)
115 {
116 unsigned long flags;
117
118 spin_lock_irqsave(&cwq->lock, flags);
119 insert_work(cwq, work, 1);
120 spin_unlock_irqrestore(&cwq->lock, flags);
121 }
122
123 /**
124 * queue_work - queue work on a workqueue
125 * @wq: workqueue to use
126 * @work: work to queue
127 *
128 * Returns 0 if @work was already on a queue, non-zero otherwise.
129 *
130 * We queue the work to the CPU on which it was submitted, but there is no
131 * guarantee that it will be processed by that CPU.
132 */
133 int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
134 {
135 int ret = 0, cpu = get_cpu();
136
137 if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
138 if (unlikely(is_single_threaded(wq)))
139 cpu = singlethread_cpu;
140 BUG_ON(!list_empty(&work->entry));
141 __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
142 ret = 1;
143 }
144 put_cpu();
145 return ret;
146 }
147 EXPORT_SYMBOL_GPL(queue_work);
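/*
 * Usage sketch for queue_work() (illustrative only, not part of the
 * original file; my_wq, my_work, my_work_fn() and do_useful_stuff() are
 * made-up names).  The handler later runs in process context in the
 * workqueue's thread; queue_work() returns 0 if the work was already
 * pending on a queue.
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		do_useful_stuff();
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	ret = queue_work(my_wq, &my_work);
 */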
148
149 void delayed_work_timer_fn(unsigned long __data)
150 {
151 struct delayed_work *dwork = (struct delayed_work *)__data;
152 struct workqueue_struct *wq = get_wq_data(&dwork->work);
153 int cpu = smp_processor_id();
154
155 if (unlikely(is_single_threaded(wq)))
156 cpu = singlethread_cpu;
157
158 __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
159 }
160
161 /**
162 * queue_delayed_work - queue work on a workqueue after delay
163 * @wq: workqueue to use
164 * @dwork: delayable work to queue
165 * @delay: number of jiffies to wait before queueing
166 *
167 * Returns 0 if @work was already on a queue, non-zero otherwise.
168 */
169 int fastcall queue_delayed_work(struct workqueue_struct *wq,
170 struct delayed_work *dwork, unsigned long delay)
171 {
172 int ret = 0;
173 struct timer_list *timer = &dwork->timer;
174 struct work_struct *work = &dwork->work;
175
176 timer_stats_timer_set_start_info(timer);
177 if (delay == 0)
178 return queue_work(wq, work);
179
180 if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
181 BUG_ON(timer_pending(timer));
182 BUG_ON(!list_empty(&work->entry));
183
184 		/* This stores wq for now, so delayed_work_timer_fn() can find it */
185 set_wq_data(work, wq);
186 timer->expires = jiffies + delay;
187 timer->data = (unsigned long)dwork;
188 timer->function = delayed_work_timer_fn;
189 add_timer(timer);
190 ret = 1;
191 }
192 return ret;
193 }
194 EXPORT_SYMBOL_GPL(queue_delayed_work);
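/*
 * Usage sketch for queue_delayed_work() (illustrative; my_wq, my_dwork and
 * my_dwork_fn are made-up names).  The pending bit is set right away, the
 * timer fires after @delay jiffies, and only then is the work placed on
 * the queue.  container_of() recovers the delayed_work from the plain
 * work_struct passed to the handler.
 *
 *	static void my_dwork_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork =
 *			container_of(work, struct delayed_work, work);
 *		...
 *	}
 *	static DECLARE_DELAYED_WORK(my_dwork, my_dwork_fn);
 *
 *	queue_delayed_work(my_wq, &my_dwork, msecs_to_jiffies(100));
 */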
195
196 /**
197 * queue_delayed_work_on - queue work on specific CPU after delay
198 * @cpu: CPU number to execute work on
199 * @wq: workqueue to use
200 * @dwork: work to queue
201 * @delay: number of jiffies to wait before queueing
202 *
203 * Returns 0 if @work was already on a queue, non-zero otherwise.
204 */
205 int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
206 struct delayed_work *dwork, unsigned long delay)
207 {
208 int ret = 0;
209 struct timer_list *timer = &dwork->timer;
210 struct work_struct *work = &dwork->work;
211
212 if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
213 BUG_ON(timer_pending(timer));
214 BUG_ON(!list_empty(&work->entry));
215
216 		/* This stores wq for now, so delayed_work_timer_fn() can find it */
217 set_wq_data(work, wq);
218 timer->expires = jiffies + delay;
219 timer->data = (unsigned long)dwork;
220 timer->function = delayed_work_timer_fn;
221 add_timer_on(timer, cpu);
222 ret = 1;
223 }
224 return ret;
225 }
226 EXPORT_SYMBOL_GPL(queue_delayed_work_on);
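/*
 * queue_delayed_work_on() (above) differs only in that the timer is armed
 * on a specific CPU via add_timer_on(), so the timer callback, and
 * normally the work itself, run on that CPU.  Illustrative call, with the
 * same made-up names as above:
 *
 *	queue_delayed_work_on(1, my_wq, &my_dwork, HZ);
 */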
227
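/*
 * Note on locking: run_workqueue() is only entered from process context
 * (the worker thread below, or a task flushing its own queue), so
 * interrupts are known to be enabled here.  That is why plain
 * spin_lock_irq()/spin_unlock_irq() is used instead of
 * spin_lock_irqsave(): there are no interrupt flags worth saving.
 */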
228 static void run_workqueue(struct cpu_workqueue_struct *cwq)
229 {
230 spin_lock_irq(&cwq->lock);
231 cwq->run_depth++;
232 if (cwq->run_depth > 3) {
233 /* morton gets to eat his hat */
234 printk("%s: recursion depth exceeded: %d\n",
235 __FUNCTION__, cwq->run_depth);
236 dump_stack();
237 }
238 while (!list_empty(&cwq->worklist)) {
239 struct work_struct *work = list_entry(cwq->worklist.next,
240 struct work_struct, entry);
241 work_func_t f = work->func;
242
243 cwq->current_work = work;
244 list_del_init(cwq->worklist.next);
245 spin_unlock_irq(&cwq->lock);
246
247 BUG_ON(get_wq_data(work) != cwq);
248 if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
249 work_release(work);
250 f(work);
251
252 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
253 printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
254 "%s/0x%08x/%d\n",
255 current->comm, preempt_count(),
256 current->pid);
257 printk(KERN_ERR " last function: ");
258 print_symbol("%s\n", (unsigned long)f);
259 debug_show_held_locks(current);
260 dump_stack();
261 }
262
263 spin_lock_irq(&cwq->lock);
264 cwq->current_work = NULL;
265 }
266 cwq->run_depth--;
267 spin_unlock_irq(&cwq->lock);
268 }
269
270 /*
271 * NOTE: the caller must not touch *cwq if this func returns true
272 */
273 static int cwq_should_stop(struct cpu_workqueue_struct *cwq)
274 {
275 int should_stop = cwq->should_stop;
276
277 if (unlikely(should_stop)) {
278 spin_lock_irq(&cwq->lock);
279 should_stop = cwq->should_stop && list_empty(&cwq->worklist);
280 if (should_stop)
281 cwq->thread = NULL;
282 spin_unlock_irq(&cwq->lock);
283 }
284
285 return should_stop;
286 }
287
288 static int worker_thread(void *__cwq)
289 {
290 struct cpu_workqueue_struct *cwq = __cwq;
291 DEFINE_WAIT(wait);
292 struct k_sigaction sa;
293 sigset_t blocked;
294
295 if (!cwq->wq->freezeable)
296 current->flags |= PF_NOFREEZE;
297
298 set_user_nice(current, -5);
299
300 /* Block and flush all signals */
301 sigfillset(&blocked);
302 sigprocmask(SIG_BLOCK, &blocked, NULL);
303 flush_signals(current);
304
305 /*
306 * We inherited MPOL_INTERLEAVE from the booting kernel.
307 	 * Set MPOL_DEFAULT to ensure node local allocations.
308 */
309 numa_default_policy();
310
311 /* SIG_IGN makes children autoreap: see do_notify_parent(). */
312 sa.sa.sa_handler = SIG_IGN;
313 sa.sa.sa_flags = 0;
314 siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
315 do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);
316
317 for (;;) {
318 if (cwq->wq->freezeable)
319 try_to_freeze();
320
321 prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
322 if (!cwq->should_stop && list_empty(&cwq->worklist))
323 schedule();
324 finish_wait(&cwq->more_work, &wait);
325
326 if (cwq_should_stop(cwq))
327 break;
328
329 run_workqueue(cwq);
330 }
331
332 return 0;
333 }
334
335 struct wq_barrier {
336 struct work_struct work;
337 struct completion done;
338 };
339
340 static void wq_barrier_func(struct work_struct *work)
341 {
342 struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
343 complete(&barr->done);
344 }
345
346 static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
347 struct wq_barrier *barr, int tail)
348 {
349 INIT_WORK(&barr->work, wq_barrier_func);
350 __set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));
351
352 init_completion(&barr->done);
353
354 insert_work(cwq, &barr->work, tail);
355 }
356
357 static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
358 {
359 if (cwq->thread == current) {
360 /*
361 * Probably keventd trying to flush its own queue. So simply run
362 * it by hand rather than deadlocking.
363 */
364 run_workqueue(cwq);
365 } else {
366 struct wq_barrier barr;
367 int active = 0;
368
369 spin_lock_irq(&cwq->lock);
370 if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
371 insert_wq_barrier(cwq, &barr, 1);
372 active = 1;
373 }
374 spin_unlock_irq(&cwq->lock);
375
376 if (active)
377 wait_for_completion(&barr.done);
378 }
379 }
380
381 /**
382 * flush_workqueue - ensure that any scheduled work has run to completion.
383 * @wq: workqueue to flush
384 *
385 * Forces execution of the workqueue and blocks until its completion.
386 * This is typically used in driver shutdown handlers.
387 *
388 * We sleep until all works which were queued on entry have been handled,
389 * but we are not livelocked by new incoming ones.
390 *
391 * This function used to run the workqueues itself. Now we just wait for the
392 * helper threads to do it.
393 */
394 void fastcall flush_workqueue(struct workqueue_struct *wq)
395 {
396 might_sleep();
397
398 if (is_single_threaded(wq))
399 flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
400 else {
401 int cpu;
402
403 for_each_cpu_mask(cpu, cpu_populated_map)
404 flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
405 }
406 }
407 EXPORT_SYMBOL_GPL(flush_workqueue);
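/*
 * Typical use of flush_workqueue() (illustrative sketch; my_wq, my_dev,
 * dev->stopping and dev->buffer are made up): first prevent further
 * queueing, then flush so that everything already queued has finished
 * before shared state is torn down.
 *
 *	static void my_dev_shutdown(struct my_dev *dev)
 *	{
 *		dev->stopping = 1;
 *		flush_workqueue(my_wq);
 *		kfree(dev->buffer);
 *	}
 */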
408
409 static void wait_on_work(struct cpu_workqueue_struct *cwq,
410 struct work_struct *work)
411 {
412 struct wq_barrier barr;
413 int running = 0;
414
415 spin_lock_irq(&cwq->lock);
416 if (unlikely(cwq->current_work == work)) {
417 insert_wq_barrier(cwq, &barr, 0);
418 running = 1;
419 }
420 spin_unlock_irq(&cwq->lock);
421
422 if (unlikely(running))
423 wait_for_completion(&barr.done);
424 }
425
426 /**
427 * flush_work - block until a work_struct's callback has terminated
428 * @wq: the workqueue on which the work is queued
429 * @work: the work which is to be flushed
430 *
431 * flush_work() will attempt to cancel the work if it is queued. If the work's
432 * callback appears to be running, flush_work() will block until it has
433 * completed.
434 *
435 * flush_work() is designed to be used when the caller is tearing down data
436 * structures which the callback function operates upon. It is expected that,
437 * prior to calling flush_work(), the caller has arranged for the work to not
438 * be requeued.
439 */
440 void flush_work(struct workqueue_struct *wq, struct work_struct *work)
441 {
442 struct cpu_workqueue_struct *cwq;
443
444 might_sleep();
445
446 cwq = get_wq_data(work);
447 /* Was it ever queued ? */
448 if (!cwq)
449 return;
450
451 /*
452 * This work can't be re-queued, no need to re-check that
453 * get_wq_data() is still the same when we take cwq->lock.
454 */
455 spin_lock_irq(&cwq->lock);
456 list_del_init(&work->entry);
457 work_release(work);
458 spin_unlock_irq(&cwq->lock);
459
460 if (is_single_threaded(wq))
461 wait_on_work(per_cpu_ptr(wq->cpu_wq, singlethread_cpu), work);
462 else {
463 int cpu;
464
465 for_each_cpu_mask(cpu, cpu_populated_map)
466 wait_on_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
467 }
468 }
469 EXPORT_SYMBOL_GPL(flush_work);
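/*
 * Usage sketch for flush_work() (illustrative; my_wq, dev and
 * dev->shutting_down are made up).  The caller first makes sure the work
 * cannot be requeued (here via a flag the work function checks), then
 * flush_work() either removes the pending work or waits for the running
 * callback to complete, after which the data it used may be freed.
 *
 *	dev->shutting_down = 1;
 *	flush_work(my_wq, &dev->work);
 *	kfree(dev);
 */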
470
471
472 static struct workqueue_struct *keventd_wq;
473
474 /**
475 * schedule_work - put work task in global workqueue
476 * @work: job to be done
477 *
478 * This puts a job in the kernel-global workqueue.
479 */
480 int fastcall schedule_work(struct work_struct *work)
481 {
482 return queue_work(keventd_wq, work);
483 }
484 EXPORT_SYMBOL(schedule_work);
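/*
 * Usage sketch (illustrative; my_work, my_work_fn and my_dwork are made
 * up).  schedule_work() and the delayed variants below simply queue onto
 * the shared "events" workqueue (keventd), so callers with only occasional
 * work do not need a private workqueue:
 *
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	schedule_work(&my_work);
 *	schedule_delayed_work(&my_dwork, HZ);
 */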
485
486 /**
487 * schedule_delayed_work - put work task in global workqueue after delay
488 * @dwork: job to be done
489 * @delay: number of jiffies to wait or 0 for immediate execution
490 *
491 * After waiting for a given time this puts a job in the kernel-global
492 * workqueue.
493 */
494 int fastcall schedule_delayed_work(struct delayed_work *dwork,
495 unsigned long delay)
496 {
497 timer_stats_timer_set_start_info(&dwork->timer);
498 return queue_delayed_work(keventd_wq, dwork, delay);
499 }
500 EXPORT_SYMBOL(schedule_delayed_work);
501
502 /**
503 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
504 * @cpu: cpu to use
505 * @dwork: job to be done
506 * @delay: number of jiffies to wait
507 *
508 * After waiting for a given time this puts a job in the kernel-global
509 * workqueue on the specified CPU.
510 */
511 int schedule_delayed_work_on(int cpu,
512 struct delayed_work *dwork, unsigned long delay)
513 {
514 return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
515 }
516 EXPORT_SYMBOL(schedule_delayed_work_on);
517
518 /**
519 * schedule_on_each_cpu - call a function on each online CPU from keventd
520 * @func: the function to call
521 *
522 * Returns zero on success.
523  * Returns a negative errno on failure.
524 *
525 * Appears to be racy against CPU hotplug.
526 *
527 * schedule_on_each_cpu() is very slow.
528 */
529 int schedule_on_each_cpu(work_func_t func)
530 {
531 int cpu;
532 struct work_struct *works;
533
534 works = alloc_percpu(struct work_struct);
535 if (!works)
536 return -ENOMEM;
537
538 preempt_disable(); /* CPU hotplug */
539 for_each_online_cpu(cpu) {
540 struct work_struct *work = per_cpu_ptr(works, cpu);
541
542 INIT_WORK(work, func);
543 set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
544 __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
545 }
546 preempt_enable();
547 flush_workqueue(keventd_wq);
548 free_percpu(works);
549 return 0;
550 }
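/*
 * Usage sketch for schedule_on_each_cpu() (illustrative;
 * refresh_cpu_state() is a made-up handler).  The function is queued once
 * per online CPU on keventd, and the call does not return until every
 * instance has run.
 *
 *	static void refresh_cpu_state(struct work_struct *unused)
 *	{
 *		...
 *	}
 *
 *	err = schedule_on_each_cpu(refresh_cpu_state);
 */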
551
552 void flush_scheduled_work(void)
553 {
554 flush_workqueue(keventd_wq);
555 }
556 EXPORT_SYMBOL(flush_scheduled_work);
557
558 void flush_work_keventd(struct work_struct *work)
559 {
560 flush_work(keventd_wq, work);
561 }
562 EXPORT_SYMBOL(flush_work_keventd);
563
564 /**
565 * cancel_rearming_delayed_workqueue - reliably kill off a delayed work whose handler rearms the delayed work.
566 * @wq: the controlling workqueue structure
567 * @dwork: the delayed work struct
568 */
569 void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
570 struct delayed_work *dwork)
571 {
572 while (!cancel_delayed_work(dwork))
573 flush_workqueue(wq);
574 }
575 EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
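/*
 * Usage sketch (illustrative; my_wq, my_dwork, my_poll_fn and
 * poll_hardware() are made up).  A self-rearming handler cannot be stopped
 * reliably by cancel_delayed_work() alone, since it may requeue itself in
 * the window between cancel and check; cancel_rearming_delayed_workqueue()
 * (or cancel_rearming_delayed_work() below, for keventd) loops cancel plus
 * flush until the work is really gone.
 *
 *	static struct delayed_work my_dwork;
 *
 *	static void my_poll_fn(struct work_struct *work)
 *	{
 *		poll_hardware();
 *		queue_delayed_work(my_wq, &my_dwork, HZ);
 *	}
 *
 *	INIT_DELAYED_WORK(&my_dwork, my_poll_fn);
 *	queue_delayed_work(my_wq, &my_dwork, HZ);
 *	...
 *	cancel_rearming_delayed_workqueue(my_wq, &my_dwork);
 */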
576
577 /**
578 * cancel_rearming_delayed_work - reliably kill off a delayed keventd work whose handler rearms the delayed work.
579 * @dwork: the delayed work struct
580 */
581 void cancel_rearming_delayed_work(struct delayed_work *dwork)
582 {
583 cancel_rearming_delayed_workqueue(keventd_wq, dwork);
584 }
585 EXPORT_SYMBOL(cancel_rearming_delayed_work);
586
587 /**
588 * execute_in_process_context - reliably execute the routine with user context
589 * @fn: the function to execute
590 * @ew: guaranteed storage for the execute work structure (must
591 * be available when the work executes)
592 *
593 * Executes the function immediately if process context is available,
594 * otherwise schedules the function for delayed execution.
595 *
596 * Returns: 0 - function was executed
597 * 1 - function was scheduled for execution
598 */
599 int execute_in_process_context(work_func_t fn, struct execute_work *ew)
600 {
601 if (!in_interrupt()) {
602 fn(&ew->work);
603 return 0;
604 }
605
606 INIT_WORK(&ew->work, fn);
607 schedule_work(&ew->work);
608
609 return 1;
610 }
611 EXPORT_SYMBOL_GPL(execute_in_process_context);
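/*
 * Usage sketch for execute_in_process_context() (illustrative; my_obj,
 * my_release_fn and obj are made up).  It is handy when a release path may
 * be entered from either process or interrupt context but the callback
 * needs process context; the execute_work storage must stay valid until
 * the callback has run, so it is typically embedded in the object itself.
 *
 *	struct my_obj {
 *		struct execute_work ew;
 *		...
 *	};
 *
 *	static void my_release_fn(struct work_struct *work)
 *	{
 *		struct my_obj *obj = container_of(work, struct my_obj, ew.work);
 *		kfree(obj);
 *	}
 *
 *	execute_in_process_context(my_release_fn, &obj->ew);
 */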
612
613 int keventd_up(void)
614 {
615 return keventd_wq != NULL;
616 }
617
618 int current_is_keventd(void)
619 {
620 struct cpu_workqueue_struct *cwq;
621 int cpu = smp_processor_id(); /* preempt-safe: keventd is per-cpu */
622 int ret = 0;
623
624 BUG_ON(!keventd_wq);
625
626 cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
627 if (current == cwq->thread)
628 ret = 1;
629
630 return ret;
631
632 }
633
634 static struct cpu_workqueue_struct *
635 init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
636 {
637 struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
638
639 cwq->wq = wq;
640 spin_lock_init(&cwq->lock);
641 INIT_LIST_HEAD(&cwq->worklist);
642 init_waitqueue_head(&cwq->more_work);
643
644 return cwq;
645 }
646
647 static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
648 {
649 struct workqueue_struct *wq = cwq->wq;
650 const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d";
651 struct task_struct *p;
652
653 p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
654 /*
655 * Nobody can add the work_struct to this cwq,
656 * if (caller is __create_workqueue)
657 * nobody should see this wq
658 * else // caller is CPU_UP_PREPARE
659 * cpu is not on cpu_online_map
660 * so we can abort safely.
661 */
662 if (IS_ERR(p))
663 return PTR_ERR(p);
664
665 cwq->thread = p;
666 cwq->should_stop = 0;
667 if (!is_single_threaded(wq))
668 kthread_bind(p, cpu);
669
670 if (is_single_threaded(wq) || cpu_online(cpu))
671 wake_up_process(p);
672
673 return 0;
674 }
675
676 struct workqueue_struct *__create_workqueue(const char *name,
677 int singlethread, int freezeable)
678 {
679 struct workqueue_struct *wq;
680 struct cpu_workqueue_struct *cwq;
681 int err = 0, cpu;
682
683 wq = kzalloc(sizeof(*wq), GFP_KERNEL);
684 if (!wq)
685 return NULL;
686
687 wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
688 if (!wq->cpu_wq) {
689 kfree(wq);
690 return NULL;
691 }
692
693 wq->name = name;
694 wq->freezeable = freezeable;
695
696 if (singlethread) {
697 INIT_LIST_HEAD(&wq->list);
698 cwq = init_cpu_workqueue(wq, singlethread_cpu);
699 err = create_workqueue_thread(cwq, singlethread_cpu);
700 } else {
701 mutex_lock(&workqueue_mutex);
702 list_add(&wq->list, &workqueues);
703
704 for_each_possible_cpu(cpu) {
705 cwq = init_cpu_workqueue(wq, cpu);
706 if (err || !cpu_online(cpu))
707 continue;
708 err = create_workqueue_thread(cwq, cpu);
709 }
710 mutex_unlock(&workqueue_mutex);
711 }
712
713 if (err) {
714 destroy_workqueue(wq);
715 wq = NULL;
716 }
717 return wq;
718 }
719 EXPORT_SYMBOL_GPL(__create_workqueue);
720
721 static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
722 {
723 struct wq_barrier barr;
724 int alive = 0;
725
726 spin_lock_irq(&cwq->lock);
727 if (cwq->thread != NULL) {
728 insert_wq_barrier(cwq, &barr, 1);
729 cwq->should_stop = 1;
730 alive = 1;
731 }
732 spin_unlock_irq(&cwq->lock);
733
734 if (alive) {
735 wait_for_completion(&barr.done);
736
737 while (unlikely(cwq->thread != NULL))
738 cpu_relax();
739 /*
740 * Wait until cwq->thread unlocks cwq->lock,
741 * it won't touch *cwq after that.
742 */
743 smp_rmb();
744 spin_unlock_wait(&cwq->lock);
745 }
746 }
747
748 /**
749 * destroy_workqueue - safely terminate a workqueue
750 * @wq: target workqueue
751 *
752 * Safely destroy a workqueue. All work currently pending will be done first.
753 */
754 void destroy_workqueue(struct workqueue_struct *wq)
755 {
756 struct cpu_workqueue_struct *cwq;
757
758 if (is_single_threaded(wq)) {
759 cwq = per_cpu_ptr(wq->cpu_wq, singlethread_cpu);
760 cleanup_workqueue_thread(cwq, singlethread_cpu);
761 } else {
762 int cpu;
763
764 mutex_lock(&workqueue_mutex);
765 list_del(&wq->list);
766 mutex_unlock(&workqueue_mutex);
767
768 for_each_cpu_mask(cpu, cpu_populated_map) {
769 cwq = per_cpu_ptr(wq->cpu_wq, cpu);
770 cleanup_workqueue_thread(cwq, cpu);
771 }
772 }
773
774 free_percpu(wq->cpu_wq);
775 kfree(wq);
776 }
777 EXPORT_SYMBOL_GPL(destroy_workqueue);
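/*
 * Lifecycle sketch (illustrative; "mydrv", my_wq and my_work are made up).
 * create_workqueue() and create_singlethread_workqueue() are the usual
 * wrappers around __create_workqueue(); destroy_workqueue() then stops the
 * threads, after running all pending work, and frees the per-cpu data.
 *
 *	struct workqueue_struct *my_wq;
 *
 *	my_wq = create_workqueue("mydrv");
 *	if (!my_wq)
 *		return -ENOMEM;
 *	...
 *	queue_work(my_wq, &my_work);
 *	...
 *	destroy_workqueue(my_wq);
 */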
778
779 static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
780 unsigned long action,
781 void *hcpu)
782 {
783 unsigned int cpu = (unsigned long)hcpu;
784 struct cpu_workqueue_struct *cwq;
785 struct workqueue_struct *wq;
786
787 switch (action) {
788 case CPU_LOCK_ACQUIRE:
789 mutex_lock(&workqueue_mutex);
790 return NOTIFY_OK;
791
792 case CPU_LOCK_RELEASE:
793 mutex_unlock(&workqueue_mutex);
794 return NOTIFY_OK;
795
796 case CPU_UP_PREPARE:
797 cpu_set(cpu, cpu_populated_map);
798 }
799
800 list_for_each_entry(wq, &workqueues, list) {
801 cwq = per_cpu_ptr(wq->cpu_wq, cpu);
802
803 switch (action) {
804 case CPU_UP_PREPARE:
805 if (!create_workqueue_thread(cwq, cpu))
806 break;
807 printk(KERN_ERR "workqueue for %i failed\n", cpu);
808 return NOTIFY_BAD;
809
810 case CPU_ONLINE:
811 wake_up_process(cwq->thread);
812 break;
813
814 case CPU_UP_CANCELED:
815 if (cwq->thread)
816 wake_up_process(cwq->thread);
817 case CPU_DEAD:
818 cleanup_workqueue_thread(cwq, cpu);
819 break;
820 }
821 }
822
823 return NOTIFY_OK;
824 }
825
826 void init_workqueues(void)
827 {
828 cpu_populated_map = cpu_online_map;
829 singlethread_cpu = first_cpu(cpu_possible_map);
830 hotcpu_notifier(workqueue_cpu_callback, 0);
831 keventd_wq = create_workqueue("events");
832 BUG_ON(!keventd_wq);
833 }