workqueue: use hotcpu_notifier() for workqueue_cpu_down_callback()
1 /*
2 * kernel/workqueue.c - generic async execution with shared worker pool
3 *
4 * Copyright (C) 2002 Ingo Molnar
5 *
6 * Derived from the taskqueue/keventd code by:
7 * David Woodhouse <dwmw2@infradead.org>
8 * Andrew Morton
9 * Kai Petzke <wpp@marie.physik.tu-berlin.de>
10 * Theodore Ts'o <tytso@mit.edu>
11 *
12 * Made to use alloc_percpu by Christoph Lameter.
13 *
14 * Copyright (C) 2010 SUSE Linux Products GmbH
15 * Copyright (C) 2010 Tejun Heo <tj@kernel.org>
16 *
17 * This is the generic async execution mechanism. Work items are
18 * executed in process context. The worker pool is shared and
19 * automatically managed. There is one worker pool for each CPU and
20 * one extra for works which are better served by workers which are
21 * not bound to any specific CPU.
22 *
23 * Please read Documentation/workqueue.txt for details.
24 */
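/*
 * Illustrative sketch of the user-facing side of the mechanism described
 * above, assuming a separate module that includes <linux/workqueue.h>.
 * The example_work/example_fn names are hypothetical.
 */
static void example_fn(struct work_struct *work)
{
	pr_info("running asynchronously in process context\n");
}

static DECLARE_WORK(example_work, example_fn);

static void example_kick(void)
{
	/* hand the item to the shared worker pool; example_fn() runs later */
	schedule_work(&example_work);
}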
25
26 #include <linux/export.h>
27 #include <linux/kernel.h>
28 #include <linux/sched.h>
29 #include <linux/init.h>
30 #include <linux/signal.h>
31 #include <linux/completion.h>
32 #include <linux/workqueue.h>
33 #include <linux/slab.h>
34 #include <linux/cpu.h>
35 #include <linux/notifier.h>
36 #include <linux/kthread.h>
37 #include <linux/hardirq.h>
38 #include <linux/mempolicy.h>
39 #include <linux/freezer.h>
40 #include <linux/kallsyms.h>
41 #include <linux/debug_locks.h>
42 #include <linux/lockdep.h>
43 #include <linux/idr.h>
44
45 #include "workqueue_sched.h"
46
47 enum {
48 /*
49 * global_cwq flags
50 *
51 * A bound gcwq is either associated or disassociated with its CPU.
52 * While associated (!DISASSOCIATED), all workers are bound to the
53 * CPU and none has %WORKER_UNBOUND set and concurrency management
54 * is in effect.
55 *
56 * While DISASSOCIATED, the cpu may be offline and all workers have
57 * %WORKER_UNBOUND set and concurrency management disabled, and may
58 * be executing on any CPU. The gcwq behaves as an unbound one.
59 *
60 * Note that DISASSOCIATED can be flipped only while holding
61 * assoc_mutex of all pools on the gcwq to avoid changing binding
62 * state while create_worker() is in progress.
63 */
64 GCWQ_DISASSOCIATED = 1 << 0, /* cpu can't serve workers */
65 GCWQ_FREEZING = 1 << 1, /* freeze in progress */
66
67 /* pool flags */
68 POOL_MANAGE_WORKERS = 1 << 0, /* need to manage workers */
69 POOL_MANAGING_WORKERS = 1 << 1, /* managing workers */
70
71 /* worker flags */
72 WORKER_STARTED = 1 << 0, /* started */
73 WORKER_DIE = 1 << 1, /* die die die */
74 WORKER_IDLE = 1 << 2, /* is idle */
75 WORKER_PREP = 1 << 3, /* preparing to run works */
76 WORKER_CPU_INTENSIVE = 1 << 6, /* cpu intensive */
77 WORKER_UNBOUND = 1 << 7, /* worker is unbound */
78
79 WORKER_NOT_RUNNING = WORKER_PREP | WORKER_UNBOUND |
80 WORKER_CPU_INTENSIVE,
81
82 NR_WORKER_POOLS = 2, /* # worker pools per gcwq */
83
84 BUSY_WORKER_HASH_ORDER = 6, /* 64 pointers */
85 BUSY_WORKER_HASH_SIZE = 1 << BUSY_WORKER_HASH_ORDER,
86 BUSY_WORKER_HASH_MASK = BUSY_WORKER_HASH_SIZE - 1,
87
88 MAX_IDLE_WORKERS_RATIO = 4, /* 1/4 of busy can be idle */
89 IDLE_WORKER_TIMEOUT = 300 * HZ, /* keep idle ones for 5 mins */
90
91 MAYDAY_INITIAL_TIMEOUT = HZ / 100 >= 2 ? HZ / 100 : 2,
92 /* call for help after 10ms
93 (min two ticks) */
94 MAYDAY_INTERVAL = HZ / 10, /* and then every 100ms */
95 CREATE_COOLDOWN = HZ, /* time to breathe after fail */
96
97 /*
98 * Rescue workers are used only in emergencies and shared by
99 * all cpus. Give them -20.
100 */
101 RESCUER_NICE_LEVEL = -20,
102 HIGHPRI_NICE_LEVEL = -20,
103 };
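/*
 * Worked example for the timeout constants above: with HZ == 1000,
 * MAYDAY_INITIAL_TIMEOUT evaluates to 1000/100 = 10 ticks (10ms); with
 * HZ == 100, 100/100 = 1 tick is below the two-tick minimum, so the
 * conditional falls back to 2 ticks (20ms). MAYDAY_INTERVAL is HZ/10,
 * i.e. 100ms independent of HZ, and IDLE_WORKER_TIMEOUT is 300 * HZ,
 * i.e. five minutes.
 */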
104
105 /*
106 * Structure fields follow one of the following exclusion rules.
107 *
108 * I: Modifiable by initialization/destruction paths and read-only for
109 * everyone else.
110 *
111 * P: Preemption protected. Disabling preemption is enough and should
112 * only be modified and accessed from the local cpu.
113 *
114 * L: gcwq->lock protected. Access with gcwq->lock held.
115 *
116 * X: During normal operation, modification requires gcwq->lock and
117 * should be done only from local cpu. Either disabling preemption
118 * on local cpu or grabbing gcwq->lock is enough for read access.
119 * If GCWQ_DISASSOCIATED is set, it's identical to L.
120 *
121 * F: wq->flush_mutex protected.
122 *
123 * W: workqueue_lock protected.
124 */
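/*
 * Illustrative sketch of what the "L:" annotation above implies for a
 * hypothetical helper: the annotated field may only be touched while
 * holding gcwq->lock. (example_pool_has_work() is not part of this
 * file.)
 */
static bool example_pool_has_work(struct worker_pool *pool)
{
	struct global_cwq *gcwq = pool->gcwq;
	bool pending;

	spin_lock_irq(&gcwq->lock);
	pending = !list_empty(&pool->worklist);	/* worklist is "L:" protected */
	spin_unlock_irq(&gcwq->lock);

	return pending;
}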
125
126 struct global_cwq;
127 struct worker_pool;
128
129 /*
130 * The poor guys doing the actual heavy lifting. All on-duty workers
131 * are either serving the manager role, on idle list or on busy hash.
132 */
133 struct worker {
134 /* on idle list while idle, on busy hash table while busy */
135 union {
136 struct list_head entry; /* L: while idle */
137 struct hlist_node hentry; /* L: while busy */
138 };
139
140 struct work_struct *current_work; /* L: work being processed */
141 struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
142 struct list_head scheduled; /* L: scheduled works */
143 struct task_struct *task; /* I: worker task */
144 struct worker_pool *pool; /* I: the associated pool */
145 /* 64 bytes boundary on 64bit, 32 on 32bit */
146 unsigned long last_active; /* L: last active timestamp */
147 unsigned int flags; /* X: flags */
148 int id; /* I: worker id */
149
150 /* for rebinding worker to CPU */
151 struct work_struct rebind_work; /* L: for busy worker */
152 };
153
154 struct worker_pool {
155 struct global_cwq *gcwq; /* I: the owning gcwq */
156 unsigned int flags; /* X: flags */
157
158 struct list_head worklist; /* L: list of pending works */
159 int nr_workers; /* L: total number of workers */
160
161 /* nr_idle includes the ones off idle_list for rebinding */
162 int nr_idle; /* L: currently idle ones */
163
164 struct list_head idle_list; /* X: list of idle workers */
165 struct timer_list idle_timer; /* L: worker idle timeout */
166 struct timer_list mayday_timer; /* L: SOS timer for workers */
167
168 struct mutex assoc_mutex; /* protect GCWQ_DISASSOCIATED */
169 struct ida worker_ida; /* L: for worker IDs */
170 };
171
172 /*
173 * Global per-cpu workqueue. There's one and only one for each cpu
174 * and all works are queued and processed here regardless of their
175 * target workqueues.
176 */
177 struct global_cwq {
178 spinlock_t lock; /* the gcwq lock */
179 unsigned int cpu; /* I: the associated cpu */
180 unsigned int flags; /* L: GCWQ_* flags */
181
182 /* workers are chained either in busy_hash or pool idle_list */
183 struct hlist_head busy_hash[BUSY_WORKER_HASH_SIZE];
184 /* L: hash of busy workers */
185
186 struct worker_pool pools[NR_WORKER_POOLS];
187 /* normal and highpri pools */
188 } ____cacheline_aligned_in_smp;
189
190 /*
191 * The per-CPU workqueue. The lower WORK_STRUCT_FLAG_BITS of
192 * work_struct->data are used for flags and thus cwqs need to be
193 * aligned at two's power of the number of flag bits.
194 */
195 struct cpu_workqueue_struct {
196 struct worker_pool *pool; /* I: the associated pool */
197 struct workqueue_struct *wq; /* I: the owning workqueue */
198 int work_color; /* L: current color */
199 int flush_color; /* L: flushing color */
200 int nr_in_flight[WORK_NR_COLORS];
201 /* L: nr of in_flight works */
202 int nr_active; /* L: nr of active works */
203 int max_active; /* L: max active works */
204 struct list_head delayed_works; /* L: delayed works */
205 };
206
207 /*
208 * Structure used to wait for workqueue flush.
209 */
210 struct wq_flusher {
211 struct list_head list; /* F: list of flushers */
212 int flush_color; /* F: flush color waiting for */
213 struct completion done; /* flush completion */
214 };
215
216 /*
217 * All cpumasks are assumed to be always set on UP and thus can't be
218 * used to determine whether there's something to be done.
219 */
220 #ifdef CONFIG_SMP
221 typedef cpumask_var_t mayday_mask_t;
222 #define mayday_test_and_set_cpu(cpu, mask) \
223 cpumask_test_and_set_cpu((cpu), (mask))
224 #define mayday_clear_cpu(cpu, mask) cpumask_clear_cpu((cpu), (mask))
225 #define for_each_mayday_cpu(cpu, mask) for_each_cpu((cpu), (mask))
226 #define alloc_mayday_mask(maskp, gfp) zalloc_cpumask_var((maskp), (gfp))
227 #define free_mayday_mask(mask) free_cpumask_var((mask))
228 #else
229 typedef unsigned long mayday_mask_t;
230 #define mayday_test_and_set_cpu(cpu, mask) test_and_set_bit(0, &(mask))
231 #define mayday_clear_cpu(cpu, mask) clear_bit(0, &(mask))
232 #define for_each_mayday_cpu(cpu, mask) if ((cpu) = 0, (mask))
233 #define alloc_mayday_mask(maskp, gfp) true
234 #define free_mayday_mask(mask) do { } while (0)
235 #endif
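/*
 * Illustrative sketch of how the mayday_mask_t abstraction above is
 * consumed (the real consumer is the rescuer thread later in this file);
 * example_drain_mayday() is hypothetical. The same loop works on SMP,
 * where the mask is a real cpumask, and on UP, where it collapses to a
 * single bit.
 */
static void example_drain_mayday(struct workqueue_struct *wq)
{
	unsigned int cpu;

	for_each_mayday_cpu(cpu, wq->mayday_mask) {
		mayday_clear_cpu(cpu, wq->mayday_mask);
		/* ... look up the cwq for @cpu and process its works ... */
	}
}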
236
237 /*
238 * The externally visible workqueue abstraction is an array of
239 * per-CPU workqueues:
240 */
241 struct workqueue_struct {
242 unsigned int flags; /* W: WQ_* flags */
243 union {
244 struct cpu_workqueue_struct __percpu *pcpu;
245 struct cpu_workqueue_struct *single;
246 unsigned long v;
247 } cpu_wq; /* I: cwq's */
248 struct list_head list; /* W: list of all workqueues */
249
250 struct mutex flush_mutex; /* protects wq flushing */
251 int work_color; /* F: current work color */
252 int flush_color; /* F: current flush color */
253 atomic_t nr_cwqs_to_flush; /* flush in progress */
254 struct wq_flusher *first_flusher; /* F: first flusher */
255 struct list_head flusher_queue; /* F: flush waiters */
256 struct list_head flusher_overflow; /* F: flush overflow list */
257
258 mayday_mask_t mayday_mask; /* cpus requesting rescue */
259 struct worker *rescuer; /* I: rescue worker */
260
261 int nr_drainers; /* W: drain in progress */
262 int saved_max_active; /* W: saved cwq max_active */
263 #ifdef CONFIG_LOCKDEP
264 struct lockdep_map lockdep_map;
265 #endif
266 char name[]; /* I: workqueue name */
267 };
268
269 struct workqueue_struct *system_wq __read_mostly;
270 EXPORT_SYMBOL_GPL(system_wq);
271 struct workqueue_struct *system_highpri_wq __read_mostly;
272 EXPORT_SYMBOL_GPL(system_highpri_wq);
273 struct workqueue_struct *system_long_wq __read_mostly;
274 EXPORT_SYMBOL_GPL(system_long_wq);
275 struct workqueue_struct *system_unbound_wq __read_mostly;
276 EXPORT_SYMBOL_GPL(system_unbound_wq);
277 struct workqueue_struct *system_freezable_wq __read_mostly;
278 EXPORT_SYMBOL_GPL(system_freezable_wq);
279
280 #define CREATE_TRACE_POINTS
281 #include <trace/events/workqueue.h>
282
283 #define for_each_worker_pool(pool, gcwq) \
284 for ((pool) = &(gcwq)->pools[0]; \
285 (pool) < &(gcwq)->pools[NR_WORKER_POOLS]; (pool)++)
286
287 #define for_each_busy_worker(worker, i, pos, gcwq) \
288 for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) \
289 hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
290
291 static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask,
292 unsigned int sw)
293 {
294 if (cpu < nr_cpu_ids) {
295 if (sw & 1) {
296 cpu = cpumask_next(cpu, mask);
297 if (cpu < nr_cpu_ids)
298 return cpu;
299 }
300 if (sw & 2)
301 return WORK_CPU_UNBOUND;
302 }
303 return WORK_CPU_NONE;
304 }
305
306 static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
307 struct workqueue_struct *wq)
308 {
309 return __next_gcwq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2);
310 }
311
312 /*
313 * CPU iterators
314 *
315 * An extra gcwq is defined for an invalid cpu number
316 * (WORK_CPU_UNBOUND) to host workqueues which are not bound to any
317 * specific CPU. The following iterators are similar to
318 * for_each_*_cpu() iterators but also considers the unbound gcwq.
319 *
320 * for_each_gcwq_cpu() : possible CPUs + WORK_CPU_UNBOUND
321 * for_each_online_gcwq_cpu() : online CPUs + WORK_CPU_UNBOUND
322 * for_each_cwq_cpu() : possible CPUs for bound workqueues,
323 * WORK_CPU_UNBOUND for unbound workqueues
324 */
325 #define for_each_gcwq_cpu(cpu) \
326 for ((cpu) = __next_gcwq_cpu(-1, cpu_possible_mask, 3); \
327 (cpu) < WORK_CPU_NONE; \
328 (cpu) = __next_gcwq_cpu((cpu), cpu_possible_mask, 3))
329
330 #define for_each_online_gcwq_cpu(cpu) \
331 for ((cpu) = __next_gcwq_cpu(-1, cpu_online_mask, 3); \
332 (cpu) < WORK_CPU_NONE; \
333 (cpu) = __next_gcwq_cpu((cpu), cpu_online_mask, 3))
334
335 #define for_each_cwq_cpu(cpu, wq) \
336 for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, (wq)); \
337 (cpu) < WORK_CPU_NONE; \
338 (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq)))
339
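/*
 * Illustrative use of the iterators above (the same pattern appears in
 * is_chained_work() below): visit every gcwq, i.e. one per possible CPU
 * plus the unbound one hosted at WORK_CPU_UNBOUND. example_visit_gcwqs()
 * is hypothetical.
 */
static void example_visit_gcwqs(void)
{
	unsigned int cpu;

	for_each_gcwq_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);

		/* ... lock and inspect @gcwq ... */
		(void)gcwq;
	}
}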
340 #ifdef CONFIG_DEBUG_OBJECTS_WORK
341
342 static struct debug_obj_descr work_debug_descr;
343
344 static void *work_debug_hint(void *addr)
345 {
346 return ((struct work_struct *) addr)->func;
347 }
348
349 /*
350 * fixup_init is called when:
351 * - an active object is initialized
352 */
353 static int work_fixup_init(void *addr, enum debug_obj_state state)
354 {
355 struct work_struct *work = addr;
356
357 switch (state) {
358 case ODEBUG_STATE_ACTIVE:
359 cancel_work_sync(work);
360 debug_object_init(work, &work_debug_descr);
361 return 1;
362 default:
363 return 0;
364 }
365 }
366
367 /*
368 * fixup_activate is called when:
369 * - an active object is activated
370 * - an unknown object is activated (might be a statically initialized object)
371 */
372 static int work_fixup_activate(void *addr, enum debug_obj_state state)
373 {
374 struct work_struct *work = addr;
375
376 switch (state) {
377
378 case ODEBUG_STATE_NOTAVAILABLE:
379 /*
380 * This is not really a fixup. The work struct was
381 * statically initialized. We just make sure that it
382 * is tracked in the object tracker.
383 */
384 if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
385 debug_object_init(work, &work_debug_descr);
386 debug_object_activate(work, &work_debug_descr);
387 return 0;
388 }
389 WARN_ON_ONCE(1);
390 return 0;
391
392 case ODEBUG_STATE_ACTIVE:
393 WARN_ON(1);
394
395 default:
396 return 0;
397 }
398 }
399
400 /*
401 * fixup_free is called when:
402 * - an active object is freed
403 */
404 static int work_fixup_free(void *addr, enum debug_obj_state state)
405 {
406 struct work_struct *work = addr;
407
408 switch (state) {
409 case ODEBUG_STATE_ACTIVE:
410 cancel_work_sync(work);
411 debug_object_free(work, &work_debug_descr);
412 return 1;
413 default:
414 return 0;
415 }
416 }
417
418 static struct debug_obj_descr work_debug_descr = {
419 .name = "work_struct",
420 .debug_hint = work_debug_hint,
421 .fixup_init = work_fixup_init,
422 .fixup_activate = work_fixup_activate,
423 .fixup_free = work_fixup_free,
424 };
425
426 static inline void debug_work_activate(struct work_struct *work)
427 {
428 debug_object_activate(work, &work_debug_descr);
429 }
430
431 static inline void debug_work_deactivate(struct work_struct *work)
432 {
433 debug_object_deactivate(work, &work_debug_descr);
434 }
435
436 void __init_work(struct work_struct *work, int onstack)
437 {
438 if (onstack)
439 debug_object_init_on_stack(work, &work_debug_descr);
440 else
441 debug_object_init(work, &work_debug_descr);
442 }
443 EXPORT_SYMBOL_GPL(__init_work);
444
445 void destroy_work_on_stack(struct work_struct *work)
446 {
447 debug_object_free(work, &work_debug_descr);
448 }
449 EXPORT_SYMBOL_GPL(destroy_work_on_stack);
450
451 #else
452 static inline void debug_work_activate(struct work_struct *work) { }
453 static inline void debug_work_deactivate(struct work_struct *work) { }
454 #endif
455
456 /* Serializes the accesses to the list of workqueues. */
457 static DEFINE_SPINLOCK(workqueue_lock);
458 static LIST_HEAD(workqueues);
459 static bool workqueue_freezing; /* W: have wqs started freezing? */
460
461 /*
462 * The almighty global cpu workqueues. nr_running is the only field
463 * which is expected to be used frequently by other cpus via
464 * try_to_wake_up(). Put it in a separate cacheline.
465 */
466 static DEFINE_PER_CPU(struct global_cwq, global_cwq);
467 static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, pool_nr_running[NR_WORKER_POOLS]);
468
469 /*
470 * Global cpu workqueue and nr_running counter for unbound gcwq. The
471 * gcwq is always online, has GCWQ_DISASSOCIATED set, and all its
472 * workers have WORKER_UNBOUND set.
473 */
474 static struct global_cwq unbound_global_cwq;
475 static atomic_t unbound_pool_nr_running[NR_WORKER_POOLS] = {
476 [0 ... NR_WORKER_POOLS - 1] = ATOMIC_INIT(0), /* always 0 */
477 };
478
479 static int worker_thread(void *__worker);
480
481 static int worker_pool_pri(struct worker_pool *pool)
482 {
483 return pool - pool->gcwq->pools;
484 }
485
486 static struct global_cwq *get_gcwq(unsigned int cpu)
487 {
488 if (cpu != WORK_CPU_UNBOUND)
489 return &per_cpu(global_cwq, cpu);
490 else
491 return &unbound_global_cwq;
492 }
493
494 static atomic_t *get_pool_nr_running(struct worker_pool *pool)
495 {
496 int cpu = pool->gcwq->cpu;
497 int idx = worker_pool_pri(pool);
498
499 if (cpu != WORK_CPU_UNBOUND)
500 return &per_cpu(pool_nr_running, cpu)[idx];
501 else
502 return &unbound_pool_nr_running[idx];
503 }
504
505 static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
506 struct workqueue_struct *wq)
507 {
508 if (!(wq->flags & WQ_UNBOUND)) {
509 if (likely(cpu < nr_cpu_ids))
510 return per_cpu_ptr(wq->cpu_wq.pcpu, cpu);
511 } else if (likely(cpu == WORK_CPU_UNBOUND))
512 return wq->cpu_wq.single;
513 return NULL;
514 }
515
516 static unsigned int work_color_to_flags(int color)
517 {
518 return color << WORK_STRUCT_COLOR_SHIFT;
519 }
520
521 static int get_work_color(struct work_struct *work)
522 {
523 return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
524 ((1 << WORK_STRUCT_COLOR_BITS) - 1);
525 }
526
527 static int work_next_color(int color)
528 {
529 return (color + 1) % WORK_NR_COLORS;
530 }
531
532 /*
533 * While queued, %WORK_STRUCT_CWQ is set and non flag bits of a work's data
534 * contain the pointer to the queued cwq. Once execution starts, the flag
535 * is cleared and the high bits contain OFFQ flags and CPU number.
536 *
537 * set_work_cwq(), set_work_cpu_and_clear_pending(), mark_work_canceling()
538 * and clear_work_data() can be used to set the cwq, cpu or clear
539 * work->data. These functions should only be called while the work is
540 * owned - ie. while the PENDING bit is set.
541 *
542 * get_work_[g]cwq() can be used to obtain the gcwq or cwq corresponding to
543 * a work. gcwq is available once the work has been queued anywhere after
544 * initialization until it is sync canceled. cwq is available only while
545 * the work item is queued.
546 *
547 * %WORK_OFFQ_CANCELING is used to mark a work item which is being
548 * canceled. While being canceled, a work item may have its PENDING set
549 * but stay off timer and worklist for arbitrarily long and nobody should
550 * try to steal the PENDING bit.
551 */
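/*
 * Worked example of the encoding described above: while a work item is
 * queued, work->data holds the cwq pointer with WORK_STRUCT_CWQ and
 * WORK_STRUCT_PENDING set in the low flag bits, so get_work_cwq() below
 * recovers the cwq by masking with WORK_STRUCT_WQ_DATA_MASK. Once
 * execution starts on, say, CPU 3, set_work_cpu_and_clear_pending()
 * rewrites it to 3 << WORK_OFFQ_CPU_SHIFT with no flags, and
 * get_work_gcwq() falls back to looking the gcwq up by CPU number.
 */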
552 static inline void set_work_data(struct work_struct *work, unsigned long data,
553 unsigned long flags)
554 {
555 BUG_ON(!work_pending(work));
556 atomic_long_set(&work->data, data | flags | work_static(work));
557 }
558
559 static void set_work_cwq(struct work_struct *work,
560 struct cpu_workqueue_struct *cwq,
561 unsigned long extra_flags)
562 {
563 set_work_data(work, (unsigned long)cwq,
564 WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags);
565 }
566
567 static void set_work_cpu_and_clear_pending(struct work_struct *work,
568 unsigned int cpu)
569 {
570 /*
571 * The following wmb is paired with the implied mb in
572 * test_and_set_bit(PENDING) and ensures all updates to @work made
573 * here are visible to and precede any updates by the next PENDING
574 * owner.
575 */
576 smp_wmb();
577 set_work_data(work, (unsigned long)cpu << WORK_OFFQ_CPU_SHIFT, 0);
578 }
579
580 static void clear_work_data(struct work_struct *work)
581 {
582 smp_wmb(); /* see set_work_cpu_and_clear_pending() */
583 set_work_data(work, WORK_STRUCT_NO_CPU, 0);
584 }
585
586 static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work)
587 {
588 unsigned long data = atomic_long_read(&work->data);
589
590 if (data & WORK_STRUCT_CWQ)
591 return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
592 else
593 return NULL;
594 }
595
596 static struct global_cwq *get_work_gcwq(struct work_struct *work)
597 {
598 unsigned long data = atomic_long_read(&work->data);
599 unsigned int cpu;
600
601 if (data & WORK_STRUCT_CWQ)
602 return ((struct cpu_workqueue_struct *)
603 (data & WORK_STRUCT_WQ_DATA_MASK))->pool->gcwq;
604
605 cpu = data >> WORK_OFFQ_CPU_SHIFT;
606 if (cpu == WORK_CPU_NONE)
607 return NULL;
608
609 BUG_ON(cpu >= nr_cpu_ids && cpu != WORK_CPU_UNBOUND);
610 return get_gcwq(cpu);
611 }
612
613 static void mark_work_canceling(struct work_struct *work)
614 {
615 struct global_cwq *gcwq = get_work_gcwq(work);
616 unsigned long cpu = gcwq ? gcwq->cpu : WORK_CPU_NONE;
617
618 set_work_data(work, (cpu << WORK_OFFQ_CPU_SHIFT) | WORK_OFFQ_CANCELING,
619 WORK_STRUCT_PENDING);
620 }
621
622 static bool work_is_canceling(struct work_struct *work)
623 {
624 unsigned long data = atomic_long_read(&work->data);
625
626 return !(data & WORK_STRUCT_CWQ) && (data & WORK_OFFQ_CANCELING);
627 }
628
629 /*
630 * Policy functions. These define the policies on how the global worker
631 * pools are managed. Unless noted otherwise, these functions assume that
632 * they're being called with gcwq->lock held.
633 */
634
635 static bool __need_more_worker(struct worker_pool *pool)
636 {
637 return !atomic_read(get_pool_nr_running(pool));
638 }
639
640 /*
641 * Need to wake up a worker? Called from anything but currently
642 * running workers.
643 *
644 * Note that, because unbound workers never contribute to nr_running, this
645 * function will always return %true for unbound gcwq as long as the
646 * worklist isn't empty.
647 */
648 static bool need_more_worker(struct worker_pool *pool)
649 {
650 return !list_empty(&pool->worklist) && __need_more_worker(pool);
651 }
652
653 /* Can I start working? Called from busy but !running workers. */
654 static bool may_start_working(struct worker_pool *pool)
655 {
656 return pool->nr_idle;
657 }
658
659 /* Do I need to keep working? Called from currently running workers. */
660 static bool keep_working(struct worker_pool *pool)
661 {
662 atomic_t *nr_running = get_pool_nr_running(pool);
663
664 return !list_empty(&pool->worklist) && atomic_read(nr_running) <= 1;
665 }
666
667 /* Do we need a new worker? Called from manager. */
668 static bool need_to_create_worker(struct worker_pool *pool)
669 {
670 return need_more_worker(pool) && !may_start_working(pool);
671 }
672
673 /* Do I need to be the manager? */
674 static bool need_to_manage_workers(struct worker_pool *pool)
675 {
676 return need_to_create_worker(pool) ||
677 (pool->flags & POOL_MANAGE_WORKERS);
678 }
679
680 /* Do we have too many workers and should some go away? */
681 static bool too_many_workers(struct worker_pool *pool)
682 {
683 bool managing = pool->flags & POOL_MANAGING_WORKERS;
684 int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
685 int nr_busy = pool->nr_workers - nr_idle;
686
687 /*
688 * nr_idle and idle_list may disagree if idle rebinding is in
689 * progress. Never return %true if idle_list is empty.
690 */
691 if (list_empty(&pool->idle_list))
692 return false;
693
694 return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
695 }
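/*
 * Worked example for the policy above: with MAX_IDLE_WORKERS_RATIO == 4
 * and 16 busy workers, too_many_workers() starts returning %true once
 * nr_idle reaches 2 + 16/4 = 6, so up to five idle workers are tolerated
 * before the idle timer begins retiring the excess.
 */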
696
697 /*
698 * Wake up functions.
699 */
700
701 /* Return the first worker. Safe with preemption disabled */
702 static struct worker *first_worker(struct worker_pool *pool)
703 {
704 if (unlikely(list_empty(&pool->idle_list)))
705 return NULL;
706
707 return list_first_entry(&pool->idle_list, struct worker, entry);
708 }
709
710 /**
711 * wake_up_worker - wake up an idle worker
712 * @pool: worker pool to wake worker from
713 *
714 * Wake up the first idle worker of @pool.
715 *
716 * CONTEXT:
717 * spin_lock_irq(gcwq->lock).
718 */
719 static void wake_up_worker(struct worker_pool *pool)
720 {
721 struct worker *worker = first_worker(pool);
722
723 if (likely(worker))
724 wake_up_process(worker->task);
725 }
726
727 /**
728 * wq_worker_waking_up - a worker is waking up
729 * @task: task waking up
730 * @cpu: CPU @task is waking up to
731 *
732 * This function is called during try_to_wake_up() when a worker is
733 * being awoken.
734 *
735 * CONTEXT:
736 * spin_lock_irq(rq->lock)
737 */
738 void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
739 {
740 struct worker *worker = kthread_data(task);
741
742 if (!(worker->flags & WORKER_NOT_RUNNING))
743 atomic_inc(get_pool_nr_running(worker->pool));
744 }
745
746 /**
747 * wq_worker_sleeping - a worker is going to sleep
748 * @task: task going to sleep
749 * @cpu: CPU in question, must be the current CPU number
750 *
751 * This function is called during schedule() when a busy worker is
752 * going to sleep. Worker on the same cpu can be woken up by
753 * returning pointer to its task.
754 *
755 * CONTEXT:
756 * spin_lock_irq(rq->lock)
757 *
758 * RETURNS:
759 * Worker task on @cpu to wake up, %NULL if none.
760 */
761 struct task_struct *wq_worker_sleeping(struct task_struct *task,
762 unsigned int cpu)
763 {
764 struct worker *worker = kthread_data(task), *to_wakeup = NULL;
765 struct worker_pool *pool = worker->pool;
766 atomic_t *nr_running = get_pool_nr_running(pool);
767
768 if (worker->flags & WORKER_NOT_RUNNING)
769 return NULL;
770
771 /* this can only happen on the local cpu */
772 BUG_ON(cpu != raw_smp_processor_id());
773
774 /*
775 * The counterpart of the following dec_and_test, implied mb,
776 * worklist not empty test sequence is in insert_work().
777 * Please read comment there.
778 *
779 * NOT_RUNNING is clear. This means that we're bound to and
780 * running on the local cpu w/ rq lock held and preemption
781 * disabled, which in turn means that none else could be
782 * manipulating idle_list, so dereferencing idle_list without gcwq
783 * lock is safe.
784 */
785 if (atomic_dec_and_test(nr_running) && !list_empty(&pool->worklist))
786 to_wakeup = first_worker(pool);
787 return to_wakeup ? to_wakeup->task : NULL;
788 }
789
790 /**
791 * worker_set_flags - set worker flags and adjust nr_running accordingly
792 * @worker: self
793 * @flags: flags to set
794 * @wakeup: wakeup an idle worker if necessary
795 *
796 * Set @flags in @worker->flags and adjust nr_running accordingly. If
797 * nr_running becomes zero and @wakeup is %true, an idle worker is
798 * woken up.
799 *
800 * CONTEXT:
801 * spin_lock_irq(gcwq->lock)
802 */
803 static inline void worker_set_flags(struct worker *worker, unsigned int flags,
804 bool wakeup)
805 {
806 struct worker_pool *pool = worker->pool;
807
808 WARN_ON_ONCE(worker->task != current);
809
810 /*
811 * If transitioning into NOT_RUNNING, adjust nr_running and
812 * wake up an idle worker as necessary if requested by
813 * @wakeup.
814 */
815 if ((flags & WORKER_NOT_RUNNING) &&
816 !(worker->flags & WORKER_NOT_RUNNING)) {
817 atomic_t *nr_running = get_pool_nr_running(pool);
818
819 if (wakeup) {
820 if (atomic_dec_and_test(nr_running) &&
821 !list_empty(&pool->worklist))
822 wake_up_worker(pool);
823 } else
824 atomic_dec(nr_running);
825 }
826
827 worker->flags |= flags;
828 }
829
830 /**
831 * worker_clr_flags - clear worker flags and adjust nr_running accordingly
832 * @worker: self
833 * @flags: flags to clear
834 *
835 * Clear @flags in @worker->flags and adjust nr_running accordingly.
836 *
837 * CONTEXT:
838 * spin_lock_irq(gcwq->lock)
839 */
840 static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
841 {
842 struct worker_pool *pool = worker->pool;
843 unsigned int oflags = worker->flags;
844
845 WARN_ON_ONCE(worker->task != current);
846
847 worker->flags &= ~flags;
848
849 /*
850 * If transitioning out of NOT_RUNNING, increment nr_running. Note
851 * that the nested NOT_RUNNING is not a noop. NOT_RUNNING is a mask
852 * of multiple flags, not a single flag.
853 */
854 if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
855 if (!(worker->flags & WORKER_NOT_RUNNING))
856 atomic_inc(get_pool_nr_running(pool));
857 }
858
859 /**
860 * busy_worker_head - return the busy hash head for a work
861 * @gcwq: gcwq of interest
862 * @work: work to be hashed
863 *
864 * Return hash head of @gcwq for @work.
865 *
866 * CONTEXT:
867 * spin_lock_irq(gcwq->lock).
868 *
869 * RETURNS:
870 * Pointer to the hash head.
871 */
872 static struct hlist_head *busy_worker_head(struct global_cwq *gcwq,
873 struct work_struct *work)
874 {
875 const int base_shift = ilog2(sizeof(struct work_struct));
876 unsigned long v = (unsigned long)work;
877
878 /* simple shift and fold hash, do we need something better? */
879 v >>= base_shift;
880 v += v >> BUSY_WORKER_HASH_ORDER;
881 v &= BUSY_WORKER_HASH_MASK;
882
883 return &gcwq->busy_hash[v];
884 }
885
886 /**
887 * __find_worker_executing_work - find worker which is executing a work
888 * @gcwq: gcwq of interest
889 * @bwh: hash head as returned by busy_worker_head()
890 * @work: work to find worker for
891 *
892 * Find a worker which is executing @work on @gcwq. @bwh should be
893 * the hash head obtained by calling busy_worker_head() with the same
894 * work.
895 *
896 * CONTEXT:
897 * spin_lock_irq(gcwq->lock).
898 *
899 * RETURNS:
900 * Pointer to worker which is executing @work if found, NULL
901 * otherwise.
902 */
903 static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
904 struct hlist_head *bwh,
905 struct work_struct *work)
906 {
907 struct worker *worker;
908 struct hlist_node *tmp;
909
910 hlist_for_each_entry(worker, tmp, bwh, hentry)
911 if (worker->current_work == work)
912 return worker;
913 return NULL;
914 }
915
916 /**
917 * find_worker_executing_work - find worker which is executing a work
918 * @gcwq: gcwq of interest
919 * @work: work to find worker for
920 *
921 * Find a worker which is executing @work on @gcwq. This function is
922 * identical to __find_worker_executing_work() except that this
923 * function calculates @bwh itself.
924 *
925 * CONTEXT:
926 * spin_lock_irq(gcwq->lock).
927 *
928 * RETURNS:
929 * Pointer to worker which is executing @work if found, NULL
930 * otherwise.
931 */
932 static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
933 struct work_struct *work)
934 {
935 return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work),
936 work);
937 }
938
939 /**
940 * move_linked_works - move linked works to a list
941 * @work: start of series of works to be scheduled
942 * @head: target list to append @work to
943 * @nextp: out parameter for nested worklist walking
944 *
945 * Schedule linked works starting from @work to @head. Work series to
946 * be scheduled starts at @work and includes any consecutive work with
947 * WORK_STRUCT_LINKED set in its predecessor.
948 *
949 * If @nextp is not NULL, it's updated to point to the next work of
950 * the last scheduled work. This allows move_linked_works() to be
951 * nested inside outer list_for_each_entry_safe().
952 *
953 * CONTEXT:
954 * spin_lock_irq(gcwq->lock).
955 */
956 static void move_linked_works(struct work_struct *work, struct list_head *head,
957 struct work_struct **nextp)
958 {
959 struct work_struct *n;
960
961 /*
962 * Linked worklist will always end before the end of the list,
963 * use NULL for list head.
964 */
965 list_for_each_entry_safe_from(work, n, NULL, entry) {
966 list_move_tail(&work->entry, head);
967 if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
968 break;
969 }
970
971 /*
972 * If we're already inside safe list traversal and have moved
973 * multiple works to the scheduled queue, the next position
974 * needs to be updated.
975 */
976 if (nextp)
977 *nextp = n;
978 }
979
980 static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
981 {
982 struct work_struct *work = list_first_entry(&cwq->delayed_works,
983 struct work_struct, entry);
984
985 trace_workqueue_activate_work(work);
986 move_linked_works(work, &cwq->pool->worklist, NULL);
987 __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
988 cwq->nr_active++;
989 }
990
991 /**
992 * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
993 * @cwq: cwq of interest
994 * @color: color of work which left the queue
995 * @delayed: for a delayed work
996 *
997 * A work either has completed or is removed from pending queue,
998 * decrement nr_in_flight of its cwq and handle workqueue flushing.
999 *
1000 * CONTEXT:
1001 * spin_lock_irq(gcwq->lock).
1002 */
1003 static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color,
1004 bool delayed)
1005 {
1006 /* ignore uncolored works */
1007 if (color == WORK_NO_COLOR)
1008 return;
1009
1010 cwq->nr_in_flight[color]--;
1011
1012 if (!delayed) {
1013 cwq->nr_active--;
1014 if (!list_empty(&cwq->delayed_works)) {
1015 /* one down, submit a delayed one */
1016 if (cwq->nr_active < cwq->max_active)
1017 cwq_activate_first_delayed(cwq);
1018 }
1019 }
1020
1021 /* is flush in progress and are we at the flushing tip? */
1022 if (likely(cwq->flush_color != color))
1023 return;
1024
1025 /* are there still in-flight works? */
1026 if (cwq->nr_in_flight[color])
1027 return;
1028
1029 /* this cwq is done, clear flush_color */
1030 cwq->flush_color = -1;
1031
1032 /*
1033 * If this was the last cwq, wake up the first flusher. It
1034 * will handle the rest.
1035 */
1036 if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
1037 complete(&cwq->wq->first_flusher->done);
1038 }
1039
1040 /**
1041 * try_to_grab_pending - steal work item from worklist and disable irq
1042 * @work: work item to steal
1043 * @is_dwork: @work is a delayed_work
1044 * @flags: place to store irq state
1045 *
1046 * Try to grab PENDING bit of @work. This function can handle @work in any
1047 * stable state - idle, on timer or on worklist. Return values are
1048 *
1049 * 1 if @work was pending and we successfully stole PENDING
1050 * 0 if @work was idle and we claimed PENDING
1051 * -EAGAIN if PENDING couldn't be grabbed at the moment, safe to busy-retry
1052 * -ENOENT if someone else is canceling @work, this state may persist
1053 * for arbitrarily long
1054 *
1055 * On >= 0 return, the caller owns @work's PENDING bit. To avoid getting
1056 * interrupted while holding PENDING and @work off queue, irq must be
1057 * disabled on entry. This, combined with delayed_work->timer being
1058 * irqsafe, ensures that we return -EAGAIN for a finite, short period of time.
1059 *
1060 * On successful return, >= 0, irq is disabled and the caller is
1061 * responsible for releasing it using local_irq_restore(*@flags).
1062 *
1063 * This function is safe to call from any context including IRQ handler.
1064 */
1065 static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
1066 unsigned long *flags)
1067 {
1068 struct global_cwq *gcwq;
1069
1070 WARN_ON_ONCE(in_irq());
1071
1072 local_irq_save(*flags);
1073
1074 /* try to steal the timer if it exists */
1075 if (is_dwork) {
1076 struct delayed_work *dwork = to_delayed_work(work);
1077
1078 /*
1079 * dwork->timer is irqsafe. If del_timer() fails, it's
1080 * guaranteed that the timer is not queued anywhere and not
1081 * running on the local CPU.
1082 */
1083 if (likely(del_timer(&dwork->timer)))
1084 return 1;
1085 }
1086
1087 /* try to claim PENDING the normal way */
1088 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
1089 return 0;
1090
1091 /*
1092 * The queueing is in progress, or it is already queued. Try to
1093 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
1094 */
1095 gcwq = get_work_gcwq(work);
1096 if (!gcwq)
1097 goto fail;
1098
1099 spin_lock(&gcwq->lock);
1100 if (!list_empty(&work->entry)) {
1101 /*
1102 * This work is queued, but perhaps we locked the wrong gcwq.
1103 * In that case we must see the new value after rmb(), see
1104 * insert_work()->wmb().
1105 */
1106 smp_rmb();
1107 if (gcwq == get_work_gcwq(work)) {
1108 debug_work_deactivate(work);
1109 list_del_init(&work->entry);
1110 cwq_dec_nr_in_flight(get_work_cwq(work),
1111 get_work_color(work),
1112 *work_data_bits(work) & WORK_STRUCT_DELAYED);
1113
1114 spin_unlock(&gcwq->lock);
1115 return 1;
1116 }
1117 }
1118 spin_unlock(&gcwq->lock);
1119 fail:
1120 local_irq_restore(*flags);
1121 if (work_is_canceling(work))
1122 return -ENOENT;
1123 cpu_relax();
1124 return -EAGAIN;
1125 }
1126
1127 /**
1128 * insert_work - insert a work into gcwq
1129 * @cwq: cwq @work belongs to
1130 * @work: work to insert
1131 * @head: insertion point
1132 * @extra_flags: extra WORK_STRUCT_* flags to set
1133 *
1134 * Insert @work which belongs to @cwq into @gcwq after @head.
1135 * @extra_flags is or'd to work_struct flags.
1136 *
1137 * CONTEXT:
1138 * spin_lock_irq(gcwq->lock).
1139 */
1140 static void insert_work(struct cpu_workqueue_struct *cwq,
1141 struct work_struct *work, struct list_head *head,
1142 unsigned int extra_flags)
1143 {
1144 struct worker_pool *pool = cwq->pool;
1145
1146 /* we own @work, set data and link */
1147 set_work_cwq(work, cwq, extra_flags);
1148
1149 /*
1150 * Ensure that we get the right work->data if we see the
1151 * result of list_add() below, see try_to_grab_pending().
1152 */
1153 smp_wmb();
1154
1155 list_add_tail(&work->entry, head);
1156
1157 /*
1158 * Ensure either worker_sched_deactivated() sees the above
1159 * list_add_tail() or we see zero nr_running to avoid workers
1160 * lying around lazily while there are works to be processed.
1161 */
1162 smp_mb();
1163
1164 if (__need_more_worker(pool))
1165 wake_up_worker(pool);
1166 }
1167
1168 /*
1169 * Test whether @work is being queued from another work executing on the
1170 * same workqueue. This is rather expensive and should only be used from
1171 * cold paths.
1172 */
1173 static bool is_chained_work(struct workqueue_struct *wq)
1174 {
1175 unsigned long flags;
1176 unsigned int cpu;
1177
1178 for_each_gcwq_cpu(cpu) {
1179 struct global_cwq *gcwq = get_gcwq(cpu);
1180 struct worker *worker;
1181 struct hlist_node *pos;
1182 int i;
1183
1184 spin_lock_irqsave(&gcwq->lock, flags);
1185 for_each_busy_worker(worker, i, pos, gcwq) {
1186 if (worker->task != current)
1187 continue;
1188 spin_unlock_irqrestore(&gcwq->lock, flags);
1189 /*
1190 * I'm @worker, no locking necessary. See if @work
1191 * is headed to the same workqueue.
1192 */
1193 return worker->current_cwq->wq == wq;
1194 }
1195 spin_unlock_irqrestore(&gcwq->lock, flags);
1196 }
1197 return false;
1198 }
1199
1200 static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
1201 struct work_struct *work)
1202 {
1203 struct global_cwq *gcwq;
1204 struct cpu_workqueue_struct *cwq;
1205 struct list_head *worklist;
1206 unsigned int work_flags;
1207 unsigned int req_cpu = cpu;
1208
1209 /*
1210 * While a work item is PENDING && off queue, a task trying to
1211 * steal the PENDING will busy-loop waiting for it to either get
1212 * queued or lose PENDING. Grabbing PENDING and queueing should
1213 * happen with IRQ disabled.
1214 */
1215 WARN_ON_ONCE(!irqs_disabled());
1216
1217 debug_work_activate(work);
1218
1219 /* if dying, only works from the same workqueue are allowed */
1220 if (unlikely(wq->flags & WQ_DRAINING) &&
1221 WARN_ON_ONCE(!is_chained_work(wq)))
1222 return;
1223
1224 /* determine gcwq to use */
1225 if (!(wq->flags & WQ_UNBOUND)) {
1226 struct global_cwq *last_gcwq;
1227
1228 if (cpu == WORK_CPU_UNBOUND)
1229 cpu = raw_smp_processor_id();
1230
1231 /*
1232 * It's multi cpu. If @work was previously on a different
1233 * cpu, it might still be running there, in which case the
1234 * work needs to be queued on that cpu to guarantee
1235 * non-reentrancy.
1236 */
1237 gcwq = get_gcwq(cpu);
1238 last_gcwq = get_work_gcwq(work);
1239
1240 if (last_gcwq && last_gcwq != gcwq) {
1241 struct worker *worker;
1242
1243 spin_lock(&last_gcwq->lock);
1244
1245 worker = find_worker_executing_work(last_gcwq, work);
1246
1247 if (worker && worker->current_cwq->wq == wq)
1248 gcwq = last_gcwq;
1249 else {
1250 /* meh... not running there, queue here */
1251 spin_unlock(&last_gcwq->lock);
1252 spin_lock(&gcwq->lock);
1253 }
1254 } else {
1255 spin_lock(&gcwq->lock);
1256 }
1257 } else {
1258 gcwq = get_gcwq(WORK_CPU_UNBOUND);
1259 spin_lock(&gcwq->lock);
1260 }
1261
1262 /* gcwq determined, get cwq and queue */
1263 cwq = get_cwq(gcwq->cpu, wq);
1264 trace_workqueue_queue_work(req_cpu, cwq, work);
1265
1266 if (WARN_ON(!list_empty(&work->entry))) {
1267 spin_unlock(&gcwq->lock);
1268 return;
1269 }
1270
1271 cwq->nr_in_flight[cwq->work_color]++;
1272 work_flags = work_color_to_flags(cwq->work_color);
1273
1274 if (likely(cwq->nr_active < cwq->max_active)) {
1275 trace_workqueue_activate_work(work);
1276 cwq->nr_active++;
1277 worklist = &cwq->pool->worklist;
1278 } else {
1279 work_flags |= WORK_STRUCT_DELAYED;
1280 worklist = &cwq->delayed_works;
1281 }
1282
1283 insert_work(cwq, work, worklist, work_flags);
1284
1285 spin_unlock(&gcwq->lock);
1286 }
1287
1288 /**
1289 * queue_work_on - queue work on specific cpu
1290 * @cpu: CPU number to execute work on
1291 * @wq: workqueue to use
1292 * @work: work to queue
1293 *
1294 * Returns %false if @work was already on a queue, %true otherwise.
1295 *
1296 * We queue the work to a specific CPU, the caller must ensure it
1297 * can't go away.
1298 */
1299 bool queue_work_on(int cpu, struct workqueue_struct *wq,
1300 struct work_struct *work)
1301 {
1302 bool ret = false;
1303 unsigned long flags;
1304
1305 local_irq_save(flags);
1306
1307 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1308 __queue_work(cpu, wq, work);
1309 ret = true;
1310 }
1311
1312 local_irq_restore(flags);
1313 return ret;
1314 }
1315 EXPORT_SYMBOL_GPL(queue_work_on);
1316
1317 /**
1318 * queue_work - queue work on a workqueue
1319 * @wq: workqueue to use
1320 * @work: work to queue
1321 *
1322 * Returns %false if @work was already on a queue, %true otherwise.
1323 *
1324 * We queue the work to the CPU on which it was submitted, but if the CPU dies
1325 * it can be processed by another CPU.
1326 */
1327 bool queue_work(struct workqueue_struct *wq, struct work_struct *work)
1328 {
1329 return queue_work_on(WORK_CPU_UNBOUND, wq, work);
1330 }
1331 EXPORT_SYMBOL_GPL(queue_work);
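/*
 * Illustrative caller-side sketch for queue_work(); the example_dev
 * structure and functions are hypothetical, and dev->wq is assumed to
 * have been created elsewhere (e.g. with alloc_workqueue()).
 */
struct example_dev {
	struct workqueue_struct *wq;
	struct work_struct io_work;
};

static void example_io_fn(struct work_struct *work)
{
	struct example_dev *dev = container_of(work, struct example_dev, io_work);

	/* ... service dev ... */
	(void)dev;
}

static void example_dev_init(struct example_dev *dev)
{
	INIT_WORK(&dev->io_work, example_io_fn);
}

static void example_dev_submit(struct example_dev *dev)
{
	/* %false means it was already pending; it still runs exactly once */
	if (!queue_work(dev->wq, &dev->io_work))
		pr_debug("io_work already pending\n");
}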
1332
1333 void delayed_work_timer_fn(unsigned long __data)
1334 {
1335 struct delayed_work *dwork = (struct delayed_work *)__data;
1336 struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work);
1337
1338 /* should have been called from irqsafe timer with irq already off */
1339 __queue_work(dwork->cpu, cwq->wq, &dwork->work);
1340 }
1341 EXPORT_SYMBOL_GPL(delayed_work_timer_fn);
1342
1343 static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
1344 struct delayed_work *dwork, unsigned long delay)
1345 {
1346 struct timer_list *timer = &dwork->timer;
1347 struct work_struct *work = &dwork->work;
1348 unsigned int lcpu;
1349
1350 WARN_ON_ONCE(timer->function != delayed_work_timer_fn ||
1351 timer->data != (unsigned long)dwork);
1352 BUG_ON(timer_pending(timer));
1353 BUG_ON(!list_empty(&work->entry));
1354
1355 timer_stats_timer_set_start_info(&dwork->timer);
1356
1357 /*
1358 * This stores cwq for the moment, for the timer_fn. Note that the
1359 * work's gcwq is preserved to allow reentrance detection for
1360 * delayed works.
1361 */
1362 if (!(wq->flags & WQ_UNBOUND)) {
1363 struct global_cwq *gcwq = get_work_gcwq(work);
1364
1365 /*
1366 * If we cannot get the last gcwq from @work directly,
1367 * select the last CPU such that it avoids unnecessarily
1368 * triggering non-reentrancy check in __queue_work().
1369 */
1370 lcpu = cpu;
1371 if (gcwq)
1372 lcpu = gcwq->cpu;
1373 if (lcpu == WORK_CPU_UNBOUND)
1374 lcpu = raw_smp_processor_id();
1375 } else {
1376 lcpu = WORK_CPU_UNBOUND;
1377 }
1378
1379 set_work_cwq(work, get_cwq(lcpu, wq), 0);
1380
1381 dwork->cpu = cpu;
1382 timer->expires = jiffies + delay;
1383
1384 if (unlikely(cpu != WORK_CPU_UNBOUND))
1385 add_timer_on(timer, cpu);
1386 else
1387 add_timer(timer);
1388 }
1389
1390 /**
1391 * queue_delayed_work_on - queue work on specific CPU after delay
1392 * @cpu: CPU number to execute work on
1393 * @wq: workqueue to use
1394 * @dwork: work to queue
1395 * @delay: number of jiffies to wait before queueing
1396 *
1397 * Returns %false if @work was already on a queue, %true otherwise. If
1398 * @delay is zero and @dwork is idle, it will be scheduled for immediate
1399 * execution.
1400 */
1401 bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
1402 struct delayed_work *dwork, unsigned long delay)
1403 {
1404 struct work_struct *work = &dwork->work;
1405 bool ret = false;
1406 unsigned long flags;
1407
1408 if (!delay)
1409 return queue_work_on(cpu, wq, &dwork->work);
1410
1411 /* read the comment in __queue_work() */
1412 local_irq_save(flags);
1413
1414 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1415 __queue_delayed_work(cpu, wq, dwork, delay);
1416 ret = true;
1417 }
1418
1419 local_irq_restore(flags);
1420 return ret;
1421 }
1422 EXPORT_SYMBOL_GPL(queue_delayed_work_on);
1423
1424 /**
1425 * queue_delayed_work - queue work on a workqueue after delay
1426 * @wq: workqueue to use
1427 * @dwork: delayable work to queue
1428 * @delay: number of jiffies to wait before queueing
1429 *
1430 * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
1431 */
1432 bool queue_delayed_work(struct workqueue_struct *wq,
1433 struct delayed_work *dwork, unsigned long delay)
1434 {
1435 return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
1436 }
1437 EXPORT_SYMBOL_GPL(queue_delayed_work);
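/*
 * Illustrative sketch for queue_delayed_work(): a self-rearming poll
 * that runs roughly once per second on the system workqueue. The
 * example_poll names are hypothetical.
 */
static void example_poll_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(example_poll, example_poll_fn);

static void example_poll_fn(struct work_struct *work)
{
	/* ... poll hardware ... */
	queue_delayed_work(system_wq, &example_poll, HZ);	/* re-arm in 1s */
}

static void example_start_polling(void)
{
	queue_delayed_work(system_wq, &example_poll, HZ);
}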
1438
1439 /**
1440 * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
1441 * @cpu: CPU number to execute work on
1442 * @wq: workqueue to use
1443 * @dwork: work to queue
1444 * @delay: number of jiffies to wait before queueing
1445 *
1446 * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise,
1447 * modify @dwork's timer so that it expires after @delay. If @delay is
1448 * zero, @dwork is guaranteed to be scheduled immediately regardless of its
1449 * current state.
1450 *
1451 * Returns %false if @dwork was idle and queued, %true if @dwork was
1452 * pending and its timer was modified.
1453 *
1454 * This function is safe to call from any context including IRQ handler.
1455 * See try_to_grab_pending() for details.
1456 */
1457 bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
1458 struct delayed_work *dwork, unsigned long delay)
1459 {
1460 unsigned long flags;
1461 int ret;
1462
1463 do {
1464 ret = try_to_grab_pending(&dwork->work, true, &flags);
1465 } while (unlikely(ret == -EAGAIN));
1466
1467 if (likely(ret >= 0)) {
1468 __queue_delayed_work(cpu, wq, dwork, delay);
1469 local_irq_restore(flags);
1470 }
1471
1472 /* -ENOENT from try_to_grab_pending() becomes %true */
1473 return ret;
1474 }
1475 EXPORT_SYMBOL_GPL(mod_delayed_work_on);
1476
1477 /**
1478 * mod_delayed_work - modify delay of or queue a delayed work
1479 * @wq: workqueue to use
1480 * @dwork: work to queue
1481 * @delay: number of jiffies to wait before queueing
1482 *
1483 * mod_delayed_work_on() on local CPU.
1484 */
1485 bool mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dwork,
1486 unsigned long delay)
1487 {
1488 return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
1489 }
1490 EXPORT_SYMBOL_GPL(mod_delayed_work);
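/*
 * Illustrative sketch for mod_delayed_work() used as a debounce: every
 * call pushes the expiry out to 200ms from now whether or not the work
 * was already pending, so example_flush_fn() only runs once events stop
 * arriving for that long. The example_flush names are hypothetical.
 */
static void example_flush_fn(struct work_struct *work)
{
	/* ... flush the accumulated state ... */
}

static DECLARE_DELAYED_WORK(example_flush, example_flush_fn);

static void example_on_event(void)
{
	mod_delayed_work(system_wq, &example_flush, msecs_to_jiffies(200));
}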
1491
1492 /**
1493 * worker_enter_idle - enter idle state
1494 * @worker: worker which is entering idle state
1495 *
1496 * @worker is entering idle state. Update stats and idle timer if
1497 * necessary.
1498 *
1499 * LOCKING:
1500 * spin_lock_irq(gcwq->lock).
1501 */
1502 static void worker_enter_idle(struct worker *worker)
1503 {
1504 struct worker_pool *pool = worker->pool;
1505 struct global_cwq *gcwq = pool->gcwq;
1506
1507 BUG_ON(worker->flags & WORKER_IDLE);
1508 BUG_ON(!list_empty(&worker->entry) &&
1509 (worker->hentry.next || worker->hentry.pprev));
1510
1511 /* can't use worker_set_flags(), also called from start_worker() */
1512 worker->flags |= WORKER_IDLE;
1513 pool->nr_idle++;
1514 worker->last_active = jiffies;
1515
1516 /* idle_list is LIFO */
1517 list_add(&worker->entry, &pool->idle_list);
1518
1519 if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
1520 mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
1521
1522 /*
1523 * Sanity check nr_running. Because gcwq_unbind_fn() releases
1524 * gcwq->lock between setting %WORKER_UNBOUND and zapping
1525 * nr_running, the warning may trigger spuriously. Check iff
1526 * unbind is not in progress.
1527 */
1528 WARN_ON_ONCE(!(gcwq->flags & GCWQ_DISASSOCIATED) &&
1529 pool->nr_workers == pool->nr_idle &&
1530 atomic_read(get_pool_nr_running(pool)));
1531 }
1532
1533 /**
1534 * worker_leave_idle - leave idle state
1535 * @worker: worker which is leaving idle state
1536 *
1537 * @worker is leaving idle state. Update stats.
1538 *
1539 * LOCKING:
1540 * spin_lock_irq(gcwq->lock).
1541 */
1542 static void worker_leave_idle(struct worker *worker)
1543 {
1544 struct worker_pool *pool = worker->pool;
1545
1546 BUG_ON(!(worker->flags & WORKER_IDLE));
1547 worker_clr_flags(worker, WORKER_IDLE);
1548 pool->nr_idle--;
1549 list_del_init(&worker->entry);
1550 }
1551
1552 /**
1553 * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock gcwq
1554 * @worker: self
1555 *
1556 * Works which are scheduled while the cpu is online must at least be
1557 * scheduled to a worker which is bound to the cpu so that if they are
1558 * flushed from cpu callbacks while cpu is going down, they are
1559 * guaranteed to execute on the cpu.
1560 *
1561 * This function is to be used by rogue workers and rescuers to bind
1562 * themselves to the target cpu and may race with cpu going down or
1563 * coming online. kthread_bind() can't be used because it may put the
1564 * worker to already dead cpu and set_cpus_allowed_ptr() can't be used
1565 * verbatim as it's best effort and blocking and gcwq may be
1566 * [dis]associated in the meantime.
1567 *
1568 * This function tries set_cpus_allowed() and locks gcwq and verifies the
1569 * binding against %GCWQ_DISASSOCIATED which is set during
1570 * %CPU_DOWN_PREPARE and cleared during %CPU_ONLINE, so if the worker
1571 * enters idle state or fetches works without dropping lock, it can
1572 * guarantee the scheduling requirement described in the first paragraph.
1573 *
1574 * CONTEXT:
1575 * Might sleep. Called without any lock but returns with gcwq->lock
1576 * held.
1577 *
1578 * RETURNS:
1579 * %true if the associated gcwq is online (@worker is successfully
1580 * bound), %false if offline.
1581 */
1582 static bool worker_maybe_bind_and_lock(struct worker *worker)
1583 __acquires(&gcwq->lock)
1584 {
1585 struct global_cwq *gcwq = worker->pool->gcwq;
1586 struct task_struct *task = worker->task;
1587
1588 while (true) {
1589 /*
1590 * The following call may fail, succeed or succeed
1591 * without actually migrating the task to the cpu if
1592 * it races with cpu hotunplug operation. Verify
1593 * against GCWQ_DISASSOCIATED.
1594 */
1595 if (!(gcwq->flags & GCWQ_DISASSOCIATED))
1596 set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu));
1597
1598 spin_lock_irq(&gcwq->lock);
1599 if (gcwq->flags & GCWQ_DISASSOCIATED)
1600 return false;
1601 if (task_cpu(task) == gcwq->cpu &&
1602 cpumask_equal(&current->cpus_allowed,
1603 get_cpu_mask(gcwq->cpu)))
1604 return true;
1605 spin_unlock_irq(&gcwq->lock);
1606
1607 /*
1608 * We've raced with CPU hot[un]plug. Give it a breather
1609 * and retry migration. cond_resched() is required here;
1610 * otherwise, we might deadlock against cpu_stop trying to
1611 * bring down the CPU on non-preemptive kernel.
1612 */
1613 cpu_relax();
1614 cond_resched();
1615 }
1616 }
1617
1618 /*
1619 * Rebind an idle @worker to its CPU. worker_thread() will test
1620 * list_empty(@worker->entry) before leaving idle and call this function.
1621 */
1622 static void idle_worker_rebind(struct worker *worker)
1623 {
1624 struct global_cwq *gcwq = worker->pool->gcwq;
1625
1626 /* CPU may go down again in between, clear UNBOUND only on success */
1627 if (worker_maybe_bind_and_lock(worker))
1628 worker_clr_flags(worker, WORKER_UNBOUND);
1629
1630 /* rebind complete, become available again */
1631 list_add(&worker->entry, &worker->pool->idle_list);
1632 spin_unlock_irq(&gcwq->lock);
1633 }
1634
1635 /*
1636 * Function for @worker->rebind.work used to rebind unbound busy workers to
1637 * the associated cpu which is coming back online. This is scheduled by
1638 * cpu up but can race with other cpu hotplug operations and may be
1639 * executed twice without intervening cpu down.
1640 */
1641 static void busy_worker_rebind_fn(struct work_struct *work)
1642 {
1643 struct worker *worker = container_of(work, struct worker, rebind_work);
1644 struct global_cwq *gcwq = worker->pool->gcwq;
1645
1646 if (worker_maybe_bind_and_lock(worker))
1647 worker_clr_flags(worker, WORKER_UNBOUND);
1648
1649 spin_unlock_irq(&gcwq->lock);
1650 }
1651
1652 /**
1653 * rebind_workers - rebind all workers of a gcwq to the associated CPU
1654 * @gcwq: gcwq of interest
1655 *
1656 * @gcwq->cpu is coming online. Rebind all workers to the CPU. Rebinding
1657 * is different for idle and busy ones.
1658 *
1659 * Idle ones will be removed from the idle_list and woken up. They will
1660 * add themselves back after completing rebind. This ensures that the
1661 * idle_list doesn't contain any unbound workers when re-bound busy workers
1662 * try to perform local wake-ups for concurrency management.
1663 *
1664 * Busy workers can rebind after they finish their current work items.
1665 * Queueing the rebind work item at the head of the scheduled list is
1666 * enough. Note that nr_running will be properly bumped as busy workers
1667 * rebind.
1668 *
1669 * On return, all non-manager workers are scheduled for rebind - see
1670 * manage_workers() for the manager special case. Any idle worker
1671 * including the manager will not appear on @idle_list until rebind is
1672 * complete, making local wake-ups safe.
1673 */
1674 static void rebind_workers(struct global_cwq *gcwq)
1675 {
1676 struct worker_pool *pool;
1677 struct worker *worker, *n;
1678 struct hlist_node *pos;
1679 int i;
1680
1681 lockdep_assert_held(&gcwq->lock);
1682
1683 for_each_worker_pool(pool, gcwq)
1684 lockdep_assert_held(&pool->assoc_mutex);
1685
1686 /* dequeue and kick idle ones */
1687 for_each_worker_pool(pool, gcwq) {
1688 list_for_each_entry_safe(worker, n, &pool->idle_list, entry) {
1689 /*
1690 * idle workers should be off @pool->idle_list
1691 * until rebind is complete to avoid receiving
1692 * premature local wake-ups.
1693 */
1694 list_del_init(&worker->entry);
1695
1696 /*
1697 * worker_thread() will see the above dequeuing
1698 * and call idle_worker_rebind().
1699 */
1700 wake_up_process(worker->task);
1701 }
1702 }
1703
1704 /* rebind busy workers */
1705 for_each_busy_worker(worker, i, pos, gcwq) {
1706 struct work_struct *rebind_work = &worker->rebind_work;
1707 struct workqueue_struct *wq;
1708
1709 if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
1710 work_data_bits(rebind_work)))
1711 continue;
1712
1713 debug_work_activate(rebind_work);
1714
1715 /*
1716 * wq doesn't really matter but let's keep @worker->pool
1717 * and @cwq->pool consistent for sanity.
1718 */
1719 if (worker_pool_pri(worker->pool))
1720 wq = system_highpri_wq;
1721 else
1722 wq = system_wq;
1723
1724 insert_work(get_cwq(gcwq->cpu, wq), rebind_work,
1725 worker->scheduled.next,
1726 work_color_to_flags(WORK_NO_COLOR));
1727 }
1728 }
1729
1730 static struct worker *alloc_worker(void)
1731 {
1732 struct worker *worker;
1733
1734 worker = kzalloc(sizeof(*worker), GFP_KERNEL);
1735 if (worker) {
1736 INIT_LIST_HEAD(&worker->entry);
1737 INIT_LIST_HEAD(&worker->scheduled);
1738 INIT_WORK(&worker->rebind_work, busy_worker_rebind_fn);
1739 /* on creation a worker is in !idle && prep state */
1740 worker->flags = WORKER_PREP;
1741 }
1742 return worker;
1743 }
1744
1745 /**
1746 * create_worker - create a new workqueue worker
1747 * @pool: pool the new worker will belong to
1748 *
1749 * Create a new worker which is bound to @pool. The returned worker
1750 * can be started by calling start_worker() or destroyed using
1751 * destroy_worker().
1752 *
1753 * CONTEXT:
1754 * Might sleep. Does GFP_KERNEL allocations.
1755 *
1756 * RETURNS:
1757 * Pointer to the newly created worker.
1758 */
1759 static struct worker *create_worker(struct worker_pool *pool)
1760 {
1761 struct global_cwq *gcwq = pool->gcwq;
1762 const char *pri = worker_pool_pri(pool) ? "H" : "";
1763 struct worker *worker = NULL;
1764 int id = -1;
1765
1766 spin_lock_irq(&gcwq->lock);
1767 while (ida_get_new(&pool->worker_ida, &id)) {
1768 spin_unlock_irq(&gcwq->lock);
1769 if (!ida_pre_get(&pool->worker_ida, GFP_KERNEL))
1770 goto fail;
1771 spin_lock_irq(&gcwq->lock);
1772 }
1773 spin_unlock_irq(&gcwq->lock);
1774
1775 worker = alloc_worker();
1776 if (!worker)
1777 goto fail;
1778
1779 worker->pool = pool;
1780 worker->id = id;
1781
1782 if (gcwq->cpu != WORK_CPU_UNBOUND)
1783 worker->task = kthread_create_on_node(worker_thread,
1784 worker, cpu_to_node(gcwq->cpu),
1785 "kworker/%u:%d%s", gcwq->cpu, id, pri);
1786 else
1787 worker->task = kthread_create(worker_thread, worker,
1788 "kworker/u:%d%s", id, pri);
1789 if (IS_ERR(worker->task))
1790 goto fail;
1791
1792 if (worker_pool_pri(pool))
1793 set_user_nice(worker->task, HIGHPRI_NICE_LEVEL);
1794
1795 /*
1796 * Determine CPU binding of the new worker depending on
1797 * %GCWQ_DISASSOCIATED. The caller is responsible for ensuring the
1798 * flag remains stable across this function. See the comments
1799 * above the flag definition for details.
1800 *
1801  * As an unbound worker may later become a regular one if the CPU comes
1802 * online, make sure every worker has %PF_THREAD_BOUND set.
1803 */
1804 if (!(gcwq->flags & GCWQ_DISASSOCIATED)) {
1805 kthread_bind(worker->task, gcwq->cpu);
1806 } else {
1807 worker->task->flags |= PF_THREAD_BOUND;
1808 worker->flags |= WORKER_UNBOUND;
1809 }
1810
1811 return worker;
1812 fail:
1813 if (id >= 0) {
1814 spin_lock_irq(&gcwq->lock);
1815 ida_remove(&pool->worker_ida, id);
1816 spin_unlock_irq(&gcwq->lock);
1817 }
1818 kfree(worker);
1819 return NULL;
1820 }
1821
1822 /**
1823 * start_worker - start a newly created worker
1824 * @worker: worker to start
1825 *
1826 * Make the gcwq aware of @worker and start it.
1827 *
1828 * CONTEXT:
1829 * spin_lock_irq(gcwq->lock).
1830 */
1831 static void start_worker(struct worker *worker)
1832 {
1833 worker->flags |= WORKER_STARTED;
1834 worker->pool->nr_workers++;
1835 worker_enter_idle(worker);
1836 wake_up_process(worker->task);
1837 }
1838
1839 /**
1840 * destroy_worker - destroy a workqueue worker
1841 * @worker: worker to be destroyed
1842 *
1843 * Destroy @worker and adjust @gcwq stats accordingly.
1844 *
1845 * CONTEXT:
1846 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
1847 */
1848 static void destroy_worker(struct worker *worker)
1849 {
1850 struct worker_pool *pool = worker->pool;
1851 struct global_cwq *gcwq = pool->gcwq;
1852 int id = worker->id;
1853
1854 /* sanity check frenzy */
1855 BUG_ON(worker->current_work);
1856 BUG_ON(!list_empty(&worker->scheduled));
1857
1858 if (worker->flags & WORKER_STARTED)
1859 pool->nr_workers--;
1860 if (worker->flags & WORKER_IDLE)
1861 pool->nr_idle--;
1862
1863 list_del_init(&worker->entry);
1864 worker->flags |= WORKER_DIE;
1865
1866 spin_unlock_irq(&gcwq->lock);
1867
1868 kthread_stop(worker->task);
1869 kfree(worker);
1870
1871 spin_lock_irq(&gcwq->lock);
1872 ida_remove(&pool->worker_ida, id);
1873 }
1874
1875 static void idle_worker_timeout(unsigned long __pool)
1876 {
1877 struct worker_pool *pool = (void *)__pool;
1878 struct global_cwq *gcwq = pool->gcwq;
1879
1880 spin_lock_irq(&gcwq->lock);
1881
1882 if (too_many_workers(pool)) {
1883 struct worker *worker;
1884 unsigned long expires;
1885
1886 /* idle_list is kept in LIFO order, check the last one */
1887 worker = list_entry(pool->idle_list.prev, struct worker, entry);
1888 expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1889
1890 if (time_before(jiffies, expires))
1891 mod_timer(&pool->idle_timer, expires);
1892 else {
1893 /* it's been idle for too long, wake up manager */
1894 pool->flags |= POOL_MANAGE_WORKERS;
1895 wake_up_worker(pool);
1896 }
1897 }
1898
1899 spin_unlock_irq(&gcwq->lock);
1900 }
1901
1902 static bool send_mayday(struct work_struct *work)
1903 {
1904 struct cpu_workqueue_struct *cwq = get_work_cwq(work);
1905 struct workqueue_struct *wq = cwq->wq;
1906 unsigned int cpu;
1907
1908 if (!(wq->flags & WQ_RESCUER))
1909 return false;
1910
1911 /* mayday mayday mayday */
1912 cpu = cwq->pool->gcwq->cpu;
1913 /* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */
1914 if (cpu == WORK_CPU_UNBOUND)
1915 cpu = 0;
1916 if (!mayday_test_and_set_cpu(cpu, wq->mayday_mask))
1917 wake_up_process(wq->rescuer->task);
1918 return true;
1919 }
1920
1921 static void gcwq_mayday_timeout(unsigned long __pool)
1922 {
1923 struct worker_pool *pool = (void *)__pool;
1924 struct global_cwq *gcwq = pool->gcwq;
1925 struct work_struct *work;
1926
1927 spin_lock_irq(&gcwq->lock);
1928
1929 if (need_to_create_worker(pool)) {
1930 /*
1931 * We've been trying to create a new worker but
1932 * haven't been successful. We might be hitting an
1933 * allocation deadlock. Send distress signals to
1934 * rescuers.
1935 */
1936 list_for_each_entry(work, &pool->worklist, entry)
1937 send_mayday(work);
1938 }
1939
1940 spin_unlock_irq(&gcwq->lock);
1941
1942 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
1943 }
1944
1945 /**
1946 * maybe_create_worker - create a new worker if necessary
1947 * @pool: pool to create a new worker for
1948 *
1949 * Create a new worker for @pool if necessary. @pool is guaranteed to
1950 * have at least one idle worker on return from this function. If
1951 * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
1952 * sent to all rescuers with works scheduled on @pool to resolve
1953 * possible allocation deadlock.
1954 *
1955 * On return, need_to_create_worker() is guaranteed to be false and
1956 * may_start_working() true.
1957 *
1958 * LOCKING:
1959 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1960 * multiple times. Does GFP_KERNEL allocations. Called only from
1961 * manager.
1962 *
1963 * RETURNS:
1964 * false if no action was taken and gcwq->lock stayed locked, true
1965 * otherwise.
1966 */
1967 static bool maybe_create_worker(struct worker_pool *pool)
1968 __releases(&gcwq->lock)
1969 __acquires(&gcwq->lock)
1970 {
1971 struct global_cwq *gcwq = pool->gcwq;
1972
1973 if (!need_to_create_worker(pool))
1974 return false;
1975 restart:
1976 spin_unlock_irq(&gcwq->lock);
1977
1978 /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
1979 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
1980
1981 while (true) {
1982 struct worker *worker;
1983
1984 worker = create_worker(pool);
1985 if (worker) {
1986 del_timer_sync(&pool->mayday_timer);
1987 spin_lock_irq(&gcwq->lock);
1988 start_worker(worker);
1989 BUG_ON(need_to_create_worker(pool));
1990 return true;
1991 }
1992
1993 if (!need_to_create_worker(pool))
1994 break;
1995
1996 __set_current_state(TASK_INTERRUPTIBLE);
1997 schedule_timeout(CREATE_COOLDOWN);
1998
1999 if (!need_to_create_worker(pool))
2000 break;
2001 }
2002
2003 del_timer_sync(&pool->mayday_timer);
2004 spin_lock_irq(&gcwq->lock);
2005 if (need_to_create_worker(pool))
2006 goto restart;
2007 return true;
2008 }
2009
2010 /**
2011  * maybe_destroy_workers - destroy workers which have been idle for a while
2012 * @pool: pool to destroy workers for
2013 *
2014 * Destroy @pool workers which have been idle for longer than
2015 * IDLE_WORKER_TIMEOUT.
2016 *
2017 * LOCKING:
2018 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
2019 * multiple times. Called only from manager.
2020 *
2021 * RETURNS:
2022 * false if no action was taken and gcwq->lock stayed locked, true
2023 * otherwise.
2024 */
2025 static bool maybe_destroy_workers(struct worker_pool *pool)
2026 {
2027 bool ret = false;
2028
2029 while (too_many_workers(pool)) {
2030 struct worker *worker;
2031 unsigned long expires;
2032
2033 worker = list_entry(pool->idle_list.prev, struct worker, entry);
2034 expires = worker->last_active + IDLE_WORKER_TIMEOUT;
2035
2036 if (time_before(jiffies, expires)) {
2037 mod_timer(&pool->idle_timer, expires);
2038 break;
2039 }
2040
2041 destroy_worker(worker);
2042 ret = true;
2043 }
2044
2045 return ret;
2046 }
2047
2048 /**
2049 * manage_workers - manage worker pool
2050 * @worker: self
2051 *
2052 * Assume the manager role and manage gcwq worker pool @worker belongs
2053 * to. At any given time, there can be only zero or one manager per
2054 * gcwq. The exclusion is handled automatically by this function.
2055 *
2056 * The caller can safely start processing works on false return. On
2057 * true return, it's guaranteed that need_to_create_worker() is false
2058 * and may_start_working() is true.
2059 *
2060 * CONTEXT:
2061 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
2062 * multiple times. Does GFP_KERNEL allocations.
2063 *
2064 * RETURNS:
2065 * false if no action was taken and gcwq->lock stayed locked, true if
2066 * some action was taken.
2067 */
2068 static bool manage_workers(struct worker *worker)
2069 {
2070 struct worker_pool *pool = worker->pool;
2071 bool ret = false;
2072
2073 if (pool->flags & POOL_MANAGING_WORKERS)
2074 return ret;
2075
2076 pool->flags |= POOL_MANAGING_WORKERS;
2077
2078 /*
2079 * To simplify both worker management and CPU hotplug, hold off
2080 * management while hotplug is in progress. CPU hotplug path can't
2081 * grab %POOL_MANAGING_WORKERS to achieve this because that can
2082 * lead to idle worker depletion (all become busy thinking someone
2083 * else is managing) which in turn can result in deadlock under
2084 * extreme circumstances. Use @pool->assoc_mutex to synchronize
2085 * manager against CPU hotplug.
2086 *
2087 * assoc_mutex would always be free unless CPU hotplug is in
2088 * progress. trylock first without dropping @gcwq->lock.
2089 */
2090 if (unlikely(!mutex_trylock(&pool->assoc_mutex))) {
2091 spin_unlock_irq(&pool->gcwq->lock);
2092 mutex_lock(&pool->assoc_mutex);
2093 /*
2094 * CPU hotplug could have happened while we were waiting
2095 * for assoc_mutex. Hotplug itself can't handle us
2096  * because the manager is on neither the idle nor the busy list, and
2097 * @gcwq's state and ours could have deviated.
2098 *
2099 * As hotplug is now excluded via assoc_mutex, we can
2100 * simply try to bind. It will succeed or fail depending
2101 * on @gcwq's current state. Try it and adjust
2102 * %WORKER_UNBOUND accordingly.
2103 */
2104 if (worker_maybe_bind_and_lock(worker))
2105 worker->flags &= ~WORKER_UNBOUND;
2106 else
2107 worker->flags |= WORKER_UNBOUND;
2108
2109 ret = true;
2110 }
2111
2112 pool->flags &= ~POOL_MANAGE_WORKERS;
2113
2114 /*
2115 * Destroy and then create so that may_start_working() is true
2116 * on return.
2117 */
2118 ret |= maybe_destroy_workers(pool);
2119 ret |= maybe_create_worker(pool);
2120
2121 pool->flags &= ~POOL_MANAGING_WORKERS;
2122 mutex_unlock(&pool->assoc_mutex);
2123 return ret;
2124 }
2125
2126 /**
2127 * process_one_work - process single work
2128 * @worker: self
2129 * @work: work to process
2130 *
2131  * Process @work. This function contains all the logic necessary to
2132 * process a single work including synchronization against and
2133 * interaction with other workers on the same cpu, queueing and
2134 * flushing. As long as context requirement is met, any worker can
2135 * call this function to process a work.
2136 *
2137 * CONTEXT:
2138 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
2139 */
2140 static void process_one_work(struct worker *worker, struct work_struct *work)
2141 __releases(&gcwq->lock)
2142 __acquires(&gcwq->lock)
2143 {
2144 struct cpu_workqueue_struct *cwq = get_work_cwq(work);
2145 struct worker_pool *pool = worker->pool;
2146 struct global_cwq *gcwq = pool->gcwq;
2147 struct hlist_head *bwh = busy_worker_head(gcwq, work);
2148 bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
2149 work_func_t f = work->func;
2150 int work_color;
2151 struct worker *collision;
2152 #ifdef CONFIG_LOCKDEP
2153 /*
2154 * It is permissible to free the struct work_struct from
2155 * inside the function that is called from it, this we need to
2156 * take into account for lockdep too. To avoid bogus "held
2157 * lock freed" warnings as well as problems when looking into
2158 * work->lockdep_map, make a copy and use that here.
2159 */
2160 struct lockdep_map lockdep_map;
2161
2162 lockdep_copy_map(&lockdep_map, &work->lockdep_map);
2163 #endif
2164 /*
2165 * Ensure we're on the correct CPU. DISASSOCIATED test is
2166 * necessary to avoid spurious warnings from rescuers servicing the
2167 * unbound or a disassociated gcwq.
2168 */
2169 WARN_ON_ONCE(!(worker->flags & WORKER_UNBOUND) &&
2170 !(gcwq->flags & GCWQ_DISASSOCIATED) &&
2171 raw_smp_processor_id() != gcwq->cpu);
2172
2173 /*
2174 * A single work shouldn't be executed concurrently by
2175 * multiple workers on a single cpu. Check whether anyone is
2176 * already processing the work. If so, defer the work to the
2177 * currently executing one.
2178 */
2179 collision = __find_worker_executing_work(gcwq, bwh, work);
2180 if (unlikely(collision)) {
2181 move_linked_works(work, &collision->scheduled, NULL);
2182 return;
2183 }
2184
2185 /* claim and dequeue */
2186 debug_work_deactivate(work);
2187 hlist_add_head(&worker->hentry, bwh);
2188 worker->current_work = work;
2189 worker->current_cwq = cwq;
2190 work_color = get_work_color(work);
2191
2192 list_del_init(&work->entry);
2193
2194 /*
2195 * CPU intensive works don't participate in concurrency
2196 * management. They're the scheduler's responsibility.
2197 */
2198 if (unlikely(cpu_intensive))
2199 worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);
2200
2201 /*
2202 * Unbound gcwq isn't concurrency managed and work items should be
2203 * executed ASAP. Wake up another worker if necessary.
2204 */
2205 if ((worker->flags & WORKER_UNBOUND) && need_more_worker(pool))
2206 wake_up_worker(pool);
2207
2208 /*
2209 * Record the last CPU and clear PENDING which should be the last
2210 * update to @work. Also, do this inside @gcwq->lock so that
2211 * PENDING and queued state changes happen together while IRQ is
2212 * disabled.
2213 */
2214 set_work_cpu_and_clear_pending(work, gcwq->cpu);
2215
2216 spin_unlock_irq(&gcwq->lock);
2217
2218 lock_map_acquire_read(&cwq->wq->lockdep_map);
2219 lock_map_acquire(&lockdep_map);
2220 trace_workqueue_execute_start(work);
2221 f(work);
2222 /*
2223 * While we must be careful to not use "work" after this, the trace
2224 * point will only record its address.
2225 */
2226 trace_workqueue_execute_end(work);
2227 lock_map_release(&lockdep_map);
2228 lock_map_release(&cwq->wq->lockdep_map);
2229
2230 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
2231 pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
2232 " last function: %pf\n",
2233 current->comm, preempt_count(), task_pid_nr(current), f);
2234 debug_show_held_locks(current);
2235 dump_stack();
2236 }
2237
2238 spin_lock_irq(&gcwq->lock);
2239
2240 /* clear cpu intensive status */
2241 if (unlikely(cpu_intensive))
2242 worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
2243
2244 /* we're done with it, release */
2245 hlist_del_init(&worker->hentry);
2246 worker->current_work = NULL;
2247 worker->current_cwq = NULL;
2248 cwq_dec_nr_in_flight(cwq, work_color, false);
2249 }
2250
2251 /**
2252 * process_scheduled_works - process scheduled works
2253 * @worker: self
2254 *
2255 * Process all scheduled works. Please note that the scheduled list
2256 * may change while processing a work, so this function repeatedly
2257 * fetches a work from the top and executes it.
2258 *
2259 * CONTEXT:
2260 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
2261 * multiple times.
2262 */
2263 static void process_scheduled_works(struct worker *worker)
2264 {
2265 while (!list_empty(&worker->scheduled)) {
2266 struct work_struct *work = list_first_entry(&worker->scheduled,
2267 struct work_struct, entry);
2268 process_one_work(worker, work);
2269 }
2270 }
2271
2272 /**
2273 * worker_thread - the worker thread function
2274 * @__worker: self
2275 *
2276 * The gcwq worker thread function. There's a single dynamic pool of
2277  * these per CPU. These workers process all works regardless of
2278 * their specific target workqueue. The only exception is works which
2279 * belong to workqueues with a rescuer which will be explained in
2280 * rescuer_thread().
2281 */
2282 static int worker_thread(void *__worker)
2283 {
2284 struct worker *worker = __worker;
2285 struct worker_pool *pool = worker->pool;
2286 struct global_cwq *gcwq = pool->gcwq;
2287
2288 /* tell the scheduler that this is a workqueue worker */
2289 worker->task->flags |= PF_WQ_WORKER;
2290 woke_up:
2291 spin_lock_irq(&gcwq->lock);
2292
2293 /* we are off idle list if destruction or rebind is requested */
2294 if (unlikely(list_empty(&worker->entry))) {
2295 spin_unlock_irq(&gcwq->lock);
2296
2297 /* if DIE is set, destruction is requested */
2298 if (worker->flags & WORKER_DIE) {
2299 worker->task->flags &= ~PF_WQ_WORKER;
2300 return 0;
2301 }
2302
2303 /* otherwise, rebind */
2304 idle_worker_rebind(worker);
2305 goto woke_up;
2306 }
2307
2308 worker_leave_idle(worker);
2309 recheck:
2310 /* no more worker necessary? */
2311 if (!need_more_worker(pool))
2312 goto sleep;
2313
2314 /* do we need to manage? */
2315 if (unlikely(!may_start_working(pool)) && manage_workers(worker))
2316 goto recheck;
2317
2318 /*
2319 * ->scheduled list can only be filled while a worker is
2320 * preparing to process a work or actually processing it.
2321 * Make sure nobody diddled with it while I was sleeping.
2322 */
2323 BUG_ON(!list_empty(&worker->scheduled));
2324
2325 /*
2326 * When control reaches this point, we're guaranteed to have
2327 * at least one idle worker or that someone else has already
2328 * assumed the manager role.
2329 */
2330 worker_clr_flags(worker, WORKER_PREP);
2331
2332 do {
2333 struct work_struct *work =
2334 list_first_entry(&pool->worklist,
2335 struct work_struct, entry);
2336
2337 if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
2338 /* optimization path, not strictly necessary */
2339 process_one_work(worker, work);
2340 if (unlikely(!list_empty(&worker->scheduled)))
2341 process_scheduled_works(worker);
2342 } else {
2343 move_linked_works(work, &worker->scheduled, NULL);
2344 process_scheduled_works(worker);
2345 }
2346 } while (keep_working(pool));
2347
2348 worker_set_flags(worker, WORKER_PREP, false);
2349 sleep:
2350 if (unlikely(need_to_manage_workers(pool)) && manage_workers(worker))
2351 goto recheck;
2352
2353 /*
2354 * gcwq->lock is held and there's no work to process and no
2355 * need to manage, sleep. Workers are woken up only while
2356 * holding gcwq->lock or from local cpu, so setting the
2357 * current state before releasing gcwq->lock is enough to
2358 * prevent losing any event.
2359 */
2360 worker_enter_idle(worker);
2361 __set_current_state(TASK_INTERRUPTIBLE);
2362 spin_unlock_irq(&gcwq->lock);
2363 schedule();
2364 goto woke_up;
2365 }
2366
2367 /**
2368 * rescuer_thread - the rescuer thread function
2369 * @__wq: the associated workqueue
2370 *
2371 * Workqueue rescuer thread function. There's one rescuer for each
2372 * workqueue which has WQ_RESCUER set.
2373 *
2374 * Regular work processing on a gcwq may block trying to create a new
2375 * worker which uses GFP_KERNEL allocation which has slight chance of
2376 * developing into deadlock if some works currently on the same queue
2377 * need to be processed to satisfy the GFP_KERNEL allocation. This is
2378 * the problem rescuer solves.
2379 *
2380 * When such condition is possible, the gcwq summons rescuers of all
2381  * workqueues which have works queued on the gcwq and lets them process
2382 * those works so that forward progress can be guaranteed.
2383 *
2384 * This should happen rarely.
2385 */
2386 static int rescuer_thread(void *__wq)
2387 {
2388 struct workqueue_struct *wq = __wq;
2389 struct worker *rescuer = wq->rescuer;
2390 struct list_head *scheduled = &rescuer->scheduled;
2391 bool is_unbound = wq->flags & WQ_UNBOUND;
2392 unsigned int cpu;
2393
2394 set_user_nice(current, RESCUER_NICE_LEVEL);
2395 repeat:
2396 set_current_state(TASK_INTERRUPTIBLE);
2397
2398 if (kthread_should_stop())
2399 return 0;
2400
2401 /*
2402  * See whether any cpu is asking for help. Unbound
2403  * workqueues use cpu 0 in mayday_mask for WORK_CPU_UNBOUND.
2404 */
2405 for_each_mayday_cpu(cpu, wq->mayday_mask) {
2406 unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu;
2407 struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq);
2408 struct worker_pool *pool = cwq->pool;
2409 struct global_cwq *gcwq = pool->gcwq;
2410 struct work_struct *work, *n;
2411
2412 __set_current_state(TASK_RUNNING);
2413 mayday_clear_cpu(cpu, wq->mayday_mask);
2414
2415 /* migrate to the target cpu if possible */
2416 rescuer->pool = pool;
2417 worker_maybe_bind_and_lock(rescuer);
2418
2419 /*
2420 * Slurp in all works issued via this workqueue and
2421 * process'em.
2422 */
2423 BUG_ON(!list_empty(&rescuer->scheduled));
2424 list_for_each_entry_safe(work, n, &pool->worklist, entry)
2425 if (get_work_cwq(work) == cwq)
2426 move_linked_works(work, scheduled, &n);
2427
2428 process_scheduled_works(rescuer);
2429
2430 /*
2431 * Leave this gcwq. If keep_working() is %true, notify a
2432 * regular worker; otherwise, we end up with 0 concurrency
2433  * and stall execution.
2434 */
2435 if (keep_working(pool))
2436 wake_up_worker(pool);
2437
2438 spin_unlock_irq(&gcwq->lock);
2439 }
2440
2441 schedule();
2442 goto repeat;
2443 }
2444
2445 struct wq_barrier {
2446 struct work_struct work;
2447 struct completion done;
2448 };
2449
2450 static void wq_barrier_func(struct work_struct *work)
2451 {
2452 struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
2453 complete(&barr->done);
2454 }
2455
2456 /**
2457 * insert_wq_barrier - insert a barrier work
2458 * @cwq: cwq to insert barrier into
2459 * @barr: wq_barrier to insert
2460 * @target: target work to attach @barr to
2461 * @worker: worker currently executing @target, NULL if @target is not executing
2462 *
2463 * @barr is linked to @target such that @barr is completed only after
2464 * @target finishes execution. Please note that the ordering
2465 * guarantee is observed only with respect to @target and on the local
2466 * cpu.
2467 *
2468 * Currently, a queued barrier can't be canceled. This is because
2469 * try_to_grab_pending() can't determine whether the work to be
2470 * grabbed is at the head of the queue and thus can't clear LINKED
2471 * flag of the previous work while there must be a valid next work
2472 * after a work with LINKED flag set.
2473 *
2474 * Note that when @worker is non-NULL, @target may be modified
2475 * underneath us, so we can't reliably determine cwq from @target.
2476 *
2477 * CONTEXT:
2478 * spin_lock_irq(gcwq->lock).
2479 */
2480 static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
2481 struct wq_barrier *barr,
2482 struct work_struct *target, struct worker *worker)
2483 {
2484 struct list_head *head;
2485 unsigned int linked = 0;
2486
2487 /*
2488 * debugobject calls are safe here even with gcwq->lock locked
2489 * as we know for sure that this will not trigger any of the
2490 * checks and call back into the fixup functions where we
2491 * might deadlock.
2492 */
2493 INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
2494 __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
2495 init_completion(&barr->done);
2496
2497 /*
2498 * If @target is currently being executed, schedule the
2499 * barrier to the worker; otherwise, put it after @target.
2500 */
2501 if (worker)
2502 head = worker->scheduled.next;
2503 else {
2504 unsigned long *bits = work_data_bits(target);
2505
2506 head = target->entry.next;
2507 /* there can already be other linked works, inherit and set */
2508 linked = *bits & WORK_STRUCT_LINKED;
2509 __set_bit(WORK_STRUCT_LINKED_BIT, bits);
2510 }
2511
2512 debug_work_activate(&barr->work);
2513 insert_work(cwq, &barr->work, head,
2514 work_color_to_flags(WORK_NO_COLOR) | linked);
2515 }
2516
2517 /**
2518 * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing
2519 * @wq: workqueue being flushed
2520 * @flush_color: new flush color, < 0 for no-op
2521 * @work_color: new work color, < 0 for no-op
2522 *
2523 * Prepare cwqs for workqueue flushing.
2524 *
2525 * If @flush_color is non-negative, flush_color on all cwqs should be
2526 * -1. If no cwq has in-flight commands at the specified color, all
2527 * cwq->flush_color's stay at -1 and %false is returned. If any cwq
2528 * has in flight commands, its cwq->flush_color is set to
2529 * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq
2530 * wakeup logic is armed and %true is returned.
2531 *
2532 * The caller should have initialized @wq->first_flusher prior to
2533 * calling this function with non-negative @flush_color. If
2534 * @flush_color is negative, no flush color update is done and %false
2535 * is returned.
2536 *
2537 * If @work_color is non-negative, all cwqs should have the same
2538 * work_color which is previous to @work_color and all will be
2539 * advanced to @work_color.
2540 *
2541 * CONTEXT:
2542 * mutex_lock(wq->flush_mutex).
2543 *
2544 * RETURNS:
2545 * %true if @flush_color >= 0 and there's something to flush. %false
2546 * otherwise.
2547 */
2548 static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
2549 int flush_color, int work_color)
2550 {
2551 bool wait = false;
2552 unsigned int cpu;
2553
2554 if (flush_color >= 0) {
2555 BUG_ON(atomic_read(&wq->nr_cwqs_to_flush));
2556 atomic_set(&wq->nr_cwqs_to_flush, 1);
2557 }
2558
2559 for_each_cwq_cpu(cpu, wq) {
2560 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2561 struct global_cwq *gcwq = cwq->pool->gcwq;
2562
2563 spin_lock_irq(&gcwq->lock);
2564
2565 if (flush_color >= 0) {
2566 BUG_ON(cwq->flush_color != -1);
2567
2568 if (cwq->nr_in_flight[flush_color]) {
2569 cwq->flush_color = flush_color;
2570 atomic_inc(&wq->nr_cwqs_to_flush);
2571 wait = true;
2572 }
2573 }
2574
2575 if (work_color >= 0) {
2576 BUG_ON(work_color != work_next_color(cwq->work_color));
2577 cwq->work_color = work_color;
2578 }
2579
2580 spin_unlock_irq(&gcwq->lock);
2581 }
2582
2583 if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush))
2584 complete(&wq->first_flusher->done);
2585
2586 return wait;
2587 }
2588
2589 /**
2590 * flush_workqueue - ensure that any scheduled work has run to completion.
2591 * @wq: workqueue to flush
2592 *
2593 * Forces execution of the workqueue and blocks until its completion.
2594 * This is typically used in driver shutdown handlers.
2595 *
2596 * We sleep until all works which were queued on entry have been handled,
2597 * but we are not livelocked by new incoming ones.
2598 */
2599 void flush_workqueue(struct workqueue_struct *wq)
2600 {
2601 struct wq_flusher this_flusher = {
2602 .list = LIST_HEAD_INIT(this_flusher.list),
2603 .flush_color = -1,
2604 .done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
2605 };
2606 int next_color;
2607
2608 lock_map_acquire(&wq->lockdep_map);
2609 lock_map_release(&wq->lockdep_map);
2610
2611 mutex_lock(&wq->flush_mutex);
2612
2613 /*
2614 * Start-to-wait phase
2615 */
2616 next_color = work_next_color(wq->work_color);
2617
2618 if (next_color != wq->flush_color) {
2619 /*
2620 * Color space is not full. The current work_color
2621 * becomes our flush_color and work_color is advanced
2622 * by one.
2623 */
2624 BUG_ON(!list_empty(&wq->flusher_overflow));
2625 this_flusher.flush_color = wq->work_color;
2626 wq->work_color = next_color;
2627
2628 if (!wq->first_flusher) {
2629 /* no flush in progress, become the first flusher */
2630 BUG_ON(wq->flush_color != this_flusher.flush_color);
2631
2632 wq->first_flusher = &this_flusher;
2633
2634 if (!flush_workqueue_prep_cwqs(wq, wq->flush_color,
2635 wq->work_color)) {
2636 /* nothing to flush, done */
2637 wq->flush_color = next_color;
2638 wq->first_flusher = NULL;
2639 goto out_unlock;
2640 }
2641 } else {
2642 /* wait in queue */
2643 BUG_ON(wq->flush_color == this_flusher.flush_color);
2644 list_add_tail(&this_flusher.list, &wq->flusher_queue);
2645 flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
2646 }
2647 } else {
2648 /*
2649 * Oops, color space is full, wait on overflow queue.
2650 * The next flush completion will assign us
2651 * flush_color and transfer to flusher_queue.
2652 */
2653 list_add_tail(&this_flusher.list, &wq->flusher_overflow);
2654 }
2655
2656 mutex_unlock(&wq->flush_mutex);
2657
2658 wait_for_completion(&this_flusher.done);
2659
2660 /*
2661 * Wake-up-and-cascade phase
2662 *
2663 * First flushers are responsible for cascading flushes and
2664 * handling overflow. Non-first flushers can simply return.
2665 */
2666 if (wq->first_flusher != &this_flusher)
2667 return;
2668
2669 mutex_lock(&wq->flush_mutex);
2670
2671 /* we might have raced, check again with mutex held */
2672 if (wq->first_flusher != &this_flusher)
2673 goto out_unlock;
2674
2675 wq->first_flusher = NULL;
2676
2677 BUG_ON(!list_empty(&this_flusher.list));
2678 BUG_ON(wq->flush_color != this_flusher.flush_color);
2679
2680 while (true) {
2681 struct wq_flusher *next, *tmp;
2682
2683 /* complete all the flushers sharing the current flush color */
2684 list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
2685 if (next->flush_color != wq->flush_color)
2686 break;
2687 list_del_init(&next->list);
2688 complete(&next->done);
2689 }
2690
2691 BUG_ON(!list_empty(&wq->flusher_overflow) &&
2692 wq->flush_color != work_next_color(wq->work_color));
2693
2694 /* this flush_color is finished, advance by one */
2695 wq->flush_color = work_next_color(wq->flush_color);
2696
2697 /* one color has been freed, handle overflow queue */
2698 if (!list_empty(&wq->flusher_overflow)) {
2699 /*
2700 * Assign the same color to all overflowed
2701 * flushers, advance work_color and append to
2702 * flusher_queue. This is the start-to-wait
2703 * phase for these overflowed flushers.
2704 */
2705 list_for_each_entry(tmp, &wq->flusher_overflow, list)
2706 tmp->flush_color = wq->work_color;
2707
2708 wq->work_color = work_next_color(wq->work_color);
2709
2710 list_splice_tail_init(&wq->flusher_overflow,
2711 &wq->flusher_queue);
2712 flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
2713 }
2714
2715 if (list_empty(&wq->flusher_queue)) {
2716 BUG_ON(wq->flush_color != wq->work_color);
2717 break;
2718 }
2719
2720 /*
2721 * Need to flush more colors. Make the next flusher
2722 * the new first flusher and arm cwqs.
2723 */
2724 BUG_ON(wq->flush_color == wq->work_color);
2725 BUG_ON(wq->flush_color != next->flush_color);
2726
2727 list_del_init(&next->list);
2728 wq->first_flusher = next;
2729
2730 if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1))
2731 break;
2732
2733 /*
2734 * Meh... this color is already done, clear first
2735 * flusher and repeat cascading.
2736 */
2737 wq->first_flusher = NULL;
2738 }
2739
2740 out_unlock:
2741 mutex_unlock(&wq->flush_mutex);
2742 }
2743 EXPORT_SYMBOL_GPL(flush_workqueue);
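
/*
 * Usage sketch (illustrative only; my_wq, my_quiesce and my_stop_queueing
 * are hypothetical and assume <linux/workqueue.h>): quiescing a
 * driver-private workqueue before reconfiguration.  destroy_workqueue()
 * below already drains, so an explicit flush is mainly useful when the
 * queue must survive.
 *
 *	static struct workqueue_struct *my_wq;
 *
 *	static void my_quiesce(void)
 *	{
 *		my_stop_queueing();
 *		flush_workqueue(my_wq);		everything queued so far is done
 *	}
 */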
2744
2745 /**
2746 * drain_workqueue - drain a workqueue
2747 * @wq: workqueue to drain
2748 *
2749 * Wait until the workqueue becomes empty. While draining is in progress,
2750 * only chain queueing is allowed. IOW, only currently pending or running
2751 * work items on @wq can queue further work items on it. @wq is flushed
2752 * repeatedly until it becomes empty. The number of flushing is detemined
2753 * by the depth of chaining and should be relatively short. Whine if it
2754 * takes too long.
2755 */
2756 void drain_workqueue(struct workqueue_struct *wq)
2757 {
2758 unsigned int flush_cnt = 0;
2759 unsigned int cpu;
2760
2761 /*
2762  * __queue_work() needs to test whether there are drainers; it is much
2763 * hotter than drain_workqueue() and already looks at @wq->flags.
2764 * Use WQ_DRAINING so that queue doesn't have to check nr_drainers.
2765 */
2766 spin_lock(&workqueue_lock);
2767 if (!wq->nr_drainers++)
2768 wq->flags |= WQ_DRAINING;
2769 spin_unlock(&workqueue_lock);
2770 reflush:
2771 flush_workqueue(wq);
2772
2773 for_each_cwq_cpu(cpu, wq) {
2774 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2775 bool drained;
2776
2777 spin_lock_irq(&cwq->pool->gcwq->lock);
2778 drained = !cwq->nr_active && list_empty(&cwq->delayed_works);
2779 spin_unlock_irq(&cwq->pool->gcwq->lock);
2780
2781 if (drained)
2782 continue;
2783
2784 if (++flush_cnt == 10 ||
2785 (flush_cnt % 100 == 0 && flush_cnt <= 1000))
2786 pr_warn("workqueue %s: flush on destruction isn't complete after %u tries\n",
2787 wq->name, flush_cnt);
2788 goto reflush;
2789 }
2790
2791 spin_lock(&workqueue_lock);
2792 if (!--wq->nr_drainers)
2793 wq->flags &= ~WQ_DRAINING;
2794 spin_unlock(&workqueue_lock);
2795 }
2796 EXPORT_SYMBOL_GPL(drain_workqueue);
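
/*
 * Usage sketch (illustrative only; my_wq and my_stop_external_queueing are
 * hypothetical): unlike a plain flush, drain_workqueue() also waits out
 * chains of self-requeueing work items, which is why destroy_workqueue()
 * relies on it.
 *
 *	my_stop_external_queueing();
 *	drain_workqueue(my_wq);		no pending or chained items remain
 *	destroy_workqueue(my_wq);
 */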
2797
2798 static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
2799 {
2800 struct worker *worker = NULL;
2801 struct global_cwq *gcwq;
2802 struct cpu_workqueue_struct *cwq;
2803
2804 might_sleep();
2805 gcwq = get_work_gcwq(work);
2806 if (!gcwq)
2807 return false;
2808
2809 spin_lock_irq(&gcwq->lock);
2810 if (!list_empty(&work->entry)) {
2811 /*
2812 * See the comment near try_to_grab_pending()->smp_rmb().
2813 * If it was re-queued to a different gcwq under us, we
2814 * are not going to wait.
2815 */
2816 smp_rmb();
2817 cwq = get_work_cwq(work);
2818 if (unlikely(!cwq || gcwq != cwq->pool->gcwq))
2819 goto already_gone;
2820 } else {
2821 worker = find_worker_executing_work(gcwq, work);
2822 if (!worker)
2823 goto already_gone;
2824 cwq = worker->current_cwq;
2825 }
2826
2827 insert_wq_barrier(cwq, barr, work, worker);
2828 spin_unlock_irq(&gcwq->lock);
2829
2830 /*
2831 * If @max_active is 1 or rescuer is in use, flushing another work
2832 * item on the same workqueue may lead to deadlock. Make sure the
2833 * flusher is not running on the same workqueue by verifying write
2834 * access.
2835 */
2836 if (cwq->wq->saved_max_active == 1 || cwq->wq->flags & WQ_RESCUER)
2837 lock_map_acquire(&cwq->wq->lockdep_map);
2838 else
2839 lock_map_acquire_read(&cwq->wq->lockdep_map);
2840 lock_map_release(&cwq->wq->lockdep_map);
2841
2842 return true;
2843 already_gone:
2844 spin_unlock_irq(&gcwq->lock);
2845 return false;
2846 }
2847
2848 /**
2849 * flush_work - wait for a work to finish executing the last queueing instance
2850 * @work: the work to flush
2851 *
2852 * Wait until @work has finished execution. @work is guaranteed to be idle
2853 * on return if it hasn't been requeued since flush started.
2854 *
2855 * RETURNS:
2856 * %true if flush_work() waited for the work to finish execution,
2857 * %false if it was already idle.
2858 */
2859 bool flush_work(struct work_struct *work)
2860 {
2861 struct wq_barrier barr;
2862
2863 lock_map_acquire(&work->lockdep_map);
2864 lock_map_release(&work->lockdep_map);
2865
2866 if (start_flush_work(work, &barr)) {
2867 wait_for_completion(&barr.done);
2868 destroy_work_on_stack(&barr.work);
2869 return true;
2870 } else {
2871 return false;
2872 }
2873 }
2874 EXPORT_SYMBOL_GPL(flush_work);
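
/*
 * Usage sketch (illustrative only; struct my_data and my_release are
 * hypothetical): waiting for one specific item before freeing the data it
 * operates on.  If the item can be requeued concurrently,
 * cancel_work_sync() below is the safer choice.
 *
 *	struct my_data {
 *		struct work_struct work;
 *		void *buf;
 *	};
 *
 *	static void my_release(struct my_data *d)
 *	{
 *		flush_work(&d->work);
 *		kfree(d->buf);
 *		kfree(d);
 *	}
 */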
2875
2876 static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
2877 {
2878 unsigned long flags;
2879 int ret;
2880
2881 do {
2882 ret = try_to_grab_pending(work, is_dwork, &flags);
2883 /*
2884 * If someone else is canceling, wait for the same event it
2885 * would be waiting for before retrying.
2886 */
2887 if (unlikely(ret == -ENOENT))
2888 flush_work(work);
2889 } while (unlikely(ret < 0));
2890
2891 /* tell other tasks trying to grab @work to back off */
2892 mark_work_canceling(work);
2893 local_irq_restore(flags);
2894
2895 flush_work(work);
2896 clear_work_data(work);
2897 return ret;
2898 }
2899
2900 /**
2901 * cancel_work_sync - cancel a work and wait for it to finish
2902 * @work: the work to cancel
2903 *
2904 * Cancel @work and wait for its execution to finish. This function
2905 * can be used even if the work re-queues itself or migrates to
2906 * another workqueue. On return from this function, @work is
2907 * guaranteed to be not pending or executing on any CPU.
2908 *
2909 * cancel_work_sync(&delayed_work->work) must not be used for
2910 * delayed_work's. Use cancel_delayed_work_sync() instead.
2911 *
2912 * The caller must ensure that the workqueue on which @work was last
2913 * queued can't be destroyed before this function returns.
2914 *
2915 * RETURNS:
2916 * %true if @work was pending, %false otherwise.
2917 */
2918 bool cancel_work_sync(struct work_struct *work)
2919 {
2920 return __cancel_work_timer(work, false);
2921 }
2922 EXPORT_SYMBOL_GPL(cancel_work_sync);
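
/*
 * Usage sketch (illustrative only; the my_* names are hypothetical):
 * tearing down an object whose work item may requeue itself.  After
 * cancel_work_sync() returns, the item is neither pending nor running, so
 * freeing is safe.
 *
 *	static void my_teardown(struct my_data *d)
 *	{
 *		d->shutting_down = true;
 *		cancel_work_sync(&d->work);
 *		kfree(d);
 *	}
 */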
2923
2924 /**
2925 * flush_delayed_work - wait for a dwork to finish executing the last queueing
2926 * @dwork: the delayed work to flush
2927 *
2928 * Delayed timer is cancelled and the pending work is queued for
2929 * immediate execution. Like flush_work(), this function only
2930 * considers the last queueing instance of @dwork.
2931 *
2932 * RETURNS:
2933 * %true if flush_work() waited for the work to finish execution,
2934 * %false if it was already idle.
2935 */
2936 bool flush_delayed_work(struct delayed_work *dwork)
2937 {
2938 local_irq_disable();
2939 if (del_timer_sync(&dwork->timer))
2940 __queue_work(dwork->cpu,
2941 get_work_cwq(&dwork->work)->wq, &dwork->work);
2942 local_irq_enable();
2943 return flush_work(&dwork->work);
2944 }
2945 EXPORT_SYMBOL(flush_delayed_work);
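
/*
 * Usage sketch (illustrative only; my_dwork is a hypothetical
 * struct delayed_work): forcing a timer-armed item to run now, e.g. when
 * userspace requests an immediate sync instead of waiting out the delay.
 *
 *	schedule_delayed_work(&my_dwork, 10 * HZ);
 *	...
 *	flush_delayed_work(&my_dwork);	queued and executed immediately
 */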
2946
2947 /**
2948 * cancel_delayed_work - cancel a delayed work
2949 * @dwork: delayed_work to cancel
2950 *
2951 * Kill off a pending delayed_work. Returns %true if @dwork was pending
2952  * and canceled; %false if it wasn't pending. Note that the work callback
2953 * function may still be running on return, unless it returns %true and the
2954 * work doesn't re-arm itself. Explicitly flush or use
2955 * cancel_delayed_work_sync() to wait on it.
2956 *
2957 * This function is safe to call from any context including IRQ handler.
2958 */
2959 bool cancel_delayed_work(struct delayed_work *dwork)
2960 {
2961 unsigned long flags;
2962 int ret;
2963
2964 do {
2965 ret = try_to_grab_pending(&dwork->work, true, &flags);
2966 } while (unlikely(ret == -EAGAIN));
2967
2968 if (unlikely(ret < 0))
2969 return false;
2970
2971 set_work_cpu_and_clear_pending(&dwork->work, work_cpu(&dwork->work));
2972 local_irq_restore(flags);
2973 return true;
2974 }
2975 EXPORT_SYMBOL(cancel_delayed_work);
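
/*
 * Usage sketch (illustrative only; my_irq and my_timeout_work are
 * hypothetical, assuming <linux/interrupt.h>): since this function is IRQ
 * safe, a completion interrupt can cancel its own timeout handler directly.
 * Pair with cancel_delayed_work_sync() on teardown when the callback must
 * also be quiesced.
 *
 *	static irqreturn_t my_irq(int irq, void *dev)
 *	{
 *		cancel_delayed_work(&my_timeout_work);
 *		return IRQ_HANDLED;
 *	}
 */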
2976
2977 /**
2978 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
2979 * @dwork: the delayed work cancel
2980 *
2981 * This is cancel_work_sync() for delayed works.
2982 *
2983 * RETURNS:
2984 * %true if @dwork was pending, %false otherwise.
2985 */
2986 bool cancel_delayed_work_sync(struct delayed_work *dwork)
2987 {
2988 return __cancel_work_timer(&dwork->work, true);
2989 }
2990 EXPORT_SYMBOL(cancel_delayed_work_sync);
2991
2992 /**
2993 * schedule_work_on - put work task on a specific cpu
2994 * @cpu: cpu to put the work task on
2995 * @work: job to be done
2996 *
2997  * This puts a job on a specific CPU.
2998 */
2999 bool schedule_work_on(int cpu, struct work_struct *work)
3000 {
3001 return queue_work_on(cpu, system_wq, work);
3002 }
3003 EXPORT_SYMBOL(schedule_work_on);
3004
3005 /**
3006 * schedule_work - put work task in global workqueue
3007 * @work: job to be done
3008 *
3009 * Returns %false if @work was already on the kernel-global workqueue and
3010 * %true otherwise.
3011 *
3012 * This puts a job in the kernel-global workqueue if it was not already
3013 * queued and leaves it in the same position on the kernel-global
3014 * workqueue otherwise.
3015 */
3016 bool schedule_work(struct work_struct *work)
3017 {
3018 return queue_work(system_wq, work);
3019 }
3020 EXPORT_SYMBOL(schedule_work);
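
/*
 * Usage sketch (illustrative only; my_work_fn and my_work are
 * hypothetical): the classic pattern of deferring non-atomic processing
 * from atomic context onto the kernel-global workqueue.
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		pr_info("running later, in process context\n");
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	schedule_work(&my_work);	safe from IRQ or softirq context
 */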
3021
3022 /**
3023 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
3024 * @cpu: cpu to use
3025 * @dwork: job to be done
3026 * @delay: number of jiffies to wait
3027 *
3028 * After waiting for a given time this puts a job in the kernel-global
3029 * workqueue on the specified CPU.
3030 */
3031 bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
3032 unsigned long delay)
3033 {
3034 return queue_delayed_work_on(cpu, system_wq, dwork, delay);
3035 }
3036 EXPORT_SYMBOL(schedule_delayed_work_on);
3037
3038 /**
3039 * schedule_delayed_work - put work task in global workqueue after delay
3040 * @dwork: job to be done
3041 * @delay: number of jiffies to wait or 0 for immediate execution
3042 *
3043 * After waiting for a given time this puts a job in the kernel-global
3044 * workqueue.
3045 */
3046 bool schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
3047 {
3048 return queue_delayed_work(system_wq, dwork, delay);
3049 }
3050 EXPORT_SYMBOL(schedule_delayed_work);
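
/*
 * Usage sketch (illustrative only; the my_poll names and the one second
 * period are hypothetical): a self-rearming poll loop.  Stop it with
 * cancel_delayed_work_sync() so a running instance can't rearm behind the
 * canceller's back.
 *
 *	static void my_poll_fn(struct work_struct *work);
 *	static DECLARE_DELAYED_WORK(my_poll, my_poll_fn);
 *
 *	static void my_poll_fn(struct work_struct *work)
 *	{
 *		my_do_polling();
 *		schedule_delayed_work(&my_poll, HZ);
 *	}
 */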
3051
3052 /**
3053 * schedule_on_each_cpu - execute a function synchronously on each online CPU
3054 * @func: the function to call
3055 *
3056 * schedule_on_each_cpu() executes @func on each online CPU using the
3057 * system workqueue and blocks until all CPUs have completed.
3058 * schedule_on_each_cpu() is very slow.
3059 *
3060 * RETURNS:
3061 * 0 on success, -errno on failure.
3062 */
3063 int schedule_on_each_cpu(work_func_t func)
3064 {
3065 int cpu;
3066 struct work_struct __percpu *works;
3067
3068 works = alloc_percpu(struct work_struct);
3069 if (!works)
3070 return -ENOMEM;
3071
3072 get_online_cpus();
3073
3074 for_each_online_cpu(cpu) {
3075 struct work_struct *work = per_cpu_ptr(works, cpu);
3076
3077 INIT_WORK(work, func);
3078 schedule_work_on(cpu, work);
3079 }
3080
3081 for_each_online_cpu(cpu)
3082 flush_work(per_cpu_ptr(works, cpu));
3083
3084 put_online_cpus();
3085 free_percpu(works);
3086 return 0;
3087 }
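
/*
 * Usage sketch (illustrative only; my_drain_fn and my_drain_local_cache are
 * hypothetical): draining per-cpu caches is the archetypal, deliberately
 * slow user of schedule_on_each_cpu().
 *
 *	static void my_drain_fn(struct work_struct *work)
 *	{
 *		my_drain_local_cache();		runs once on each online CPU
 *	}
 *
 *	int ret = schedule_on_each_cpu(my_drain_fn);
 */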
3088
3089 /**
3090 * flush_scheduled_work - ensure that any scheduled work has run to completion.
3091 *
3092 * Forces execution of the kernel-global workqueue and blocks until its
3093 * completion.
3094 *
3095 * Think twice before calling this function! It's very easy to get into
3096 * trouble if you don't take great care. Either of the following situations
3097 * will lead to deadlock:
3098 *
3099 * One of the work items currently on the workqueue needs to acquire
3100 * a lock held by your code or its caller.
3101 *
3102 * Your code is running in the context of a work routine.
3103 *
3104 * They will be detected by lockdep when they occur, but the first might not
3105 * occur very often. It depends on what work items are on the workqueue and
3106 * what locks they need, which you have no control over.
3107 *
3108 * In most situations flushing the entire workqueue is overkill; you merely
3109 * need to know that a particular work item isn't queued and isn't running.
3110 * In such cases you should use cancel_delayed_work_sync() or
3111 * cancel_work_sync() instead.
3112 */
3113 void flush_scheduled_work(void)
3114 {
3115 flush_workqueue(system_wq);
3116 }
3117 EXPORT_SYMBOL(flush_scheduled_work);
3118
3119 /**
3120 * execute_in_process_context - reliably execute the routine with user context
3121 * @fn: the function to execute
3122 * @ew: guaranteed storage for the execute work structure (must
3123 * be available when the work executes)
3124 *
3125 * Executes the function immediately if process context is available,
3126 * otherwise schedules the function for delayed execution.
3127 *
3128 * Returns: 0 - function was executed
3129 * 1 - function was scheduled for execution
3130 */
3131 int execute_in_process_context(work_func_t fn, struct execute_work *ew)
3132 {
3133 if (!in_interrupt()) {
3134 fn(&ew->work);
3135 return 0;
3136 }
3137
3138 INIT_WORK(&ew->work, fn);
3139 schedule_work(&ew->work);
3140
3141 return 1;
3142 }
3143 EXPORT_SYMBOL_GPL(execute_in_process_context);
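
/*
 * Usage sketch (illustrative only; struct my_obj and my_cleanup are
 * hypothetical): a release path that frees its object inline when process
 * context is available and defers to the kernel-global workqueue otherwise.
 * @ew lives inside the object, so it stays valid until the work runs.
 *
 *	struct my_obj {
 *		struct execute_work ew;
 *	};
 *
 *	static void my_cleanup(struct work_struct *work)
 *	{
 *		kfree(container_of(work, struct my_obj, ew.work));
 *	}
 *
 *	execute_in_process_context(my_cleanup, &obj->ew);
 */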
3144
3145 int keventd_up(void)
3146 {
3147 return system_wq != NULL;
3148 }
3149
3150 static int alloc_cwqs(struct workqueue_struct *wq)
3151 {
3152 /*
3153 * cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
3154 * Make sure that the alignment isn't lower than that of
3155 * unsigned long long.
3156 */
3157 const size_t size = sizeof(struct cpu_workqueue_struct);
3158 const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
3159 __alignof__(unsigned long long));
3160
3161 if (!(wq->flags & WQ_UNBOUND))
3162 wq->cpu_wq.pcpu = __alloc_percpu(size, align);
3163 else {
3164 void *ptr;
3165
3166 /*
3167 * Allocate enough room to align cwq and put an extra
3168 * pointer at the end pointing back to the originally
3169  * allocated pointer which will be used for freeing.
3170 */
3171 ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL);
3172 if (ptr) {
3173 wq->cpu_wq.single = PTR_ALIGN(ptr, align);
3174 *(void **)(wq->cpu_wq.single + 1) = ptr;
3175 }
3176 }
3177
3178 /* just in case, make sure it's actually aligned */
3179 BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align));
3180 return wq->cpu_wq.v ? 0 : -ENOMEM;
3181 }
3182
3183 static void free_cwqs(struct workqueue_struct *wq)
3184 {
3185 if (!(wq->flags & WQ_UNBOUND))
3186 free_percpu(wq->cpu_wq.pcpu);
3187 else if (wq->cpu_wq.single) {
3188 /* the pointer to free is stored right after the cwq */
3189 kfree(*(void **)(wq->cpu_wq.single + 1));
3190 }
3191 }
3192
3193 static int wq_clamp_max_active(int max_active, unsigned int flags,
3194 const char *name)
3195 {
3196 int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
3197
3198 if (max_active < 1 || max_active > lim)
3199 pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n",
3200 max_active, name, 1, lim);
3201
3202 return clamp_val(max_active, 1, lim);
3203 }
3204
3205 struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
3206 unsigned int flags,
3207 int max_active,
3208 struct lock_class_key *key,
3209 const char *lock_name, ...)
3210 {
3211 va_list args, args1;
3212 struct workqueue_struct *wq;
3213 unsigned int cpu;
3214 size_t namelen;
3215
3216 /* determine namelen, allocate wq and format name */
3217 va_start(args, lock_name);
3218 va_copy(args1, args);
3219 namelen = vsnprintf(NULL, 0, fmt, args) + 1;
3220
3221 wq = kzalloc(sizeof(*wq) + namelen, GFP_KERNEL);
3222 if (!wq)
3223 goto err;
3224
3225 vsnprintf(wq->name, namelen, fmt, args1);
3226 va_end(args);
3227 va_end(args1);
3228
3229 /*
3230 * Workqueues which may be used during memory reclaim should
3231 * have a rescuer to guarantee forward progress.
3232 */
3233 if (flags & WQ_MEM_RECLAIM)
3234 flags |= WQ_RESCUER;
3235
3236 max_active = max_active ?: WQ_DFL_ACTIVE;
3237 max_active = wq_clamp_max_active(max_active, flags, wq->name);
3238
3239 /* init wq */
3240 wq->flags = flags;
3241 wq->saved_max_active = max_active;
3242 mutex_init(&wq->flush_mutex);
3243 atomic_set(&wq->nr_cwqs_to_flush, 0);
3244 INIT_LIST_HEAD(&wq->flusher_queue);
3245 INIT_LIST_HEAD(&wq->flusher_overflow);
3246
3247 lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
3248 INIT_LIST_HEAD(&wq->list);
3249
3250 if (alloc_cwqs(wq) < 0)
3251 goto err;
3252
3253 for_each_cwq_cpu(cpu, wq) {
3254 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3255 struct global_cwq *gcwq = get_gcwq(cpu);
3256 int pool_idx = (bool)(flags & WQ_HIGHPRI);
3257
3258 BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
3259 cwq->pool = &gcwq->pools[pool_idx];
3260 cwq->wq = wq;
3261 cwq->flush_color = -1;
3262 cwq->max_active = max_active;
3263 INIT_LIST_HEAD(&cwq->delayed_works);
3264 }
3265
3266 if (flags & WQ_RESCUER) {
3267 struct worker *rescuer;
3268
3269 if (!alloc_mayday_mask(&wq->mayday_mask, GFP_KERNEL))
3270 goto err;
3271
3272 wq->rescuer = rescuer = alloc_worker();
3273 if (!rescuer)
3274 goto err;
3275
3276 rescuer->task = kthread_create(rescuer_thread, wq, "%s",
3277 wq->name);
3278 if (IS_ERR(rescuer->task))
3279 goto err;
3280
3281 rescuer->task->flags |= PF_THREAD_BOUND;
3282 wake_up_process(rescuer->task);
3283 }
3284
3285 /*
3286 * workqueue_lock protects global freeze state and workqueues
3287 * list. Grab it, set max_active accordingly and add the new
3288 * workqueue to workqueues list.
3289 */
3290 spin_lock(&workqueue_lock);
3291
3292 if (workqueue_freezing && wq->flags & WQ_FREEZABLE)
3293 for_each_cwq_cpu(cpu, wq)
3294 get_cwq(cpu, wq)->max_active = 0;
3295
3296 list_add(&wq->list, &workqueues);
3297
3298 spin_unlock(&workqueue_lock);
3299
3300 return wq;
3301 err:
3302 if (wq) {
3303 free_cwqs(wq);
3304 free_mayday_mask(wq->mayday_mask);
3305 kfree(wq->rescuer);
3306 kfree(wq);
3307 }
3308 return NULL;
3309 }
3310 EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
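
/*
 * Usage sketch (illustrative only; the "my_io" name and flag choice are
 * hypothetical): callers normally reach this through the alloc_workqueue()
 * macro.  A queue that may be needed during memory reclaim asks for a
 * rescuer via WQ_MEM_RECLAIM; max_active of 0 selects WQ_DFL_ACTIVE.
 *
 *	my_wq = alloc_workqueue("my_io", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
 *	if (!my_wq)
 *		return -ENOMEM;
 *	queue_work(my_wq, &my_work);
 */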
3311
3312 /**
3313 * destroy_workqueue - safely terminate a workqueue
3314 * @wq: target workqueue
3315 *
3316 * Safely destroy a workqueue. All work currently pending will be done first.
3317 */
3318 void destroy_workqueue(struct workqueue_struct *wq)
3319 {
3320 unsigned int cpu;
3321
3322 /* drain it before proceeding with destruction */
3323 drain_workqueue(wq);
3324
3325 /*
3326 * wq list is used to freeze wq, remove from list after
3327 * flushing is complete in case freeze races us.
3328 */
3329 spin_lock(&workqueue_lock);
3330 list_del(&wq->list);
3331 spin_unlock(&workqueue_lock);
3332
3333 /* sanity check */
3334 for_each_cwq_cpu(cpu, wq) {
3335 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3336 int i;
3337
3338 for (i = 0; i < WORK_NR_COLORS; i++)
3339 BUG_ON(cwq->nr_in_flight[i]);
3340 BUG_ON(cwq->nr_active);
3341 BUG_ON(!list_empty(&cwq->delayed_works));
3342 }
3343
3344 if (wq->flags & WQ_RESCUER) {
3345 kthread_stop(wq->rescuer->task);
3346 free_mayday_mask(wq->mayday_mask);
3347 kfree(wq->rescuer);
3348 }
3349
3350 free_cwqs(wq);
3351 kfree(wq);
3352 }
3353 EXPORT_SYMBOL_GPL(destroy_workqueue);
3354
3355 /**
3356 * workqueue_set_max_active - adjust max_active of a workqueue
3357 * @wq: target workqueue
3358 * @max_active: new max_active value.
3359 *
3360 * Set max_active of @wq to @max_active.
3361 *
3362 * CONTEXT:
3363 * Don't call from IRQ context.
3364 */
3365 void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
3366 {
3367 unsigned int cpu;
3368
3369 max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
3370
3371 spin_lock(&workqueue_lock);
3372
3373 wq->saved_max_active = max_active;
3374
3375 for_each_cwq_cpu(cpu, wq) {
3376 struct global_cwq *gcwq = get_gcwq(cpu);
3377
3378 spin_lock_irq(&gcwq->lock);
3379
3380 if (!(wq->flags & WQ_FREEZABLE) ||
3381 !(gcwq->flags & GCWQ_FREEZING))
3382 get_cwq(gcwq->cpu, wq)->max_active = max_active;
3383
3384 spin_unlock_irq(&gcwq->lock);
3385 }
3386
3387 spin_unlock(&workqueue_lock);
3388 }
3389 EXPORT_SYMBOL_GPL(workqueue_set_max_active);
3390
3391 /**
3392 * workqueue_congested - test whether a workqueue is congested
3393 * @cpu: CPU in question
3394 * @wq: target workqueue
3395 *
3396 * Test whether @wq's cpu workqueue for @cpu is congested. There is
3397 * no synchronization around this function and the test result is
3398 * unreliable and only useful as advisory hints or for debugging.
3399 *
3400 * RETURNS:
3401 * %true if congested, %false otherwise.
3402 */
3403 bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq)
3404 {
3405 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3406
3407 return !list_empty(&cwq->delayed_works);
3408 }
3409 EXPORT_SYMBOL_GPL(workqueue_congested);
3410
3411 /**
3412 * work_cpu - return the last known associated cpu for @work
3413 * @work: the work of interest
3414 *
3415 * RETURNS:
3416 * CPU number if @work was ever queued. WORK_CPU_NONE otherwise.
3417 */
3418 unsigned int work_cpu(struct work_struct *work)
3419 {
3420 struct global_cwq *gcwq = get_work_gcwq(work);
3421
3422 return gcwq ? gcwq->cpu : WORK_CPU_NONE;
3423 }
3424 EXPORT_SYMBOL_GPL(work_cpu);
3425
3426 /**
3427 * work_busy - test whether a work is currently pending or running
3428 * @work: the work to be tested
3429 *
3430 * Test whether @work is currently pending or running. There is no
3431 * synchronization around this function and the test result is
3432 * unreliable and only useful as advisory hints or for debugging.
3433 * Especially for reentrant wqs, the pending state might hide the
3434 * running state.
3435 *
3436 * RETURNS:
3437 * OR'd bitmask of WORK_BUSY_* bits.
3438 */
3439 unsigned int work_busy(struct work_struct *work)
3440 {
3441 struct global_cwq *gcwq = get_work_gcwq(work);
3442 unsigned long flags;
3443 unsigned int ret = 0;
3444
3445 if (!gcwq)
3446 return false;
3447
3448 spin_lock_irqsave(&gcwq->lock, flags);
3449
3450 if (work_pending(work))
3451 ret |= WORK_BUSY_PENDING;
3452 if (find_worker_executing_work(gcwq, work))
3453 ret |= WORK_BUSY_RUNNING;
3454
3455 spin_unlock_irqrestore(&gcwq->lock, flags);
3456
3457 return ret;
3458 }
3459 EXPORT_SYMBOL_GPL(work_busy);
3460
3461 /*
3462 * CPU hotplug.
3463 *
3464 * There are two challenges in supporting CPU hotplug. Firstly, there
3465 * are a lot of assumptions on strong associations among work, cwq and
3466 * gcwq which make migrating pending and scheduled works very
3467 * difficult to implement without impacting hot paths. Secondly,
3468  * gcwqs serve a mix of short, long and very long running works, making
3469 * blocked draining impractical.
3470 *
3471  * This is solved by allowing a gcwq to be disassociated from its CPU,
3472  * running as an unbound one, and allowing it to be reattached later if the
3473  * CPU comes back online.
3474 */
3475
3476 /* claim manager positions of all pools */
3477 static void gcwq_claim_assoc_and_lock(struct global_cwq *gcwq)
3478 {
3479 struct worker_pool *pool;
3480
3481 for_each_worker_pool(pool, gcwq)
3482 mutex_lock_nested(&pool->assoc_mutex, pool - gcwq->pools);
3483 spin_lock_irq(&gcwq->lock);
3484 }
3485
3486 /* release manager positions */
3487 static void gcwq_release_assoc_and_unlock(struct global_cwq *gcwq)
3488 {
3489 struct worker_pool *pool;
3490
3491 spin_unlock_irq(&gcwq->lock);
3492 for_each_worker_pool(pool, gcwq)
3493 mutex_unlock(&pool->assoc_mutex);
3494 }
3495
3496 static void gcwq_unbind_fn(struct work_struct *work)
3497 {
3498 struct global_cwq *gcwq = get_gcwq(smp_processor_id());
3499 struct worker_pool *pool;
3500 struct worker *worker;
3501 struct hlist_node *pos;
3502 int i;
3503
3504 BUG_ON(gcwq->cpu != smp_processor_id());
3505
3506 gcwq_claim_assoc_and_lock(gcwq);
3507
3508 /*
3509 * We've claimed all manager positions. Make all workers unbound
3510 * and set DISASSOCIATED. Before this, all workers except for the
3511 * ones which are still executing works from before the last CPU
3512 * down must be on the cpu. After this, they may become diasporas.
3513 */
3514 for_each_worker_pool(pool, gcwq)
3515 list_for_each_entry(worker, &pool->idle_list, entry)
3516 worker->flags |= WORKER_UNBOUND;
3517
3518 for_each_busy_worker(worker, i, pos, gcwq)
3519 worker->flags |= WORKER_UNBOUND;
3520
3521 gcwq->flags |= GCWQ_DISASSOCIATED;
3522
3523 gcwq_release_assoc_and_unlock(gcwq);
3524
3525 /*
3526 * Call schedule() so that we cross rq->lock and thus can guarantee
3527 * sched callbacks see the %WORKER_UNBOUND flag. This is necessary
3528 * as scheduler callbacks may be invoked from other cpus.
3529 */
3530 schedule();
3531
3532 /*
3533 * Sched callbacks are disabled now. Zap nr_running. After this,
3534 * nr_running stays zero and need_more_worker() and keep_working()
3535 * are always true as long as the worklist is not empty. @gcwq now
3536 * behaves as unbound (in terms of concurrency management) gcwq
3537 * which is served by workers tied to the CPU.
3538 *
3539 * On return from this function, the current worker would trigger
3540 * unbound chain execution of pending work items if other workers
3541 * didn't already.
3542 */
3543 for_each_worker_pool(pool, gcwq)
3544 atomic_set(get_pool_nr_running(pool), 0);
3545 }
3546
3547 /*
3548 * Workqueues should be brought up before normal priority CPU notifiers.
3549 * This will be registered high priority CPU notifier.
3550 */
3551 static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb,
3552 unsigned long action,
3553 void *hcpu)
3554 {
3555 unsigned int cpu = (unsigned long)hcpu;
3556 struct global_cwq *gcwq = get_gcwq(cpu);
3557 struct worker_pool *pool;
3558
3559 switch (action & ~CPU_TASKS_FROZEN) {
3560 case CPU_UP_PREPARE:
3561 for_each_worker_pool(pool, gcwq) {
3562 struct worker *worker;
3563
3564 if (pool->nr_workers)
3565 continue;
3566
3567 worker = create_worker(pool);
3568 if (!worker)
3569 return NOTIFY_BAD;
3570
3571 spin_lock_irq(&gcwq->lock);
3572 start_worker(worker);
3573 spin_unlock_irq(&gcwq->lock);
3574 }
3575 break;
3576
3577 case CPU_DOWN_FAILED:
3578 case CPU_ONLINE:
3579 gcwq_claim_assoc_and_lock(gcwq);
3580 gcwq->flags &= ~GCWQ_DISASSOCIATED;
3581 rebind_workers(gcwq);
3582 gcwq_release_assoc_and_unlock(gcwq);
3583 break;
3584 }
3585 return NOTIFY_OK;
3586 }
3587
3588 /*
3589 * Workqueues should be brought down after normal priority CPU notifiers.
3590 * This will be registered as low priority CPU notifier.
3591 */
3592 static int __cpuinit workqueue_cpu_down_callback(struct notifier_block *nfb,
3593 unsigned long action,
3594 void *hcpu)
3595 {
3596 unsigned int cpu = (unsigned long)hcpu;
3597 struct work_struct unbind_work;
3598
3599 switch (action & ~CPU_TASKS_FROZEN) {
3600 case CPU_DOWN_PREPARE:
3601 /* unbinding should happen on the local CPU */
3602 INIT_WORK_ONSTACK(&unbind_work, gcwq_unbind_fn);
3603 queue_work_on(cpu, system_highpri_wq, &unbind_work);
3604 flush_work(&unbind_work);
3605 break;
3606 }
3607 return NOTIFY_OK;
3608 }
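/*
 * The CPU_DOWN_PREPARE path above also illustrates a generally useful
 * pattern: run a function synchronously on a specific CPU by queueing an
 * on-stack work item on a per-cpu workqueue and flushing it.  A hedged,
 * self-contained sketch (my_fn and run_on_cpu are hypothetical names):
 *
 *	static void my_fn(struct work_struct *work)
 *	{
 *		pr_info("running on cpu %d\n", smp_processor_id());
 *	}
 *
 *	static void run_on_cpu(unsigned int cpu)
 *	{
 *		struct work_struct w;
 *
 *		INIT_WORK_ONSTACK(&w, my_fn);
 *		queue_work_on(cpu, system_highpri_wq, &w);
 *		flush_work(&w);		// wait until my_fn() has run on @cpu
 *	}
 */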
3609
3610 #ifdef CONFIG_SMP
3611
3612 struct work_for_cpu {
3613 struct completion completion;
3614 long (*fn)(void *);
3615 void *arg;
3616 long ret;
3617 };
3618
3619 static int do_work_for_cpu(void *_wfc)
3620 {
3621 struct work_for_cpu *wfc = _wfc;
3622 wfc->ret = wfc->fn(wfc->arg);
3623 complete(&wfc->completion);
3624 return 0;
3625 }
3626
3627 /**
3628  * work_on_cpu - run a function in process context on a particular cpu
3629 * @cpu: the cpu to run on
3630 * @fn: the function to run
3631 * @arg: the function arg
3632 *
3633 * This will return the value @fn returns.
3634 * It is up to the caller to ensure that the cpu doesn't go offline.
3635 * The caller must not hold any locks which would prevent @fn from completing.
3636 */
3637 long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
3638 {
3639 struct task_struct *sub_thread;
3640 struct work_for_cpu wfc = {
3641 .completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
3642 .fn = fn,
3643 .arg = arg,
3644 };
3645
3646 sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
3647 if (IS_ERR(sub_thread))
3648 return PTR_ERR(sub_thread);
3649 kthread_bind(sub_thread, cpu);
3650 wake_up_process(sub_thread);
3651 wait_for_completion(&wfc.completion);
3652 return wfc.ret;
3653 }
3654 EXPORT_SYMBOL_GPL(work_on_cpu);
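/*
 * A hedged usage sketch for work_on_cpu(): read a CPU-local value from
 * process context on a specific CPU.  read_my_value() and
 * my_read_local_register() are hypothetical; callers typically bracket
 * the call with get_online_cpus()/put_online_cpus() so the target CPU
 * cannot go offline while @fn runs.
 *
 *	static long read_my_value(void *arg)		// hypothetical
 *	{
 *		return my_read_local_register();	// hypothetical
 *	}
 *
 *	long read_value_on(unsigned int cpu)
 *	{
 *		long ret;
 *
 *		get_online_cpus();
 *		ret = work_on_cpu(cpu, read_my_value, NULL);
 *		put_online_cpus();
 *		return ret;
 *	}
 */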
3655 #endif /* CONFIG_SMP */
3656
3657 #ifdef CONFIG_FREEZER
3658
3659 /**
3660 * freeze_workqueues_begin - begin freezing workqueues
3661 *
3662 * Start freezing workqueues. After this function returns, all freezable
3663  * workqueues will queue new works to their delayed_works list instead of
3664 * gcwq->worklist.
3665 *
3666 * CONTEXT:
3667 * Grabs and releases workqueue_lock and gcwq->lock's.
3668 */
3669 void freeze_workqueues_begin(void)
3670 {
3671 unsigned int cpu;
3672
3673 spin_lock(&workqueue_lock);
3674
3675 BUG_ON(workqueue_freezing);
3676 workqueue_freezing = true;
3677
3678 for_each_gcwq_cpu(cpu) {
3679 struct global_cwq *gcwq = get_gcwq(cpu);
3680 struct workqueue_struct *wq;
3681
3682 spin_lock_irq(&gcwq->lock);
3683
3684 BUG_ON(gcwq->flags & GCWQ_FREEZING);
3685 gcwq->flags |= GCWQ_FREEZING;
3686
3687 list_for_each_entry(wq, &workqueues, list) {
3688 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3689
3690 if (cwq && wq->flags & WQ_FREEZABLE)
3691 cwq->max_active = 0;
3692 }
3693
3694 spin_unlock_irq(&gcwq->lock);
3695 }
3696
3697 spin_unlock(&workqueue_lock);
3698 }
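/*
 * Only workqueues created with WQ_FREEZABLE take part in the freeze
 * protocol above; everything else keeps executing across a freeze.  A
 * hedged example of how a driver would opt in ("my_wq" is hypothetical):
 *
 *	struct workqueue_struct *my_wq;
 *
 *	my_wq = alloc_workqueue("my_wq", WQ_FREEZABLE, 0);
 *	if (!my_wq)
 *		return -ENOMEM;
 *	...
 *	destroy_workqueue(my_wq);
 */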
3699
3700 /**
3701 * freeze_workqueues_busy - are freezable workqueues still busy?
3702 *
3703 * Check whether freezing is complete. This function must be called
3704 * between freeze_workqueues_begin() and thaw_workqueues().
3705 *
3706 * CONTEXT:
3707 * Grabs and releases workqueue_lock.
3708 *
3709 * RETURNS:
3710 * %true if some freezable workqueues are still busy. %false if freezing
3711 * is complete.
3712 */
3713 bool freeze_workqueues_busy(void)
3714 {
3715 unsigned int cpu;
3716 bool busy = false;
3717
3718 spin_lock(&workqueue_lock);
3719
3720 BUG_ON(!workqueue_freezing);
3721
3722 for_each_gcwq_cpu(cpu) {
3723 struct workqueue_struct *wq;
3724 /*
3725 * nr_active is monotonically decreasing. It's safe
3726 * to peek without lock.
3727 */
3728 list_for_each_entry(wq, &workqueues, list) {
3729 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3730
3731 if (!cwq || !(wq->flags & WQ_FREEZABLE))
3732 continue;
3733
3734 BUG_ON(cwq->nr_active < 0);
3735 if (cwq->nr_active) {
3736 busy = true;
3737 goto out_unlock;
3738 }
3739 }
3740 }
3741 out_unlock:
3742 spin_unlock(&workqueue_lock);
3743 return busy;
3744 }
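/*
 * Taken together, freeze_workqueues_begin(), freeze_workqueues_busy() and
 * thaw_workqueues() form the freezer side of the protocol.  A sketch of
 * how a caller such as the system freezer might drive it; the polling
 * interval and the lack of a timeout here are illustrative only, not the
 * actual kernel/power implementation:
 *
 *	freeze_workqueues_begin();
 *	while (freeze_workqueues_busy())
 *		msleep(10);	// real callers also enforce a timeout
 *	...			// system image is saved here
 *	thaw_workqueues();
 */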
3745
3746 /**
3747 * thaw_workqueues - thaw workqueues
3748 *
3749 * Thaw workqueues. Normal queueing is restored and all collected
3750 * frozen works are transferred to their respective gcwq worklists.
3751 *
3752 * CONTEXT:
3753 * Grabs and releases workqueue_lock and gcwq->lock's.
3754 */
3755 void thaw_workqueues(void)
3756 {
3757 unsigned int cpu;
3758
3759 spin_lock(&workqueue_lock);
3760
3761 if (!workqueue_freezing)
3762 goto out_unlock;
3763
3764 for_each_gcwq_cpu(cpu) {
3765 struct global_cwq *gcwq = get_gcwq(cpu);
3766 struct worker_pool *pool;
3767 struct workqueue_struct *wq;
3768
3769 spin_lock_irq(&gcwq->lock);
3770
3771 BUG_ON(!(gcwq->flags & GCWQ_FREEZING));
3772 gcwq->flags &= ~GCWQ_FREEZING;
3773
3774 list_for_each_entry(wq, &workqueues, list) {
3775 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3776
3777 if (!cwq || !(wq->flags & WQ_FREEZABLE))
3778 continue;
3779
3780 /* restore max_active and repopulate worklist */
3781 cwq->max_active = wq->saved_max_active;
3782
3783 while (!list_empty(&cwq->delayed_works) &&
3784 cwq->nr_active < cwq->max_active)
3785 cwq_activate_first_delayed(cwq);
3786 }
3787
3788 for_each_worker_pool(pool, gcwq)
3789 wake_up_worker(pool);
3790
3791 spin_unlock_irq(&gcwq->lock);
3792 }
3793
3794 workqueue_freezing = false;
3795 out_unlock:
3796 spin_unlock(&workqueue_lock);
3797 }
3798 #endif /* CONFIG_FREEZER */
3799
3800 static int __init init_workqueues(void)
3801 {
3802 unsigned int cpu;
3803 int i;
3804
3805 /* make sure we have enough bits for OFFQ CPU number */
3806 BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_CPU_SHIFT)) <
3807 WORK_CPU_LAST);
3808
3809 cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
3810 hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
3811
3812 /* initialize gcwqs */
3813 for_each_gcwq_cpu(cpu) {
3814 struct global_cwq *gcwq = get_gcwq(cpu);
3815 struct worker_pool *pool;
3816
3817 spin_lock_init(&gcwq->lock);
3818 gcwq->cpu = cpu;
3819 gcwq->flags |= GCWQ_DISASSOCIATED;
3820
3821 for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
3822 INIT_HLIST_HEAD(&gcwq->busy_hash[i]);
3823
3824 for_each_worker_pool(pool, gcwq) {
3825 pool->gcwq = gcwq;
3826 INIT_LIST_HEAD(&pool->worklist);
3827 INIT_LIST_HEAD(&pool->idle_list);
3828
3829 init_timer_deferrable(&pool->idle_timer);
3830 pool->idle_timer.function = idle_worker_timeout;
3831 pool->idle_timer.data = (unsigned long)pool;
3832
3833 setup_timer(&pool->mayday_timer, gcwq_mayday_timeout,
3834 (unsigned long)pool);
3835
3836 mutex_init(&pool->assoc_mutex);
3837 ida_init(&pool->worker_ida);
3838 }
3839 }
3840
3841 /* create the initial worker */
3842 for_each_online_gcwq_cpu(cpu) {
3843 struct global_cwq *gcwq = get_gcwq(cpu);
3844 struct worker_pool *pool;
3845
3846 if (cpu != WORK_CPU_UNBOUND)
3847 gcwq->flags &= ~GCWQ_DISASSOCIATED;
3848
3849 for_each_worker_pool(pool, gcwq) {
3850 struct worker *worker;
3851
3852 worker = create_worker(pool);
3853 BUG_ON(!worker);
3854 spin_lock_irq(&gcwq->lock);
3855 start_worker(worker);
3856 spin_unlock_irq(&gcwq->lock);
3857 }
3858 }
3859
3860 system_wq = alloc_workqueue("events", 0, 0);
3861 system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0);
3862 system_long_wq = alloc_workqueue("events_long", 0, 0);
3863 system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
3864 WQ_UNBOUND_MAX_ACTIVE);
3865 system_freezable_wq = alloc_workqueue("events_freezable",
3866 WQ_FREEZABLE, 0);
3867 BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
3868 !system_unbound_wq || !system_freezable_wq);
3869 return 0;
3870 }
3871 early_initcall(init_workqueues);
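/*
 * The system workqueues allocated above are what schedule_work() and
 * friends use.  A hedged usage sketch (my_work and my_work_fn are
 * hypothetical names):
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		pr_info("hello from workqueue context\n");
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	schedule_work(&my_work);	// queues on system_wq
 *	flush_work(&my_work);		// waits for my_work_fn() to finish
 *
 * Work that needn't run on the submitting CPU can instead be queued with
 * queue_work(system_unbound_wq, &my_work).
 */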