/* Worker thread pool for slow items, such as filesystem lookups or mkdirs
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/slow-work.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/wait.h>
#include <asm/system.h>

#define SLOW_WORK_CULL_TIMEOUT (5 * HZ)	/* cull threads 5s after running out of
					 * things to do */
#define SLOW_WORK_OOM_TIMEOUT (5 * HZ)	/* can't start new threads for 5s after
					 * OOM */

static void slow_work_cull_timeout(unsigned long);
static void slow_work_oom_timeout(unsigned long);

/*
 * The pool of threads has at least min threads in it as long as someone is
 * using the facility, and may have as many as max.
 *
 * A portion of the pool may be processing very slow operations.
 */
static unsigned slow_work_min_threads = 2;
static unsigned slow_work_max_threads = 4;
static unsigned vslow_work_proportion = 50; /* % of threads that may process
					     * very slow work */
static atomic_t slow_work_thread_count;
static atomic_t vslow_work_executing_count;

static bool slow_work_may_not_start_new_thread;
static bool slow_work_cull; /* cull a thread due to lack of activity */
static DEFINE_TIMER(slow_work_cull_timer, slow_work_cull_timeout, 0, 0);
static DEFINE_TIMER(slow_work_oom_timer, slow_work_oom_timeout, 0, 0);
static struct slow_work slow_work_new_thread; /* new thread starter */

/*
 * The queues of work items and the lock governing access to them. These are
 * shared between all the CPUs. It doesn't make sense to have per-CPU queues
 * as the number of threads bears no relation to the number of CPUs.
 *
 * There are two queues of work items: one for slow work items, and one for
 * very slow work items.
 */
static LIST_HEAD(slow_work_queue);
static LIST_HEAD(vslow_work_queue);
static DEFINE_SPINLOCK(slow_work_queue_lock);

/*
 * The thread controls. A variable used to signal to the threads that they
 * should exit when the queue is empty, a waitqueue used by the threads to wait
 * for signals, and a completion set by the last thread to exit.
 */
static bool slow_work_threads_should_exit;
static DECLARE_WAIT_QUEUE_HEAD(slow_work_thread_wq);
static DECLARE_COMPLETION(slow_work_last_thread_exited);

/*
 * The number of users of the thread pool and its lock. Whilst this is zero we
 * have no threads hanging around, and when this reaches zero, we wait for all
 * active or queued work items to complete and kill all the threads we do have.
 */
static int slow_work_user_count;
static DEFINE_MUTEX(slow_work_user_lock);

/*
 * Calculate the maximum number of active threads in the pool that are
 * permitted to process very slow work items.
 *
 * The answer is rounded up to at least 1, but may not equal or exceed the
 * maximum number of threads in the pool. This means we always have at least
 * one thread that can process very slow work items, and we always have at
 * least one thread that won't get tied up doing so.
 */
static unsigned slow_work_calc_vsmax(void)
{
	unsigned vsmax;

	vsmax = atomic_read(&slow_work_thread_count) * vslow_work_proportion;
	vsmax /= 100;
	vsmax = max(vsmax, 1U);
	return min(vsmax, slow_work_max_threads - 1);
}

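/*
 * Worked example (illustrative note, not part of the original source):
 * with the defaults above, a full pool of 4 threads and a proportion of
 * 50% give vsmax = 4 * 50 / 100 = 2, which already lies within the
 * permitted range [1, slow_work_max_threads - 1] = [1, 3], so at most two
 * of the four threads may be tied up in very slow work at once.
 */
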
/*
 * Attempt to execute stuff queued on a slow thread. Return true if we managed
 * it, false if there was nothing to do.
 */
static bool slow_work_execute(void)
{
	struct slow_work *work = NULL;
	unsigned vsmax;
	bool very_slow;

	vsmax = slow_work_calc_vsmax();

	/* see if we can schedule a new thread to be started if we're not
	 * keeping up with the work */
	if (!waitqueue_active(&slow_work_thread_wq) &&
	    (!list_empty(&slow_work_queue) || !list_empty(&vslow_work_queue)) &&
	    atomic_read(&slow_work_thread_count) < slow_work_max_threads &&
	    !slow_work_may_not_start_new_thread)
		slow_work_enqueue(&slow_work_new_thread);

	/* find something to execute */
	spin_lock_irq(&slow_work_queue_lock);
	if (!list_empty(&vslow_work_queue) &&
	    atomic_read(&vslow_work_executing_count) < vsmax) {
		work = list_entry(vslow_work_queue.next,
				  struct slow_work, link);
		if (test_and_set_bit_lock(SLOW_WORK_EXECUTING, &work->flags))
			BUG();
		list_del_init(&work->link);
		atomic_inc(&vslow_work_executing_count);
		very_slow = true;
	} else if (!list_empty(&slow_work_queue)) {
		work = list_entry(slow_work_queue.next,
				  struct slow_work, link);
		if (test_and_set_bit_lock(SLOW_WORK_EXECUTING, &work->flags))
			BUG();
		list_del_init(&work->link);
		very_slow = false;
	} else {
		very_slow = false; /* avoid the compiler warning */
	}
	spin_unlock_irq(&slow_work_queue_lock);

	if (!work)
		return false;

	if (!test_and_clear_bit(SLOW_WORK_PENDING, &work->flags))
		BUG();

	work->ops->execute(work);

	if (very_slow)
		atomic_dec(&vslow_work_executing_count);
	clear_bit_unlock(SLOW_WORK_EXECUTING, &work->flags);

	/* if someone tried to enqueue the item whilst we were executing it,
	 * then it'll be left unenqueued to avoid multiple threads trying to
	 * execute it simultaneously
	 *
	 * there is, however, a race between us testing the pending flag and
	 * getting the spinlock, and between the enqueuer setting the pending
	 * flag and getting the spinlock, so we use a deferral bit to tell us
	 * if the enqueuer got there first
	 */
	if (test_bit(SLOW_WORK_PENDING, &work->flags)) {
		spin_lock_irq(&slow_work_queue_lock);

		if (!test_bit(SLOW_WORK_EXECUTING, &work->flags) &&
		    test_and_clear_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags))
			goto auto_requeue;

		spin_unlock_irq(&slow_work_queue_lock);
	}

	work->ops->put_ref(work);
	return true;

auto_requeue:
	/* we must complete the enqueue operation
	 * - we transfer our ref on the item back to the appropriate queue
	 * - don't wake another thread up as we're awake already
	 */
	if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
		list_add_tail(&work->link, &vslow_work_queue);
	else
		list_add_tail(&work->link, &slow_work_queue);
	spin_unlock_irq(&slow_work_queue_lock);
	return true;
}

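/*
 * Illustrative walkthrough of the flag interplay above (editorial sketch,
 * not part of the original source). Suppose thread A is executing an item
 * when thread B calls slow_work_enqueue() on the same item:
 *
 *	A: clears PENDING, calls ->execute(), still holds EXECUTING
 *	B: sets PENDING, takes the queue lock, sees EXECUTING set, so sets
 *	   ENQ_DEFERRED and returns without queueing
 *	A: returns from ->execute(), clears EXECUTING, sees PENDING still
 *	   set, takes the lock, claims ENQ_DEFERRED and requeues the item
 *	   itself via auto_requeue
 *
 * The item is thus guaranteed a future execution without ever sitting on a
 * queue whilst EXECUTING is set on it.
 */
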
/**
 * slow_work_enqueue - Schedule a slow work item for processing
 * @work: The work item to queue
 *
 * Schedule a slow work item for processing. If the item is already undergoing
 * execution, this guarantees not to re-enter the execution routine until the
 * first execution finishes.
 *
 * The item is pinned by this function as it retains a reference to it, managed
 * through the item operations. The item is unpinned once it has been
 * executed.
 *
 * An item may hog the thread that is running it for a relatively large amount
 * of time, sufficient, for example, to perform several lookup, mkdir, create
 * and setxattr operations. It may sleep on I/O and may sleep to obtain locks.
 *
 * Conversely, if a number of items are awaiting processing, it may take some
 * time before any given item is given attention. The number of threads in the
 * pool may be increased to deal with demand, but only up to a limit.
 *
 * If SLOW_WORK_VERY_SLOW is set on the work item, then it will be placed in
 * the very slow queue, from which only a portion of the threads will be
 * allowed to pick items to execute. This ensures that very slow items won't
 * overly block ones that are just ordinarily slow.
 *
 * Returns 0 if successful, -EAGAIN if not.
 */
int slow_work_enqueue(struct slow_work *work)
{
	unsigned long flags;

	BUG_ON(slow_work_user_count <= 0);
	BUG_ON(!work);
	BUG_ON(!work->ops);
	BUG_ON(!work->ops->get_ref);

	/* when honouring an enqueue request, we only promise that we will run
	 * the work function in the future; we do not promise to run it once
	 * per enqueue request
	 *
	 * we use the PENDING bit to merge together repeat requests without
	 * having to disable IRQs and take the spinlock, whilst still
	 * maintaining our promise
	 */
	if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) {
		spin_lock_irqsave(&slow_work_queue_lock, flags);

		/* we promise that we will not attempt to execute the work
		 * function in more than one thread simultaneously
		 *
		 * this, however, leaves us with a problem if we're asked to
		 * enqueue the work whilst someone is executing the work
		 * function as simply queueing the work immediately means that
		 * another thread may try executing it whilst it is already
		 * under execution
		 *
		 * to deal with this, we set the ENQ_DEFERRED bit instead of
		 * enqueueing, and the thread currently executing the work
		 * function will enqueue the work item when the work function
		 * returns and it has cleared the EXECUTING bit
		 */
		if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) {
			set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags);
		} else {
			if (work->ops->get_ref(work) < 0)
				goto cant_get_ref;
			if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
				list_add_tail(&work->link, &vslow_work_queue);
			else
				list_add_tail(&work->link, &slow_work_queue);
			wake_up(&slow_work_thread_wq);
		}

		spin_unlock_irqrestore(&slow_work_queue_lock, flags);
	}
	return 0;

cant_get_ref:
	spin_unlock_irqrestore(&slow_work_queue_lock, flags);
	return -EAGAIN;
}
EXPORT_SYMBOL(slow_work_enqueue);

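/*
 * Sketch of a client of this API (hypothetical example, not part of the
 * original source; the "my_*" names are invented). A user embeds a
 * struct slow_work in its own object and supplies the three operations,
 * mirroring the slow_work_new_thread_ops defined later in this file.
 */
struct my_object {
	atomic_t		usage;
	struct slow_work	work;
};

static int my_work_get_ref(struct slow_work *work)
{
	/* pin the containing object whilst it is queued or executing */
	atomic_inc(&container_of(work, struct my_object, work)->usage);
	return 0;
}

static void my_work_put_ref(struct slow_work *work)
{
	/* drop the reference taken by my_work_get_ref() */
	atomic_dec(&container_of(work, struct my_object, work)->usage);
}

static void my_work_execute(struct slow_work *work)
{
	/* the long-running operation; may sleep on I/O and to take locks */
}

static const struct slow_work_ops my_work_ops = {
	.get_ref	= my_work_get_ref,
	.put_ref	= my_work_put_ref,
	.execute	= my_work_execute,
};
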
/*
 * Worker thread culling algorithm
 */
static bool slow_work_cull_thread(void)
{
	unsigned long flags;
	bool do_cull = false;

	spin_lock_irqsave(&slow_work_queue_lock, flags);

	if (slow_work_cull) {
		slow_work_cull = false;

		if (list_empty(&slow_work_queue) &&
		    list_empty(&vslow_work_queue) &&
		    atomic_read(&slow_work_thread_count) >
		    slow_work_min_threads) {
			mod_timer(&slow_work_cull_timer,
				  jiffies + SLOW_WORK_CULL_TIMEOUT);
			do_cull = true;
		}
	}

	spin_unlock_irqrestore(&slow_work_queue_lock, flags);
	return do_cull;
}

/*
 * Determine if there is slow work available for dispatch
 */
static inline bool slow_work_available(int vsmax)
{
	return !list_empty(&slow_work_queue) ||
		(!list_empty(&vslow_work_queue) &&
		 atomic_read(&vslow_work_executing_count) < vsmax);
}

/*
 * Worker thread dispatcher
 */
static int slow_work_thread(void *_data)
{
	int vsmax;

	DEFINE_WAIT(wait);

	set_freezable();
	set_user_nice(current, -5);

	for (;;) {
		vsmax = vslow_work_proportion;
		vsmax *= atomic_read(&slow_work_thread_count);
		vsmax /= 100;

		prepare_to_wait(&slow_work_thread_wq, &wait,
				TASK_INTERRUPTIBLE);
		if (!freezing(current) &&
		    !slow_work_threads_should_exit &&
		    !slow_work_available(vsmax) &&
		    !slow_work_cull)
			schedule();
		finish_wait(&slow_work_thread_wq, &wait);

		try_to_freeze();

		vsmax = vslow_work_proportion;
		vsmax *= atomic_read(&slow_work_thread_count);
		vsmax /= 100;

		if (slow_work_available(vsmax) && slow_work_execute()) {
			cond_resched();
			if (list_empty(&slow_work_queue) &&
			    list_empty(&vslow_work_queue) &&
			    atomic_read(&slow_work_thread_count) >
			    slow_work_min_threads)
				mod_timer(&slow_work_cull_timer,
					  jiffies + SLOW_WORK_CULL_TIMEOUT);
			continue;
		}

		if (slow_work_threads_should_exit)
			break;

		if (slow_work_cull && slow_work_cull_thread())
			break;
	}

	if (atomic_dec_and_test(&slow_work_thread_count))
		complete_and_exit(&slow_work_last_thread_exited, 0);
	return 0;
}

/*
 * Handle thread cull timer expiration
 */
static void slow_work_cull_timeout(unsigned long data)
{
	slow_work_cull = true;
	wake_up(&slow_work_thread_wq);
}

/*
 * Get a reference on slow work thread starter
 */
static int slow_work_new_thread_get_ref(struct slow_work *work)
{
	return 0;
}

/*
 * Drop a reference on slow work thread starter
 */
static void slow_work_new_thread_put_ref(struct slow_work *work)
{
}

/*
 * Start a new slow work thread
 */
static void slow_work_new_thread_execute(struct slow_work *work)
{
	struct task_struct *p;

	if (slow_work_threads_should_exit)
		return;

	if (atomic_read(&slow_work_thread_count) >= slow_work_max_threads)
		return;

	if (!mutex_trylock(&slow_work_user_lock))
		return;

	slow_work_may_not_start_new_thread = true;
	atomic_inc(&slow_work_thread_count);
	p = kthread_run(slow_work_thread, NULL, "kslowd");
	if (IS_ERR(p)) {
		printk(KERN_DEBUG "Slow work thread pool: OOM\n");
		if (atomic_dec_and_test(&slow_work_thread_count))
			BUG(); /* we're running on a slow work thread... */
		mod_timer(&slow_work_oom_timer,
			  jiffies + SLOW_WORK_OOM_TIMEOUT);
	} else {
		/* ratelimit the starting of new threads */
		mod_timer(&slow_work_oom_timer, jiffies + 1);
	}

	mutex_unlock(&slow_work_user_lock);
}

static const struct slow_work_ops slow_work_new_thread_ops = {
	.get_ref	= slow_work_new_thread_get_ref,
	.put_ref	= slow_work_new_thread_put_ref,
	.execute	= slow_work_new_thread_execute,
};

/*
 * post-OOM new thread start suppression expiration
 */
static void slow_work_oom_timeout(unsigned long data)
{
	slow_work_may_not_start_new_thread = false;
}

/**
 * slow_work_register_user - Register a user of the facility
 *
 * Register a user of the facility, starting up the initial threads if there
 * aren't any other users at this point. This will return 0 if successful, or
 * an error if not.
 */
int slow_work_register_user(void)
{
	struct task_struct *p;
	int loop;

	mutex_lock(&slow_work_user_lock);

	if (slow_work_user_count == 0) {
		printk(KERN_NOTICE "Slow work thread pool: Starting up\n");
		init_completion(&slow_work_last_thread_exited);

		slow_work_threads_should_exit = false;
		slow_work_init(&slow_work_new_thread,
			       &slow_work_new_thread_ops);
		slow_work_may_not_start_new_thread = false;
		slow_work_cull = false;

		/* start the minimum number of threads */
		for (loop = 0; loop < slow_work_min_threads; loop++) {
			atomic_inc(&slow_work_thread_count);
			p = kthread_run(slow_work_thread, NULL, "kslowd");
			if (IS_ERR(p))
				goto error;
		}
		printk(KERN_NOTICE "Slow work thread pool: Ready\n");
	}

	slow_work_user_count++;
	mutex_unlock(&slow_work_user_lock);
	return 0;

error:
	if (atomic_dec_and_test(&slow_work_thread_count))
		complete(&slow_work_last_thread_exited);
	if (loop > 0) {
		printk(KERN_ERR "Slow work thread pool:"
		       " Aborting startup on ENOMEM\n");
		slow_work_threads_should_exit = true;
		wake_up_all(&slow_work_thread_wq);
		wait_for_completion(&slow_work_last_thread_exited);
		printk(KERN_ERR "Slow work thread pool: Aborted\n");
	}
	mutex_unlock(&slow_work_user_lock);
	return PTR_ERR(p);
}
EXPORT_SYMBOL(slow_work_register_user);

/**
 * slow_work_unregister_user - Unregister a user of the facility
 *
 * Unregister a user of the facility, killing all the threads if this was the
 * last one.
 */
void slow_work_unregister_user(void)
{
	mutex_lock(&slow_work_user_lock);

	BUG_ON(slow_work_user_count <= 0);

	slow_work_user_count--;
	if (slow_work_user_count == 0) {
		printk(KERN_NOTICE "Slow work thread pool: Shutting down\n");
		slow_work_threads_should_exit = true;
		wake_up_all(&slow_work_thread_wq);
		wait_for_completion(&slow_work_last_thread_exited);
		printk(KERN_NOTICE "Slow work thread pool:"
		       " Shut down complete\n");
	}

	del_timer_sync(&slow_work_cull_timer);

	mutex_unlock(&slow_work_user_lock);
}
EXPORT_SYMBOL(slow_work_unregister_user);

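/*
 * Sketch of the client lifecycle (hypothetical example, not part of the
 * original source; it reuses the invented my_object/my_work_ops from the
 * earlier sketch). A client brackets its use of the pool with
 * register/unregister so that threads exist for as long as it may queue
 * work.
 */
static struct my_object my_obj;

static int my_client_start(void)
{
	int ret;

	ret = slow_work_register_user();
	if (ret < 0)
		return ret;

	slow_work_init(&my_obj.work, &my_work_ops);
	return slow_work_enqueue(&my_obj.work);
}

static void my_client_stop(void)
{
	/* if we were the last user, this waits for all queued and executing
	 * items to complete and kills the threads */
	slow_work_unregister_user();
}
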
/*
 * Initialise the slow work facility
 */
static int __init init_slow_work(void)
{
	unsigned nr_cpus = num_possible_cpus();

	if (nr_cpus > slow_work_max_threads)
		slow_work_max_threads = nr_cpus;
	return 0;
}

subsys_initcall(init_slow_work);