/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include "async-thread.h"
#define WORK_QUEUED_BIT 0
#define WORK_DONE_BIT 1
#define WORK_ORDER_DONE_BIT 2
#define WORK_HIGH_PRIO_BIT 3
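
/*
 * Work item lifecycle, as tracked by the flag bits above (summary of
 * how the code below uses them): WORK_QUEUED_BIT is set when an item
 * is put on a pending list and cleared once a worker pulls it off;
 * WORK_DONE_BIT is set after the work function has run;
 * WORK_ORDER_DONE_BIT records that the ordered completion for the
 * item has been called; WORK_HIGH_PRIO_BIT steers the item onto the
 * prio_pending/prio_order lists.
 */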
/*
 * container for the kthread task pointer and the list of pending work
 * One of these is allocated per thread.
 */
struct btrfs_worker_thread {
	/* pool we belong to */
	struct btrfs_workers *workers;

	/* list of struct btrfs_work that are waiting for service */
	struct list_head pending;
	struct list_head prio_pending;

	/* list of worker threads from struct btrfs_workers */
	struct list_head worker_list;

	/* kthread */
	struct task_struct *task;

	/* number of things on the pending list */
	atomic_t num_pending;

	/* reference counter for this struct */
	atomic_t refs;

	unsigned long sequence;

	/* protects the pending list. */
	spinlock_t lock;

	/* set to non-zero when this thread is already awake and kicking */
	int working;

	/* are we currently idle */
	int idle;
};
static int __btrfs_start_workers(struct btrfs_workers *workers);
/*
 * btrfs_start_workers uses kthread_run, which can block waiting for memory
 * for a very long time.  It will actually throttle on page writeback,
 * and so it may not make progress until after our btrfs worker threads
 * process all of the pending work structs in their queue
 *
 * This means we can't use btrfs_start_workers from inside a btrfs worker
 * thread that is used as part of cleaning dirty memory, which pretty much
 * involves all of the worker threads.
 *
 * Instead we have a helper queue that never has more than one thread,
 * where we schedule thread start operations.  This worker_start struct
 * is used to contain the work and hold a pointer to the queue that needs
 * another worker.
 */
struct worker_start {
	struct btrfs_work work;
	struct btrfs_workers *queue;
};
static void start_new_worker_func(struct btrfs_work *work)
{
	struct worker_start *start;
	start = container_of(work, struct worker_start, work);
	__btrfs_start_workers(start->queue);
	kfree(start);
}
/*
 * helper function to move a thread onto the idle list after it
 * has finished some requests.
 */
static void check_idle_worker(struct btrfs_worker_thread *worker)
{
	if (!worker->idle && atomic_read(&worker->num_pending) <
	    worker->workers->idle_thresh / 2) {
		unsigned long flags;
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 1;

		/* the list may be empty if the worker is just starting */
		if (!list_empty(&worker->worker_list)) {
			list_move(&worker->worker_list,
				  &worker->workers->idle_list);
		}
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}
/*
 * helper function to move a thread off the idle list after new
 * pending work is added.
 */
static void check_busy_worker(struct btrfs_worker_thread *worker)
{
	if (worker->idle && atomic_read(&worker->num_pending) >=
	    worker->workers->idle_thresh) {
		unsigned long flags;
		spin_lock_irqsave(&worker->workers->lock, flags);
		worker->idle = 0;

		if (!list_empty(&worker->worker_list)) {
			list_move_tail(&worker->worker_list,
				       &worker->workers->worker_list);
		}
		spin_unlock_irqrestore(&worker->workers->lock, flags);
	}
}
static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
{
	struct btrfs_workers *workers = worker->workers;
	struct worker_start *start;
	unsigned long flags;

	rmb();
	if (!workers->atomic_start_pending)
		return;

	start = kzalloc(sizeof(*start), GFP_NOFS);
	if (!start)
		return;

	start->work.func = start_new_worker_func;
	start->queue = workers;

	spin_lock_irqsave(&workers->lock, flags);
	if (!workers->atomic_start_pending)
		goto out;

	workers->atomic_start_pending = 0;
	if (workers->num_workers + workers->num_workers_starting >=
	    workers->max_workers)
		goto out;

	workers->num_workers_starting += 1;
	spin_unlock_irqrestore(&workers->lock, flags);
	btrfs_queue_worker(workers->atomic_worker_start, &start->work);
	return;

out:
	kfree(start);
	spin_unlock_irqrestore(&workers->lock, flags);
}
static noinline void run_ordered_completions(struct btrfs_workers *workers,
					     struct btrfs_work *work)
{
	if (!workers->ordered)
		return;

	set_bit(WORK_DONE_BIT, &work->flags);

	spin_lock(&workers->order_lock);

	while (1) {
		if (!list_empty(&workers->prio_order_list)) {
			work = list_entry(workers->prio_order_list.next,
					  struct btrfs_work, order_list);
		} else if (!list_empty(&workers->order_list)) {
			work = list_entry(workers->order_list.next,
					  struct btrfs_work, order_list);
		} else {
			break;
		}
		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;

		/* we are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;

		spin_unlock(&workers->order_lock);

		work->ordered_func(work);

		/* now take the lock again and call the freeing code */
		spin_lock(&workers->order_lock);
		list_del(&work->order_list);
		work->ordered_free(work);
	}

	spin_unlock(&workers->order_lock);
}
static void put_worker(struct btrfs_worker_thread *worker)
{
	if (atomic_dec_and_test(&worker->refs))
		kfree(worker);
}
static int try_worker_shutdown(struct btrfs_worker_thread *worker)
{
	int freeit = 0;

	spin_lock_irq(&worker->lock);
	spin_lock(&worker->workers->lock);
	if (worker->workers->num_workers > 1 &&
	    worker->idle &&
	    !worker->working &&
	    !list_empty(&worker->worker_list) &&
	    list_empty(&worker->prio_pending) &&
	    list_empty(&worker->pending) &&
	    atomic_read(&worker->num_pending) == 0) {
		freeit = 1;
		list_del_init(&worker->worker_list);
		worker->workers->num_workers--;
	}
	spin_unlock(&worker->workers->lock);
	spin_unlock_irq(&worker->lock);

	if (freeit)
		put_worker(worker);
	return freeit;
}
static struct btrfs_work *get_next_work(struct btrfs_worker_thread *worker,
					struct list_head *prio_head,
					struct list_head *head)
{
	struct btrfs_work *work = NULL;
	struct list_head *cur = NULL;

	if (!list_empty(prio_head))
		cur = prio_head->next;

	smp_mb();
	if (!list_empty(&worker->prio_pending))
		goto refill;

	if (!list_empty(head))
		cur = head->next;

	if (cur)
		goto out;

refill:
	spin_lock_irq(&worker->lock);
	list_splice_tail_init(&worker->prio_pending, prio_head);
	list_splice_tail_init(&worker->pending, head);

	if (!list_empty(prio_head))
		cur = prio_head->next;
	else if (!list_empty(head))
		cur = head->next;
	spin_unlock_irq(&worker->lock);

	if (!cur)
		goto out_fail;

out:
	work = list_entry(cur, struct btrfs_work, list);

out_fail:
	return work;
}
/*
 * main loop for servicing work items
 */
static int worker_loop(void *arg)
{
	struct btrfs_worker_thread *worker = arg;
	struct list_head head;
	struct list_head prio_head;
	struct btrfs_work *work;

	INIT_LIST_HEAD(&head);
	INIT_LIST_HEAD(&prio_head);

	do {
again:
		while (1) {
			work = get_next_work(worker, &prio_head, &head);
			if (!work)
				break;

			list_del(&work->list);
			clear_bit(WORK_QUEUED_BIT, &work->flags);

			work->worker = worker;

			work->func(work);

			atomic_dec(&worker->num_pending);
			/*
			 * unless this is an ordered work queue,
			 * 'work' was probably freed by func above.
			 */
			run_ordered_completions(worker->workers, work);

			check_pending_worker_creates(worker);
		}

		spin_lock_irq(&worker->lock);
		check_idle_worker(worker);

		if (freezing(current)) {
			worker->working = 0;
			spin_unlock_irq(&worker->lock);
			refrigerator();
		} else {
			spin_unlock_irq(&worker->lock);
			if (!kthread_should_stop()) {
				cpu_relax();
				/*
				 * we've dropped the lock, did someone else
				 * jump in?
				 */
				smp_mb();
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending))
					continue;

				/*
				 * this short schedule allows more work to
				 * come in without the queue functions
				 * needing to go through wake_up_process()
				 *
				 * worker->working is still 1, so nobody
				 * is going to try and wake us up
				 */
				schedule_timeout(1);
				smp_mb();
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending))
					continue;

				if (kthread_should_stop())
					break;

				/* still no more work? sleep for real */
				spin_lock_irq(&worker->lock);
				set_current_state(TASK_INTERRUPTIBLE);
				if (!list_empty(&worker->pending) ||
				    !list_empty(&worker->prio_pending)) {
					spin_unlock_irq(&worker->lock);
					set_current_state(TASK_RUNNING);
					goto again;
				}

				/*
				 * this makes sure we get a wakeup when someone
				 * adds something new to the queue
				 */
				worker->working = 0;
				spin_unlock_irq(&worker->lock);

				if (!kthread_should_stop()) {
					schedule_timeout(HZ * 120);
					if (!worker->working &&
					    try_worker_shutdown(worker))
						return 0;
				}
			}
			__set_current_state(TASK_RUNNING);
		}
	} while (!kthread_should_stop());
	return 0;
}
/*
 * this will wait for all the worker threads to shutdown
 */
void btrfs_stop_workers(struct btrfs_workers *workers)
{
	struct list_head *cur;
	struct btrfs_worker_thread *worker;
	int can_stop;

	spin_lock_irq(&workers->lock);
	list_splice_init(&workers->idle_list, &workers->worker_list);
	while (!list_empty(&workers->worker_list)) {
		cur = workers->worker_list.next;
		worker = list_entry(cur, struct btrfs_worker_thread,
				    worker_list);

		atomic_inc(&worker->refs);
		workers->num_workers -= 1;
		if (!list_empty(&worker->worker_list)) {
			list_del_init(&worker->worker_list);
			put_worker(worker);
			can_stop = 1;
		} else
			can_stop = 0;
		spin_unlock_irq(&workers->lock);
		if (can_stop)
			kthread_stop(worker->task);
		spin_lock_irq(&workers->lock);
		put_worker(worker);
	}
	spin_unlock_irq(&workers->lock);
}
/*
 * simple init on struct btrfs_workers
 */
void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
			struct btrfs_workers *async_helper)
{
	workers->num_workers = 0;
	workers->num_workers_starting = 0;
	INIT_LIST_HEAD(&workers->worker_list);
	INIT_LIST_HEAD(&workers->idle_list);
	INIT_LIST_HEAD(&workers->order_list);
	INIT_LIST_HEAD(&workers->prio_order_list);
	spin_lock_init(&workers->lock);
	spin_lock_init(&workers->order_lock);
	workers->max_workers = max;
	workers->idle_thresh = 32;
	workers->name = name;
	workers->ordered = 0;
	workers->atomic_start_pending = 0;
	workers->atomic_worker_start = async_helper;
}
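
/*
 * A typical pool setup might look like the sketch below (illustrative
 * only; the fs_info fields and pool names here are assumptions, not
 * something defined in this file):
 *
 *	btrfs_init_workers(&fs_info->generic_worker, "genwork", 1, NULL);
 *	btrfs_init_workers(&fs_info->workers, "worker",
 *			   fs_info->thread_pool_size,
 *			   &fs_info->generic_worker);
 *	ret = btrfs_start_workers(&fs_info->workers);
 *
 * The async_helper argument becomes atomic_worker_start, the helper
 * queue used for the deferred thread starts described above
 * struct worker_start.
 */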
/*
 * starts new worker threads.  This does not enforce the max worker
 * count in case you need to temporarily go past it.
 */
static int __btrfs_start_workers(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	int ret = 0;

	worker = kzalloc(sizeof(*worker), GFP_NOFS);
	if (!worker) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_LIST_HEAD(&worker->pending);
	INIT_LIST_HEAD(&worker->prio_pending);
	INIT_LIST_HEAD(&worker->worker_list);
	spin_lock_init(&worker->lock);

	atomic_set(&worker->num_pending, 0);
	atomic_set(&worker->refs, 1);
	worker->workers = workers;
	worker->task = kthread_run(worker_loop, worker,
				   "btrfs-%s-%d", workers->name,
				   workers->num_workers + 1);
	if (IS_ERR(worker->task)) {
		ret = PTR_ERR(worker->task);
		kfree(worker);
		goto fail;
	}
	spin_lock_irq(&workers->lock);
	list_add_tail(&worker->worker_list, &workers->idle_list);
	worker->idle = 1;
	workers->num_workers++;
	workers->num_workers_starting--;
	WARN_ON(workers->num_workers_starting < 0);
	spin_unlock_irq(&workers->lock);

	return 0;
fail:
	spin_lock_irq(&workers->lock);
	workers->num_workers_starting--;
	spin_unlock_irq(&workers->lock);
	return ret;
}
int btrfs_start_workers(struct btrfs_workers *workers)
{
	spin_lock_irq(&workers->lock);
	workers->num_workers_starting++;
	spin_unlock_irq(&workers->lock);
	return __btrfs_start_workers(workers);
}
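
/*
 * Note that find_worker()'s fallback path below picks the first entry
 * on the busy or idle list, so btrfs_start_workers() must succeed at
 * least once for a pool before btrfs_queue_worker() is used on it.
 */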
/*
 * run through the list and find a worker thread that doesn't have a lot
 * to do right now.  This can return null if we aren't yet at the thread
 * count limit and all of the threads are busy.
 */
static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	struct list_head *next;
	int enforce_min;

	enforce_min = (workers->num_workers + workers->num_workers_starting) <
		workers->max_workers;

	/*
	 * if we find an idle thread, don't move it to the end of the
	 * idle list.  This improves the chance that the next submission
	 * will reuse the same thread, and maybe catch it while it is still
	 * working
	 */
	if (!list_empty(&workers->idle_list)) {
		next = workers->idle_list.next;
		worker = list_entry(next, struct btrfs_worker_thread,
				    worker_list);
		return worker;
	}
	if (enforce_min || list_empty(&workers->worker_list))
		return NULL;

	/*
	 * if we pick a busy task, move the task to the end of the list.
	 * hopefully this will keep things somewhat evenly balanced.
	 * Do the move in batches based on the sequence number.  This groups
	 * requests submitted at roughly the same time onto the same worker.
	 */
	next = workers->worker_list.next;
	worker = list_entry(next, struct btrfs_worker_thread, worker_list);
	worker->sequence++;

	if (worker->sequence % workers->idle_thresh == 0)
		list_move_tail(next, &workers->worker_list);
	return worker;
}
/*
 * selects a worker thread to take the next job.  This will either find
 * an idle worker, start a new worker up to the max count, or just return
 * one of the existing busy workers.
 */
static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;
	struct list_head *fallback;
	int ret;

again:
	spin_lock_irqsave(&workers->lock, flags);
	worker = next_worker(workers);

	if (!worker) {
		if (workers->num_workers + workers->num_workers_starting >=
		    workers->max_workers) {
			goto fallback;
		} else if (workers->atomic_worker_start) {
			workers->atomic_start_pending = 1;
			goto fallback;
		} else {
			workers->num_workers_starting++;
			spin_unlock_irqrestore(&workers->lock, flags);
			/* we're below the limit, start another worker */
			ret = __btrfs_start_workers(workers);
			spin_lock_irqsave(&workers->lock, flags);
			if (ret)
				goto fallback;
			goto again;
		}
	}
	goto found;

fallback:
	fallback = NULL;
	/*
	 * we have failed to find any workers, just
	 * return the first one we can find.
	 */
	if (!list_empty(&workers->worker_list))
		fallback = workers->worker_list.next;
	if (!list_empty(&workers->idle_list))
		fallback = workers->idle_list.next;
	BUG_ON(!fallback);
	worker = list_entry(fallback,
			    struct btrfs_worker_thread, worker_list);
found:
	/*
	 * this makes sure the worker doesn't exit before it is placed
	 * onto a busy/idle list
	 */
	atomic_inc(&worker->num_pending);
	spin_unlock_irqrestore(&workers->lock, flags);
	return worker;
}
/*
 * btrfs_requeue_work just puts the work item back on the tail of the list
 * it was taken from.  It is intended for use with long running work functions
 * that make some progress and want to give the cpu up for others.
 */
void btrfs_requeue_work(struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker = work->worker;
	unsigned long flags;
	int wake = 0;

	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
		goto out;

	spin_lock_irqsave(&worker->lock, flags);
	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
		list_add_tail(&work->list, &worker->prio_pending);
	else
		list_add_tail(&work->list, &worker->pending);
	atomic_inc(&worker->num_pending);

	/* by definition we're busy, take ourselves off the idle
	 * list
	 */
	if (worker->idle) {
		spin_lock(&worker->workers->lock);
		worker->idle = 0;
		list_move_tail(&worker->worker_list,
			       &worker->workers->worker_list);
		spin_unlock(&worker->workers->lock);
	}
	if (!worker->working) {
		wake = 1;
		worker->working = 1;
	}

	if (wake)
		wake_up_process(worker->task);
	spin_unlock_irqrestore(&worker->lock, flags);
out:
	return;
}
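
/*
 * A long running work function might use the requeue pattern roughly
 * like this (illustrative sketch; my_long_func and my_make_progress
 * are hypothetical, not part of btrfs):
 *
 *	static void my_long_func(struct btrfs_work *work)
 *	{
 *		if (my_make_progress(work))
 *			btrfs_requeue_work(work);
 *	}
 */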
void btrfs_set_work_high_prio(struct btrfs_work *work)
{
	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}
/*
 * places a struct btrfs_work into the pending queue of one of the kthreads
 */
void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
{
	struct btrfs_worker_thread *worker;
	unsigned long flags;
	int wake = 0;

	/* don't requeue something already on a list */
	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
		return;

	worker = find_worker(workers);
	if (workers->ordered) {
		/*
		 * you're not allowed to do ordered queues from an
		 * interrupt handler
		 */
		spin_lock(&workers->order_lock);
		if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags)) {
			list_add_tail(&work->order_list,
				      &workers->prio_order_list);
		} else {
			list_add_tail(&work->order_list, &workers->order_list);
		}
		spin_unlock(&workers->order_lock);
	} else {
		INIT_LIST_HEAD(&work->order_list);
	}

	spin_lock_irqsave(&worker->lock, flags);

	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
		list_add_tail(&work->list, &worker->prio_pending);
	else
		list_add_tail(&work->list, &worker->pending);
	check_busy_worker(worker);

	/*
	 * avoid calling into wake_up_process if this thread has already
	 * been kicked
	 */
	if (!worker->working)
		wake = 1;
	worker->working = 1;

	if (wake)
		wake_up_process(worker->task);
	spin_unlock_irqrestore(&worker->lock, flags);
}
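
/*
 * Typical submission sequence (illustrative; my_func and the fs_info
 * field are hypothetical):
 *
 *	work->func = my_func;
 *	work->flags = 0;
 *	btrfs_set_work_high_prio(work);
 *	btrfs_queue_worker(&fs_info->workers, work);
 *
 * The high prio call is optional, but WORK_HIGH_PRIO_BIT is tested as
 * the item is placed on the lists, so it must be set before
 * btrfs_queue_worker to have any effect on this submission.
 */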