writeback: move last_active to bdi
[deliverable/linux.git] / mm / backing-dev.c
index b1dc2d4b9cddc5ef2057ba49a26a9b084cbd0e94..672c17bb32db691c6314a8b9404f6e815b172e30 100644 (file)
@@ -50,8 +50,6 @@ static struct timer_list sync_supers_timer;
 static int bdi_sync_supers(void *);
 static void sync_supers_timer_fn(unsigned long);
 
-static void bdi_add_default_flusher_thread(struct backing_dev_info *bdi);
-
 #ifdef CONFIG_DEBUG_FS
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
@@ -331,8 +329,9 @@ static int bdi_forker_thread(void *ptr)
        set_user_nice(current, 0);
 
        for (;;) {
+               bool fork = false;
                struct task_struct *task;
-               struct backing_dev_info *bdi, *tmp;
+               struct backing_dev_info *bdi;
 
                /*
                 * Temporary measure, we want to make sure we don't see
@@ -348,20 +347,36 @@ static int bdi_forker_thread(void *ptr)
                 * Check if any existing bdi's have dirty data without
                 * a thread registered. If so, set that up.
                 */
-               list_for_each_entry_safe(bdi, tmp, &bdi_list, bdi_list) {
+               list_for_each_entry(bdi, &bdi_list, bdi_list) {
+                       if (!bdi_cap_writeback_dirty(bdi))
+                               continue;
                        if (bdi->wb.task)
                                continue;
                        if (list_empty(&bdi->work_list) &&
                            !bdi_has_dirty_io(bdi))
                                continue;
 
-                       bdi_add_default_flusher_thread(bdi);
+                       WARN(!test_bit(BDI_registered, &bdi->state),
+                            "bdi %p/%s is not registered!\n", bdi, bdi->name);
+
+                       fork = true;
+
+                       /*
+                        * Set the pending bit - if someone tries to
+                        * unregister this bdi, it'll wait on this bit.
+                        */
+                       set_bit(BDI_pending, &bdi->state);
+                       break;
                }
+               spin_unlock_bh(&bdi_lock);
+
+               /* Keep working if default bdi still has things to do */
+               if (!list_empty(&me->bdi->work_list))
+                       __set_current_state(TASK_RUNNING);
 
-               if (list_empty(&bdi_pending_list)) {
+               if (!fork) {
                        unsigned long wait;
 
-                       spin_unlock_bh(&bdi_lock);
                        wait = msecs_to_jiffies(dirty_writeback_interval * 10);
                        if (wait)
                                schedule_timeout(wait);
@@ -373,29 +388,13 @@ static int bdi_forker_thread(void *ptr)
 
                __set_current_state(TASK_RUNNING);
 
-               /*
-                * This is our real job - check for pending entries in
-                * bdi_pending_list, and create the threads that got added
-                */
-               bdi = list_entry(bdi_pending_list.next, struct backing_dev_info,
-                                bdi_list);
-               list_del_init(&bdi->bdi_list);
-               spin_unlock_bh(&bdi_lock);
-
                task = kthread_run(bdi_writeback_thread, &bdi->wb, "flush-%s",
                                   dev_name(bdi->dev));
                if (IS_ERR(task)) {
                        /*
-                        * If thread creation fails, then readd the bdi back to
-                        * the list and force writeout of the bdi from this
-                        * forker thread. That will free some memory and we can
-                        * try again. Add it to the tail so we get a chance to
-                        * flush other bdi's to free memory.
+                        * If thread creation fails, force writeout of the bdi
+                        * from the thread.
                         */
-                       spin_lock_bh(&bdi_lock);
-                       list_add_tail(&bdi->bdi_list, &bdi_pending_list);
-                       spin_unlock_bh(&bdi_lock);
-
                        bdi_flush_io(bdi);
                } else
                        bdi->wb.task = task;
@@ -404,57 +403,6 @@ static int bdi_forker_thread(void *ptr)
        return 0;
 }
 
-static void bdi_add_to_pending(struct rcu_head *head)
-{
-       struct backing_dev_info *bdi;
-
-       bdi = container_of(head, struct backing_dev_info, rcu_head);
-       INIT_LIST_HEAD(&bdi->bdi_list);
-
-       spin_lock(&bdi_lock);
-       list_add_tail(&bdi->bdi_list, &bdi_pending_list);
-       spin_unlock(&bdi_lock);
-
-       /*
-        * We are now on the pending list, wake up bdi_forker_task()
-        * to finish the job and add us back to the active bdi_list
-        */
-       wake_up_process(default_backing_dev_info.wb.task);
-}
-
-/*
- * Add the default flusher thread that gets created for any bdi
- * that has dirty data pending writeout
- */
-static void bdi_add_default_flusher_thread(struct backing_dev_info *bdi)
-{
-       if (!bdi_cap_writeback_dirty(bdi))
-               return;
-
-       if (WARN_ON(!test_bit(BDI_registered, &bdi->state))) {
-               printk(KERN_ERR "bdi %p/%s is not registered!\n",
-                                                       bdi, bdi->name);
-               return;
-       }
-
-       /*
-        * Check with the helper whether to proceed adding a thread. Will only
-        * abort if we two or more simultanous calls to
-        * bdi_add_default_flusher_thread() occured, further additions will
-        * block waiting for previous additions to finish.
-        */
-       if (!test_and_set_bit(BDI_pending, &bdi->state)) {
-               list_del_rcu(&bdi->bdi_list);
-
-               /*
-                * We must wait for the current RCU period to end before
-                * moving to the pending list. So schedule that operation
-                * from an RCU callback.
-                */
-               call_rcu(&bdi->rcu_head, bdi_add_to_pending);
-       }
-}
-
 /*
  * Remove bdi from bdi_list, and ensure that it is no longer visible
  */
@@ -595,7 +543,6 @@ int bdi_init(struct backing_dev_info *bdi)
        bdi->max_ratio = 100;
        bdi->max_prop_frac = PROP_FRAC_BASE;
        spin_lock_init(&bdi->wb_lock);
-       INIT_RCU_HEAD(&bdi->rcu_head);
        INIT_LIST_HEAD(&bdi->bdi_list);
        INIT_LIST_HEAD(&bdi->work_list);
 
This page took 0.027017 seconds and 5 git commands to generate.