[PATCH] zoned vm counters: conversion of nr_dirty to per zone counter
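This patch converts the global nr_dirty page state counter into the per-zone NR_FILE_DIRTY counter: get_writeback_state() now obtains the dirty and mapped counts by summing per-zone values through global_page_state(), and the dirty accounting in __set_page_dirty_nobuffers(), test_clear_page_dirty() and clear_page_dirty_for_io() uses the zone counter helpers instead of inc_page_state()/dec_page_state(). Below is a minimal sketch of the per-zone counter idea the conversion relies on; zone_counters[], MAX_ZONES, zone_stat_add() and global_state() are invented names for illustration only, not the kernel's data structures or API.

/*
 * Illustrative sketch only: counters kept per zone, summed on demand.
 */
enum zone_stat_item { NR_FILE_DIRTY, NR_FILE_MAPPED, NR_ANON_PAGES, NR_STAT_ITEMS };

#define MAX_ZONES	4

static long zone_counters[MAX_ZONES][NR_STAT_ITEMS];

/* per-zone update, roughly the role of __inc_zone_page_state()/__dec_zone_page_state() */
static void zone_stat_add(int zone, enum zone_stat_item item, long delta)
{
	zone_counters[zone][item] += delta;
}

/* global view, roughly the role of global_page_state(): sum the counter across zones */
static long global_state(enum zone_stat_item item)
{
	long total = 0;
	int zone;

	for (zone = 0; zone < MAX_ZONES; zone++)
		total += zone_counters[zone][item];
	return total;
}

With this layout, wbs->nr_dirty in get_writeback_state() becomes global_page_state(NR_FILE_DIRTY), a sum over the zones rather than a read of a single global counter.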
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 6dcce3a4bbdc6eff21c99607c463bdb15cccd752..da85478300986120ced2ca8fe42afcd89617fed5 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -72,13 +72,12 @@ int dirty_background_ratio = 10;
 int vm_dirty_ratio = 40;
 
 /*
- * The interval between `kupdate'-style writebacks, in centiseconds
- * (hundredths of a second)
+ * The interval between `kupdate'-style writebacks, in jiffies
  */
 int dirty_writeback_interval = 5 * HZ;
 
 /*
- * The longest number of centiseconds for which data is allowed to remain dirty
+ * The longest number of jiffies for which data is allowed to remain dirty
  */
 int dirty_expire_interval = 30 * HZ;
 
@@ -110,9 +109,10 @@ struct writeback_state
 
 static void get_writeback_state(struct writeback_state *wbs)
 {
-       wbs->nr_dirty = read_page_state(nr_dirty);
+       wbs->nr_dirty = global_page_state(NR_FILE_DIRTY);
        wbs->nr_unstable = read_page_state(nr_unstable);
-       wbs->nr_mapped = read_page_state(nr_mapped);
+       wbs->nr_mapped = global_page_state(NR_FILE_MAPPED) +
+                               global_page_state(NR_ANON_PAGES);
        wbs->nr_writeback = read_page_state(nr_writeback);
 }
 
@@ -205,6 +205,7 @@ static void balance_dirty_pages(struct address_space *mapping)
                        .sync_mode      = WB_SYNC_NONE,
                        .older_than_this = NULL,
                        .nr_to_write    = write_chunk,
+                       .range_cyclic   = 1,
                };
 
                get_dirty_limits(&wbs, &background_thresh,
@@ -332,6 +333,7 @@ static void background_writeout(unsigned long _min_pages)
                .older_than_this = NULL,
                .nr_to_write    = 0,
                .nonblocking    = 1,
+               .range_cyclic   = 1,
        };
 
        for ( ; ; ) {
@@ -408,6 +410,7 @@ static void wb_kupdate(unsigned long arg)
                .nr_to_write    = 0,
                .nonblocking    = 1,
                .for_kupdate    = 1,
+               .range_cyclic   = 1,
        };
 
        sync_supers();
@@ -514,14 +517,14 @@ static void set_ratelimit(void)
                ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE;
 }
 
-static int
+static int __cpuinit
 ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
 {
        set_ratelimit();
        return 0;
 }
 
-static struct notifier_block ratelimit_nb = {
+static struct notifier_block __cpuinitdata ratelimit_nb = {
        .notifier_call  = ratelimit_handler,
        .next           = NULL,
 };
@@ -638,7 +641,8 @@ int __set_page_dirty_nobuffers(struct page *page)
                        if (mapping2) { /* Race with truncate? */
                                BUG_ON(mapping2 != mapping);
                                if (mapping_cap_account_dirty(mapping))
-                                       inc_page_state(nr_dirty);
+                                       __inc_zone_page_state(page,
+                                                               NR_FILE_DIRTY);
                                radix_tree_tag_set(&mapping->page_tree,
                                        page_index(page), PAGECACHE_TAG_DIRTY);
                        }
@@ -725,9 +729,9 @@ int test_clear_page_dirty(struct page *page)
                        radix_tree_tag_clear(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_DIRTY);
-                       write_unlock_irqrestore(&mapping->tree_lock, flags);
                        if (mapping_cap_account_dirty(mapping))
-                               dec_page_state(nr_dirty);
+                               __dec_zone_page_state(page, NR_FILE_DIRTY);
+                       write_unlock_irqrestore(&mapping->tree_lock, flags);
                        return 1;
                }
                write_unlock_irqrestore(&mapping->tree_lock, flags);
@@ -758,7 +762,7 @@ int clear_page_dirty_for_io(struct page *page)
        if (mapping) {
                if (TestClearPageDirty(page)) {
                        if (mapping_cap_account_dirty(mapping))
-                               dec_page_state(nr_dirty);
+                               dec_zone_page_state(page, NR_FILE_DIRTY);
                        return 1;
                }
                return 0;
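One detail worth noting in the hunks above, reading the diff rather than the changelog: test_clear_page_dirty() decrements the counter with __dec_zone_page_state() while mapping->tree_lock is still held with interrupts disabled (the decrement is moved ahead of write_unlock_irqrestore() for that reason), so the non-interrupt-safe variant is sufficient there; clear_page_dirty_for_io() holds no such lock and therefore uses the interrupt-safe dec_zone_page_state().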