[PATCH] zoned vm counters: conversion of nr_dirty to per zone counter
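
With zoned vm counters, a dirty file page is accounted against its own zone through the *_zone_page_state() helpers, and the machine-wide total is read with global_page_state() instead of read_page_state().  A minimal sketch of that interface as used below (the helper names here are invented purely for illustration, not part of this file):

	#include <linux/mm.h>
	#include <linux/vmstat.h>

	/* Illustration only: charge or uncharge one file page as dirty. */
	static void sketch_account_dirty(struct page *page, int dirtied)
	{
		if (dirtied)
			inc_zone_page_state(page, NR_FILE_DIRTY);	/* interrupt-safe variant */
		else
			dec_zone_page_state(page, NR_FILE_DIRTY);
	}

	/* Illustration only: the global value reflects all zones. */
	static unsigned long sketch_total_dirty(void)
	{
		return global_page_state(NR_FILE_DIRTY);
	}

The double-underscore variants used under mapping->tree_lock are the non-interrupt-safe forms, which explains why test_clear_page_dirty() now decrements the counter before write_unlock_irqrestore() rather than after.
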
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 8ccf6f1b1473c1e26e0da73e5f8f95c4b041def9..da85478300986120ced2ca8fe42afcd89617fed5 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -109,9 +109,10 @@ struct writeback_state
 
 static void get_writeback_state(struct writeback_state *wbs)
 {
-       wbs->nr_dirty = read_page_state(nr_dirty);
+       wbs->nr_dirty = global_page_state(NR_FILE_DIRTY);
        wbs->nr_unstable = read_page_state(nr_unstable);
-       wbs->nr_mapped = read_page_state(nr_mapped);
+       wbs->nr_mapped = global_page_state(NR_FILE_MAPPED) +
+                               global_page_state(NR_ANON_PAGES);
        wbs->nr_writeback = read_page_state(nr_writeback);
 }
 
@@ -516,14 +517,14 @@ static void set_ratelimit(void)
                ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE;
 }
 
-static int
+static int __cpuinit
 ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
 {
        set_ratelimit();
        return 0;
 }
 
-static struct notifier_block ratelimit_nb = {
+static struct notifier_block __cpuinitdata ratelimit_nb = {
        .notifier_call  = ratelimit_handler,
        .next           = NULL,
 };
@@ -640,7 +641,8 @@ int __set_page_dirty_nobuffers(struct page *page)
                        if (mapping2) { /* Race with truncate? */
                                BUG_ON(mapping2 != mapping);
                                if (mapping_cap_account_dirty(mapping))
-                                       inc_page_state(nr_dirty);
+                                       __inc_zone_page_state(page,
+                                                               NR_FILE_DIRTY);
                                radix_tree_tag_set(&mapping->page_tree,
                                        page_index(page), PAGECACHE_TAG_DIRTY);
                        }
@@ -727,9 +729,9 @@ int test_clear_page_dirty(struct page *page)
                        radix_tree_tag_clear(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_DIRTY);
-                       write_unlock_irqrestore(&mapping->tree_lock, flags);
                        if (mapping_cap_account_dirty(mapping))
-                               dec_page_state(nr_dirty);
+                               __dec_zone_page_state(page, NR_FILE_DIRTY);
+                       write_unlock_irqrestore(&mapping->tree_lock, flags);
                        return 1;
                }
                write_unlock_irqrestore(&mapping->tree_lock, flags);
@@ -760,7 +762,7 @@ int clear_page_dirty_for_io(struct page *page)
        if (mapping) {
                if (TestClearPageDirty(page)) {
                        if (mapping_cap_account_dirty(mapping))
-                               dec_page_state(nr_dirty);
+                               dec_zone_page_state(page, NR_FILE_DIRTY);
                        return 1;
                }
                return 0;