mm: vmstat: account per-zone stalls and pages skipped during reclaim
diff --git a/mm/vmstat.c b/mm/vmstat.c
index d17d66e85def590ee9d21681918bb3e4728f44df..5caaf3f7b8abca176ce0cfe8c068eb4781530eda 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -921,27 +921,15 @@ int fragmentation_index(struct zone *zone, unsigned int order)
 const char * const vmstat_text[] = {
        /* enum zone_stat_item counters */
        "nr_free_pages",
-       "nr_alloc_batch",
        "nr_zone_anon_lru",
        "nr_zone_file_lru",
+       "nr_zone_write_pending",
        "nr_mlock",
-       "nr_anon_pages",
-       "nr_mapped",
-       "nr_file_pages",
-       "nr_dirty",
-       "nr_writeback",
        "nr_slab_reclaimable",
        "nr_slab_unreclaimable",
        "nr_page_table_pages",
        "nr_kernel_stack",
-       "nr_unstable",
        "nr_bounce",
-       "nr_vmscan_write",
-       "nr_vmscan_immediate_reclaim",
-       "nr_writeback_temp",
-       "nr_shmem",
-       "nr_dirtied",
-       "nr_written",
 #if IS_ENABLED(CONFIG_ZSMALLOC)
        "nr_zspages",
 #endif
@@ -953,9 +941,6 @@ const char * const vmstat_text[] = {
        "numa_local",
        "numa_other",
 #endif
-       "nr_anon_transparent_hugepages",
-       "nr_shmem_hugepages",
-       "nr_shmem_pmdmapped",
        "nr_free_cma",
 
        /* Node-based counters */
@@ -970,6 +955,21 @@ const char * const vmstat_text[] = {
        "workingset_refault",
        "workingset_activate",
        "workingset_nodereclaim",
+       "nr_anon_pages",
+       "nr_mapped",
+       "nr_file_pages",
+       "nr_dirty",
+       "nr_writeback",
+       "nr_writeback_temp",
+       "nr_shmem",
+       "nr_shmem_hugepages",
+       "nr_shmem_pmdmapped",
+       "nr_anon_transparent_hugepages",
+       "nr_unstable",
+       "nr_vmscan_write",
+       "nr_vmscan_immediate_reclaim",
+       "nr_dirtied",
+       "nr_written",
 
        /* enum writeback_stat_item counters */
        "nr_dirty_threshold",
@@ -983,6 +983,8 @@ const char * const vmstat_text[] = {
        "pswpout",
 
        TEXTS_FOR_ZONES("pgalloc")
+       TEXTS_FOR_ZONES("allocstall")
+       TEXTS_FOR_ZONES("pgskip")
 
        "pgfree",
        "pgactivate",
@@ -1008,7 +1010,6 @@ const char * const vmstat_text[] = {
        "kswapd_low_wmark_hit_quickly",
        "kswapd_high_wmark_hit_quickly",
        "pageoutrun",
-       "allocstall",
 
        "pgrotated",
 
@@ -1631,10 +1632,9 @@ int vmstat_refresh(struct ctl_table *table, int write,
                val = atomic_long_read(&vm_zone_stat[i]);
                if (val < 0) {
                        switch (i) {
-                       case NR_ALLOC_BATCH:
                        case NR_PAGES_SCANNED:
                                /*
-                                * These are often seen to go negative in
+                                * This is often seen to go negative in
                                 * recent kernels, but not to go permanently
                                 * negative.  Whilst it would be nicer not to
                                 * have exceptions, rooting them out would be