[PATCH] zoned vm counters: conversion of nr_dirty to per zone counter
include/linux/vmstat.h
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/config.h>
#include <linux/mmzone.h>
#include <asm/atomic.h>

/*
 * Global page accounting. One instance per CPU. Only unsigned longs are
 * allowed.
 *
 * - Fields can be modified with xxx_page_state and xxx_page_state_zone at
 *   any time safely (which protects the instance from modification by
 *   interrupts).
 * - The __xxx_page_state variants can be used safely when interrupts are
 *   disabled.
 * - The __xxx_page_state variants can also be used if the field is only
 *   modified from process context and protected from preemption, or only
 *   modified from interrupt context. In that case, the field should be
 *   commented here.
 */
struct page_state {
        unsigned long nr_writeback;     /* Pages under writeback */
        unsigned long nr_unstable;      /* NFS unstable pages */
#define GET_PAGE_STATE_LAST nr_unstable

        /*
         * The below are zeroed by get_page_state(). Use get_full_page_state()
         * to add up all these.
         */
        unsigned long pgpgin;           /* Disk reads */
        unsigned long pgpgout;          /* Disk writes */
        unsigned long pswpin;           /* swap reads */
        unsigned long pswpout;          /* swap writes */

        unsigned long pgalloc_high;     /* page allocations */
        unsigned long pgalloc_normal;
        unsigned long pgalloc_dma32;
        unsigned long pgalloc_dma;

        unsigned long pgfree;           /* page freeings */
        unsigned long pgactivate;       /* pages moved inactive->active */
        unsigned long pgdeactivate;     /* pages moved active->inactive */

        unsigned long pgfault;          /* faults (major+minor) */
        unsigned long pgmajfault;       /* faults (major only) */

        unsigned long pgrefill_high;    /* inspected in refill_inactive_zone */
        unsigned long pgrefill_normal;
        unsigned long pgrefill_dma32;
        unsigned long pgrefill_dma;

        unsigned long pgsteal_high;     /* total highmem pages reclaimed */
        unsigned long pgsteal_normal;
        unsigned long pgsteal_dma32;
        unsigned long pgsteal_dma;

        unsigned long pgscan_kswapd_high;   /* total highmem pages scanned */
        unsigned long pgscan_kswapd_normal;
        unsigned long pgscan_kswapd_dma32;
        unsigned long pgscan_kswapd_dma;

        unsigned long pgscan_direct_high;   /* total highmem pages scanned */
        unsigned long pgscan_direct_normal;
        unsigned long pgscan_direct_dma32;
        unsigned long pgscan_direct_dma;

        unsigned long pginodesteal;     /* pages reclaimed via inode freeing */
        unsigned long slabs_scanned;    /* slab objects scanned */
        unsigned long kswapd_steal;     /* pages reclaimed by kswapd */
        unsigned long kswapd_inodesteal; /* reclaimed via kswapd inode freeing */
        unsigned long pageoutrun;       /* kswapd's calls to page reclaim */
        unsigned long allocstall;       /* direct reclaim calls */

        unsigned long pgrotated;        /* pages rotated to tail of the LRU */
        unsigned long nr_bounce;        /* pages for bounce buffers */
};

extern void get_page_state(struct page_state *ret);
extern void get_page_state_node(struct page_state *ret, int node);
extern void get_full_page_state(struct page_state *ret);
extern unsigned long read_page_state_offset(unsigned long offset);
extern void mod_page_state_offset(unsigned long offset, unsigned long delta);
extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);

#define read_page_state(member) \
        read_page_state_offset(offsetof(struct page_state, member))

#define mod_page_state(member, delta) \
        mod_page_state_offset(offsetof(struct page_state, member), (delta))

#define __mod_page_state(member, delta) \
        __mod_page_state_offset(offsetof(struct page_state, member), (delta))

#define inc_page_state(member)          mod_page_state(member, 1UL)
#define dec_page_state(member)          mod_page_state(member, 0UL - 1)
#define add_page_state(member, delta)   mod_page_state(member, (delta))
#define sub_page_state(member, delta)   mod_page_state(member, 0UL - (delta))

#define __inc_page_state(member)        __mod_page_state(member, 1UL)
#define __dec_page_state(member)        __mod_page_state(member, 0UL - 1)
#define __add_page_state(member, delta) __mod_page_state(member, (delta))
#define __sub_page_state(member, delta) __mod_page_state(member, 0UL - (delta))

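/*
 * Usage sketch (editor's addition, not part of the original header):
 * counting a major fault from a context where interrupts may be enabled,
 * then reading the folded value back:
 *
 *      inc_page_state(pgmajfault);
 *      total = read_page_state(pgmajfault);
 *
 * Both expand to the *_page_state_offset() helpers via offsetof() on
 * struct page_state; the __ variants skip the interrupt protection and
 * are only safe once interrupts are already disabled.
 */
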
#define page_state(member) (*__page_state(offsetof(struct page_state, member)))

#define state_zone_offset(zone, member) \
({ \
        unsigned offset; \
        if (is_highmem(zone)) \
                offset = offsetof(struct page_state, member##_high); \
        else if (is_normal(zone)) \
                offset = offsetof(struct page_state, member##_normal); \
        else if (is_dma32(zone)) \
                offset = offsetof(struct page_state, member##_dma32); \
        else \
                offset = offsetof(struct page_state, member##_dma); \
        offset; \
})

#define __mod_page_state_zone(zone, member, delta) \
        do { \
                __mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
        } while (0)

#define mod_page_state_zone(zone, member, delta) \
        do { \
                mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
        } while (0)

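/*
 * Expansion sketch (editor's illustration): for a zone where
 * is_normal(zone) is true,
 *
 *      __mod_page_state_zone(zone, pgrefill, 1);
 *
 * resolves state_zone_offset() to offsetof(struct page_state,
 * pgrefill_normal), so a single call site covers the whole
 * pgrefill_dma/_dma32/_normal/_high family.
 */
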
DECLARE_PER_CPU(struct page_state, page_states);

/*
 * Zone based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
                                enum zone_stat_item item)
{
        atomic_long_add(x, &zone->vm_stat[item]);
        atomic_long_add(x, &vm_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
        long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
                                enum zone_stat_item item)
{
        long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}

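/*
 * Reader-side sketch (editor's addition): with nr_dirty now kept per
 * zone, a caller reads the machine-wide or per zone count directly,
 * using the NR_FILE_DIRTY item this patch series adds to
 * enum zone_stat_item in linux/mmzone.h:
 *
 *      unsigned long dirty = global_page_state(NR_FILE_DIRTY);
 *      unsigned long zone_dirty = zone_page_state(zone, NR_FILE_DIRTY);
 *
 * The clamp to zero above exists because per cpu differentials may not
 * have been folded back yet on SMP, so the sum can transiently go
 * negative.
 */
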
#ifdef CONFIG_NUMA
/*
 * Determine the per node value of a stat item. This function
 * is called frequently in a NUMA machine, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
                                enum zone_stat_item item)
{
        struct zone *zones = NODE_DATA(node)->node_zones;

        return
#ifndef CONFIG_DMA_IS_NORMAL
#if !defined(CONFIG_DMA_IS_DMA32) && BITS_PER_LONG >= 64
                zone_page_state(&zones[ZONE_DMA32], item) +
#endif
                zone_page_state(&zones[ZONE_NORMAL], item) +
#endif
#ifdef CONFIG_HIGHMEM
                zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
                zone_page_state(&zones[ZONE_DMA], item);
}
#else
#define node_page_state(node, item) global_page_state(item)
#endif

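/*
 * Example (editor's sketch): a per node readout simply sums the zones
 * backing that node, falling back to the global count on !NUMA:
 *
 *      unsigned long node_dirty = node_page_state(nid, NR_FILE_DIRTY);
 *
 * The #ifdef maze above omits zones that cannot exist in the current
 * configuration, keeping this frequently called path down to a few
 * atomic reads.
 */
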
#define __add_zone_page_state(__z, __i, __d) \
                __mod_zone_page_state(__z, __i, __d)
#define __sub_zone_page_state(__z, __i, __d) \
                __mod_zone_page_state(__z, __i, -(__d))

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

static inline void zap_zone_vm_stats(struct zone *zone)
{
        memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);
void refresh_vm_stats(void);

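/*
 * Update-path sketch (editor's addition; the bodies live in mm/vmstat.c,
 * not in this header): on SMP the modifiers accumulate deltas in a per
 * cpu differential and fold them into zone->vm_stat[] and vm_stat[] only
 * when a threshold is crossed or refresh_cpu_vm_stats() runs:
 *
 *      __inc_zone_page_state(page, NR_FILE_DIRTY);   (interrupts off)
 *      inc_zone_page_state(page, NR_FILE_DIRTY);     (irq-safe variant)
 *
 * keeping hot-path counter updates off shared cachelines.
 */
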
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
                        enum zone_stat_item item, int delta)
{
        zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_page_state(struct page *page,
                        enum zone_stat_item item)
{
        atomic_long_inc(&page_zone(page)->vm_stat[item]);
        atomic_long_inc(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
                        enum zone_stat_item item)
{
        atomic_long_dec(&page_zone(page)->vm_stat[item]);
        atomic_long_dec(&vm_stat[item]);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

static inline void refresh_cpu_vm_stats(int cpu) { }
static inline void refresh_vm_stats(void) { }
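
/*
 * UP usage note (editor's addition): here
 *
 *      inc_zone_page_state(page, NR_FILE_DIRTY);
 *
 * maps straight onto atomic_long_inc(), so no interrupt disabling and no
 * per cpu fold is needed, which is why the refresh stubs above are empty.
 */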
#endif

#endif /* _LINUX_VMSTAT_H */