#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <asm/atomic.h>

#ifdef CONFIG_ZONE_DMA
#define DMA_ZONE(xx) xx##_DMA,
#else
#define DMA_ZONE(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define DMA32_ZONE(xx) xx##_DMA32,
#else
#define DMA32_ZONE(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define HIGHMEM_ZONE(xx) , xx##_HIGH
#else
#define HIGHMEM_ZONE(xx)
#endif

#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE
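
/*
 * For example, with CONFIG_ZONE_DMA, CONFIG_ZONE_DMA32 and CONFIG_HIGHMEM
 * all enabled, FOR_ALL_ZONES(PGALLOC) expands to the item list
 *
 *	PGALLOC_DMA, PGALLOC_DMA32, PGALLOC_NORMAL, PGALLOC_HIGH, PGALLOC_MOVABLE
 *
 * i.e. one item per possible zone, under the same config conditionals and
 * in the same order as enum zone_type.
 */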

enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
		FOR_ALL_ZONES(PGALLOC),
		PGFREE, PGACTIVATE, PGDEACTIVATE,
		PGFAULT, PGMAJFAULT,
		FOR_ALL_ZONES(PGREFILL),
		FOR_ALL_ZONES(PGSTEAL),
		FOR_ALL_ZONES(PGSCAN_KSWAPD),
		FOR_ALL_ZONES(PGSCAN_DIRECT),
		PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
#ifdef CONFIG_HUGETLB_PAGE
		HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
#endif
#ifdef CONFIG_UNEVICTABLE_LRU
		UNEVICTABLE_PGCULLED,	/* culled to noreclaim list */
		UNEVICTABLE_PGSCANNED,	/* scanned for reclaimability */
		UNEVICTABLE_PGRESCUED,	/* rescued from noreclaim list */
		UNEVICTABLE_PGMLOCKED,
		UNEVICTABLE_PGMUNLOCKED,
		UNEVICTABLE_PGCLEARED,	/* on COW, page truncate */
		UNEVICTABLE_PGSTRANDED,	/* unable to isolate on unlock */
		UNEVICTABLE_MLOCKFREED,
#endif
		NR_VM_EVENT_ITEMS
};

extern const struct seq_operations fragmentation_op;
extern const struct seq_operations pagetypeinfo_op;
extern const struct seq_operations zoneinfo_op;
extern const struct seq_operations vmstat_op;
extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-CPU counter implementation.
 *
 * Counters should only be incremented; no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
	__get_cpu_var(vm_event_states).event[item]++;
}

static inline void count_vm_event(enum vm_event_item item)
{
	get_cpu_var(vm_event_states).event[item]++;
	put_cpu();
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	__get_cpu_var(vm_event_states).event[item] += delta;
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	get_cpu_var(vm_event_states).event[item] += delta;
	put_cpu();
}
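
/*
 * Usage sketch (vmstat_count_fault_example is a made-up name for
 * illustration, not part of this header): the plain variants bracket the
 * update with get_cpu_var()/put_cpu() and may therefore be called from
 * preemptible context; the __ variants assume the caller has already
 * disabled preemption (or interrupts).
 */
static inline void vmstat_count_fault_example(int major)
{
	count_vm_event(PGFAULT);	/* preempt-safe on its own */
	if (major)
		count_vm_event(PGMAJFAULT);
}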

extern void all_vm_events(unsigned long *);
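/*
 * E.g. the /proc/vmstat reader fills an array with the counts summed over
 * the online cpus:
 *
 *	unsigned long ev[NR_VM_EVENT_ITEMS];
 *	all_vm_events(ev);
 */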
#ifdef CONFIG_HOTPLUG
extern void vm_events_fold_cpu(int cpu);
#else
static inline void vm_events_fold_cpu(int cpu)
{
}
#endif

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
		zone_idx(zone), delta)
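
/*
 * The index arithmetic above: item##_NORMAL - ZONE_NORMAL rebases onto the
 * first per-zone event item, so adding zone_idx(zone) selects the item
 * matching the zone. E.g. __count_zone_vm_events(PGALLOC, zone, 1) bumps
 * PGALLOC_HIGH for a ZONE_HIGHMEM zone. This works because FOR_ALL_ZONES()
 * lays the items out in the same order, under the same config options, as
 * enum zone_type.
 */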

/*
 * Zone-based page accounting with per-CPU differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}

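/*
 * Under SMP the per-cpu differentials mean a reader can see sums that are
 * transiently off, including slightly negative values; clamp those to zero
 * rather than report an impossible negative page count.
 */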
static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

extern unsigned long global_lru_pages(void);

static inline unsigned long zone_lru_pages(struct zone *zone)
{
	return (zone_page_state(zone, NR_ACTIVE_ANON)
		+ zone_page_state(zone, NR_ACTIVE_FILE)
		+ zone_page_state(zone, NR_INACTIVE_ANON)
		+ zone_page_state(zone, NR_INACTIVE_FILE));
}

#ifdef CONFIG_NUMA
/*
 * Determine the per node value of a stat item. This function
 * is called frequently in a NUMA machine, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
		zone_page_state(&zones[ZONE_MOVABLE], item);
}
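
/*
 * E.g. node_page_state(numa_node_id(), NR_FREE_PAGES) yields the number of
 * free pages on the local node, summed over all of its zones.
 */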

extern void zone_statistics(struct zone *, struct zone *);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z) do { } while (0)

#endif /* CONFIG_NUMA */

#define __add_zone_page_state(__z, __i, __d)	\
		__mod_zone_page_state(__z, __i, __d)
#define __sub_zone_page_state(__z, __i, __d)	\
		__mod_zone_page_state(__z, __i, -(__d))

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

static inline void zap_zone_vm_stats(struct zone *zone)
{
	memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
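/*
 * In the SMP case the __ variants below only update the per-cpu
 * differential and rely on the caller to keep preemption (or interrupts)
 * disabled; the plain variants are interrupt-safe wrappers around them.
 */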
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

static inline void refresh_cpu_vm_stats(int cpu) { }
#endif

#endif /* _LINUX_VMSTAT_H */