#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <asm/atomic.h>

#ifdef CONFIG_ZONE_DMA
#define DMA_ZONE(xx) xx##_DMA,
#else
#define DMA_ZONE(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define DMA32_ZONE(xx) xx##_DMA32,
#else
#define DMA32_ZONE(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define HIGHMEM_ZONE(xx) , xx##_HIGH
#else
#define HIGHMEM_ZONE(xx)
#endif

#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE
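
/*
 * Illustrative expansion (assuming a configuration with CONFIG_ZONE_DMA,
 * CONFIG_ZONE_DMA32 and CONFIG_HIGHMEM all enabled):
 *
 *	FOR_ALL_ZONES(PGALLOC)
 * becomes
 *	PGALLOC_DMA, PGALLOC_DMA32, PGALLOC_NORMAL, PGALLOC_HIGH, PGALLOC_MOVABLE
 *
 * The items are emitted in zone order, skipping zones that are configured
 * out, so that enum arithmetic (see __count_zone_vm_events() below) can map
 * a zone index to its per-zone event item.
 */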

enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
		FOR_ALL_ZONES(PGALLOC),
		PGFREE, PGACTIVATE, PGDEACTIVATE,
		PGFAULT, PGMAJFAULT,
		FOR_ALL_ZONES(PGREFILL),
		FOR_ALL_ZONES(PGSTEAL),
		FOR_ALL_ZONES(PGSCAN_KSWAPD),
		FOR_ALL_ZONES(PGSCAN_DIRECT),
#ifdef CONFIG_NUMA
		PGSCAN_ZONE_RECLAIM_FAILED,
#endif
		PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
		KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
		KSWAPD_SKIP_CONGESTION_WAIT,
		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
#ifdef CONFIG_COMPACTION
		COMPACTBLOCKS, COMPACTPAGES, COMPACTPAGEFAILED,
		COMPACTSTALL, COMPACTFAIL, COMPACTSUCCESS,
#endif
#ifdef CONFIG_HUGETLB_PAGE
		HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
#endif
		UNEVICTABLE_PGCULLED,	/* culled to noreclaim list */
		UNEVICTABLE_PGSCANNED,	/* scanned for reclaimability */
		UNEVICTABLE_PGRESCUED,	/* rescued from noreclaim list */
		UNEVICTABLE_PGMLOCKED,
		UNEVICTABLE_PGMUNLOCKED,
		UNEVICTABLE_PGCLEARED,	/* on COW, page truncate */
		UNEVICTABLE_PGSTRANDED,	/* unable to isolate on unlock */
		UNEVICTABLE_MLOCKFREED,
		NR_VM_EVENT_ITEMS
};
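
/*
 * Note: with CONFIG_VM_EVENT_COUNTERS enabled these counters are exported
 * to userspace as lines of /proc/vmstat; the lowercase names come from
 * vmstat_text[] in mm/vmstat.c.
 */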

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-CPU counter implementation.
 *
 * Counters should only be incremented; no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
	__this_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	__this_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}
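
/*
 * Usage sketch (illustrative; `major' is a hypothetical flag, not part of
 * this header). count_vm_event() is safe from any context;
 * __count_vm_event() uses the cheaper __this_cpu_inc() and assumes the
 * caller has already ruled out preemption/interrupt races:
 *
 *	count_vm_event(PGFAULT);
 *	if (major)
 *		count_vm_event(PGMAJFAULT);
 */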

extern void all_vm_events(unsigned long *);
#ifdef CONFIG_HOTPLUG
extern void vm_events_fold_cpu(int cpu);
#else
static inline void vm_events_fold_cpu(int cpu)
{
}
#endif

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#define __count_zone_vm_events(item, zone, delta) \
	__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
			zone_idx(zone), delta)

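/*
 * Illustrative expansion: for a highmem zone,
 *
 *	__count_zone_vm_events(PGALLOC, zone, 1)
 * becomes
 *	__count_vm_events(PGALLOC_NORMAL - ZONE_NORMAL + ZONE_HIGHMEM, 1)
 *
 * which resolves to PGALLOC_HIGH. This only works because FOR_ALL_ZONES()
 * emits the per-zone event items in the same order as the zone enum, with
 * no gaps for zones that are configured out.
 */
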
/*
 * Zone-based page accounting with per-CPU differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
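
/*
 * Usage sketch (illustrative): reading a counter is a plain atomic read,
 * e.g.
 *
 *	unsigned long free = global_page_state(NR_FREE_PAGES);
 *	unsigned long zone_free = zone_page_state(zone, NR_FREE_PAGES);
 *
 * On SMP the clamping above matters: per-CPU differentials may not have
 * been folded back yet, so the summed value can be transiently negative
 * even though the real count never is.
 */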

extern unsigned long global_reclaimable_pages(void);
extern unsigned long zone_reclaimable_pages(struct zone *zone);

#ifdef CONFIG_NUMA
/*
 * Determine the per-node value of a stat item. This function
 * is called frequently in a NUMA machine, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
		zone_page_state(&zones[ZONE_MOVABLE], item);
}
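
/*
 * Illustrative use: summing a stat across the local node, e.g.
 *
 *	unsigned long file = node_page_state(numa_node_id(), NR_FILE_PAGES);
 *
 * The sum is open-coded per configured zone rather than looped so that
 * only zones which actually exist in this configuration are read.
 */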

extern void zone_statistics(struct zone *, struct zone *);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl,_z) do { } while (0)

#endif /* CONFIG_NUMA */

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

static inline void zap_zone_vm_stats(struct zone *zone)
{
	memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);
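
/*
 * Calling-convention sketch (illustrative; `nr' is a hypothetical page
 * count). The plain variants disable interrupts internally and may be
 * used from any context; the __ variants are cheaper but assume the
 * caller already runs with interrupts disabled:
 *
 *	mod_zone_page_state(zone, NR_FILE_PAGES, -nr);	(any context)
 *	__inc_zone_page_state(page, NR_MLOCK);		(irqs already off)
 */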
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single-processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

/*
 * We only use atomic operations to update counters, so there is no need
 * to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

static inline void refresh_cpu_vm_stats(int cpu) { }
#endif

#endif /* _LINUX_VMSTAT_H */