#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <asm/atomic.h>

#ifdef CONFIG_ZONE_DMA
#define DMA_ZONE(xx) xx##_DMA,
#else
#define DMA_ZONE(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define DMA32_ZONE(xx) xx##_DMA32,
#else
#define DMA32_ZONE(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define HIGHMEM_ZONE(xx) , xx##_HIGH
#else
#define HIGHMEM_ZONE(xx)
#endif

#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx)
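
/*
 * Illustrative note (editor's example, not part of the original header):
 * with CONFIG_ZONE_DMA and CONFIG_HIGHMEM enabled but CONFIG_ZONE_DMA32
 * disabled,
 *
 *        FOR_ALL_ZONES(PGALLOC)
 *
 * expands to
 *
 *        PGALLOC_DMA, PGALLOC_NORMAL , PGALLOC_HIGH
 *
 * which is why DMA_ZONE() and DMA32_ZONE() carry a trailing comma while
 * HIGHMEM_ZONE() carries a leading one.
 */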

enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
        FOR_ALL_ZONES(PGALLOC),
        PGFREE, PGACTIVATE, PGDEACTIVATE,
        PGFAULT, PGMAJFAULT,
        FOR_ALL_ZONES(PGREFILL),
        FOR_ALL_ZONES(PGSTEAL),
        FOR_ALL_ZONES(PGSCAN_KSWAPD),
        FOR_ALL_ZONES(PGSCAN_DIRECT),
        PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
        PAGEOUTRUN, ALLOCSTALL, PGROTATED,
        NR_VM_EVENT_ITEMS
};

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-CPU counter implementation.
 *
 * Counters should only be incremented; no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
        unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
        __get_cpu_var(vm_event_states).event[item]++;
}

static inline void count_vm_event(enum vm_event_item item)
{
        get_cpu_var(vm_event_states).event[item]++;
        put_cpu();
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
        __get_cpu_var(vm_event_states).event[item] += delta;
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
        get_cpu_var(vm_event_states).event[item] += delta;
        put_cpu();
}

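/*
 * Illustrative usage (editor's example, not part of the original header):
 * count_vm_event() does its own get_cpu_var()/put_cpu(), so it is safe to
 * call with preemption enabled; the __ variants assume the caller already
 * owns the current CPU (preemption or interrupts disabled).  PGFAULT,
 * PGFREE and the delta of 16 are arbitrary example values:
 *
 *        count_vm_event(PGFAULT);
 *        __count_vm_events(PGFREE, 16);
 */
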
extern void all_vm_events(unsigned long *);
#ifdef CONFIG_HOTPLUG
extern void vm_events_fold_cpu(int cpu);
#else
static inline void vm_events_fold_cpu(int cpu)
{
}
#endif

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#define __count_zone_vm_events(item, zone, delta) \
                __count_vm_events(item##_NORMAL - ZONE_NORMAL + \
                zone_idx(zone), delta)

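/*
 * Illustrative note (editor's example, not part of the original header):
 * the per-zone event items generated by FOR_ALL_ZONES() follow the order
 * of the zone indices, so
 *
 *        __count_zone_vm_events(PGREFILL, zone, 1)
 *
 * selects PGREFILL_DMA, PGREFILL_DMA32, PGREFILL_NORMAL or PGREFILL_HIGH
 * by offsetting PGREFILL_NORMAL with zone_idx(zone) - ZONE_NORMAL.
 */
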
/*
 * Zone based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
                                enum zone_stat_item item)
{
        atomic_long_add(x, &zone->vm_stat[item]);
        atomic_long_add(x, &vm_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
        long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
                                enum zone_stat_item item)
{
        long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}

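/*
 * Illustrative note (editor's example, not part of the original header):
 * on SMP the per-CPU differentials may not have been folded back yet, so a
 * momentarily negative global or per-zone sum is clamped to 0 instead of
 * being reported.  A typical read of these approximate counters looks like
 * (NR_FILE_PAGES is just an example item):
 *
 *        unsigned long nr_file = global_page_state(NR_FILE_PAGES);
 *        unsigned long nr_file_in_zone = zone_page_state(zone, NR_FILE_PAGES);
 */
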
#ifdef CONFIG_NUMA
/*
 * Determine the per node value of a stat item. This function
 * is called frequently in a NUMA machine, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
                                enum zone_stat_item item)
{
        struct zone *zones = NODE_DATA(node)->node_zones;

        return
#ifdef CONFIG_ZONE_DMA
                zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
                zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
                zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
                zone_page_state(&zones[ZONE_NORMAL], item);
}
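
/*
 * Illustrative note (editor's example, not part of the original header):
 * node_page_state() simply sums the counters of every zone that the
 * configuration provides on that node, e.g.
 *
 *        unsigned long node_file = node_page_state(0, NR_FILE_PAGES);
 *
 * adds up NR_FILE_PAGES for node 0's DMA, DMA32, HIGHMEM and NORMAL zones,
 * whichever of them are configured in.
 */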

extern void zone_statistics(struct zonelist *, struct zone *);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl,_z) do { } while (0)

#endif /* CONFIG_NUMA */

#define __add_zone_page_state(__z, __i, __d) \
                __mod_zone_page_state(__z, __i, __d)
#define __sub_zone_page_state(__z, __i, __d) \
                __mod_zone_page_state(__z, __i, -(__d))

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

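/*
 * Illustrative note (editor's example, not part of the original header):
 * the add/sub wrappers only negate the delta, so
 *
 *        sub_zone_page_state(zone, NR_FILE_PAGES, nr);
 *
 * is equivalent to mod_zone_page_state(zone, NR_FILE_PAGES, -nr).
 */
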
static inline void zap_zone_vm_stats(struct zone *zone)
{
        memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);
void refresh_vm_stats(void);

#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
                        enum zone_stat_item item, int delta)
{
        zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
        atomic_long_inc(&zone->vm_stat[item]);
        atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
                        enum zone_stat_item item)
{
        __inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
        atomic_long_dec(&zone->vm_stat[item]);
        atomic_long_dec(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
                        enum zone_stat_item item)
{
        atomic_long_dec(&page_zone(page)->vm_stat[item]);
        atomic_long_dec(&vm_stat[item]);
}

/*
 * We only use atomic operations to update counters, so there is no need
 * to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

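/*
 * Illustrative note (editor's example, not part of the original header):
 * because the UP build aliases the interrupt-safe names onto the __
 * variants above, a call such as
 *
 *        inc_zone_page_state(page, NR_FILE_MAPPED);
 *
 * performs two atomic_long_inc() calls and may be used from any context
 * without disabling interrupts.  NR_FILE_MAPPED is an arbitrary example
 * item.
 */
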
static inline void refresh_cpu_vm_stats(int cpu) { }
static inline void refresh_vm_stats(void) { }
#endif /* CONFIG_SMP */

#endif /* _LINUX_VMSTAT_H */