#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Light weight per cpu counter implementation.
 *
 * Counters should only be incremented and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/*
 * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
 * local_irq_disable overhead.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
	raw_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	raw_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}

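/*
 * Added usage note (the call site is hypothetical; PGFAULT is a real
 * vm_event_item): fault handling code would bump the counter with
 *
 *	count_vm_event(PGFAULT);
 *
 * The __count_* variants use raw_cpu ops, so they are meant for
 * contexts where preemption is already disabled or where a rare lost
 * increment is acceptable.
 */
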
extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x)     count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x)	   count_vm_event(x)
#define count_vm_tlb_events(x, y)  count_vm_events(x, y)
#else
#define count_vm_tlb_event(x) do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_DEBUG_TLBFLUSH */

#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
		zone_idx(zone), delta)
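/*
 * Added expansion example: __count_zone_vm_events(PGALLOC, z, 1)
 * becomes __count_vm_events(PGALLOC_NORMAL - ZONE_NORMAL + zone_idx(z), 1),
 * i.e. the ## paste plus the zone index offset selects the per-zone
 * PGALLOC_* event item that corresponds to the zone of z.
 */
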
/*
 * Zone based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

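/*
 * Added note: zone_page_state_add() updates the per-zone and the global
 * counter together. In an SMP build it is reached from mm/vmstat.c when
 * a per cpu differential crosses its threshold or is folded; in the UP
 * build below, __mod_zone_page_state() calls it directly for every
 * update.
 */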
static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

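/*
 * Added note: the CONFIG_SMP clamp in the two readers above exists
 * because per cpu deltas that have not been folded yet can make the
 * atomic sum transiently negative; reporting 0 instead of a huge
 * unsigned value is the safe choice.
 */
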
/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all cpus to find the current
 * deltas. There is no synchronization so the result cannot be
 * exactly accurate either.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;

	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}

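/*
 * Added usage note (the caller is hypothetical; NR_FREE_PAGES is a real
 * zone_stat_item): use the snapshot where a stale value could steer a
 * decision wrongly, e.g.
 *
 *	free = zone_page_state_snapshot(zone, NR_FREE_PAGES);
 *
 * and the cheaper zone_page_state() where the bounded per cpu error is
 * acceptable, such as statistics output.
 */
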
#ifdef CONFIG_NUMA
/*
 * Determine the per node value of a stat item. This function
 * is called frequently in a NUMA machine, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
		zone_page_state(&zones[ZONE_MOVABLE], item);
}

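/*
 * Added illustrative call (the caller is hypothetical; numa_node_id()
 * and NR_FILE_PAGES are real): the file page count on the local node
 * could be read as
 *
 *	node_page_state(numa_node_id(), NR_FILE_PAGES);
 */
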
extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z, gfp) do { } while (0)

#endif /* CONFIG_NUMA */

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_zone_state __inc_zone_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_cpu_vm_stats(int cpu) { }
static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }

static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_pageset *pset) { }
#endif /* CONFIG_SMP */

static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}

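/*
 * Added illustrative call (the caller is hypothetical;
 * get_pageblock_migratetype() is a real helper): when returning
 * 1 << order pages to the free lists, something like
 *
 *	__mod_zone_freepage_state(zone, 1 << order,
 *				  get_pageblock_migratetype(page));
 *
 * keeps NR_FREE_CMA_PAGES in step with NR_FREE_PAGES for CMA blocks.
 */
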
extern const char * const vmstat_text[];

#endif /* _LINUX_VMSTAT_H */