#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-CPU counter implementation.
 *
 * Counters should only be incremented; no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
        unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
        __this_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
        this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
        __this_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
        this_cpu_add(vm_event_states.event[item], delta);
}

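/*
 * Usage sketch (illustrative, not part of this header). The plain
 * variants are safe in any context; the __ variants assume the caller
 * has already disabled preemption (e.g. under a spinlock or in
 * interrupt context) and save the implicit preempt_disable()/enable():
 *
 *      count_vm_event(PGFAULT);
 *      __count_vm_events(PGFREE, 1 << order);
 *
 * PGFAULT and PGFREE come from <linux/vm_event_item.h>; "order" stands
 * in for a hypothetical page order in scope at the call site.
 */
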
extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x)     count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

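/*
 * Illustrative sketch (not kernel code as-is): NUMA balancing paths count
 * hinting faults through these wrappers so the calls compile away on
 * !CONFIG_NUMA_BALANCING builds:
 *
 *      count_vm_numa_event(NUMA_HINT_FAULTS);
 *      count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_migrated);
 *
 * "nr_migrated" is a hypothetical local count.
 */
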
#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x)     count_vm_event(x)
#define count_vm_tlb_events(x, y) count_vm_events(x, y)
#else
#define count_vm_tlb_event(x) do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif

#define __count_zone_vm_events(item, zone, delta) \
                __count_vm_events(item##_NORMAL - ZONE_NORMAL + \
                zone_idx(zone), delta)

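/*
 * How the macro resolves (illustrative sketch): item##_NORMAL names the
 * per-zone event family and zone_idx() offsets into it, relying on the
 * FOR_ALL_ZONES enumeration being contiguous. The page allocator uses it
 * like this when 2^order pages are allocated from a zone:
 *
 *      __count_zone_vm_events(PGALLOC, zone, 1 << order);
 *
 * For a ZONE_HIGHMEM zone this bumps PGALLOC_HIGH, i.e.
 * PGALLOC_NORMAL - ZONE_NORMAL + ZONE_HIGHMEM.
 */
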
/*
 * Zone based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
                                 enum zone_stat_item item)
{
        atomic_long_add(x, &zone->vm_stat[item]);
        atomic_long_add(x, &vm_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
        long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
                                        enum zone_stat_item item)
{
        long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}

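/*
 * Reading the counters is a plain atomic load plus an underflow clamp;
 * an illustrative sketch (not part of this header):
 *
 *      unsigned long free = global_page_state(NR_FREE_PAGES);
 *      unsigned long file = zone_page_state(zone, NR_FILE_PAGES);
 *
 * The clamp matters because unfolded per-cpu diffs can leave the global
 * atomic transiently negative on SMP.
 */
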
/*
 * More accurate version that also considers the currently pending
 * deltas. To get those we must loop over all cpus. There is no
 * synchronization, so the result is still not exact.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
                                        enum zone_stat_item item)
{
        long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
        int cpu;
        for_each_online_cpu(cpu)
                x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

        if (x < 0)
                x = 0;
#endif
        return x;
}

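/*
 * Illustrative sketch: callers making throttling decisions, where a stale
 * low reading could stall progress, pay the per-cpu walk for accuracy:
 *
 *      if (zone_page_state_snapshot(zone, NR_ISOLATED_FILE) > limit)
 *              ...back off...
 *
 * "limit" is a hypothetical threshold.
 */
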
#ifdef CONFIG_NUMA
/*
 * Determine the per node value of a stat item. This function
 * is called frequently on a NUMA machine, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
                                 enum zone_stat_item item)
{
        struct zone *zones = NODE_DATA(node)->node_zones;

        return
#ifdef CONFIG_ZONE_DMA
                zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
                zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
                zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
                zone_page_state(&zones[ZONE_NORMAL], item) +
                zone_page_state(&zones[ZONE_MOVABLE], item);
}

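/*
 * Illustrative usage (not part of this header): a per-node reading is just
 * the sum over that node's zones, e.g.
 *
 *      unsigned long anon = node_page_state(numa_node_id(), NR_ANON_PAGES);
 */
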
extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z, gfp) do { } while (0)

#endif /* CONFIG_NUMA */

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
                                int (*calculate_pressure)(struct zone *));
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
                        enum zone_stat_item item, int delta)
{
        zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
        atomic_long_inc(&zone->vm_stat[item]);
        atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
                        enum zone_stat_item item)
{
        __inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
        atomic_long_dec(&zone->vm_stat[item]);
        atomic_long_dec(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
                        enum zone_stat_item item)
{
        __dec_zone_state(page_zone(page), item);
}

/*
 * We only use atomic operations to update counters, so there is no need
 * to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_cpu_vm_stats(int cpu) { }
static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }

static inline void drain_zonestat(struct zone *zone,
                        struct per_cpu_pageset *pset) { }
#endif /* CONFIG_SMP */

static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
                                             int migratetype)
{
        __mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
        if (is_migrate_cma(migratetype))
                __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}

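/*
 * Illustrative sketch (not part of this header): the buddy allocator keeps
 * NR_FREE_CMA_PAGES in step with NR_FREE_PAGES when pages move on or off
 * the free lists, e.g. when 2^order pages are removed:
 *
 *      __mod_zone_freepage_state(zone, -(1 << order), migratetype);
 *
 * "order" and "migratetype" stand in for values in scope at the call site.
 */
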
extern const char * const vmstat_text[];

#endif /* _LINUX_VMSTAT_H */