#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>

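/*
 * How often the per-cpu vmstat differentials get folded back into the
 * global counters; exposed as /proc/sys/vm/stat_interval.
 */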
extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-cpu counter implementation.
 *
 * Counters should only be incremented; no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

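/*
 * The double-underscore variants below use the non-atomic __this_cpu
 * operations and are only safe when the caller cannot migrate to
 * another CPU mid-update (e.g. preemption or interrupts disabled);
 * the plain variants may be used from any context.
 */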
static inline void __count_vm_event(enum vm_event_item item)
{
	__this_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	__this_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}

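/*
 * Illustrative callers: the fault path counts single events with
 *
 *	count_vm_event(PGFAULT);
 *
 * while block I/O accounting adds batches with
 *
 *	count_vm_events(PGPGIN, count);
 *
 * where PGFAULT and PGPGIN come from <linux/vm_event_item.h>.
 */
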
extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x)		count_vm_event(x)
#define count_vm_numa_events(x, y)	count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

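/*
 * Illustrative: the NUMA hinting fault path counts with, e.g.,
 *
 *	count_vm_numa_event(NUMA_HINT_FAULTS);
 *
 * which compiles to nothing when CONFIG_NUMA_BALANCING is off.
 */
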
#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
		zone_idx(zone), delta)

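/*
 * The macro above relies on the FOR_ALL_ZONES event items being declared
 * in zone order, so item##_NORMAL - ZONE_NORMAL + zone_idx(zone) selects
 * the item matching the zone. Illustrative use from the page allocator:
 *
 *	__count_zone_vm_events(PGALLOC, zone, 1 << order);
 */
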
/*
 * Zone based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

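/*
 * Illustrative reads: watermark checks use
 *
 *	zone_page_state(zone, NR_FREE_PAGES);
 *
 * and writeback sizing uses global sums such as
 *
 *	global_page_state(NR_FILE_DIRTY);
 *
 * The clamp to 0 above exists because unfolded per-cpu differentials
 * can make a counter read transiently negative under SMP.
 */
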
/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all cpus to find the current
 * deltas. There is no synchronization so the result cannot be
 * exactly accurate either.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}

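/*
 * Illustrative: zone_watermark_ok_safe() switches to
 *
 *	zone_page_state_snapshot(zone, NR_FREE_PAGES);
 *
 * when the cheap counter is within the per-cpu drift of the watermark,
 * so pending deltas cannot flip the verdict unnoticed.
 */
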
extern unsigned long global_reclaimable_pages(void);

#ifdef CONFIG_NUMA
/*
 * Determine the per node value of a stat item. This function
 * is called frequently in a NUMA machine, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
		zone_page_state(&zones[ZONE_MOVABLE], item);
}

extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z, gfp) do { } while (0)

#endif /* CONFIG_NUMA */

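/*
 * Illustrative: per-node meminfo sums a counter over the node's zones via
 *
 *	node_page_state(nid, NR_FILE_PAGES);
 *
 * which falls back to the global counter on !CONFIG_NUMA builds.
 */
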
#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
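
/*
 * Illustrative: before kswapd sleeps, reclaim tightens the per-cpu
 * drift thresholds with
 *
 *	set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
 *
 * and restores them via calculate_normal_threshold on wakeup, trading
 * counter-update overhead against watermark accuracy.
 */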
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_cpu_vm_stats(int cpu) { }
static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }

static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_pageset *pset) { }
#endif /* CONFIG_SMP */

static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}

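/*
 * Illustrative: the buddy allocator adjusts free-page accounting as pages
 * enter and leave the free lists, e.g.
 *
 *	__mod_zone_freepage_state(zone, -(1 << order), mt);
 *
 * which keeps NR_FREE_CMA_PAGES in step for CMA pageblocks.
 */
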
extern const char * const vmstat_text[];

#endif /* _LINUX_VMSTAT_H */