include/linux/percpu_counter.h
#ifndef _LINUX_PERCPU_COUNTER_H
#define _LINUX_PERCPU_COUNTER_H
/*
 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
 *
 * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4.
 */
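
/*
 * How it works, in brief (the implementation lives in lib/percpu_counter.c):
 * each counter pairs a central s64 count with a small per-cpu s32 delta.
 * Updates normally touch only the local delta; once a delta's magnitude
 * reaches the batch size it is folded into ->count under ->lock.  Reads of
 * ->count are therefore cheap but approximate, while __percpu_counter_sum()
 * folds in every per-cpu delta for an accurate (and more expensive) answer.
 */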

#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/list.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/types.h>

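/*
 * Illustrative usage from a hypothetical caller (the names are made up and
 * error handling is elided):
 *
 *	struct percpu_counter nr_widgets;
 *
 *	percpu_counter_init(&nr_widgets, 0);
 *	percpu_counter_inc(&nr_widgets);
 *	pr_info("approx %lld, exact %lld\n",
 *		(long long)percpu_counter_read(&nr_widgets),
 *		(long long)percpu_counter_sum(&nr_widgets));
 *	percpu_counter_destroy(&nr_widgets);
 */
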
#ifdef CONFIG_SMP

struct percpu_counter {
	spinlock_t lock;		/* protects ->count */
	s64 count;			/* central, approximate value */
#ifdef CONFIG_HOTPLUG_CPU
	struct list_head list;		/* All percpu_counters are on a list */
#endif
	s32 *counters;			/* per-cpu deltas */
};

extern int percpu_counter_batch;	/* default fold threshold */

int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
			  struct lock_class_key *key);

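/*
 * The static lock_class_key below gives each percpu_counter_init() call
 * site its own lockdep class, so lockdep can tell the resulting locks
 * apart.
 */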
#define percpu_counter_init(fbc, value)					\
	({								\
		static struct lock_class_key __key;			\
									\
		__percpu_counter_init(fbc, value, &__key);		\
	})

void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);	/* accurate, folds all deltas */

static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	__percpu_counter_add(fbc, amount, percpu_counter_batch);
}
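
/*
 * Callers that can tolerate a less accurate percpu_counter_read() may call
 * __percpu_counter_add() with a larger batch of their own to cut lock
 * traffic further.  Illustrative only; the multiplier is made up:
 *
 *	__percpu_counter_add(fbc, amount, 8 * percpu_counter_batch);
 */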

/* Accurate sum, clamped so it never reports a negative value. */
static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	s64 ret = __percpu_counter_sum(fbc);
	return ret < 0 ? 0 : ret;
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return __percpu_counter_sum(fbc);
}

/* Cheap but approximate: per-cpu deltas are not folded in. */
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/*
 * It is possible for percpu_counter_read() to return a small negative
 * number for a counter which should never be negative: a per-cpu delta
 * may be negative while it has not yet been folded into ->count.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	s64 ret = fbc->count;

	barrier();		/* Prevent reloads of fbc->count */
	if (ret >= 0)
		return ret;
	return 1;
}
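
/*
 * Rough accuracy note (an observation about the scheme, not a stated
 * guarantee of this header): each unfolded per-cpu delta stays below the
 * batch size, so a read should not drift from the true value by more than
 * about batch * num_online_cpus().
 */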

#else

struct percpu_counter {
	s64 count;
};

static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
	return 0;
}

static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}

static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
}

/* On UP there is no batching; the batch argument is simply ignored. */
#define __percpu_counter_add(fbc, amount, batch) \
	percpu_counter_add(fbc, amount)

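/*
 * The preempt_disable()/preempt_enable() pair below makes the
 * read-modify-write of ->count atomic with respect to preemption on UP.
 * It does not protect against updates from interrupt context.
 */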
static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	preempt_disable();
	fbc->count += amount;
	preempt_enable();
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	return fbc->count;
}

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	return percpu_counter_read_positive(fbc);
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return percpu_counter_read(fbc);
}

#endif	/* CONFIG_SMP */

static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}

static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}

static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, -amount);
}

#endif /* _LINUX_PERCPU_COUNTER_H */