1 #ifndef _LINUX_PERCPU_COUNTER_H
2 #define _LINUX_PERCPU_COUNTER_H
/*
 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
 *
 * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4.
 */
9 #include <linux/spinlock.h>
10 #include <linux/smp.h>
11 #include <linux/list.h>
12 #include <linux/threads.h>
13 #include <linux/percpu.h>
14 #include <linux/types.h>
15 #include <linux/gfp.h>
19 struct percpu_counter
{
22 #ifdef CONFIG_HOTPLUG_CPU
23 struct list_head list
; /* All percpu_counters are on a list */
25 s32 __percpu
*counters
;
28 extern int percpu_counter_batch
;
30 int __percpu_counter_init(struct percpu_counter
*fbc
, s64 amount
, gfp_t gfp
,
31 struct lock_class_key
*key
);
/*
 * Wrapper that gives each counter its own lockdep class key.  Implemented
 * as a statement expression so it evaluates to __percpu_counter_init()'s
 * return value.
 */
#define percpu_counter_init(fbc, value, gfp)				\
	({								\
		static struct lock_class_key __key;			\
									\
		__percpu_counter_init(fbc, value, gfp, &__key);		\
	})
40 void percpu_counter_destroy(struct percpu_counter
*fbc
);
41 void percpu_counter_set(struct percpu_counter
*fbc
, s64 amount
);
42 void __percpu_counter_add(struct percpu_counter
*fbc
, s64 amount
, s32 batch
);
43 s64
__percpu_counter_sum(struct percpu_counter
*fbc
);
44 int percpu_counter_compare(struct percpu_counter
*fbc
, s64 rhs
);
46 static inline void percpu_counter_add(struct percpu_counter
*fbc
, s64 amount
)
48 __percpu_counter_add(fbc
, amount
, percpu_counter_batch
);
51 static inline s64
percpu_counter_sum_positive(struct percpu_counter
*fbc
)
53 s64 ret
= __percpu_counter_sum(fbc
);
54 return ret
< 0 ? 0 : ret
;
57 static inline s64
percpu_counter_sum(struct percpu_counter
*fbc
)
59 return __percpu_counter_sum(fbc
);
62 static inline s64
percpu_counter_read(struct percpu_counter
*fbc
)
68 * It is possible for the percpu_counter_read() to return a small negative
69 * number for some counter which should never be negative.
72 static inline s64
percpu_counter_read_positive(struct percpu_counter
*fbc
)
76 barrier(); /* Prevent reloads of fbc->count */
82 static inline int percpu_counter_initialized(struct percpu_counter
*fbc
)
84 return (fbc
->counters
!= NULL
);
87 #else /* !CONFIG_SMP */
89 struct percpu_counter
{
93 static inline int percpu_counter_init(struct percpu_counter
*fbc
, s64 amount
,
/* UP destroy: nothing was allocated, nothing to free. */
static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}
104 static inline void percpu_counter_set(struct percpu_counter
*fbc
, s64 amount
)
109 static inline int percpu_counter_compare(struct percpu_counter
*fbc
, s64 rhs
)
111 if (fbc
->count
> rhs
)
113 else if (fbc
->count
< rhs
)
120 percpu_counter_add(struct percpu_counter
*fbc
, s64 amount
)
123 fbc
->count
+= amount
;
128 __percpu_counter_add(struct percpu_counter
*fbc
, s64 amount
, s32 batch
)
130 percpu_counter_add(fbc
, amount
);
133 static inline s64
percpu_counter_read(struct percpu_counter
*fbc
)
139 * percpu_counter is intended to track positive numbers. In the UP case the
140 * number should never be negative.
142 static inline s64
percpu_counter_read_positive(struct percpu_counter
*fbc
)
147 static inline s64
percpu_counter_sum_positive(struct percpu_counter
*fbc
)
149 return percpu_counter_read_positive(fbc
);
152 static inline s64
percpu_counter_sum(struct percpu_counter
*fbc
)
154 return percpu_counter_read(fbc
);
/* UP counters need no allocation, so they are always "initialized". */
static inline int percpu_counter_initialized(struct percpu_counter *fbc)
{
	return 1;
}
162 #endif /* CONFIG_SMP */
/* Increment by one. */
static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}
/* Decrement by one. */
static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}
174 static inline void percpu_counter_sub(struct percpu_counter
*fbc
, s64 amount
)
176 percpu_counter_add(fbc
, -amount
);
179 #endif /* _LINUX_PERCPU_COUNTER_H */