Slab allocators: consistent ZERO_SIZE_PTR support and NULL result semantics
[deliverable/linux.git] / include / linux / percpu_counter.h
1 #ifndef _LINUX_PERCPU_COUNTER_H
2 #define _LINUX_PERCPU_COUNTER_H
3 /*
4 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
5 *
6 * WARNING: these things are HUGE. 4 kbytes per counter on 32-way P4.
7 */
8
9 #include <linux/spinlock.h>
10 #include <linux/smp.h>
11 #include <linux/list.h>
12 #include <linux/threads.h>
13 #include <linux/percpu.h>
14 #include <linux/types.h>
15
16 #ifdef CONFIG_SMP
17
/*
 * SMP variant: a central s64 count plus per-cpu s32 deltas.  The
 * central value is approximate until the per-cpu deltas are folded in
 * (see percpu_counter_sum()).
 */
struct percpu_counter {
	spinlock_t lock;	/* presumably serialises folding of per-cpu
				 * deltas into ->count — see lib/percpu_counter.c */
	s64 count;		/* approximate global value, read locklessly */
#ifdef CONFIG_HOTPLUG_CPU
	struct list_head list;	/* All percpu_counters are on a list */
#endif
	s32 *counters;		/* per-cpu deltas; allocation not visible here —
				 * presumably set up by percpu_counter_init() */
};
26
/*
 * Per-cpu batch threshold, scaled with NR_CPUS.  Presumably a CPU's
 * local delta is folded into ->count once it reaches this magnitude
 * (percpu_counter_mod() in lib/percpu_counter.c) — larger machines
 * fold less often at the cost of a less accurate approximate read.
 */
#if NR_CPUS >= 16
#define FBC_BATCH	(NR_CPUS*2)
#else
#define FBC_BATCH	(NR_CPUS*4)
#endif
32
/* Implemented in lib/percpu_counter.c (not visible in this header). */
void percpu_counter_init(struct percpu_counter *fbc, s64 amount);
void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_mod(struct percpu_counter *fbc, s32 amount);
s64 percpu_counter_sum(struct percpu_counter *fbc);	/* accurate (folds deltas) */
37
/*
 * Fast, lockless, approximate read: returns only the central count,
 * ignoring any per-cpu deltas not yet folded in.
 */
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}
42
/*
 * It is possible for percpu_counter_read() to return a small negative
 * number for a counter which should never be negative.
 */
48 static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
49 {
50 s64 ret = fbc->count;
51
52 barrier(); /* Prevent reloads of fbc->count */
53 if (ret >= 0)
54 return ret;
55 return 1;
56 }
57
58 #else
59
/*
 * UP variant: a plain s64 — no per-cpu machinery, so reads and sums
 * are exact.
 */
struct percpu_counter {
	s64 count;
};
63
/* Set the initial value.  Nothing to allocate on UP. */
static inline void percpu_counter_init(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
}
68
/* No per-cpu storage exists on UP, so there is nothing to free. */
static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}
72
/*
 * Add @amount to the counter.  preempt_disable() makes the
 * read-modify-write of ->count safe against preemption; note that
 * nothing here disables interrupts, so this does not protect against
 * modification of the same counter from IRQ context.
 */
static inline void
percpu_counter_mod(struct percpu_counter *fbc, s32 amount)
{
	preempt_disable();
	fbc->count += amount;
	preempt_enable();
}
80
/* Exact on UP: there are no per-cpu deltas to miss. */
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}
85
/*
 * On UP ->count is exact, so a counter that should never be negative
 * never is; unlike the SMP version, no clamping is performed here.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	return fbc->count;
}
90
/* On UP the plain read is already exact, so "sum" is just a read. */
static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return percpu_counter_read_positive(fbc);
}
95
96 #endif /* CONFIG_SMP */
97
/* Increment by one: convenience wrapper around percpu_counter_mod(). */
static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_mod(fbc, 1);
}
102
/* Decrement by one: convenience wrapper around percpu_counter_mod(). */
static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_mod(fbc, -1);
}
107
108 #endif /* _LINUX_PERCPU_COUNTER_H */
This page took 0.047469 seconds and 5 git commands to generate.