locking, percpu counters: introduce separate lock classes
lib/percpu_counter.c

/*
 * Fast batching percpu counters.
 */

#include <linux/percpu_counter.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>

#ifdef CONFIG_HOTPLUG_CPU
static LIST_HEAD(percpu_counters);
static DEFINE_MUTEX(percpu_counters_lock);
#endif

/* Reset every cpu's local delta and set the shared count to @amount. */
void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	int cpu;

	spin_lock(&fbc->lock);
	for_each_possible_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		*pcount = 0;
	}
	fbc->count = amount;
	spin_unlock(&fbc->lock);
}
EXPORT_SYMBOL(percpu_counter_set);

/*
 * Add @amount to this cpu's local delta.  Once the delta reaches @batch in
 * either direction, fold it into the shared fbc->count under fbc->lock.
 */
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	s64 count;
	s32 *pcount;
	int cpu = get_cpu();

	pcount = per_cpu_ptr(fbc->counters, cpu);
	count = *pcount + amount;
	if (count >= batch || count <= -batch) {
		spin_lock(&fbc->lock);
		fbc->count += count;
		*pcount = 0;
		spin_unlock(&fbc->lock);
	} else {
		*pcount = count;
	}
	put_cpu();
}
EXPORT_SYMBOL(__percpu_counter_add);
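
/*
 * A minimal sketch of the header-side wrapper most callers use instead of
 * calling __percpu_counter_add() directly; it is not part of this file, and
 * the FBC_BATCH default batch size is assumed from <linux/percpu_counter.h>
 * of this kernel vintage.
 */
static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	__percpu_counter_add(fbc, amount, FBC_BATCH);
}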

/*
 * Add up all the per-cpu counts, return the result.  This is a more accurate
 * but much slower version of percpu_counter_read_positive().  As a side
 * effect it folds the per-cpu deltas back into fbc->count.
 */
s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
	s64 ret;
	int cpu;

	spin_lock(&fbc->lock);
	ret = fbc->count;
	for_each_online_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		ret += *pcount;
		*pcount = 0;
	}
	fbc->count = ret;
	spin_unlock(&fbc->lock);
	return ret;
}
EXPORT_SYMBOL(__percpu_counter_sum);
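
/*
 * For contrast with the slow sum above, a rough sketch of the lock-free
 * reader it approximates; the real percpu_counter_read_positive() lives in
 * <linux/percpu_counter.h> and may differ in detail.  It looks only at the
 * shared fbc->count, so it can be off by roughly batch * num_online_cpus().
 */
static inline s64 percpu_counter_read_positive_sketch(struct percpu_counter *fbc)
{
	s64 ret = fbc->count;

	if (ret >= 0)
		return ret;
	return 0;
}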

/*
 * Initialise @fbc.  @key sets the lockdep class of fbc->lock, so each
 * initialisation site gets its own lock class rather than sharing one.
 */
int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
			  struct lock_class_key *key)
{
	spin_lock_init(&fbc->lock);
	lockdep_set_class(&fbc->lock, key);
	fbc->count = amount;
	fbc->counters = alloc_percpu(s32);
	if (!fbc->counters)
		return -ENOMEM;
#ifdef CONFIG_HOTPLUG_CPU
	mutex_lock(&percpu_counters_lock);
	list_add(&fbc->list, &percpu_counters);
	mutex_unlock(&percpu_counters_lock);
#endif
	return 0;
}
EXPORT_SYMBOL(__percpu_counter_init);
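
/*
 * How the lock_class_key argument is expected to be supplied: a sketch of
 * the percpu_counter_init() wrapper in <linux/percpu_counter.h>, which
 * declares one static key per call site so lockdep keeps the lock classes
 * of unrelated counters separate.  The exact macro may differ; the _sketch
 * name is only for illustration.
 */
#define percpu_counter_init_sketch(fbc, value)				\
	({								\
		static struct lock_class_key __key;			\
									\
		__percpu_counter_init(fbc, value, &__key);		\
	})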

void percpu_counter_destroy(struct percpu_counter *fbc)
{
	if (!fbc->counters)
		return;

	free_percpu(fbc->counters);
	fbc->counters = NULL;
#ifdef CONFIG_HOTPLUG_CPU
	mutex_lock(&percpu_counters_lock);
	list_del(&fbc->list);
	mutex_unlock(&percpu_counters_lock);
#endif
}
EXPORT_SYMBOL(percpu_counter_destroy);
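
/*
 * A small usage sketch tying the API together.  The struct, function and
 * values are hypothetical; percpu_counter_init(), percpu_counter_add() and
 * percpu_counter_sum() are the <linux/percpu_counter.h> wrappers around the
 * functions above.
 */
struct my_stats {
	struct percpu_counter nr_events;	/* hypothetical counter */
};

static int my_stats_demo(struct my_stats *st)
{
	int err = percpu_counter_init(&st->nr_events, 0);

	if (err)
		return err;
	percpu_counter_add(&st->nr_events, 1);	/* cheap, stays per-cpu */
	WARN_ON(percpu_counter_sum(&st->nr_events) < 0); /* slow, accurate */
	percpu_counter_destroy(&st->nr_events);
	return 0;
}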

#ifdef CONFIG_HOTPLUG_CPU
/*
 * When a cpu goes offline, fold its per-cpu delta into fbc->count so the
 * contribution is not lost while the cpu is dead.
 */
static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu;
	struct percpu_counter *fbc;

	if (action != CPU_DEAD)
		return NOTIFY_OK;

	cpu = (unsigned long)hcpu;
	mutex_lock(&percpu_counters_lock);
	list_for_each_entry(fbc, &percpu_counters, list) {
		s32 *pcount;
		unsigned long flags;

		spin_lock_irqsave(&fbc->lock, flags);
		pcount = per_cpu_ptr(fbc->counters, cpu);
		fbc->count += *pcount;
		*pcount = 0;
		spin_unlock_irqrestore(&fbc->lock, flags);
	}
	mutex_unlock(&percpu_counters_lock);
	return NOTIFY_OK;
}

static int __init percpu_counter_startup(void)
{
	hotcpu_notifier(percpu_counter_hotcpu_callback, 0);
	return 0;
}
module_init(percpu_counter_startup);
#endif