#define pr_fmt(fmt) "%s: " fmt "\n", __func__

#include <linux/kernel.h>
#include <linux/percpu-refcount.h>

/*
 * Initially, a percpu refcount is just a set of percpu counters; we don't try
 * to detect the ref hitting 0, which means that get/put can just increment or
 * decrement the local counter. Note that the counter on a particular cpu can
 * (and will) wrap - this is fine; when we go to shut down, the percpu counters
 * will all sum to the correct value.
 *
 * (More precisely: because modular arithmetic is commutative, the sum of all
 * the pcpu_count vars will be equal to what it would have been if all the gets
 * and puts were done to a single integer, even if some of the percpu integers
 * overflow or underflow.)
 *
 * The real trick to implementing percpu refcounts is shutdown. We can't detect
 * the ref hitting 0 on every put - this would require global synchronization
 * and defeat the whole purpose of using percpu refs.
 *
 * What we do is require the user to keep track of the initial refcount; we know
 * the ref can't hit 0 before the user drops the initial ref, so as long as we
 * convert to non-percpu mode before the initial ref is dropped, everything
 * works.
 *
 * Converting to non-percpu mode is done with some RCUish stuff in
 * percpu_ref_kill(). Additionally, we need a bias value so that the atomic_t
 * can't hit 0 before we've added up all the percpu refs.
 */

#define PCPU_COUNT_BIAS	(1U << 31)

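/*
 * Illustrative lifecycle (a sketch with hypothetical names, not code from
 * this file): a user embeds a percpu_ref in an object, implicitly holds the
 * initial ref from percpu_ref_init(), and drops it via percpu_ref_kill() at
 * shutdown, after which @release runs once all other refs are gone.
 *
 *	struct foo {
 *		struct percpu_ref ref;
 *	};
 *
 *	static void foo_release(struct percpu_ref *ref)
 *	{
 *		kfree(container_of(ref, struct foo, ref));
 *	}
 *
 *	percpu_ref_init(&foo->ref, foo_release);
 *	percpu_ref_get(&foo->ref);	(cheap, percpu fast path)
 *	percpu_ref_put(&foo->ref);
 *	percpu_ref_kill(&foo->ref);	(drops the initial ref)
 */
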
/**
 * percpu_ref_init - initialize a percpu refcount
 * @ref: percpu_ref to initialize
 * @release: function which will be called when refcount hits 0
 *
 * Initializes the refcount in single atomic counter mode with a refcount of 1;
 * analogous to atomic_set(ref, 1).
 *
 * Note that @release must not sleep - it may potentially be called from RCU
 * callback context by percpu_ref_kill().
 */
int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release)
{
	atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS);

	ref->pcpu_count = alloc_percpu(unsigned);
	if (!ref->pcpu_count)
		return -ENOMEM;

	ref->release = release;
	return 0;
}
EXPORT_SYMBOL_GPL(percpu_ref_init);

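/*
 * For context, the get/put fast paths live in
 * include/linux/percpu-refcount.h. Roughly (a paraphrase, not a verbatim
 * copy of the header), percpu_ref_get() does:
 *
 *	rcu_read_lock_sched();
 *	if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR))
 *		__this_cpu_inc(*pcpu_count);
 *	else
 *		atomic_inc(&ref->count);
 *	rcu_read_unlock_sched();
 *
 * i.e. a plain percpu increment until the ref has been killed, after which
 * gets and puts fall back to the shared atomic_t.
 */
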
/**
 * percpu_ref_cancel_init - cancel percpu_ref_init()
 * @ref: percpu_ref to cancel init for
 *
 * Once a percpu_ref is initialized, its destruction is initiated by
 * percpu_ref_kill() and completes asynchronously, which can be painful to
 * do when destroying a half-constructed object in an init failure path.
 *
 * This function destroys @ref without invoking @ref->release, and the
 * memory area containing it can be freed immediately on return. To
 * prevent accidental misuse, it's required that @ref has gone through
 * percpu_ref_init(), whether it succeeded or not, but has never been used.
 *
 * The weird name and usage restriction are to prevent people from using
 * this function by mistake for normal shutdown instead of
 * percpu_ref_kill().
 */
void percpu_ref_cancel_init(struct percpu_ref *ref)
{
	unsigned __percpu *pcpu_count = ref->pcpu_count;
	int cpu;

	WARN_ON_ONCE(atomic_read(&ref->count) != 1 + PCPU_COUNT_BIAS);

	if (pcpu_count) {
		for_each_possible_cpu(cpu)
			WARN_ON_ONCE(*per_cpu_ptr(pcpu_count, cpu));
		free_percpu(ref->pcpu_count);
	}
}
EXPORT_SYMBOL_GPL(percpu_ref_cancel_init);

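/*
 * Illustrative init-failure path (hypothetical caller, not code from this
 * file), showing why percpu_ref_cancel_init() exists: the caller can bail
 * out synchronously without waiting for an RCU grace period.
 *
 *	struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);
 *
 *	if (!foo)
 *		return NULL;
 *	if (percpu_ref_init(&foo->ref, foo_release))
 *		goto err_free;
 *	if (foo_setup_rest(foo))
 *		goto err_cancel;
 *	return foo;
 *
 * err_cancel:
 *	percpu_ref_cancel_init(&foo->ref);
 * err_free:
 *	kfree(foo);
 *	return NULL;
 */
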
static void percpu_ref_kill_rcu(struct rcu_head *rcu)
{
	struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
	unsigned __percpu *pcpu_count = ref->pcpu_count;
	unsigned count = 0;
	int cpu;

	/* Mask out PCPU_REF_DEAD */
	pcpu_count = (unsigned __percpu *)
		(((unsigned long) pcpu_count) & ~PCPU_STATUS_MASK);

	for_each_possible_cpu(cpu)
		count += *per_cpu_ptr(pcpu_count, cpu);

	free_percpu(pcpu_count);

	pr_debug("global %i pcpu %i", atomic_read(&ref->count), (int) count);

	/*
	 * It's crucial that we sum the percpu counters _before_ adding the sum
	 * to &ref->count; since gets could be happening on one cpu while puts
	 * happen on another, adding a single cpu's count could cause
	 * @ref->count to hit 0 before we've got a consistent value - but the
	 * sum of all the counts will be consistent and correct.
	 *
	 * Subtracting the bias value then has to happen _after_ adding count to
	 * &ref->count; we need the bias value to prevent &ref->count from
	 * reaching 0 before we add the percpu counts. But doing it at the same
	 * time is equivalent and saves us atomic operations:
	 */
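
	/*
	 * Worked example with small, hypothetical numbers: suppose the percpu
	 * counters saw 5 gets and 2 puts, so they sum to 3, and nothing has
	 * run in atomic mode yet, so &ref->count is still PCPU_COUNT_BIAS + 1.
	 * atomic_add(3 - PCPU_COUNT_BIAS) then leaves &ref->count == 4: the
	 * initial ref plus the 3 outstanding gets, exactly as if a single
	 * integer had been used all along.
	 */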

	atomic_add((int) count - PCPU_COUNT_BIAS, &ref->count);

	/* @ref is viewed as dead on all CPUs, send out kill confirmation */
	if (ref->confirm_kill)
		ref->confirm_kill(ref);

	/*
	 * Now we're in single atomic_t mode with a consistent refcount, so it's
	 * safe to drop our initial ref:
	 */
	percpu_ref_put(ref);
}

/**
 * percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation
 * @ref: percpu_ref to kill
 * @confirm_kill: optional confirmation callback
 *
 * Equivalent to percpu_ref_kill() but also schedules kill confirmation if
 * @confirm_kill is not NULL. @confirm_kill, which may not block, will be
 * called after @ref is seen as dead from all CPUs - all further
 * invocations of percpu_ref_tryget() will fail. See percpu_ref_tryget()
 * for more details.
 *
 * Due to the way percpu_ref is implemented, @confirm_kill will be called
 * after at least one full RCU grace period has passed but this is an
 * implementation detail and callers must not depend on it.
 */
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_kill)
{
	WARN_ONCE(REF_STATUS(ref->pcpu_count) == PCPU_REF_DEAD,
		  "percpu_ref_kill() called more than once!\n");

	ref->pcpu_count = (unsigned __percpu *)
		(((unsigned long) ref->pcpu_count)|PCPU_REF_DEAD);
	ref->confirm_kill = confirm_kill;

	call_rcu_sched(&ref->rcu, percpu_ref_kill_rcu);
}
EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
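
/*
 * Illustrative use of the confirmation callback (hypothetical caller, not
 * code from this file): @confirm_kill may not block, so it just signals a
 * completion that the killing thread waits on; once it has run,
 * percpu_ref_tryget() can no longer succeed on any CPU.
 *
 *	static void foo_ref_dead(struct percpu_ref *ref)
 *	{
 *		struct foo *foo = container_of(ref, struct foo, ref);
 *
 *		complete(&foo->ref_dead_completion);
 *	}
 *
 *	percpu_ref_kill_and_confirm(&foo->ref, foo_ref_dead);
 *	wait_for_completion(&foo->ref_dead_completion);
 */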