Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * 2002-10-18 written by Jim Houston jim.houston@ccur.com | |
3 | * Copyright (C) 2002 by Concurrent Computer Corporation | |
4 | * Distributed under the GNU GPL license version 2. | |
5 | * | |
6 | * Modified by George Anzinger to reuse immediately and to use | |
7 | * find bit instructions. Also removed _irq on spinlocks. | |
8 | * | |
3219b3b7 ND |
9 | * Modified by Nadia Derbey to make it RCU safe. |
10 | * | |
e15ae2dd | 11 | * Small id to pointer translation service. |
1da177e4 | 12 | * |
e15ae2dd | 13 | * It uses a radix tree like structure as a sparse array indexed |
1da177e4 | 14 | * by the id to obtain the pointer. The bitmap makes allocating |
e15ae2dd | 15 | * a new id quick. |
1da177e4 LT |
16 | * |
17 | * You call it to allocate an id (an int) and associate with that id a | |
18 | * pointer or whatever, we treat it as a (void *). You can pass this | |
19 | * id to a user for him to pass back at a later time. You then pass | |
20 | * that id to this code and it returns your pointer. | |
21 | ||
e15ae2dd | 22 | * You can release ids at any time. When all ids are released, most of |
125c4c70 | 23 | * the memory is returned (we keep MAX_IDR_FREE idr_layers) in a local pool so we |
e15ae2dd | 24 | * don't need to go to the memory "store" during an id allocate, just |
1da177e4 LT |
25 | * so you don't need to be too concerned about locking and conflicts |
26 | * with the slab allocator. | |
27 | */ | |
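
The allocate/lookup/release cycle described above, as a minimal sketch. Everything here (`my_objects`, `struct my_obj`, starting ids at 1, the GFP_KERNEL flag) is an illustrative assumption rather than part of this file, and the caller is assumed to serialize the modifying calls as the idr API requires.

```c
#include <linux/idr.h>
#include <linux/gfp.h>

static DEFINE_IDR(my_objects);		/* hypothetical idr instance */

struct my_obj {
	int id;
	/* ... payload ... */
};

/* Allocate an id >= 1 and associate it with @obj. */
static int my_obj_register(struct my_obj *obj)
{
	int id = idr_alloc(&my_objects, obj, 1, 0, GFP_KERNEL);

	if (id < 0)
		return id;		/* -ENOMEM or -ENOSPC */
	obj->id = id;
	return 0;
}

/* Translate an id handed back by a user into the stored pointer. */
static struct my_obj *my_obj_lookup(int id)
{
	return idr_find(&my_objects, id);
}

/* Release the id; freeing the object itself is up to the caller. */
static void my_obj_unregister(struct my_obj *obj)
{
	idr_remove(&my_objects, obj->id);
}
```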
28 | ||
29 | #ifndef TEST // to test in user space... | |
30 | #include <linux/slab.h> | |
31 | #include <linux/init.h> | |
8bc3bcc9 | 32 | #include <linux/export.h> |
1da177e4 | 33 | #endif |
5806f07c | 34 | #include <linux/err.h> |
1da177e4 LT |
35 | #include <linux/string.h> |
36 | #include <linux/idr.h> | |
88eca020 | 37 | #include <linux/spinlock.h> |
d5c7409f TH |
38 | #include <linux/percpu.h> |
39 | #include <linux/hardirq.h> | |
1da177e4 | 40 | |
e8c8d1bc TH |
41 | #define MAX_IDR_SHIFT (sizeof(int) * 8 - 1) |
42 | #define MAX_IDR_BIT (1U << MAX_IDR_SHIFT) | |
43 | ||
44 | /* Leave the possibility of an incomplete final layer */ | |
45 | #define MAX_IDR_LEVEL ((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS) | |
46 | ||
47 | /* Number of id_layer structs to leave in free list */ | |
48 | #define MAX_IDR_FREE (MAX_IDR_LEVEL * 2) | |
49 | ||
e18b890b | 50 | static struct kmem_cache *idr_layer_cache; |
d5c7409f TH |
51 | static DEFINE_PER_CPU(struct idr_layer *, idr_preload_head); |
52 | static DEFINE_PER_CPU(int, idr_preload_cnt); | |
88eca020 | 53 | static DEFINE_SPINLOCK(simple_ida_lock); |
1da177e4 | 54 | |
326cf0f0 TH |
55 | /* the maximum ID which can be allocated given idr->layers */ |
56 | static int idr_max(int layers) | |
57 | { | |
58 | int bits = min_t(int, layers * IDR_BITS, MAX_IDR_SHIFT); | |
59 | ||
60 | return (1 << bits) - 1; | |
61 | } | |
62 | ||
54616283 TH |
63 | /* |
64 | * Prefix mask for an idr_layer at @layer. For layer 0, the prefix mask is | |
65 | * all bits except for the lower IDR_BITS. For layer 1, 2 * IDR_BITS, and | |
66 | * so on. | |
67 | */ | |
68 | static int idr_layer_prefix_mask(int layer) | |
69 | { | |
70 | return ~idr_max(layer + 1); | |
71 | } | |
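
A worked example of the two helpers above may help; it assumes IDR_BITS == 8 (and hence IDR_SIZE == 256), a value that comes from include/linux/idr.h rather than from this file.

```c
/*
 * Assuming IDR_BITS == 8:
 *
 *   idr_max(1) == (1 <<  8) - 1 == 255         one layer covers ids 0..255
 *   idr_max(2) == (1 << 16) - 1 == 65535       two layers cover ids 0..65535
 *   idr_max(4) == (1 << 31) - 1 == 0x7fffffff  capped by MAX_IDR_SHIFT
 *
 *   idr_layer_prefix_mask(0) == ~idr_max(1) == ~0xff    (0xffffff00)
 *   idr_layer_prefix_mask(1) == ~idr_max(2) == ~0xffff  (0xffff0000)
 *
 * so the ->prefix of a layer-0 node identifies which block of 256
 * consecutive ids that node covers.
 */
```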
72 | ||
4ae53789 | 73 | static struct idr_layer *get_from_free_list(struct idr *idp) |
1da177e4 LT |
74 | { |
75 | struct idr_layer *p; | |
c259cc28 | 76 | unsigned long flags; |
1da177e4 | 77 | |
c259cc28 | 78 | spin_lock_irqsave(&idp->lock, flags); |
1da177e4 LT |
79 | if ((p = idp->id_free)) { |
80 | idp->id_free = p->ary[0]; | |
81 | idp->id_free_cnt--; | |
82 | p->ary[0] = NULL; | |
83 | } | |
c259cc28 | 84 | spin_unlock_irqrestore(&idp->lock, flags); |
1da177e4 LT |
85 | return(p); |
86 | } | |
87 | ||
d5c7409f TH |
88 | /** |
89 | * idr_layer_alloc - allocate a new idr_layer | |
90 | * @gfp_mask: allocation mask | |
91 | * @layer_idr: optional idr to allocate from | |
92 | * | |
93 | * If @layer_idr is %NULL, directly allocate one using @gfp_mask or fetch | |
94 | * one from the per-cpu preload buffer. If @layer_idr is not %NULL, fetch | |
95 | * an idr_layer from @idr->id_free. | |
96 | * | |
97 | * @layer_idr is to maintain backward compatibility with the old alloc | |
98 | * interface - idr_pre_get() and idr_get_new*() - and will be removed | |
99 | * together with per-pool preload buffer. | |
100 | */ | |
101 | static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr) | |
102 | { | |
103 | struct idr_layer *new; | |
104 | ||
105 | /* this is the old path, bypass to get_from_free_list() */ | |
106 | if (layer_idr) | |
107 | return get_from_free_list(layer_idr); | |
108 | ||
109 | /* try to allocate directly from kmem_cache */ | |
110 | new = kmem_cache_zalloc(idr_layer_cache, gfp_mask); | |
111 | if (new) | |
112 | return new; | |
113 | ||
114 | /* | |
115 | * Try to fetch one from the per-cpu preload buffer if in process | |
116 | * context. See idr_preload() for details. | |
117 | */ | |
118 | if (in_interrupt()) | |
119 | return NULL; | |
120 | ||
121 | preempt_disable(); | |
122 | new = __this_cpu_read(idr_preload_head); | |
123 | if (new) { | |
124 | __this_cpu_write(idr_preload_head, new->ary[0]); | |
125 | __this_cpu_dec(idr_preload_cnt); | |
126 | new->ary[0] = NULL; | |
127 | } | |
128 | preempt_enable(); | |
129 | return new; | |
130 | } | |
131 | ||
cf481c20 ND |
132 | static void idr_layer_rcu_free(struct rcu_head *head) |
133 | { | |
134 | struct idr_layer *layer; | |
135 | ||
136 | layer = container_of(head, struct idr_layer, rcu_head); | |
137 | kmem_cache_free(idr_layer_cache, layer); | |
138 | } | |
139 | ||
0ffc2a9c | 140 | static inline void free_layer(struct idr *idr, struct idr_layer *p) |
cf481c20 | 141 | { |
0ffc2a9c TH |
142 | if (idr->hint && idr->hint == p) |
143 | RCU_INIT_POINTER(idr->hint, NULL); | |
cf481c20 ND |
144 | call_rcu(&p->rcu_head, idr_layer_rcu_free); |
145 | } | |
146 | ||
1eec0056 | 147 | /* only called when idp->lock is held */ |
4ae53789 | 148 | static void __move_to_free_list(struct idr *idp, struct idr_layer *p) |
1eec0056 SR |
149 | { |
150 | p->ary[0] = idp->id_free; | |
151 | idp->id_free = p; | |
152 | idp->id_free_cnt++; | |
153 | } | |
154 | ||
4ae53789 | 155 | static void move_to_free_list(struct idr *idp, struct idr_layer *p) |
1da177e4 | 156 | { |
c259cc28 RD |
157 | unsigned long flags; |
158 | ||
1da177e4 LT |
159 | /* |
160 | * Depends on the return element being zeroed. | |
161 | */ | |
c259cc28 | 162 | spin_lock_irqsave(&idp->lock, flags); |
4ae53789 | 163 | __move_to_free_list(idp, p); |
c259cc28 | 164 | spin_unlock_irqrestore(&idp->lock, flags); |
1da177e4 LT |
165 | } |
166 | ||
e33ac8bd TH |
167 | static void idr_mark_full(struct idr_layer **pa, int id) |
168 | { | |
169 | struct idr_layer *p = pa[0]; | |
170 | int l = 0; | |
171 | ||
1d9b2e1e | 172 | __set_bit(id & IDR_MASK, p->bitmap); |
e33ac8bd TH |
173 | /* |
174 | * If this layer is full mark the bit in the layer above to | |
175 | * show that this part of the radix tree is full. This may | |
176 | * complete the layer above and require walking up the radix | |
177 | * tree. | |
178 | */ | |
1d9b2e1e | 179 | while (bitmap_full(p->bitmap, IDR_SIZE)) { |
e33ac8bd TH |
180 | if (!(p = pa[++l])) |
181 | break; | |
182 | id = id >> IDR_BITS; | |
1d9b2e1e | 183 | __set_bit((id & IDR_MASK), p->bitmap); |
e33ac8bd TH |
184 | } |
185 | } | |
186 | ||
1da177e4 | 187 | /** |
56083ab1 | 188 | * idr_pre_get - reserve resources for idr allocation |
1da177e4 LT |
189 | * @idp: idr handle |
190 | * @gfp_mask: memory allocation flags | |
191 | * | |
066a9be6 NA |
192 | * This function should be called prior to calling the idr_get_new* functions. |
193 | * It preallocates enough memory to satisfy the worst possible allocation. The | |
194 | * caller should pass in GFP_KERNEL if possible. This of course requires that | |
195 | * no spinning locks be held. | |
1da177e4 | 196 | * |
56083ab1 RD |
197 | * If the system is REALLY out of memory this function returns %0, |
198 | * otherwise %1. | |
1da177e4 | 199 | */ |
fd4f2df2 | 200 | int idr_pre_get(struct idr *idp, gfp_t gfp_mask) |
1da177e4 | 201 | { |
125c4c70 | 202 | while (idp->id_free_cnt < MAX_IDR_FREE) { |
1da177e4 | 203 | struct idr_layer *new; |
5b019e99 | 204 | new = kmem_cache_zalloc(idr_layer_cache, gfp_mask); |
e15ae2dd | 205 | if (new == NULL) |
1da177e4 | 206 | return (0); |
4ae53789 | 207 | move_to_free_list(idp, new); |
1da177e4 LT |
208 | } |
209 | return 1; | |
210 | } | |
211 | EXPORT_SYMBOL(idr_pre_get); | |
212 | ||
12d1b439 TH |
213 | /** |
214 | * sub_alloc - try to allocate an id without growing the tree depth | |
215 | * @idp: idr handle | |
216 | * @starting_id: id to start search at | |
217 | * @id: pointer to the allocated handle | |
218 | * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer | |
d5c7409f TH |
219 | * @gfp_mask: allocation mask for idr_layer_alloc() |
220 | * @layer_idr: optional idr passed to idr_layer_alloc() | |
12d1b439 TH |
221 | * |
222 | * Allocate an id in range [@starting_id, INT_MAX] from @idp without | |
223 | * growing its depth. Returns | |
224 | * | |
225 | * the allocated id >= 0 if successful, | |
226 | * -EAGAIN if the tree needs to grow for allocation to succeed, | |
227 | * -ENOSPC if the id space is exhausted, | |
228 | * -ENOMEM if more idr_layers need to be allocated. | |
229 | */ | |
d5c7409f TH |
230 | static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa, |
231 | gfp_t gfp_mask, struct idr *layer_idr) | |
1da177e4 LT |
232 | { |
233 | int n, m, sh; | |
234 | struct idr_layer *p, *new; | |
7aae6dd8 | 235 | int l, id, oid; |
1da177e4 LT |
236 | |
237 | id = *starting_id; | |
7aae6dd8 | 238 | restart: |
1da177e4 LT |
239 | p = idp->top; |
240 | l = idp->layers; | |
241 | pa[l--] = NULL; | |
242 | while (1) { | |
243 | /* | |
244 | * We run around this while loop until we reach the leaf node... |
245 | */ | |
246 | n = (id >> (IDR_BITS*l)) & IDR_MASK; | |
1d9b2e1e | 247 | m = find_next_zero_bit(p->bitmap, IDR_SIZE, n); |
1da177e4 LT |
248 | if (m == IDR_SIZE) { |
249 | /* no space available go back to previous layer. */ | |
250 | l++; | |
7aae6dd8 | 251 | oid = id; |
e15ae2dd | 252 | id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1; |
7aae6dd8 TH |
253 | |
254 | /* if already at the top layer, we need to grow */ | |
d2e7276b | 255 | if (id >= 1 << (idp->layers * IDR_BITS)) { |
1da177e4 | 256 | *starting_id = id; |
12d1b439 | 257 | return -EAGAIN; |
1da177e4 | 258 | } |
d2e7276b TH |
259 | p = pa[l]; |
260 | BUG_ON(!p); | |
7aae6dd8 TH |
261 | |
262 | /* If we need to go up one layer, continue the | |
263 | * loop; otherwise, restart from the top. | |
264 | */ | |
265 | sh = IDR_BITS * (l + 1); | |
266 | if (oid >> sh == id >> sh) | |
267 | continue; | |
268 | else | |
269 | goto restart; | |
1da177e4 LT |
270 | } |
271 | if (m != n) { | |
272 | sh = IDR_BITS*l; | |
273 | id = ((id >> sh) ^ n ^ m) << sh; | |
274 | } | |
125c4c70 | 275 | if ((id >= MAX_IDR_BIT) || (id < 0)) |
12d1b439 | 276 | return -ENOSPC; |
1da177e4 LT |
277 | if (l == 0) |
278 | break; | |
279 | /* | |
280 | * Create the layer below if it is missing. | |
281 | */ | |
282 | if (!p->ary[m]) { | |
d5c7409f | 283 | new = idr_layer_alloc(gfp_mask, layer_idr); |
4ae53789 | 284 | if (!new) |
12d1b439 | 285 | return -ENOMEM; |
6ff2d39b | 286 | new->layer = l-1; |
54616283 | 287 | new->prefix = id & idr_layer_prefix_mask(new->layer); |
3219b3b7 | 288 | rcu_assign_pointer(p->ary[m], new); |
1da177e4 LT |
289 | p->count++; |
290 | } | |
291 | pa[l--] = p; | |
292 | p = p->ary[m]; | |
293 | } | |
e33ac8bd TH |
294 | |
295 | pa[l] = p; | |
296 | return id; | |
1da177e4 LT |
297 | } |
298 | ||
e33ac8bd | 299 | static int idr_get_empty_slot(struct idr *idp, int starting_id, |
d5c7409f TH |
300 | struct idr_layer **pa, gfp_t gfp_mask, |
301 | struct idr *layer_idr) | |
1da177e4 LT |
302 | { |
303 | struct idr_layer *p, *new; | |
304 | int layers, v, id; | |
c259cc28 | 305 | unsigned long flags; |
e15ae2dd | 306 | |
1da177e4 LT |
307 | id = starting_id; |
308 | build_up: | |
309 | p = idp->top; | |
310 | layers = idp->layers; | |
311 | if (unlikely(!p)) { | |
d5c7409f | 312 | if (!(p = idr_layer_alloc(gfp_mask, layer_idr))) |
12d1b439 | 313 | return -ENOMEM; |
6ff2d39b | 314 | p->layer = 0; |
1da177e4 LT |
315 | layers = 1; |
316 | } | |
317 | /* | |
318 | * Add a new layer to the top of the tree if the requested | |
319 | * id is larger than the currently allocated space. | |
320 | */ | |
326cf0f0 | 321 | while (id > idr_max(layers)) { |
1da177e4 | 322 | layers++; |
711a49a0 MS |
323 | if (!p->count) { |
324 | /* special case: if the tree is currently empty, | |
325 | * then we grow the tree by moving the top node | |
326 | * upwards. | |
327 | */ | |
328 | p->layer++; | |
54616283 | 329 | WARN_ON_ONCE(p->prefix); |
1da177e4 | 330 | continue; |
711a49a0 | 331 | } |
d5c7409f | 332 | if (!(new = idr_layer_alloc(gfp_mask, layer_idr))) { |
1da177e4 LT |
333 | /* |
334 | * The allocation failed. If we built part of | |
335 | * the structure tear it down. | |
336 | */ | |
c259cc28 | 337 | spin_lock_irqsave(&idp->lock, flags); |
1da177e4 LT |
338 | for (new = p; p && p != idp->top; new = p) { |
339 | p = p->ary[0]; | |
340 | new->ary[0] = NULL; | |
1d9b2e1e TH |
341 | new->count = 0; |
342 | bitmap_clear(new->bitmap, 0, IDR_SIZE); | |
4ae53789 | 343 | __move_to_free_list(idp, new); |
1da177e4 | 344 | } |
c259cc28 | 345 | spin_unlock_irqrestore(&idp->lock, flags); |
12d1b439 | 346 | return -ENOMEM; |
1da177e4 LT |
347 | } |
348 | new->ary[0] = p; | |
349 | new->count = 1; | |
6ff2d39b | 350 | new->layer = layers-1; |
54616283 | 351 | new->prefix = id & idr_layer_prefix_mask(new->layer); |
1d9b2e1e TH |
352 | if (bitmap_full(p->bitmap, IDR_SIZE)) |
353 | __set_bit(0, new->bitmap); | |
1da177e4 LT |
354 | p = new; |
355 | } | |
3219b3b7 | 356 | rcu_assign_pointer(idp->top, p); |
1da177e4 | 357 | idp->layers = layers; |
d5c7409f | 358 | v = sub_alloc(idp, &id, pa, gfp_mask, layer_idr); |
12d1b439 | 359 | if (v == -EAGAIN) |
1da177e4 LT |
360 | goto build_up; |
361 | return(v); | |
362 | } | |
363 | ||
3594eb28 TH |
364 | /* |
365 | * @id and @pa are from a successful allocation from idr_get_empty_slot(). | |
366 | * Install the user pointer @ptr and mark the slot full. | |
367 | */ | |
0ffc2a9c TH |
368 | static void idr_fill_slot(struct idr *idr, void *ptr, int id, |
369 | struct idr_layer **pa) | |
e33ac8bd | 370 | { |
0ffc2a9c TH |
371 | /* update hint used for lookup, cleared from free_layer() */ |
372 | rcu_assign_pointer(idr->hint, pa[0]); | |
373 | ||
3594eb28 TH |
374 | rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], (struct idr_layer *)ptr); |
375 | pa[0]->count++; | |
376 | idr_mark_full(pa, id); | |
e33ac8bd TH |
377 | } |
378 | ||
1da177e4 | 379 | /** |
7c657f2f | 380 | * idr_get_new_above - allocate new idr entry above or equal to a start id |
1da177e4 | 381 | * @idp: idr handle |
94e2bd68 | 382 | * @ptr: pointer you want associated with the id |
ea24ea85 | 383 | * @starting_id: id to start search at |
1da177e4 LT |
384 | * @id: pointer to the allocated handle |
385 | * | |
386 | * This is the allocate id function. It should be called with any | |
387 | * required locks. | |
388 | * | |
066a9be6 | 389 | * If allocation from IDR's private freelist fails, idr_get_new_above() will |
56083ab1 | 390 | * return %-EAGAIN. The caller should retry the idr_pre_get() call to refill |
066a9be6 NA |
391 | * IDR's preallocation and then retry the idr_get_new_above() call. |
392 | * | |
56083ab1 | 393 | * If the idr is full idr_get_new_above() will return %-ENOSPC. |
1da177e4 | 394 | * |
56083ab1 | 395 | * @id returns a value in the range @starting_id ... %0x7fffffff |
1da177e4 LT |
396 | */ |
397 | int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id) | |
398 | { | |
326cf0f0 | 399 | struct idr_layer *pa[MAX_IDR_LEVEL + 1]; |
1da177e4 | 400 | int rv; |
e15ae2dd | 401 | |
d5c7409f | 402 | rv = idr_get_empty_slot(idp, starting_id, pa, 0, idp); |
944ca05c | 403 | if (rv < 0) |
12d1b439 | 404 | return rv == -ENOMEM ? -EAGAIN : rv; |
3594eb28 | 405 | |
0ffc2a9c | 406 | idr_fill_slot(idp, ptr, rv, pa); |
1da177e4 LT |
407 | *id = rv; |
408 | return 0; | |
409 | } | |
410 | EXPORT_SYMBOL(idr_get_new_above); | |
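
Putting the -EAGAIN protocol above into code, a sketch of the classic caller; `my_idr`, `my_lock` and `ptr` are placeholders and the GFP flag is an assumption.

```c
#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>

static DEFINE_IDR(my_idr);
static DEFINE_SPINLOCK(my_lock);

static int my_register(void *ptr)
{
	int id, ret;

retry:
	if (!idr_pre_get(&my_idr, GFP_KERNEL))	/* preallocate idr_layers */
		return -ENOMEM;

	spin_lock(&my_lock);
	ret = idr_get_new_above(&my_idr, ptr, 1, &id);
	spin_unlock(&my_lock);

	if (ret == -EAGAIN)
		goto retry;			/* preallocation raced away, refill */
	if (ret)
		return ret;			/* -ENOSPC: id space exhausted */
	return id;
}
```

The retry loop exists because the freelist filled by idr_pre_get() is shared and can be drained by another caller between the two calls; idr_preload()/idr_alloc() below avoid that dance.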
411 | ||
d5c7409f TH |
412 | /** |
413 | * idr_preload - preload for idr_alloc() | |
414 | * @gfp_mask: allocation mask to use for preloading | |
415 | * | |
416 | * Preload per-cpu layer buffer for idr_alloc(). Can only be used from | |
417 | * process context and each idr_preload() invocation should be matched with | |
418 | * idr_preload_end(). Note that preemption is disabled while preloaded. | |
419 | * | |
420 | * The first idr_alloc() in the preloaded section can be treated as if it | |
421 | * were invoked with @gfp_mask used for preloading. This allows using more | |
422 | * permissive allocation masks for idrs protected by spinlocks. | |
423 | * | |
424 | * For example, if idr_alloc() below fails, the failure can be treated as | |
425 | * if idr_alloc() were called with GFP_KERNEL rather than GFP_NOWAIT. | |
426 | * | |
427 | * idr_preload(GFP_KERNEL); | |
428 | * spin_lock(lock); | |
429 | * | |
430 | * id = idr_alloc(idr, ptr, start, end, GFP_NOWAIT); | |
431 | * | |
432 | * spin_unlock(lock); | |
433 | * idr_preload_end(); | |
434 | * if (id < 0) | |
435 | * error; | |
436 | */ | |
437 | void idr_preload(gfp_t gfp_mask) | |
438 | { | |
439 | /* | |
440 | * Consuming preload buffer from non-process context breaks preload | |
441 | * allocation guarantee. Disallow usage from those contexts. | |
442 | */ | |
443 | WARN_ON_ONCE(in_interrupt()); | |
444 | might_sleep_if(gfp_mask & __GFP_WAIT); | |
445 | ||
446 | preempt_disable(); | |
447 | ||
448 | /* | |
449 | * idr_alloc() is likely to succeed w/o full idr_layer buffer and | |
450 | * return value from idr_alloc() needs to be checked for failure | |
451 | * anyway. Silently give up if allocation fails. The caller can | |
452 | * treat failures from idr_alloc() as if idr_alloc() were called | |
453 | * with @gfp_mask which should be enough. | |
454 | */ | |
455 | while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) { | |
456 | struct idr_layer *new; | |
457 | ||
458 | preempt_enable(); | |
459 | new = kmem_cache_zalloc(idr_layer_cache, gfp_mask); | |
460 | preempt_disable(); | |
461 | if (!new) | |
462 | break; | |
463 | ||
464 | /* link the new one to per-cpu preload list */ | |
465 | new->ary[0] = __this_cpu_read(idr_preload_head); | |
466 | __this_cpu_write(idr_preload_head, new); | |
467 | __this_cpu_inc(idr_preload_cnt); | |
468 | } | |
469 | } | |
470 | EXPORT_SYMBOL(idr_preload); | |
471 | ||
472 | /** | |
473 | * idr_alloc - allocate new idr entry | |
474 | * @idr: the (initialized) idr | |
475 | * @ptr: pointer to be associated with the new id | |
476 | * @start: the minimum id (inclusive) | |
477 | * @end: the maximum id (exclusive, <= 0 for max) | |
478 | * @gfp_mask: memory allocation flags | |
479 | * | |
480 | * Allocate an id in [start, end) and associate it with @ptr. If no ID is | |
481 | * available in the specified range, returns -ENOSPC. On memory allocation | |
482 | * failure, returns -ENOMEM. | |
483 | * | |
484 | * Note that @end is treated as max when <= 0. This is to always allow | |
485 | * using @start + N as @end as long as N is inside integer range. | |
486 | * | |
487 | * The user is responsible for exclusively synchronizing all operations | |
488 | * which may modify @idr. However, read-only accesses such as idr_find() | |
489 | * or iteration can be performed under RCU read lock provided the user | |
490 | * destroys @ptr in RCU-safe way after removal from idr. | |
491 | */ | |
492 | int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask) | |
493 | { | |
494 | int max = end > 0 ? end - 1 : INT_MAX; /* inclusive upper limit */ | |
326cf0f0 | 495 | struct idr_layer *pa[MAX_IDR_LEVEL + 1]; |
d5c7409f TH |
496 | int id; |
497 | ||
498 | might_sleep_if(gfp_mask & __GFP_WAIT); | |
499 | ||
500 | /* sanity checks */ | |
501 | if (WARN_ON_ONCE(start < 0)) | |
502 | return -EINVAL; | |
503 | if (unlikely(max < start)) | |
504 | return -ENOSPC; | |
505 | ||
506 | /* allocate id */ | |
507 | id = idr_get_empty_slot(idr, start, pa, gfp_mask, NULL); | |
508 | if (unlikely(id < 0)) | |
509 | return id; | |
510 | if (unlikely(id > max)) | |
511 | return -ENOSPC; | |
512 | ||
0ffc2a9c | 513 | idr_fill_slot(idr, ptr, id, pa); |
d5c7409f TH |
514 | return id; |
515 | } | |
516 | EXPORT_SYMBOL_GPL(idr_alloc); | |
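
One more note on the [@start, @end) convention, as a sketch (the idr pointer and helper names are hypothetical):

```c
#include <linux/idr.h>
#include <linux/gfp.h>

/* Hand out ids 1..255 inclusive: @end == 256 is exclusive. */
static int my_alloc_small_id(struct idr *idr, void *ptr)
{
	return idr_alloc(idr, ptr, 1, 256, GFP_KERNEL);
}

/* @end <= 0 removes the upper bound, so this may return anything up to INT_MAX. */
static int my_alloc_any_id(struct idr *idr, void *ptr)
{
	return idr_alloc(idr, ptr, 1, 0, GFP_KERNEL);
}
```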
517 | ||
1da177e4 LT |
518 | static void idr_remove_warning(int id) |
519 | { | |
f098ad65 ND |
520 | printk(KERN_WARNING |
521 | "idr_remove called for id=%d which is not allocated.\n", id); | |
1da177e4 LT |
522 | dump_stack(); |
523 | } | |
524 | ||
525 | static void sub_remove(struct idr *idp, int shift, int id) | |
526 | { | |
527 | struct idr_layer *p = idp->top; | |
326cf0f0 | 528 | struct idr_layer **pa[MAX_IDR_LEVEL + 1]; |
1da177e4 | 529 | struct idr_layer ***paa = &pa[0]; |
cf481c20 | 530 | struct idr_layer *to_free; |
1da177e4 LT |
531 | int n; |
532 | ||
533 | *paa = NULL; | |
534 | *++paa = &idp->top; | |
535 | ||
536 | while ((shift > 0) && p) { | |
537 | n = (id >> shift) & IDR_MASK; | |
1d9b2e1e | 538 | __clear_bit(n, p->bitmap); |
1da177e4 LT |
539 | *++paa = &p->ary[n]; |
540 | p = p->ary[n]; | |
541 | shift -= IDR_BITS; | |
542 | } | |
543 | n = id & IDR_MASK; | |
1d9b2e1e TH |
544 | if (likely(p != NULL && test_bit(n, p->bitmap))) { |
545 | __clear_bit(n, p->bitmap); | |
cf481c20 ND |
546 | rcu_assign_pointer(p->ary[n], NULL); |
547 | to_free = NULL; | |
1da177e4 | 548 | while(*paa && ! --((**paa)->count)){ |
cf481c20 | 549 | if (to_free) |
0ffc2a9c | 550 | free_layer(idp, to_free); |
cf481c20 | 551 | to_free = **paa; |
1da177e4 LT |
552 | **paa-- = NULL; |
553 | } | |
e15ae2dd | 554 | if (!*paa) |
1da177e4 | 555 | idp->layers = 0; |
cf481c20 | 556 | if (to_free) |
0ffc2a9c | 557 | free_layer(idp, to_free); |
e15ae2dd | 558 | } else |
1da177e4 | 559 | idr_remove_warning(id); |
1da177e4 LT |
560 | } |
561 | ||
562 | /** | |
56083ab1 | 563 | * idr_remove - remove the given id and free its slot |
72fd4a35 RD |
564 | * @idp: idr handle |
565 | * @id: unique key | |
1da177e4 LT |
566 | */ |
567 | void idr_remove(struct idr *idp, int id) | |
568 | { | |
569 | struct idr_layer *p; | |
cf481c20 | 570 | struct idr_layer *to_free; |
1da177e4 | 571 | |
7175c61c | 572 | /* see comment in idr_find_slowpath() */ |
e8c8d1bc TH |
573 | if (WARN_ON_ONCE(id < 0)) |
574 | return; | |
1da177e4 LT |
575 | |
576 | sub_remove(idp, (idp->layers - 1) * IDR_BITS, id); | |
e15ae2dd | 577 | if (idp->top && idp->top->count == 1 && (idp->layers > 1) && |
cf481c20 ND |
578 | idp->top->ary[0]) { |
579 | /* | |
580 | * Single child at leftmost slot: we can shrink the tree. | |
581 | * This level is not needed anymore since when layers are | |
582 | * inserted, they are inserted at the top of the existing | |
583 | * tree. | |
584 | */ | |
585 | to_free = idp->top; | |
1da177e4 | 586 | p = idp->top->ary[0]; |
cf481c20 | 587 | rcu_assign_pointer(idp->top, p); |
1da177e4 | 588 | --idp->layers; |
1d9b2e1e TH |
589 | to_free->count = 0; |
590 | bitmap_clear(to_free->bitmap, 0, IDR_SIZE); | |
0ffc2a9c | 591 | free_layer(idp, to_free); |
1da177e4 | 592 | } |
125c4c70 | 593 | while (idp->id_free_cnt >= MAX_IDR_FREE) { |
4ae53789 | 594 | p = get_from_free_list(idp); |
cf481c20 ND |
595 | /* |
596 | * Note: we don't call the rcu callback here, since the only | |
597 | * layers that fall into the freelist are those that have been | |
598 | * preallocated. | |
599 | */ | |
1da177e4 | 600 | kmem_cache_free(idr_layer_cache, p); |
1da177e4 | 601 | } |
af8e2a4c | 602 | return; |
1da177e4 LT |
603 | } |
604 | EXPORT_SYMBOL(idr_remove); | |
605 | ||
fe6e24ec | 606 | void __idr_remove_all(struct idr *idp) |
23936cc0 | 607 | { |
6ace06dc | 608 | int n, id, max; |
2dcb22b3 | 609 | int bt_mask; |
23936cc0 | 610 | struct idr_layer *p; |
326cf0f0 | 611 | struct idr_layer *pa[MAX_IDR_LEVEL + 1]; |
23936cc0 KH |
612 | struct idr_layer **paa = &pa[0]; |
613 | ||
614 | n = idp->layers * IDR_BITS; | |
615 | p = idp->top; | |
1b23336a | 616 | rcu_assign_pointer(idp->top, NULL); |
326cf0f0 | 617 | max = idr_max(idp->layers); |
23936cc0 KH |
618 | |
619 | id = 0; | |
326cf0f0 | 620 | while (id >= 0 && id <= max) { |
23936cc0 KH |
621 | while (n > IDR_BITS && p) { |
622 | n -= IDR_BITS; | |
623 | *paa++ = p; | |
624 | p = p->ary[(id >> n) & IDR_MASK]; | |
625 | } | |
626 | ||
2dcb22b3 | 627 | bt_mask = id; |
23936cc0 | 628 | id += 1 << n; |
2dcb22b3 ID |
629 | /* Get the highest bit that the above add changed from 0->1. */ |
630 | while (n < fls(id ^ bt_mask)) { | |
cf481c20 | 631 | if (p) |
0ffc2a9c | 632 | free_layer(idp, p); |
23936cc0 KH |
633 | n += IDR_BITS; |
634 | p = *--paa; | |
635 | } | |
636 | } | |
23936cc0 KH |
637 | idp->layers = 0; |
638 | } | |
fe6e24ec | 639 | EXPORT_SYMBOL(__idr_remove_all); |
23936cc0 | 640 | |
8d3b3591 AM |
641 | /** |
642 | * idr_destroy - release all cached layers within an idr tree | |
ea24ea85 | 643 | * @idp: idr handle |
9bb26bc1 TH |
644 | * |
645 | * Free all id mappings and all idp_layers. After this function, @idp is | |
646 | * completely unused and can be freed / recycled. The caller is | |
647 | * responsible for ensuring that no one else accesses @idp during or after | |
648 | * idr_destroy(). | |
649 | * | |
650 | * A typical clean-up sequence for objects stored in an idr tree will use | |
651 | * idr_for_each() to free all objects, if necessary, then idr_destroy() to | |
652 | * free up the id mappings and cached idr_layers. | |
8d3b3591 AM |
653 | */ |
654 | void idr_destroy(struct idr *idp) | |
655 | { | |
fe6e24ec | 656 | __idr_remove_all(idp); |
9bb26bc1 | 657 | |
8d3b3591 | 658 | while (idp->id_free_cnt) { |
4ae53789 | 659 | struct idr_layer *p = get_from_free_list(idp); |
8d3b3591 AM |
660 | kmem_cache_free(idr_layer_cache, p); |
661 | } | |
662 | } | |
663 | EXPORT_SYMBOL(idr_destroy); | |
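
The typical clean-up sequence mentioned above, sketched out; it assumes the stored pointers were obtained with kmalloc() and that `my_idr` is no longer accessed by anyone else at this point.

```c
#include <linux/idr.h>
#include <linux/slab.h>

static int my_free_one(int id, void *p, void *data)
{
	kfree(p);			/* p is the object stored under this id */
	return 0;			/* keep iterating */
}

static void my_teardown(struct idr *my_idr)
{
	idr_for_each(my_idr, my_free_one, NULL);	/* free every stored object */
	idr_destroy(my_idr);				/* drop id mappings and cached layers */
}
```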
664 | ||
0ffc2a9c | 665 | void *idr_find_slowpath(struct idr *idp, int id) |
1da177e4 LT |
666 | { |
667 | int n; | |
668 | struct idr_layer *p; | |
669 | ||
7175c61c TH |
670 | /* |
671 | * If @id is negative, idr_find() used to ignore the sign bit and | |
672 | * performed lookup with the rest of bits, which is weird and can | |
673 | * lead to very obscure bugs. We're now returning NULL for all | |
674 | * negative IDs but just in case somebody was depending on the sign | |
675 | * bit being ignored, let's trigger WARN_ON_ONCE() so that they can | |
676 | * be detected and fixed. WARN_ON_ONCE() can later be removed. | |
677 | */ | |
e8c8d1bc TH |
678 | if (WARN_ON_ONCE(id < 0)) |
679 | return NULL; | |
680 | ||
96be753a | 681 | p = rcu_dereference_raw(idp->top); |
6ff2d39b MS |
682 | if (!p) |
683 | return NULL; | |
684 | n = (p->layer+1) * IDR_BITS; | |
1da177e4 | 685 | |
326cf0f0 | 686 | if (id > idr_max(p->layer + 1)) |
1da177e4 | 687 | return NULL; |
6ff2d39b | 688 | BUG_ON(n == 0); |
1da177e4 LT |
689 | |
690 | while (n > 0 && p) { | |
691 | n -= IDR_BITS; | |
6ff2d39b | 692 | BUG_ON(n != p->layer*IDR_BITS); |
96be753a | 693 | p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]); |
1da177e4 LT |
694 | } |
695 | return((void *)p); | |
696 | } | |
0ffc2a9c | 697 | EXPORT_SYMBOL(idr_find_slowpath); |
1da177e4 | 698 | |
96d7fa42 KH |
699 | /** |
700 | * idr_for_each - iterate through all stored pointers | |
701 | * @idp: idr handle | |
702 | * @fn: function to be called for each pointer | |
703 | * @data: data passed back to callback function | |
704 | * | |
705 | * Iterate over the pointers registered with the given idr. The | |
706 | * callback function will be called for each pointer currently | |
707 | * registered, passing the id, the pointer and the data pointer passed | |
708 | * to this function. It is not safe to modify the idr tree while in | |
709 | * the callback, so functions such as idr_get_new and idr_remove are | |
710 | * not allowed. | |
711 | * | |
712 | * We check the return of @fn each time. If it returns anything other | |
56083ab1 | 713 | * than %0, we break out and return that value. |
96d7fa42 KH |
714 | * |
715 | * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove(). | |
716 | */ | |
717 | int idr_for_each(struct idr *idp, | |
718 | int (*fn)(int id, void *p, void *data), void *data) | |
719 | { | |
720 | int n, id, max, error = 0; | |
721 | struct idr_layer *p; | |
326cf0f0 | 722 | struct idr_layer *pa[MAX_IDR_LEVEL + 1]; |
96d7fa42 KH |
723 | struct idr_layer **paa = &pa[0]; |
724 | ||
725 | n = idp->layers * IDR_BITS; | |
96be753a | 726 | p = rcu_dereference_raw(idp->top); |
326cf0f0 | 727 | max = idr_max(idp->layers); |
96d7fa42 KH |
728 | |
729 | id = 0; | |
326cf0f0 | 730 | while (id >= 0 && id <= max) { |
96d7fa42 KH |
731 | while (n > 0 && p) { |
732 | n -= IDR_BITS; | |
733 | *paa++ = p; | |
96be753a | 734 | p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]); |
96d7fa42 KH |
735 | } |
736 | ||
737 | if (p) { | |
738 | error = fn(id, (void *)p, data); | |
739 | if (error) | |
740 | break; | |
741 | } | |
742 | ||
743 | id += 1 << n; | |
744 | while (n < fls(id)) { | |
745 | n += IDR_BITS; | |
746 | p = *--paa; | |
747 | } | |
748 | } | |
749 | ||
750 | return error; | |
751 | } | |
752 | EXPORT_SYMBOL(idr_for_each); | |
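
Because the walk stops at the first non-zero return, idr_for_each() can double as a reverse lookup; a sketch that assumes ids were allocated starting at 1 so that 0 can mean "not found".

```c
#include <linux/idr.h>
#include <linux/errno.h>

static int my_match(int id, void *p, void *data)
{
	return p == data ? id : 0;	/* non-zero stops the walk and is returned */
}

/* Find the id under which @obj is stored, or -ENOENT. */
static int my_find_id(struct idr *my_idr, void *obj)
{
	int id = idr_for_each(my_idr, my_match, obj);

	return id ? id : -ENOENT;
}
```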
753 | ||
38460b48 KH |
754 | /** |
755 | * idr_get_next - lookup the next object at or above the given id |
756 | * @idp: idr handle | |
ea24ea85 | 757 | * @nextidp: pointer to lookup key |
38460b48 KH |
758 | * |
759 | * Returns a pointer to the registered object with the lowest id that is |
1458ce16 NA |
760 | * >= the given id. After the lookup, *@nextidp is updated to that id for |
761 | * the next iteration. |
9f7de827 HD |
762 | * |
763 | * This function can be called under rcu_read_lock(), given that the leaf | |
764 | * pointer lifetimes are correctly managed. | |
38460b48 | 765 | */ |
38460b48 KH |
766 | void *idr_get_next(struct idr *idp, int *nextidp) |
767 | { | |
326cf0f0 | 768 | struct idr_layer *p, *pa[MAX_IDR_LEVEL + 1]; |
38460b48 KH |
769 | struct idr_layer **paa = &pa[0]; |
770 | int id = *nextidp; | |
771 | int n, max; | |
772 | ||
773 | /* find first ent */ | |
94bfa3b6 | 774 | p = rcu_dereference_raw(idp->top); |
38460b48 KH |
775 | if (!p) |
776 | return NULL; | |
9f7de827 | 777 | n = (p->layer + 1) * IDR_BITS; |
326cf0f0 | 778 | max = idr_max(p->layer + 1); |
38460b48 | 779 | |
326cf0f0 | 780 | while (id >= 0 && id <= max) { |
38460b48 KH |
781 | while (n > 0 && p) { |
782 | n -= IDR_BITS; | |
783 | *paa++ = p; | |
94bfa3b6 | 784 | p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]); |
38460b48 KH |
785 | } |
786 | ||
787 | if (p) { | |
788 | *nextidp = id; | |
789 | return p; | |
790 | } | |
791 | ||
6cdae741 TH |
792 | /* |
793 | * Proceed to the next layer at the current level. Unlike | |
794 | * idr_for_each(), @id isn't guaranteed to be aligned to | |
795 | * layer boundary at this point and adding 1 << n may | |
796 | * incorrectly skip IDs. Make sure we jump to the | |
797 | * beginning of the next layer using round_up(). | |
798 | */ | |
799 | id = round_up(id + 1, 1 << n); | |
38460b48 KH |
800 | while (n < fls(id)) { |
801 | n += IDR_BITS; | |
802 | p = *--paa; | |
803 | } | |
804 | } | |
805 | return NULL; | |
806 | } | |
4d1ee80f | 807 | EXPORT_SYMBOL(idr_get_next); |
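
An iteration sketch built on idr_get_next(), taking the rcu_read_lock() option mentioned above; `my_idr` and the pr_info() output are illustrative only.

```c
#include <linux/idr.h>
#include <linux/rcupdate.h>
#include <linux/printk.h>

static void my_dump(struct idr *my_idr)
{
	void *p;
	int id = 0;

	rcu_read_lock();
	while ((p = idr_get_next(my_idr, &id)) != NULL) {
		pr_info("id %d -> %p\n", id, p);
		id++;			/* step past the entry just returned */
	}
	rcu_read_unlock();
}
```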
38460b48 KH |
808 | |
809 | ||
5806f07c JM |
810 | /** |
811 | * idr_replace - replace pointer for given id | |
812 | * @idp: idr handle | |
813 | * @ptr: pointer you want associated with the id | |
814 | * @id: lookup key | |
815 | * | |
816 | * Replace the pointer registered with an id and return the old value. | |
56083ab1 RD |
817 | * A %-ENOENT return indicates that @id was not found. |
818 | * A %-EINVAL return indicates that @id was not within valid constraints. | |
5806f07c | 819 | * |
cf481c20 | 820 | * The caller must serialize with writers. |
5806f07c JM |
821 | */ |
822 | void *idr_replace(struct idr *idp, void *ptr, int id) | |
823 | { | |
824 | int n; | |
825 | struct idr_layer *p, *old_p; | |
826 | ||
7175c61c | 827 | /* see comment in idr_find_slowpath() */ |
e8c8d1bc TH |
828 | if (WARN_ON_ONCE(id < 0)) |
829 | return ERR_PTR(-EINVAL); | |
830 | ||
5806f07c | 831 | p = idp->top; |
6ff2d39b MS |
832 | if (!p) |
833 | return ERR_PTR(-EINVAL); | |
834 | ||
835 | n = (p->layer+1) * IDR_BITS; | |
5806f07c | 836 | |
5806f07c JM |
837 | if (id >= (1 << n)) |
838 | return ERR_PTR(-EINVAL); | |
839 | ||
840 | n -= IDR_BITS; | |
841 | while ((n > 0) && p) { | |
842 | p = p->ary[(id >> n) & IDR_MASK]; | |
843 | n -= IDR_BITS; | |
844 | } | |
845 | ||
846 | n = id & IDR_MASK; | |
1d9b2e1e | 847 | if (unlikely(p == NULL || !test_bit(n, p->bitmap))) |
5806f07c JM |
848 | return ERR_PTR(-ENOENT); |
849 | ||
850 | old_p = p->ary[n]; | |
cf481c20 | 851 | rcu_assign_pointer(p->ary[n], ptr); |
5806f07c JM |
852 | |
853 | return old_p; | |
854 | } | |
855 | EXPORT_SYMBOL(idr_replace); | |
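
A small sketch of the ERR_PTR convention above; `my_idr` and the helper name are assumptions.

```c
#include <linux/idr.h>
#include <linux/err.h>

/* Swap in @new_ptr under @id; on success *@old_ptr receives the previous pointer. */
static int my_swap(struct idr *my_idr, int id, void *new_ptr, void **old_ptr)
{
	void *old = idr_replace(my_idr, new_ptr, id);

	if (IS_ERR(old))
		return PTR_ERR(old);	/* -EINVAL (bad id) or -ENOENT (id not allocated) */
	*old_ptr = old;			/* previous pointer, caller may now free it */
	return 0;
}
```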
856 | ||
199f0ca5 | 857 | void __init idr_init_cache(void) |
1da177e4 | 858 | { |
199f0ca5 | 859 | idr_layer_cache = kmem_cache_create("idr_layer_cache", |
5b019e99 | 860 | sizeof(struct idr_layer), 0, SLAB_PANIC, NULL); |
1da177e4 LT |
861 | } |
862 | ||
863 | /** | |
864 | * idr_init - initialize idr handle | |
865 | * @idp: idr handle | |
866 | * | |
867 | * This function is used to set up the handle (@idp) that you will pass | |
868 | * to the rest of the functions. | |
869 | */ | |
870 | void idr_init(struct idr *idp) | |
871 | { | |
1da177e4 LT |
872 | memset(idp, 0, sizeof(struct idr)); |
873 | spin_lock_init(&idp->lock); | |
874 | } | |
875 | EXPORT_SYMBOL(idr_init); | |
72dba584 TH |
876 | |
877 | ||
56083ab1 RD |
878 | /** |
879 | * DOC: IDA description | |
72dba584 TH |
880 | * IDA - IDR based ID allocator |
881 | * | |
56083ab1 | 882 | * This is an id allocator without id -> pointer translation. Memory |
72dba584 TH |
883 | * usage is much lower than full blown idr because each id only |
884 | * occupies a bit. ida uses a custom leaf node which contains | |
885 | * IDA_BITMAP_BITS slots. | |
886 | * | |
887 | * 2007-04-25 written by Tejun Heo <htejun@gmail.com> | |
888 | */ | |
889 | ||
890 | static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap) | |
891 | { | |
892 | unsigned long flags; | |
893 | ||
894 | if (!ida->free_bitmap) { | |
895 | spin_lock_irqsave(&ida->idr.lock, flags); | |
896 | if (!ida->free_bitmap) { | |
897 | ida->free_bitmap = bitmap; | |
898 | bitmap = NULL; | |
899 | } | |
900 | spin_unlock_irqrestore(&ida->idr.lock, flags); | |
901 | } | |
902 | ||
903 | kfree(bitmap); | |
904 | } | |
905 | ||
906 | /** | |
907 | * ida_pre_get - reserve resources for ida allocation | |
908 | * @ida: ida handle | |
909 | * @gfp_mask: memory allocation flag | |
910 | * | |
911 | * This function should be called prior to locking and calling | |
912 | * ida_get_new_above(). It preallocates enough memory to satisfy the | |
913 | * worst possible allocation. | |
914 | * | |
56083ab1 RD |
915 | * If the system is REALLY out of memory this function returns %0, |
916 | * otherwise %1. | |
72dba584 TH |
917 | */ |
918 | int ida_pre_get(struct ida *ida, gfp_t gfp_mask) | |
919 | { | |
920 | /* allocate idr_layers */ | |
921 | if (!idr_pre_get(&ida->idr, gfp_mask)) | |
922 | return 0; | |
923 | ||
924 | /* allocate free_bitmap */ | |
925 | if (!ida->free_bitmap) { | |
926 | struct ida_bitmap *bitmap; | |
927 | ||
928 | bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask); | |
929 | if (!bitmap) | |
930 | return 0; | |
931 | ||
932 | free_bitmap(ida, bitmap); | |
933 | } | |
934 | ||
935 | return 1; | |
936 | } | |
937 | EXPORT_SYMBOL(ida_pre_get); | |
938 | ||
939 | /** | |
940 | * ida_get_new_above - allocate new ID above or equal to a start id | |
941 | * @ida: ida handle | |
ea24ea85 | 942 | * @starting_id: id to start search at |
72dba584 TH |
943 | * @p_id: pointer to the allocated handle |
944 | * | |
e3816c54 WSH |
945 | * Allocate new ID above or equal to @starting_id. It should be called |
946 | * with any required locks. | |
72dba584 | 947 | * |
56083ab1 | 948 | * If memory is required, it will return %-EAGAIN, you should unlock |
72dba584 | 949 | * and go back to the ida_pre_get() call. If the ida is full, it will |
56083ab1 | 950 | * return %-ENOSPC. |
72dba584 | 951 | * |
56083ab1 | 952 | * @p_id returns a value in the range @starting_id ... %0x7fffffff. |
72dba584 TH |
953 | */ |
954 | int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) | |
955 | { | |
326cf0f0 | 956 | struct idr_layer *pa[MAX_IDR_LEVEL + 1]; |
72dba584 TH |
957 | struct ida_bitmap *bitmap; |
958 | unsigned long flags; | |
959 | int idr_id = starting_id / IDA_BITMAP_BITS; | |
960 | int offset = starting_id % IDA_BITMAP_BITS; | |
961 | int t, id; | |
962 | ||
963 | restart: | |
964 | /* get vacant slot */ | |
d5c7409f | 965 | t = idr_get_empty_slot(&ida->idr, idr_id, pa, 0, &ida->idr); |
944ca05c | 966 | if (t < 0) |
12d1b439 | 967 | return t == -ENOMEM ? -EAGAIN : t; |
72dba584 | 968 | |
125c4c70 | 969 | if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT) |
72dba584 TH |
970 | return -ENOSPC; |
971 | ||
972 | if (t != idr_id) | |
973 | offset = 0; | |
974 | idr_id = t; | |
975 | ||
976 | /* if bitmap isn't there, create a new one */ | |
977 | bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK]; | |
978 | if (!bitmap) { | |
979 | spin_lock_irqsave(&ida->idr.lock, flags); | |
980 | bitmap = ida->free_bitmap; | |
981 | ida->free_bitmap = NULL; | |
982 | spin_unlock_irqrestore(&ida->idr.lock, flags); | |
983 | ||
984 | if (!bitmap) | |
985 | return -EAGAIN; | |
986 | ||
987 | memset(bitmap, 0, sizeof(struct ida_bitmap)); | |
3219b3b7 ND |
988 | rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK], |
989 | (void *)bitmap); | |
72dba584 TH |
990 | pa[0]->count++; |
991 | } | |
992 | ||
993 | /* lookup for empty slot */ | |
994 | t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset); | |
995 | if (t == IDA_BITMAP_BITS) { | |
996 | /* no empty slot after offset, continue to the next chunk */ | |
997 | idr_id++; | |
998 | offset = 0; | |
999 | goto restart; | |
1000 | } | |
1001 | ||
1002 | id = idr_id * IDA_BITMAP_BITS + t; | |
125c4c70 | 1003 | if (id >= MAX_IDR_BIT) |
72dba584 TH |
1004 | return -ENOSPC; |
1005 | ||
1006 | __set_bit(t, bitmap->bitmap); | |
1007 | if (++bitmap->nr_busy == IDA_BITMAP_BITS) | |
1008 | idr_mark_full(pa, idr_id); | |
1009 | ||
1010 | *p_id = id; | |
1011 | ||
1012 | /* Each leaf node can handle nearly a thousand slots and the | |
1013 | * whole idea of ida is to have a small memory footprint. | |
1014 | * Throw away extra resources one by one after each successful | |
1015 | * allocation. | |
1016 | */ | |
1017 | if (ida->idr.id_free_cnt || ida->free_bitmap) { | |
4ae53789 | 1018 | struct idr_layer *p = get_from_free_list(&ida->idr); |
72dba584 TH |
1019 | if (p) |
1020 | kmem_cache_free(idr_layer_cache, p); | |
1021 | } | |
1022 | ||
1023 | return 0; | |
1024 | } | |
1025 | EXPORT_SYMBOL(ida_get_new_above); | |
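
The ida allocation protocol mirrors the idr one; a sketch with `my_ida`, `my_lock` and the GFP flag assumed.

```c
#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>

static DEFINE_IDA(my_ida);
static DEFINE_SPINLOCK(my_lock);

static int my_get_id(void)
{
	int id, ret;

again:
	if (!ida_pre_get(&my_ida, GFP_KERNEL))	/* preallocate layer + bitmap */
		return -ENOMEM;

	spin_lock(&my_lock);
	ret = ida_get_new_above(&my_ida, 1, &id);
	spin_unlock(&my_lock);

	if (ret == -EAGAIN)
		goto again;			/* preallocation was consumed, refill */
	return ret ? ret : id;			/* -ENOSPC or the new id */
}
```

This is essentially the loop that ida_simple_get() below packages up behind simple_ida_lock.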
1026 | ||
72dba584 TH |
1027 | /** |
1028 | * ida_remove - remove the given ID | |
1029 | * @ida: ida handle | |
1030 | * @id: ID to free | |
1031 | */ | |
1032 | void ida_remove(struct ida *ida, int id) | |
1033 | { | |
1034 | struct idr_layer *p = ida->idr.top; | |
1035 | int shift = (ida->idr.layers - 1) * IDR_BITS; | |
1036 | int idr_id = id / IDA_BITMAP_BITS; | |
1037 | int offset = id % IDA_BITMAP_BITS; | |
1038 | int n; | |
1039 | struct ida_bitmap *bitmap; | |
1040 | ||
1041 | /* clear full bits while looking up the leaf idr_layer */ | |
1042 | while ((shift > 0) && p) { | |
1043 | n = (idr_id >> shift) & IDR_MASK; | |
1d9b2e1e | 1044 | __clear_bit(n, p->bitmap); |
72dba584 TH |
1045 | p = p->ary[n]; |
1046 | shift -= IDR_BITS; | |
1047 | } | |
1048 | ||
1049 | if (p == NULL) | |
1050 | goto err; | |
1051 | ||
1052 | n = idr_id & IDR_MASK; | |
1d9b2e1e | 1053 | __clear_bit(n, p->bitmap); |
72dba584 TH |
1054 | |
1055 | bitmap = (void *)p->ary[n]; | |
1056 | if (!test_bit(offset, bitmap->bitmap)) | |
1057 | goto err; | |
1058 | ||
1059 | /* update bitmap and remove it if empty */ | |
1060 | __clear_bit(offset, bitmap->bitmap); | |
1061 | if (--bitmap->nr_busy == 0) { | |
1d9b2e1e | 1062 | __set_bit(n, p->bitmap); /* to please idr_remove() */ |
72dba584 TH |
1063 | idr_remove(&ida->idr, idr_id); |
1064 | free_bitmap(ida, bitmap); | |
1065 | } | |
1066 | ||
1067 | return; | |
1068 | ||
1069 | err: | |
1070 | printk(KERN_WARNING | |
1071 | "ida_remove called for id=%d which is not allocated.\n", id); | |
1072 | } | |
1073 | EXPORT_SYMBOL(ida_remove); | |
1074 | ||
1075 | /** | |
1076 | * ida_destroy - release all cached layers within an ida tree | |
ea24ea85 | 1077 | * @ida: ida handle |
72dba584 TH |
1078 | */ |
1079 | void ida_destroy(struct ida *ida) | |
1080 | { | |
1081 | idr_destroy(&ida->idr); | |
1082 | kfree(ida->free_bitmap); | |
1083 | } | |
1084 | EXPORT_SYMBOL(ida_destroy); | |
1085 | ||
88eca020 RR |
1086 | /** |
1087 | * ida_simple_get - get a new id. | |
1088 | * @ida: the (initialized) ida. | |
1089 | * @start: the minimum id (inclusive, < 0x80000000) | |
1090 | * @end: the maximum id (exclusive, < 0x80000000 or 0) | |
1091 | * @gfp_mask: memory allocation flags | |
1092 | * | |
1093 | * Allocates an id in the range start <= id < end, or returns -ENOSPC. | |
1094 | * On memory allocation failure, returns -ENOMEM. | |
1095 | * | |
1096 | * Use ida_simple_remove() to get rid of an id. | |
1097 | */ | |
1098 | int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end, | |
1099 | gfp_t gfp_mask) | |
1100 | { | |
1101 | int ret, id; | |
1102 | unsigned int max; | |
46cbc1d3 | 1103 | unsigned long flags; |
88eca020 RR |
1104 | |
1105 | BUG_ON((int)start < 0); | |
1106 | BUG_ON((int)end < 0); | |
1107 | ||
1108 | if (end == 0) | |
1109 | max = 0x80000000; | |
1110 | else { | |
1111 | BUG_ON(end < start); | |
1112 | max = end - 1; | |
1113 | } | |
1114 | ||
1115 | again: | |
1116 | if (!ida_pre_get(ida, gfp_mask)) | |
1117 | return -ENOMEM; | |
1118 | ||
46cbc1d3 | 1119 | spin_lock_irqsave(&simple_ida_lock, flags); |
88eca020 RR |
1120 | ret = ida_get_new_above(ida, start, &id); |
1121 | if (!ret) { | |
1122 | if (id > max) { | |
1123 | ida_remove(ida, id); | |
1124 | ret = -ENOSPC; | |
1125 | } else { | |
1126 | ret = id; | |
1127 | } | |
1128 | } | |
46cbc1d3 | 1129 | spin_unlock_irqrestore(&simple_ida_lock, flags); |
88eca020 RR |
1130 | |
1131 | if (unlikely(ret == -EAGAIN)) | |
1132 | goto again; | |
1133 | ||
1134 | return ret; | |
1135 | } | |
1136 | EXPORT_SYMBOL(ida_simple_get); | |
1137 | ||
1138 | /** | |
1139 | * ida_simple_remove - remove an allocated id. | |
1140 | * @ida: the (initialized) ida. | |
1141 | * @id: the id returned by ida_simple_get. | |
1142 | */ | |
1143 | void ida_simple_remove(struct ida *ida, unsigned int id) | |
1144 | { | |
46cbc1d3 TH |
1145 | unsigned long flags; |
1146 | ||
88eca020 | 1147 | BUG_ON((int)id < 0); |
46cbc1d3 | 1148 | spin_lock_irqsave(&simple_ida_lock, flags); |
88eca020 | 1149 | ida_remove(ida, id); |
46cbc1d3 | 1150 | spin_unlock_irqrestore(&simple_ida_lock, flags); |
88eca020 RR |
1151 | } |
1152 | EXPORT_SYMBOL(ida_simple_remove); | |
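
For completeness, the simple interface in use; `my_ida` and the [0, 128) range are illustrative.

```c
#include <linux/idr.h>
#include <linux/gfp.h>

static DEFINE_IDA(my_ida);

static int my_instance_add(void)
{
	int id = ida_simple_get(&my_ida, 0, 128, GFP_KERNEL);	/* any free id in [0, 128) */

	if (id < 0)
		return id;		/* -ENOSPC or -ENOMEM */
	/* ... use id, e.g. as a device instance number ... */
	return id;
}

static void my_instance_del(int id)
{
	ida_simple_remove(&my_ida, id);
}
```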
1153 | ||
72dba584 TH |
1154 | /** |
1155 | * ida_init - initialize ida handle | |
1156 | * @ida: ida handle | |
1157 | * | |
1158 | * This function is used to set up the handle (@ida) that you will pass | |
1159 | * to the rest of the functions. | |
1160 | */ | |
1161 | void ida_init(struct ida *ida) | |
1162 | { | |
1163 | memset(ida, 0, sizeof(struct ida)); | |
1164 | idr_init(&ida->idr); | |
1165 | ||
1166 | } | |
1167 | EXPORT_SYMBOL(ida_init); |