lib: add idr_for_each()
lib/idr.c
/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix tree like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate a pointer (or
 * whatever, we treat it as a (void *)) with that id.  You can pass
 * this id to a user for them to pass back at a later time.  You then
 * pass that id to this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most
 * of the memory is returned (we keep IDR_FREE_MAX) in a local pool so
 * we don't need to go to the memory "store" during an id allocate;
 * that way you don't need to be too concerned about locking and
 * conflicts with the slab allocator.
 */
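
/*
 * A minimal usage sketch (illustrative only; my_idr, my_ptr, id and the
 * error handling are hypothetical, not part of this file).  Real callers
 * hold their own lock around the idr_get_new() step:
 *
 *	struct idr my_idr;
 *	int id, ret;
 *
 *	idr_init(&my_idr);
 *	do {
 *		if (!idr_pre_get(&my_idr, GFP_KERNEL))
 *			return -ENOMEM;
 *		ret = idr_get_new(&my_idr, my_ptr, &id);
 *	} while (ret == -EAGAIN);
 *
 *	// later: map the id back to the pointer, then release it
 *	BUG_ON(idr_find(&my_idr, id) != my_ptr);
 *	idr_remove(&my_idr, id);
 */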

#ifndef TEST			// to test in user space...
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#endif
#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>

static struct kmem_cache *idr_layer_cache;

static struct idr_layer *alloc_layer(struct idr *idp)
{
	struct idr_layer *p;
	unsigned long flags;

	spin_lock_irqsave(&idp->lock, flags);
	if ((p = idp->id_free)) {
		idp->id_free = p->ary[0];
		idp->id_free_cnt--;
		p->ary[0] = NULL;
	}
	spin_unlock_irqrestore(&idp->lock, flags);
	return(p);
}

/* only called when idp->lock is held */
static void __free_layer(struct idr *idp, struct idr_layer *p)
{
	p->ary[0] = idp->id_free;
	idp->id_free = p;
	idp->id_free_cnt++;
}

static void free_layer(struct idr *idp, struct idr_layer *p)
{
	unsigned long flags;

	/*
	 * Depends on the return element being zeroed.
	 */
	spin_lock_irqsave(&idp->lock, flags);
	__free_layer(idp, p);
	spin_unlock_irqrestore(&idp->lock, flags);
}

static void idr_mark_full(struct idr_layer **pa, int id)
{
	struct idr_layer *p = pa[0];
	int l = 0;

	__set_bit(id & IDR_MASK, &p->bitmap);
	/*
	 * If this layer is full mark the bit in the layer above to
	 * show that this part of the radix tree is full.  This may
	 * complete the layer above and require walking up the radix
	 * tree.
	 */
	while (p->bitmap == IDR_FULL) {
		if (!(p = pa[++l]))
			break;
		id = id >> IDR_BITS;
		__set_bit((id & IDR_MASK), &p->bitmap);
	}
}

/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp: idr handle
 * @gfp_mask: memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * idr_get_new* functions.  It preallocates enough memory to satisfy
 * the worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
	while (idp->id_free_cnt < IDR_FREE_MAX) {
		struct idr_layer *new;
		new = kmem_cache_alloc(idr_layer_cache, gfp_mask);
		if (new == NULL)
			return (0);
		free_layer(idp, new);
	}
	return 1;
}
EXPORT_SYMBOL(idr_pre_get);

static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
{
	int n, m, sh;
	struct idr_layer *p, *new;
	int l, id, oid;
	long bm;

	id = *starting_id;
 restart:
	p = idp->top;
	l = idp->layers;
	pa[l--] = NULL;
	while (1) {
		/*
		 * We run around this while until we reach the leaf node...
		 */
		n = (id >> (IDR_BITS*l)) & IDR_MASK;
		bm = ~p->bitmap;
		m = find_next_bit(&bm, IDR_SIZE, n);
		if (m == IDR_SIZE) {
			/* no space available go back to previous layer. */
			l++;
			oid = id;
			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

			/* if already at the top layer, we need to grow */
			if (!(p = pa[l])) {
				*starting_id = id;
				return -2;
			}

			/* If we need to go up one layer, continue the
			 * loop; otherwise, restart from the top.
			 */
			sh = IDR_BITS * (l + 1);
			if (oid >> sh == id >> sh)
				continue;
			else
				goto restart;
		}
		if (m != n) {
			sh = IDR_BITS*l;
			id = ((id >> sh) ^ n ^ m) << sh;
		}
		if ((id >= MAX_ID_BIT) || (id < 0))
			return -3;
		if (l == 0)
			break;
		/*
		 * Create the layer below if it is missing.
		 */
		if (!p->ary[m]) {
			if (!(new = alloc_layer(idp)))
				return -1;
			p->ary[m] = new;
			p->count++;
		}
		pa[l--] = p;
		p = p->ary[m];
	}

	pa[l] = p;
	return id;
}

static int idr_get_empty_slot(struct idr *idp, int starting_id,
			      struct idr_layer **pa)
{
	struct idr_layer *p, *new;
	int layers, v, id;
	unsigned long flags;

	id = starting_id;
build_up:
	p = idp->top;
	layers = idp->layers;
	if (unlikely(!p)) {
		if (!(p = alloc_layer(idp)))
			return -1;
		layers = 1;
	}
	/*
	 * Add a new layer to the top of the tree if the requested
	 * id is larger than the currently allocated space.
	 */
	while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
		layers++;
		if (!p->count)
			continue;
		if (!(new = alloc_layer(idp))) {
			/*
			 * The allocation failed.  If we built part of
			 * the structure tear it down.
			 */
			spin_lock_irqsave(&idp->lock, flags);
			for (new = p; p && p != idp->top; new = p) {
				p = p->ary[0];
				new->ary[0] = NULL;
				new->bitmap = new->count = 0;
				__free_layer(idp, new);
			}
			spin_unlock_irqrestore(&idp->lock, flags);
			return -1;
		}
		new->ary[0] = p;
		new->count = 1;
		if (p->bitmap == IDR_FULL)
			__set_bit(0, &new->bitmap);
		p = new;
	}
	idp->top = p;
	idp->layers = layers;
	v = sub_alloc(idp, &id, pa);
	if (v == -2)
		goto build_up;
	return(v);
}

static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
	struct idr_layer *pa[MAX_LEVEL];
	int id;

	id = idr_get_empty_slot(idp, starting_id, pa);
	if (id >= 0) {
		/*
		 * Successfully found an empty slot.  Install the user
		 * pointer and mark the slot full.
		 */
		pa[0]->ary[id & IDR_MASK] = (struct idr_layer *)ptr;
		pa[0]->count++;
		idr_mark_full(pa, id);
	}

	return id;
}

/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range @starting_id ... 0x7fffffff
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, starting_id);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0) {
		if (rv == -1)
			return -EAGAIN;
		else /* Will be -3 */
			return -ENOSPC;
	}
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new_above);
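
/*
 * Illustrative sketch (hypothetical names): allocating ids at or above
 * a floor, here 100, follows the same -EAGAIN retry protocol as
 * idr_get_new():
 *
 *	do {
 *		if (!idr_pre_get(&my_idr, GFP_KERNEL))
 *			return -ENOMEM;
 *		ret = idr_get_new_above(&my_idr, my_ptr, 100, &id);
 *	} while (ret == -EAGAIN);
 *	// on success ret == 0 and id >= 100
 */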

/**
 * idr_get_new - allocate new idr entry
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range 0 ... 0x7fffffff
 */
int idr_get_new(struct idr *idp, void *ptr, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, 0);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0) {
		if (rv == -1)
			return -EAGAIN;
		else /* Will be -3 */
			return -ENOSPC;
	}
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new);

static void idr_remove_warning(int id)
{
	printk(KERN_WARNING
		"idr_remove called for id=%d which is not allocated.\n", id);
	dump_stack();
}

static void sub_remove(struct idr *idp, int shift, int id)
{
	struct idr_layer *p = idp->top;
	struct idr_layer **pa[MAX_LEVEL];
	struct idr_layer ***paa = &pa[0];
	int n;

	*paa = NULL;
	*++paa = &idp->top;

	while ((shift > 0) && p) {
		n = (id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		*++paa = &p->ary[n];
		p = p->ary[n];
		shift -= IDR_BITS;
	}
	n = id & IDR_MASK;
	if (likely(p != NULL && test_bit(n, &p->bitmap))) {
		__clear_bit(n, &p->bitmap);
		p->ary[n] = NULL;
		while (*paa && !--((**paa)->count)) {
			free_layer(idp, **paa);
			**paa-- = NULL;
		}
		if (!*paa)
			idp->layers = 0;
	} else
		idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
	struct idr_layer *p;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
	    idp->top->ary[0]) {	// We can drop a layer
		p = idp->top->ary[0];
		idp->top->bitmap = idp->top->count = 0;
		free_layer(idp, idp->top);
		idp->top = p;
		--idp->layers;
	}
	while (idp->id_free_cnt >= IDR_FREE_MAX) {
		p = alloc_layer(idp);
		kmem_cache_free(idr_layer_cache, p);
		return;
	}
}
EXPORT_SYMBOL(idr_remove);

/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 */
void idr_destroy(struct idr *idp)
{
	while (idp->id_free_cnt) {
		struct idr_layer *p = alloc_layer(idp);
		kmem_cache_free(idr_layer_cache, p);
	}
}
EXPORT_SYMBOL(idr_destroy);

/**
 * idr_find - return pointer for given id
 * @idp: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * The caller must serialize idr_find() vs idr_get_new() and idr_remove().
 */
void *idr_find(struct idr *idp, int id)
{
	int n;
	struct idr_layer *p;

	n = idp->layers * IDR_BITS;
	p = idp->top;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return NULL;

	while (n > 0 && p) {
		n -= IDR_BITS;
		p = p->ary[(id >> n) & IDR_MASK];
	}
	return((void *)p);
}
EXPORT_SYMBOL(idr_find);

/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new() and idr_remove()
 * are not allowed.
 *
 * We check the return of @fn each time.  If it returns anything other
 * than %0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
		 int (*fn)(int id, void *p, void *data), void *data)
{
	int n, id, max, error = 0;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = idp->top;
	max = 1 << n;

	id = 0;
	while (id < max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = p->ary[(id >> n) & IDR_MASK];
		}

		if (p) {
			error = fn(id, (void *)p, data);
			if (error)
				break;
		}

		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}

	return error;
}
EXPORT_SYMBOL(idr_for_each);
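
/*
 * Illustrative callback sketch (count_cb and my_idr are hypothetical
 * names): the iteration stops as soon as the callback returns non-zero.
 *
 *	static int count_cb(int id, void *p, void *data)
 *	{
 *		int *count = data;
 *		(*count)++;
 *		return 0;	// 0 means keep iterating
 *	}
 *
 *	int count = 0;
 *	idr_for_each(&my_idr, count_cb, &count);
 */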

/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * An %ERR_PTR(-ENOENT) return indicates that @id was not found.
 * An %ERR_PTR(-EINVAL) return indicates that @id was not within valid
 * constraints.
 *
 * The caller must serialize vs idr_find(), idr_get_new(), and idr_remove().
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
	int n;
	struct idr_layer *p, *old_p;

	n = idp->layers * IDR_BITS;
	p = idp->top;

	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return ERR_PTR(-EINVAL);

	n -= IDR_BITS;
	while ((n > 0) && p) {
		p = p->ary[(id >> n) & IDR_MASK];
		n -= IDR_BITS;
	}

	n = id & IDR_MASK;
	if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
		return ERR_PTR(-ENOENT);

	old_p = p->ary[n];
	p->ary[n] = ptr;

	return old_p;
}
EXPORT_SYMBOL(idr_replace);
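
/*
 * Illustrative sketch (hypothetical names): the return value is either
 * the old pointer or an ERR_PTR() value, so test it with IS_ERR():
 *
 *	old = idr_replace(&my_idr, new_ptr, id);
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);	// -ENOENT or -EINVAL
 *	// otherwise old is the previously registered pointer
 */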

static void idr_cache_ctor(void *idr_layer, struct kmem_cache *idr_layer_cache,
			   unsigned long flags)
{
	memset(idr_layer, 0, sizeof(struct idr_layer));
}

static int init_id_cache(void)
{
	if (!idr_layer_cache)
		idr_layer_cache = kmem_cache_create("idr_layer_cache",
			sizeof(struct idr_layer), 0, 0, idr_cache_ctor, NULL);
	return 0;
}

/**
 * idr_init - initialize idr handle
 * @idp: idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
	init_id_cache();
	memset(idp, 0, sizeof(struct idr));
	spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);


/*
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than full-blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25  written by Tejun Heo <htejun@gmail.com>
 */

static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
	unsigned long flags;

	if (!ida->free_bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		if (!ida->free_bitmap) {
			ida->free_bitmap = bitmap;
			bitmap = NULL;
		}
		spin_unlock_irqrestore(&ida->idr.lock, flags);
	}

	kfree(bitmap);
}

/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida: ida handle
 * @gfp_mask: memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * ida_get_new* functions.  It preallocates enough memory to satisfy
 * the worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
	/* allocate idr_layers */
	if (!idr_pre_get(&ida->idr, gfp_mask))
		return 0;

	/* allocate free_bitmap */
	if (!ida->free_bitmap) {
		struct ida_bitmap *bitmap;

		bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
		if (!bitmap)
			return 0;

		free_bitmap(ida, bitmap);
	}

	return 1;
}
EXPORT_SYMBOL(ida_pre_get);

/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida: ida handle
 * @starting_id: id to start search at
 * @p_id: pointer to the allocated handle
 *
 * Allocate new ID above or equal to @starting_id.  It should be
 * called with any required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... 0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
	struct idr_layer *pa[MAX_LEVEL];
	struct ida_bitmap *bitmap;
	unsigned long flags;
	int idr_id = starting_id / IDA_BITMAP_BITS;
	int offset = starting_id % IDA_BITMAP_BITS;
	int t, id;

 restart:
	/* get vacant slot */
	t = idr_get_empty_slot(&ida->idr, idr_id, pa);
	if (t < 0) {
		if (t == -1)
			return -EAGAIN;
		else /* will be -3 */
			return -ENOSPC;
	}

	if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
		return -ENOSPC;

	if (t != idr_id)
		offset = 0;
	idr_id = t;

	/* if bitmap isn't there, create a new one */
	bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
	if (!bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		bitmap = ida->free_bitmap;
		ida->free_bitmap = NULL;
		spin_unlock_irqrestore(&ida->idr.lock, flags);

		if (!bitmap)
			return -EAGAIN;

		memset(bitmap, 0, sizeof(struct ida_bitmap));
		pa[0]->ary[idr_id & IDR_MASK] = (void *)bitmap;
		pa[0]->count++;
	}

	/* lookup for empty slot */
	t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
	if (t == IDA_BITMAP_BITS) {
		/* no empty slot after offset, continue to the next chunk */
		idr_id++;
		offset = 0;
		goto restart;
	}

	id = idr_id * IDA_BITMAP_BITS + t;
	if (id >= MAX_ID_BIT)
		return -ENOSPC;

	__set_bit(t, bitmap->bitmap);
	if (++bitmap->nr_busy == IDA_BITMAP_BITS)
		idr_mark_full(pa, idr_id);

	*p_id = id;

	/* Each leaf node can handle nearly a thousand slots and the
	 * whole idea of ida is to have a small memory footprint.
	 * Throw away extra resources one by one after each successful
	 * allocation.
	 */
	if (ida->idr.id_free_cnt || ida->free_bitmap) {
		struct idr_layer *p = alloc_layer(&ida->idr);
		if (p)
			kmem_cache_free(idr_layer_cache, p);
	}

	return 0;
}
EXPORT_SYMBOL(ida_get_new_above);

/**
 * ida_get_new - allocate new ID
 * @ida: ida handle
 * @p_id: pointer to the allocated handle
 *
 * Allocate new ID.  It should be called with any required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range 0 ... 0x7fffffff.
 */
int ida_get_new(struct ida *ida, int *p_id)
{
	return ida_get_new_above(ida, 0, p_id);
}
EXPORT_SYMBOL(ida_get_new);
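
/*
 * Illustrative sketch (hypothetical names): the ida pairing mirrors the
 * idr one, except that no pointer is stored, only the integer ID:
 *
 *	do {
 *		if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *			return -ENOMEM;
 *		ret = ida_get_new(&my_ida, &id);
 *	} while (ret == -EAGAIN);
 *	// ... use the id ...
 *	ida_remove(&my_ida, id);
 */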

/**
 * ida_remove - remove the given ID
 * @ida: ida handle
 * @id: ID to free
 */
void ida_remove(struct ida *ida, int id)
{
	struct idr_layer *p = ida->idr.top;
	int shift = (ida->idr.layers - 1) * IDR_BITS;
	int idr_id = id / IDA_BITMAP_BITS;
	int offset = id % IDA_BITMAP_BITS;
	int n;
	struct ida_bitmap *bitmap;

	/* clear full bits while looking up the leaf idr_layer */
	while ((shift > 0) && p) {
		n = (idr_id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		p = p->ary[n];
		shift -= IDR_BITS;
	}

	if (p == NULL)
		goto err;

	n = idr_id & IDR_MASK;
	__clear_bit(n, &p->bitmap);

	bitmap = (void *)p->ary[n];
	if (!test_bit(offset, bitmap->bitmap))
		goto err;

	/* update bitmap and remove it if empty */
	__clear_bit(offset, bitmap->bitmap);
	if (--bitmap->nr_busy == 0) {
		__set_bit(n, &p->bitmap);	/* to please idr_remove() */
		idr_remove(&ida->idr, idr_id);
		free_bitmap(ida, bitmap);
	}

	return;

 err:
	printk(KERN_WARNING
	       "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida: ida handle
 */
void ida_destroy(struct ida *ida)
{
	idr_destroy(&ida->idr);
	kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_init - initialize ida handle
 * @ida: ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
	memset(ida, 0, sizeof(struct ida));
	idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);