/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Modified by Nadia Derbey to make it RCU safe.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix tree like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate a pointer, or
 * whatever (we treat it as a (void *)), with that id.  You can pass this
 * id to a user for him to pass back at a later time.  You then pass
 * that id to this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most of
 * the memory is returned (we keep MAX_IDR_FREE) in a local pool so we
 * don't need to go to the memory "store" during an id allocate; this is
 * so you don't need to be too concerned about locking and conflicts
 * with the slab allocator.
 */
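/*
 * A minimal usage sketch (illustrative only, not part of this file;
 * "my_idr" and "obj" are hypothetical caller-side names).  Allocate an
 * id for obj, look it up later, then release it:
 *
 *	DEFINE_IDR(my_idr);
 *
 *	id = idr_alloc(&my_idr, obj, 0, 0, GFP_KERNEL);
 *	...
 *	obj = idr_find(&my_idr, id);
 *	...
 *	idr_remove(&my_idr, id);
 */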
#ifndef TEST			// to test in user space...
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/export.h>
#endif
#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
static struct kmem_cache *idr_layer_cache;
static DEFINE_PER_CPU(struct idr_layer *, idr_preload_head);
static DEFINE_PER_CPU(int, idr_preload_cnt);
static DEFINE_SPINLOCK(simple_ida_lock);
/* the maximum ID which can be allocated given idr->layers */
static int idr_max(int layers)
{
	int bits = min_t(int, layers * IDR_BITS, MAX_IDR_SHIFT);

	return (1 << bits) - 1;
}
static struct idr_layer *get_from_free_list(struct idr *idp)
{
	struct idr_layer *p;
	unsigned long flags;

	spin_lock_irqsave(&idp->lock, flags);
	if ((p = idp->id_free)) {
		idp->id_free = p->ary[0];
		idp->id_free_cnt--;
		p->ary[0] = NULL;
	}
	spin_unlock_irqrestore(&idp->lock, flags);
	return p;
}
/**
 * idr_layer_alloc - allocate a new idr_layer
 * @gfp_mask: allocation mask
 * @layer_idr: optional idr to allocate from
 *
 * If @layer_idr is %NULL, directly allocate one using @gfp_mask or fetch
 * one from the per-cpu preload buffer.  If @layer_idr is not %NULL, fetch
 * an idr_layer from @layer_idr->id_free.
 *
 * @layer_idr is to maintain backward compatibility with the old alloc
 * interface - idr_pre_get() and idr_get_new*() - and will be removed
 * together with per-pool preload buffer.
 */
static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
{
	struct idr_layer *new;

	/* this is the old path, bypass to get_from_free_list() */
	if (layer_idr)
		return get_from_free_list(layer_idr);

	/* try to allocate directly from kmem_cache */
	new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
	if (new)
		return new;

	/*
	 * Try to fetch one from the per-cpu preload buffer if in process
	 * context.  See idr_preload() for details.
	 */
	if (!in_interrupt()) {
		preempt_disable();
		new = __this_cpu_read(idr_preload_head);
		if (new) {
			__this_cpu_write(idr_preload_head, new->ary[0]);
			__this_cpu_dec(idr_preload_cnt);
			new->ary[0] = NULL;
		}
		preempt_enable();
	}
	return new;
}
static void idr_layer_rcu_free(struct rcu_head *head)
{
	struct idr_layer *layer;

	layer = container_of(head, struct idr_layer, rcu_head);
	kmem_cache_free(idr_layer_cache, layer);
}

static inline void free_layer(struct idr_layer *p)
{
	call_rcu(&p->rcu_head, idr_layer_rcu_free);
}
/* only called when idp->lock is held */
static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	p->ary[0] = idp->id_free;
	idp->id_free = p;
	idp->id_free_cnt++;
}

static void move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	unsigned long flags;

	/*
	 * Depends on the return element being zeroed.
	 */
	spin_lock_irqsave(&idp->lock, flags);
	__move_to_free_list(idp, p);
	spin_unlock_irqrestore(&idp->lock, flags);
}
static void idr_mark_full(struct idr_layer **pa, int id)
{
	struct idr_layer *p = pa[0];
	int l = 0;

	__set_bit(id & IDR_MASK, &p->bitmap);
	/*
	 * If this layer is full mark the bit in the layer above to
	 * show that this part of the radix tree is full.  This may
	 * complete the layer above and require walking up the radix
	 * tree.
	 */
	while (p->bitmap == IDR_FULL) {
		if (!(p = pa[++l]))
			break;
		id = id >> IDR_BITS;
		__set_bit((id & IDR_MASK), &p->bitmap);
	}
}
/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp:	idr handle
 * @gfp_mask:	memory allocation flags
 *
 * This function should be called prior to calling the idr_get_new* functions.
 * It preallocates enough memory to satisfy the worst possible allocation. The
 * caller should pass in GFP_KERNEL if possible.  This of course requires that
 * no spinlocks be held.
 *
 * If the system is REALLY out of memory this function returns %0,
 * otherwise %1.
 */
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
	while (idp->id_free_cnt < MAX_IDR_FREE) {
		struct idr_layer *new;
		new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
		if (new == NULL)
			return 0;
		move_to_free_list(idp, new);
	}
	return 1;
}
EXPORT_SYMBOL(idr_pre_get);
/**
 * sub_alloc - try to allocate an id without growing the tree depth
 * @idp: idr handle
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer
 * @gfp_mask: allocation mask for idr_layer_alloc()
 * @layer_idr: optional idr passed to idr_layer_alloc()
 *
 * Allocate an id in range [@starting_id, INT_MAX] from @idp without
 * growing its depth.  Returns
 *
 *  the allocated id >= 0 if successful,
 *  -EAGAIN if the tree needs to grow for allocation to succeed,
 *  -ENOSPC if the id space is exhausted,
 *  -ENOMEM if more idr_layers need to be allocated.
 */
static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa,
		     gfp_t gfp_mask, struct idr *layer_idr)
{
	int n, m, sh;
	struct idr_layer *p, *new;
	int l, id, oid;
	unsigned long bm;

	id = *starting_id;
 restart:
	p = idp->top;
	l = idp->layers;
	pa[l--] = NULL;
	while (1) {
		/*
		 * We run around this while until we reach the leaf node...
		 */
		n = (id >> (IDR_BITS*l)) & IDR_MASK;
		bm = ~p->bitmap;
		m = find_next_bit(&bm, IDR_SIZE, n);
		if (m == IDR_SIZE) {
			/* no space available go back to previous layer. */
			l++;
			oid = id;
			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

			/* if already at the top layer, we need to grow */
			if (id >= 1 << (idp->layers * IDR_BITS)) {
				*starting_id = id;
				return -EAGAIN;
			}
			p = pa[l];
			BUG_ON(!p);

			/* If we need to go up one layer, continue the
			 * loop; otherwise, restart from the top.
			 */
			sh = IDR_BITS * (l + 1);
			if (oid >> sh == id >> sh)
				continue;
			else
				goto restart;
		}
		if (m != n) {
			sh = IDR_BITS*l;
			id = ((id >> sh) ^ n ^ m) << sh;
		}
		if ((id >= MAX_IDR_BIT) || (id < 0))
			return -ENOSPC;
		if (l == 0)
			break;
		/*
		 * Create the layer below if it is missing.
		 */
		if (!p->ary[m]) {
			new = idr_layer_alloc(gfp_mask, layer_idr);
			if (!new)
				return -ENOMEM;
			new->layer = l-1;
			rcu_assign_pointer(p->ary[m], new);
			p->count++;
		}
		pa[l--] = p;
		p = p->ary[m];
	}

	pa[l] = p;
	return id;
}
static int idr_get_empty_slot(struct idr *idp, int starting_id,
			      struct idr_layer **pa, gfp_t gfp_mask,
			      struct idr *layer_idr)
{
	struct idr_layer *p, *new;
	int layers, v, id;
	unsigned long flags;

	id = starting_id;
build_up:
	p = idp->top;
	layers = idp->layers;
	if (unlikely(!p)) {
		if (!(p = idr_layer_alloc(gfp_mask, layer_idr)))
			return -ENOMEM;
		p->layer = 0;
		layers = 1;
	}
	/*
	 * Add a new layer to the top of the tree if the requested
	 * id is larger than the currently allocated space.
	 */
	while (id > idr_max(layers)) {
		layers++;
		if (!p->count) {
			/* special case: if the tree is currently empty,
			 * then we grow the tree by moving the top node
			 * upwards.
			 */
			p->layer++;
			continue;
		}
		if (!(new = idr_layer_alloc(gfp_mask, layer_idr))) {
			/*
			 * The allocation failed.  If we built part of
			 * the structure tear it down.
			 */
			spin_lock_irqsave(&idp->lock, flags);
			for (new = p; p && p != idp->top; new = p) {
				p = p->ary[0];
				new->ary[0] = NULL;
				new->bitmap = new->count = 0;
				__move_to_free_list(idp, new);
			}
			spin_unlock_irqrestore(&idp->lock, flags);
			return -ENOMEM;
		}
		new->ary[0] = p;
		new->count = 1;
		new->layer = layers-1;
		if (p->bitmap == IDR_FULL)
			__set_bit(0, &new->bitmap);
		p = new;
	}
	rcu_assign_pointer(idp->top, p);
	idp->layers = layers;
	v = sub_alloc(idp, &id, pa, gfp_mask, layer_idr);
	if (v == -EAGAIN)
		goto build_up;
	return v;
}
/*
 * @id and @pa are from a successful allocation from idr_get_empty_slot().
 * Install the user pointer @ptr and mark the slot full.
 */
static void idr_fill_slot(void *ptr, int id, struct idr_layer **pa)
{
	rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], (struct idr_layer *)ptr);
	pa[0]->count++;
	idr_mark_full(pa, id);
}
/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If allocation from IDR's private freelist fails, idr_get_new_above() will
 * return %-EAGAIN.  The caller should retry the idr_pre_get() call to refill
 * IDR's preallocation and then retry the idr_get_new_above() call.
 *
 * If the idr is full idr_get_new_above() will return %-ENOSPC.
 *
 * @id returns a value in the range @starting_id ... %0x7fffffff
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	int rv;

	rv = idr_get_empty_slot(idp, starting_id, pa, 0, idp);
	if (rv < 0)
		return rv == -ENOMEM ? -EAGAIN : rv;

	idr_fill_slot(ptr, rv, pa);
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new_above);
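/*
 * Sketch of the legacy allocation loop described above (illustrative,
 * not part of this file; "my_idr" and "my_lock" are hypothetical
 * caller-side names).  The caller refills the preallocation pool and
 * retries whenever -EAGAIN is returned:
 *
 *	int id, err;
 *
 *	do {
 *		if (!idr_pre_get(&my_idr, GFP_KERNEL))
 *			return -ENOMEM;
 *		spin_lock(&my_lock);
 *		err = idr_get_new_above(&my_idr, ptr, 1, &id);
 *		spin_unlock(&my_lock);
 *	} while (err == -EAGAIN);
 */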
/**
 * idr_preload - preload for idr_alloc()
 * @gfp_mask: allocation mask to use for preloading
 *
 * Preload per-cpu layer buffer for idr_alloc().  Can only be used from
 * process context and each idr_preload() invocation should be matched with
 * idr_preload_end().  Note that preemption is disabled while preloaded.
 *
 * The first idr_alloc() in the preloaded section can be treated as if it
 * were invoked with @gfp_mask used for preloading.  This allows using more
 * permissive allocation masks for idrs protected by spinlocks.
 *
 * For example, if idr_alloc() below fails, the failure can be treated as
 * if idr_alloc() were called with GFP_KERNEL rather than GFP_NOWAIT.
 *
 *	idr_preload(GFP_KERNEL);
 *	spin_lock(lock);
 *
 *	id = idr_alloc(idr, ptr, start, end, GFP_NOWAIT);
 *
 *	spin_unlock(lock);
 *	idr_preload_end();
 *	if (id < 0)
 *		error;
 */
void idr_preload(gfp_t gfp_mask)
{
	/*
	 * Consuming preload buffer from non-process context breaks preload
	 * allocation guarantee.  Disallow usage from those contexts.
	 */
	WARN_ON_ONCE(in_interrupt());
	might_sleep_if(gfp_mask & __GFP_WAIT);

	preempt_disable();

	/*
	 * idr_alloc() is likely to succeed w/o full idr_layer buffer and
	 * return value from idr_alloc() needs to be checked for failure
	 * anyway.  Silently give up if allocation fails.  The caller can
	 * treat failures from idr_alloc() as if idr_alloc() were called
	 * with @gfp_mask which should be enough.
	 */
	while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) {
		struct idr_layer *new;

		preempt_enable();
		new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
		preempt_disable();
		if (!new)
			break;

		/* link the new one to per-cpu preload list */
		new->ary[0] = __this_cpu_read(idr_preload_head);
		__this_cpu_write(idr_preload_head, new);
		__this_cpu_inc(idr_preload_cnt);
	}
}
EXPORT_SYMBOL(idr_preload);
/**
 * idr_alloc - allocate new idr entry
 * @idr: the (initialized) idr
 * @ptr: pointer to be associated with the new id
 * @start: the minimum id (inclusive)
 * @end: the maximum id (exclusive, <= 0 for max)
 * @gfp_mask: memory allocation flags
 *
 * Allocate an id in [start, end) and associate it with @ptr.  If no ID is
 * available in the specified range, returns -ENOSPC.  On memory allocation
 * failure, returns -ENOMEM.
 *
 * Note that @end is treated as max when <= 0.  This is to always allow
 * using @start + N as @end as long as N is inside integer range.
 *
 * The user is responsible for exclusively synchronizing all operations
 * which may modify @idr.  However, read-only accesses such as idr_find()
 * or iteration can be performed under RCU read lock provided the user
 * destroys @ptr in RCU-safe way after removal from idr.
 */
int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
{
	int max = end > 0 ? end - 1 : INT_MAX;	/* inclusive upper limit */
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	int id;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	/* sanity checks */
	if (WARN_ON_ONCE(start < 0))
		return -EINVAL;
	if (unlikely(max < start))
		return -ENOSPC;

	/* allocate id */
	id = idr_get_empty_slot(idr, start, pa, gfp_mask, NULL);
	if (unlikely(id < 0))
		return id;
	if (unlikely(id > max))
		return -ENOSPC;

	idr_fill_slot(ptr, id, pa);
	return id;
}
EXPORT_SYMBOL_GPL(idr_alloc);
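/*
 * Minimal idr_alloc() sketch for a sleepable caller (illustrative;
 * "my_mutex" and "my_idr" are hypothetical caller-side names).  With no
 * spinlock held, GFP_KERNEL can be passed directly and idr_preload() is
 * unnecessary:
 *
 *	mutex_lock(&my_mutex);
 *	id = idr_alloc(&my_idr, ptr, 1, 0, GFP_KERNEL);
 *	mutex_unlock(&my_mutex);
 *	if (id < 0)
 *		return id;
 */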
static void idr_remove_warning(int id)
{
	printk(KERN_WARNING
		"idr_remove called for id=%d which is not allocated.\n", id);
	dump_stack();
}
static void sub_remove(struct idr *idp, int shift, int id)
{
	struct idr_layer *p = idp->top;
	struct idr_layer **pa[MAX_IDR_LEVEL + 1];
	struct idr_layer ***paa = &pa[0];
	struct idr_layer *to_free;
	int n;

	*paa = NULL;
	*++paa = &idp->top;

	while ((shift > 0) && p) {
		n = (id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		*++paa = &p->ary[n];
		p = p->ary[n];
		shift -= IDR_BITS;
	}
	n = id & IDR_MASK;
	if (likely(p != NULL && test_bit(n, &p->bitmap))) {
		__clear_bit(n, &p->bitmap);
		rcu_assign_pointer(p->ary[n], NULL);
		to_free = NULL;
		while (*paa && !--((**paa)->count)) {
			if (to_free)
				free_layer(to_free);
			to_free = **paa;
			**paa-- = NULL;
		}
		if (!*paa)
			idp->layers = 0;
		if (to_free)
			free_layer(to_free);
	} else
		idr_remove_warning(id);
}
/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
	struct idr_layer *p;
	struct idr_layer *to_free;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_IDR_MASK;

	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
	    idp->top->ary[0]) {
		/*
		 * Single child at leftmost slot: we can shrink the tree.
		 * This level is not needed anymore since when layers are
		 * inserted, they are inserted at the top of the existing
		 * tree.
		 */
		to_free = idp->top;
		p = idp->top->ary[0];
		rcu_assign_pointer(idp->top, p);
		--idp->layers;
		to_free->bitmap = to_free->count = 0;
		free_layer(to_free);
	}
	while (idp->id_free_cnt >= MAX_IDR_FREE) {
		p = get_from_free_list(idp);
		/*
		 * Note: we don't call the rcu callback here, since the only
		 * layers that fall into the freelist are those that have been
		 * preallocated.
		 */
		kmem_cache_free(idr_layer_cache, p);
	}
}
EXPORT_SYMBOL(idr_remove);
void __idr_remove_all(struct idr *idp)
{
	int n, id, max;
	int bt_mask;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = idp->top;
	rcu_assign_pointer(idp->top, NULL);
	max = idr_max(idp->layers);

	id = 0;
	while (id >= 0 && id <= max) {
		while (n > IDR_BITS && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = p->ary[(id >> n) & IDR_MASK];
		}

		bt_mask = id;
		id += 1 << n;
		/* Get the highest bit that the above add changed from 0->1. */
		while (n < fls(id ^ bt_mask)) {
			if (p)
				free_layer(p);
			n += IDR_BITS;
			p = *--paa;
		}
	}
	idp->layers = 0;
}
EXPORT_SYMBOL(__idr_remove_all);
/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 *
 * Free all id mappings and all idp_layers.  After this function, @idp is
 * completely unused and can be freed / recycled.  The caller is
 * responsible for ensuring that no one else accesses @idp during or after
 * idr_destroy().
 *
 * A typical clean-up sequence for objects stored in an idr tree will use
 * idr_for_each() to free all objects, if necessary, then idr_destroy() to
 * free up the id mappings and cached idr_layers.
 */
void idr_destroy(struct idr *idp)
{
	__idr_remove_all(idp);

	while (idp->id_free_cnt) {
		struct idr_layer *p = get_from_free_list(idp);
		kmem_cache_free(idr_layer_cache, p);
	}
}
EXPORT_SYMBOL(idr_destroy);
/**
 * idr_find - return pointer for given id
 * @idp: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers' lifetimes are correctly managed.
 */
void *idr_find(struct idr *idp, int id)
{
	int n;
	struct idr_layer *p;

	p = rcu_dereference_raw(idp->top);
	if (!p)
		return NULL;
	n = (p->layer+1) * IDR_BITS;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_IDR_MASK;

	if (id > idr_max(p->layer + 1))
		return NULL;

	while (n > 0 && p) {
		n -= IDR_BITS;
		BUG_ON(n != p->layer*IDR_BITS);
		p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
	}
	return (void *)p;
}
EXPORT_SYMBOL(idr_find);
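/*
 * Illustrative RCU lookup sketch (hypothetical caller code; assumes the
 * stored objects are freed in an RCU-safe way after idr_remove(), as the
 * comment above requires).  The object must not be used after
 * rcu_read_unlock() without taking a reference:
 *
 *	rcu_read_lock();
 *	obj = idr_find(&my_idr, id);
 *	if (obj)
 *		use(obj);
 *	rcu_read_unlock();
 */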
/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time.  If it returns anything other
 * than %0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
		 int (*fn)(int id, void *p, void *data), void *data)
{
	int n, id, max, error = 0;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = rcu_dereference_raw(idp->top);
	max = idr_max(idp->layers);

	id = 0;
	while (id >= 0 && id <= max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			error = fn(id, (void *)p, data);
			if (error)
				break;
		}

		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}

	return error;
}
EXPORT_SYMBOL(idr_for_each);
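/*
 * Illustrative callback sketch ("my_free_cb" is a hypothetical name); a
 * typical tear-down pairs this with idr_destroy(), as described at
 * idr_destroy() above.  A non-zero return would stop the iteration:
 *
 *	static int my_free_cb(int id, void *p, void *data)
 *	{
 *		kfree(p);
 *		return 0;
 *	}
 *
 *	idr_for_each(&my_idr, my_free_cb, NULL);
 *	idr_destroy(&my_idr);
 */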
/**
 * idr_get_next - look up the next object at or above the given id
 * @idp: idr handle
 * @nextidp: pointer to lookup key
 *
 * Returns a pointer to the registered object with the smallest id at or
 * above *@nextidp.  On success, *@nextidp is updated to that id so the
 * caller can resume iteration from there.
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers' lifetimes are correctly managed.
 */
void *idr_get_next(struct idr *idp, int *nextidp)
{
	struct idr_layer *p, *pa[MAX_IDR_LEVEL + 1];
	struct idr_layer **paa = &pa[0];
	int id = *nextidp;
	int n, max;

	/* find first ent */
	p = rcu_dereference_raw(idp->top);
	if (!p)
		return NULL;
	n = (p->layer + 1) * IDR_BITS;
	max = idr_max(p->layer + 1);

	while (id >= 0 && id <= max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			*nextidp = id;
			return p;
		}

		/*
		 * Proceed to the next layer at the current level.  Unlike
		 * idr_for_each(), @id isn't guaranteed to be aligned to
		 * layer boundary at this point and adding 1 << n may
		 * incorrectly skip IDs.  Make sure we jump to the
		 * beginning of the next layer using round_up().
		 */
		id = round_up(id + 1, 1 << n);
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(idr_get_next);
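/*
 * Illustrative iteration sketch built on idr_get_next() (hypothetical
 * caller code; this is the pattern behind the idr_for_each_entry()
 * helper in <linux/idr.h>).  The id must be advanced past the entry
 * just returned:
 *
 *	void *obj;
 *	int id = 0;
 *
 *	while ((obj = idr_get_next(&my_idr, &id)) != NULL) {
 *		use(obj);
 *		id++;
 *	}
 */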
/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A %-ENOENT return indicates that @id was not found.
 * A %-EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize with writers.
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
	int n;
	struct idr_layer *p, *old_p;

	p = idp->top;
	if (!p)
		return ERR_PTR(-EINVAL);

	n = (p->layer+1) * IDR_BITS;

	id &= MAX_IDR_MASK;

	if (id >= (1 << n))
		return ERR_PTR(-EINVAL);

	n -= IDR_BITS;
	while ((n > 0) && p) {
		p = p->ary[(id >> n) & IDR_MASK];
		n -= IDR_BITS;
	}

	n = id & IDR_MASK;
	if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
		return ERR_PTR(-ENOENT);

	old_p = p->ary[n];
	rcu_assign_pointer(p->ary[n], ptr);

	return old_p;
}
EXPORT_SYMBOL(idr_replace);
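/*
 * Illustrative sketch (hypothetical caller code): idr_replace() returns
 * either the old pointer or an ERR_PTR() value, so check with IS_ERR()
 * before using or freeing the result:
 *
 *	old = idr_replace(&my_idr, new_obj, id);
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);
 *	kfree(old);
 */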
void __init idr_init_cache(void)
{
	idr_layer_cache = kmem_cache_create("idr_layer_cache",
				sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
}
/**
 * idr_init - initialize idr handle
 * @idp:	idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
	memset(idp, 0, sizeof(struct idr));
	spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);
/**
 * DOC: IDA description
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than a full blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25  written by Tejun Heo <htejun@gmail.com>
 */
static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
	unsigned long flags;

	if (!ida->free_bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		if (!ida->free_bitmap) {
			ida->free_bitmap = bitmap;
			bitmap = NULL;
		}
		spin_unlock_irqrestore(&ida->idr.lock, flags);
	}

	kfree(bitmap);
}
/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida:	ida handle
 * @gfp_mask:	memory allocation flag
 *
 * This function should be called prior to locking and calling the
 * following function.  It preallocates enough memory to satisfy the
 * worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns %0,
 * otherwise %1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
	/* allocate idr_layers */
	if (!idr_pre_get(&ida->idr, gfp_mask))
		return 0;

	/* allocate free_bitmap */
	if (!ida->free_bitmap) {
		struct ida_bitmap *bitmap;

		bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
		if (!bitmap)
			return 0;

		free_bitmap(ida, bitmap);
	}

	return 1;
}
EXPORT_SYMBOL(ida_pre_get);
/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida:	ida handle
 * @starting_id: id to start search at
 * @p_id:	pointer to the allocated handle
 *
 * Allocate new ID above or equal to @starting_id.  It should be called
 * with any required locks.
 *
 * If memory is required, it will return %-EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return %-ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... %0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
	struct ida_bitmap *bitmap;
	unsigned long flags;
	int idr_id = starting_id / IDA_BITMAP_BITS;
	int offset = starting_id % IDA_BITMAP_BITS;
	int t, id;

 restart:
	/* get vacant slot */
	t = idr_get_empty_slot(&ida->idr, idr_id, pa, 0, &ida->idr);
	if (t < 0)
		return t == -ENOMEM ? -EAGAIN : t;

	if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT)
		return -ENOSPC;

	if (t != idr_id)
		offset = 0;
	idr_id = t;

	/* if bitmap isn't there, create a new one */
	bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
	if (!bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		bitmap = ida->free_bitmap;
		ida->free_bitmap = NULL;
		spin_unlock_irqrestore(&ida->idr.lock, flags);

		if (!bitmap)
			return -EAGAIN;

		memset(bitmap, 0, sizeof(struct ida_bitmap));
		rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
				(void *)bitmap);
		pa[0]->count++;
	}

	/* lookup for empty slot */
	t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
	if (t == IDA_BITMAP_BITS) {
		/* no empty slot after offset, continue to the next chunk */
		idr_id++;
		offset = 0;
		goto restart;
	}

	id = idr_id * IDA_BITMAP_BITS + t;
	if (id >= MAX_IDR_BIT)
		return -ENOSPC;

	__set_bit(t, bitmap->bitmap);
	if (++bitmap->nr_busy == IDA_BITMAP_BITS)
		idr_mark_full(pa, idr_id);

	*p_id = id;

	/* Each leaf node can handle nearly a thousand slots and the
	 * whole idea of ida is to have small memory foot print.
	 * Throw away extra resources one by one after each successful
	 * allocation.
	 */
	if (ida->idr.id_free_cnt || ida->free_bitmap) {
		struct idr_layer *p = get_from_free_list(&ida->idr);
		if (p)
			kmem_cache_free(idr_layer_cache, p);
	}

	return 0;
}
EXPORT_SYMBOL(ida_get_new_above);
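/*
 * Sketch of the ida_pre_get()/ida_get_new_above() retry loop described
 * above (illustrative; "my_ida" and "my_lock" are hypothetical
 * caller-side names):
 *
 *	int id, err;
 *
 *	do {
 *		if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *			return -ENOMEM;
 *		spin_lock(&my_lock);
 *		err = ida_get_new_above(&my_ida, 0, &id);
 *		spin_unlock(&my_lock);
 *	} while (err == -EAGAIN);
 */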
/**
 * ida_remove - remove the given ID
 * @ida:	ida handle
 * @id:	ID to free
 */
void ida_remove(struct ida *ida, int id)
{
	struct idr_layer *p = ida->idr.top;
	int shift = (ida->idr.layers - 1) * IDR_BITS;
	int idr_id = id / IDA_BITMAP_BITS;
	int offset = id % IDA_BITMAP_BITS;
	int n;
	struct ida_bitmap *bitmap;

	/* clear full bits while looking up the leaf idr_layer */
	while ((shift > 0) && p) {
		n = (idr_id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		p = p->ary[n];
		shift -= IDR_BITS;
	}

	if (p == NULL)
		goto err;

	n = idr_id & IDR_MASK;
	__clear_bit(n, &p->bitmap);

	bitmap = (void *)p->ary[n];
	if (!test_bit(offset, bitmap->bitmap))
		goto err;

	/* update bitmap and remove it if empty */
	__clear_bit(offset, bitmap->bitmap);
	if (--bitmap->nr_busy == 0) {
		__set_bit(n, &p->bitmap);	/* to please idr_remove() */
		idr_remove(&ida->idr, idr_id);
		free_bitmap(ida, bitmap);
	}

	return;

 err:
	printk(KERN_WARNING
	       "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);
/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida:	ida handle
 */
void ida_destroy(struct ida *ida)
{
	idr_destroy(&ida->idr);
	kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);
/**
 * ida_simple_get - get a new id.
 * @ida: the (initialized) ida.
 * @start: the minimum id (inclusive, < %0x80000000)
 * @end: the maximum id (exclusive, < %0x80000000 or %0)
 * @gfp_mask: memory allocation flags
 *
 * Allocates an id in the range start <= id < end, or returns -ENOSPC.
 * On memory allocation failure, returns -ENOMEM.
 *
 * Use ida_simple_remove() to get rid of an id.
 */
int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
		   gfp_t gfp_mask)
{
	int ret, id;
	unsigned int max;
	unsigned long flags;

	BUG_ON((int)start < 0);
	BUG_ON((int)end < 0);

	if (end == 0)
		max = 0x80000000;
	else {
		BUG_ON(end < start);
		max = end - 1;
	}

again:
	if (!ida_pre_get(ida, gfp_mask))
		return -ENOMEM;

	spin_lock_irqsave(&simple_ida_lock, flags);
	ret = ida_get_new_above(ida, start, &id);
	if (!ret) {
		if (id > max) {
			ida_remove(ida, id);
			ret = -ENOSPC;
		} else {
			ret = id;
		}
	}
	spin_unlock_irqrestore(&simple_ida_lock, flags);

	if (unlikely(ret == -EAGAIN))
		goto again;

	return ret;
}
EXPORT_SYMBOL(ida_simple_get);
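/*
 * Illustrative sketch pairing ida_simple_get() with ida_simple_remove()
 * (hypothetical caller code; "my_ida" is a caller-side name).  Passing
 * 0 as @end allows any positive id:
 *
 *	id = ida_simple_get(&my_ida, 0, 0, GFP_KERNEL);
 *	if (id < 0)
 *		return id;
 *	...
 *	ida_simple_remove(&my_ida, id);
 */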
/**
 * ida_simple_remove - remove an allocated id.
 * @ida: the (initialized) ida.
 * @id: the id returned by ida_simple_get.
 */
void ida_simple_remove(struct ida *ida, unsigned int id)
{
	unsigned long flags;

	BUG_ON((int)id < 0);
	spin_lock_irqsave(&simple_ida_lock, flags);
	ida_remove(ida, id);
	spin_unlock_irqrestore(&simple_ida_lock, flags);
}
EXPORT_SYMBOL(ida_simple_remove);
/**
 * ida_init - initialize ida handle
 * @ida:	ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
	memset(ida, 0, sizeof(struct ida));
	idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);