Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * 2002-10-18 written by Jim Houston jim.houston@ccur.com | |
3 | * Copyright (C) 2002 by Concurrent Computer Corporation | |
4 | * Distributed under the GNU GPL license version 2. | |
5 | * | |
6 | * Modified by George Anzinger to reuse immediately and to use | |
7 | * find bit instructions. Also removed _irq on spinlocks. | |
8 | * | |
e15ae2dd | 9 | * Small id to pointer translation service. |
1da177e4 | 10 | * |
e15ae2dd | 11 | * It uses a radix tree like structure as a sparse array indexed |
1da177e4 | 12 | * by the id to obtain the pointer. The bitmap makes allocating |
e15ae2dd | 13 | * a new id quick. |
1da177e4 LT |
14 | * |
15 | * You call it to allocate an id (an int) and associate a pointer (or | |
16 | * whatever; we treat it as a (void *)) with that id. You can pass this | |
17 | * id to a user for them to pass back at a later time. You then pass | |
18 | * that id to this code and it returns your pointer. | |
19 | ||
e15ae2dd | 20 | * You can release ids at any time. When all ids are released, most of |
1da177e4 | 21 | * the memory is returned; we keep IDR_FREE_MAX layers in a local pool |
e15ae2dd | 22 | * so we don't need to go to the memory "store" during an id allocation, |
1da177e4 LT |
23 | * and so you don't need to be too concerned about locking and conflicts |
24 | * with the slab allocator. | |
25 | */ | |
26 | ||
27 | #ifndef TEST /* to test in user space... */ | |
28 | #include <linux/slab.h> | |
29 | #include <linux/init.h> | |
30 | #include <linux/module.h> | |
31 | #endif | |
5806f07c | 32 | #include <linux/err.h> |
1da177e4 LT |
33 | #include <linux/string.h> |
34 | #include <linux/idr.h> | |
35 | ||
36 | static kmem_cache_t *idr_layer_cache; | |
37 | ||
38 | static struct idr_layer *alloc_layer(struct idr *idp) | |
39 | { | |
40 | struct idr_layer *p; | |
41 | ||
42 | spin_lock(&idp->lock); | |
43 | if ((p = idp->id_free)) { | |
44 | idp->id_free = p->ary[0]; | |
45 | idp->id_free_cnt--; | |
46 | p->ary[0] = NULL; | |
47 | } | |
48 | spin_unlock(&idp->lock); | |
49 | return(p); | |
50 | } | |
51 | ||
1eec0056 SR |
52 | /* only called when idp->lock is held */ |
53 | static void __free_layer(struct idr *idp, struct idr_layer *p) | |
54 | { | |
55 | p->ary[0] = idp->id_free; | |
56 | idp->id_free = p; | |
57 | idp->id_free_cnt++; | |
58 | } | |
59 | ||
1da177e4 LT |
60 | static void free_layer(struct idr *idp, struct idr_layer *p) |
61 | { | |
62 | /* | |
63 | * Depends on the return element being zeroed. | |
64 | */ | |
65 | spin_lock(&idp->lock); | |
1eec0056 | 66 | __free_layer(idp, p); |
1da177e4 LT |
67 | spin_unlock(&idp->lock); |
68 | } | |
69 | ||
70 | /** | |
71 | * idr_pre_get - reserve resources for idr allocation | |
72 | * @idp: idr handle | |
73 | * @gfp_mask: memory allocation flags | |
74 | * | |
75 | * This function should be called prior to locking and then calling | |
76 | * idr_get_new(). It preallocates enough memory to satisfy | |
77 | * the worst possible allocation. | |
78 | * | |
79 | * If the system is REALLY out of memory this function returns 0, | |
80 | * otherwise 1. | |
81 | */ | |
fd4f2df2 | 82 | int idr_pre_get(struct idr *idp, gfp_t gfp_mask) |
1da177e4 LT |
83 | { |
84 | while (idp->id_free_cnt < IDR_FREE_MAX) { | |
85 | struct idr_layer *new; | |
86 | new = kmem_cache_alloc(idr_layer_cache, gfp_mask); | |
e15ae2dd | 87 | if (new == NULL) |
1da177e4 LT |
88 | return (0); |
89 | free_layer(idp, new); | |
90 | } | |
91 | return 1; | |
92 | } | |
93 | EXPORT_SYMBOL(idr_pre_get); | |
94 | ||
95 | static int sub_alloc(struct idr *idp, void *ptr, int *starting_id) | |
96 | { | |
97 | int n, m, sh; | |
98 | struct idr_layer *p, *new; | |
99 | struct idr_layer *pa[MAX_LEVEL]; | |
100 | int l, id; | |
101 | long bm; | |
102 | ||
103 | id = *starting_id; | |
104 | p = idp->top; | |
105 | l = idp->layers; | |
106 | pa[l--] = NULL; | |
107 | while (1) { | |
108 | /* | |
109 | * We iterate this loop until we reach the leaf node... |
110 | */ | |
111 | n = (id >> (IDR_BITS*l)) & IDR_MASK; | |
112 | bm = ~p->bitmap; | |
113 | m = find_next_bit(&bm, IDR_SIZE, n); | |
114 | if (m == IDR_SIZE) { | |
115 | /* no space available, go back to the previous layer. */ |
116 | l++; | |
e15ae2dd | 117 | id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1; |
1da177e4 LT |
118 | if (!(p = pa[l])) { |
119 | *starting_id = id; | |
120 | return -2; | |
121 | } | |
122 | continue; | |
123 | } | |
124 | if (m != n) { | |
125 | sh = IDR_BITS*l; | |
126 | id = ((id >> sh) ^ n ^ m) << sh; | |
127 | } | |
128 | if ((id >= MAX_ID_BIT) || (id < 0)) | |
129 | return -3; | |
130 | if (l == 0) | |
131 | break; | |
132 | /* | |
133 | * Create the layer below if it is missing. | |
134 | */ | |
135 | if (!p->ary[m]) { | |
136 | if (!(new = alloc_layer(idp))) | |
137 | return -1; | |
138 | p->ary[m] = new; | |
139 | p->count++; | |
140 | } | |
141 | pa[l--] = p; | |
142 | p = p->ary[m]; | |
143 | } | |
144 | /* | |
145 | * We have reached the leaf node, plant the | |
146 | * user's pointer and return the raw id. |
147 | */ | |
148 | p->ary[m] = (struct idr_layer *)ptr; | |
149 | __set_bit(m, &p->bitmap); | |
150 | p->count++; | |
151 | /* | |
152 | * If this layer is full mark the bit in the layer above | |
153 | * to show that this part of the radix tree is full. | |
154 | * This may complete the layer above and require walking | |
155 | * up the radix tree. | |
156 | */ | |
157 | n = id; | |
158 | while (p->bitmap == IDR_FULL) { | |
159 | if (!(p = pa[++l])) | |
160 | break; | |
161 | n = n >> IDR_BITS; | |
162 | __set_bit((n & IDR_MASK), &p->bitmap); | |
163 | } | |
164 | return(id); | |
165 | } | |
166 | ||
167 | static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id) | |
168 | { | |
169 | struct idr_layer *p, *new; | |
170 | int layers, v, id; | |
e15ae2dd | 171 | |
1da177e4 LT |
172 | id = starting_id; |
173 | build_up: | |
174 | p = idp->top; | |
175 | layers = idp->layers; | |
176 | if (unlikely(!p)) { | |
177 | if (!(p = alloc_layer(idp))) | |
178 | return -1; | |
179 | layers = 1; | |
180 | } | |
181 | /* | |
182 | * Add a new layer to the top of the tree if the requested | |
183 | * id is larger than the currently allocated space. | |
184 | */ | |
589777ea | 185 | while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) { |
1da177e4 LT |
186 | layers++; |
187 | if (!p->count) | |
188 | continue; | |
189 | if (!(new = alloc_layer(idp))) { | |
190 | /* | |
191 | * The allocation failed. If we built part of | |
192 | * the structure, tear it down. |
193 | */ | |
1eec0056 | 194 | spin_lock(&idp->lock); |
1da177e4 LT |
195 | for (new = p; p && p != idp->top; new = p) { |
196 | p = p->ary[0]; | |
197 | new->ary[0] = NULL; | |
198 | new->bitmap = new->count = 0; | |
1eec0056 | 199 | __free_layer(idp, new); |
1da177e4 | 200 | } |
1eec0056 | 201 | spin_unlock(&idp->lock); |
1da177e4 LT |
202 | return -1; |
203 | } | |
204 | new->ary[0] = p; | |
205 | new->count = 1; | |
206 | if (p->bitmap == IDR_FULL) | |
207 | __set_bit(0, &new->bitmap); | |
208 | p = new; | |
209 | } | |
210 | idp->top = p; | |
211 | idp->layers = layers; | |
212 | v = sub_alloc(idp, ptr, &id); | |
213 | if (v == -2) | |
214 | goto build_up; | |
215 | return(v); | |
216 | } | |
217 | ||
218 | /** | |
7c657f2f | 219 | * idr_get_new_above - allocate new idr entry above or equal to a start id |
1da177e4 LT |
220 | * @idp: idr handle |
221 | * @ptr: pointer you want associated with the id | |
222 | * @starting_id: id to start search at | |
223 | * @id: pointer to the allocated handle | |
224 | * | |
225 | * This is the allocate id function. It should be called with any | |
226 | * required locks. | |
227 | * | |
228 | * If memory is required, it will return -EAGAIN; you should unlock | |
229 | * and go back to the idr_pre_get() call. If the idr is full, it will | |
230 | * return -ENOSPC. | |
231 | * | |
232 | * @id returns a value in the range 0 ... 0x7fffffff | |
233 | */ | |
234 | int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id) | |
235 | { | |
236 | int rv; | |
e15ae2dd | 237 | |
1da177e4 LT |
238 | rv = idr_get_new_above_int(idp, ptr, starting_id); |
239 | /* | |
240 | * This is a cheap hack until the IDR code can be fixed to | |
241 | * return proper error values. | |
242 | */ | |
243 | if (rv < 0) { | |
244 | if (rv == -1) | |
245 | return -EAGAIN; | |
246 | else /* Will be -3 */ | |
247 | return -ENOSPC; | |
248 | } | |
249 | *id = rv; | |
250 | return 0; | |
251 | } | |
252 | EXPORT_SYMBOL(idr_get_new_above); | |
253 | ||
254 | /** | |
255 | * idr_get_new - allocate new idr entry | |
256 | * @idp: idr handle | |
257 | * @ptr: pointer you want associated with the id | |
258 | * @id: pointer to the allocated handle | |
259 | * | |
260 | * This is the allocate id function. It should be called with any | |
261 | * required locks. | |
262 | * | |
263 | * If memory is required, it will return -EAGAIN; you should unlock | |
264 | * and go back to the idr_pre_get() call. If the idr is full, it will | |
265 | * return -ENOSPC. | |
266 | * | |
267 | * @id returns a value in the range 0 ... 0x7fffffff | |
268 | */ | |
269 | int idr_get_new(struct idr *idp, void *ptr, int *id) | |
270 | { | |
271 | int rv; | |
e15ae2dd | 272 | |
1da177e4 LT |
273 | rv = idr_get_new_above_int(idp, ptr, 0); |
274 | /* | |
275 | * This is a cheap hack until the IDR code can be fixed to | |
276 | * return proper error values. | |
277 | */ | |
278 | if (rv < 0) { | |
279 | if (rv == -1) | |
280 | return -EAGAIN; | |
281 | else /* Will be -3 */ | |
282 | return -ENOSPC; | |
283 | } | |
284 | *id = rv; | |
285 | return 0; | |
286 | } | |
287 | EXPORT_SYMBOL(idr_get_new); | |
288 | ||
289 | static void idr_remove_warning(int id) | |
290 | { | |
291 | printk(KERN_WARNING "idr_remove called for id=%d which is not allocated.\n", id); | |
292 | dump_stack(); | |
293 | } | |
294 | ||
295 | static void sub_remove(struct idr *idp, int shift, int id) | |
296 | { | |
297 | struct idr_layer *p = idp->top; | |
298 | struct idr_layer **pa[MAX_LEVEL]; | |
299 | struct idr_layer ***paa = &pa[0]; | |
300 | int n; | |
301 | ||
302 | *paa = NULL; | |
303 | *++paa = &idp->top; | |
304 | ||
305 | while ((shift > 0) && p) { | |
306 | n = (id >> shift) & IDR_MASK; | |
307 | __clear_bit(n, &p->bitmap); | |
308 | *++paa = &p->ary[n]; | |
309 | p = p->ary[n]; | |
310 | shift -= IDR_BITS; | |
311 | } | |
312 | n = id & IDR_MASK; | |
313 | if (likely(p != NULL && test_bit(n, &p->bitmap))) { | |
314 | __clear_bit(n, &p->bitmap); | |
315 | p->ary[n] = NULL; | |
316 | while (*paa && !--((**paa)->count)) { | |
317 | free_layer(idp, **paa); | |
318 | **paa-- = NULL; | |
319 | } | |
e15ae2dd | 320 | if (!*paa) |
1da177e4 | 321 | idp->layers = 0; |
e15ae2dd | 322 | } else |
1da177e4 | 323 | idr_remove_warning(id); |
1da177e4 LT |
324 | } |
325 | ||
326 | /** | |
327 | * idr_remove - remove the given id and free its slot | |
328 | * @idp: idr handle | |
329 | * @id: unique key | |
330 | */ | |
331 | void idr_remove(struct idr *idp, int id) | |
332 | { | |
333 | struct idr_layer *p; | |
334 | ||
335 | /* Mask off upper bits we don't use for the search. */ | |
336 | id &= MAX_ID_MASK; | |
337 | ||
338 | sub_remove(idp, (idp->layers - 1) * IDR_BITS, id); | |
e15ae2dd JJ |
339 | if (idp->top && idp->top->count == 1 && (idp->layers > 1) && |
340 | idp->top->ary[0]) { /* we can drop a layer */ |
1da177e4 LT |
341 | |
342 | p = idp->top->ary[0]; | |
343 | idp->top->bitmap = idp->top->count = 0; | |
344 | free_layer(idp, idp->top); | |
345 | idp->top = p; | |
346 | --idp->layers; | |
347 | } | |
348 | if (idp->id_free_cnt >= IDR_FREE_MAX) { |
1da177e4 LT |
349 | p = alloc_layer(idp); |
350 | kmem_cache_free(idr_layer_cache, p); |
351 | } |
353 | } | |
354 | EXPORT_SYMBOL(idr_remove); | |
355 | ||
8d3b3591 AM |
356 | /** |
357 | * idr_destroy - release all cached layers within an idr tree | |
358 | * @idp: idr handle | |
359 | */ | |
360 | void idr_destroy(struct idr *idp) | |
361 | { | |
362 | while (idp->id_free_cnt) { | |
363 | struct idr_layer *p = alloc_layer(idp); | |
364 | kmem_cache_free(idr_layer_cache, p); | |
365 | } | |
366 | } | |
367 | EXPORT_SYMBOL(idr_destroy); | |
368 | ||
1da177e4 LT |
369 | /** |
370 | * idr_find - return pointer for given id | |
371 | * @idp: idr handle | |
372 | * @id: lookup key | |
373 | * | |
374 | * Return the pointer given the id it has been registered with. A %NULL | |
375 | * return indicates that @id is not valid or you passed %NULL in | |
376 | * idr_get_new(). | |
377 | * | |
378 | * The caller must serialize idr_find() vs idr_get_new() and idr_remove(). | |
379 | */ | |
380 | void *idr_find(struct idr *idp, int id) | |
381 | { | |
382 | int n; | |
383 | struct idr_layer *p; | |
384 | ||
385 | n = idp->layers * IDR_BITS; | |
386 | p = idp->top; | |
387 | ||
388 | /* Mask off upper bits we don't use for the search. */ | |
389 | id &= MAX_ID_MASK; | |
390 | ||
391 | if (id >= (1 << n)) | |
392 | return NULL; | |
393 | ||
394 | while (n > 0 && p) { | |
395 | n -= IDR_BITS; | |
396 | p = p->ary[(id >> n) & IDR_MASK]; | |
397 | } | |
398 | return((void *)p); | |
399 | } | |
400 | EXPORT_SYMBOL(idr_find); | |
401 | ||
5806f07c JM |
402 | /** |
403 | * idr_replace - replace pointer for given id | |
404 | * @idp: idr handle | |
405 | * @ptr: pointer you want associated with the id | |
406 | * @id: lookup key | |
407 | * | |
408 | * Replace the pointer registered with an id and return the old value. | |
410 | * An %ERR_PTR(-ENOENT) return indicates that @id was not found. | |
411 | * An %ERR_PTR(-EINVAL) return indicates that @id was out of range. | |
411 | * | |
412 | * The caller must serialize vs idr_find(), idr_get_new(), and idr_remove(). | |
413 | */ | |
414 | void *idr_replace(struct idr *idp, void *ptr, int id) | |
415 | { | |
416 | int n; | |
417 | struct idr_layer *p, *old_p; | |
418 | ||
419 | n = idp->layers * IDR_BITS; | |
420 | p = idp->top; | |
421 | ||
422 | id &= MAX_ID_MASK; | |
423 | ||
424 | if (id >= (1 << n)) | |
425 | return ERR_PTR(-EINVAL); | |
426 | ||
427 | n -= IDR_BITS; | |
428 | while ((n > 0) && p) { | |
429 | p = p->ary[(id >> n) & IDR_MASK]; | |
430 | n -= IDR_BITS; | |
431 | } | |
432 | ||
433 | n = id & IDR_MASK; | |
434 | if (unlikely(p == NULL || !test_bit(n, &p->bitmap))) | |
435 | return ERR_PTR(-ENOENT); | |
436 | ||
437 | old_p = p->ary[n]; | |
438 | p->ary[n] = ptr; | |
439 | ||
440 | return old_p; | |
441 | } | |
442 | EXPORT_SYMBOL(idr_replace); | |
443 | ||
e15ae2dd JJ |
444 | static void idr_cache_ctor(void *idr_layer, kmem_cache_t *cachep, |
445 | unsigned long flags) | |
1da177e4 LT |
446 | { |
447 | memset(idr_layer, 0, sizeof(struct idr_layer)); | |
448 | } | |
449 | ||
450 | static int init_id_cache(void) | |
451 | { | |
452 | if (!idr_layer_cache) | |
e15ae2dd | 453 | idr_layer_cache = kmem_cache_create("idr_layer_cache", |
1da177e4 LT |
454 | sizeof(struct idr_layer), 0, 0, idr_cache_ctor, NULL); |
455 | return 0; | |
456 | } | |
457 | ||
458 | /** | |
459 | * idr_init - initialize idr handle | |
460 | * @idp: idr handle | |
461 | * | |
462 | * This function is used to set up the handle (@idp) that you will pass | |
463 | * to the rest of the functions. | |
464 | */ | |
465 | void idr_init(struct idr *idp) | |
466 | { | |
467 | init_id_cache(); | |
468 | memset(idp, 0, sizeof(struct idr)); | |
469 | spin_lock_init(&idp->lock); | |
470 | } | |
471 | EXPORT_SYMBOL(idr_init); |
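Finally, a sketch of typical handle setup in a module, again with hypothetical names; idr_init() must run before any other idr_* call on the handle:

```c
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/module.h>

static struct idr my_idr;	/* hypothetical per-driver instance */

static int __init my_module_init(void)
{
	idr_init(&my_idr);	/* set up the handle before first use */
	return 0;
}
module_init(my_module_init);
```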