/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Modified by Nadia Derbey to make it RCU safe.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix tree like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate with that id a
 * pointer or whatever; we treat it as a (void *).  You can pass this
 * id to a user for him to pass back at a later time.  You then pass
 * that id to this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most of
 * the memory is returned (we keep IDR_FREE_MAX) in a local pool so we
 * don't need to go to the memory "store" during an id allocate, just
 * so you don't need to be too concerned about locking and conflicts
 * with the slab allocator.
 */
#ifndef TEST                    // to test in user space...
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#endif
#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>
static struct kmem_cache *idr_layer_cache;
static struct idr_layer *get_from_free_list(struct idr *idp)
{
        struct idr_layer *p;
        unsigned long flags;

        spin_lock_irqsave(&idp->lock, flags);
        if ((p = idp->id_free)) {
                idp->id_free = p->ary[0];
                idp->id_free_cnt--;
                p->ary[0] = NULL;
        }
        spin_unlock_irqrestore(&idp->lock, flags);
        return p;
}
/* only called when idp->lock is held */
static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
        p->ary[0] = idp->id_free;
        idp->id_free = p;
        idp->id_free_cnt++;
}
static void move_to_free_list(struct idr *idp, struct idr_layer *p)
{
        unsigned long flags;

        /*
         * Depends on the return element being zeroed.
         */
        spin_lock_irqsave(&idp->lock, flags);
        __move_to_free_list(idp, p);
        spin_unlock_irqrestore(&idp->lock, flags);
}
static void idr_mark_full(struct idr_layer **pa, int id)
{
        struct idr_layer *p = pa[0];
        int l = 0;

        __set_bit(id & IDR_MASK, &p->bitmap);
        /*
         * If this layer is full mark the bit in the layer above to
         * show that this part of the radix tree is full.  This may
         * complete the layer above and require walking up the radix
         * tree.
         */
        while (p->bitmap == IDR_FULL) {
                if (!(p = pa[++l]))
                        break;
                id = id >> IDR_BITS;
                __set_bit((id & IDR_MASK), &p->bitmap);
        }
}
/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp: idr handle
 * @gfp_mask: memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * idr_get_new* functions.  It preallocates enough memory to satisfy
 * the worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
        while (idp->id_free_cnt < IDR_FREE_MAX) {
                struct idr_layer *new;
                new = kmem_cache_alloc(idr_layer_cache, gfp_mask);
                if (new == NULL)
                        return 0;
                move_to_free_list(idp, new);
        }
        return 1;
}
EXPORT_SYMBOL(idr_pre_get);
static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
{
        int n, m, sh;
        struct idr_layer *p, *new;
        int l, id, oid;
        unsigned long bm;

        id = *starting_id;
 restart:
        p = idp->top;
        l = idp->layers;
        pa[l--] = NULL;
        while (1) {
                /*
                 * We run around this while until we reach the leaf node...
                 */
                n = (id >> (IDR_BITS*l)) & IDR_MASK;
                bm = ~p->bitmap;
                m = find_next_bit(&bm, IDR_SIZE, n);
                if (m == IDR_SIZE) {
                        /* no space available go back to previous layer. */
                        l++;
                        oid = id;
                        id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

                        /* if already at the top layer, we need to grow */
                        if (!(p = pa[l])) {
                                *starting_id = id;
                                return IDR_NEED_TO_GROW;
                        }

                        /* If we need to go up one layer, continue the
                         * loop; otherwise, restart from the top.
                         */
                        sh = IDR_BITS * (l + 1);
                        if (oid >> sh == id >> sh)
                                continue;
                        else
                                goto restart;
                }
                if (m != n) {
                        sh = IDR_BITS*l;
                        id = ((id >> sh) ^ n ^ m) << sh;
                }
                if ((id >= MAX_ID_BIT) || (id < 0))
                        return IDR_NOMORE_SPACE;
                if (l == 0)
                        break;
                /*
                 * Create the layer below if it is missing.
                 */
                if (!p->ary[m]) {
                        new = get_from_free_list(idp);
                        if (!new)
                                return -1;
                        rcu_assign_pointer(p->ary[m], new);
                        p->count++;
                }
                pa[l--] = p;
                p = p->ary[m];
        }

        pa[l] = p;
        return id;
}
static int idr_get_empty_slot(struct idr *idp, int starting_id,
                              struct idr_layer **pa)
{
        struct idr_layer *p, *new;
        int layers, v, id;
        unsigned long flags;

        id = starting_id;
build_up:
        p = idp->top;
        layers = idp->layers;
        if (unlikely(!p)) {
                if (!(p = get_from_free_list(idp)))
                        return -1;
                layers = 1;
        }
        /*
         * Add a new layer to the top of the tree if the requested
         * id is larger than the currently allocated space.
         */
        while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
                layers++;
                if (!p->count)
                        continue;
                if (!(new = get_from_free_list(idp))) {
                        /*
                         * The allocation failed.  If we built part of
                         * the structure tear it down.
                         */
                        spin_lock_irqsave(&idp->lock, flags);
                        for (new = p; p && p != idp->top; new = p) {
                                p = p->ary[0];
                                new->ary[0] = NULL;
                                new->bitmap = new->count = 0;
                                __move_to_free_list(idp, new);
                        }
                        spin_unlock_irqrestore(&idp->lock, flags);
                        return -1;
                }
                new->ary[0] = p;
                new->count = 1;
                if (p->bitmap == IDR_FULL)
                        __set_bit(0, &new->bitmap);
                p = new;
        }
        rcu_assign_pointer(idp->top, p);
        idp->layers = layers;
        v = sub_alloc(idp, &id, pa);
        if (v == IDR_NEED_TO_GROW)
                goto build_up;
        return v;
}
static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
        struct idr_layer *pa[MAX_LEVEL];
        int id;

        id = idr_get_empty_slot(idp, starting_id, pa);
        if (id >= 0) {
                /*
                 * Successfully found an empty slot.  Install the user
                 * pointer and mark the slot full.
                 */
                rcu_assign_pointer(pa[0]->ary[id & IDR_MASK],
                                (struct idr_layer *)ptr);
                pa[0]->count++;
                idr_mark_full(pa, id);
        }

        return id;
}
/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range @starting_id ... 0x7fffffff
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
        int rv;

        rv = idr_get_new_above_int(idp, ptr, starting_id);
        /*
         * This is a cheap hack until the IDR code can be fixed to
         * return proper error values.
         */
        if (rv < 0)
                return _idr_rc_to_errno(rv);
        *id = rv;
        return 0;
}
EXPORT_SYMBOL(idr_get_new_above);
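
/*
 * Illustrative usage sketch, not part of this file: the canonical
 * idr_pre_get()/idr_get_new_above() retry loop described in the comments
 * above.  The names my_idr, my_lock, my_obj and my_alloc_id() are made-up
 * caller-side placeholders; only the idr_* and spinlock calls are real API.
 *
 *      static DEFINE_IDR(my_idr);
 *      static DEFINE_SPINLOCK(my_lock);
 *
 *      int my_alloc_id(struct my_obj *obj)
 *      {
 *              int id, error;
 *
 *      again:
 *              if (!idr_pre_get(&my_idr, GFP_KERNEL))
 *                      return -ENOMEM;         // preallocation failed
 *              spin_lock(&my_lock);
 *              // request any id >= 1 and associate it with obj
 *              error = idr_get_new_above(&my_idr, obj, 1, &id);
 *              spin_unlock(&my_lock);
 *              if (error == -EAGAIN)
 *                      goto again;             // refill the layer pool
 *              if (error)
 *                      return error;           // e.g. -ENOSPC
 *              obj->id = id;
 *              return 0;
 *      }
 */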
/**
 * idr_get_new - allocate new idr entry
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range 0 ... 0x7fffffff
 */
int idr_get_new(struct idr *idp, void *ptr, int *id)
{
        int rv;

        rv = idr_get_new_above_int(idp, ptr, 0);
        /*
         * This is a cheap hack until the IDR code can be fixed to
         * return proper error values.
         */
        if (rv < 0)
                return _idr_rc_to_errno(rv);
        *id = rv;
        return 0;
}
EXPORT_SYMBOL(idr_get_new);
static void idr_remove_warning(int id)
{
        printk(KERN_WARNING
                "idr_remove called for id=%d which is not allocated.\n", id);
        dump_stack();
}
static void sub_remove(struct idr *idp, int shift, int id)
{
        struct idr_layer *p = idp->top;
        struct idr_layer **pa[MAX_LEVEL];
        struct idr_layer ***paa = &pa[0];
        int n;

        *paa = NULL;
        *++paa = &idp->top;

        while ((shift > 0) && p) {
                n = (id >> shift) & IDR_MASK;
                __clear_bit(n, &p->bitmap);
                *++paa = &p->ary[n];
                p = p->ary[n];
                shift -= IDR_BITS;
        }
        n = id & IDR_MASK;
        if (likely(p != NULL && test_bit(n, &p->bitmap))) {
                __clear_bit(n, &p->bitmap);
                p->ary[n] = NULL;
                while (*paa && !--((**paa)->count)) {
                        move_to_free_list(idp, **paa);
                        **paa-- = NULL;
                }
                if (!*paa)
                        idp->layers = 0;
        } else
                idr_remove_warning(id);
}
/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
        struct idr_layer *p;

        /* Mask off upper bits we don't use for the search. */
        id &= MAX_ID_MASK;

        sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
        if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
            idp->top->ary[0]) {  // We can drop a layer

                p = idp->top->ary[0];
                idp->top->bitmap = idp->top->count = 0;
                move_to_free_list(idp, idp->top);
                idp->top = p;
                --idp->layers;
        }
        while (idp->id_free_cnt >= IDR_FREE_MAX) {
                p = get_from_free_list(idp);
                kmem_cache_free(idr_layer_cache, p);
        }
        return;
}
EXPORT_SYMBOL(idr_remove);
/**
 * idr_remove_all - remove all ids from the given idr tree
 * @idp: idr handle
 *
 * idr_destroy() only frees up unused, cached idr_layers, but this
 * function will remove all id mappings and leave all idr_layers
 * unused.
 *
 * A typical clean-up sequence for objects stored in an idr tree will
 * use idr_for_each() to free all objects, if necessary, then
 * idr_remove_all() to remove all ids, and idr_destroy() to free
 * up the cached idr_layers.
 */
void idr_remove_all(struct idr *idp)
{
        int n, id, max;
        struct idr_layer *p;
        struct idr_layer *pa[MAX_LEVEL];
        struct idr_layer **paa = &pa[0];

        n = idp->layers * IDR_BITS;
        p = idp->top;
        max = 1 << n;

        id = 0;
        while (id < max) {
                while (n > IDR_BITS && p) {
                        n -= IDR_BITS;
                        *paa++ = p;
                        p = p->ary[(id >> n) & IDR_MASK];
                }

                id += 1 << n;
                while (n < fls(id)) {
                        if (p) {
                                memset(p, 0, sizeof *p);
                                move_to_free_list(idp, p);
                        }
                        n += IDR_BITS;
                        p = *--paa;
                }
        }
        idp->top = NULL;
        idp->layers = 0;
}
EXPORT_SYMBOL(idr_remove_all);
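
/*
 * Illustrative sketch, not part of this file, of the clean-up sequence
 * described in the comment above.  my_idr, my_free_one() and my_teardown()
 * are made-up caller-side names; the idr_* calls are the real API.
 *
 *      static int my_free_one(int id, void *p, void *data)
 *      {
 *              kfree(p);               // p is the pointer stored under id
 *              return 0;               // keep iterating
 *      }
 *
 *      static void my_teardown(void)
 *      {
 *              idr_for_each(&my_idr, my_free_one, NULL); // free the objects
 *              idr_remove_all(&my_idr);  // drop all id -> pointer mappings
 *              idr_destroy(&my_idr);     // free the cached idr_layers
 *      }
 */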
/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 */
void idr_destroy(struct idr *idp)
{
        while (idp->id_free_cnt) {
                struct idr_layer *p = get_from_free_list(idp);
                kmem_cache_free(idr_layer_cache, p);
        }
}
EXPORT_SYMBOL(idr_destroy);
/**
 * idr_find - return pointer for given id
 * @idp: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * The caller must serialize idr_find() vs idr_get_new() and idr_remove().
 */
void *idr_find(struct idr *idp, int id)
{
        int n;
        struct idr_layer *p;

        n = idp->layers * IDR_BITS;
        p = idp->top;

        /* Mask off upper bits we don't use for the search. */
        id &= MAX_ID_MASK;

        if (id >= (1 << n))
                return NULL;

        while (n > 0 && p) {
                n -= IDR_BITS;
                p = p->ary[(id >> n) & IDR_MASK];
        }
        return (void *)p;
}
EXPORT_SYMBOL(idr_find);
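
/*
 * Illustrative lookup sketch, not part of this file.  my_idr, my_lock and
 * struct my_obj are made-up caller-side names; idr_find() is the real call.
 * Per the comment above, the caller supplies its own serialization against
 * idr_get_new()/idr_remove(), here a spinlock.
 *
 *      struct my_obj *my_lookup(int id)
 *      {
 *              struct my_obj *obj;
 *
 *              spin_lock(&my_lock);
 *              obj = idr_find(&my_idr, id);    // NULL if id is not allocated
 *              spin_unlock(&my_lock);
 *              return obj;
 *      }
 */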
/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time.  If it returns anything other
 * than 0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
                 int (*fn)(int id, void *p, void *data), void *data)
{
        int n, id, max, error = 0;
        struct idr_layer *p;
        struct idr_layer *pa[MAX_LEVEL];
        struct idr_layer **paa = &pa[0];

        n = idp->layers * IDR_BITS;
        p = idp->top;
        max = 1 << n;

        id = 0;
        while (id < max) {
                while (n > 0 && p) {
                        n -= IDR_BITS;
                        *paa++ = p;
                        p = p->ary[(id >> n) & IDR_MASK];
                }

                if (p) {
                        error = fn(id, (void *)p, data);
                        if (error)
                                break;
                }

                id += 1 << n;
                while (n < fls(id)) {
                        n += IDR_BITS;
                        p = *--paa;
                }
        }

        return error;
}
EXPORT_SYMBOL(idr_for_each);
/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A -ENOENT return indicates that @id was not found.
 * A -EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize vs idr_find(), idr_get_new(), and idr_remove().
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
        int n;
        struct idr_layer *p, *old_p;

        n = idp->layers * IDR_BITS;
        p = idp->top;

        id &= MAX_ID_MASK;

        if (id >= (1 << n))
                return ERR_PTR(-EINVAL);

        n -= IDR_BITS;
        while ((n > 0) && p) {
                p = p->ary[(id >> n) & IDR_MASK];
                n -= IDR_BITS;
        }

        n = id & IDR_MASK;
        if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
                return ERR_PTR(-ENOENT);

        old_p = p->ary[n];
        p->ary[n] = ptr;

        return old_p;
}
EXPORT_SYMBOL(idr_replace);
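
/*
 * Illustrative sketch, not part of this file: swapping the pointer stored
 * under an existing id and checking the ERR_PTR()-encoded failures described
 * above.  my_idr and new_obj are made-up caller-side names.
 *
 *      void *old;
 *
 *      old = idr_replace(&my_idr, new_obj, id);
 *      if (IS_ERR(old))
 *              return PTR_ERR(old);    // -EINVAL or -ENOENT
 *      // otherwise 'old' is the previously registered pointer
 */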
static void idr_cache_ctor(struct kmem_cache *idr_layer_cache, void *idr_layer)
{
        memset(idr_layer, 0, sizeof(struct idr_layer));
}
void __init idr_init_cache(void)
{
        idr_layer_cache = kmem_cache_create("idr_layer_cache",
                                sizeof(struct idr_layer), 0, SLAB_PANIC,
                                idr_cache_ctor);
}
/**
 * idr_init - initialize idr handle
 * @idp: idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
        memset(idp, 0, sizeof(struct idr));
        spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);
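
/*
 * Illustrative sketch, not part of this file: an idr handle can be set up
 * either at run time with idr_init() or statically with the DEFINE_IDR()
 * macro from <linux/idr.h>.  my_idr and my_dynamic_idr are made-up names.
 *
 *      static DEFINE_IDR(my_idr);              // static initialization
 *
 *      struct idr my_dynamic_idr;
 *      idr_init(&my_dynamic_idr);              // run-time initialization
 */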
/*
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than a full blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25  written by Tejun Heo <htejun@gmail.com>
 */
static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
        unsigned long flags;

        if (!ida->free_bitmap) {
                spin_lock_irqsave(&ida->idr.lock, flags);
                if (!ida->free_bitmap) {
                        ida->free_bitmap = bitmap;
                        bitmap = NULL;
                }
                spin_unlock_irqrestore(&ida->idr.lock, flags);
        }

        kfree(bitmap);
}
/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida: ida handle
 * @gfp_mask: memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * following function.  It preallocates enough memory to satisfy the
 * worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
        /* allocate idr_layers */
        if (!idr_pre_get(&ida->idr, gfp_mask))
                return 0;

        /* allocate free_bitmap */
        if (!ida->free_bitmap) {
                struct ida_bitmap *bitmap;

                bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
                if (!bitmap)
                        return 0;

                free_bitmap(ida, bitmap);
        }

        return 1;
}
EXPORT_SYMBOL(ida_pre_get);
/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida: ida handle
 * @starting_id: id to start search at
 * @p_id: pointer to the allocated handle
 *
 * Allocate new ID above or equal to @starting_id.  It should be called
 * with any required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range 0 ... 0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
        struct idr_layer *pa[MAX_LEVEL];
        struct ida_bitmap *bitmap;
        unsigned long flags;
        int idr_id = starting_id / IDA_BITMAP_BITS;
        int offset = starting_id % IDA_BITMAP_BITS;
        int t, id;

 restart:
        /* get vacant slot */
        t = idr_get_empty_slot(&ida->idr, idr_id, pa);
        if (t < 0)
                return _idr_rc_to_errno(t);

        if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
                return -ENOSPC;

        if (t != idr_id)
                offset = 0;
        idr_id = t;

        /* if bitmap isn't there, create a new one */
        bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
        if (!bitmap) {
                spin_lock_irqsave(&ida->idr.lock, flags);
                bitmap = ida->free_bitmap;
                ida->free_bitmap = NULL;
                spin_unlock_irqrestore(&ida->idr.lock, flags);

                if (!bitmap)
                        return -EAGAIN;

                memset(bitmap, 0, sizeof(struct ida_bitmap));
                rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
                                (void *)bitmap);
                pa[0]->count++;
        }

        /* lookup for empty slot */
        t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
        if (t == IDA_BITMAP_BITS) {
                /* no empty slot after offset, continue to the next chunk */
                idr_id++;
                offset = 0;
                goto restart;
        }

        id = idr_id * IDA_BITMAP_BITS + t;
        if (id >= MAX_ID_BIT)
                return -ENOSPC;

        __set_bit(t, bitmap->bitmap);
        if (++bitmap->nr_busy == IDA_BITMAP_BITS)
                idr_mark_full(pa, idr_id);

        *p_id = id;

        /* Each leaf node can handle nearly a thousand slots and the
         * whole idea of ida is to have a small memory footprint.
         * Throw away extra resources one by one after each successful
         * allocation.
         */
        if (ida->idr.id_free_cnt || ida->free_bitmap) {
                struct idr_layer *p = get_from_free_list(&ida->idr);
                if (p)
                        kmem_cache_free(idr_layer_cache, p);
        }

        return 0;
}
EXPORT_SYMBOL(ida_get_new_above);
/**
 * ida_get_new - allocate new ID
 * @ida: ida handle
 * @p_id: pointer to the allocated handle
 *
 * Allocate new ID.  It should be called with any required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range 0 ... 0x7fffffff.
 */
int ida_get_new(struct ida *ida, int *p_id)
{
        return ida_get_new_above(ida, 0, p_id);
}
EXPORT_SYMBOL(ida_get_new);
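
/*
 * Illustrative ida usage sketch, not part of this file, mirroring the idr
 * pattern above: preallocate, allocate under the caller's own lock, retry
 * on -EAGAIN, and release with ida_remove().  my_ida, my_lock, my_get_id()
 * and my_put_id() are made-up caller-side names.
 *
 *      static DEFINE_IDA(my_ida);
 *      static DEFINE_SPINLOCK(my_lock);
 *
 *      int my_get_id(void)
 *      {
 *              int id, error;
 *
 *      again:
 *              if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *                      return -ENOMEM;
 *              spin_lock(&my_lock);
 *              error = ida_get_new(&my_ida, &id);
 *              spin_unlock(&my_lock);
 *              if (error == -EAGAIN)
 *                      goto again;
 *              return error ? error : id;
 *      }
 *
 *      void my_put_id(int id)
 *      {
 *              spin_lock(&my_lock);
 *              ida_remove(&my_ida, id);
 *              spin_unlock(&my_lock);
 *      }
 */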
/**
 * ida_remove - remove the given ID
 * @ida: ida handle
 * @id: ID to free
 */
void ida_remove(struct ida *ida, int id)
{
        struct idr_layer *p = ida->idr.top;
        int shift = (ida->idr.layers - 1) * IDR_BITS;
        int idr_id = id / IDA_BITMAP_BITS;
        int offset = id % IDA_BITMAP_BITS;
        int n;
        struct ida_bitmap *bitmap;

        /* clear full bits while looking up the leaf idr_layer */
        while ((shift > 0) && p) {
                n = (idr_id >> shift) & IDR_MASK;
                __clear_bit(n, &p->bitmap);
                p = p->ary[n];
                shift -= IDR_BITS;
        }

        if (p == NULL)
                goto err;

        n = idr_id & IDR_MASK;
        __clear_bit(n, &p->bitmap);

        bitmap = (void *)p->ary[n];
        if (!test_bit(offset, bitmap->bitmap))
                goto err;

        /* update bitmap and remove it if empty */
        __clear_bit(offset, bitmap->bitmap);
        if (--bitmap->nr_busy == 0) {
                __set_bit(n, &p->bitmap);       /* to please idr_remove() */
                idr_remove(&ida->idr, idr_id);
                free_bitmap(ida, bitmap);
        }

        return;

 err:
        printk(KERN_WARNING
               "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);
/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida: ida handle
 */
void ida_destroy(struct ida *ida)
{
        idr_destroy(&ida->idr);
        kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);
/**
 * ida_init - initialize ida handle
 * @ida: ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
        memset(ida, 0, sizeof(struct ida));
        idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);