/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved if
 * an RB-tree were used instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
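/*
 * Typical use (an illustrative sketch only, not part of this file): a driver
 * embeds a struct drm_mm for the address range it manages, initializes it
 * once, and then carves blocks out of it. The sizes below are made up, and
 * the drm_mm_get_block() convenience wrapper is assumed to come from
 * drm_mm.h:
 *
 *	struct drm_mm mm;
 *	struct drm_mm_node *node;
 *
 *	drm_mm_init(&mm, 0, 1024 * 1024);
 *	node = drm_mm_search_free(&mm, 4096, 0, 0);
 *	if (node)
 *		node = drm_mm_get_block(node, 4096, 0);
 *	...
 *	drm_mm_put_block(node);
 *	drm_mm_takedown(&mm);
 */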
#include "drmP.h"
#include "drm_mm.h"
#include <linux/slab.h>
#include <linux/seq_file.h>

#define MM_UNUSED_TARGET 4
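/*
 * Allocate a drm_mm_node, either directly from the slab or, if that fails,
 * from the manager's cache of pre-allocated unused nodes (see
 * drm_mm_pre_get()). Returns NULL if both sources are exhausted.
 */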
static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	if (atomic)
		child = kmalloc(sizeof(*child), GFP_ATOMIC);
	else
		child = kmalloc(sizeof(*child), GFP_KERNEL);

	if (unlikely(child == NULL)) {
		spin_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child =
			    list_entry(mm->unused_nodes.next,
				       struct drm_mm_node, free_stack);
			list_del(&child->free_stack);
			--mm->num_unused;
		}
		spin_unlock(&mm->unused_lock);
	}
	return child;
}
/* drm_mm_pre_get() - pre-allocate drm_mm_node structures
 * drm_mm: memory manager struct we are pre-allocating for
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		spin_unlock(&mm->unused_lock);
		node = kmalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->free_stack, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
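/*
 * Create the single node that covers the whole managed range and put it on
 * both the node list and the free stack. Used by drm_mm_init() to seed an
 * empty manager.
 */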
static int drm_mm_create_tail_node(struct drm_mm *mm,
				   unsigned long start,
				   unsigned long size, int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(mm, atomic);
	if (unlikely(child == NULL))
		return -ENOMEM;

	child->free = 1;
	child->size = size;
	child->start = start;
	child->mm = mm;

	list_add_tail(&child->node_list, &mm->node_list);
	list_add_tail(&child->free_stack, &mm->free_stack);

	return 0;
}
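/*
 * Split a node: carve @size bytes off the front of @parent into a new,
 * non-free node and shrink @parent accordingly. Returns the new node, or
 * NULL if no drm_mm_node could be obtained.
 */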
static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
						 unsigned long size,
						 int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(parent->mm, atomic);
	if (unlikely(child == NULL))
		return NULL;

	INIT_LIST_HEAD(&child->free_stack);

	child->free = 0;
	child->size = size;
	child->start = parent->start;
	child->mm = parent->mm;

	list_add_tail(&child->node_list, &parent->node_list);

	parent->size -= size;
	parent->start += size;
	return child;
}
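/*
 * Allocate @size bytes from the free node @node, splitting off any bytes
 * needed to satisfy @alignment first and returning them to the free pool.
 * @node should come from drm_mm_search_free() with the same size/alignment.
 */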
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
					     unsigned long size,
					     unsigned alignment,
					     int atomic)
{
	struct drm_mm_node *align_splitoff = NULL;
	unsigned tmp = 0;

	if (alignment)
		tmp = node->start % alignment;

	if (tmp) {
		align_splitoff =
		    drm_mm_split_at_start(node, alignment - tmp, atomic);
		if (unlikely(align_splitoff == NULL))
			return NULL;
	}

	if (node->size == size) {
		list_del_init(&node->free_stack);
		node->free = 0;
	} else {
		node = drm_mm_split_at_start(node, size, atomic);
	}

	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);
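/*
 * Range-restricted version of drm_mm_get_block_generic(): the allocation
 * additionally has to lie inside the [@start, @end] window, so space below
 * @start is split off along with any alignment waste.
 */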
struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
						   unsigned long size,
						   unsigned alignment,
						   unsigned long start,
						   unsigned long end,
						   int atomic)
{
	struct drm_mm_node *align_splitoff = NULL;
	unsigned tmp = 0;
	unsigned wasted = 0;

	if (node->start < start)
		wasted += start - node->start;
	if (alignment)
		tmp = ((node->start + wasted) % alignment);

	if (tmp)
		wasted += alignment - tmp;
	if (wasted) {
		align_splitoff = drm_mm_split_at_start(node, wasted, atomic);
		if (unlikely(align_splitoff == NULL))
			return NULL;
	}

	if (node->size == size) {
		list_del_init(&node->free_stack);
		node->free = 0;
	} else {
		node = drm_mm_split_at_start(node, size, atomic);
	}

	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);
/*
 * Put a block. Merge with the previous and/or next block if they are free.
 * Otherwise add to the free stack.
 */
void drm_mm_put_block(struct drm_mm_node *cur)
{
	struct drm_mm *mm = cur->mm;
	struct list_head *cur_head = &cur->node_list;
	struct list_head *root_head = &mm->node_list;
	struct drm_mm_node *prev_node = NULL;
	struct drm_mm_node *next_node;

	int merged = 0;

	if (cur_head->prev != root_head) {
		prev_node =
		    list_entry(cur_head->prev, struct drm_mm_node, node_list);
		if (prev_node->free) {
			prev_node->size += cur->size;
			merged = 1;
		}
	}
	if (cur_head->next != root_head) {
		next_node =
		    list_entry(cur_head->next, struct drm_mm_node, node_list);
		if (next_node->free) {
			if (merged) {
				prev_node->size += next_node->size;
				list_del(&next_node->node_list);
				list_del(&next_node->free_stack);
				spin_lock(&mm->unused_lock);
				if (mm->num_unused < MM_UNUSED_TARGET) {
					list_add(&next_node->free_stack,
						 &mm->unused_nodes);
					++mm->num_unused;
				} else
					kfree(next_node);
				spin_unlock(&mm->unused_lock);
			} else {
				next_node->size += cur->size;
				next_node->start = cur->start;
				merged = 1;
			}
		}
	}
	if (!merged) {
		cur->free = 1;
		list_add(&cur->free_stack, &mm->free_stack);
	} else {
		list_del(&cur->node_list);
		spin_lock(&mm->unused_lock);
		if (mm->num_unused < MM_UNUSED_TARGET) {
			list_add(&cur->free_stack, &mm->unused_nodes);
			++mm->num_unused;
		} else
			kfree(cur);
		spin_unlock(&mm->unused_lock);
	}
}
EXPORT_SYMBOL(drm_mm_put_block);
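/*
 * Walk the free stack for a node that can hold @size bytes at @alignment.
 * With @best_match the whole stack is scanned for the smallest fit;
 * otherwise the first fit is returned.
 */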
struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
				       unsigned long size,
				       unsigned alignment, int best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;
	unsigned wasted;

	best = NULL;
	best_size = ~0UL;

	list_for_each_entry(entry, &mm->free_stack, free_stack) {
		wasted = 0;

		if (entry->size < size)
			continue;

		if (alignment) {
			register unsigned tmp = entry->start % alignment;
			if (tmp)
				wasted += alignment - tmp;
		}

		if (entry->size >= size + wasted) {
			if (!best_match)
				return entry;
			if (entry->size < best_size) {
				best = entry;
				best_size = entry->size;
			}
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free);
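/*
 * As drm_mm_search_free(), but the candidate block must also overlap
 * [@start, @end] with enough room for @size aligned bytes inside the range.
 */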
struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
						unsigned long size,
						unsigned alignment,
						unsigned long start,
						unsigned long end,
						int best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;
	unsigned wasted;

	best = NULL;
	best_size = ~0UL;

	list_for_each_entry(entry, &mm->free_stack, free_stack) {
		wasted = 0;

		if (entry->size < size)
			continue;

		if (entry->start > end || (entry->start + entry->size) < start)
			continue;

		if (entry->start < start)
			wasted += start - entry->start;

		if (alignment) {
			register unsigned tmp =
			    (entry->start + wasted) % alignment;
			if (tmp)
				wasted += alignment - tmp;
		}

		if (entry->size >= size + wasted &&
		    (entry->start + wasted + size) <= end) {
			if (!best_match)
				return entry;
			if (entry->size < best_size) {
				best = entry;
				best_size = entry->size;
			}
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range);
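/*
 * Returns nonzero if the manager holds exactly one node, i.e. nothing has
 * been allocated from it (or everything allocated has been put back).
 */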
int drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->node_list;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);
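/*
 * Initialize a manager for the range [@start, @start + @size) and seed it
 * with one free node covering the whole range.
 */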
int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->node_list);
	INIT_LIST_HEAD(&mm->free_stack);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	spin_lock_init(&mm->unused_lock);

	return drm_mm_create_tail_node(mm, start, size, 0);
}
EXPORT_SYMBOL(drm_mm_init);
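/*
 * Tear down a manager. Refuses to do anything (and logs an error) while
 * blocks are still allocated; otherwise frees the remaining tail node and
 * drains the unused-node cache.
 */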
void drm_mm_takedown(struct drm_mm *mm)
{
	struct list_head *bnode = mm->free_stack.next;
	struct drm_mm_node *entry;
	struct drm_mm_node *next;

	entry = list_entry(bnode, struct drm_mm_node, free_stack);

	if (entry->node_list.next != &mm->node_list ||
	    entry->free_stack.next != &mm->free_stack) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	list_del(&entry->free_stack);
	list_del(&entry->node_list);
	kfree(entry);

	spin_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, free_stack) {
		list_del(&entry->free_stack);
		kfree(entry);
		--mm->num_unused;
	}
	spin_unlock(&mm->unused_lock);

	BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);
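/*
 * Dump every node to the kernel log, followed by total/used/free byte
 * counts. @prefix tags each line so concurrent dumps can be told apart.
 */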
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;

	list_for_each_entry(entry, &mm->node_list, node_list) {
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: %s\n",
		       prefix, entry->start, entry->start + entry->size,
		       entry->size, entry->free ? "free" : "used");
		total += entry->size;
		if (entry->free)
			total_free += entry->size;
		else
			total_used += entry->size;
	}
	printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
	       total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);
#if defined(CONFIG_DEBUG_FS)
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;

	list_for_each_entry(entry, &mm->node_list, node_list) {
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: %s\n",
			   entry->start, entry->start + entry->size,
			   entry->size, entry->free ? "free" : "used");
		total += entry->size;
		if (entry->free)
			total_free += entry->size;
		else
			total_used += entry->size;
	}
	seq_printf(m, "total: %lu, used %lu free %lu\n",
		   total, total_used, total_free);

	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif