/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved if
 * an RB-tree is used instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>

/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. The drivers are free to use the
 * resource allocator from the linux core if it suits them, the upside of
 * drm_mm is that it's in the DRM core, which means that it's easier to extend
 * for some of the crazier special purpose needs of gpus.
 *
 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * datastructures. drm_mm itself will not do any allocations of its own, so if
 * drivers choose not to embed nodes they still need to allocate them
 * themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This
 * is useful for taking over initial mode setting configurations from the
 * firmware, where an object needs to be created which exactly matches the
 * firmware's scanout target. As long as the range is still free it can be
 * inserted anytime after the allocator is initialized, which helps with
 * avoiding looped dependencies in the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic datastructures seems to be a fairly decent approach to
 * clustering allocations and avoiding too much fragmentation. This means free
 * space searches are O(num_holes). Given all the fancy features drm_mm
 * supports, something better would be fairly complex, and since gfx thrashing
 * is a fairly steep cliff it is not a real concern. Removing a node again is
 * O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just
 * an opaque unsigned long) which in conjunction with a driver callback can be
 * used to implement sophisticated placement restrictions. The i915 DRM driver
 * uses this to implement guard pages between incompatible caching domains in
 * the graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally iteration helpers to walk all nodes and all holes are provided, as
 * are some basic allocator dumpers for debugging. A minimal usage sketch
 * follows below.
 */

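/*
 * Example: a minimal usage sketch. Everything outside the drm_mm_* calls
 * (the vram_mgr struct and function names) is hypothetical driver code, not
 * part of this file:
 *
 *	struct vram_mgr {
 *		struct drm_mm mm;
 *	};
 *
 *	static int vram_mgr_alloc(struct vram_mgr *mgr,
 *				  struct drm_mm_node *node, u64 size)
 *	{
 *		return drm_mm_insert_node_generic(&mgr->mm, node, size, 0, 0,
 *						  DRM_MM_SEARCH_DEFAULT,
 *						  DRM_MM_CREATE_DEFAULT);
 *	}
 *
 * @node must be zeroed before insertion and removed again with
 * drm_mm_remove_node() once the range is no longer needed.
 */
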
static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
						      u64 size,
						      unsigned alignment,
						      unsigned long color,
						      enum drm_mm_search_flags flags);
static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
							       u64 size,
							       unsigned alignment,
							       unsigned long color,
							       u64 start,
							       u64 end,
							       enum drm_mm_search_flags flags);

static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 u64 size, unsigned alignment,
				 unsigned long color,
				 enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	u64 hole_start = drm_mm_hole_node_start(hole_node);
	u64 hole_end = drm_mm_hole_node_end(hole_node);
	u64 adj_start = hole_start;
	u64 adj_end = hole_end;

	BUG_ON(node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	/*
	 * Round adj_start to the requested alignment: down when allocating
	 * top-down, up otherwise.
	 */
	if (alignment) {
		u64 tmp = adj_start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= rem;
			else
				adj_start += alignment - rem;
		}
	}

	BUG_ON(adj_start < hole_start);
	BUG_ON(adj_end > hole_end);

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > adj_end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. This is
 * useful to initialize the allocator with preallocated objects which must be
 * set-up before the range allocator can be set-up, e.g. when taking over a
 * firmware framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	struct drm_mm_node *hole;
	u64 end;
	u64 hole_start;
	u64 hole_end;

	BUG_ON(node == NULL);

	end = node->start + node->size;

	/* Find the relevant hole to add our node to */
	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		if (hole_start > node->start || hole_end < end)
			continue;

		node->mm = mm;
		node->allocated = 1;

		INIT_LIST_HEAD(&node->hole_stack);
		list_add(&node->node_list, &hole->node_list);

		if (node->start == hole_start) {
			hole->hole_follows = 0;
			list_del_init(&hole->hole_stack);
		}

		node->hole_follows = 0;
		if (end != hole_end) {
			list_add(&node->hole_stack, &mm->hole_stack);
			node->hole_follows = 1;
		}

		return 0;
	}

	return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_reserve_node);

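/*
 * Example: taking over a firmware framebuffer at boot (illustrative sketch;
 * fb_base, fb_size and the surrounding mm are hypothetical driver values):
 *
 *	struct drm_mm_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
 *	int ret;
 *
 *	if (!node)
 *		return -ENOMEM;
 *	node->start = fb_base;
 *	node->size = fb_size;
 *	ret = drm_mm_reserve_node(&mm, node);
 *	if (ret)
 *		kfree(node);	(-ENOSPC: the range is already in use)
 */
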
/**
 * drm_mm_insert_node_generic - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
			       u64 size, unsigned alignment,
			       unsigned long color,
			       enum drm_mm_search_flags sflags,
			       enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_generic(mm, size, alignment,
					       color, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);

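/*
 * Example: a best-fit, top-down allocation using the search and allocator
 * flags (sketch; @mm, @node, @size and @alignment are assumed to be set up
 * by the caller):
 *
 *	ret = drm_mm_insert_node_generic(mm, node, size, alignment, 0,
 *					 DRM_MM_SEARCH_BEST,
 *					 DRM_MM_CREATE_TOP);
 */
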
static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       u64 size, unsigned alignment,
				       unsigned long color,
				       u64 start, u64 end,
				       enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	u64 hole_start = drm_mm_hole_node_start(hole_node);
	u64 hole_end = drm_mm_hole_node_end(hole_node);
	u64 adj_start = hole_start;
	u64 adj_end = hole_end;

	BUG_ON(!hole_node->hole_follows || node->allocated);

	/* Clip the hole to the allowed [start, end] range. */
	if (adj_start < start)
		adj_start = start;
	if (adj_end > end)
		adj_end = end;

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		u64 tmp = adj_start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= rem;
			else
				adj_start += alignment - rem;
		}
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start < start);
	BUG_ON(node->start < adj_start);
	BUG_ON(node->start + node->size > adj_end);
	BUG_ON(node->start + node->size > end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

/**
 * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @start: start of the allowed range for this node
 * @end: end of the allowed range for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
					u64 size, unsigned alignment,
					unsigned long color,
					u64 start, u64 end,
					enum drm_mm_search_flags sflags,
					enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_in_range_generic(mm,
							size, alignment, color,
							start, end, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper_range(hole_node, node,
				   size, alignment, color,
				   start, end, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);

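/*
 * Example: restricting an allocation to the CPU-mappable part of a GPU
 * aperture (sketch; @mappable_end is hypothetical driver knowledge):
 *
 *	ret = drm_mm_insert_node_in_range_generic(mm, node, size, 0, 0,
 *						  0, mappable_end,
 *						  DRM_MM_SEARCH_DEFAULT,
 *						  DRM_MM_CREATE_DEFAULT);
 */
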
/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need
 * to be cleared again before it can be re-inserted into this or any other
 * drm_mm allocator. It is a bug to call this function on an unallocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	if (WARN_ON(!node->allocated))
		return;

	BUG_ON(node->scanned_block || node->scanned_prev_free
	       || node->scanned_next_free);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (node->hole_follows) {
		BUG_ON(__drm_mm_hole_node_start(node) ==
		       __drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else
		BUG_ON(__drm_mm_hole_node_start(node) !=
		       __drm_mm_hole_node_end(node));

	/* The hole left behind merges into the preceding node's hole. */
	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment)
{
	if (end - start < size)
		return 0;

	if (alignment) {
		u64 tmp = start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem)
			start += alignment - rem;
	}

	return end >= start + size;
}

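/*
 * Note: do_div() divides its u64 first argument in place and returns the
 * remainder, so the sequence above rounds @start up to the next multiple of
 * @alignment. For example, with start = 0x1003 and alignment = 0x1000:
 *
 *	tmp = 0x1003;
 *	rem = do_div(tmp, 0x1000);	(now tmp == 1, rem == 3)
 *	start += 0x1000 - rem;		(start == 0x2000)
 */
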
static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
						      u64 size,
						      unsigned alignment,
						      unsigned long color,
						      enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	u64 adj_start;
	u64 adj_end;
	u64 best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		u64 hole_size = adj_end - adj_start;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}

static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
							       u64 size,
							       unsigned alignment,
							       unsigned long color,
							       u64 start,
							       u64 end,
							       enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	u64 adj_start;
	u64 adj_end;
	u64 best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		u64 hole_size = adj_end - adj_start;

		if (adj_start < start)
			adj_start = start;
		if (adj_end > end)
			adj_end = end;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}

/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful for when drivers embed the drm_mm_node structure and hence
 * can't move allocations by reassigning pointers. It's a combination of
 * remove and insert with the guarantee that the allocation start will match.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;
	new->color = old->color;

	old->allocated = 0;
	new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);

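/*
 * Example: handing an allocation over to a new backing object (sketch; the
 * object type and its embedded node member are hypothetical):
 *
 *	drm_mm_replace_node(&old_obj->vma_node, &new_obj->vma_node);
 *	(new_obj now owns the range; old_obj->vma_node is free for reuse)
 */
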
/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have contiguous allocations for a given object.
 * When evicting objects to make space for a new one it is therefore not the
 * most efficient to simply select objects from the tail of an LRU until
 * there's a suitable hole: Especially for big objects or nodes that otherwise
 * have special allocation constraints there's a good chance we evict lots of
 * (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_init_scan() or drm_mm_init_scan_with_range(). Then the driver adds
 * objects to the roster (probably by walking an LRU list, but this can be
 * freely implemented) until a suitable hole is found or there's no further
 * evictable object.
 *
 * Then the driver must walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is used
 * in the scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected in the scan. Adding and
 * removing an object is O(1), and since freeing a node is also O(1) the
 * overall complexity is O(scanned_objects). So like the free stack which
 * needs to be walked before a scan operation even begins this is linear in
 * the number of objects. It doesn't seem to hurt badly. A sketch of the full
 * scan loop follows below.
 */

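/*
 * Example: an eviction scan loop (illustrative sketch; the gem_obj type, its
 * list links and the lru list are hypothetical driver code):
 *
 *	LIST_HEAD(scan_list);
 *	struct gem_obj *obj, *tmp;
 *	bool found = false;
 *
 *	drm_mm_init_scan(&mm, size, alignment, 0);
 *	list_for_each_entry(obj, &lru, lru_link) {
 *		list_add(&obj->scan_link, &scan_list);
 *		if (drm_mm_scan_add_block(&obj->node)) {
 *			found = true;
 *			break;
 *		}
 *	}
 *
 * Because list_add() prepends, walking scan_list forward visits the objects
 * in exactly the reverse order of addition, as required:
 *
 *	list_for_each_entry_safe(obj, tmp, &scan_list, scan_link) {
 *		if (!drm_mm_scan_remove_block(&obj->node))
 *			list_del(&obj->scan_link);
 *	}
 *
 * Everything still on scan_list must now be evicted (removing and freeing
 * its node) to actually produce the hole found above.
 */
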
/**
 * drm_mm_init_scan - initialize lru scanning
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. Note that there's no need to specify allocation flags, since
 * they only change the place a node is allocated from within a suitable hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm,
		      u64 size,
		      unsigned alignment,
		      unsigned long color)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_check_range = 0;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * drm_mm_init_scan_with_range - initialize range-restricted lru scanning
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. Note that there's no need to specify allocation flags, since
 * they only change the place a node is allocated from within a suitable hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
				 u64 size,
				 unsigned alignment,
				 unsigned long color,
				 u64 start,
				 u64 end)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_start = start;
	mm->scan_end = end;
	mm->scan_check_range = 1;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the
 * desired hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;
	u64 hole_start, hole_end;
	u64 adj_start, adj_end;

	mm->scanned_blocks++;

	BUG_ON(node->scanned_block);
	node->scanned_block = 1;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	/*
	 * Temporarily unlink the node so its range merges into the preceding
	 * hole; the dead list pointers are reused to remember prev_node and
	 * the previously scanned node, so drm_mm_scan_remove_block() can undo
	 * this when called in reverse order.
	 */
	node->scanned_preceeds_hole = prev_node->hole_follows;
	prev_node->hole_follows = 1;
	list_del(&node->node_list);
	node->node_list.prev = &prev_node->node_list;
	node->node_list.next = &mm->prev_scanned_node->node_list;
	mm->prev_scanned_node = node;

	adj_start = hole_start = drm_mm_hole_node_start(prev_node);
	adj_end = hole_end = drm_mm_hole_node_end(prev_node);

	if (mm->scan_check_range) {
		if (adj_start < mm->scan_start)
			adj_start = mm->scan_start;
		if (adj_end > mm->scan_end)
			adj_end = mm->scan_end;
	}

	if (mm->color_adjust)
		mm->color_adjust(prev_node, mm->scan_color,
				 &adj_start, &adj_end);

	if (check_free_hole(adj_start, adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		mm->scan_hit_start = hole_start;
		mm->scan_hit_end = hole_end;
		return true;
	}

	return false;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @node: drm_mm_node to remove
 *
 * Nodes _must_ be removed from the scan list in exactly the reverse order in
 * which they have been added, otherwise the internal state of the memory
 * manager will be corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
 * return the just freed block (because it's at the top of the free_stack
 * list).
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always
 * return false when no hole has been found.
 */
bool drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	mm->scanned_blocks--;

	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	prev_node->hole_follows = node->scanned_preceeds_hole;
	list_add(&node->node_list, &prev_node->node_list);

	return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
		node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

/**
 * drm_mm_clean - checks whether an allocator is clean
 * @mm: drm_mm allocator to check
 *
 * Returns:
 * True if the allocator is completely free, false if there's still a node
 * allocated in it.
 */
bool drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->head_node.node_list;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
	INIT_LIST_HEAD(&mm->hole_stack);
	mm->scanned_blocks = 0;

	/*
	 * Clever trick to avoid a special case in the free hole tracking:
	 * the head node is a sentinel placed so that the hole following it
	 * spans the entire managed range [start, start + size).
	 */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	INIT_LIST_HEAD(&mm->head_node.hole_stack);
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	mm->color_adjust = NULL;
}
EXPORT_SYMBOL(drm_mm_init);

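/*
 * Example: setting up an allocator over a 256 MiB VRAM range starting at 0
 * (sketch; the size is an arbitrary assumption):
 *
 *	static struct drm_mm mm;	(static storage is already zeroed)
 *
 *	drm_mm_init(&mm, 0, 256 * 1024 * 1024);
 */
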
/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
 */
void drm_mm_takedown(struct drm_mm *mm)
{
	WARN(!list_empty(&mm->head_node.node_list),
	     "Memory manager not clean during takedown.\n");
}
EXPORT_SYMBOL(drm_mm_takedown);

static u64 drm_mm_debug_hole(struct drm_mm_node *entry,
			     const char *prefix)
{
	u64 hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		pr_debug("%s %#llx-%#llx: %llu: free\n", prefix, hole_start,
			 hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

/**
 * drm_mm_debug_table - dump allocator state to dmesg
 * @mm: drm_mm allocator to dump
 * @prefix: prefix to use for dumping to dmesg
 */
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_debug_hole(&mm->head_node, prefix);

	drm_mm_for_each_node(entry, mm) {
		pr_debug("%s %#llx-%#llx: %llu: used\n", prefix, entry->start,
			 entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_debug_hole(entry, prefix);
	}
	total = total_free + total_used;

	pr_debug("%s total: %llu, used %llu free %llu\n", prefix, total,
		 total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
static u64 drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
{
	u64 hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		seq_printf(m, "%#018llx-%#018llx: %llu: free\n", hole_start,
			   hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

/**
 * drm_mm_dump_table - dump allocator state to a seq_file
 * @m: seq_file to dump to
 * @mm: drm_mm allocator to dump
 */
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(m, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		seq_printf(m, "%#018llx-%#018llx: %llu: used\n", entry->start,
			   entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(m, entry);
	}
	total = total_free + total_used;

	seq_printf(m, "total: %llu, used %llu free %llu\n", total,
		   total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif