/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved if
 * an RB-tree is used instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>
#include <linux/interval_tree_generic.h>

/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. The drivers are free to use the
 * resource allocator from the linux core if it suits them, the upside of
 * drm_mm is that it's in the DRM core. Which means that it's easier to extend
 * for some of the crazier special purpose needs of gpus.
 *
 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * datastructures. drm_mm itself will not do any allocations of its own, so if
 * drivers choose not to embed nodes they still need to allocate them
 * themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This
 * is useful for taking over initial mode setting configurations from the
 * firmware, where an object needs to be created which exactly matches the
 * firmware's scanout target. As long as the range is still free it can be
 * inserted anytime after the allocator is initialized, which helps with
 * avoiding looped dependencies in the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic datastructures seems to be a fairly decent approach to clustering
 * allocations and avoiding too much fragmentation. This means free space
 * searches are O(num_holes). Given all the fancy features drm_mm supports,
 * something better would be fairly complex, and since gfx thrashing is a
 * fairly steep cliff anyway it is not a real concern. Removing a node again
 * is O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just an
 * opaque unsigned long) which in conjunction with a driver callback can be
 * used to implement sophisticated placement restrictions. The i915 DRM driver
 * uses this to implement guard pages between incompatible caching domains in
 * the graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally iteration helpers to walk all nodes and all holes are provided as
 * are some basic allocator dumpers for debugging.
 */
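
/*
 * Example (editor's sketch, not part of the original file): a driver will
 * typically embed a &drm_mm_node in its own buffer object and let drm_mm hand
 * out offsets from a managed aperture. The names below (my_buffer,
 * my_buffer_alloc) are hypothetical:
 *
 *      struct my_buffer {
 *              struct drm_mm_node node;        // must be zeroed before insert
 *              void *cpu_addr;
 *      };
 *
 *      static int my_buffer_alloc(struct drm_mm *mm, struct my_buffer *buf,
 *                                 u64 size)
 *      {
 *              // bottom-up search for the first hole that fits
 *              return drm_mm_insert_node_generic(mm, &buf->node, size, 0, 0,
 *                                                DRM_MM_SEARCH_DEFAULT,
 *                                                DRM_MM_CREATE_DEFAULT);
 *      }
 *
 * On success buf->node.start holds the assigned offset; drm_mm_remove_node()
 * returns the range to the allocator.
 */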

static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
                                                u64 size,
                                                unsigned alignment,
                                                unsigned long color,
                                                enum drm_mm_search_flags flags);
static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
                                                u64 size,
                                                unsigned alignment,
                                                unsigned long color,
                                                u64 start,
                                                u64 end,
                                                enum drm_mm_search_flags flags);

#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>

#define STACKDEPTH 32
#define BUFSZ 4096

static noinline void save_stack(struct drm_mm_node *node)
{
        unsigned long entries[STACKDEPTH];
        struct stack_trace trace = {
                .entries = entries,
                .max_entries = STACKDEPTH,
                .skip = 1
        };

        save_stack_trace(&trace);
        if (trace.nr_entries != 0 &&
            trace.entries[trace.nr_entries-1] == ULONG_MAX)
                trace.nr_entries--;

        /* May be called under spinlock, so avoid sleeping */
        node->stack = depot_save_stack(&trace, GFP_NOWAIT);
}

static void show_leaks(struct drm_mm *mm)
{
        struct drm_mm_node *node;
        unsigned long entries[STACKDEPTH];
        char *buf;

        buf = kmalloc(BUFSZ, GFP_KERNEL);
        if (!buf)
                return;

        list_for_each_entry(node, &mm->head_node.node_list, node_list) {
                struct stack_trace trace = {
                        .entries = entries,
                        .max_entries = STACKDEPTH
                };

                if (!node->stack) {
                        DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
                                  node->start, node->size);
                        continue;
                }

                depot_fetch_stack(node->stack, &trace);
                snprint_stack_trace(buf, BUFSZ, &trace, 0);
                DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
                          node->start, node->size, buf);
        }

        kfree(buf);
}

#undef STACKDEPTH
#undef BUFSZ
#else
static void save_stack(struct drm_mm_node *node) { }
static void show_leaks(struct drm_mm *mm) { }
#endif

#define START(node) ((node)->start)
#define LAST(node)  ((node)->start + (node)->size - 1)

INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
                     u64, __subtree_last,
                     START, LAST, static inline, drm_mm_interval_tree)

struct drm_mm_node *
drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last)
{
        return drm_mm_interval_tree_iter_first(&mm->interval_tree,
                                               start, last);
}
EXPORT_SYMBOL(drm_mm_interval_first);

struct drm_mm_node *
drm_mm_interval_next(struct drm_mm_node *node, u64 start, u64 last)
{
        return drm_mm_interval_tree_iter_next(node, start, last);
}
EXPORT_SYMBOL(drm_mm_interval_next);
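
/*
 * Example (editor's sketch): walking every allocated node that overlaps
 * [start, last] with the two interval helpers above; mm, start and last are
 * assumed to be set up by the caller:
 *
 *      struct drm_mm_node *node;
 *
 *      for (node = drm_mm_interval_first(&mm, start, last);
 *           node != NULL;
 *           node = drm_mm_interval_next(node, start, last))
 *              pr_info("node [%08llx + %08llx]\n", node->start, node->size);
 */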

static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
                                          struct drm_mm_node *node)
{
        struct drm_mm *mm = hole_node->mm;
        struct rb_node **link, *rb;
        struct drm_mm_node *parent;

        node->__subtree_last = LAST(node);

        if (hole_node->allocated) {
                rb = &hole_node->rb;
                while (rb) {
                        parent = rb_entry(rb, struct drm_mm_node, rb);
                        if (parent->__subtree_last >= node->__subtree_last)
                                break;

                        parent->__subtree_last = node->__subtree_last;
                        rb = rb_parent(rb);
                }

                rb = &hole_node->rb;
                link = &hole_node->rb.rb_right;
        } else {
                rb = NULL;
                link = &mm->interval_tree.rb_node;
        }

        while (*link) {
                rb = *link;
                parent = rb_entry(rb, struct drm_mm_node, rb);
                if (parent->__subtree_last < node->__subtree_last)
                        parent->__subtree_last = node->__subtree_last;
                if (node->start < parent->start)
                        link = &parent->rb.rb_left;
                else
                        link = &parent->rb.rb_right;
        }

        rb_link_node(&node->rb, rb, link);
        rb_insert_augmented(&node->rb,
                            &mm->interval_tree,
                            &drm_mm_interval_tree_augment);
}

static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
                                 struct drm_mm_node *node,
                                 u64 size, unsigned alignment,
                                 unsigned long color,
                                 enum drm_mm_allocator_flags flags)
{
        struct drm_mm *mm = hole_node->mm;
        u64 hole_start = drm_mm_hole_node_start(hole_node);
        u64 hole_end = drm_mm_hole_node_end(hole_node);
        u64 adj_start = hole_start;
        u64 adj_end = hole_end;

        BUG_ON(node->allocated);

        if (mm->color_adjust)
                mm->color_adjust(hole_node, color, &adj_start, &adj_end);

        if (flags & DRM_MM_CREATE_TOP)
                adj_start = adj_end - size;

        if (alignment) {
                u64 tmp = adj_start;
                unsigned rem;

                rem = do_div(tmp, alignment);
                if (rem) {
                        if (flags & DRM_MM_CREATE_TOP)
                                adj_start -= rem;
                        else
                                adj_start += alignment - rem;
                }
        }

        BUG_ON(adj_start < hole_start);
        BUG_ON(adj_end > hole_end);

        if (adj_start == hole_start) {
                hole_node->hole_follows = 0;
                list_del(&hole_node->hole_stack);
        }

        node->start = adj_start;
        node->size = size;
        node->mm = mm;
        node->color = color;
        node->allocated = 1;

        list_add(&node->node_list, &hole_node->node_list);

        drm_mm_interval_tree_add_node(hole_node, node);

        BUG_ON(node->start + node->size > adj_end);

        node->hole_follows = 0;
        if (__drm_mm_hole_node_start(node) < hole_end) {
                list_add(&node->hole_stack, &mm->hole_stack);
                node->hole_follows = 1;
        }

        save_stack(node);
}

/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. This is useful
 * to initialize the allocator with preallocated objects which must be set-up
 * before the range allocator can be set-up, e.g. when taking over a firmware
 * framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
        u64 end = node->start + node->size;
        struct drm_mm_node *hole;
        u64 hole_start, hole_end;

        if (WARN_ON(node->size == 0))
                return -EINVAL;

        /* Find the relevant hole to add our node to */
        hole = drm_mm_interval_tree_iter_first(&mm->interval_tree,
                                               node->start, ~(u64)0);
        if (hole) {
                if (hole->start < end)
                        return -ENOSPC;
        } else {
                hole = list_entry(&mm->head_node.node_list,
                                  typeof(*hole), node_list);
        }

        hole = list_last_entry(&hole->node_list, typeof(*hole), node_list);
        if (!hole->hole_follows)
                return -ENOSPC;

        hole_start = __drm_mm_hole_node_start(hole);
        hole_end = __drm_mm_hole_node_end(hole);
        if (hole_start > node->start || hole_end < end)
                return -ENOSPC;

        node->mm = mm;
        node->allocated = 1;

        list_add(&node->node_list, &hole->node_list);

        drm_mm_interval_tree_add_node(hole, node);

        if (node->start == hole_start) {
                hole->hole_follows = 0;
                list_del(&hole->hole_stack);
        }

        node->hole_follows = 0;
        if (end != hole_end) {
                list_add(&node->hole_stack, &mm->hole_stack);
                node->hole_follows = 1;
        }

        save_stack(node);

        return 0;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
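
/*
 * Example (editor's sketch): taking over a firmware-programmed scanout buffer
 * at driver load, as described above. The offset and size are illustrative
 * and would normally be read back from the hardware:
 *
 *      struct drm_mm_node fw_fb = {};  // reserved nodes must start out zeroed
 *      int err;
 *
 *      fw_fb.start = 0x100000;         // placement chosen by the firmware
 *      fw_fb.size = 8 * 1024 * 1024;   // 8 MiB scanout buffer
 *      fw_fb.color = 0;
 *      err = drm_mm_reserve_node(&mm, &fw_fb);
 *      // -ENOSPC here means the range was already handed out
 */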

/**
 * drm_mm_insert_node_generic - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
                               u64 size, unsigned alignment,
                               unsigned long color,
                               enum drm_mm_search_flags sflags,
                               enum drm_mm_allocator_flags aflags)
{
        struct drm_mm_node *hole_node;

        if (WARN_ON(size == 0))
                return -EINVAL;

        hole_node = drm_mm_search_free_generic(mm, size, alignment,
                                               color, sflags);
        if (!hole_node)
                return -ENOSPC;

        drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags);
        return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);
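
/*
 * Example (editor's sketch): requesting a 64 KiB aligned block placed
 * top-down, e.g. to keep the low offsets free for allocations with stricter
 * placement limits. Pairing DRM_MM_SEARCH_BELOW with DRM_MM_CREATE_TOP here
 * follows the flag descriptions and is an assumption, not taken from a
 * specific driver:
 *
 *      err = drm_mm_insert_node_generic(&mm, &node, size, 64 * 1024, 0,
 *                                       DRM_MM_SEARCH_BELOW,
 *                                       DRM_MM_CREATE_TOP);
 */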

static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
                                       struct drm_mm_node *node,
                                       u64 size, unsigned alignment,
                                       unsigned long color,
                                       u64 start, u64 end,
                                       enum drm_mm_allocator_flags flags)
{
        struct drm_mm *mm = hole_node->mm;
        u64 hole_start = drm_mm_hole_node_start(hole_node);
        u64 hole_end = drm_mm_hole_node_end(hole_node);
        u64 adj_start = hole_start;
        u64 adj_end = hole_end;

        BUG_ON(!hole_node->hole_follows || node->allocated);

        if (adj_start < start)
                adj_start = start;
        if (adj_end > end)
                adj_end = end;

        if (mm->color_adjust)
                mm->color_adjust(hole_node, color, &adj_start, &adj_end);

        if (flags & DRM_MM_CREATE_TOP)
                adj_start = adj_end - size;

        if (alignment) {
                u64 tmp = adj_start;
                unsigned rem;

                rem = do_div(tmp, alignment);
                if (rem) {
                        if (flags & DRM_MM_CREATE_TOP)
                                adj_start -= rem;
                        else
                                adj_start += alignment - rem;
                }
        }

        if (adj_start == hole_start) {
                hole_node->hole_follows = 0;
                list_del(&hole_node->hole_stack);
        }

        node->start = adj_start;
        node->size = size;
        node->mm = mm;
        node->color = color;
        node->allocated = 1;

        list_add(&node->node_list, &hole_node->node_list);

        drm_mm_interval_tree_add_node(hole_node, node);

        BUG_ON(node->start < start);
        BUG_ON(node->start < adj_start);
        BUG_ON(node->start + node->size > adj_end);
        BUG_ON(node->start + node->size > end);

        node->hole_follows = 0;
        if (__drm_mm_hole_node_start(node) < hole_end) {
                list_add(&node->hole_stack, &mm->hole_stack);
                node->hole_follows = 1;
        }

        save_stack(node);
}

/**
 * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @start: start of the allowed range for this node
 * @end: end of the allowed range for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
                                        u64 size, unsigned alignment,
                                        unsigned long color,
                                        u64 start, u64 end,
                                        enum drm_mm_search_flags sflags,
                                        enum drm_mm_allocator_flags aflags)
{
        struct drm_mm_node *hole_node;

        if (WARN_ON(size == 0))
                return -EINVAL;

        hole_node = drm_mm_search_free_in_range_generic(mm,
                                                        size, alignment, color,
                                                        start, end, sflags);
        if (!hole_node)
                return -ENOSPC;

        drm_mm_insert_helper_range(hole_node, node,
                                   size, alignment, color,
                                   start, end, aflags);
        return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
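
/*
 * Example (editor's sketch): constraining an allocation to the first 256 MiB
 * of the managed range, roughly what a driver would do for a CPU-mappable
 * aperture. The bound is illustrative:
 *
 *      err = drm_mm_insert_node_in_range_generic(&mm, &node, size, 0, 0,
 *                                                0, 256 * 1024 * 1024,
 *                                                DRM_MM_SEARCH_DEFAULT,
 *                                                DRM_MM_CREATE_DEFAULT);
 */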

/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need
 * to be cleared again before it can be re-inserted into this or any other
 * drm_mm allocator. It is a bug to call this function on an un-allocated
 * node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;

        if (WARN_ON(!node->allocated))
                return;

        BUG_ON(node->scanned_block || node->scanned_prev_free
                                   || node->scanned_next_free);

        prev_node =
            list_entry(node->node_list.prev, struct drm_mm_node, node_list);

        if (node->hole_follows) {
                BUG_ON(__drm_mm_hole_node_start(node) ==
                       __drm_mm_hole_node_end(node));
                list_del(&node->hole_stack);
        } else
                BUG_ON(__drm_mm_hole_node_start(node) !=
                       __drm_mm_hole_node_end(node));

        if (!prev_node->hole_follows) {
                prev_node->hole_follows = 1;
                list_add(&prev_node->hole_stack, &mm->hole_stack);
        } else
                list_move(&prev_node->hole_stack, &mm->hole_stack);

        drm_mm_interval_tree_remove(node, &mm->interval_tree);
        list_del(&node->node_list);
        node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment)
{
        if (end - start < size)
                return 0;

        if (alignment) {
                u64 tmp = start;
                unsigned rem;

                rem = do_div(tmp, alignment);
                if (rem)
                        start += alignment - rem;
        }

        return end >= start + size;
}

static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
                                                      u64 size,
                                                      unsigned alignment,
                                                      unsigned long color,
                                                      enum drm_mm_search_flags flags)
{
        struct drm_mm_node *entry;
        struct drm_mm_node *best;
        u64 adj_start;
        u64 adj_end;
        u64 best_size;

        BUG_ON(mm->scanned_blocks);

        best = NULL;
        best_size = ~0UL;

        __drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
                               flags & DRM_MM_SEARCH_BELOW) {
                u64 hole_size = adj_end - adj_start;

                if (mm->color_adjust) {
                        mm->color_adjust(entry, color, &adj_start, &adj_end);
                        if (adj_end <= adj_start)
                                continue;
                }

                if (!check_free_hole(adj_start, adj_end, size, alignment))
                        continue;

                if (!(flags & DRM_MM_SEARCH_BEST))
                        return entry;

                if (hole_size < best_size) {
                        best = entry;
                        best_size = hole_size;
                }
        }

        return best;
}

static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
                                                        u64 size,
                                                        unsigned alignment,
                                                        unsigned long color,
                                                        u64 start,
                                                        u64 end,
                                                        enum drm_mm_search_flags flags)
{
        struct drm_mm_node *entry;
        struct drm_mm_node *best;
        u64 adj_start;
        u64 adj_end;
        u64 best_size;

        BUG_ON(mm->scanned_blocks);

        best = NULL;
        best_size = ~0UL;

        __drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
                               flags & DRM_MM_SEARCH_BELOW) {
                u64 hole_size = adj_end - adj_start;

                if (adj_start < start)
                        adj_start = start;
                if (adj_end > end)
                        adj_end = end;

                if (mm->color_adjust) {
                        mm->color_adjust(entry, color, &adj_start, &adj_end);
                        if (adj_end <= adj_start)
                                continue;
                }

                if (!check_free_hole(adj_start, adj_end, size, alignment))
                        continue;

                if (!(flags & DRM_MM_SEARCH_BEST))
                        return entry;

                if (hole_size < best_size) {
                        best = entry;
                        best_size = hole_size;
                }
        }

        return best;
}

/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful for when drivers embed the drm_mm_node structure and hence
 * can't move allocations by reassigning pointers. It's a combination of remove
 * and insert with the guarantee that the allocation start will match.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
        list_replace(&old->node_list, &new->node_list);
        list_replace(&old->hole_stack, &new->hole_stack);
        rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree);
        new->hole_follows = old->hole_follows;
        new->mm = old->mm;
        new->start = old->start;
        new->size = old->size;
        new->color = old->color;
        new->__subtree_last = old->__subtree_last;

        old->allocated = 0;
        new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);
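
/*
 * Example (editor's sketch): handing an allocation over to a fresh tracking
 * structure without disturbing its placement, e.g. while swapping object
 * metadata. old_bo/new_bo are hypothetical driver objects embedding a node:
 *
 *      drm_mm_replace_node(&old_bo->node, &new_bo->node);
 *      // new_bo->node.start now equals the old start; old_bo->node is free
 */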

/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have contiguous allocations for a given object. When
 * evicting objects to make space for a new one it is therefore not most
 * efficient when we simply start to select all objects from the tail of an LRU
 * until there's a suitable hole: Especially for big objects or nodes that
 * otherwise have special allocation constraints there's a good chance we evict
 * lots of (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_init_scan() or drm_mm_init_scan_with_range(). Then the driver adds
 * objects to the roster (probably by walking an LRU list, but this can be
 * freely implemented) until a suitable hole is found or there's no further
 * evictable object.
 *
 * Then the driver must walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is used
 * in the scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected in the scan. Adding and
 * removing an object is O(1), and since freeing a node is also O(1) the
 * overall complexity is O(scanned_objects). So like the free stack which
 * needs to be walked before a scan operation even begins this is linear in
 * the number of objects. It doesn't seem to hurt badly.
 */
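
/*
 * Example (editor's sketch): the three-phase eviction loop described above.
 * The LRU list, the scan_link member and the unbind helper are hypothetical
 * driver code; only the drm_mm_* calls are from this file:
 *
 *      struct my_obj *obj, *next;
 *      LIST_HEAD(scan_list);
 *      LIST_HEAD(evict_list);
 *
 *      drm_mm_init_scan(&mm, size, alignment, color);
 *
 *      // 1) feed the LRU into the scan until a hole is found
 *      list_for_each_entry(obj, &lru, lru_link) {
 *              list_add(&obj->scan_link, &scan_list);
 *              if (drm_mm_scan_add_block(&obj->node))
 *                      break;
 *      }
 *
 *      // 2) back out in exact reverse order of addition; scan_list was
 *      //    built with list_add(), so forward iteration is reverse order
 *      list_for_each_entry_safe(obj, next, &scan_list, scan_link) {
 *              if (drm_mm_scan_remove_block(&obj->node))
 *                      list_move(&obj->scan_link, &evict_list);
 *              else
 *                      list_del_init(&obj->scan_link);
 *      }
 *
 *      // 3) actually evict the selected objects, freeing their nodes
 *      list_for_each_entry_safe(obj, next, &evict_list, scan_link) {
 *              list_del_init(&obj->scan_link);
 *              my_obj_unbind(obj);     // ends in drm_mm_remove_node()
 *      }
 */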

/**
 * drm_mm_init_scan - initialize lru scanning
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. Note that there's no need to specify allocation flags, since
 * they only change the place a node is allocated from within a suitable hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm,
                      u64 size,
                      unsigned alignment,
                      unsigned long color)
{
        mm->scan_color = color;
        mm->scan_alignment = alignment;
        mm->scan_size = size;
        mm->scanned_blocks = 0;
        mm->scan_hit_start = 0;
        mm->scan_hit_end = 0;
        mm->scan_check_range = 0;
        mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * drm_mm_init_scan_with_range - initialize range-restricted lru scanning
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. Note that there's no need to specify allocation flags, since
 * they only change the place a node is allocated from within a suitable hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
                                 u64 size,
                                 unsigned alignment,
                                 unsigned long color,
                                 u64 start,
                                 u64 end)
{
        mm->scan_color = color;
        mm->scan_alignment = alignment;
        mm->scan_size = size;
        mm->scanned_blocks = 0;
        mm->scan_hit_start = 0;
        mm->scan_hit_end = 0;
        mm->scan_start = start;
        mm->scan_end = end;
        mm->scan_check_range = 1;
        mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the
 * desired hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;
        u64 hole_start, hole_end;
        u64 adj_start, adj_end;

        mm->scanned_blocks++;

        BUG_ON(node->scanned_block);
        node->scanned_block = 1;

        prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
                               node_list);

        node->scanned_preceeds_hole = prev_node->hole_follows;
        prev_node->hole_follows = 1;
        list_del(&node->node_list);
        node->node_list.prev = &prev_node->node_list;
        node->node_list.next = &mm->prev_scanned_node->node_list;
        mm->prev_scanned_node = node;

        adj_start = hole_start = drm_mm_hole_node_start(prev_node);
        adj_end = hole_end = drm_mm_hole_node_end(prev_node);

        if (mm->scan_check_range) {
                if (adj_start < mm->scan_start)
                        adj_start = mm->scan_start;
                if (adj_end > mm->scan_end)
                        adj_end = mm->scan_end;
        }

        if (mm->color_adjust)
                mm->color_adjust(prev_node, mm->scan_color,
                                 &adj_start, &adj_end);

        if (check_free_hole(adj_start, adj_end,
                            mm->scan_size, mm->scan_alignment)) {
                mm->scan_hit_start = hole_start;
                mm->scan_hit_end = hole_end;
                return true;
        }

        return false;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @node: drm_mm_node to remove
 *
 * Nodes _must_ be removed in the exact same order from the scan list as they
 * have been added, otherwise the internal state of the memory manager will be
 * corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
 * return the just freed block (because it's at the top of the free_stack
 * list).
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always
 * return false when no hole has been found.
 */
bool drm_mm_scan_remove_block(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;

        mm->scanned_blocks--;

        BUG_ON(!node->scanned_block);
        node->scanned_block = 0;

        prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
                               node_list);

        prev_node->hole_follows = node->scanned_preceeds_hole;
        list_add(&node->node_list, &prev_node->node_list);

        return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
                node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

/**
 * drm_mm_clean - checks whether an allocator is clean
 * @mm: drm_mm allocator to check
 *
 * Returns:
 * True if the allocator is completely free, false if there's still a node
 * allocated in it.
 */
bool drm_mm_clean(struct drm_mm *mm)
{
        struct list_head *head = &mm->head_node.node_list;

        return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
        INIT_LIST_HEAD(&mm->hole_stack);
        mm->scanned_blocks = 0;

        /* Clever trick to avoid a special case in the free hole tracking. */
        INIT_LIST_HEAD(&mm->head_node.node_list);
        mm->head_node.hole_follows = 1;
        mm->head_node.scanned_block = 0;
        mm->head_node.scanned_prev_free = 0;
        mm->head_node.scanned_next_free = 0;
        mm->head_node.mm = mm;
        mm->head_node.start = start + size;
        mm->head_node.size = start - mm->head_node.start;
        list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

        mm->interval_tree = RB_ROOT;

        mm->color_adjust = NULL;
}
EXPORT_SYMBOL(drm_mm_init);
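
/*
 * Example (editor's sketch): managing a 1 GiB aperture starting at offset 0
 * for the lifetime of a hypothetical driver:
 *
 *      struct drm_mm mm = {};          // must be cleared to 0 first
 *
 *      drm_mm_init(&mm, 0, 1024ULL * 1024 * 1024);
 *      // ... insert, reserve and remove nodes ...
 *      drm_mm_takedown(&mm);           // all nodes must be removed by now
 */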

/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
 */
void drm_mm_takedown(struct drm_mm *mm)
{
        if (WARN(!list_empty(&mm->head_node.node_list),
                 "Memory manager not clean during takedown.\n"))
                show_leaks(mm);
}
EXPORT_SYMBOL(drm_mm_takedown);

static u64 drm_mm_debug_hole(struct drm_mm_node *entry,
                             const char *prefix)
{
        u64 hole_start, hole_end, hole_size;

        if (entry->hole_follows) {
                hole_start = drm_mm_hole_node_start(entry);
                hole_end = drm_mm_hole_node_end(entry);
                hole_size = hole_end - hole_start;
                pr_debug("%s %#llx-%#llx: %llu: free\n", prefix, hole_start,
                         hole_end, hole_size);
                return hole_size;
        }

        return 0;
}

/**
 * drm_mm_debug_table - dump allocator state to dmesg
 * @mm: drm_mm allocator to dump
 * @prefix: prefix to use for dumping to dmesg
 */
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
        struct drm_mm_node *entry;
        u64 total_used = 0, total_free = 0, total = 0;

        total_free += drm_mm_debug_hole(&mm->head_node, prefix);

        drm_mm_for_each_node(entry, mm) {
                pr_debug("%s %#llx-%#llx: %llu: used\n", prefix, entry->start,
                         entry->start + entry->size, entry->size);
                total_used += entry->size;
                total_free += drm_mm_debug_hole(entry, prefix);
        }
        total = total_free + total_used;

        pr_debug("%s total: %llu, used %llu free %llu\n", prefix, total,
                 total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
static u64 drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
{
        u64 hole_start, hole_end, hole_size;

        if (entry->hole_follows) {
                hole_start = drm_mm_hole_node_start(entry);
                hole_end = drm_mm_hole_node_end(entry);
                hole_size = hole_end - hole_start;
                seq_printf(m, "%#018llx-%#018llx: %llu: free\n", hole_start,
                           hole_end, hole_size);
                return hole_size;
        }

        return 0;
}

/**
 * drm_mm_dump_table - dump allocator state to a seq_file
 * @m: seq_file to dump to
 * @mm: drm_mm allocator to dump
 */
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
        struct drm_mm_node *entry;
        u64 total_used = 0, total_free = 0, total = 0;

        total_free += drm_mm_dump_hole(m, &mm->head_node);

        drm_mm_for_each_node(entry, mm) {
                seq_printf(m, "%#018llx-%#018llx: %llu: used\n", entry->start,
                           entry->start + entry->size, entry->size);
                total_used += entry->size;
                total_free += drm_mm_dump_hole(m, entry);
        }
        total = total_free + total_used;

        seq_printf(m, "total: %llu, used %llu free %llu\n", total,
                   total_used, total_free);
        return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif