drivers/gpu/drm/drm_mm.c (mirror_ubuntu-zesty-kernel.git, blame at "drm: mm: track free areas implicitly")

/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved if
 * an RB-tree were used instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"
#include "drm_mm.h"
#include <linux/slab.h>
#include <linux/seq_file.h>

#define MM_UNUSED_TARGET 4

static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
        struct drm_mm_node *child;

        if (atomic)
                child = kzalloc(sizeof(*child), GFP_ATOMIC);
        else
                child = kzalloc(sizeof(*child), GFP_KERNEL);

        if (unlikely(child == NULL)) {
                spin_lock(&mm->unused_lock);
                if (list_empty(&mm->unused_nodes))
                        child = NULL;
                else {
                        child = list_entry(mm->unused_nodes.next,
                                           struct drm_mm_node, node_list);
                        list_del(&child->node_list);
                        --mm->num_unused;
                }
                spin_unlock(&mm->unused_lock);
        }
        return child;
}

/* drm_mm_pre_get() - pre-allocate drm_mm_node structures
 * @mm: memory manager struct we are pre-allocating for
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
        struct drm_mm_node *node;

        spin_lock(&mm->unused_lock);
        while (mm->num_unused < MM_UNUSED_TARGET) {
                spin_unlock(&mm->unused_lock);
                node = kzalloc(sizeof(*node), GFP_KERNEL);
                spin_lock(&mm->unused_lock);

                if (unlikely(node == NULL)) {
                        int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
                        spin_unlock(&mm->unused_lock);
                        return ret;
                }
                ++mm->num_unused;
                list_add_tail(&node->node_list, &mm->unused_nodes);
        }
        spin_unlock(&mm->unused_lock);
        return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
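
/*
 * Usage sketch (hypothetical caller; 'dev_priv' and its fields are made
 * up, not part of this file): drm_mm_pre_get() may sleep, so call it
 * before taking any spinlock, then allocate with atomic = 1 so that
 * drm_mm_kmalloc() falls back to the cache filled above instead of
 * calling into the slab allocator:
 *
 *        struct drm_mm_node *hole, *node = NULL;
 *
 *        if (drm_mm_pre_get(&dev_priv->mm))
 *                return -ENOMEM;
 *
 *        spin_lock(&dev_priv->mm_lock);
 *        hole = drm_mm_search_free(&dev_priv->mm, size, alignment, 0);
 *        if (hole)
 *                node = drm_mm_get_block_generic(hole, size, alignment, 1);
 *        spin_unlock(&dev_priv->mm_lock);
 */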

static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
        return hole_node->start + hole_node->size;
}

static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
        struct drm_mm_node *next_node =
                list_entry(hole_node->node_list.next, struct drm_mm_node,
                           node_list);

        return next_node->start;
}

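/*
 * The two helpers above encode the scheme this file uses to track free
 * areas implicitly: there are no dedicated nodes for holes. Instead, an
 * allocated node with hole_follows set owns the gap between its own end
 * and the start of the next node, i.e. the hole spans
 * [node->start + node->size, next->start), and such nodes are linked on
 * mm->hole_stack so free space can be enumerated without walking every
 * node.
 */
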
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
                                             unsigned long size,
                                             unsigned alignment,
                                             int atomic)
{
        struct drm_mm_node *node;
        struct drm_mm *mm = hole_node->mm;
        unsigned long tmp = 0, wasted = 0;
        unsigned long hole_start = drm_mm_hole_node_start(hole_node);
        unsigned long hole_end = drm_mm_hole_node_end(hole_node);

        BUG_ON(!hole_node->hole_follows);

        node = drm_mm_kmalloc(mm, atomic);
        if (unlikely(node == NULL))
                return NULL;

        if (alignment)
                tmp = hole_start % alignment;

        if (!tmp) {
                hole_node->hole_follows = 0;
                list_del_init(&hole_node->hole_stack);
        } else
                wasted = alignment - tmp;

        node->start = hole_start + wasted;
        node->size = size;
        node->mm = mm;

        INIT_LIST_HEAD(&node->hole_stack);
        list_add(&node->node_list, &hole_node->node_list);

        BUG_ON(node->start + node->size > hole_end);

        if (node->start + node->size < hole_end) {
                list_add(&node->hole_stack, &mm->hole_stack);
                node->hole_follows = 1;
        } else {
                node->hole_follows = 0;
        }

        return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);

struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
                                                   unsigned long size,
                                                   unsigned alignment,
                                                   unsigned long start,
                                                   unsigned long end,
                                                   int atomic)
{
        struct drm_mm_node *node;
        struct drm_mm *mm = hole_node->mm;
        unsigned long tmp = 0, wasted = 0;
        unsigned long hole_start = drm_mm_hole_node_start(hole_node);
        unsigned long hole_end = drm_mm_hole_node_end(hole_node);

        BUG_ON(!hole_node->hole_follows);

        node = drm_mm_kmalloc(mm, atomic);
        if (unlikely(node == NULL))
                return NULL;

        if (hole_start < start)
                wasted += start - hole_start;
        if (alignment)
                tmp = (hole_start + wasted) % alignment;

        if (tmp)
                wasted += alignment - tmp;

        if (!wasted) {
                hole_node->hole_follows = 0;
                list_del_init(&hole_node->hole_stack);
        }

        node->start = hole_start + wasted;
        node->size = size;
        node->mm = mm;

        INIT_LIST_HEAD(&node->hole_stack);
        list_add(&node->node_list, &hole_node->node_list);

        BUG_ON(node->start + node->size > hole_end);
        BUG_ON(node->start + node->size > end);

        if (node->start + node->size < hole_end) {
                list_add(&node->hole_stack, &mm->hole_stack);
                node->hole_follows = 1;
        } else {
                node->hole_follows = 0;
        }

        return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);

/*
 * Put a block. The freed range becomes (part of) the hole following the
 * previous node, which is (re)added to the front of the hole stack.
 */

void drm_mm_put_block(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;

        BUG_ON(node->scanned_block || node->scanned_prev_free
                                   || node->scanned_next_free);

        prev_node =
            list_entry(node->node_list.prev, struct drm_mm_node, node_list);

        if (node->hole_follows) {
                BUG_ON(drm_mm_hole_node_start(node)
                                == drm_mm_hole_node_end(node));
                list_del(&node->hole_stack);
        } else
                BUG_ON(drm_mm_hole_node_start(node)
                                != drm_mm_hole_node_end(node));

        if (!prev_node->hole_follows) {
                prev_node->hole_follows = 1;
                list_add(&prev_node->hole_stack, &mm->hole_stack);
        } else
                list_move(&prev_node->hole_stack, &mm->hole_stack);

        list_del(&node->node_list);
        spin_lock(&mm->unused_lock);
        if (mm->num_unused < MM_UNUSED_TARGET) {
                list_add(&node->node_list, &mm->unused_nodes);
                ++mm->num_unused;
        } else
                kfree(node);
        spin_unlock(&mm->unused_lock);
}
EXPORT_SYMBOL(drm_mm_put_block);

static int check_free_hole(unsigned long start, unsigned long end,
                           unsigned long size, unsigned alignment)
{
        unsigned wasted = 0;

        if (end - start < size)
                return 0;

        if (alignment) {
                unsigned tmp = start % alignment;
                if (tmp)
                        wasted = alignment - tmp;
        }

        if (end >= start + size + wasted)
                return 1;

        return 0;
}
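
/*
 * Worked example of the alignment math above: for a hole spanning
 * [3, 20) with size = 8 and alignment = 4, tmp = 3 % 4 = 3, so
 * wasted = 4 - 3 = 1. The check 20 >= 3 + 8 + 1 passes, and the
 * aligned block would occupy [4, 12), leaving [3, 4) and [12, 20) free.
 */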

struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
                                       unsigned long size,
                                       unsigned alignment, int best_match)
{
        struct drm_mm_node *entry;
        struct drm_mm_node *best;
        unsigned long best_size;

        BUG_ON(mm->scanned_blocks);

        best = NULL;
        best_size = ~0UL;

        list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
                unsigned long hole_start = drm_mm_hole_node_start(entry);
                unsigned long hole_end = drm_mm_hole_node_end(entry);

                BUG_ON(!entry->hole_follows);
                if (!check_free_hole(hole_start, hole_end, size, alignment))
                        continue;

                if (!best_match)
                        return entry;

                /* best fit: remember the smallest hole that is large
                 * enough */
                if (hole_end - hole_start < best_size) {
                        best = entry;
                        best_size = hole_end - hole_start;
                }
        }

        return best;
}
EXPORT_SYMBOL(drm_mm_search_free);
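
/*
 * Allocation is a two-step pattern: find a suitable hole, then carve the
 * block out of it (sketch only; error handling is up to the caller):
 *
 *        struct drm_mm_node *hole, *node;
 *
 *        hole = drm_mm_search_free(mm, size, alignment, 0);
 *        if (!hole)
 *                return -ENOSPC;
 *        node = drm_mm_get_block_generic(hole, size, alignment, 0);
 *
 * With best_match = 0 the first hole that fits is returned; best_match = 1
 * scans the whole hole stack for the tightest fit, trading search time for
 * less fragmentation.
 */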

struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
                                                unsigned long size,
                                                unsigned alignment,
                                                unsigned long start,
                                                unsigned long end,
                                                int best_match)
{
        struct drm_mm_node *entry;
        struct drm_mm_node *best;
        unsigned long best_size;

        BUG_ON(mm->scanned_blocks);

        best = NULL;
        best_size = ~0UL;

        list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
                unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
                        start : drm_mm_hole_node_start(entry);
                unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
                        end : drm_mm_hole_node_end(entry);

                BUG_ON(!entry->hole_follows);
                if (!check_free_hole(adj_start, adj_end, size, alignment))
                        continue;

                if (!best_match)
                        return entry;

                /* best fit: remember the smallest clamped hole that is
                 * large enough */
                if (adj_end - adj_start < best_size) {
                        best = entry;
                        best_size = adj_end - adj_start;
                }
        }

        return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range);
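
/*
 * The _in_range variant pairs with drm_mm_get_block_range_generic() the
 * same way. A sketch for allocating only from the first 256 MiB of a
 * managed range (the bound is an arbitrary example value):
 *
 *        hole = drm_mm_search_free_in_range(mm, size, alignment,
 *                                           0, 256 * 1024 * 1024, 0);
 *        if (hole)
 *                node = drm_mm_get_block_range_generic(hole, size, alignment,
 *                                                      0, 256 * 1024 * 1024,
 *                                                      0);
 */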

/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
                      unsigned alignment)
{
        mm->scan_alignment = alignment;
        mm->scan_size = size;
        mm->scanned_blocks = 0;
        mm->scan_hit_start = 0;
        mm->scan_hit_size = 0;
        mm->scan_check_range = 0;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole. This version is for range-restricted scans.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
                                 unsigned alignment,
                                 unsigned long start,
                                 unsigned long end)
{
        mm->scan_alignment = alignment;
        mm->scan_size = size;
        mm->scanned_blocks = 0;
        mm->scan_hit_start = 0;
        mm->scan_hit_size = 0;
        mm->scan_start = start;
        mm->scan_end = end;
        mm->scan_check_range = 1;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * Add a node to the scan list that might be freed to make space for the desired
 * hole.
 *
 * Returns non-zero if a hole has been found, zero otherwise.
 */
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;
        unsigned long hole_start, hole_end;
        unsigned long adj_start;
        unsigned long adj_end;

        mm->scanned_blocks++;

        BUG_ON(node->scanned_block);
        node->scanned_block = 1;

        prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
                               node_list);

        node->scanned_preceeds_hole = prev_node->hole_follows;
        prev_node->hole_follows = 1;
        list_del(&node->node_list);
        node->node_list.prev = &prev_node->node_list;

        hole_start = drm_mm_hole_node_start(prev_node);
        hole_end = drm_mm_hole_node_end(prev_node);
        if (mm->scan_check_range) {
                adj_start = hole_start < mm->scan_start ?
                        mm->scan_start : hole_start;
                adj_end = hole_end > mm->scan_end ?
                        mm->scan_end : hole_end;
        } else {
                adj_start = hole_start;
                adj_end = hole_end;
        }

        if (check_free_hole(adj_start, adj_end,
                            mm->scan_size, mm->scan_alignment)) {
                mm->scan_hit_start = hole_start;
                /* store the hole's size, not its end, so the containment
                 * check in drm_mm_scan_remove_block() works */
                mm->scan_hit_size = hole_end - hole_start;

                return 1;
        }

        return 0;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * Remove a node from the scan list.
 *
 * Nodes _must_ be removed in the reverse order they were added to the scan
 * list, otherwise the internal state of the memory manager will be
 * corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with best_match = 0 will then
 * return the just freed block (because it's at the top of the hole stack).
 *
 * Returns one if this block should be evicted, zero otherwise. Will always
 * return zero when no hole has been found.
 */
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;

        mm->scanned_blocks--;

        BUG_ON(!node->scanned_block);
        node->scanned_block = 0;

        prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
                               node_list);

        prev_node->hole_follows = node->scanned_preceeds_hole;
        INIT_LIST_HEAD(&node->node_list);
        list_add(&node->node_list, &prev_node->node_list);

        /* Only need to check for containment because start & size of the
         * complete resulting free block (not just the desired part) are
         * stored. */
        if (node->start >= mm->scan_hit_start &&
            node->start + node->size
                        <= mm->scan_hit_start + mm->scan_hit_size) {
                return 1;
        }

        return 0;
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
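
/*
 * Eviction sketch, modelled on how a driver with an LRU of objects might
 * drive the scan API ('obj', 'obj->mm_node', 'lru' and the scratch lists
 * are hypothetical). Every added block must be removed again, in reverse
 * order of addition, before anything else may touch the drm_mm; only once
 * the scan list is empty are the flagged blocks actually freed:
 *
 *        drm_mm_init_scan(mm, size, alignment);
 *
 *        list_for_each_entry(obj, &lru, lru_link) {
 *                list_add(&obj->scan_link, &unwind);
 *                if (drm_mm_scan_add_block(obj->mm_node))
 *                        break;
 *        }
 *
 * Prepending to 'unwind' means the forward walk below visits the scanned
 * blocks in reverse order of addition, as required:
 *
 *        list_for_each_entry_safe(obj, next, &unwind, scan_link) {
 *                if (drm_mm_scan_remove_block(obj->mm_node))
 *                        list_move(&obj->scan_link, &eviction_list);
 *                else
 *                        list_del(&obj->scan_link);
 *        }
 *
 *        list_for_each_entry_safe(obj, next, &eviction_list, scan_link) {
 *                list_del(&obj->scan_link);
 *                drm_mm_put_block(obj->mm_node);
 *        }
 */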

int drm_mm_clean(struct drm_mm *mm)
{
        struct list_head *head = &mm->head_node.node_list;

        return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
        INIT_LIST_HEAD(&mm->hole_stack);
        INIT_LIST_HEAD(&mm->unused_nodes);
        mm->num_unused = 0;
        mm->scanned_blocks = 0;
        spin_lock_init(&mm->unused_lock);

        /* Clever trick to avoid a special case in the free hole tracking. */
        INIT_LIST_HEAD(&mm->head_node.node_list);
        INIT_LIST_HEAD(&mm->head_node.hole_stack);
        mm->head_node.hole_follows = 1;
        mm->head_node.scanned_block = 0;
        mm->head_node.scanned_prev_free = 0;
        mm->head_node.scanned_next_free = 0;
        mm->head_node.mm = mm;
        mm->head_node.start = start + size;
        mm->head_node.size = start - mm->head_node.start;
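        /*
         * The two assignments above exploit unsigned wraparound:
         * head_node.size is effectively -size, so
         * drm_mm_hole_node_start(&mm->head_node) wraps back to 'start'.
         * The head node thus occupies no space itself, but the entire
         * managed range [start, start + size) begins life as the hole
         * following it.
         */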
        list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

        return 0;
}
EXPORT_SYMBOL(drm_mm_init);

void drm_mm_takedown(struct drm_mm *mm)
{
        struct drm_mm_node *entry, *next;

        if (!list_empty(&mm->head_node.node_list)) {
                DRM_ERROR("Memory manager not clean. Delaying takedown\n");
                return;
        }

        spin_lock(&mm->unused_lock);
        list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
                list_del(&entry->node_list);
                kfree(entry);
                --mm->num_unused;
        }
        spin_unlock(&mm->unused_lock);

        BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);

void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
        struct drm_mm_node *entry;
        unsigned long total_used = 0, total_free = 0, total = 0;
        unsigned long hole_start, hole_end, hole_size;

        hole_start = drm_mm_hole_node_start(&mm->head_node);
        hole_end = drm_mm_hole_node_end(&mm->head_node);
        hole_size = hole_end - hole_start;
        if (hole_size)
                printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
                        prefix, hole_start, hole_end, hole_size);
        total_free += hole_size;

        drm_mm_for_each_node(entry, mm) {
                printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
                        prefix, entry->start, entry->start + entry->size,
                        entry->size);
                total_used += entry->size;

                if (entry->hole_follows) {
                        hole_start = drm_mm_hole_node_start(entry);
                        hole_end = drm_mm_hole_node_end(entry);
                        hole_size = hole_end - hole_start;
                        printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
                                prefix, hole_start, hole_end, hole_size);
                        total_free += hole_size;
                }
        }
        total = total_free + total_used;

        printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
                total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
        struct drm_mm_node *entry;
        unsigned long total_used = 0, total_free = 0, total = 0;
        unsigned long hole_start, hole_end, hole_size;

        hole_start = drm_mm_hole_node_start(&mm->head_node);
        hole_end = drm_mm_hole_node_end(&mm->head_node);
        hole_size = hole_end - hole_start;
        if (hole_size)
                seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
                                hole_start, hole_end, hole_size);
        total_free += hole_size;

        drm_mm_for_each_node(entry, mm) {
                seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
                                entry->start, entry->start + entry->size,
                                entry->size);
                total_used += entry->size;
                if (entry->hole_follows) {
                        hole_start = drm_mm_hole_node_start(entry);
                        hole_end = drm_mm_hole_node_end(entry);
                        hole_size = hole_end - hole_start;
                        seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
                                        hole_start, hole_end, hole_size);
                        total_free += hole_size;
                }
        }
        total = total_free + total_used;

        seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used,
                   total_free);
        return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif