/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions; replacing it with an RB-tree would
 * help, at least under heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>

#define MM_UNUSED_TARGET 4

/*
 * Allocate a struct drm_mm_node, falling back to the pre-allocated
 * unused_nodes pool when a (possibly atomic) kzalloc fails.
 */
static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	if (atomic)
		child = kzalloc(sizeof(*child), GFP_ATOMIC);
	else
		child = kzalloc(sizeof(*child), GFP_KERNEL);

	if (unlikely(child == NULL)) {
		spin_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child =
			    list_entry(mm->unused_nodes.next,
				       struct drm_mm_node, node_list);
			list_del(&child->node_list);
			--mm->num_unused;
		}
		spin_unlock(&mm->unused_lock);
	}
	return child;
}

/* drm_mm_pre_get() - pre-allocate drm_mm_node structures
 * drm_mm: memory manager struct we are pre-allocating for
 *
 * Tops up the pool of unused nodes to MM_UNUSED_TARGET entries so that
 * subsequent atomic allocations have a reserve to fall back on.
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		spin_unlock(&mm->unused_lock);
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->node_list, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
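
/*
 * Illustrative sketch (not part of the original file): a driver that must
 * allocate while holding its own spinlock can call drm_mm_pre_get() first,
 * outside the lock, and then use the atomic allocation path, which falls
 * back on the pre-filled pool instead of sleeping. The lock and the sizes
 * below are hypothetical.
 */
#if 0
static struct drm_mm_node *example_alloc_atomic(struct drm_mm *mm,
						spinlock_t *driver_lock)
{
	struct drm_mm_node *hole, *node = NULL;

	/* May sleep: refill the reserve pool before taking the lock. */
	if (drm_mm_pre_get(mm))
		return NULL;

	spin_lock(driver_lock);
	hole = drm_mm_search_free_generic(mm, 4096, 0, 0, false);
	if (hole)
		/* atomic=1: drm_mm_kmalloc() uses GFP_ATOMIC or the pool. */
		node = drm_mm_get_block_generic(hole, 4096, 0, 0, 1);
	spin_unlock(driver_lock);

	return node;
}
#endif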

/* The hole tracked by @hole_node starts right after the node itself... */
static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
	return hole_node->start + hole_node->size;
}

/* ...and extends up to the start of the next node in address order. */
static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
	struct drm_mm_node *next_node =
		list_entry(hole_node->node_list.next, struct drm_mm_node,
			   node_list);

	return next_node->start;
}

/*
 * Carve @node out of the hole following @hole_node, honouring the requested
 * alignment and the optional color_adjust constraint, and update the hole
 * bookkeeping on both nodes.
 */
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 unsigned long size, unsigned alignment,
				 unsigned long color)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
	unsigned long adj_start = hole_start;
	unsigned long adj_end = hole_end;

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (alignment) {
		unsigned tmp = adj_start % alignment;
		if (tmp)
			adj_start += alignment - tmp;
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > adj_end);

	node->hole_follows = 0;
	if (node->start + node->size < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

/*
 * Allocate the exact block [start, start + size) out of whichever hole
 * currently spans that range.
 */
struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
					unsigned long start,
					unsigned long size,
					bool atomic)
{
	struct drm_mm_node *hole, *node;
	unsigned long end = start + size;

	list_for_each_entry(hole, &mm->hole_stack, hole_stack) {
		unsigned long hole_start;
		unsigned long hole_end;

		BUG_ON(!hole->hole_follows);
		hole_start = drm_mm_hole_node_start(hole);
		hole_end = drm_mm_hole_node_end(hole);

		if (hole_start > start || hole_end < end)
			continue;

		node = drm_mm_kmalloc(mm, atomic);
		if (unlikely(node == NULL))
			return NULL;

		node->start = start;
		node->size = size;
		node->mm = mm;
		node->allocated = 1;

		INIT_LIST_HEAD(&node->hole_stack);
		list_add(&node->node_list, &hole->node_list);

		if (start == hole_start) {
			hole->hole_follows = 0;
			list_del_init(&hole->hole_stack);
		}

		node->hole_follows = 0;
		if (end != hole_end) {
			list_add(&node->hole_stack, &mm->hole_stack);
			node->hole_follows = 1;
		}

		return node;
	}

	WARN(1, "no hole found for block 0x%lx + 0x%lx\n", start, size);
	return NULL;
}
EXPORT_SYMBOL(drm_mm_create_block);
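
/*
 * Illustrative sketch (not part of the original file): drm_mm_create_block()
 * reserves a caller-chosen range rather than searching for one, e.g. memory
 * a firmware framebuffer is already scanning out of. The 1 MiB range is
 * hypothetical; it must lie entirely inside one current hole.
 */
#if 0
static int example_reserve_stolen(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	node = drm_mm_create_block(mm, 0, 1024 * 1024, false);
	return node ? 0 : -ENOSPC;
}
#endif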

struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
					     unsigned long size,
					     unsigned alignment,
					     unsigned long color,
					     int atomic)
{
	struct drm_mm_node *node;

	node = drm_mm_kmalloc(hole_node->mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	drm_mm_insert_helper(hole_node, node, size, alignment, color);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);

/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. The preallocated memory node
 * must be cleared.
 */
int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
		       unsigned long size, unsigned alignment)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free(mm, size, alignment, false);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node, size, alignment, 0);

	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node);
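
/*
 * Illustrative sketch (not part of the original file): drm_mm_insert_node()
 * works on a node embedded in a driver object, avoiding the separate
 * drm_mm_node allocation of the get_block API. The buffer struct is
 * hypothetical; kzalloc() provides the required cleared node.
 */
#if 0
struct example_buffer {
	struct drm_mm_node vma_node;	/* embedded, no separate kmalloc */
	unsigned long size;
};

static struct example_buffer *example_buffer_alloc(struct drm_mm *mm,
						   unsigned long size)
{
	struct example_buffer *buf = kzalloc(sizeof(*buf), GFP_KERNEL);

	if (!buf)
		return NULL;
	buf->size = size;
	if (drm_mm_insert_node(mm, &buf->vma_node, size, PAGE_SIZE)) {
		kfree(buf);
		return NULL;
	}
	return buf;
}
#endif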

/* Range-restricted variant of drm_mm_insert_helper(). */
static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       unsigned long size, unsigned alignment,
				       unsigned long color,
				       unsigned long start, unsigned long end)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
	unsigned long adj_start = hole_start;
	unsigned long adj_end = hole_end;

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (adj_start < start)
		adj_start = start;

	if (alignment) {
		unsigned tmp = adj_start % alignment;
		if (tmp)
			adj_start += alignment - tmp;
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > adj_end);
	BUG_ON(node->start + node->size > end);

	node->hole_follows = 0;
	if (node->start + node->size < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
						   unsigned long size,
						   unsigned alignment,
						   unsigned long color,
						   unsigned long start,
						   unsigned long end,
						   int atomic)
{
	struct drm_mm_node *node;

	node = drm_mm_kmalloc(hole_node->mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	drm_mm_insert_helper_range(hole_node, node, size, alignment, color,
				   start, end);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);

/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. This is for range-restricted
 * allocations. The preallocated memory node must be cleared.
 */
int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
				unsigned long size, unsigned alignment,
				unsigned long start, unsigned long end)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_in_range(mm, size, alignment,
						start, end, false);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper_range(hole_node, node, size, alignment, 0,
				   start, end);

	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
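
/*
 * Illustrative sketch (not part of the original file): a range-restricted
 * insertion, e.g. placing a buffer in the low 256 MiB of an aperture when
 * the hardware can only address that window. The window size is hypothetical.
 */
#if 0
static int example_insert_mappable(struct drm_mm *mm, struct drm_mm_node *node,
				   unsigned long size)
{
	return drm_mm_insert_node_in_range(mm, node, size, 0,
					   0, 256 * 1024 * 1024);
}
#endif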

/**
 * Remove a memory node from the allocator.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	BUG_ON(node->scanned_block || node->scanned_prev_free
				   || node->scanned_next_free);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (node->hole_follows) {
		BUG_ON(drm_mm_hole_node_start(node)
				== drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else
		BUG_ON(drm_mm_hole_node_start(node)
				!= drm_mm_hole_node_end(node));

	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

/*
 * Remove a memory node from the allocator and free the allocated struct
 * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
 * drm_mm_get_block functions.
 */
void drm_mm_put_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;

	drm_mm_remove_node(node);

	spin_lock(&mm->unused_lock);
	if (mm->num_unused < MM_UNUSED_TARGET) {
		list_add(&node->node_list, &mm->unused_nodes);
		++mm->num_unused;
	} else
		kfree(node);
	spin_unlock(&mm->unused_lock);
}
EXPORT_SYMBOL(drm_mm_put_block);

/* Can a block of @size at @alignment fit in the hole [start, end)? */
static int check_free_hole(unsigned long start, unsigned long end,
			   unsigned long size, unsigned alignment)
{
	if (end - start < size)
		return 0;

	if (alignment) {
		unsigned tmp = start % alignment;
		if (tmp)
			start += alignment - tmp;
	}

	return end >= start + size;
}

struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
					       unsigned long size,
					       unsigned alignment,
					       unsigned long color,
					       bool best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
		unsigned long adj_start = drm_mm_hole_node_start(entry);
		unsigned long adj_end = drm_mm_hole_node_end(entry);

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		BUG_ON(!entry->hole_follows);
		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_generic);

struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
							unsigned long size,
							unsigned alignment,
							unsigned long color,
							unsigned long start,
							unsigned long end,
							bool best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
		unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
			start : drm_mm_hole_node_start(entry);
		unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
			end : drm_mm_hole_node_end(entry);

		BUG_ON(!entry->hole_follows);

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range_generic);

/**
 * Moves an allocation. To be used with embedded struct drm_mm_node.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;
	new->color = old->color;

	old->allocated = 0;
	new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);

/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm,
		      unsigned long size,
		      unsigned alignment,
		      unsigned long color)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_size = 0;
	mm->scan_check_range = 0;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole. This version is for range-restricted scans.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
				 unsigned long size,
				 unsigned alignment,
				 unsigned long color,
				 unsigned long start,
				 unsigned long end)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_size = 0;
	mm->scan_start = start;
	mm->scan_end = end;
	mm->scan_check_range = 1;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * Add a node to the scan list that might be freed to make space for the desired
 * hole.
 *
 * Returns non-zero, if a hole has been found, zero otherwise.
 */
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;
	unsigned long hole_start, hole_end;
	unsigned long adj_start;
	unsigned long adj_end;

	mm->scanned_blocks++;

	BUG_ON(node->scanned_block);
	node->scanned_block = 1;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	/* The node is taken off the node list, but its stale prev/next
	 * pointers are kept: prev records where to splice it back in on
	 * drm_mm_scan_remove_block(), next chains the scanned nodes. */
	node->scanned_preceeds_hole = prev_node->hole_follows;
	prev_node->hole_follows = 1;
	list_del(&node->node_list);
	node->node_list.prev = &prev_node->node_list;
	node->node_list.next = &mm->prev_scanned_node->node_list;
	mm->prev_scanned_node = node;

	hole_start = drm_mm_hole_node_start(prev_node);
	hole_end = drm_mm_hole_node_end(prev_node);

	adj_start = hole_start;
	adj_end = hole_end;

	if (mm->color_adjust)
		mm->color_adjust(prev_node, mm->scan_color, &adj_start, &adj_end);

	if (mm->scan_check_range) {
		if (adj_start < mm->scan_start)
			adj_start = mm->scan_start;
		if (adj_end > mm->scan_end)
			adj_end = mm->scan_end;
	}

	if (check_free_hole(adj_start, adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		/* Record start and size of the complete hole, not just the
		 * desired part, for the containment check on removal. */
		mm->scan_hit_start = hole_start;
		mm->scan_hit_size = hole_end - hole_start;

		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * Remove a node from the scan list.
 *
 * Nodes _must_ be removed in the exact same order from the scan list as they
 * have been added, otherwise the internal state of the memory manager will be
 * corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with best_match = 0 will then return
 * the just freed block (because it's at the top of the hole_stack list).
 *
 * Returns one if this block should be evicted, zero otherwise. Will always
 * return zero when no hole has been found.
 */
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	mm->scanned_blocks--;

	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	prev_node->hole_follows = node->scanned_preceeds_hole;
	INIT_LIST_HEAD(&node->node_list);
	list_add(&node->node_list, &prev_node->node_list);

	/* Only need to check for containment because start&size for the
	 * complete resulting free block (not just the desired part) is
	 * stored. */
	if (node->start >= mm->scan_hit_start &&
	    node->start + node->size
			<= mm->scan_hit_start + mm->scan_hit_size) {
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
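
/*
 * Illustrative sketch (not part of the original file): the eviction protocol
 * the scan API expects. Blocks are speculatively added in LRU order until a
 * usable hole appears, then all of them are removed again in exact reverse
 * order, and drm_mm_scan_remove_block() reports which ones must really be
 * evicted. The buffer struct, list links and evict callback are hypothetical.
 */
#if 0
struct example_buffer {
	struct drm_mm_node vma_node;
	struct list_head lru_link;	/* position on the driver's LRU */
	struct list_head scan_link;	/* temporary scan-list membership */
};

static int example_evict_something(struct drm_mm *mm, struct list_head *lru,
				   unsigned long size, unsigned alignment)
{
	struct example_buffer *buf, *next;
	LIST_HEAD(scan_list);
	int found = 0;

	drm_mm_init_scan(mm, size, alignment, 0);

	list_for_each_entry(buf, lru, lru_link) {
		list_add(&buf->scan_link, &scan_list);
		if (drm_mm_scan_add_block(&buf->vma_node)) {
			found = 1;
			break;
		}
	}

	/* list_add() prepends, so walking scan_list forward unwinds the
	 * blocks in exact reverse order of addition, as required. */
	list_for_each_entry_safe(buf, next, &scan_list, scan_link) {
		int evict = drm_mm_scan_remove_block(&buf->vma_node);

		list_del(&buf->scan_link);
		if (evict)
			example_evict(buf);	/* unbind and free vma_node */
	}

	return found ? 0 : -ENOSPC;
}
#endif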

/* Returns true if the allocator holds no nodes besides the head sentinel. */
int drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->head_node.node_list;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->hole_stack);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	mm->scanned_blocks = 0;
	spin_lock_init(&mm->unused_lock);

	/* Clever trick to avoid a special case in the free hole tracking:
	 * head_node is a sentinel placed at the end of the range whose size
	 * wraps around, so the hole following it covers the whole
	 * [start, start + size) range of the manager. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	INIT_LIST_HEAD(&mm->head_node.hole_stack);
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	mm->color_adjust = NULL;

	return 0;
}
EXPORT_SYMBOL(drm_mm_init);
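
/*
 * Illustrative sketch (not part of the original file): the manager lifecycle.
 * The 256 MiB range is hypothetical; drm_mm_takedown() refuses to tear down
 * a manager that still contains nodes.
 */
#if 0
static void example_mm_lifecycle(void)
{
	struct drm_mm mm;
	struct drm_mm_node node = {};	/* insert_node needs a cleared node */

	drm_mm_init(&mm, 0, 256 * 1024 * 1024);

	if (drm_mm_insert_node(&mm, &node, 4096, 0) == 0)
		drm_mm_remove_node(&node);

	drm_mm_takedown(&mm);
}
#endif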

void drm_mm_takedown(struct drm_mm *mm)
{
	struct drm_mm_node *entry, *next;

	if (!list_empty(&mm->head_node.node_list)) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	spin_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
		list_del(&entry->node_list);
		kfree(entry);
		--mm->num_unused;
	}
	spin_unlock(&mm->unused_lock);

	BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);

void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;
	unsigned long hole_start, hole_end, hole_size;

	hole_start = drm_mm_hole_node_start(&mm->head_node);
	hole_end = drm_mm_hole_node_end(&mm->head_node);
	hole_size = hole_end - hole_start;
	if (hole_size)
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
			prefix, hole_start, hole_end,
			hole_size);
	total_free += hole_size;

	drm_mm_for_each_node(entry, mm) {
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
			prefix, entry->start, entry->start + entry->size,
			entry->size);
		total_used += entry->size;

		if (entry->hole_follows) {
			hole_start = drm_mm_hole_node_start(entry);
			hole_end = drm_mm_hole_node_end(entry);
			hole_size = hole_end - hole_start;
			printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
				prefix, hole_start, hole_end,
				hole_size);
			total_free += hole_size;
		}
	}
	total = total_free + total_used;

	printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
		total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;
	unsigned long hole_start, hole_end, hole_size;

	hole_start = drm_mm_hole_node_start(&mm->head_node);
	hole_end = drm_mm_hole_node_end(&mm->head_node);
	hole_size = hole_end - hole_start;
	if (hole_size)
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
				hole_start, hole_end, hole_size);
	total_free += hole_size;

	drm_mm_for_each_node(entry, mm) {
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
				entry->start, entry->start + entry->size,
				entry->size);
		total_used += entry->size;
		if (entry->hole_follows) {
			hole_start = drm_mm_hole_node_start(entry);
			hole_end = drm_mm_hole_node_end(entry);
			hole_size = hole_end - hole_start;
			seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
					hole_start, hole_end, hole_size);
			total_free += hole_size;
		}
	}
	total = total_free + total_used;

	seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif