/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * Copyright 2016 Intel Corporation
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Free holes are
 * currently tracked on a stack of most-recently freed regions, backed by
 * rb-trees indexed by hole size and hole address.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>
#include <linux/interval_tree_generic.h>

/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. Drivers are free to use the
 * resource allocator from the Linux core if it suits them; the upside of drm_mm
 * is that it's in the DRM core, which means that it's easier to extend for
 * some of the crazier special purpose needs of gpus.
 *
 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * data structures. drm_mm itself will not do any memory allocations of its own,
 * so if drivers choose not to embed nodes they still need to allocate them
 * themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This is
 * useful for taking over initial mode setting configurations from the firmware,
 * where an object needs to be created which exactly matches the firmware's
 * scanout target. As long as the range is still free it can be inserted anytime
 * after the allocator is initialized, which helps with avoiding looped
 * dependencies in the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic data structures seems to be a fairly decent approach to clustering
 * allocations and avoiding too much fragmentation. This means free space
 * searches are O(num_holes). Given all the fancy features drm_mm supports,
 * something better would be fairly complex, and since gfx thrashing is a fairly
 * steep cliff it is not a real concern. Removing a node again is O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just an
 * opaque unsigned long) which in conjunction with a driver callback can be used
 * to implement sophisticated placement restrictions. The i915 DRM driver uses
 * this to implement guard pages between incompatible caching domains in the
 * graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally iteration helpers to walk all nodes and all holes are provided, as
 * are some basic allocator dumpers for debugging. A minimal usage sketch
 * follows this comment.
 *
 * Note that this range allocator is not thread-safe, drivers need to protect
 * modifications with their own locking. The idea behind this is that for a full
 * memory manager additional data needs to be protected anyway, hence internal
 * locking would be fully redundant.
 */

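/*
 * Example: a minimal usage sketch of the allocator (hypothetical driver
 * code; the 16 MiB range, the 4 KiB allocation and the use_range() helper
 * are made up for illustration, and error handling is elided):
 *
 *	struct drm_mm mm;
 *	struct drm_mm_node node = {};
 *
 *	drm_mm_init(&mm, 0, SZ_16M);
 *	if (!drm_mm_insert_node_in_range(&mm, &node, SZ_4K, 0, 0,
 *					 0, SZ_16M, DRM_MM_INSERT_BEST)) {
 *		use_range(node.start, node.size);
 *		drm_mm_remove_node(&node);
 *	}
 *	drm_mm_takedown(&mm);
 */
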
#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>

#define STACKDEPTH 32
#define BUFSZ 4096

static noinline void save_stack(struct drm_mm_node *node)
{
	unsigned long entries[STACKDEPTH];
	struct stack_trace trace = {
		.entries = entries,
		.max_entries = STACKDEPTH,
		.skip = 1
	};

	save_stack_trace(&trace);
	if (trace.nr_entries != 0 &&
	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
		trace.nr_entries--;

	/* May be called under spinlock, so avoid sleeping */
	node->stack = depot_save_stack(&trace, GFP_NOWAIT);
}

static void show_leaks(struct drm_mm *mm)
{
	struct drm_mm_node *node;
	unsigned long entries[STACKDEPTH];
	char *buf;

	buf = kmalloc(BUFSZ, GFP_KERNEL);
	if (!buf)
		return;

	list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
		struct stack_trace trace = {
			.entries = entries,
			.max_entries = STACKDEPTH
		};

		if (!node->stack) {
			DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
				  node->start, node->size);
			continue;
		}

		depot_fetch_stack(node->stack, &trace);
		snprint_stack_trace(buf, BUFSZ, &trace, 0);
		DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
			  node->start, node->size, buf);
	}

	kfree(buf);
}

#undef STACKDEPTH
#undef BUFSZ
#else
static void save_stack(struct drm_mm_node *node) { }
static void show_leaks(struct drm_mm *mm) { }
#endif

#define START(node) ((node)->start)
#define LAST(node)  ((node)->start + (node)->size - 1)

INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
		     u64, __subtree_last,
		     START, LAST, static inline, drm_mm_interval_tree)

struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
{
	return drm_mm_interval_tree_iter_first((struct rb_root_cached *)&mm->interval_tree,
					       start, last) ?: (struct drm_mm_node *)&mm->head_node;
}
EXPORT_SYMBOL(__drm_mm_interval_first);

static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
					  struct drm_mm_node *node)
{
	struct drm_mm *mm = hole_node->mm;
	struct rb_node **link, *rb;
	struct drm_mm_node *parent;
	bool leftmost = true;

	node->__subtree_last = LAST(node);

	if (hole_node->allocated) {
		rb = &hole_node->rb;
		while (rb) {
			parent = rb_entry(rb, struct drm_mm_node, rb);
			if (parent->__subtree_last >= node->__subtree_last)
				break;

			parent->__subtree_last = node->__subtree_last;
			rb = rb_parent(rb);
		}

		rb = &hole_node->rb;
		link = &hole_node->rb.rb_right;
		leftmost = false;
	} else {
		rb = NULL;
		link = &mm->interval_tree.rb_root.rb_node;
	}

	while (*link) {
		rb = *link;
		parent = rb_entry(rb, struct drm_mm_node, rb);
		if (parent->__subtree_last < node->__subtree_last)
			parent->__subtree_last = node->__subtree_last;
		if (node->start < parent->start) {
			link = &parent->rb.rb_left;
		} else {
			link = &parent->rb.rb_right;
			/* descending right: @node can no longer be leftmost */
			leftmost = false;
		}
	}

	rb_link_node(&node->rb, rb, link);
	rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost,
				   &drm_mm_interval_tree_augment);
}

#define RB_INSERT(root, member, expr) do { \
	struct rb_node **link = &root.rb_node, *rb = NULL; \
	u64 x = expr(node); \
	while (*link) { \
		rb = *link; \
		if (x < expr(rb_entry(rb, struct drm_mm_node, member))) \
			link = &rb->rb_left; \
		else \
			link = &rb->rb_right; \
	} \
	rb_link_node(&node->member, rb, link); \
	rb_insert_color(&node->member, &root); \
} while (0)

#define HOLE_SIZE(NODE) ((NODE)->hole_size)
#define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))

static void add_hole(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;

	node->hole_size =
		__drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));

	RB_INSERT(mm->holes_size, rb_hole_size, HOLE_SIZE);
	RB_INSERT(mm->holes_addr, rb_hole_addr, HOLE_ADDR);

	list_add(&node->hole_stack, &mm->hole_stack);
}

static void rm_hole(struct drm_mm_node *node)
{
	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));

	list_del(&node->hole_stack);
	rb_erase(&node->rb_hole_size, &node->mm->holes_size);
	rb_erase(&node->rb_hole_addr, &node->mm->holes_addr);
	node->hole_size = 0;

	DRM_MM_BUG_ON(drm_mm_hole_follows(node));
}

static inline struct drm_mm_node *rb_hole_size_to_node(struct rb_node *rb)
{
	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_size);
}

static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb)
{
	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr);
}

static inline u64 rb_hole_size(struct rb_node *rb)
{
	return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
}

static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
{
	struct rb_node *best = NULL;
	struct rb_node **link = &mm->holes_size.rb_node;

	while (*link) {
		struct rb_node *rb = *link;

		if (size <= rb_hole_size(rb)) {
			link = &rb->rb_left;
			best = rb;
		} else {
			link = &rb->rb_right;
		}
	}

	return rb_hole_size_to_node(best);
}

static struct drm_mm_node *find_hole(struct drm_mm *mm, u64 addr)
{
	struct drm_mm_node *node = NULL;
	struct rb_node **link = &mm->holes_addr.rb_node;

	while (*link) {
		u64 hole_start;

		node = rb_hole_addr_to_node(*link);
		hole_start = __drm_mm_hole_node_start(node);

		if (addr < hole_start)
			link = &node->rb_hole_addr.rb_left;
		else if (addr > hole_start + node->hole_size)
			link = &node->rb_hole_addr.rb_right;
		else
			break;
	}

	return node;
}

static struct drm_mm_node *
first_hole(struct drm_mm *mm,
	   u64 start, u64 end, u64 size,
	   enum drm_mm_insert_mode mode)
{
	if (RB_EMPTY_ROOT(&mm->holes_size))
		return NULL;

	switch (mode) {
	default:
	case DRM_MM_INSERT_BEST:
		return best_hole(mm, size);

	case DRM_MM_INSERT_LOW:
		return find_hole(mm, start);

	case DRM_MM_INSERT_HIGH:
		return find_hole(mm, end);

	case DRM_MM_INSERT_EVICT:
		return list_first_entry_or_null(&mm->hole_stack,
						struct drm_mm_node,
						hole_stack);
	}
}

static struct drm_mm_node *
next_hole(struct drm_mm *mm,
	  struct drm_mm_node *node,
	  enum drm_mm_insert_mode mode)
{
	switch (mode) {
	default:
	case DRM_MM_INSERT_BEST:
		return rb_hole_size_to_node(rb_next(&node->rb_hole_size));

	case DRM_MM_INSERT_LOW:
		return rb_hole_addr_to_node(rb_next(&node->rb_hole_addr));

	case DRM_MM_INSERT_HIGH:
		return rb_hole_addr_to_node(rb_prev(&node->rb_hole_addr));

	case DRM_MM_INSERT_EVICT:
		node = list_next_entry(node, hole_stack);
		return &node->hole_stack == &mm->hole_stack ? NULL : node;
	}
}

/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up &drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. All other
 * fields must be cleared to 0. This is useful to initialize the allocator with
 * preallocated objects which must be set-up before the range allocator can be
 * set-up, e.g. when taking over a firmware framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	u64 end = node->start + node->size;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 adj_start, adj_end;

	if (unlikely(end <= node->start))
		return -ENOSPC;

	/* Find the relevant hole to add our node to */
	hole = find_hole(mm, node->start);
	if (!hole)
		return -ENOSPC;

	adj_start = hole_start = __drm_mm_hole_node_start(hole);
	adj_end = hole_end = hole_start + hole->hole_size;

	if (mm->color_adjust)
		mm->color_adjust(hole, node->color, &adj_start, &adj_end);

	if (adj_start > node->start || adj_end < end)
		return -ENOSPC;

	node->mm = mm;

	list_add(&node->node_list, &hole->node_list);
	drm_mm_interval_tree_add_node(hole, node);
	node->allocated = true;
	node->hole_size = 0;

	rm_hole(hole);
	if (node->start > hole_start)
		add_hole(hole);
	if (end < hole_end)
		add_hole(node);

	save_stack(node);
	return 0;
}
EXPORT_SYMBOL(drm_mm_reserve_node);

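/*
 * Example: a sketch of taking over a firmware-initialized scanout buffer by
 * reserving its exact range (fb_base and fb_size are hypothetical values
 * read from the firmware configuration):
 *
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	node.start = fb_base;
 *	node.size = fb_size;
 *	err = drm_mm_reserve_node(&mm, &node);
 *	if (err)
 *		... the range is already in use ...
 */
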
/**
 * drm_mm_insert_node_in_range - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @range_start: start of the allowed range for this node
 * @range_end: end of the allowed range for this node
 * @mode: fine-tune the allocation search and placement
 *
 * The preallocated @node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range(struct drm_mm * const mm,
				struct drm_mm_node * const node,
				u64 size, u64 alignment,
				unsigned long color,
				u64 range_start, u64 range_end,
				enum drm_mm_insert_mode mode)
{
	struct drm_mm_node *hole;
	u64 remainder_mask;

	DRM_MM_BUG_ON(range_start >= range_end);

	if (unlikely(size == 0 || range_end - range_start < size))
		return -ENOSPC;

	if (alignment <= 1)
		alignment = 0;

	remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
	for (hole = first_hole(mm, range_start, range_end, size, mode); hole;
	     hole = next_hole(mm, hole, mode)) {
		u64 hole_start = __drm_mm_hole_node_start(hole);
		u64 hole_end = hole_start + hole->hole_size;
		u64 adj_start, adj_end;
		u64 col_start, col_end;

		if (mode == DRM_MM_INSERT_LOW && hole_start >= range_end)
			break;

		if (mode == DRM_MM_INSERT_HIGH && hole_end <= range_start)
			break;

		col_start = hole_start;
		col_end = hole_end;
		if (mm->color_adjust)
			mm->color_adjust(hole, color, &col_start, &col_end);

		adj_start = max(col_start, range_start);
		adj_end = min(col_end, range_end);

		if (adj_end <= adj_start || adj_end - adj_start < size)
			continue;

		if (mode == DRM_MM_INSERT_HIGH)
			adj_start = adj_end - size;

		if (alignment) {
			u64 rem;

			if (likely(remainder_mask))
				rem = adj_start & remainder_mask;
			else
				div64_u64_rem(adj_start, alignment, &rem);
			if (rem) {
				adj_start -= rem;
				if (mode != DRM_MM_INSERT_HIGH)
					adj_start += alignment;

				if (adj_start < max(col_start, range_start) ||
				    min(col_end, range_end) - adj_start < size)
					continue;

				if (adj_end <= adj_start ||
				    adj_end - adj_start < size)
					continue;
			}
		}

		node->mm = mm;
		node->size = size;
		node->start = adj_start;
		node->color = color;
		node->hole_size = 0;

		list_add(&node->node_list, &hole->node_list);
		drm_mm_interval_tree_add_node(hole, node);
		node->allocated = true;

		rm_hole(hole);
		if (adj_start > hole_start)
			add_hole(hole);
		if (adj_start + size < hole_end)
			add_hole(node);

		save_stack(node);
		return 0;
	}

	return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);

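/*
 * Example: a 64 KiB aligned allocation placed top-down within the first
 * 256 MiB of the managed range (sizes are made up for illustration; the
 * SZ_* macros come from <linux/sizes.h>):
 *
 *	err = drm_mm_insert_node_in_range(&mm, &node, SZ_2M, SZ_64K, 0,
 *					  0, SZ_256M, DRM_MM_INSERT_HIGH);
 */
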
/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need to
 * be cleared again before it can be re-inserted into this or any other drm_mm
 * allocator. It is a bug to call this function on an unallocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(!node->allocated);
	DRM_MM_BUG_ON(node->scanned_block);

	prev_node = list_prev_entry(node, node_list);

	if (drm_mm_hole_follows(node))
		rm_hole(node);

	drm_mm_interval_tree_remove(node, &mm->interval_tree);
	list_del(&node->node_list);
	node->allocated = false;

	if (drm_mm_hole_follows(prev_node))
		rm_hole(prev_node);
	add_hole(prev_node);
}
EXPORT_SYMBOL(drm_mm_remove_node);

/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful for when drivers embed the drm_mm_node structure and hence
 * can't move allocations by reassigning pointers. It's a combination of remove
 * and insert with the guarantee that the allocation start will match.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	struct drm_mm *mm = old->mm;

	DRM_MM_BUG_ON(!old->allocated);

	*new = *old;

	list_replace(&old->node_list, &new->node_list);
	rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree);

	if (drm_mm_hole_follows(old)) {
		list_replace(&old->hole_stack, &new->hole_stack);
		rb_replace_node(&old->rb_hole_size,
				&new->rb_hole_size,
				&mm->holes_size);
		rb_replace_node(&old->rb_hole_addr,
				&new->rb_hole_addr,
				&mm->holes_addr);
	}

	old->allocated = false;
	new->allocated = true;
}
EXPORT_SYMBOL(drm_mm_replace_node);

/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have contiguous allocations for a given object. When
 * evicting objects to make space for a new one it is therefore not the most
 * efficient to simply select objects from the tail of an LRU until there's a
 * suitable hole: Especially for big objects or nodes that otherwise have
 * special allocation constraints there's a good chance we evict lots of
 * (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_scan_init() or drm_mm_scan_init_with_range(). The driver adds
 * objects to the roster, probably by walking an LRU list, but this can be
 * freely implemented. Eviction candidates are added using
 * drm_mm_scan_add_block() until a suitable hole is found or there are no
 * further evictable objects. Eviction roster metadata is tracked in &struct
 * drm_mm_scan.
 *
 * The driver must walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is used
 * in the scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected (drm_mm_scan_remove_block()
 * reported true) in the scan, and any overlapping nodes after color adjustment
 * (drm_mm_scan_color_evict()). Adding and removing an object is O(1), and
 * since freeing a node is also O(1) the overall complexity is
 * O(scanned_objects). So, like the free stack which needs to be walked before a
 * scan operation even begins, this is linear in the number of objects. It
 * doesn't seem to hurt too badly. A sketch of a full eviction loop follows
 * this comment.
 */

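/*
 * Example: a sketch of an eviction loop built on the scan interface,
 * following the steps documented above. struct my_obj, its embedded node
 * and scan_link members, and the driver's lru list are hypothetical:
 *
 *	struct drm_mm_scan scan;
 *	struct my_obj *obj, *next;
 *	LIST_HEAD(scan_list);
 *	LIST_HEAD(evict_list);
 *	bool found = false;
 *
 *	drm_mm_scan_init_with_range(&scan, &mm, size, alignment, 0,
 *				    start, end, DRM_MM_INSERT_BEST);
 *	list_for_each_entry(obj, &lru, lru_link) {
 *		list_add(&obj->scan_link, &scan_list);
 *		if (drm_mm_scan_add_block(&scan, &obj->node)) {
 *			found = true;
 *			break;
 *		}
 *	}
 *
 * Because list_add() prepends, walking scan_list forward visits the nodes
 * in exactly the reverse order of addition, as required:
 *
 *	list_for_each_entry_safe(obj, next, &scan_list, scan_link) {
 *		if (drm_mm_scan_remove_block(&scan, &obj->node))
 *			list_move(&obj->scan_link, &evict_list);
 *		else
 *			list_del(&obj->scan_link);
 *	}
 *
 * If found is true, the driver then evicts every object on evict_list
 * (freeing its node), evicts any overlap reported by
 * drm_mm_scan_color_evict(), and finally inserts the new node with
 * DRM_MM_INSERT_EVICT.
 */
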
/**
 * drm_mm_scan_init_with_range - initialize range-restricted lru scanning
 * @scan: scan state
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 * @mode: fine-tune the allocation search and placement
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
				 struct drm_mm *mm,
				 u64 size,
				 u64 alignment,
				 unsigned long color,
				 u64 start,
				 u64 end,
				 enum drm_mm_insert_mode mode)
{
	DRM_MM_BUG_ON(start >= end);
	DRM_MM_BUG_ON(!size || size > end - start);
	DRM_MM_BUG_ON(mm->scan_active);

	scan->mm = mm;

	if (alignment <= 1)
		alignment = 0;

	scan->color = color;
	scan->alignment = alignment;
	scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
	scan->size = size;
	scan->mode = mode;

	scan->range_start = start;
	scan->range_end = end;

	scan->hit_start = U64_MAX;
	scan->hit_end = 0;
}
EXPORT_SYMBOL(drm_mm_scan_init_with_range);

/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @scan: the active drm_mm scanner
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the desired
 * hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
			   struct drm_mm_node *node)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 col_start, col_end;
	u64 adj_start, adj_end;

	DRM_MM_BUG_ON(node->mm != mm);
	DRM_MM_BUG_ON(!node->allocated);
	DRM_MM_BUG_ON(node->scanned_block);
	node->scanned_block = true;
	mm->scan_active++;

	/* Remove this block from the node_list so that we enlarge the hole
	 * (distance between the end of our previous node and the start of
	 * our next), without poisoning the link so that we can restore it
	 * later in drm_mm_scan_remove_block().
	 */
	hole = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(hole, node_list) != node);
	__list_del_entry(&node->node_list);

	hole_start = __drm_mm_hole_node_start(hole);
	hole_end = __drm_mm_hole_node_end(hole);

	col_start = hole_start;
	col_end = hole_end;
	if (mm->color_adjust)
		mm->color_adjust(hole, scan->color, &col_start, &col_end);

	adj_start = max(col_start, scan->range_start);
	adj_end = min(col_end, scan->range_end);
	if (adj_end <= adj_start || adj_end - adj_start < scan->size)
		return false;

	if (scan->mode == DRM_MM_INSERT_HIGH)
		adj_start = adj_end - scan->size;

	if (scan->alignment) {
		u64 rem;

		if (likely(scan->remainder_mask))
			rem = adj_start & scan->remainder_mask;
		else
			div64_u64_rem(adj_start, scan->alignment, &rem);
		if (rem) {
			adj_start -= rem;
			if (scan->mode != DRM_MM_INSERT_HIGH)
				adj_start += scan->alignment;
			if (adj_start < max(col_start, scan->range_start) ||
			    min(col_end, scan->range_end) - adj_start < scan->size)
				return false;

			if (adj_end <= adj_start ||
			    adj_end - adj_start < scan->size)
				return false;
		}
	}

	scan->hit_start = adj_start;
	scan->hit_end = adj_start + scan->size;

	DRM_MM_BUG_ON(scan->hit_start >= scan->hit_end);
	DRM_MM_BUG_ON(scan->hit_start < hole_start);
	DRM_MM_BUG_ON(scan->hit_end > hole_end);

	return true;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @scan: the active drm_mm scanner
 * @node: drm_mm_node to remove
 *
 * Nodes **must** be removed from the scan list in exactly the reverse order in
 * which they have been added (e.g. using list_add() as they are added and then
 * list_for_each() over that eviction list to remove), otherwise the internal
 * state of the memory manager will be corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_insert_node_in_range() with the mode
 * DRM_MM_INSERT_EVICT will then return the just freed block (because it's at
 * the top of the hole stack).
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always
 * return false when no hole has been found.
 */
bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
			      struct drm_mm_node *node)
{
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(node->mm != scan->mm);
	DRM_MM_BUG_ON(!node->scanned_block);
	node->scanned_block = false;

	DRM_MM_BUG_ON(!node->mm->scan_active);
	node->mm->scan_active--;

	/* During drm_mm_scan_add_block() we decoupled this node leaving
	 * its pointers intact. Now that the caller is walking back along
	 * the eviction list we can restore this block into its rightful
	 * place on the full node_list. To confirm that the caller is walking
	 * backwards correctly we check that prev_node->next == node->next,
	 * i.e. both believe the same node should be on the other side of the
	 * hole.
	 */
	prev_node = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(prev_node, node_list) !=
		      list_next_entry(node, node_list));
	list_add(&node->node_list, &prev_node->node_list);

	return (node->start + node->size > scan->hit_start &&
		node->start < scan->hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

/**
 * drm_mm_scan_color_evict - evict overlapping nodes on either side of hole
 * @scan: drm_mm scan with target hole
 *
 * After completing an eviction scan and removing the selected nodes, we may
 * need to remove a few more nodes from either side of the target hole if
 * mm.color_adjust is being used.
 *
 * Returns:
 * A node to evict, or NULL if there are no overlapping nodes.
 */
struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;

	DRM_MM_BUG_ON(list_empty(&mm->hole_stack));

	if (!mm->color_adjust)
		return NULL;

	/*
	 * The hole found during scanning should ideally be the first element
	 * in the hole_stack list, but due to side-effects in the driver it
	 * may not be.
	 */
	list_for_each_entry(hole, &mm->hole_stack, hole_stack) {
		hole_start = __drm_mm_hole_node_start(hole);
		hole_end = hole_start + hole->hole_size;

		if (hole_start <= scan->hit_start &&
		    hole_end >= scan->hit_end)
			break;
	}

	/* We should only be called after we found the hole previously */
	DRM_MM_BUG_ON(&hole->hole_stack == &mm->hole_stack);
	if (unlikely(&hole->hole_stack == &mm->hole_stack))
		return NULL;

	DRM_MM_BUG_ON(hole_start > scan->hit_start);
	DRM_MM_BUG_ON(hole_end < scan->hit_end);

	mm->color_adjust(hole, scan->color, &hole_start, &hole_end);
	if (hole_start > scan->hit_start)
		return hole;
	if (hole_end < scan->hit_end)
		return list_next_entry(hole, node_list);

	return NULL;
}
EXPORT_SYMBOL(drm_mm_scan_color_evict);

/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
	DRM_MM_BUG_ON(start + size <= start);

	mm->color_adjust = NULL;

	INIT_LIST_HEAD(&mm->hole_stack);
	mm->interval_tree = RB_ROOT_CACHED;
	mm->holes_size = RB_ROOT;
	mm->holes_addr = RB_ROOT;

	/* Clever trick to avoid a special case in the free hole tracking. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	mm->head_node.allocated = false;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = -size;
	add_hole(&mm->head_node);

	mm->scan_active = 0;
}
EXPORT_SYMBOL(drm_mm_init);

/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
 */
void drm_mm_takedown(struct drm_mm *mm)
{
	if (WARN(!drm_mm_clean(mm),
		 "Memory manager not clean during takedown.\n"))
		show_leaks(mm);
}
EXPORT_SYMBOL(drm_mm_takedown);

static u64 drm_mm_dump_hole(struct drm_printer *p, const struct drm_mm_node *entry)
{
	u64 start, size;

	size = entry->hole_size;
	if (size) {
		start = drm_mm_hole_node_start(entry);
		drm_printf(p, "%#018llx-%#018llx: %llu: free\n",
			   start, start + size, size);
	}

	return size;
}

/**
 * drm_mm_print - print allocator state
 * @mm: drm_mm allocator to print
 * @p: DRM printer to use
 */
void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p)
{
	const struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(p, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		drm_printf(p, "%#018llx-%#018llx: %llu: used\n", entry->start,
			   entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(p, entry);
	}
	total = total_free + total_used;

	drm_printf(p, "total: %llu, used %llu free %llu\n", total,
		   total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_print);
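
/*
 * Example: dumping the allocator state to the kernel log (a sketch;
 * drm_info_printer() comes from <drm/drm_print.h>, and dev is assumed to be
 * the driver's struct device pointer):
 *
 *	struct drm_printer p = drm_info_printer(dev);
 *
 *	drm_mm_print(&mm, &p);
 */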