/**************************************************************************
 *
 * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA.
 * Copyright 2016 Intel Corporation
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/
/*
 * Authors:
 * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 */

#ifndef _DRM_MM_H_
#define _DRM_MM_H_

/*
 * Generic range manager structs
 */
#include <linux/bug.h>
#include <linux/rbtree.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>
#endif
#include <drm/drm_print.h>

#ifdef CONFIG_DRM_DEBUG_MM
#define DRM_MM_BUG_ON(expr) BUG_ON(expr)
#else
#define DRM_MM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif

/**
 * enum drm_mm_insert_mode - control search and allocation behaviour
 *
 * The &struct drm_mm range manager supports finding a suitable hole using a
 * number of search trees. These trees are organised by size, by address and
 * in most recent eviction order. This allows the user to find either the
 * smallest hole to reuse, the lowest or highest address to reuse, or simply
 * reuse the most recent eviction that fits. When allocating the &drm_mm_node
 * from within the hole, the &drm_mm_insert_mode also dictates whether to
 * allocate the lowest matching address or the highest.
 */
enum drm_mm_insert_mode {
	/**
	 * @DRM_MM_INSERT_BEST:
	 *
	 * Search for the smallest hole (within the search range) that fits
	 * the desired node.
	 *
	 * Allocates the node from the bottom of the found hole.
	 */
	DRM_MM_INSERT_BEST = 0,

	/**
	 * @DRM_MM_INSERT_LOW:
	 *
	 * Search for the lowest hole (address closest to 0, within the search
	 * range) that fits the desired node.
	 *
	 * Allocates the node from the bottom of the found hole.
	 */
	DRM_MM_INSERT_LOW,

	/**
	 * @DRM_MM_INSERT_HIGH:
	 *
	 * Search for the highest hole (address closest to U64_MAX, within the
	 * search range) that fits the desired node.
	 *
	 * Allocates the node from the *top* of the found hole. The specified
	 * alignment for the node is applied to the base of the node
	 * (&drm_mm_node.start).
	 */
	DRM_MM_INSERT_HIGH,

	/**
	 * @DRM_MM_INSERT_EVICT:
	 *
	 * Search for the most recently evicted hole (within the search range)
	 * that fits the desired node. This is appropriate for use immediately
	 * after performing an eviction scan (see drm_mm_scan_init()) and
	 * removing the selected nodes to form a hole.
	 *
	 * Allocates the node from the bottom of the found hole.
	 */
	DRM_MM_INSERT_EVICT,
};

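/*
 * Usage sketch (editor's illustration, not part of the original header): the
 * insert mode is simply the final argument to the insertion helpers declared
 * below. Here @mm and @size are assumed to be provided by the driver.
 *
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	err = drm_mm_insert_node_in_range(mm, &node, size, 0, 0,
 *					  0, U64_MAX, DRM_MM_INSERT_LOW);
 *	if (err)
 *		return err;
 */
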
/**
 * struct drm_mm_node - allocated block in the DRM allocator
 *
 * This represents an allocated block in a &drm_mm allocator. Except for
 * pre-reserved nodes inserted using drm_mm_reserve_node() the structure is
 * entirely opaque and should only be accessed through the provided functions.
 * Since allocation of these nodes is entirely handled by the driver they can be
 * embedded.
 */
struct drm_mm_node {
	/** @color: Opaque driver-private tag. */
	unsigned long color;
	/** @start: Start address of the allocated block. */
	u64 start;
	/** @size: Size of the allocated block. */
	u64 size;
	/* private: */
	struct drm_mm *mm;
	struct list_head node_list;
	struct list_head hole_stack;
	struct rb_node rb;
	struct rb_node rb_hole_size;
	struct rb_node rb_hole_addr;
	u64 __subtree_last;
	u64 hole_size;
	bool allocated : 1;
	bool scanned_block : 1;
#ifdef CONFIG_DRM_DEBUG_MM
	depot_stack_handle_t stack;
#endif
};

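/*
 * Embedding sketch (editor's example, not part of the original header): a
 * hypothetical driver object carrying its node by value, with a fixed region
 * claimed up front via drm_mm_reserve_node(). The node must be cleared to 0,
 * and @start and @size must be set, before reserving.
 *
 *	struct my_buffer {
 *		struct drm_mm_node vma_node;
 *	};
 *
 *	memset(&buf->vma_node, 0, sizeof(buf->vma_node));
 *	buf->vma_node.start = fixed_offset;
 *	buf->vma_node.size = fixed_size;
 *	err = drm_mm_reserve_node(mm, &buf->vma_node);
 */
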
/**
 * struct drm_mm - DRM allocator
 *
 * DRM range allocator with a few special functions and features geared towards
 * managing GPU memory. Except for the @color_adjust callback the structure is
 * entirely opaque and should only be accessed through the provided functions
 * and macros. This structure can be embedded into larger driver structures.
 */
struct drm_mm {
	/**
	 * @color_adjust:
	 *
	 * Optional driver callback to further apply restrictions on a hole. The
	 * node argument points at the node containing the hole from which the
	 * block would be allocated (see drm_mm_hole_follows() and friends). The
	 * other arguments are the color tag of the block to be allocated and
	 * pointers to the start and end of the hole. The driver can adjust the
	 * start and end as needed to e.g. insert guard pages.
	 */
	void (*color_adjust)(const struct drm_mm_node *node,
			     unsigned long color,
			     u64 *start, u64 *end);

	/* private: */
	/* List of all memory nodes that immediately precede a free hole. */
	struct list_head hole_stack;
	/* head_node.node_list is the list of all memory nodes, ordered
	 * according to the (increasing) start address of the memory node. */
	struct drm_mm_node head_node;
	/* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */
	struct rb_root interval_tree;
	struct rb_root holes_size;
	struct rb_root holes_addr;

	unsigned long scan_active;
};

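/*
 * Lifecycle sketch (editor's example, not part of the original header): a
 * hypothetical driver managing a 256 MiB range with one page of guard space
 * inserted before blocks of a different color. Only the drm_mm names are
 * real; everything else is made up for illustration.
 *
 *	static void my_color_adjust(const struct drm_mm_node *node,
 *				    unsigned long color,
 *				    u64 *start, u64 *end)
 *	{
 *		if (node->color != color)
 *			*start += PAGE_SIZE;
 *	}
 *
 *	drm_mm_init(&drv->mm, 0, 256 << 20);
 *	drv->mm.color_adjust = my_color_adjust;
 *	...
 *	drm_mm_takedown(&drv->mm);
 */
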
/**
 * struct drm_mm_scan - DRM allocator eviction roster data
 *
 * This structure tracks data needed for the eviction roster set up using
 * drm_mm_scan_init(), and used with drm_mm_scan_add_block() and
 * drm_mm_scan_remove_block(). The structure is entirely opaque and should only
 * be accessed through the provided functions and macros. It is meant to be
 * allocated temporarily by the driver on the stack.
 */
struct drm_mm_scan {
	/* private: */
	struct drm_mm *mm;

	u64 size;
	u64 alignment;
	u64 remainder_mask;

	u64 range_start;
	u64 range_end;

	u64 hit_start;
	u64 hit_end;

	unsigned long color;
	enum drm_mm_insert_mode mode;
};

/**
 * drm_mm_node_allocated - checks whether a node is allocated
 * @node: drm_mm_node to check
 *
 * Drivers are required to clear a node prior to using it with the
 * drm_mm range manager.
 *
 * Drivers should use this helper for proper encapsulation of drm_mm
 * internals.
 *
 * Returns:
 * True if the @node is allocated.
 */
static inline bool drm_mm_node_allocated(const struct drm_mm_node *node)
{
	return node->allocated;
}

/**
 * drm_mm_initialized - checks whether an allocator is initialized
 * @mm: drm_mm to check
 *
 * Drivers should clear the struct drm_mm prior to initialisation if they
 * want to use this function.
 *
 * Drivers should use this helper for proper encapsulation of drm_mm
 * internals.
 *
 * Returns:
 * True if the @mm is initialized.
 */
static inline bool drm_mm_initialized(const struct drm_mm *mm)
{
	return mm->hole_stack.next;
}

/**
 * drm_mm_hole_follows - checks whether a hole follows this node
 * @node: drm_mm_node to check
 *
 * Holes are embedded into the drm_mm using the tail of a drm_mm_node.
 * If you wish to know whether a hole follows this particular node,
 * query this function. See also drm_mm_hole_node_start() and
 * drm_mm_hole_node_end().
 *
 * Returns:
 * True if a hole follows the @node.
 */
static inline bool drm_mm_hole_follows(const struct drm_mm_node *node)
{
	return node->hole_size;
}

static inline u64 __drm_mm_hole_node_start(const struct drm_mm_node *hole_node)
{
	return hole_node->start + hole_node->size;
}

/**
 * drm_mm_hole_node_start - computes the start of the hole following @node
 * @hole_node: drm_mm_node which implicitly tracks the following hole
 *
 * This is useful for driver-specific debug dumpers. Otherwise drivers should
 * not inspect holes themselves. Drivers must check first whether a hole indeed
 * follows by looking at drm_mm_hole_follows().
 *
 * Returns:
 * Start of the subsequent hole.
 */
static inline u64 drm_mm_hole_node_start(const struct drm_mm_node *hole_node)
{
	DRM_MM_BUG_ON(!drm_mm_hole_follows(hole_node));
	return __drm_mm_hole_node_start(hole_node);
}

static inline u64 __drm_mm_hole_node_end(const struct drm_mm_node *hole_node)
{
	return list_next_entry(hole_node, node_list)->start;
}

/**
 * drm_mm_hole_node_end - computes the end of the hole following @node
 * @hole_node: drm_mm_node which implicitly tracks the following hole
 *
 * This is useful for driver-specific debug dumpers. Otherwise drivers should
 * not inspect holes themselves. Drivers must check first whether a hole indeed
 * follows by looking at drm_mm_hole_follows().
 *
 * Returns:
 * End of the subsequent hole.
 */
static inline u64 drm_mm_hole_node_end(const struct drm_mm_node *hole_node)
{
	return __drm_mm_hole_node_end(hole_node);
}

/**
 * drm_mm_nodes - list of nodes under the drm_mm range manager
 * @mm: the struct drm_mm range manager
 *
 * As the drm_mm range manager hides its node_list deep within its
 * structure, extracting it looks painful and repetitive. This is
 * not expected to be used outside of the drm_mm_for_each_node()
 * macros and similar internal functions.
 *
 * Returns:
 * The node list, may be empty.
 */
#define drm_mm_nodes(mm) (&(mm)->head_node.node_list)

/**
 * drm_mm_for_each_node - iterator to walk over all allocated nodes
 * @entry: &struct drm_mm_node to assign to in each iteration step
 * @mm: &drm_mm allocator to walk
 *
 * This iterator walks over all nodes in the range allocator. It is implemented
 * with list_for_each(), and so is not safe against removal of elements.
 */
#define drm_mm_for_each_node(entry, mm) \
	list_for_each_entry(entry, drm_mm_nodes(mm), node_list)

/**
 * drm_mm_for_each_node_safe - iterator to walk over all allocated nodes
 * @entry: &struct drm_mm_node to assign to in each iteration step
 * @next: &struct drm_mm_node to store the next step
 * @mm: &drm_mm allocator to walk
 *
 * This iterator walks over all nodes in the range allocator. It is implemented
 * with list_for_each_safe(), and so is safe against removal of elements.
 */
#define drm_mm_for_each_node_safe(entry, next, mm) \
	list_for_each_entry_safe(entry, next, drm_mm_nodes(mm), node_list)

/**
 * drm_mm_for_each_hole - iterator to walk over all holes
 * @pos: &drm_mm_node used internally to track progress
 * @mm: &drm_mm allocator to walk
 * @hole_start: u64 variable to assign the hole start to on each iteration
 * @hole_end: u64 variable to assign the hole end to on each iteration
 *
 * This iterator walks over all holes in the range allocator. It is implemented
 * with list_for_each(), and so is not safe against removal of elements. @pos is
 * used internally and will not reflect a real drm_mm_node for the very first
 * hole. Hence users of this iterator may not access it.
 *
 * Implementation Note:
 * We need to inline list_for_each_entry in order to be able to set hole_start
 * and hole_end on each iteration while keeping the macro sane.
 */
#define drm_mm_for_each_hole(pos, mm, hole_start, hole_end) \
	for (pos = list_first_entry(&(mm)->hole_stack, \
				    typeof(*pos), hole_stack); \
	     &pos->hole_stack != &(mm)->hole_stack ? \
	     hole_start = drm_mm_hole_node_start(pos), \
	     hole_end = hole_start + pos->hole_size, \
	     1 : 0; \
	     pos = list_next_entry(pos, hole_stack))

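/*
 * Debug sketch (editor's example, not part of the original header): dumping
 * every hole in an allocator; @mm is assumed to be a driver's &drm_mm.
 *
 *	struct drm_mm_node *pos;
 *	u64 hole_start, hole_end;
 *
 *	drm_mm_for_each_hole(pos, mm, hole_start, hole_end)
 *		pr_info("hole: %llx-%llx\n", hole_start, hole_end);
 */
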
/*
 * Basic range manager support (drm_mm.c)
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
int drm_mm_insert_node_in_range(struct drm_mm *mm,
				struct drm_mm_node *node,
				u64 size,
				u64 alignment,
				unsigned long color,
				u64 start,
				u64 end,
				enum drm_mm_insert_mode mode);

/**
 * drm_mm_insert_node_generic - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @mode: fine-tune the allocation search and placement
 *
 * This is a simplified version of drm_mm_insert_node_in_range() with no
 * range restrictions applied.
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
static inline int
drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
			   u64 size, u64 alignment,
			   unsigned long color,
			   enum drm_mm_insert_mode mode)
{
	return drm_mm_insert_node_in_range(mm, node,
					   size, alignment, color,
					   0, U64_MAX, mode);
}

/**
 * drm_mm_insert_node - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 *
 * This is a simplified version of drm_mm_insert_node_generic() with @color set
 * to 0.
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
static inline int drm_mm_insert_node(struct drm_mm *mm,
				     struct drm_mm_node *node,
				     u64 size)
{
	return drm_mm_insert_node_generic(mm, node, size, 0, 0, 0);
}

void drm_mm_remove_node(struct drm_mm_node *node);
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size);
void drm_mm_takedown(struct drm_mm *mm);

/**
 * drm_mm_clean - checks whether an allocator is clean
 * @mm: drm_mm allocator to check
 *
 * Returns:
 * True if the allocator is completely free, false if there's still a node
 * allocated in it.
 */
static inline bool drm_mm_clean(const struct drm_mm *mm)
{
	return list_empty(drm_mm_nodes(mm));
}

struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last);

/**
 * drm_mm_for_each_node_in_range - iterator to walk over a range of
 * allocated nodes
 * @node__: drm_mm_node structure to assign to in each iteration step
 * @mm__: drm_mm allocator to walk
 * @start__: starting offset, the first node will overlap this
 * @end__: ending offset, the last node will start before this (but may overlap)
 *
 * This iterator walks over all nodes in the range allocator that lie
 * between @start__ and @end__. It is implemented similarly to list_for_each(),
 * but using the internal interval tree to accelerate the search for the
 * starting node, and so is not safe against removal of elements. It assumes
 * that @end__ is within (or is the upper limit of) the drm_mm allocator.
 */
#define drm_mm_for_each_node_in_range(node__, mm__, start__, end__) \
	for (node__ = __drm_mm_interval_first((mm__), (start__), (end__)-1); \
	     node__ && node__->start < (end__); \
	     node__ = list_next_entry(node__, node_list))

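/*
 * Usage sketch (editor's example, not part of the original header): walking
 * the nodes that overlap a 4 KiB window at @offset within a driver's &drm_mm.
 *
 *	struct drm_mm_node *node;
 *
 *	drm_mm_for_each_node_in_range(node, mm, offset, offset + 4096)
 *		pr_info("node: %llx+%llx\n", node->start, node->size);
 */
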
void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
				 struct drm_mm *mm,
				 u64 size, u64 alignment, unsigned long color,
				 u64 start, u64 end,
				 enum drm_mm_insert_mode mode);

/**
 * drm_mm_scan_init - initialize lru scanning
 * @scan: scan state
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @mode: fine-tune the allocation search and placement
 *
 * This is a simplified version of drm_mm_scan_init_with_range() with no range
 * restrictions applied.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
static inline void drm_mm_scan_init(struct drm_mm_scan *scan,
				    struct drm_mm *mm,
				    u64 size,
				    u64 alignment,
				    unsigned long color,
				    enum drm_mm_insert_mode mode)
{
	drm_mm_scan_init_with_range(scan, mm,
				    size, alignment, color,
				    0, U64_MAX, mode);
}

bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
			   struct drm_mm_node *node);
bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
			      struct drm_mm_node *node);
struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan);

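/*
 * Eviction sketch (editor's example, not part of the original header):
 * feeding a driver LRU into the scan roster until enough space is found.
 * Every block added to the roster must be removed again before the scan
 * state is abandoned; blocks for which drm_mm_scan_remove_block() returns
 * true form the hole and must be evicted. struct my_obj, its list links and
 * evict() are hypothetical. Because list_add() prepends, walking scan_list
 * below removes the blocks in reverse order of addition.
 *
 *	struct drm_mm_scan scan;
 *	struct my_obj *obj, *next;
 *	LIST_HEAD(scan_list);
 *
 *	drm_mm_scan_init(&scan, mm, size, 0, 0, DRM_MM_INSERT_EVICT);
 *	list_for_each_entry(obj, &lru, lru_link) {
 *		list_add(&obj->scan_link, &scan_list);
 *		if (drm_mm_scan_add_block(&scan, &obj->node))
 *			break;
 *	}
 *
 *	list_for_each_entry_safe(obj, next, &scan_list, scan_link) {
 *		list_del(&obj->scan_link);
 *		if (drm_mm_scan_remove_block(&scan, &obj->node))
 *			evict(obj);
 *	}
 */
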
void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p);

#endif