/**************************************************************************
 *
 * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/
/*
 * Authors:
 * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 */

#ifndef _DRM_MM_H_
#define _DRM_MM_H_

/*
 * Generic range manager structs
 */
#include <linux/bug.h>
#include <linux/rbtree.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
#endif
#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>
#endif

enum drm_mm_search_flags {
	DRM_MM_SEARCH_DEFAULT = 0,
	DRM_MM_SEARCH_BEST = 1 << 0,
	DRM_MM_SEARCH_BELOW = 1 << 1,
};

enum drm_mm_allocator_flags {
	DRM_MM_CREATE_DEFAULT = 0,
	DRM_MM_CREATE_TOP = 1 << 0,
};

#define DRM_MM_BOTTOMUP DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT
#define DRM_MM_TOPDOWN DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP

struct drm_mm_node {
	struct list_head node_list;
	struct list_head hole_stack;
	struct rb_node rb;
	unsigned hole_follows : 1;
	unsigned scanned_block : 1;
	unsigned scanned_prev_free : 1;
	unsigned scanned_next_free : 1;
	unsigned scanned_preceeds_hole : 1;
	unsigned allocated : 1;
	unsigned long color;
	u64 start;
	u64 size;
	u64 __subtree_last;
	struct drm_mm *mm;
#ifdef CONFIG_DRM_DEBUG_MM
	depot_stack_handle_t stack;
#endif
};

struct drm_mm {
	/* List of all memory nodes that immediately precede a free hole. */
	struct list_head hole_stack;
	/* head_node.node_list is the list of all memory nodes, ordered
	 * according to the (increasing) start address of the memory node. */
	struct drm_mm_node head_node;
	/* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */
	struct rb_root interval_tree;

	unsigned int scan_check_range : 1;
	unsigned scan_alignment;
	unsigned long scan_color;
	u64 scan_size;
	u64 scan_hit_start;
	u64 scan_hit_end;
	unsigned scanned_blocks;
	u64 scan_start;
	u64 scan_end;
	struct drm_mm_node *prev_scanned_node;

	void (*color_adjust)(struct drm_mm_node *node, unsigned long color,
			     u64 *start, u64 *end);
};

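/*
 * Example (editor's sketch, not part of the original header): a
 * drm_mm_node is designed to be embedded in a driver-side object rather
 * than allocated by the range manager itself. A hypothetical buffer
 * object could look like:
 *
 *	struct hypothetical_bo {
 *		struct drm_mm_node vma_node;
 *		u64 size;
 *		void *vaddr;
 *	};
 *
 * The embedded node must be zeroed before being passed to the insert or
 * reserve helpers declared below.
 */
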
/**
 * drm_mm_node_allocated - checks whether a node is allocated
 * @node: drm_mm_node to check
 *
 * Drivers should use this helper for proper encapsulation of drm_mm
 * internals.
 *
 * Returns:
 * True if the @node is allocated.
 */
static inline bool drm_mm_node_allocated(struct drm_mm_node *node)
{
	return node->allocated;
}

/**
 * drm_mm_initialized - checks whether an allocator is initialized
 * @mm: drm_mm to check
 *
 * Drivers should use this helper for proper encapsulation of drm_mm
 * internals.
 *
 * Returns:
 * True if the @mm is initialized.
 */
static inline bool drm_mm_initialized(struct drm_mm *mm)
{
	return mm->hole_stack.next;
}

static inline u64 __drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
	return hole_node->start + hole_node->size;
}

/**
 * drm_mm_hole_node_start - computes the start of the hole following @node
 * @hole_node: drm_mm_node which implicitly tracks the following hole
 *
 * This is useful for driver-specific debug dumpers. Otherwise drivers should
 * not inspect holes themselves. Drivers must check first whether a hole
 * indeed follows by looking at node->hole_follows.
 *
 * Returns:
 * Start of the subsequent hole.
 */
static inline u64 drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
	BUG_ON(!hole_node->hole_follows);
	return __drm_mm_hole_node_start(hole_node);
}

static inline u64 __drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
	return list_next_entry(hole_node, node_list)->start;
}

/**
 * drm_mm_hole_node_end - computes the end of the hole following @node
 * @hole_node: drm_mm_node which implicitly tracks the following hole
 *
 * This is useful for driver-specific debug dumpers. Otherwise drivers should
 * not inspect holes themselves. Drivers must check first whether a hole
 * indeed follows by looking at node->hole_follows.
 *
 * Returns:
 * End of the subsequent hole.
 */
static inline u64 drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
	return __drm_mm_hole_node_end(hole_node);
}

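/*
 * Example (editor's sketch): the hole helpers above are intended for
 * driver-specific debug dumpers, e.g. printing the hole that follows a
 * given node. All names other than the drm_mm symbols are hypothetical:
 *
 *	static void hypothetical_dump_hole(struct drm_mm_node *node)
 *	{
 *		if (!node->hole_follows)
 *			return;
 *		pr_info("hole: %llx-%llx\n",
 *			drm_mm_hole_node_start(node),
 *			drm_mm_hole_node_end(node));
 *	}
 */
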
/**
 * drm_mm_for_each_node - iterator to walk over all allocated nodes
 * @entry: drm_mm_node structure to assign to in each iteration step
 * @mm: drm_mm allocator to walk
 *
 * This iterator walks over all nodes in the range allocator. It is implemented
 * with list_for_each(), so it is not safe against removal of elements.
 */
#define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \
						&(mm)->head_node.node_list, \
						node_list)

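/*
 * Example (editor's sketch): walking every allocated node, e.g. from a
 * debugfs dumper. hypothetical_mm stands in for a driver's drm_mm:
 *
 *	struct drm_mm_node *entry;
 *
 *	drm_mm_for_each_node(entry, &hypothetical_mm)
 *		pr_info("node: %llx +%llx\n", entry->start, entry->size);
 */
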
#define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \
	for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
	     &entry->hole_stack != &(mm)->hole_stack ? \
	     hole_start = drm_mm_hole_node_start(entry), \
	     hole_end = drm_mm_hole_node_end(entry), \
	     1 : 0; \
	     entry = list_entry((backwards) ? entry->hole_stack.prev : entry->hole_stack.next, struct drm_mm_node, hole_stack))

/**
 * drm_mm_for_each_hole - iterator to walk over all holes
 * @entry: drm_mm_node used internally to track progress
 * @mm: drm_mm allocator to walk
 * @hole_start: u64 variable to assign the hole start to on each iteration
 * @hole_end: u64 variable to assign the hole end to on each iteration
 *
 * This iterator walks over all holes in the range allocator. It is implemented
 * with list_for_each(), so it is not safe against removal of elements. @entry
 * is used internally and will not reflect a real drm_mm_node for the very
 * first hole. Hence users of this iterator may not access it.
 *
 * Implementation Note:
 * We need to inline list_for_each_entry in order to be able to set hole_start
 * and hole_end on each iteration while keeping the macro sane.
 *
 * The __drm_mm_for_each_hole version is similar, but with added support for
 * going backwards.
 */
#define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \
	__drm_mm_for_each_hole(entry, mm, hole_start, hole_end, 0)

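/*
 * Example (editor's sketch): summing up all free space in an allocator.
 * Names other than the drm_mm symbols are hypothetical:
 *
 *	struct drm_mm_node *entry;
 *	u64 hole_start, hole_end, free = 0;
 *
 *	drm_mm_for_each_hole(entry, &hypothetical_mm, hole_start, hole_end)
 *		free += hole_end - hole_start;
 */
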
/*
 * Basic range manager support (drm_mm.c)
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);

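/*
 * Example (editor's sketch): drm_mm_reserve_node() inserts a node at the
 * exact range the caller has already filled in, which is useful for
 * firmware- or BIOS-reserved ranges. The base and size constants here are
 * hypothetical:
 *
 *	struct drm_mm_node node = {};
 *	int ret;
 *
 *	node.start = HYPOTHETICAL_RESERVED_BASE;
 *	node.size = HYPOTHETICAL_RESERVED_SIZE;
 *	ret = drm_mm_reserve_node(&hypothetical_mm, &node);
 *
 * A nonzero return means the requested range is not free.
 */
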
int drm_mm_insert_node_generic(struct drm_mm *mm,
			       struct drm_mm_node *node,
			       u64 size,
			       unsigned alignment,
			       unsigned long color,
			       enum drm_mm_search_flags sflags,
			       enum drm_mm_allocator_flags aflags);
/**
 * drm_mm_insert_node - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @flags: flags to fine-tune the allocation
 *
 * This is a simplified version of drm_mm_insert_node_generic() with @color set
 * to 0.
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
static inline int drm_mm_insert_node(struct drm_mm *mm,
				     struct drm_mm_node *node,
				     u64 size,
				     unsigned alignment,
				     enum drm_mm_search_flags flags)
{
	return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags,
					  DRM_MM_CREATE_DEFAULT);
}

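/*
 * Example (editor's sketch): letting the allocator pick a suitable hole.
 * hypothetical_bo embeds a zeroed drm_mm_node as sketched further above:
 *
 *	int ret;
 *
 *	ret = drm_mm_insert_node(&hypothetical_mm, &bo->vma_node,
 *				 bo->size, 0, DRM_MM_SEARCH_DEFAULT);
 *	if (ret)
 *		return ret;
 *
 * On success bo->vma_node.start holds the assigned offset.
 */
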
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
					struct drm_mm_node *node,
					u64 size,
					unsigned alignment,
					unsigned long color,
					u64 start,
					u64 end,
					enum drm_mm_search_flags sflags,
					enum drm_mm_allocator_flags aflags);
/**
 * drm_mm_insert_node_in_range - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @start: start of the allowed range for this node
 * @end: end of the allowed range for this node
 * @flags: flags to fine-tune the allocation
 *
 * This is a simplified version of drm_mm_insert_node_in_range_generic() with
 * @color set to 0.
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
					      struct drm_mm_node *node,
					      u64 size,
					      unsigned alignment,
					      u64 start,
					      u64 end,
					      enum drm_mm_search_flags flags)
{
	return drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
						   0, start, end, flags,
						   DRM_MM_CREATE_DEFAULT);
}

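/*
 * Example (editor's sketch): constraining the search to a sub-range, e.g.
 * for hardware that can only address the first 256 MiB of an aperture.
 * All names other than the drm_mm symbols are hypothetical:
 *
 *	ret = drm_mm_insert_node_in_range(&hypothetical_mm, &bo->vma_node,
 *					  bo->size, 0,
 *					  0, 256 * 1024 * 1024,
 *					  DRM_MM_SEARCH_DEFAULT);
 */
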
void drm_mm_remove_node(struct drm_mm_node *node);
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
void drm_mm_init(struct drm_mm *mm,
		 u64 start,
		 u64 size);
void drm_mm_takedown(struct drm_mm *mm);
bool drm_mm_clean(struct drm_mm *mm);

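/*
 * Example (editor's sketch): typical allocator lifecycle. The manager is
 * initialized once with the managed range, nodes come and go at runtime,
 * and every node must be removed before takedown. The size constant is
 * hypothetical:
 *
 *	struct drm_mm mm;
 *
 *	drm_mm_init(&mm, 0, HYPOTHETICAL_APERTURE_SIZE);
 *	...
 *	drm_mm_remove_node(&bo->vma_node);
 *	...
 *	drm_mm_takedown(&mm);
 *
 * drm_mm_takedown() expects a clean allocator, i.e. drm_mm_clean()
 * returning true.
 */
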
struct drm_mm_node *
__drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last);

/**
 * drm_mm_for_each_node_in_range - iterator to walk over a range of
 * allocated nodes
 * @node: drm_mm_node structure to assign to in each iteration step
 * @mm: drm_mm allocator to walk
 * @start: starting offset, the first node will overlap this
 * @end: ending offset, the last node will start before this (but may overlap)
 *
 * This iterator walks over all nodes in the range allocator that lie
 * between @start and @end. It is implemented similarly to list_for_each(),
 * but using the internal interval tree to accelerate the search for the
 * starting node, and so is not safe against removal of elements. It assumes
 * that @end is within (or is the upper limit of) the drm_mm allocator.
 */
#define drm_mm_for_each_node_in_range(node, mm, start, end)	\
	for (node = __drm_mm_interval_first((mm), (start), (end)-1); \
	     node && node->start < (end);			\
	     node = list_next_entry(node, node_list))

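/*
 * Example (editor's sketch): finding the nodes that overlap a faulting
 * range. hypothetical_mm and the fault offsets are stand-ins:
 *
 *	struct drm_mm_node *node;
 *
 *	drm_mm_for_each_node_in_range(node, &hypothetical_mm,
 *				      fault_start, fault_end)
 *		pr_info("overlaps: %llx +%llx\n", node->start, node->size);
 */
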
void drm_mm_init_scan(struct drm_mm *mm,
		      u64 size,
		      unsigned alignment,
		      unsigned long color);
void drm_mm_init_scan_with_range(struct drm_mm *mm,
				 u64 size,
				 unsigned alignment,
				 unsigned long color,
				 u64 start,
				 u64 end);
bool drm_mm_scan_add_block(struct drm_mm_node *node);
bool drm_mm_scan_remove_block(struct drm_mm_node *node);

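/*
 * Example (editor's sketch): the scan API supports building an eviction
 * roster. The usual pattern, with all non-drm_mm names hypothetical, is to
 * initialize a scan for the desired allocation, feed candidate nodes (e.g.
 * in LRU order) until a large enough hole can be formed, then walk the
 * candidates again in reverse order of addition, evicting those for which
 * drm_mm_scan_remove_block() returns true:
 *
 *	struct hypothetical_bo *bo, *tmp;
 *	LIST_HEAD(roster);
 *
 *	drm_mm_init_scan(&hypothetical_mm, size, 0, 0);
 *	list_for_each_entry(bo, &lru, lru_link) {
 *		list_add(&bo->roster_link, &roster);
 *		if (drm_mm_scan_add_block(&bo->vma_node))
 *			break;
 *	}
 *
 *	list_for_each_entry_safe(bo, tmp, &roster, roster_link) {
 *		list_del(&bo->roster_link);
 *		if (drm_mm_scan_remove_block(&bo->vma_node))
 *			evict(bo);
 *	}
 *
 * Every node added to a scan must be removed again before any other drm_mm
 * operation is used on the allocator.
 */
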
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix);
#ifdef CONFIG_DEBUG_FS
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm);
#endif

#endif