/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if heavy fragmentation is expected.
 *
 * Aligned allocations could also be sped up.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
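
/*
 * Typical use, as an illustrative sketch only (the range and sizes below
 * are placeholder values, not taken from any real driver):
 *
 *	struct drm_mm mm;
 *	struct drm_mm_node *node;
 *
 *	drm_mm_init(&mm, 0, 0x100000);			  (manage [0, 1 MiB))
 *	node = drm_mm_search_free(&mm, 0x1000, 0x100, 0); (first fit)
 *	if (node)
 *		node = drm_mm_get_block_generic(node, 0x1000, 0x100, 0);
 *	...
 *	drm_mm_put_block(node);	   (return the block, merging free space)
 *	drm_mm_takedown(&mm);	   (requires everything to be free again)
 */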

#include "drmP.h"
#include "drm_mm.h"
#include <linux/slab.h>
#include <linux/seq_file.h>

#define MM_UNUSED_TARGET 4

unsigned long drm_mm_tail_space(struct drm_mm *mm)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free)
		return 0;

	return entry->size;
}

int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free)
		return -ENOMEM;

	if (entry->size <= size)
		return -ENOMEM;

	entry->size -= size;
	return 0;
}
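
/*
 * Tail-space sketch (sizes are made up): if the node list ends in a free
 * node of size 0x2000, drm_mm_remove_space_from_tail(mm, 0x1000) shrinks
 * the managed range by 0x1000; drm_mm_add_space_to_tail() (further below)
 * grows it again, either by extending that free tail node or by appending
 * a new one.
 */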

static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	if (atomic)
		child = kmalloc(sizeof(*child), GFP_ATOMIC);
	else
		child = kmalloc(sizeof(*child), GFP_KERNEL);

	if (unlikely(child == NULL)) {
		spin_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child =
			    list_entry(mm->unused_nodes.next,
				       struct drm_mm_node, fl_entry);
			list_del(&child->fl_entry);
			--mm->num_unused;
		}
		spin_unlock(&mm->unused_lock);
	}
	return child;
}

/**
 * drm_mm_pre_get() - pre-allocate drm_mm_node structures
 * @mm: memory manager we are pre-allocating for
 *
 * Tops up the cache of unused nodes so that a later atomic allocation can
 * be served from the cache if kmalloc(GFP_ATOMIC) fails.
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		spin_unlock(&mm->unused_lock);
		node = kmalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->fl_entry, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
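
/*
 * Intended pattern, as a hedged sketch ("dev->mm_lock" is an assumed
 * driver-side lock and "dev->mm" an assumed manager instance; neither is
 * defined in this file):
 *
 *	drm_mm_pre_get(&dev->mm);	(may sleep; fills the node cache)
 *	spin_lock(&dev->mm_lock);
 *	node = drm_mm_search_free(&dev->mm, size, align, 0);
 *	if (node)
 *		node = drm_mm_get_block_generic(node, size, align, 1);
 *	spin_unlock(&dev->mm_lock);
 */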

static int drm_mm_create_tail_node(struct drm_mm *mm,
				   unsigned long start,
				   unsigned long size, int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(mm, atomic);
	if (unlikely(child == NULL))
		return -ENOMEM;

	child->free = 1;
	child->size = size;
	child->start = start;
	child->mm = mm;

	list_add_tail(&child->ml_entry, &mm->ml_entry);
	list_add_tail(&child->fl_entry, &mm->fl_entry);

	return 0;
}

int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size, int atomic)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free) {
		return drm_mm_create_tail_node(mm, entry->start + entry->size,
					       size, atomic);
	}
	entry->size += size;
	return 0;
}

static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
						 unsigned long size,
						 int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(parent->mm, atomic);
	if (unlikely(child == NULL))
		return NULL;

	INIT_LIST_HEAD(&child->fl_entry);

	child->free = 0;
	child->size = size;
	child->start = parent->start;
	child->mm = parent->mm;

	list_add_tail(&child->ml_entry, &parent->ml_entry);

	parent->size -= size;
	parent->start += size;
	return child;
}

struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
					     unsigned long size,
					     unsigned alignment,
					     int atomic)
{
	struct drm_mm_node *align_splitoff = NULL;
	unsigned tmp = 0;

	if (alignment)
		tmp = node->start % alignment;

	if (tmp) {
		align_splitoff =
		    drm_mm_split_at_start(node, alignment - tmp, atomic);
		if (unlikely(align_splitoff == NULL))
			return NULL;
	}

	if (node->size == size) {
		list_del_init(&node->fl_entry);
		node->free = 0;
	} else {
		node = drm_mm_split_at_start(node, size, atomic);
	}

	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);
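
/*
 * Worked alignment example (numbers are made up): for a free node starting
 * at 0x1003 with alignment 0x10, tmp = 0x1003 % 0x10 = 3, so a throwaway
 * block of 0x10 - 3 = 0xd bytes is split off first and handed back to the
 * free list via drm_mm_put_block(), leaving the returned block to start at
 * the aligned address 0x1010.
 */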

struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
						   unsigned long size,
						   unsigned alignment,
						   unsigned long start,
						   unsigned long end,
						   int atomic)
{
	struct drm_mm_node *align_splitoff = NULL;
	unsigned tmp = 0;
	unsigned wasted = 0;

	if (node->start < start)
		wasted += start - node->start;
	if (alignment)
		tmp = ((node->start + wasted) % alignment);

	if (tmp)
		wasted += alignment - tmp;
	if (wasted) {
		align_splitoff = drm_mm_split_at_start(node, wasted, atomic);
		if (unlikely(align_splitoff == NULL))
			return NULL;
	}

	if (node->size == size) {
		list_del_init(&node->fl_entry);
		node->free = 0;
	} else {
		node = drm_mm_split_at_start(node, size, atomic);
	}

	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);
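
/*
 * The range variant restricts the block to [start, end), which is useful
 * when, for example, hardware can only reach part of an aperture. Space
 * skipped to reach `start` or to satisfy `alignment` is accumulated in
 * `wasted`, split off in one piece and immediately returned to the free
 * list.
 */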

/*
 * Put a block. Merge with the previous and/or next block if they are free.
 * Otherwise add to the free stack.
 */

void drm_mm_put_block(struct drm_mm_node *cur)
{
	struct drm_mm *mm = cur->mm;
	struct list_head *cur_head = &cur->ml_entry;
	struct list_head *root_head = &mm->ml_entry;
	struct drm_mm_node *prev_node = NULL;
	struct drm_mm_node *next_node;

	int merged = 0;

	if (cur_head->prev != root_head) {
		prev_node =
		    list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
		if (prev_node->free) {
			prev_node->size += cur->size;
			merged = 1;
		}
	}
	if (cur_head->next != root_head) {
		next_node =
		    list_entry(cur_head->next, struct drm_mm_node, ml_entry);
		if (next_node->free) {
			if (merged) {
				prev_node->size += next_node->size;
				list_del(&next_node->ml_entry);
				list_del(&next_node->fl_entry);
				spin_lock(&mm->unused_lock);
				if (mm->num_unused < MM_UNUSED_TARGET) {
					list_add(&next_node->fl_entry,
						 &mm->unused_nodes);
					++mm->num_unused;
				} else
					kfree(next_node);
				spin_unlock(&mm->unused_lock);
			} else {
				next_node->size += cur->size;
				next_node->start = cur->start;
				merged = 1;
			}
		}
	}
	if (!merged) {
		cur->free = 1;
		list_add(&cur->fl_entry, &mm->fl_entry);
	} else {
		list_del(&cur->ml_entry);
		spin_lock(&mm->unused_lock);
		if (mm->num_unused < MM_UNUSED_TARGET) {
			list_add(&cur->fl_entry, &mm->unused_nodes);
			++mm->num_unused;
		} else
			kfree(cur);
		spin_unlock(&mm->unused_lock);
	}
}
EXPORT_SYMBOL(drm_mm_put_block);
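
/*
 * Merge sketch (addresses are illustrative): freeing the used middle block
 * of [0x0 free][0x100 used][0x200 free] collapses all three into a single
 * free node starting at 0x0; the two node structs made redundant are either
 * cached on unused_nodes or kfree()d.
 */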

struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
				       unsigned long size,
				       unsigned alignment, int best_match)
{
	struct list_head *list;
	const struct list_head *free_stack = &mm->fl_entry;
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;
	unsigned wasted;

	best = NULL;
	best_size = ~0UL;

	list_for_each(list, free_stack) {
		entry = list_entry(list, struct drm_mm_node, fl_entry);
		wasted = 0;

		if (entry->size < size)
			continue;

		if (alignment) {
			unsigned tmp = entry->start % alignment;
			if (tmp)
				wasted += alignment - tmp;
		}

		if (entry->size >= size + wasted) {
			if (!best_match)
				return entry;
			if (entry->size < best_size) {
				best = entry;
				best_size = entry->size;
			}
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free);
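
/*
 * With best_match == 0 the first hole large enough is returned; with
 * best_match != 0 the whole free stack is scanned and the smallest hole
 * that still fits is preferred, trading search time for less fragmentation.
 * E.g. with free holes of 0x400 and 0x120 bytes, a 0x100-byte best-fit
 * request picks the 0x120 hole.
 */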

struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
						unsigned long size,
						unsigned alignment,
						unsigned long start,
						unsigned long end,
						int best_match)
{
	struct list_head *list;
	const struct list_head *free_stack = &mm->fl_entry;
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;
	unsigned wasted;

	best = NULL;
	best_size = ~0UL;

	list_for_each(list, free_stack) {
		entry = list_entry(list, struct drm_mm_node, fl_entry);
		wasted = 0;

		if (entry->size < size)
			continue;

		if (entry->start > end || (entry->start+entry->size) < start)
			continue;

		if (entry->start < start)
			wasted += start - entry->start;

		if (alignment) {
			unsigned tmp = (entry->start + wasted) % alignment;
			if (tmp)
				wasted += alignment - tmp;
		}

		if (entry->size >= size + wasted) {
			if (!best_match)
				return entry;
			if (entry->size < best_size) {
				best = entry;
				best_size = entry->size;
			}
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range);

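/*
 * drm_mm_clean() - nonzero when only the single initial free node remains,
 * i.e. every allocation has been returned with drm_mm_put_block() and the
 * free space has merged back into one block.
 */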
int drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->ml_entry;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->ml_entry);
	INIT_LIST_HEAD(&mm->fl_entry);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	spin_lock_init(&mm->unused_lock);

	return drm_mm_create_tail_node(mm, start, size, 0);
}
EXPORT_SYMBOL(drm_mm_init);

void drm_mm_takedown(struct drm_mm *mm)
{
	struct list_head *bnode = mm->fl_entry.next;
	struct drm_mm_node *entry;
	struct drm_mm_node *next;

	entry = list_entry(bnode, struct drm_mm_node, fl_entry);

	if (entry->ml_entry.next != &mm->ml_entry ||
	    entry->fl_entry.next != &mm->fl_entry) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	list_del(&entry->fl_entry);
	list_del(&entry->ml_entry);
	kfree(entry);

	spin_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, fl_entry) {
		list_del(&entry->fl_entry);
		kfree(entry);
		--mm->num_unused;
	}
	spin_unlock(&mm->unused_lock);

	BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);

#if defined(CONFIG_DEBUG_FS)
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;

	list_for_each_entry(entry, &mm->ml_entry, ml_entry) {
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: %s\n",
			   entry->start, entry->start + entry->size,
			   entry->size, entry->free ? "free" : "used");
		total += entry->size;
		if (entry->free)
			total_free += entry->size;
		else
			total_used += entry->size;
	}
	seq_printf(m, "total: %lu, used %lu free %lu\n",
		   total, total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif