/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>

#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
static void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);

static inline uint32_t ttm_bo_type_flags(unsigned type)
{
	return 1 << (type);
}

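/*
 * Final release of a buffer object's list reference. At this point the
 * object must be idle and off all lists; destroy the backing ttm and
 * hand the object to the driver's destroy callback, or free it here
 * if no callback was supplied at init time.
 */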
static void ttm_bo_release_list(struct kref *list_kref)
{
	struct ttm_buffer_object *bo =
	    container_of(list_kref, struct ttm_buffer_object, list_kref);
	struct ttm_bo_device *bdev = bo->bdev;

	BUG_ON(atomic_read(&bo->list_kref.refcount));
	BUG_ON(atomic_read(&bo->kref.refcount));
	BUG_ON(atomic_read(&bo->cpu_writers));
	BUG_ON(bo->sync_obj != NULL);
	BUG_ON(bo->mem.mm_node != NULL);
	BUG_ON(!list_empty(&bo->lru));
	BUG_ON(!list_empty(&bo->ddestroy));

	if (bo->ttm)
		ttm_tt_destroy(bo->ttm);
	if (bo->destroy)
		bo->destroy(bo);
	else {
		ttm_mem_global_free(bdev->mem_glob, bo->acc_size, false);
		kfree(bo);
	}
}

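/*
 * Block until the buffer drops its reservation. Returns -ERESTART if an
 * interruptible wait was broken by a signal.
 */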
int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
{
	if (interruptible) {
		int ret = 0;

		ret = wait_event_interruptible(bo->event_queue,
					       atomic_read(&bo->reserved) == 0);
		if (unlikely(ret != 0))
			return -ERESTART;
	} else {
		wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
	}
	return 0;
}

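/*
 * Put the buffer back on its memory-type LRU list, and on the global
 * swap LRU if it has backing pages. Caller must hold the reservation;
 * pinned (NO_EVICT) buffers are deliberately kept off the lists.
 */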
static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;

	BUG_ON(!atomic_read(&bo->reserved));

	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

		BUG_ON(!list_empty(&bo->lru));

		man = &bdev->man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru);
		kref_get(&bo->list_kref);

		if (bo->ttm != NULL) {
			list_add_tail(&bo->swap, &bdev->swap_lru);
			kref_get(&bo->list_kref);
		}
	}
}

/**
 * Call with the lru_lock held.
 */

static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
	int put_count = 0;

	if (!list_empty(&bo->swap)) {
		list_del_init(&bo->swap);
		++put_count;
	}
	if (!list_empty(&bo->lru)) {
		list_del_init(&bo->lru);
		++put_count;
	}

	/*
	 * TODO: Add a driver hook to delete from
	 * driver-specific LRU's here.
	 */

	return put_count;
}

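/*
 * Try to take the reservation atomically; on contention, either fail
 * fast (no_wait), flag a deadlock candidate via the validation sequence
 * number (-EAGAIN), or drop the lru_lock and sleep until the current
 * holder releases. Call with bdev->lru_lock held.
 */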
int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
			  bool interruptible,
			  bool no_wait, bool use_sequence, uint32_t sequence)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret;

	while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
		if (use_sequence && bo->seq_valid &&
		    (sequence - bo->val_seq < (1 << 31))) {
			return -EAGAIN;
		}

		if (no_wait)
			return -EBUSY;

		spin_unlock(&bdev->lru_lock);
		ret = ttm_bo_wait_unreserved(bo, interruptible);
		spin_lock(&bdev->lru_lock);

		if (unlikely(ret))
			return ret;
	}

	if (use_sequence) {
		bo->val_seq = sequence;
		bo->seq_valid = true;
	} else {
		bo->seq_valid = false;
	}

	return 0;
}

static void ttm_bo_ref_bug(struct kref *list_kref)
{
	BUG();
}

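/*
 * Reserve a buffer and take it off the LRU lists in one step. A minimal
 * usage sketch (error handling elided; "bo" is assumed to be a valid,
 * referenced buffer object):
 *
 *	ret = ttm_bo_reserve(bo, true, false, false, 0);
 *	if (ret == 0) {
 *		... operate on the reserved buffer ...
 *		ttm_bo_unreserve(bo);
 *	}
 *
 * The list references dropped below cannot be the last ones, since the
 * caller holds a reference; hence the ttm_bo_ref_bug() release function.
 */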
int ttm_bo_reserve(struct ttm_buffer_object *bo,
		   bool interruptible,
		   bool no_wait, bool use_sequence, uint32_t sequence)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int put_count = 0;
	int ret;

	spin_lock(&bdev->lru_lock);
	ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
				    sequence);
	if (likely(ret == 0))
		put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&bdev->lru_lock);

	while (put_count--)
		kref_put(&bo->list_kref, ttm_bo_ref_bug);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_reserve);

void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;

	spin_lock(&bdev->lru_lock);
	ttm_bo_add_to_lru(bo);
	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
	spin_unlock(&bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_unreserve);

/*
 * Call bo->mutex locked.
 */

static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret = 0;
	uint32_t page_flags = 0;

	TTM_ASSERT_LOCKED(&bo->mutex);
	bo->ttm = NULL;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
		/* fall through */
	case ttm_bo_type_kernel:
		bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
					page_flags, bdev->dummy_read_page);
		if (unlikely(bo->ttm == NULL))
			ret = -ENOMEM;
		break;
	case ttm_bo_type_user:
		bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
					page_flags | TTM_PAGE_FLAG_USER,
					bdev->dummy_read_page);
		if (unlikely(bo->ttm == NULL)) {
			ret = -ENOMEM;
			break;
		}

		ret = ttm_tt_set_user(bo->ttm, current,
				      bo->buffer_start, bo->num_pages);
		if (unlikely(ret != 0))
			ttm_tt_destroy(bo->ttm);
		break;
	default:
		printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

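/*
 * Move the buffer to the memory region described by @mem: unmap CPU
 * mappings if the aperture or caching state changes, make sure a bound
 * ttm exists for non-fixed destinations, and then either flip the
 * system-memory placement in place, use ttm_bo_move_ttm(), the driver's
 * move hook, or fall back to a memcpy move.
 */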
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem,
				  bool evict, bool interruptible, bool no_wait)
{
	struct ttm_bo_device *bdev = bo->bdev;
	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (old_is_pci || new_is_pci ||
	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0))
		ttm_bo_unmap_virtual(bo);

	/*
	 * Create and bind a ttm if required.
	 */

	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
		ret = ttm_bo_add_ttm(bo, false);
		if (ret)
			goto out_err;

		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_bind(bo->ttm, mem);
			if (ret)
				goto out_err;
		}

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {

			struct ttm_mem_reg *old_mem = &bo->mem;
			uint32_t save_flags = old_mem->placement;

			*old_mem = *mem;
			mem->mm_node = NULL;
			ttm_flag_masked(&save_flags, mem->placement,
					TTM_PL_MASK_MEMTYPE);
			goto moved;
		}

	}

	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
		ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
	else if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, interruptible,
					 no_wait, mem);
	else
		ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);

	if (ret)
		goto out_err;

moved:
	if (bo->evicted) {
		ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
		if (ret)
			printk(KERN_ERR TTM_PFX "Can not flush read caches\n");
		bo->evicted = false;
	}

	if (bo->mem.mm_node) {
		spin_lock(&bo->lock);
		bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
		    bdev->man[bo->mem.mem_type].gpu_offset;
		bo->cur_placement = bo->mem.placement;
		spin_unlock(&bo->lock);
	}

	return 0;

out_err:
	new_man = &bdev->man[bo->mem.mem_type];
	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}

/**
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, and already on delayed list, do nothing.
 * If not idle, and not on delayed list, put on delayed list,
 * up the list_kref and schedule a delayed list check.
 */

static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	int ret;

	spin_lock(&bo->lock);
	(void) ttm_bo_wait(bo, false, false, !remove_all);

	if (!bo->sync_obj) {
		int put_count;

		spin_unlock(&bo->lock);

		spin_lock(&bdev->lru_lock);
		ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
		BUG_ON(ret);
		if (bo->ttm)
			ttm_tt_unbind(bo->ttm);

		if (!list_empty(&bo->ddestroy)) {
			list_del_init(&bo->ddestroy);
			kref_put(&bo->list_kref, ttm_bo_ref_bug);
		}
		if (bo->mem.mm_node) {
			drm_mm_put_block(bo->mem.mm_node);
			bo->mem.mm_node = NULL;
		}
		put_count = ttm_bo_del_from_lru(bo);
		spin_unlock(&bdev->lru_lock);

		atomic_set(&bo->reserved, 0);

		while (put_count--)
			kref_put(&bo->list_kref, ttm_bo_release_list);

		return 0;
	}

	spin_lock(&bdev->lru_lock);
	if (list_empty(&bo->ddestroy)) {
		void *sync_obj = bo->sync_obj;
		void *sync_obj_arg = bo->sync_obj_arg;

		kref_get(&bo->list_kref);
		list_add_tail(&bo->ddestroy, &bdev->ddestroy);
		spin_unlock(&bdev->lru_lock);
		spin_unlock(&bo->lock);

		if (sync_obj)
			driver->sync_obj_flush(sync_obj, sync_obj_arg);
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
		ret = 0;

	} else {
		spin_unlock(&bdev->lru_lock);
		spin_unlock(&bo->lock);
		ret = -EBUSY;
	}

	return ret;
}

/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */

static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
	struct ttm_buffer_object *entry, *nentry;
	struct list_head *list, *next;
	int ret;

	spin_lock(&bdev->lru_lock);
	list_for_each_safe(list, next, &bdev->ddestroy) {
		entry = list_entry(list, struct ttm_buffer_object, ddestroy);
		nentry = NULL;

		/*
		 * Protect the next list entry from destruction while we
		 * unlock the lru_lock.
		 */

		if (next != &bdev->ddestroy) {
			nentry = list_entry(next, struct ttm_buffer_object,
					    ddestroy);
			kref_get(&nentry->list_kref);
		}
		kref_get(&entry->list_kref);

		spin_unlock(&bdev->lru_lock);
		ret = ttm_bo_cleanup_refs(entry, remove_all);
		kref_put(&entry->list_kref, ttm_bo_release_list);

		spin_lock(&bdev->lru_lock);
		if (nentry) {
			bool next_onlist = !list_empty(next);
			spin_unlock(&bdev->lru_lock);
			kref_put(&nentry->list_kref, ttm_bo_release_list);
			spin_lock(&bdev->lru_lock);
			/*
			 * Someone might have raced us and removed the
			 * next entry from the list. We don't bother restarting
			 * list traversal.
			 */

			if (!next_onlist)
				break;
		}
		if (ret)
			break;
	}
	ret = !list_empty(&bdev->ddestroy);
	spin_unlock(&bdev->lru_lock);

	return ret;
}

static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
	struct ttm_bo_device *bdev =
	    container_of(work, struct ttm_bo_device, wq.work);

	if (ttm_bo_delayed_delete(bdev, false)) {
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
	}
}

static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
	    container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_bo_device *bdev = bo->bdev;

	if (likely(bo->vm_node != NULL)) {
		rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
		drm_mm_put_block(bo->vm_node);
		bo->vm_node = NULL;
	}
	write_unlock(&bdev->vm_lock);
	ttm_bo_cleanup_refs(bo, false);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	write_lock(&bdev->vm_lock);
}

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo = *p_bo;
	struct ttm_bo_device *bdev = bo->bdev;

	*p_bo = NULL;
	write_lock(&bdev->vm_lock);
	kref_put(&bo->kref, ttm_bo_release);
	write_unlock(&bdev->vm_lock);
}
EXPORT_SYMBOL(ttm_bo_unref);

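/*
 * Evict a reserved buffer out of @mem_type: wait for idle, ask the
 * driver where the buffer may go via evict_flags(), fall back to system
 * memory if that fails, and move it there.
 */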
static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
			bool interruptible, bool no_wait)
{
	int ret = 0;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg evict_mem;
	uint32_t proposed_placement;

	if (bo->mem.mem_type != mem_type)
		goto out;

	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait);
	spin_unlock(&bo->lock);

	if (ret && ret != -ERESTART) {
		printk(KERN_ERR TTM_PFX "Failed to expire sync object before "
		       "buffer eviction.\n");
		goto out;
	}

	BUG_ON(!atomic_read(&bo->reserved));

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;

	proposed_placement = bdev->driver->evict_flags(bo);

	ret = ttm_bo_mem_space(bo, proposed_placement,
			       &evict_mem, interruptible, no_wait);
	if (unlikely(ret != 0 && ret != -ERESTART))
		ret = ttm_bo_mem_space(bo, TTM_PL_FLAG_SYSTEM,
				       &evict_mem, interruptible, no_wait);

	if (ret) {
		if (ret != -ERESTART)
			printk(KERN_ERR TTM_PFX
			       "Failed to find memory space for "
			       "buffer 0x%p eviction.\n", bo);
		goto out;
	}

	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
				     no_wait);
	if (ret) {
		if (ret != -ERESTART)
			printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
		goto out;
	}

	spin_lock(&bdev->lru_lock);
	if (evict_mem.mm_node) {
		drm_mm_put_block(evict_mem.mm_node);
		evict_mem.mm_node = NULL;
	}
	spin_unlock(&bdev->lru_lock);
	bo->evicted = true;
out:
	return ret;
}

/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
				  struct ttm_mem_reg *mem,
				  uint32_t mem_type,
				  bool interruptible, bool no_wait)
{
	struct drm_mm_node *node;
	struct ttm_buffer_object *entry;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct list_head *lru;
	unsigned long num_pages = mem->num_pages;
	int put_count = 0;
	int ret;

retry_pre_get:
	ret = drm_mm_pre_get(&man->manager);
	if (unlikely(ret != 0))
		return ret;

	spin_lock(&bdev->lru_lock);
	do {
		node = drm_mm_search_free(&man->manager, num_pages,
					  mem->page_alignment, 1);
		if (node)
			break;

		lru = &man->lru;
		if (list_empty(lru))
			break;

		entry = list_first_entry(lru, struct ttm_buffer_object, lru);
		kref_get(&entry->list_kref);

		ret = ttm_bo_reserve_locked(entry, interruptible, no_wait,
					    false, 0);

		if (likely(ret == 0))
			put_count = ttm_bo_del_from_lru(entry);

		spin_unlock(&bdev->lru_lock);

		if (unlikely(ret != 0))
			return ret;

		while (put_count--)
			kref_put(&entry->list_kref, ttm_bo_ref_bug);

		ret = ttm_bo_evict(entry, mem_type, interruptible, no_wait);

		ttm_bo_unreserve(entry);

		kref_put(&entry->list_kref, ttm_bo_release_list);
		if (ret)
			return ret;

		spin_lock(&bdev->lru_lock);
	} while (1);

	if (!node) {
		spin_unlock(&bdev->lru_lock);
		return -ENOMEM;
	}

	node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment);
	if (unlikely(!node)) {
		spin_unlock(&bdev->lru_lock);
		goto retry_pre_get;
	}

	spin_unlock(&bdev->lru_lock);
	mem->mm_node = node;
	mem->mem_type = mem_type;
	return 0;
}

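/*
 * Check whether a memory type is usable for the requested placement
 * mask, and derive the resulting placement flags: the memory-type bit
 * plus the best matching caching mode the manager supports.
 */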
static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
				 bool disallow_fixed,
				 uint32_t mem_type,
				 uint32_t mask, uint32_t *res_mask)
{
	uint32_t cur_flags = ttm_bo_type_flags(mem_type);

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
		return false;

	if ((cur_flags & mask & TTM_PL_MASK_MEM) == 0)
		return false;

	if ((mask & man->available_caching) == 0)
		return false;
	if (mask & man->default_caching)
		cur_flags |= man->default_caching;
	else if (mask & TTM_PL_FLAG_CACHED)
		cur_flags |= TTM_PL_FLAG_CACHED;
	else if (mask & TTM_PL_FLAG_WC)
		cur_flags |= TTM_PL_FLAG_WC;
	else
		cur_flags |= TTM_PL_FLAG_UNCACHED;

	*res_mask = cur_flags;
	return true;
}

/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver. If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
		     uint32_t proposed_placement,
		     struct ttm_mem_reg *mem,
		     bool interruptible, bool no_wait)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;

	uint32_t num_prios = bdev->driver->num_mem_type_prio;
	const uint32_t *prios = bdev->driver->mem_type_prio;
	uint32_t i;
	uint32_t mem_type = TTM_PL_SYSTEM;
	uint32_t cur_flags = 0;
	bool type_found = false;
	bool type_ok = false;
	bool has_eagain = false;
	struct drm_mm_node *node = NULL;
	int ret;

	mem->mm_node = NULL;
	for (i = 0; i < num_prios; ++i) {
		mem_type = prios[i];
		man = &bdev->man[mem_type];

		type_ok = ttm_bo_mt_compatible(man,
					       bo->type == ttm_bo_type_user,
					       mem_type, proposed_placement,
					       &cur_flags);

		if (!type_ok)
			continue;

		if (mem_type == TTM_PL_SYSTEM)
			break;

		if (man->has_type && man->use_type) {
			type_found = true;
			do {
				ret = drm_mm_pre_get(&man->manager);
				if (unlikely(ret))
					return ret;

				spin_lock(&bdev->lru_lock);
				node = drm_mm_search_free(&man->manager,
							  mem->num_pages,
							  mem->page_alignment,
							  1);
				if (unlikely(!node)) {
					spin_unlock(&bdev->lru_lock);
					break;
				}
				node = drm_mm_get_block_atomic(node,
							       mem->num_pages,
							       mem->page_alignment);
				spin_unlock(&bdev->lru_lock);
			} while (!node);
		}
		if (node)
			break;
	}

	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) {
		mem->mm_node = node;
		mem->mem_type = mem_type;
		mem->placement = cur_flags;
		return 0;
	}

	if (!type_found)
		return -EINVAL;

	num_prios = bdev->driver->num_mem_busy_prio;
	prios = bdev->driver->mem_busy_prio;

	for (i = 0; i < num_prios; ++i) {
		mem_type = prios[i];
		man = &bdev->man[mem_type];

		if (!man->has_type)
			continue;

		if (!ttm_bo_mt_compatible(man,
					  bo->type == ttm_bo_type_user,
					  mem_type,
					  proposed_placement, &cur_flags))
			continue;

		ret = ttm_bo_mem_force_space(bdev, mem, mem_type,
					     interruptible, no_wait);

		if (ret == 0 && mem->mm_node) {
			mem->placement = cur_flags;
			return 0;
		}

		if (ret == -ERESTART)
			has_eagain = true;
	}

	ret = (has_eagain) ? -ERESTART : -ENOMEM;
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);

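/*
 * Wait for all CPU writers (synccpu write grabs) to release the buffer.
 * Returns -EBUSY immediately if @no_wait is set and writers are active.
 */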
int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
{
	int ret = 0;

	if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
		return -EBUSY;

	ret = wait_event_interruptible(bo->event_queue,
				       atomic_read(&bo->cpu_writers) == 0);

	if (ret == -ERESTARTSYS)
		ret = -ERESTART;

	return ret;
}

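/*
 * Move a reserved buffer to a placement satisfying @proposed_placement:
 * wait for the buffer to go idle, find space with ttm_bo_mem_space(),
 * and hand the actual move to ttm_bo_handle_move_mem().
 */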
int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
		       uint32_t proposed_placement,
		       bool interruptible, bool no_wait)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret = 0;
	struct ttm_mem_reg mem;

	BUG_ON(!atomic_read(&bo->reserved));

	/*
	 * FIXME: It's possible to pipeline buffer moves.
	 * Have the driver move function wait for idle when necessary,
	 * instead of doing it here.
	 */

	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait);
	spin_unlock(&bo->lock);

	if (ret)
		return ret;

	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;

	/*
	 * Determine where to move the buffer.
	 */

	ret = ttm_bo_mem_space(bo, proposed_placement, &mem,
			       interruptible, no_wait);
	if (ret)
		goto out_unlock;

	ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);

out_unlock:
	if (ret && mem.mm_node) {
		spin_lock(&bdev->lru_lock);
		drm_mm_put_block(mem.mm_node);
		spin_unlock(&bdev->lru_lock);
	}
	return ret;
}

static int ttm_bo_mem_compat(uint32_t proposed_placement,
			     struct ttm_mem_reg *mem)
{
	if ((proposed_placement & mem->placement & TTM_PL_MASK_MEM) == 0)
		return 0;
	if ((proposed_placement & mem->placement & TTM_PL_MASK_CACHING) == 0)
		return 0;

	return 1;
}

int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
			       uint32_t proposed_placement,
			       bool interruptible, bool no_wait)
{
	int ret;

	BUG_ON(!atomic_read(&bo->reserved));
	bo->proposed_placement = proposed_placement;

	TTM_DEBUG("Proposed placement 0x%08lx, Old flags 0x%08lx\n",
		  (unsigned long)proposed_placement,
		  (unsigned long)bo->mem.placement);

	/*
	 * Check whether we need to move the buffer.
	 */

	if (!ttm_bo_mem_compat(bo->proposed_placement, &bo->mem)) {
		ret = ttm_bo_move_buffer(bo, bo->proposed_placement,
					 interruptible, no_wait);
		if (ret) {
			if (ret != -ERESTART)
				printk(KERN_ERR TTM_PFX
				       "Failed moving buffer. "
				       "Proposed placement 0x%08x\n",
				       bo->proposed_placement);
			if (ret == -ENOMEM)
				printk(KERN_ERR TTM_PFX
				       "Out of aperture space or "
				       "DRM memory quota.\n");
			return ret;
		}
	}

	/*
	 * We might need to add a TTM.
	 */

	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		ret = ttm_bo_add_ttm(bo, true);
		if (ret)
			return ret;
	}
	/*
	 * Validation has succeeded, move the access and other
	 * non-mapping-related flag bits from the proposed flags to
	 * the active flags.
	 */

	ttm_flag_masked(&bo->mem.placement, bo->proposed_placement,
			~TTM_PL_MASK_MEMTYPE);

	return 0;
}
EXPORT_SYMBOL(ttm_buffer_object_validate);

int
ttm_bo_check_placement(struct ttm_buffer_object *bo,
		       uint32_t set_flags, uint32_t clr_flags)
{
	uint32_t new_mask = set_flags | clr_flags;

	if ((bo->type == ttm_bo_type_user) &&
	    (clr_flags & TTM_PL_FLAG_CACHED)) {
		printk(KERN_ERR TTM_PFX
		       "User buffers require cache-coherent memory.\n");
		return -EINVAL;
	}

	if (!capable(CAP_SYS_ADMIN)) {
		if (new_mask & TTM_PL_FLAG_NO_EVICT) {
			printk(KERN_ERR TTM_PFX "Need to be root to modify"
			       " NO_EVICT status.\n");
			return -EINVAL;
		}

		if ((clr_flags & bo->mem.placement & TTM_PL_MASK_MEMTYPE) &&
		    (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
			printk(KERN_ERR TTM_PFX
			       "Incompatible memory specification"
			       " for NO_EVICT buffer.\n");
			return -EINVAL;
		}
	}
	return 0;
}

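/*
 * Initialize an embedded or driver-allocated buffer object: set up the
 * locks, reference counts and lists, place the buffer in cached system
 * memory, allocate device address space for ttm_bo_type_device objects,
 * and validate it into the placement given by @flags. The object is
 * returned unreserved; on error it is unreferenced again.
 */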
int ttm_buffer_object_init(struct ttm_bo_device *bdev,
			   struct ttm_buffer_object *bo,
			   unsigned long size,
			   enum ttm_bo_type type,
			   uint32_t flags,
			   uint32_t page_alignment,
			   unsigned long buffer_start,
			   bool interruptible,
			   struct file *persistant_swap_storage,
			   size_t acc_size,
			   void (*destroy) (struct ttm_buffer_object *))
{
	int ret = 0;
	unsigned long num_pages;

	size += buffer_start & ~PAGE_MASK;
	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
		return -EINVAL;
	}
	bo->destroy = destroy;

	spin_lock_init(&bo->lock);
	kref_init(&bo->kref);
	kref_init(&bo->list_kref);
	atomic_set(&bo->cpu_writers, 0);
	atomic_set(&bo->reserved, 1);
	init_waitqueue_head(&bo->event_queue);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->ddestroy);
	INIT_LIST_HEAD(&bo->swap);
	bo->bdev = bdev;
	bo->type = type;
	bo->num_pages = num_pages;
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->mem.num_pages = bo->num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	bo->buffer_start = buffer_start & PAGE_MASK;
	bo->priv_flags = 0;
	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
	bo->seq_valid = false;
	bo->persistant_swap_storage = persistant_swap_storage;
	bo->acc_size = acc_size;

	ret = ttm_bo_check_placement(bo, flags, 0ULL);
	if (unlikely(ret != 0))
		goto out_err;

	/*
	 * If no caching attributes are set, accept any form of caching.
	 */

	if ((flags & TTM_PL_MASK_CACHING) == 0)
		flags |= TTM_PL_MASK_CACHING;

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */

	if (bo->type == ttm_bo_type_device) {
		ret = ttm_bo_setup_vm(bo);
		if (ret)
			goto out_err;
	}

	ret = ttm_buffer_object_validate(bo, flags, interruptible, false);
	if (ret)
		goto out_err;

	ttm_bo_unreserve(bo);
	return 0;

out_err:
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	return ret;
}
EXPORT_SYMBOL(ttm_buffer_object_init);

static inline size_t ttm_bo_size(struct ttm_bo_device *bdev,
				 unsigned long num_pages)
{
	size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
	    PAGE_MASK;

	return bdev->ttm_bo_size + 2 * page_array_size;
}

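/*
 * Convenience wrapper that accounts the object against the global
 * memory limit, kzalloc()s it and initializes it. A minimal usage
 * sketch (the placement flags and the 64 KiB size are just example
 * values):
 *
 *	struct ttm_buffer_object *bo;
 *	int ret;
 *
 *	ret = ttm_buffer_object_create(bdev, 65536, ttm_bo_type_kernel,
 *				       TTM_PL_FLAG_SYSTEM |
 *				       TTM_PL_FLAG_CACHED,
 *				       0, 0, false, NULL, &bo);
 *	if (ret == 0)
 *		... use bo, then drop it with ttm_bo_unref(&bo) ...
 */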
int ttm_buffer_object_create(struct ttm_bo_device *bdev,
			     unsigned long size,
			     enum ttm_bo_type type,
			     uint32_t flags,
			     uint32_t page_alignment,
			     unsigned long buffer_start,
			     bool interruptible,
			     struct file *persistant_swap_storage,
			     struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo;
	int ret;
	struct ttm_mem_global *mem_glob = bdev->mem_glob;

	size_t acc_size =
	    ttm_bo_size(bdev, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false);
	if (unlikely(ret != 0))
		return ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);

	if (unlikely(bo == NULL)) {
		ttm_mem_global_free(mem_glob, acc_size, false);
		return -ENOMEM;
	}

	ret = ttm_buffer_object_init(bdev, bo, size, type, flags,
				     page_alignment, buffer_start,
				     interruptible,
				     persistant_swap_storage, acc_size, NULL);
	if (likely(ret == 0))
		*p_bo = bo;

	return ret;
}

static int ttm_bo_leave_list(struct ttm_buffer_object *bo,
			     uint32_t mem_type, bool allow_errors)
{
	int ret;

	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, false, false);
	spin_unlock(&bo->lock);

	if (ret && allow_errors)
		goto out;

	if (bo->mem.mem_type == mem_type)
		ret = ttm_bo_evict(bo, mem_type, false, false);

	if (ret) {
		if (allow_errors) {
			goto out;
		} else {
			ret = 0;
			printk(KERN_ERR TTM_PFX "Cleanup eviction failed\n");
		}
	}

out:
	return ret;
}

static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
				   struct list_head *head,
				   unsigned mem_type, bool allow_errors)
{
	struct ttm_buffer_object *entry;
	int ret;
	int put_count;

	/*
	 * Can't use standard list traversal since we're unlocking.
	 */

	spin_lock(&bdev->lru_lock);

	while (!list_empty(head)) {
		entry = list_first_entry(head, struct ttm_buffer_object, lru);
		kref_get(&entry->list_kref);
		ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
		put_count = ttm_bo_del_from_lru(entry);
		spin_unlock(&bdev->lru_lock);
		while (put_count--)
			kref_put(&entry->list_kref, ttm_bo_ref_bug);
		BUG_ON(ret);
		ret = ttm_bo_leave_list(entry, mem_type, allow_errors);
		ttm_bo_unreserve(entry);
		kref_put(&entry->list_kref, ttm_bo_release_list);
		spin_lock(&bdev->lru_lock);
	}

	spin_unlock(&bdev->lru_lock);

	return 0;
}

int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	int ret = -EINVAL;

	if (mem_type >= TTM_NUM_MEM_TYPES) {
		printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type);
		return ret;
	}

	if (!man->has_type) {
		printk(KERN_ERR TTM_PFX "Trying to take down uninitialized "
		       "memory manager type %u\n", mem_type);
		return ret;
	}

	man->use_type = false;
	man->has_type = false;

	ret = 0;
	if (mem_type > 0) {
		ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false);

		spin_lock(&bdev->lru_lock);
		if (drm_mm_clean(&man->manager))
			drm_mm_takedown(&man->manager);
		else
			ret = -EBUSY;

		spin_unlock(&bdev->lru_lock);
	}

	return ret;
}
EXPORT_SYMBOL(ttm_bo_clean_mm);

int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
		printk(KERN_ERR TTM_PFX
		       "Illegal memory manager memory type %u.\n",
		       mem_type);
		return -EINVAL;
	}

	if (!man->has_type) {
		printk(KERN_ERR TTM_PFX
		       "Memory type %u has not been initialized.\n",
		       mem_type);
		return 0;
	}

	return ttm_bo_force_list_clean(bdev, &man->lru, mem_type, true);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);

int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
		   unsigned long p_offset, unsigned long p_size)
{
	int ret = -EINVAL;
	struct ttm_mem_type_manager *man;

	if (type >= TTM_NUM_MEM_TYPES) {
		printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type);
		return ret;
	}

	man = &bdev->man[type];
	if (man->has_type) {
		printk(KERN_ERR TTM_PFX
		       "Memory manager already initialized for type %d\n",
		       type);
		return ret;
	}

	ret = bdev->driver->init_mem_type(bdev, type, man);
	if (ret)
		return ret;

	ret = 0;
	if (type != TTM_PL_SYSTEM) {
		if (!p_size) {
			printk(KERN_ERR TTM_PFX
			       "Zero size memory manager type %d\n",
			       type);
			return -EINVAL;
		}
		ret = drm_mm_init(&man->manager, p_offset, p_size);
		if (ret)
			return ret;
	}
	man->has_type = true;
	man->use_type = true;
	man->size = p_size;

	INIT_LIST_HEAD(&man->lru);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init_mm);

int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
	int ret = 0;
	unsigned i = TTM_NUM_MEM_TYPES;
	struct ttm_mem_type_manager *man;

	while (i--) {
		man = &bdev->man[i];
		if (man->has_type) {
			man->use_type = false;
			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
				ret = -EBUSY;
				printk(KERN_ERR TTM_PFX
				       "DRM memory manager type %d "
				       "is not clean.\n", i);
			}
			man->has_type = false;
		}
	}

	if (!cancel_delayed_work(&bdev->wq))
		flush_scheduled_work();

	while (ttm_bo_delayed_delete(bdev, true))
		;

	spin_lock(&bdev->lru_lock);
	if (list_empty(&bdev->ddestroy))
		TTM_DEBUG("Delayed destroy list was clean\n");

	if (list_empty(&bdev->swap_lru))
		TTM_DEBUG("Swap list was clean\n");
	spin_unlock(&bdev->lru_lock);

	ttm_mem_unregister_shrink(bdev->mem_glob, &bdev->shrink);
	BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
	write_lock(&bdev->vm_lock);
	drm_mm_takedown(&bdev->addr_space_mm);
	write_unlock(&bdev->vm_lock);

	__free_page(bdev->dummy_read_page);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);

/*
 * This function is intended to be called on drm driver load.
 * If you decide to call it from firstopen, you must protect the call
 * from a potentially racing ttm_bo_driver_finish in lastclose.
 * (This may happen on X server restart).
 */

int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_mem_global *mem_glob,
		       struct ttm_bo_driver *driver, uint64_t file_page_offset)
{
	int ret = -EINVAL;

	bdev->dummy_read_page = NULL;
	rwlock_init(&bdev->vm_lock);
	spin_lock_init(&bdev->lru_lock);

	bdev->driver = driver;
	bdev->mem_glob = mem_glob;

	memset(bdev->man, 0, sizeof(bdev->man));

	bdev->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
	if (unlikely(bdev->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out_err0;
	}

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0);
	if (unlikely(ret != 0))
		goto out_err1;

	bdev->addr_space_rb = RB_ROOT;
	ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
	if (unlikely(ret != 0))
		goto out_err2;

	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
	bdev->nice_mode = true;
	INIT_LIST_HEAD(&bdev->ddestroy);
	INIT_LIST_HEAD(&bdev->swap_lru);
	bdev->dev_mapping = NULL;
	ttm_mem_init_shrink(&bdev->shrink, ttm_bo_swapout);
	ret = ttm_mem_register_shrink(mem_glob, &bdev->shrink);
	if (unlikely(ret != 0)) {
		printk(KERN_ERR TTM_PFX
		       "Could not register buffer object swapout.\n");
		goto out_err2;
	}

	bdev->ttm_bo_extra_size =
	    ttm_round_pot(sizeof(struct ttm_tt)) +
	    ttm_round_pot(sizeof(struct ttm_backend));

	bdev->ttm_bo_size = bdev->ttm_bo_extra_size +
	    ttm_round_pot(sizeof(struct ttm_buffer_object));

	return 0;
out_err2:
	ttm_bo_clean_mm(bdev, 0);
out_err1:
	__free_page(bdev->dummy_read_page);
out_err0:
	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);

/*
 * buffer object vm functions.
 */

bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (mem->mem_type == TTM_PL_SYSTEM)
			return false;

		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
			return false;

		if (mem->placement & TTM_PL_FLAG_CACHED)
			return false;
	}
	return true;
}

int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
		      struct ttm_mem_reg *mem,
		      unsigned long *bus_base,
		      unsigned long *bus_offset, unsigned long *bus_size)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	*bus_size = 0;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;

	if (ttm_mem_reg_is_pci(bdev, mem)) {
		*bus_offset = mem->mm_node->start << PAGE_SHIFT;
		*bus_size = mem->num_pages << PAGE_SHIFT;
		*bus_base = man->io_offset;
	}

	return 0;
}

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	loff_t offset = (loff_t) bo->addr_space_offset;
	loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;

	if (!bdev->dev_mapping)
		return;

	unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
}

static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct rb_node **cur = &bdev->addr_space_rb.rb_node;
	struct rb_node *parent = NULL;
	struct ttm_buffer_object *cur_bo;
	unsigned long offset = bo->vm_node->start;
	unsigned long cur_offset;

	while (*cur) {
		parent = *cur;
		cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
		cur_offset = cur_bo->vm_node->start;
		if (offset < cur_offset)
			cur = &parent->rb_left;
		else if (offset > cur_offset)
			cur = &parent->rb_right;
		else
			BUG();
	}

	rb_link_node(&bo->vm_rb, parent, cur);
	rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
}

/**
 * ttm_bo_setup_vm:
 *
 * @bo: the buffer to allocate address space for
 *
 * Allocate address space in the drm device so that applications
 * can mmap the buffer and access the contents. This only
 * applies to ttm_bo_type_device objects as others are not
 * placed in the drm device address space.
 */

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret;

retry_pre_get:
	ret = drm_mm_pre_get(&bdev->addr_space_mm);
	if (unlikely(ret != 0))
		return ret;

	write_lock(&bdev->vm_lock);
	bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
					 bo->mem.num_pages, 0, 0);

	if (unlikely(bo->vm_node == NULL)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
					      bo->mem.num_pages, 0);

	if (unlikely(bo->vm_node == NULL)) {
		write_unlock(&bdev->vm_lock);
		goto retry_pre_get;
	}

	ttm_bo_vm_insert_rb(bo);
	write_unlock(&bdev->vm_lock);
	bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;

	return 0;
out_unlock:
	write_unlock(&bdev->vm_lock);
	return ret;
}

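/*
 * Wait for the buffer to become idle. Signaled sync objects are
 * unreferenced with bo->lock temporarily dropped; with @no_wait set,
 * -EBUSY is returned instead of sleeping. Call with bo->lock held.
 */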
int ttm_bo_wait(struct ttm_buffer_object *bo,
		bool lazy, bool interruptible, bool no_wait)
{
	struct ttm_bo_driver *driver = bo->bdev->driver;
	void *sync_obj;
	void *sync_obj_arg;
	int ret = 0;

	if (likely(bo->sync_obj == NULL))
		return 0;

	while (bo->sync_obj) {

		if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
			spin_unlock(&bo->lock);
			driver->sync_obj_unref(&tmp_obj);
			spin_lock(&bo->lock);
			continue;
		}

		if (no_wait)
			return -EBUSY;

		sync_obj = driver->sync_obj_ref(bo->sync_obj);
		sync_obj_arg = bo->sync_obj_arg;
		spin_unlock(&bo->lock);
		ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
					    lazy, interruptible);
		if (unlikely(ret != 0)) {
			driver->sync_obj_unref(&sync_obj);
			spin_lock(&bo->lock);
			return ret;
		}
		spin_lock(&bo->lock);
		if (likely(bo->sync_obj == sync_obj &&
			   bo->sync_obj_arg == sync_obj_arg)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
				  &bo->priv_flags);
			spin_unlock(&bo->lock);
			driver->sync_obj_unref(&sync_obj);
			driver->sync_obj_unref(&tmp_obj);
			spin_lock(&bo->lock);
		}
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);

void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo)
{
	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
}

int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
			     bool no_wait)
{
	int ret;

	while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
		if (no_wait)
			return -EBUSY;
		else if (interruptible) {
			ret = wait_event_interruptible
			    (bo->event_queue, atomic_read(&bo->reserved) == 0);
			if (unlikely(ret != 0))
				return -ERESTART;
		} else {
			wait_event(bo->event_queue,
				   atomic_read(&bo->reserved) == 0);
		}
	}
	return 0;
}

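/*
 * Grab the buffer for CPU write access: reserve it, wait for the GPU to
 * finish, and bump the cpu_writers count that ttm_bo_wait_cpu() checks.
 * A minimal usage sketch (error handling elided):
 *
 *	if (ttm_bo_synccpu_write_grab(bo, false) == 0) {
 *		... write to the buffer from the CPU ...
 *		ttm_bo_synccpu_write_release(bo);
 *	}
 */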
int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
	int ret = 0;

	/*
	 * Using ttm_bo_reserve instead of ttm_bo_block_reservation
	 * makes sure the lru lists are updated.
	 */

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
	if (unlikely(ret != 0))
		return ret;
	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, true, no_wait);
	spin_unlock(&bo->lock);
	if (likely(ret == 0))
		atomic_inc(&bo->cpu_writers);
	ttm_bo_unreserve(bo);
	return ret;
}

void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
	if (atomic_dec_and_test(&bo->cpu_writers))
		wake_up_all(&bo->event_queue);
}

/**
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the bo_global::swap_lru list.
 */

static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
	struct ttm_bo_device *bdev =
	    container_of(shrink, struct ttm_bo_device, shrink);
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;
	int put_count;
	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);

	spin_lock(&bdev->lru_lock);
	while (ret == -EBUSY) {
		if (unlikely(list_empty(&bdev->swap_lru))) {
			spin_unlock(&bdev->lru_lock);
			return -EBUSY;
		}

		bo = list_first_entry(&bdev->swap_lru,
				      struct ttm_buffer_object, swap);
		kref_get(&bo->list_kref);

		/*
		 * Reserve buffer. Since we unlock while sleeping, we need
		 * to re-check that nobody removed us from the swap-list while
		 * we slept.
		 */

		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
		if (unlikely(ret == -EBUSY)) {
			spin_unlock(&bdev->lru_lock);
			ttm_bo_wait_unreserved(bo, false);
			kref_put(&bo->list_kref, ttm_bo_release_list);
			spin_lock(&bdev->lru_lock);
		}
	}

	BUG_ON(ret != 0);
	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&bdev->lru_lock);

	while (put_count--)
		kref_put(&bo->list_kref, ttm_bo_ref_bug);

	/*
	 * Wait for GPU, then move to system cached.
	 */

	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, false, false);
	spin_unlock(&bo->lock);

	if (unlikely(ret != 0))
		goto out;

	if ((bo->mem.placement & swap_placement) != swap_placement) {
		struct ttm_mem_reg evict_mem;

		evict_mem = bo->mem;
		evict_mem.mm_node = NULL;
		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
		evict_mem.mem_type = TTM_PL_SYSTEM;

		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
					     false, false);
		if (unlikely(ret != 0))
			goto out;
	}

	ttm_bo_unmap_virtual(bo);

	/*
	 * Swap out. Buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	 */

	ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
out:

	/*
	 * Unreserve without putting on LRU to avoid swapping out an
	 * already swapped buffer.
	 */

	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
{
	while (ttm_bo_swapout(&bdev->shrink) == 0)
		;
}