1/**************************************************************************
2 *
3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */
30/* Notes:
31 *
32 * We store a bo pointer in the drm_mm_node struct so we know which bo owns
33 * a specific node. There is no protection on the pointer, so to make sure
34 * things don't go berserk you have to access this pointer only while
35 * holding the global lru lock, and make sure that anytime you free a node
36 * you reset the pointer to NULL.
37 */
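/* Illustrative sketch (not part of the original file) of the access pattern
 * the note above requires; "node" and "bo" are placeholders:
 *
 *	spin_lock(&glob->lru_lock);
 *	bo = (struct ttm_buffer_object *)node->private;
 *	if (bo != NULL)
 *		;	use bo only while the lru lock is held
 *	node->private = NULL;	before handing the node back with drm_mm_put_block()
 *	spin_unlock(&glob->lru_lock);
 */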
38
39#include "ttm/ttm_module.h"
40#include "ttm/ttm_bo_driver.h"
41#include "ttm/ttm_placement.h"
42#include <linux/jiffies.h>
43#include <linux/slab.h>
44#include <linux/sched.h>
45#include <linux/mm.h>
46#include <linux/file.h>
47#include <linux/module.h>
48
49#define TTM_ASSERT_LOCKED(param)
50#define TTM_DEBUG(fmt, arg...)
51#define TTM_BO_HASH_ORDER 13
52
53static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
54static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
55static void ttm_bo_global_kobj_release(struct kobject *kobj);
56
57static struct attribute ttm_bo_count = {
58 .name = "bo_count",
59 .mode = S_IRUGO
60};
61
62static ssize_t ttm_bo_global_show(struct kobject *kobj,
63 struct attribute *attr,
64 char *buffer)
65{
66 struct ttm_bo_global *glob =
67 container_of(kobj, struct ttm_bo_global, kobj);
68
69 return snprintf(buffer, PAGE_SIZE, "%lu\n",
70 (unsigned long) atomic_read(&glob->bo_count));
71}
72
73static struct attribute *ttm_bo_global_attrs[] = {
74 &ttm_bo_count,
75 NULL
76};
77
78static struct sysfs_ops ttm_bo_global_ops = {
79 .show = &ttm_bo_global_show
80};
81
82static struct kobj_type ttm_bo_glob_kobj_type = {
83 .release = &ttm_bo_global_kobj_release,
84 .sysfs_ops = &ttm_bo_global_ops,
85 .default_attrs = ttm_bo_global_attrs
86};
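/* Note (illustrative, not part of the original file): the kobject that uses
 * this type is added in ttm_bo_global_init() under the name "buffer_objects",
 * so the counter above shows up as a read-only sysfs file named "bo_count";
 * the exact path depends on where ttm_get_kobj() anchors the ttm kobject.
 */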
87
88
89static inline uint32_t ttm_bo_type_flags(unsigned type)
90{
91 return 1 << (type);
92}
93
94static void ttm_bo_release_list(struct kref *list_kref)
95{
96 struct ttm_buffer_object *bo =
97 container_of(list_kref, struct ttm_buffer_object, list_kref);
98 struct ttm_bo_device *bdev = bo->bdev;
99
100 BUG_ON(atomic_read(&bo->list_kref.refcount));
101 BUG_ON(atomic_read(&bo->kref.refcount));
102 BUG_ON(atomic_read(&bo->cpu_writers));
103 BUG_ON(bo->sync_obj != NULL);
104 BUG_ON(bo->mem.mm_node != NULL);
105 BUG_ON(!list_empty(&bo->lru));
106 BUG_ON(!list_empty(&bo->ddestroy));
107
108 if (bo->ttm)
109 ttm_tt_destroy(bo->ttm);
110 atomic_dec(&bo->glob->bo_count);
111 if (bo->destroy)
112 bo->destroy(bo);
113 else {
114 ttm_mem_global_free(bdev->glob->mem_glob, bo->acc_size);
115 kfree(bo);
116 }
117}
118
119int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
120{
121
122 if (interruptible) {
123 int ret = 0;
124
125 ret = wait_event_interruptible(bo->event_queue,
126 atomic_read(&bo->reserved) == 0);
127 if (unlikely(ret != 0))
128 return ret;
129 } else {
130 wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
131 }
132 return 0;
133}
134
135static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
136{
137 struct ttm_bo_device *bdev = bo->bdev;
138 struct ttm_mem_type_manager *man;
139
140 BUG_ON(!atomic_read(&bo->reserved));
141
142 if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
143
144 BUG_ON(!list_empty(&bo->lru));
145
146 man = &bdev->man[bo->mem.mem_type];
147 list_add_tail(&bo->lru, &man->lru);
148 kref_get(&bo->list_kref);
149
150 if (bo->ttm != NULL) {
151 list_add_tail(&bo->swap, &bo->glob->swap_lru);
152 kref_get(&bo->list_kref);
153 }
154 }
155}
156
157/**
158 * Call with the lru_lock held.
159 */
160
161static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
162{
163 int put_count = 0;
164
165 if (!list_empty(&bo->swap)) {
166 list_del_init(&bo->swap);
167 ++put_count;
168 }
169 if (!list_empty(&bo->lru)) {
170 list_del_init(&bo->lru);
171 ++put_count;
172 }
173
174 /*
175 * TODO: Add a driver hook to delete from
176 * driver-specific LRU's here.
177 */
178
179 return put_count;
180}
181
182int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
183 bool interruptible,
184 bool no_wait, bool use_sequence, uint32_t sequence)
185{
186 struct ttm_bo_global *glob = bo->glob;
187 int ret;
188
189 while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
190 if (use_sequence && bo->seq_valid &&
191 (sequence - bo->val_seq < (1 << 31))) {
192 return -EAGAIN;
193 }
194
195 if (no_wait)
196 return -EBUSY;
197
198 spin_unlock(&glob->lru_lock);
199 ret = ttm_bo_wait_unreserved(bo, interruptible);
200 spin_lock(&glob->lru_lock);
201
202 if (unlikely(ret))
203 return ret;
204 }
205
206 if (use_sequence) {
207 bo->val_seq = sequence;
208 bo->seq_valid = true;
209 } else {
210 bo->seq_valid = false;
211 }
212
213 return 0;
214}
215EXPORT_SYMBOL(ttm_bo_reserve);
216
217static void ttm_bo_ref_bug(struct kref *list_kref)
218{
219 BUG();
220}
221
222int ttm_bo_reserve(struct ttm_buffer_object *bo,
223 bool interruptible,
224 bool no_wait, bool use_sequence, uint32_t sequence)
225{
226 struct ttm_bo_global *glob = bo->glob;
227 int put_count = 0;
228 int ret;
229
230 spin_lock(&glob->lru_lock);
231 ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
232 sequence);
233 if (likely(ret == 0))
234 put_count = ttm_bo_del_from_lru(bo);
235 spin_unlock(&glob->lru_lock);
236
237 while (put_count--)
238 kref_put(&bo->list_kref, ttm_bo_ref_bug);
239
240 return ret;
241}
242
243void ttm_bo_unreserve(struct ttm_buffer_object *bo)
244{
245 struct ttm_bo_global *glob = bo->glob;
246
247 spin_lock(&glob->lru_lock);
248 ttm_bo_add_to_lru(bo);
249 atomic_set(&bo->reserved, 0);
250 wake_up_all(&bo->event_queue);
251 spin_unlock(&glob->lru_lock);
252}
253EXPORT_SYMBOL(ttm_bo_unreserve);
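/* Illustrative sketch (not from the original file): the usual pairing of the
 * two exports above around buffer manipulation; "bo" is a placeholder.
 *
 *	ret = ttm_bo_reserve(bo, true, false, false, 0);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	... bo is now reserved and off the LRU lists ...
 *	ttm_bo_unreserve(bo);
 */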
254
255/*
256 * Call bo->mutex locked.
257 */
258static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
259{
260 struct ttm_bo_device *bdev = bo->bdev;
261 struct ttm_bo_global *glob = bo->glob;
262 int ret = 0;
263 uint32_t page_flags = 0;
264
265 TTM_ASSERT_LOCKED(&bo->mutex);
266 bo->ttm = NULL;
267
268 if (bdev->need_dma32)
269 page_flags |= TTM_PAGE_FLAG_DMA32;
270
271 switch (bo->type) {
272 case ttm_bo_type_device:
273 if (zero_alloc)
274 page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
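		/* fall through: device bos need a ttm just like kernel bos */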
275 case ttm_bo_type_kernel:
276 bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
277 page_flags, glob->dummy_read_page);
278 if (unlikely(bo->ttm == NULL))
279 ret = -ENOMEM;
280 break;
281 case ttm_bo_type_user:
282 bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
283 page_flags | TTM_PAGE_FLAG_USER,
284 glob->dummy_read_page);
285 if (unlikely(bo->ttm == NULL)) {
286 ret = -ENOMEM;
287 break;
288 }
289
290 ret = ttm_tt_set_user(bo->ttm, current,
291 bo->buffer_start, bo->num_pages);
292 if (unlikely(ret != 0))
293 ttm_tt_destroy(bo->ttm);
294 break;
295 default:
296 printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
297 ret = -EINVAL;
298 break;
299 }
300
301 return ret;
302}
303
304static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
305 struct ttm_mem_reg *mem,
306 bool evict, bool interruptible, bool no_wait)
307{
308 struct ttm_bo_device *bdev = bo->bdev;
309 bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
310 bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
311 struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
312 struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
313 int ret = 0;
314
315 if (old_is_pci || new_is_pci ||
316 ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0))
317 ttm_bo_unmap_virtual(bo);
318
319 /*
320 * Create and bind a ttm if required.
321 */
322
323 if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
324 ret = ttm_bo_add_ttm(bo, false);
325 if (ret)
326 goto out_err;
327
328 ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
329 if (ret)
330 goto out_err;
331
332 if (mem->mem_type != TTM_PL_SYSTEM) {
333 ret = ttm_tt_bind(bo->ttm, mem);
334 if (ret)
335 goto out_err;
336 }
337
338 if (bo->mem.mem_type == TTM_PL_SYSTEM) {
339 bo->mem = *mem;
340 mem->mm_node = NULL;
341 goto moved;
342 }
343
344 }
345
346 if (bdev->driver->move_notify)
347 bdev->driver->move_notify(bo, mem);
348
349 if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
350 !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
351 ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
352 else if (bdev->driver->move)
353 ret = bdev->driver->move(bo, evict, interruptible,
354 no_wait, mem);
355 else
356 ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);
357
358 if (ret)
359 goto out_err;
360
361moved:
362 if (bo->evicted) {
363 ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
364 if (ret)
365 printk(KERN_ERR TTM_PFX "Can not flush read caches\n");
366 bo->evicted = false;
367 }
368
369 if (bo->mem.mm_node) {
370 spin_lock(&bo->lock);
371 bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
372 bdev->man[bo->mem.mem_type].gpu_offset;
373 bo->cur_placement = bo->mem.placement;
374 spin_unlock(&bo->lock);
375 }
376
377 return 0;
378
379out_err:
380 new_man = &bdev->man[bo->mem.mem_type];
381 if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
382 ttm_tt_unbind(bo->ttm);
383 ttm_tt_destroy(bo->ttm);
384 bo->ttm = NULL;
385 }
386
387 return ret;
388}
389
390/**
391 * If bo idle, remove from delayed- and lru lists, and unref.
392 * If not idle, and already on delayed list, do nothing.
393 * If not idle, and not on delayed list, put on delayed list,
394 * up the list_kref and schedule a delayed list check.
395 */
396
397static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
398{
399 struct ttm_bo_device *bdev = bo->bdev;
400 struct ttm_bo_global *glob = bo->glob;
401 struct ttm_bo_driver *driver = bdev->driver;
402 int ret;
403
404 spin_lock(&bo->lock);
405 (void) ttm_bo_wait(bo, false, false, !remove_all);
406
407 if (!bo->sync_obj) {
408 int put_count;
409
410 spin_unlock(&bo->lock);
411
412 spin_lock(&glob->lru_lock);
413 ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
414 BUG_ON(ret);
415 if (bo->ttm)
416 ttm_tt_unbind(bo->ttm);
417
418 if (!list_empty(&bo->ddestroy)) {
419 list_del_init(&bo->ddestroy);
420 kref_put(&bo->list_kref, ttm_bo_ref_bug);
421 }
422 if (bo->mem.mm_node) {
423 bo->mem.mm_node->private = NULL;
424 drm_mm_put_block(bo->mem.mm_node);
425 bo->mem.mm_node = NULL;
426 }
427 put_count = ttm_bo_del_from_lru(bo);
428 spin_unlock(&glob->lru_lock);
429
430 atomic_set(&bo->reserved, 0);
431
432 while (put_count--)
433 kref_put(&bo->list_kref, ttm_bo_release_list);
434
435 return 0;
436 }
437
438 spin_lock(&glob->lru_lock);
439 if (list_empty(&bo->ddestroy)) {
440 void *sync_obj = bo->sync_obj;
441 void *sync_obj_arg = bo->sync_obj_arg;
442
443 kref_get(&bo->list_kref);
444 list_add_tail(&bo->ddestroy, &bdev->ddestroy);
445 spin_unlock(&glob->lru_lock);
446 spin_unlock(&bo->lock);
447
448 if (sync_obj)
449 driver->sync_obj_flush(sync_obj, sync_obj_arg);
450 schedule_delayed_work(&bdev->wq,
451 ((HZ / 100) < 1) ? 1 : HZ / 100);
452 ret = 0;
453
454 } else {
455 spin_unlock(&glob->lru_lock);
456 spin_unlock(&bo->lock);
457 ret = -EBUSY;
458 }
459
460 return ret;
461}
462
463/**
464 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
465 * encountered buffers.
466 */
467
468static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
469{
470 struct ttm_bo_global *glob = bdev->glob;
471 struct ttm_buffer_object *entry, *nentry;
472 struct list_head *list, *next;
473 int ret;
474
475 spin_lock(&glob->lru_lock);
476 list_for_each_safe(list, next, &bdev->ddestroy) {
477 entry = list_entry(list, struct ttm_buffer_object, ddestroy);
478 nentry = NULL;
479
480 /*
481 * Protect the next list entry from destruction while we
482 * unlock the lru_lock.
483 */
484
485 if (next != &bdev->ddestroy) {
486 nentry = list_entry(next, struct ttm_buffer_object,
487 ddestroy);
488 kref_get(&nentry->list_kref);
489 }
490 kref_get(&entry->list_kref);
491
492 spin_unlock(&glob->lru_lock);
493 ret = ttm_bo_cleanup_refs(entry, remove_all);
494 kref_put(&entry->list_kref, ttm_bo_release_list);
495
496 spin_lock(&glob->lru_lock);
497 if (nentry) {
498 bool next_onlist = !list_empty(next);
499 spin_unlock(&glob->lru_lock);
500 kref_put(&nentry->list_kref, ttm_bo_release_list);
501 spin_lock(&glob->lru_lock);
502 /*
503 * Someone might have raced us and removed the
504 * next entry from the list. We don't bother restarting
505 * list traversal.
506 */
507
508 if (!next_onlist)
509 break;
510 }
511 if (ret)
512 break;
513 }
514 ret = !list_empty(&bdev->ddestroy);
515 spin_unlock(&glob->lru_lock);
516
517 return ret;
518}
519
520static void ttm_bo_delayed_workqueue(struct work_struct *work)
521{
522 struct ttm_bo_device *bdev =
523 container_of(work, struct ttm_bo_device, wq.work);
524
525 if (ttm_bo_delayed_delete(bdev, false)) {
526 schedule_delayed_work(&bdev->wq,
527 ((HZ / 100) < 1) ? 1 : HZ / 100);
528 }
529}
530
531static void ttm_bo_release(struct kref *kref)
532{
533 struct ttm_buffer_object *bo =
534 container_of(kref, struct ttm_buffer_object, kref);
535 struct ttm_bo_device *bdev = bo->bdev;
536
537 if (likely(bo->vm_node != NULL)) {
538 rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
539 drm_mm_put_block(bo->vm_node);
540 bo->vm_node = NULL;
541 }
542 write_unlock(&bdev->vm_lock);
543 ttm_bo_cleanup_refs(bo, false);
544 kref_put(&bo->list_kref, ttm_bo_release_list);
545 write_lock(&bdev->vm_lock);
546}
547
548void ttm_bo_unref(struct ttm_buffer_object **p_bo)
549{
550 struct ttm_buffer_object *bo = *p_bo;
551 struct ttm_bo_device *bdev = bo->bdev;
552
553 *p_bo = NULL;
554 write_lock(&bdev->vm_lock);
555 kref_put(&bo->kref, ttm_bo_release);
556 write_unlock(&bdev->vm_lock);
557}
558EXPORT_SYMBOL(ttm_bo_unref);
559
560static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
561 bool no_wait)
562{
563 struct ttm_bo_device *bdev = bo->bdev;
564 struct ttm_bo_global *glob = bo->glob;
565 struct ttm_mem_reg evict_mem;
566 struct ttm_placement placement;
567 int ret = 0;
568
569 spin_lock(&bo->lock);
570 ret = ttm_bo_wait(bo, false, interruptible, no_wait);
571 spin_unlock(&bo->lock);
572
573 if (unlikely(ret != 0)) {
574 if (ret != -ERESTARTSYS) {
575 printk(KERN_ERR TTM_PFX
576 "Failed to expire sync object before "
577 "buffer eviction.\n");
578 }
579 goto out;
580 }
581
582 BUG_ON(!atomic_read(&bo->reserved));
583
584 evict_mem = bo->mem;
585 evict_mem.mm_node = NULL;
586
587 placement.fpfn = 0;
588 placement.lpfn = 0;
589 placement.num_placement = 0;
590 placement.num_busy_placement = 0;
591 bdev->driver->evict_flags(bo, &placement);
592 ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
593 no_wait);
594 if (ret) {
595 if (ret != -ERESTARTSYS)
596 printk(KERN_ERR TTM_PFX
597 "Failed to find memory space for "
598 "buffer 0x%p eviction.\n", bo);
599 goto out;
600 }
601
602 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
603 no_wait);
604 if (ret) {
605 if (ret != -ERESTARTSYS)
606 printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
607 spin_lock(&glob->lru_lock);
608 if (evict_mem.mm_node) {
609 evict_mem.mm_node->private = NULL;
610 drm_mm_put_block(evict_mem.mm_node);
611 evict_mem.mm_node = NULL;
612 }
613 spin_unlock(&glob->lru_lock);
614 goto out;
615 }
616 bo->evicted = true;
617out:
618 return ret;
619}
620
621static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
622 uint32_t mem_type,
623 bool interruptible, bool no_wait)
624{
625 struct ttm_bo_global *glob = bdev->glob;
626 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
627 struct ttm_buffer_object *bo;
628 int ret, put_count = 0;
629
630 spin_lock(&glob->lru_lock);
631 bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
632 kref_get(&bo->list_kref);
633 ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, false, 0);
634 if (likely(ret == 0))
635 put_count = ttm_bo_del_from_lru(bo);
636 spin_unlock(&glob->lru_lock);
637 if (unlikely(ret != 0))
638 return ret;
639 while (put_count--)
640 kref_put(&bo->list_kref, ttm_bo_ref_bug);
641 ret = ttm_bo_evict(bo, interruptible, no_wait);
642 ttm_bo_unreserve(bo);
643 kref_put(&bo->list_kref, ttm_bo_release_list);
644 return ret;
645}
646
647static int ttm_bo_man_get_node(struct ttm_buffer_object *bo,
648 struct ttm_mem_type_manager *man,
649 struct ttm_placement *placement,
650 struct ttm_mem_reg *mem,
651 struct drm_mm_node **node)
652{
653 struct ttm_bo_global *glob = bo->glob;
654 unsigned long lpfn;
655 int ret;
656
657 lpfn = placement->lpfn;
658 if (!lpfn)
659 lpfn = man->size;
660 *node = NULL;
661 do {
662 ret = drm_mm_pre_get(&man->manager);
663 if (unlikely(ret))
664 return ret;
665
666 spin_lock(&glob->lru_lock);
667 *node = drm_mm_search_free_in_range(&man->manager,
668 mem->num_pages, mem->page_alignment,
669 placement->fpfn, lpfn, 1);
670 if (unlikely(*node == NULL)) {
671 spin_unlock(&glob->lru_lock);
672 return 0;
673 }
674 *node = drm_mm_get_block_atomic_range(*node, mem->num_pages,
675 mem->page_alignment,
676 placement->fpfn,
677 lpfn);
678 spin_unlock(&glob->lru_lock);
679 } while (*node == NULL);
680 return 0;
681}
682
683/**
684 * Repeatedly evict memory from the LRU for @mem_type until we create enough
685 * space, or we've evicted everything and there isn't enough space.
686 */
687static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
688 uint32_t mem_type,
689 struct ttm_placement *placement,
690 struct ttm_mem_reg *mem,
691 bool interruptible, bool no_wait)
692{
693 struct ttm_bo_device *bdev = bo->bdev;
694 struct ttm_bo_global *glob = bdev->glob;
695 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
696 struct drm_mm_node *node;
697 int ret;
698
699 do {
700 ret = ttm_bo_man_get_node(bo, man, placement, mem, &node);
701 if (unlikely(ret != 0))
702 return ret;
703 if (node)
704 break;
705 spin_lock(&glob->lru_lock);
706 if (list_empty(&man->lru)) {
707 spin_unlock(&glob->lru_lock);
708 break;
709 }
710 spin_unlock(&glob->lru_lock);
711 ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
712 no_wait);
713 if (unlikely(ret != 0))
714 return ret;
715 } while (1);
716 if (node == NULL)
717 return -ENOMEM;
718 mem->mm_node = node;
719 mem->mem_type = mem_type;
720 return 0;
721}
722
723static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
724 uint32_t cur_placement,
725 uint32_t proposed_placement)
726{
727 uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
728 uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;
729
730 /**
731 * Keep current caching if possible.
732 */
733
734 if ((cur_placement & caching) != 0)
735 result |= (cur_placement & caching);
736 else if ((man->default_caching & caching) != 0)
737 result |= man->default_caching;
738 else if ((TTM_PL_FLAG_CACHED & caching) != 0)
739 result |= TTM_PL_FLAG_CACHED;
740 else if ((TTM_PL_FLAG_WC & caching) != 0)
741 result |= TTM_PL_FLAG_WC;
742 else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
743 result |= TTM_PL_FLAG_UNCACHED;
744
745 return result;
746}
747
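/* Worked example for ttm_bo_select_caching() above (illustrative only): if
 * the bo is currently write-combined and the proposed placement allows both
 * WC and CACHED, the WC bit is kept; only when the current caching mode is
 * not among the proposed ones does the code fall back to the manager default
 * and then to CACHED, WC and UNCACHED in that order.
 */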
748static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
749 bool disallow_fixed,
750 uint32_t mem_type,
751 uint32_t proposed_placement,
752 uint32_t *masked_placement)
753{
754 uint32_t cur_flags = ttm_bo_type_flags(mem_type);
755
756 if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
757 return false;
758
759 if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
760 return false;
761
762 if ((proposed_placement & man->available_caching) == 0)
763 return false;
764
765 cur_flags |= (proposed_placement & man->available_caching);
766
767 *masked_placement = cur_flags;
768 return true;
769}
770
771static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
772{
773 int i;
774
775 for (i = 0; i <= TTM_PL_PRIV5; i++)
776 if (flags & (1 << i)) {
777 *mem_type = i;
778 return 0;
779 }
780 return -EINVAL;
781}
782
783/**
784 * Creates space for memory region @mem according to its type.
785 *
786 * This function first searches for free space in compatible memory types in
787 * the priority order defined by the driver. If free space isn't found, then
788 * ttm_bo_mem_force_space is attempted in priority order to evict and find
789 * space.
790 */
791int ttm_bo_mem_space(struct ttm_buffer_object *bo,
792 struct ttm_placement *placement,
793 struct ttm_mem_reg *mem,
794 bool interruptible, bool no_wait)
795{
796 struct ttm_bo_device *bdev = bo->bdev;
797 struct ttm_mem_type_manager *man;
ba4e7d97
TH
798 uint32_t mem_type = TTM_PL_SYSTEM;
799 uint32_t cur_flags = 0;
800 bool type_found = false;
801 bool type_ok = false;
802 bool has_erestartsys = false;
803 struct drm_mm_node *node = NULL;
804 int i, ret;
805
806 mem->mm_node = NULL;
807 for (i = 0; i < placement->num_placement; ++i) {
808 ret = ttm_mem_type_from_flags(placement->placement[i],
809 &mem_type);
810 if (ret)
811 return ret;
812 man = &bdev->man[mem_type];
813
814 type_ok = ttm_bo_mt_compatible(man,
815 bo->type == ttm_bo_type_user,
816 mem_type,
817 placement->placement[i],
818 &cur_flags);
819
820 if (!type_ok)
821 continue;
822
823 cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
824 cur_flags);
825 /*
826 * Carry the access and other non-mapping-related flag bits from the
827 * memory placement flags over to the current flags.
828 */
829 ttm_flag_masked(&cur_flags, placement->placement[i],
830 ~TTM_PL_MASK_MEMTYPE);
831
832 if (mem_type == TTM_PL_SYSTEM)
833 break;
834
835 if (man->has_type && man->use_type) {
836 type_found = true;
837 ret = ttm_bo_man_get_node(bo, man, placement, mem,
838 &node);
839 if (unlikely(ret))
840 return ret;
841 }
842 if (node)
843 break;
844 }
845
846 if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) {
847 mem->mm_node = node;
848 mem->mem_type = mem_type;
849 mem->placement = cur_flags;
ca262a99
JG
850 if (node)
851 node->private = bo;
ba4e7d97
TH
852 return 0;
853 }
854
855 if (!type_found)
856 return -EINVAL;
857
858 for (i = 0; i < placement->num_busy_placement; ++i) {
859 ret = ttm_mem_type_from_flags(placement->placement[i],
860 &mem_type);
861 if (ret)
862 return ret;
863 man = &bdev->man[mem_type];
864 if (!man->has_type)
865 continue;
866 if (!ttm_bo_mt_compatible(man,
867 bo->type == ttm_bo_type_user,
868 mem_type,
869 placement->placement[i],
870 &cur_flags))
871 continue;
872
873 cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
874 cur_flags);
875 /*
876 * Carry the access and other non-mapping-related flag bits from the
877 * memory placement flags over to the current flags.
878 */
879 ttm_flag_masked(&cur_flags, placement->placement[i],
880 ~TTM_PL_MASK_MEMTYPE);
881
882 ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
883 interruptible, no_wait);
884 if (ret == 0 && mem->mm_node) {
885 mem->placement = cur_flags;
886 mem->mm_node->private = bo;
887 return 0;
888 }
889 if (ret == -ERESTARTSYS)
890 has_erestartsys = true;
891 }
892 ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
893 return ret;
894}
895EXPORT_SYMBOL(ttm_bo_mem_space);
896
897int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
898{
899 if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
900 return -EBUSY;
901
902 return wait_event_interruptible(bo->event_queue,
903 atomic_read(&bo->cpu_writers) == 0);
904}
905
906int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
907 struct ttm_placement *placement,
908 bool interruptible, bool no_wait)
909{
910 struct ttm_bo_global *glob = bo->glob;
911 int ret = 0;
912 struct ttm_mem_reg mem;
913
914 BUG_ON(!atomic_read(&bo->reserved));
915
916 /*
917 * FIXME: It's possible to pipeline buffer moves.
918 * Have the driver move function wait for idle when necessary,
919 * instead of doing it here.
920 */
921 spin_lock(&bo->lock);
922 ret = ttm_bo_wait(bo, false, interruptible, no_wait);
923 spin_unlock(&bo->lock);
924 if (ret)
925 return ret;
926 mem.num_pages = bo->num_pages;
927 mem.size = mem.num_pages << PAGE_SHIFT;
928 mem.page_alignment = bo->mem.page_alignment;
929 /*
930 * Determine where to move the buffer.
931 */
932 ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait);
933 if (ret)
934 goto out_unlock;
935 ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
936out_unlock:
937 if (ret && mem.mm_node) {
938 spin_lock(&glob->lru_lock);
939 mem.mm_node->private = NULL;
940 drm_mm_put_block(mem.mm_node);
941 spin_unlock(&glob->lru_lock);
942 }
943 return ret;
944}
945
946static int ttm_bo_mem_compat(struct ttm_placement *placement,
947 struct ttm_mem_reg *mem)
948{
949 int i;
950
951 for (i = 0; i < placement->num_placement; i++) {
952 if ((placement->placement[i] & mem->placement &
953 TTM_PL_MASK_CACHING) &&
954 (placement->placement[i] & mem->placement &
955 TTM_PL_MASK_MEM))
956 return i;
957 }
958 return -1;
959}
960
961int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
962 struct ttm_placement *placement,
963 bool interruptible, bool no_wait)
964{
965 int ret;
966
967 BUG_ON(!atomic_read(&bo->reserved));
968 /* Check that range is valid */
969 if (placement->lpfn || placement->fpfn)
970 if (placement->fpfn > placement->lpfn ||
971 (placement->lpfn - placement->fpfn) < bo->num_pages)
972 return -EINVAL;
973 /*
974 * Check whether we need to move buffer.
975 */
976 ret = ttm_bo_mem_compat(placement, &bo->mem);
977 if (ret < 0) {
978 ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait);
979 if (ret)
980 return ret;
981 } else {
982 /*
983 * Use the access and other non-mapping-related flag bits from
984 * the compatible memory placement flags to the active flags
985 */
986 ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
987 ~TTM_PL_MASK_MEMTYPE);
988 }
989 /*
990 * We might need to add a TTM.
991 */
992 if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
993 ret = ttm_bo_add_ttm(bo, true);
994 if (ret)
995 return ret;
996 }
997 return 0;
998}
999EXPORT_SYMBOL(ttm_buffer_object_validate);
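/* Illustrative sketch (not part of the original file): how a driver might
 * validate an already reserved bo into VRAM, falling back to TT. Only the
 * TTM_PL_* flags and the struct fields are real; "bo" and the sizes are
 * placeholders.
 *
 *	uint32_t flags[] = {
 *		TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC,
 *		TTM_PL_FLAG_TT | TTM_PL_FLAG_CACHED,
 *	};
 *	struct ttm_placement placement = {
 *		.num_placement = 2, .placement = flags,
 *		.num_busy_placement = 2, .busy_placement = flags,
 *	};
 *	ret = ttm_buffer_object_validate(bo, &placement, true, false);
 */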
1000
1001int
1002ttm_bo_check_placement(struct ttm_buffer_object *bo,
1003 uint32_t set_flags, uint32_t clr_flags)
1004{
1005 uint32_t new_mask = set_flags | clr_flags;
1006
1007 if ((bo->type == ttm_bo_type_user) &&
1008 (clr_flags & TTM_PL_FLAG_CACHED)) {
1009 printk(KERN_ERR TTM_PFX
1010 "User buffers require cache-coherent memory.\n");
1011 return -EINVAL;
1012 }
1013
1014 if (!capable(CAP_SYS_ADMIN)) {
1015 if (new_mask & TTM_PL_FLAG_NO_EVICT) {
1016 printk(KERN_ERR TTM_PFX "Need to be root to modify"
1017 " NO_EVICT status.\n");
1018 return -EINVAL;
1019 }
1020
1021 if ((clr_flags & bo->mem.placement & TTM_PL_MASK_MEMTYPE) &&
1022 (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
1023 printk(KERN_ERR TTM_PFX
1024 "Incompatible memory specification"
1025 " for NO_EVICT buffer.\n");
1026 return -EINVAL;
1027 }
1028 }
1029 return 0;
1030}
1031
1032int ttm_buffer_object_init(struct ttm_bo_device *bdev,
1033 struct ttm_buffer_object *bo,
1034 unsigned long size,
1035 enum ttm_bo_type type,
1036 uint32_t flags,
1037 uint32_t page_alignment,
1038 unsigned long buffer_start,
1039 bool interruptible,
1040 struct file *persistant_swap_storage,
1041 size_t acc_size,
1042 void (*destroy) (struct ttm_buffer_object *))
1043{
1044 int i, c, ret = 0;
1045 unsigned long num_pages;
1046 uint32_t placements[8];
1047 struct ttm_placement placement;
1048
1049 size += buffer_start & ~PAGE_MASK;
1050 num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1051 if (num_pages == 0) {
1052 printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
1053 return -EINVAL;
1054 }
1055 bo->destroy = destroy;
1056
1057 spin_lock_init(&bo->lock);
1058 kref_init(&bo->kref);
1059 kref_init(&bo->list_kref);
1060 atomic_set(&bo->cpu_writers, 0);
1061 atomic_set(&bo->reserved, 1);
1062 init_waitqueue_head(&bo->event_queue);
1063 INIT_LIST_HEAD(&bo->lru);
1064 INIT_LIST_HEAD(&bo->ddestroy);
1065 INIT_LIST_HEAD(&bo->swap);
1066 bo->bdev = bdev;
1067 bo->glob = bdev->glob;
1068 bo->type = type;
1069 bo->num_pages = num_pages;
1070 bo->mem.mem_type = TTM_PL_SYSTEM;
1071 bo->mem.num_pages = bo->num_pages;
1072 bo->mem.mm_node = NULL;
1073 bo->mem.page_alignment = page_alignment;
1074 bo->buffer_start = buffer_start & PAGE_MASK;
1075 bo->priv_flags = 0;
1076 bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
1077 bo->seq_valid = false;
1078 bo->persistant_swap_storage = persistant_swap_storage;
1079 bo->acc_size = acc_size;
1080 atomic_inc(&bo->glob->bo_count);
1081
1082 ret = ttm_bo_check_placement(bo, flags, 0ULL);
1083 if (unlikely(ret != 0))
1084 goto out_err;
1085
1086 /*
1087 * If no caching attributes are set, accept any form of caching.
1088 */
1089
1090 if ((flags & TTM_PL_MASK_CACHING) == 0)
1091 flags |= TTM_PL_MASK_CACHING;
1092
1093 /*
1094 * For ttm_bo_type_device buffers, allocate
1095 * address space from the device.
1096 */
1097
1098 if (bo->type == ttm_bo_type_device) {
1099 ret = ttm_bo_setup_vm(bo);
1100 if (ret)
1101 goto out_err;
1102 }
1103
1104 placement.fpfn = 0;
1105 placement.lpfn = 0;
1106 for (i = 0, c = 0; i <= TTM_PL_PRIV5; i++)
1107 if (flags & (1 << i))
1108 placements[c++] = (flags & ~TTM_PL_MASK_MEM) | (1 << i);
1109 placement.placement = placements;
1110 placement.num_placement = c;
1111 placement.busy_placement = placements;
1112 placement.num_busy_placement = c;
1113 ret = ttm_buffer_object_validate(bo, &placement, interruptible, false);
1114 if (ret)
1115 goto out_err;
1116
1117 ttm_bo_unreserve(bo);
1118 return 0;
1119
1120out_err:
1121 ttm_bo_unreserve(bo);
1122 ttm_bo_unref(&bo);
1123
1124 return ret;
1125}
1126EXPORT_SYMBOL(ttm_buffer_object_init);
1127
1128static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
1129 unsigned long num_pages)
1130{
1131 size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
1132 PAGE_MASK;
1133
1134 return glob->ttm_bo_size + 2 * page_array_size;
1135}
1136
1137int ttm_buffer_object_create(struct ttm_bo_device *bdev,
1138 unsigned long size,
1139 enum ttm_bo_type type,
1140 uint32_t flags,
1141 uint32_t page_alignment,
1142 unsigned long buffer_start,
1143 bool interruptible,
1144 struct file *persistant_swap_storage,
1145 struct ttm_buffer_object **p_bo)
1146{
1147 struct ttm_buffer_object *bo;
1148 struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
1149 int ret;
1150
1151 size_t acc_size =
1152 ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
1153 ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
1154 if (unlikely(ret != 0))
1155 return ret;
1156
1157 bo = kzalloc(sizeof(*bo), GFP_KERNEL);
1158
1159 if (unlikely(bo == NULL)) {
1160 ttm_mem_global_free(mem_glob, acc_size);
1161 return -ENOMEM;
1162 }
1163
1164 ret = ttm_buffer_object_init(bdev, bo, size, type, flags,
1165 page_alignment, buffer_start,
1166 interruptible,
1167 persistant_swap_storage, acc_size, NULL);
1168 if (likely(ret == 0))
1169 *p_bo = bo;
1170
1171 return ret;
1172}
1173
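/* Illustrative sketch (not from the original file): creating a standalone,
 * device-mappable bo with the helper above; "bdev", "size" and the flags are
 * placeholders.
 *
 *	struct ttm_buffer_object *bo;
 *	ret = ttm_buffer_object_create(bdev, size, ttm_bo_type_device,
 *				       TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
 *				       0, 0, false, NULL, &bo);
 */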
1174static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
1175 unsigned mem_type, bool allow_errors)
1176{
1177 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1178 struct ttm_bo_global *glob = bdev->glob;
1179 int ret;
1180
1181 /*
1182 * Can't use standard list traversal since we're unlocking.
1183 */
1184
1185 spin_lock(&glob->lru_lock);
1186 while (!list_empty(&man->lru)) {
1187 spin_unlock(&glob->lru_lock);
1188 ret = ttm_mem_evict_first(bdev, mem_type, false, false);
1189 if (ret) {
1190 if (allow_errors) {
1191 return ret;
1192 } else {
1193 printk(KERN_ERR TTM_PFX
1194 "Cleanup eviction failed\n");
1195 }
1196 }
1197 spin_lock(&glob->lru_lock);
1198 }
1199 spin_unlock(&glob->lru_lock);
1200 return 0;
1201}
1202
1203int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1204{
1205 struct ttm_bo_global *glob = bdev->glob;
1206 struct ttm_mem_type_manager *man;
1207 int ret = -EINVAL;
1208
1209 if (mem_type >= TTM_NUM_MEM_TYPES) {
1210 printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type);
1211 return ret;
1212 }
1213 man = &bdev->man[mem_type];
1214
1215 if (!man->has_type) {
1216 printk(KERN_ERR TTM_PFX "Trying to take down uninitialized "
1217 "memory manager type %u\n", mem_type);
1218 return ret;
1219 }
1220
1221 man->use_type = false;
1222 man->has_type = false;
1223
1224 ret = 0;
1225 if (mem_type > 0) {
1226 ttm_bo_force_list_clean(bdev, mem_type, false);
1227
1228 spin_lock(&glob->lru_lock);
1229 if (drm_mm_clean(&man->manager))
1230 drm_mm_takedown(&man->manager);
1231 else
1232 ret = -EBUSY;
1233
1234 spin_unlock(&glob->lru_lock);
1235 }
1236
1237 return ret;
1238}
1239EXPORT_SYMBOL(ttm_bo_clean_mm);
1240
1241int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1242{
1243 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1244
1245 if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
1246 printk(KERN_ERR TTM_PFX
1247 "Illegal memory manager memory type %u.\n",
1248 mem_type);
1249 return -EINVAL;
1250 }
1251
1252 if (!man->has_type) {
1253 printk(KERN_ERR TTM_PFX
1254 "Memory type %u has not been initialized.\n",
1255 mem_type);
1256 return 0;
1257 }
1258
1259 return ttm_bo_force_list_clean(bdev, mem_type, true);
1260}
1261EXPORT_SYMBOL(ttm_bo_evict_mm);
1262
1263int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1264 unsigned long p_size)
1265{
1266 int ret = -EINVAL;
1267 struct ttm_mem_type_manager *man;
1268
1269 if (type >= TTM_NUM_MEM_TYPES) {
1270 printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type);
1271 return ret;
1272 }
1273
1274 man = &bdev->man[type];
1275 if (man->has_type) {
1276 printk(KERN_ERR TTM_PFX
1277 "Memory manager already initialized for type %d\n",
1278 type);
1279 return ret;
1280 }
1281
1282 ret = bdev->driver->init_mem_type(bdev, type, man);
1283 if (ret)
1284 return ret;
1285
1286 ret = 0;
1287 if (type != TTM_PL_SYSTEM) {
1288 if (!p_size) {
1289 printk(KERN_ERR TTM_PFX
1290 "Zero size memory manager type %d\n",
1291 type);
1292 return ret;
1293 }
1294 ret = drm_mm_init(&man->manager, 0, p_size);
1295 if (ret)
1296 return ret;
1297 }
1298 man->has_type = true;
1299 man->use_type = true;
1300 man->size = p_size;
1301
1302 INIT_LIST_HEAD(&man->lru);
1303
1304 return 0;
1305}
1306EXPORT_SYMBOL(ttm_bo_init_mm);
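/* Illustrative sketch (not from the original file): a driver typically
 * registers its VRAM range after ttm_bo_device_init(); p_size is in pages
 * and "vram_size" is a placeholder.
 *
 *	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT);
 */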
1307
1308static void ttm_bo_global_kobj_release(struct kobject *kobj)
1309{
1310 struct ttm_bo_global *glob =
1311 container_of(kobj, struct ttm_bo_global, kobj);
1312
1313 ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
1314 __free_page(glob->dummy_read_page);
1315 kfree(glob);
1316}
1317
1318void ttm_bo_global_release(struct ttm_global_reference *ref)
1319{
1320 struct ttm_bo_global *glob = ref->object;
1321
1322 kobject_del(&glob->kobj);
1323 kobject_put(&glob->kobj);
1324}
1325EXPORT_SYMBOL(ttm_bo_global_release);
1326
1327int ttm_bo_global_init(struct ttm_global_reference *ref)
1328{
1329 struct ttm_bo_global_ref *bo_ref =
1330 container_of(ref, struct ttm_bo_global_ref, ref);
1331 struct ttm_bo_global *glob = ref->object;
1332 int ret;
1333
1334 mutex_init(&glob->device_list_mutex);
1335 spin_lock_init(&glob->lru_lock);
1336 glob->mem_glob = bo_ref->mem_glob;
1337 glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
1338
1339 if (unlikely(glob->dummy_read_page == NULL)) {
1340 ret = -ENOMEM;
1341 goto out_no_drp;
1342 }
1343
1344 INIT_LIST_HEAD(&glob->swap_lru);
1345 INIT_LIST_HEAD(&glob->device_list);
1346
1347 ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
1348 ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
1349 if (unlikely(ret != 0)) {
1350 printk(KERN_ERR TTM_PFX
1351 "Could not register buffer object swapout.\n");
1352 goto out_no_shrink;
1353 }
1354
1355 glob->ttm_bo_extra_size =
1356 ttm_round_pot(sizeof(struct ttm_tt)) +
1357 ttm_round_pot(sizeof(struct ttm_backend));
1358
1359 glob->ttm_bo_size = glob->ttm_bo_extra_size +
1360 ttm_round_pot(sizeof(struct ttm_buffer_object));
1361
1362 atomic_set(&glob->bo_count, 0);
1363
1364 kobject_init(&glob->kobj, &ttm_bo_glob_kobj_type);
1365 ret = kobject_add(&glob->kobj, ttm_get_kobj(), "buffer_objects");
1366 if (unlikely(ret != 0))
1367 kobject_put(&glob->kobj);
1368 return ret;
1369out_no_shrink:
1370 __free_page(glob->dummy_read_page);
1371out_no_drp:
1372 kfree(glob);
1373 return ret;
1374}
1375EXPORT_SYMBOL(ttm_bo_global_init);
1376
1377
1378int ttm_bo_device_release(struct ttm_bo_device *bdev)
1379{
1380 int ret = 0;
1381 unsigned i = TTM_NUM_MEM_TYPES;
1382 struct ttm_mem_type_manager *man;
1383 struct ttm_bo_global *glob = bdev->glob;
1384
1385 while (i--) {
1386 man = &bdev->man[i];
1387 if (man->has_type) {
1388 man->use_type = false;
1389 if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
1390 ret = -EBUSY;
1391 printk(KERN_ERR TTM_PFX
1392 "DRM memory manager type %d "
1393 "is not clean.\n", i);
1394 }
1395 man->has_type = false;
1396 }
1397 }
1398
1399 mutex_lock(&glob->device_list_mutex);
1400 list_del(&bdev->device_list);
1401 mutex_unlock(&glob->device_list_mutex);
1402
1403 if (!cancel_delayed_work(&bdev->wq))
1404 flush_scheduled_work();
1405
1406 while (ttm_bo_delayed_delete(bdev, true))
1407 ;
1408
1409 spin_lock(&glob->lru_lock);
1410 if (list_empty(&bdev->ddestroy))
1411 TTM_DEBUG("Delayed destroy list was clean\n");
1412
1413 if (list_empty(&bdev->man[0].lru))
1414 TTM_DEBUG("Swap list was clean\n");
1415 spin_unlock(&glob->lru_lock);
1416
1417 BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
1418 write_lock(&bdev->vm_lock);
1419 drm_mm_takedown(&bdev->addr_space_mm);
1420 write_unlock(&bdev->vm_lock);
1421
1422 return ret;
1423}
1424EXPORT_SYMBOL(ttm_bo_device_release);
1425
1426int ttm_bo_device_init(struct ttm_bo_device *bdev,
1427 struct ttm_bo_global *glob,
1428 struct ttm_bo_driver *driver,
1429 uint64_t file_page_offset,
1430 bool need_dma32)
1431{
1432 int ret = -EINVAL;
1433
1434 rwlock_init(&bdev->vm_lock);
1435 bdev->driver = driver;
1436
1437 memset(bdev->man, 0, sizeof(bdev->man));
1438
1439 /*
1440 * Initialize the system memory buffer type.
1441 * Other types need to be driver / IOCTL initialized.
1442 */
1443 ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
1444 if (unlikely(ret != 0))
1445 goto out_no_sys;
1446
1447 bdev->addr_space_rb = RB_ROOT;
1448 ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
1449 if (unlikely(ret != 0))
1450 goto out_no_addr_mm;
1451
1452 INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
1453 bdev->nice_mode = true;
1454 INIT_LIST_HEAD(&bdev->ddestroy);
1455 bdev->dev_mapping = NULL;
1456 bdev->glob = glob;
1457 bdev->need_dma32 = need_dma32;
1458
1459 mutex_lock(&glob->device_list_mutex);
1460 list_add_tail(&bdev->device_list, &glob->device_list);
1461 mutex_unlock(&glob->device_list_mutex);
1462
1463 return 0;
1464out_no_addr_mm:
1465 ttm_bo_clean_mm(bdev, 0);
1466out_no_sys:
1467 return ret;
1468}
1469EXPORT_SYMBOL(ttm_bo_device_init);
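/* Setup ordering note (illustrative, not part of the original file): a driver
 * first takes a reference on the bo global state (ttm_bo_global_init() via the
 * global-reference machinery), then calls ttm_bo_device_init() with its
 * ttm_bo_driver and mmap file offset, and finally ttm_bo_init_mm() for each
 * driver memory type; teardown runs in the reverse order through
 * ttm_bo_device_release() and ttm_bo_global_release().
 */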
1470
1471/*
1472 * buffer object vm functions.
1473 */
1474
1475bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1476{
1477 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1478
1479 if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
1480 if (mem->mem_type == TTM_PL_SYSTEM)
1481 return false;
1482
1483 if (man->flags & TTM_MEMTYPE_FLAG_CMA)
1484 return false;
1485
1486 if (mem->placement & TTM_PL_FLAG_CACHED)
1487 return false;
1488 }
1489 return true;
1490}
1491
1492int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
1493 struct ttm_mem_reg *mem,
1494 unsigned long *bus_base,
1495 unsigned long *bus_offset, unsigned long *bus_size)
1496{
1497 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1498
1499 *bus_size = 0;
1500 if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
1501 return -EINVAL;
1502
1503 if (ttm_mem_reg_is_pci(bdev, mem)) {
1504 *bus_offset = mem->mm_node->start << PAGE_SHIFT;
1505 *bus_size = mem->num_pages << PAGE_SHIFT;
1506 *bus_base = man->io_offset;
1507 }
1508
1509 return 0;
1510}
1511
1512void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
1513{
1514 struct ttm_bo_device *bdev = bo->bdev;
1515 loff_t offset = (loff_t) bo->addr_space_offset;
1516 loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
1517
1518 if (!bdev->dev_mapping)
1519 return;
1520
1521 unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
1522}
1523EXPORT_SYMBOL(ttm_bo_unmap_virtual);
1524
1525static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
1526{
1527 struct ttm_bo_device *bdev = bo->bdev;
1528 struct rb_node **cur = &bdev->addr_space_rb.rb_node;
1529 struct rb_node *parent = NULL;
1530 struct ttm_buffer_object *cur_bo;
1531 unsigned long offset = bo->vm_node->start;
1532 unsigned long cur_offset;
1533
1534 while (*cur) {
1535 parent = *cur;
1536 cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
1537 cur_offset = cur_bo->vm_node->start;
1538 if (offset < cur_offset)
1539 cur = &parent->rb_left;
1540 else if (offset > cur_offset)
1541 cur = &parent->rb_right;
1542 else
1543 BUG();
1544 }
1545
1546 rb_link_node(&bo->vm_rb, parent, cur);
1547 rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
1548}
1549
1550/**
1551 * ttm_bo_setup_vm:
1552 *
1553 * @bo: the buffer to allocate address space for
1554 *
1555 * Allocate address space in the drm device so that applications
1556 * can mmap the buffer and access the contents. This only
1557 * applies to ttm_bo_type_device objects as others are not
1558 * placed in the drm device address space.
1559 */
1560
1561static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
1562{
1563 struct ttm_bo_device *bdev = bo->bdev;
1564 int ret;
1565
1566retry_pre_get:
1567 ret = drm_mm_pre_get(&bdev->addr_space_mm);
1568 if (unlikely(ret != 0))
1569 return ret;
1570
1571 write_lock(&bdev->vm_lock);
1572 bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
1573 bo->mem.num_pages, 0, 0);
1574
1575 if (unlikely(bo->vm_node == NULL)) {
1576 ret = -ENOMEM;
1577 goto out_unlock;
1578 }
1579
1580 bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
1581 bo->mem.num_pages, 0);
1582
1583 if (unlikely(bo->vm_node == NULL)) {
1584 write_unlock(&bdev->vm_lock);
1585 goto retry_pre_get;
1586 }
1587
1588 ttm_bo_vm_insert_rb(bo);
1589 write_unlock(&bdev->vm_lock);
1590 bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
1591
1592 return 0;
1593out_unlock:
1594 write_unlock(&bdev->vm_lock);
1595 return ret;
1596}
1597
1598int ttm_bo_wait(struct ttm_buffer_object *bo,
1599 bool lazy, bool interruptible, bool no_wait)
1600{
1601 struct ttm_bo_driver *driver = bo->bdev->driver;
1602 void *sync_obj;
1603 void *sync_obj_arg;
1604 int ret = 0;
1605
1606 if (likely(bo->sync_obj == NULL))
1607 return 0;
1608
1609 while (bo->sync_obj) {
1610
1611 if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
1612 void *tmp_obj = bo->sync_obj;
1613 bo->sync_obj = NULL;
1614 clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
1615 spin_unlock(&bo->lock);
1616 driver->sync_obj_unref(&tmp_obj);
1617 spin_lock(&bo->lock);
1618 continue;
1619 }
1620
1621 if (no_wait)
1622 return -EBUSY;
1623
1624 sync_obj = driver->sync_obj_ref(bo->sync_obj);
1625 sync_obj_arg = bo->sync_obj_arg;
1626 spin_unlock(&bo->lock);
1627 ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
1628 lazy, interruptible);
1629 if (unlikely(ret != 0)) {
1630 driver->sync_obj_unref(&sync_obj);
1631 spin_lock(&bo->lock);
1632 return ret;
1633 }
1634 spin_lock(&bo->lock);
1635 if (likely(bo->sync_obj == sync_obj &&
1636 bo->sync_obj_arg == sync_obj_arg)) {
1637 void *tmp_obj = bo->sync_obj;
1638 bo->sync_obj = NULL;
1639 clear_bit(TTM_BO_PRIV_FLAG_MOVING,
1640 &bo->priv_flags);
1641 spin_unlock(&bo->lock);
1642 driver->sync_obj_unref(&sync_obj);
1643 driver->sync_obj_unref(&tmp_obj);
1644 spin_lock(&bo->lock);
1645 } else {
1646 spin_unlock(&bo->lock);
1647 driver->sync_obj_unref(&sync_obj);
1648 spin_lock(&bo->lock);
1649 }
1650 }
1651 return 0;
1652}
1653EXPORT_SYMBOL(ttm_bo_wait);
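/* Illustrative sketch (not from the original file): ttm_bo_wait() expects
 * bo->lock to be held and may drop and retake it internally, so callers
 * follow the pattern used elsewhere in this file:
 *
 *	spin_lock(&bo->lock);
 *	ret = ttm_bo_wait(bo, false, true, false);
 *	spin_unlock(&bo->lock);
 */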
1654
1655void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo)
1656{
1657 atomic_set(&bo->reserved, 0);
1658 wake_up_all(&bo->event_queue);
1659}
1660
1661int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
1662 bool no_wait)
1663{
1664 int ret;
1665
1666 while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
1667 if (no_wait)
1668 return -EBUSY;
1669 else if (interruptible) {
1670 ret = wait_event_interruptible
1671 (bo->event_queue, atomic_read(&bo->reserved) == 0);
1672 if (unlikely(ret != 0))
1673 return ret;
1674 } else {
1675 wait_event(bo->event_queue,
1676 atomic_read(&bo->reserved) == 0);
1677 }
1678 }
1679 return 0;
1680}
1681
1682int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
1683{
1684 int ret = 0;
1685
1686 /*
1687 * Using ttm_bo_reserve instead of ttm_bo_block_reservation
1688 * makes sure the lru lists are updated.
1689 */
1690
1691 ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
1692 if (unlikely(ret != 0))
1693 return ret;
1694 spin_lock(&bo->lock);
1695 ret = ttm_bo_wait(bo, false, true, no_wait);
1696 spin_unlock(&bo->lock);
1697 if (likely(ret == 0))
1698 atomic_inc(&bo->cpu_writers);
1699 ttm_bo_unreserve(bo);
1700 return ret;
1701}
1702
1703void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
1704{
1705 if (atomic_dec_and_test(&bo->cpu_writers))
1706 wake_up_all(&bo->event_queue);
1707}
1708
1709/**
1710 * A buffer object shrink method that tries to swap out the first
1711 * buffer object on the bo_global::swap_lru list.
1712 */
1713
1714static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
1715{
1716 struct ttm_bo_global *glob =
1717 container_of(shrink, struct ttm_bo_global, shrink);
1718 struct ttm_buffer_object *bo;
1719 int ret = -EBUSY;
1720 int put_count;
1721 uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
1722
1723 spin_lock(&glob->lru_lock);
1724 while (ret == -EBUSY) {
1725 if (unlikely(list_empty(&glob->swap_lru))) {
1726 spin_unlock(&glob->lru_lock);
1727 return -EBUSY;
1728 }
1729
1730 bo = list_first_entry(&glob->swap_lru,
1731 struct ttm_buffer_object, swap);
1732 kref_get(&bo->list_kref);
1733
1734 /**
1735 * Reserve buffer. Since we unlock while sleeping, we need
1736 * to re-check that nobody removed us from the swap-list while
1737 * we slept.
1738 */
1739
1740 ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
1741 if (unlikely(ret == -EBUSY)) {
1742 spin_unlock(&glob->lru_lock);
1743 ttm_bo_wait_unreserved(bo, false);
1744 kref_put(&bo->list_kref, ttm_bo_release_list);
1745 spin_lock(&glob->lru_lock);
1746 }
1747 }
1748
1749 BUG_ON(ret != 0);
1750 put_count = ttm_bo_del_from_lru(bo);
1751 spin_unlock(&glob->lru_lock);
1752
1753 while (put_count--)
1754 kref_put(&bo->list_kref, ttm_bo_ref_bug);
1755
1756 /**
1757 * Wait for GPU, then move to system cached.
1758 */
1759
1760 spin_lock(&bo->lock);
1761 ret = ttm_bo_wait(bo, false, false, false);
1762 spin_unlock(&bo->lock);
1763
1764 if (unlikely(ret != 0))
1765 goto out;
1766
1767 if ((bo->mem.placement & swap_placement) != swap_placement) {
1768 struct ttm_mem_reg evict_mem;
1769
1770 evict_mem = bo->mem;
1771 evict_mem.mm_node = NULL;
1772 evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
1773 evict_mem.mem_type = TTM_PL_SYSTEM;
1774
1775 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
1776 false, false);
1777 if (unlikely(ret != 0))
1778 goto out;
1779 }
1780
1781 ttm_bo_unmap_virtual(bo);
1782
1783 /**
1784 * Swap out. Buffer will be swapped in again as soon as
1785 * anyone tries to access a ttm page.
1786 */
1787
1788 ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
1789out:
1790
1791 /**
1792 *
1793 * Unreserve without putting on LRU to avoid swapping out an
1794 * already swapped buffer.
1795 */
1796
1797 atomic_set(&bo->reserved, 0);
1798 wake_up_all(&bo->event_queue);
1799 kref_put(&bo->list_kref, ttm_bo_release_list);
1800 return ret;
1801}
1802
1803void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
1804{
1805 while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
1806 ;
1807}