/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include "i915_drv.h"
#include <drm/i915_drm.h>
#include "i915_trace.h"

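/*
 * Helper for the eviction scan below: skip pinned objects, otherwise put the
 * object on the unwind list and hand its node to the drm_mm scanner.  Returns
 * true once the scanner has assembled a large enough hole.
 */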
static bool
mark_free(struct i915_vma *vma, struct list_head *unwind)
{
        if (vma->obj->pin_count)
                return false;

        list_add(&vma->obj->exec_list, unwind);
        return drm_mm_scan_add_block(&vma->node);
}

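/*
 * i915_gem_evict_something - make room in @vm for a new binding.
 *
 * Scan @vm in LRU order (inactive list first, then, unless @nonblocking, the
 * active list) for unpinned objects whose eviction would yield a contiguous
 * hole of at least @min_size bytes with the requested @alignment and
 * @cache_level.  When @mappable is set the search is restricted to the
 * CPU-mappable end of the global GTT.  The chosen objects are unbound before
 * returning; returns -ENOSPC if no suitable hole can be assembled.
 */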
int
i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
                         int min_size, unsigned alignment, unsigned cache_level,
                         bool mappable, bool nonblocking)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct list_head eviction_list, unwind_list;
        struct i915_vma *vma;
        struct drm_i915_gem_object *obj;
        int ret = 0;

        trace_i915_gem_evict(dev, min_size, alignment, mappable);

        /*
         * The goal is to evict objects and amalgamate space in LRU order.
         * The oldest idle objects reside on the inactive list, which is in
         * retirement order. The next objects to retire are those on the (per
         * ring) active list that do not have an outstanding flush. Once the
         * hardware reports completion (the seqno is updated after the
         * batchbuffer has been finished) the clean buffer objects would
         * be retired to the inactive list. Any dirty objects would be added
         * to the tail of the flushing list. So after processing the clean
         * active objects we need to emit a MI_FLUSH to retire the flushing
         * list, hence the retirement order of the flushing list is in
         * advance of the dirty objects on the active lists.
         *
         * The retirement sequence is thus:
         * 1. Inactive objects (already retired)
         * 2. Clean active objects
         * 3. Flushing list
         * 4. Dirty active objects.
         *
         * On each list, the oldest objects lie at the HEAD with the freshest
         * object on the TAIL.
         */

        INIT_LIST_HEAD(&unwind_list);
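        /*
         * Set up the speculative drm_mm scan: nodes added below are only
         * provisionally removed until the scan reports that a large enough
         * hole has been found.  A mappable request must fit below
         * dev_priv->gtt.mappable_end, so restrict the scan range for it.
         */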
        if (mappable) {
                BUG_ON(!i915_is_ggtt(vm));
                drm_mm_init_scan_with_range(&vm->mm, min_size,
                                            alignment, cache_level, 0,
                                            dev_priv->gtt.mappable_end);
        } else
                drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);

        /* First see if there is a large enough contiguous idle region... */
        list_for_each_entry(obj, &vm->inactive_list, mm_list) {
                struct i915_vma *vma = i915_gem_obj_to_vma(obj, vm);
                if (mark_free(vma, &unwind_list))
                        goto found;
        }

        if (nonblocking)
                goto none;

        /* Now merge in the soon-to-be-expired objects... */
        list_for_each_entry(obj, &vm->active_list, mm_list) {
                struct i915_vma *vma = i915_gem_obj_to_vma(obj, vm);
                if (mark_free(vma, &unwind_list))
                        goto found;
        }

none:
        /* Nothing found, clean up and bail out! */
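        /* Every node handed to the scanner must be taken back out with
         * drm_mm_scan_remove_block() before the drm_mm can be used again,
         * even though we are about to give up.
         */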
        while (!list_empty(&unwind_list)) {
                obj = list_first_entry(&unwind_list,
                                       struct drm_i915_gem_object,
                                       exec_list);
                vma = i915_gem_obj_to_vma(obj, vm);
                ret = drm_mm_scan_remove_block(&vma->node);
                BUG_ON(ret);

                list_del_init(&obj->exec_list);
        }

        /* We expect the caller to unpin, evict all and try again, or give up.
         * So calling i915_gem_evict_everything() is unnecessary.
         */
        return -ENOSPC;

found:
        /* drm_mm doesn't allow any other operations while
         * scanning, therefore store to-be-evicted objects on a
         * temporary list. */
        INIT_LIST_HEAD(&eviction_list);
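        /* drm_mm_scan_remove_block() reports whether this node lies inside
         * the hole found by the scan; only those objects are moved onto the
         * eviction list, with a reference held so that unbinding cannot free
         * them while we still walk the list.
         */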
        while (!list_empty(&unwind_list)) {
                obj = list_first_entry(&unwind_list,
                                       struct drm_i915_gem_object,
                                       exec_list);
                vma = i915_gem_obj_to_vma(obj, vm);
                if (drm_mm_scan_remove_block(&vma->node)) {
                        list_move(&obj->exec_list, &eviction_list);
                        drm_gem_object_reference(&obj->base);
                        continue;
                }
                list_del_init(&obj->exec_list);
        }

        /* Unbinding will emit any required flushes */
        while (!list_empty(&eviction_list)) {
                obj = list_first_entry(&eviction_list,
                                       struct drm_i915_gem_object,
                                       exec_list);
                if (ret == 0)
                        ret = i915_vma_unbind(i915_gem_obj_to_vma(obj, vm));

                list_del_init(&obj->exec_list);
                drm_gem_object_unreference(&obj->base);
        }

        return ret;
}

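/*
 * i915_gem_evict_everything - evict every unpinned object from every
 * address space.  Idles the GPU and retires outstanding requests first so
 * that the inactive lists hold everything that can be unbound; returns
 * -ENOSPC if there was nothing to evict.
 */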
int
i915_gem_evict_everything(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct i915_address_space *vm;
        struct drm_i915_gem_object *obj, *next;
        bool lists_empty = true;
        int ret;

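        /* Quick check across all address spaces: if every active and
         * inactive list is empty there is nothing to evict.
         */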
        list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
                lists_empty = (list_empty(&vm->inactive_list) &&
                               list_empty(&vm->active_list));
                if (!lists_empty)
                        break;
        }

        if (lists_empty)
                return -ENOSPC;

        trace_i915_gem_evict_everything(dev);

        /* The gpu_idle will flush everything in the write domain to the
         * active list. Then we must move everything off the active list
         * with retire requests.
         */
        ret = i915_gpu_idle(dev);
        if (ret)
                return ret;

        i915_gem_retire_requests(dev);

        /* Having flushed everything, unbind() should never raise an error */
        list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
                list_for_each_entry_safe(obj, next, &vm->inactive_list, mm_list)
                        if (obj->pin_count == 0)
                                WARN_ON(i915_vma_unbind(i915_gem_obj_to_vma(obj, vm)));
        }

        return 0;
}