/* drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c */
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */

#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

/* BO priorities run from 0 to this maximum; one sort bucket per level. */
#define AMDGPU_BO_LIST_MAX_PRIORITY	32u
#define AMDGPU_BO_LIST_NUM_BUCKETS	(AMDGPU_BO_LIST_MAX_PRIORITY + 1)

static int amdgpu_bo_list_set(struct amdgpu_device *adev,
			      struct drm_file *filp,
			      struct amdgpu_bo_list *list,
			      struct drm_amdgpu_bo_list_entry *info,
			      unsigned num_entries);

static void amdgpu_bo_list_release_rcu(struct kref *ref)
{
	unsigned i;
	struct amdgpu_bo_list *list = container_of(ref, struct amdgpu_bo_list,
						   refcount);

	for (i = 0; i < list->num_entries; ++i)
		amdgpu_bo_unref(&list->array[i].robj);

	mutex_destroy(&list->lock);
	kvfree(list->array);
	/* Lookups run under rcu_read_lock(), so the final free of the list
	 * structure itself must be deferred past an RCU grace period.
	 */
	kfree_rcu(list, rhead);
}

static int amdgpu_bo_list_create(struct amdgpu_device *adev,
				 struct drm_file *filp,
				 struct drm_amdgpu_bo_list_entry *info,
				 unsigned num_entries,
				 int *id)
{
	int r;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_bo_list *list;

	list = kzalloc(sizeof(struct amdgpu_bo_list), GFP_KERNEL);
	if (!list)
		return -ENOMEM;

	/* initialize bo list */
	mutex_init(&list->lock);
	kref_init(&list->refcount);
	r = amdgpu_bo_list_set(adev, filp, list, info, num_entries);
	if (r) {
		kfree(list);
		return r;
	}

	/* idr alloc should be called only after initialization of bo list. */
	mutex_lock(&fpriv->bo_list_lock);
	r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL);
	mutex_unlock(&fpriv->bo_list_lock);
	if (r < 0) {
		/* Drop the BO references and the entry array as well as the
		 * list object itself; amdgpu_bo_list_set() populated them.
		 */
		amdgpu_bo_list_free(list);
		return r;
	}
	*id = r;

	return 0;
}

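/*
 * Usage sketch (illustrative only, not part of the original driver): the
 * create/destroy pair maps a bo list to a small integer handle via the
 * per-file IDR, e.g.
 *
 *	int handle;
 *	r = amdgpu_bo_list_create(adev, filp, info, num_entries, &handle);
 *	if (r)
 *		return r;
 *	...
 *	amdgpu_bo_list_destroy(fpriv, handle);
 *
 * idr_alloc() is called with start = 1 and end = 0 (no upper bound), so
 * handles begin at 1 and 0 can serve as "no list" in the ioctl below.
 */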
static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id)
{
	struct amdgpu_bo_list *list;

	mutex_lock(&fpriv->bo_list_lock);
	list = idr_remove(&fpriv->bo_list_handles, id);
	mutex_unlock(&fpriv->bo_list_lock);
	if (list)
		kref_put(&list->refcount, amdgpu_bo_list_release_rcu);
}

static int amdgpu_bo_list_set(struct amdgpu_device *adev,
			      struct drm_file *filp,
			      struct amdgpu_bo_list *list,
			      struct drm_amdgpu_bo_list_entry *info,
			      unsigned num_entries)
{
	struct amdgpu_bo_list_entry *array;
	struct amdgpu_bo *gds_obj = adev->gds.gds_gfx_bo;
	struct amdgpu_bo *gws_obj = adev->gds.gws_gfx_bo;
	struct amdgpu_bo *oa_obj = adev->gds.oa_gfx_bo;

	unsigned last_entry = 0, first_userptr = num_entries;
	unsigned i;
	int r;
	unsigned long total_size = 0;

	array = kvmalloc_array(num_entries, sizeof(struct amdgpu_bo_list_entry),
			       GFP_KERNEL);
	if (!array)
		return -ENOMEM;
	memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry));

	for (i = 0; i < num_entries; ++i) {
		struct amdgpu_bo_list_entry *entry;
		struct drm_gem_object *gobj;
		struct amdgpu_bo *bo;
		struct mm_struct *usermm;

		gobj = drm_gem_object_lookup(filp, info[i].bo_handle);
		if (!gobj) {
			r = -ENOENT;
			goto error_free;
		}

		bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
		drm_gem_object_unreference_unlocked(gobj);

		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
		if (usermm) {
			if (usermm != current->mm) {
				amdgpu_bo_unref(&bo);
				r = -EPERM;
				goto error_free;
			}
			/* userptr BOs are grouped at the tail of the array,
			 * regular BOs at the head; first_userptr marks the
			 * boundary between the two ranges.
			 */
			entry = &array[--first_userptr];
		} else {
			entry = &array[last_entry++];
		}

		entry->robj = bo;
		entry->priority = min(info[i].bo_priority,
				      AMDGPU_BO_LIST_MAX_PRIORITY);
		entry->tv.bo = &entry->robj->tbo;
		entry->tv.shared = !entry->robj->prime_shared_count;

		if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GDS)
			gds_obj = entry->robj;
		if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GWS)
			gws_obj = entry->robj;
		if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_OA)
			oa_obj = entry->robj;

		total_size += amdgpu_bo_size(entry->robj);
		trace_amdgpu_bo_list_set(list, entry->robj);
	}

	for (i = 0; i < list->num_entries; ++i)
		amdgpu_bo_unref(&list->array[i].robj);

	kvfree(list->array);

	list->gds_obj = gds_obj;
	list->gws_obj = gws_obj;
	list->oa_obj = oa_obj;
	list->first_userptr = first_userptr;
	list->array = array;
	list->num_entries = num_entries;

	trace_amdgpu_cs_bo_status(list->num_entries, total_size);
	return 0;

error_free:
	/* Entries were filled in from both ends of the array, so unwind both
	 * populated ranges; the untouched slots in between are still zeroed.
	 */
	for (i = 0; i < last_entry; ++i)
		amdgpu_bo_unref(&array[i].robj);
	for (i = first_userptr; i < num_entries; ++i)
		amdgpu_bo_unref(&array[i].robj);
	kvfree(array);
	return r;
}

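/*
 * Resulting layout (illustrative, with made-up entry names): given five BOs
 * of which the last two are userptr BOs, amdgpu_bo_list_set() fills the
 * array from both ends:
 *
 *	array:  [ bo0 | bo1 | bo2 | uptr1 | uptr0 ]
 *	                            ^
 *	                            first_userptr = 3
 *
 * Note the userptr tail ends up in reverse submission order. The command
 * submission code can then treat array[first_userptr..num_entries-1] as the
 * slice that needs user-page handling, without a separate sort pass.
 */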
struct amdgpu_bo_list *
amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id)
{
	struct amdgpu_bo_list *result;

	rcu_read_lock();
	result = idr_find(&fpriv->bo_list_handles, id);

	if (result) {
		if (kref_get_unless_zero(&result->refcount)) {
			rcu_read_unlock();
			mutex_lock(&result->lock);
		} else {
			/* The list is being torn down concurrently; treat
			 * the handle as stale.
			 */
			rcu_read_unlock();
			result = NULL;
		}
	} else {
		rcu_read_unlock();
	}

	return result;
}

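/*
 * A note on the lookup scheme (summary, not original driver text): the
 * idr_find() runs under rcu_read_lock(), so a racing
 * amdgpu_bo_list_destroy() may already have dropped the last reference.
 * kref_get_unless_zero() refuses to resurrect such a dying object, and
 * amdgpu_bo_list_release_rcu() defers the final kfree via kfree_rcu(),
 * which together make the lock-free lookup safe:
 *
 *	reader				writer
 *	------				------
 *	rcu_read_lock()
 *	idr_find() -> list		idr_remove()
 *	kref_get_unless_zero()		kref_put() -> release_rcu()
 *	  fails -> return NULL		  kfree_rcu() waits for readers
 *	rcu_read_unlock()
 */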
void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
			     struct list_head *validated)
{
	/* This is based on the bucket sort with O(n) time complexity.
	 * An item with priority "i" is added to bucket[i]. The lists are then
	 * concatenated in descending order.
	 */
	struct list_head bucket[AMDGPU_BO_LIST_NUM_BUCKETS];
	unsigned i;

	for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
		INIT_LIST_HEAD(&bucket[i]);

	/* Since buffers which appear sooner in the relocation list are
	 * likely to be used more often than buffers which appear later
	 * in the list, the sort mustn't change the ordering of buffers
	 * with the same priority, i.e. it must be stable.
	 */
	for (i = 0; i < list->num_entries; i++) {
		unsigned priority = list->array[i].priority;

		list_add_tail(&list->array[i].tv.head,
			      &bucket[priority]);
		list->array[i].user_pages = NULL;
	}

	/* Connect the sorted buckets in the output list. */
	for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
		list_splice(&bucket[i], validated);
}

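/*
 * Worked example (illustrative): entries with priorities [2, 0, 2, 1] land
 * in bucket[2] = {e0, e2}, bucket[1] = {e3}, bucket[0] = {e1}. Because
 * list_splice() inserts each bucket at the head of 'validated', splicing
 * the buckets in ascending index order yields
 *
 *	validated: e0, e2, e3, e1
 *
 * i.e. highest priority first, with the original order preserved inside
 * each priority level, which is exactly the stable sort the comment above
 * requires.
 */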
void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
{
	mutex_unlock(&list->lock);
	kref_put(&list->refcount, amdgpu_bo_list_release_rcu);
}

void amdgpu_bo_list_free(struct amdgpu_bo_list *list)
{
	unsigned i;

	for (i = 0; i < list->num_entries; ++i)
		amdgpu_bo_unref(&list->array[i].robj);

	mutex_destroy(&list->lock);
	kvfree(list->array);
	kfree(list);
}

int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp)
{
	const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry);

	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	union drm_amdgpu_bo_list *args = data;
	uint32_t handle = args->in.list_handle;
	const void __user *uptr = (const void __user *)(uintptr_t)args->in.bo_info_ptr;

	struct drm_amdgpu_bo_list_entry *info;
	struct amdgpu_bo_list *list;

	int r;

	info = kvmalloc_array(args->in.bo_number,
			      sizeof(struct drm_amdgpu_bo_list_entry),
			      GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	/* copy the handle array from userspace to a kernel buffer */
	r = -EFAULT;
	if (likely(info_size == args->in.bo_info_size)) {
		unsigned long bytes = args->in.bo_number *
			args->in.bo_info_size;

		if (copy_from_user(info, uptr, bytes))
			goto error_free;

	} else {
		/* The userspace struct has a different size (older or newer
		 * ABI); copy the common prefix of each element and leave the
		 * rest zeroed.
		 */
		unsigned long bytes = min(args->in.bo_info_size, info_size);
		unsigned i;

		memset(info, 0, args->in.bo_number * info_size);
		for (i = 0; i < args->in.bo_number; ++i) {
			if (copy_from_user(&info[i], uptr, bytes))
				goto error_free;

			uptr += args->in.bo_info_size;
		}
	}

	switch (args->in.operation) {
	case AMDGPU_BO_LIST_OP_CREATE:
		r = amdgpu_bo_list_create(adev, filp, info, args->in.bo_number,
					  &handle);
		if (r)
			goto error_free;
		break;

	case AMDGPU_BO_LIST_OP_DESTROY:
		amdgpu_bo_list_destroy(fpriv, handle);
		handle = 0;
		break;

	case AMDGPU_BO_LIST_OP_UPDATE:
		r = -ENOENT;
		list = amdgpu_bo_list_get(fpriv, handle);
		if (!list)
			goto error_free;

		r = amdgpu_bo_list_set(adev, filp, list, info,
				       args->in.bo_number);
		amdgpu_bo_list_put(list);
		if (r)
			goto error_free;

		break;

	default:
		r = -EINVAL;
		goto error_free;
	}

	memset(args, 0, sizeof(*args));
	args->out.list_handle = handle;
	kvfree(info);

	return 0;

error_free:
	kvfree(info);
	return r;
}
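
/*
 * A note on the size-tolerant copy above (summary, not original driver
 * text): the per-element loop is the usual DRM idiom for extensible ioctl
 * arrays. With a hypothetical old userspace where bo_info_size == 8 and a
 * kernel where info_size == 16, each iteration copies the 8-byte prefix
 * into a zeroed 16-byte slot and then advances uptr by the *userspace*
 * stride:
 *
 *	user buffer:   | 8B elem | 8B elem | 8B elem |
 *	kernel info:   | 8B + 8B zeroes | 8B + 8B zeroes | ...
 *
 * New fields thus read as 0 for old clients, and extra trailing bytes from
 * newer clients are silently dropped. (The sizes are illustrative; the
 * real drm_amdgpu_bo_list_entry layout comes from amdgpu_drm.h.)
 */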