/*
 * Copyright © 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/**
 * DOC: VC4 GEM BO management support
 *
 * The VC4 GPU architecture (both scanout and rendering) has direct
 * access to system memory with no MMU in between. To support it, we
 * use the GEM CMA helper functions to allocate contiguous ranges of
 * physical memory for our BOs.
 *
 * Since the CMA allocator is very slow, we keep a cache of recently
 * freed BOs around so that the kernel's allocation of objects for 3D
 * rendering can return quickly.
 */

#include <linux/dma-buf.h>

#include "vc4_drv.h"
#include "uapi/drm/vc4_drm.h"

static const char * const bo_type_names[] = {
	"kernel",
	"V3D",
	"V3D shader",
	"dumb",
	"binner",
	"RCL",
	"BCL",
	"kernel BO cache",
};

static bool is_user_label(int label)
{
	return label >= VC4_BO_TYPE_COUNT;
}

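/* Dumps allocation stats for each BO label, plus the userspace
 * purgeable pool counters, through the given drm_printer. Used by
 * both the debugfs "bo_stats" file and the CMA allocation-failure
 * path in vc4_bo_create().
 */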
static void vc4_bo_stats_print(struct drm_printer *p, struct vc4_dev *vc4)
{
	int i;

	for (i = 0; i < vc4->num_labels; i++) {
		if (!vc4->bo_labels[i].num_allocated)
			continue;

		drm_printf(p, "%30s: %6dkb BOs (%d)\n",
			   vc4->bo_labels[i].name,
			   vc4->bo_labels[i].size_allocated / 1024,
			   vc4->bo_labels[i].num_allocated);
	}

	mutex_lock(&vc4->purgeable.lock);
	if (vc4->purgeable.num)
		drm_printf(p, "%30s: %6zdkb BOs (%d)\n", "userspace BO cache",
			   vc4->purgeable.size / 1024, vc4->purgeable.num);

	if (vc4->purgeable.purged_num)
		drm_printf(p, "%30s: %6zdkb BOs (%d)\n", "total purged BO",
			   vc4->purgeable.purged_size / 1024,
			   vc4->purgeable.purged_num);
	mutex_unlock(&vc4->purgeable.lock);
}

static int vc4_bo_stats_debugfs(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_printer p = drm_seq_file_printer(m);

	vc4_bo_stats_print(&p, vc4);

	return 0;
}

/* Takes ownership of *name and returns the appropriate slot for it in
 * the bo_labels[] array, extending it as necessary.
 *
 * This is inefficient and could use a hash table instead of walking
 * an array and strcmp()ing. However, the assumption is that user
 * labeling will be infrequent (scanout buffers and other long-lived
 * objects, or debug driver builds), so we can live with it for now.
 */
static int vc4_get_user_label(struct vc4_dev *vc4, const char *name)
{
	int i;
	int free_slot = -1;

	for (i = 0; i < vc4->num_labels; i++) {
		if (!vc4->bo_labels[i].name) {
			free_slot = i;
		} else if (strcmp(vc4->bo_labels[i].name, name) == 0) {
			kfree(name);
			return i;
		}
	}

	if (free_slot != -1) {
		WARN_ON(vc4->bo_labels[free_slot].num_allocated != 0);
		vc4->bo_labels[free_slot].name = name;
		return free_slot;
	} else {
		u32 new_label_count = vc4->num_labels + 1;
		struct vc4_label *new_labels =
			krealloc(vc4->bo_labels,
				 new_label_count * sizeof(*new_labels),
				 GFP_KERNEL);

		if (!new_labels) {
			kfree(name);
			return -1;
		}

		free_slot = vc4->num_labels;
		vc4->bo_labels = new_labels;
		vc4->num_labels = new_label_count;

		vc4->bo_labels[free_slot].name = name;
		vc4->bo_labels[free_slot].num_allocated = 0;
		vc4->bo_labels[free_slot].size_allocated = 0;

		return free_slot;
	}
}

static void vc4_bo_set_label(struct drm_gem_object *gem_obj, int label)
{
	struct vc4_bo *bo = to_vc4_bo(gem_obj);
	struct vc4_dev *vc4 = to_vc4_dev(gem_obj->dev);

	lockdep_assert_held(&vc4->bo_lock);

	if (label != -1) {
		vc4->bo_labels[label].num_allocated++;
		vc4->bo_labels[label].size_allocated += gem_obj->size;
	}

	vc4->bo_labels[bo->label].num_allocated--;
	vc4->bo_labels[bo->label].size_allocated -= gem_obj->size;

	if (vc4->bo_labels[bo->label].num_allocated == 0 &&
	    is_user_label(bo->label)) {
		/* Free user BO label slots on last unreference.
		 * Slots are just where we track the stats for a given
		 * name, and once a name is unused we can reuse that
		 * slot.
		 */
		kfree(vc4->bo_labels[bo->label].name);
		vc4->bo_labels[bo->label].name = NULL;
	}

	bo->label = label;
}

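/* Maps a page-aligned BO size to its bucket in the size_list cache:
 * the cache keeps one free list per page count, so a size of N pages
 * lands in slot N - 1.
 */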
static uint32_t bo_page_index(size_t size)
{
	return (size / PAGE_SIZE) - 1;
}

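/* Actually frees the BO's backing storage. Called with bo_lock held;
 * setting the label to -1 drops the BO from the allocation stats
 * without assigning a replacement label.
 */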
static void vc4_bo_destroy(struct vc4_bo *bo)
{
	struct drm_gem_object *obj = &bo->base.base;
	struct vc4_dev *vc4 = to_vc4_dev(obj->dev);

	lockdep_assert_held(&vc4->bo_lock);

	vc4_bo_set_label(obj, -1);

	if (bo->validated_shader) {
		kfree(bo->validated_shader->uniform_addr_offsets);
		kfree(bo->validated_shader->texture_samples);
		kfree(bo->validated_shader);
		bo->validated_shader = NULL;
	}

	drm_gem_cma_free_object(obj);
}

static void vc4_bo_remove_from_cache(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	lockdep_assert_held(&vc4->bo_lock);
	list_del(&bo->unref_head);
	list_del(&bo->size_head);
}

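/* Returns the cache free list for BOs of the given size, growing the
 * size_list array on demand. The array at least doubles on each
 * growth, so the number of reallocations stays logarithmic in the
 * largest BO size seen.
 */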
static struct list_head *vc4_get_cache_list_for_size(struct drm_device *dev,
						     size_t size)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t page_index = bo_page_index(size);

	if (vc4->bo_cache.size_list_size <= page_index) {
		uint32_t new_size = max(vc4->bo_cache.size_list_size * 2,
					page_index + 1);
		struct list_head *new_list;
		uint32_t i;

		new_list = kmalloc_array(new_size, sizeof(struct list_head),
					 GFP_KERNEL);
		if (!new_list)
			return NULL;

		/* Rebase the old cached BO lists to their new list
		 * head locations.
		 */
		for (i = 0; i < vc4->bo_cache.size_list_size; i++) {
			struct list_head *old_list =
				&vc4->bo_cache.size_list[i];

			if (list_empty(old_list))
				INIT_LIST_HEAD(&new_list[i]);
			else
				list_replace(old_list, &new_list[i]);
		}
		/* And initialize the brand new BO list heads. */
		for (i = vc4->bo_cache.size_list_size; i < new_size; i++)
			INIT_LIST_HEAD(&new_list[i]);

		kfree(vc4->bo_cache.size_list);
		vc4->bo_cache.size_list = new_list;
		vc4->bo_cache.size_list_size = new_size;
	}

	return &vc4->bo_cache.size_list[page_index];
}

static void vc4_bo_cache_purge(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mutex_lock(&vc4->bo_lock);
	while (!list_empty(&vc4->bo_cache.time_list)) {
		struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
						    struct vc4_bo, unref_head);
		vc4_bo_remove_from_cache(bo);
		vc4_bo_destroy(bo);
	}
	mutex_unlock(&vc4->bo_lock);
}

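/* The purgeable pool tracks BOs that userspace has marked
 * VC4_MADV_DONTNEED and that have no active users; their backing
 * storage may be reclaimed when CMA allocations start failing.
 */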
void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	mutex_lock(&vc4->purgeable.lock);
	list_add_tail(&bo->size_head, &vc4->purgeable.list);
	vc4->purgeable.num++;
	vc4->purgeable.size += bo->base.base.size;
	mutex_unlock(&vc4->purgeable.lock);
}

static void vc4_bo_remove_from_purgeable_pool_locked(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	/* list_del_init() is used here because the caller might release
	 * the purgeable lock in order to acquire the madv one and update the
	 * madv status.
	 * During this short period of time a user might decide to mark
	 * the BO as unpurgeable, and if bo->madv is set to
	 * VC4_MADV_DONTNEED it will try to remove the BO from the
	 * purgeable list which will fail if the ->next/prev fields
	 * are set to LIST_POISON1/LIST_POISON2 (which is what
	 * list_del() does).
	 * Re-initializing the list element guarantees that list_del()
	 * will work correctly even if it's a NOP.
	 */
	list_del_init(&bo->size_head);
	vc4->purgeable.num--;
	vc4->purgeable.size -= bo->base.base.size;
}

void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	mutex_lock(&vc4->purgeable.lock);
	vc4_bo_remove_from_purgeable_pool_locked(bo);
	mutex_unlock(&vc4->purgeable.lock);
}

static void vc4_bo_purge(struct drm_gem_object *obj)
{
	struct vc4_bo *bo = to_vc4_bo(obj);
	struct drm_device *dev = obj->dev;

	WARN_ON(!mutex_is_locked(&bo->madv_lock));
	WARN_ON(bo->madv != VC4_MADV_DONTNEED);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	dma_free_wc(dev->dev, obj->size, bo->base.vaddr, bo->base.paddr);
	bo->base.vaddr = NULL;
	bo->madv = __VC4_MADV_PURGED;
}

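/* Walks the purgeable pool and releases the backing storage of every
 * BO that is still eligible, accumulating the purged size and count
 * reported by vc4_bo_stats_print().
 */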
static void vc4_bo_userspace_cache_purge(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mutex_lock(&vc4->purgeable.lock);
	while (!list_empty(&vc4->purgeable.list)) {
		struct vc4_bo *bo = list_first_entry(&vc4->purgeable.list,
						     struct vc4_bo, size_head);
		struct drm_gem_object *obj = &bo->base.base;
		size_t purged_size = 0;

		vc4_bo_remove_from_purgeable_pool_locked(bo);

		/* Release the purgeable lock while we're purging the BO so
		 * that other people can continue inserting things in the
		 * purgeable pool without having to wait for all BOs to be
		 * purged.
		 */
		mutex_unlock(&vc4->purgeable.lock);
		mutex_lock(&bo->madv_lock);

		/* Since we released the purgeable pool lock before acquiring
		 * the BO madv one, the user may have marked the BO as WILLNEED
		 * and re-used it in the meantime.
		 * Before purging the BO we need to make sure
		 * - it is still marked as DONTNEED
		 * - it has not been re-inserted in the purgeable list
		 * - it is not used by HW blocks
		 * If one of these conditions is not met, just skip the entry.
		 */
		if (bo->madv == VC4_MADV_DONTNEED &&
		    list_empty(&bo->size_head) &&
		    !refcount_read(&bo->usecnt)) {
			purged_size = bo->base.base.size;
			vc4_bo_purge(obj);
		}
		mutex_unlock(&bo->madv_lock);
		mutex_lock(&vc4->purgeable.lock);

		if (purged_size) {
			vc4->purgeable.purged_size += purged_size;
			vc4->purgeable.purged_num++;
		}
	}
	mutex_unlock(&vc4->purgeable.lock);
}

static struct vc4_bo *vc4_bo_get_from_cache(struct drm_device *dev,
					    uint32_t size,
					    enum vc4_kernel_bo_type type)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t page_index = bo_page_index(size);
	struct vc4_bo *bo = NULL;

	size = roundup(size, PAGE_SIZE);

	mutex_lock(&vc4->bo_lock);
	if (page_index >= vc4->bo_cache.size_list_size)
		goto out;

	if (list_empty(&vc4->bo_cache.size_list[page_index]))
		goto out;

	bo = list_first_entry(&vc4->bo_cache.size_list[page_index],
			      struct vc4_bo, size_head);
	vc4_bo_remove_from_cache(bo);
	kref_init(&bo->base.base.refcount);

out:
	if (bo)
		vc4_bo_set_label(&bo->base.base, type);
	mutex_unlock(&vc4->bo_lock);
	return bo;
}

/**
 * vc4_create_object - Implementation of driver->gem_create_object.
 * @dev: DRM device
 * @size: Size in bytes of the memory the object will reference
 *
 * This lets the CMA helpers allocate object structs for us, and keep
 * our BO stats correct.
 */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	bo->madv = VC4_MADV_WILLNEED;
	refcount_set(&bo->usecnt, 0);
	mutex_init(&bo->madv_lock);
	mutex_lock(&vc4->bo_lock);
	bo->label = VC4_BO_TYPE_KERNEL;
	vc4->bo_labels[VC4_BO_TYPE_KERNEL].num_allocated++;
	vc4->bo_labels[VC4_BO_TYPE_KERNEL].size_allocated += size;
	mutex_unlock(&vc4->bo_lock);

	return &bo->base.base;
}

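/* Allocates a BO of at least unaligned_size bytes, preferring a
 * recycled entry from the kernel BO cache. If CMA is exhausted, it
 * retries after purging the kernel BO cache, then again after purging
 * the userspace purgeable pool, before giving up with -ENOMEM.
 */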
struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
			     bool allow_unzeroed, enum vc4_kernel_bo_type type)
{
	size_t size = roundup(unaligned_size, PAGE_SIZE);
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_gem_cma_object *cma_obj;
	struct vc4_bo *bo;

	if (size == 0)
		return ERR_PTR(-EINVAL);

	/* First, try to get a vc4_bo from the kernel BO cache. */
	bo = vc4_bo_get_from_cache(dev, size, type);
	if (bo) {
		if (!allow_unzeroed)
			memset(bo->base.vaddr, 0, bo->base.base.size);
		return bo;
	}

	cma_obj = drm_gem_cma_create(dev, size);
	if (IS_ERR(cma_obj)) {
		/*
		 * If we've run out of CMA memory, kill the cache of
		 * CMA allocations we've got lying around and try again.
		 */
		vc4_bo_cache_purge(dev);
		cma_obj = drm_gem_cma_create(dev, size);
	}

	if (IS_ERR(cma_obj)) {
		/*
		 * Still not enough CMA memory, purge the userspace BO
		 * cache and retry.
		 * This is sub-optimal since we purge the whole userspace
		 * BO cache, which forces users that want to re-use a BO to
		 * restore its initial content.
		 * Ideally, we should purge entries one by one and retry
		 * after each to see if CMA allocation succeeds. Or even
		 * better, try to find an entry with at least the same
		 * size.
		 */
		vc4_bo_userspace_cache_purge(dev);
		cma_obj = drm_gem_cma_create(dev, size);
	}

	if (IS_ERR(cma_obj)) {
		struct drm_printer p = drm_info_printer(vc4->dev->dev);

		DRM_ERROR("Failed to allocate from CMA:\n");
		vc4_bo_stats_print(&p, vc4);
		return ERR_PTR(-ENOMEM);
	}
	bo = to_vc4_bo(&cma_obj->base);

	/* By default, BOs do not support the MADV ioctl. This will be enabled
	 * only on BOs that are exposed to userspace (V3D, V3D_SHADER and DUMB
	 * BOs).
	 */
	bo->madv = __VC4_MADV_NOTSUPP;

	mutex_lock(&vc4->bo_lock);
	vc4_bo_set_label(&cma_obj->base, type);
	mutex_unlock(&vc4->bo_lock);

	return bo;
}

int vc4_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct vc4_bo *bo = NULL;
	int ret;

	if (args->pitch < min_pitch)
		args->pitch = min_pitch;

	if (args->size < args->pitch * args->height)
		args->size = args->pitch * args->height;

	bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_DUMB);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	bo->madv = VC4_MADV_WILLNEED;

	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
	drm_gem_object_put_unlocked(&bo->base.base);

	return ret;
}

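/* Frees cached BOs that have been unused for more than a second,
 * oldest first, and re-arms the cache timer if newer entries remain.
 * Called with bo_lock held.
 */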
static void vc4_bo_cache_free_old(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long expire_time = jiffies - msecs_to_jiffies(1000);

	lockdep_assert_held(&vc4->bo_lock);

	while (!list_empty(&vc4->bo_cache.time_list)) {
		struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
						    struct vc4_bo, unref_head);
		if (time_before(expire_time, bo->free_time)) {
			mod_timer(&vc4->bo_cache.time_timer,
				  round_jiffies_up(jiffies +
						   msecs_to_jiffies(1000)));
			return;
		}

		vc4_bo_remove_from_cache(bo);
		vc4_bo_destroy(bo);
	}
}

/* Called on the last userspace/kernel unreference of the BO. Returns
 * it to the BO cache if possible, otherwise frees it.
 */
void vc4_free_object(struct drm_gem_object *gem_bo)
{
	struct drm_device *dev = gem_bo->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo = to_vc4_bo(gem_bo);
	struct list_head *cache_list;

	/* Remove the BO from the purgeable list. */
	mutex_lock(&bo->madv_lock);
	if (bo->madv == VC4_MADV_DONTNEED && !refcount_read(&bo->usecnt))
		vc4_bo_remove_from_purgeable_pool(bo);
	mutex_unlock(&bo->madv_lock);

	mutex_lock(&vc4->bo_lock);
	/* If the object references someone else's memory, we can't cache it.
	 */
	if (gem_bo->import_attach) {
		vc4_bo_destroy(bo);
		goto out;
	}

	/* Don't cache if it was publicly named. */
	if (gem_bo->name) {
		vc4_bo_destroy(bo);
		goto out;
	}

	/* If this object was partially constructed but CMA allocation
	 * had failed, just free it. Can also happen when the BO has been
	 * purged.
	 */
	if (!bo->base.vaddr) {
		vc4_bo_destroy(bo);
		goto out;
	}

	cache_list = vc4_get_cache_list_for_size(dev, gem_bo->size);
	if (!cache_list) {
		vc4_bo_destroy(bo);
		goto out;
	}

	if (bo->validated_shader) {
		kfree(bo->validated_shader->uniform_addr_offsets);
		kfree(bo->validated_shader->texture_samples);
		kfree(bo->validated_shader);
		bo->validated_shader = NULL;
	}

	/* Reset madv and usecnt before adding the BO to the cache. */
	bo->madv = __VC4_MADV_NOTSUPP;
	refcount_set(&bo->usecnt, 0);

	bo->t_format = false;
	bo->free_time = jiffies;
	list_add(&bo->size_head, cache_list);
	list_add(&bo->unref_head, &vc4->bo_cache.time_list);

	vc4_bo_set_label(&bo->base.base, VC4_BO_TYPE_KERNEL_CACHE);

	vc4_bo_cache_free_old(dev);

out:
	mutex_unlock(&vc4->bo_lock);
}

static void vc4_bo_cache_time_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, bo_cache.time_work);
	struct drm_device *dev = vc4->dev;

	mutex_lock(&vc4->bo_lock);
	vc4_bo_cache_free_old(dev);
	mutex_unlock(&vc4->bo_lock);
}

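/* Takes a reference on the BO's backing storage. A BO with a nonzero
 * usecnt is never purged, even if userspace marks it DONTNEED, so
 * every successful call must be paired with vc4_bo_dec_usecnt().
 */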
int vc4_bo_inc_usecnt(struct vc4_bo *bo)
{
	int ret;

	/* Fast path: if the BO is already retained by someone, no need to
	 * check the madv status.
	 */
	if (refcount_inc_not_zero(&bo->usecnt))
		return 0;

	mutex_lock(&bo->madv_lock);
	switch (bo->madv) {
	case VC4_MADV_WILLNEED:
		if (!refcount_inc_not_zero(&bo->usecnt))
			refcount_set(&bo->usecnt, 1);
		ret = 0;
		break;
	case VC4_MADV_DONTNEED:
		/* We shouldn't use a BO marked as purgeable if at least
		 * someone else retained its content by incrementing usecnt.
		 * Luckily the BO hasn't been purged yet, but something wrong
		 * is happening here. Just throw an error instead of
		 * authorizing this use case.
		 */
	case __VC4_MADV_PURGED:
		/* We can't use a purged BO. */
	default:
		/* Invalid madv value. */
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&bo->madv_lock);

	return ret;
}

void vc4_bo_dec_usecnt(struct vc4_bo *bo)
{
	/* Fast path: if the BO is still retained by someone, no need to test
	 * the madv value.
	 */
	if (refcount_dec_not_one(&bo->usecnt))
		return;

	mutex_lock(&bo->madv_lock);
	if (refcount_dec_and_test(&bo->usecnt) &&
	    bo->madv == VC4_MADV_DONTNEED)
		vc4_bo_add_to_purgeable_pool(bo);
	mutex_unlock(&bo->madv_lock);
}

static void vc4_bo_cache_time_timer(struct timer_list *t)
{
	struct vc4_dev *vc4 = from_timer(vc4, t, bo_cache.time_timer);

	schedule_work(&vc4->bo_cache.time_work);
}

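/* PRIME export: shader BOs are refused (their validated contents must
 * not be modifiable through a dma-buf), and the BO is pinned via
 * usecnt so it cannot be purged while exported.
 */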
struct dma_buf *
vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags)
{
	struct vc4_bo *bo = to_vc4_bo(obj);
	struct dma_buf *dmabuf;
	int ret;

	if (bo->validated_shader) {
		DRM_DEBUG("Attempting to export shader BO\n");
		return ERR_PTR(-EINVAL);
	}

	/* Note: as soon as the BO is exported it becomes unpurgeable, because
	 * no one ever decrements the usecnt even if the reference held by the
	 * exported BO is released. This shouldn't be a problem since we don't
	 * expect exported BOs to be marked as purgeable.
	 */
	ret = vc4_bo_inc_usecnt(bo);
	if (ret) {
		DRM_ERROR("Failed to increment BO usecnt\n");
		return ERR_PTR(ret);
	}

	dmabuf = drm_gem_prime_export(dev, obj, flags);
	if (IS_ERR(dmabuf))
		vc4_bo_dec_usecnt(bo);

	return dmabuf;
}

vm_fault_t vc4_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct vc4_bo *bo = to_vc4_bo(obj);

	/* The only reason we would end up here is when user-space accesses
	 * the BO's memory after it's been purged.
	 */
	mutex_lock(&bo->madv_lock);
	WARN_ON(bo->madv != __VC4_MADV_PURGED);
	mutex_unlock(&bo->madv_lock);

	return VM_FAULT_SIGBUS;
}

int vc4_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem_obj;
	unsigned long vm_pgoff;
	struct vc4_bo *bo;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	gem_obj = vma->vm_private_data;
	bo = to_vc4_bo(gem_obj);

	if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
		DRM_DEBUG("mmapping of shader BOs for writing not allowed.\n");
		return -EINVAL;
	}

	if (bo->madv != VC4_MADV_WILLNEED) {
		DRM_DEBUG("mmapping of %s BO not allowed\n",
			  bo->madv == VC4_MADV_DONTNEED ?
			  "purgeable" : "purged");
		return -EINVAL;
	}

	/*
	 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
	 * the whole buffer.
	 */
	vma->vm_flags &= ~VM_PFNMAP;

	/* This ->vm_pgoff dance is needed to make all parties happy:
	 * - dma_mmap_wc() uses ->vm_pgoff as an offset within the allocated
	 *   mem-region, hence the need to set it to zero (the value set by
	 *   the DRM core is a virtual offset encoding the GEM object-id)
	 * - the mmap() core logic needs ->vm_pgoff to be restored to its
	 *   initial value before returning from this function because it
	 *   encodes the offset of this GEM in the dev->anon_inode pseudo-file
	 *   and this information will be used when we invalidate userspace
	 *   mappings with drm_vma_node_unmap() (called from vc4_gem_purge()).
	 */
	vm_pgoff = vma->vm_pgoff;
	vma->vm_pgoff = 0;
	ret = dma_mmap_wc(bo->base.base.dev->dev, vma, bo->base.vaddr,
			  bo->base.paddr, vma->vm_end - vma->vm_start);
	vma->vm_pgoff = vm_pgoff;

	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}

int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
		DRM_DEBUG("mmapping of shader BOs for writing not allowed.\n");
		return -EINVAL;
	}

	return drm_gem_cma_prime_mmap(obj, vma);
}

void *vc4_prime_vmap(struct drm_gem_object *obj)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	if (bo->validated_shader) {
		DRM_DEBUG("mmapping of shader BOs not allowed.\n");
		return ERR_PTR(-EINVAL);
	}

	return drm_gem_cma_prime_vmap(obj);
}

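/* PRIME import: wrap the sg_table with the CMA helpers, then share
 * the exporter's reservation object so fences synchronize across
 * devices.
 */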
struct drm_gem_object *
vc4_prime_import_sg_table(struct drm_device *dev,
			  struct dma_buf_attachment *attach,
			  struct sg_table *sgt)
{
	struct drm_gem_object *obj;

	obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj))
		return obj;

	obj->resv = attach->dmabuf->resv;

	return obj;
}

int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_vc4_create_bo *args = data;
	struct vc4_bo *bo = NULL;
	int ret;

	/*
	 * We can't allocate from the BO cache, because the BOs don't
	 * get zeroed, and that might leak data between users.
	 */
	bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_V3D);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	bo->madv = VC4_MADV_WILLNEED;

	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
	drm_gem_object_put_unlocked(&bo->base.base);

	return ret;
}

int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_vc4_mmap_bo *args = data;
	struct drm_gem_object *gem_obj;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -EINVAL;
	}

	/* The mmap offset was set up at BO allocation time. */
	args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

	drm_gem_object_put_unlocked(gem_obj);
	return 0;
}

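/* Creates a shader BO. The contents are copied from userspace and
 * validated by vc4_validate_shader() before any handle exists, so
 * userspace never gets a window to modify a validated shader.
 */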
int
vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vc4_create_shader_bo *args = data;
	struct vc4_bo *bo = NULL;
	int ret;

	if (args->size == 0)
		return -EINVAL;

	if (args->size % sizeof(u64) != 0)
		return -EINVAL;

	if (args->flags != 0) {
		DRM_INFO("Unknown flags set: 0x%08x\n", args->flags);
		return -EINVAL;
	}

	if (args->pad != 0) {
		DRM_INFO("Pad set: 0x%08x\n", args->pad);
		return -EINVAL;
	}

	bo = vc4_bo_create(dev, args->size, true, VC4_BO_TYPE_V3D_SHADER);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	bo->madv = VC4_MADV_WILLNEED;

	if (copy_from_user(bo->base.vaddr,
			   (void __user *)(uintptr_t)args->data,
			   args->size)) {
		ret = -EFAULT;
		goto fail;
	}
	/* Clear the rest of the memory from allocating from the BO
	 * cache.
	 */
	memset(bo->base.vaddr + args->size, 0,
	       bo->base.base.size - args->size);

	bo->validated_shader = vc4_validate_shader(&bo->base);
	if (!bo->validated_shader) {
		ret = -EINVAL;
		goto fail;
	}

	/* We have to create the handle after validation, to avoid
	 * races for users doing things like mmapping the shader BO.
	 */
	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);

fail:
	drm_gem_object_put_unlocked(&bo->base.base);

	return ret;
}

/**
 * vc4_set_tiling_ioctl() - Sets the tiling modifier for a BO.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * The tiling state of the BO decides the default modifier of an fb if
 * no specific modifier was set by userspace, and the return value of
 * vc4_get_tiling_ioctl() (so that userspace can treat a BO it
 * received from dmabuf as the same tiling format as the producer
 * used).
 */
int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vc4_set_tiling *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;
	bool t_format;

	if (args->flags != 0)
		return -EINVAL;

	switch (args->modifier) {
	case DRM_FORMAT_MOD_NONE:
		t_format = false;
		break;
	case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED:
		t_format = true;
		break;
	default:
		return -EINVAL;
	}

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}
	bo = to_vc4_bo(gem_obj);
	bo->t_format = t_format;

	drm_gem_object_put_unlocked(gem_obj);

	return 0;
}

949 | ||
950 | /** | |
951 | * vc4_get_tiling_ioctl() - Gets the tiling modifier for a BO. | |
952 | * @dev: DRM device | |
953 | * @data: ioctl argument | |
954 | * @file_priv: DRM file for this fd | |
955 | * | |
956 | * Returns the tiling modifier for a BO as set by vc4_set_tiling_ioctl(). | |
957 | */ | |
958 | int vc4_get_tiling_ioctl(struct drm_device *dev, void *data, | |
959 | struct drm_file *file_priv) | |
960 | { | |
961 | struct drm_vc4_get_tiling *args = data; | |
962 | struct drm_gem_object *gem_obj; | |
963 | struct vc4_bo *bo; | |
964 | ||
965 | if (args->flags != 0 || args->modifier != 0) | |
966 | return -EINVAL; | |
967 | ||
968 | gem_obj = drm_gem_object_lookup(file_priv, args->handle); | |
969 | if (!gem_obj) { | |
fb95992a | 970 | DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle); |
83753117 EA |
971 | return -ENOENT; |
972 | } | |
973 | bo = to_vc4_bo(gem_obj); | |
974 | ||
975 | if (bo->t_format) | |
976 | args->modifier = DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED; | |
977 | else | |
978 | args->modifier = DRM_FORMAT_MOD_NONE; | |
979 | ||
1d5494e9 | 980 | drm_gem_object_put_unlocked(gem_obj); |
83753117 EA |
981 | |
982 | return 0; | |
983 | } | |
984 | ||
int vc4_bo_cache_init(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int i;

	/* Create the initial set of BO labels that the kernel will
	 * use. This lets us avoid a bunch of string reallocation in
	 * the kernel's draw and BO allocation paths.
	 */
	vc4->bo_labels = kcalloc(VC4_BO_TYPE_COUNT, sizeof(*vc4->bo_labels),
				 GFP_KERNEL);
	if (!vc4->bo_labels)
		return -ENOMEM;
	vc4->num_labels = VC4_BO_TYPE_COUNT;

	BUILD_BUG_ON(ARRAY_SIZE(bo_type_names) != VC4_BO_TYPE_COUNT);
	for (i = 0; i < VC4_BO_TYPE_COUNT; i++)
		vc4->bo_labels[i].name = bo_type_names[i];

	mutex_init(&vc4->bo_lock);

	vc4_debugfs_add_file(dev, "bo_stats", vc4_bo_stats_debugfs, NULL);

	INIT_LIST_HEAD(&vc4->bo_cache.time_list);

	INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
	timer_setup(&vc4->bo_cache.time_timer, vc4_bo_cache_time_timer, 0);

	return 0;
}

void vc4_bo_cache_destroy(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int i;

	del_timer(&vc4->bo_cache.time_timer);
	cancel_work_sync(&vc4->bo_cache.time_work);

	vc4_bo_cache_purge(dev);

	for (i = 0; i < vc4->num_labels; i++) {
		if (vc4->bo_labels[i].num_allocated) {
			DRM_ERROR("Destroying BO cache with %d %s "
				  "BOs still allocated\n",
				  vc4->bo_labels[i].num_allocated,
				  vc4->bo_labels[i].name);
		}

		if (is_user_label(i))
			kfree(vc4->bo_labels[i].name);
	}
	kfree(vc4->bo_labels);
}

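/* Implements DRM_IOCTL_VC4_LABEL_BO: attaches a userspace-supplied
 * name to a BO for the bo_stats accounting. Ownership of the
 * duplicated name string passes to vc4_get_user_label().
 */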
int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_label_bo *args = data;
	char *name;
	struct drm_gem_object *gem_obj;
	int ret = 0, label;

	if (!args->len)
		return -EINVAL;

	name = strndup_user(u64_to_user_ptr(args->name), args->len + 1);
	if (IS_ERR(name))
		return PTR_ERR(name);

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
		kfree(name);
		return -ENOENT;
	}

	mutex_lock(&vc4->bo_lock);
	label = vc4_get_user_label(vc4, name);
	if (label != -1)
		vc4_bo_set_label(gem_obj, label);
	else
		ret = -ENOMEM;
	mutex_unlock(&vc4->bo_lock);

	drm_gem_object_put_unlocked(gem_obj);

	return ret;
}