/*
 * Copyright © 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/**
 * DOC: VC4 GEM BO management support
 *
 * The VC4 GPU architecture (both scanout and rendering) has direct
 * access to system memory with no MMU in between. To support it, we
 * use the GEM CMA helper functions to allocate contiguous ranges of
 * physical memory for our BOs.
 *
 * Since the CMA allocator is very slow, we keep a cache of recently
 * freed BOs around so that the kernel's allocation of objects for 3D
 * rendering can return quickly.
 */

#include <linux/dma-buf.h>

#include "vc4_drv.h"
#include "uapi/drm/vc4_drm.h"

static const char * const bo_type_names[] = {
	"kernel",
	"V3D",
	"V3D shader",
	"dumb",
	"binner",
	"RCL",
	"BCL",
	"kernel BO cache",
};

static bool is_user_label(int label)
{
	return label >= VC4_BO_TYPE_COUNT;
}

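/* Dumps the per-label BO allocation counts and sizes to the kernel
 * log, for diagnosing allocation failures.
 */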
static void vc4_bo_stats_dump(struct vc4_dev *vc4)
{
	int i;

	for (i = 0; i < vc4->num_labels; i++) {
		if (!vc4->bo_labels[i].num_allocated)
			continue;

		DRM_INFO("%30s: %6dkb BOs (%d)\n",
			 vc4->bo_labels[i].name,
			 vc4->bo_labels[i].size_allocated / 1024,
			 vc4->bo_labels[i].num_allocated);
	}
}

#ifdef CONFIG_DEBUG_FS
int vc4_bo_stats_debugfs(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int i;

	mutex_lock(&vc4->bo_lock);
	for (i = 0; i < vc4->num_labels; i++) {
		if (!vc4->bo_labels[i].num_allocated)
			continue;

		seq_printf(m, "%30s: %6dkb BOs (%d)\n",
			   vc4->bo_labels[i].name,
			   vc4->bo_labels[i].size_allocated / 1024,
			   vc4->bo_labels[i].num_allocated);
	}
	mutex_unlock(&vc4->bo_lock);

	return 0;
}
#endif

/* Takes ownership of *name and returns the appropriate slot for it in
 * the bo_labels[] array, extending it as necessary.
 *
 * This is inefficient and could use a hash table instead of walking
 * an array and strcmp()ing. However, the assumption is that user
 * labeling will be infrequent (scanout buffers and other long-lived
 * objects, or debug driver builds), so we can live with it for now.
 */
static int vc4_get_user_label(struct vc4_dev *vc4, const char *name)
{
	int i;
	int free_slot = -1;

	for (i = 0; i < vc4->num_labels; i++) {
		if (!vc4->bo_labels[i].name) {
			free_slot = i;
		} else if (strcmp(vc4->bo_labels[i].name, name) == 0) {
			kfree(name);
			return i;
		}
	}

	if (free_slot != -1) {
		WARN_ON(vc4->bo_labels[free_slot].num_allocated != 0);
		vc4->bo_labels[free_slot].name = name;
		return free_slot;
	} else {
		u32 new_label_count = vc4->num_labels + 1;
		struct vc4_label *new_labels =
			krealloc(vc4->bo_labels,
				 new_label_count * sizeof(*new_labels),
				 GFP_KERNEL);

		if (!new_labels) {
			kfree(name);
			return -1;
		}

		free_slot = vc4->num_labels;
		vc4->bo_labels = new_labels;
		vc4->num_labels = new_label_count;

		vc4->bo_labels[free_slot].name = name;
		vc4->bo_labels[free_slot].num_allocated = 0;
		vc4->bo_labels[free_slot].size_allocated = 0;

		return free_slot;
	}
}

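/* Moves a BO's allocation stats from its current label to @label.
 * Passing -1 accounts the BO as freed without assigning a new label,
 * for use from vc4_bo_destroy().
 */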
static void vc4_bo_set_label(struct drm_gem_object *gem_obj, int label)
{
	struct vc4_bo *bo = to_vc4_bo(gem_obj);
	struct vc4_dev *vc4 = to_vc4_dev(gem_obj->dev);

	lockdep_assert_held(&vc4->bo_lock);

	if (label != -1) {
		vc4->bo_labels[label].num_allocated++;
		vc4->bo_labels[label].size_allocated += gem_obj->size;
	}

	vc4->bo_labels[bo->label].num_allocated--;
	vc4->bo_labels[bo->label].size_allocated -= gem_obj->size;

	if (vc4->bo_labels[bo->label].num_allocated == 0 &&
	    is_user_label(bo->label)) {
		/* Free user BO label slots on last unreference.
		 * Slots are just where we track the stats for a given
		 * name, and once a name is unused we can reuse that
		 * slot.
		 */
		kfree(vc4->bo_labels[bo->label].name);
		vc4->bo_labels[bo->label].name = NULL;
	}

	bo->label = label;
}

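/* The BO cache keys its per-size lists by the BO's size in pages,
 * starting at one page.
 */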
static uint32_t bo_page_index(size_t size)
{
	return (size / PAGE_SIZE) - 1;
}

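/* Actually frees the BO's backing CMA memory and its metadata, as
 * opposed to returning it to the BO cache.
 */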
static void vc4_bo_destroy(struct vc4_bo *bo)
{
	struct drm_gem_object *obj = &bo->base.base;
	struct vc4_dev *vc4 = to_vc4_dev(obj->dev);

	lockdep_assert_held(&vc4->bo_lock);

	vc4_bo_set_label(obj, -1);

	if (bo->validated_shader) {
		kfree(bo->validated_shader->texture_samples);
		kfree(bo->validated_shader);
		bo->validated_shader = NULL;
	}

	reservation_object_fini(&bo->_resv);

	drm_gem_cma_free_object(obj);
}

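/* Drops a cached BO from both the per-size list and the time-ordered
 * list. Must be called with bo_lock held.
 */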
static void vc4_bo_remove_from_cache(struct vc4_bo *bo)
{
	struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);

	lockdep_assert_held(&vc4->bo_lock);
	list_del(&bo->unref_head);
	list_del(&bo->size_head);
}

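/* Returns the cache list head for BOs of the given size, growing the
 * size_list array if this is the first BO of that size. Returns NULL
 * if the array could not be reallocated.
 */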
static struct list_head *vc4_get_cache_list_for_size(struct drm_device *dev,
						     size_t size)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t page_index = bo_page_index(size);

	if (vc4->bo_cache.size_list_size <= page_index) {
		uint32_t new_size = max(vc4->bo_cache.size_list_size * 2,
					page_index + 1);
		struct list_head *new_list;
		uint32_t i;

		new_list = kmalloc_array(new_size, sizeof(struct list_head),
					 GFP_KERNEL);
		if (!new_list)
			return NULL;

		/* Rebase the old cached BO lists to their new list
		 * head locations.
		 */
		for (i = 0; i < vc4->bo_cache.size_list_size; i++) {
			struct list_head *old_list =
				&vc4->bo_cache.size_list[i];

			if (list_empty(old_list))
				INIT_LIST_HEAD(&new_list[i]);
			else
				list_replace(old_list, &new_list[i]);
		}
		/* And initialize the brand new BO list heads. */
		for (i = vc4->bo_cache.size_list_size; i < new_size; i++)
			INIT_LIST_HEAD(&new_list[i]);

		kfree(vc4->bo_cache.size_list);
		vc4->bo_cache.size_list = new_list;
		vc4->bo_cache.size_list_size = new_size;
	}

	return &vc4->bo_cache.size_list[page_index];
}

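/* Frees every BO in the cache, used when CMA has run out of space and
 * on driver teardown.
 */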
static void vc4_bo_cache_purge(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mutex_lock(&vc4->bo_lock);
	while (!list_empty(&vc4->bo_cache.time_list)) {
		struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
						    struct vc4_bo, unref_head);
		vc4_bo_remove_from_cache(bo);
		vc4_bo_destroy(bo);
	}
	mutex_unlock(&vc4->bo_lock);
}

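/* Tries to satisfy an allocation from the cache. On a hit, the BO's
 * refcount is revived with kref_init() and it is relabeled for its
 * new user.
 */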
static struct vc4_bo *vc4_bo_get_from_cache(struct drm_device *dev,
					    uint32_t size,
					    enum vc4_kernel_bo_type type)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t page_index = bo_page_index(size);
	struct vc4_bo *bo = NULL;

	size = roundup(size, PAGE_SIZE);

	mutex_lock(&vc4->bo_lock);
	if (page_index >= vc4->bo_cache.size_list_size)
		goto out;

	if (list_empty(&vc4->bo_cache.size_list[page_index]))
		goto out;

	bo = list_first_entry(&vc4->bo_cache.size_list[page_index],
			      struct vc4_bo, size_head);
	vc4_bo_remove_from_cache(bo);
	kref_init(&bo->base.base.refcount);

out:
	if (bo)
		vc4_bo_set_label(&bo->base.base, type);
	mutex_unlock(&vc4->bo_lock);
	return bo;
}

/**
 * vc4_create_object - Implementation of driver->gem_create_object.
 * @dev: DRM device
 * @size: Size in bytes of the memory the object will reference
 *
 * This lets the CMA helpers allocate object structs for us, and keep
 * our BO stats correct.
 */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&vc4->bo_lock);
	bo->label = VC4_BO_TYPE_KERNEL;
	vc4->bo_labels[VC4_BO_TYPE_KERNEL].num_allocated++;
	vc4->bo_labels[VC4_BO_TYPE_KERNEL].size_allocated += size;
	mutex_unlock(&vc4->bo_lock);
	bo->resv = &bo->_resv;
	reservation_object_init(bo->resv);

	return &bo->base.base;
}

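/**
 * vc4_bo_create() - Allocates a BO, preferring the kernel BO cache.
 * @dev: DRM device
 * @unaligned_size: Size in bytes of the memory the object will reference
 * @allow_unzeroed: Skip clearing a cached BO's previous contents, for
 *                  callers that will overwrite them anyway
 * @type: Label under which to account the allocation
 *
 * On a cache miss this falls back to a fresh CMA allocation, purging
 * the cache and retrying once if CMA is exhausted.
 */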
struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
			     bool allow_unzeroed, enum vc4_kernel_bo_type type)
{
	size_t size = roundup(unaligned_size, PAGE_SIZE);
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_gem_cma_object *cma_obj;
	struct vc4_bo *bo;

	if (size == 0)
		return ERR_PTR(-EINVAL);

	/* First, try to get a vc4_bo from the kernel BO cache. */
	bo = vc4_bo_get_from_cache(dev, size, type);
	if (bo) {
		if (!allow_unzeroed)
			memset(bo->base.vaddr, 0, bo->base.base.size);
		return bo;
	}

	cma_obj = drm_gem_cma_create(dev, size);
	if (IS_ERR(cma_obj)) {
		/*
		 * If we've run out of CMA memory, kill the cache of
		 * CMA allocations we've got lying around and try again.
		 */
		vc4_bo_cache_purge(dev);

		cma_obj = drm_gem_cma_create(dev, size);
		if (IS_ERR(cma_obj)) {
			DRM_ERROR("Failed to allocate from CMA:\n");
			vc4_bo_stats_dump(vc4);
			return ERR_PTR(-ENOMEM);
		}
	}
	bo = to_vc4_bo(&cma_obj->base);

	mutex_lock(&vc4->bo_lock);
	vc4_bo_set_label(&cma_obj->base, type);
	mutex_unlock(&vc4->bo_lock);

	return bo;
}

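/* Implementation of the dumb-buffer allocation path
 * (DRM_IOCTL_MODE_CREATE_DUMB): round the pitch and size up, then
 * allocate a BO and return a handle to it.
 */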
int vc4_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct vc4_bo *bo = NULL;
	int ret;

	if (args->pitch < min_pitch)
		args->pitch = min_pitch;

	if (args->size < args->pitch * args->height)
		args->size = args->pitch * args->height;

	bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_DUMB);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
	drm_gem_object_put_unlocked(&bo->base.base);

	return ret;
}

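/* Destroys cached BOs that have sat unused for more than a second,
 * re-arming the timer if newer entries remain. Must be called with
 * bo_lock held.
 */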
static void vc4_bo_cache_free_old(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long expire_time = jiffies - msecs_to_jiffies(1000);

	lockdep_assert_held(&vc4->bo_lock);

	while (!list_empty(&vc4->bo_cache.time_list)) {
		struct vc4_bo *bo = list_last_entry(&vc4->bo_cache.time_list,
						    struct vc4_bo, unref_head);
		if (time_before(expire_time, bo->free_time)) {
			mod_timer(&vc4->bo_cache.time_timer,
				  round_jiffies_up(jiffies +
						   msecs_to_jiffies(1000)));
			return;
		}

		vc4_bo_remove_from_cache(bo);
		vc4_bo_destroy(bo);
	}
}

/* Called on the last userspace/kernel unreference of the BO. Returns
 * it to the BO cache if possible, otherwise frees it.
 */
void vc4_free_object(struct drm_gem_object *gem_bo)
{
	struct drm_device *dev = gem_bo->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_bo *bo = to_vc4_bo(gem_bo);
	struct list_head *cache_list;

	mutex_lock(&vc4->bo_lock);
	/* If the object references someone else's memory, we can't cache it.
	 */
	if (gem_bo->import_attach) {
		vc4_bo_destroy(bo);
		goto out;
	}

	/* Don't cache if it was publicly named. */
	if (gem_bo->name) {
		vc4_bo_destroy(bo);
		goto out;
	}

	/* If this object was partially constructed but CMA allocation
	 * had failed, just free it.
	 */
	if (!bo->base.vaddr) {
		vc4_bo_destroy(bo);
		goto out;
	}

	cache_list = vc4_get_cache_list_for_size(dev, gem_bo->size);
	if (!cache_list) {
		vc4_bo_destroy(bo);
		goto out;
	}

	if (bo->validated_shader) {
		kfree(bo->validated_shader->texture_samples);
		kfree(bo->validated_shader);
		bo->validated_shader = NULL;
	}

	bo->t_format = false;
	bo->free_time = jiffies;
	list_add(&bo->size_head, cache_list);
	list_add(&bo->unref_head, &vc4->bo_cache.time_list);

	vc4_bo_set_label(&bo->base.base, VC4_BO_TYPE_KERNEL_CACHE);

	vc4_bo_cache_free_old(dev);

out:
	mutex_unlock(&vc4->bo_lock);
}

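/* The cache expiry timer fires in atomic context, where the bo_lock
 * mutex can't be taken, so it schedules this work item to do the
 * actual freeing.
 */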
static void vc4_bo_cache_time_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, bo_cache.time_work);
	struct drm_device *dev = vc4->dev;

	mutex_lock(&vc4->bo_lock);
	vc4_bo_cache_free_old(dev);
	mutex_unlock(&vc4->bo_lock);
}

static void vc4_bo_cache_time_timer(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	schedule_work(&vc4->bo_cache.time_work);
}

struct reservation_object *vc4_prime_res_obj(struct drm_gem_object *obj)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	return bo->resv;
}

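/* Refuse to export validated shader BOs: their contents have passed
 * the shader validator and must not be modifiable by other processes.
 */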
struct dma_buf *
vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	if (bo->validated_shader) {
		DRM_ERROR("Attempting to export shader BO\n");
		return ERR_PTR(-EINVAL);
	}

	return drm_gem_prime_export(dev, obj, flags);
}

int vc4_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	gem_obj = vma->vm_private_data;
	bo = to_vc4_bo(gem_obj);

	if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
		DRM_ERROR("mmapping of shader BOs for writing not allowed.\n");
		return -EINVAL;
	}

	/*
	 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
	 * the whole buffer.
	 */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	ret = dma_mmap_wc(bo->base.base.dev->dev, vma, bo->base.vaddr,
			  bo->base.paddr, vma->vm_end - vma->vm_start);
	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}

int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) {
		DRM_ERROR("mmapping of shader BOs for writing not allowed.\n");
		return -EINVAL;
	}

	return drm_gem_cma_prime_mmap(obj, vma);
}

void *vc4_prime_vmap(struct drm_gem_object *obj)
{
	struct vc4_bo *bo = to_vc4_bo(obj);

	if (bo->validated_shader) {
		DRM_ERROR("vmapping of shader BOs not allowed.\n");
		return ERR_PTR(-EINVAL);
	}

	return drm_gem_cma_prime_vmap(obj);
}

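/* Wraps the CMA helper import and points the BO at the exporter's
 * reservation object, so fencing covers both sides of the shared
 * buffer.
 */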
struct drm_gem_object *
vc4_prime_import_sg_table(struct drm_device *dev,
			  struct dma_buf_attachment *attach,
			  struct sg_table *sgt)
{
	struct drm_gem_object *obj;
	struct vc4_bo *bo;

	obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj))
		return obj;

	bo = to_vc4_bo(obj);
	bo->resv = attach->dmabuf->resv;

	return obj;
}

int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_vc4_create_bo *args = data;
	struct vc4_bo *bo = NULL;
	int ret;

	/*
	 * We can't allocate from the BO cache, because the BOs don't
	 * get zeroed, and that might leak data between users.
	 */
	bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_V3D);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
	drm_gem_object_put_unlocked(&bo->base.base);

	return ret;
}

int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_vc4_mmap_bo *args = data;
	struct drm_gem_object *gem_obj;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
		return -EINVAL;
	}

	/* The mmap offset was set up at BO allocation time. */
	args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

	drm_gem_object_put_unlocked(gem_obj);
	return 0;
}

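/* Allocates a shader BO, copies the user's shader code into it, and
 * runs the validator over the contents before any handle exists that
 * userspace could use to modify them.
 */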
int
vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vc4_create_shader_bo *args = data;
	struct vc4_bo *bo = NULL;
	int ret;

	if (args->size == 0)
		return -EINVAL;

	if (args->size % sizeof(u64) != 0)
		return -EINVAL;

	if (args->flags != 0) {
		DRM_INFO("Unknown flags set: 0x%08x\n", args->flags);
		return -EINVAL;
	}

	if (args->pad != 0) {
		DRM_INFO("Pad set: 0x%08x\n", args->pad);
		return -EINVAL;
	}

	bo = vc4_bo_create(dev, args->size, true, VC4_BO_TYPE_V3D_SHADER);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	if (copy_from_user(bo->base.vaddr,
			   (void __user *)(uintptr_t)args->data,
			   args->size)) {
		ret = -EFAULT;
		goto fail;
	}
	/* Clear the rest of the memory, since the allocation came from
	 * the BO cache without being zeroed.
	 */
	memset(bo->base.vaddr + args->size, 0,
	       bo->base.base.size - args->size);

	bo->validated_shader = vc4_validate_shader(&bo->base);
	if (!bo->validated_shader) {
		ret = -EINVAL;
		goto fail;
	}

	/* We have to create the handle after validation, to avoid
	 * races for users doing things like mmapping the shader BO.
	 */
	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);

fail:
	drm_gem_object_put_unlocked(&bo->base.base);

	return ret;
}

/**
 * vc4_set_tiling_ioctl() - Sets the tiling modifier for a BO.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * The tiling state of the BO decides the default modifier of an fb if
 * no specific modifier was set by userspace, and the return value of
 * vc4_get_tiling_ioctl() (so that userspace can treat a BO it
 * received from dmabuf as the same tiling format as the producer
 * used).
 */
int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vc4_set_tiling *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;
	bool t_format;

	if (args->flags != 0)
		return -EINVAL;

	switch (args->modifier) {
	case DRM_FORMAT_MOD_NONE:
		t_format = false;
		break;
	case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED:
		t_format = true;
		break;
	default:
		return -EINVAL;
	}

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}
	bo = to_vc4_bo(gem_obj);
	bo->t_format = t_format;

	drm_gem_object_put_unlocked(gem_obj);

	return 0;
}

/**
 * vc4_get_tiling_ioctl() - Gets the tiling modifier for a BO.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * Returns the tiling modifier for a BO as set by vc4_set_tiling_ioctl().
 */
int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vc4_get_tiling *args = data;
	struct drm_gem_object *gem_obj;
	struct vc4_bo *bo;

	if (args->flags != 0 || args->modifier != 0)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}
	bo = to_vc4_bo(gem_obj);

	if (bo->t_format)
		args->modifier = DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
	else
		args->modifier = DRM_FORMAT_MOD_NONE;

	drm_gem_object_put_unlocked(gem_obj);

	return 0;
}

int vc4_bo_cache_init(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int i;

	/* Create the initial set of BO labels that the kernel will
	 * use. This lets us avoid a bunch of string reallocation in
	 * the kernel's draw and BO allocation paths.
	 */
	vc4->bo_labels = kcalloc(VC4_BO_TYPE_COUNT, sizeof(*vc4->bo_labels),
				 GFP_KERNEL);
	if (!vc4->bo_labels)
		return -ENOMEM;
	vc4->num_labels = VC4_BO_TYPE_COUNT;

	BUILD_BUG_ON(ARRAY_SIZE(bo_type_names) != VC4_BO_TYPE_COUNT);
	for (i = 0; i < VC4_BO_TYPE_COUNT; i++)
		vc4->bo_labels[i].name = bo_type_names[i];

	mutex_init(&vc4->bo_lock);

	INIT_LIST_HEAD(&vc4->bo_cache.time_list);

	INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
	setup_timer(&vc4->bo_cache.time_timer,
		    vc4_bo_cache_time_timer,
		    (unsigned long)dev);

	return 0;
}

void vc4_bo_cache_destroy(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int i;

	del_timer(&vc4->bo_cache.time_timer);
	cancel_work_sync(&vc4->bo_cache.time_work);

	vc4_bo_cache_purge(dev);

	for (i = 0; i < vc4->num_labels; i++) {
		if (vc4->bo_labels[i].num_allocated) {
			DRM_ERROR("Destroying BO cache with %d %s "
				  "BOs still allocated\n",
				  vc4->bo_labels[i].num_allocated,
				  vc4->bo_labels[i].name);
		}

		if (is_user_label(i))
			kfree(vc4->bo_labels[i].name);
	}
	kfree(vc4->bo_labels);
}

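/* Attaches a user-supplied name to a BO for the allocation stats:
 * copies the string in from userspace, finds or creates a matching
 * label slot, and moves the BO's accounting over to it.
 */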
int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_label_bo *args = data;
	char *name;
	struct drm_gem_object *gem_obj;
	int ret = 0, label;

	if (!args->len)
		return -EINVAL;

	name = strndup_user(u64_to_user_ptr(args->name), args->len + 1);
	if (IS_ERR(name))
		return PTR_ERR(name);

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
		kfree(name);
		return -ENOENT;
	}

	mutex_lock(&vc4->bo_lock);
	label = vc4_get_user_label(vc4, name);
	if (label != -1)
		vc4_bo_set_label(gem_obj, label);
	else
		ret = -ENOMEM;
	mutex_unlock(&vc4->bo_lock);

	drm_gem_object_unreference_unlocked(gem_obj);

	return ret;
}