/*
 * Copyright 2011 Red Hat, Inc.
 * Copyright © 2014 The Chromium OS Authors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software")
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Adam Jackson <ajax@redhat.com>
 *	Ben Widawsky <ben@bwidawsk.net>
 */

/*
 * This is vgem, a (non-hardware-backed) GEM service. This is used by Mesa's
 * software renderer and the X server for efficient buffer sharing.
 */
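
/*
 * Illustrative userspace usage (a sketch only, not part of this driver;
 * the device node path and buffer dimensions below are assumptions):
 *
 *	int fd = open("/dev/dri/card0", O_RDWR);
 *	struct drm_mode_create_dumb create = {
 *		.width = 1024, .height = 1, .bpp = 8,
 *	};
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *
 *	struct drm_mode_map_dumb map = { .handle = create.handle };
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	void *ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, map.offset);
 *
 * The handle can then be exported for sharing with other drivers via
 * DRM_IOCTL_PRIME_HANDLE_TO_FD.
 */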

#include <linux/module.h>
#include <linux/ramfs.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include "vgem_drv.h"

#define DRIVER_NAME	"vgem"
#define DRIVER_DESC	"Virtual GEM provider"
#define DRIVER_DATE	"20120112"
#define DRIVER_MAJOR	1
#define DRIVER_MINOR	0

static struct vgem_device {
	struct drm_device drm;
	struct platform_device *platform;
} *vgem_device;

static void vgem_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);

	kvfree(vgem_obj->pages);
	mutex_destroy(&vgem_obj->pages_lock);

	if (obj->import_attach)
		drm_prime_gem_destroy(obj, vgem_obj->table);

	drm_gem_object_release(obj);
	kfree(vgem_obj);
}

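/*
 * Page fault handler for mmaps of vgem objects: hand back an already
 * pinned page if we have one, otherwise fault the page in from the
 * backing shmem file.
 */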
static int vgem_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_vgem_gem_object *obj = vma->vm_private_data;
	/* We don't use vmf->pgoff since that has the fake offset */
	unsigned long vaddr = vmf->address;
	int ret;
	loff_t num_pages;
	pgoff_t page_offset;
	page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;

	num_pages = DIV_ROUND_UP(obj->base.size, PAGE_SIZE);

	if (page_offset >= num_pages)
		return VM_FAULT_SIGBUS;

	ret = -ENOENT;
	mutex_lock(&obj->pages_lock);
	if (obj->pages) {
		get_page(obj->pages[page_offset]);
		vmf->page = obj->pages[page_offset];
		ret = 0;
	}
	mutex_unlock(&obj->pages_lock);
	if (ret) {
		struct page *page;

		page = shmem_read_mapping_page(
					file_inode(obj->base.filp)->i_mapping,
					page_offset);
		if (!IS_ERR(page)) {
			vmf->page = page;
			ret = 0;
		} else switch (PTR_ERR(page)) {
			case -ENOSPC:
			case -ENOMEM:
				ret = VM_FAULT_OOM;
				break;
			case -EBUSY:
				ret = VM_FAULT_RETRY;
				break;
			case -EFAULT:
			case -EINVAL:
				ret = VM_FAULT_SIGBUS;
				break;
			default:
				WARN_ON(PTR_ERR(page));
				ret = VM_FAULT_SIGBUS;
				break;
		}

	}
	return ret;
}

static const struct vm_operations_struct vgem_gem_vm_ops = {
	.fault = vgem_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static int vgem_open(struct drm_device *dev, struct drm_file *file)
{
	struct vgem_file *vfile;
	int ret;

	vfile = kzalloc(sizeof(*vfile), GFP_KERNEL);
	if (!vfile)
		return -ENOMEM;

	file->driver_priv = vfile;

	ret = vgem_fence_open(vfile);
	if (ret) {
		kfree(vfile);
		return ret;
	}

	return 0;
}

static void vgem_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct vgem_file *vfile = file->driver_priv;

	vgem_fence_close(vfile);
	kfree(vfile);
}

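/*
 * Allocate a shmem-backed GEM object of at least @size bytes (rounded up
 * to a whole number of pages). The backing pages are only populated on
 * demand, either by the fault handler or by vgem_pin_pages().
 */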
static struct drm_vgem_gem_object *__vgem_gem_create(struct drm_device *dev,
						      unsigned long size)
{
	struct drm_vgem_gem_object *obj;
	int ret;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	ret = drm_gem_object_init(dev, &obj->base, roundup(size, PAGE_SIZE));
	if (ret) {
		kfree(obj);
		return ERR_PTR(ret);
	}

	mutex_init(&obj->pages_lock);

	return obj;
}

static void __vgem_gem_destroy(struct drm_vgem_gem_object *obj)
{
	drm_gem_object_release(&obj->base);
	kfree(obj);
}

static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
					      struct drm_file *file,
					      unsigned int *handle,
					      unsigned long size)
{
	struct drm_vgem_gem_object *obj;
	int ret;

	obj = __vgem_gem_create(dev, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	ret = drm_gem_handle_create(file, &obj->base, handle);
	if (ret) {
		/* no handle was created; dropping our ref frees the object */
		drm_gem_object_put_unlocked(&obj->base);
		return ERR_PTR(ret);
	}

	/* the handle now holds a reference; drop the creation reference */
	drm_gem_object_put_unlocked(&obj->base);

	return &obj->base;
}

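/*
 * Dumb-buffer allocation: derive the byte pitch from width and bpp,
 * allocate height * pitch bytes and return the new handle together with
 * the actual (page-aligned) size.
 */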
static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
				struct drm_mode_create_dumb *args)
{
	struct drm_gem_object *gem_object;
	u64 pitch, size;

	pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
	size = args->height * pitch;
	if (size == 0)
		return -EINVAL;

	gem_object = vgem_gem_create(dev, file, &args->handle, size);
	if (IS_ERR(gem_object))
		return PTR_ERR(gem_object);

	args->size = gem_object->size;
	args->pitch = pitch;

	DRM_DEBUG_DRIVER("Created object of size %lld\n", size);

	return 0;
}

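/*
 * Report the fake mmap offset for a handle; userspace passes this offset
 * to mmap() on the DRM fd in order to map the buffer.
 */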
static int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
			     uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	if (!obj->filp) {
		ret = -EINVAL;
		goto unref;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto unref;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
unref:
	drm_gem_object_put_unlocked(obj);

	return ret;
}

static struct drm_ioctl_desc vgem_ioctls[] = {
	DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
};

static int vgem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long flags = vma->vm_flags;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	/* Keep the WC mmapping set by drm_gem_mmap() but our pages
	 * are ordinary and not special.
	 */
	vma->vm_flags = flags | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
}

static const struct file_operations vgem_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.mmap = vgem_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.unlocked_ioctl = drm_ioctl,
	.compat_ioctl = drm_compat_ioctl,
	.release = drm_release,
};

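/*
 * Pin the backing pages of @bo, populating them from shmem on first use.
 * Pins are refcounted via pages_pin_count and must be balanced by
 * vgem_unpin_pages().
 */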
static struct page **vgem_pin_pages(struct drm_vgem_gem_object *bo)
{
	mutex_lock(&bo->pages_lock);
	if (bo->pages_pin_count++ == 0) {
		struct page **pages;

		pages = drm_gem_get_pages(&bo->base);
		if (IS_ERR(pages)) {
			bo->pages_pin_count--;
			mutex_unlock(&bo->pages_lock);
			return pages;
		}

		bo->pages = pages;
	}
	mutex_unlock(&bo->pages_lock);

	return bo->pages;
}

static void vgem_unpin_pages(struct drm_vgem_gem_object *bo)
{
	mutex_lock(&bo->pages_lock);
	if (--bo->pages_pin_count == 0) {
		drm_gem_put_pages(&bo->base, bo->pages, true, true);
		bo->pages = NULL;
	}
	mutex_unlock(&bo->pages_lock);
}

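/* dma-buf export: pin the backing pages while the buffer is exported. */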
static int vgem_prime_pin(struct drm_gem_object *obj)
{
	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
	long n_pages = obj->size >> PAGE_SHIFT;
	struct page **pages;

	pages = vgem_pin_pages(bo);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/* Flush the object from the CPU cache so that importers can rely
	 * on coherent indirect access via the exported dma-address.
	 */
	drm_clflush_pages(pages, n_pages);

	return 0;
}

static void vgem_prime_unpin(struct drm_gem_object *obj)
{
	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);

	vgem_unpin_pages(bo);
}

static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);

	return drm_prime_pages_to_sg(bo->pages, bo->base.size >> PAGE_SHIFT);
}

static struct drm_gem_object *vgem_prime_import(struct drm_device *dev,
						struct dma_buf *dma_buf)
{
	struct vgem_device *vgem = container_of(dev, typeof(*vgem), drm);

	return drm_gem_prime_import_dev(dev, dma_buf, &vgem->platform->dev);
}

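/*
 * dma-buf import: wrap the imported sg_table in a new vgem object and
 * build a page array from it. Imported pages stay pinned for the lifetime
 * of the object, since the exporter owns them.
 */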
static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev,
			struct dma_buf_attachment *attach, struct sg_table *sg)
{
	struct drm_vgem_gem_object *obj;
	int npages;

	obj = __vgem_gem_create(dev, attach->dmabuf->size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE;

	obj->table = sg;
	obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!obj->pages) {
		__vgem_gem_destroy(obj);
		return ERR_PTR(-ENOMEM);
	}

	obj->pages_pin_count++; /* perma-pinned */
	drm_prime_sg_to_page_addr_arrays(obj->table, obj->pages, NULL,
					 npages);
	return &obj->base;
}

static void *vgem_prime_vmap(struct drm_gem_object *obj)
{
	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
	long n_pages = obj->size >> PAGE_SHIFT;
	struct page **pages;

	pages = vgem_pin_pages(bo);
	if (IS_ERR(pages))
		return NULL;

	return vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL));
}

static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);

	vunmap(vaddr);
	vgem_unpin_pages(bo);
}

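/*
 * Map an exported buffer into an importer's address space by reusing the
 * backing shmem file's mmap, with write-combining page protection to match
 * the regular GEM mmap path.
 */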
static int vgem_prime_mmap(struct drm_gem_object *obj,
			   struct vm_area_struct *vma)
{
	int ret;

	if (obj->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!obj->filp)
		return -ENODEV;

	ret = call_mmap(obj->filp, vma);
	if (ret)
		return ret;

	fput(vma->vm_file);
	vma->vm_file = get_file(obj->filp);
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	return 0;
}

static void vgem_release(struct drm_device *dev)
{
	struct vgem_device *vgem = container_of(dev, typeof(*vgem), drm);

	platform_device_unregister(vgem->platform);
	drm_dev_fini(&vgem->drm);

	kfree(vgem);
}

static struct drm_driver vgem_driver = {
	.driver_features = DRIVER_GEM | DRIVER_PRIME,
	.release = vgem_release,
	.open = vgem_open,
	.postclose = vgem_postclose,
	.gem_free_object_unlocked = vgem_gem_free_object,
	.gem_vm_ops = &vgem_gem_vm_ops,
	.ioctls = vgem_ioctls,
	.num_ioctls = ARRAY_SIZE(vgem_ioctls),
	.fops = &vgem_driver_fops,

	.dumb_create = vgem_gem_dumb_create,
	.dumb_map_offset = vgem_gem_dumb_map,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_pin = vgem_prime_pin,
	.gem_prime_unpin = vgem_prime_unpin,
	.gem_prime_import = vgem_prime_import,
	.gem_prime_export = drm_gem_prime_export,
	.gem_prime_import_sg_table = vgem_prime_import_sg_table,
	.gem_prime_get_sg_table = vgem_prime_get_sg_table,
	.gem_prime_vmap = vgem_prime_vmap,
	.gem_prime_vunmap = vgem_prime_vunmap,
	.gem_prime_mmap = vgem_prime_mmap,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
};

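/*
 * vgem has no hardware, so module init creates both the DRM device and a
 * matching platform device; the latter supplies a struct device with a DMA
 * mask, which the dma-buf import/export paths need.
 */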
static int __init vgem_init(void)
{
	int ret;

	vgem_device = kzalloc(sizeof(*vgem_device), GFP_KERNEL);
	if (!vgem_device)
		return -ENOMEM;

	ret = drm_dev_init(&vgem_device->drm, &vgem_driver, NULL);
	if (ret)
		goto out_free;

	vgem_device->platform =
		platform_device_register_simple("vgem", -1, NULL, 0);
	if (IS_ERR(vgem_device->platform)) {
		ret = PTR_ERR(vgem_device->platform);
		goto out_fini;
	}

	dma_coerce_mask_and_coherent(&vgem_device->platform->dev,
				     DMA_BIT_MASK(64));

	/* Final step: expose the device/driver to userspace */
	ret = drm_dev_register(&vgem_device->drm, 0);
	if (ret)
		goto out_unregister;

	return 0;

out_unregister:
	platform_device_unregister(vgem_device->platform);
out_fini:
	drm_dev_fini(&vgem_device->drm);
out_free:
	kfree(vgem_device);
	return ret;
}

static void __exit vgem_exit(void)
{
	drm_dev_unregister(&vgem_device->drm);
	drm_dev_unref(&vgem_device->drm);
}

module_init(vgem_init);
module_exit(vgem_exit);

MODULE_AUTHOR("Red Hat, Inc.");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");