/*
 * Copyright (C) 2012 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
#include "armada_drm.h"
#include "armada_gem.h"
#include <drm/armada_drm.h>
#include "armada_ioctlP.h"

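/*
 * CPU fault handler: both page-backed and linear objects are physically
 * contiguous, so the faulting offset within the VMA maps directly onto
 * a PFN offset from the object's base physical address.
 */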
static vm_fault_t armada_gem_vm_fault(struct vm_fault *vmf)
{
	struct drm_gem_object *gobj = vmf->vma->vm_private_data;
	struct armada_gem_object *obj = drm_to_armada_gem(gobj);
	unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;

	pfn += (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;
	return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
}

const struct vm_operations_struct armada_gem_vm_ops = {
	.fault	= armada_gem_vm_fault,
	.open	= drm_gem_vm_open,
	.close	= drm_gem_vm_close,
};

static size_t roundup_gem_size(size_t size)
{
	return roundup(size, PAGE_SIZE);
}

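/*
 * Release whichever backing store the object carries: allocator pages,
 * a node in the linear region, or an imported dma-buf attachment.
 */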
void armada_gem_free_object(struct drm_gem_object *obj)
{
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct armada_private *priv = obj->dev->dev_private;

	DRM_DEBUG_DRIVER("release obj %p\n", dobj);

	drm_gem_free_mmap_offset(&dobj->obj);

	might_lock(&priv->linear_lock);

	if (dobj->page) {
		/* page backed memory */
		unsigned int order = get_order(dobj->obj.size);
		__free_pages(dobj->page, order);
	} else if (dobj->linear) {
		/* linear backed memory */
		mutex_lock(&priv->linear_lock);
		drm_mm_remove_node(dobj->linear);
		mutex_unlock(&priv->linear_lock);
		kfree(dobj->linear);
		if (dobj->addr)
			iounmap(dobj->addr);
	}

	if (dobj->obj.import_attach) {
		/* We only ever display imported data */
		if (dobj->sgt)
			dma_buf_unmap_attachment(dobj->obj.import_attach,
						 dobj->sgt, DMA_TO_DEVICE);
		drm_prime_gem_destroy(&dobj->obj, NULL);
	}

	drm_gem_object_release(&dobj->obj);

	kfree(dobj);
}

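/*
 * Provide backing store for an object: small allocations (e.g. cursors)
 * come from the page allocator, everything else from the linear pool.
 */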
int
armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
{
	struct armada_private *priv = dev->dev_private;
	size_t size = obj->obj.size;

	if (obj->page || obj->linear)
		return 0;

	/*
	 * If it is a small allocation (typically cursor, which will
	 * be 32x64 or 64x32 ARGB pixels) try to get it from the system.
	 * Framebuffers will never be this small (our minimum size for
	 * framebuffers is larger than this anyway.)  Such objects are
	 * only accessed by the CPU so we don't need any special handling
	 * here.
	 */
	if (size <= 8192) {
		unsigned int order = get_order(size);
		struct page *p = alloc_pages(GFP_KERNEL, order);

		if (p) {
			obj->addr = page_address(p);
			obj->phys_addr = page_to_phys(p);
			obj->page = p;

			memset(obj->addr, 0, PAGE_ALIGN(size));
		}
	}

	/*
	 * We could grab something from CMA if it's enabled, but that
	 * involves building in a problem:
	 *
	 * CMA's interface uses dma_alloc_coherent(), which provides us
	 * with a CPU virtual address and a device address.
	 *
	 * The CPU virtual address may be either an address in the kernel
	 * direct mapped region (for example, as it would be on x86) or
	 * it may be remapped into another part of kernel memory space
	 * (eg, as it would be on ARM.)  This means virt_to_phys() on the
	 * returned virtual address may be invalid depending on the
	 * architecture implementation.
	 *
	 * The device address may also not be a physical address; it may
	 * be that there is some kind of remapping between the device and
	 * system RAM, which makes the device address unsafe to re-use as
	 * a physical address.
	 *
	 * This makes DRM usage of dma_alloc_coherent() in a generic way
	 * at best very questionable and unsafe.
	 */

	/* Otherwise, grab it from our linear allocation */
	if (!obj->page) {
		struct drm_mm_node *node;
		unsigned align = min_t(unsigned, size, SZ_2M);
		void __iomem *ptr;
		int ret;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOSPC;

		mutex_lock(&priv->linear_lock);
		ret = drm_mm_insert_node_generic(&priv->linear, node,
						 size, align, 0, 0);
		mutex_unlock(&priv->linear_lock);
		if (ret) {
			kfree(node);
			return ret;
		}

		obj->linear = node;

		/* Ensure that the memory we're returning is cleared. */
		ptr = ioremap_wc(obj->linear->start, size);
		if (!ptr) {
			mutex_lock(&priv->linear_lock);
			drm_mm_remove_node(obj->linear);
			mutex_unlock(&priv->linear_lock);
			kfree(obj->linear);
			obj->linear = NULL;
			return -ENOMEM;
		}

		memset_io(ptr, 0, size);
		iounmap(ptr);

		obj->phys_addr = obj->linear->start;
		obj->dev_addr = obj->linear->start;
		obj->mapped = true;
	}

	DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
			 (unsigned long long)obj->phys_addr,
			 (unsigned long long)obj->dev_addr);

	return 0;
}

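/* Return a CPU mapping of the object, ioremapping linear objects on demand */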
void *
armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
{
	/* only linear objects need to be ioremap'd */
	if (!dobj->addr && dobj->linear)
		dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size);
	return dobj->addr;
}

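/*
 * Allocate a GEM object without shmem backing; the caller attaches the
 * backing store later (linear memory or an imported dma-buf).
 */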
struct armada_gem_object *
armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
{
	struct armada_gem_object *obj;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	drm_gem_private_object_init(dev, &obj->obj, size);

	DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);

	return obj;
}

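/* Allocate a shmem-backed GEM object */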
static struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
	size_t size)
{
	struct armada_gem_object *obj;
	struct address_space *mapping;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	if (drm_gem_object_init(dev, &obj->obj, size)) {
		kfree(obj);
		return NULL;
	}

	mapping = obj->obj.filp->f_mapping;
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);

	DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);

	return obj;
}

/* Dumb alloc support */
int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
	struct drm_mode_create_dumb *args)
{
	struct armada_gem_object *dobj;
	u32 handle;
	size_t size;
	int ret;

	args->pitch = armada_pitch(args->width, args->bpp);
	args->size = size = args->pitch * args->height;

	dobj = armada_gem_alloc_private_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = armada_gem_linear_back(dev, dobj);
	if (ret)
		goto err;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_put_unlocked(&dobj->obj);
	return ret;
}

/* Private driver gem ioctls */
int armada_gem_create_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_create *args = data;
	struct armada_gem_object *dobj;
	size_t size;
	u32 handle;
	int ret;

	if (args->size == 0)
		return -ENOMEM;

	size = args->size;

	dobj = armada_gem_alloc_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_put_unlocked(&dobj->obj);
	return ret;
}

/* Map a shmem-backed object into process memory space */
int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_mmap *args = data;
	struct armada_gem_object *dobj;
	unsigned long addr;

	dobj = armada_gem_object_lookup(file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	if (!dobj->obj.filp) {
		drm_gem_object_put_unlocked(&dobj->obj);
		return -EINVAL;
	}

	addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
		       MAP_SHARED, args->offset);
	drm_gem_object_put_unlocked(&dobj->obj);
	if (IS_ERR_VALUE(addr))
		return addr;

	args->addr = addr;

	return 0;
}

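/*
 * Copy data from userspace into a kernel-mapped object and, if set, run
 * the object's update hook so consumers see the new contents.
 */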
int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_pwrite *args = data;
	struct armada_gem_object *dobj;
	char __user *ptr;
	int ret;

	DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
		args->handle, args->offset, args->size, args->ptr);

	if (args->size == 0)
		return 0;

	ptr = (char __user *)(uintptr_t)args->ptr;

	if (!access_ok(ptr, args->size))
		return -EFAULT;

	ret = fault_in_pages_readable(ptr, args->size);
	if (ret)
		return ret;

	dobj = armada_gem_object_lookup(file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	/* Must be a kernel-mapped object; drop the lookup reference if not */
	if (!dobj->addr) {
		ret = -EINVAL;
		goto unref;
	}

	if (args->offset > dobj->obj.size ||
	    args->size > dobj->obj.size - args->offset) {
		DRM_ERROR("invalid size: object size %zu\n", dobj->obj.size);
		ret = -EINVAL;
		goto unref;
	}

	if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) {
		ret = -EFAULT;
	} else if (dobj->update) {
		dobj->update(dobj->update_data);
		ret = 0;
	}

 unref:
	drm_gem_object_put_unlocked(&dobj->obj);
	return ret;
}

/* Prime support */
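/*
 * Build and DMA-map a scatter/gather table for an exported object,
 * covering all three backing types: shmem pages, a single contiguous
 * page allocation, or a linear region with no struct page.
 */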
static struct sg_table *
armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
	enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct scatterlist *sg;
	struct sg_table *sgt;
	int i, num;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (dobj->obj.filp) {
		struct address_space *mapping;
		int count;

		count = dobj->obj.size / PAGE_SIZE;
		if (sg_alloc_table(sgt, count, GFP_KERNEL))
			goto free_sgt;

		mapping = dobj->obj.filp->f_mapping;

		for_each_sg(sgt->sgl, sg, count, i) {
			struct page *page;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page)) {
				num = i;
				goto release;
			}

			sg_set_page(sg, page, PAGE_SIZE, 0);
		}

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) {
			num = sgt->nents;
			goto release;
		}
	} else if (dobj->page) {
		/* Single contiguous page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;

		sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
			goto free_table;
	} else if (dobj->linear) {
		/* Single contiguous physical region - no struct page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;
		sg_dma_address(sgt->sgl) = dobj->dev_addr;
		sg_dma_len(sgt->sgl) = dobj->obj.size;
	} else {
		goto free_sgt;
	}
	return sgt;

 release:
	for_each_sg(sgt->sgl, sg, num, i)
		put_page(sg_page(sg));
 free_table:
	sg_free_table(sgt);
 free_sgt:
	kfree(sgt);
	return NULL;
}

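/*
 * Undo the mapping above: linear regions were never DMA-mapped, and
 * shmem pages must drop the reference taken by shmem_read_mapping_page().
 */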
static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
	struct sg_table *sgt, enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	int i;

	if (!dobj->linear)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	if (dobj->obj.filp) {
		struct scatterlist *sg;

		for_each_sg(sgt->sgl, sg, sgt->nents, i)
			put_page(sg_page(sg));
	}

	sg_free_table(sgt);
	kfree(sgt);
}

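/* CPU kmap and userspace mmap of exported dma-bufs are not supported */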
static void *armada_gem_dmabuf_no_kmap(struct dma_buf *buf, unsigned long n)
{
	return NULL;
}

static void
armada_gem_dmabuf_no_kunmap(struct dma_buf *buf, unsigned long n, void *addr)
{
}

static int
armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
	.map_dma_buf	= armada_gem_prime_map_dma_buf,
	.unmap_dma_buf	= armada_gem_prime_unmap_dma_buf,
	.release	= drm_gem_dmabuf_release,
	.map		= armada_gem_dmabuf_no_kmap,
	.unmap		= armada_gem_dmabuf_no_kunmap,
	.mmap		= armada_gem_dmabuf_mmap,
};

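/* Export a GEM object as a dma-buf using the driver's dma_buf_ops above */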
struct dma_buf *
armada_gem_prime_export(struct drm_gem_object *obj, int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &armada_gem_prime_dmabuf_ops;
	exp_info.size = obj->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = obj;

	return drm_gem_dmabuf_export(obj->dev, &exp_info);
}

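/*
 * Import a dma-buf: re-importing one of our own exports just takes a
 * reference on the underlying GEM object; foreign buffers are wrapped
 * in a private object holding the attachment.
 */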
struct drm_gem_object *
armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
{
	struct dma_buf_attachment *attach;
	struct armada_gem_object *dobj;

	if (buf->ops == &armada_gem_prime_dmabuf_ops) {
		struct drm_gem_object *obj = buf->priv;

		if (obj->dev == dev) {
			/*
			 * Importing our own dmabuf(s) increases the
			 * refcount on the gem object itself.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	dobj = armada_gem_alloc_private_object(dev, buf->size);
	if (!dobj) {
		dma_buf_detach(buf, attach);
		return ERR_PTR(-ENOMEM);
	}

	dobj->obj.import_attach = attach;
	get_dma_buf(buf);

	/*
	 * Don't call dma_buf_map_attachment() here - it maps the
	 * scatterlist immediately for DMA, and this is not always
	 * an appropriate thing to do.
	 */
	return &dobj->obj;
}

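/*
 * DMA-map an imported dma-buf. The display hardware scans out from a
 * single contiguous region, so scattered mappings are rejected.
 */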
int armada_gem_map_import(struct armada_gem_object *dobj)
{
	int ret;

	dobj->sgt = dma_buf_map_attachment(dobj->obj.import_attach,
					   DMA_TO_DEVICE);
	if (IS_ERR(dobj->sgt)) {
		ret = PTR_ERR(dobj->sgt);
		dobj->sgt = NULL;
		DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret);
		return ret;
	}
	if (dobj->sgt->nents > 1) {
		DRM_ERROR("dma_buf_map_attachment() returned an (unsupported) scattered list\n");
		return -EINVAL;
	}
	if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
		DRM_ERROR("dma_buf_map_attachment() returned a small buffer\n");
		return -EINVAL;
	}
	dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
	dobj->mapped = true;
	return 0;
}