/*
 * Copyright (C) 2012 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
#include "armada_drm.h"
#include "armada_gem.h"
#include <drm/armada_drm.h>
#include "armada_ioctlP.h"

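/*
 * Objects backed by the linear pool have no struct page, so userspace
 * mmap of them is serviced by inserting PFNs derived from phys_addr on
 * first touch rather than through the page cache.
 */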
static int armada_gem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct armada_gem_object *obj = drm_to_armada_gem(vma->vm_private_data);
	unsigned long addr = (unsigned long)vmf->virtual_address;
	unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;
	int ret;

	pfn += (addr - vma->vm_start) >> PAGE_SHIFT;
	ret = vm_insert_pfn(vma, addr, pfn);

	switch (ret) {
	case 0:
	case -EBUSY:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

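/* Pair the PFN-inserting fault handler with the stock GEM open/close helpers. */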
const struct vm_operations_struct armada_gem_vm_ops = {
	.fault	= armada_gem_vm_fault,
	.open	= drm_gem_vm_open,
	.close	= drm_gem_vm_close,
};

static size_t roundup_gem_size(size_t size)
{
	return roundup(size, PAGE_SIZE);
}

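/*
 * Teardown must match however the object was backed: pages allocated
 * from the system are freed, linear-pool nodes are removed under
 * linear_lock, and imported dma-bufs are unmapped and released.
 */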
void armada_gem_free_object(struct drm_gem_object *obj)
{
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct armada_private *priv = obj->dev->dev_private;

	DRM_DEBUG_DRIVER("release obj %p\n", dobj);

	drm_gem_free_mmap_offset(&dobj->obj);

	might_lock(&priv->linear_lock);

	if (dobj->page) {
		/* page backed memory */
		unsigned int order = get_order(dobj->obj.size);
		__free_pages(dobj->page, order);
	} else if (dobj->linear) {
		/* linear backed memory */
		mutex_lock(&priv->linear_lock);
		drm_mm_remove_node(dobj->linear);
		mutex_unlock(&priv->linear_lock);
		kfree(dobj->linear);
		if (dobj->addr)
			iounmap(dobj->addr);
	}

	if (dobj->obj.import_attach) {
		/* We only ever display imported data */
		if (dobj->sgt)
			dma_buf_unmap_attachment(dobj->obj.import_attach,
						 dobj->sgt, DMA_TO_DEVICE);
		drm_prime_gem_destroy(&dobj->obj, NULL);
	}

	drm_gem_object_release(&dobj->obj);

	kfree(dobj);
}

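/*
 * Back an object with display-capable memory: small CPU-only objects
 * (typically cursors) come from the page allocator, anything larger is
 * carved out of the driver's linear memory pool.
 */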
int
armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
{
	struct armada_private *priv = dev->dev_private;
	size_t size = obj->obj.size;

	if (obj->page || obj->linear)
		return 0;

	/*
	 * If it is a small allocation (typically cursor, which will
	 * be 32x64 or 64x32 ARGB pixels) try to get it from the system.
	 * Framebuffers will never be this small (our minimum size for
	 * framebuffers is larger than this anyway.)  Such objects are
	 * only accessed by the CPU so we don't need any special handling
	 * here.
	 */
	if (size <= 8192) {
		unsigned int order = get_order(size);
		struct page *p = alloc_pages(GFP_KERNEL, order);

		if (p) {
			obj->addr = page_address(p);
			obj->phys_addr = page_to_phys(p);
			obj->page = p;

			memset(obj->addr, 0, PAGE_ALIGN(size));
		}
	}

	/*
	 * We could grab something from CMA if it's enabled, but that
	 * involves building in a problem:
	 *
	 * CMA's interface uses dma_alloc_coherent(), which provides us
	 * with a CPU virtual address and a device address.
	 *
	 * The CPU virtual address may be either an address in the kernel
	 * direct mapped region (for example, as it would be on x86) or
	 * it may be remapped into another part of kernel memory space
	 * (eg, as it would be on ARM.)  This means virt_to_phys() on the
	 * returned virtual address is invalid depending on the architecture
	 * implementation.
	 *
	 * The device address may also not be a physical address; it may
	 * be that there is some kind of remapping between the device and
	 * system RAM, which makes the use of the device address also
	 * unsafe to re-use as a physical address.
	 *
	 * This makes DRM usage of dma_alloc_coherent() in a generic way
	 * at best very questionable and unsafe.
	 */

	/* Otherwise, grab it from our linear allocation */
	if (!obj->page) {
		struct drm_mm_node *node;
		unsigned align = min_t(unsigned, size, SZ_2M);
		void __iomem *ptr;
		int ret;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOSPC;

		mutex_lock(&priv->linear_lock);
		ret = drm_mm_insert_node(&priv->linear, node, size, align,
					 DRM_MM_SEARCH_DEFAULT);
		mutex_unlock(&priv->linear_lock);
		if (ret) {
			kfree(node);
			return ret;
		}

		obj->linear = node;

		/* Ensure that the memory we're returning is cleared. */
		ptr = ioremap_wc(obj->linear->start, size);
		if (!ptr) {
			mutex_lock(&priv->linear_lock);
			drm_mm_remove_node(obj->linear);
			mutex_unlock(&priv->linear_lock);
			kfree(obj->linear);
			obj->linear = NULL;
			return -ENOMEM;
		}

		memset_io(ptr, 0, size);
		iounmap(ptr);

		obj->phys_addr = obj->linear->start;
		obj->dev_addr = obj->linear->start;
	}

	DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
			 (unsigned long long)obj->phys_addr,
			 (unsigned long long)obj->dev_addr);

	return 0;
}

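/* Lazily ioremap a linear object so the kernel can access its contents. */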
void *
armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
{
	/* only linear objects need to be ioremap'd */
	if (!dobj->addr && dobj->linear)
		dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size);
	return dobj->addr;
}

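/*
 * A "private" object has no shmem backing; it is used for objects that
 * will be backed by linear memory or by an imported dma-buf.
 */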
struct armada_gem_object *
armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
{
	struct armada_gem_object *obj;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	drm_gem_private_object_init(dev, &obj->obj, size);
	obj->dev_addr = DMA_ERROR_CODE;

	DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);

	return obj;
}

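/*
 * A shmem-backed object: page-cache pages provide the storage, so it can
 * be mapped into userspace via the mmap ioctl below.
 */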
static struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
	size_t size)
{
	struct armada_gem_object *obj;
	struct address_space *mapping;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	if (drm_gem_object_init(dev, &obj->obj, size)) {
		kfree(obj);
		return NULL;
	}

	obj->dev_addr = DMA_ERROR_CODE;

	mapping = obj->obj.filp->f_mapping;
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);

	DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);

	return obj;
}

/* Dumb alloc support */
int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
	struct drm_mode_create_dumb *args)
{
	struct armada_gem_object *dobj;
	u32 handle;
	size_t size;
	int ret;

	args->pitch = armada_pitch(args->width, args->bpp);
	args->size = size = args->pitch * args->height;

	dobj = armada_gem_alloc_private_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = armada_gem_linear_back(dev, dobj);
	if (ret)
		goto err;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_unreference_unlocked(&dobj->obj);

	return ret;
}

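/*
 * Illustrative userspace flow for the dumb-buffer path above (a sketch,
 * not part of this driver; error handling omitted): create a dumb
 * buffer, ask for its fake mmap offset, then mmap() the DRM device node.
 *
 *	struct drm_mode_create_dumb creq = {
 *		.width = 64, .height = 64, .bpp = 32,
 *	};
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq);
 *
 *	struct drm_mode_map_dumb mreq = { .handle = creq.handle };
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq);
 *
 *	void *map = mmap(NULL, creq.size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, mreq.offset);
 */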
int armada_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
	uint32_t handle, uint64_t *offset)
{
	struct armada_gem_object *obj;
	int ret = 0;

	obj = armada_gem_object_lookup(file, handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object\n");
		return -EINVAL;
	}

	/* Don't allow imported objects to be mapped */
	if (obj->obj.import_attach) {
		ret = -EINVAL;
		goto err_unref;
	}

	ret = drm_gem_create_mmap_offset(&obj->obj);
	if (ret == 0) {
		*offset = drm_vma_node_offset_addr(&obj->obj.vma_node);
		DRM_DEBUG_DRIVER("handle %#x offset %llx\n", handle, *offset);
	}

 err_unref:
	drm_gem_object_unreference_unlocked(&obj->obj);

	return ret;
}

int armada_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
	uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}

/* Private driver gem ioctls */
int armada_gem_create_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_create *args = data;
	struct armada_gem_object *dobj;
	size_t size;
	u32 handle;
	int ret;

	if (args->size == 0)
		return -ENOMEM;

	size = args->size;

	dobj = armada_gem_alloc_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_unreference_unlocked(&dobj->obj);

	return ret;
}

/* Map a shmem-backed object into process memory space */
int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_mmap *args = data;
	struct armada_gem_object *dobj;
	unsigned long addr;

	dobj = armada_gem_object_lookup(file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	if (!dobj->obj.filp) {
		drm_gem_object_unreference_unlocked(&dobj->obj);
		return -EINVAL;
	}

	addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
		       MAP_SHARED, args->offset);
	drm_gem_object_unreference_unlocked(&dobj->obj);
	if (IS_ERR_VALUE(addr))
		return addr;

	args->addr = addr;

	return 0;
}

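/*
 * Copy user data into a kernel-mapped object. The user pointer is
 * validated and pre-faulted before the object is looked up, and the
 * write must lie entirely within the object.
 */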
int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_pwrite *args = data;
	struct armada_gem_object *dobj;
	char __user *ptr;
	int ret;

	DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
		args->handle, args->offset, args->size, args->ptr);

	if (args->size == 0)
		return 0;

	ptr = (char __user *)(uintptr_t)args->ptr;

	if (!access_ok(VERIFY_READ, ptr, args->size))
		return -EFAULT;

	ret = fault_in_pages_readable(ptr, args->size);
	if (ret)
		return ret;

	dobj = armada_gem_object_lookup(file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	/* Must be a kernel-mapped object */
	if (!dobj->addr) {
		ret = -EINVAL;
		goto unref;
	}

	if (args->offset > dobj->obj.size ||
	    args->size > dobj->obj.size - args->offset) {
		DRM_ERROR("invalid size: object size %zu\n", dobj->obj.size);
		ret = -EINVAL;
		goto unref;
	}

	if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) {
		ret = -EFAULT;
	} else if (dobj->update) {
		dobj->update(dobj->update_data);
		ret = 0;
	}

 unref:
	drm_gem_object_unreference_unlocked(&dobj->obj);

	return ret;
}

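/*
 * Prime support: build a scatterlist for an exported object. Three
 * backing stores are handled: shmem pages (one entry per page), a single
 * contiguous page allocation, and linear-pool memory, which has no
 * struct page and so gets its dma address and length filled in directly.
 */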
static struct sg_table *
armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
	enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct scatterlist *sg;
	struct sg_table *sgt;
	int i, num;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (dobj->obj.filp) {
		struct address_space *mapping;
		int count;

		count = dobj->obj.size / PAGE_SIZE;
		if (sg_alloc_table(sgt, count, GFP_KERNEL))
			goto free_sgt;

		mapping = dobj->obj.filp->f_mapping;

		for_each_sg(sgt->sgl, sg, count, i) {
			struct page *page;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page)) {
				num = i;
				goto release;
			}

			sg_set_page(sg, page, PAGE_SIZE, 0);
		}

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) {
			num = sgt->nents;
			goto release;
		}
	} else if (dobj->page) {
		/* Single contiguous page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;

		sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
			goto free_table;
	} else if (dobj->linear) {
		/* Single contiguous physical region - no struct page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;
		sg_dma_address(sgt->sgl) = dobj->dev_addr;
		sg_dma_len(sgt->sgl) = dobj->obj.size;
	} else {
		goto free_sgt;
	}
	return sgt;

 release:
	for_each_sg(sgt->sgl, sg, num, i)
		put_page(sg_page(sg));
 free_table:
	sg_free_table(sgt);
 free_sgt:
	kfree(sgt);
	return NULL;
}

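/*
 * Undo armada_gem_prime_map_dma_buf(): linear regions were never passed
 * through dma_map_sg(), so only page-backed tables are dma-unmapped, and
 * shmem page references taken at map time are dropped.
 */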
static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
	struct sg_table *sgt, enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	int i;

	if (!dobj->linear)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	if (dobj->obj.filp) {
		struct scatterlist *sg;
		for_each_sg(sgt->sgl, sg, sgt->nents, i)
			put_page(sg_page(sg));
	}

	sg_free_table(sgt);
	kfree(sgt);
}

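/*
 * Exported dma-bufs deliberately implement no kernel mapping and no
 * userspace mmap; importers are expected to reach the memory through the
 * mapped scatterlist instead.
 */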
static void *armada_gem_dmabuf_no_kmap(struct dma_buf *buf, unsigned long n)
{
	return NULL;
}

static void
armada_gem_dmabuf_no_kunmap(struct dma_buf *buf, unsigned long n, void *addr)
{
}

static int
armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
	.map_dma_buf	= armada_gem_prime_map_dma_buf,
	.unmap_dma_buf	= armada_gem_prime_unmap_dma_buf,
	.release	= drm_gem_dmabuf_release,
	.kmap_atomic	= armada_gem_dmabuf_no_kmap,
	.kunmap_atomic	= armada_gem_dmabuf_no_kunmap,
	.kmap		= armada_gem_dmabuf_no_kmap,
	.kunmap		= armada_gem_dmabuf_no_kunmap,
	.mmap		= armada_gem_dmabuf_mmap,
};

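/* Export an object as a read-write dma-buf using the ops above. */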
struct dma_buf *
armada_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj,
	int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &armada_gem_prime_dmabuf_ops;
	exp_info.size = obj->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = obj;

	return drm_gem_dmabuf_export(dev, &exp_info);
}

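/*
 * Importing a dma-buf that we ourselves exported is short-circuited to a
 * reference on the underlying GEM object; anything else gets a private
 * object with an attachment, mapped later by armada_gem_map_import().
 */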
struct drm_gem_object *
armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
{
	struct dma_buf_attachment *attach;
	struct armada_gem_object *dobj;

	if (buf->ops == &armada_gem_prime_dmabuf_ops) {
		struct drm_gem_object *obj = buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing our own dmabuf(s) increases the
			 * refcount on the gem object itself.
			 */
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	dobj = armada_gem_alloc_private_object(dev, buf->size);
	if (!dobj) {
		dma_buf_detach(buf, attach);
		return ERR_PTR(-ENOMEM);
	}

	dobj->obj.import_attach = attach;
	get_dma_buf(buf);

	/*
	 * Don't call dma_buf_map_attachment() here - it maps the
	 * scatterlist immediately for DMA, and this is not always
	 * an appropriate thing to do.
	 */
	return &dobj->obj;
}

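/*
 * The display hardware scans out of one contiguous region, so the
 * imported buffer must map to a single scatterlist entry covering the
 * whole object; anything else is rejected.
 */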
int armada_gem_map_import(struct armada_gem_object *dobj)
{
	int ret;

	dobj->sgt = dma_buf_map_attachment(dobj->obj.import_attach,
					   DMA_TO_DEVICE);
	if (IS_ERR(dobj->sgt)) {
		ret = PTR_ERR(dobj->sgt);
		dobj->sgt = NULL;
		DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret);
		return ret;
	}
	if (dobj->sgt->nents > 1) {
		DRM_ERROR("dma_buf_map_attachment() returned an (unsupported) scattered list\n");
		return -EINVAL;
	}
	if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
		DRM_ERROR("dma_buf_map_attachment() returned a small buffer\n");
		return -EINVAL;
	}
	dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
	return 0;
}
);