]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - drivers/gpu/drm/exynos/exynos_drm_gem.h
Merge branches 'pm-cpufreq' and 'acpi-cppc'
[mirror_ubuntu-artful-kernel.git] / drivers / gpu / drm / exynos / exynos_drm_gem.h
/* exynos_drm_gem.h
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
11
12 #ifndef _EXYNOS_DRM_GEM_H_
13 #define _EXYNOS_DRM_GEM_H_
14
15 #include <drm/drm_gem.h>
16
/* Upcast from the embedded drm_gem_object back to its exynos_drm_gem container. */
#define to_exynos_gem(x)	container_of(x, struct exynos_drm_gem, base)
18
/*
 * True when the buffer flags request a physically non-contiguous allocation.
 * The argument is parenthesized so that compound expressions such as
 * IS_NONCONTIG_BUFFER(a | b) expand with the intended precedence.
 */
#define IS_NONCONTIG_BUFFER(f)	((f) & EXYNOS_BO_NONCONTIG)
20
/*
 * exynos drm buffer structure.
 *
 * @base: a gem object.
 *	- a new handle to this gem object would be created
 *	by drm_gem_handle_create().
 * @flags: indicate memory type to allocated buffer and cache attribute.
 * @size: size requested from user, in bytes and this size is aligned
 *	in page unit.
 * @cookie: cookie returned by dma_alloc_attrs
 * @kvaddr: kernel virtual address to allocated memory region.
 * @dma_addr: bus address(accessed by dma) to allocated memory region.
 *	- this address could be physical address without IOMMU and
 *	device address with IOMMU.
 * @dma_attrs: attributes used for the DMA allocation
 *	(presumably passed to dma_alloc_attrs()/dma_free_attrs();
 *	TODO confirm against exynos_drm_gem.c).
 * @pages: Array of backing pages.
 * @sgt: Imported sg_table.
 *
 * P.S. this object would be transferred to user as kms_bo.handle so
 *	user can access the buffer through kms_bo.handle.
 */
struct exynos_drm_gem {
	struct drm_gem_object	base;
	unsigned int		flags;
	unsigned long		size;
	void			*cookie;
	void __iomem		*kvaddr;
	dma_addr_t		dma_addr;
	struct dma_attrs	dma_attrs;
	struct page		**pages;
	struct sg_table		*sgt;
};
57
58 struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
59
60 /* destroy a buffer with gem object */
61 void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem);
62
63 /* create a new buffer with gem object */
64 struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
65 unsigned int flags,
66 unsigned long size);
67
68 /*
69 * request gem object creation and buffer allocation as the size
70 * that it is calculated with framebuffer information such as width,
71 * height and bpp.
72 */
73 int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
74 struct drm_file *file_priv);
75
76 /*
77 * get dma address from gem handle and this function could be used for
78 * other drivers such as 2d/3d acceleration drivers.
79 * with this function call, gem object reference count would be increased.
80 */
81 dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
82 unsigned int gem_handle,
83 struct drm_file *filp);
84
85 /*
86 * put dma address from gem handle and this function could be used for
87 * other drivers such as 2d/3d acceleration drivers.
88 * with this function call, gem object reference count would be decreased.
89 */
90 void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
91 unsigned int gem_handle,
92 struct drm_file *filp);
93
94 /* map user space allocated by malloc to pages. */
95 int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
96 struct drm_file *file_priv);
97
98 /* get buffer information to memory region allocated by gem. */
99 int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
100 struct drm_file *file_priv);
101
102 /* get buffer size to gem handle. */
103 unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
104 unsigned int gem_handle,
105 struct drm_file *file_priv);
106
107 /* free gem object. */
108 void exynos_drm_gem_free_object(struct drm_gem_object *obj);
109
110 /* create memory region for drm framebuffer. */
111 int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
112 struct drm_device *dev,
113 struct drm_mode_create_dumb *args);
114
115 /* map memory region for drm framebuffer to user space. */
116 int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
117 struct drm_device *dev, uint32_t handle,
118 uint64_t *offset);
119
/* page fault handler: maps the faulting (virtual) address to physical memory. */
121 int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
122
/* set vm_flags; the vm attributes can also be changed here if needed. */
124 int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
125
126 static inline int vma_is_io(struct vm_area_struct *vma)
127 {
128 return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
129 }
130
131 /* get a copy of a virtual memory region. */
132 struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma);
133
134 /* release a userspace virtual memory area. */
135 void exynos_gem_put_vma(struct vm_area_struct *vma);
136
137 /* get pages from user space. */
138 int exynos_gem_get_pages_from_userptr(unsigned long start,
139 unsigned int npages,
140 struct page **pages,
141 struct vm_area_struct *vma);
142
143 /* drop the reference to pages. */
144 void exynos_gem_put_pages_to_userptr(struct page **pages,
145 unsigned int npages,
146 struct vm_area_struct *vma);
147
148 /* map sgt with dma region. */
149 int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
150 struct sg_table *sgt,
151 enum dma_data_direction dir);
152
153 /* unmap sgt from dma region. */
154 void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
155 struct sg_table *sgt,
156 enum dma_data_direction dir);
157
158 /* low-level interface prime helpers */
159 struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj);
160 struct drm_gem_object *
161 exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
162 struct dma_buf_attachment *attach,
163 struct sg_table *sgt);
164 void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj);
165 void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
166
167 #endif