/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include "drm.h"

#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"

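/*
 * Translate the error code returned by vm_insert_mixed() in the fault
 * handler into the VM_FAULT_* code expected by the mm core.
 */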
static unsigned int convert_to_vm_err_msg(int msg)
{
	unsigned int out_msg;

	switch (msg) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		out_msg = VM_FAULT_NOPAGE;
		break;

	case -ENOMEM:
		out_msg = VM_FAULT_OOM;
		break;

	default:
		out_msg = VM_FAULT_SIGBUS;
		break;
	}

	return out_msg;
}

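/* Return the fake mmap offset assigned to the object by the DRM core. */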
static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj)
{
	DRM_DEBUG_KMS("%s\n", __FILE__);

	return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT;
}

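/*
 * Allocate an exynos_drm_gem_obj, initialize its embedded GEM object,
 * create its mmap offset and register a handle for userspace. On return
 * the handle holds the only reference to the object.
 */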
static struct exynos_drm_gem_obj
		*exynos_drm_gem_init(struct drm_device *drm_dev,
			struct drm_file *file_priv, unsigned int *handle,
			unsigned int size)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
	if (!exynos_gem_obj) {
		DRM_ERROR("failed to allocate exynos gem object.\n");
		return ERR_PTR(-ENOMEM);
	}

	obj = &exynos_gem_obj->base;

	ret = drm_gem_object_init(drm_dev, obj, size);
	if (ret < 0) {
		DRM_ERROR("failed to initialize gem object.\n");
		ret = -EINVAL;
		goto err_object_init;
	}

	DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);

	ret = drm_gem_create_mmap_offset(obj);
	if (ret < 0) {
		DRM_ERROR("failed to allocate mmap offset.\n");
		goto err_create_mmap_offset;
	}

	/*
	 * allocate an id from the idr table where the obj is registered;
	 * the handle holds the id that userspace sees.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		goto err_handle_create;

	DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_unreference_unlocked(obj);

	return exynos_gem_obj;

err_handle_create:
	drm_gem_free_mmap_offset(obj);

err_create_mmap_offset:
	drm_gem_object_release(obj);

err_object_init:
	kfree(exynos_gem_obj);

	return ERR_PTR(ret);
}

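/*
 * Create a GEM object backed by a freshly allocated buffer of the given
 * size, rounded up to a whole number of pages.
 */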
struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
		struct drm_file *file_priv,
		unsigned int *handle, unsigned long size)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = NULL;
	struct exynos_drm_gem_buf *buffer;

	size = roundup(size, PAGE_SIZE);

	DRM_DEBUG_KMS("%s: size = 0x%lx\n", __FILE__, size);

	buffer = exynos_drm_buf_create(dev, size);
	if (IS_ERR(buffer))
		return ERR_CAST(buffer);

	exynos_gem_obj = exynos_drm_gem_init(dev, file_priv, handle, size);
	if (IS_ERR(exynos_gem_obj)) {
		exynos_drm_buf_destroy(dev, buffer);
		return exynos_gem_obj;
	}

	exynos_gem_obj->buffer = buffer;

	return exynos_gem_obj;
}

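/*
 * Ioctl handler that creates a GEM object of the size requested by
 * userspace and returns its handle in args->handle.
 */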
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
{
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem_obj *exynos_gem_obj = NULL;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	exynos_gem_obj = exynos_drm_gem_create(dev, file_priv,
			&args->handle, args->size);
	if (IS_ERR(exynos_gem_obj))
		return PTR_ERR(exynos_gem_obj);

	return 0;
}

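/*
 * Ioctl handler that returns the mmap offset of the GEM object referenced
 * by args->handle so userspace can map it.
 */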
int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
{
	struct drm_exynos_gem_map_off *args = data;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n",
			args->handle, (unsigned long)args->offset);

	if (!(dev->driver->driver_features & DRIVER_GEM)) {
		DRM_ERROR("does not support GEM.\n");
		return -ENODEV;
	}

	return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
			&args->offset);
}

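/*
 * mmap callback installed on the GEM object's file by
 * exynos_drm_gem_mmap_ioctl(); it maps the whole contiguous buffer into
 * the caller's address space with uncached page protection.
 */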
static int exynos_drm_gem_mmap_buffer(struct file *filp,
		struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = filp->private_data;
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buffer;
	unsigned long pfn, vm_size;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	vma->vm_flags |= (VM_IO | VM_RESERVED);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_file = filp;

	vm_size = vma->vm_end - vma->vm_start;
	/*
	 * a buffer describes the physically contiguous memory allocated
	 * by user request or at framebuffer creation.
	 */
	buffer = exynos_gem_obj->buffer;

	/* check if the user-requested size is valid. */
	if (vm_size > buffer->size)
		return -EINVAL;

	/*
	 * get the page frame number of the physical memory to be mapped
	 * to user space.
	 */
	pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >> PAGE_SHIFT;

	DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);

	if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
				vma->vm_page_prot)) {
		DRM_ERROR("failed to remap pfn range.\n");
		return -EAGAIN;
	}

	return 0;
}

static const struct file_operations exynos_drm_gem_fops = {
	.mmap = exynos_drm_gem_mmap_buffer,
};

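/*
 * Ioctl handler that maps the buffer of the GEM object referenced by
 * args->handle into the calling process and returns the mapped address
 * in args->mapped.
 */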
int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
{
	struct drm_exynos_gem_mmap *args = data;
	struct drm_gem_object *obj;
	unsigned int addr;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (!(dev->driver->driver_features & DRIVER_GEM)) {
		DRM_ERROR("does not support GEM.\n");
		return -ENODEV;
	}

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return -EINVAL;
	}

	obj->filp->f_op = &exynos_drm_gem_fops;
	obj->filp->private_data = obj;

	down_write(&current->mm->mmap_sem);
	addr = do_mmap(obj->filp, 0, args->size,
			PROT_READ | PROT_WRITE, MAP_SHARED, 0);
	up_write(&current->mm->mmap_sem);

	drm_gem_object_unreference_unlocked(obj);

	if (IS_ERR((void *)addr))
		return PTR_ERR((void *)addr);

	args->mapped = addr;

	DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped);

	return 0;
}

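/* No driver-specific setup is required for a new GEM object. */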
int exynos_drm_gem_init_object(struct drm_gem_object *obj)
{
	DRM_DEBUG_KMS("%s\n", __FILE__);

	return 0;
}

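/*
 * Release the mmap offset, the base GEM object and the backing buffer
 * once the last reference to the object is dropped.
 */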
void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	DRM_DEBUG_KMS("handle count = %d\n",
			atomic_read(&gem_obj->handle_count));

	if (gem_obj->map_list.map)
		drm_gem_free_mmap_offset(gem_obj);

	/* release file pointer to gem object. */
	drm_gem_object_release(gem_obj);

	exynos_gem_obj = to_exynos_gem_obj(gem_obj);

	exynos_drm_buf_destroy(gem_obj->dev, exynos_gem_obj->buffer);

	kfree(exynos_gem_obj);
}

int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
		struct drm_device *dev, struct drm_mode_create_dumb *args)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/*
	 * allocate memory to be used for the framebuffer.
	 * - this callback is called by the user application
	 *   through the DRM_IOCTL_MODE_CREATE_DUMB command.
	 */

	args->pitch = args->width * args->bpp >> 3;
	args->size = args->pitch * args->height;

	exynos_gem_obj = exynos_drm_gem_create(dev, file_priv, &args->handle,
			args->size);
	if (IS_ERR(exynos_gem_obj))
		return PTR_ERR(exynos_gem_obj);

	return 0;
}

int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
		struct drm_device *dev, uint32_t handle, uint64_t *offset)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	mutex_lock(&dev->struct_mutex);

	/*
	 * get the offset of the memory allocated for the drm framebuffer.
	 * - this callback is called by the user application
	 *   through the DRM_IOCTL_MODE_MAP_DUMB command.
	 */

	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	*offset = get_gem_mmap_offset(&exynos_gem_obj->base);

	drm_gem_object_unreference(obj);

	DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

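/*
 * Page fault handler for mappings created through exynos_drm_gem_mmap():
 * insert the page of the backing buffer that corresponds to the faulting
 * address.
 */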
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct drm_device *dev = obj->dev;
	unsigned long pfn;
	pgoff_t page_offset;
	int ret;

	page_offset = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	mutex_lock(&dev->struct_mutex);

	pfn = (((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
			PAGE_SHIFT) + page_offset;

	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);

	mutex_unlock(&dev->struct_mutex);

	return convert_to_vm_err_msg(ret);
}

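/*
 * mmap file operation for GEM objects: let the DRM core set up the VMA,
 * then switch it to a mixed map so that faults are resolved page by page
 * in exynos_drm_gem_fault().
 */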
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/* set vm_area_struct. */
	ret = drm_gem_mmap(filp, vma);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	return ret;
}

int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
		struct drm_device *dev, unsigned int handle)
{
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/*
	 * obj->refcount and obj->handle_count are decreased and
	 * if both of them reach 0 then exynos_drm_gem_free_object()
	 * is called by the callback to release resources.
	 */
	ret = drm_gem_handle_delete(file_priv, handle);
	if (ret < 0) {
		DRM_ERROR("failed to delete drm_gem_handle.\n");
		return ret;
	}

	return 0;
}

MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
MODULE_DESCRIPTION("Samsung SoC DRM GEM Module");
MODULE_LICENSE("GPL");