/*
 * drm gem CMA (contiguous memory allocator) helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 *
 * Based on Samsung Exynos code
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>

#include <drm/drmP.h>
#include <drm/drm.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_vma_manager.h>

/**
 * DOC: cma helpers
 *
 * The Contiguous Memory Allocator reserves a pool of memory at early boot
 * that is used to service requests for large blocks of contiguous memory.
 *
 * The DRM GEM/CMA helpers use this allocator as a means to provide buffer
 * objects that are physically contiguous in memory. This is useful for
 * display drivers that are unable to map scattered buffers via an IOMMU.
 */

/**
 * __drm_gem_cma_create - Create a GEM CMA object without allocating memory
 * @drm: DRM device
 * @size: size of the object to allocate
 *
 * This function creates and initializes a GEM CMA object of the given size,
 * but doesn't allocate any memory to back the object.
 *
 * Returns:
 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
static struct drm_gem_cma_object *
__drm_gem_cma_create(struct drm_device *drm, size_t size)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *gem_obj;
	int ret;

	if (drm->driver->gem_create_object)
		gem_obj = drm->driver->gem_create_object(drm, size);
	else
		gem_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
	if (!gem_obj)
		return ERR_PTR(-ENOMEM);
	cma_obj = container_of(gem_obj, struct drm_gem_cma_object, base);

	ret = drm_gem_object_init(drm, gem_obj, size);
	if (ret)
		goto error;

	ret = drm_gem_create_mmap_offset(gem_obj);
	if (ret) {
		drm_gem_object_release(gem_obj);
		goto error;
	}

	return cma_obj;

error:
	kfree(cma_obj);
	return ERR_PTR(ret);
}

/**
 * drm_gem_cma_create - allocate an object with the given size
 * @drm: DRM device
 * @size: size of the object to allocate
 *
 * This function creates a CMA GEM object and allocates a contiguous chunk of
 * memory as backing store. The backing memory has the writecombine attribute
 * set.
 *
 * Returns:
 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
					      size_t size)
{
	struct drm_gem_cma_object *cma_obj;
	int ret;

	size = round_up(size, PAGE_SIZE);

	cma_obj = __drm_gem_cma_create(drm, size);
	if (IS_ERR(cma_obj))
		return cma_obj;

	cma_obj->vaddr = dma_alloc_wc(drm->dev, size, &cma_obj->paddr,
				      GFP_KERNEL | __GFP_NOWARN);
	if (!cma_obj->vaddr) {
		dev_err(drm->dev, "failed to allocate buffer with size %zu\n",
			size);
		ret = -ENOMEM;
		goto error;
	}

	return cma_obj;

error:
	drm_gem_object_put_unlocked(&cma_obj->base);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_create);

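/*
 * Usage sketch (illustrative only, not part of the helper implementation):
 * a driver allocating and releasing a contiguous buffer with
 * drm_gem_cma_create(). Names prefixed with "foo_" are placeholders, not
 * real kernel symbols.
 */
#if 0	/* usage sketch, not built */
static int foo_alloc_scanout(struct drm_device *drm, size_t size)
{
	struct drm_gem_cma_object *cma_obj;

	/* Allocates a page-aligned, physically contiguous, write-combined buffer. */
	cma_obj = drm_gem_cma_create(drm, size);
	if (IS_ERR(cma_obj))
		return PTR_ERR(cma_obj);

	/*
	 * cma_obj->paddr can be programmed into a scanout/DMA engine,
	 * cma_obj->vaddr is the kernel mapping for CPU access.
	 */

	/* Drop the reference when done; this ends up in drm_gem_cma_free_object(). */
	drm_gem_object_put_unlocked(&cma_obj->base);

	return 0;
}
#endif
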
/**
 * drm_gem_cma_create_with_handle - allocate an object with the given size and
 *     return a GEM handle to it
 * @file_priv: DRM file-private structure to register the handle for
 * @drm: DRM device
 * @size: size of the object to allocate
 * @handle: return location for the GEM handle
 *
 * This function creates a CMA GEM object, allocating a physically contiguous
 * chunk of memory as backing store. The GEM object is then added to the list
 * of objects associated with the given file and a handle to it is returned.
 *
 * Returns:
 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
static struct drm_gem_cma_object *
drm_gem_cma_create_with_handle(struct drm_file *file_priv,
			       struct drm_device *drm, size_t size,
			       uint32_t *handle)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *gem_obj;
	int ret;

	cma_obj = drm_gem_cma_create(drm, size);
	if (IS_ERR(cma_obj))
		return cma_obj;

	gem_obj = &cma_obj->base;

	/*
	 * Allocate an ID in the IDR table, where the object is registered.
	 * The returned handle holds that ID, which is what userspace sees.
	 */
	ret = drm_gem_handle_create(file_priv, gem_obj, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put_unlocked(gem_obj);
	if (ret)
		return ERR_PTR(ret);

	return cma_obj;
}

/**
 * drm_gem_cma_free_object - free resources associated with a CMA GEM object
 * @gem_obj: GEM object to free
 *
 * This function frees the backing memory of the CMA GEM object, cleans up the
 * GEM object state and frees the memory used to store the object itself.
 * Drivers using the CMA helpers should set this as their
 * &drm_driver.gem_free_object callback.
 */
void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_gem_cma_object *cma_obj;

	cma_obj = to_drm_gem_cma_obj(gem_obj);

	if (cma_obj->vaddr) {
		dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
			    cma_obj->vaddr, cma_obj->paddr);
	} else if (gem_obj->import_attach) {
		drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
	}

	drm_gem_object_release(gem_obj);

	kfree(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_free_object);

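/*
 * Usage sketch (illustrative only, not part of this file): a driver built on
 * these helpers typically wires them straight into its &struct drm_driver.
 * "foo" is a placeholder driver name.
 */
#if 0	/* usage sketch, not built */
static struct drm_driver foo_drm_driver = {
	.driver_features	= DRIVER_GEM | DRIVER_MODESET,
	/* Releases the CMA backing store (or an imported sg table) on free. */
	.gem_free_object	= drm_gem_cma_free_object,
	.gem_vm_ops		= &drm_gem_cma_vm_ops,
	/* Dumb-buffer support backed by contiguous CMA memory. */
	.dumb_create		= drm_gem_cma_dumb_create,
	.dumb_map_offset	= drm_gem_cma_dumb_map_offset,
	.dumb_destroy		= drm_gem_dumb_destroy,
	/* .fops, .name and the mode-setting hooks are omitted for brevity. */
};
#endif
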
/**
 * drm_gem_cma_dumb_create_internal - create a dumb buffer object
 * @file_priv: DRM file-private structure to create the dumb buffer for
 * @drm: DRM device
 * @args: IOCTL data
 *
 * This aligns the pitch and size arguments to the minimum required. This is
 * an internal helper that can be wrapped by a driver to account for hardware
 * with more specific alignment requirements. It should not be used directly
 * as a driver's &drm_driver.dumb_create callback.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv,
				     struct drm_device *drm,
				     struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct drm_gem_cma_object *cma_obj;

	if (args->pitch < min_pitch)
		args->pitch = min_pitch;

	if (args->size < args->pitch * args->height)
		args->size = args->pitch * args->height;

	cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
						 &args->handle);
	return PTR_ERR_OR_ZERO(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create_internal);

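/*
 * Usage sketch (illustrative only): a driver whose scanout hardware needs,
 * say, a 64-byte-aligned pitch can enforce that before handing the request
 * to drm_gem_cma_dumb_create_internal(). "foo" and the 64-byte requirement
 * are placeholders, not taken from a real driver.
 */
#if 0	/* usage sketch, not built */
static int foo_dumb_create(struct drm_file *file_priv, struct drm_device *drm,
			   struct drm_mode_create_dumb *args)
{
	/* Round the minimum pitch up to the hardware's alignment. */
	args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), 64);

	/* The helper only ever raises pitch and size, so the alignment sticks. */
	return drm_gem_cma_dumb_create_internal(file_priv, drm, args);
}
#endif
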
/**
 * drm_gem_cma_dumb_create - create a dumb buffer object
 * @file_priv: DRM file-private structure to create the dumb buffer for
 * @drm: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace and pass the IOCTL data along to the
 * drm_gem_cma_dumb_create_internal() function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_dumb_create(struct drm_file *file_priv,
			    struct drm_device *drm,
			    struct drm_mode_create_dumb *args)
{
	struct drm_gem_cma_object *cma_obj;

	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	args->size = args->pitch * args->height;

	cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
						 &args->handle);
	return PTR_ERR_OR_ZERO(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create);

/**
 * drm_gem_cma_dumb_map_offset - return the fake mmap offset for a CMA GEM
 *     object
 * @file_priv: DRM file-private structure containing the GEM object
 * @drm: DRM device
 * @handle: GEM object handle
 * @offset: return location for the fake mmap offset
 *
 * This function looks up an object by its handle and returns the fake mmap
 * offset associated with it. Drivers using the CMA helpers should set this
 * as their &drm_driver.dumb_map_offset callback.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
				struct drm_device *drm, u32 handle,
				u64 *offset)
{
	struct drm_gem_object *gem_obj;

	gem_obj = drm_gem_object_lookup(file_priv, handle);
	if (!gem_obj) {
		dev_err(drm->dev, "failed to lookup GEM object\n");
		return -EINVAL;
	}

	*offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

	drm_gem_object_put_unlocked(gem_obj);

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_map_offset);

const struct vm_operations_struct drm_gem_cma_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_cma_vm_ops);

static int drm_gem_cma_mmap_obj(struct drm_gem_cma_object *cma_obj,
				struct vm_area_struct *vma)
{
	int ret;

	/*
	 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
	 * the whole buffer.
	 */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	ret = dma_mmap_wc(cma_obj->base.dev->dev, vma, cma_obj->vaddr,
			  cma_obj->paddr, vma->vm_end - vma->vm_start);
	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}

/**
 * drm_gem_cma_mmap - memory-map a CMA GEM object
 * @filp: file object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for CMA objects: In addition to the usual GEM VMA setup it
 * immediately faults in the entire object instead of using on-demand
 * faulting. Drivers which employ the CMA helpers should use this function
 * as their ->mmap() handler in the DRM device file's file_operations
 * structure.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *gem_obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	gem_obj = vma->vm_private_data;
	cma_obj = to_drm_gem_cma_obj(gem_obj);

	return drm_gem_cma_mmap_obj(cma_obj, vma);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);

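/*
 * Usage sketch (illustrative only): the file_operations a CMA-based driver
 * typically installs, with drm_gem_cma_mmap() as the ->mmap handler. "foo" is
 * a placeholder name; the remaining hooks are the stock DRM file operations.
 */
#if 0	/* usage sketch, not built */
static const struct file_operations foo_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.release	= drm_release,
	.unlocked_ioctl	= drm_ioctl,
	.compat_ioctl	= drm_compat_ioctl,
	.poll		= drm_poll,
	.read		= drm_read,
	.llseek		= noop_llseek,
	/* Map the whole contiguous buffer up front instead of faulting. */
	.mmap		= drm_gem_cma_mmap,
};
#endif
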
#ifndef CONFIG_MMU
/**
 * drm_gem_cma_get_unmapped_area - propose address for mapping in noMMU cases
 * @filp: file object
 * @addr: memory address
 * @len: buffer size
 * @pgoff: page offset
 * @flags: memory flags
 *
 * This function is used on noMMU platforms to propose an address mapping
 * for a given buffer.
 * It's intended to be used as a direct handler for the struct
 * &file_operations.get_unmapped_area operation.
 *
 * Returns:
 * mapping address on success or a negative error code on failure.
 */
unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
					    unsigned long addr,
					    unsigned long len,
					    unsigned long pgoff,
					    unsigned long flags)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *obj = NULL;
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_offset_node *node;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  pgoff,
						  len >> PAGE_SHIFT);
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}

	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put_unlocked(obj);
		return -EACCES;
	}

	cma_obj = to_drm_gem_cma_obj(obj);

	drm_gem_object_put_unlocked(obj);

	return cma_obj->vaddr ? (unsigned long)cma_obj->vaddr : -EINVAL;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_get_unmapped_area);
#endif
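
/*
 * Usage sketch (illustrative only): on noMMU platforms the same
 * file_operations additionally routes get_unmapped_area to the helper above,
 * so that mmap() can succeed without an MMU. "foo_fops" refers to the
 * placeholder file_operations sketched earlier.
 */
#if 0	/* usage sketch, not built */
static const struct file_operations foo_fops = {
	/* ... stock DRM file operations and .mmap = drm_gem_cma_mmap ... */
#ifndef CONFIG_MMU
	.get_unmapped_area = drm_gem_cma_get_unmapped_area,
#endif
};
#endif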

#ifdef CONFIG_DEBUG_FS
/**
 * drm_gem_cma_describe - describe a CMA GEM object for debugfs
 * @cma_obj: CMA GEM object
 * @m: debugfs file handle
 *
 * This function can be used to dump a human-readable representation of the
 * CMA GEM object into a synthetic file.
 */
void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj,
			  struct seq_file *m)
{
	struct drm_gem_object *obj = &cma_obj->base;
	uint64_t off;

	off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%2d (%2d) %08llx %pad %p %zu",
		   obj->name, kref_read(&obj->refcount),
		   off, &cma_obj->paddr, cma_obj->vaddr, obj->size);

	seq_printf(m, "\n");
}
EXPORT_SYMBOL_GPL(drm_gem_cma_describe);
#endif

/**
 * drm_gem_cma_prime_get_sg_table - provide a scatter/gather table of pinned
 *     pages for a CMA GEM object
 * @obj: GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API. Drivers using the CMA helpers should
 * set this as their &drm_driver.gem_prime_get_sg_table callback.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or NULL on failure.
 */
struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
	struct sg_table *sgt;
	int ret;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	ret = dma_get_sgtable(obj->dev->dev, sgt, cma_obj->vaddr,
			      cma_obj->paddr, obj->size);
	if (ret < 0)
		goto out;

	return sgt;

out:
	kfree(sgt);
	return NULL;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_get_sg_table);

/**
 * drm_gem_cma_prime_import_sg_table - produce a CMA GEM object from another
 *     driver's scatter/gather table of pinned pages
 * @dev: device to import into
 * @attach: DMA-BUF attachment
 * @sgt: scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Imported buffers must be physically contiguous in memory
 * (i.e. the scatter/gather table must contain a single entry). Drivers that
 * use the CMA helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
				  struct dma_buf_attachment *attach,
				  struct sg_table *sgt)
{
	struct drm_gem_cma_object *cma_obj;

	if (sgt->nents != 1)
		return ERR_PTR(-EINVAL);

	/* Create a CMA GEM buffer. */
	cma_obj = __drm_gem_cma_create(dev, attach->dmabuf->size);
	if (IS_ERR(cma_obj))
		return ERR_CAST(cma_obj);

	cma_obj->paddr = sg_dma_address(sgt->sgl);
	cma_obj->sgt = sgt;

	DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n", &cma_obj->paddr, attach->dmabuf->size);

	return &cma_obj->base;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import_sg_table);

/**
 * drm_gem_cma_prime_mmap - memory-map an exported CMA GEM object
 * @obj: GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function maps a buffer imported via DRM PRIME into a userspace
 * process's address space. Drivers that use the CMA helpers should set this
 * as their &drm_driver.gem_prime_mmap callback.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
			   struct vm_area_struct *vma)
{
	struct drm_gem_cma_object *cma_obj;
	int ret;

	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	if (ret < 0)
		return ret;

	cma_obj = to_drm_gem_cma_obj(obj);
	return drm_gem_cma_mmap_obj(cma_obj, vma);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap);

/**
 * drm_gem_cma_prime_vmap - map a CMA GEM object into the kernel's virtual
 *     address space
 * @obj: GEM object
 *
 * This function maps a buffer exported via DRM PRIME into the kernel's
 * virtual address space. Since the CMA buffers are already mapped into the
 * kernel virtual address space this simply returns the cached virtual
 * address. Drivers using the CMA helpers should set this as their DRM
 * driver's &drm_driver.gem_prime_vmap callback.
 *
 * Returns:
 * The kernel virtual address of the CMA GEM object's backing store.
 */
void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj)
{
	struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);

	return cma_obj->vaddr;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vmap);

/**
 * drm_gem_cma_prime_vunmap - unmap a CMA GEM object from the kernel's virtual
 *     address space
 * @obj: GEM object
 * @vaddr: kernel virtual address where the CMA GEM object was mapped
 *
 * This function removes a buffer exported via DRM PRIME from the kernel's
 * virtual address space. This is a no-op because CMA buffers cannot be
 * unmapped from kernel space. Drivers using the CMA helpers should set this
 * as their &drm_driver.gem_prime_vunmap callback.
 */
void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	/* Nothing to do */
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vunmap);
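
/*
 * Usage sketch (illustrative only): the PRIME-related &struct drm_driver
 * hooks a CMA-based driver typically installs so that buffers can be
 * exported to and imported from other devices. "foo" is a placeholder name;
 * the drm_gem_prime_* functions are the generic DRM PRIME helpers.
 */
#if 0	/* usage sketch, not built */
static struct drm_driver foo_drm_driver = {
	/* ... GEM, dumb-buffer and mode-setting hooks as sketched above ... */
	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
	.gem_prime_export	= drm_gem_prime_export,
	.gem_prime_import	= drm_gem_prime_import,
	.gem_prime_get_sg_table	= drm_gem_cma_prime_get_sg_table,
	.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
	.gem_prime_vmap		= drm_gem_cma_prime_vmap,
	.gem_prime_vunmap	= drm_gem_cma_prime_vunmap,
	.gem_prime_mmap		= drm_gem_cma_prime_mmap,
};
#endif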