/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/mem_encrypt.h>
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/drm_gem.h>
#include "drm_internal.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the CPU, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls. However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file. However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls. The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */
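
/*
 * Illustrative sketch (not part of the original file): from userspace a
 * handle is released through an ioctl rather than close(2). The handle
 * below would come from a driver-specific create ioctl, DRM_IOCTL_GEM_OPEN
 * or a dma-buf import:
 *
 *	struct drm_gem_close args = { .handle = handle };
 *
 *	ioctl(drm_fd, DRM_IOCTL_GEM_CLOSE, &args);
 *
 * Dropping the last handle releases the handle reference; the object is
 * freed once all remaining kernel-internal references are gone too.
 */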

/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */

/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked up offset will fit
 */

#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif
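
/*
 * Worked example (added note, assuming PAGE_SHIFT == 12, i.e. 4 KiB pages,
 * on a 64-bit machine): DRM_FILE_PAGE_OFFSET_START is
 * (0xFFFFFFFF >> 12) + 1 = 0x100000 pages, i.e. byte offset 4 GiB, just
 * above anything a 32-bit offset could name, and the range spans 16 times
 * that. A fake GEM offset can therefore be told apart from an ordinary
 * file offset at mmap time.
 */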

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_vma_offset_manager *vma_offset_manager;

	mutex_init(&dev->object_name_lock);
	idr_init(&dev->object_name_idr);

	vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
	if (!vma_offset_manager) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{
	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
	kfree(dev->vma_offset_manager);
	dev->vma_offset_manager = NULL;
}

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	struct file *filp;

	drm_gem_private_object_init(dev, obj, size);

	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);

/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
	drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);

static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
	/*
	 * Note: obj->dma_buf can't disappear as long as we still hold a
	 * handle reference in obj->handle_count.
	 */
	mutex_lock(&filp->prime.lock);
	if (obj->dma_buf) {
		drm_prime_remove_buf_handle_locked(&filp->prime,
						   obj->dma_buf);
	}
	mutex_unlock(&filp->prime.lock);
}

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory.
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
}

static void
drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	bool final = false;

	if (WARN_ON(obj->handle_count == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name
	 */

	mutex_lock(&dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
		final = true;
	}
	mutex_unlock(&dev->object_name_lock);

	if (final)
		drm_gem_object_put_unlocked(obj);
}

/*
 * Called at device or object close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_gem_remove_prime_handles(obj, file_priv);
	drm_vma_node_revoke(&obj->vma_node, file_priv);

	drm_gem_object_handle_put_unlocked(obj);

	return 0;
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table which has been added with
 * drm_gem_handle_create(). If this is the last handle also cleans up linked
 * resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_replace(&filp->object_idr, NULL, handle);
	spin_unlock(&filp->table_lock);
	if (IS_ERR_OR_NULL(obj))
		return -EINVAL;

	/* Release driver's reference and decrement refcount. */
	drm_gem_object_release_handle(handle, obj, filp);

	/* And finally make the handle available for future allocations. */
	spin_lock(&filp->table_lock);
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
 * @file: drm file-private structure containing the gem object
 * @dev: corresponding drm_device
 * @handle: gem object handle
 * @offset: return location for the fake mmap offset
 *
 * This implements the &drm_driver.dumb_map_offset kms driver callback for
 * drivers which use gem to manage their backing storage.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
			    u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	/* Don't allow imported objects to be mapped */
	if (obj->import_attach) {
		ret = -EINVAL;
		goto out;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
	drm_gem_object_put_unlocked(obj);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);

/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 * @file: drm file-private structure to remove the dumb handle from
 * @dev: corresponding drm_device
 * @handle: the dumb handle to remove
 *
 * This implements the &drm_driver.dumb_destroy kms driver callback for drivers
 * which use gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);
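
/*
 * A driver relying on these helpers typically just wires them into its
 * &struct drm_driver. A minimal sketch, where foo_driver and
 * foo_dumb_create are made-up names and only dumb_create needs
 * driver-specific code, since allocation is device dependent:
 *
 *	static struct drm_driver foo_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET,
 *		.dumb_create	 = foo_dumb_create,
 *		.dumb_map_offset = drm_gem_dumb_map_offset,
 *		.dumb_destroy	 = drm_gem_dumb_destroy,
 *		... further fields omitted ...
 *	};
 */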

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the &drm_device.object_name_lock to be held already and will
 * drop it before returning. Used to avoid races in establishing new handles
 * when importing an object from either a flink name or a dma-buf.
 *
 * Handles must be released again through drm_gem_handle_delete(). This is done
 * when userspace closes @file_priv for all attached handles, or through the
 * GEM_CLOSE ioctl for individual handles.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	u32 handle;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
	if (obj->handle_count++ == 0)
		drm_gem_object_get(obj);

	/*
	 * Get the user-visible handle using idr. Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

	spin_unlock(&file_priv->table_lock);
	idr_preload_end();

	mutex_unlock(&dev->object_name_lock);
	if (ret < 0)
		goto err_unref;

	handle = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv);
	if (ret)
		goto err_remove;

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret)
			goto err_revoke;
	}

	*handlep = handle;
	return 0;

err_revoke:
	drm_vma_node_revoke(&obj->vma_node, file_priv);
err_remove:
	spin_lock(&file_priv->table_lock);
	idr_remove(&file_priv->object_idr, handle);
	spin_unlock(&file_priv->table_lock);
err_unref:
	drm_gem_object_handle_put_unlocked(obj);
	return ret;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to dereference the object afterwards.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
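
/*
 * Sketch of a driver's &drm_driver.dumb_create implementation using the
 * helpers above. foo_dumb_create and the bare kzalloc'd object are
 * illustrative only; real drivers embed struct drm_gem_object in their own
 * object type and free it from their gem_free_object callback:
 *
 *	static int foo_dumb_create(struct drm_file *file,
 *				   struct drm_device *dev,
 *				   struct drm_mode_create_dumb *args)
 *	{
 *		struct drm_gem_object *obj;
 *		int ret;
 *
 *		args->pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
 *		args->size = PAGE_ALIGN(args->pitch * args->height);
 *
 *		obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *		if (!obj)
 *			return -ENOMEM;
 *
 *		ret = drm_gem_object_init(dev, obj, args->size);
 *		if (ret) {
 *			kfree(obj);
 *			return ret;
 *		}
 *
 *		ret = drm_gem_handle_create(file, obj, &args->handle);
 *
 *		... then drop the local reference: on success the handle
 *		keeps the object alive, on failure this frees it ...
 *
 *		drm_gem_object_put_unlocked(obj);
 *		return ret;
 *	}
 */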

/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 *
 * Note that drm_gem_object_release() already calls this function, so drivers
 * don't have to take care of releasing the mmap offset themselves when freeing
 * the GEM object.
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. &drm_gem_object.size).
 * Otherwise just use drm_gem_create_mmap_offset().
 *
 * This function is idempotent and handles an already allocated mmap offset
 * transparently. Drivers do not need to check for this case.
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 *
 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
 * the fake offset again.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
	struct address_space *mapping;
	struct page *p, **pages;
	int i, npages;

	/* This is the shared memory object that backs the GEM resource */
	mapping = obj->filp->f_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(p))
			goto fail;
		pages[i] = p;

		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
		 * correct region during swapin. Note that this requires
		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
		 * so shmem can relocate pages during swapin if required.
		 */
		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
		       (page_to_pfn(p) >= 0x00100000UL));
	}

	return pages;

fail:
	while (i--)
		put_page(pages[i]);

	kvfree(pages);
	return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		       bool dirty, bool accessed)
{
	int i, npages;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	for (i = 0; i < npages; i++) {
		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table */
		put_page(pages[i]);
	}

	kvfree(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);
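
/*
 * Sketch of the intended pairing (foo_device_map() is a placeholder for
 * whatever device-specific setup a driver performs on the pinned pages):
 *
 *	pages = drm_gem_get_pages(obj);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *
 *	ret = foo_device_map(obj, pages, obj->size >> PAGE_SHIFT);
 *
 *	drm_gem_put_pages(obj, pages, true, false);
 *
 * Passing dirty = true when the device may have written to the pages makes
 * sure that data is written back before the pages are swapped out.
 */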

/**
 * drm_gem_object_lookup - look up a GEM object from its handle
 * @filp: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A reference to the object named by the handle if such exists on @filp, NULL
 * otherwise.
 */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj)
		drm_gem_object_get(obj);

	spin_unlock(&filp->table_lock);

	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
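
/*
 * Sketch of a typical caller (foo_do_something() is a placeholder): every
 * successful lookup returns a reference that must be dropped again:
 *
 *	obj = drm_gem_object_lookup(file_priv, args->handle);
 *	if (!obj)
 *		return -ENOENT;
 *
 *	ret = foo_do_something(obj);
 *
 *	drm_gem_object_put_unlocked(obj);
 *	return ret;
 */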

/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -ENODEV;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_put_unlocked(obj);
	return ret;
}

/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_get(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	drm_gem_object_put_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}

/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init(&file_private->object_idr);
	spin_lock_init(&file_private->table_lock);
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}

/**
 * drm_gem_object_release - release GEM buffer object resources
 * @obj: GEM buffer object
 *
 * This releases any structures and resources used by @obj and is the inverse
 * of drm_gem_object_init().
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
	WARN_ON(obj->dma_buf);

	if (obj->filp)
		fput(obj->filp);

	drm_gem_free_mmap_offset(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 * Must be called holding &drm_device.struct_mutex.
 *
 * Frees the object
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj =
		container_of(kref, struct drm_gem_object, refcount);
	struct drm_device *dev = obj->dev;

	if (dev->driver->gem_free_object_unlocked) {
		dev->driver->gem_free_object_unlocked(obj);
	} else if (dev->driver->gem_free_object) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));

		dev->driver->gem_free_object(obj);
	}
}
EXPORT_SYMBOL(drm_gem_object_free);

/**
 * drm_gem_object_put_unlocked - drop a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This releases a reference to @obj. Callers must not hold the
 * &drm_device.struct_mutex lock when calling this function.
 *
 * See also __drm_gem_object_put().
 */
void
drm_gem_object_put_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev;

	if (!obj)
		return;

	dev = obj->dev;

	if (dev->driver->gem_free_object_unlocked) {
		kref_put(&obj->refcount, drm_gem_object_free);
	} else {
		might_lock(&dev->struct_mutex);
		if (kref_put_mutex(&obj->refcount, drm_gem_object_free,
				   &dev->struct_mutex))
			mutex_unlock(&dev->struct_mutex);
	}
}
EXPORT_SYMBOL(drm_gem_object_put_unlocked);

/**
 * drm_gem_object_put - release a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This releases a reference to @obj. Callers must hold the
 * &drm_device.struct_mutex lock when calling this function, even when the
 * driver doesn't use &drm_device.struct_mutex for anything.
 *
 * For drivers not encumbered with legacy locking use
 * drm_gem_object_put_unlocked() instead.
 */
void
drm_gem_object_put(struct drm_gem_object *obj)
{
	if (obj) {
		WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

		kref_put(&obj->refcount, drm_gem_object_free);
	}
}
EXPORT_SYMBOL(drm_gem_object_put);

/**
 * drm_gem_vm_open - vma->ops->open implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct open() callback for GEM
 * drivers. This must be used together with drm_gem_vm_close().
 */
void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_get(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

/**
 * drm_gem_vm_close - vma->ops->close implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct close() callback for GEM
 * drivers. This must be used together with drm_gem_vm_open().
 */
void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_put_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);
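
/*
 * Drivers hook these two helpers into their &drm_driver.gem_vm_ops next to
 * a driver fault handler. A sketch, with foo_gem_fault as a placeholder:
 *
 *	static const struct vm_operations_struct foo_gem_vm_ops = {
 *		.fault = foo_gem_fault,
 *		.open  = drm_gem_vm_open,
 *		.close = drm_gem_vm_close,
 *	};
 *
 * drm_gem_mmap_obj() below installs these ops on the VMA and takes the
 * object reference that drm_gem_vm_close() eventually drops.
 */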

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
 * provided by the driver. Depending on their requirements, drivers can either
 * provide a fault handler in their gem_vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * Returns 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no gem_vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	struct drm_device *dev = obj->dev;

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!dev->driver->gem_vm_ops)
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = dev->driver->gem_vm_ops;
	vma->vm_private_data = obj;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_get(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;
	int ret;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put_unlocked(obj);
		return -EACCES;
	}

	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
			       vma);

	drm_gem_object_put_unlocked(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
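
/*
 * Userspace sketch of the full path above: the fake offset handed out by
 * the driver's dumb_map_offset hook is passed straight to mmap(2) on the
 * DRM fd, where it serves only as a lookup token:
 *
 *	struct drm_mode_map_dumb map = { .handle = handle };
 *
 *	ioctl(drm_fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   drm_fd, map.offset);
 *
 * drm_gem_mmap() resolves map.offset back to the GEM object; no file data
 * at that offset is ever mapped directly.
 */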