1 // SPDX-License-Identifier: GPL-2.0-or-later
3 #include <drm/drm_debugfs.h>
4 #include <drm/drm_device.h>
5 #include <drm/drm_file.h>
6 #include <drm/drm_framebuffer.h>
7 #include <drm/drm_gem_ttm_helper.h>
8 #include <drm/drm_gem_vram_helper.h>
9 #include <drm/drm_mode.h>
10 #include <drm/drm_plane.h>
11 #include <drm/drm_prime.h>
12 #include <drm/drm_simple_kms_helper.h>
13 #include <drm/ttm/ttm_page_alloc.h>
15 static const struct drm_gem_object_funcs drm_gem_vram_object_funcs
;
/**
 * DOC: overview
 *
 * This library provides a GEM buffer object that is backed by video RAM
 * (VRAM). It can be used for framebuffer devices with dedicated memory.
 *
 * The data structure &struct drm_vram_mm and its helpers implement a memory
 * manager for simple framebuffer devices with dedicated video memory. Buffer
 * objects are either placed in video RAM or evicted to system memory. The rsp.
 * buffer object is provided by &struct drm_gem_vram_object.
 */

/*
 * Buffer-objects helpers
 */
33 static void drm_gem_vram_cleanup(struct drm_gem_vram_object
*gbo
)
35 /* We got here via ttm_bo_put(), which means that the
36 * TTM buffer object in 'bo' has already been cleaned
37 * up; only release the GEM object.
40 WARN_ON(gbo
->kmap_use_count
);
41 WARN_ON(gbo
->kmap
.virtual);
43 drm_gem_object_release(&gbo
->bo
.base
);
46 static void drm_gem_vram_destroy(struct drm_gem_vram_object
*gbo
)
48 drm_gem_vram_cleanup(gbo
);
52 static void ttm_buffer_object_destroy(struct ttm_buffer_object
*bo
)
54 struct drm_gem_vram_object
*gbo
= drm_gem_vram_of_bo(bo
);
56 drm_gem_vram_destroy(gbo
);
59 static void drm_gem_vram_placement(struct drm_gem_vram_object
*gbo
,
60 unsigned long pl_flag
)
64 u32 invariant_flags
= pl_flag
& TTM_PL_FLAG_TOPDOWN
;
66 gbo
->placement
.placement
= gbo
->placements
;
67 gbo
->placement
.busy_placement
= gbo
->placements
;
69 if (pl_flag
& TTM_PL_FLAG_VRAM
)
70 gbo
->placements
[c
++].flags
= TTM_PL_FLAG_WC
|
71 TTM_PL_FLAG_UNCACHED
|
75 if (pl_flag
& TTM_PL_FLAG_SYSTEM
)
76 gbo
->placements
[c
++].flags
= TTM_PL_MASK_CACHING
|
81 gbo
->placements
[c
++].flags
= TTM_PL_MASK_CACHING
|
85 gbo
->placement
.num_placement
= c
;
86 gbo
->placement
.num_busy_placement
= c
;
88 for (i
= 0; i
< c
; ++i
) {
89 gbo
->placements
[i
].fpfn
= 0;
90 gbo
->placements
[i
].lpfn
= 0;
94 static int drm_gem_vram_init(struct drm_device
*dev
,
95 struct ttm_bo_device
*bdev
,
96 struct drm_gem_vram_object
*gbo
,
97 size_t size
, unsigned long pg_align
,
103 gbo
->bo
.base
.funcs
= &drm_gem_vram_object_funcs
;
105 ret
= drm_gem_object_init(dev
, &gbo
->bo
.base
, size
);
109 acc_size
= ttm_bo_dma_acc_size(bdev
, size
, sizeof(*gbo
));
112 drm_gem_vram_placement(gbo
, TTM_PL_FLAG_VRAM
| TTM_PL_FLAG_SYSTEM
);
114 ret
= ttm_bo_init(bdev
, &gbo
->bo
, size
, ttm_bo_type_device
,
115 &gbo
->placement
, pg_align
, interruptible
, acc_size
,
116 NULL
, NULL
, ttm_buffer_object_destroy
);
118 goto err_drm_gem_object_release
;
122 err_drm_gem_object_release
:
123 drm_gem_object_release(&gbo
->bo
.base
);
128 * drm_gem_vram_create() - Creates a VRAM-backed GEM object
129 * @dev: the DRM device
130 * @bdev: the TTM BO device backing the object
131 * @size: the buffer size in bytes
132 * @pg_align: the buffer's alignment in multiples of the page size
133 * @interruptible: sleep interruptible if waiting for memory
136 * A new instance of &struct drm_gem_vram_object on success, or
137 * an ERR_PTR()-encoded error code otherwise.
139 struct drm_gem_vram_object
*drm_gem_vram_create(struct drm_device
*dev
,
140 struct ttm_bo_device
*bdev
,
142 unsigned long pg_align
,
145 struct drm_gem_vram_object
*gbo
;
148 gbo
= kzalloc(sizeof(*gbo
), GFP_KERNEL
);
150 return ERR_PTR(-ENOMEM
);
152 ret
= drm_gem_vram_init(dev
, bdev
, gbo
, size
, pg_align
, interruptible
);
162 EXPORT_SYMBOL(drm_gem_vram_create
);
165 * drm_gem_vram_put() - Releases a reference to a VRAM-backed GEM object
166 * @gbo: the GEM VRAM object
168 * See ttm_bo_put() for more information.
170 void drm_gem_vram_put(struct drm_gem_vram_object
*gbo
)
172 ttm_bo_put(&gbo
->bo
);
174 EXPORT_SYMBOL(drm_gem_vram_put
);
177 * drm_gem_vram_mmap_offset() - Returns a GEM VRAM object's mmap offset
178 * @gbo: the GEM VRAM object
180 * See drm_vma_node_offset_addr() for more information.
183 * The buffer object's offset for userspace mappings on success, or
184 * 0 if no offset is allocated.
186 u64
drm_gem_vram_mmap_offset(struct drm_gem_vram_object
*gbo
)
188 return drm_vma_node_offset_addr(&gbo
->bo
.base
.vma_node
);
190 EXPORT_SYMBOL(drm_gem_vram_mmap_offset
);
193 * drm_gem_vram_offset() - \
194 Returns a GEM VRAM object's offset in video memory
195 * @gbo: the GEM VRAM object
197 * This function returns the buffer object's offset in the device's video
198 * memory. The buffer object has to be pinned to %TTM_PL_VRAM.
201 * The buffer object's offset in video memory on success, or
202 * a negative errno code otherwise.
204 s64
drm_gem_vram_offset(struct drm_gem_vram_object
*gbo
)
206 if (WARN_ON_ONCE(!gbo
->pin_count
))
208 return gbo
->bo
.offset
;
210 EXPORT_SYMBOL(drm_gem_vram_offset
);
212 static int drm_gem_vram_pin_locked(struct drm_gem_vram_object
*gbo
,
213 unsigned long pl_flag
)
216 struct ttm_operation_ctx ctx
= { false, false };
222 drm_gem_vram_placement(gbo
, pl_flag
);
224 for (i
= 0; i
< gbo
->placement
.num_placement
; ++i
)
225 gbo
->placements
[i
].flags
|= TTM_PL_FLAG_NO_EVICT
;
227 ret
= ttm_bo_validate(&gbo
->bo
, &gbo
->placement
, &ctx
);
238 * drm_gem_vram_pin() - Pins a GEM VRAM object in a region.
239 * @gbo: the GEM VRAM object
240 * @pl_flag: a bitmask of possible memory regions
242 * Pinning a buffer object ensures that it is not evicted from
243 * a memory region. A pinned buffer object has to be unpinned before
244 * it can be pinned to another region. If the pl_flag argument is 0,
245 * the buffer is pinned at its current location (video RAM or system
248 * Small buffer objects, such as cursor images, can lead to memory
249 * fragmentation if they are pinned in the middle of video RAM. This
250 * is especially a problem on devices with only a small amount of
251 * video RAM. Fragmentation can prevent the primary framebuffer from
252 * fitting in, even though there's enough memory overall. The modifier
253 * DRM_GEM_VRAM_PL_FLAG_TOPDOWN marks the buffer object to be pinned
254 * at the high end of the memory region to avoid fragmentation.
258 * a negative error code otherwise.
260 int drm_gem_vram_pin(struct drm_gem_vram_object
*gbo
, unsigned long pl_flag
)
264 ret
= ttm_bo_reserve(&gbo
->bo
, true, false, NULL
);
267 ret
= drm_gem_vram_pin_locked(gbo
, pl_flag
);
268 ttm_bo_unreserve(&gbo
->bo
);
272 EXPORT_SYMBOL(drm_gem_vram_pin
);
274 static int drm_gem_vram_unpin_locked(struct drm_gem_vram_object
*gbo
)
277 struct ttm_operation_ctx ctx
= { false, false };
279 if (WARN_ON_ONCE(!gbo
->pin_count
))
286 for (i
= 0; i
< gbo
->placement
.num_placement
; ++i
)
287 gbo
->placements
[i
].flags
&= ~TTM_PL_FLAG_NO_EVICT
;
289 ret
= ttm_bo_validate(&gbo
->bo
, &gbo
->placement
, &ctx
);
297 * drm_gem_vram_unpin() - Unpins a GEM VRAM object
298 * @gbo: the GEM VRAM object
302 * a negative error code otherwise.
304 int drm_gem_vram_unpin(struct drm_gem_vram_object
*gbo
)
308 ret
= ttm_bo_reserve(&gbo
->bo
, true, false, NULL
);
311 ret
= drm_gem_vram_unpin_locked(gbo
);
312 ttm_bo_unreserve(&gbo
->bo
);
316 EXPORT_SYMBOL(drm_gem_vram_unpin
);
318 static void *drm_gem_vram_kmap_locked(struct drm_gem_vram_object
*gbo
,
319 bool map
, bool *is_iomem
)
322 struct ttm_bo_kmap_obj
*kmap
= &gbo
->kmap
;
324 if (gbo
->kmap_use_count
> 0)
327 if (kmap
->virtual || !map
)
330 ret
= ttm_bo_kmap(&gbo
->bo
, 0, gbo
->bo
.num_pages
, kmap
);
335 if (!kmap
->virtual) {
338 return NULL
; /* not mapped; don't increment ref */
340 ++gbo
->kmap_use_count
;
342 return ttm_kmap_obj_virtual(kmap
, is_iomem
);
343 return kmap
->virtual;
347 * drm_gem_vram_kmap() - Maps a GEM VRAM object into kernel address space
348 * @gbo: the GEM VRAM object
349 * @map: establish a mapping if necessary
350 * @is_iomem: returns true if the mapped memory is I/O memory, or false \
351 otherwise; can be NULL
353 * This function maps the buffer object into the kernel's address space
354 * or returns the current mapping. If the parameter map is false, the
355 * function only queries the current mapping, but does not establish a
359 * The buffers virtual address if mapped, or
360 * NULL if not mapped, or
361 * an ERR_PTR()-encoded error code otherwise.
363 void *drm_gem_vram_kmap(struct drm_gem_vram_object
*gbo
, bool map
,
369 ret
= ttm_bo_reserve(&gbo
->bo
, true, false, NULL
);
372 virtual = drm_gem_vram_kmap_locked(gbo
, map
, is_iomem
);
373 ttm_bo_unreserve(&gbo
->bo
);
377 EXPORT_SYMBOL(drm_gem_vram_kmap
);
379 static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object
*gbo
)
381 if (WARN_ON_ONCE(!gbo
->kmap_use_count
))
383 if (--gbo
->kmap_use_count
> 0)
387 * Permanently mapping and unmapping buffers adds overhead from
388 * updating the page tables and creates debugging output. Therefore,
389 * we delay the actual unmap operation until the BO gets evicted
390 * from memory. See drm_gem_vram_bo_driver_move_notify().
395 * drm_gem_vram_kunmap() - Unmaps a GEM VRAM object
396 * @gbo: the GEM VRAM object
398 void drm_gem_vram_kunmap(struct drm_gem_vram_object
*gbo
)
402 ret
= ttm_bo_reserve(&gbo
->bo
, false, false, NULL
);
403 if (WARN_ONCE(ret
, "ttm_bo_reserve_failed(): ret=%d\n", ret
))
405 drm_gem_vram_kunmap_locked(gbo
);
406 ttm_bo_unreserve(&gbo
->bo
);
408 EXPORT_SYMBOL(drm_gem_vram_kunmap
);
411 * drm_gem_vram_vmap() - Pins and maps a GEM VRAM object into kernel address
413 * @gbo: The GEM VRAM object to map
415 * The vmap function pins a GEM VRAM object to its current location, either
416 * system or video memory, and maps its buffer into kernel address space.
417 * As pinned object cannot be relocated, you should avoid pinning objects
418 * permanently. Call drm_gem_vram_vunmap() with the returned address to
419 * unmap and unpin the GEM VRAM object.
421 * If you have special requirements for the pinning or mapping operations,
422 * call drm_gem_vram_pin() and drm_gem_vram_kmap() directly.
425 * The buffer's virtual address on success, or
426 * an ERR_PTR()-encoded error code otherwise.
428 void *drm_gem_vram_vmap(struct drm_gem_vram_object
*gbo
)
433 ret
= ttm_bo_reserve(&gbo
->bo
, true, false, NULL
);
437 ret
= drm_gem_vram_pin_locked(gbo
, 0);
439 goto err_ttm_bo_unreserve
;
440 base
= drm_gem_vram_kmap_locked(gbo
, true, NULL
);
443 goto err_drm_gem_vram_unpin_locked
;
446 ttm_bo_unreserve(&gbo
->bo
);
450 err_drm_gem_vram_unpin_locked
:
451 drm_gem_vram_unpin_locked(gbo
);
452 err_ttm_bo_unreserve
:
453 ttm_bo_unreserve(&gbo
->bo
);
456 EXPORT_SYMBOL(drm_gem_vram_vmap
);
459 * drm_gem_vram_vunmap() - Unmaps and unpins a GEM VRAM object
460 * @gbo: The GEM VRAM object to unmap
461 * @vaddr: The mapping's base address as returned by drm_gem_vram_vmap()
463 * A call to drm_gem_vram_vunmap() unmaps and unpins a GEM VRAM buffer. See
464 * the documentation for drm_gem_vram_vmap() for more information.
466 void drm_gem_vram_vunmap(struct drm_gem_vram_object
*gbo
, void *vaddr
)
470 ret
= ttm_bo_reserve(&gbo
->bo
, false, false, NULL
);
471 if (WARN_ONCE(ret
, "ttm_bo_reserve_failed(): ret=%d\n", ret
))
474 drm_gem_vram_kunmap_locked(gbo
);
475 drm_gem_vram_unpin_locked(gbo
);
477 ttm_bo_unreserve(&gbo
->bo
);
479 EXPORT_SYMBOL(drm_gem_vram_vunmap
);
482 * drm_gem_vram_fill_create_dumb() - \
483 Helper for implementing &struct drm_driver.dumb_create
484 * @file: the DRM file
485 * @dev: the DRM device
486 * @bdev: the TTM BO device managing the buffer object
487 * @pg_align: the buffer's alignment in multiples of the page size
488 * @interruptible: sleep interruptible if waiting for memory
489 * @args: the arguments as provided to \
490 &struct drm_driver.dumb_create
492 * This helper function fills &struct drm_mode_create_dumb, which is used
493 * by &struct drm_driver.dumb_create. Implementations of this interface
494 * should forwards their arguments to this helper, plus the driver-specific
499 * a negative error code otherwise.
501 int drm_gem_vram_fill_create_dumb(struct drm_file
*file
,
502 struct drm_device
*dev
,
503 struct ttm_bo_device
*bdev
,
504 unsigned long pg_align
,
506 struct drm_mode_create_dumb
*args
)
509 struct drm_gem_vram_object
*gbo
;
513 pitch
= args
->width
* ((args
->bpp
+ 7) / 8);
514 size
= pitch
* args
->height
;
516 size
= roundup(size
, PAGE_SIZE
);
520 gbo
= drm_gem_vram_create(dev
, bdev
, size
, pg_align
, interruptible
);
524 ret
= drm_gem_handle_create(file
, &gbo
->bo
.base
, &handle
);
526 goto err_drm_gem_object_put_unlocked
;
528 drm_gem_object_put_unlocked(&gbo
->bo
.base
);
532 args
->handle
= handle
;
536 err_drm_gem_object_put_unlocked
:
537 drm_gem_object_put_unlocked(&gbo
->bo
.base
);
540 EXPORT_SYMBOL(drm_gem_vram_fill_create_dumb
);
543 * Helpers for struct ttm_bo_driver
546 static bool drm_is_gem_vram(struct ttm_buffer_object
*bo
)
548 return (bo
->destroy
== ttm_buffer_object_destroy
);
551 static void drm_gem_vram_bo_driver_evict_flags(struct drm_gem_vram_object
*gbo
,
552 struct ttm_placement
*pl
)
554 drm_gem_vram_placement(gbo
, TTM_PL_FLAG_SYSTEM
);
555 *pl
= gbo
->placement
;
558 static void drm_gem_vram_bo_driver_move_notify(struct drm_gem_vram_object
*gbo
,
560 struct ttm_mem_reg
*new_mem
)
562 struct ttm_bo_kmap_obj
*kmap
= &gbo
->kmap
;
564 if (WARN_ON_ONCE(gbo
->kmap_use_count
))
570 kmap
->virtual = NULL
;
574 * Helpers for struct drm_gem_object_funcs
578 * drm_gem_vram_object_free() - \
579 Implements &struct drm_gem_object_funcs.free
580 * @gem: GEM object. Refers to &struct drm_gem_vram_object.gem
582 static void drm_gem_vram_object_free(struct drm_gem_object
*gem
)
584 struct drm_gem_vram_object
*gbo
= drm_gem_vram_of_gem(gem
);
586 drm_gem_vram_put(gbo
);
590 * Helpers for dump buffers
594 * drm_gem_vram_driver_create_dumb() - \
595 Implements &struct drm_driver.dumb_create
596 * @file: the DRM file
597 * @dev: the DRM device
598 * @args: the arguments as provided to \
599 &struct drm_driver.dumb_create
601 * This function requires the driver to use @drm_device.vram_mm for its
602 * instance of VRAM MM.
606 * a negative error code otherwise.
608 int drm_gem_vram_driver_dumb_create(struct drm_file
*file
,
609 struct drm_device
*dev
,
610 struct drm_mode_create_dumb
*args
)
612 if (WARN_ONCE(!dev
->vram_mm
, "VRAM MM not initialized"))
615 return drm_gem_vram_fill_create_dumb(file
, dev
, &dev
->vram_mm
->bdev
, 0,
618 EXPORT_SYMBOL(drm_gem_vram_driver_dumb_create
);
621 * drm_gem_vram_driver_dumb_mmap_offset() - \
622 Implements &struct drm_driver.dumb_mmap_offset
623 * @file: DRM file pointer.
625 * @handle: GEM handle
626 * @offset: Returns the mapping's memory offset on success
630 * a negative errno code otherwise.
632 int drm_gem_vram_driver_dumb_mmap_offset(struct drm_file
*file
,
633 struct drm_device
*dev
,
634 uint32_t handle
, uint64_t *offset
)
636 struct drm_gem_object
*gem
;
637 struct drm_gem_vram_object
*gbo
;
639 gem
= drm_gem_object_lookup(file
, handle
);
643 gbo
= drm_gem_vram_of_gem(gem
);
644 *offset
= drm_gem_vram_mmap_offset(gbo
);
646 drm_gem_object_put_unlocked(gem
);
650 EXPORT_SYMBOL(drm_gem_vram_driver_dumb_mmap_offset
);
653 * Helpers for struct drm_plane_helper_funcs
657 * drm_gem_vram_plane_helper_prepare_fb() - \
658 * Implements &struct drm_plane_helper_funcs.prepare_fb
659 * @plane: a DRM plane
660 * @new_state: the plane's new state
662 * During plane updates, this function pins the GEM VRAM
663 * objects of the plane's new framebuffer to VRAM. Call
664 * drm_gem_vram_plane_helper_cleanup_fb() to unpin them.
668 * a negative errno code otherwise.
671 drm_gem_vram_plane_helper_prepare_fb(struct drm_plane
*plane
,
672 struct drm_plane_state
*new_state
)
675 struct drm_gem_vram_object
*gbo
;
681 for (i
= 0; i
< ARRAY_SIZE(new_state
->fb
->obj
); ++i
) {
682 if (!new_state
->fb
->obj
[i
])
684 gbo
= drm_gem_vram_of_gem(new_state
->fb
->obj
[i
]);
685 ret
= drm_gem_vram_pin(gbo
, DRM_GEM_VRAM_PL_FLAG_VRAM
);
687 goto err_drm_gem_vram_unpin
;
692 err_drm_gem_vram_unpin
:
695 gbo
= drm_gem_vram_of_gem(new_state
->fb
->obj
[i
]);
696 drm_gem_vram_unpin(gbo
);
700 EXPORT_SYMBOL(drm_gem_vram_plane_helper_prepare_fb
);
703 * drm_gem_vram_plane_helper_cleanup_fb() - \
704 * Implements &struct drm_plane_helper_funcs.cleanup_fb
705 * @plane: a DRM plane
706 * @old_state: the plane's old state
708 * During plane updates, this function unpins the GEM VRAM
709 * objects of the plane's old framebuffer from VRAM. Complements
710 * drm_gem_vram_plane_helper_prepare_fb().
713 drm_gem_vram_plane_helper_cleanup_fb(struct drm_plane
*plane
,
714 struct drm_plane_state
*old_state
)
717 struct drm_gem_vram_object
*gbo
;
722 for (i
= 0; i
< ARRAY_SIZE(old_state
->fb
->obj
); ++i
) {
723 if (!old_state
->fb
->obj
[i
])
725 gbo
= drm_gem_vram_of_gem(old_state
->fb
->obj
[i
]);
726 drm_gem_vram_unpin(gbo
);
729 EXPORT_SYMBOL(drm_gem_vram_plane_helper_cleanup_fb
);
732 * Helpers for struct drm_simple_display_pipe_funcs
736 * drm_gem_vram_simple_display_pipe_prepare_fb() - \
737 * Implements &struct drm_simple_display_pipe_funcs.prepare_fb
738 * @pipe: a simple display pipe
739 * @new_state: the plane's new state
741 * During plane updates, this function pins the GEM VRAM
742 * objects of the plane's new framebuffer to VRAM. Call
743 * drm_gem_vram_simple_display_pipe_cleanup_fb() to unpin them.
747 * a negative errno code otherwise.
749 int drm_gem_vram_simple_display_pipe_prepare_fb(
750 struct drm_simple_display_pipe
*pipe
,
751 struct drm_plane_state
*new_state
)
753 return drm_gem_vram_plane_helper_prepare_fb(&pipe
->plane
, new_state
);
755 EXPORT_SYMBOL(drm_gem_vram_simple_display_pipe_prepare_fb
);
758 * drm_gem_vram_simple_display_pipe_cleanup_fb() - \
759 * Implements &struct drm_simple_display_pipe_funcs.cleanup_fb
760 * @pipe: a simple display pipe
761 * @old_state: the plane's old state
763 * During plane updates, this function unpins the GEM VRAM
764 * objects of the plane's old framebuffer from VRAM. Complements
765 * drm_gem_vram_simple_display_pipe_prepare_fb().
767 void drm_gem_vram_simple_display_pipe_cleanup_fb(
768 struct drm_simple_display_pipe
*pipe
,
769 struct drm_plane_state
*old_state
)
771 drm_gem_vram_plane_helper_cleanup_fb(&pipe
->plane
, old_state
);
773 EXPORT_SYMBOL(drm_gem_vram_simple_display_pipe_cleanup_fb
);
780 * drm_gem_vram_object_pin() - \
781 Implements &struct drm_gem_object_funcs.pin
782 * @gem: The GEM object to pin
786 * a negative errno code otherwise.
788 static int drm_gem_vram_object_pin(struct drm_gem_object
*gem
)
790 struct drm_gem_vram_object
*gbo
= drm_gem_vram_of_gem(gem
);
792 /* Fbdev console emulation is the use case of these PRIME
793 * helpers. This may involve updating a hardware buffer from
794 * a shadow FB. We pin the buffer to it's current location
795 * (either video RAM or system memory) to prevent it from
796 * being relocated during the update operation. If you require
797 * the buffer to be pinned to VRAM, implement a callback that
798 * sets the flags accordingly.
800 return drm_gem_vram_pin(gbo
, 0);
804 * drm_gem_vram_object_unpin() - \
805 Implements &struct drm_gem_object_funcs.unpin
806 * @gem: The GEM object to unpin
808 static void drm_gem_vram_object_unpin(struct drm_gem_object
*gem
)
810 struct drm_gem_vram_object
*gbo
= drm_gem_vram_of_gem(gem
);
812 drm_gem_vram_unpin(gbo
);
816 * drm_gem_vram_object_vmap() - \
817 Implements &struct drm_gem_object_funcs.vmap
818 * @gem: The GEM object to map
821 * The buffers virtual address on success, or
824 static void *drm_gem_vram_object_vmap(struct drm_gem_object
*gem
)
826 struct drm_gem_vram_object
*gbo
= drm_gem_vram_of_gem(gem
);
829 base
= drm_gem_vram_vmap(gbo
);
836 * drm_gem_vram_object_vunmap() - \
837 Implements &struct drm_gem_object_funcs.vunmap
838 * @gem: The GEM object to unmap
839 * @vaddr: The mapping's base address
841 static void drm_gem_vram_object_vunmap(struct drm_gem_object
*gem
,
844 struct drm_gem_vram_object
*gbo
= drm_gem_vram_of_gem(gem
);
846 drm_gem_vram_vunmap(gbo
, vaddr
);
853 static const struct drm_gem_object_funcs drm_gem_vram_object_funcs
= {
854 .free
= drm_gem_vram_object_free
,
855 .pin
= drm_gem_vram_object_pin
,
856 .unpin
= drm_gem_vram_object_unpin
,
857 .vmap
= drm_gem_vram_object_vmap
,
858 .vunmap
= drm_gem_vram_object_vunmap
,
859 .mmap
= drm_gem_ttm_mmap
,
860 .print_info
= drm_gem_ttm_print_info
,
864 * VRAM memory manager
871 static void backend_func_destroy(struct ttm_tt
*tt
)
877 static struct ttm_backend_func backend_func
= {
878 .destroy
= backend_func_destroy
885 static struct ttm_tt
*bo_driver_ttm_tt_create(struct ttm_buffer_object
*bo
,
891 tt
= kzalloc(sizeof(*tt
), GFP_KERNEL
);
895 tt
->func
= &backend_func
;
897 ret
= ttm_tt_init(tt
, bo
, page_flags
);
899 goto err_ttm_tt_init
;
908 static int bo_driver_init_mem_type(struct ttm_bo_device
*bdev
, uint32_t type
,
909 struct ttm_mem_type_manager
*man
)
913 man
->flags
= TTM_MEMTYPE_FLAG_MAPPABLE
;
914 man
->available_caching
= TTM_PL_MASK_CACHING
;
915 man
->default_caching
= TTM_PL_FLAG_CACHED
;
918 man
->func
= &ttm_bo_manager_func
;
919 man
->flags
= TTM_MEMTYPE_FLAG_FIXED
|
920 TTM_MEMTYPE_FLAG_MAPPABLE
;
921 man
->available_caching
= TTM_PL_FLAG_UNCACHED
|
923 man
->default_caching
= TTM_PL_FLAG_WC
;
931 static void bo_driver_evict_flags(struct ttm_buffer_object
*bo
,
932 struct ttm_placement
*placement
)
934 struct drm_gem_vram_object
*gbo
;
936 /* TTM may pass BOs that are not GEM VRAM BOs. */
937 if (!drm_is_gem_vram(bo
))
940 gbo
= drm_gem_vram_of_bo(bo
);
942 drm_gem_vram_bo_driver_evict_flags(gbo
, placement
);
945 static void bo_driver_move_notify(struct ttm_buffer_object
*bo
,
947 struct ttm_mem_reg
*new_mem
)
949 struct drm_gem_vram_object
*gbo
;
951 /* TTM may pass BOs that are not GEM VRAM BOs. */
952 if (!drm_is_gem_vram(bo
))
955 gbo
= drm_gem_vram_of_bo(bo
);
957 drm_gem_vram_bo_driver_move_notify(gbo
, evict
, new_mem
);
960 static int bo_driver_io_mem_reserve(struct ttm_bo_device
*bdev
,
961 struct ttm_mem_reg
*mem
)
963 struct ttm_mem_type_manager
*man
= bdev
->man
+ mem
->mem_type
;
964 struct drm_vram_mm
*vmm
= drm_vram_mm_of_bdev(bdev
);
966 if (!(man
->flags
& TTM_MEMTYPE_FLAG_MAPPABLE
))
969 mem
->bus
.addr
= NULL
;
970 mem
->bus
.size
= mem
->num_pages
<< PAGE_SHIFT
;
972 switch (mem
->mem_type
) {
973 case TTM_PL_SYSTEM
: /* nothing to do */
976 mem
->bus
.is_iomem
= false;
979 mem
->bus
.offset
= mem
->start
<< PAGE_SHIFT
;
980 mem
->bus
.base
= vmm
->vram_base
;
981 mem
->bus
.is_iomem
= true;
990 static void bo_driver_io_mem_free(struct ttm_bo_device
*bdev
,
991 struct ttm_mem_reg
*mem
)
994 static struct ttm_bo_driver bo_driver
= {
995 .ttm_tt_create
= bo_driver_ttm_tt_create
,
996 .ttm_tt_populate
= ttm_pool_populate
,
997 .ttm_tt_unpopulate
= ttm_pool_unpopulate
,
998 .init_mem_type
= bo_driver_init_mem_type
,
999 .eviction_valuable
= ttm_bo_eviction_valuable
,
1000 .evict_flags
= bo_driver_evict_flags
,
1001 .move_notify
= bo_driver_move_notify
,
1002 .io_mem_reserve
= bo_driver_io_mem_reserve
,
1003 .io_mem_free
= bo_driver_io_mem_free
,
1007 * struct drm_vram_mm
1010 #if defined(CONFIG_DEBUG_FS)
1011 static int drm_vram_mm_debugfs(struct seq_file
*m
, void *data
)
1013 struct drm_info_node
*node
= (struct drm_info_node
*) m
->private;
1014 struct drm_vram_mm
*vmm
= node
->minor
->dev
->vram_mm
;
1015 struct drm_mm
*mm
= vmm
->bdev
.man
[TTM_PL_VRAM
].priv
;
1016 struct drm_printer p
= drm_seq_file_printer(m
);
1018 spin_lock(&ttm_bo_glob
.lru_lock
);
1019 drm_mm_print(mm
, &p
);
1020 spin_unlock(&ttm_bo_glob
.lru_lock
);
1024 static const struct drm_info_list drm_vram_mm_debugfs_list
[] = {
1025 { "vram-mm", drm_vram_mm_debugfs
, 0, NULL
},
1030 * drm_vram_mm_debugfs_init() - Register VRAM MM debugfs file.
1032 * @minor: drm minor device.
1036 * a negative error code otherwise.
1038 int drm_vram_mm_debugfs_init(struct drm_minor
*minor
)
1042 #if defined(CONFIG_DEBUG_FS)
1043 ret
= drm_debugfs_create_files(drm_vram_mm_debugfs_list
,
1044 ARRAY_SIZE(drm_vram_mm_debugfs_list
),
1045 minor
->debugfs_root
, minor
);
1049 EXPORT_SYMBOL(drm_vram_mm_debugfs_init
);
1051 static int drm_vram_mm_init(struct drm_vram_mm
*vmm
, struct drm_device
*dev
,
1052 uint64_t vram_base
, size_t vram_size
)
1056 vmm
->vram_base
= vram_base
;
1057 vmm
->vram_size
= vram_size
;
1059 ret
= ttm_bo_device_init(&vmm
->bdev
, &bo_driver
,
1060 dev
->anon_inode
->i_mapping
,
1061 dev
->vma_offset_manager
,
1066 ret
= ttm_bo_init_mm(&vmm
->bdev
, TTM_PL_VRAM
, vram_size
>> PAGE_SHIFT
);
1073 static void drm_vram_mm_cleanup(struct drm_vram_mm
*vmm
)
1075 ttm_bo_device_release(&vmm
->bdev
);
1079 * Helpers for integration with struct drm_device
1083 * drm_vram_helper_alloc_mm - Allocates a device's instance of \
1085 * @dev: the DRM device
1086 * @vram_base: the base address of the video memory
1087 * @vram_size: the size of the video memory in bytes
1090 * The new instance of &struct drm_vram_mm on success, or
1091 * an ERR_PTR()-encoded errno code otherwise.
1093 struct drm_vram_mm
*drm_vram_helper_alloc_mm(
1094 struct drm_device
*dev
, uint64_t vram_base
, size_t vram_size
)
1098 if (WARN_ON(dev
->vram_mm
))
1099 return dev
->vram_mm
;
1101 dev
->vram_mm
= kzalloc(sizeof(*dev
->vram_mm
), GFP_KERNEL
);
1103 return ERR_PTR(-ENOMEM
);
1105 ret
= drm_vram_mm_init(dev
->vram_mm
, dev
, vram_base
, vram_size
);
1109 return dev
->vram_mm
;
1112 kfree(dev
->vram_mm
);
1113 dev
->vram_mm
= NULL
;
1114 return ERR_PTR(ret
);
1116 EXPORT_SYMBOL(drm_vram_helper_alloc_mm
);
1119 * drm_vram_helper_release_mm - Releases a device's instance of \
1121 * @dev: the DRM device
1123 void drm_vram_helper_release_mm(struct drm_device
*dev
)
1128 drm_vram_mm_cleanup(dev
->vram_mm
);
1129 kfree(dev
->vram_mm
);
1130 dev
->vram_mm
= NULL
;
1132 EXPORT_SYMBOL(drm_vram_helper_release_mm
);