2 * Copyright (C) 2013-2017 Oracle Corporation
3 * This file is based on ast_ttm.c
4 * Copyright 2012 Red Hat Inc.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
18 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
19 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
20 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 * The above copyright notice and this permission notice (including the
23 * next paragraph) shall be included in all copies or substantial portions
27 * Authors: Dave Airlie <airlied@redhat.com>
28 * Michael Thayer <michael.thayer@oracle.com>
31 #include <ttm/ttm_page_alloc.h>
33 static inline struct vbox_private
*vbox_bdev(struct ttm_bo_device
*bd
)
35 return container_of(bd
, struct vbox_private
, ttm
.bdev
);
38 static int vbox_ttm_mem_global_init(struct drm_global_reference
*ref
)
40 return ttm_mem_global_init(ref
->object
);
43 static void vbox_ttm_mem_global_release(struct drm_global_reference
*ref
)
45 ttm_mem_global_release(ref
->object
);
49 * Adds the vbox memory manager object/structures to the global memory manager.
51 static int vbox_ttm_global_init(struct vbox_private
*vbox
)
53 struct drm_global_reference
*global_ref
;
56 global_ref
= &vbox
->ttm
.mem_global_ref
;
57 global_ref
->global_type
= DRM_GLOBAL_TTM_MEM
;
58 global_ref
->size
= sizeof(struct ttm_mem_global
);
59 global_ref
->init
= &vbox_ttm_mem_global_init
;
60 global_ref
->release
= &vbox_ttm_mem_global_release
;
61 ret
= drm_global_item_ref(global_ref
);
63 DRM_ERROR("Failed setting up TTM memory subsystem.\n");
67 vbox
->ttm
.bo_global_ref
.mem_glob
= vbox
->ttm
.mem_global_ref
.object
;
68 global_ref
= &vbox
->ttm
.bo_global_ref
.ref
;
69 global_ref
->global_type
= DRM_GLOBAL_TTM_BO
;
70 global_ref
->size
= sizeof(struct ttm_bo_global
);
71 global_ref
->init
= &ttm_bo_global_init
;
72 global_ref
->release
= &ttm_bo_global_release
;
74 ret
= drm_global_item_ref(global_ref
);
76 DRM_ERROR("Failed setting up TTM BO subsystem.\n");
77 drm_global_item_unref(&vbox
->ttm
.mem_global_ref
);
85 * Removes the vbox memory manager object from the global memory manager.
87 static void vbox_ttm_global_release(struct vbox_private
*vbox
)
89 drm_global_item_unref(&vbox
->ttm
.bo_global_ref
.ref
);
90 drm_global_item_unref(&vbox
->ttm
.mem_global_ref
);
93 static void vbox_bo_ttm_destroy(struct ttm_buffer_object
*tbo
)
97 bo
= container_of(tbo
, struct vbox_bo
, bo
);
99 drm_gem_object_release(&bo
->gem
);
103 static bool vbox_ttm_bo_is_vbox_bo(struct ttm_buffer_object
*bo
)
105 if (bo
->destroy
== &vbox_bo_ttm_destroy
)
112 vbox_bo_init_mem_type(struct ttm_bo_device
*bdev
, u32 type
,
113 struct ttm_mem_type_manager
*man
)
117 man
->flags
= TTM_MEMTYPE_FLAG_MAPPABLE
;
118 man
->available_caching
= TTM_PL_MASK_CACHING
;
119 man
->default_caching
= TTM_PL_FLAG_CACHED
;
122 man
->func
= &ttm_bo_manager_func
;
123 man
->flags
= TTM_MEMTYPE_FLAG_FIXED
| TTM_MEMTYPE_FLAG_MAPPABLE
;
124 man
->available_caching
= TTM_PL_FLAG_UNCACHED
| TTM_PL_FLAG_WC
;
125 man
->default_caching
= TTM_PL_FLAG_WC
;
128 DRM_ERROR("Unsupported memory type %u\n", (unsigned int)type
);
136 vbox_bo_evict_flags(struct ttm_buffer_object
*bo
, struct ttm_placement
*pl
)
138 struct vbox_bo
*vboxbo
= vbox_bo(bo
);
140 if (!vbox_ttm_bo_is_vbox_bo(bo
))
143 vbox_ttm_placement(vboxbo
, TTM_PL_FLAG_SYSTEM
);
144 *pl
= vboxbo
->placement
;
/*
 * ttm_bo_driver::verify_access — no per-file access restrictions for this
 * driver, so always grant access.
 * NOTE(review): the body was missing in the mangled source; the trivial
 * "return 0" implementation matches the upstream driver — confirm.
 */
static int vbox_bo_verify_access(struct ttm_buffer_object *bo,
				 struct file *filp)
{
	return 0;
}
153 static int vbox_ttm_io_mem_reserve(struct ttm_bo_device
*bdev
,
154 struct ttm_mem_reg
*mem
)
156 struct ttm_mem_type_manager
*man
= &bdev
->man
[mem
->mem_type
];
157 struct vbox_private
*vbox
= vbox_bdev(bdev
);
159 mem
->bus
.addr
= NULL
;
161 mem
->bus
.size
= mem
->num_pages
<< PAGE_SHIFT
;
163 mem
->bus
.is_iomem
= false;
164 if (!(man
->flags
& TTM_MEMTYPE_FLAG_MAPPABLE
))
166 switch (mem
->mem_type
) {
171 mem
->bus
.offset
= mem
->start
<< PAGE_SHIFT
;
172 mem
->bus
.base
= pci_resource_start(vbox
->dev
->pdev
, 0);
173 mem
->bus
.is_iomem
= true;
/*
 * ttm_bo_driver::io_mem_free — nothing to undo; io_mem_reserve above
 * allocates no resources.
 */
static void vbox_ttm_io_mem_free(struct ttm_bo_device *bdev,
				 struct ttm_mem_reg *mem)
{
}
186 static int vbox_bo_move(struct ttm_buffer_object
*bo
,
187 bool evict
, bool interruptible
,
188 bool no_wait_gpu
, struct ttm_mem_reg
*new_mem
)
190 return ttm_bo_move_memcpy(bo
, interruptible
, no_wait_gpu
, new_mem
);
/*
 * ttm_backend_func::destroy — tear down and free a ttm_tt allocated by
 * vbox_ttm_tt_create().
 * NOTE(review): the body was missing in the mangled source; ttm_tt_fini()
 * followed by kfree() matches the upstream driver — confirm.
 */
static void vbox_ttm_backend_destroy(struct ttm_tt *tt)
{
	ttm_tt_fini(tt);
	kfree(tt);
}
199 static struct ttm_backend_func vbox_tt_backend_func
= {
200 .destroy
= &vbox_ttm_backend_destroy
,
203 static struct ttm_tt
*vbox_ttm_tt_create(struct ttm_bo_device
*bdev
,
206 struct page
*dummy_read_page
)
210 tt
= kzalloc(sizeof(*tt
), GFP_KERNEL
);
214 tt
->func
= &vbox_tt_backend_func
;
215 if (ttm_tt_init(tt
, bdev
, size
, page_flags
, dummy_read_page
)) {
/* ttm_bo_driver::ttm_tt_populate — back the tt with pages from the TTM pool. */
static int vbox_ttm_tt_populate(struct ttm_tt *ttm)
{
	return ttm_pool_populate(ttm);
}
/* ttm_bo_driver::ttm_tt_unpopulate — return the tt's pages to the TTM pool. */
static void vbox_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	ttm_pool_unpopulate(ttm);
}
233 struct ttm_bo_driver vbox_bo_driver
= {
234 .ttm_tt_create
= vbox_ttm_tt_create
,
235 .ttm_tt_populate
= vbox_ttm_tt_populate
,
236 .ttm_tt_unpopulate
= vbox_ttm_tt_unpopulate
,
237 .init_mem_type
= vbox_bo_init_mem_type
,
238 .eviction_valuable
= ttm_bo_eviction_valuable
,
239 .evict_flags
= vbox_bo_evict_flags
,
240 .move
= vbox_bo_move
,
241 .verify_access
= vbox_bo_verify_access
,
242 .io_mem_reserve
= &vbox_ttm_io_mem_reserve
,
243 .io_mem_free
= &vbox_ttm_io_mem_free
,
244 .io_mem_pfn
= ttm_bo_default_io_mem_pfn
,
247 int vbox_mm_init(struct vbox_private
*vbox
)
250 struct drm_device
*dev
= vbox
->dev
;
251 struct ttm_bo_device
*bdev
= &vbox
->ttm
.bdev
;
253 ret
= vbox_ttm_global_init(vbox
);
257 ret
= ttm_bo_device_init(&vbox
->ttm
.bdev
,
258 vbox
->ttm
.bo_global_ref
.ref
.object
,
260 dev
->anon_inode
->i_mapping
,
261 DRM_FILE_PAGE_OFFSET
, true);
263 DRM_ERROR("Error initialising bo driver; %d\n", ret
);
264 goto err_ttm_global_release
;
267 ret
= ttm_bo_init_mm(bdev
, TTM_PL_VRAM
,
268 vbox
->available_vram_size
>> PAGE_SHIFT
);
270 DRM_ERROR("Failed ttm VRAM init: %d\n", ret
);
271 goto err_device_release
;
275 vbox
->fb_mtrr
= drm_mtrr_add(pci_resource_start(dev
->pdev
, 0),
276 pci_resource_len(dev
->pdev
, 0),
279 vbox
->fb_mtrr
= arch_phys_wc_add(pci_resource_start(dev
->pdev
, 0),
280 pci_resource_len(dev
->pdev
, 0));
285 ttm_bo_device_release(&vbox
->ttm
.bdev
);
286 err_ttm_global_release
:
287 vbox_ttm_global_release(vbox
);
291 void vbox_mm_fini(struct vbox_private
*vbox
)
294 drm_mtrr_del(vbox
->fb_mtrr
,
295 pci_resource_start(vbox
->dev
->pdev
, 0),
296 pci_resource_len(vbox
->dev
->pdev
, 0), DRM_MTRR_WC
);
298 arch_phys_wc_del(vbox
->fb_mtrr
);
300 ttm_bo_device_release(&vbox
->ttm
.bdev
);
301 vbox_ttm_global_release(vbox
);
304 void vbox_ttm_placement(struct vbox_bo
*bo
, int domain
)
309 bo
->placement
.placement
= bo
->placements
;
310 bo
->placement
.busy_placement
= bo
->placements
;
312 if (domain
& TTM_PL_FLAG_VRAM
)
313 bo
->placements
[c
++].flags
=
314 TTM_PL_FLAG_WC
| TTM_PL_FLAG_UNCACHED
| TTM_PL_FLAG_VRAM
;
315 if (domain
& TTM_PL_FLAG_SYSTEM
)
316 bo
->placements
[c
++].flags
=
317 TTM_PL_MASK_CACHING
| TTM_PL_FLAG_SYSTEM
;
319 bo
->placements
[c
++].flags
=
320 TTM_PL_MASK_CACHING
| TTM_PL_FLAG_SYSTEM
;
322 bo
->placement
.num_placement
= c
;
323 bo
->placement
.num_busy_placement
= c
;
325 for (i
= 0; i
< c
; ++i
) {
326 bo
->placements
[i
].fpfn
= 0;
327 bo
->placements
[i
].lpfn
= 0;
331 int vbox_bo_create(struct drm_device
*dev
, int size
, int align
,
332 u32 flags
, struct vbox_bo
**pvboxbo
)
334 struct vbox_private
*vbox
= dev
->dev_private
;
335 struct vbox_bo
*vboxbo
;
339 vboxbo
= kzalloc(sizeof(*vboxbo
), GFP_KERNEL
);
343 ret
= drm_gem_object_init(dev
, &vboxbo
->gem
, size
);
345 goto err_free_vboxbo
;
347 vboxbo
->bo
.bdev
= &vbox
->ttm
.bdev
;
349 vbox_ttm_placement(vboxbo
, TTM_PL_FLAG_VRAM
| TTM_PL_FLAG_SYSTEM
);
351 acc_size
= ttm_bo_dma_acc_size(&vbox
->ttm
.bdev
, size
,
352 sizeof(struct vbox_bo
));
354 ret
= ttm_bo_init(&vbox
->ttm
.bdev
, &vboxbo
->bo
, size
,
355 ttm_bo_type_device
, &vboxbo
->placement
,
356 align
>> PAGE_SHIFT
, false, NULL
, acc_size
,
357 NULL
, NULL
, vbox_bo_ttm_destroy
);
359 goto err_free_vboxbo
;
370 static inline u64
vbox_bo_gpu_offset(struct vbox_bo
*bo
)
372 return bo
->bo
.offset
;
375 int vbox_bo_pin(struct vbox_bo
*bo
, u32 pl_flag
, u64
*gpu_addr
)
382 *gpu_addr
= vbox_bo_gpu_offset(bo
);
387 vbox_ttm_placement(bo
, pl_flag
);
389 for (i
= 0; i
< bo
->placement
.num_placement
; i
++)
390 bo
->placements
[i
].flags
|= TTM_PL_FLAG_NO_EVICT
;
392 ret
= ttm_bo_validate(&bo
->bo
, &bo
->placement
, false, false);
399 *gpu_addr
= vbox_bo_gpu_offset(bo
);
404 int vbox_bo_unpin(struct vbox_bo
*bo
)
408 if (!bo
->pin_count
) {
409 DRM_ERROR("unpin bad %p\n", bo
);
416 for (i
= 0; i
< bo
->placement
.num_placement
; i
++)
417 bo
->placements
[i
].flags
&= ~TTM_PL_FLAG_NO_EVICT
;
419 ret
= ttm_bo_validate(&bo
->bo
, &bo
->placement
, false, false);
427 * Move a vbox-owned buffer object to system memory if no one else has it
428 * pinned. The caller must have pinned it previously, and this call will
429 * release the caller's pin.
431 int vbox_bo_push_sysram(struct vbox_bo
*bo
)
435 if (!bo
->pin_count
) {
436 DRM_ERROR("unpin bad %p\n", bo
);
443 if (bo
->kmap
.virtual)
444 ttm_bo_kunmap(&bo
->kmap
);
446 vbox_ttm_placement(bo
, TTM_PL_FLAG_SYSTEM
);
448 for (i
= 0; i
< bo
->placement
.num_placement
; i
++)
449 bo
->placements
[i
].flags
|= TTM_PL_FLAG_NO_EVICT
;
451 ret
= ttm_bo_validate(&bo
->bo
, &bo
->placement
, false, false);
453 DRM_ERROR("pushing to VRAM failed\n");
460 int vbox_mmap(struct file
*filp
, struct vm_area_struct
*vma
)
462 struct drm_file
*file_priv
;
463 struct vbox_private
*vbox
;
465 if (unlikely(vma
->vm_pgoff
< DRM_FILE_PAGE_OFFSET
))
468 file_priv
= filp
->private_data
;
469 vbox
= file_priv
->minor
->dev
->dev_private
;
471 return ttm_bo_mmap(filp
, vma
, &vbox
->ttm
.bdev
);