/*
 * Copyright (C) 2013-2017 Oracle Corporation
 * This file is based on ast_ttm.c
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Michael Thayer <michael.thayer@oracle.com>
 */
#include "vbox_drv.h"
#include <ttm/ttm_page_alloc.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0) && !defined(RHEL_73)
#define PLACEMENT_FLAGS(placement) (placement)
#else
#define PLACEMENT_FLAGS(placement) ((placement).flags)
#endif
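/* Recover the driver-private structure that embeds a given TTM device. */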
static inline struct vbox_private *vbox_bdev(struct ttm_bo_device *bd)
{
	return container_of(bd, struct vbox_private, ttm.bdev);
}
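/*
 * init/release hooks for the DRM global TTM memory-accounting object;
 * vbox_ttm_global_init() below registers them through a drm_global_reference.
 */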
static int vbox_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void vbox_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}
/**
 * Adds the vbox memory manager object/structures to the global memory manager.
 */
static int vbox_ttm_global_init(struct vbox_private *vbox)
{
	struct drm_global_reference *global_ref;
	int r;

	global_ref = &vbox->ttm.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &vbox_ttm_mem_global_init;
	global_ref->release = &vbox_ttm_mem_global_release;
	r = drm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM memory accounting subsystem.\n");
		return r;
	}

	vbox->ttm.bo_global_ref.mem_glob = vbox->ttm.mem_global_ref.object;
	global_ref = &vbox->ttm.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;

	r = drm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
		drm_global_item_unref(&vbox->ttm.mem_global_ref);
		return r;
	}

	return 0;
}
/**
 * Removes the vbox memory manager object from the global memory manager.
 */
static void vbox_ttm_global_release(struct vbox_private *vbox)
{
	if (!vbox->ttm.mem_global_ref.release)
		return;

	drm_global_item_unref(&vbox->ttm.bo_global_ref.ref);
	drm_global_item_unref(&vbox->ttm.mem_global_ref);
	vbox->ttm.mem_global_ref.release = NULL;
}
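/*
 * TTM destroy callback for a vbox buffer object: release the embedded GEM
 * object and free the wrapper allocated in vbox_bo_create().
 */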
static void vbox_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
	struct vbox_bo *bo;

	bo = container_of(tbo, struct vbox_bo, bo);

	drm_gem_object_release(&bo->gem);
	kfree(bo);
}

static bool vbox_ttm_bo_is_vbox_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &vbox_bo_ttm_destroy)
		return true;

	return false;
}
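/*
 * Describe the supported memory types to TTM: plain cached system memory and
 * a fixed, mappable VRAM range managed by the generic range manager and
 * mapped write-combined by default.
 */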
static int
vbox_bo_init_mem_type(struct ttm_bo_device *bdev, u32 type,
		      struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		man->func = &ttm_bo_manager_func;
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned int)type);
		return -EINVAL;
	}

	return 0;
}
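/* On eviction, ask TTM to move vbox-owned buffer objects to system memory. */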
static void
vbox_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct vbox_bo *vboxbo = vbox_bo(bo);

	if (!vbox_ttm_bo_is_vbox_bo(bo))
		return;

	vbox_ttm_placement(vboxbo, TTM_PL_FLAG_SYSTEM);
	*pl = vboxbo->placement;
}
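/* No per-file access checks are made; any opener of the device may map BOs. */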
static int vbox_bo_verify_access(struct ttm_buffer_object *bo,
				 struct file *filp)
{
	return 0;
}
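/*
 * Tell TTM how to io-map a memory region: system memory needs no mapping,
 * while VRAM lives in PCI BAR 0 of the virtual graphics adapter.
 */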
static int vbox_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
				   struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct vbox_private *vbox = vbox_bdev(bdev);

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = pci_resource_start(vbox->dev->pdev, 0);
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static void vbox_ttm_io_mem_free(struct ttm_bo_device *bdev,
				 struct ttm_mem_reg *mem)
{
}
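/*
 * Buffer moves are done by memcpy; the signature of ttm_bo_move_memcpy()
 * changed across kernel releases, hence the compile-time switch below.
 */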
static int vbox_bo_move(struct ttm_buffer_object *bo,
			bool evict, bool interruptible,
			bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	int r;

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) && !defined(RHEL_74)
	r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0) && !defined(RHEL_74)
	r = ttm_bo_move_memcpy(bo, evict, interruptible, no_wait_gpu, new_mem);
#else
	r = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, new_mem);
#endif
	return r;
}
static void vbox_ttm_backend_destroy(struct ttm_tt *tt)
{
	ttm_tt_fini(tt);
	kfree(tt);
}

static struct ttm_backend_func vbox_tt_backend_func = {
	.destroy = &vbox_ttm_backend_destroy,
};
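/*
 * Allocate a plain struct ttm_tt backed by the generic TTM page pool; the
 * backend_func above provides the matching destructor.
 */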
static struct ttm_tt *vbox_ttm_tt_create(struct ttm_bo_device *bdev,
					 unsigned long size,
					 u32 page_flags,
					 struct page *dummy_read_page)
{
	struct ttm_tt *tt;

	tt = kzalloc(sizeof(*tt), GFP_KERNEL);
	if (!tt)
		return NULL;

	tt->func = &vbox_tt_backend_func;
	if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
		kfree(tt);
		return NULL;
	}

	return tt;
}

static int vbox_ttm_tt_populate(struct ttm_tt *ttm)
{
	return ttm_pool_populate(ttm);
}

static void vbox_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	ttm_pool_unpopulate(ttm);
}
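/*
 * TTM driver callbacks.  The version-guarded entries only exist in the
 * kernel releases named by the #if checks.
 */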
struct ttm_bo_driver vbox_bo_driver = {
	.ttm_tt_create = vbox_ttm_tt_create,
	.ttm_tt_populate = vbox_ttm_tt_populate,
	.ttm_tt_unpopulate = vbox_ttm_tt_unpopulate,
	.init_mem_type = vbox_bo_init_mem_type,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) || defined(RHEL_74)
	.eviction_valuable = ttm_bo_eviction_valuable,
#endif
	.evict_flags = vbox_bo_evict_flags,
	.move = vbox_bo_move,
	.verify_access = vbox_bo_verify_access,
	.io_mem_reserve = &vbox_ttm_io_mem_reserve,
	.io_mem_free = &vbox_ttm_io_mem_free,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
	.io_mem_pfn = ttm_bo_default_io_mem_pfn,
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) \
    || defined(RHEL_74)
	.lru_tail = &ttm_bo_default_lru_tail,
	.swap_lru_tail = &ttm_bo_default_swap_lru_tail,
#endif
};
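/*
 * Set up the memory manager: register the TTM globals, initialise the BO
 * device, create a VRAM range sized to the VRAM the host exposes, and mark
 * the framebuffer BAR as write-combining.
 */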
int vbox_mm_init(struct vbox_private *vbox)
{
	int ret;
	struct drm_device *dev = vbox->dev;
	struct ttm_bo_device *bdev = &vbox->ttm.bdev;

	ret = vbox_ttm_global_init(vbox);
	if (ret)
		return ret;

	ret = ttm_bo_device_init(&vbox->ttm.bdev,
				 vbox->ttm.bo_global_ref.ref.object,
				 &vbox_bo_driver,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) || defined(RHEL_73)
				 dev->anon_inode->i_mapping,
#endif
				 DRM_FILE_PAGE_OFFSET, true);
	if (ret) {
		DRM_ERROR("Error initialising bo driver; %d\n", ret);
		return ret;
	}

	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
			     vbox->available_vram_size >> PAGE_SHIFT);
	if (ret) {
		DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
		return ret;
	}

#ifdef DRM_MTRR_WC
	vbox->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
				     pci_resource_len(dev->pdev, 0),
				     DRM_MTRR_WC);
#else
	vbox->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
					 pci_resource_len(dev->pdev, 0));
#endif

	vbox->ttm.mm_initialised = true;

	return 0;
}
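/* Tear down everything set up by vbox_mm_init(). */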
void vbox_mm_fini(struct vbox_private *vbox)
{
#ifdef DRM_MTRR_WC
	struct drm_device *dev = vbox->dev;
#endif
	if (!vbox->ttm.mm_initialised)
		return;

	ttm_bo_device_release(&vbox->ttm.bdev);
	vbox_ttm_global_release(vbox);

#ifdef DRM_MTRR_WC
	drm_mtrr_del(vbox->fb_mtrr,
		     pci_resource_start(dev->pdev, 0),
		     pci_resource_len(dev->pdev, 0), DRM_MTRR_WC);
#else
	arch_phys_wc_del(vbox->fb_mtrr);
#endif
}
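/*
 * Build the TTM placement list for a BO from the requested domain mask:
 * VRAM placements are write-combined or uncached, system placements accept
 * any caching, and an empty mask falls back to system memory.
 */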
void vbox_ttm_placement(struct vbox_bo *bo, int domain)
{
	u32 c = 0;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0) && !defined(RHEL_73)
	bo->placement.fpfn = 0;
	bo->placement.lpfn = 0;
#else
	unsigned int i;
#endif

	bo->placement.placement = bo->placements;
	bo->placement.busy_placement = bo->placements;

	if (domain & TTM_PL_FLAG_VRAM)
		PLACEMENT_FLAGS(bo->placements[c++]) =
		    TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
	if (domain & TTM_PL_FLAG_SYSTEM)
		PLACEMENT_FLAGS(bo->placements[c++]) =
		    TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	if (!c)
		PLACEMENT_FLAGS(bo->placements[c++]) =
		    TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;

	bo->placement.num_placement = c;
	bo->placement.num_busy_placement = c;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0) || defined(RHEL_73)
	for (i = 0; i < c; ++i) {
		bo->placements[i].fpfn = 0;
		bo->placements[i].lpfn = 0;
	}
#endif
}
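/*
 * Create a GEM-backed TTM buffer object that may live in VRAM or system
 * memory.  Once ttm_bo_init() has been called, TTM owns the object and is
 * expected to invoke vbox_bo_ttm_destroy() to free it, which is presumably
 * why the error path after that call does not kfree() vboxbo itself.
 */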
int vbox_bo_create(struct drm_device *dev, int size, int align,
		   u32 flags, struct vbox_bo **pvboxbo)
{
	struct vbox_private *vbox = dev->dev_private;
	struct vbox_bo *vboxbo;
	size_t acc_size;
	int ret;

	vboxbo = kzalloc(sizeof(*vboxbo), GFP_KERNEL);
	if (!vboxbo)
		return -ENOMEM;

	ret = drm_gem_object_init(dev, &vboxbo->gem, size);
	if (ret) {
		kfree(vboxbo);
		return ret;
	}

	vboxbo->bo.bdev = &vbox->ttm.bdev;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0) && !defined(RHEL_73)
	vboxbo->bo.bdev->dev_mapping = dev->dev_mapping;
#endif

	vbox_ttm_placement(vboxbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);

	acc_size = ttm_bo_dma_acc_size(&vbox->ttm.bdev, size,
				       sizeof(struct vbox_bo));

	ret = ttm_bo_init(&vbox->ttm.bdev, &vboxbo->bo, size,
			  ttm_bo_type_device, &vboxbo->placement,
			  align >> PAGE_SHIFT, false, NULL, acc_size,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0) || defined(RHEL_73)
			  NULL,
#endif
			  NULL, vbox_bo_ttm_destroy);
	if (ret)
		return ret;

	*pvboxbo = vboxbo;

	return 0;
}
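/*
 * Offset of the buffer object within its current placement, as reported by
 * TTM; typically used as the scanout address while the BO is pinned in VRAM.
 */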
static inline u64 vbox_bo_gpu_offset(struct vbox_bo *bo)
{
	return bo->bo.offset;
}
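/*
 * Pin a buffer object into a given placement and optionally return its GPU
 * offset.  Pins are reference counted: only the first pin actually validates
 * the BO (with TTM_PL_FLAG_NO_EVICT set); later calls just bump pin_count.
 *
 * A typical caller (sketch only, assuming the BO was created with
 * vbox_bo_create()) looks like:
 *
 *	u64 gpu_addr;
 *	int ret = vbox_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
 *	if (ret)
 *		return ret;
 *	... program the hardware with gpu_addr ...
 *	vbox_bo_unpin(bo);
 */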
int vbox_bo_pin(struct vbox_bo *bo, u32 pl_flag, u64 *gpu_addr)
{
	int i, ret;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = vbox_bo_gpu_offset(bo);

		return 0;
	}

	vbox_ttm_placement(bo, pl_flag);

	for (i = 0; i < bo->placement.num_placement; i++)
		PLACEMENT_FLAGS(bo->placements[i]) |= TTM_PL_FLAG_NO_EVICT;

	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
	if (ret)
		return ret;

	bo->pin_count = 1;

	if (gpu_addr)
		*gpu_addr = vbox_bo_gpu_offset(bo);

	return 0;
}
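/*
 * Drop one pin reference; when the last pin goes away the NO_EVICT flag is
 * cleared and the BO becomes evictable again.
 */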
int vbox_bo_unpin(struct vbox_bo *bo)
{
	int i, ret;

	if (!bo->pin_count) {
		DRM_ERROR("unpin bad %p\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;

	for (i = 0; i < bo->placement.num_placement; i++)
		PLACEMENT_FLAGS(bo->placements[i]) &= ~TTM_PL_FLAG_NO_EVICT;

	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
	if (ret)
		return ret;

	return 0;
}
/*
 * Move a vbox-owned buffer object to system memory if no one else has it
 * pinned.  The caller must have pinned it previously, and this call will
 * release the caller's pin.
 */
int vbox_bo_push_sysram(struct vbox_bo *bo)
{
	int i, ret;

	if (!bo->pin_count) {
		DRM_ERROR("unpin bad %p\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;

	if (bo->kmap.virtual)
		ttm_bo_kunmap(&bo->kmap);

	vbox_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);

	for (i = 0; i < bo->placement.num_placement; i++)
		PLACEMENT_FLAGS(bo->placements[i]) |= TTM_PL_FLAG_NO_EVICT;

	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
	if (ret) {
		DRM_ERROR("pushing to system memory failed\n");
		return ret;
	}

	return 0;
}
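/*
 * mmap entry point for the DRM device node: offsets below DRM_FILE_PAGE_OFFSET
 * are rejected, everything else is handed to ttm_bo_mmap(), which resolves the
 * offset to a buffer object and maps it.
 */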
int vbox_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct vbox_private *vbox;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return -EINVAL;

	file_priv = filp->private_data;
	vbox = file_priv->minor->dev->dev_private;

	return ttm_bo_mmap(filp, vma, &vbox->ttm.bdev);
}