git.proxmox.com Git - mirror_ubuntu-focal-kernel.git/commitdiff
Merge tag 'vmwgfx-next-4.19-2' of git://people.freedesktop.org/~thomash/linux into...
author Dave Airlie <airlied@redhat.com>
Tue, 10 Jul 2018 01:05:46 +0000 (11:05 +1000)
committer Dave Airlie <airlied@redhat.com>
Tue, 10 Jul 2018 01:10:34 +0000 (11:10 +1000)
A series of cleanups / reorganizations and modesetting changes that
mostly target atomic state validation.

[airlied: conflicts with SPDX stuff in amdgpu tree]
Signed-off-by: Dave Airlie <airlied@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1a88485e-e509-b00e-8485-19194f074115@vmware.com
21 files changed:
drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
drivers/gpu/drm/vmwgfx/vmwgfx_context.c
drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c

index 0000000000000000000000000000000000000000,87204ff67c090f79440e81f747ad34c615aa0511..2dda0334576154ff3c22d56c0a8813dfee375dcd
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,1122 +1,1123 @@@
++// SPDX-License-Identifier: GPL-2.0 OR MIT
+ /**************************************************************************
+  *
+  * Copyright © 2011-2018 VMware, Inc., Palo Alto, CA., USA
+  * All Rights Reserved.
+  *
+  * Permission is hereby granted, free of charge, to any person obtaining a
+  * copy of this software and associated documentation files (the
+  * "Software"), to deal in the Software without restriction, including
+  * without limitation the rights to use, copy, modify, merge, publish,
+  * distribute, sub license, and/or sell copies of the Software, and to
+  * permit persons to whom the Software is furnished to do so, subject to
+  * the following conditions:
+  *
+  * The above copyright notice and this permission notice (including the
+  * next paragraph) shall be included in all copies or substantial portions
+  * of the Software.
+  *
+  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+  * USE OR OTHER DEALINGS IN THE SOFTWARE.
+  *
+  **************************************************************************/
+ #include <drm/ttm/ttm_placement.h>
+ #include <drm/drmP.h>
+ #include "vmwgfx_drv.h"
+ #include "drm/ttm/ttm_object.h"
+ /**
+  * struct vmw_user_buffer_object - User-space-visible buffer object
+  *
+  * @prime: The prime object providing user visibility.
+  * @vbo: The struct vmw_buffer_object
+  */
+ struct vmw_user_buffer_object {
+       struct ttm_prime_object prime;
+       struct vmw_buffer_object vbo;
+ };
+ /**
+  * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
+  * vmw_buffer_object.
+  *
+  * @bo: Pointer to the TTM buffer object.
+  * Return: Pointer to the struct vmw_buffer_object embedding the
+  * TTM buffer object.
+  */
+ static struct vmw_buffer_object *
+ vmw_buffer_object(struct ttm_buffer_object *bo)
+ {
+       return container_of(bo, struct vmw_buffer_object, base);
+ }
+ /**
+  * vmw_user_buffer_object - Convert a struct ttm_buffer_object to a struct
+  * vmw_user_buffer_object.
+  *
+  * @bo: Pointer to the TTM buffer object.
+  * Return: Pointer to the struct vmw_user_buffer_object embedding the TTM
+  * buffer object.
+  */
+ static struct vmw_user_buffer_object *
+ vmw_user_buffer_object(struct ttm_buffer_object *bo)
+ {
+       struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
+       return container_of(vmw_bo, struct vmw_user_buffer_object, vbo);
+ }
+ /**
+  * vmw_bo_pin_in_placement - Validate a buffer to placement.
+  *
+  * @dev_priv:  Driver private.
+  * @buf:  DMA buffer to move.
+  * @placement:  The placement to pin it.
+  * @interruptible:  Use interruptible wait.
+  * Return: Zero on success, Negative error code on failure. In particular
+  * -ERESTARTSYS if interrupted by a signal
+  */
+ int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
+                           struct vmw_buffer_object *buf,
+                           struct ttm_placement *placement,
+                           bool interruptible)
+ {
+       struct ttm_operation_ctx ctx = {interruptible, false };
+       struct ttm_buffer_object *bo = &buf->base;
+       int ret;
+       uint32_t new_flags;
+       ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
+       if (unlikely(ret != 0))
+               return ret;
+       vmw_execbuf_release_pinned_bo(dev_priv);
+       ret = ttm_bo_reserve(bo, interruptible, false, NULL);
+       if (unlikely(ret != 0))
+               goto err;
+       if (buf->pin_count > 0)
+               ret = ttm_bo_mem_compat(placement, &bo->mem,
+                                       &new_flags) == true ? 0 : -EINVAL;
+       else
+               ret = ttm_bo_validate(bo, placement, &ctx);
+       if (!ret)
+               vmw_bo_pin_reserved(buf, true);
+       ttm_bo_unreserve(bo);
+ err:
+       ttm_write_unlock(&dev_priv->reservation_sem);
+       return ret;
+ }
+ /**
+  * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
+  *
+  * This function takes the reservation_sem in write mode.
+  * Flushes and unpins the query bo to avoid failures.
+  *
+  * @dev_priv:  Driver private.
+  * @buf:  DMA buffer to move.
+  * @interruptible:  Use interruptible wait.
+  * Return: Zero on success, Negative error code on failure. In particular
+  * -ERESTARTSYS if interrupted by a signal
+  */
+ int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
+                             struct vmw_buffer_object *buf,
+                             bool interruptible)
+ {
+       struct ttm_operation_ctx ctx = {interruptible, false };
+       struct ttm_buffer_object *bo = &buf->base;
+       int ret;
+       uint32_t new_flags;
+       ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
+       if (unlikely(ret != 0))
+               return ret;
+       vmw_execbuf_release_pinned_bo(dev_priv);
+       ret = ttm_bo_reserve(bo, interruptible, false, NULL);
+       if (unlikely(ret != 0))
+               goto err;
+       if (buf->pin_count > 0) {
+               ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
+                                       &new_flags) == true ? 0 : -EINVAL;
+               goto out_unreserve;
+       }
+       ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
+       if (likely(ret == 0) || ret == -ERESTARTSYS)
+               goto out_unreserve;
+       ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
+ out_unreserve:
+       if (!ret)
+               vmw_bo_pin_reserved(buf, true);
+       ttm_bo_unreserve(bo);
+ err:
+       ttm_write_unlock(&dev_priv->reservation_sem);
+       return ret;
+ }
+ /**
+  * vmw_bo_pin_in_vram - Move a buffer to vram.
+  *
+  * This function takes the reservation_sem in write mode.
+  * Flushes and unpins the query bo to avoid failures.
+  *
+  * @dev_priv:  Driver private.
+  * @buf:  DMA buffer to move.
+  * @interruptible:  Use interruptible wait.
+  * Return: Zero on success, Negative error code on failure. In particular
+  * -ERESTARTSYS if interrupted by a signal
+  */
+ int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
+                      struct vmw_buffer_object *buf,
+                      bool interruptible)
+ {
+       return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
+                                      interruptible);
+ }
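/*
 * A minimal usage sketch (not from this patch), assuming a caller that needs
 * the buffer resident in VRAM while commands referencing it are outstanding:
 *
 *	ret = vmw_bo_pin_in_vram(dev_priv, buf, true);
 *	if (ret)
 *		return ret;
 *	... submit commands that require @buf to stay in VRAM ...
 *	(void) vmw_bo_unpin(dev_priv, buf, false);
 */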
+ /**
+  * vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
+  *
+  * This function takes the reservation_sem in write mode.
+  * Flushes and unpins the query bo to avoid failures.
+  *
+  * @dev_priv:  Driver private.
+  * @buf:  DMA buffer to pin.
+  * @interruptible:  Use interruptible wait.
+  * Return: Zero on success, Negative error code on failure. In particular
+  * -ERESTARTSYS if interrupted by a signal
+  */
+ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
+                               struct vmw_buffer_object *buf,
+                               bool interruptible)
+ {
+       struct ttm_operation_ctx ctx = {interruptible, false };
+       struct ttm_buffer_object *bo = &buf->base;
+       struct ttm_placement placement;
+       struct ttm_place place;
+       int ret = 0;
+       uint32_t new_flags;
+       place = vmw_vram_placement.placement[0];
+       place.lpfn = bo->num_pages;
+       placement.num_placement = 1;
+       placement.placement = &place;
+       placement.num_busy_placement = 1;
+       placement.busy_placement = &place;
+       ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
+       if (unlikely(ret != 0))
+               return ret;
+       vmw_execbuf_release_pinned_bo(dev_priv);
+       ret = ttm_bo_reserve(bo, interruptible, false, NULL);
+       if (unlikely(ret != 0))
+               goto err_unlock;
+       /*
+        * Is this buffer already in vram but not at the start of it?
+        * In that case, evict it first because TTM isn't good at handling
+        * that situation.
+        */
+       if (bo->mem.mem_type == TTM_PL_VRAM &&
+           bo->mem.start < bo->num_pages &&
+           bo->mem.start > 0 &&
+           buf->pin_count == 0) {
+               ctx.interruptible = false;
+               (void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
+       }
+       if (buf->pin_count > 0)
+               ret = ttm_bo_mem_compat(&placement, &bo->mem,
+                                       &new_flags) == true ? 0 : -EINVAL;
+       else
+               ret = ttm_bo_validate(bo, &placement, &ctx);
+       /* For some reason we didn't end up at the start of vram */
+       WARN_ON(ret == 0 && bo->offset != 0);
+       if (!ret)
+               vmw_bo_pin_reserved(buf, true);
+       ttm_bo_unreserve(bo);
+ err_unlock:
+       ttm_write_unlock(&dev_priv->reservation_sem);
+       return ret;
+ }
+ /**
+  * vmw_bo_unpin - Unpin the given buffer; does not move the buffer.
+  *
+  * This function takes the reservation_sem in write mode.
+  *
+  * @dev_priv:  Driver private.
+  * @buf:  DMA buffer to unpin.
+  * @interruptible:  Use interruptible wait.
+  * Return: Zero on success, Negative error code on failure. In particular
+  * -ERESTARTSYS if interrupted by a signal
+  */
+ int vmw_bo_unpin(struct vmw_private *dev_priv,
+                struct vmw_buffer_object *buf,
+                bool interruptible)
+ {
+       struct ttm_buffer_object *bo = &buf->base;
+       int ret;
+       ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
+       if (unlikely(ret != 0))
+               return ret;
+       ret = ttm_bo_reserve(bo, interruptible, false, NULL);
+       if (unlikely(ret != 0))
+               goto err;
+       vmw_bo_pin_reserved(buf, false);
+       ttm_bo_unreserve(bo);
+ err:
+       ttm_read_unlock(&dev_priv->reservation_sem);
+       return ret;
+ }
+ /**
+  * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
+  * of a buffer.
+  *
+  * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
+  * @ptr: SVGAGuestPtr returning the result.
+  */
+ void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
+                         SVGAGuestPtr *ptr)
+ {
+       if (bo->mem.mem_type == TTM_PL_VRAM) {
+               ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
+               ptr->offset = bo->offset;
+       } else {
+               ptr->gmrId = bo->mem.start;
+               ptr->offset = 0;
+       }
+ }
+ /**
+  * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
+  *
+  * @vbo: The buffer object. Must be reserved.
+  * @pin: Whether to pin or unpin.
+  *
+  */
+ void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
+ {
+       struct ttm_operation_ctx ctx = { false, true };
+       struct ttm_place pl;
+       struct ttm_placement placement;
+       struct ttm_buffer_object *bo = &vbo->base;
+       uint32_t old_mem_type = bo->mem.mem_type;
+       int ret;
+       lockdep_assert_held(&bo->resv->lock.base);
+       if (pin) {
+               if (vbo->pin_count++ > 0)
+                       return;
+       } else {
+               WARN_ON(vbo->pin_count <= 0);
+               if (--vbo->pin_count > 0)
+                       return;
+       }
+       pl.fpfn = 0;
+       pl.lpfn = 0;
+       pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
+               | TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
+       if (pin)
+               pl.flags |= TTM_PL_FLAG_NO_EVICT;
+       memset(&placement, 0, sizeof(placement));
+       placement.num_placement = 1;
+       placement.placement = &pl;
+       ret = ttm_bo_validate(bo, &placement, &ctx);
+       BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
+ }
+ /**
+  * vmw_bo_map_and_cache - Map a buffer object and cache the map
+  *
+  * @vbo: The buffer object to map
+  * Return: A kernel virtual address or NULL if mapping failed.
+  *
+  * This function maps a buffer object into the kernel address space, or
+  * returns the virtual kernel address of an already existing map. The virtual
+  * address remains valid as long as the buffer object is pinned or reserved.
+  * The cached map is torn down on either
+  * 1) Buffer object move
+  * 2) Buffer object swapout
+  * 3) Buffer object destruction
+  *
+  */
+ void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
+ {
+       struct ttm_buffer_object *bo = &vbo->base;
+       bool not_used;
+       void *virtual;
+       int ret;
+       virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
+       if (virtual)
+               return virtual;
+       ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
+       if (ret)
+               DRM_ERROR("Buffer object map failed: %d.\n", ret);
+       return ttm_kmap_obj_virtual(&vbo->map, &not_used);
+ }
+ /**
+  * vmw_bo_unmap - Tear down a cached buffer object map.
+  *
+  * @vbo: The buffer object whose map we are tearing down.
+  *
+  * This function tears down a cached map set up using
+  * vmw_bo_map_and_cache().
+  */
+ void vmw_bo_unmap(struct vmw_buffer_object *vbo)
+ {
+       if (vbo->map.bo == NULL)
+               return;
+       ttm_bo_kunmap(&vbo->map);
+ }
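/*
 * A minimal sketch (not from this patch) of the cached-map helpers above;
 * @data and @size are placeholder names, and @vbo is assumed to be pinned or
 * reserved so that the mapping stays valid:
 *
 *	void *virt = vmw_bo_map_and_cache(vbo);
 *
 *	if (!virt)
 *		return -ENOMEM;
 *	memcpy(virt, data, size);
 *
 * The map can be left cached; it is torn down automatically on buffer move,
 * swapout or destruction, or explicitly with vmw_bo_unmap(vbo).
 */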
+ /**
+  * vmw_bo_acc_size - Calculate the pinned memory usage of buffers
+  *
+  * @dev_priv: Pointer to a struct vmw_private identifying the device.
+  * @size: The requested buffer size.
+  * @user: Whether this is an ordinary dma buffer or a user dma buffer.
+  */
+ static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
+                             bool user)
+ {
+       static size_t struct_size, user_struct_size;
+       size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+       size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
+       if (unlikely(struct_size == 0)) {
+               size_t backend_size = ttm_round_pot(vmw_tt_size);
+               struct_size = backend_size +
+                       ttm_round_pot(sizeof(struct vmw_buffer_object));
+               user_struct_size = backend_size +
+                       ttm_round_pot(sizeof(struct vmw_user_buffer_object));
+       }
+       if (dev_priv->map_mode == vmw_dma_alloc_coherent)
+               page_array_size +=
+                       ttm_round_pot(num_pages * sizeof(dma_addr_t));
+       return ((user) ? user_struct_size : struct_size) +
+               page_array_size;
+ }
+ /**
+  * vmw_bo_bo_free - vmw buffer object destructor
+  *
+  * @bo: Pointer to the embedded struct ttm_buffer_object
+  */
+ void vmw_bo_bo_free(struct ttm_buffer_object *bo)
+ {
+       struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
+       vmw_bo_unmap(vmw_bo);
+       kfree(vmw_bo);
+ }
+ /**
+  * vmw_user_bo_destroy - vmw user buffer object destructor
+  *
+  * @bo: Pointer to the embedded struct ttm_buffer_object
+  */
+ static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
+ {
+       struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);
+       vmw_bo_unmap(&vmw_user_bo->vbo);
+       ttm_prime_object_kfree(vmw_user_bo, prime);
+ }
+ /**
+  * vmw_bo_init - Initialize a vmw buffer object
+  *
+  * @dev_priv: Pointer to the device private struct
+  * @vmw_bo: Pointer to the struct vmw_buffer_object to initialize.
+  * @size: Buffer object size in bytes.
+  * @placement: Initial placement.
+  * @interruptible: Whether waits should be performed interruptibly.
+  * @bo_free: The buffer object destructor.
+  * Returns: Zero on success, negative error code on error.
+  *
+  * Note that on error, the code will free the buffer object.
+  */
+ int vmw_bo_init(struct vmw_private *dev_priv,
+               struct vmw_buffer_object *vmw_bo,
+               size_t size, struct ttm_placement *placement,
+               bool interruptible,
+               void (*bo_free)(struct ttm_buffer_object *bo))
+ {
+       struct ttm_bo_device *bdev = &dev_priv->bdev;
+       size_t acc_size;
+       int ret;
+       bool user = (bo_free == &vmw_user_bo_destroy);
+       WARN_ON_ONCE(!bo_free && (!user && (bo_free != vmw_bo_bo_free)));
+       acc_size = vmw_bo_acc_size(dev_priv, size, user);
+       memset(vmw_bo, 0, sizeof(*vmw_bo));
+       INIT_LIST_HEAD(&vmw_bo->res_list);
+       ret = ttm_bo_init(bdev, &vmw_bo->base, size,
+                         ttm_bo_type_device, placement,
+                         0, interruptible, acc_size,
+                         NULL, NULL, bo_free);
+       return ret;
+ }
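/*
 * A minimal allocation sketch (not from this patch), assuming a
 * kernel-internal buffer object that may live in VRAM or system memory:
 *
 *	struct vmw_buffer_object *vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
 *	int ret;
 *
 *	if (!vbo)
 *		return -ENOMEM;
 *	ret = vmw_bo_init(dev_priv, vbo, size, &vmw_vram_sys_placement,
 *			  true, &vmw_bo_bo_free);
 *	if (ret)
 *		return ret;
 *
 * On error vmw_bo_init has already freed @vbo through the supplied
 * destructor, so no kfree() is needed in the error path.
 */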
+ /**
+  * vmw_user_bo_release - TTM reference base object release callback for
+  * vmw user buffer objects
+  *
+  * @p_base: The TTM base object pointer about to be unreferenced.
+  *
+  * Clears the TTM base object pointer and drops the reference the
+  * base object has on the underlying struct vmw_buffer_object.
+  */
+ static void vmw_user_bo_release(struct ttm_base_object **p_base)
+ {
+       struct vmw_user_buffer_object *vmw_user_bo;
+       struct ttm_base_object *base = *p_base;
+       struct ttm_buffer_object *bo;
+       *p_base = NULL;
+       if (unlikely(base == NULL))
+               return;
+       vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
+                                  prime.base);
+       bo = &vmw_user_bo->vbo.base;
+       ttm_bo_unref(&bo);
+ }
+ /**
+  * vmw_user_bo_ref_obj_release - TTM synccpu reference object release callback
+  * for vmw user buffer objects
+  *
+  * @base: Pointer to the TTM base object
+  * @ref_type: Reference type of the reference reaching zero.
+  *
+  * Called when user-space drops its last synccpu reference on the buffer
+  * object, either explicitly or as part of a cleanup file close.
+  */
+ static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
+                                       enum ttm_ref_type ref_type)
+ {
+       struct vmw_user_buffer_object *user_bo;
+       user_bo = container_of(base, struct vmw_user_buffer_object, prime.base);
+       switch (ref_type) {
+       case TTM_REF_SYNCCPU_WRITE:
+               ttm_bo_synccpu_write_release(&user_bo->vbo.base);
+               break;
+       default:
+               WARN_ONCE(true, "Undefined buffer object reference release.\n");
+       }
+ }
+ /**
+  * vmw_user_bo_alloc - Allocate a user buffer object
+  *
+  * @dev_priv: Pointer to a struct device private.
+  * @tfile: Pointer to a struct ttm_object_file on which to register the user
+  * object.
+  * @size: Size of the buffer object.
+  * @shareable: Boolean whether the buffer is shareable with other open files.
+  * @handle: Pointer to where the handle value should be assigned.
+  * @p_vbo: Pointer to where the refcounted struct vmw_buffer_object pointer
+  * should be assigned.
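+  * @p_base: Pointer to where a refcounted pointer to the TTM base object
+  * should be placed, or NULL if no such pointer is needed.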
+  * Return: Zero on success, negative error code on error.
+  */
+ int vmw_user_bo_alloc(struct vmw_private *dev_priv,
+                     struct ttm_object_file *tfile,
+                     uint32_t size,
+                     bool shareable,
+                     uint32_t *handle,
+                     struct vmw_buffer_object **p_vbo,
+                     struct ttm_base_object **p_base)
+ {
+       struct vmw_user_buffer_object *user_bo;
+       struct ttm_buffer_object *tmp;
+       int ret;
+       user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
+       if (unlikely(!user_bo)) {
+               DRM_ERROR("Failed to allocate a buffer.\n");
+               return -ENOMEM;
+       }
+       ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
+                         (dev_priv->has_mob) ?
+                         &vmw_sys_placement :
+                         &vmw_vram_sys_placement, true,
+                         &vmw_user_bo_destroy);
+       if (unlikely(ret != 0))
+               return ret;
+       tmp = ttm_bo_reference(&user_bo->vbo.base);
+       ret = ttm_prime_object_init(tfile,
+                                   size,
+                                   &user_bo->prime,
+                                   shareable,
+                                   ttm_buffer_type,
+                                   &vmw_user_bo_release,
+                                   &vmw_user_bo_ref_obj_release);
+       if (unlikely(ret != 0)) {
+               ttm_bo_unref(&tmp);
+               goto out_no_base_object;
+       }
+       *p_vbo = &user_bo->vbo;
+       if (p_base) {
+               *p_base = &user_bo->prime.base;
+               kref_get(&(*p_base)->refcount);
+       }
+       *handle = user_bo->prime.base.hash.key;
+ out_no_base_object:
+       return ret;
+ }
+ /**
+  * vmw_user_bo_verify_access - verify access permissions on this
+  * buffer object.
+  *
+  * @bo: Pointer to the buffer object being accessed
+  * @tfile: Identifying the caller.
+  */
+ int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
+                             struct ttm_object_file *tfile)
+ {
+       struct vmw_user_buffer_object *vmw_user_bo;
+       if (unlikely(bo->destroy != vmw_user_bo_destroy))
+               return -EPERM;
+       vmw_user_bo = vmw_user_buffer_object(bo);
+       /* Check that the caller has opened the object. */
+       if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
+               return 0;
+       DRM_ERROR("Could not grant buffer access.\n");
+       return -EPERM;
+ }
+ /**
+  * vmw_user_bo_synccpu_grab - Grab a struct vmw_user_buffer_object for cpu
+  * access, idling previous GPU operations on the buffer and optionally
+  * blocking it for further command submissions.
+  *
+  * @user_bo: Pointer to the buffer object being grabbed for CPU access
+  * @tfile: Identifying the caller.
+  * @flags: Flags indicating how the grab should be performed.
+  * Return: Zero on success, Negative error code on error. In particular,
+  * -EBUSY will be returned if a dontblock operation is requested and the
+  * buffer object is busy, and -ERESTARTSYS will be returned if a wait is
+  * interrupted by a signal.
+  *
+  * A blocking grab will be automatically released when @tfile is closed.
+  */
+ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
+                                   struct ttm_object_file *tfile,
+                                   uint32_t flags)
+ {
+       struct ttm_buffer_object *bo = &user_bo->vbo.base;
+       bool existed;
+       int ret;
+       if (flags & drm_vmw_synccpu_allow_cs) {
+               bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
+               long lret;
+               lret = reservation_object_wait_timeout_rcu
+                       (bo->resv, true, true,
+                        nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
+               if (!lret)
+                       return -EBUSY;
+               else if (lret < 0)
+                       return lret;
+               return 0;
+       }
+       ret = ttm_bo_synccpu_write_grab
+               (bo, !!(flags & drm_vmw_synccpu_dontblock));
+       if (unlikely(ret != 0))
+               return ret;
+       ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
+                                TTM_REF_SYNCCPU_WRITE, &existed, false);
+       if (ret != 0 || existed)
+               ttm_bo_synccpu_write_release(&user_bo->vbo.base);
+       return ret;
+ }
+ /**
+  * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
+  * and unblock command submission on the buffer if blocked.
+  *
+  * @handle: Handle identifying the buffer object.
+  * @tfile: Identifying the caller.
+  * @flags: Flags indicating the type of release.
+  */
+ static int vmw_user_bo_synccpu_release(uint32_t handle,
+                                          struct ttm_object_file *tfile,
+                                          uint32_t flags)
+ {
+       if (!(flags & drm_vmw_synccpu_allow_cs))
+               return ttm_ref_object_base_unref(tfile, handle,
+                                                TTM_REF_SYNCCPU_WRITE);
+       return 0;
+ }
+ /**
+  * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
+  * functionality.
+  *
+  * @dev: Identifies the drm device.
+  * @data: Pointer to the ioctl argument.
+  * @file_priv: Identifies the caller.
+  * Return: Zero on success, negative error code on error.
+  *
+  * This function checks the ioctl arguments for validity and calls the
+  * relevant synccpu functions.
+  */
+ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
+                             struct drm_file *file_priv)
+ {
+       struct drm_vmw_synccpu_arg *arg =
+               (struct drm_vmw_synccpu_arg *) data;
+       struct vmw_buffer_object *vbo;
+       struct vmw_user_buffer_object *user_bo;
+       struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+       struct ttm_base_object *buffer_base;
+       int ret;
+       if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
+           || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
+                              drm_vmw_synccpu_dontblock |
+                              drm_vmw_synccpu_allow_cs)) != 0) {
+               DRM_ERROR("Illegal synccpu flags.\n");
+               return -EINVAL;
+       }
+       switch (arg->op) {
+       case drm_vmw_synccpu_grab:
+               ret = vmw_user_bo_lookup(tfile, arg->handle, &vbo,
+                                            &buffer_base);
+               if (unlikely(ret != 0))
+                       return ret;
+               user_bo = container_of(vbo, struct vmw_user_buffer_object,
+                                      vbo);
+               ret = vmw_user_bo_synccpu_grab(user_bo, tfile, arg->flags);
+               vmw_bo_unreference(&vbo);
+               ttm_base_object_unref(&buffer_base);
+               if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
+                            ret != -EBUSY)) {
+                       DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
+                                 (unsigned int) arg->handle);
+                       return ret;
+               }
+               break;
+       case drm_vmw_synccpu_release:
+               ret = vmw_user_bo_synccpu_release(arg->handle, tfile,
+                                                 arg->flags);
+               if (unlikely(ret != 0)) {
+                       DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
+                                 (unsigned int) arg->handle);
+                       return ret;
+               }
+               break;
+       default:
+               DRM_ERROR("Invalid synccpu operation.\n");
+               return -EINVAL;
+       }
+       return 0;
+ }
+ /**
+  * vmw_bo_alloc_ioctl - ioctl function implementing the buffer object
+  * allocation functionality.
+  *
+  * @dev: Identifies the drm device.
+  * @data: Pointer to the ioctl argument.
+  * @file_priv: Identifies the caller.
+  * Return: Zero on success, negative error code on error.
+  *
+  * This function checks the ioctl arguments for validity and allocates a
+  * struct vmw_user_buffer_object bo.
+  */
+ int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv)
+ {
+       struct vmw_private *dev_priv = vmw_priv(dev);
+       union drm_vmw_alloc_dmabuf_arg *arg =
+           (union drm_vmw_alloc_dmabuf_arg *)data;
+       struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
+       struct drm_vmw_dmabuf_rep *rep = &arg->rep;
+       struct vmw_buffer_object *vbo;
+       uint32_t handle;
+       int ret;
+       ret = ttm_read_lock(&dev_priv->reservation_sem, true);
+       if (unlikely(ret != 0))
+               return ret;
+       ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
+                               req->size, false, &handle, &vbo,
+                               NULL);
+       if (unlikely(ret != 0))
+               goto out_no_bo;
+       rep->handle = handle;
+       rep->map_handle = drm_vma_node_offset_addr(&vbo->base.vma_node);
+       rep->cur_gmr_id = handle;
+       rep->cur_gmr_offset = 0;
+       vmw_bo_unreference(&vbo);
+ out_no_bo:
+       ttm_read_unlock(&dev_priv->reservation_sem);
+       return ret;
+ }
+ /**
+  * vmw_bo_unref_ioctl - Generic handle close ioctl.
+  *
+  * @dev: Identifies the drm device.
+  * @data: Pointer to the ioctl argument.
+  * @file_priv: Identifies the caller.
+  * Return: Zero on success, negative error code on error.
+  *
+  * This function checks the ioctl arguments for validity and closes a
+  * handle to a TTM base object, optionally freeing the object.
+  */
+ int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv)
+ {
+       struct drm_vmw_unref_dmabuf_arg *arg =
+           (struct drm_vmw_unref_dmabuf_arg *)data;
+       return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+                                        arg->handle,
+                                        TTM_REF_USAGE);
+ }
+ /**
+  * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
+  *
+  * @tfile: The TTM object file the handle is registered with.
+  * @handle: The user buffer object handle
+  * @out: Pointer to where a pointer to the embedded
+  * struct vmw_buffer_object should be placed.
+  * @p_base: Pointer to where a pointer to the TTM base object should be
+  * placed, or NULL if no such pointer is required.
+  * Return: Zero on success, Negative error code on error.
+  *
+  * Both the output base object pointer and the vmw buffer object pointer
+  * will be refcounted.
+  */
+ int vmw_user_bo_lookup(struct ttm_object_file *tfile,
+                      uint32_t handle, struct vmw_buffer_object **out,
+                      struct ttm_base_object **p_base)
+ {
+       struct vmw_user_buffer_object *vmw_user_bo;
+       struct ttm_base_object *base;
+       base = ttm_base_object_lookup(tfile, handle);
+       if (unlikely(base == NULL)) {
+               DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
+                         (unsigned long)handle);
+               return -ESRCH;
+       }
+       if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
+               ttm_base_object_unref(&base);
+               DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
+                         (unsigned long)handle);
+               return -EINVAL;
+       }
+       vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
+                                  prime.base);
+       (void)ttm_bo_reference(&vmw_user_bo->vbo.base);
+       if (p_base)
+               *p_base = base;
+       else
+               ttm_base_object_unref(&base);
+       *out = &vmw_user_bo->vbo;
+       return 0;
+ }
+ /**
+  * vmw_user_bo_reference - Open a handle to a vmw user buffer object.
+  *
+  * @tfile: The TTM object file to register the handle with.
+  * @vbo: The embedded vmw buffer object.
+  * @handle: Pointer to where the new handle should be placed.
+  * Return: Zero on success, Negative error code on error.
+  */
+ int vmw_user_bo_reference(struct ttm_object_file *tfile,
+                         struct vmw_buffer_object *vbo,
+                         uint32_t *handle)
+ {
+       struct vmw_user_buffer_object *user_bo;
+       if (vbo->base.destroy != vmw_user_bo_destroy)
+               return -EINVAL;
+       user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);
+       *handle = user_bo->prime.base.hash.key;
+       return ttm_ref_object_add(tfile, &user_bo->prime.base,
+                                 TTM_REF_USAGE, NULL, false);
+ }
+ /**
+  * vmw_bo_fence_single - Utility function to fence a single TTM buffer
+  *                       object without unreserving it.
+  *
+  * @bo:             Pointer to the struct ttm_buffer_object to fence.
+  * @fence:          Pointer to the fence. If NULL, this function will
+  *                  insert a fence into the command stream.
+  *
+  * Contrary to the ttm_eu version of this function, it takes only
+  * a single buffer object instead of a list, and it also doesn't
+  * unreserve the buffer object, which needs to be done separately.
+  */
+ void vmw_bo_fence_single(struct ttm_buffer_object *bo,
+                        struct vmw_fence_obj *fence)
+ {
+       struct ttm_bo_device *bdev = bo->bdev;
+       struct vmw_private *dev_priv =
+               container_of(bdev, struct vmw_private, bdev);
+       if (fence == NULL) {
+               vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+               reservation_object_add_excl_fence(bo->resv, &fence->base);
+               dma_fence_put(&fence->base);
+       } else
+               reservation_object_add_excl_fence(bo->resv, &fence->base);
+ }
+ /**
+  * vmw_dumb_create - Create a dumb kms buffer
+  *
+  * @file_priv: Pointer to a struct drm_file identifying the caller.
+  * @dev: Pointer to the drm device.
+  * @args: Pointer to a struct drm_mode_create_dumb structure
+  * Return: Zero on success, negative error code on failure.
+  *
+  * This is a driver callback for the core drm create_dumb functionality.
+  * Note that this is very similar to the vmw_bo_alloc ioctl, except
+  * that the arguments have a different format.
+  */
+ int vmw_dumb_create(struct drm_file *file_priv,
+                   struct drm_device *dev,
+                   struct drm_mode_create_dumb *args)
+ {
+       struct vmw_private *dev_priv = vmw_priv(dev);
+       struct vmw_buffer_object *vbo;
+       int ret;
+       args->pitch = args->width * ((args->bpp + 7) / 8);
+       args->size = args->pitch * args->height;
+       ret = ttm_read_lock(&dev_priv->reservation_sem, true);
+       if (unlikely(ret != 0))
+               return ret;
+       ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
+                                   args->size, false, &args->handle,
+                                   &vbo, NULL);
+       if (unlikely(ret != 0))
+               goto out_no_bo;
+       vmw_bo_unreference(&vbo);
+ out_no_bo:
+       ttm_read_unlock(&dev_priv->reservation_sem);
+       return ret;
+ }
+ /**
+  * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
+  *
+  * @file_priv: Pointer to a struct drm_file identifying the caller.
+  * @dev: Pointer to the drm device.
+  * @handle: Handle identifying the dumb buffer.
+  * @offset: The address space offset returned.
+  * Return: Zero on success, negative error code on failure.
+  *
+  * This is a driver callback for the core drm dumb_map_offset functionality.
+  */
+ int vmw_dumb_map_offset(struct drm_file *file_priv,
+                       struct drm_device *dev, uint32_t handle,
+                       uint64_t *offset)
+ {
+       struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+       struct vmw_buffer_object *out_buf;
+       int ret;
+       ret = vmw_user_bo_lookup(tfile, handle, &out_buf, NULL);
+       if (ret != 0)
+               return -EINVAL;
+       *offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
+       vmw_bo_unreference(&out_buf);
+       return 0;
+ }
+ /**
+  * vmw_dumb_destroy - Destroy a dumb buffer
+  *
+  * @file_priv: Pointer to a struct drm_file identifying the caller.
+  * @dev: Pointer to the drm device.
+  * @handle: Handle identifying the dumb buffer.
+  * Return: Zero on success, negative error code on failure.
+  *
+  * This is a driver callback for the core drm dumb_destroy functionality.
+  */
+ int vmw_dumb_destroy(struct drm_file *file_priv,
+                    struct drm_device *dev,
+                    uint32_t handle)
+ {
+       return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+                                        handle, TTM_REF_USAGE);
+ }
+ /**
+  * vmw_bo_swap_notify - swapout notify callback.
+  *
+  * @bo: The buffer object to be swapped out.
+  */
+ void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
+ {
+       /* Is @bo embedded in a struct vmw_buffer_object? */
+       if (bo->destroy != vmw_bo_bo_free &&
+           bo->destroy != vmw_user_bo_destroy)
+               return;
+       /* Kill any cached kernel maps before swapout */
+       vmw_bo_unmap(vmw_buffer_object(bo));
+ }
+ /**
+  * vmw_bo_move_notify - TTM move_notify_callback
+  *
+  * @bo: The TTM buffer object about to move.
+  * @mem: The struct ttm_mem_reg indicating to what memory
+  *       region the move is taking place.
+  *
+  * Detaches cached maps and device bindings that require that the
+  * buffer doesn't move.
+  */
+ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
+                       struct ttm_mem_reg *mem)
+ {
+       struct vmw_buffer_object *vbo;
+       if (mem == NULL)
+               return;
+       /* Make sure @bo is embedded in a struct vmw_buffer_object? */
+       if (bo->destroy != vmw_bo_bo_free &&
+           bo->destroy != vmw_user_bo_destroy)
+               return;
+       vbo = container_of(bo, struct vmw_buffer_object, base);
+       /*
+        * Kill any cached kernel maps before move to or from VRAM.
+        * With other types of moves, the underlying pages stay the same,
+        * and the map can be kept.
+        */
+       if (mem->mem_type == TTM_PL_VRAM || bo->mem.mem_type == TTM_PL_VRAM)
+               vmw_bo_unmap(vbo);
+       /*
+        * If we're moving a backup MOB out of MOB placement, then make sure we
+        * read back all resource content first, and unbind the MOB from
+        * the resource.
+        */
+       if (mem->mem_type != VMW_PL_MOB && bo->mem.mem_type == VMW_PL_MOB)
+               vmw_resource_unbind_list(vbo);
+ }
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index 0000000000000000000000000000000000000000,239e1edf091957273953aca2c84380d55f359afc..31786b200afc470d73d4f661a4e9358959d686f8
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,887 +1,887 @@@
 - * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 - * All Rights Reserved.
++// SPDX-License-Identifier: GPL-2.0 OR MIT
+ /**************************************************************************
+  *
++ * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
+  *
+  * Permission is hereby granted, free of charge, to any person obtaining a
+  * copy of this software and associated documentation files (the
+  * "Software"), to deal in the Software without restriction, including
+  * without limitation the rights to use, copy, modify, merge, publish,
+  * distribute, sub license, and/or sell copies of the Software, and to
+  * permit persons to whom the Software is furnished to do so, subject to
+  * the following conditions:
+  *
+  * The above copyright notice and this permission notice (including the
+  * next paragraph) shall be included in all copies or substantial portions
+  * of the Software.
+  *
+  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+  * USE OR OTHER DEALINGS IN THE SOFTWARE.
+  *
+  **************************************************************************/
+ #include "vmwgfx_drv.h"
+ #include <drm/ttm/ttm_bo_driver.h>
+ #include <drm/ttm/ttm_placement.h>
+ #include <drm/ttm/ttm_page_alloc.h>
+ static const struct ttm_place vram_placement_flags = {
+       .fpfn = 0,
+       .lpfn = 0,
+       .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
+ };
+ static const struct ttm_place vram_ne_placement_flags = {
+       .fpfn = 0,
+       .lpfn = 0,
+       .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+ };
+ static const struct ttm_place sys_placement_flags = {
+       .fpfn = 0,
+       .lpfn = 0,
+       .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
+ };
+ static const struct ttm_place sys_ne_placement_flags = {
+       .fpfn = 0,
+       .lpfn = 0,
+       .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+ };
+ static const struct ttm_place gmr_placement_flags = {
+       .fpfn = 0,
+       .lpfn = 0,
+       .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+ };
+ static const struct ttm_place gmr_ne_placement_flags = {
+       .fpfn = 0,
+       .lpfn = 0,
+       .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+ };
+ static const struct ttm_place mob_placement_flags = {
+       .fpfn = 0,
+       .lpfn = 0,
+       .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
+ };
+ static const struct ttm_place mob_ne_placement_flags = {
+       .fpfn = 0,
+       .lpfn = 0,
+       .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+ };
+ struct ttm_placement vmw_vram_placement = {
+       .num_placement = 1,
+       .placement = &vram_placement_flags,
+       .num_busy_placement = 1,
+       .busy_placement = &vram_placement_flags
+ };
+ static const struct ttm_place vram_gmr_placement_flags[] = {
+       {
+               .fpfn = 0,
+               .lpfn = 0,
+               .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
+       }, {
+               .fpfn = 0,
+               .lpfn = 0,
+               .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+       }
+ };
+ static const struct ttm_place gmr_vram_placement_flags[] = {
+       {
+               .fpfn = 0,
+               .lpfn = 0,
+               .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+       }, {
+               .fpfn = 0,
+               .lpfn = 0,
+               .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
+       }
+ };
+ struct ttm_placement vmw_vram_gmr_placement = {
+       .num_placement = 2,
+       .placement = vram_gmr_placement_flags,
+       .num_busy_placement = 1,
+       .busy_placement = &gmr_placement_flags
+ };
+ static const struct ttm_place vram_gmr_ne_placement_flags[] = {
+       {
+               .fpfn = 0,
+               .lpfn = 0,
+               .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED |
+                        TTM_PL_FLAG_NO_EVICT
+       }, {
+               .fpfn = 0,
+               .lpfn = 0,
+               .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED |
+                        TTM_PL_FLAG_NO_EVICT
+       }
+ };
+ struct ttm_placement vmw_vram_gmr_ne_placement = {
+       .num_placement = 2,
+       .placement = vram_gmr_ne_placement_flags,
+       .num_busy_placement = 1,
+       .busy_placement = &gmr_ne_placement_flags
+ };
+ struct ttm_placement vmw_vram_sys_placement = {
+       .num_placement = 1,
+       .placement = &vram_placement_flags,
+       .num_busy_placement = 1,
+       .busy_placement = &sys_placement_flags
+ };
+ struct ttm_placement vmw_vram_ne_placement = {
+       .num_placement = 1,
+       .placement = &vram_ne_placement_flags,
+       .num_busy_placement = 1,
+       .busy_placement = &vram_ne_placement_flags
+ };
+ struct ttm_placement vmw_sys_placement = {
+       .num_placement = 1,
+       .placement = &sys_placement_flags,
+       .num_busy_placement = 1,
+       .busy_placement = &sys_placement_flags
+ };
+ struct ttm_placement vmw_sys_ne_placement = {
+       .num_placement = 1,
+       .placement = &sys_ne_placement_flags,
+       .num_busy_placement = 1,
+       .busy_placement = &sys_ne_placement_flags
+ };
+ static const struct ttm_place evictable_placement_flags[] = {
+       {
+               .fpfn = 0,
+               .lpfn = 0,
+               .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
+       }, {
+               .fpfn = 0,
+               .lpfn = 0,
+               .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
+       }, {
+               .fpfn = 0,
+               .lpfn = 0,
+               .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+       }, {
+               .fpfn = 0,
+               .lpfn = 0,
+               .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
+       }
+ };
+ static const struct ttm_place nonfixed_placement_flags[] = {
+       {
+               .fpfn = 0,
+               .lpfn = 0,
+               .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
+       }, {
+               .fpfn = 0,
+               .lpfn = 0,
+               .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+       }, {
+               .fpfn = 0,
+               .lpfn = 0,
+               .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
+       }
+ };
+ struct ttm_placement vmw_evictable_placement = {
+       .num_placement = 4,
+       .placement = evictable_placement_flags,
+       .num_busy_placement = 1,
+       .busy_placement = &sys_placement_flags
+ };
+ struct ttm_placement vmw_srf_placement = {
+       .num_placement = 1,
+       .num_busy_placement = 2,
+       .placement = &gmr_placement_flags,
+       .busy_placement = gmr_vram_placement_flags
+ };
+ struct ttm_placement vmw_mob_placement = {
+       .num_placement = 1,
+       .num_busy_placement = 1,
+       .placement = &mob_placement_flags,
+       .busy_placement = &mob_placement_flags
+ };
+ struct ttm_placement vmw_mob_ne_placement = {
+       .num_placement = 1,
+       .num_busy_placement = 1,
+       .placement = &mob_ne_placement_flags,
+       .busy_placement = &mob_ne_placement_flags
+ };
+ struct ttm_placement vmw_nonfixed_placement = {
+       .num_placement = 3,
+       .placement = nonfixed_placement_flags,
+       .num_busy_placement = 1,
+       .busy_placement = &sys_placement_flags
+ };
+ struct vmw_ttm_tt {
+       struct ttm_dma_tt dma_ttm;
+       struct vmw_private *dev_priv;
+       int gmr_id;
+       struct vmw_mob *mob;
+       int mem_type;
+       struct sg_table sgt;
+       struct vmw_sg_table vsgt;
+       uint64_t sg_alloc_size;
+       bool mapped;
+ };
+ const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);
+ /**
+  * Helper functions to advance a struct vmw_piter iterator.
+  *
+  * @viter: Pointer to the iterator.
+  *
+  * These functions return false if past the end of the list,
+  * true otherwise. Functions are selected depending on the current
+  * DMA mapping mode.
+  */
+ static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
+ {
+       return ++(viter->i) < viter->num_pages;
+ }
+ static bool __vmw_piter_sg_next(struct vmw_piter *viter)
+ {
+       return __sg_page_iter_next(&viter->iter);
+ }
+ /**
+  * Helper functions to return a pointer to the current page.
+  *
+  * @viter: Pointer to the iterator
+  *
+  * These functions return a pointer to the page currently
+  * pointed to by @viter. Functions are selected depending on the
+  * current mapping mode.
+  */
+ static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
+ {
+       return viter->pages[viter->i];
+ }
+ static struct page *__vmw_piter_sg_page(struct vmw_piter *viter)
+ {
+       return sg_page_iter_page(&viter->iter);
+ }
+ /**
+  * Helper functions to return the DMA address of the current page.
+  *
+  * @viter: Pointer to the iterator
+  *
+  * These functions return the DMA address of the page currently
+  * pointed to by @viter. Functions are selected depending on the
+  * current mapping mode.
+  */
+ static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *viter)
+ {
+       return page_to_phys(viter->pages[viter->i]);
+ }
+ static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
+ {
+       return viter->addrs[viter->i];
+ }
+ static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
+ {
+       return sg_page_iter_dma_address(&viter->iter);
+ }
+ /**
+  * vmw_piter_start - Initialize a struct vmw_piter.
+  *
+  * @viter: Pointer to the iterator to initialize
+  * @vsgt: Pointer to a struct vmw_sg_table to initialize from
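+  * @p_offset: Offset (in pages) into the page list at which iteration
+  * should start.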
+  *
+  * Note that we're following the convention of __sg_page_iter_start, so that
+  * the iterator doesn't point to a valid page after initialization; it has
+  * to be advanced one step first.
+  */
+ void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
+                    unsigned long p_offset)
+ {
+       viter->i = p_offset - 1;
+       viter->num_pages = vsgt->num_pages;
+       switch (vsgt->mode) {
+       case vmw_dma_phys:
+               viter->next = &__vmw_piter_non_sg_next;
+               viter->dma_address = &__vmw_piter_phys_addr;
+               viter->page = &__vmw_piter_non_sg_page;
+               viter->pages = vsgt->pages;
+               break;
+       case vmw_dma_alloc_coherent:
+               viter->next = &__vmw_piter_non_sg_next;
+               viter->dma_address = &__vmw_piter_dma_addr;
+               viter->page = &__vmw_piter_non_sg_page;
+               viter->addrs = vsgt->addrs;
+               viter->pages = vsgt->pages;
+               break;
+       case vmw_dma_map_populate:
+       case vmw_dma_map_bind:
+               viter->next = &__vmw_piter_sg_next;
+               viter->dma_address = &__vmw_piter_sg_addr;
+               viter->page = &__vmw_piter_sg_page;
+               __sg_page_iter_start(&viter->iter, vsgt->sgt->sgl,
+                                    vsgt->sgt->orig_nents, p_offset);
+               break;
+       default:
+               BUG();
+       }
+ }
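/*
 * A minimal iteration sketch (not from this patch), mirroring the loop used
 * later in this file; the iterator must be advanced once before its first use:
 *
 *	struct vmw_piter iter;
 *	dma_addr_t addr;
 *
 *	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
 *		addr = vmw_piter_dma_addr(&iter);
 *		... program addr into the device page tables ...
 *	}
 */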
+ /**
+  * vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for
+  * TTM pages
+  *
+  * @vmw_tt: Pointer to a struct vmw_ttm_backend
+  *
+  * Used to free dma mappings previously mapped by vmw_ttm_map_for_dma.
+  */
+ static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
+ {
+       struct device *dev = vmw_tt->dev_priv->dev->dev;
+       dma_unmap_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents,
+               DMA_BIDIRECTIONAL);
+       vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
+ }
+ /**
+  * vmw_ttm_map_for_dma - map TTM pages to get device addresses
+  *
+  * @vmw_tt: Pointer to a struct vmw_ttm_backend
+  *
+  * This function is used to get device addresses from the kernel DMA layer.
+  * However, it's violating the DMA API in that when this operation has been
+  * performed, it's illegal for the CPU to write to the pages without first
+  * unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
+  * therefore only legal to call this function if we know that the function
+  * dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
+  * a CPU write buffer flush.
+  */
+ static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
+ {
+       struct device *dev = vmw_tt->dev_priv->dev->dev;
+       int ret;
+       ret = dma_map_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents,
+                        DMA_BIDIRECTIONAL);
+       if (unlikely(ret == 0))
+               return -ENOMEM;
+       vmw_tt->sgt.nents = ret;
+       return 0;
+ }
+ /**
+  * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
+  *
+  * @vmw_tt: Pointer to a struct vmw_ttm_tt
+  *
+  * Select the correct mapping function and make sure the TTM pages are
+  * visible to the device. Allocate storage for the device mappings.
+  * If a mapping has already been performed, indicated by the storage
+  * pointer being non NULL, the function returns success.
+  */
+ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
+ {
+       struct vmw_private *dev_priv = vmw_tt->dev_priv;
+       struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
+       struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
+       struct ttm_operation_ctx ctx = {
+               .interruptible = true,
+               .no_wait_gpu = false
+       };
+       struct vmw_piter iter;
+       dma_addr_t old;
+       int ret = 0;
+       static size_t sgl_size;
+       static size_t sgt_size;
+       if (vmw_tt->mapped)
+               return 0;
+       vsgt->mode = dev_priv->map_mode;
+       vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
+       vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
+       vsgt->addrs = vmw_tt->dma_ttm.dma_address;
+       vsgt->sgt = &vmw_tt->sgt;
+       switch (dev_priv->map_mode) {
+       case vmw_dma_map_bind:
+       case vmw_dma_map_populate:
+               if (unlikely(!sgl_size)) {
+                       sgl_size = ttm_round_pot(sizeof(struct scatterlist));
+                       sgt_size = ttm_round_pot(sizeof(struct sg_table));
+               }
+               vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
+               ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, &ctx);
+               if (unlikely(ret != 0))
+                       return ret;
+               ret = sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
+                                               vsgt->num_pages, 0,
+                                               (unsigned long)
+                                               vsgt->num_pages << PAGE_SHIFT,
+                                               GFP_KERNEL);
+               if (unlikely(ret != 0))
+                       goto out_sg_alloc_fail;
+               if (vsgt->num_pages > vmw_tt->sgt.nents) {
+                       uint64_t over_alloc =
+                               sgl_size * (vsgt->num_pages -
+                                           vmw_tt->sgt.nents);
+                       ttm_mem_global_free(glob, over_alloc);
+                       vmw_tt->sg_alloc_size -= over_alloc;
+               }
+               ret = vmw_ttm_map_for_dma(vmw_tt);
+               if (unlikely(ret != 0))
+                       goto out_map_fail;
+               break;
+       default:
+               break;
+       }
+       old = ~((dma_addr_t) 0);
+       vmw_tt->vsgt.num_regions = 0;
+       for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
+               dma_addr_t cur = vmw_piter_dma_addr(&iter);
+               if (cur != old + PAGE_SIZE)
+                       vmw_tt->vsgt.num_regions++;
+               old = cur;
+       }
+       vmw_tt->mapped = true;
+       return 0;
+ out_map_fail:
+       sg_free_table(vmw_tt->vsgt.sgt);
+       vmw_tt->vsgt.sgt = NULL;
+ out_sg_alloc_fail:
+       ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
+       return ret;
+ }
+ /**
+  * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
+  *
+  * @vmw_tt: Pointer to a struct vmw_ttm_tt
+  *
+  * Tear down any previously set up device DMA mappings and free
+  * any storage space allocated for them. If there are no mappings set up,
+  * this function is a NOP.
+  */
+ static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
+ {
+       struct vmw_private *dev_priv = vmw_tt->dev_priv;
+       if (!vmw_tt->vsgt.sgt)
+               return;
+       switch (dev_priv->map_mode) {
+       case vmw_dma_map_bind:
+       case vmw_dma_map_populate:
+               vmw_ttm_unmap_from_dma(vmw_tt);
+               sg_free_table(vmw_tt->vsgt.sgt);
+               vmw_tt->vsgt.sgt = NULL;
+               ttm_mem_global_free(vmw_mem_glob(dev_priv),
+                                   vmw_tt->sg_alloc_size);
+               break;
+       default:
+               break;
+       }
+       vmw_tt->mapped = false;
+ }
+ /**
+  * vmw_bo_map_dma - Make sure buffer object pages are visible to the device
+  *
+  * @bo: Pointer to a struct ttm_buffer_object
+  *
+  * Wrapper around vmw_ttm_map_dma that takes a TTM buffer object pointer
+  * instead of a pointer to a struct vmw_ttm_tt as argument.
+  * Note that the buffer object must be either pinned or reserved before
+  * calling this function.
+  */
+ int vmw_bo_map_dma(struct ttm_buffer_object *bo)
+ {
+       struct vmw_ttm_tt *vmw_tt =
+               container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+       return vmw_ttm_map_dma(vmw_tt);
+ }
+ /**
+  * vmw_bo_unmap_dma - Tear down the buffer object's device DMA mappings
+  *
+  * @bo: Pointer to a struct ttm_buffer_object
+  *
+  * Wrapper around vmw_ttm_unmap_dma that takes a TTM buffer object pointer
+  * instead of a pointer to a struct vmw_ttm_tt as argument.
+  */
+ void vmw_bo_unmap_dma(struct ttm_buffer_object *bo)
+ {
+       struct vmw_ttm_tt *vmw_tt =
+               container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+       vmw_ttm_unmap_dma(vmw_tt);
+ }
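+ /*
+  * Illustrative usage (editorial sketch with a hypothetical caller, not
+  * part of the original patch): with "bo" pointing to a reserved
+  * struct ttm_buffer_object backed by this driver, its pages can be made
+  * device-visible around a command submission:
+  *
+  *     ret = vmw_bo_map_dma(bo);
+  *     if (unlikely(ret != 0))
+  *             return ret;
+  *     (submit device commands referencing the bo)
+  *     vmw_bo_unmap_dma(bo);
+  */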
+ /**
+  * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
+  * TTM buffer object
+  *
+  * @bo: Pointer to a struct ttm_buffer_object
+  *
+  * Returns a pointer to a struct vmw_sg_table object. The object must
+  * not be freed by the caller.
+  * Note that for the device addresses to be valid, the buffer object must
+  * either be reserved or pinned.
+  */
+ const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
+ {
+       struct vmw_ttm_tt *vmw_tt =
+               container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+       return &vmw_tt->vsgt;
+ }
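+ /*
+  * Illustrative usage (editorial sketch, hypothetical caller code): the
+  * returned table is typically walked with the vmw_piter helpers, e.g.
+  *
+  *     const struct vmw_sg_table *vsgt = vmw_bo_sg_table(bo);
+  *     struct vmw_piter iter;
+  *
+  *     for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);)
+  *             (use vmw_piter_dma_addr(&iter))
+  *
+  * where "bo" is a reserved or pinned buffer object.
+  */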
+ static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
+ {
+       struct vmw_ttm_tt *vmw_be =
+               container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+       int ret;
+       ret = vmw_ttm_map_dma(vmw_be);
+       if (unlikely(ret != 0))
+               return ret;
+       vmw_be->gmr_id = bo_mem->start;
+       vmw_be->mem_type = bo_mem->mem_type;
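+       /*
+        * Bind the pages to the device aperture backing this placement:
+        * either a Guest Memory Region (GMR) id or a memory object (MOB),
+        * the latter created lazily on first bind.
+        */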
+       switch (bo_mem->mem_type) {
+       case VMW_PL_GMR:
+               return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
+                                   ttm->num_pages, vmw_be->gmr_id);
+       case VMW_PL_MOB:
+               if (unlikely(vmw_be->mob == NULL)) {
+                       vmw_be->mob =
+                               vmw_mob_create(ttm->num_pages);
+                       if (unlikely(vmw_be->mob == NULL))
+                               return -ENOMEM;
+               }
+               return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
+                                   &vmw_be->vsgt, ttm->num_pages,
+                                   vmw_be->gmr_id);
+       default:
+               BUG();
+       }
+       return 0;
+ }
+ static int vmw_ttm_unbind(struct ttm_tt *ttm)
+ {
+       struct vmw_ttm_tt *vmw_be =
+               container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+       switch (vmw_be->mem_type) {
+       case VMW_PL_GMR:
+               vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
+               break;
+       case VMW_PL_MOB:
+               vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
+               break;
+       default:
+               BUG();
+       }
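+       /*
+        * In vmw_dma_map_bind mode the DMA mappings are only set up while
+        * bound, so tear them down again here.
+        */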
+       if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
+               vmw_ttm_unmap_dma(vmw_be);
+       return 0;
+ }
+ static void vmw_ttm_destroy(struct ttm_tt *ttm)
+ {
+       struct vmw_ttm_tt *vmw_be =
+               container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+       vmw_ttm_unmap_dma(vmw_be);
+       if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
+               ttm_dma_tt_fini(&vmw_be->dma_ttm);
+       else
+               ttm_tt_fini(ttm);
+       if (vmw_be->mob)
+               vmw_mob_destroy(vmw_be->mob);
+       kfree(vmw_be);
+ }
+ static int vmw_ttm_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
+ {
+       struct vmw_ttm_tt *vmw_tt =
+               container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+       struct vmw_private *dev_priv = vmw_tt->dev_priv;
+       struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
+       int ret;
+       if (ttm->state != tt_unpopulated)
+               return 0;
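+       /*
+        * Coherent mode needs the DMA-aware TTM pool and accounting for
+        * the dma_address array; otherwise the plain page pool suffices.
+        */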
+       if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
+               size_t size =
+                       ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
+               ret = ttm_mem_global_alloc(glob, size, ctx);
+               if (unlikely(ret != 0))
+                       return ret;
+               ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev,
+                                       ctx);
+               if (unlikely(ret != 0))
+                       ttm_mem_global_free(glob, size);
+       } else {
+               ret = ttm_pool_populate(ttm, ctx);
+       }
+       return ret;
+ }
+ static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
+ {
+       struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
+                                                dma_ttm.ttm);
+       struct vmw_private *dev_priv = vmw_tt->dev_priv;
+       struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
+       if (vmw_tt->mob) {
+               vmw_mob_destroy(vmw_tt->mob);
+               vmw_tt->mob = NULL;
+       }
+       vmw_ttm_unmap_dma(vmw_tt);
+       if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
+               size_t size =
+                       ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
+               ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
+               ttm_mem_global_free(glob, size);
+       } else {
+               ttm_pool_unpopulate(ttm);
+       }
+ }
+ static struct ttm_backend_func vmw_ttm_func = {
+       .bind = vmw_ttm_bind,
+       .unbind = vmw_ttm_unbind,
+       .destroy = vmw_ttm_destroy,
+ };
+ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
+                                       uint32_t page_flags)
+ {
+       struct vmw_ttm_tt *vmw_be;
+       int ret;
+       vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
+       if (!vmw_be)
+               return NULL;
+       vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
+       vmw_be->dev_priv = container_of(bo->bdev, struct vmw_private, bdev);
+       vmw_be->mob = NULL;
+       if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
+               ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags);
+       else
+               ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bo, page_flags);
+       if (unlikely(ret != 0))
+               goto out_no_init;
+       return &vmw_be->dma_ttm.ttm;
+ out_no_init:
+       kfree(vmw_be);
+       return NULL;
+ }
+ static int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
+ {
+       return 0;
+ }
+ static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+                     struct ttm_mem_type_manager *man)
+ {
+       switch (type) {
+       case TTM_PL_SYSTEM:
+               /* System memory */
+               man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+               man->available_caching = TTM_PL_FLAG_CACHED;
+               man->default_caching = TTM_PL_FLAG_CACHED;
+               break;
+       case TTM_PL_VRAM:
+               /* "On-card" video ram */
+               man->func = &ttm_bo_manager_func;
+               man->gpu_offset = 0;
+               man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
+               man->available_caching = TTM_PL_FLAG_CACHED;
+               man->default_caching = TTM_PL_FLAG_CACHED;
+               break;
+       case VMW_PL_GMR:
+       case VMW_PL_MOB:
+               /*
+                * "Guest Memory Regions" is an aperture like feature with
+                *  one slot per bo. There is an upper limit of the number of
+                *  slots as well as the bo size.
+                */
+               man->func = &vmw_gmrid_manager_func;
+               man->gpu_offset = 0;
+               man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
+               man->available_caching = TTM_PL_FLAG_CACHED;
+               man->default_caching = TTM_PL_FLAG_CACHED;
+               break;
+       default:
+               DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+               return -EINVAL;
+       }
+       return 0;
+ }
+ static void vmw_evict_flags(struct ttm_buffer_object *bo,
+                    struct ttm_placement *placement)
+ {
+       *placement = vmw_sys_placement;
+ }
+ static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+ {
+       struct ttm_object_file *tfile =
+               vmw_fpriv((struct drm_file *)filp->private_data)->tfile;
+       return vmw_user_bo_verify_access(bo, tfile);
+ }
+ static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+ {
+       struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+       struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);
+       mem->bus.addr = NULL;
+       mem->bus.is_iomem = false;
+       mem->bus.offset = 0;
+       mem->bus.size = mem->num_pages << PAGE_SHIFT;
+       mem->bus.base = 0;
+       if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+               return -EINVAL;
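+       /*
+        * Only VRAM has a CPU-accessible I/O aperture; system pages, GMRs
+        * and MOBs need no bus address setup.
+        */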
+       switch (mem->mem_type) {
+       case TTM_PL_SYSTEM:
+       case VMW_PL_GMR:
+       case VMW_PL_MOB:
+               return 0;
+       case TTM_PL_VRAM:
+               mem->bus.offset = mem->start << PAGE_SHIFT;
+               mem->bus.base = dev_priv->vram_start;
+               mem->bus.is_iomem = true;
+               break;
+       default:
+               return -EINVAL;
+       }
+       return 0;
+ }
+ static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+ {
+ }
+ static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
+ {
+       return 0;
+ }
+ /**
+  * vmw_move_notify - TTM move_notify callback
+  *
+  * @bo: The TTM buffer object about to move.
+  * @evict: Whether this move is an eviction.
+  * @mem: The struct ttm_mem_reg indicating to what memory
+  *       region the move is taking place.
+  *
+  * Calls move_notify for all subsystems needing it
+  * (currently resource and query buffer handling).
+  */
+ static void vmw_move_notify(struct ttm_buffer_object *bo,
+                           bool evict,
+                           struct ttm_mem_reg *mem)
+ {
+       vmw_bo_move_notify(bo, mem);
+       vmw_query_move_notify(bo, mem);
+ }
+ /**
+  * vmw_swap_notify - TTM swap_notify callback
+  *
+  * @bo: The TTM buffer object about to be swapped out.
+  */
+ static void vmw_swap_notify(struct ttm_buffer_object *bo)
+ {
+       vmw_bo_swap_notify(bo);
+       (void) ttm_bo_wait(bo, false, false);
+ }
+ struct ttm_bo_driver vmw_bo_driver = {
+       .ttm_tt_create = &vmw_ttm_tt_create,
+       .ttm_tt_populate = &vmw_ttm_populate,
+       .ttm_tt_unpopulate = &vmw_ttm_unpopulate,
+       .invalidate_caches = vmw_invalidate_caches,
+       .init_mem_type = vmw_init_mem_type,
+       .eviction_valuable = ttm_bo_eviction_valuable,
+       .evict_flags = vmw_evict_flags,
+       .move = NULL,
+       .verify_access = vmw_verify_access,
+       .move_notify = vmw_move_notify,
+       .swap_notify = vmw_swap_notify,
+       .fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
+       .io_mem_reserve = &vmw_ttm_io_mem_reserve,
+       .io_mem_free = &vmw_ttm_io_mem_free,
+ };
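+ /*
+  * Editorial note: this driver structure is registered with TTM via
+  * ttm_bo_device_init() during device load (see vmwgfx_drv.c).
+  */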