git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/commitdiff
vmwgfx: Restrict number of GMR pages to device limit
author: Thomas Hellstrom <thellstrom@vmware.com>
Wed, 31 Aug 2011 07:42:53 +0000 (07:42 +0000)
committer: Dave Airlie <airlied@redhat.com>
Thu, 1 Sep 2011 08:38:07 +0000 (09:38 +0100)
When GMR2 is available, make sure we restrict the number of used GMR pages
to the limit indicated by the device.
This is done by failing a GMRID allocation if the total number of GMR pages
exceeds the limit.
As a result TTM will then start evicting buffers in GMR memory on an
LRU basis until the allocation succeeds.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Jakob Bornecrantz <jakob@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c

index 3d6a073e130b2aa1855d2cc25a1ca456fea82d6a..77e77b2b9d8054f43d519d50c670554ce61ba2b6 100644 (file)
@@ -322,6 +322,12 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
                dev_priv->max_gmr_ids =
                        vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
        }
+       if (dev_priv->capabilities & SVGA_CAP_GMR2) {
+               dev_priv->max_gmr_pages =
+                       vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
+               dev_priv->memory_size =
+                       vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
+       }
 
        dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
        dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
@@ -338,6 +344,12 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
                DRM_INFO("Max GMR descriptors is %u\n",
                         (unsigned)dev_priv->max_gmr_descriptors);
        }
+       if (dev_priv->capabilities & SVGA_CAP_GMR2) {
+               DRM_INFO("Max number of GMR pages is %u\n",
+                        (unsigned)dev_priv->max_gmr_pages);
+               DRM_INFO("Max dedicated hypervisor graphics memory is %u\n",
+                        (unsigned)dev_priv->memory_size);
+       }
        DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
                 dev_priv->vram_start, dev_priv->vram_size / 1024);
        DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
index 95b75000f0c910a8d5bce68855ab294f59239eff..323fc10de2d6b2bd99ff080b514a27f6466b8880 100644 (file)
@@ -190,6 +190,8 @@ struct vmw_private {
        uint32_t capabilities;
        uint32_t max_gmr_descriptors;
        uint32_t max_gmr_ids;
+       uint32_t max_gmr_pages;
+       uint32_t memory_size;
        bool has_gmr;
        struct mutex hw_mutex;
 
index ac6e0d1bd629c17b57a1dce297c5a04e67f1f402..5f717152cff530f4b464b376de220e75e2f2a238 100644 (file)
@@ -40,6 +40,8 @@ struct vmwgfx_gmrid_man {
        spinlock_t lock;
        struct ida gmr_ida;
        uint32_t max_gmr_ids;
+       uint32_t max_gmr_pages;
+       uint32_t used_gmr_pages;
 };
 
 static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
@@ -49,33 +51,50 @@ static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
 {
        struct vmwgfx_gmrid_man *gman =
                (struct vmwgfx_gmrid_man *)man->priv;
-       int ret;
+       int ret = 0;
        int id;
 
        mem->mm_node = NULL;
 
-       do {
-               if (unlikely(ida_pre_get(&gman->gmr_ida, GFP_KERNEL) == 0))
-                       return -ENOMEM;
+       spin_lock(&gman->lock);
+
+       if (gman->max_gmr_pages > 0) {
+               gman->used_gmr_pages += bo->num_pages;
+               if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages))
+                       goto out_err_locked;
+       }
 
+       do {
+               spin_unlock(&gman->lock);
+               if (unlikely(ida_pre_get(&gman->gmr_ida, GFP_KERNEL) == 0)) {
+                       ret = -ENOMEM;
+                       goto out_err;
+               }
                spin_lock(&gman->lock);
-               ret = ida_get_new(&gman->gmr_ida, &id);
 
+               ret = ida_get_new(&gman->gmr_ida, &id);
                if (unlikely(ret == 0 && id >= gman->max_gmr_ids)) {
                        ida_remove(&gman->gmr_ida, id);
-                       spin_unlock(&gman->lock);
-                       return 0;
+                       ret = 0;
+                       goto out_err_locked;
                }
-
-               spin_unlock(&gman->lock);
-
        } while (ret == -EAGAIN);
 
        if (likely(ret == 0)) {
                mem->mm_node = gman;
                mem->start = id;
-       }
+               mem->num_pages = bo->num_pages;
+       } else
+               goto out_err_locked;
+
+       spin_unlock(&gman->lock);
+       return 0;
 
+out_err:
+       spin_lock(&gman->lock);
+out_err_locked:
+       gman->used_gmr_pages -= bo->num_pages;
+       spin_unlock(&gman->lock);
        return ret;
 }
 
@@ -88,6 +107,7 @@ static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
        if (mem->mm_node) {
                spin_lock(&gman->lock);
                ida_remove(&gman->gmr_ida, mem->start);
+               gman->used_gmr_pages -= mem->num_pages;
                spin_unlock(&gman->lock);
                mem->mm_node = NULL;
        }
@@ -96,6 +116,8 @@ static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
 static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
                              unsigned long p_size)
 {
+       struct vmw_private *dev_priv =
+               container_of(man->bdev, struct vmw_private, bdev);
        struct vmwgfx_gmrid_man *gman =
                kzalloc(sizeof(*gman), GFP_KERNEL);
 
@@ -103,6 +125,8 @@ static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
                return -ENOMEM;
 
        spin_lock_init(&gman->lock);
+       gman->max_gmr_pages = dev_priv->max_gmr_pages;
+       gman->used_gmr_pages = 0;
        ida_init(&gman->gmr_ida);
        gman->max_gmr_ids = p_size;
        man->priv = (void *) gman;