Merge tag 'drm-fixes-for-v4.11-rc6' of git://people.freedesktop.org/~airlied/linux
author    Linus Torvalds <torvalds@linux-foundation.org>
          Tue, 4 Apr 2017 17:12:15 +0000 (10:12 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Tue, 4 Apr 2017 17:12:15 +0000 (10:12 -0700)
Pull drm fixes from Dave Airlie:
 "This is just mostly stuff that missed rc5, from vmwgfx and msm
  drivers"

* tag 'drm-fixes-for-v4.11-rc6' of git://people.freedesktop.org/~airlied/linux:
  drm/msm: Make sure to detach the MMU during GPU cleanup
  drm/msm/hdmi: redefinitions of macros not required
  drm/msm/mdp5: Update SSPP_MAX value
  drm/msm/dsi: Fix bug in dsi_mgr_phy_enable
  drm/msm: Don't allow zero sized buffer objects
  drm/msm: Fix wrong pointer check in a5xx_destroy
  drm/msm: adreno: fix build error without debugfs
  drm/vmwgfx: fix integer overflow in vmw_surface_define_ioctl()
  drm/vmwgfx: Remove getparam error message
  drm/ttm: Avoid calling drm_ht_remove from atomic context
  drm/ttm, drm/vmwgfx: Relax permission checking when opening surfaces
  drm/vmwgfx: avoid calling vzalloc with a 0 size in vmw_get_cap_3d_ioctl()
  drm/vmwgfx: NULL pointer dereference in vmw_surface_define_ioctl()
  drm/vmwgfx: Type-check lookups of fence objects

13 files changed:
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
drivers/gpu/drm/msm/adreno/adreno_gpu.c
drivers/gpu/drm/msm/dsi/dsi_manager.c
drivers/gpu/drm/msm/hdmi/hdmi_audio.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_gpu.c
drivers/gpu/drm/ttm/ttm_object.c
drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
include/drm/ttm/ttm_object.h

diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 4414cf73735d26ccb655756327af8226f539f4f5..36602ac7e24835fb9350b3040037524f4c95b7d1 100644
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -534,7 +534,7 @@ static void a5xx_destroy(struct msm_gpu *gpu)
        }
 
        if (a5xx_gpu->gpmu_bo) {
-               if (a5xx_gpu->gpmu_bo)
+               if (a5xx_gpu->gpmu_iova)
                        msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->id);
                drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
        }
@@ -860,7 +860,9 @@ static const struct adreno_gpu_funcs funcs = {
                .idle = a5xx_idle,
                .irq = a5xx_irq,
                .destroy = a5xx_destroy,
+#ifdef CONFIG_DEBUG_FS
                .show = a5xx_show,
+#endif
        },
        .get_timestamp = a5xx_get_timestamp,
 };
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index c9bd1e6225f4f96e3b8a79a101508f244de17baf..5ae65426b4e5593c5f88b19627c10b403995ed73 100644
@@ -418,18 +418,27 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
        return 0;
 }
 
-void adreno_gpu_cleanup(struct adreno_gpu *gpu)
+void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
 {
-       if (gpu->memptrs_bo) {
-               if (gpu->memptrs)
-                       msm_gem_put_vaddr(gpu->memptrs_bo);
+       struct msm_gpu *gpu = &adreno_gpu->base;
+
+       if (adreno_gpu->memptrs_bo) {
+               if (adreno_gpu->memptrs)
+                       msm_gem_put_vaddr(adreno_gpu->memptrs_bo);
+
+               if (adreno_gpu->memptrs_iova)
+                       msm_gem_put_iova(adreno_gpu->memptrs_bo, gpu->id);
+
+               drm_gem_object_unreference_unlocked(adreno_gpu->memptrs_bo);
+       }
+       release_firmware(adreno_gpu->pm4);
+       release_firmware(adreno_gpu->pfp);
 
-               if (gpu->memptrs_iova)
-                       msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
+       msm_gpu_cleanup(gpu);
 
-               drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
+       if (gpu->aspace) {
+               gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
+                       iommu_ports, ARRAY_SIZE(iommu_ports));
+               msm_gem_address_space_destroy(gpu->aspace);
        }
-       release_firmware(gpu->pm4);
-       release_firmware(gpu->pfp);
-       msm_gpu_cleanup(&gpu->base);
 }
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 921270ea6059debadab1a785ed19ca79b064811c..a879ffa534b4d845007d3f974f9ce9c7df69dff0 100644
@@ -171,7 +171,7 @@ dsi_mgr_phy_enable(int id,
                        }
                }
        } else {
-               msm_dsi_host_reset_phy(mdsi->host);
+               msm_dsi_host_reset_phy(msm_dsi->host);
                ret = enable_phy(msm_dsi, src_pll_id, &shared_timings[id]);
                if (ret)
                        return ret;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
index a54d3bb5baad9c01047a582ad3cad98a9efb7f66..8177e8511afd8c6827b1a731688e159da652a8d2 100644
 #include <linux/hdmi.h>
 #include "hdmi.h"
 
-
-/* Supported HDMI Audio channels */
-#define MSM_HDMI_AUDIO_CHANNEL_2               0
-#define MSM_HDMI_AUDIO_CHANNEL_4               1
-#define MSM_HDMI_AUDIO_CHANNEL_6               2
-#define MSM_HDMI_AUDIO_CHANNEL_8               3
-
 /* maps MSM_HDMI_AUDIO_CHANNEL_n consts used by audio driver to # of channels: */
 static int nchannels[] = { 2, 4, 6, 8 };
 
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h
index 611da7a660c9426ed6c165ae90c8257f7a777360..238901987e00b0c1bc74979647a6cfadcfee084a 100644
@@ -18,7 +18,8 @@
 #ifndef __MDP5_PIPE_H__
 #define __MDP5_PIPE_H__
 
-#define SSPP_MAX       (SSPP_RGB3 + 1) /* TODO: Add SSPP_MAX in mdp5.xml.h */
+/* TODO: Add SSPP_MAX in mdp5.xml.h */
+#define SSPP_MAX       (SSPP_CURSOR1 + 1)
 
 /* represents a hw pipe, which is dynamically assigned to a plane */
 struct mdp5_hw_pipe {
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 59811f29607de60f2a22d53f9ec969e62bb39d98..68e509b3b9e4d08730e3901f46a397519c33e77c 100644
@@ -812,6 +812,12 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
 
        size = PAGE_ALIGN(size);
 
+       /* Disallow zero sized objects as they make the underlying
+        * infrastructure grumpy
+        */
+       if (size == 0)
+               return ERR_PTR(-EINVAL);
+
        ret = msm_gem_new_impl(dev, size, flags, NULL, &obj);
        if (ret)
                goto fail;
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 99e05aacbee181f4341625af8abbc91ad284f381..af5b6ba4095b06f6a24069f62e178618b547209e 100644
@@ -706,9 +706,6 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
                msm_ringbuffer_destroy(gpu->rb);
        }
 
-       if (gpu->aspace)
-               msm_gem_address_space_destroy(gpu->aspace);
-
        if (gpu->fctx)
                msm_fence_context_free(gpu->fctx);
 }
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index fdb451e3ec01184a4642e6facd6ddf8e5f0cad47..26a7ad0f478978205be87f9409f309021b610884 100644
@@ -179,7 +179,7 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
        if (unlikely(ret != 0))
                goto out_err0;
 
-       ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+       ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
        if (unlikely(ret != 0))
                goto out_err1;
 
@@ -318,7 +318,8 @@ EXPORT_SYMBOL(ttm_ref_object_exists);
 
 int ttm_ref_object_add(struct ttm_object_file *tfile,
                       struct ttm_base_object *base,
-                      enum ttm_ref_type ref_type, bool *existed)
+                      enum ttm_ref_type ref_type, bool *existed,
+                      bool require_existed)
 {
        struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
        struct ttm_ref_object *ref;
@@ -345,6 +346,9 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
                }
 
                rcu_read_unlock();
+               if (require_existed)
+                       return -EPERM;
+
                ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
                                           false, false);
                if (unlikely(ret != 0))
@@ -449,10 +453,10 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
                ttm_ref_object_release(&ref->kref);
        }
 
+       spin_unlock(&tfile->lock);
        for (i = 0; i < TTM_REF_NUM; ++i)
                drm_ht_remove(&tfile->ref_hash[i]);
 
-       spin_unlock(&tfile->lock);
        ttm_object_file_unref(&tfile);
 }
 EXPORT_SYMBOL(ttm_object_file_release);
@@ -529,9 +533,7 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev)
 
        *p_tdev = NULL;
 
-       spin_lock(&tdev->object_lock);
        drm_ht_remove(&tdev->object_hash);
-       spin_unlock(&tdev->object_lock);
 
        kfree(tdev);
 }
@@ -635,7 +637,7 @@ int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
        prime = (struct ttm_prime_object *) dma_buf->priv;
        base = &prime->base;
        *handle = base->hash.key;
-       ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+       ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
 
        dma_buf_put(dma_buf);
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 6541dd8b82dc0747433403b64795843a29e0544c..6b2708b4eafe84c832d41a10f75c2be9e65fc0b9 100644
@@ -538,7 +538,7 @@ int vmw_fence_create(struct vmw_fence_manager *fman,
                     struct vmw_fence_obj **p_fence)
 {
        struct vmw_fence_obj *fence;
-       int ret;
+       int ret;
 
        fence = kzalloc(sizeof(*fence), GFP_KERNEL);
        if (unlikely(fence == NULL))
@@ -701,6 +701,41 @@ void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
 }
 
 
+/**
+ * vmw_fence_obj_lookup - Look up a user-space fence object
+ *
+ * @tfile: A struct ttm_object_file identifying the caller.
+ * @handle: A handle identifying the fence object.
+ * @return: A struct vmw_user_fence base ttm object on success or
+ * an error pointer on failure.
+ *
+ * The fence object is looked up and type-checked. The caller needs
+ * to have opened the fence object first, but since that happens on
+ * creation and fence objects aren't shareable, that's not an
+ * issue currently.
+ */
+static struct ttm_base_object *
+vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
+{
+       struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);
+
+       if (!base) {
+               pr_err("Invalid fence object handle 0x%08lx.\n",
+                      (unsigned long)handle);
+               return ERR_PTR(-EINVAL);
+       }
+
+       if (base->refcount_release != vmw_user_fence_base_release) {
+               pr_err("Invalid fence object handle 0x%08lx.\n",
+                      (unsigned long)handle);
+               ttm_base_object_unref(&base);
+               return ERR_PTR(-EINVAL);
+       }
+
+       return base;
+}
+
+
 int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
 {
@@ -726,13 +761,9 @@ int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
                arg->kernel_cookie = jiffies + wait_timeout;
        }
 
-       base = ttm_base_object_lookup(tfile, arg->handle);
-       if (unlikely(base == NULL)) {
-               printk(KERN_ERR "Wait invalid fence object handle "
-                      "0x%08lx.\n",
-                      (unsigned long)arg->handle);
-               return -EINVAL;
-       }
+       base = vmw_fence_obj_lookup(tfile, arg->handle);
+       if (IS_ERR(base))
+               return PTR_ERR(base);
 
        fence = &(container_of(base, struct vmw_user_fence, base)->fence);
 
@@ -771,13 +802,9 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_private *dev_priv = vmw_priv(dev);
 
-       base = ttm_base_object_lookup(tfile, arg->handle);
-       if (unlikely(base == NULL)) {
-               printk(KERN_ERR "Fence signaled invalid fence object handle "
-                      "0x%08lx.\n",
-                      (unsigned long)arg->handle);
-               return -EINVAL;
-       }
+       base = vmw_fence_obj_lookup(tfile, arg->handle);
+       if (IS_ERR(base))
+               return PTR_ERR(base);
 
        fence = &(container_of(base, struct vmw_user_fence, base)->fence);
        fman = fman_from_fence(fence);
@@ -1024,6 +1051,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
                (struct drm_vmw_fence_event_arg *) data;
        struct vmw_fence_obj *fence = NULL;
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
+       struct ttm_object_file *tfile = vmw_fp->tfile;
        struct drm_vmw_fence_rep __user *user_fence_rep =
                (struct drm_vmw_fence_rep __user *)(unsigned long)
                arg->fence_rep;
@@ -1037,24 +1065,18 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
         */
        if (arg->handle) {
                struct ttm_base_object *base =
-                       ttm_base_object_lookup_for_ref(dev_priv->tdev,
-                                                      arg->handle);
-
-               if (unlikely(base == NULL)) {
-                       DRM_ERROR("Fence event invalid fence object handle "
-                                 "0x%08lx.\n",
-                                 (unsigned long)arg->handle);
-                       return -EINVAL;
-               }
+                       vmw_fence_obj_lookup(tfile, arg->handle);
+
+               if (IS_ERR(base))
+                       return PTR_ERR(base);
+
                fence = &(container_of(base, struct vmw_user_fence,
                                       base)->fence);
                (void) vmw_fence_obj_reference(fence);
 
                if (user_fence_rep != NULL) {
-                       bool existed;
-
                        ret = ttm_ref_object_add(vmw_fp->tfile, base,
-                                                TTM_REF_USAGE, &existed);
+                                                TTM_REF_USAGE, NULL, false);
                        if (unlikely(ret != 0)) {
                                DRM_ERROR("Failed to reference a fence "
                                          "object.\n");
@@ -1097,8 +1119,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
        return 0;
 out_no_create:
        if (user_fence_rep != NULL)
-               ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
-                                         handle, TTM_REF_USAGE);
+               ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
 out_no_ref_obj:
        vmw_fence_obj_unreference(&fence);
        return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index b8c6a03c8c54df15def2d359ee5253f213f1816e..5ec24fd801cd2bb1b32b66540b91edcca2ef3b6d 100644
@@ -114,8 +114,6 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
                param->value = dev_priv->has_dx;
                break;
        default:
-               DRM_ERROR("Illegal vmwgfx get param request: %d\n",
-                         param->param);
                return -EINVAL;
        }
 
@@ -186,7 +184,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
        bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS);
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
 
-       if (unlikely(arg->pad64 != 0)) {
+       if (unlikely(arg->pad64 != 0 || arg->max_size == 0)) {
                DRM_ERROR("Illegal GET_3D_CAP argument.\n");
                return -EINVAL;
        }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 65b3f0369636710eda49086f72e250478dfe288d..bf23153d4f55515b54c66f5a250b8f04aa7f5051 100644
@@ -589,7 +589,7 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
                return ret;
 
        ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
-                                TTM_REF_SYNCCPU_WRITE, &existed);
+                                TTM_REF_SYNCCPU_WRITE, &existed, false);
        if (ret != 0 || existed)
                ttm_bo_synccpu_write_release(&user_bo->dma.base);
 
@@ -773,7 +773,7 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
 
        *handle = user_bo->prime.base.hash.key;
        return ttm_ref_object_add(tfile, &user_bo->prime.base,
-                                 TTM_REF_USAGE, NULL);
+                                 TTM_REF_USAGE, NULL, false);
 }
 
 /*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index b445ce9b9757861ecc1ece1071f1c8c3a02a166f..05fa092c942beedb209b25777971a0c196d19d85 100644
@@ -713,11 +713,14 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
                        128;
 
        num_sizes = 0;
-       for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
+       for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
+               if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS)
+                       return -EINVAL;
                num_sizes += req->mip_levels[i];
+       }
 
-       if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
-           DRM_VMW_MAX_MIP_LEVELS)
+       if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS ||
+           num_sizes == 0)
                return -EINVAL;
 
        size = vmw_user_surface_size + 128 +
@@ -891,17 +894,16 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
        uint32_t handle;
        struct ttm_base_object *base;
        int ret;
+       bool require_exist = false;
 
        if (handle_type == DRM_VMW_HANDLE_PRIME) {
                ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
                if (unlikely(ret != 0))
                        return ret;
        } else {
-               if (unlikely(drm_is_render_client(file_priv))) {
-                       DRM_ERROR("Render client refused legacy "
-                                 "surface reference.\n");
-                       return -EACCES;
-               }
+               if (unlikely(drm_is_render_client(file_priv)))
+                       require_exist = true;
+
                if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) {
                        DRM_ERROR("Locked master refused legacy "
                                  "surface reference.\n");
@@ -929,17 +931,14 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
 
                /*
                 * Make sure the surface creator has the same
-                * authenticating master.
+                * authenticating master, or is already registered with us.
                 */
                if (drm_is_primary_client(file_priv) &&
-                   user_srf->master != file_priv->master) {
-                       DRM_ERROR("Trying to reference surface outside of"
-                                 " master domain.\n");
-                       ret = -EACCES;
-                       goto out_bad_resource;
-               }
+                   user_srf->master != file_priv->master)
+                       require_exist = true;
 
-               ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+               ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL,
+                                        require_exist);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Could not add a reference to a surface.\n");
                        goto out_bad_resource;
diff --git a/include/drm/ttm/ttm_object.h b/include/drm/ttm/ttm_object.h
index ed953f98f0e1446ce01630c005e406f941282347..1487011fe057ba4b35271bc85e52c6b90291c2c3 100644
@@ -229,6 +229,8 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base);
  * @ref_type: The type of reference.
  * @existed: Upon completion, indicates that an identical reference object
  * already existed, and the refcount was upped on that object instead.
+ * @require_existed: Fail with -EPERM if an identical ref object didn't
+ * already exist.
  *
  * Checks that the base object is shareable and adds a ref object to it.
  *
@@ -243,7 +245,8 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base);
  */
 extern int ttm_ref_object_add(struct ttm_object_file *tfile,
                              struct ttm_base_object *base,
-                             enum ttm_ref_type ref_type, bool *existed);
+                             enum ttm_ref_type ref_type, bool *existed,
+                             bool require_existed);
 
 extern bool ttm_ref_object_exists(struct ttm_object_file *tfile,
                                  struct ttm_base_object *base);