drm/i915: Add flag to i915_add_request() to skip the cache flush

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f128ed8d6f65d7b40f325d60014f975bc1df9284..74c319350876ff57be0994b9c51a55fc6d28d903 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
 #include <linux/pci.h>
 #include <linux/dma-buf.h>
 
+#define RQ_BUG_ON(expr)
+
 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
-static __must_check int
-i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
-                              bool readonly);
 static void
-i915_gem_object_retire(struct drm_i915_gem_object *obj);
-
+i915_gem_object_retire__write(struct drm_i915_gem_object *obj);
+static void
+i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring);
 static void i915_gem_write_fence(struct drm_device *dev, int reg,
                                 struct drm_i915_gem_object *obj);
 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
@@ -350,7 +350,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
        if (ret)
                return ret;
 
-       intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
+       intel_fb_obj_invalidate(obj, ORIGIN_CPU);
        if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
                unsigned long unwritten;
 
@@ -518,8 +518,6 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
                ret = i915_gem_object_wait_rendering(obj, true);
                if (ret)
                        return ret;
-
-               i915_gem_object_retire(obj);
        }
 
        ret = i915_gem_object_get_pages(obj);
@@ -806,7 +804,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
 
        offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
 
-       intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT);
+       intel_fb_obj_invalidate(obj, ORIGIN_GTT);
 
        while (remain > 0) {
                /* Operation in this page
@@ -939,8 +937,6 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
                ret = i915_gem_object_wait_rendering(obj, false);
                if (ret)
                        return ret;
-
-               i915_gem_object_retire(obj);
        }
        /* Same trick applies to invalidate partially written cachelines read
         * before writing. */
@@ -952,7 +948,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
        if (ret)
                return ret;
 
-       intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
+       intel_fb_obj_invalidate(obj, ORIGIN_CPU);
 
        i915_gem_object_pin_pages(obj);
 
@@ -1159,15 +1155,12 @@ i915_gem_check_wedge(struct i915_gpu_error *error,
 int
 i915_gem_check_olr(struct drm_i915_gem_request *req)
 {
-       int ret;
-
        WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
 
-       ret = 0;
        if (req == req->ring->outstanding_lazy_request)
-               ret = i915_add_request(req->ring);
+               i915_add_request(req->ring);
 
-       return ret;
+       return 0;
 }
 
 static void fake_irq(unsigned long data)
@@ -1181,16 +1174,16 @@ static bool missed_irq(struct drm_i915_private *dev_priv,
        return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
 }
 
-static int __i915_spin_request(struct drm_i915_gem_request *rq)
+static int __i915_spin_request(struct drm_i915_gem_request *req)
 {
        unsigned long timeout;
 
-       if (i915_gem_request_get_ring(rq)->irq_refcount)
+       if (i915_gem_request_get_ring(req)->irq_refcount)
                return -EBUSY;
 
        timeout = jiffies + 1;
        while (!need_resched()) {
-               if (i915_gem_request_completed(rq, true))
+               if (i915_gem_request_completed(req, true))
                        return 0;
 
                if (time_after_eq(jiffies, timeout))
@@ -1198,7 +1191,7 @@ static int __i915_spin_request(struct drm_i915_gem_request *rq)
 
                cpu_relax_lowlatency();
        }
-       if (i915_gem_request_completed(rq, false))
+       if (i915_gem_request_completed(req, false))
                return 0;
 
        return -EAGAIN;
@@ -1225,7 +1218,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
                        unsigned reset_counter,
                        bool interruptible,
                        s64 *timeout,
-                       struct drm_i915_file_private *file_priv)
+                       struct intel_rps_client *rps)
 {
        struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
        struct drm_device *dev = ring->dev;
@@ -1239,14 +1232,17 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 
        WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
 
+       if (list_empty(&req->list))
+               return 0;
+
        if (i915_gem_request_completed(req, true))
                return 0;
 
        timeout_expire = timeout ?
                jiffies + nsecs_to_jiffies_timeout((u64)*timeout) : 0;
 
-       if (INTEL_INFO(dev)->gen >= 6)
-               gen6_rps_boost(dev_priv, file_priv);
+       if (INTEL_INFO(dev_priv)->gen >= 6)
+               gen6_rps_boost(dev_priv, rps, req->emitted_jiffies);
 
        /* Record current time in case interrupted by signal, or wedged */
        trace_i915_gem_request_wait_begin(req);
@@ -1338,6 +1334,63 @@ out:
        return ret;
 }
 
+static inline void
+i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
+{
+       struct drm_i915_file_private *file_priv = request->file_priv;
+
+       if (!file_priv)
+               return;
+
+       spin_lock(&file_priv->mm.lock);
+       list_del(&request->client_list);
+       request->file_priv = NULL;
+       spin_unlock(&file_priv->mm.lock);
+}
+
+static void i915_gem_request_retire(struct drm_i915_gem_request *request)
+{
+       trace_i915_gem_request_retire(request);
+
+       /* We know the GPU must have read the request to have
+        * sent us the seqno + interrupt, so use the position
+        * of tail of the request to update the last known position
+        * of the GPU head.
+        *
+        * Note this requires that we are always called in request
+        * completion order.
+        */
+       request->ringbuf->last_retired_head = request->postfix;
+
+       list_del_init(&request->list);
+       i915_gem_request_remove_from_client(request);
+
+       put_pid(request->pid);
+
+       i915_gem_request_unreference(request);
+}
+
+static void
+__i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
+{
+       struct intel_engine_cs *engine = req->ring;
+       struct drm_i915_gem_request *tmp;
+
+       lockdep_assert_held(&engine->dev->struct_mutex);
+
+       if (list_empty(&req->list))
+               return;
+
+       do {
+               tmp = list_first_entry(&engine->request_list,
+                                      typeof(*tmp), list);
+
+               i915_gem_request_retire(tmp);
+       } while (tmp != req);
+
+       WARN_ON(i915_verify_lists(engine->dev));
+}
+
 /**
  * Waits for a request to be signaled, and cleans up the
  * request and object lists appropriately for that event.
@@ -1348,7 +1401,6 @@ i915_wait_request(struct drm_i915_gem_request *req)
        struct drm_device *dev;
        struct drm_i915_private *dev_priv;
        bool interruptible;
-       unsigned reset_counter;
        int ret;
 
        BUG_ON(req == NULL);
@@ -1367,29 +1419,13 @@ i915_wait_request(struct drm_i915_gem_request *req)
        if (ret)
                return ret;
 
-       reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
-       i915_gem_request_reference(req);
-       ret = __i915_wait_request(req, reset_counter,
+       ret = __i915_wait_request(req,
+                                 atomic_read(&dev_priv->gpu_error.reset_counter),
                                  interruptible, NULL, NULL);
-       i915_gem_request_unreference(req);
-       return ret;
-}
-
-static int
-i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj)
-{
-       if (!obj->active)
-               return 0;
-
-       /* Manually manage the write flush as we may have not yet
-        * retired the buffer.
-        *
-        * Note that the last_write_req is always the earlier of
-        * the two (read/write) requests, so if we haved successfully waited,
-        * we know we have passed the last write.
-        */
-       i915_gem_request_assign(&obj->last_write_req, NULL);
+       if (ret)
+               return ret;
 
+       __i915_gem_request_retire__upto(req);
        return 0;
 }
 
@@ -1397,22 +1433,56 @@ i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj)
  * Ensures that all rendering to the object has completed and the object is
  * safe to unbind from the GTT or access from the CPU.
  */
-static __must_check int
+int
 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
                               bool readonly)
 {
-       struct drm_i915_gem_request *req;
-       int ret;
+       int ret, i;
 
-       req = readonly ? obj->last_write_req : obj->last_read_req;
-       if (!req)
+       if (!obj->active)
                return 0;
 
-       ret = i915_wait_request(req);
-       if (ret)
-               return ret;
+       if (readonly) {
+               if (obj->last_write_req != NULL) {
+                       ret = i915_wait_request(obj->last_write_req);
+                       if (ret)
+                               return ret;
+
+                       i = obj->last_write_req->ring->id;
+                       if (obj->last_read_req[i] == obj->last_write_req)
+                               i915_gem_object_retire__read(obj, i);
+                       else
+                               i915_gem_object_retire__write(obj);
+               }
+       } else {
+               for (i = 0; i < I915_NUM_RINGS; i++) {
+                       if (obj->last_read_req[i] == NULL)
+                               continue;
+
+                       ret = i915_wait_request(obj->last_read_req[i]);
+                       if (ret)
+                               return ret;
+
+                       i915_gem_object_retire__read(obj, i);
+               }
+               RQ_BUG_ON(obj->active);
+       }
+
+       return 0;
+}
 
-       return i915_gem_object_wait_rendering__tail(obj);
+static void
+i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
+                              struct drm_i915_gem_request *req)
+{
+       int ring = req->ring->id;
+
+       if (obj->last_read_req[ring] == req)
+               i915_gem_object_retire__read(obj, ring);
+       else if (obj->last_write_req == req)
+               i915_gem_object_retire__write(obj);
+
+       __i915_gem_request_retire__upto(req);
 }
 
 /* A nonblocking variant of the above wait. This is a highly dangerous routine
@@ -1420,40 +1490,75 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
  */
 static __must_check int
 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
-                                           struct drm_i915_file_private *file_priv,
+                                           struct intel_rps_client *rps,
                                            bool readonly)
 {
-       struct drm_i915_gem_request *req;
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_gem_request *requests[I915_NUM_RINGS];
        unsigned reset_counter;
-       int ret;
+       int ret, i, n = 0;
 
        BUG_ON(!mutex_is_locked(&dev->struct_mutex));
        BUG_ON(!dev_priv->mm.interruptible);
 
-       req = readonly ? obj->last_write_req : obj->last_read_req;
-       if (!req)
+       if (!obj->active)
                return 0;
 
        ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
        if (ret)
                return ret;
 
-       ret = i915_gem_check_olr(req);
-       if (ret)
-               return ret;
-
        reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
-       i915_gem_request_reference(req);
+
+       if (readonly) {
+               struct drm_i915_gem_request *req;
+
+               req = obj->last_write_req;
+               if (req == NULL)
+                       return 0;
+
+               ret = i915_gem_check_olr(req);
+               if (ret)
+                       goto err;
+
+               requests[n++] = i915_gem_request_reference(req);
+       } else {
+               for (i = 0; i < I915_NUM_RINGS; i++) {
+                       struct drm_i915_gem_request *req;
+
+                       req = obj->last_read_req[i];
+                       if (req == NULL)
+                               continue;
+
+                       ret = i915_gem_check_olr(req);
+                       if (ret)
+                               goto err;
+
+                       requests[n++] = i915_gem_request_reference(req);
+               }
+       }
+
        mutex_unlock(&dev->struct_mutex);
-       ret = __i915_wait_request(req, reset_counter, true, NULL, file_priv);
+       for (i = 0; ret == 0 && i < n; i++)
+               ret = __i915_wait_request(requests[i], reset_counter, true,
+                                         NULL, rps);
        mutex_lock(&dev->struct_mutex);
-       i915_gem_request_unreference(req);
-       if (ret)
-               return ret;
 
-       return i915_gem_object_wait_rendering__tail(obj);
+err:
+       for (i = 0; i < n; i++) {
+               if (ret == 0)
+                       i915_gem_object_retire_request(obj, requests[i]);
+               i915_gem_request_unreference(requests[i]);
+       }
+
+       return ret;
+}
+
+static struct intel_rps_client *to_rps_client(struct drm_file *file)
+{
+       struct drm_i915_file_private *fpriv = file->driver_priv;
+       return &fpriv->rps;
 }
 
 /**
@@ -1498,7 +1603,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
         * to catch cases where we are gazumped.
         */
        ret = i915_gem_object_wait_rendering__nonblocking(obj,
-                                                         file->driver_priv,
+                                                         to_rps_client(file),
                                                          !write_domain);
        if (ret)
                goto unref;
@@ -1919,7 +2024,6 @@ i915_gem_mmap_gtt(struct drm_file *file,
                  uint32_t handle,
                  uint64_t *offset)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
        int ret;
 
@@ -2235,78 +2339,58 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
        return 0;
 }
 
-static void
-i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
-                              struct intel_engine_cs *ring)
+void i915_vma_move_to_active(struct i915_vma *vma,
+                            struct intel_engine_cs *ring)
 {
-       struct drm_i915_gem_request *req;
-       struct intel_engine_cs *old_ring;
-
-       BUG_ON(ring == NULL);
-
-       req = intel_ring_get_request(ring);
-       old_ring = i915_gem_request_get_ring(obj->last_read_req);
-
-       if (old_ring != ring && obj->last_write_req) {
-               /* Keep the request relative to the current ring */
-               i915_gem_request_assign(&obj->last_write_req, req);
-       }
+       struct drm_i915_gem_object *obj = vma->obj;
 
        /* Add a reference if we're newly entering the active list. */
-       if (!obj->active) {
+       if (obj->active == 0)
                drm_gem_object_reference(&obj->base);
-               obj->active = 1;
-       }
+       obj->active |= intel_ring_flag(ring);
 
-       list_move_tail(&obj->ring_list, &ring->active_list);
+       list_move_tail(&obj->ring_list[ring->id], &ring->active_list);
+       i915_gem_request_assign(&obj->last_read_req[ring->id],
+                               intel_ring_get_request(ring));
 
-       i915_gem_request_assign(&obj->last_read_req, req);
+       list_move_tail(&vma->mm_list, &vma->vm->active_list);
 }
 
-void i915_vma_move_to_active(struct i915_vma *vma,
-                            struct intel_engine_cs *ring)
+static void
+i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
 {
-       list_move_tail(&vma->mm_list, &vma->vm->active_list);
-       return i915_gem_object_move_to_active(vma->obj, ring);
+       RQ_BUG_ON(obj->last_write_req == NULL);
+       RQ_BUG_ON(!(obj->active & intel_ring_flag(obj->last_write_req->ring)));
+
+       i915_gem_request_assign(&obj->last_write_req, NULL);
+       intel_fb_obj_flush(obj, true);
 }
 
 static void
-i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
+i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
 {
        struct i915_vma *vma;
 
-       BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
-       BUG_ON(!obj->active);
+       RQ_BUG_ON(obj->last_read_req[ring] == NULL);
+       RQ_BUG_ON(!(obj->active & (1 << ring)));
+
+       list_del_init(&obj->ring_list[ring]);
+       i915_gem_request_assign(&obj->last_read_req[ring], NULL);
+
+       if (obj->last_write_req && obj->last_write_req->ring->id == ring)
+               i915_gem_object_retire__write(obj);
+
+       obj->active &= ~(1 << ring);
+       if (obj->active)
+               return;
 
        list_for_each_entry(vma, &obj->vma_list, vma_link) {
                if (!list_empty(&vma->mm_list))
                        list_move_tail(&vma->mm_list, &vma->vm->inactive_list);
        }
 
-       intel_fb_obj_flush(obj, true);
-
-       list_del_init(&obj->ring_list);
-
-       i915_gem_request_assign(&obj->last_read_req, NULL);
-       i915_gem_request_assign(&obj->last_write_req, NULL);
-       obj->base.write_domain = 0;
-
        i915_gem_request_assign(&obj->last_fenced_req, NULL);
-
-       obj->active = 0;
        drm_gem_object_unreference(&obj->base);
-
-       WARN_ON(i915_verify_lists(dev));
-}
-
-static void
-i915_gem_object_retire(struct drm_i915_gem_object *obj)
-{
-       if (obj->last_read_req == NULL)
-               return;
-
-       if (i915_gem_request_completed(obj->last_read_req, true))
-               i915_gem_object_move_to_inactive(obj);
 }
 
 static int
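
The hunks above replace the object's single last_read_req pointer with per-engine
tracking: obj->active becomes a bitmask of engines with outstanding reads, set in
i915_vma_move_to_active() and cleared in i915_gem_object_retire__read(), while the
last write is retired separately via i915_gem_object_retire__write(). A minimal
sketch of the resulting invariant, using hypothetical helpers for illustration only
(assuming intel_ring_flag(ring) == 1 << ring->id, as used elsewhere in the driver):

	static bool obj_is_read_by(struct drm_i915_gem_object *obj, int ring)
	{
		/* bit set by i915_vma_move_to_active(),
		 * cleared by i915_gem_object_retire__read() */
		return obj->active & (1 << ring);
	}

	static bool obj_is_idle(struct drm_i915_gem_object *obj)
	{
		/* no engine is still reading the object; any outstanding write
		 * has been retired through i915_gem_object_retire__write() */
		return obj->active == 0;
	}
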
@@ -2379,9 +2463,15 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
        return 0;
 }
 
-int __i915_add_request(struct intel_engine_cs *ring,
-                      struct drm_file *file,
-                      struct drm_i915_gem_object *obj)
+/*
+ * NB: This function is not allowed to fail. Doing so would mean that the
+ * request is not being tracked for completion but the work itself is
+ * going to happen on the hardware. This would be a Bad Thing(tm).
+ */
+void __i915_add_request(struct intel_engine_cs *ring,
+                       struct drm_file *file,
+                       struct drm_i915_gem_object *obj,
+                       bool flush_caches)
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        struct drm_i915_gem_request *request;
@@ -2391,13 +2481,20 @@ int __i915_add_request(struct intel_engine_cs *ring,
 
        request = ring->outstanding_lazy_request;
        if (WARN_ON(request == NULL))
-               return -ENOMEM;
+               return;
 
        if (i915.enable_execlists) {
                ringbuf = request->ctx->engine[ring->id].ringbuf;
        } else
                ringbuf = ring->buffer;
 
+       /*
+        * To ensure that this call will not fail, space for its emissions
+        * should already have been reserved in the ring buffer. Let the ring
+        * know that it is time to use that space up.
+        */
+       intel_ring_reserved_space_use(ringbuf);
+
        request_start = intel_ring_get_tail(ringbuf);
        /*
         * Emit any outstanding flushes - execbuf can fail to emit the flush
@@ -2406,14 +2503,13 @@ int __i915_add_request(struct intel_engine_cs *ring,
         * is that the flush _must_ happen before the next request, no matter
         * what.
         */
-       if (i915.enable_execlists) {
-               ret = logical_ring_flush_all_caches(ringbuf, request->ctx);
-               if (ret)
-                       return ret;
-       } else {
-               ret = intel_ring_flush_all_caches(ring);
-               if (ret)
-                       return ret;
+       if (flush_caches) {
+               if (i915.enable_execlists)
+                       ret = logical_ring_flush_all_caches(ringbuf, request->ctx);
+               else
+                       ret = intel_ring_flush_all_caches(ring);
+               /* Not allowed to fail! */
+               WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
        }
 
        /* Record the position of the start of the request so that
@@ -2423,17 +2519,15 @@ int __i915_add_request(struct intel_engine_cs *ring,
         */
        request->postfix = intel_ring_get_tail(ringbuf);
 
-       if (i915.enable_execlists) {
+       if (i915.enable_execlists)
                ret = ring->emit_request(ringbuf, request);
-               if (ret)
-                       return ret;
-       } else {
+       else {
                ret = ring->add_request(ring);
-               if (ret)
-                       return ret;
 
                request->tail = intel_ring_get_tail(ringbuf);
        }
+       /* Not allowed to fail! */
+       WARN(ret, "emit|add_request failed: %d!\n", ret);
 
        request->head = request_start;
 
@@ -2445,14 +2539,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
         */
        request->batch_obj = obj;
 
-       if (!i915.enable_execlists) {
-               /* Hold a reference to the current context so that we can inspect
-                * it later in case a hangcheck error event fires.
-                */
-               request->ctx = ring->last_context;
-               if (request->ctx)
-                       i915_gem_context_reference(request->ctx);
-       }
+       WARN_ON(!i915.enable_execlists && (request->ctx != ring->last_context));
 
        request->emitted_jiffies = jiffies;
        list_add_tail(&request->list, &ring->request_list);
@@ -2480,21 +2567,8 @@ int __i915_add_request(struct intel_engine_cs *ring,
                           round_jiffies_up_relative(HZ));
        intel_mark_busy(dev_priv->dev);
 
-       return 0;
-}
-
-static inline void
-i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
-{
-       struct drm_i915_file_private *file_priv = request->file_priv;
-
-       if (!file_priv)
-               return;
-
-       spin_lock(&file_priv->mm.lock);
-       list_del(&request->client_list);
-       request->file_priv = NULL;
-       spin_unlock(&file_priv->mm.lock);
+       /* Sanity check that the reserved size was large enough. */
+       intel_ring_reserved_space_end(ringbuf);
 }
 
 static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
@@ -2542,16 +2616,6 @@ static void i915_set_reset_status(struct drm_i915_private *dev_priv,
        }
 }
 
-static void i915_gem_free_request(struct drm_i915_gem_request *request)
-{
-       list_del(&request->list);
-       i915_gem_request_remove_from_client(request);
-
-       put_pid(request->pid);
-
-       i915_gem_request_unreference(request);
-}
-
 void i915_gem_request_free(struct kref *req_ref)
 {
        struct drm_i915_gem_request *req = container_of(req_ref,
@@ -2573,41 +2637,75 @@ void i915_gem_request_free(struct kref *req_ref)
 }
 
 int i915_gem_request_alloc(struct intel_engine_cs *ring,
-                          struct intel_context *ctx)
+                          struct intel_context *ctx,
+                          struct drm_i915_gem_request **req_out)
 {
        struct drm_i915_private *dev_priv = to_i915(ring->dev);
-       struct drm_i915_gem_request *rq;
+       struct drm_i915_gem_request *req;
        int ret;
 
-       if (ring->outstanding_lazy_request)
+       if (!req_out)
+               return -EINVAL;
+
+       if ((*req_out = ring->outstanding_lazy_request) != NULL)
                return 0;
 
-       rq = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
-       if (rq == NULL)
+       req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
+       if (req == NULL)
                return -ENOMEM;
 
-       kref_init(&rq->ref);
-       rq->i915 = dev_priv;
-
-       ret = i915_gem_get_seqno(ring->dev, &rq->seqno);
-       if (ret) {
-               kfree(rq);
-               return ret;
-       }
+       ret = i915_gem_get_seqno(ring->dev, &req->seqno);
+       if (ret)
+               goto err;
 
-       rq->ring = ring;
+       kref_init(&req->ref);
+       req->i915 = dev_priv;
+       req->ring = ring;
+       req->ctx  = ctx;
+       i915_gem_context_reference(req->ctx);
 
        if (i915.enable_execlists)
-               ret = intel_logical_ring_alloc_request_extras(rq, ctx);
+               ret = intel_logical_ring_alloc_request_extras(req);
        else
-               ret = intel_ring_alloc_request_extras(rq);
+               ret = intel_ring_alloc_request_extras(req);
        if (ret) {
-               kfree(rq);
-               return ret;
+               i915_gem_context_unreference(req->ctx);
+               goto err;
        }
 
-       ring->outstanding_lazy_request = rq;
+       /*
+        * Reserve space in the ring buffer for all the commands required to
+        * eventually emit this request. This is to guarantee that the
+        * i915_add_request() call can't fail. Note that the reserve may need
+        * to be redone if the request is not actually submitted straight
+        * away, e.g. because a GPU scheduler has deferred it.
+        *
+        * Note further that this call merely notes the reserve request. A
+        * subsequent call to *_ring_begin() is required to actually ensure
+        * that the reservation is available. Without the begin, if the
+        * request creator immediately submitted the request without adding
+        * any commands to it then there might not actually be sufficient
+        * room for the submission commands. Unfortunately, the current
+        * *_ring_begin() implementations potentially call back here to
+        * i915_gem_request_alloc(). Thus calling _begin() here would lead to
+        * infinite recursion! Until that back call path is removed, it is
+        * necessary to do a manual _begin() outside.
+        */
+       intel_ring_reserved_space_reserve(req->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
+
+       *req_out = ring->outstanding_lazy_request = req;
        return 0;
+
+err:
+       kmem_cache_free(dev_priv->requests, req);
+       return ret;
+}
+
+void i915_gem_request_cancel(struct drm_i915_gem_request *req)
+{
+       intel_ring_reserved_space_cancel(req->ringbuf);
+
+       i915_gem_request_unreference(req);
 }
 
 struct drm_i915_gem_request *
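
The comments above describe a reservation protocol spanning i915_gem_request_alloc(),
__i915_add_request() and i915_gem_request_cancel(): space for the request-emission
commands is reserved up front so that i915_add_request() can never fail. A sketch of
the intended caller flow, assuming struct_mutex is held; emit_commands() is a
hypothetical stand-in for whatever the caller actually emits, not part of this diff:

	struct drm_i915_gem_request *req;
	int ret;

	ret = i915_gem_request_alloc(ring, ctx, &req);	/* reserves ring space */
	if (ret)
		return ret;

	ret = emit_commands(req);			/* hypothetical helper */
	if (ret) {
		i915_gem_request_cancel(req);		/* drops the reservation */
		return ret;
	}

	/* Not allowed to fail: uses the space reserved at alloc time. */
	i915_add_request(ring);
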
@@ -2652,9 +2750,9 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
 
                obj = list_first_entry(&ring->active_list,
                                       struct drm_i915_gem_object,
-                                      ring_list);
+                                      ring_list[ring->id]);
 
-               i915_gem_object_move_to_inactive(obj);
+               i915_gem_object_retire__read(obj, ring->id);
        }
 
        /*
@@ -2690,7 +2788,7 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
                                           struct drm_i915_gem_request,
                                           list);
 
-               i915_gem_free_request(request);
+               i915_gem_request_retire(request);
        }
 
        /* This may not have been flushed before the reset, so clean it now */
@@ -2738,6 +2836,8 @@ void i915_gem_reset(struct drm_device *dev)
        i915_gem_context_reset(dev);
 
        i915_gem_restore_fences(dev);
+
+       WARN_ON(i915_verify_lists(dev));
 }
 
 /**
@@ -2746,9 +2846,6 @@ void i915_gem_reset(struct drm_device *dev)
 void
 i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 {
-       if (list_empty(&ring->request_list))
-               return;
-
        WARN_ON(i915_verify_lists(ring->dev));
 
        /* Retire requests first as we use it above for the early return.
@@ -2766,16 +2863,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
                if (!i915_gem_request_completed(request, true))
                        break;
 
-               trace_i915_gem_request_retire(request);
-
-               /* We know the GPU must have read the request to have
-                * sent us the seqno + interrupt, so use the position
-                * of tail of the request to update the last known position
-                * of the GPU head.
-                */
-               request->ringbuf->last_retired_head = request->postfix;
-
-               i915_gem_free_request(request);
+               i915_gem_request_retire(request);
        }
 
        /* Move any buffers on the active list that are no longer referenced
@@ -2787,12 +2875,12 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 
                obj = list_first_entry(&ring->active_list,
                                      struct drm_i915_gem_object,
-                                     ring_list);
+                                     ring_list[ring->id]);
 
-               if (!i915_gem_request_completed(obj->last_read_req, true))
+               if (!list_empty(&obj->last_read_req[ring->id]->list))
                        break;
 
-               i915_gem_object_move_to_inactive(obj);
+               i915_gem_object_retire__read(obj, ring->id);
        }
 
        if (unlikely(ring->trace_irq_req &&
@@ -2887,17 +2975,30 @@ i915_gem_idle_work_handler(struct work_struct *work)
 static int
 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
 {
-       struct intel_engine_cs *ring;
-       int ret;
+       int ret, i;
+
+       if (!obj->active)
+               return 0;
+
+       for (i = 0; i < I915_NUM_RINGS; i++) {
+               struct drm_i915_gem_request *req;
 
-       if (obj->active) {
-               ring = i915_gem_request_get_ring(obj->last_read_req);
+               req = obj->last_read_req[i];
+               if (req == NULL)
+                       continue;
 
-               ret = i915_gem_check_olr(obj->last_read_req);
+               if (list_empty(&req->list))
+                       goto retire;
+
+               ret = i915_gem_check_olr(req);
                if (ret)
                        return ret;
 
-               i915_gem_retire_requests_ring(ring);
+               if (i915_gem_request_completed(req, true)) {
+                       __i915_gem_request_retire__upto(req);
+retire:
+                       i915_gem_object_retire__read(obj, i);
+               }
        }
 
        return 0;
@@ -2931,9 +3032,10 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_wait *args = data;
        struct drm_i915_gem_object *obj;
-       struct drm_i915_gem_request *req;
+       struct drm_i915_gem_request *req[I915_NUM_RINGS];
        unsigned reset_counter;
-       int ret = 0;
+       int i, n = 0;
+       int ret;
 
        if (args->flags != 0)
                return -EINVAL;
@@ -2953,11 +3055,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        if (ret)
                goto out;
 
-       if (!obj->active || !obj->last_read_req)
+       if (!obj->active)
                goto out;
 
-       req = obj->last_read_req;
-
        /* Do this after OLR check to make sure we make forward progress polling
         * on this IOCTL with a timeout == 0 (like busy ioctl)
         */
@@ -2968,13 +3068,23 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 
        drm_gem_object_unreference(&obj->base);
        reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
-       i915_gem_request_reference(req);
+
+       for (i = 0; i < I915_NUM_RINGS; i++) {
+               if (obj->last_read_req[i] == NULL)
+                       continue;
+
+               req[n++] = i915_gem_request_reference(obj->last_read_req[i]);
+       }
+
        mutex_unlock(&dev->struct_mutex);
 
-       ret = __i915_wait_request(req, reset_counter, true,
-                                 args->timeout_ns > 0 ? &args->timeout_ns : NULL,
-                                 file->driver_priv);
-       i915_gem_request_unreference__unlocked(req);
+       for (i = 0; i < n; i++) {
+               if (ret == 0)
+                       ret = __i915_wait_request(req[i], reset_counter, true,
+                                                 args->timeout_ns > 0 ? &args->timeout_ns : NULL,
+                                                 file->driver_priv);
+               i915_gem_request_unreference__unlocked(req[i]);
+       }
        return ret;
 
 out:
@@ -2983,6 +3093,59 @@ out:
        return ret;
 }
 
+static int
+__i915_gem_object_sync(struct drm_i915_gem_object *obj,
+                      struct intel_engine_cs *to,
+                      struct drm_i915_gem_request *req)
+{
+       struct intel_engine_cs *from;
+       int ret;
+
+       from = i915_gem_request_get_ring(req);
+       if (to == from)
+               return 0;
+
+       if (i915_gem_request_completed(req, true))
+               return 0;
+
+       ret = i915_gem_check_olr(req);
+       if (ret)
+               return ret;
+
+       if (!i915_semaphore_is_enabled(obj->base.dev)) {
+               struct drm_i915_private *i915 = to_i915(obj->base.dev);
+               ret = __i915_wait_request(req,
+                                         atomic_read(&i915->gpu_error.reset_counter),
+                                         i915->mm.interruptible,
+                                         NULL,
+                                         &i915->rps.semaphores);
+               if (ret)
+                       return ret;
+
+               i915_gem_object_retire_request(obj, req);
+       } else {
+               int idx = intel_ring_sync_index(from, to);
+               u32 seqno = i915_gem_request_get_seqno(req);
+
+               if (seqno <= from->semaphore.sync_seqno[idx])
+                       return 0;
+
+               trace_i915_gem_ring_sync_to(from, to, req);
+               ret = to->semaphore.sync_to(to, from, seqno);
+               if (ret)
+                       return ret;
+
+               /* We use last_read_req because sync_to()
+                * might have just caused seqno wrap under
+                * the radar.
+                */
+               from->semaphore.sync_seqno[idx] =
+                       i915_gem_request_get_seqno(obj->last_read_req[from->id]);
+       }
+
+       return 0;
+}
+
 /**
  * i915_gem_object_sync - sync an object to a ring.
  *
@@ -2991,7 +3154,17 @@ out:
  *
  * This code is meant to abstract object synchronization with the GPU.
  * Calling with NULL implies synchronizing the object with the CPU
- * rather than a particular GPU ring.
+ * rather than a particular GPU ring. Conceptually we serialise writes
+ * between engines inside the GPU. We only allow one engine to write
+ * into a buffer at any time, but multiple readers. To ensure each has
+ * a coherent view of memory, we must:
+ *
+ * - If there is an outstanding write request to the object, the new
+ *   request must wait for it to complete (either CPU or in hw, requests
+ *   on the same ring will be naturally ordered).
+ *
+ * - If we are a write request (pending_write_domain is set), the new
+ *   request must wait for outstanding read requests to complete.
  *
  * Returns 0 if successful, else propagates up the lower layer error.
  */
@@ -2999,41 +3172,32 @@ int
 i915_gem_object_sync(struct drm_i915_gem_object *obj,
                     struct intel_engine_cs *to)
 {
-       struct intel_engine_cs *from;
-       u32 seqno;
-       int ret, idx;
-
-       from = i915_gem_request_get_ring(obj->last_read_req);
-
-       if (from == NULL || to == from)
-               return 0;
+       const bool readonly = obj->base.pending_write_domain == 0;
+       struct drm_i915_gem_request *req[I915_NUM_RINGS];
+       int ret, i, n;
 
-       if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
-               return i915_gem_object_wait_rendering(obj, false);
-
-       idx = intel_ring_sync_index(from, to);
-
-       seqno = i915_gem_request_get_seqno(obj->last_read_req);
-       /* Optimization: Avoid semaphore sync when we are sure we already
-        * waited for an object with higher seqno */
-       if (seqno <= from->semaphore.sync_seqno[idx])
+       if (!obj->active)
                return 0;
 
-       ret = i915_gem_check_olr(obj->last_read_req);
-       if (ret)
-               return ret;
+       if (to == NULL)
+               return i915_gem_object_wait_rendering(obj, readonly);
 
-       trace_i915_gem_ring_sync_to(from, to, obj->last_read_req);
-       ret = to->semaphore.sync_to(to, from, seqno);
-       if (!ret)
-               /* We use last_read_req because sync_to()
-                * might have just caused seqno wrap under
-                * the radar.
-                */
-               from->semaphore.sync_seqno[idx] =
-                               i915_gem_request_get_seqno(obj->last_read_req);
+       n = 0;
+       if (readonly) {
+               if (obj->last_write_req)
+                       req[n++] = obj->last_write_req;
+       } else {
+               for (i = 0; i < I915_NUM_RINGS; i++)
+                       if (obj->last_read_req[i])
+                               req[n++] = obj->last_read_req[i];
+       }
+       for (i = 0; i < n; i++) {
+               ret = __i915_gem_object_sync(obj, to, req[i]);
+               if (ret)
+                       return ret;
+       }
 
-       return ret;
+       return 0;
 }
 
 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
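
The rules documented in the comment above are applied by i915_gem_object_sync().
A hedged caller-side sketch (execbuffer-like; will_write and ret are illustrative
locals, not part of this diff) showing how pending_write_domain selects between
the read and write rules:

	/* A pure read only needs to wait for the last writer; a write must
	 * wait for every outstanding reader on every engine. */
	obj->base.pending_write_domain = will_write ? I915_GEM_DOMAIN_RENDER : 0;

	ret = i915_gem_object_sync(obj, to);
	if (ret)
		return ret;
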
@@ -3079,7 +3243,7 @@ int i915_vma_unbind(struct i915_vma *vma)
 
        BUG_ON(obj->pages == NULL);
 
-       ret = i915_gem_object_finish_gpu(obj);
+       ret = i915_gem_object_wait_rendering(obj, false);
        if (ret)
                return ret;
        /* Continue on if we fail due to EIO, the GPU is hung so we
@@ -3119,10 +3283,6 @@ int i915_vma_unbind(struct i915_vma *vma)
        /* Since the unbound list is global, only move to that list if
         * no more VMAs exist. */
        if (list_empty(&obj->vma_list)) {
-               /* Throw away the active reference before
-                * moving to the unbound list. */
-               i915_gem_object_retire(obj);
-
                i915_gem_gtt_finish_object(obj);
                list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
        }
@@ -3155,6 +3315,7 @@ int i915_gpu_idle(struct drm_device *dev)
                        return ret;
        }
 
+       WARN_ON(i915_verify_lists(dev));
        return 0;
 }
 
@@ -3777,8 +3938,6 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
        if (ret)
                return ret;
 
-       i915_gem_object_retire(obj);
-
        /* Flush and acquire obj->pages so that we are coherent through
         * direct access in memory with previous cached writes through
         * shmemfs and that our cache domain tracking remains valid.
@@ -3815,7 +3974,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
        }
 
        if (write)
-               intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT);
+               intel_fb_obj_invalidate(obj, ORIGIN_GTT);
 
        trace_i915_gem_object_change_domain(obj,
                                            old_read_domains,
@@ -3854,7 +4013,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
        }
 
        if (i915_gem_obj_bound_any(obj)) {
-               ret = i915_gem_object_finish_gpu(obj);
+               ret = i915_gem_object_wait_rendering(obj, false);
                if (ret)
                        return ret;
 
@@ -3976,11 +4135,9 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
        u32 old_read_domains, old_write_domain;
        int ret;
 
-       if (pipelined != i915_gem_request_get_ring(obj->last_read_req)) {
-               ret = i915_gem_object_sync(obj, pipelined);
-               if (ret)
-                       return ret;
-       }
+       ret = i915_gem_object_sync(obj, pipelined);
+       if (ret)
+               return ret;
 
        /* Mark the pin_display early so that we account for the
         * display coherency whilst setting up the cache domains.
@@ -4045,23 +4202,6 @@ i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
        obj->pin_display--;
 }
 
-int
-i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
-{
-       int ret;
-
-       if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
-               return 0;
-
-       ret = i915_gem_object_wait_rendering(obj, false);
-       if (ret)
-               return ret;
-
-       /* Ensure that we invalidate the GPU's caches and TLBs. */
-       obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
-       return 0;
-}
-
 /**
  * Moves a single object to the CPU read, and possibly write domain.
  *
@@ -4081,7 +4221,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
        if (ret)
                return ret;
 
-       i915_gem_object_retire(obj);
        i915_gem_object_flush_gtt_write_domain(obj);
 
        old_write_domain = obj->base.write_domain;
@@ -4108,7 +4247,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
        }
 
        if (write)
-               intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
+               intel_fb_obj_invalidate(obj, ORIGIN_CPU);
 
        trace_i915_gem_object_change_domain(obj,
                                            old_read_domains,
@@ -4132,7 +4271,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_file_private *file_priv = file->driver_priv;
-       unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
+       unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
        struct drm_i915_gem_request *request, *target = NULL;
        unsigned reset_counter;
        int ret;
@@ -4370,15 +4509,15 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
         * necessary flushes here.
         */
        ret = i915_gem_object_flush_active(obj);
+       if (ret)
+               goto unref;
 
-       args->busy = obj->active;
-       if (obj->last_read_req) {
-               struct intel_engine_cs *ring;
-               BUILD_BUG_ON(I915_NUM_RINGS > 16);
-               ring = i915_gem_request_get_ring(obj->last_read_req);
-               args->busy |= intel_ring_flag(ring) << 16;
-       }
+       BUILD_BUG_ON(I915_NUM_RINGS > 16);
+       args->busy = obj->active << 16;
+       if (obj->last_write_req)
+               args->busy |= obj->last_write_req->ring->id;
 
+unref:
        drm_gem_object_unreference(&obj->base);
 unlock:
        mutex_unlock(&dev->struct_mutex);
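
With this change the busy ioctl reports the per-engine read mask in the high 16 bits
of args->busy and the id of the last writing engine in the low bits (the BUILD_BUG_ON
above ensures the ring mask fits in 16 bits). A hypothetical userspace-side decode,
for illustration only:

	static inline uint32_t busy_read_engines(uint32_t busy)
	{
		return busy >> 16;		/* bitmask of reading engines */
	}

	static inline uint32_t busy_write_engine(uint32_t busy)
	{
		return busy & 0xffff;		/* id of the last writer */
	}
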
@@ -4452,8 +4591,11 @@ unlock:
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
                          const struct drm_i915_gem_object_ops *ops)
 {
+       int i;
+
        INIT_LIST_HEAD(&obj->global_list);
-       INIT_LIST_HEAD(&obj->ring_list);
+       for (i = 0; i < I915_NUM_RINGS; i++)
+               INIT_LIST_HEAD(&obj->ring_list[i]);
        INIT_LIST_HEAD(&obj->obj_exec_link);
        INIT_LIST_HEAD(&obj->vma_list);
        INIT_LIST_HEAD(&obj->batch_pool_link);
@@ -5112,10 +5254,10 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
        }
        spin_unlock(&file_priv->mm.lock);
 
-       if (!list_empty(&file_priv->rps_boost)) {
-               mutex_lock(&to_i915(dev)->rps.hw_lock);
-               list_del(&file_priv->rps_boost);
-               mutex_unlock(&to_i915(dev)->rps.hw_lock);
+       if (!list_empty(&file_priv->rps.link)) {
+               spin_lock(&to_i915(dev)->rps.client_lock);
+               list_del(&file_priv->rps.link);
+               spin_unlock(&to_i915(dev)->rps.client_lock);
        }
 }
 
@@ -5133,7 +5275,7 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
        file->driver_priv = file_priv;
        file_priv->dev_priv = dev->dev_private;
        file_priv->file = file;
-       INIT_LIST_HEAD(&file_priv->rps_boost);
+       INIT_LIST_HEAD(&file_priv->rps.link);
 
        spin_lock_init(&file_priv->mm.lock);
        INIT_LIST_HEAD(&file_priv->mm.request_list);