continue;
WARN_ON(ce->pin_count);
- if (ce->ringbuf)
- intel_ringbuffer_free(ce->ringbuf);
+ if (ce->ring)
+ intel_ring_free(ce->ring);
i915_gem_object_put(ce->state);
}
return ctx;
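For orientation, the cleanup loop this hunk sits in (i915_gem_context_free() walking the per-engine state) would read roughly as below after the rename. This is a sketch reconstructed around the hunk, assuming the intel_context layout of this era of the driver:

	for (i = 0; i < I915_NUM_ENGINES; i++) {
		struct intel_context *ce = &ctx->engine[i];

		if (!ce->state)
			continue;

		WARN_ON(ce->pin_count);
		if (ce->ring)
			intel_ring_free(ce->ring); /* was intel_ringbuffer_free(ce->ringbuf) */

		i915_gem_object_put(ce->state);
	}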
if (USES_FULL_PPGTT(dev)) {
- struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv);
+ struct i915_hw_ppgtt *ppgtt =
+ i915_ppgtt_create(to_i915(dev), file_priv);
if (IS_ERR(ppgtt)) {
DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
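i915_ppgtt_create() now takes struct drm_i915_private instead of struct drm_device, hence the to_i915(dev) conversion at the call site. A minimal sketch of the resulting call shape; the ERR_CAST propagation is an assumption about the truncated error path:

	struct i915_hw_ppgtt *ppgtt =
		i915_ppgtt_create(to_i915(dev), file_priv);

	if (IS_ERR(ppgtt)) {
		DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n", PTR_ERR(ppgtt));
		return ERR_CAST(ppgtt); /* assumed: pass the encoded errno up */
	}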
mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
{
struct drm_i915_private *dev_priv = req->i915;
- struct intel_ringbuffer *ring = req->ringbuf;
+ struct intel_ring *ring = req->ring;
struct intel_engine_cs *engine = req->engine;
u32 flags = hw_flags | MI_MM_SPACE_GTT;
const int num_rings =
* itlb_before_ctx_switch.
*/
if (IS_GEN6(dev_priv)) {
- ret = engine->flush(req, I915_GEM_GPU_DOMAINS, 0);
+ ret = engine->emit_flush(req, EMIT_INVALIDATE);
if (ret)
return ret;
}
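The two GPU-domain masks of the old engine->flush(req, invalidate_domains, flush_domains) hook collapse into one mode bitmask on engine->emit_flush(). Under that reading, the possible modes compose like this:

	ret = engine->emit_flush(req, EMIT_INVALIDATE);              /* invalidate caches, as above */
	ret = engine->emit_flush(req, EMIT_FLUSH);                   /* flush writes out */
	ret = engine->emit_flush(req, EMIT_INVALIDATE | EMIT_FLUSH); /* full barrier */

Each call emits into the request's ring and returns 0 or a negative errno, so every use must be checked the way the hunk does.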
static int remap_l3(struct drm_i915_gem_request *req, int slice)
{
u32 *remap_info = req->i915->l3_parity.remap_info[slice];
- struct intel_ringbuffer *ring = req->ringbuf;
+ struct intel_ring *ring = req->ring;
int i, ret;
if (!remap_info)
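The rest of remap_l3() is cut off in this hunk; it keeps the usual begin/emit/advance shape, only on the renamed struct intel_ring. A sketch of how the tail plausibly reads, assuming the GEN7 L3 log registers this function programs:

	if (!remap_info)
		return 0;

	ret = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2);
	if (ret)
		return ret;

	/* A single LRI loads the whole remap table for this slice */
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4));
	for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
		intel_ring_emit_reg(ring, GEN7_L3LOG(slice, i));
		intel_ring_emit(ring, remap_info[i]);
	}
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;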
* MI_SET_CONTEXT instead of when the next seqno has completed.
*/
if (from != NULL) {
- from->engine[RCS].state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
- i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->engine[RCS].state), req);
+ struct drm_i915_gem_object *obj = from->engine[RCS].state;
+
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
* whole damn pipeline, we don't need to explicitly mark the
* object dirty. The only exception is that the context must be
* correct in case the object gets swapped out. Ideally we'd be
* able to defer doing this until we know the object would be
* swapped, but there is no way to do that yet.
*/
- from->engine[RCS].state->dirty = 1;
+ obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
+ i915_vma_move_to_active(i915_gem_obj_to_ggtt(obj), req, 0);
/* obj is kept alive until the next request by its active ref */
- i915_gem_object_ggtt_unpin(from->engine[RCS].state);
+ i915_gem_object_ggtt_unpin(obj);
i915_gem_context_put(from);
}
engine->last_context = i915_gem_context_get(to);
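i915_vma_move_to_active() gained a flags argument in this series: 0 records a read-only hand-off of the outgoing context image, and the helper now marks the object dirty itself, which is presumably why the explicit dirty = 1 above disappears. A sketch of the assumed signature; EXEC_OBJECT_WRITE is what a writer would pass:

	void i915_vma_move_to_active(struct i915_vma *vma,
				     struct drm_i915_gem_request *req,
				     unsigned int flags); /* assumed: flags use the EXEC_OBJECT_* namespace */

	/* read-only use of the old context image, as in the hunk above */
	i915_vma_move_to_active(i915_gem_obj_to_ggtt(obj), req, 0);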
{
struct intel_engine_cs *engine = req->engine;
- WARN_ON(i915.enable_execlists);
lockdep_assert_held(&req->i915->drm.struct_mutex);
+ if (i915.enable_execlists)
+ return 0;
if (!req->ctx->engine[engine->id].state) {
struct i915_gem_context *to = req->ctx;
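Turning the WARN_ON into an early return makes i915_switch_context() safe to call unconditionally: under execlists the context image is managed per engine, so there is nothing to switch. The resulting function head, sketched:

	int i915_switch_context(struct drm_i915_gem_request *req)
	{
		struct intel_engine_cs *engine = req->engine;

		lockdep_assert_held(&req->i915->drm.struct_mutex);
		if (i915.enable_execlists)
			return 0; /* execlists track context state per engine */

		/* ... legacy MI_SET_CONTEXT path, including the
		 * !state fallback, continues as in the hunk above ... */
	}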
if (IS_ERR(req))
return PTR_ERR(req);
- ret = 0;
- if (!i915.enable_execlists)
- ret = i915_switch_context(req);
+ ret = i915_switch_context(req);
i915_add_request_no_flush(req);
if (ret)
return ret;
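With the execlists check folded into i915_switch_context(), this caller no longer special-cases it. The surrounding sequence then reads roughly as follows; the i915_gem_request_alloc() line is a hypothetical reconstruction of the truncated context above:

	req = i915_gem_request_alloc(engine, dev_priv->kernel_context); /* hypothetical: exact ctx depends on the call site */
	if (IS_ERR(req))
		return PTR_ERR(req);

	ret = i915_switch_context(req);

	/* add the request even if the switch failed, so it is not leaked */
	i915_add_request_no_flush(req);
	if (ret)
		return ret;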