static void nop_submit_request(struct i915_request *request)
{
+	GEM_TRACE("%s fence %llx:%d -> -EIO\n",
+		  request->engine->name,
+		  request->fence.context, request->fence.seqno);
	dma_fence_set_error(&request->fence, -EIO);
	i915_request_submit(request);
@@ ... @@
static void nop_complete_submit_request(struct i915_request *request)
{
	unsigned long flags;

+	GEM_TRACE("%s fence %llx:%d -> -EIO\n",
+		  request->engine->name,
+		  request->fence.context, request->fence.seqno);
	dma_fence_set_error(&request->fence, -EIO);

	spin_lock_irqsave(&request->engine->timeline->lock, flags);
@@ ... @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

+	GEM_TRACE("start\n");
+
	if (drm_debug & DRM_UT_DRIVER) {
		struct drm_printer p = drm_debug_printer(__func__);
@@ ... @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
		i915_gem_reset_finish_engine(engine);
	}

+	GEM_TRACE("end\n");
+
	wake_up_all(&i915->gpu_error.reset_queue);
}
@@ ... @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
	if (!test_bit(I915_WEDGED, &i915->gpu_error.flags))
		return true;

+	GEM_TRACE("start\n");
+
	/*
	 * Before unwedging, make sure that all pending operations
	 * are flushed and errored out - we may have requests waiting upon
@@ ... @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
	intel_engines_reset_default_submission(i915);
	i915_gem_contexts_lost(i915);

+	GEM_TRACE("end\n");
+
	smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
	clear_bit(I915_WEDGED, &i915->gpu_error.flags);
@@ ... @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
	if (ret)
		return ret;

+	GEM_BUG_ON(i915->gt.active_requests);
+
	/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
	for_each_engine(engine, i915, id) {
		struct i915_gem_timeline *timeline;
		struct intel_timeline *tl = engine->timeline;

+		GEM_TRACE("%s seqno %d -> %d\n",
+			  engine->name, tl->seqno, seqno);
+
		if (!i915_seqno_passed(seqno, tl->seqno)) {
			/* Flush any waiters before we reuse the seqno */
			intel_engine_disarm_breadcrumbs(engine);
@@ ... @@ static void i915_request_retire(struct i915_request *request)
	struct intel_engine_cs *engine = request->engine;
	struct i915_gem_active *active, *next;

+	GEM_TRACE("%s(%d) fence %llx:%d, global_seqno %d\n",
+		  engine->name, intel_engine_get_seqno(engine),
+		  request->fence.context, request->fence.seqno,
+		  request->global_seqno);
+
	lockdep_assert_held(&request->i915->drm.struct_mutex);
	GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
	GEM_BUG_ON(!i915_request_completed(request));
@@ ... @@ void __i915_request_submit(struct i915_request *request)
	struct intel_timeline *timeline;
	u32 seqno;

+	GEM_TRACE("%s fence %llx:%d -> global_seqno %d\n",
+		  request->engine->name,
+		  request->fence.context, request->fence.seqno,
+		  engine->timeline->seqno);
+
	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&engine->timeline->lock);
@@ ... @@ void __i915_request_unsubmit(struct i915_request *request)
	struct intel_engine_cs *engine = request->engine;
	struct intel_timeline *timeline;

+	GEM_TRACE("%s fence %llx:%d <- global_seqno %d\n",
+		  request->engine->name,
+		  request->fence.context, request->fence.seqno,
+		  request->global_seqno);
+
	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&engine->timeline->lock);
@@ ... @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
	u32 *cs;
	int err;

+	GEM_TRACE("%s fence %llx:%d\n",
+		  engine->name, request->fence.context, request->fence.seqno);
+
	lockdep_assert_held(&request->i915->drm.struct_mutex);
	trace_i915_request_add(request);
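
For reference, GEM_TRACE is the i915 debug-tracing helper from
drivers/gpu/drm/i915/i915_gem.h. A minimal sketch of its definition is below
(the exact Kconfig guard has varied across kernel versions), showing why these
markers cost nothing in production builds:

/*
 * Sketch of the GEM_TRACE helper the hunks above rely on: when GEM debug
 * tracing is enabled it forwards to trace_printk(), whose output lands in
 * the ftrace ring buffer; otherwise it compiles away entirely.
 */
#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GEM)
#define GEM_TRACE(...) trace_printk(__VA_ARGS__)
#else
#define GEM_TRACE(...) do { } while (0)
#endif

The resulting messages can be read back from /sys/kernel/debug/tracing/trace
alongside the driver's existing tracepoints, which is why each hunk tags its
message with the engine name and the fence context:seqno pair.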