struct intel_timeline *timeline;
u32 seqno;
+ GEM_BUG_ON(!irqs_disabled());
+ assert_spin_locked(&engine->timeline->lock);
+
trace_i915_gem_request_execute(request);
/* Transfer from per-context onto the global per-engine timeline */
timeline = engine->timeline;
GEM_BUG_ON(timeline == request->timeline);
- assert_spin_locked(&timeline->lock);
seqno = timeline_get_seqno(timeline);
GEM_BUG_ON(!seqno);
struct intel_engine_cs *engine = request->engine;
struct intel_timeline *timeline;
+ GEM_BUG_ON(!irqs_disabled());
assert_spin_locked(&engine->timeline->lock);
/* Only unwind in reverse order, required so that the per-context list
 * is kept in seqno/ring order.
 */
/* locked by dma_fence_enable_sw_signaling() (irqsafe fence->lock) */
+ GEM_BUG_ON(!irqs_disabled());
assert_spin_locked(&request->lock);
seqno = i915_gem_request_global_seqno(request);
struct intel_engine_cs *engine = request->engine;
struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ GEM_BUG_ON(!irqs_disabled());
assert_spin_locked(&request->lock);
GEM_BUG_ON(!request->signaling.wait.seqno);