 	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
 		return 0;
 
+	if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
+		pr_err("Logical preemption supported, but not exposed\n");
+
 	mutex_lock(&i915->drm.struct_mutex);
 	wakeref = intel_runtime_pm_get(i915);
@@ ... @@
 	for_each_engine(engine, i915, id) {
 		struct i915_request *rq;
 
+		if (!intel_engine_has_preemption(engine))
+			continue;
+
 		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
 						MI_ARB_CHECK);
 		if (IS_ERR(rq)) {
@@ ... @@
 	for_each_engine(engine, i915, id) {
 		struct i915_request *rq;
 
+		if (!intel_engine_has_preemption(engine))
+			continue;
+
 		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
 						MI_ARB_CHECK);
 		if (IS_ERR(rq)) {
@@ ... @@
 		struct i915_request *rq_a, *rq_b;
 		int depth;
 
+		if (!intel_engine_has_preemption(engine))
+			continue;
+
 		engine->execlists.preempt_hang.count = 0;
 		rq_a = igt_spinner_create_request(&a.spin,
@@ ... @@
 	for_each_engine(engine, i915, id) {
 		int depth;
 
+		if (!intel_engine_has_preemption(engine))
+			continue;
+
 		if (!engine->emit_init_breadcrumb)
 			continue;
@@ ... @@
 		};
 		int count, i;
 
+		if (!intel_engine_has_preemption(engine))
+			continue;
+
 		for_each_prime_number_from(count, 1, 32) { /* must fit ring! */
 			struct i915_request *rq;