cancel_timer(&engine->execlists.preempt);
}
+/*
+ * can_preempt() - report whether an engine may use execlists preemption.
+ *
+ * Gen9 and newer (INTEL_GEN > 8) always support preemption.  On gen8
+ * (Broadwell) the render engine would need an extra GPGPU workaround
+ * that is not implemented, so only non-render engine classes report
+ * preemption capability there.
+ */
+static bool can_preempt(struct intel_engine_cs *engine)
+{
+ if (INTEL_GEN(engine->i915) > 8)
+ return true;
+
+ /* GPGPU on bdw requires extra w/a; not implemented */
+ return engine->class != RENDER_CLASS;
+}
+
void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
{
engine->submit_request = execlists_submit_request;
engine->flags |= I915_ENGINE_SUPPORTS_STATS;
if (!intel_vgpu_active(engine->i915)) {
engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
- if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) {
+ if (can_preempt(engine)) {
engine->flags |= I915_ENGINE_HAS_PREEMPTION;
if (IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
engine->flags |= I915_ENGINE_HAS_TIMESLICES;
return PTR_ERR(head);
for_each_engine(engine, outer->gt, id) {
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
for (i = 0; i < count; i++) {
struct i915_request *rq;
if (i915_request_wait(head, 0,
2 * outer->gt->info.num_engines * (count + 2) * (count + 3)) < 0) {
- pr_err("Failed to slice along semaphore chain of length (%d, %d)!\n",
- count, n);
+ pr_err("%s: Failed to slice along semaphore chain of length (%d, %d)!\n",
+ outer->name, count, n);
GEM_TRACE_DUMP();
intel_gt_set_wedged(outer->gt);
err = -EIO;
enum intel_engine_id id;
int err = -ENOMEM;
- if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
- return 0;
-
- if (!(gt->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
- pr_err("Logical preemption supported, but not exposed\n");
-
if (igt_spinner_init(&spin_hi, gt))
return -ENOMEM;
enum intel_engine_id id;
int err = -ENOMEM;
- if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
- return 0;
-
if (igt_spinner_init(&spin_hi, gt))
return -ENOMEM;
* that may be being observed and not want to be interrupted.
*/
- if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
- return 0;
-
if (preempt_client_init(gt, &a))
return -ENOMEM;
if (preempt_client_init(gt, &b))
* GPU. That sounds like preemption! Plus a little bit of bookkeeping.
*/
- if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
- return 0;
-
if (preempt_client_init(gt, &data.a))
return -ENOMEM;
if (preempt_client_init(gt, &data.b))
* completion event.
*/
- if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
- return 0;
-
if (intel_uc_uses_guc_submission(>->uc))
return 0; /* presume black box */
* the previously submitted spinner in B.
*/
- if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
- return 0;
-
if (preempt_client_init(gt, &hi))
return -ENOMEM;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
- return 0;
-
/*
* Build as long a chain of preempters as we can, with each
* request higher priority than the last. Once we are ready, we release
u32 *result;
int err = 0;
- if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
- return 0;
-
/*
* In our other tests, we look at preemption in carefully
* controlled conditions in the ringbuffer. Since most of the
if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
return 0;
- if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
- return 0;
-
if (!intel_has_reset_engine(gt))
return 0;
u32 *cs;
int n;
- if (!HAS_LOGICAL_RING_PREEMPTION(smoke.gt->i915))
- return 0;
-
smoke.contexts = kmalloc_array(smoke.ncontext,
sizeof(*smoke.contexts),
GFP_KERNEL);