/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/prime_numbers.h>

#include "../i915_selftest.h"

#include "mock_context.h"
#include "mock_gem_device.h"

static int igt_add_request(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_request *request;
	int err = -ENOMEM;

	/* Basic preliminary test to create a request and let it loose! */

	mutex_lock(&i915->drm.struct_mutex);
	request = mock_request(i915->engine[RCS],
			       i915->kernel_context,
			       HZ / 10);
	if (!request)
		goto out_unlock;

	i915_add_request(request);

	err = 0;
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

static int igt_wait_request(void *arg)
{
	const long T = HZ / 4;
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_request *request;
	int err = -EINVAL;

	/* Submit a request, then wait upon it */

	mutex_lock(&i915->drm.struct_mutex);
	request = mock_request(i915->engine[RCS], i915->kernel_context, T);
	if (!request) {
		err = -ENOMEM;
		goto out_unlock;
	}

	if (i915_wait_request(request, I915_WAIT_LOCKED, 0) != -ETIME) {
		pr_err("request wait (busy query) succeeded (expected timeout before submit!)\n");
		goto out_unlock;
	}

	if (i915_wait_request(request, I915_WAIT_LOCKED, T) != -ETIME) {
		pr_err("request wait succeeded (expected timeout before submit!)\n");
		goto out_unlock;
	}

	if (i915_gem_request_completed(request)) {
		pr_err("request completed before submit!!\n");
		goto out_unlock;
	}

	i915_add_request(request);

	if (i915_wait_request(request, I915_WAIT_LOCKED, 0) != -ETIME) {
		pr_err("request wait (busy query) succeeded (expected timeout after submit!)\n");
		goto out_unlock;
	}

	if (i915_gem_request_completed(request)) {
		pr_err("request completed immediately!\n");
		goto out_unlock;
	}

	if (i915_wait_request(request, I915_WAIT_LOCKED, T / 2) != -ETIME) {
		pr_err("request wait succeeded (expected timeout!)\n");
		goto out_unlock;
	}

	if (i915_wait_request(request, I915_WAIT_LOCKED, T) == -ETIME) {
		pr_err("request wait timed out!\n");
		goto out_unlock;
	}

	if (!i915_gem_request_completed(request)) {
		pr_err("request not complete after waiting!\n");
		goto out_unlock;
	}

	if (i915_wait_request(request, I915_WAIT_LOCKED, T) == -ETIME) {
		pr_err("request wait timed out when already complete!\n");
		goto out_unlock;
	}

	err = 0;
out_unlock:
	mock_device_flush(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

static int igt_fence_wait(void *arg)
{
	const long T = HZ / 4;
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_request *request;
	int err = -EINVAL;

	/* Submit a request, treat it as a fence and wait upon it */

	mutex_lock(&i915->drm.struct_mutex);
	request = mock_request(i915->engine[RCS], i915->kernel_context, T);
	if (!request) {
		err = -ENOMEM;
		goto out_locked;
	}
	mutex_unlock(&i915->drm.struct_mutex); /* safe as we are single user */

	if (dma_fence_wait_timeout(&request->fence, false, T) != -ETIME) {
		pr_err("fence wait success before submit (expected timeout)!\n");
		goto out_device;
	}

	mutex_lock(&i915->drm.struct_mutex);
	i915_add_request(request);
	mutex_unlock(&i915->drm.struct_mutex);

	if (dma_fence_is_signaled(&request->fence)) {
		pr_err("fence signaled immediately!\n");
		goto out_device;
	}

	if (dma_fence_wait_timeout(&request->fence, false, T / 2) != -ETIME) {
		pr_err("fence wait success after submit (expected timeout)!\n");
		goto out_device;
	}

	if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
		pr_err("fence wait timed out (expected success)!\n");
		goto out_device;
	}

	if (!dma_fence_is_signaled(&request->fence)) {
		pr_err("fence unsignaled after waiting!\n");
		goto out_device;
	}

	if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
		pr_err("fence wait timed out when complete (expected success)!\n");
		goto out_device;
	}

	err = 0;
out_device:
	mutex_lock(&i915->drm.struct_mutex);
out_locked:
	mock_device_flush(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

static int igt_request_rewind(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_request *request, *vip;
	struct i915_gem_context *ctx[2];
	int err = -EINVAL;

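	/* Check that a high priority ("vip") request can jump ahead of an
	 * already submitted request: cancel the original, submit the vip
	 * request, then resubmit the original (simulating preemption by
	 * manual reordering) and verify that the vip request completes
	 * first while the original is still busy.
	 */
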
	mutex_lock(&i915->drm.struct_mutex);
	ctx[0] = mock_context(i915, "A");
	request = mock_request(i915->engine[RCS], ctx[0], 2 * HZ);
	if (!request) {
		err = -ENOMEM;
		goto err_context_0;
	}

	i915_gem_request_get(request);
	i915_add_request(request);

	ctx[1] = mock_context(i915, "B");
	vip = mock_request(i915->engine[RCS], ctx[1], 0);
	if (!vip) {
		err = -ENOMEM;
		goto err_context_1;
	}

	/* Simulate preemption by manual reordering */
	if (!mock_cancel_request(request)) {
		pr_err("failed to cancel request (already executed)!\n");
		i915_add_request(vip);
		goto err_context_1;
	}
	i915_gem_request_get(vip);
	i915_add_request(vip);
	rcu_read_lock();
	request->engine->submit_request(request);
	rcu_read_unlock();

	mutex_unlock(&i915->drm.struct_mutex);

	if (i915_wait_request(vip, 0, HZ) == -ETIME) {
		pr_err("timed out waiting for high priority request, vip.seqno=%d, current seqno=%d\n",
		       vip->global_seqno, intel_engine_get_seqno(i915->engine[RCS]));
		goto err;
	}

	if (i915_gem_request_completed(request)) {
		pr_err("low priority request already completed\n");
		goto err;
	}

	err = 0;
err:
	i915_gem_request_put(vip);
	mutex_lock(&i915->drm.struct_mutex);
err_context_1:
	mock_context_close(ctx[1]);
	i915_gem_request_put(request);
err_context_0:
	mock_context_close(ctx[0]);
	mock_device_flush(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

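/* The mock selftests are run against a mock (software only) device, so we
 * can exercise the request machinery without requiring any real hardware.
 */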
int i915_gem_request_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_add_request),
		SUBTEST(igt_wait_request),
		SUBTEST(igt_fence_wait),
		SUBTEST(igt_request_rewind),
	};
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	err = i915_subtests(tests, i915);
	drm_dev_unref(&i915->drm);

	return err;
}

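/* live_test brackets a live subtest: it records which device and subtest we
 * are running, plus the GPU reset count sampled at the start so we can check
 * afterwards that the GPU survived the test unscathed.
 */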
struct live_test {
	struct drm_i915_private *i915;
	const char *func;
	const char *name;

	unsigned int reset_count;
};

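/* Flush the GPU to idle and sample the error state (reset count, missed
 * interrupt mask) before the subtest begins.
 */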
static int begin_live_test(struct live_test *t,
			   struct drm_i915_private *i915,
			   const char *func,
			   const char *name)
{
	int err;

	t->i915 = i915;
	t->func = func;
	t->name = name;

	err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
	if (err) {
		pr_err("%s(%s): failed to idle before, with err=%d!\n",
		       func, name, err);
		return err;
	}

	i915->gpu_error.missed_irq_rings = 0;
	t->reset_count = i915_reset_count(&i915->gpu_error);

	return 0;
}

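/* After the subtest: retire outstanding requests, wait for the engines to
 * idle again and verify that no resets or missed interrupts occurred in the
 * meantime.
 */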
static int end_live_test(struct live_test *t)
{
	struct drm_i915_private *i915 = t->i915;

	i915_gem_retire_requests(i915);

	if (wait_for(intel_engines_are_idle(i915), 10)) {
		pr_err("%s(%s): GPU not idle\n", t->func, t->name);
		return -EIO;
	}

	if (t->reset_count != i915_reset_count(&i915->gpu_error)) {
		pr_err("%s(%s): GPU was reset %d times!\n",
		       t->func, t->name,
		       i915_reset_count(&i915->gpu_error) - t->reset_count);
		return -EIO;
	}

	if (i915->gpu_error.missed_irq_rings) {
		pr_err("%s(%s): Missed interrupts on engines %lx\n",
		       t->func, t->name, i915->gpu_error.missed_irq_rings);
		return -EIO;
	}

	return 0;
}

static int live_nop_request(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct live_test t;
	unsigned int id;
	int err;

	/* Submit various sized batches of empty requests, to each engine
	 * (individually), and wait for the batch to complete. We can check
	 * the overhead of submitting requests to the hardware.
	 */

	mutex_lock(&i915->drm.struct_mutex);

	for_each_engine(engine, i915, id) {
		IGT_TIMEOUT(end_time);
		struct drm_i915_gem_request *request;
		unsigned long n, prime;
		ktime_t times[2] = {};

		err = begin_live_test(&t, i915, __func__, engine->name);
		if (err)
			goto out_unlock;

		for_each_prime_number_from(prime, 1, 8192) {
			times[1] = ktime_get_raw();

			for (n = 0; n < prime; n++) {
				request = i915_gem_request_alloc(engine,
								 i915->kernel_context);
				if (IS_ERR(request)) {
					err = PTR_ERR(request);
					goto out_unlock;
				}

				/* This space is left intentionally blank.
				 *
				 * We do not actually want to perform any
				 * action with this request, we just want
				 * to measure the latency in allocation
				 * and submission of our breadcrumbs -
				 * ensuring that the bare request is sufficient
				 * for the system to work (i.e. proper HEAD
				 * tracking of the rings, interrupt handling,
				 * etc). It also gives us the lowest bounds
				 * for latency.
				 */

				i915_add_request(request);
			}
			i915_wait_request(request,
					  I915_WAIT_LOCKED,
					  MAX_SCHEDULE_TIMEOUT);

			times[1] = ktime_sub(ktime_get_raw(), times[1]);
			if (prime == 1)
				times[0] = times[1];

			if (__igt_timeout(end_time, NULL))
				break;
		}

		err = end_live_test(&t);
		if (err)
			goto out_unlock;

		pr_info("Request latencies on %s: 1 = %lluns, %lu = %lluns\n",
			engine->name,
			ktime_to_ns(times[0]),
			prime, div64_u64(ktime_to_ns(times[1]), prime));
	}

out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

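/* Build a trivial batch buffer: a single page containing just
 * MI_BATCH_BUFFER_END, pinned into the global GTT so that any engine can
 * execute it.
 */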
static struct i915_vma *empty_batch(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 *cmd;
	int err;

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	*cmd = MI_BATCH_BUFFER_END;
	i915_gem_chipset_flush(i915);

	i915_gem_object_unpin_map(obj);

	err = i915_gem_object_set_to_gtt_domain(obj, false);
	if (err)
		goto err;

	vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_GLOBAL);
	if (err)
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

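/* Build and submit a request that simply executes the empty batch. The
 * request is always added, even on error, so that it remains tracked and
 * can be retired by the caller.
 */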
static struct drm_i915_gem_request *
empty_request(struct intel_engine_cs *engine,
	      struct i915_vma *batch)
{
	struct drm_i915_gem_request *request;
	int err;

	request = i915_gem_request_alloc(engine,
					 engine->i915->kernel_context);
	if (IS_ERR(request))
		return request;

	err = engine->emit_flush(request, EMIT_INVALIDATE);
	if (err)
		goto out_request;

	err = i915_switch_context(request);
	if (err)
		goto out_request;

	err = engine->emit_bb_start(request,
				    batch->node.start,
				    batch->node.size,
				    I915_DISPATCH_SECURE);
	if (err)
		goto out_request;

out_request:
	__i915_add_request(request, err == 0);
	return err ? ERR_PTR(err) : request;
}

static int live_empty_request(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct live_test t;
	struct i915_vma *batch;
	unsigned int id;
	int err = 0;

	/* Submit various sized batches of empty requests, to each engine
	 * (individually), and wait for the batch to complete. We can check
	 * the overhead of submitting requests to the hardware.
	 */

	mutex_lock(&i915->drm.struct_mutex);

	batch = empty_batch(i915);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto out_unlock;
	}

	for_each_engine(engine, i915, id) {
		IGT_TIMEOUT(end_time);
		struct drm_i915_gem_request *request;
		unsigned long n, prime;
		ktime_t times[2] = {};

		err = begin_live_test(&t, i915, __func__, engine->name);
		if (err)
			goto out_batch;

		/* Warmup / preload */
		request = empty_request(engine, batch);
		if (IS_ERR(request)) {
			err = PTR_ERR(request);
			goto out_batch;
		}
		i915_wait_request(request,
				  I915_WAIT_LOCKED,
				  MAX_SCHEDULE_TIMEOUT);

		for_each_prime_number_from(prime, 1, 8192) {
			times[1] = ktime_get_raw();

			for (n = 0; n < prime; n++) {
				request = empty_request(engine, batch);
				if (IS_ERR(request)) {
					err = PTR_ERR(request);
					goto out_batch;
				}
			}
			i915_wait_request(request,
					  I915_WAIT_LOCKED,
					  MAX_SCHEDULE_TIMEOUT);

			times[1] = ktime_sub(ktime_get_raw(), times[1]);
			if (prime == 1)
				times[0] = times[1];

			if (__igt_timeout(end_time, NULL))
				break;
		}

		err = end_live_test(&t);
		if (err)
			goto out_batch;

		pr_info("Batch latencies on %s: 1 = %lluns, %lu = %lluns\n",
			engine->name,
			ktime_to_ns(times[0]),
			prime, div64_u64(ktime_to_ns(times[1]), prime));
	}

out_batch:
	i915_vma_unpin(batch);
	i915_vma_put(batch);
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

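/* Build a batch buffer whose first instruction is a MI_BATCH_BUFFER_START
 * pointing back at itself, so once started it spins on the GPU until
 * recursive_batch_resolve() overwrites that jump with MI_BATCH_BUFFER_END.
 */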
static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
{
	struct i915_gem_context *ctx = i915->kernel_context;
	struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
	struct drm_i915_gem_object *obj;
	const int gen = INTEL_GEN(i915);
	struct i915_vma *vma;
	u32 *cmd;
	int err;

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err;

	err = i915_gem_object_set_to_wc_domain(obj, true);
	if (err)
		goto err;

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	if (gen >= 8) {
		*cmd++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
		*cmd++ = lower_32_bits(vma->node.start);
		*cmd++ = upper_32_bits(vma->node.start);
	} else if (gen >= 6) {
		*cmd++ = MI_BATCH_BUFFER_START | 1 << 8;
		*cmd++ = lower_32_bits(vma->node.start);
	} else if (gen >= 4) {
		*cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
		*cmd++ = lower_32_bits(vma->node.start);
	} else {
		*cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | 1;
		*cmd++ = lower_32_bits(vma->node.start);
	}
	*cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */
	i915_gem_chipset_flush(i915);

	i915_gem_object_unpin_map(obj);

	return vma;

err:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

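/* Replace the self-referencing jump at the head of a recursive batch with
 * MI_BATCH_BUFFER_END, allowing the spinning batch (and any request
 * executing it) to complete.
 */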
static int recursive_batch_resolve(struct i915_vma *batch)
{
	u32 *cmd;

	cmd = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	*cmd = MI_BATCH_BUFFER_END;
	i915_gem_chipset_flush(batch->vm->i915);

	i915_gem_object_unpin_map(batch->obj);

	return 0;
}

static int live_all_engines(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct drm_i915_gem_request *request[I915_NUM_ENGINES];
	struct i915_vma *batch;
	struct live_test t;
	unsigned int id;
	int err;

	/* Check we can submit requests to all engines simultaneously. We
	 * send a recursive batch to each engine - checking that we don't
	 * block doing so, and that they don't complete too soon.
	 */

	mutex_lock(&i915->drm.struct_mutex);

	err = begin_live_test(&t, i915, __func__, "");
	if (err)
		goto out_unlock;

	batch = recursive_batch(i915);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		pr_err("%s: Unable to create batch, err=%d\n", __func__, err);
		goto out_unlock;
	}

	for_each_engine(engine, i915, id) {
		request[id] = i915_gem_request_alloc(engine,
						     i915->kernel_context);
		if (IS_ERR(request[id])) {
			err = PTR_ERR(request[id]);
			pr_err("%s: Request allocation failed with err=%d\n",
			       __func__, err);
			goto out_request;
		}

		err = engine->emit_flush(request[id], EMIT_INVALIDATE);
		GEM_BUG_ON(err);

		err = i915_switch_context(request[id]);
		GEM_BUG_ON(err);

		err = engine->emit_bb_start(request[id],
					    batch->node.start,
					    batch->node.size,
					    0);
		GEM_BUG_ON(err);
		request[id]->batch = batch;

		if (!i915_gem_object_has_active_reference(batch->obj)) {
			i915_gem_object_get(batch->obj);
			i915_gem_object_set_active_reference(batch->obj);
		}

		i915_vma_move_to_active(batch, request[id], 0);
		i915_gem_request_get(request[id]);
		i915_add_request(request[id]);
	}

	for_each_engine(engine, i915, id) {
		if (i915_gem_request_completed(request[id])) {
			pr_err("%s(%s): request completed too early!\n",
			       __func__, engine->name);
			err = -EINVAL;
			goto out_request;
		}
	}

	err = recursive_batch_resolve(batch);
	if (err) {
		pr_err("%s: failed to resolve batch, err=%d\n", __func__, err);
		goto out_request;
	}

	for_each_engine(engine, i915, id) {
		long timeout;

		timeout = i915_wait_request(request[id],
					    I915_WAIT_LOCKED,
					    MAX_SCHEDULE_TIMEOUT);
		if (timeout < 0) {
			err = timeout;
			pr_err("%s: error waiting for request on %s, err=%d\n",
			       __func__, engine->name, err);
			goto out_request;
		}

		GEM_BUG_ON(!i915_gem_request_completed(request[id]));
		i915_gem_request_put(request[id]);
		request[id] = NULL;
	}

	err = end_live_test(&t);

out_request:
	for_each_engine(engine, i915, id)
		if (request[id])
			i915_gem_request_put(request[id]);
	i915_vma_unpin(batch);
	i915_vma_put(batch);
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

static int live_sequential_engines(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_request *request[I915_NUM_ENGINES] = {};
	struct drm_i915_gem_request *prev = NULL;
	struct intel_engine_cs *engine;
	struct live_test t;
	unsigned int id;
	int err;

	/* Check we can submit requests to all engines sequentially, such
	 * that each successive request waits for the earlier ones. This
	 * tests that we don't execute requests out of order, even though
	 * they are running on independent engines.
	 */

	mutex_lock(&i915->drm.struct_mutex);

	err = begin_live_test(&t, i915, __func__, "");
	if (err)
		goto out_unlock;

	for_each_engine(engine, i915, id) {
		struct i915_vma *batch;

		batch = recursive_batch(i915);
		if (IS_ERR(batch)) {
			err = PTR_ERR(batch);
			pr_err("%s: Unable to create batch for %s, err=%d\n",
			       __func__, engine->name, err);
			goto out_unlock;
		}

		request[id] = i915_gem_request_alloc(engine,
						     i915->kernel_context);
		if (IS_ERR(request[id])) {
			err = PTR_ERR(request[id]);
			pr_err("%s: Request allocation failed for %s with err=%d\n",
			       __func__, engine->name, err);
			goto out_request;
		}

		if (prev) {
			err = i915_gem_request_await_dma_fence(request[id],
							       &prev->fence);
			if (err) {
				i915_add_request(request[id]);
				pr_err("%s: Request await failed for %s with err=%d\n",
				       __func__, engine->name, err);
				goto out_request;
			}
		}

		err = engine->emit_flush(request[id], EMIT_INVALIDATE);
		GEM_BUG_ON(err);

		err = i915_switch_context(request[id]);
		GEM_BUG_ON(err);

		err = engine->emit_bb_start(request[id],
					    batch->node.start,
					    batch->node.size,
					    0);
		GEM_BUG_ON(err);
		request[id]->batch = batch;

		i915_vma_move_to_active(batch, request[id], 0);
		i915_gem_object_set_active_reference(batch->obj);
		i915_vma_get(batch);

		i915_gem_request_get(request[id]);
		i915_add_request(request[id]);

		prev = request[id];
	}

	for_each_engine(engine, i915, id) {
		long timeout;

		if (i915_gem_request_completed(request[id])) {
			pr_err("%s(%s): request completed too early!\n",
			       __func__, engine->name);
			err = -EINVAL;
			goto out_request;
		}

		err = recursive_batch_resolve(request[id]->batch);
		if (err) {
			pr_err("%s: failed to resolve batch, err=%d\n",
			       __func__, err);
			goto out_request;
		}

		timeout = i915_wait_request(request[id],
					    I915_WAIT_LOCKED,
					    MAX_SCHEDULE_TIMEOUT);
		if (timeout < 0) {
			err = timeout;
			pr_err("%s: error waiting for request on %s, err=%d\n",
			       __func__, engine->name, err);
			goto out_request;
		}

		GEM_BUG_ON(!i915_gem_request_completed(request[id]));
	}

	err = end_live_test(&t);

out_request:
	for_each_engine(engine, i915, id) {
		u32 *cmd;

		if (!request[id])
			break;

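		/* If we bailed out before resolving the batch, it may still
		 * be spinning; terminate it with MI_BATCH_BUFFER_END so the
		 * request can complete before we drop our references.
		 */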
		cmd = i915_gem_object_pin_map(request[id]->batch->obj,
					      I915_MAP_WC);
		if (!IS_ERR(cmd)) {
			*cmd = MI_BATCH_BUFFER_END;
			i915_gem_chipset_flush(i915);

			i915_gem_object_unpin_map(request[id]->batch->obj);
		}

		i915_vma_put(request[id]->batch);
		i915_gem_request_put(request[id]);
	}
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

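/* The live selftests are run against the real device, submitting requests
 * to the actual hardware (in contrast to the mock selftests above).
 */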
int i915_gem_request_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_nop_request),
		SUBTEST(live_all_engines),
		SUBTEST(live_sequential_engines),
		SUBTEST(live_empty_request),
	};
	return i915_subtests(tests, i915);
}