/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/prime_numbers.h>

#include "../i915_selftest.h"

#include "mock_context.h"
#include "mock_gem_device.h"

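/*
 * Note: the igt_* subtests below run against the mock GEM device, where
 * mock_request() builds a request on a software-only engine; the mock
 * backend is expected to complete such a request roughly "delay" jiffies
 * after it is submitted, so these tests exercise the request and fence
 * machinery without touching real hardware.
 */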
static int igt_add_request(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_request *request;
	int err = -ENOMEM;

	/* Basic preliminary test to create a request and let it loose! */

	mutex_lock(&i915->drm.struct_mutex);
	request = mock_request(i915->engine[RCS],
			       i915->kernel_context,
			       HZ / 10);
	if (!request)
		goto out_unlock;

	i915_add_request(request);

	err = 0;
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

static int igt_wait_request(void *arg)
{
	const long T = HZ / 4;
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_request *request;
	int err = -EINVAL;

	/* Submit a request, then wait upon it */

	mutex_lock(&i915->drm.struct_mutex);
	request = mock_request(i915->engine[RCS], i915->kernel_context, T);
	if (!request) {
		err = -ENOMEM;
		goto out_unlock;
	}

	if (i915_wait_request(request, I915_WAIT_LOCKED, 0) != -ETIME) {
		pr_err("request wait (busy query) succeeded (expected timeout before submit!)\n");
		goto out_unlock;
	}

	if (i915_wait_request(request, I915_WAIT_LOCKED, T) != -ETIME) {
		pr_err("request wait succeeded (expected timeout before submit!)\n");
		goto out_unlock;
	}

	if (i915_gem_request_completed(request)) {
		pr_err("request completed before submit!!\n");
		goto out_unlock;
	}

	i915_add_request(request);

	if (i915_wait_request(request, I915_WAIT_LOCKED, 0) != -ETIME) {
		pr_err("request wait (busy query) succeeded (expected timeout after submit!)\n");
		goto out_unlock;
	}

	if (i915_gem_request_completed(request)) {
		pr_err("request completed immediately!\n");
		goto out_unlock;
	}

	if (i915_wait_request(request, I915_WAIT_LOCKED, T / 2) != -ETIME) {
		pr_err("request wait succeeded (expected timeout!)\n");
		goto out_unlock;
	}

	if (i915_wait_request(request, I915_WAIT_LOCKED, T) == -ETIME) {
		pr_err("request wait timed out!\n");
		goto out_unlock;
	}

	if (!i915_gem_request_completed(request)) {
		pr_err("request not complete after waiting!\n");
		goto out_unlock;
	}

	if (i915_wait_request(request, I915_WAIT_LOCKED, T) == -ETIME) {
		pr_err("request wait timed out when already complete!\n");
		goto out_unlock;
	}

	err = 0;
out_unlock:
	mock_device_flush(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

static int igt_fence_wait(void *arg)
{
	const long T = HZ / 4;
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_request *request;
	int err = -EINVAL;

	/* Submit a request, treat it as a fence and wait upon it */

	mutex_lock(&i915->drm.struct_mutex);
	request = mock_request(i915->engine[RCS], i915->kernel_context, T);
	if (!request) {
		err = -ENOMEM;
		goto out_locked;
	}
	mutex_unlock(&i915->drm.struct_mutex); /* safe as we are single user */

	if (dma_fence_wait_timeout(&request->fence, false, T) != -ETIME) {
		pr_err("fence wait success before submit (expected timeout)!\n");
		goto out_device;
	}

	mutex_lock(&i915->drm.struct_mutex);
	i915_add_request(request);
	mutex_unlock(&i915->drm.struct_mutex);

	if (dma_fence_is_signaled(&request->fence)) {
		pr_err("fence signaled immediately!\n");
		goto out_device;
	}

	if (dma_fence_wait_timeout(&request->fence, false, T / 2) != -ETIME) {
		pr_err("fence wait success after submit (expected timeout)!\n");
		goto out_device;
	}

	if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
		pr_err("fence wait timed out (expected success)!\n");
		goto out_device;
	}

	if (!dma_fence_is_signaled(&request->fence)) {
		pr_err("fence unsignaled after waiting!\n");
		goto out_device;
	}

	if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
		pr_err("fence wait timed out when complete (expected success)!\n");
		goto out_device;
	}

	err = 0;
out_device:
	mutex_lock(&i915->drm.struct_mutex);
out_locked:
	mock_device_flush(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

static int igt_request_rewind(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_request *request, *vip;
	struct i915_gem_context *ctx[2];
	int err = -EINVAL;

	mutex_lock(&i915->drm.struct_mutex);
	ctx[0] = mock_context(i915, "A");
	request = mock_request(i915->engine[RCS], ctx[0], 2 * HZ);
	if (!request) {
		err = -ENOMEM;
		goto err_context_0;
	}

	i915_gem_request_get(request);
	i915_add_request(request);

	ctx[1] = mock_context(i915, "B");
	vip = mock_request(i915->engine[RCS], ctx[1], 0);
	if (!vip) {
		err = -ENOMEM;
		goto err_context_1;
	}

	/* Simulate preemption by manual reordering */
	if (!mock_cancel_request(request)) {
		pr_err("failed to cancel request (already executed)!\n");
		i915_add_request(vip);
		goto err_context_1;
	}
	i915_gem_request_get(vip);
	i915_add_request(vip);
	request->engine->submit_request(request);

	mutex_unlock(&i915->drm.struct_mutex);

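	/*
	 * The original request was resubmitted behind the vip request, so
	 * the vip fence should signal first even though it was created
	 * later; the checks below verify that ordering.
	 */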
	if (i915_wait_request(vip, 0, HZ) == -ETIME) {
		pr_err("timed out waiting for high priority request, vip.seqno=%d, current seqno=%d\n",
		       vip->global_seqno, intel_engine_get_seqno(i915->engine[RCS]));
		goto err;
	}

	if (i915_gem_request_completed(request)) {
		pr_err("low priority request already completed\n");
		goto err;
	}

	err = 0;
err:
	i915_gem_request_put(vip);
	mutex_lock(&i915->drm.struct_mutex);
err_context_1:
	mock_context_close(ctx[1]);
	i915_gem_request_put(request);
err_context_0:
	mock_context_close(ctx[0]);
	mock_device_flush(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

int i915_gem_request_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_add_request),
		SUBTEST(igt_wait_request),
		SUBTEST(igt_fence_wait),
		SUBTEST(igt_request_rewind),
	};
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	err = i915_subtests(tests, i915);
	drm_dev_unref(&i915->drm);

	return err;
}

struct live_test {
	struct drm_i915_private *i915;
	const char *func;
	const char *name;

	unsigned int reset_count;
};

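/*
 * The live_* tests below run against real hardware. begin_live_test()
 * snapshots the GPU reset count and clears the missed-interrupt mask;
 * end_live_test() then checks that the engines went idle again and that no
 * resets or missed breadcrumb interrupts occurred while the subtest ran.
 */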
static int begin_live_test(struct live_test *t,
			   struct drm_i915_private *i915,
			   const char *func,
			   const char *name)
{
	int err;

	t->i915 = i915;
	t->func = func;
	t->name = name;

	err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
	if (err) {
		pr_err("%s(%s): failed to idle before, with err=%d!",
		       func, name, err);
		return err;
	}

	i915->gpu_error.missed_irq_rings = 0;
	t->reset_count = i915_reset_count(&i915->gpu_error);

	return 0;
}

static int end_live_test(struct live_test *t)
{
	struct drm_i915_private *i915 = t->i915;

	if (wait_for(intel_engines_are_idle(i915), 1)) {
		pr_err("%s(%s): GPU not idle\n", t->func, t->name);
		return -EIO;
	}

	if (t->reset_count != i915_reset_count(&i915->gpu_error)) {
		pr_err("%s(%s): GPU was reset %d times!\n",
		       t->func, t->name,
		       i915_reset_count(&i915->gpu_error) - t->reset_count);
		return -EIO;
	}

	if (i915->gpu_error.missed_irq_rings) {
		pr_err("%s(%s): Missed interrupts on engines %lx\n",
		       t->func, t->name, i915->gpu_error.missed_irq_rings);
		return -EIO;
	}

	return 0;
}

static int live_nop_request(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct live_test t;
	unsigned int id;
	int err;

	/* Submit various sized batches of empty requests, to each engine
	 * (individually), and wait for the batch to complete. We can check
	 * the overhead of submitting requests to the hardware.
	 */

	mutex_lock(&i915->drm.struct_mutex);

	for_each_engine(engine, i915, id) {
		IGT_TIMEOUT(end_time);
		struct drm_i915_gem_request *request;
		unsigned long n, prime;
		ktime_t times[2] = {};

		err = begin_live_test(&t, i915, __func__, engine->name);
		if (err)
			goto out_unlock;

		for_each_prime_number_from(prime, 1, 8192) {
			times[1] = ktime_get_raw();

			for (n = 0; n < prime; n++) {
				request = i915_gem_request_alloc(engine,
								 i915->kernel_context);
				if (IS_ERR(request)) {
					err = PTR_ERR(request);
					goto out_unlock;
				}

				/* This space is left intentionally blank.
				 *
				 * We do not actually want to perform any
				 * action with this request, we just want
				 * to measure the latency in allocation
				 * and submission of our breadcrumbs -
				 * ensuring that the bare request is sufficient
				 * for the system to work (i.e. proper HEAD
				 * tracking of the rings, interrupt handling,
				 * etc). It also gives us the lowest bounds
				 * for latency.
				 */

				i915_add_request(request);
			}
			i915_wait_request(request,
					  I915_WAIT_LOCKED,
					  MAX_SCHEDULE_TIMEOUT);

			times[1] = ktime_sub(ktime_get_raw(), times[1]);
			if (prime == 1)
				times[0] = times[1];

			if (__igt_timeout(end_time, NULL))
				break;
		}

		err = end_live_test(&t);
		if (err)
			goto out_unlock;

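		/*
		 * Report the latency of a single nop request alongside the
		 * amortised per-request latency of the largest prime-sized
		 * batch completed within the timeout.
		 */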
		pr_info("Request latencies on %s: 1 = %lluns, %lu = %lluns\n",
			engine->name,
			ktime_to_ns(times[0]),
			prime, div64_u64(ktime_to_ns(times[1]), prime));
	}

out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

static struct i915_vma *empty_batch(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 *cmd;
	int err;

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}
	*cmd = MI_BATCH_BUFFER_END;
	i915_gem_object_unpin_map(obj);

	err = i915_gem_object_set_to_gtt_domain(obj, false);
	if (err)
		goto err;

	vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_GLOBAL);
	if (err)
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

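/*
 * Build and submit a request that does nothing but run the single
 * MI_BATCH_BUFFER_END batch above: invalidate caches, switch to the kernel
 * context, then emit the batchbuffer start. The request is always added,
 * even on error, so that the ring state stays consistent.
 */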
static struct drm_i915_gem_request *
empty_request(struct intel_engine_cs *engine,
	      struct i915_vma *batch)
{
	struct drm_i915_gem_request *request;
	int err;

	request = i915_gem_request_alloc(engine,
					 engine->i915->kernel_context);
	if (IS_ERR(request))
		return request;

	err = engine->emit_flush(request, EMIT_INVALIDATE);
	if (err)
		goto out_request;

	err = i915_switch_context(request);
	if (err)
		goto out_request;

	err = engine->emit_bb_start(request,
				    batch->node.start,
				    batch->node.size,
				    I915_DISPATCH_SECURE);
	if (err)
		goto out_request;

out_request:
	__i915_add_request(request, err == 0);
	return err ? ERR_PTR(err) : request;
}

static int live_empty_request(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct live_test t;
	struct i915_vma *batch;
	unsigned int id;
	int err = 0;

	/* Submit various sized batches of empty requests, to each engine
	 * (individually), and wait for the batch to complete. We can check
	 * the overhead of submitting requests to the hardware.
	 */

	mutex_lock(&i915->drm.struct_mutex);

	batch = empty_batch(i915);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto out_unlock;
	}

	for_each_engine(engine, i915, id) {
		IGT_TIMEOUT(end_time);
		struct drm_i915_gem_request *request;
		unsigned long n, prime;
		ktime_t times[2] = {};

		err = begin_live_test(&t, i915, __func__, engine->name);
		if (err)
			goto out_batch;

		/* Warmup / preload */
		request = empty_request(engine, batch);
		if (IS_ERR(request)) {
			err = PTR_ERR(request);
			goto out_batch;
		}
		i915_wait_request(request,
				  I915_WAIT_LOCKED,
				  MAX_SCHEDULE_TIMEOUT);

		for_each_prime_number_from(prime, 1, 8192) {
			times[1] = ktime_get_raw();

			for (n = 0; n < prime; n++) {
				request = empty_request(engine, batch);
				if (IS_ERR(request)) {
					err = PTR_ERR(request);
					goto out_batch;
				}
			}
			i915_wait_request(request,
					  I915_WAIT_LOCKED,
					  MAX_SCHEDULE_TIMEOUT);

			times[1] = ktime_sub(ktime_get_raw(), times[1]);
			if (prime == 1)
				times[0] = times[1];

			if (__igt_timeout(end_time, NULL))
				break;
		}

		err = end_live_test(&t);
		if (err)
			goto out_batch;

		pr_info("Batch latencies on %s: 1 = %lluns, %lu = %lluns\n",
			engine->name,
			ktime_to_ns(times[0]),
			prime, div64_u64(ktime_to_ns(times[1]), prime));
	}

out_batch:
	i915_vma_unpin(batch);
	i915_vma_put(batch);
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
{
	struct i915_gem_context *ctx = i915->kernel_context;
	struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
	struct drm_i915_gem_object *obj;
	const int gen = INTEL_GEN(i915);
	struct i915_vma *vma;
	u32 *cmd;
	int err;

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err;

	err = i915_gem_object_set_to_gtt_domain(obj, true);
	if (err)
		goto err;

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

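	/*
	 * Emit an MI_BATCH_BUFFER_START that jumps back to the start of this
	 * very batch, so once submitted it spins indefinitely and keeps the
	 * request busy until recursive_batch_resolve() replaces the first
	 * dword with MI_BATCH_BUFFER_END.
	 */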
	if (gen >= 8) {
		*cmd++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
		*cmd++ = lower_32_bits(vma->node.start);
		*cmd++ = upper_32_bits(vma->node.start);
	} else if (gen >= 6) {
		*cmd++ = MI_BATCH_BUFFER_START | 1 << 8;
		*cmd++ = lower_32_bits(vma->node.start);
	} else if (gen >= 4) {
		*cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
		*cmd++ = lower_32_bits(vma->node.start);
	} else {
		*cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | 1;
		*cmd++ = lower_32_bits(vma->node.start);
	}
	*cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */

	wmb();
	i915_gem_object_unpin_map(obj);

	return vma;

err:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

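/* Terminate the spinning batch by rewriting its first instruction. */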
static int recursive_batch_resolve(struct i915_vma *batch)
{
	u32 *cmd;

	cmd = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	*cmd = MI_BATCH_BUFFER_END;
	wmb();

	i915_gem_object_unpin_map(batch->obj);

	return 0;
}

static int live_all_engines(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct drm_i915_gem_request *request[I915_NUM_ENGINES];
	struct i915_vma *batch;
	struct live_test t;
	unsigned int id;
	int err;

	/* Check we can submit requests to all engines simultaneously. We
	 * send a recursive batch to each engine - checking that we don't
	 * block doing so, and that they don't complete too soon.
	 */

	mutex_lock(&i915->drm.struct_mutex);

	err = begin_live_test(&t, i915, __func__, "");
	if (err)
		goto out_unlock;

	batch = recursive_batch(i915);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		pr_err("%s: Unable to create batch, err=%d\n", __func__, err);
		goto out_unlock;
	}

	for_each_engine(engine, i915, id) {
		request[id] = i915_gem_request_alloc(engine,
						     i915->kernel_context);
		if (IS_ERR(request[id])) {
			err = PTR_ERR(request[id]);
			pr_err("%s: Request allocation failed with err=%d\n",
			       __func__, err);
			goto out_request;
		}

		err = engine->emit_flush(request[id], EMIT_INVALIDATE);
		GEM_BUG_ON(err);

		err = i915_switch_context(request[id]);
		GEM_BUG_ON(err);

		err = engine->emit_bb_start(request[id],
					    batch->node.start,
					    batch->node.size,
					    0);
		GEM_BUG_ON(err);
		request[id]->batch = batch;

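		/*
		 * Keep the shared batch object alive while it is busy: take
		 * an extra reference and mark it to be dropped automatically
		 * once the object is finally retired.
		 */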
		if (!i915_gem_object_has_active_reference(batch->obj)) {
			i915_gem_object_get(batch->obj);
			i915_gem_object_set_active_reference(batch->obj);
		}

		i915_vma_move_to_active(batch, request[id], 0);
		i915_gem_request_get(request[id]);
		i915_add_request(request[id]);
	}

	for_each_engine(engine, i915, id) {
		if (i915_gem_request_completed(request[id])) {
			pr_err("%s(%s): request completed too early!\n",
			       __func__, engine->name);
			err = -EINVAL;
			goto out_request;
		}
	}

	err = recursive_batch_resolve(batch);
	if (err) {
		pr_err("%s: failed to resolve batch, err=%d\n", __func__, err);
		goto out_request;
	}

	for_each_engine(engine, i915, id) {
		long timeout;

		timeout = i915_wait_request(request[id],
					    I915_WAIT_LOCKED,
					    MAX_SCHEDULE_TIMEOUT);
		if (timeout < 0) {
			err = timeout;
			pr_err("%s: error waiting for request on %s, err=%d\n",
			       __func__, engine->name, err);
			goto out_request;
		}

		GEM_BUG_ON(!i915_gem_request_completed(request[id]));
		i915_gem_request_put(request[id]);
		request[id] = NULL;
	}

	err = end_live_test(&t);

out_request:
	for_each_engine(engine, i915, id)
		if (request[id])
			i915_gem_request_put(request[id]);
	i915_vma_unpin(batch);
	i915_vma_put(batch);
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

static int live_sequential_engines(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_request *request[I915_NUM_ENGINES] = {};
	struct drm_i915_gem_request *prev = NULL;
	struct intel_engine_cs *engine;
	struct live_test t;
	unsigned int id;
	int err;

	/* Check we can submit requests to all engines sequentially, such
	 * that each successive request waits for the earlier ones. This
	 * tests that we don't execute requests out of order, even though
	 * they are running on independent engines.
	 */

	mutex_lock(&i915->drm.struct_mutex);

	err = begin_live_test(&t, i915, __func__, "");
	if (err)
		goto out_unlock;

	for_each_engine(engine, i915, id) {
		struct i915_vma *batch;

		batch = recursive_batch(i915);
		if (IS_ERR(batch)) {
			err = PTR_ERR(batch);
			pr_err("%s: Unable to create batch for %s, err=%d\n",
			       __func__, engine->name, err);
			goto out_unlock;
		}

		request[id] = i915_gem_request_alloc(engine,
						     i915->kernel_context);
		if (IS_ERR(request[id])) {
			err = PTR_ERR(request[id]);
			pr_err("%s: Request allocation failed for %s with err=%d\n",
			       __func__, engine->name, err);
			goto out_request;
		}

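		/*
		 * Chain this request behind the previous engine's request so
		 * that the whole set must execute strictly in submission
		 * order, even across independent engines.
		 */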
		if (prev) {
			err = i915_gem_request_await_dma_fence(request[id],
							       &prev->fence);
			if (err) {
				i915_add_request(request[id]);
				pr_err("%s: Request await failed for %s with err=%d\n",
				       __func__, engine->name, err);
				goto out_request;
			}
		}

		err = engine->emit_flush(request[id], EMIT_INVALIDATE);
		GEM_BUG_ON(err);

		err = i915_switch_context(request[id]);
		GEM_BUG_ON(err);

		err = engine->emit_bb_start(request[id],
					    batch->node.start,
					    batch->node.size,
					    0);
		GEM_BUG_ON(err);
		request[id]->batch = batch;

		i915_vma_move_to_active(batch, request[id], 0);
		i915_gem_object_set_active_reference(batch->obj);
		i915_vma_get(batch);

		i915_gem_request_get(request[id]);
		i915_add_request(request[id]);

		prev = request[id];
	}

	for_each_engine(engine, i915, id) {
		long timeout;

		if (i915_gem_request_completed(request[id])) {
			pr_err("%s(%s): request completed too early!\n",
			       __func__, engine->name);
			err = -EINVAL;
			goto out_request;
		}

		err = recursive_batch_resolve(request[id]->batch);
		if (err) {
			pr_err("%s: failed to resolve batch, err=%d\n",
			       __func__, err);
			goto out_request;
		}

		timeout = i915_wait_request(request[id],
					    I915_WAIT_LOCKED,
					    MAX_SCHEDULE_TIMEOUT);
		if (timeout < 0) {
			err = timeout;
			pr_err("%s: error waiting for request on %s, err=%d\n",
			       __func__, engine->name, err);
			goto out_request;
		}

		GEM_BUG_ON(!i915_gem_request_completed(request[id]));
	}

	err = end_live_test(&t);

out_request:
	for_each_engine(engine, i915, id) {
		u32 *cmd;

		if (!request[id])
			break;

		cmd = i915_gem_object_pin_map(request[id]->batch->obj,
					      I915_MAP_WC);
		if (!IS_ERR(cmd)) {
			*cmd = MI_BATCH_BUFFER_END;
			wmb();
			i915_gem_object_unpin_map(request[id]->batch->obj);
		}

		i915_vma_put(request[id]->batch);
		i915_gem_request_put(request[id]);
	}
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

int i915_gem_request_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_nop_request),
		SUBTEST(live_all_engines),
		SUBTEST(live_sequential_engines),
		SUBTEST(live_empty_request),
	};
	return i915_subtests(tests, i915);
}