/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#include <linux/prime_numbers.h>

#include "../i915_selftest.h"

#include "mock_context.h"
#include "mock_gem_device.h"
static int igt_add_request(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct drm_i915_gem_request *request;
        int err = -ENOMEM;

        /* Basic preliminary test to create a request and let it loose! */

        mutex_lock(&i915->drm.struct_mutex);
        request = mock_request(i915->engine[RCS],
                               i915->kernel_context,
                               HZ / 10);
        if (!request)
                goto out_unlock;

        i915_add_request(request);

        err = 0;
out_unlock:
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}
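
/*
 * A note on timing for the waits below: mock_request() takes a delay in
 * jiffies, and the mock engine does not signal the request's breadcrumb
 * until that delay has elapsed after submission. T = HZ/4 therefore gives
 * each check a window in which the request is reliably still busy.
 */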
static int igt_wait_request(void *arg)
{
        const long T = HZ / 4;
        struct drm_i915_private *i915 = arg;
        struct drm_i915_gem_request *request;
        int err = -EINVAL;

        /* Submit a request, then wait upon it */

        mutex_lock(&i915->drm.struct_mutex);
        request = mock_request(i915->engine[RCS], i915->kernel_context, T);
        if (!request) {
                err = -ENOMEM;
                goto out_unlock;
        }

        if (i915_wait_request(request, I915_WAIT_LOCKED, 0) != -ETIME) {
                pr_err("request wait (busy query) succeeded (expected timeout before submit!)\n");
                goto out_unlock;
        }

        if (i915_wait_request(request, I915_WAIT_LOCKED, T) != -ETIME) {
                pr_err("request wait succeeded (expected timeout before submit!)\n");
                goto out_unlock;
        }

        if (i915_gem_request_completed(request)) {
                pr_err("request completed before submit!!\n");
                goto out_unlock;
        }

        i915_add_request(request);

        if (i915_wait_request(request, I915_WAIT_LOCKED, 0) != -ETIME) {
                pr_err("request wait (busy query) succeeded (expected timeout after submit!)\n");
                goto out_unlock;
        }

        if (i915_gem_request_completed(request)) {
                pr_err("request completed immediately!\n");
                goto out_unlock;
        }

        if (i915_wait_request(request, I915_WAIT_LOCKED, T / 2) != -ETIME) {
                pr_err("request wait succeeded (expected timeout!)\n");
                goto out_unlock;
        }

        if (i915_wait_request(request, I915_WAIT_LOCKED, T) == -ETIME) {
                pr_err("request wait timed out!\n");
                goto out_unlock;
        }

        if (!i915_gem_request_completed(request)) {
                pr_err("request not complete after waiting!\n");
                goto out_unlock;
        }

        if (i915_wait_request(request, I915_WAIT_LOCKED, T) == -ETIME) {
                pr_err("request wait timed out when already complete!\n");
                goto out_unlock;
        }

        err = 0;
out_unlock:
        mock_device_flush(i915);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}
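
/*
 * Every request embeds a struct dma_fence (request->fence), so the same
 * request can also be waited upon through the generic fence interface,
 * without holding struct_mutex. That path is exercised next.
 */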
static int igt_fence_wait(void *arg)
{
        const long T = HZ / 4;
        struct drm_i915_private *i915 = arg;
        struct drm_i915_gem_request *request;
        int err = -EINVAL;

        /* Submit a request, treat it as a fence and wait upon it */

        mutex_lock(&i915->drm.struct_mutex);
        request = mock_request(i915->engine[RCS], i915->kernel_context, T);
        if (!request) {
                mutex_unlock(&i915->drm.struct_mutex);
                return -ENOMEM;
        }
        mutex_unlock(&i915->drm.struct_mutex); /* safe as we are single user */

        if (dma_fence_wait_timeout(&request->fence, false, T) != -ETIME) {
                pr_err("fence wait success before submit (expected timeout)!\n");
                goto out_device;
        }

        mutex_lock(&i915->drm.struct_mutex);
        i915_add_request(request);
        mutex_unlock(&i915->drm.struct_mutex);

        if (dma_fence_is_signaled(&request->fence)) {
                pr_err("fence signaled immediately!\n");
                goto out_device;
        }

        if (dma_fence_wait_timeout(&request->fence, false, T / 2) != -ETIME) {
                pr_err("fence wait success after submit (expected timeout)!\n");
                goto out_device;
        }

        if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
                pr_err("fence wait timed out (expected success)!\n");
                goto out_device;
        }

        if (!dma_fence_is_signaled(&request->fence)) {
                pr_err("fence unsignaled after waiting!\n");
                goto out_device;
        }

        if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
                pr_err("fence wait timed out when complete (expected success)!\n");
                goto out_device;
        }

        err = 0;
out_device:
        mutex_lock(&i915->drm.struct_mutex);
        mock_device_flush(i915);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}
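
/*
 * igt_request_rewind submits a slow request (2s delay) from context A,
 * then cancels it and injects a zero-delay "vip" request from context B
 * ahead of it, simulating preemption by manual reordering. The vip
 * request must complete while the original request is still pending.
 */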
static int igt_request_rewind(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct drm_i915_gem_request *request, *vip;
        struct i915_gem_context *ctx[2];
        int err = -EINVAL;

        mutex_lock(&i915->drm.struct_mutex);
        ctx[0] = mock_context(i915, "A");
        request = mock_request(i915->engine[RCS], ctx[0], 2 * HZ);
        if (!request) {
                err = -ENOMEM;
                goto err_context_0;
        }

        i915_gem_request_get(request);
        i915_add_request(request);

        ctx[1] = mock_context(i915, "B");
        vip = mock_request(i915->engine[RCS], ctx[1], 0);
        if (!vip) {
                err = -ENOMEM;
                goto err_context_1;
        }

        /* Simulate preemption by manual reordering */
        if (!mock_cancel_request(request)) {
                pr_err("failed to cancel request (already executed)!\n");
                i915_add_request(vip);
                goto err_context_1;
        }
        i915_gem_request_get(vip);
        i915_add_request(vip);
        request->engine->submit_request(request);

        mutex_unlock(&i915->drm.struct_mutex);

        if (i915_wait_request(vip, 0, HZ) == -ETIME) {
                pr_err("timed out waiting for high priority request, vip.seqno=%d, current seqno=%d\n",
                       vip->global_seqno,
                       intel_engine_get_seqno(i915->engine[RCS]));
                goto err;
        }

        if (i915_gem_request_completed(request)) {
                pr_err("low priority request already completed\n");
                goto err;
        }

        err = 0;
err:
        i915_gem_request_put(vip);
        mutex_lock(&i915->drm.struct_mutex);
err_context_1:
        mock_context_close(ctx[1]);
        i915_gem_request_put(request);
err_context_0:
        mock_context_close(ctx[0]);
        mock_device_flush(i915);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}
int i915_gem_request_mock_selftests(void)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(igt_add_request),
                SUBTEST(igt_wait_request),
                SUBTEST(igt_fence_wait),
                SUBTEST(igt_request_rewind),
        };
        struct drm_i915_private *i915;
        int err;

        i915 = mock_gem_device();
        if (!i915)
                return -ENOMEM;

        err = i915_subtests(tests, i915);
        drm_dev_unref(&i915->drm);

        return err;
}
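
/*
 * Scaffolding for the live tests: begin_live_test() idles the GPU and
 * snapshots the reset and missed-interrupt counters, and end_live_test()
 * then checks that the GPU idles again without any resets or missed
 * interrupts having occurred in between.
 */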
struct live_test {
        struct drm_i915_private *i915;
        const char *func;
        const char *name;

        unsigned int reset_count;
};

static int begin_live_test(struct live_test *t,
                           struct drm_i915_private *i915,
                           const char *func,
                           const char *name)
{
        int err;

        t->i915 = i915;
        t->func = func;
        t->name = name;

        err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
        if (err) {
                pr_err("%s(%s): failed to idle before, with err=%d!",
                       func, name, err);
                return err;
        }

        i915->gpu_error.missed_irq_rings = 0;
        t->reset_count = i915_reset_count(&i915->gpu_error);

        return 0;
}
static int end_live_test(struct live_test *t)
{
        struct drm_i915_private *i915 = t->i915;

        i915_gem_retire_requests(i915);

        if (wait_for(intel_engines_are_idle(i915), 10)) {
                pr_err("%s(%s): GPU not idle\n", t->func, t->name);
                return -EIO;
        }

        if (t->reset_count != i915_reset_count(&i915->gpu_error)) {
                pr_err("%s(%s): GPU was reset %d times!\n",
                       t->func, t->name,
                       i915_reset_count(&i915->gpu_error) - t->reset_count);
                return -EIO;
        }

        if (i915->gpu_error.missed_irq_rings) {
                pr_err("%s(%s): Missed interrupts on engines %lx\n",
                       t->func, t->name, i915->gpu_error.missed_irq_rings);
                return -EIO;
        }

        return 0;
}
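
/*
 * The timing loops below submit requests in prime-numbered group sizes
 * (for_each_prime_number_from) so that the sample sizes do not simply
 * alias with any periodic, power-of-two behaviour in the implementation.
 * Only the last request of each group is waited upon, as requests on the
 * same timeline are retired in order, and IGT_TIMEOUT bounds the total
 * runtime of each measurement.
 */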
static int live_nop_request(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct intel_engine_cs *engine;
        struct live_test t;
        unsigned int id;
        int err = -ENODEV;

        /* Submit various sized batches of empty requests, to each engine
         * (individually), and wait for the batch to complete. We can check
         * the overhead of submitting requests to the hardware.
         */

        mutex_lock(&i915->drm.struct_mutex);

        for_each_engine(engine, i915, id) {
                IGT_TIMEOUT(end_time);
                struct drm_i915_gem_request *request;
                unsigned long n, prime;
                ktime_t times[2] = {};

                err = begin_live_test(&t, i915, __func__, engine->name);
                if (err)
                        goto out_unlock;

                for_each_prime_number_from(prime, 1, 8192) {
                        times[1] = ktime_get_raw();

                        for (n = 0; n < prime; n++) {
                                request = i915_gem_request_alloc(engine,
                                                                 i915->kernel_context);
                                if (IS_ERR(request)) {
                                        err = PTR_ERR(request);
                                        goto out_unlock;
                                }

                                /* This space is left intentionally blank.
                                 *
                                 * We do not actually want to perform any
                                 * action with this request, we just want
                                 * to measure the latency in allocation
                                 * and submission of our breadcrumbs -
                                 * ensuring that the bare request is sufficient
                                 * for the system to work (i.e. proper HEAD
                                 * tracking of the rings, interrupt handling,
                                 * etc). It also gives us the lowest bounds
                                 * for latency.
                                 */

                                i915_add_request(request);
                        }
                        i915_wait_request(request,
                                          I915_WAIT_LOCKED,
                                          MAX_SCHEDULE_TIMEOUT);

                        times[1] = ktime_sub(ktime_get_raw(), times[1]);
                        if (prime == 1)
                                times[0] = times[1];

                        if (__igt_timeout(end_time, NULL))
                                break;
                }

                err = end_live_test(&t);
                if (err)
                        goto out_unlock;

                pr_info("Request latencies on %s: 1 = %lluns, %lu = %lluns\n",
                        engine->name,
                        ktime_to_ns(times[0]),
                        prime, div64_u64(ktime_to_ns(times[1]), prime));
        }

out_unlock:
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}
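
/*
 * empty_batch() builds the smallest valid batch - a single
 * MI_BATCH_BUFFER_END - and binds it into the global GTT; the PIN_GLOBAL
 * binding is what allows empty_request() to execute it with
 * I915_DISPATCH_SECURE, as secure dispatch runs from the global GTT.
 */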
static struct i915_vma *empty_batch(struct drm_i915_private *i915)
{
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
        u32 *cmd;
        int err;

        obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
        if (IS_ERR(obj))
                return ERR_CAST(obj);

        cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
        if (IS_ERR(cmd)) {
                err = PTR_ERR(cmd);
                goto err;
        }

        *cmd = MI_BATCH_BUFFER_END;
        i915_gem_object_unpin_map(obj);

        err = i915_gem_object_set_to_gtt_domain(obj, false);
        if (err)
                goto err;

        vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err;
        }

        err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_GLOBAL);
        if (err)
                goto err;

        return vma;

err:
        i915_gem_object_put(obj);
        return ERR_PTR(err);
}
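
/*
 * Note that empty_request() always calls __i915_add_request(), even after
 * an error, as a request that has been allocated and partially emitted
 * must still be submitted to keep the ring in a sane state; the second
 * argument (err == 0) asks for the caches to be flushed only on success.
 */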
static struct drm_i915_gem_request *
empty_request(struct intel_engine_cs *engine,
              struct i915_vma *batch)
{
        struct drm_i915_gem_request *request;
        int err;

        request = i915_gem_request_alloc(engine,
                                         engine->i915->kernel_context);
        if (IS_ERR(request))
                return request;

        err = engine->emit_flush(request, EMIT_INVALIDATE);
        if (err)
                goto out_request;

        err = i915_switch_context(request);
        if (err)
                goto out_request;

        err = engine->emit_bb_start(request,
                                    batch->node.start,
                                    batch->node.size,
                                    I915_DISPATCH_SECURE);

out_request:
        __i915_add_request(request, err == 0);
        return err ? ERR_PTR(err) : request;
}
static int live_empty_request(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct intel_engine_cs *engine;
        struct live_test t;
        struct i915_vma *batch;
        unsigned int id;
        int err = 0;

        /* Submit various sized batches of empty requests, to each engine
         * (individually), and wait for the batch to complete. We can check
         * the overhead of submitting requests to the hardware.
         */

        mutex_lock(&i915->drm.struct_mutex);

        batch = empty_batch(i915);
        if (IS_ERR(batch)) {
                err = PTR_ERR(batch);
                goto out_unlock;
        }

        for_each_engine(engine, i915, id) {
                IGT_TIMEOUT(end_time);
                struct drm_i915_gem_request *request;
                unsigned long n, prime;
                ktime_t times[2] = {};

                err = begin_live_test(&t, i915, __func__, engine->name);
                if (err)
                        goto out_batch;

                /* Warmup / preload */
                request = empty_request(engine, batch);
                if (IS_ERR(request)) {
                        err = PTR_ERR(request);
                        goto out_batch;
                }
                i915_wait_request(request,
                                  I915_WAIT_LOCKED,
                                  MAX_SCHEDULE_TIMEOUT);

                for_each_prime_number_from(prime, 1, 8192) {
                        times[1] = ktime_get_raw();

                        for (n = 0; n < prime; n++) {
                                request = empty_request(engine, batch);
                                if (IS_ERR(request)) {
                                        err = PTR_ERR(request);
                                        goto out_batch;
                                }
                        }
                        i915_wait_request(request,
                                          I915_WAIT_LOCKED,
                                          MAX_SCHEDULE_TIMEOUT);

                        times[1] = ktime_sub(ktime_get_raw(), times[1]);
                        if (prime == 1)
                                times[0] = times[1];

                        if (__igt_timeout(end_time, NULL))
                                break;
                }

                err = end_live_test(&t);
                if (err)
                        goto out_batch;

                pr_info("Batch latencies on %s: 1 = %lluns, %lu = %lluns\n",
                        engine->name,
                        ktime_to_ns(times[0]),
                        prime, div64_u64(ktime_to_ns(times[1]), prime));
        }

out_batch:
        i915_vma_unpin(batch);
        i915_vma_put(batch);
out_unlock:
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}
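
/*
 * recursive_batch() writes a batch whose first instruction is an
 * MI_BATCH_BUFFER_START pointing back at the batch itself, so the GPU
 * spins inside it indefinitely once started. The test releases it at a
 * time of its own choosing by rewriting that first dword to
 * MI_BATCH_BUFFER_END (see recursive_batch_resolve() below). The
 * MI_BATCH_BUFFER_START encoding varies by generation: gen8+ emits a
 * 64-bit address as two dwords, gen6 and later set bit 8 (address space
 * select), and earlier parts use the MI_BATCH_GTT flag instead.
 */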
static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
{
        struct i915_gem_context *ctx = i915->kernel_context;
        struct i915_address_space *vm =
                ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
        struct drm_i915_gem_object *obj;
        const int gen = INTEL_GEN(i915);
        struct i915_vma *vma;
        u32 *cmd;
        int err;

        obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
        if (IS_ERR(obj))
                return ERR_CAST(obj);

        vma = i915_vma_instance(obj, vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err;
        }

        err = i915_vma_pin(vma, 0, 0, PIN_USER);
        if (err)
                goto err;

        err = i915_gem_object_set_to_wc_domain(obj, true);
        if (err)
                goto err;

        cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
        if (IS_ERR(cmd)) {
                err = PTR_ERR(cmd);
                goto err;
        }

        if (gen >= 8) {
                *cmd++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
                *cmd++ = lower_32_bits(vma->node.start);
                *cmd++ = upper_32_bits(vma->node.start);
        } else if (gen >= 6) {
                *cmd++ = MI_BATCH_BUFFER_START | 1 << 8;
                *cmd++ = lower_32_bits(vma->node.start);
        } else if (gen >= 4) {
                *cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
                *cmd++ = lower_32_bits(vma->node.start);
        } else {
                *cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | 1;
                *cmd++ = lower_32_bits(vma->node.start);
        }
        *cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */

        wmb();
        i915_gem_object_unpin_map(obj);

        return vma;

err:
        i915_gem_object_put(obj);
        return ERR_PTR(err);
}
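
/*
 * The batch is mapped write-combining, so after rewriting the commands a
 * wmb() is needed to flush the WC buffers before the GPU can be relied
 * upon to see the MI_BATCH_BUFFER_END.
 */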
static int recursive_batch_resolve(struct i915_vma *batch)
{
        u32 *cmd;

        cmd = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
        if (IS_ERR(cmd))
                return PTR_ERR(cmd);

        *cmd = MI_BATCH_BUFFER_END;
        wmb();

        i915_gem_object_unpin_map(batch->obj);

        return 0;
}
static int live_all_engines(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct intel_engine_cs *engine;
        struct drm_i915_gem_request *request[I915_NUM_ENGINES] = {};
        struct i915_vma *batch;
        struct live_test t;
        unsigned int id;
        int err;

        /* Check we can submit requests to all engines simultaneously. We
         * send a recursive batch to each engine - checking that we don't
         * block doing so, and that they don't complete too soon.
         */

        mutex_lock(&i915->drm.struct_mutex);

        err = begin_live_test(&t, i915, __func__, "");
        if (err)
                goto out_unlock;

        batch = recursive_batch(i915);
        if (IS_ERR(batch)) {
                err = PTR_ERR(batch);
                pr_err("%s: Unable to create batch, err=%d\n", __func__, err);
                goto out_unlock;
        }

        for_each_engine(engine, i915, id) {
                request[id] = i915_gem_request_alloc(engine,
                                                     i915->kernel_context);
                if (IS_ERR(request[id])) {
                        err = PTR_ERR(request[id]);
                        request[id] = NULL;
                        pr_err("%s: Request allocation failed with err=%d\n",
                               __func__, err);
                        goto out_request;
                }

                err = engine->emit_flush(request[id], EMIT_INVALIDATE);
                GEM_BUG_ON(err);

                err = i915_switch_context(request[id]);
                GEM_BUG_ON(err);

                err = engine->emit_bb_start(request[id],
                                            batch->node.start,
                                            batch->node.size,
                                            0);
                GEM_BUG_ON(err);
                request[id]->batch = batch;

                if (!i915_gem_object_has_active_reference(batch->obj)) {
                        i915_gem_object_get(batch->obj);
                        i915_gem_object_set_active_reference(batch->obj);
                }

                i915_vma_move_to_active(batch, request[id], 0);
                i915_gem_request_get(request[id]);
                i915_add_request(request[id]);
        }

        for_each_engine(engine, i915, id) {
                if (i915_gem_request_completed(request[id])) {
                        pr_err("%s(%s): request completed too early!\n",
                               __func__, engine->name);
                        err = -EINVAL;
                        goto out_request;
                }
        }

        err = recursive_batch_resolve(batch);
        if (err) {
                pr_err("%s: failed to resolve batch, err=%d\n", __func__, err);
                goto out_request;
        }

        for_each_engine(engine, i915, id) {
                long timeout;

                timeout = i915_wait_request(request[id],
                                            I915_WAIT_LOCKED,
                                            MAX_SCHEDULE_TIMEOUT);
                if (timeout < 0) {
                        err = timeout;
                        pr_err("%s: error waiting for request on %s, err=%d\n",
                               __func__, engine->name, err);
                        goto out_request;
                }

                GEM_BUG_ON(!i915_gem_request_completed(request[id]));
                i915_gem_request_put(request[id]);
                request[id] = NULL;
        }

        err = end_live_test(&t);

out_request:
        for_each_engine(engine, i915, id)
                if (request[id])
                        i915_gem_request_put(request[id]);
        i915_vma_unpin(batch);
        i915_vma_put(batch);
out_unlock:
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}
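
/*
 * The sequential variant chains each request to its predecessor on the
 * previous engine with i915_gem_request_await_dma_fence(), so request[id]
 * may not begin until request[id - 1] has completed; the test then checks
 * that none of them runs ahead of that ordering.
 */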
static int live_sequential_engines(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct drm_i915_gem_request *request[I915_NUM_ENGINES] = {};
        struct drm_i915_gem_request *prev = NULL;
        struct intel_engine_cs *engine;
        struct live_test t;
        unsigned int id;
        int err;

        /* Check we can submit requests to all engines sequentially, such
         * that each successive request waits for the earlier ones. This
         * tests that we don't execute requests out of order, even though
         * they are running on independent engines.
         */

        mutex_lock(&i915->drm.struct_mutex);

        err = begin_live_test(&t, i915, __func__, "");
        if (err)
                goto out_unlock;

        for_each_engine(engine, i915, id) {
                struct i915_vma *batch;

                batch = recursive_batch(i915);
                if (IS_ERR(batch)) {
                        err = PTR_ERR(batch);
                        pr_err("%s: Unable to create batch for %s, err=%d\n",
                               __func__, engine->name, err);
                        goto out_request;
                }

                request[id] = i915_gem_request_alloc(engine,
                                                     i915->kernel_context);
                if (IS_ERR(request[id])) {
                        err = PTR_ERR(request[id]);
                        request[id] = NULL;
                        pr_err("%s: Request allocation failed for %s with err=%d\n",
                               __func__, engine->name, err);
                        goto out_request;
                }

                if (prev) {
                        err = i915_gem_request_await_dma_fence(request[id],
                                                               &prev->fence);
                        if (err) {
                                i915_add_request(request[id]);
                                request[id] = NULL;
                                pr_err("%s: Request await failed for %s with err=%d\n",
                                       __func__, engine->name, err);
                                goto out_request;
                        }
                }

                err = engine->emit_flush(request[id], EMIT_INVALIDATE);
                GEM_BUG_ON(err);

                err = i915_switch_context(request[id]);
                GEM_BUG_ON(err);

                err = engine->emit_bb_start(request[id],
                                            batch->node.start,
                                            batch->node.size,
                                            0);
                GEM_BUG_ON(err);
                request[id]->batch = batch;

                i915_vma_move_to_active(batch, request[id], 0);
                i915_gem_object_set_active_reference(batch->obj);
                i915_vma_get(batch);

                i915_gem_request_get(request[id]);
                i915_add_request(request[id]);

                prev = request[id];
        }

        for_each_engine(engine, i915, id) {
                long timeout;

                if (i915_gem_request_completed(request[id])) {
                        pr_err("%s(%s): request completed too early!\n",
                               __func__, engine->name);
                        err = -EINVAL;
                        goto out_request;
                }

                err = recursive_batch_resolve(request[id]->batch);
                if (err) {
                        pr_err("%s: failed to resolve batch, err=%d\n",
                               __func__, err);
                        goto out_request;
                }

                timeout = i915_wait_request(request[id],
                                            I915_WAIT_LOCKED,
                                            MAX_SCHEDULE_TIMEOUT);
                if (timeout < 0) {
                        err = timeout;
                        pr_err("%s: error waiting for request on %s, err=%d\n",
                               __func__, engine->name, err);
                        goto out_request;
                }

                GEM_BUG_ON(!i915_gem_request_completed(request[id]));
        }

        err = end_live_test(&t);

out_request:
        for_each_engine(engine, i915, id) {
                u32 *cmd;

                if (!request[id])
                        break;

                cmd = i915_gem_object_pin_map(request[id]->batch->obj,
                                              I915_MAP_WC);
                if (!IS_ERR(cmd)) {
                        *cmd = MI_BATCH_BUFFER_END;
                        wmb();
                        i915_gem_object_unpin_map(request[id]->batch->obj);
                }

                i915_vma_put(request[id]->batch);
                i915_gem_request_put(request[id]);
        }
out_unlock:
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}
int i915_gem_request_live_selftests(struct drm_i915_private *i915)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(live_nop_request),
                SUBTEST(live_all_engines),
                SUBTEST(live_sequential_engines),
                SUBTEST(live_empty_request),
        };

        return i915_subtests(tests, i915);
}