/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/prefetch.h>
#include <linux/dma-fence-array.h>

#include "i915_drv.h"

static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{
	return "i915";
}

static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
{
	return to_request(fence)->timeline->common->name;
}

static bool i915_fence_signaled(struct dma_fence *fence)
{
	return i915_gem_request_completed(to_request(fence));
}

static bool i915_fence_enable_signaling(struct dma_fence *fence)
{
	if (i915_fence_signaled(fence))
		return false;

	intel_engine_enable_signaling(to_request(fence));
	return true;
}

static signed long i915_fence_wait(struct dma_fence *fence,
				   bool interruptible,
				   signed long timeout)
{
	return i915_wait_request(to_request(fence), interruptible, timeout);
}

static void i915_fence_release(struct dma_fence *fence)
{
	struct drm_i915_gem_request *req = to_request(fence);

	kmem_cache_free(req->i915->requests, req);
}

const struct dma_fence_ops i915_fence_ops = {
	.get_driver_name = i915_fence_get_driver_name,
	.get_timeline_name = i915_fence_get_timeline_name,
	.enable_signaling = i915_fence_enable_signaling,
	.signaled = i915_fence_signaled,
	.wait = i915_fence_wait,
	.release = i915_fence_release,
};

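/* Associate a request with the file (client) that submitted it, so that the
 * client's outstanding requests can be found again via
 * file_priv->mm.request_list, typically for throttling and for cleanup when
 * the file is closed.
 */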
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
				   struct drm_file *file)
{
	struct drm_i915_private *dev_private;
	struct drm_i915_file_private *file_priv;

	WARN_ON(!req || !file || req->file_priv);

	if (!req || !file)
		return -EINVAL;

	if (req->file_priv)
		return -EINVAL;

	dev_private = req->i915;
	file_priv = file->driver_priv;

	spin_lock(&file_priv->mm.lock);
	req->file_priv = file_priv;
	list_add_tail(&req->client_list, &file_priv->mm.request_list);
	spin_unlock(&file_priv->mm.lock);

	return 0;
}

static void
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
	struct drm_i915_file_private *file_priv = request->file_priv;

	if (!file_priv)
		return;

	spin_lock(&file_priv->mm.lock);
	list_del(&request->client_list);
	request->file_priv = NULL;
	spin_unlock(&file_priv->mm.lock);
}

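/* The i915_dependency nodes below link requests (via their i915_priotree)
 * into a graph of signalers and waiters that the scheduler backend can walk
 * when adjusting priorities. They are allocated from a dedicated slab cache.
 */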
static struct i915_dependency *
i915_dependency_alloc(struct drm_i915_private *i915)
{
	return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
}

static void
i915_dependency_free(struct drm_i915_private *i915,
		     struct i915_dependency *dep)
{
	kmem_cache_free(i915->dependencies, dep);
}

static void
__i915_priotree_add_dependency(struct i915_priotree *pt,
			       struct i915_priotree *signal,
			       struct i915_dependency *dep,
			       unsigned long flags)
{
	INIT_LIST_HEAD(&dep->dfs_link);
	list_add(&dep->wait_link, &signal->waiters_list);
	list_add(&dep->signal_link, &pt->signalers_list);
	dep->signaler = signal;
	dep->flags = flags;
}

static int
i915_priotree_add_dependency(struct drm_i915_private *i915,
			     struct i915_priotree *pt,
			     struct i915_priotree *signal)
{
	struct i915_dependency *dep;

	dep = i915_dependency_alloc(i915);
	if (!dep)
		return -ENOMEM;

	__i915_priotree_add_dependency(pt, signal, dep, I915_DEPENDENCY_ALLOC);
	return 0;
}

static void
i915_priotree_fini(struct drm_i915_private *i915, struct i915_priotree *pt)
{
	struct i915_dependency *dep, *next;

	GEM_BUG_ON(!RB_EMPTY_NODE(&pt->node));

	/* Everyone we depended upon (the fences we wait to be signaled)
	 * should retire before us and remove themselves from our list.
	 * However, retirement is run independently on each timeline and
	 * so we may be called out-of-order.
	 */
	list_for_each_entry_safe(dep, next, &pt->signalers_list, signal_link) {
		list_del(&dep->wait_link);
		if (dep->flags & I915_DEPENDENCY_ALLOC)
			i915_dependency_free(i915, dep);
	}

	/* Remove ourselves from everyone who depends upon us */
	list_for_each_entry_safe(dep, next, &pt->waiters_list, wait_link) {
		list_del(&dep->signal_link);
		if (dep->flags & I915_DEPENDENCY_ALLOC)
			i915_dependency_free(i915, dep);
	}
}

static void
i915_priotree_init(struct i915_priotree *pt)
{
	INIT_LIST_HEAD(&pt->signalers_list);
	INIT_LIST_HEAD(&pt->waiters_list);
	RB_CLEAR_NODE(&pt->node);
	pt->priority = INT_MIN;
}

void i915_gem_retire_noop(struct i915_gem_active *active,
			  struct drm_i915_gem_request *request)
{
	/* Space left intentionally blank */
}

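/* Retire a single completed request: unlink it from the engine timeline and
 * the ring, run the ->retire() callback of every i915_gem_active that was
 * tracking it, drop its context references and signal its fence. Requests
 * must be retired in completion order, with struct_mutex held.
 */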
static void i915_gem_request_retire(struct drm_i915_gem_request *request)
{
	struct i915_gem_active *active, *next;

	lockdep_assert_held(&request->i915->drm.struct_mutex);
	GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
	GEM_BUG_ON(!i915_sw_fence_signaled(&request->execute));
	GEM_BUG_ON(!i915_gem_request_completed(request));
	GEM_BUG_ON(!request->i915->gt.active_requests);

	trace_i915_gem_request_retire(request);

	spin_lock_irq(&request->engine->timeline->lock);
	list_del_init(&request->link);
	spin_unlock_irq(&request->engine->timeline->lock);

	/* We know the GPU must have read the request to have
	 * sent us the seqno + interrupt, so use the position
	 * of tail of the request to update the last known position
	 * of the GPU head.
	 *
	 * Note this requires that we are always called in request
	 * completion order.
	 */
	list_del(&request->ring_link);
	request->ring->last_retired_head = request->postfix;
	if (!--request->i915->gt.active_requests) {
		GEM_BUG_ON(!request->i915->gt.awake);
		mod_delayed_work(request->i915->wq,
				 &request->i915->gt.idle_work,
				 msecs_to_jiffies(100));
	}

	/* Walk through the active list, calling retire on each. This allows
	 * objects to track their GPU activity and mark themselves as idle
	 * when their *last* active request is completed (updating state
	 * tracking lists for eviction, active references for GEM, etc).
	 *
	 * As the ->retire() may free the node, we decouple it first and
	 * pass along the auxiliary information (to avoid dereferencing
	 * the node after the callback).
	 */
	list_for_each_entry_safe(active, next, &request->active_list, link) {
		/* In microbenchmarks or focusing upon time inside the kernel,
		 * we may spend an inordinate amount of time simply handling
		 * the retirement of requests and processing their callbacks.
		 * Of which, this loop itself is particularly hot due to the
		 * cache misses when jumping around the list of i915_gem_active.
		 * So we try to keep this loop as streamlined as possible and
		 * also prefetch the next i915_gem_active to try and hide
		 * the likely cache miss.
		 */
		prefetchw(next);

		INIT_LIST_HEAD(&active->link);
		RCU_INIT_POINTER(active->request, NULL);

		active->retire(active, request);
	}

	i915_gem_request_remove_from_client(request);

	if (request->previous_context) {
		if (i915.enable_execlists)
			intel_lr_context_unpin(request->previous_context,
					       request->engine);
	}

	i915_gem_context_put(request->ctx);

	dma_fence_signal(&request->fence);

	i915_priotree_fini(request->i915, &request->priotree);
	i915_gem_request_put(request);
}

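/* Retire every request on the engine timeline up to and including @req. */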
void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	struct drm_i915_gem_request *tmp;

	lockdep_assert_held(&req->i915->drm.struct_mutex);
	if (list_empty(&req->link))
		return;

	do {
		tmp = list_first_entry(&engine->timeline->requests,
				       typeof(*tmp), link);

		i915_gem_request_retire(tmp);
	} while (tmp != req);
}

static int i915_gem_check_wedge(struct drm_i915_private *dev_priv)
{
	struct i915_gpu_error *error = &dev_priv->gpu_error;

	if (i915_terminally_wedged(error))
		return -EIO;

	if (i915_reset_in_progress(error)) {
		/* Non-interruptible callers can't handle -EAGAIN, hence return
		 * -EIO unconditionally for these.
		 */
		if (!dev_priv->mm.interruptible)
			return -EIO;

		return -EAGAIN;
	}

	return 0;
}

static int i915_gem_init_global_seqno(struct drm_i915_private *i915, u32 seqno)
{
	struct i915_gem_timeline *timeline = &i915->gt.global_timeline;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int ret;

	/* Carefully retire all requests without writing to the rings */
	ret = i915_gem_wait_for_idle(i915,
				     I915_WAIT_INTERRUPTIBLE |
				     I915_WAIT_LOCKED);
	if (ret)
		return ret;

	i915_gem_retire_requests(i915);
	GEM_BUG_ON(i915->gt.active_requests > 1);

	/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
	if (!i915_seqno_passed(seqno, atomic_read(&timeline->next_seqno))) {
		while (intel_breadcrumbs_busy(i915))
			cond_resched(); /* spin until threads are complete */
	}
	atomic_set(&timeline->next_seqno, seqno);

	/* Finally reset hw state */
	for_each_engine(engine, i915, id)
		intel_engine_init_global_seqno(engine, seqno);

	list_for_each_entry(timeline, &i915->gt.timelines, link) {
		for_each_engine(engine, i915, id) {
			struct intel_timeline *tl = &timeline->engine[id];

			memset(tl->sync_seqno, 0, sizeof(tl->sync_seqno));
		}
	}

	return 0;
}

int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	if (seqno == 0)
		return -EINVAL;

	/* HWS page needs to be set less than what we
	 * will inject to ring
	 */
	return i915_gem_init_global_seqno(dev_priv, seqno - 1);
}

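/* Account for a new request and make sure the global seqno cannot wrap while
 * it is outstanding; if it would, force the wrap (idling the GPU) now.
 */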
static int reserve_global_seqno(struct drm_i915_private *i915)
{
	u32 active_requests = ++i915->gt.active_requests;
	u32 next_seqno = atomic_read(&i915->gt.global_timeline.next_seqno);
	int ret;

	/* Reservation is fine until we need to wrap around */
	if (likely(next_seqno + active_requests > next_seqno))
		return 0;

	ret = i915_gem_init_global_seqno(i915, 0);
	if (ret) {
		i915->gt.active_requests--;
		return ret;
	}

	return 0;
}

static u32 __timeline_get_seqno(struct i915_gem_timeline *tl)
{
	/* next_seqno only incremented under a mutex */
	return ++tl->next_seqno.counter;
}

static u32 timeline_get_seqno(struct i915_gem_timeline *tl)
{
	return atomic_inc_return(&tl->next_seqno);
}

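/* Called with the engine timeline lock held (see i915_gem_request_submit()):
 * assign the request its global seqno, emit the breadcrumb into the ring and
 * move the request onto the global per-engine timeline.
 */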
void __i915_gem_request_submit(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_timeline *timeline;
	u32 seqno;

	/* Transfer from per-context onto the global per-engine timeline */
	timeline = engine->timeline;
	GEM_BUG_ON(timeline == request->timeline);
	assert_spin_locked(&timeline->lock);

	seqno = timeline_get_seqno(timeline->common);
	GEM_BUG_ON(!seqno);
	GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine), seqno));

	GEM_BUG_ON(i915_seqno_passed(timeline->last_submitted_seqno, seqno));
	request->previous_seqno = timeline->last_submitted_seqno;
	timeline->last_submitted_seqno = seqno;

	/* We may be recursing from the signal callback of another i915 fence */
	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
	request->global_seqno = seqno;
	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
		intel_engine_enable_signaling(request);
	spin_unlock(&request->lock);

	GEM_BUG_ON(!request->global_seqno);
	engine->emit_breadcrumb(request,
				request->ring->vaddr + request->postfix);

	spin_lock(&request->timeline->lock);
	list_move_tail(&request->link, &timeline->requests);
	spin_unlock(&request->timeline->lock);

	i915_sw_fence_commit(&request->execute);
}

void i915_gem_request_submit(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	unsigned long flags;

	/* Will be called from irq-context when using foreign fences. */
	spin_lock_irqsave(&engine->timeline->lock, flags);

	__i915_gem_request_submit(request);

	spin_unlock_irqrestore(&engine->timeline->lock, flags);
}

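/* i915_sw_fence notify callbacks for the submit and execute fences: on
 * completion of the submit fence the request is handed to the engine's
 * submit_request() backend, and the extra reference taken for each fence
 * chain is dropped when the fence is freed.
 */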
static int __i915_sw_fence_call
submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	struct drm_i915_gem_request *request =
		container_of(fence, typeof(*request), submit);

	switch (state) {
	case FENCE_COMPLETE:
		request->engine->submit_request(request);
		break;

	case FENCE_FREE:
		i915_gem_request_put(request);
		break;
	}

	return NOTIFY_DONE;
}

static int __i915_sw_fence_call
execute_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	struct drm_i915_gem_request *request =
		container_of(fence, typeof(*request), execute);

	switch (state) {
	case FENCE_COMPLETE:
		break;

	case FENCE_FREE:
		i915_gem_request_put(request);
		break;
	}

	return NOTIFY_DONE;
}

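/* Rough sketch of the request lifecycle as used elsewhere in the driver
 * (illustrative only; error handling and the command emission that normally
 * sits between these calls are omitted):
 *
 *	struct drm_i915_gem_request *rq;
 *
 *	rq = i915_gem_request_alloc(engine, ctx);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *
 *	... emit commands into rq->ring ...
 *
 *	__i915_add_request(rq, true);
 *	i915_wait_request(rq, I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
 *			  MAX_SCHEDULE_TIMEOUT);
 */
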
/**
 * i915_gem_request_alloc - allocate a request structure
 *
 * @engine: engine that we wish to issue the request on.
 * @ctx: context that the request will be associated with.
 *       This can be NULL if the request is not directly related to
 *       any specific user context, in which case this function will
 *       choose an appropriate context to use.
 *
 * Returns a pointer to the allocated request if successful,
 * or an error code if not.
 */
struct drm_i915_gem_request *
i915_gem_request_alloc(struct intel_engine_cs *engine,
		       struct i915_gem_context *ctx)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct drm_i915_gem_request *req;
	int ret;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	/* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
	 * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
	 * and restart.
	 */
	ret = i915_gem_check_wedge(dev_priv);
	if (ret)
		return ERR_PTR(ret);

	ret = reserve_global_seqno(dev_priv);
	if (ret)
		return ERR_PTR(ret);

	/* Move the oldest request to the slab-cache (if not in use!) */
	req = list_first_entry_or_null(&engine->timeline->requests,
				       typeof(*req), link);
	if (req && __i915_gem_request_completed(req))
		i915_gem_request_retire(req);

	/* Beware: Dragons be flying overhead.
	 *
	 * We use RCU to look up requests in flight. The lookups may
	 * race with the request being allocated from the slab freelist.
	 * That is the request we are writing to here, may be in the process
	 * of being read by __i915_gem_active_get_rcu(). As such,
	 * we have to be very careful when overwriting the contents. During
	 * the RCU lookup, we chase the request->engine pointer,
	 * read the request->global_seqno and increment the reference count.
	 *
	 * The reference count is incremented atomically. If it is zero,
	 * the lookup knows the request is unallocated and complete. Otherwise,
	 * it is either still in use, or has been reallocated and reset
	 * with dma_fence_init(). This increment is safe for release as we
	 * check that the request we have a reference to matches the active
	 * request.
	 *
	 * Before we increment the refcount, we chase the request->engine
	 * pointer. We must not call kmem_cache_zalloc() or else we set
	 * that pointer to NULL and cause a crash during the lookup. If
	 * we see the request is completed (based on the value of the
	 * old engine and seqno), the lookup is complete and reports NULL.
	 * If we decide the request is not completed (new engine or seqno),
	 * then we grab a reference and double check that it is still the
	 * active request - which it won't be and restart the lookup.
	 *
	 * Do not use kmem_cache_zalloc() here!
	 */
	req = kmem_cache_alloc(dev_priv->requests, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto err_unreserve;
	}

	req->timeline = i915_gem_context_lookup_timeline(ctx, engine);
	GEM_BUG_ON(req->timeline == engine->timeline);

	spin_lock_init(&req->lock);
	dma_fence_init(&req->fence,
		       &i915_fence_ops,
		       &req->lock,
		       req->timeline->fence_context,
		       __timeline_get_seqno(req->timeline->common));

	/* We bump the ref for the fence chain */
	i915_sw_fence_init(&i915_gem_request_get(req)->submit, submit_notify);
	i915_sw_fence_init(&i915_gem_request_get(req)->execute, execute_notify);

	/* Ensure that the execute fence completes after the submit fence -
	 * as we complete the execute fence from within the submit fence
	 * callback, its completion would otherwise be visible first.
	 */
	i915_sw_fence_await_sw_fence(&req->execute, &req->submit, &req->execq);

	i915_priotree_init(&req->priotree);

	INIT_LIST_HEAD(&req->active_list);
	req->i915 = dev_priv;
	req->engine = engine;
	req->ctx = i915_gem_context_get(ctx);

	/* No zalloc, must clear what we need by hand */
	req->global_seqno = 0;
	req->previous_context = NULL;
	req->file_priv = NULL;

	/*
	 * Reserve space in the ring buffer for all the commands required to
	 * eventually emit this request. This is to guarantee that the
	 * i915_add_request() call can't fail. Note that the reserve may need
	 * to be redone if the request is not actually submitted straight
	 * away, e.g. because a GPU scheduler has deferred it.
	 */
	req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
	GEM_BUG_ON(req->reserved_space < engine->emit_breadcrumb_sz);

	if (i915.enable_execlists)
		ret = intel_logical_ring_alloc_request_extras(req);
	else
		ret = intel_ring_alloc_request_extras(req);
	if (ret)
		goto err_ctx;

	/* Record the position of the start of the request so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the head.
	 */
	req->head = req->ring->tail;

	return req;

err_ctx:
	i915_gem_context_put(ctx);
	kmem_cache_free(dev_priv->requests, req);
err_unreserve:
	dev_priv->gt.active_requests--;
	return ERR_PTR(ret);
}

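/* Order one request after another: depending on whether the two requests
 * share a timeline or an engine, the wait is resolved with a software fence,
 * a semaphore, or a brief busy-spin before falling back to a fence wait.
 */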
static int
i915_gem_request_await_request(struct drm_i915_gem_request *to,
			       struct drm_i915_gem_request *from)
{
	int ret;

	GEM_BUG_ON(to == from);

	if (to->engine->schedule) {
		ret = i915_priotree_add_dependency(to->i915,
						   &to->priotree,
						   &from->priotree);
		if (ret < 0)
			return ret;
	}

	if (to->timeline == from->timeline)
		return 0;

	if (to->engine == from->engine) {
		ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
						       &from->submit,
						       GFP_KERNEL);
		return ret < 0 ? ret : 0;
	}

	if (!from->global_seqno) {
		ret = i915_sw_fence_await_dma_fence(&to->submit,
						    &from->fence, 0,
						    GFP_KERNEL);
		return ret < 0 ? ret : 0;
	}

	if (from->global_seqno <= to->timeline->sync_seqno[from->engine->id])
		return 0;

	trace_i915_gem_ring_sync_to(to, from);
	if (!i915.semaphores) {
		if (!i915_spin_request(from, TASK_INTERRUPTIBLE, 2)) {
			ret = i915_sw_fence_await_dma_fence(&to->submit,
							    &from->fence, 0,
							    GFP_KERNEL);
			if (ret < 0)
				return ret;
		}
	} else {
		ret = to->engine->semaphore.sync_to(to, from);
		if (ret)
			return ret;
	}

	to->timeline->sync_seqno[from->engine->id] = from->global_seqno;
	return 0;
}

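/* Make the request wait upon an arbitrary dma_fence, decomposing fence
 * arrays into their components and taking the native path above when the
 * fence turns out to be one of our own requests.
 */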
int
i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
				 struct dma_fence *fence)
{
	struct dma_fence_array *array;
	int ret;
	int i;

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return 0;

	if (dma_fence_is_i915(fence))
		return i915_gem_request_await_request(req, to_request(fence));

	if (!dma_fence_is_array(fence)) {
		ret = i915_sw_fence_await_dma_fence(&req->submit,
						    fence, I915_FENCE_TIMEOUT,
						    GFP_KERNEL);
		return ret < 0 ? ret : 0;
	}

	/* Note that if the fence-array was created in signal-on-any mode,
	 * we should *not* decompose it into its individual fences. However,
	 * we don't currently store which mode the fence-array is operating
	 * in. Fortunately, the only user of signal-on-any is private to
	 * amdgpu and we should not see any incoming fence-array from
	 * sync-file being in signal-on-any mode.
	 */

	array = to_dma_fence_array(fence);
	for (i = 0; i < array->num_fences; i++) {
		struct dma_fence *child = array->fences[i];

		if (dma_fence_is_i915(child))
			ret = i915_gem_request_await_request(req,
							     to_request(child));
		else
			ret = i915_sw_fence_await_dma_fence(&req->submit,
							    child,
							    I915_FENCE_TIMEOUT,
							    GFP_KERNEL);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/**
 * i915_gem_request_await_object - set this request to (async) wait upon a bo
 *
 * @to: request we are wishing to use
 * @obj: object which may be in use on another ring.
 *
 * This code is meant to abstract object synchronization with the GPU.
 * Conceptually we serialise writes between engines inside the GPU.
 * We only allow one engine to write into a buffer at any time, but
 * multiple readers. To ensure each has a coherent view of memory, we must:
 *
 * - If there is an outstanding write request to the object, the new
 *   request must wait for it to complete (either CPU or in hw, requests
 *   on the same ring will be naturally ordered).
 *
 * - If we are a write request (pending_write_domain is set), the new
 *   request must wait for outstanding read requests to complete.
 *
 * Returns 0 if successful, else propagates up the lower layer error.
 */
int
i915_gem_request_await_object(struct drm_i915_gem_request *to,
			      struct drm_i915_gem_object *obj,
			      bool write)
{
	struct dma_fence *excl;
	int ret = 0;

	if (write) {
		struct dma_fence **shared;
		unsigned int count, i;

		ret = reservation_object_get_fences_rcu(obj->resv,
							&excl, &count,
							&shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			ret = i915_gem_request_await_dma_fence(to, shared[i]);
			if (ret)
				break;

			dma_fence_put(shared[i]);
		}

		for (; i < count; i++)
			dma_fence_put(shared[i]);
		kfree(shared);
	} else {
		excl = reservation_object_get_excl_rcu(obj->resv);
	}

	if (excl) {
		if (ret == 0)
			ret = i915_gem_request_await_dma_fence(to, excl);

		dma_fence_put(excl);
	}

	return ret;
}

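/* First request after idling: take a runtime-pm wakeref, re-enable
 * powersaving features and kick off the retirement worker.
 */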
static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (dev_priv->gt.awake)
		return;

	GEM_BUG_ON(!dev_priv->gt.active_requests);

	intel_runtime_pm_get_noresume(dev_priv);
	dev_priv->gt.awake = true;

	intel_enable_gt_powersave(dev_priv);
	i915_update_gfx_val(dev_priv);
	if (INTEL_GEN(dev_priv) >= 6)
		gen6_rps_busy(dev_priv);

	queue_delayed_work(dev_priv->wq,
			   &dev_priv->gt.retire_work,
			   round_jiffies_up_relative(HZ));
}

/*
 * NB: This function is not allowed to fail. Doing so would mean that the
 * request is not being tracked for completion but the work itself is
 * going to happen on the hardware. This would be a Bad Thing(tm).
 */
void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_ring *ring = request->ring;
	struct intel_timeline *timeline = request->timeline;
	struct drm_i915_gem_request *prev;
	int err;

	lockdep_assert_held(&request->i915->drm.struct_mutex);
	trace_i915_gem_request_add(request);

	/*
	 * To ensure that this call will not fail, space for its emissions
	 * should already have been reserved in the ring buffer. Let the ring
	 * know that it is time to use that space up.
	 */
	request->reserved_space = 0;

	/*
	 * Emit any outstanding flushes - execbuf can fail to emit the flush
	 * after having emitted the batchbuffer command. Hence we need to fix
	 * things up similar to emitting the lazy request. The difference here
	 * is that the flush _must_ happen before the next request, no matter
	 * what.
	 */
	if (flush_caches) {
		err = engine->emit_flush(request, EMIT_FLUSH);

		/* Not allowed to fail! */
		WARN(err, "engine->emit_flush() failed: %d!\n", err);
	}

	/* Record the position of the start of the breadcrumb so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the ring's HEAD.
	 */
	err = intel_ring_begin(request, engine->emit_breadcrumb_sz);
	GEM_BUG_ON(err);
	request->postfix = ring->tail;
	ring->tail += engine->emit_breadcrumb_sz * sizeof(u32);

	/* Seal the request and mark it as pending execution. Note that
	 * we may inspect this state, without holding any locks, during
	 * hangcheck. Hence we apply the barrier to ensure that we do not
	 * see a more recent value in the hws than we are tracking.
	 */
	prev = i915_gem_active_raw(&timeline->last_request,
				   &request->i915->drm.struct_mutex);
	if (prev) {
		i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
					     &request->submitq);
		if (engine->schedule)
			__i915_priotree_add_dependency(&request->priotree,
						       &prev->priotree,
						       &request->dep,
						       0);
	}

	spin_lock_irq(&timeline->lock);
	list_add_tail(&request->link, &timeline->requests);
	spin_unlock_irq(&timeline->lock);

	GEM_BUG_ON(i915_seqno_passed(timeline->last_submitted_seqno,
				     request->fence.seqno));

	timeline->last_submitted_seqno = request->fence.seqno;
	i915_gem_active_set(&timeline->last_request, request);

	list_add_tail(&request->ring_link, &ring->request_list);
	request->emitted_jiffies = jiffies;

	i915_gem_mark_busy(engine);

	/* Let the backend know a new request has arrived that may need
	 * to adjust the existing execution schedule due to a high priority
	 * request - i.e. we may want to preempt the current request in order
	 * to run a high priority dependency chain *before* we can execute this
	 * request.
	 *
	 * This is called before the request is ready to run so that we can
	 * decide whether to preempt the entire chain so that it is ready to
	 * run at the earliest possible convenience.
	 */
	if (engine->schedule)
		engine->schedule(request, request->ctx->priority);

	local_bh_disable();
	i915_sw_fence_commit(&request->submit);
	local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
}

static void reset_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	if (list_empty(&wait->task_list))
		__add_wait_queue(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}

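/* Helpers for the optimistic busy-wait below: timestamps are taken with
 * local_clock() in (approximately) microseconds and are only comparable on
 * the same CPU, so a CPU switch is treated as a reason to stop spinning.
 */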
static unsigned long local_clock_us(unsigned int *cpu)
{
	unsigned long t;

	/* Cheaply and approximately convert from nanoseconds to microseconds.
	 * The result and subsequent calculations are also defined in the same
	 * approximate microseconds units. The principal source of timing
	 * error here is from the simple truncation.
	 *
	 * Note that local_clock() is only defined wrt to the current CPU;
	 * the comparisons are no longer valid if we switch CPUs. Instead of
	 * blocking preemption for the entire busywait, we can detect the CPU
	 * switch and use that as indicator of system load and a reason to
	 * stop busywaiting, see busywait_stop().
	 */
	*cpu = get_cpu();
	t = local_clock() >> 10;
	put_cpu();

	return t;
}

static bool busywait_stop(unsigned long timeout, unsigned int cpu)
{
	unsigned int this_cpu;

	if (time_after(local_clock_us(&this_cpu), timeout))
		return true;

	return this_cpu != cpu;
}

bool __i915_spin_request(const struct drm_i915_gem_request *req,
			 int state, unsigned long timeout_us)
{
	unsigned int cpu;

	/* When waiting for high frequency requests, e.g. during synchronous
	 * rendering split between the CPU and GPU, the finite amount of time
	 * required to set up the irq and wait upon it limits the response
	 * rate. By busywaiting on the request completion for a short while we
	 * can service the high frequency waits as quick as possible. However,
	 * if it is a slow request, we want to sleep as quickly as possible.
	 * The tradeoff between waiting and sleeping is roughly the time it
	 * takes to sleep on a request, on the order of a microsecond.
	 */

	timeout_us += local_clock_us(&cpu);
	do {
		if (__i915_gem_request_completed(req))
			return true;

		if (signal_pending_state(state, current))
			break;

		if (busywait_stop(timeout_us, cpu))
			break;

		cpu_relax();
	} while (!need_resched());

	return false;
}

static long
__i915_request_wait_for_execute(struct drm_i915_gem_request *request,
				unsigned int flags,
				long timeout)
{
	const int state = flags & I915_WAIT_INTERRUPTIBLE ?
		TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
	wait_queue_head_t *q = &request->i915->gpu_error.wait_queue;
	DEFINE_WAIT(reset);
	DEFINE_WAIT(wait);

	if (flags & I915_WAIT_LOCKED)
		add_wait_queue(q, &reset);

	do {
		prepare_to_wait(&request->execute.wait, &wait, state);

		if (i915_sw_fence_done(&request->execute))
			break;

		if (flags & I915_WAIT_LOCKED &&
		    i915_reset_in_progress(&request->i915->gpu_error)) {
			__set_current_state(TASK_RUNNING);
			i915_reset(request->i915);
			reset_wait_queue(q, &reset);
			continue;
		}

		if (signal_pending_state(state, current)) {
			timeout = -ERESTARTSYS;
			break;
		}

		timeout = io_schedule_timeout(timeout);
	} while (timeout);
	finish_wait(&request->execute.wait, &wait);

	if (flags & I915_WAIT_LOCKED)
		remove_wait_queue(q, &reset);

	return timeout;
}

/**
 * i915_wait_request - wait until execution of request has finished
 * @req: the request to wait upon
 * @flags: how to wait
 * @timeout: how long to wait in jiffies
 *
 * i915_wait_request() waits for the request to be completed, for a
 * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
 * unbounded wait).
 *
 * If the caller holds the struct_mutex, the caller must pass I915_WAIT_LOCKED
 * in via the flags, and vice versa if the struct_mutex is not held, the caller
 * must not specify that the wait is locked.
 *
 * Returns the remaining time (in jiffies) if the request completed, which may
 * be zero, or -ETIME if the request is unfinished after the timeout expires.
 * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is
 * pending before the request completes.
 */
long i915_wait_request(struct drm_i915_gem_request *req,
		       unsigned int flags,
		       long timeout)
{
	const int state = flags & I915_WAIT_INTERRUPTIBLE ?
		TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
	DEFINE_WAIT(reset);
	struct intel_wait wait;

	might_sleep();
#if IS_ENABLED(CONFIG_LOCKDEP)
	GEM_BUG_ON(debug_locks &&
		   !!lockdep_is_held(&req->i915->drm.struct_mutex) !=
		   !!(flags & I915_WAIT_LOCKED));
#endif
	GEM_BUG_ON(timeout < 0);

	if (i915_gem_request_completed(req))
		return timeout;

	if (!timeout)
		return -ETIME;

	trace_i915_gem_request_wait_begin(req);

	if (!i915_sw_fence_done(&req->execute)) {
		timeout = __i915_request_wait_for_execute(req, flags, timeout);
		if (timeout < 0)
			goto complete;

		GEM_BUG_ON(!i915_sw_fence_done(&req->execute));
	}
	GEM_BUG_ON(!i915_sw_fence_done(&req->submit));
	GEM_BUG_ON(!req->global_seqno);

	/* Optimistic short spin before touching IRQs */
	if (i915_spin_request(req, state, 5))
		goto complete;

	set_current_state(state);
	if (flags & I915_WAIT_LOCKED)
		add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);

	intel_wait_init(&wait, req->global_seqno);
	if (intel_engine_add_wait(req->engine, &wait))
		/* In order to check that we haven't missed the interrupt
		 * as we enabled it, we need to kick ourselves to do a
		 * coherent check on the seqno before we sleep.
		 */
		goto wakeup;

	for (;;) {
		if (signal_pending_state(state, current)) {
			timeout = -ERESTARTSYS;
			break;
		}

		if (!timeout) {
			timeout = -ETIME;
			break;
		}

		timeout = io_schedule_timeout(timeout);

		if (intel_wait_complete(&wait))
			break;

		set_current_state(state);

wakeup:
		/* Carefully check if the request is complete, giving time
		 * for the seqno to be visible following the interrupt.
		 * We also have to check in case we are kicked by the GPU
		 * reset in order to drop the struct_mutex.
		 */
		if (__i915_request_irq_complete(req))
			break;

		/* If the GPU is hung, and we hold the lock, reset the GPU
		 * and then check for completion. On a full reset, the engine's
		 * HW seqno will be advanced past us and we are complete.
		 * If we do a partial reset, we have to wait for the GPU to
		 * resume and update the breadcrumb.
		 *
		 * If we don't hold the mutex, we can just wait for the worker
		 * to come along and update the breadcrumb (either directly
		 * itself, or indirectly by recovering the GPU).
		 */
		if (flags & I915_WAIT_LOCKED &&
		    i915_reset_in_progress(&req->i915->gpu_error)) {
			__set_current_state(TASK_RUNNING);
			i915_reset(req->i915);
			reset_wait_queue(&req->i915->gpu_error.wait_queue,
					 &reset);
			continue;
		}

		/* Only spin if we know the GPU is processing this request */
		if (i915_spin_request(req, state, 2))
			break;
	}

	intel_engine_remove_wait(req->engine, &wait);
	if (flags & I915_WAIT_LOCKED)
		remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
	__set_current_state(TASK_RUNNING);

complete:
	trace_i915_gem_request_wait_end(req);

	return timeout;
}

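/* Retire completed requests in order along each engine timeline, stopping at
 * the first request that is still busy.
 */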
static void engine_retire_requests(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *request, *next;

	list_for_each_entry_safe(request, next,
				 &engine->timeline->requests, link) {
		if (!__i915_gem_request_completed(request))
			return;

		i915_gem_request_retire(request);
	}
}

void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	if (!dev_priv->gt.active_requests)
		return;

	for_each_engine(engine, dev_priv, id)
		engine_retire_requests(engine);
}