/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#include <linux/prefetch.h>
#include <linux/dma-fence-array.h>

#include "i915_drv.h"
static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{
        return "i915";
}
static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
{
        return to_request(fence)->timeline->common->name;
}
static bool i915_fence_signaled(struct dma_fence *fence)
{
        return i915_gem_request_completed(to_request(fence));
}
static bool i915_fence_enable_signaling(struct dma_fence *fence)
{
        if (i915_fence_signaled(fence))
                return false;

        intel_engine_enable_signaling(to_request(fence));
        return true;
}
static signed long i915_fence_wait(struct dma_fence *fence,
                                   bool interruptible,
                                   signed long timeout)
{
        return i915_wait_request(to_request(fence), interruptible, timeout);
}
static void i915_fence_release(struct dma_fence *fence)
{
        struct drm_i915_gem_request *req = to_request(fence);

        /* The request is put onto a RCU freelist (i.e. the address
         * is immediately reused), mark the fences as being freed now.
         * Otherwise the debugobjects for the fences are only marked as
         * freed when the slab cache itself is freed, and so we would get
         * caught trying to reuse dead objects.
         */
        i915_sw_fence_fini(&req->submit);
        i915_sw_fence_fini(&req->execute);

        kmem_cache_free(req->i915->requests, req);
}
const struct dma_fence_ops i915_fence_ops = {
        .get_driver_name = i915_fence_get_driver_name,
        .get_timeline_name = i915_fence_get_timeline_name,
        .enable_signaling = i915_fence_enable_signaling,
        .signaled = i915_fence_signaled,
        .wait = i915_fence_wait,
        .release = i915_fence_release,
};
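/* For reference, a minimal sketch of how to_request() can recover the
 * containing request from its embedded dma_fence, assuming the fence is
 * the "fence" member of struct drm_i915_gem_request (the actual helper
 * lives in the request header, so this is illustrative only):
 */
#if 0
static inline struct drm_i915_gem_request *
to_request(struct dma_fence *fence)
{
        /* Only valid for fences initialised with i915_fence_ops above */
        return container_of(fence, struct drm_i915_gem_request, fence);
}
#endif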
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
                                   struct drm_file *file)
{
        struct drm_i915_private *dev_private;
        struct drm_i915_file_private *file_priv;

        WARN_ON(!req || !file || req->file_priv);

        if (!req || !file)
                return -EINVAL;

        if (req->file_priv)
                return -EINVAL;

        dev_private = req->i915;
        file_priv = file->driver_priv;

        spin_lock(&file_priv->mm.lock);
        req->file_priv = file_priv;
        list_add_tail(&req->client_list, &file_priv->mm.request_list);
        spin_unlock(&file_priv->mm.lock);

        return 0;
}
static inline void
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
        struct drm_i915_file_private *file_priv = request->file_priv;

        if (!file_priv)
                return;

        spin_lock(&file_priv->mm.lock);
        list_del(&request->client_list);
        request->file_priv = NULL;
        spin_unlock(&file_priv->mm.lock);
}
static struct i915_dependency *
i915_dependency_alloc(struct drm_i915_private *i915)
{
        return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
}
static void
i915_dependency_free(struct drm_i915_private *i915,
                     struct i915_dependency *dep)
{
        kmem_cache_free(i915->dependencies, dep);
}
static void
__i915_priotree_add_dependency(struct i915_priotree *pt,
                               struct i915_priotree *signal,
                               struct i915_dependency *dep,
                               unsigned long flags)
{
        INIT_LIST_HEAD(&dep->dfs_link);
        list_add(&dep->wait_link, &signal->waiters_list);
        list_add(&dep->signal_link, &pt->signalers_list);
        dep->signaler = signal;
        dep->flags = flags;
}
static int
i915_priotree_add_dependency(struct drm_i915_private *i915,
                             struct i915_priotree *pt,
                             struct i915_priotree *signal)
{
        struct i915_dependency *dep;

        dep = i915_dependency_alloc(i915);
        if (!dep)
                return -ENOMEM;

        __i915_priotree_add_dependency(pt, signal, dep, I915_DEPENDENCY_ALLOC);
        return 0;
}
static void
i915_priotree_fini(struct drm_i915_private *i915, struct i915_priotree *pt)
{
        struct i915_dependency *dep, *next;

        GEM_BUG_ON(!RB_EMPTY_NODE(&pt->node));

        /* Everyone we depended upon (the fences we wait to be signaled)
         * should retire before us and remove themselves from our list.
         * However, retirement is run independently on each timeline and
         * so we may be called out-of-order.
         */
        list_for_each_entry_safe(dep, next, &pt->signalers_list, signal_link) {
                list_del(&dep->wait_link);
                if (dep->flags & I915_DEPENDENCY_ALLOC)
                        i915_dependency_free(i915, dep);
        }

        /* Remove ourselves from everyone who depends upon us */
        list_for_each_entry_safe(dep, next, &pt->waiters_list, wait_link) {
                list_del(&dep->signal_link);
                if (dep->flags & I915_DEPENDENCY_ALLOC)
                        i915_dependency_free(i915, dep);
        }
}
static void
i915_priotree_init(struct i915_priotree *pt)
{
        INIT_LIST_HEAD(&pt->signalers_list);
        INIT_LIST_HEAD(&pt->waiters_list);
        RB_CLEAR_NODE(&pt->node);
        pt->priority = INT_MIN;
}
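/* A minimal sketch of how the scheduler links two requests' priotrees:
 * a request B that depends on request A records itself on A's waiters_list
 * and records A on its own signalers_list via the helpers above. The
 * surrounding request setup is assumed; this is illustrative only:
 */
#if 0
static int example_link_requests(struct drm_i915_private *i915,
                                 struct drm_i915_gem_request *a,
                                 struct drm_i915_gem_request *b)
{
        /* b must not be executed until a has been signaled */
        return i915_priotree_add_dependency(i915, &b->priotree, &a->priotree);
}
#endif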
void i915_gem_retire_noop(struct i915_gem_active *active,
                          struct drm_i915_gem_request *request)
{
        /* Space left intentionally blank */
}
static void i915_gem_request_retire(struct drm_i915_gem_request *request)
{
        struct intel_engine_cs *engine = request->engine;
        struct i915_gem_active *active, *next;

        lockdep_assert_held(&request->i915->drm.struct_mutex);
        GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
        GEM_BUG_ON(!i915_sw_fence_signaled(&request->execute));
        GEM_BUG_ON(!i915_gem_request_completed(request));
        GEM_BUG_ON(!request->i915->gt.active_requests);

        trace_i915_gem_request_retire(request);

        spin_lock_irq(&engine->timeline->lock);
        list_del_init(&request->link);
        spin_unlock_irq(&engine->timeline->lock);

        /* We know the GPU must have read the request to have
         * sent us the seqno + interrupt, so use the position
         * of tail of the request to update the last known position
         * of the GPU head.
         *
         * Note this requires that we are always called in request
         * completion order.
         */
        list_del(&request->ring_link);
        request->ring->last_retired_head = request->postfix;
        if (!--request->i915->gt.active_requests) {
                GEM_BUG_ON(!request->i915->gt.awake);
                mod_delayed_work(request->i915->wq,
                                 &request->i915->gt.idle_work,
                                 msecs_to_jiffies(100));
        }

        /* Walk through the active list, calling retire on each. This allows
         * objects to track their GPU activity and mark themselves as idle
         * when their *last* active request is completed (updating state
         * tracking lists for eviction, active references for GEM, etc).
         *
         * As the ->retire() may free the node, we decouple it first and
         * pass along the auxiliary information (to avoid dereferencing
         * the node after the callback).
         */
        list_for_each_entry_safe(active, next, &request->active_list, link) {
                /* In microbenchmarks or focusing upon time inside the kernel,
                 * we may spend an inordinate amount of time simply handling
                 * the retirement of requests and processing their callbacks.
                 * Of which, this loop itself is particularly hot due to the
                 * cache misses when jumping around the list of i915_gem_active.
                 * So we try to keep this loop as streamlined as possible and
                 * also prefetch the next i915_gem_active to try and hide
                 * the likely cache miss.
                 */
                prefetchw(next);

                INIT_LIST_HEAD(&active->link);
                RCU_INIT_POINTER(active->request, NULL);

                active->retire(active, request);
        }

        i915_gem_request_remove_from_client(request);

        /* Retirement decays the ban score as it is a sign of ctx progress */
        if (request->ctx->ban_score > 0)
                request->ctx->ban_score--;

        /* The backing object for the context is done after switching to the
         * *next* context. Therefore we cannot retire the previous context until
         * the next context has already started running. However, since we
         * cannot take the required locks at i915_gem_request_submit() we
         * defer the unpinning of the active context to now, retirement of
         * the subsequent request.
         */
        if (engine->last_retired_context)
                engine->context_unpin(engine, engine->last_retired_context);
        engine->last_retired_context = request->ctx;

        dma_fence_signal(&request->fence);

        i915_priotree_fini(request->i915, &request->priotree);
        i915_gem_request_put(request);
}
void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
{
        struct intel_engine_cs *engine = req->engine;
        struct drm_i915_gem_request *tmp;

        lockdep_assert_held(&req->i915->drm.struct_mutex);
        GEM_BUG_ON(!i915_gem_request_completed(req));

        if (list_empty(&req->link))
                return;

        do {
                tmp = list_first_entry(&engine->timeline->requests,
                                       typeof(*tmp), link);

                i915_gem_request_retire(tmp);
        } while (tmp != req);
}
static int i915_gem_check_wedge(struct drm_i915_private *dev_priv)
{
        struct i915_gpu_error *error = &dev_priv->gpu_error;

        if (i915_terminally_wedged(error))
                return -EIO;

        if (i915_reset_in_progress(error)) {
                /* Non-interruptible callers can't handle -EAGAIN, hence return
                 * -EIO unconditionally for these.
                 */
                if (!dev_priv->mm.interruptible)
                        return -EIO;

                return -EAGAIN;
        }

        return 0;
}
static int i915_gem_init_global_seqno(struct drm_i915_private *i915, u32 seqno)
{
        struct i915_gem_timeline *timeline = &i915->gt.global_timeline;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int ret;

        /* Carefully retire all requests without writing to the rings */
        ret = i915_gem_wait_for_idle(i915,
                                     I915_WAIT_INTERRUPTIBLE |
                                     I915_WAIT_LOCKED);
        if (ret)
                return ret;

        i915_gem_retire_requests(i915);
        GEM_BUG_ON(i915->gt.active_requests > 1);

        /* If the seqno wraps around, we need to clear the breadcrumb rbtree */
        if (!i915_seqno_passed(seqno, atomic_read(&timeline->seqno))) {
                while (intel_breadcrumbs_busy(i915))
                        cond_resched(); /* spin until threads are complete */
        }
        atomic_set(&timeline->seqno, seqno);

        /* Finally reset hw state */
        for_each_engine(engine, i915, id)
                intel_engine_init_global_seqno(engine, seqno);

        list_for_each_entry(timeline, &i915->gt.timelines, link) {
                for_each_engine(engine, i915, id) {
                        struct intel_timeline *tl = &timeline->engine[id];

                        memset(tl->sync_seqno, 0, sizeof(tl->sync_seqno));
                }
        }

        return 0;
}
int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
{
        struct drm_i915_private *dev_priv = to_i915(dev);

        lockdep_assert_held(&dev_priv->drm.struct_mutex);

        if (seqno == 0)
                return -EINVAL;

        /* HWS page needs to be set less than what we
         * will inject to ring
         */
        return i915_gem_init_global_seqno(dev_priv, seqno - 1);
}
static int reserve_global_seqno(struct drm_i915_private *i915)
{
        u32 active_requests = ++i915->gt.active_requests;
        u32 seqno = atomic_read(&i915->gt.global_timeline.seqno);
        int ret;

        /* Reservation is fine until we need to wrap around */
        if (likely(seqno + active_requests > seqno))
                return 0;

        ret = i915_gem_init_global_seqno(i915, 0);
        if (ret) {
                i915->gt.active_requests--;
                return ret;
        }

        return 0;
}
static u32 __timeline_get_seqno(struct i915_gem_timeline *tl)
{
        /* seqno only incremented under a mutex */
        return ++tl->seqno.counter;
}
static u32 timeline_get_seqno(struct i915_gem_timeline *tl)
{
        return atomic_inc_return(&tl->seqno);
}
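/* The wrap check in reserve_global_seqno() relies on unsigned overflow;
 * a short sketch of the arithmetic (illustrative only, not part of the
 * driver):
 */
#if 0
static bool example_seqno_would_wrap(u32 seqno, u32 active_requests)
{
        /* e.g. seqno = 0xfffffffe, active_requests = 3:
         * seqno + active_requests wraps to 1, which is not > seqno,
         * so the slow path resets the global seqno before proceeding.
         */
        return seqno + active_requests <= seqno;
}
#endif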
void __i915_gem_request_submit(struct drm_i915_gem_request *request)
{
        struct intel_engine_cs *engine = request->engine;
        struct intel_timeline *timeline;
        u32 seqno;

        /* Transfer from per-context onto the global per-engine timeline */
        timeline = engine->timeline;
        GEM_BUG_ON(timeline == request->timeline);
        assert_spin_locked(&timeline->lock);

        seqno = timeline_get_seqno(timeline->common);

        GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine), seqno));

        GEM_BUG_ON(i915_seqno_passed(timeline->last_submitted_seqno, seqno));
        request->previous_seqno = timeline->last_submitted_seqno;
        timeline->last_submitted_seqno = seqno;

        /* We may be recursing from the signal callback of another i915 fence */
        spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
        request->global_seqno = seqno;
        if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
                intel_engine_enable_signaling(request);
        spin_unlock(&request->lock);

        GEM_BUG_ON(!request->global_seqno);
        engine->emit_breadcrumb(request,
                                request->ring->vaddr + request->postfix);

        spin_lock(&request->timeline->lock);
        list_move_tail(&request->link, &timeline->requests);
        spin_unlock(&request->timeline->lock);

        i915_sw_fence_commit(&request->execute);
}
void i915_gem_request_submit(struct drm_i915_gem_request *request)
{
        struct intel_engine_cs *engine = request->engine;
        unsigned long flags;

        /* Will be called from irq-context when using foreign fences. */
        spin_lock_irqsave(&engine->timeline->lock, flags);

        __i915_gem_request_submit(request);

        spin_unlock_irqrestore(&engine->timeline->lock, flags);
}
static int __i915_sw_fence_call
submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
        struct drm_i915_gem_request *request =
                container_of(fence, typeof(*request), submit);

        switch (state) {
        case FENCE_COMPLETE:
                request->engine->submit_request(request);
                break;

        case FENCE_FREE:
                i915_gem_request_put(request);
                break;
        }

        return NOTIFY_DONE;
}
static int __i915_sw_fence_call
execute_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
        struct drm_i915_gem_request *request =
                container_of(fence, typeof(*request), execute);

        switch (state) {
        case FENCE_COMPLETE:
                break;

        case FENCE_FREE:
                i915_gem_request_put(request);
                break;
        }

        return NOTIFY_DONE;
}
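/* Both callbacks follow the i915_sw_fence notify pattern: the embedding
 * request is recovered with container_of() and the state enum selects
 * between completion and final-free handling. A minimal sketch of a
 * hypothetical callback in this style (name invented for illustration):
 */
#if 0
static int __i915_sw_fence_call
example_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
        switch (state) {
        case FENCE_COMPLETE:
                /* react to the fence being signaled */
                break;

        case FENCE_FREE:
                /* drop any reference taken for the fence chain */
                break;
        }

        return NOTIFY_DONE;
}
#endif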
/**
 * i915_gem_request_alloc - allocate a request structure
 *
 * @engine: engine that we wish to issue the request on.
 * @ctx: context that the request will be associated with.
 *       This can be NULL if the request is not directly related to
 *       any specific user context, in which case this function will
 *       choose an appropriate context to use.
 *
 * Returns a pointer to the allocated request if successful,
 * or an error code if not.
 */
struct drm_i915_gem_request *
i915_gem_request_alloc(struct intel_engine_cs *engine,
                       struct i915_gem_context *ctx)
{
        struct drm_i915_private *dev_priv = engine->i915;
        struct drm_i915_gem_request *req;
        int ret;

        lockdep_assert_held(&dev_priv->drm.struct_mutex);

        /* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
         * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
         * and restart.
         */
        ret = i915_gem_check_wedge(dev_priv);
        if (ret)
                return ERR_PTR(ret);

        /* Pinning the contexts may generate requests in order to acquire
         * GGTT space, so do this first before we reserve a seqno for
         * ourselves.
         */
        ret = engine->context_pin(engine, ctx);
        if (ret)
                return ERR_PTR(ret);

        ret = reserve_global_seqno(dev_priv);
        if (ret)
                goto err_unpin;

        /* Move the oldest request to the slab-cache (if not in use!) */
        req = list_first_entry_or_null(&engine->timeline->requests,
                                       typeof(*req), link);
        if (req && __i915_gem_request_completed(req))
                i915_gem_request_retire(req);

        /* Beware: Dragons be flying overhead.
         *
         * We use RCU to look up requests in flight. The lookups may
         * race with the request being allocated from the slab freelist.
         * That is, the request we are writing to here may be in the process
         * of being read by __i915_gem_active_get_rcu(). As such,
         * we have to be very careful when overwriting the contents. During
         * the RCU lookup, we chase the request->engine pointer,
         * read the request->global_seqno and increment the reference count.
         *
         * The reference count is incremented atomically. If it is zero,
         * the lookup knows the request is unallocated and complete. Otherwise,
         * it is either still in use, or has been reallocated and reset
         * with dma_fence_init(). This increment is safe for release as we
         * check that the request we have a reference to matches the active
         * request.
         *
         * Before we increment the refcount, we chase the request->engine
         * pointer. We must not call kmem_cache_zalloc() or else we set
         * that pointer to NULL and cause a crash during the lookup. If
         * we see the request is completed (based on the value of the
         * old engine and seqno), the lookup is complete and reports NULL.
         * If we decide the request is not completed (new engine or seqno),
         * then we grab a reference and double check that it is still the
         * active request - which it won't be, and restart the lookup.
         *
         * Do not use kmem_cache_zalloc() here!
         */
        req = kmem_cache_alloc(dev_priv->requests, GFP_KERNEL);
        if (!req) {
                ret = -ENOMEM;
                goto err_unreserve;
        }

        req->timeline = i915_gem_context_lookup_timeline(ctx, engine);
        GEM_BUG_ON(req->timeline == engine->timeline);

        spin_lock_init(&req->lock);
        dma_fence_init(&req->fence,
                       &i915_fence_ops,
                       &req->lock,
                       req->timeline->fence_context,
                       __timeline_get_seqno(req->timeline->common));

        /* We bump the ref for the fence chain */
        i915_sw_fence_init(&i915_gem_request_get(req)->submit, submit_notify);
        i915_sw_fence_init(&i915_gem_request_get(req)->execute, execute_notify);

        /* Ensure that the execute fence completes after the submit fence -
         * as we complete the execute fence from within the submit fence
         * callback, its completion would otherwise be visible first.
         */
        i915_sw_fence_await_sw_fence(&req->execute, &req->submit, &req->execq);

        i915_priotree_init(&req->priotree);

        INIT_LIST_HEAD(&req->active_list);
        req->i915 = dev_priv;
        req->engine = engine;
        req->ctx = ctx;

        /* No zalloc, must clear what we need by hand */
        req->global_seqno = 0;
        req->file_priv = NULL;
        req->batch = NULL;

        /*
         * Reserve space in the ring buffer for all the commands required to
         * eventually emit this request. This is to guarantee that the
         * i915_add_request() call can't fail. Note that the reserve may need
         * to be redone if the request is not actually submitted straight
         * away, e.g. because a GPU scheduler has deferred it.
         */
        req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
        GEM_BUG_ON(req->reserved_space < engine->emit_breadcrumb_sz);

        ret = engine->request_alloc(req);
        if (ret)
                goto err_ctx;

        /* Record the position of the start of the request so that
         * should we detect the updated seqno part-way through the
         * GPU processing the request, we never over-estimate the
         * position of the head.
         */
        req->head = req->ring->tail;

        return req;

err_ctx:
        /* Make sure we didn't add ourselves to external state before freeing */
        GEM_BUG_ON(!list_empty(&req->active_list));
        GEM_BUG_ON(!list_empty(&req->priotree.signalers_list));
        GEM_BUG_ON(!list_empty(&req->priotree.waiters_list));

        kmem_cache_free(dev_priv->requests, req);
err_unreserve:
        dev_priv->gt.active_requests--;
err_unpin:
        engine->context_unpin(engine, ctx);
        return ERR_PTR(ret);
}
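/* A minimal sketch of the request lifecycle as a caller sees it, under the
 * assumption that struct_mutex is held and "engine"/"ctx" are already set
 * up (error handling abbreviated; illustrative only):
 */
#if 0
static void example_emit_request(struct intel_engine_cs *engine,
                                 struct i915_gem_context *ctx)
{
        struct drm_i915_gem_request *req;

        req = i915_gem_request_alloc(engine, ctx);
        if (IS_ERR(req))
                return;

        /* ... emit commands into req->ring here ... */

        __i915_add_request(req, true); /* seal and queue for submission */
}
#endif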
static int
i915_gem_request_await_request(struct drm_i915_gem_request *to,
                               struct drm_i915_gem_request *from)
{
        int ret;

        GEM_BUG_ON(to == from);

        if (to->engine->schedule) {
                ret = i915_priotree_add_dependency(to->i915,
                                                   &to->priotree,
                                                   &from->priotree);
                if (ret < 0)
                        return ret;
        }

        if (to->timeline == from->timeline)
                return 0;

        if (to->engine == from->engine) {
                ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
                                                       &from->submit,
                                                       GFP_KERNEL);
                return ret < 0 ? ret : 0;
        }

        if (!from->global_seqno) {
                ret = i915_sw_fence_await_dma_fence(&to->submit,
                                                    &from->fence, 0,
                                                    GFP_KERNEL);
                return ret < 0 ? ret : 0;
        }

        if (from->global_seqno <= to->timeline->sync_seqno[from->engine->id])
                return 0;

        trace_i915_gem_ring_sync_to(to, from);
        if (!i915.semaphores) {
                if (!i915_spin_request(from, TASK_INTERRUPTIBLE, 2)) {
                        ret = i915_sw_fence_await_dma_fence(&to->submit,
                                                            &from->fence, 0,
                                                            GFP_KERNEL);
                        if (ret < 0)
                                return ret;
                }
        } else {
                ret = to->engine->semaphore.sync_to(to, from);
                if (ret)
                        return ret;
        }

        to->timeline->sync_seqno[from->engine->id] = from->global_seqno;
        return 0;
}
int
i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
                                 struct dma_fence *fence)
{
        struct dma_fence_array *array;
        int ret;
        int i;

        if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                return 0;

        if (dma_fence_is_i915(fence))
                return i915_gem_request_await_request(req, to_request(fence));

        if (!dma_fence_is_array(fence)) {
                ret = i915_sw_fence_await_dma_fence(&req->submit,
                                                    fence, I915_FENCE_TIMEOUT,
                                                    GFP_KERNEL);
                return ret < 0 ? ret : 0;
        }

        /* Note that if the fence-array was created in signal-on-any mode,
         * we should *not* decompose it into its individual fences. However,
         * we don't currently store which mode the fence-array is operating
         * in. Fortunately, the only user of signal-on-any is private to
         * amdgpu and we should not see any incoming fence-array from
         * sync-file being in signal-on-any mode.
         */

        array = to_dma_fence_array(fence);
        for (i = 0; i < array->num_fences; i++) {
                struct dma_fence *child = array->fences[i];

                if (dma_fence_is_i915(child))
                        ret = i915_gem_request_await_request(req,
                                                             to_request(child));
                else
                        ret = i915_sw_fence_await_dma_fence(&req->submit,
                                                            child,
                                                            I915_FENCE_TIMEOUT,
                                                            GFP_KERNEL);
                if (ret < 0)
                        return ret;
        }

        return 0;
}
/**
 * i915_gem_request_await_object - set this request to (async) wait upon a bo
 *
 * @to: request we are wishing to use
 * @obj: object which may be in use on another ring.
 * @write: whether the wait is on behalf of a writer
 *
 * This code is meant to abstract object synchronization with the GPU.
 * Conceptually we serialise writes between engines inside the GPU.
 * We only allow one engine to write into a buffer at any time, but
 * multiple readers. To ensure each has a coherent view of memory, we must:
 *
 * - If there is an outstanding write request to the object, the new
 *   request must wait for it to complete (either CPU or in hw, requests
 *   on the same ring will be naturally ordered).
 *
 * - If we are a write request (pending_write_domain is set), the new
 *   request must wait for outstanding read requests to complete.
 *
 * Returns 0 if successful, else propagates up the lower layer error.
 */
int
i915_gem_request_await_object(struct drm_i915_gem_request *to,
                              struct drm_i915_gem_object *obj,
                              bool write)
{
        struct dma_fence *excl;
        int ret = 0;

        if (write) {
                struct dma_fence **shared;
                unsigned int count, i;

                ret = reservation_object_get_fences_rcu(obj->resv,
                                                        &excl, &count, &shared);
                if (ret)
                        return ret;

                for (i = 0; i < count; i++) {
                        ret = i915_gem_request_await_dma_fence(to, shared[i]);
                        if (ret)
                                break;

                        dma_fence_put(shared[i]);
                }

                for (; i < count; i++)
                        dma_fence_put(shared[i]);

                kfree(shared);
        } else {
                excl = reservation_object_get_excl_rcu(obj->resv);
        }

        if (excl) {
                if (ret == 0)
                        ret = i915_gem_request_await_dma_fence(to, excl);

                dma_fence_put(excl);
        }

        return ret;
}
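/* A short sketch of the implicit-sync rule encoded above: a reader only
 * awaits the exclusive (write) fence, while a writer also awaits all
 * shared (read) fences. Hypothetical caller, assuming a request and
 * object already in hand (illustrative only):
 */
#if 0
static int example_sync_before_write(struct drm_i915_gem_request *req,
                                     struct drm_i915_gem_object *obj)
{
        /* write = true: wait for all prior readers and the last writer */
        return i915_gem_request_await_object(req, obj, true);
}
#endif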
static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->i915;

        if (dev_priv->gt.awake)
                return;

        GEM_BUG_ON(!dev_priv->gt.active_requests);

        intel_runtime_pm_get_noresume(dev_priv);
        dev_priv->gt.awake = true;

        intel_enable_gt_powersave(dev_priv);
        i915_update_gfx_val(dev_priv);
        if (INTEL_GEN(dev_priv) >= 6)
                gen6_rps_busy(dev_priv);

        queue_delayed_work(dev_priv->wq,
                           &dev_priv->gt.retire_work,
                           round_jiffies_up_relative(HZ));
}
/*
 * NB: This function is not allowed to fail. Doing so would mean the
 * request is not being tracked for completion but the work itself is
 * going to happen on the hardware. This would be a Bad Thing(tm).
 */
void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
{
        struct intel_engine_cs *engine = request->engine;
        struct intel_ring *ring = request->ring;
        struct intel_timeline *timeline = request->timeline;
        struct drm_i915_gem_request *prev;
        int err;

        lockdep_assert_held(&request->i915->drm.struct_mutex);
        trace_i915_gem_request_add(request);

        /*
         * To ensure that this call will not fail, space for its emissions
         * should already have been reserved in the ring buffer. Let the ring
         * know that it is time to use that space up.
         */
        request->reserved_space = 0;

        /*
         * Emit any outstanding flushes - execbuf can fail to emit the flush
         * after having emitted the batchbuffer command. Hence we need to fix
         * things up similar to emitting the lazy request. The difference here
         * is that the flush _must_ happen before the next request, no matter
         * what.
         */
        if (flush_caches) {
                err = engine->emit_flush(request, EMIT_FLUSH);

                /* Not allowed to fail! */
                WARN(err, "engine->emit_flush() failed: %d!\n", err);
        }

        /* Record the position of the start of the breadcrumb so that
         * should we detect the updated seqno part-way through the
         * GPU processing the request, we never over-estimate the
         * position of the ring's HEAD.
         */
        err = intel_ring_begin(request, engine->emit_breadcrumb_sz);
        GEM_BUG_ON(err);
        request->postfix = ring->tail;
        ring->tail += engine->emit_breadcrumb_sz * sizeof(u32);

        /* Seal the request and mark it as pending execution. Note that
         * we may inspect this state, without holding any locks, during
         * hangcheck. Hence we apply the barrier to ensure that we do not
         * see a more recent value in the hws than we are tracking.
         */

        prev = i915_gem_active_raw(&timeline->last_request,
                                   &request->i915->drm.struct_mutex);
        if (prev) {
                i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
                                             &request->submitq);
                if (engine->schedule)
                        __i915_priotree_add_dependency(&request->priotree,
                                                       &prev->priotree,
                                                       &request->dep,
                                                       0);
        }

        spin_lock_irq(&timeline->lock);
        list_add_tail(&request->link, &timeline->requests);
        spin_unlock_irq(&timeline->lock);

        GEM_BUG_ON(i915_seqno_passed(timeline->last_submitted_seqno,
                                     request->fence.seqno));
        timeline->last_submitted_seqno = request->fence.seqno;
        i915_gem_active_set(&timeline->last_request, request);

        list_add_tail(&request->ring_link, &ring->request_list);
        request->emitted_jiffies = jiffies;

        i915_gem_mark_busy(engine);

        /* Let the backend know a new request has arrived that may need
         * to adjust the existing execution schedule due to a high priority
         * request - i.e. we may want to preempt the current request in order
         * to run a high priority dependency chain *before* we can execute this
         * request.
         *
         * This is called before the request is ready to run so that we can
         * decide whether to preempt the entire chain so that it is ready to
         * run at the earliest possible convenience.
         */
        if (engine->schedule)
                engine->schedule(request, request->ctx->priority);

        local_bh_disable();
        i915_sw_fence_commit(&request->submit);
        local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
}
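/* The await on prev->submit above is what keeps a timeline FIFO: every
 * request carries a sw-fence dependency on the previous request's submit
 * fence, so commits propagate in order. Sketching that invariant in
 * isolation (illustrative only):
 */
#if 0
static void example_order_requests(struct drm_i915_gem_request *prev,
                                   struct drm_i915_gem_request *next)
{
        /* next->submit cannot signal until prev->submit has signaled */
        i915_sw_fence_await_sw_fence(&next->submit, &prev->submit,
                                     &next->submitq);
}
#endif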
static void reset_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);
        if (list_empty(&wait->task_list))
                __add_wait_queue(q, wait);
        spin_unlock_irqrestore(&q->lock, flags);
}
static unsigned long local_clock_us(unsigned int *cpu)
{
        unsigned long t;

        /* Cheaply and approximately convert from nanoseconds to microseconds.
         * The result and subsequent calculations are also defined in the same
         * approximate microseconds units. The principal source of timing
         * error here is from the simple truncation.
         *
         * Note that local_clock() is only defined wrt to the current CPU;
         * the comparisons are no longer valid if we switch CPUs. Instead of
         * blocking preemption for the entire busywait, we can detect the CPU
         * switch and use that as indicator of system load and a reason to
         * stop busywaiting, see busywait_stop().
         */
        *cpu = get_cpu();
        t = local_clock() >> 10;
        put_cpu();

        return t;
}
static bool busywait_stop(unsigned long timeout, unsigned int cpu)
{
        unsigned int this_cpu;

        if (time_after(local_clock_us(&this_cpu), timeout))
                return true;

        return this_cpu != cpu;
}
bool __i915_spin_request(const struct drm_i915_gem_request *req,
                         int state, unsigned long timeout_us)
{
        unsigned int cpu;

        /* When waiting for high frequency requests, e.g. during synchronous
         * rendering split between the CPU and GPU, the finite amount of time
         * required to set up the irq and wait upon it limits the response
         * rate. By busywaiting on the request completion for a short while we
         * can service the high frequency waits as quickly as possible.
         * However, if it is a slow request, we want to sleep as quickly as
         * possible. The tradeoff between waiting and sleeping is roughly the
         * time it takes to sleep on a request, on the order of a microsecond.
         */

        timeout_us += local_clock_us(&cpu);
        do {
                if (__i915_gem_request_completed(req))
                        return true;

                if (signal_pending_state(state, current))
                        break;

                if (busywait_stop(timeout_us, cpu))
                        break;

                cpu_relax();
        } while (!need_resched());

        return false;
}
static long
__i915_request_wait_for_execute(struct drm_i915_gem_request *request,
                                unsigned int flags,
                                long timeout)
{
        const int state = flags & I915_WAIT_INTERRUPTIBLE ?
                TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
        wait_queue_head_t *q = &request->i915->gpu_error.wait_queue;
        DEFINE_WAIT(reset);
        DEFINE_WAIT(wait);

        if (flags & I915_WAIT_LOCKED)
                add_wait_queue(q, &reset);

        do {
                prepare_to_wait(&request->execute.wait, &wait, state);

                if (i915_sw_fence_done(&request->execute))
                        break;

                if (flags & I915_WAIT_LOCKED &&
                    i915_reset_in_progress(&request->i915->gpu_error)) {
                        __set_current_state(TASK_RUNNING);
                        i915_reset(request->i915);
                        reset_wait_queue(q, &reset);
                        continue;
                }

                if (signal_pending_state(state, current)) {
                        timeout = -ERESTARTSYS;
                        break;
                }

                timeout = io_schedule_timeout(timeout);
        } while (timeout);
        finish_wait(&request->execute.wait, &wait);

        if (flags & I915_WAIT_LOCKED)
                remove_wait_queue(q, &reset);

        return timeout;
}
/**
 * i915_wait_request - wait until execution of request has finished
 * @req: the request to wait upon
 * @flags: how to wait
 * @timeout: how long to wait in jiffies
 *
 * i915_wait_request() waits for the request to be completed, for a
 * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
 * unbounded wait).
 *
 * If the caller holds the struct_mutex, the caller must pass I915_WAIT_LOCKED
 * in via the flags, and vice versa if the struct_mutex is not held, the caller
 * must not specify that the wait is locked.
 *
 * Returns the remaining time (in jiffies) if the request completed, which may
 * be zero or -ETIME if the request is unfinished after the timeout expires.
 * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is
 * pending before the request completes.
 */
long i915_wait_request(struct drm_i915_gem_request *req,
                       unsigned int flags,
                       long timeout)
{
        const int state = flags & I915_WAIT_INTERRUPTIBLE ?
                TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
        DEFINE_WAIT(reset);
        struct intel_wait wait;

        might_sleep();
#if IS_ENABLED(CONFIG_LOCKDEP)
        GEM_BUG_ON(debug_locks &&
                   !!lockdep_is_held(&req->i915->drm.struct_mutex) !=
                   !!(flags & I915_WAIT_LOCKED));
#endif
        GEM_BUG_ON(timeout < 0);

        if (i915_gem_request_completed(req))
                return timeout;

        if (!timeout)
                return -ETIME;

        trace_i915_gem_request_wait_begin(req);

        if (!i915_sw_fence_done(&req->execute)) {
                timeout = __i915_request_wait_for_execute(req, flags, timeout);
                if (timeout < 0)
                        goto complete;

                GEM_BUG_ON(!i915_sw_fence_done(&req->execute));
        }
        GEM_BUG_ON(!i915_sw_fence_done(&req->submit));
        GEM_BUG_ON(!req->global_seqno);

        /* Optimistic short spin before touching IRQs */
        if (i915_spin_request(req, state, 5))
                goto complete;

        set_current_state(state);
        if (flags & I915_WAIT_LOCKED)
                add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);

        intel_wait_init(&wait, req->global_seqno);
        if (intel_engine_add_wait(req->engine, &wait))
                /* In order to check that we haven't missed the interrupt
                 * as we enabled it, we need to kick ourselves to do a
                 * coherent check on the seqno before we sleep.
                 */
                goto wakeup;

        for (;;) {
                if (signal_pending_state(state, current)) {
                        timeout = -ERESTARTSYS;
                        break;
                }

                if (!timeout) {
                        timeout = -ETIME;
                        break;
                }

                timeout = io_schedule_timeout(timeout);

                if (intel_wait_complete(&wait))
                        break;

                set_current_state(state);

wakeup:
                /* Carefully check if the request is complete, giving time
                 * for the seqno to be visible following the interrupt.
                 * We also have to check in case we are kicked by the GPU
                 * reset in order to drop the struct_mutex.
                 */
                if (__i915_request_irq_complete(req))
                        break;

                /* If the GPU is hung, and we hold the lock, reset the GPU
                 * and then check for completion. On a full reset, the engine's
                 * HW seqno will be advanced past us and we are complete.
                 * If we do a partial reset, we have to wait for the GPU to
                 * resume and update the breadcrumb.
                 *
                 * If we don't hold the mutex, we can just wait for the worker
                 * to come along and update the breadcrumb (either directly
                 * itself, or indirectly by recovering the GPU).
                 */
                if (flags & I915_WAIT_LOCKED &&
                    i915_reset_in_progress(&req->i915->gpu_error)) {
                        __set_current_state(TASK_RUNNING);
                        i915_reset(req->i915);
                        reset_wait_queue(&req->i915->gpu_error.wait_queue,
                                         &reset);
                        continue;
                }

                /* Only spin if we know the GPU is processing this request */
                if (i915_spin_request(req, state, 2))
                        break;
        }

        intel_engine_remove_wait(req->engine, &wait);
        if (flags & I915_WAIT_LOCKED)
                remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
        __set_current_state(TASK_RUNNING);

complete:
        trace_i915_gem_request_wait_end(req);

        return timeout;
}
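/* A minimal sketch of a caller blocking on a request with this API,
 * assuming struct_mutex is held (hence I915_WAIT_LOCKED); illustrative
 * only:
 */
#if 0
static int example_wait(struct drm_i915_gem_request *req)
{
        long timeout;

        timeout = i915_wait_request(req,
                                    I915_WAIT_LOCKED |
                                    I915_WAIT_INTERRUPTIBLE,
                                    MAX_SCHEDULE_TIMEOUT);
        return timeout < 0 ? timeout : 0;
}
#endif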
static void engine_retire_requests(struct intel_engine_cs *engine)
{
        struct drm_i915_gem_request *request, *next;

        list_for_each_entry_safe(request, next,
                                 &engine->timeline->requests, link) {
                if (!__i915_gem_request_completed(request))
                        return;

                i915_gem_request_retire(request);
        }
}
void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        lockdep_assert_held(&dev_priv->drm.struct_mutex);

        if (!dev_priv->gt.active_requests)
                return;

        for_each_engine(engine, dev_priv, id)
                engine_retire_requests(engine);
}