/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#include <linux/prefetch.h>
#include <linux/dma-fence-array.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>

#include "i915_drv.h"
static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{
	return "i915";
}
static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
{
	return to_request(fence)->timeline->common->name;
}
static bool i915_fence_signaled(struct dma_fence *fence)
{
	return i915_gem_request_completed(to_request(fence));
}
static bool i915_fence_enable_signaling(struct dma_fence *fence)
{
	if (i915_fence_signaled(fence))
		return false;

	intel_engine_enable_signaling(to_request(fence));
	return true;
}
static signed long i915_fence_wait(struct dma_fence *fence,
				   bool interruptible,
				   signed long timeout)
{
	return i915_wait_request(to_request(fence), interruptible, timeout);
}
static void i915_fence_release(struct dma_fence *fence)
{
	struct drm_i915_gem_request *req = to_request(fence);

	/* The request is put onto a RCU freelist (i.e. the address
	 * is immediately reused), mark the fences as being freed now.
	 * Otherwise the debugobjects for the fences are only marked as
	 * freed when the slab cache itself is freed, and so we would get
	 * caught trying to reuse dead objects.
	 */
	i915_sw_fence_fini(&req->submit);
	i915_sw_fence_fini(&req->execute);

	kmem_cache_free(req->i915->requests, req);
}
const struct dma_fence_ops i915_fence_ops = {
	.get_driver_name = i915_fence_get_driver_name,
	.get_timeline_name = i915_fence_get_timeline_name,
	.enable_signaling = i915_fence_enable_signaling,
	.signaled = i915_fence_signaled,
	.wait = i915_fence_wait,
	.release = i915_fence_release,
};
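/*
 * Illustrative example - not code from this file: because each request
 * embeds a struct dma_fence bound to these ops, code outside i915 can
 * wait on it through the generic dma-fence interface, e.g.
 *
 *	struct dma_fence *fence = &req->fence;
 *
 *	if (dma_fence_wait_timeout(fence, true, msecs_to_jiffies(100)) <= 0)
 *		... timed out or was interrupted ...
 *
 * which lands in i915_fence_wait() above.
 */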
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
				   struct drm_file *file)
{
	struct drm_i915_private *dev_private;
	struct drm_i915_file_private *file_priv;

	WARN_ON(!req || !file || req->file_priv);

	if (!req || !file)
		return -EINVAL;

	if (req->file_priv)
		return -EINVAL;

	dev_private = req->i915;
	file_priv = file->driver_priv;

	spin_lock(&file_priv->mm.lock);
	req->file_priv = file_priv;
	list_add_tail(&req->client_list, &file_priv->mm.request_list);
	spin_unlock(&file_priv->mm.lock);

	return 0;
}
static inline void
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
	struct drm_i915_file_private *file_priv = request->file_priv;

	if (!file_priv)
		return;

	spin_lock(&file_priv->mm.lock);
	list_del(&request->client_list);
	request->file_priv = NULL;
	spin_unlock(&file_priv->mm.lock);
}
static struct i915_dependency *
i915_dependency_alloc(struct drm_i915_private *i915)
{
	return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
}
static void
i915_dependency_free(struct drm_i915_private *i915,
		     struct i915_dependency *dep)
{
	kmem_cache_free(i915->dependencies, dep);
}
static void
__i915_priotree_add_dependency(struct i915_priotree *pt,
			       struct i915_priotree *signal,
			       struct i915_dependency *dep,
			       unsigned long flags)
{
	INIT_LIST_HEAD(&dep->dfs_link);
	list_add(&dep->wait_link, &signal->waiters_list);
	list_add(&dep->signal_link, &pt->signalers_list);
	dep->signaler = signal;
	dep->flags = flags;
}
static int
i915_priotree_add_dependency(struct drm_i915_private *i915,
			     struct i915_priotree *pt,
			     struct i915_priotree *signal)
{
	struct i915_dependency *dep;

	dep = i915_dependency_alloc(i915);
	if (!dep)
		return -ENOMEM;

	__i915_priotree_add_dependency(pt, signal, dep, I915_DEPENDENCY_ALLOC);
	return 0;
}
static void
i915_priotree_fini(struct drm_i915_private *i915, struct i915_priotree *pt)
{
	struct i915_dependency *dep, *next;

	GEM_BUG_ON(!RB_EMPTY_NODE(&pt->node));

	/* Everyone we depended upon (the fences we wait to be signaled)
	 * should retire before us and remove themselves from our list.
	 * However, retirement is run independently on each timeline and
	 * so we may be called out-of-order.
	 */
	list_for_each_entry_safe(dep, next, &pt->signalers_list, signal_link) {
		list_del(&dep->wait_link);
		if (dep->flags & I915_DEPENDENCY_ALLOC)
			i915_dependency_free(i915, dep);
	}

	/* Remove ourselves from everyone who depends upon us */
	list_for_each_entry_safe(dep, next, &pt->waiters_list, wait_link) {
		list_del(&dep->signal_link);
		if (dep->flags & I915_DEPENDENCY_ALLOC)
			i915_dependency_free(i915, dep);
	}
}
static void
i915_priotree_init(struct i915_priotree *pt)
{
	INIT_LIST_HEAD(&pt->signalers_list);
	INIT_LIST_HEAD(&pt->waiters_list);
	RB_CLEAR_NODE(&pt->node);
	pt->priority = INT_MIN;
}
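/*
 * A new priotree starts at the lowest possible priority. If the backend
 * provides an engine->schedule() hook, __i915_add_request() later raises
 * the priority to that of the owning context (see the end of this file).
 */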
void i915_gem_retire_noop(struct i915_gem_active *active,
			  struct drm_i915_gem_request *request)
{
	/* Space left intentionally blank */
}
static void i915_gem_request_retire(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct i915_gem_active *active, *next;

	lockdep_assert_held(&request->i915->drm.struct_mutex);
	GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
	GEM_BUG_ON(!i915_sw_fence_signaled(&request->execute));
	GEM_BUG_ON(!i915_gem_request_completed(request));
	GEM_BUG_ON(!request->i915->gt.active_requests);

	trace_i915_gem_request_retire(request);

	spin_lock_irq(&engine->timeline->lock);
	list_del_init(&request->link);
	spin_unlock_irq(&engine->timeline->lock);

	/* We know the GPU must have read the request to have
	 * sent us the seqno + interrupt, so use the position
	 * of the tail of the request to update the last known position
	 * of the GPU head.
	 *
	 * Note this requires that we are always called in request
	 * completion order.
	 */
	list_del(&request->ring_link);
	request->ring->last_retired_head = request->postfix;
	if (!--request->i915->gt.active_requests) {
		GEM_BUG_ON(!request->i915->gt.awake);
		mod_delayed_work(request->i915->wq,
				 &request->i915->gt.idle_work,
				 msecs_to_jiffies(100));
	}

	/* Walk through the active list, calling retire on each. This allows
	 * objects to track their GPU activity and mark themselves as idle
	 * when their *last* active request is completed (updating state
	 * tracking lists for eviction, active references for GEM, etc).
	 *
	 * As the ->retire() may free the node, we decouple it first and
	 * pass along the auxiliary information (to avoid dereferencing
	 * the node after the callback).
	 */
	list_for_each_entry_safe(active, next, &request->active_list, link) {
		/* In microbenchmarks, or when focusing upon time inside the
		 * kernel, we may spend an inordinate amount of time simply
		 * handling the retirement of requests and processing their
		 * callbacks. Of which, this loop itself is particularly hot
		 * due to the cache misses when jumping around the list of
		 * i915_gem_active. So we try to keep this loop as streamlined
		 * as possible and also prefetch the next i915_gem_active to
		 * try and hide the likely cache miss.
		 */
		prefetchw(next);

		INIT_LIST_HEAD(&active->link);
		RCU_INIT_POINTER(active->request, NULL);

		active->retire(active, request);
	}

	i915_gem_request_remove_from_client(request);

	/* Retirement decays the ban score as it is a sign of ctx progress */
	if (request->ctx->ban_score > 0)
		request->ctx->ban_score--;

	/* The backing object for the context is done after switching to the
	 * *next* context. Therefore we cannot retire the previous context
	 * until the next context has already started running. However, since
	 * we cannot take the required locks at i915_gem_request_submit() we
	 * defer the unpinning of the active context to now, retirement of
	 * the subsequent request.
	 */
	if (engine->last_retired_context)
		engine->context_unpin(engine, engine->last_retired_context);
	engine->last_retired_context = request->ctx;

	dma_fence_signal(&request->fence);

	i915_priotree_fini(request->i915, &request->priotree);
	i915_gem_request_put(request);
}
void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	struct drm_i915_gem_request *tmp;

	lockdep_assert_held(&req->i915->drm.struct_mutex);
	GEM_BUG_ON(!i915_gem_request_completed(req));

	if (list_empty(&req->link))
		return;

	do {
		tmp = list_first_entry(&engine->timeline->requests,
				       typeof(*tmp), link);

		i915_gem_request_retire(tmp);
	} while (tmp != req);
}
static int i915_gem_init_global_seqno(struct drm_i915_private *i915, u32 seqno)
{
	struct i915_gem_timeline *timeline = &i915->gt.global_timeline;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int ret;

	/* Carefully retire all requests without writing to the rings */
	ret = i915_gem_wait_for_idle(i915,
				     I915_WAIT_INTERRUPTIBLE |
				     I915_WAIT_LOCKED);
	if (ret)
		return ret;

	i915_gem_retire_requests(i915);
	GEM_BUG_ON(i915->gt.active_requests > 1);

	/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
	if (!i915_seqno_passed(seqno, atomic_read(&timeline->seqno))) {
		while (intel_breadcrumbs_busy(i915))
			cond_resched(); /* spin until threads are complete */
	}
	atomic_set(&timeline->seqno, seqno);

	/* Finally reset hw state */
	for_each_engine(engine, i915, id)
		intel_engine_init_global_seqno(engine, seqno);

	list_for_each_entry(timeline, &i915->gt.timelines, link) {
		for_each_engine(engine, i915, id) {
			struct intel_timeline *tl = &timeline->engine[id];

			memset(tl->sync_seqno, 0, sizeof(tl->sync_seqno));
		}
	}

	return 0;
}
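/*
 * Note: i915_seqno_passed() (from i915_drv.h) is the wrap-safe comparison
 * used throughout this file, essentially
 *
 *	return (s32)(seq1 - seq2) >= 0;
 *
 * so the test above detects an attempt to move the timeline "backwards",
 * i.e. a wrap, in which case all breadcrumb signalers must drain first.
 */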
int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	if (seqno == 0)
		return -EINVAL;

	/* HWS page needs to be set less than what we
	 * will inject to ring
	 */
	return i915_gem_init_global_seqno(dev_priv, seqno - 1);
}
static int reserve_global_seqno(struct drm_i915_private *i915)
{
	u32 active_requests = ++i915->gt.active_requests;
	u32 seqno = atomic_read(&i915->gt.global_timeline.seqno);
	int ret;

	/* Reservation is fine until we need to wrap around */
	if (likely(seqno + active_requests > seqno))
		return 0;

	ret = i915_gem_init_global_seqno(i915, 0);
	if (ret) {
		i915->gt.active_requests--;
		return ret;
	}

	return 0;
}
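/*
 * The likely() test above relies on unsigned overflow: e.g. with
 * seqno == 0xfffffffd and four active requests, seqno + active_requests
 * wraps to 1, the test fails, and we rewind the global timeline to 0
 * (idling the GPU first) before this request may claim a seqno.
 */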
static u32 __timeline_get_seqno(struct i915_gem_timeline *tl)
{
	/* seqno only incremented under a mutex */
	return ++tl->seqno.counter;
}
static u32 timeline_get_seqno(struct i915_gem_timeline *tl)
{
	return atomic_inc_return(&tl->seqno);
}
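/*
 * Two flavours: __timeline_get_seqno() serves the per-context timeline,
 * whose counter is only ever advanced under struct_mutex (see
 * i915_gem_request_alloc()), so a plain increment suffices;
 * timeline_get_seqno() serves the global timeline, advanced under the
 * engine timeline spinlock in __i915_gem_request_submit(), and so needs
 * the atomic increment.
 */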
void __i915_gem_request_submit(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_timeline *timeline;
	u32 seqno;

	/* Transfer from per-context onto the global per-engine timeline */
	timeline = engine->timeline;
	GEM_BUG_ON(timeline == request->timeline);
	assert_spin_locked(&timeline->lock);

	seqno = timeline_get_seqno(timeline->common);
	GEM_BUG_ON(!seqno);
	GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine), seqno));

	GEM_BUG_ON(i915_seqno_passed(timeline->last_submitted_seqno, seqno));
	request->previous_seqno = timeline->last_submitted_seqno;
	timeline->last_submitted_seqno = seqno;

	/* We may be recursing from the signal callback of another i915 fence */
	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
	request->global_seqno = seqno;
	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
		intel_engine_enable_signaling(request);
	spin_unlock(&request->lock);

	GEM_BUG_ON(!request->global_seqno);
	engine->emit_breadcrumb(request,
				request->ring->vaddr + request->postfix);

	spin_lock(&request->timeline->lock);
	list_move_tail(&request->link, &timeline->requests);
	spin_unlock(&request->timeline->lock);

	i915_sw_fence_commit(&request->execute);
}
void i915_gem_request_submit(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	unsigned long flags;

	/* Will be called from irq-context when using foreign fences. */
	spin_lock_irqsave(&engine->timeline->lock, flags);

	__i915_gem_request_submit(request);

	spin_unlock_irqrestore(&engine->timeline->lock, flags);
}
static int __i915_sw_fence_call
submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	struct drm_i915_gem_request *request =
		container_of(fence, typeof(*request), submit);

	switch (state) {
	case FENCE_COMPLETE:
		request->engine->submit_request(request);
		break;

	case FENCE_FREE:
		i915_gem_request_put(request);
		break;
	}

	return NOTIFY_DONE;
}
static int __i915_sw_fence_call
execute_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	struct drm_i915_gem_request *request =
		container_of(fence, typeof(*request), execute);

	switch (state) {
	case FENCE_COMPLETE:
		break;

	case FENCE_FREE:
		i915_gem_request_put(request);
		break;
	}

	return NOTIFY_DONE;
}
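/*
 * Lifecycle note: i915_gem_request_alloc() below takes one reference per
 * sw fence ("We bump the ref for the fence chain"), and each FENCE_FREE
 * notification above drops one via i915_gem_request_put(). FENCE_COMPLETE
 * on the submit fence is what actually hands the request to the backend
 * through engine->submit_request().
 */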
/**
 * i915_gem_request_alloc - allocate a request structure
 *
 * @engine: engine that we wish to issue the request on.
 * @ctx: context that the request will be associated with.
 *       This can be NULL if the request is not directly related to
 *       any specific user context, in which case this function will
 *       choose an appropriate context to use.
 *
 * Returns a pointer to the allocated request if successful,
 * or an error code if not.
 */
struct drm_i915_gem_request *
i915_gem_request_alloc(struct intel_engine_cs *engine,
		       struct i915_gem_context *ctx)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct drm_i915_gem_request *req;
	int ret;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	/* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
	 * EIO if the GPU is already wedged.
	 */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return ERR_PTR(-EIO);

	/* Pinning the contexts may generate requests in order to acquire
	 * GGTT space, so do this first before we reserve a seqno for
	 * ourselves.
	 */
	ret = engine->context_pin(engine, ctx);
	if (ret)
		return ERR_PTR(ret);

	ret = reserve_global_seqno(dev_priv);
	if (ret)
		goto err_unpin;

	/* Move the oldest request to the slab-cache (if not in use!) */
	req = list_first_entry_or_null(&engine->timeline->requests,
				       typeof(*req), link);
	if (req && __i915_gem_request_completed(req))
		i915_gem_request_retire(req);

	/* Beware: Dragons be flying overhead.
	 *
	 * We use RCU to look up requests in flight. The lookups may
	 * race with the request being allocated from the slab freelist.
	 * That is the request we are writing to here, may be in the process
	 * of being read by __i915_gem_active_get_rcu(). As such,
	 * we have to be very careful when overwriting the contents. During
	 * the RCU lookup, we chase the request->engine pointer,
	 * read the request->global_seqno and increment the reference count.
	 *
	 * The reference count is incremented atomically. If it is zero,
	 * the lookup knows the request is unallocated and complete. Otherwise,
	 * it is either still in use, or has been reallocated and reset
	 * with dma_fence_init(). This increment is safe for release as we
	 * check that the request we have a reference to matches the active
	 * request.
	 *
	 * Before we increment the refcount, we chase the request->engine
	 * pointer. We must not call kmem_cache_zalloc() or else we set
	 * that pointer to NULL and cause a crash during the lookup. If
	 * we see the request is completed (based on the value of the
	 * old engine and seqno), the lookup is complete and reports NULL.
	 * If we decide the request is not completed (new engine or seqno),
	 * then we grab a reference and double check that it is still the
	 * active request - which it won't be, and restart the lookup.
	 *
	 * Do not use kmem_cache_zalloc() here!
	 */
	req = kmem_cache_alloc(dev_priv->requests, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto err_unreserve;
	}

	req->timeline = i915_gem_context_lookup_timeline(ctx, engine);
	GEM_BUG_ON(req->timeline == engine->timeline);

	spin_lock_init(&req->lock);
	dma_fence_init(&req->fence,
		       &i915_fence_ops,
		       &req->lock,
		       req->timeline->fence_context,
		       __timeline_get_seqno(req->timeline->common));

	/* We bump the ref for the fence chain */
	i915_sw_fence_init(&i915_gem_request_get(req)->submit, submit_notify);
	i915_sw_fence_init(&i915_gem_request_get(req)->execute, execute_notify);

	/* Ensure that the execute fence completes after the submit fence -
	 * as we complete the execute fence from within the submit fence
	 * callback, its completion would otherwise be visible first.
	 */
	i915_sw_fence_await_sw_fence(&req->execute, &req->submit, &req->execq);

	i915_priotree_init(&req->priotree);

	INIT_LIST_HEAD(&req->active_list);
	req->i915 = dev_priv;
	req->engine = engine;
	req->ctx = ctx;

	/* No zalloc, must clear what we need by hand */
	req->global_seqno = 0;
	req->file_priv = NULL;
	req->batch = NULL;

	/*
	 * Reserve space in the ring buffer for all the commands required to
	 * eventually emit this request. This is to guarantee that the
	 * i915_add_request() call can't fail. Note that the reserve may need
	 * to be redone if the request is not actually submitted straight
	 * away, e.g. because a GPU scheduler has deferred it.
	 */
	req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
	GEM_BUG_ON(req->reserved_space < engine->emit_breadcrumb_sz);

	ret = engine->request_alloc(req);
	if (ret)
		goto err_ctx;

	/* Record the position of the start of the request so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the head.
	 */
	req->head = req->ring->tail;

	return req;

err_ctx:
	/* Make sure we didn't add ourselves to external state before freeing */
	GEM_BUG_ON(!list_empty(&req->active_list));
	GEM_BUG_ON(!list_empty(&req->priotree.signalers_list));
	GEM_BUG_ON(!list_empty(&req->priotree.waiters_list));

	kmem_cache_free(dev_priv->requests, req);
err_unreserve:
	dev_priv->gt.active_requests--;
err_unpin:
	engine->context_unpin(engine, ctx);
	return ERR_PTR(ret);
}
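/*
 * Typical usage - a sketch, assuming the i915_add_request() wrapper from
 * i915_gem_request.h around __i915_add_request():
 *
 *	req = i915_gem_request_alloc(engine, ctx);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *
 *	... emit commands into req->ring ...
 *
 *	i915_add_request(req);
 *
 * The ring space reserved above guarantees that the final add_request
 * step cannot fail once allocation has succeeded.
 */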
static int
i915_gem_request_await_request(struct drm_i915_gem_request *to,
			       struct drm_i915_gem_request *from)
{
	int ret;

	GEM_BUG_ON(to == from);

	if (to->engine->schedule) {
		ret = i915_priotree_add_dependency(to->i915,
						   &to->priotree,
						   &from->priotree);
		if (ret < 0)
			return ret;
	}

	if (to->timeline == from->timeline)
		return 0;

	if (to->engine == from->engine) {
		ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
						       &from->submit,
						       GFP_KERNEL);
		return ret < 0 ? ret : 0;
	}

	if (!from->global_seqno) {
		ret = i915_sw_fence_await_dma_fence(&to->submit,
						    &from->fence, 0,
						    GFP_KERNEL);
		return ret < 0 ? ret : 0;
	}

	if (from->global_seqno <= to->timeline->sync_seqno[from->engine->id])
		return 0;

	trace_i915_gem_ring_sync_to(to, from);
	if (!i915.semaphores) {
		if (!i915_spin_request(from, TASK_INTERRUPTIBLE, 2)) {
			ret = i915_sw_fence_await_dma_fence(&to->submit,
							    &from->fence, 0,
							    GFP_KERNEL);
			if (ret < 0)
				return ret;
		}
	} else {
		ret = to->engine->semaphore.sync_to(to, from);
		if (ret)
			return ret;
	}

	to->timeline->sync_seqno[from->engine->id] = from->global_seqno;
	return 0;
}
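/*
 * The sync_seqno[] cache updated above means that once this timeline has
 * been ordered against engine X up to seqno S, any later await on a
 * request from engine X with global_seqno <= S is a no-op rather than
 * another semaphore or fence wait.
 */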
int
i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
				 struct dma_fence *fence)
{
	struct dma_fence_array *array;
	int ret;
	int i;

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return 0;

	if (dma_fence_is_i915(fence))
		return i915_gem_request_await_request(req, to_request(fence));

	if (!dma_fence_is_array(fence)) {
		ret = i915_sw_fence_await_dma_fence(&req->submit,
						    fence, I915_FENCE_TIMEOUT,
						    GFP_KERNEL);
		return ret < 0 ? ret : 0;
	}

	/* Note that if the fence-array was created in signal-on-any mode,
	 * we should *not* decompose it into its individual fences. However,
	 * we don't currently store which mode the fence-array is operating
	 * in. Fortunately, the only user of signal-on-any is private to
	 * amdgpu and we should not see any incoming fence-array from
	 * sync-file being in signal-on-any mode.
	 */

	array = to_dma_fence_array(fence);
	for (i = 0; i < array->num_fences; i++) {
		struct dma_fence *child = array->fences[i];

		if (dma_fence_is_i915(child))
			ret = i915_gem_request_await_request(req,
							     to_request(child));
		else
			ret = i915_sw_fence_await_dma_fence(&req->submit,
							    child,
							    I915_FENCE_TIMEOUT,
							    GFP_KERNEL);
		if (ret < 0)
			return ret;
	}

	return 0;
}
/**
 * i915_gem_request_await_object - set this request to (async) wait upon a bo
 *
 * @to: request we are wishing to use
 * @obj: object which may be in use on another ring.
 * @write: whether the wait is on behalf of a writer
 *
 * This code is meant to abstract object synchronization with the GPU.
 * Conceptually we serialise writes between engines inside the GPU.
 * We only allow one engine to write into a buffer at any time, but
 * multiple readers. To ensure each has a coherent view of memory, we must:
 *
 * - If there is an outstanding write request to the object, the new
 *   request must wait for it to complete (either CPU or in hw, requests
 *   on the same ring will be naturally ordered).
 *
 * - If we are a write request (pending_write_domain is set), the new
 *   request must wait for outstanding read requests to complete.
 *
 * Returns 0 if successful, else propagates up the lower layer error.
 */
int
i915_gem_request_await_object(struct drm_i915_gem_request *to,
			      struct drm_i915_gem_object *obj,
			      bool write)
{
	struct dma_fence *excl;
	int ret = 0;

	if (write) {
		struct dma_fence **shared;
		unsigned int count, i;

		ret = reservation_object_get_fences_rcu(obj->resv,
							&excl, &count,
							&shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			ret = i915_gem_request_await_dma_fence(to, shared[i]);
			if (ret)
				break;

			dma_fence_put(shared[i]);
		}

		for (; i < count; i++)
			dma_fence_put(shared[i]);

		kfree(shared);
	} else {
		excl = reservation_object_get_excl_rcu(obj->resv);
	}

	if (excl) {
		if (ret == 0)
			ret = i915_gem_request_await_dma_fence(to, excl);

		dma_fence_put(excl);
	}

	return ret;
}
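/*
 * Note the asymmetry: a writer must wait upon all shared (read) fences
 * as well as the exclusive fence, whereas a reader only needs to wait
 * upon the exclusive (write) fence - matching the one-writer/many-readers
 * rule described in the kerneldoc above.
 */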
static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (dev_priv->gt.awake)
		return;

	GEM_BUG_ON(!dev_priv->gt.active_requests);

	intel_runtime_pm_get_noresume(dev_priv);
	dev_priv->gt.awake = true;

	intel_enable_gt_powersave(dev_priv);
	i915_update_gfx_val(dev_priv);
	if (INTEL_GEN(dev_priv) >= 6)
		gen6_rps_busy(dev_priv);

	queue_delayed_work(dev_priv->wq,
			   &dev_priv->gt.retire_work,
			   round_jiffies_up_relative(HZ));
}
/*
 * NB: This function is not allowed to fail. Doing so would mean the
 * request is not being tracked for completion but the work itself is
 * going to happen on the hardware. This would be a Bad Thing(tm).
 */
void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_ring *ring = request->ring;
	struct intel_timeline *timeline = request->timeline;
	struct drm_i915_gem_request *prev;
	int err;

	lockdep_assert_held(&request->i915->drm.struct_mutex);
	trace_i915_gem_request_add(request);

	/* Make sure that no request gazumped us - if it was allocated after
	 * our i915_gem_request_alloc() and called __i915_add_request() before
	 * us, the timeline will hold its seqno which is later than ours.
	 */
	GEM_BUG_ON(i915_seqno_passed(timeline->last_submitted_seqno,
				     request->fence.seqno));

	/*
	 * To ensure that this call will not fail, space for its emissions
	 * should already have been reserved in the ring buffer. Let the ring
	 * know that it is time to use that space up.
	 */
	request->reserved_space = 0;

	/*
	 * Emit any outstanding flushes - execbuf can fail to emit the flush
	 * after having emitted the batchbuffer command. Hence we need to fix
	 * things up similar to emitting the lazy request. The difference here
	 * is that the flush _must_ happen before the next request, no matter
	 * what.
	 */
	if (flush_caches) {
		err = engine->emit_flush(request, EMIT_FLUSH);

		/* Not allowed to fail! */
		WARN(err, "engine->emit_flush() failed: %d!\n", err);
	}

	/* Record the position of the start of the breadcrumb so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the ring's HEAD.
	 */
	err = intel_ring_begin(request, engine->emit_breadcrumb_sz);
	GEM_BUG_ON(err);
	request->postfix = ring->tail;
	ring->tail += engine->emit_breadcrumb_sz * sizeof(u32);

	/* Seal the request and mark it as pending execution. Note that
	 * we may inspect this state, without holding any locks, during
	 * hangcheck. Hence we apply the barrier to ensure that we do not
	 * see a more recent value in the hws than we are tracking.
	 */

	prev = i915_gem_active_raw(&timeline->last_request,
				   &request->i915->drm.struct_mutex);
	if (prev) {
		i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
					     &request->submitq);
		if (engine->schedule)
			__i915_priotree_add_dependency(&request->priotree,
						       &prev->priotree,
						       &request->dep,
						       0);
	}

	spin_lock_irq(&timeline->lock);
	list_add_tail(&request->link, &timeline->requests);
	spin_unlock_irq(&timeline->lock);

	GEM_BUG_ON(i915_seqno_passed(timeline->last_submitted_seqno,
				     request->fence.seqno));
	timeline->last_submitted_seqno = request->fence.seqno;
	i915_gem_active_set(&timeline->last_request, request);

	list_add_tail(&request->ring_link, &ring->request_list);
	request->emitted_jiffies = jiffies;

	i915_gem_mark_busy(engine);

	/* Let the backend know a new request has arrived that may need
	 * to adjust the existing execution schedule due to a high priority
	 * request - i.e. we may want to preempt the current request in order
	 * to run a high priority dependency chain *before* we can execute this
	 * request.
	 *
	 * This is called before the request is ready to run so that we can
	 * decide whether to preempt the entire chain so that it is ready to
	 * run at the earliest possible convenience.
	 */
	if (engine->schedule)
		engine->schedule(request, request->ctx->priority);

	local_bh_disable();
	i915_sw_fence_commit(&request->submit);
	local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
}
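/*
 * The local_bh_disable()/local_bh_enable() pair above is a scheduling
 * hint: committing the submit fence may invoke submit_notify() and hence
 * engine->submit_request(), which for execlists schedules a tasklet.
 * Re-enabling bottom halves runs that tasklet immediately on this CPU
 * rather than waiting for the next softirq point.
 */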
static void reset_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	if (list_empty(&wait->task_list))
		__add_wait_queue(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}
static unsigned long local_clock_us(unsigned int *cpu)
{
	unsigned long t;

	/* Cheaply and approximately convert from nanoseconds to microseconds.
	 * The result and subsequent calculations are also defined in the same
	 * approximate microseconds units. The principal source of timing
	 * error here is from the simple truncation.
	 *
	 * Note that local_clock() is only defined wrt to the current CPU;
	 * the comparisons are no longer valid if we switch CPUs. Instead of
	 * blocking preemption for the entire busywait, we can detect the CPU
	 * switch and use that as indicator of system load and a reason to
	 * stop busywaiting, see busywait_stop().
	 */
	*cpu = get_cpu();
	t = local_clock() >> 10;
	put_cpu();

	return t;
}
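/*
 * The shift above approximates nanoseconds to microseconds by dividing
 * by 1024 rather than 1000, a systematic error of about 2.4% that is
 * irrelevant for a busywait budget of a few microseconds.
 */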
static bool busywait_stop(unsigned long timeout, unsigned int cpu)
{
	unsigned int this_cpu;

	if (time_after(local_clock_us(&this_cpu), timeout))
		return true;

	return this_cpu != cpu;
}
bool __i915_spin_request(const struct drm_i915_gem_request *req,
			 int state, unsigned long timeout_us)
{
	unsigned int cpu;

	/* When waiting for high frequency requests, e.g. during synchronous
	 * rendering split between the CPU and GPU, the finite amount of time
	 * required to set up the irq and wait upon it limits the response
	 * rate. By busywaiting on the request completion for a short while we
	 * can service the high frequency waits as quick as possible. However,
	 * if it is a slow request, we want to sleep as quickly as possible.
	 * The tradeoff between waiting and sleeping is roughly the time it
	 * takes to sleep on a request, on the order of a microsecond.
	 */

	timeout_us += local_clock_us(&cpu);
	do {
		if (__i915_gem_request_completed(req))
			return true;

		if (signal_pending_state(state, current))
			break;

		if (busywait_stop(timeout_us, cpu))
			break;

		cpu_relax();
	} while (!need_resched());

	return false;
}
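/*
 * Callers pick the spin budget: i915_wait_request() below spins for up
 * to 5us before arming the interrupt, and for another 2us at a time once
 * it knows the GPU is actively processing this request.
 */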
static long
__i915_request_wait_for_execute(struct drm_i915_gem_request *request,
				unsigned int flags,
				long timeout)
{
	const int state = flags & I915_WAIT_INTERRUPTIBLE ?
		TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
	wait_queue_head_t *q = &request->i915->gpu_error.wait_queue;
	DEFINE_WAIT(reset);
	DEFINE_WAIT(wait);

	if (flags & I915_WAIT_LOCKED)
		add_wait_queue(q, &reset);

	do {
		prepare_to_wait(&request->execute.wait, &wait, state);

		if (i915_sw_fence_done(&request->execute))
			break;

		if (flags & I915_WAIT_LOCKED &&
		    i915_reset_in_progress(&request->i915->gpu_error)) {
			__set_current_state(TASK_RUNNING);
			i915_reset(request->i915);
			reset_wait_queue(q, &reset);
			continue;
		}

		if (signal_pending_state(state, current)) {
			timeout = -ERESTARTSYS;
			break;
		}

		if (!timeout) {
			timeout = -ETIME;
			break;
		}

		timeout = io_schedule_timeout(timeout);
	} while (1);
	finish_wait(&request->execute.wait, &wait);

	if (flags & I915_WAIT_LOCKED)
		remove_wait_queue(q, &reset);

	return timeout;
}
/**
 * i915_wait_request - wait until execution of request has finished
 * @req: the request to wait upon
 * @flags: how to wait
 * @timeout: how long to wait in jiffies
 *
 * i915_wait_request() waits for the request to be completed, for a
 * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
 * unbounded wait).
 *
 * If the caller holds the struct_mutex, the caller must pass I915_WAIT_LOCKED
 * in via the flags, and vice versa if the struct_mutex is not held, the caller
 * must not specify that the wait is locked.
 *
 * Returns the remaining time (in jiffies) if the request completed, which may
 * be zero or -ETIME if the request is unfinished after the timeout expires.
 * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is
 * pending before the request completes.
 */
long i915_wait_request(struct drm_i915_gem_request *req,
		       unsigned int flags,
		       long timeout)
{
	const int state = flags & I915_WAIT_INTERRUPTIBLE ?
		TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
	DEFINE_WAIT(reset);
	struct intel_wait wait;

	might_sleep();
#if IS_ENABLED(CONFIG_LOCKDEP)
	GEM_BUG_ON(debug_locks &&
		   !!lockdep_is_held(&req->i915->drm.struct_mutex) !=
		   !!(flags & I915_WAIT_LOCKED));
#endif
	GEM_BUG_ON(timeout < 0);

	if (i915_gem_request_completed(req))
		return timeout;

	if (!timeout)
		return -ETIME;

	trace_i915_gem_request_wait_begin(req);

	if (!i915_sw_fence_done(&req->execute)) {
		timeout = __i915_request_wait_for_execute(req, flags, timeout);
		if (timeout < 0)
			goto complete;

		GEM_BUG_ON(!i915_sw_fence_done(&req->execute));
	}
	GEM_BUG_ON(!i915_sw_fence_done(&req->submit));
	GEM_BUG_ON(!req->global_seqno);

	/* Optimistic short spin before touching IRQs */
	if (i915_spin_request(req, state, 5))
		goto complete;

	set_current_state(state);
	if (flags & I915_WAIT_LOCKED)
		add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);

	intel_wait_init(&wait, req->global_seqno);
	if (intel_engine_add_wait(req->engine, &wait))
		/* In order to check that we haven't missed the interrupt
		 * as we enabled it, we need to kick ourselves to do a
		 * coherent check on the seqno before we sleep.
		 */
		goto wakeup;

	for (;;) {
		if (signal_pending_state(state, current)) {
			timeout = -ERESTARTSYS;
			break;
		}

		if (!timeout) {
			timeout = -ETIME;
			break;
		}

		timeout = io_schedule_timeout(timeout);

		if (intel_wait_complete(&wait))
			break;

		set_current_state(state);

wakeup:
		/* Carefully check if the request is complete, giving time
		 * for the seqno to be visible following the interrupt.
		 * We also have to check in case we are kicked by the GPU
		 * reset in order to drop the struct_mutex.
		 */
		if (__i915_request_irq_complete(req))
			break;

		/* If the GPU is hung, and we hold the lock, reset the GPU
		 * and then check for completion. On a full reset, the engine's
		 * HW seqno will be advanced past us and we are complete.
		 * If we do a partial reset, we have to wait for the GPU to
		 * resume and update the breadcrumb.
		 *
		 * If we don't hold the mutex, we can just wait for the worker
		 * to come along and update the breadcrumb (either directly
		 * itself, or indirectly by recovering the GPU).
		 */
		if (flags & I915_WAIT_LOCKED &&
		    i915_reset_in_progress(&req->i915->gpu_error)) {
			__set_current_state(TASK_RUNNING);
			i915_reset(req->i915);
			reset_wait_queue(&req->i915->gpu_error.wait_queue,
					 &reset);
			continue;
		}

		/* Only spin if we know the GPU is processing this request */
		if (i915_spin_request(req, state, 2))
			break;
	}

	intel_engine_remove_wait(req->engine, &wait);
	if (flags & I915_WAIT_LOCKED)
		remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
	__set_current_state(TASK_RUNNING);

complete:
	trace_i915_gem_request_wait_end(req);

	return timeout;
}
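/*
 * Example wait - a sketch: an interruptible, bounded wait from a caller
 * that does not hold struct_mutex:
 *
 *	timeout = i915_wait_request(req, I915_WAIT_INTERRUPTIBLE,
 *				    msecs_to_jiffies(10));
 *	if (timeout < 0)
 *		return timeout;
 *
 * where a negative result is -ETIME or -ERESTARTSYS as documented above.
 */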
static void engine_retire_requests(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *request, *next;

	list_for_each_entry_safe(request, next,
				 &engine->timeline->requests, link) {
		if (!__i915_gem_request_completed(request))
			return;

		i915_gem_request_retire(request);
	}
}
void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	if (!dev_priv->gt.active_requests)
		return;

	for_each_engine(engine, dev_priv, id)
		engine_retire_requests(engine);
}
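/*
 * Retirement is driven from two directions: callers retire on demand
 * (e.g. i915_gem_request_alloc() reaping the oldest request), and the
 * gt.retire_work delayed worker queued by i915_gem_mark_busy() above is
 * expected to call in here periodically while the GPU remains awake.
 */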