/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>

#include "i915_drv.h"
static unsigned int __intel_breadcrumbs_wakeup(struct intel_breadcrumbs *b)
{
	struct intel_wait *wait;
	unsigned int result = 0;

	lockdep_assert_held(&b->irq_lock);

	wait = b->irq_wait;
	if (wait) {
		result = ENGINE_WAKEUP_WAITER;
		if (wake_up_process(wait->tsk))
			result |= ENGINE_WAKEUP_ASLEEP;
	}

	return result;
}
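
/* __intel_breadcrumbs_wakeup() reports what it found under b->irq_lock:
 * ENGINE_WAKEUP_WAITER if a bottom-half waiter is registered, plus
 * ENGINE_WAKEUP_ASLEEP if wake_up_process() actually had to wake that
 * task (i.e. it was asleep rather than already runnable).
 */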
unsigned int intel_engine_wakeup(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	unsigned long flags;
	unsigned int result;

	spin_lock_irqsave(&b->irq_lock, flags);
	result = __intel_breadcrumbs_wakeup(b);
	spin_unlock_irqrestore(&b->irq_lock, flags);

	return result;
}
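
/* Note: the irqsave variant is used as intel_engine_wakeup() may be
 * called from interrupt context (e.g. the user interrupt handler) as
 * well as from the timer callbacks below.
 */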
static unsigned long wait_timeout(void)
{
	return round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES);
}
static noinline void missed_breadcrumb(struct intel_engine_cs *engine)
{
	DRM_DEBUG_DRIVER("%s missed breadcrumb at %pF, irq posted? %s\n",
			 engine->name, __builtin_return_address(0),
			 yesno(test_bit(ENGINE_IRQ_BREADCRUMB,
					&engine->irq_posted)));

	set_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
}
static void intel_breadcrumbs_hangcheck(unsigned long data)
{
	struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	if (!b->irq_armed)
		return;

	if (b->hangcheck_interrupts != atomic_read(&engine->irq_count)) {
		b->hangcheck_interrupts = atomic_read(&engine->irq_count);
		mod_timer(&b->hangcheck, wait_timeout());
		return;
	}

	/* We keep the hangcheck timer alive until we disarm the irq, even
	 * if there are no waiters at present.
	 *
	 * If the waiter was currently running, assume it hasn't had a chance
	 * to process the pending interrupt (e.g. low priority task on a loaded
	 * system) and wait until it sleeps before declaring a missed interrupt.
	 *
	 * If the waiter was asleep (and not even pending a wakeup), then we
	 * must have missed an interrupt as the GPU has stopped advancing
	 * but we still have a waiter. Assuming all batches complete within
	 * DRM_I915_HANGCHECK_JIFFIES [1.5s]!
	 */
	if (intel_engine_wakeup(engine) & ENGINE_WAKEUP_ASLEEP) {
		missed_breadcrumb(engine);
		mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
	} else {
		mod_timer(&b->hangcheck, wait_timeout());
	}
}
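
/* b->hangcheck_interrupts snapshots engine->irq_count: if any user
 * interrupt has arrived since the last hangcheck tick, the engine is
 * making progress and we simply re-arm the timer instead of probing
 * the waiter.
 */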
static void intel_breadcrumbs_fake_irq(unsigned long data)
{
	struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	unsigned long flags;

	/* The timer persists in case we cannot enable interrupts,
	 * or if we have previously seen seqno/interrupt incoherency
	 * ("missed interrupt" syndrome). Here the worker will wake up
	 * every jiffie in order to kick the oldest waiter to do the
	 * coherent seqno check.
	 */

	spin_lock_irqsave(&b->irq_lock, flags);
	if (!__intel_breadcrumbs_wakeup(b))
		__intel_engine_disarm_breadcrumbs(engine);
	spin_unlock_irqrestore(&b->irq_lock, flags);
	if (!b->irq_armed)
		return;

	mod_timer(&b->fake_irq, jiffies + 1);

	/* Ensure that even if the GPU hangs, we get woken up.
	 *
	 * However, note that if no one is waiting, we never notice
	 * a gpu hang. Eventually, we will have to wait for a resource
	 * held by the GPU and so trigger a hangcheck. In the most
	 * pathological case, this will be upon memory starvation! To
	 * prevent this, we also queue the hangcheck from the retire
	 * worker.
	 */
	i915_queue_hangcheck(engine->i915);
}
static void irq_enable(struct intel_engine_cs *engine)
{
	/* Enabling the IRQ may miss the generation of the interrupt, but
	 * we still need to force the barrier before reading the seqno,
	 * just in case.
	 */
	set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);

	/* Caller disables interrupts */
	spin_lock(&engine->i915->irq_lock);
	engine->irq_enable(engine);
	spin_unlock(&engine->i915->irq_lock);
}
static void irq_disable(struct intel_engine_cs *engine)
{
	/* Caller disables interrupts */
	spin_lock(&engine->i915->irq_lock);
	engine->irq_disable(engine);
	spin_unlock(&engine->i915->irq_lock);
}
void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	lockdep_assert_held(&b->irq_lock);

	if (b->irq_enabled) {
		irq_disable(engine);
		b->irq_enabled = false;
	}

	b->irq_armed = false;
}
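
/* Two flags track the irq state: b->irq_armed means the breadcrumbs
 * machinery (hardware irq or fake-irq timer) is active, while
 * b->irq_enabled means the hardware user interrupt itself has been
 * enabled. The fake-irq path can be armed without ever enabling the
 * real interrupt.
 */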
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	unsigned long flags;

	if (!b->irq_armed)
		return;

	spin_lock_irqsave(&b->irq_lock, flags);

	/* We only disarm the irq when we are idle (all requests completed),
	 * so if there remains a sleeping waiter, it missed the request
	 * completion.
	 */
	if (__intel_breadcrumbs_wakeup(b) & ENGINE_WAKEUP_ASLEEP)
		missed_breadcrumb(engine);

	__intel_engine_disarm_breadcrumbs(engine);

	spin_unlock_irqrestore(&b->irq_lock, flags);
}
static bool use_fake_irq(const struct intel_breadcrumbs *b)
{
	const struct intel_engine_cs *engine =
		container_of(b, struct intel_engine_cs, breadcrumbs);

	if (!test_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings))
		return false;

	/* Only start with the heavy weight fake irq timer if we have not
	 * seen any interrupts since enabling it the first time. If the
	 * interrupts are still arriving, it means we made a mistake in our
	 * engine->seqno_barrier(), a timing error that should be transient
	 * and unlikely to reoccur.
	 */
	return atomic_read(&engine->irq_count) == b->hangcheck_interrupts;
}
static void enable_fake_irq(struct intel_breadcrumbs *b)
{
	/* Ensure we never sleep indefinitely */
	if (!b->irq_enabled || use_fake_irq(b))
		mod_timer(&b->fake_irq, jiffies + 1);
	else
		mod_timer(&b->hangcheck, wait_timeout());
}
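
/* Of the two timers, fake_irq is the heavyweight: it fires every jiffy
 * to substitute for the missing user interrupt, whereas hangcheck only
 * fires after DRM_I915_HANGCHECK_JIFFIES to verify that interrupts are
 * still being delivered.
 */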
static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
{
	struct intel_engine_cs *engine =
		container_of(b, struct intel_engine_cs, breadcrumbs);
	struct drm_i915_private *i915 = engine->i915;

	lockdep_assert_held(&b->irq_lock);
	if (b->irq_armed)
		return;

	/* The breadcrumb irq will be disarmed on the interrupt after the
	 * waiters are signaled. This gives us a single interrupt window in
	 * which we can add a new waiter and avoid the cost of re-enabling
	 * the irq.
	 */
	b->irq_armed = true;
	GEM_BUG_ON(b->irq_enabled);

	if (I915_SELFTEST_ONLY(b->mock)) {
		/* For our mock objects we want to avoid interaction
		 * with the real hardware (which is not set up). So
		 * we simply pretend we have enabled the powerwell
		 * and the irq, and leave it up to the mock
		 * implementation to call intel_engine_wakeup()
		 * itself when it wants to simulate a user interrupt,
		 */
		return;
	}

	/* Since we are waiting on a request, the GPU should be busy
	 * and should have its own rpm reference. This is tracked
	 * by i915->gt.awake, we can forgo holding our own wakref
	 * for the interrupt as before i915->gt.awake is released (when
	 * the driver is idle) we disarm the breadcrumbs.
	 */

	/* No interrupts? Kick the waiter every jiffie! */
	if (intel_irqs_enabled(i915)) {
		if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings))
			irq_enable(engine);
		b->irq_enabled = true;
	}

	enable_fake_irq(b);
}
static inline struct intel_wait *to_wait(struct rb_node *node)
{
	return rb_entry(node, struct intel_wait, node);
}
static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
					      struct intel_wait *wait)
{
	lockdep_assert_held(&b->rb_lock);
	GEM_BUG_ON(b->irq_wait == wait);

	/* This request is completed, so remove it from the tree, mark it as
	 * complete, and *then* wake up the associated task.
	 */
	rb_erase(&wait->node, &b->waiters);
	RB_CLEAR_NODE(&wait->node);

	wake_up_process(wait->tsk); /* implicit smp_wmb() */
}
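
/* The ordering above matters: RB_CLEAR_NODE() must be visible before the
 * waiter runs, as intel_engine_remove_wait() uses RB_EMPTY_NODE() as its
 * lock-free "already removed" check. wake_up_process() provides the
 * required write barrier.
 */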
static inline void __intel_breadcrumbs_next(struct intel_engine_cs *engine,
					    struct rb_node *next)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	spin_lock(&b->irq_lock);
	GEM_BUG_ON(!b->irq_armed);
	b->irq_wait = to_wait(next);
	spin_unlock(&b->irq_lock);

	/* We always wake up the next waiter that takes over as the bottom-half
	 * as we may delegate not only the irq-seqno barrier to the next waiter
	 * but also the task of waking up concurrent waiters.
	 */
	if (next)
		wake_up_process(to_wait(next)->tsk);
}
static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
				    struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct rb_node **p, *parent, *completed;
	bool first;
	u32 seqno;

	/* Insert the request into the retirement ordered list
	 * of waiters by walking the rbtree. If we are the oldest
	 * seqno in the tree (the first to be retired), then
	 * set ourselves as the bottom-half.
	 *
	 * As we descend the tree, prune completed branches since we hold the
	 * spinlock we know that the first_waiter must be delayed and can
	 * reduce some of the sequential wake up latency if we take action
	 * ourselves and wake up the completed tasks in parallel. Also, by
	 * removing stale elements in the tree, we may be able to reduce the
	 * ping-pong between the old bottom-half and ourselves as first-waiter.
	 */
	first = true;
	parent = NULL;
	completed = NULL;
	seqno = intel_engine_get_seqno(engine);

	/* If the request completed before we managed to grab the spinlock,
	 * return now before adding ourselves to the rbtree. We let the
	 * current bottom-half handle any pending wakeups and instead
	 * try and get out of the way quickly.
	 */
	if (i915_seqno_passed(seqno, wait->seqno)) {
		RB_CLEAR_NODE(&wait->node);
		return first;
	}

	p = &b->waiters.rb_node;
	while (*p) {
		parent = *p;
		if (wait->seqno == to_wait(parent)->seqno) {
			/* We have multiple waiters on the same seqno, select
			 * the highest priority task (that with the smallest
			 * task->prio) to serve as the bottom-half for this
			 * group.
			 */
			if (wait->tsk->prio > to_wait(parent)->tsk->prio) {
				p = &parent->rb_right;
				first = false;
			} else {
				p = &parent->rb_left;
			}
		} else if (i915_seqno_passed(wait->seqno,
					     to_wait(parent)->seqno)) {
			p = &parent->rb_right;
			if (i915_seqno_passed(seqno, to_wait(parent)->seqno))
				completed = parent;
			else
				first = false;
		} else {
			p = &parent->rb_left;
		}
	}
	rb_link_node(&wait->node, parent, p);
	rb_insert_color(&wait->node, &b->waiters);

	if (completed) {
		struct rb_node *next = rb_next(completed);

		GEM_BUG_ON(!next && !first);
		if (next && next != &wait->node) {
			GEM_BUG_ON(first);
			__intel_breadcrumbs_next(engine, next);
		}

		do {
			struct intel_wait *crumb = to_wait(completed);
			completed = rb_prev(completed);
			__intel_breadcrumbs_finish(b, crumb);
		} while (completed);
	}

	if (first) {
		spin_lock(&b->irq_lock);
		GEM_BUG_ON(rb_first(&b->waiters) != &wait->node);
		b->irq_wait = wait;
		/* After assigning ourselves as the new bottom-half, we must
		 * perform a cursory check to prevent a missed interrupt.
		 * Either we miss the interrupt whilst programming the hardware,
		 * or if there was a previous waiter (for a later seqno) they
		 * may be woken instead of us (due to the inherent race
		 * in the unlocked read of b->irq_seqno_bh in the irq handler)
		 * and so we miss the wake up.
		 */
		__intel_breadcrumbs_enable_irq(b);
		spin_unlock(&b->irq_lock);
	}

	GEM_BUG_ON(!b->irq_wait);
	GEM_BUG_ON(rb_first(&b->waiters) != &b->irq_wait->node);

	return first;
}
bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	bool first;

	spin_lock_irq(&b->rb_lock);
	first = __intel_engine_add_wait(engine, wait);
	spin_unlock_irq(&b->rb_lock);

	return first;
}
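
/* A rough sketch of a waiter's life cycle, loosely following what
 * i915_wait_request() does (illustrative only, not the exact code):
 *
 *	struct intel_wait wait = { .tsk = current, .seqno = seqno };
 *
 *	if (intel_engine_add_wait(engine, &wait))
 *		; // we are the bottom-half: do the coherent seqno check
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (i915_seqno_passed(intel_engine_get_seqno(engine),
 *				      wait.seqno))
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *	intel_engine_remove_wait(engine, &wait);
 */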
static inline bool chain_wakeup(struct rb_node *rb, int priority)
{
	return rb && to_wait(rb)->tsk->prio <= priority;
}
static inline int wakeup_priority(struct intel_breadcrumbs *b,
				  struct task_struct *tsk)
{
	if (tsk == b->signaler)
		return INT_MIN;
	else
		return tsk->prio;
}
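
/* Smaller prio values denote more important tasks. The signaler thread
 * is given INT_MIN so that chain_wakeup() never batches wakeups on its
 * behalf: when the signaler removes a wait we take the quickest exit to
 * keep fence signaling latency to a minimum.
 */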
static void __intel_engine_remove_wait(struct intel_engine_cs *engine,
				       struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	lockdep_assert_held(&b->rb_lock);

	if (RB_EMPTY_NODE(&wait->node))
		goto out;

	if (b->irq_wait == wait) {
		const int priority = wakeup_priority(b, wait->tsk);
		struct rb_node *next;

		/* We are the current bottom-half. Find the next candidate,
		 * the first waiter in the queue on the remaining oldest
		 * request. As multiple seqnos may complete in the time it
		 * takes us to wake up and find the next waiter, we have to
		 * wake up that waiter for it to perform its own coherent
		 * completion check.
		 */
		next = rb_next(&wait->node);
		if (chain_wakeup(next, priority)) {
			/* If the next waiter is already complete,
			 * wake it up and continue onto the next waiter. So
			 * if have a small herd, they will wake up in parallel
			 * rather than sequentially, which should reduce
			 * the overall latency in waking all the completed
			 * clients.
			 *
			 * However, waking up a chain adds extra latency to
			 * the first_waiter. This is undesirable if that
			 * waiter is a high priority task.
			 */
			u32 seqno = intel_engine_get_seqno(engine);

			while (i915_seqno_passed(seqno, to_wait(next)->seqno)) {
				struct rb_node *n = rb_next(next);

				__intel_breadcrumbs_finish(b, to_wait(next));
				next = n;
				if (!chain_wakeup(next, priority))
					break;
			}
		}

		__intel_breadcrumbs_next(engine, next);
	} else {
		GEM_BUG_ON(rb_first(&b->waiters) == &wait->node);
	}

	GEM_BUG_ON(RB_EMPTY_NODE(&wait->node));
	rb_erase(&wait->node, &b->waiters);

out:
	GEM_BUG_ON(b->irq_wait == wait);
	GEM_BUG_ON(rb_first(&b->waiters) !=
		   (b->irq_wait ? &b->irq_wait->node : NULL));
}
void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	/* Quick check to see if this waiter was already decoupled from
	 * the tree by the bottom-half to avoid contention on the spinlock
	 * by the herd.
	 */
	if (RB_EMPTY_NODE(&wait->node))
		return;

	spin_lock_irq(&b->rb_lock);
	__intel_engine_remove_wait(engine, wait);
	spin_unlock_irq(&b->rb_lock);
}
static bool signal_valid(const struct drm_i915_gem_request *request)
{
	return intel_wait_check_request(&request->signaling.wait, request);
}
static bool signal_complete(const struct drm_i915_gem_request *request)
{
	if (!request)
		return false;

	/* If another process served as the bottom-half it may have already
	 * signalled that this wait is already completed.
	 */
	if (intel_wait_complete(&request->signaling.wait))
		return signal_valid(request);

	/* Carefully check if the request is complete, giving time for the
	 * seqno to be visible or if the GPU hung.
	 */
	if (__i915_request_irq_complete(request))
		return true;

	return false;
}
static struct drm_i915_gem_request *to_signaler(struct rb_node *rb)
{
	return rb_entry(rb, struct drm_i915_gem_request, signaling.node);
}
static void signaler_set_rtpriority(void)
{
	struct sched_param param = { .sched_priority = 1 };

	sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
}
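
/* A modest SCHED_FIFO priority of 1 is presumably enough to preempt any
 * normally scheduled client while still deferring to other real-time
 * work; the source does not spell out the exact rationale for the value.
 */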
static int intel_breadcrumbs_signaler(void *arg)
{
	struct intel_engine_cs *engine = arg;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct drm_i915_gem_request *request;

	/* Install ourselves with high priority to reduce signalling latency */
	signaler_set_rtpriority();

	do {
		set_current_state(TASK_INTERRUPTIBLE);

		/* We are either woken up by the interrupt bottom-half,
		 * or by a client adding a new signaller. In both cases,
		 * the GPU seqno may have advanced beyond our oldest signal.
		 * If it has, propagate the signal, remove the waiter and
		 * check again with the next oldest signal. Otherwise we
		 * need to wait for a new interrupt from the GPU or for
		 * a new client.
		 */
		rcu_read_lock();
		request = rcu_dereference(b->first_signal);
		if (request)
			request = i915_gem_request_get_rcu(request);
		rcu_read_unlock();
		if (signal_complete(request)) {
			local_bh_disable();
			dma_fence_signal(&request->fence);
			local_bh_enable(); /* kick start the tasklets */

			spin_lock_irq(&b->rb_lock);

			/* Wake up all other completed waiters and select the
			 * next bottom-half for the next user interrupt.
			 */
			__intel_engine_remove_wait(engine,
						   &request->signaling.wait);

			/* Find the next oldest signal. Note that as we have
			 * not been holding the lock, another client may
			 * have installed an even older signal than the one
			 * we just completed - so double check we are still
			 * the oldest before picking the next one.
			 */
			if (request == rcu_access_pointer(b->first_signal)) {
				struct rb_node *rb =
					rb_next(&request->signaling.node);
				rcu_assign_pointer(b->first_signal,
						   rb ? to_signaler(rb) : NULL);
			}
			rb_erase(&request->signaling.node, &b->signals);
			RB_CLEAR_NODE(&request->signaling.node);

			spin_unlock_irq(&b->rb_lock);

			i915_gem_request_put(request);
		} else {
			DEFINE_WAIT(exec);

			if (kthread_should_stop()) {
				GEM_BUG_ON(request);
				break;
			}

			if (request)
				add_wait_queue(&request->execute, &exec);

			schedule();

			if (request)
				remove_wait_queue(&request->execute, &exec);

			if (kthread_should_park())
				kthread_parkme();

			i915_gem_request_put(request);
		}
	} while (1);
	__set_current_state(TASK_RUNNING);

	return 0;
}
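
/* Design note: there is one signaler kthread per engine ("i915/signal:%d").
 * It peeks at the retirement-ordered signal tree through the RCU-protected
 * b->first_signal pointer, so clients can queue new signals without
 * serialising against the thread, and it only takes b->rb_lock once a
 * fence has actually been signaled.
 */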
void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct rb_node *parent, **p;
	bool first, wakeup;
	u32 seqno;

	/* Note that we may be called from an interrupt handler on another
	 * device (e.g. nouveau signaling a fence completion causing us
	 * to submit a request, and so enable signaling). As such,
	 * we need to make sure that all other users of b->lock protect
	 * against interrupts, i.e. use spin_lock_irqsave.
	 */

	/* locked by dma_fence_enable_sw_signaling() (irqsafe fence->lock) */
	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&request->lock);

	seqno = i915_gem_request_global_seqno(request);
	if (!seqno)
		return;

	request->signaling.wait.tsk = b->signaler;
	request->signaling.wait.request = request;
	request->signaling.wait.seqno = seqno;
	i915_gem_request_get(request);

	spin_lock(&b->rb_lock);

	/* First add ourselves into the list of waiters, but register our
	 * bottom-half as the signaller thread. As per usual, only the oldest
	 * waiter (not just signaller) is tasked as the bottom-half waking
	 * up all completed waiters after the user interrupt.
	 *
	 * If we are the oldest waiter, enable the irq (after which we
	 * must double check that the seqno did not complete).
	 */
	wakeup = __intel_engine_add_wait(engine, &request->signaling.wait);

	/* Now insert ourselves into the retirement ordered list of signals
	 * on this engine. We track the oldest seqno as that will be the
	 * first signal to complete.
	 */
	parent = NULL;
	first = true;
	p = &b->signals.rb_node;
	while (*p) {
		parent = *p;
		if (i915_seqno_passed(seqno,
				      to_signaler(parent)->signaling.wait.seqno)) {
			p = &parent->rb_right;
			first = false;
		} else {
			p = &parent->rb_left;
		}
	}
	rb_link_node(&request->signaling.node, parent, p);
	rb_insert_color(&request->signaling.node, &b->signals);
	if (first)
		rcu_assign_pointer(b->first_signal, request);

	spin_unlock(&b->rb_lock);

	if (wakeup)
		wake_up_process(b->signaler);
}
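
/* Called via the dma-fence enable_signaling hook: the request joins both
 * trees, as a waiter (with the signaler thread as its task) in b->waiters
 * and as a signal in b->signals, so a single user interrupt can both wake
 * sleeping clients and drive fence callbacks.
 */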
void intel_engine_cancel_signaling(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&request->lock);
	GEM_BUG_ON(!request->signaling.wait.seqno);

	spin_lock(&b->rb_lock);

	if (!RB_EMPTY_NODE(&request->signaling.node)) {
		if (request == rcu_access_pointer(b->first_signal)) {
			struct rb_node *rb =
				rb_next(&request->signaling.node);
			rcu_assign_pointer(b->first_signal,
					   rb ? to_signaler(rb) : NULL);
		}
		rb_erase(&request->signaling.node, &b->signals);
		RB_CLEAR_NODE(&request->signaling.node);
		i915_gem_request_put(request);
	}

	__intel_engine_remove_wait(engine, &request->signaling.wait);

	spin_unlock(&b->rb_lock);

	request->signaling.wait.seqno = 0;
}
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct task_struct *tsk;

	spin_lock_init(&b->rb_lock);
	spin_lock_init(&b->irq_lock);

	setup_timer(&b->fake_irq,
		    intel_breadcrumbs_fake_irq,
		    (unsigned long)engine);
	setup_timer(&b->hangcheck,
		    intel_breadcrumbs_hangcheck,
		    (unsigned long)engine);

	/* Spawn a thread to provide a common bottom-half for all signals.
	 * As this is an asynchronous interface we cannot steal the current
	 * task for handling the bottom-half to the user interrupt, therefore
	 * we create a thread to do the coherent seqno dance after the
	 * interrupt and then signal the waitqueue (via the dma-buf/fence).
	 */
	tsk = kthread_run(intel_breadcrumbs_signaler, engine,
			  "i915/signal:%d", engine->id);
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);

	b->signaler = tsk;

	return 0;
}
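
/* Lifecycle: intel_engine_init_breadcrumbs() is paired with
 * intel_engine_fini_breadcrumbs() below; the latter tolerates a failed
 * or absent signaler (IS_ERR_OR_NULL) so it is safe to call on the
 * error path of engine setup.
 */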
static void cancel_fake_irq(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	del_timer_sync(&b->hangcheck);
	del_timer_sync(&b->fake_irq);
	clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
}
void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	cancel_fake_irq(engine);
	spin_lock_irq(&b->irq_lock);

	if (b->irq_enabled)
		irq_enable(engine);
	else
		irq_disable(engine);

	/* We set the IRQ_BREADCRUMB bit when we enable the irq presuming the
	 * GPU is active and may have already executed the MI_USER_INTERRUPT
	 * before the CPU is ready to receive. However, the engine is currently
	 * idle (we haven't started it yet), there is no possibility for a
	 * missed interrupt as we enabled the irq and so we can clear the
	 * immediate wakeup (until a real interrupt arrives for the waiter).
	 */
	clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);

	if (b->irq_armed)
		enable_fake_irq(b);

	spin_unlock_irq(&b->irq_lock);
}
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	/* The engines should be idle and all requests accounted for! */
	WARN_ON(READ_ONCE(b->irq_wait));
	WARN_ON(!RB_EMPTY_ROOT(&b->waiters));
	WARN_ON(rcu_access_pointer(b->first_signal));
	WARN_ON(!RB_EMPTY_ROOT(&b->signals));

	if (!IS_ERR_OR_NULL(b->signaler))
		kthread_stop(b->signaler);

	cancel_fake_irq(engine);
}
bool intel_breadcrumbs_busy(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	bool busy = false;

	spin_lock_irq(&b->rb_lock);

	if (b->irq_wait) {
		wake_up_process(b->irq_wait->tsk);
		busy |= intel_engine_flag(engine);
	}

	if (rcu_access_pointer(b->first_signal)) {
		wake_up_process(b->signaler);
		busy |= intel_engine_flag(engine);
	}

	spin_unlock_irq(&b->rb_lock);

	return busy;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_breadcrumbs.c"
#endif