/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>

#include "i915_drv.h"

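/* Breadcrumbs: tracking and signalling request completion.
 *
 * Waiters are kept in a per-engine rbtree, ordered by seqno; the oldest
 * waiter is nominated as the "bottom-half" and is the one woken directly
 * by the user interrupt, after which it wakes any other completed waiters.
 * A dedicated realtime kthread (the signaler) performs the same coherent
 * seqno check on behalf of dma-fence signalling.
 */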
static unsigned int __intel_breadcrumbs_wakeup(struct intel_breadcrumbs *b)
{
        struct intel_wait *wait;
        unsigned int result = 0;

        wait = b->first_wait;
        if (wait) {
                result = ENGINE_WAKEUP_WAITER;
                if (wake_up_process(wait->tsk))
                        result |= ENGINE_WAKEUP_ASLEEP;
        }

        return result;
}

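/* Wake the current bottom-half waiter, if any, taking b->lock on behalf of
 * the caller. Returns a mask of ENGINE_WAKEUP_WAITER (a waiter was present)
 * and ENGINE_WAKEUP_ASLEEP (that waiter actually needed waking).
 */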
unsigned int intel_engine_wakeup(struct intel_engine_cs *engine)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        unsigned long flags;
        unsigned int result;

        spin_lock_irqsave(&b->lock, flags);
        result = __intel_breadcrumbs_wakeup(b);
        spin_unlock_irqrestore(&b->lock, flags);

        return result;
}

static unsigned long wait_timeout(void)
{
        return round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES);
}

static noinline void missed_breadcrumb(struct intel_engine_cs *engine)
{
        DRM_DEBUG_DRIVER("%s missed breadcrumb at %pF, irq posted? %s\n",
                         engine->name, __builtin_return_address(0),
                         yesno(test_bit(ENGINE_IRQ_BREADCRUMB,
                                        &engine->irq_posted)));

        set_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
}

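/* Timer callback: checks whether the interrupt count has advanced since the
 * last check; if it has stalled while a waiter is still asleep, we have
 * likely missed a user interrupt and fall back to the fake-irq poll.
 */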
static void intel_breadcrumbs_hangcheck(unsigned long data)
{
        struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
        struct intel_breadcrumbs *b = &engine->breadcrumbs;

        if (!b->irq_armed)
                return;

        if (b->hangcheck_interrupts != atomic_read(&engine->irq_count)) {
                b->hangcheck_interrupts = atomic_read(&engine->irq_count);
                mod_timer(&b->hangcheck, wait_timeout());
                return;
        }

        /* We keep the hangcheck timer alive until we disarm the irq, even
         * if there are no waiters at present.
         *
         * If the waiter was currently running, assume it hasn't had a chance
         * to process the pending interrupt (e.g., low priority task on a
         * loaded system) and wait until it sleeps before declaring a missed
         * interrupt.
         *
         * If the waiter was asleep (and not even pending a wakeup), then we
         * must have missed an interrupt as the GPU has stopped advancing
         * but we still have a waiter. Assuming all batches complete within
         * DRM_I915_HANGCHECK_JIFFIES [1.5s]!
         */
        if (intel_engine_wakeup(engine) & ENGINE_WAKEUP_ASLEEP) {
                missed_breadcrumb(engine);
                mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
        } else {
                mod_timer(&b->hangcheck, wait_timeout());
        }
}

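/* Timer callback used as a stand-in for the user interrupt: kicks the
 * bottom-half every jiffie until all waiters are gone, then disarms.
 */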
static void intel_breadcrumbs_fake_irq(unsigned long data)
{
        struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        unsigned long flags;

        /*
         * The timer persists in case we cannot enable interrupts,
         * or if we have previously seen seqno/interrupt incoherency
         * ("missed interrupt" syndrome). Here the worker will wake up
         * every jiffie in order to kick the oldest waiter to do the
         * coherent seqno check.
         */

        spin_lock_irqsave(&b->lock, flags);
        if (!__intel_breadcrumbs_wakeup(b))
                __intel_engine_disarm_breadcrumbs(engine);
        spin_unlock_irqrestore(&b->lock, flags);
        if (!b->irq_armed)
                return;

        mod_timer(&b->fake_irq, jiffies + 1);

        /* Ensure that even if the GPU hangs, we get woken up.
         *
         * However, note that if no one is waiting, we never notice
         * a gpu hang. Eventually, we will have to wait for a resource
         * held by the GPU and so trigger a hangcheck. In the most
         * pathological case, this will be upon memory starvation! To
         * prevent this, we also queue the hangcheck from the retire
         * worker.
         */
        i915_queue_hangcheck(engine->i915);
}

static void irq_enable(struct intel_engine_cs *engine)
{
        /* Enabling the IRQ may miss the generation of the interrupt, but
         * we still need to force the barrier before reading the seqno,
         * just in case.
         */
        set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);

        /* Caller disables interrupts */
        spin_lock(&engine->i915->irq_lock);
        engine->irq_enable(engine);
        spin_unlock(&engine->i915->irq_lock);
}

static void irq_disable(struct intel_engine_cs *engine)
{
        /* Caller disables interrupts */
        spin_lock(&engine->i915->irq_lock);
        engine->irq_disable(engine);
        spin_unlock(&engine->i915->irq_lock);
}

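/* Disable the user interrupt and mark the breadcrumbs as disarmed.
 * The caller must hold b->lock (with interrupts already disabled).
 */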
void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;

        assert_spin_locked(&b->lock);

        if (b->irq_enabled) {
                irq_disable(engine);
                b->irq_enabled = false;
        }

        b->irq_armed = false;
}

void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        unsigned long flags;

        if (!b->irq_armed)
                return;

        spin_lock_irqsave(&b->lock, flags);

        /* We only disarm the irq when we are idle (all requests completed),
         * so if there remains a sleeping waiter, it missed the request
         * completion.
         */
        if (__intel_breadcrumbs_wakeup(b) & ENGINE_WAKEUP_ASLEEP)
                missed_breadcrumb(engine);

        __intel_engine_disarm_breadcrumbs(engine);

        spin_unlock_irqrestore(&b->lock, flags);
}

static bool use_fake_irq(const struct intel_breadcrumbs *b)
{
        const struct intel_engine_cs *engine =
                container_of(b, struct intel_engine_cs, breadcrumbs);

        if (!test_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings))
                return false;

        /* Only start with the heavy weight fake irq timer if we have not
         * seen any interrupts since enabling it the first time. If the
         * interrupts are still arriving, it means we made a mistake in our
         * engine->seqno_barrier(), a timing error that should be transient
         * and unlikely to reoccur.
         */
        return atomic_read(&engine->irq_count) == b->hangcheck_interrupts;
}

static void enable_fake_irq(struct intel_breadcrumbs *b)
{
        /* Ensure we never sleep indefinitely */
        if (!b->irq_enabled || use_fake_irq(b))
                mod_timer(&b->fake_irq, jiffies + 1);
        else
                mod_timer(&b->hangcheck, wait_timeout());
}

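/* Arm the breadcrumbs: enable the user interrupt (unless faked for the
 * selftest mocks or masked out via test_irq_rings) and start the hangcheck
 * or fake-irq timer so that a waiter can never sleep indefinitely. Caller
 * must hold b->lock.
 */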
static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
{
        struct intel_engine_cs *engine =
                container_of(b, struct intel_engine_cs, breadcrumbs);
        struct drm_i915_private *i915 = engine->i915;

        assert_spin_locked(&b->lock);
        if (b->irq_armed)
                return;

        /* The breadcrumb irq will be disarmed on the interrupt after the
         * waiters are signaled. This gives us a single interrupt window in
         * which we can add a new waiter and avoid the cost of re-enabling
         * the irq.
         */
        b->irq_armed = true;
        GEM_BUG_ON(b->irq_enabled);

        if (I915_SELFTEST_ONLY(b->mock)) {
                /* For our mock objects we want to avoid interaction
                 * with the real hardware (which is not set up). So
                 * we simply pretend we have enabled the powerwell
                 * and the irq, and leave it up to the mock
                 * implementation to call intel_engine_wakeup()
                 * itself when it wants to simulate a user interrupt.
                 */
                return;
        }

        /* Since we are waiting on a request, the GPU should be busy
         * and should have its own rpm reference. This is tracked
         * by i915->gt.awake, we can forgo holding our own wakref
         * for the interrupt as before i915->gt.awake is released (when
         * the driver is idle) we disarm the breadcrumbs.
         */

        /* No interrupts? Kick the waiter every jiffie! */
        if (intel_irqs_enabled(i915)) {
                if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings))
                        irq_enable(engine);
                b->irq_enabled = true;
        }

        enable_fake_irq(b);
}

static inline struct intel_wait *to_wait(struct rb_node *node)
{
        return rb_entry(node, struct intel_wait, node);
}

static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
                                              struct intel_wait *wait)
{
        assert_spin_locked(&b->lock);

        /* This request is completed, so remove it from the tree, mark it as
         * complete, and *then* wake up the associated task.
         */
        rb_erase(&wait->node, &b->waiters);
        RB_CLEAR_NODE(&wait->node);

        wake_up_process(wait->tsk); /* implicit smp_wmb() */
}

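/* Inserts @wait into the rbtree of waiters, pruning any waiters whose seqno
 * has already passed, and returns true if @wait became the first waiter
 * (the new bottom-half). Caller must hold b->lock.
 */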
static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
                                    struct intel_wait *wait)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        struct rb_node **p, *parent, *completed;
        bool first;
        u32 seqno;

        /* Insert the request into the retirement ordered list
         * of waiters by walking the rbtree. If we are the oldest
         * seqno in the tree (the first to be retired), then
         * set ourselves as the bottom-half.
         *
         * As we descend the tree, we prune completed branches: since we
         * hold the spinlock, we know that the first_waiter must be delayed,
         * and we can reduce some of the sequential wake-up latency if we
         * take action ourselves and wake up the completed tasks in
         * parallel. Also, by removing stale elements in the tree, we may be
         * able to reduce the ping-pong between the old bottom-half and
         * ourselves as first-waiter.
         */
        first = true;
        parent = NULL;
        completed = NULL;
        seqno = intel_engine_get_seqno(engine);

        /* If the request completed before we managed to grab the spinlock,
         * return now before adding ourselves to the rbtree. We let the
         * current bottom-half handle any pending wakeups and instead
         * try and get out of the way quickly.
         */
        if (i915_seqno_passed(seqno, wait->seqno)) {
                RB_CLEAR_NODE(&wait->node);
                return first;
        }

        p = &b->waiters.rb_node;
        while (*p) {
                parent = *p;
                if (wait->seqno == to_wait(parent)->seqno) {
                        /* We have multiple waiters on the same seqno, select
                         * the highest priority task (that with the smallest
                         * task->prio) to serve as the bottom-half for this
                         * group.
                         */
                        if (wait->tsk->prio > to_wait(parent)->tsk->prio) {
                                p = &parent->rb_right;
                                first = false;
                        } else {
                                p = &parent->rb_left;
                        }
                } else if (i915_seqno_passed(wait->seqno,
                                             to_wait(parent)->seqno)) {
                        p = &parent->rb_right;
                        if (i915_seqno_passed(seqno, to_wait(parent)->seqno))
                                completed = parent;
                        else
                                first = false;
                } else {
                        p = &parent->rb_left;
                }
        }
        rb_link_node(&wait->node, parent, p);
        rb_insert_color(&wait->node, &b->waiters);

        if (completed) {
                struct rb_node *next = rb_next(completed);

                GEM_BUG_ON(!next && !first);
                if (next && next != &wait->node) {
                        GEM_BUG_ON(first);
                        b->first_wait = to_wait(next);
                        /* As there is a delay between reading the current
                         * seqno, processing the completed tasks and selecting
                         * the next waiter, we may have missed the interrupt
                         * and so need the next bottom-half to wake up.
                         *
                         * Also as we enable the IRQ, we may miss the
                         * interrupt for that seqno, so we have to wake up
                         * the next bottom-half in order to do a coherent check
                         * in case the seqno passed.
                         */
                        __intel_breadcrumbs_enable_irq(b);
                        if (test_bit(ENGINE_IRQ_BREADCRUMB,
                                     &engine->irq_posted))
                                wake_up_process(to_wait(next)->tsk);
                }

                do {
                        struct intel_wait *crumb = to_wait(completed);
                        completed = rb_prev(completed);
                        __intel_breadcrumbs_finish(b, crumb);
                } while (completed);
        }

        if (first) {
                GEM_BUG_ON(rb_first(&b->waiters) != &wait->node);
                b->first_wait = wait;
                /* After assigning ourselves as the new bottom-half, we must
                 * perform a cursory check to prevent a missed interrupt.
                 * Either we miss the interrupt whilst programming the hardware,
                 * or if there was a previous waiter (for a later seqno) they
                 * may be woken instead of us (due to the inherent race
                 * in the unlocked read of b->irq_seqno_bh in the irq handler)
                 * and so we miss the wake up.
                 */
                __intel_breadcrumbs_enable_irq(b);
        }
        GEM_BUG_ON(!b->first_wait);
        GEM_BUG_ON(rb_first(&b->waiters) != &b->first_wait->node);

        return first;
}

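/* Locked wrapper around __intel_engine_add_wait(); returns true if @wait is
 * now the bottom-half and should perform the coherent seqno check itself.
 * A rough usage sketch (hypothetical; the real caller is the request wait
 * loop elsewhere in the driver):
 *
 *      struct intel_wait wait = { .tsk = current, .seqno = seqno };
 *
 *      if (intel_engine_add_wait(engine, &wait))
 *              ...we are the bottom-half, check the seqno ourselves...
 *      ...sleep until woken or the seqno passes...
 *      intel_engine_remove_wait(engine, &wait);
 */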
bool intel_engine_add_wait(struct intel_engine_cs *engine,
                           struct intel_wait *wait)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        bool first;

        spin_lock_irq(&b->lock);
        first = __intel_engine_add_wait(engine, wait);
        spin_unlock_irq(&b->lock);

        return first;
}

static inline bool chain_wakeup(struct rb_node *rb, int priority)
{
        return rb && to_wait(rb)->tsk->prio <= priority;
}

static inline int wakeup_priority(struct intel_breadcrumbs *b,
                                  struct task_struct *tsk)
{
        if (tsk == b->signaler)
                return INT_MIN;
        else
                return tsk->prio;
}

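/* Remove @wait from the rbtree and, if it was the bottom-half, promote the
 * next oldest waiter (waking any already-completed waiters along the way,
 * subject to the priority check in chain_wakeup()). Caller must hold
 * b->lock.
 */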
static void __intel_engine_remove_wait(struct intel_engine_cs *engine,
                                       struct intel_wait *wait)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;

        assert_spin_locked(&b->lock);

        if (RB_EMPTY_NODE(&wait->node))
                goto out;

        if (b->first_wait == wait) {
                const int priority = wakeup_priority(b, wait->tsk);
                struct rb_node *next;

                /* We are the current bottom-half. Find the next candidate,
                 * the first waiter in the queue on the remaining oldest
                 * request. As multiple seqnos may complete in the time it
                 * takes us to wake up and find the next waiter, we have to
                 * wake up that waiter for it to perform its own coherent
                 * completion check.
                 */
                next = rb_next(&wait->node);
                if (chain_wakeup(next, priority)) {
                        /* If the next waiter is already complete,
                         * wake it up and continue onto the next waiter. So
                         * if we have a small herd, they will wake up in
                         * parallel rather than sequentially, which should
                         * reduce the overall latency in waking all the
                         * completed clients.
                         *
                         * However, waking up a chain adds extra latency to
                         * the first_waiter. This is undesirable if that
                         * waiter is a high priority task.
                         */
                        u32 seqno = intel_engine_get_seqno(engine);

                        while (i915_seqno_passed(seqno, to_wait(next)->seqno)) {
                                struct rb_node *n = rb_next(next);

                                __intel_breadcrumbs_finish(b, to_wait(next));
                                next = n;
                                if (!chain_wakeup(next, priority))
                                        break;
                        }
                }

                if (next) {
                        /* In our haste, we may have completed the first waiter
                         * before we enabled the interrupt. Do so now as we
                         * have a second waiter for a future seqno. Afterwards,
                         * we have to wake up that waiter in case we missed
                         * the interrupt, or if we have to handle an
                         * exception rather than a seqno completion.
                         */
                        b->first_wait = to_wait(next);
                        if (b->first_wait->seqno != wait->seqno)
                                __intel_breadcrumbs_enable_irq(b);
                        wake_up_process(b->first_wait->tsk);
                } else {
                        b->first_wait = NULL;
                }
        } else {
                GEM_BUG_ON(rb_first(&b->waiters) == &wait->node);
        }

        GEM_BUG_ON(RB_EMPTY_NODE(&wait->node));
        rb_erase(&wait->node, &b->waiters);

out:
        GEM_BUG_ON(b->first_wait == wait);
        GEM_BUG_ON(rb_first(&b->waiters) !=
                   (b->first_wait ? &b->first_wait->node : NULL));
}

void intel_engine_remove_wait(struct intel_engine_cs *engine,
                              struct intel_wait *wait)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;

        /* Quick check to see if this waiter was already decoupled from
         * the tree by the bottom-half to avoid contention on the spinlock
         * by the herd.
         */
        if (RB_EMPTY_NODE(&wait->node))
                return;

        spin_lock_irq(&b->lock);
        __intel_engine_remove_wait(engine, wait);
        spin_unlock_irq(&b->lock);
}

static bool signal_valid(const struct drm_i915_gem_request *request)
{
        return intel_wait_check_request(&request->signaling.wait, request);
}

static bool signal_complete(const struct drm_i915_gem_request *request)
{
        if (!request)
                return false;

        /* If another process served as the bottom-half it may have already
         * signalled that this wait is completed.
         */
        if (intel_wait_complete(&request->signaling.wait))
                return signal_valid(request);

        /* Carefully check if the request is complete, giving time for the
         * seqno to be visible or if the GPU hung.
         */
        if (__i915_request_irq_complete(request))
                return true;

        return false;
}

static struct drm_i915_gem_request *to_signaler(struct rb_node *rb)
{
        return rb_entry(rb, struct drm_i915_gem_request, signaling.node);
}

static void signaler_set_rtpriority(void)
{
        struct sched_param param = { .sched_priority = 1 };

        sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
}

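/* Main loop for the signaler kthread: pick up the oldest signal and, once
 * its request completes, fire the dma-fence and remove it from both trees;
 * otherwise sleep until a new interrupt from the GPU or a new client
 * arrives.
 */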
static int intel_breadcrumbs_signaler(void *arg)
{
        struct intel_engine_cs *engine = arg;
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        struct drm_i915_gem_request *request;

        /* Install ourselves with high priority to reduce signalling latency */
        signaler_set_rtpriority();

        do {
                set_current_state(TASK_INTERRUPTIBLE);

                /* We are either woken up by the interrupt bottom-half,
                 * or by a client adding a new signaller. In both cases,
                 * the GPU seqno may have advanced beyond our oldest signal.
                 * If it has, propagate the signal, remove the waiter and
                 * check again with the next oldest signal. Otherwise we
                 * need to wait for a new interrupt from the GPU or for
                 * a new client.
                 */
                rcu_read_lock();
                request = rcu_dereference(b->first_signal);
                if (request)
                        request = i915_gem_request_get_rcu(request);
                rcu_read_unlock();
                if (signal_complete(request)) {
                        local_bh_disable();
                        dma_fence_signal(&request->fence);
                        local_bh_enable(); /* kick start the tasklets */

                        spin_lock_irq(&b->lock);

                        /* Wake up all other completed waiters and select the
                         * next bottom-half for the next user interrupt.
                         */
                        __intel_engine_remove_wait(engine,
                                                   &request->signaling.wait);

                        /* Find the next oldest signal. Note that as we have
                         * not been holding the lock, another client may
                         * have installed an even older signal than the one
                         * we just completed - so double check we are still
                         * the oldest before picking the next one.
                         */
                        if (request == rcu_access_pointer(b->first_signal)) {
                                struct rb_node *rb =
                                        rb_next(&request->signaling.node);
                                rcu_assign_pointer(b->first_signal,
                                                   rb ? to_signaler(rb) : NULL);
                        }
                        rb_erase(&request->signaling.node, &b->signals);
                        RB_CLEAR_NODE(&request->signaling.node);

                        spin_unlock_irq(&b->lock);

                        /* Drop the reference held by the signaling tree */
                        i915_gem_request_put(request);
                } else {
                        DEFINE_WAIT(exec);

                        if (kthread_should_stop()) {
                                GEM_BUG_ON(request);
                                break;
                        }

                        if (request)
                                add_wait_queue(&request->execute, &exec);

                        schedule();

                        if (request)
                                remove_wait_queue(&request->execute, &exec);

                        if (kthread_should_park())
                                kthread_parkme();
                }
                i915_gem_request_put(request);
        } while (1);
        __set_current_state(TASK_RUNNING);

        return 0;
}

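/* Called under the request lock (with interrupts disabled) to hand the
 * request over to the signaler kthread: the request joins the waiter rbtree
 * with b->signaler as its task, and the signals rbtree that the thread
 * drains in seqno order.
 */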
void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
{
        struct intel_engine_cs *engine = request->engine;
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        struct rb_node *parent, **p;
        bool first, wakeup;
        u32 seqno;

        /* Note that we may be called from an interrupt handler on another
         * device (e.g. nouveau signaling a fence completion causing us
         * to submit a request, and so enable signaling). As such,
         * we need to make sure that all other users of b->lock protect
         * against interrupts, i.e. use spin_lock_irqsave.
         */

        /* locked by dma_fence_enable_sw_signaling() (irqsafe fence->lock) */
        GEM_BUG_ON(!irqs_disabled());
        assert_spin_locked(&request->lock);

        seqno = i915_gem_request_global_seqno(request);
        if (!seqno)
                return;

        request->signaling.wait.tsk = b->signaler;
        request->signaling.wait.request = request;
        request->signaling.wait.seqno = seqno;
        i915_gem_request_get(request);

        spin_lock(&b->lock);

        /* First add ourselves into the list of waiters, but register our
         * bottom-half as the signaller thread. As per usual, only the oldest
         * waiter (not just signaller) is tasked as the bottom-half waking
         * up all completed waiters after the user interrupt.
         *
         * If we are the oldest waiter, enable the irq (after which we
         * must double check that the seqno did not complete).
         */
        wakeup = __intel_engine_add_wait(engine, &request->signaling.wait);

        /* Now insert ourselves into the retirement ordered list of signals
         * on this engine. We track the oldest seqno as that will be the
         * first signal to complete.
         */
        parent = NULL;
        first = true;
        p = &b->signals.rb_node;
        while (*p) {
                parent = *p;
                if (i915_seqno_passed(seqno,
                                      to_signaler(parent)->signaling.wait.seqno)) {
                        p = &parent->rb_right;
                        first = false;
                } else {
                        p = &parent->rb_left;
                }
        }
        rb_link_node(&request->signaling.node, parent, p);
        rb_insert_color(&request->signaling.node, &b->signals);
        if (first)
                rcu_assign_pointer(b->first_signal, request);

        spin_unlock(&b->lock);

        if (wakeup)
                wake_up_process(b->signaler);
}

void intel_engine_cancel_signaling(struct drm_i915_gem_request *request)
{
        struct intel_engine_cs *engine = request->engine;
        struct intel_breadcrumbs *b = &engine->breadcrumbs;

        GEM_BUG_ON(!irqs_disabled());
        assert_spin_locked(&request->lock);
        GEM_BUG_ON(!request->signaling.wait.seqno);

        spin_lock(&b->lock);

        if (!RB_EMPTY_NODE(&request->signaling.node)) {
                if (request == rcu_access_pointer(b->first_signal)) {
                        struct rb_node *rb =
                                rb_next(&request->signaling.node);
                        rcu_assign_pointer(b->first_signal,
                                           rb ? to_signaler(rb) : NULL);
                }
                rb_erase(&request->signaling.node, &b->signals);
                RB_CLEAR_NODE(&request->signaling.node);
                i915_gem_request_put(request);
        }

        __intel_engine_remove_wait(engine, &request->signaling.wait);

        spin_unlock(&b->lock);

        request->signaling.wait.seqno = 0;
}

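/* One-time per-engine setup: the lock, the two timers and the signaler
 * kthread. Returns a negative error code if the thread cannot be spawned.
 */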
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        struct task_struct *tsk;

        spin_lock_init(&b->lock);
        setup_timer(&b->fake_irq,
                    intel_breadcrumbs_fake_irq,
                    (unsigned long)engine);
        setup_timer(&b->hangcheck,
                    intel_breadcrumbs_hangcheck,
                    (unsigned long)engine);

        /* Spawn a thread to provide a common bottom-half for all signals.
         * As this is an asynchronous interface we cannot steal the current
         * task for handling the bottom-half to the user interrupt, therefore
         * we create a thread to do the coherent seqno dance after the
         * interrupt and then signal the waitqueue (via the dma-buf/fence).
         */
        tsk = kthread_run(intel_breadcrumbs_signaler, engine,
                          "i915/signal:%d", engine->id);
        if (IS_ERR(tsk))
                return PTR_ERR(tsk);

        b->signaler = tsk;

        return 0;
}

static void cancel_fake_irq(struct intel_engine_cs *engine)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;

        del_timer_sync(&b->hangcheck);
        del_timer_sync(&b->fake_irq);
        clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
}

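/* Re-synchronise the irq bookkeeping with the hardware after a reset,
 * cancelling the fake-irq timers and clearing any stale missed-interrupt
 * state.
 */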
void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;

        cancel_fake_irq(engine);
        spin_lock_irq(&b->lock);

        if (b->irq_enabled)
                irq_enable(engine);
        else
                irq_disable(engine);

        /* We set the IRQ_BREADCRUMB bit when we enable the irq presuming the
         * GPU is active and may have already executed the MI_USER_INTERRUPT
         * before the CPU is ready to receive. However, as the engine is
         * currently idle (we haven't started it yet), there is no possibility
         * for a missed interrupt as we enabled the irq and so we can clear the
         * immediate wakeup (until a real interrupt arrives for the waiter).
         */
        clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);

        if (b->irq_armed)
                enable_fake_irq(b);

        spin_unlock_irq(&b->lock);
}

void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;

        /* The engines should be idle and all requests accounted for! */
        WARN_ON(READ_ONCE(b->first_wait));
        WARN_ON(!RB_EMPTY_ROOT(&b->waiters));
        WARN_ON(rcu_access_pointer(b->first_signal));
        WARN_ON(!RB_EMPTY_ROOT(&b->signals));

        if (!IS_ERR_OR_NULL(b->signaler))
                kthread_stop(b->signaler);

        cancel_fake_irq(engine);
}

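/* Report whether any waiters or signals remain outstanding on this engine,
 * waking the relevant tasks as a side-effect so that they notice any change
 * of state.
 */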
bool intel_breadcrumbs_busy(struct intel_engine_cs *engine)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        bool busy = false;

        spin_lock_irq(&b->lock);

        if (b->first_wait) {
                wake_up_process(b->first_wait->tsk);
                busy |= intel_engine_flag(engine);
        }

        if (rcu_access_pointer(b->first_signal)) {
                wake_up_process(b->signaler);
                busy |= intel_engine_flag(engine);
        }

        spin_unlock_irq(&b->lock);

        return busy;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_breadcrumbs.c"
#endif