/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#include <linux/kthread.h>

#include "i915_drv.h"
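/* Breadcrumbs track requests in flight by watching for the GPU to write
 * its seqno past each request's breadcrumb. To avoid every waiter polling
 * the hardware, the oldest waiter is elected bottom-half for the user
 * interrupt and is responsible for waking the rest; if interrupts are
 * unavailable, or have been seen to go missing, a fake-irq timer kicks
 * the waiters instead.
 */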
static void intel_breadcrumbs_hangcheck(unsigned long data)
{
	struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	if (!b->irq_enabled)
		return;

	if (time_before(jiffies, b->timeout)) {
		mod_timer(&b->hangcheck, b->timeout);
		return;
	}

	DRM_DEBUG("Hangcheck timer elapsed... %s idle\n", engine->name);
	set_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
	mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);

	/* Ensure that even if the GPU hangs, we get woken up.
	 *
	 * However, note that if no one is waiting, we never notice
	 * a GPU hang. Eventually, we will have to wait for a resource
	 * held by the GPU and so trigger a hangcheck. In the most
	 * pathological case, this will be upon memory starvation! To
	 * prevent this, we also queue the hangcheck from the retire
	 * worker.
	 */
	i915_queue_hangcheck(engine->i915);
}
static unsigned long wait_timeout(void)
{
	return round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES);
}
static void intel_breadcrumbs_fake_irq(unsigned long data)
{
	struct intel_engine_cs *engine = (struct intel_engine_cs *)data;

	/*
	 * The timer persists in case we cannot enable interrupts,
	 * or if we have previously seen seqno/interrupt incoherency
	 * ("missed interrupt" syndrome). Here the worker will wake up
	 * every jiffie in order to kick the oldest waiter to do the
	 * coherent seqno check.
	 */
	if (intel_engine_wakeup(engine))
		mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
}
static void irq_enable(struct intel_engine_cs *engine)
{
	/* Enabling the IRQ may miss the generation of the interrupt, but
	 * we still need to force the barrier before reading the seqno,
	 * just in case.
	 */
	engine->breadcrumbs.irq_posted = true;

	spin_lock_irq(&engine->i915->irq_lock);
	engine->irq_enable(engine);
	spin_unlock_irq(&engine->i915->irq_lock);
}
static void irq_disable(struct intel_engine_cs *engine)
{
	spin_lock_irq(&engine->i915->irq_lock);
	engine->irq_disable(engine);
	spin_unlock_irq(&engine->i915->irq_lock);

	engine->breadcrumbs.irq_posted = false;
}
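/* Caller must hold b->lock. Takes a runtime-pm wakeref (dropped again by
 * __intel_breadcrumbs_disable_irq()) and either unmasks the user interrupt
 * or, if interrupts are unavailable or unreliable, arms the fake-irq
 * timer as a fallback.
 */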
static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
{
	struct intel_engine_cs *engine =
		container_of(b, struct intel_engine_cs, breadcrumbs);
	struct drm_i915_private *i915 = engine->i915;

	assert_spin_locked(&b->lock);
	if (b->rpm_wakelock)
		return;

	/* Since we are waiting on a request, the GPU should be busy
	 * and should have its own rpm reference. For completeness,
	 * record an rpm reference for ourselves to cover the
	 * interrupt we unmask.
	 */
	intel_runtime_pm_get_noresume(i915);
	b->rpm_wakelock = true;

	/* No interrupts? Kick the waiter every jiffie! */
	if (intel_irqs_enabled(i915)) {
		if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings))
			irq_enable(engine);
		b->irq_enabled = true;
	}

	if (!b->irq_enabled ||
	    test_bit(engine->id, &i915->gpu_error.missed_irq_rings)) {
		mod_timer(&b->fake_irq, jiffies + 1);
	} else {
		/* Ensure we never sleep indefinitely */
		GEM_BUG_ON(!time_after(b->timeout, jiffies));
		mod_timer(&b->hangcheck, b->timeout);
	}
}
static void __intel_breadcrumbs_disable_irq(struct intel_breadcrumbs *b)
{
	struct intel_engine_cs *engine =
		container_of(b, struct intel_engine_cs, breadcrumbs);

	assert_spin_locked(&b->lock);
	if (!b->rpm_wakelock)
		return;

	if (b->irq_enabled) {
		irq_disable(engine);
		b->irq_enabled = false;
	}

	intel_runtime_pm_put(engine->i915);
	b->rpm_wakelock = false;
}
static inline struct intel_wait *to_wait(struct rb_node *node)
{
	return container_of(node, struct intel_wait, node);
}
static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
					      struct intel_wait *wait)
{
	assert_spin_locked(&b->lock);

	/* This request is completed, so remove it from the tree, mark it as
	 * complete, and *then* wake up the associated task.
	 */
	rb_erase(&wait->node, &b->waiters);
	RB_CLEAR_NODE(&wait->node);

	wake_up_process(wait->tsk); /* implicit smp_wmb() */
}
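/* Adds @wait to the engine's tree of waiters. Returns true if @wait is now
 * the oldest waiter (and hence the bottom-half), or if its seqno already
 * passed, so that the caller knows to perform its own coherent seqno check
 * rather than wait for an interrupt.
 */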
static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
				    struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct rb_node **p, *parent, *completed;
	bool first;
	u32 seqno;

	/* Insert the request into the retirement ordered list
	 * of waiters by walking the rbtree. If we are the oldest
	 * seqno in the tree (the first to be retired), then
	 * set ourselves as the bottom-half.
	 *
	 * As we descend the tree, prune completed branches; since we hold
	 * the spinlock, we know that the first_waiter must be delayed and
	 * we can reduce some of the sequential wake up latency if we take
	 * action ourselves and wake up the completed tasks in parallel.
	 * Also, by removing stale elements in the tree, we may be able to
	 * reduce the ping-pong between the old bottom-half and ourselves
	 * as first-waiter.
	 */
	first = true;
	parent = NULL;
	completed = NULL;
	seqno = intel_engine_get_seqno(engine);

	/* If the request completed before we managed to grab the spinlock,
	 * return now before adding ourselves to the rbtree. We let the
	 * current bottom-half handle any pending wakeups and instead
	 * try and get out of the way quickly.
	 */
	if (i915_seqno_passed(seqno, wait->seqno)) {
		RB_CLEAR_NODE(&wait->node);
		return first;
	}

	p = &b->waiters.rb_node;
	while (*p) {
		parent = *p;
		if (wait->seqno == to_wait(parent)->seqno) {
			/* We have multiple waiters on the same seqno, select
			 * the highest priority task (that with the smallest
			 * task->prio) to serve as the bottom-half for this
			 * group.
			 */
			if (wait->tsk->prio > to_wait(parent)->tsk->prio) {
				p = &parent->rb_right;
				first = false;
			} else {
				p = &parent->rb_left;
			}
		} else if (i915_seqno_passed(wait->seqno,
					     to_wait(parent)->seqno)) {
			p = &parent->rb_right;
			if (i915_seqno_passed(seqno, to_wait(parent)->seqno))
				completed = parent;
			else
				first = false;
		} else {
			p = &parent->rb_left;
		}
	}
	rb_link_node(&wait->node, parent, p);
	rb_insert_color(&wait->node, &b->waiters);
	GEM_BUG_ON(!first && !rcu_access_pointer(b->irq_seqno_bh));
	if (completed) {
		struct rb_node *next = rb_next(completed);

		GEM_BUG_ON(!next && !first);
		if (next && next != &wait->node) {
			GEM_BUG_ON(first);
			b->timeout = wait_timeout();
			b->first_wait = to_wait(next);
			rcu_assign_pointer(b->irq_seqno_bh, b->first_wait->tsk);
			/* As there is a delay between reading the current
			 * seqno, processing the completed tasks and selecting
			 * the next waiter, we may have missed the interrupt
			 * and so need the next bottom-half to wake up.
			 *
			 * Also as we enable the IRQ, we may miss the
			 * interrupt for that seqno, so we have to wake up
			 * the next bottom-half in order to do a coherent check
			 * in case the seqno passed.
			 */
			__intel_breadcrumbs_enable_irq(b);
			if (READ_ONCE(b->irq_posted))
				wake_up_process(to_wait(next)->tsk);
		}

		do {
			struct intel_wait *crumb = to_wait(completed);
			completed = rb_prev(completed);
			__intel_breadcrumbs_finish(b, crumb);
		} while (completed);
	}

	if (first) {
		GEM_BUG_ON(rb_first(&b->waiters) != &wait->node);
		b->timeout = wait_timeout();
		b->first_wait = wait;
		rcu_assign_pointer(b->irq_seqno_bh, wait->tsk);
		/* After assigning ourselves as the new bottom-half, we must
		 * perform a cursory check to prevent a missed interrupt.
		 * Either we miss the interrupt whilst programming the hardware,
		 * or if there was a previous waiter (for a later seqno) they
		 * may be woken instead of us (due to the inherent race
		 * in the unlocked read of b->irq_seqno_bh in the irq handler)
		 * and so we miss the wake up.
		 */
		__intel_breadcrumbs_enable_irq(b);
	}
	GEM_BUG_ON(!rcu_access_pointer(b->irq_seqno_bh));
	GEM_BUG_ON(!b->first_wait);
	GEM_BUG_ON(rb_first(&b->waiters) != &b->first_wait->node);

	return first;
}
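/* intel_engine_add_wait() queues a waiter for a breadcrumb on @engine.
 * A rough sketch of the expected calling pattern (not lifted verbatim
 * from any caller; assumes the intel_wait helpers declared alongside
 * struct intel_wait):
 *
 *	struct intel_wait wait;
 *
 *	intel_wait_init(&wait, seqno);
 *	if (intel_engine_add_wait(engine, &wait))
 *		... we are the bottom-half, recheck the seqno ourselves ...
 *	... sleep until woken, rechecking completion on each wakeup ...
 *	intel_engine_remove_wait(engine, &wait);
 */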
bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	bool first;

	spin_lock(&b->lock);
	first = __intel_engine_add_wait(engine, wait);
	spin_unlock(&b->lock);

	return first;
}
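/* Note that a smaller task->prio means a higher priority task, so
 * chain_wakeup() reports whether the waiter at @rb may be woken as part
 * of a chain without delaying a waiter of higher priority than @priority.
 */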
static inline bool chain_wakeup(struct rb_node *rb, int priority)
{
	return rb && to_wait(rb)->tsk->prio <= priority;
}
static inline int wakeup_priority(struct intel_breadcrumbs *b,
				  struct task_struct *tsk)
{
	if (tsk == b->signaler)
		return INT_MIN;
	else
		return tsk->prio;
}
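/* Decouples @wait from the engine's tree of waiters. If @wait was the
 * bottom-half, the role is handed to the next oldest waiter, waking any
 * waiters whose seqno has already passed along the way.
 */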
void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	/* Quick check to see if this waiter was already decoupled from
	 * the tree by the bottom-half to avoid contention on the spinlock
	 * by the herd.
	 */
	if (RB_EMPTY_NODE(&wait->node))
		return;

	spin_lock(&b->lock);

	if (RB_EMPTY_NODE(&wait->node))
		goto out_unlock;

	if (b->first_wait == wait) {
		const int priority = wakeup_priority(b, wait->tsk);
		struct rb_node *next;

		GEM_BUG_ON(rcu_access_pointer(b->irq_seqno_bh) != wait->tsk);

		/* We are the current bottom-half. Find the next candidate,
		 * the first waiter in the queue on the remaining oldest
		 * request. As multiple seqnos may complete in the time it
		 * takes us to wake up and find the next waiter, we have to
		 * wake up that waiter for it to perform its own coherent
		 * completion check.
		 */
		next = rb_next(&wait->node);
		if (chain_wakeup(next, priority)) {
			/* If the next waiter is already complete,
			 * wake it up and continue onto the next waiter. So
			 * if we have a small herd, they will wake up in
			 * parallel rather than sequentially, which should
			 * reduce the overall latency in waking all the
			 * completed clients.
			 *
			 * However, waking up a chain adds extra latency to
			 * the first_waiter. This is undesirable if that
			 * waiter is a high priority task.
			 */
			u32 seqno = intel_engine_get_seqno(engine);

			while (i915_seqno_passed(seqno, to_wait(next)->seqno)) {
				struct rb_node *n = rb_next(next);

				__intel_breadcrumbs_finish(b, to_wait(next));
				next = n;
				if (!chain_wakeup(next, priority))
					break;
			}
		}
		if (next) {
			/* In our haste, we may have completed the first waiter
			 * before we enabled the interrupt. Do so now as we
			 * have a second waiter for a future seqno. Afterwards,
			 * we have to wake up that waiter in case we missed
			 * the interrupt, or if we have to handle an
			 * exception rather than a seqno completion.
			 */
			b->timeout = wait_timeout();
			b->first_wait = to_wait(next);
			rcu_assign_pointer(b->irq_seqno_bh, b->first_wait->tsk);
			if (b->first_wait->seqno != wait->seqno)
				__intel_breadcrumbs_enable_irq(b);
			wake_up_process(b->first_wait->tsk);
		} else {
			b->first_wait = NULL;
			rcu_assign_pointer(b->irq_seqno_bh, NULL);
			__intel_breadcrumbs_disable_irq(b);
		}
	} else {
		GEM_BUG_ON(rb_first(&b->waiters) == &wait->node);
	}

	GEM_BUG_ON(RB_EMPTY_NODE(&wait->node));
	rb_erase(&wait->node, &b->waiters);

out_unlock:
	GEM_BUG_ON(b->first_wait == wait);
	GEM_BUG_ON(rb_first(&b->waiters) !=
		   (b->first_wait ? &b->first_wait->node : NULL));
	GEM_BUG_ON(!rcu_access_pointer(b->irq_seqno_bh) ^ RB_EMPTY_ROOT(&b->waiters));
	spin_unlock(&b->lock);
}
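/* Returns true if @request (which may be NULL if no signal is queued)
 * has completed and its fence should now be signalled.
 */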
static bool signal_complete(struct drm_i915_gem_request *request)
{
	if (!request)
		return false;

	/* If another process served as the bottom-half it may have already
	 * signalled that this wait is complete.
	 */
	if (intel_wait_complete(&request->signaling.wait))
		return true;

	/* Carefully check if the request is complete, giving time for the
	 * seqno to be visible or if the GPU hung.
	 */
	if (__i915_request_irq_complete(request))
		return true;

	return false;
}
static struct drm_i915_gem_request *to_signaler(struct rb_node *rb)
{
	return container_of(rb, struct drm_i915_gem_request, signaling.node);
}
static void signaler_set_rtpriority(void)
{
	struct sched_param param = { .sched_priority = 1 };

	sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
}
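/* The signaler kthread provides the common bottom-half for all fence
 * signals on this engine: it waits (as an ordinary waiter, registered by
 * intel_engine_enable_signaling()) upon the oldest outstanding signal,
 * fires that fence, and then moves onto the next oldest.
 */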
static int intel_breadcrumbs_signaler(void *arg)
{
	struct intel_engine_cs *engine = arg;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct drm_i915_gem_request *request;

	/* Install ourselves with high priority to reduce signalling latency */
	signaler_set_rtpriority();

	do {
		set_current_state(TASK_INTERRUPTIBLE);

		/* We are either woken up by the interrupt bottom-half,
		 * or by a client adding a new signaller. In both cases,
		 * the GPU seqno may have advanced beyond our oldest signal.
		 * If it has, propagate the signal, remove the waiter and
		 * check again with the next oldest signal. Otherwise we
		 * need to wait for a new interrupt from the GPU or for
		 * a new client.
		 */
		request = READ_ONCE(b->first_signal);
		if (signal_complete(request)) {
			/* Wake up all other completed waiters and select the
			 * next bottom-half for the next user interrupt.
			 */
			intel_engine_remove_wait(engine,
						 &request->signaling.wait);

			local_bh_disable();
			fence_signal(&request->fence);
			local_bh_enable(); /* kick start the tasklets */

			/* Find the next oldest signal. Note that as we have
			 * not been holding the lock, another client may
			 * have installed an even older signal than the one
			 * we just completed - so double check we are still
			 * the oldest before picking the next one.
			 */
			spin_lock(&b->lock);
			if (request == b->first_signal) {
				struct rb_node *rb =
					rb_next(&request->signaling.node);
				b->first_signal = rb ? to_signaler(rb) : NULL;
			}
			rb_erase(&request->signaling.node, &b->signals);
			spin_unlock(&b->lock);

			i915_gem_request_put(request);
		} else {
			if (kthread_should_stop())
				break;

			schedule();
		}
	} while (1);
	__set_current_state(TASK_RUNNING);

	return 0;
}
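/* Called with request->lock held, from the fence_enable_sw_signaling()
 * path, to ask the signaler kthread to fire request->fence once the
 * breadcrumb for request->fence.seqno has passed.
 */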
void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct rb_node *parent, **p;
	bool first, wakeup;

	/* locked by fence_enable_sw_signaling() */
	assert_spin_locked(&request->lock);

	request->signaling.wait.tsk = b->signaler;
	request->signaling.wait.seqno = request->fence.seqno;
	i915_gem_request_get(request);

	spin_lock(&b->lock);

	/* First add ourselves into the list of waiters, but register our
	 * bottom-half as the signaller thread. As per usual, only the oldest
	 * waiter (not just signaller) is tasked as the bottom-half waking
	 * up all completed waiters after the user interrupt.
	 *
	 * If we are the oldest waiter, enable the irq (after which we
	 * must double check that the seqno did not complete).
	 */
	wakeup = __intel_engine_add_wait(engine, &request->signaling.wait);

	/* Now insert ourselves into the retirement ordered list of signals
	 * on this engine. We track the oldest seqno as that will be the
	 * first signal to complete.
	 */
	parent = NULL;
	first = true;
	p = &b->signals.rb_node;
	while (*p) {
		parent = *p;
		if (i915_seqno_passed(request->fence.seqno,
				      to_signaler(parent)->fence.seqno)) {
			p = &parent->rb_right;
			first = false;
		} else {
			p = &parent->rb_left;
		}
	}
	rb_link_node(&request->signaling.node, parent, p);
	rb_insert_color(&request->signaling.node, &b->signals);
	if (first)
		smp_store_mb(b->first_signal, request);

	spin_unlock(&b->lock);

	if (wakeup)
		wake_up_process(b->signaler);
}
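/* One-time per-engine setup of the breadcrumbs lock, the fallback timers
 * and the signaler kthread. Returns 0 on success or a negative error code
 * if the kthread could not be spawned.
 */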
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct task_struct *tsk;

	spin_lock_init(&b->lock);
	setup_timer(&b->fake_irq,
		    intel_breadcrumbs_fake_irq,
		    (unsigned long)engine);
	setup_timer(&b->hangcheck,
		    intel_breadcrumbs_hangcheck,
		    (unsigned long)engine);

	/* Spawn a thread to provide a common bottom-half for all signals.
	 * As this is an asynchronous interface we cannot steal the current
	 * task for handling the bottom-half to the user interrupt, therefore
	 * we create a thread to do the coherent seqno dance after the
	 * interrupt and then signal the waitqueue (via the dma-buf/fence).
	 */
	tsk = kthread_run(intel_breadcrumbs_signaler, engine,
			  "i915/signal:%d", engine->id);
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);

	b->signaler = tsk;

	return 0;
}
static void cancel_fake_irq(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	del_timer_sync(&b->hangcheck);
	del_timer_sync(&b->fake_irq);
	clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
}
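/* Called to restore a sane interrupt state, e.g. after a GPU reset may
 * have left the IMR in an unknown state: quiesce the fallback timers and
 * then re-enable the interrupt (or re-arm the timers) only if someone is
 * still waiting.
 */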
void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	cancel_fake_irq(engine);
	spin_lock(&b->lock);

	__intel_breadcrumbs_disable_irq(b);
	if (intel_engine_has_waiter(engine)) {
		b->timeout = wait_timeout();
		__intel_breadcrumbs_enable_irq(b);
		if (READ_ONCE(b->irq_posted))
			wake_up_process(b->first_wait->tsk);
	} else {
		/* sanitize the IMR and unmask any auxiliary interrupts */
		irq_disable(engine);
	}

	spin_unlock(&b->lock);
}
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	if (!IS_ERR_OR_NULL(b->signaler))
		kthread_stop(b->signaler);

	cancel_fake_irq(engine);
}
unsigned int intel_kick_waiters(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned int mask = 0;

	/* To avoid the task_struct disappearing beneath us as we wake up
	 * the process, we must first inspect the task_struct->state under the
	 * RCU lock, i.e. as we call wake_up_process() we must be holding the
	 * rcu_read_lock().
	 */
	for_each_engine(engine, i915, id)
		if (unlikely(intel_engine_wakeup(engine)))
			mask |= intel_engine_flag(engine);

	return mask;
}
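/* Poke the signaler kthread on every engine that still has a signal
 * queued, e.g. after a missed interrupt, so that it re-runs its coherent
 * seqno check.
 */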
unsigned int intel_kick_signalers(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned int mask = 0;

	for_each_engine(engine, i915, id) {
		if (unlikely(READ_ONCE(engine->breadcrumbs.first_signal))) {
			wake_up_process(engine->breadcrumbs.signaler);
			mask |= intel_engine_flag(engine);
		}
	}

	return mask;
}
);