/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>

#include "i915_drv.h"

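/*
 * Wake the current bottom-half waiter, if any. The caller must hold
 * b->irq_lock. Returns a mask of ENGINE_WAKEUP_WAITER (a waiter was
 * registered) and ENGINE_WAKEUP_ASLEEP (that waiter was actually asleep
 * and has now been woken).
 */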
static unsigned int __intel_breadcrumbs_wakeup(struct intel_breadcrumbs *b)
{
	struct intel_wait *wait;
	unsigned int result = 0;

	lockdep_assert_held(&b->irq_lock);

	wait = b->irq_wait;
	if (wait) {
		result = ENGINE_WAKEUP_WAITER;
		if (wake_up_process(wait->tsk))
			result |= ENGINE_WAKEUP_ASLEEP;
	}

	return result;
}

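/* Irq-safe wrapper around __intel_breadcrumbs_wakeup() for callers that
 * do not already hold b->irq_lock.
 */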
unsigned int intel_engine_wakeup(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	unsigned long flags;
	unsigned int result;

	spin_lock_irqsave(&b->irq_lock, flags);
	result = __intel_breadcrumbs_wakeup(b);
	spin_unlock_irqrestore(&b->irq_lock, flags);

	return result;
}

static unsigned long wait_timeout(void)
{
	return round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES);
}

static noinline void missed_breadcrumb(struct intel_engine_cs *engine)
{
	DRM_DEBUG_DRIVER("%s missed breadcrumb at %pS, irq posted? %s, current seqno=%x, last=%x\n",
			 engine->name, __builtin_return_address(0),
			 yesno(test_bit(ENGINE_IRQ_BREADCRUMB,
					&engine->irq_posted)),
			 intel_engine_get_seqno(engine),
			 intel_engine_last_submit(engine));

	set_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
}

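/*
 * Hangcheck timer, armed alongside the breadcrumb irq. If interrupts have
 * kept arriving since the last check, simply rearm the timer; if not, and
 * the bottom-half waiter is found asleep, report a missed breadcrumb and
 * fall back to polling via the fake-irq timer.
 */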
static void intel_breadcrumbs_hangcheck(struct timer_list *t)
{
	struct intel_engine_cs *engine = from_timer(engine, t,
						    breadcrumbs.hangcheck);
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	if (!b->irq_armed)
		return;

	if (b->hangcheck_interrupts != atomic_read(&engine->irq_count)) {
		b->hangcheck_interrupts = atomic_read(&engine->irq_count);
		mod_timer(&b->hangcheck, wait_timeout());
		return;
	}

	/* We keep the hangcheck timer alive until we disarm the irq, even
	 * if there are no waiters at present.
	 *
	 * If the waiter was currently running, assume it hasn't had a chance
	 * to process the pending interrupt (e.g. a low-priority task on a
	 * loaded system) and wait until it sleeps before declaring a missed
	 * interrupt.
	 *
	 * If the waiter was asleep (and not even pending a wakeup), then we
	 * must have missed an interrupt as the GPU has stopped advancing
	 * but we still have a waiter. We assume all batches complete within
	 * DRM_I915_HANGCHECK_JIFFIES [1.5s]!
	 */
	if (intel_engine_wakeup(engine) & ENGINE_WAKEUP_ASLEEP) {
		missed_breadcrumb(engine);
		mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
	} else {
		mod_timer(&b->hangcheck, wait_timeout());
	}
}

static void intel_breadcrumbs_fake_irq(struct timer_list *t)
{
	struct intel_engine_cs *engine = from_timer(engine, t,
						    breadcrumbs.fake_irq);
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	/* The timer persists in case we cannot enable interrupts,
	 * or if we have previously seen seqno/interrupt incoherency
	 * ("missed interrupt" syndrome, better known as a "missed breadcrumb").
	 * Here the worker will wake up every jiffie in order to kick the
	 * oldest waiter to do the coherent seqno check.
	 */

	spin_lock_irq(&b->irq_lock);
	if (!__intel_breadcrumbs_wakeup(b))
		__intel_engine_disarm_breadcrumbs(engine);
	spin_unlock_irq(&b->irq_lock);
	if (!b->irq_armed)
		return;

	mod_timer(&b->fake_irq, jiffies + 1);

	/* Ensure that even if the GPU hangs, we get woken up.
	 *
	 * However, note that if no one is waiting, we never notice
	 * a gpu hang. Eventually, we will have to wait for a resource
	 * held by the GPU and so trigger a hangcheck. In the most
	 * pathological case, this will be upon memory starvation! To
	 * prevent this, we also queue the hangcheck from the retire
	 * worker.
	 */
	i915_queue_hangcheck(engine->i915);
}

static void irq_enable(struct intel_engine_cs *engine)
{
	/* Enabling the IRQ may miss the generation of the interrupt, but
	 * we still need to force the barrier before reading the seqno,
	 * just in case.
	 */
	set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);

	/* Caller disables interrupts */
	spin_lock(&engine->i915->irq_lock);
	engine->irq_enable(engine);
	spin_unlock(&engine->i915->irq_lock);
}

static void irq_disable(struct intel_engine_cs *engine)
{
	/* Caller disables interrupts */
	spin_lock(&engine->i915->irq_lock);
	engine->irq_disable(engine);
	spin_unlock(&engine->i915->irq_lock);
}

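/*
 * Disarm the breadcrumb interrupt on an idle engine. The caller must hold
 * b->irq_lock and must have already removed the bottom-half waiter.
 */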
void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	lockdep_assert_held(&b->irq_lock);
	GEM_BUG_ON(b->irq_wait);

	if (b->irq_enabled) {
		irq_disable(engine);
		b->irq_enabled = false;
	}

	b->irq_armed = false;
}

void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct intel_wait *wait, *n, *first;

	if (!b->irq_armed)
		goto wakeup_signaler;

	/* We only disarm the irq when we are idle (all requests completed),
	 * so if the bottom-half remains asleep, it missed the request
	 * completion.
	 */

	spin_lock_irq(&b->rb_lock);

	spin_lock(&b->irq_lock);
	first = fetch_and_zero(&b->irq_wait);
	__intel_engine_disarm_breadcrumbs(engine);
	spin_unlock(&b->irq_lock);

	rbtree_postorder_for_each_entry_safe(wait, n, &b->waiters, node) {
		RB_CLEAR_NODE(&wait->node);
		if (wake_up_process(wait->tsk) && wait == first)
			missed_breadcrumb(engine);
	}
	b->waiters = RB_ROOT;

	spin_unlock_irq(&b->rb_lock);

	/*
	 * The signaling thread may be asleep holding a reference to a request
	 * that had its signaling cancelled prior to being preempted. We need
	 * to kick the signaler, just in case, to release any such reference.
	 */
wakeup_signaler:
	wake_up_process(b->signaler);
}

static bool use_fake_irq(const struct intel_breadcrumbs *b)
{
	const struct intel_engine_cs *engine =
		container_of(b, struct intel_engine_cs, breadcrumbs);

	if (!test_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings))
		return false;

	/* Only start with the heavyweight fake irq timer if we have not
	 * seen any interrupts since enabling it the first time. If the
	 * interrupts are still arriving, it means we made a mistake in our
	 * engine->seqno_barrier(), a timing error that should be transient
	 * and unlikely to reoccur.
	 */
	return atomic_read(&engine->irq_count) == b->hangcheck_interrupts;
}

static void enable_fake_irq(struct intel_breadcrumbs *b)
{
	/* Ensure we never sleep indefinitely */
	if (!b->irq_enabled || use_fake_irq(b))
		mod_timer(&b->fake_irq, jiffies + 1);
	else
		mod_timer(&b->hangcheck, wait_timeout());
}

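/*
 * Arm the breadcrumb interrupt on behalf of the first waiter. The caller
 * must hold b->irq_lock. Returns true if the irq was armed by this call
 * (or pretended to be, for mock engines), false if it was already armed.
 */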
static bool __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
{
	struct intel_engine_cs *engine =
		container_of(b, struct intel_engine_cs, breadcrumbs);
	struct drm_i915_private *i915 = engine->i915;

	lockdep_assert_held(&b->irq_lock);
	if (b->irq_armed)
		return false;

	/* The breadcrumb irq will be disarmed on the interrupt after the
	 * waiters are signaled. This gives us a single interrupt window in
	 * which we can add a new waiter and avoid the cost of re-enabling
	 * the irq.
	 */
	b->irq_armed = true;
	GEM_BUG_ON(b->irq_enabled);

	if (I915_SELFTEST_ONLY(b->mock)) {
		/* For our mock objects we want to avoid interaction
		 * with the real hardware (which is not set up). So
		 * we simply pretend we have enabled the powerwell
		 * and the irq, and leave it up to the mock
		 * implementation to call intel_engine_wakeup()
		 * itself when it wants to simulate a user interrupt.
		 */
		return true;
	}

	/* Since we are waiting on a request, the GPU should be busy
	 * and should have its own rpm reference. This is tracked by
	 * i915->gt.awake, so we can forgo holding our own wakeref for the
	 * interrupt: before i915->gt.awake is released (when the driver is
	 * idle) we disarm the breadcrumbs.
	 */

	/* No interrupts? Kick the waiter every jiffie! */
	if (intel_irqs_enabled(i915)) {
		if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings))
			irq_enable(engine);
		b->irq_enabled = true;
	}

	enable_fake_irq(b);
	return true;
}

static inline struct intel_wait *to_wait(struct rb_node *node)
{
	return rb_entry(node, struct intel_wait, node);
}

static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
					      struct intel_wait *wait)
{
	lockdep_assert_held(&b->rb_lock);
	GEM_BUG_ON(b->irq_wait == wait);

	/* This request is completed, so remove it from the tree, mark it as
	 * complete, and *then* wake up the associated task. N.B. when the
	 * task wakes up, it will find the empty rb_node, discern that it
	 * has already been removed from the tree and skip the serialisation
	 * of the b->rb_lock and b->irq_lock. This means that the destruction
	 * of the intel_wait is not serialised with the interrupt handler
	 * by the waiter - it must instead be serialised by the caller.
	 */
	rb_erase(&wait->node, &b->waiters);
	RB_CLEAR_NODE(&wait->node);

	wake_up_process(wait->tsk); /* implicit smp_wmb() */
}

static inline void __intel_breadcrumbs_next(struct intel_engine_cs *engine,
					    struct rb_node *next)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	spin_lock(&b->irq_lock);
	GEM_BUG_ON(!b->irq_armed);
	GEM_BUG_ON(!b->irq_wait);
	b->irq_wait = to_wait(next);
	spin_unlock(&b->irq_lock);

	/* We always wake up the next waiter that takes over as the bottom-half
	 * as we may delegate not only the irq-seqno barrier to the next waiter
	 * but also the task of waking up concurrent waiters.
	 */
	if (next)
		wake_up_process(to_wait(next)->tsk);
}

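/*
 * Add a waiter to the rbtree of pending waits, ordered by seqno with ties
 * broken in favour of the higher priority task, pruning already-completed
 * waits along the way. The oldest waiter becomes the bottom-half that owns
 * the breadcrumb irq and wakes its peers. The caller must hold b->rb_lock.
 */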
static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
				    struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct rb_node **p, *parent, *completed;
	bool first, armed;
	u32 seqno;

	/* Insert the request into the retirement ordered list
	 * of waiters by walking the rbtree. If we are the oldest
	 * seqno in the tree (the first to be retired), then
	 * set ourselves as the bottom-half.
	 *
	 * As we descend the tree, prune completed branches: since we hold
	 * the spinlock, we know that the first waiter must be delayed, and
	 * we can reduce some of the sequential wake up latency if we take
	 * action ourselves and wake up the completed tasks in parallel.
	 * Also, by removing stale elements in the tree, we may be able to
	 * reduce the ping-pong between the old bottom-half and ourselves as
	 * first-waiter.
	 */
	armed = false;
	first = true;
	parent = NULL;
	completed = NULL;
	seqno = intel_engine_get_seqno(engine);

	/* If the request completed before we managed to grab the spinlock,
	 * return now before adding ourselves to the rbtree. We let the
	 * current bottom-half handle any pending wakeups and instead
	 * try and get out of the way quickly.
	 */
	if (i915_seqno_passed(seqno, wait->seqno)) {
		RB_CLEAR_NODE(&wait->node);
		return first;
	}

	p = &b->waiters.rb_node;
	while (*p) {
		parent = *p;
		if (wait->seqno == to_wait(parent)->seqno) {
			/* We have multiple waiters on the same seqno, select
			 * the highest priority task (that with the smallest
			 * task->prio) to serve as the bottom-half for this
			 * group.
			 */
			if (wait->tsk->prio > to_wait(parent)->tsk->prio) {
				p = &parent->rb_right;
				first = false;
			} else {
				p = &parent->rb_left;
			}
		} else if (i915_seqno_passed(wait->seqno,
					     to_wait(parent)->seqno)) {
			p = &parent->rb_right;
			if (i915_seqno_passed(seqno, to_wait(parent)->seqno))
				completed = parent;
			else
				first = false;
		} else {
			p = &parent->rb_left;
		}
	}
	rb_link_node(&wait->node, parent, p);
	rb_insert_color(&wait->node, &b->waiters);

	if (first) {
		spin_lock(&b->irq_lock);
		b->irq_wait = wait;
		/* After assigning ourselves as the new bottom-half, we must
		 * perform a cursory check to prevent a missed interrupt.
		 * Either we miss the interrupt whilst programming the hardware,
		 * or if there was a previous waiter (for a later seqno) they
		 * may be woken instead of us (due to the inherent race
		 * in the unlocked read of b->irq_seqno_bh in the irq handler)
		 * and so we miss the wake up.
		 */
		armed = __intel_breadcrumbs_enable_irq(b);
		spin_unlock(&b->irq_lock);
	}

	if (completed) {
		/* Advance the bottom-half (b->irq_wait) before we wake up
		 * the waiters who may scribble over their intel_wait
		 * just as the interrupt handler is dereferencing it via
		 * b->irq_wait.
		 */
		if (!first) {
			struct rb_node *next = rb_next(completed);
			GEM_BUG_ON(next == &wait->node);
			__intel_breadcrumbs_next(engine, next);
		}

		do {
			struct intel_wait *crumb = to_wait(completed);
			completed = rb_prev(completed);
			__intel_breadcrumbs_finish(b, crumb);
		} while (completed);
	}

	GEM_BUG_ON(!b->irq_wait);
	GEM_BUG_ON(!b->irq_armed);
	GEM_BUG_ON(rb_first(&b->waiters) != &b->irq_wait->node);

	return armed;
}

bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	bool armed;

	spin_lock_irq(&b->rb_lock);
	armed = __intel_engine_add_wait(engine, wait);
	spin_unlock_irq(&b->rb_lock);
	if (armed)
		return armed;

	/* Make the caller recheck if its request has already started. */
	return i915_seqno_passed(intel_engine_get_seqno(engine),
				 wait->seqno - 1);
}

static inline bool chain_wakeup(struct rb_node *rb, int priority)
{
	return rb && to_wait(rb)->tsk->prio <= priority;
}

static inline int wakeup_priority(struct intel_breadcrumbs *b,
				  struct task_struct *tsk)
{
	if (tsk == b->signaler)
		return INT_MIN;
	else
		return tsk->prio;
}

static void __intel_engine_remove_wait(struct intel_engine_cs *engine,
				       struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	lockdep_assert_held(&b->rb_lock);

	if (RB_EMPTY_NODE(&wait->node))
		goto out;

	if (b->irq_wait == wait) {
		const int priority = wakeup_priority(b, wait->tsk);
		struct rb_node *next;

		/* We are the current bottom-half. Find the next candidate,
		 * the first waiter in the queue on the remaining oldest
		 * request. As multiple seqnos may complete in the time it
		 * takes us to wake up and find the next waiter, we have to
		 * wake up that waiter for it to perform its own coherent
		 * completion check.
		 */
		next = rb_next(&wait->node);
		if (chain_wakeup(next, priority)) {
			/* If the next waiter is already complete,
			 * wake it up and continue onto the next waiter. So
			 * if we have a small herd, they will wake up in
			 * parallel rather than sequentially, which should
			 * reduce the overall latency in waking all the
			 * completed clients.
			 *
			 * However, waking up a chain adds extra latency to
			 * the first_waiter. This is undesirable if that
			 * waiter is a high priority task.
			 */
			u32 seqno = intel_engine_get_seqno(engine);

			while (i915_seqno_passed(seqno, to_wait(next)->seqno)) {
				struct rb_node *n = rb_next(next);

				__intel_breadcrumbs_finish(b, to_wait(next));
				next = n;
				if (!chain_wakeup(next, priority))
					break;
			}
		}

		__intel_breadcrumbs_next(engine, next);
	} else {
		GEM_BUG_ON(rb_first(&b->waiters) == &wait->node);
	}

	GEM_BUG_ON(RB_EMPTY_NODE(&wait->node));
	rb_erase(&wait->node, &b->waiters);
	RB_CLEAR_NODE(&wait->node);

out:
	GEM_BUG_ON(b->irq_wait == wait);
	GEM_BUG_ON(rb_first(&b->waiters) !=
		   (b->irq_wait ? &b->irq_wait->node : NULL));
}

void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	/* Quick check to see if this waiter was already decoupled from
	 * the tree by the bottom-half to avoid contention on the spinlock
	 * by the herd.
	 */
	if (RB_EMPTY_NODE(&wait->node)) {
		GEM_BUG_ON(READ_ONCE(b->irq_wait) == wait);
		return;
	}

	spin_lock_irq(&b->rb_lock);
	__intel_engine_remove_wait(engine, wait);
	spin_unlock_irq(&b->rb_lock);
}

static bool signal_valid(const struct drm_i915_gem_request *request)
{
	return intel_wait_check_request(&request->signaling.wait, request);
}

static bool signal_complete(const struct drm_i915_gem_request *request)
{
	if (!request)
		return false;

	/* If another process served as the bottom-half it may have already
	 * signalled that this wait is completed.
	 */
	if (intel_wait_complete(&request->signaling.wait))
		return signal_valid(request);

	/* Carefully check if the request is complete, giving time for the
	 * seqno to be visible or if the GPU hung.
	 */
	if (__i915_request_irq_complete(request))
		return true;

	return false;
}

static struct drm_i915_gem_request *to_signaler(struct rb_node *rb)
{
	return rb_entry(rb, struct drm_i915_gem_request, signaling.node);
}

static void signaler_set_rtpriority(void)
{
	struct sched_param param = { .sched_priority = 1 };

	sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
}

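/*
 * The signaler kthread: a common bottom-half that waits for the oldest
 * pending signal (b->first_signal) to complete, calls dma_fence_signal()
 * on it, removes it from the waiter and signal trees, and then either
 * moves on to the next oldest signal or sleeps until woken by the irq
 * bottom-half or a new client.
 */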
static int intel_breadcrumbs_signaler(void *arg)
{
	struct intel_engine_cs *engine = arg;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct drm_i915_gem_request *request;

	/* Install ourselves with high priority to reduce signalling latency */
	signaler_set_rtpriority();

	do {
		bool do_schedule = true;

		set_current_state(TASK_INTERRUPTIBLE);

		/* We are either woken up by the interrupt bottom-half,
		 * or by a client adding a new signaller. In both cases,
		 * the GPU seqno may have advanced beyond our oldest signal.
		 * If it has, propagate the signal, remove the waiter and
		 * check again with the next oldest signal. Otherwise we
		 * need to wait for a new interrupt from the GPU or for
		 * a new client.
		 */
		rcu_read_lock();
		request = rcu_dereference(b->first_signal);
		if (request)
			request = i915_gem_request_get_rcu(request);
		rcu_read_unlock();
		if (signal_complete(request)) {
			local_bh_disable();
			dma_fence_signal(&request->fence);
			local_bh_enable(); /* kick start the tasklets */

			spin_lock_irq(&b->rb_lock);

			/* Wake up all other completed waiters and select the
			 * next bottom-half for the next user interrupt.
			 */
			__intel_engine_remove_wait(engine,
						   &request->signaling.wait);

			/* Find the next oldest signal. Note that as we have
			 * not been holding the lock, another client may
			 * have installed an even older signal than the one
			 * we just completed - so double check we are still
			 * the oldest before picking the next one.
			 */
			if (request == rcu_access_pointer(b->first_signal)) {
				struct rb_node *rb =
					rb_next(&request->signaling.node);
				rcu_assign_pointer(b->first_signal,
						   rb ? to_signaler(rb) : NULL);
			}
			rb_erase(&request->signaling.node, &b->signals);
			RB_CLEAR_NODE(&request->signaling.node);

			spin_unlock_irq(&b->rb_lock);

			i915_gem_request_put(request);

			/* If the engine is saturated we may be continually
			 * processing completed requests. This angers the
			 * NMI watchdog if we never let anything else
			 * have access to the CPU. Let's pretend to be nice
			 * and relinquish the CPU if we burn through the
			 * entire RT timeslice!
			 */
			do_schedule = need_resched();
		}

		if (unlikely(do_schedule)) {
			if (kthread_should_park())
				kthread_parkme();

			if (unlikely(kthread_should_stop())) {
				i915_gem_request_put(request);
				break;
			}

			schedule();
		}
		i915_gem_request_put(request);
	} while (1);
	__set_current_state(TASK_RUNNING);

	return 0;
}

void intel_engine_enable_signaling(struct drm_i915_gem_request *request,
				   bool wakeup)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	u32 seqno;

	/* Note that we may be called from an interrupt handler on another
	 * device (e.g. nouveau signaling a fence completion causing us
	 * to submit a request, and so enable signaling). As such,
	 * we need to make sure that all other users of b->rb_lock protect
	 * against interrupts, i.e. use spin_lock_irqsave.
	 */

	/* locked by dma_fence_enable_sw_signaling() (irqsafe fence->lock) */
	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&request->lock);

	seqno = i915_gem_request_global_seqno(request);
	if (!seqno)
		return;

	request->signaling.wait.tsk = b->signaler;
	request->signaling.wait.request = request;
	request->signaling.wait.seqno = seqno;
	i915_gem_request_get(request);

	spin_lock(&b->rb_lock);

	/* First add ourselves into the list of waiters, but register our
	 * bottom-half as the signaller thread. As per usual, only the oldest
	 * waiter (not just signaller) is tasked as the bottom-half waking
	 * up all completed waiters after the user interrupt.
	 *
	 * If we are the oldest waiter, enable the irq (after which we
	 * must double check that the seqno did not complete).
	 */
	wakeup &= __intel_engine_add_wait(engine, &request->signaling.wait);

	if (!__i915_gem_request_completed(request, seqno)) {
		struct rb_node *parent, **p;
		bool first;

		/* Now insert ourselves into the retirement ordered list of
		 * signals on this engine. We track the oldest seqno as that
		 * will be the first signal to complete.
		 */
		parent = NULL;
		first = true;
		p = &b->signals.rb_node;
		while (*p) {
			parent = *p;
			if (i915_seqno_passed(seqno,
					      to_signaler(parent)->signaling.wait.seqno)) {
				p = &parent->rb_right;
				first = false;
			} else {
				p = &parent->rb_left;
			}
		}
		rb_link_node(&request->signaling.node, parent, p);
		rb_insert_color(&request->signaling.node, &b->signals);
		if (first)
			rcu_assign_pointer(b->first_signal, request);
	} else {
		__intel_engine_remove_wait(engine, &request->signaling.wait);
		i915_gem_request_put(request);
		wakeup = false;
	}

	spin_unlock(&b->rb_lock);

	if (wakeup)
		wake_up_process(b->signaler);
}

void intel_engine_cancel_signaling(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&request->lock);
	GEM_BUG_ON(!request->signaling.wait.seqno);

	spin_lock(&b->rb_lock);

	if (!RB_EMPTY_NODE(&request->signaling.node)) {
		if (request == rcu_access_pointer(b->first_signal)) {
			struct rb_node *rb =
				rb_next(&request->signaling.node);
			rcu_assign_pointer(b->first_signal,
					   rb ? to_signaler(rb) : NULL);
		}
		rb_erase(&request->signaling.node, &b->signals);
		RB_CLEAR_NODE(&request->signaling.node);
		i915_gem_request_put(request);
	}

	__intel_engine_remove_wait(engine, &request->signaling.wait);

	spin_unlock(&b->rb_lock);

	request->signaling.wait.seqno = 0;
}

int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct task_struct *tsk;

	spin_lock_init(&b->rb_lock);
	spin_lock_init(&b->irq_lock);

	timer_setup(&b->fake_irq, intel_breadcrumbs_fake_irq, 0);
	timer_setup(&b->hangcheck, intel_breadcrumbs_hangcheck, 0);

	/* Spawn a thread to provide a common bottom-half for all signals.
	 * As this is an asynchronous interface we cannot steal the current
	 * task for handling the bottom-half to the user interrupt, therefore
	 * we create a thread to do the coherent seqno dance after the
	 * interrupt and then signal the waitqueue (via the dma-buf/fence).
	 */
	tsk = kthread_run(intel_breadcrumbs_signaler, engine,
			  "i915/signal:%d", engine->id);
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);

	b->signaler = tsk;

	return 0;
}

static void cancel_fake_irq(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	del_timer_sync(&b->hangcheck);
	del_timer_sync(&b->fake_irq);
	clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
}

void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	cancel_fake_irq(engine);
	spin_lock_irq(&b->irq_lock);

	if (b->irq_enabled)
		irq_enable(engine);
	else
		irq_disable(engine);

	/* We set the IRQ_BREADCRUMB bit when we enable the irq presuming the
	 * GPU is active and may have already executed the MI_USER_INTERRUPT
	 * before the CPU is ready to receive. However, the engine is currently
	 * idle (we haven't started it yet), so there is no possibility of a
	 * missed interrupt as we enabled the irq, and so we can clear the
	 * immediate wakeup (until a real interrupt arrives for the waiter).
	 */
	clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);

	if (b->irq_armed)
		enable_fake_irq(b);

	spin_unlock_irq(&b->irq_lock);
}

void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	/* The engines should be idle and all requests accounted for! */
	WARN_ON(READ_ONCE(b->irq_wait));
	WARN_ON(!RB_EMPTY_ROOT(&b->waiters));
	WARN_ON(rcu_access_pointer(b->first_signal));
	WARN_ON(!RB_EMPTY_ROOT(&b->signals));

	if (!IS_ERR_OR_NULL(b->signaler))
		kthread_stop(b->signaler);

	cancel_fake_irq(engine);
}

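/*
 * Report whether the breadcrumbs still have a waiter or a pending signal
 * outstanding, waking each as we check.
 */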
bool intel_breadcrumbs_busy(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	bool busy = false;

	spin_lock_irq(&b->rb_lock);

	if (b->irq_wait) {
		wake_up_process(b->irq_wait->tsk);
		busy = true;
	}

	if (rcu_access_pointer(b->first_signal)) {
		wake_up_process(b->signaler);
		busy = true;
	}

	spin_unlock_irq(&b->rb_lock);

	return busy;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_breadcrumbs.c"
#endif