mirror_ubuntu-bionic-kernel.git: drivers/gpu/drm/i915/i915_gem_request.c
/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/prefetch.h>

#include "i915_drv.h"

static const char *i915_fence_get_driver_name(struct fence *fence)
{
	return "i915";
}

static const char *i915_fence_get_timeline_name(struct fence *fence)
{
	/* Timelines are bound by eviction to a VM. However, since
	 * we only have a global seqno at the moment, we only have
	 * a single timeline. Note that each timeline will have
	 * multiple execution contexts (fence contexts) as we allow
	 * engines within a single timeline to execute in parallel.
	 */
	return "global";
}

static bool i915_fence_signaled(struct fence *fence)
{
	return i915_gem_request_completed(to_request(fence));
}

static bool i915_fence_enable_signaling(struct fence *fence)
{
	if (i915_fence_signaled(fence))
		return false;

	intel_engine_enable_signaling(to_request(fence));
	return true;
}

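/* fence_ops.wait callback: convert the jiffies timeout expected by the
 * fence API into the nanosecond timeout used by i915_wait_request(), and
 * convert the remaining time back to jiffies on return.
 */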
static signed long i915_fence_wait(struct fence *fence,
				   bool interruptible,
				   signed long timeout_jiffies)
{
	s64 timeout_ns, *timeout;
	int ret;

	if (timeout_jiffies != MAX_SCHEDULE_TIMEOUT) {
		timeout_ns = jiffies_to_nsecs(timeout_jiffies);
		timeout = &timeout_ns;
	} else {
		timeout = NULL;
	}

	ret = i915_wait_request(to_request(fence),
				interruptible, timeout,
				NO_WAITBOOST);
	if (ret == -ETIME)
		return 0;

	if (ret < 0)
		return ret;

	if (timeout_jiffies != MAX_SCHEDULE_TIMEOUT)
		timeout_jiffies = nsecs_to_jiffies(timeout_ns);

	return timeout_jiffies;
}

static void i915_fence_value_str(struct fence *fence, char *str, int size)
{
	snprintf(str, size, "%u", fence->seqno);
}

static void i915_fence_timeline_value_str(struct fence *fence, char *str,
					  int size)
{
	snprintf(str, size, "%u",
		 intel_engine_get_seqno(to_request(fence)->engine));
}

static void i915_fence_release(struct fence *fence)
{
	struct drm_i915_gem_request *req = to_request(fence);

	kmem_cache_free(req->i915->requests, req);
}

const struct fence_ops i915_fence_ops = {
	.get_driver_name = i915_fence_get_driver_name,
	.get_timeline_name = i915_fence_get_timeline_name,
	.enable_signaling = i915_fence_enable_signaling,
	.signaled = i915_fence_signaled,
	.wait = i915_fence_wait,
	.release = i915_fence_release,
	.fence_value_str = i915_fence_value_str,
	.timeline_value_str = i915_fence_timeline_value_str,
};

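/* Track this request on the file (DRM client) that submitted it, so that
 * the client's outstanding requests can be found again later, e.g. for
 * throttling.
 */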
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
				   struct drm_file *file)
{
	struct drm_i915_private *dev_private;
	struct drm_i915_file_private *file_priv;

	WARN_ON(!req || !file || req->file_priv);

	if (!req || !file)
		return -EINVAL;

	if (req->file_priv)
		return -EINVAL;

	dev_private = req->i915;
	file_priv = file->driver_priv;

	spin_lock(&file_priv->mm.lock);
	req->file_priv = file_priv;
	list_add_tail(&req->client_list, &file_priv->mm.request_list);
	spin_unlock(&file_priv->mm.lock);

	return 0;
}

static inline void
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
	struct drm_i915_file_private *file_priv = request->file_priv;

	if (!file_priv)
		return;

	spin_lock(&file_priv->mm.lock);
	list_del(&request->client_list);
	request->file_priv = NULL;
	spin_unlock(&file_priv->mm.lock);
}

void i915_gem_retire_noop(struct i915_gem_active *active,
			  struct drm_i915_gem_request *request)
{
	/* Space left intentionally blank */
}

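/* Retire a completed request: unlink it from the engine and ring lists,
 * run the ->retire() callback of every i915_gem_active that tracked it,
 * drop its context references and finally release the request itself.
 */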
static void i915_gem_request_retire(struct drm_i915_gem_request *request)
{
	struct i915_gem_active *active, *next;

	trace_i915_gem_request_retire(request);
	list_del(&request->link);

	/* We know the GPU must have read the request to have
	 * sent us the seqno + interrupt, so use the position
	 * of the tail of the request to update the last known position
	 * of the GPU head.
	 *
	 * Note this requires that we are always called in request
	 * completion order.
	 */
	list_del(&request->ring_link);
	request->ring->last_retired_head = request->postfix;

	/* Walk through the active list, calling retire on each. This allows
	 * objects to track their GPU activity and mark themselves as idle
	 * when their *last* active request is completed (updating state
	 * tracking lists for eviction, active references for GEM, etc).
	 *
	 * As the ->retire() may free the node, we decouple it first and
	 * pass along the auxiliary information (to avoid dereferencing
	 * the node after the callback).
	 */
	list_for_each_entry_safe(active, next, &request->active_list, link) {
		/* In microbenchmarks or focusing upon time inside the kernel,
		 * we may spend an inordinate amount of time simply handling
		 * the retirement of requests and processing their callbacks.
		 * This loop in particular is hot due to the cache misses
		 * incurred when jumping around the list of i915_gem_active.
		 * So we try to keep this loop as streamlined as possible and
		 * also prefetch the next i915_gem_active to try and hide
		 * the likely cache miss.
		 */
		prefetchw(next);

		INIT_LIST_HEAD(&active->link);
		RCU_INIT_POINTER(active->request, NULL);

		active->retire(active, request);
	}

	i915_gem_request_remove_from_client(request);

	if (request->previous_context) {
		if (i915.enable_execlists)
			intel_lr_context_unpin(request->previous_context,
					       request->engine);
	}

	i915_gem_context_put(request->ctx);
	i915_gem_request_put(request);
}

void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	struct drm_i915_gem_request *tmp;

	lockdep_assert_held(&req->i915->drm.struct_mutex);
	GEM_BUG_ON(list_empty(&req->link));

	do {
		tmp = list_first_entry(&engine->request_list,
				       typeof(*tmp), link);

		i915_gem_request_retire(tmp);
	} while (tmp != req);
}

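/* Report the GPU error state to a would-be request submitter: -EIO if the
 * GPU is terminally wedged, and while a reset is in progress -EAGAIN (or
 * -EIO for non-interruptible callers, which cannot handle -EAGAIN).
 */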
static int i915_gem_check_wedge(struct drm_i915_private *dev_priv)
{
	struct i915_gpu_error *error = &dev_priv->gpu_error;

	if (i915_terminally_wedged(error))
		return -EIO;

	if (i915_reset_in_progress(error)) {
		/* Non-interruptible callers can't handle -EAGAIN, hence return
		 * -EIO unconditionally for these.
		 */
		if (!dev_priv->mm.interruptible)
			return -EIO;

		return -EAGAIN;
	}

	return 0;
}

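/* Prepare to (re)start the global seqno at @seqno: idle and retire all
 * engines so nothing is still writing to the rings, kick any remaining
 * waiters/signalers if the seqno is about to wrap, then program the new
 * value into each engine.
 */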
static int i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int ret;

	/* Carefully retire all requests without writing to the rings */
	for_each_engine(engine, dev_priv, id) {
		ret = intel_engine_idle(engine,
					I915_WAIT_INTERRUPTIBLE |
					I915_WAIT_LOCKED);
		if (ret)
			return ret;
	}
	i915_gem_retire_requests(dev_priv);

	/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
	if (!i915_seqno_passed(seqno, dev_priv->next_seqno)) {
		while (intel_kick_waiters(dev_priv) ||
		       intel_kick_signalers(dev_priv))
			yield();
	}

	/* Finally reset hw state */
	for_each_engine(engine, dev_priv, id)
		intel_engine_init_seqno(engine, seqno);

	return 0;
}

int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	if (seqno == 0)
		return -EINVAL;

	/* The HWS page needs to be set to a value less than the one we
	 * will inject into the ring.
	 */
	ret = i915_gem_init_seqno(dev_priv, seqno - 1);
	if (ret)
		return ret;

	dev_priv->next_seqno = seqno;
	return 0;
}

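/* Hand out the next global seqno, skipping 0 (reserved to mean "no seqno")
 * and reinitialising the hardware state when the 32-bit counter wraps.
 */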
static int i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
{
	/* reserve 0 for non-seqno */
	if (unlikely(dev_priv->next_seqno == 0)) {
		int ret;

		ret = i915_gem_init_seqno(dev_priv, 0);
		if (ret)
			return ret;

		dev_priv->next_seqno = 1;
	}

	*seqno = dev_priv->next_seqno++;
	return 0;
}

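/* i915_sw_fence callback invoked once all of the request's dependencies
 * have signaled: record the seqno as submitted and hand the request to the
 * engine's submit_request() backend.
 */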
static int __i915_sw_fence_call
submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	struct drm_i915_gem_request *request =
		container_of(fence, typeof(*request), submit);

	/* Will be called from irq-context when using foreign DMA fences */

	switch (state) {
	case FENCE_COMPLETE:
		request->engine->last_submitted_seqno = request->fence.seqno;
		request->engine->submit_request(request);
		break;

	case FENCE_FREE:
		break;
	}

	return NOTIFY_DONE;
}

/**
 * i915_gem_request_alloc - allocate a request structure
 *
 * @engine: engine that we wish to issue the request on.
 * @ctx: context that the request will be associated with.
 *       This can be NULL if the request is not directly related to
 *       any specific user context, in which case this function will
 *       choose an appropriate context to use.
 *
 * Returns a pointer to the allocated request if successful,
 * or an error code if not.
 */
struct drm_i915_gem_request *
i915_gem_request_alloc(struct intel_engine_cs *engine,
		       struct i915_gem_context *ctx)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct drm_i915_gem_request *req;
	u32 seqno;
	int ret;

	/* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
	 * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
	 * and restart.
	 */
	ret = i915_gem_check_wedge(dev_priv);
	if (ret)
		return ERR_PTR(ret);

	/* Move the oldest request to the slab-cache (if not in use!) */
	req = list_first_entry_or_null(&engine->request_list,
				       typeof(*req), link);
	if (req && i915_gem_request_completed(req))
		i915_gem_request_retire(req);

	/* Beware: Dragons be flying overhead.
	 *
	 * We use RCU to look up requests in flight. The lookups may
	 * race with the request being allocated from the slab freelist.
	 * That is, the request we are writing to here may be in the process
	 * of being read by __i915_gem_active_get_rcu(). As such,
	 * we have to be very careful when overwriting the contents. During
	 * the RCU lookup, we chase the request->engine pointer,
	 * read the request->fence.seqno and increment the reference count.
	 *
	 * The reference count is incremented atomically. If it is zero,
	 * the lookup knows the request is unallocated and complete. Otherwise,
	 * it is either still in use, or has been reallocated and reset
	 * with fence_init(). This increment is safe even across release, as
	 * we then check that the request we hold a reference to still matches
	 * the active request.
	 *
	 * Before we increment the refcount, we chase the request->engine
	 * pointer. We must not call kmem_cache_zalloc() or else we set
	 * that pointer to NULL and cause a crash during the lookup. If
	 * we see the request is completed (based on the value of the
	 * old engine and seqno), the lookup is complete and reports NULL.
	 * If we decide the request is not completed (new engine or seqno),
	 * then we grab a reference and double check that it is still the
	 * active request - and if it is not (because the request has been
	 * reallocated in the meantime), we restart the lookup.
	 *
	 * Do not use kmem_cache_zalloc() here!
	 */
	req = kmem_cache_alloc(dev_priv->requests, GFP_KERNEL);
	if (!req)
		return ERR_PTR(-ENOMEM);

	ret = i915_gem_get_seqno(dev_priv, &seqno);
	if (ret)
		goto err;

	spin_lock_init(&req->lock);
	fence_init(&req->fence,
		   &i915_fence_ops,
		   &req->lock,
		   engine->fence_context,
		   seqno);

	i915_sw_fence_init(&req->submit, submit_notify);

	INIT_LIST_HEAD(&req->active_list);
	req->i915 = dev_priv;
	req->engine = engine;
	req->ctx = i915_gem_context_get(ctx);

	/* No zalloc, must clear what we need by hand */
	req->previous_context = NULL;
	req->file_priv = NULL;
	req->batch = NULL;

	/*
	 * Reserve space in the ring buffer for all the commands required to
	 * eventually emit this request. This is to guarantee that the
	 * i915_add_request() call can't fail. Note that the reserve may need
	 * to be redone if the request is not actually submitted straight
	 * away, e.g. because a GPU scheduler has deferred it.
	 */
	req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;

	if (i915.enable_execlists)
		ret = intel_logical_ring_alloc_request_extras(req);
	else
		ret = intel_ring_alloc_request_extras(req);
	if (ret)
		goto err_ctx;

	/* Record the position of the start of the request so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the head.
	 */
	req->head = req->ring->tail;

	return req;

err_ctx:
	i915_gem_context_put(ctx);
err:
	kmem_cache_free(dev_priv->requests, req);
	return ERR_PTR(ret);
}

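/* Order execution of @to after @from: a no-op on the same engine (the ring
 * is naturally ordered); otherwise synchronise via a hardware semaphore
 * when available, or by waiting asynchronously on @from's fence through
 * @to's submit fence.
 */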
static int
i915_gem_request_await_request(struct drm_i915_gem_request *to,
			       struct drm_i915_gem_request *from)
{
	int idx, ret;

	GEM_BUG_ON(to == from);

	if (to->engine == from->engine)
		return 0;

	idx = intel_engine_sync_index(from->engine, to->engine);
	if (from->fence.seqno <= from->engine->semaphore.sync_seqno[idx])
		return 0;

	trace_i915_gem_ring_sync_to(to, from);
	if (!i915.semaphores) {
		if (!i915_spin_request(from, TASK_INTERRUPTIBLE, 2)) {
			ret = i915_sw_fence_await_dma_fence(&to->submit,
							    &from->fence, 0,
							    GFP_KERNEL);
			if (ret < 0)
				return ret;
		}
	} else {
		ret = to->engine->semaphore.sync_to(to, from);
		if (ret)
			return ret;
	}

	from->engine->semaphore.sync_seqno[idx] = from->fence.seqno;
	return 0;
}

/**
 * i915_gem_request_await_object - set this request to (async) wait upon a bo
 *
 * @to: request we are wishing to use
 * @obj: object which may be in use on another ring.
 * @write: whether the wait is on behalf of a pending write to @obj
 *
 * This code is meant to abstract object synchronization with the GPU.
 * Conceptually we serialise writes between engines inside the GPU.
 * We only allow one engine to write into a buffer at any time, but
 * multiple readers. To ensure each has a coherent view of memory, we must:
 *
 * - If there is an outstanding write request to the object, the new
 *   request must wait for it to complete (either CPU or in hw, requests
 *   on the same ring will be naturally ordered).
 *
 * - If we are a write request (pending_write_domain is set), the new
 *   request must wait for outstanding read requests to complete.
 *
 * Returns 0 if successful, else propagates up the lower layer error.
 */
int
i915_gem_request_await_object(struct drm_i915_gem_request *to,
			      struct drm_i915_gem_object *obj,
			      bool write)
{
	struct i915_gem_active *active;
	unsigned long active_mask;
	int idx;

	if (write) {
		active_mask = i915_gem_object_get_active(obj);
		active = obj->last_read;
	} else {
		active_mask = 1;
		active = &obj->last_write;
	}

	for_each_active(active_mask, idx) {
		struct drm_i915_gem_request *request;
		int ret;

		request = i915_gem_active_peek(&active[idx],
					       &obj->base.dev->struct_mutex);
		if (!request)
			continue;

		ret = i915_gem_request_await_request(to, request);
		if (ret)
			return ret;
	}

	return 0;
}

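/* Note that @engine has (more) work in flight: on the first transition
 * from idle, take a runtime-pm wakeref, re-enable GT powersaving/RPS and
 * kick off the periodic retire worker.
 */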
static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	dev_priv->gt.active_engines |= intel_engine_flag(engine);
	if (dev_priv->gt.awake)
		return;

	intel_runtime_pm_get_noresume(dev_priv);
	dev_priv->gt.awake = true;

	intel_enable_gt_powersave(dev_priv);
	i915_update_gfx_val(dev_priv);
	if (INTEL_GEN(dev_priv) >= 6)
		gen6_rps_busy(dev_priv);

	queue_delayed_work(dev_priv->wq,
			   &dev_priv->gt.retire_work,
			   round_jiffies_up_relative(HZ));
}

/*
 * NB: This function is not allowed to fail. Doing so would mean that the
 * request is not being tracked for completion but the work itself is
 * going to happen on the hardware. This would be a Bad Thing(tm).
 */
void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_ring *ring = request->ring;
	struct drm_i915_gem_request *prev;
	u32 request_start;
	u32 reserved_tail;
	int ret;

	trace_i915_gem_request_add(request);

	/*
	 * To ensure that this call will not fail, space for its emissions
	 * should already have been reserved in the ring buffer. Let the ring
	 * know that it is time to use that space up.
	 */
	request_start = ring->tail;
	reserved_tail = request->reserved_space;
	request->reserved_space = 0;

	/*
	 * Emit any outstanding flushes - execbuf can fail to emit the flush
	 * after having emitted the batchbuffer command. Hence we need to fix
	 * things up similar to emitting the lazy request. The difference here
	 * is that the flush _must_ happen before the next request, no matter
	 * what.
	 */
	if (flush_caches) {
		ret = engine->emit_flush(request, EMIT_FLUSH);

		/* Not allowed to fail! */
		WARN(ret, "engine->emit_flush() failed: %d!\n", ret);
	}

	/* Record the position of the start of the breadcrumb so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the ring's HEAD.
	 */
	request->postfix = ring->tail;

	/* Not allowed to fail! */
	ret = engine->emit_request(request);
	WARN(ret, "(%s)->emit_request failed: %d!\n", engine->name, ret);

	/* Sanity check that the reserved size was large enough. */
	ret = ring->tail - request_start;
	if (ret < 0)
		ret += ring->size;
	WARN_ONCE(ret > reserved_tail,
		  "Not enough space reserved (%d bytes) "
		  "for adding the request (%d bytes)\n",
		  reserved_tail, ret);

	/* Seal the request and mark it as pending execution. Note that
	 * we may inspect this state, without holding any locks, during
	 * hangcheck. Hence we apply the barrier to ensure that we do not
	 * see a more recent value in the hws than we are tracking.
	 */

	prev = i915_gem_active_raw(&engine->last_request,
				   &request->i915->drm.struct_mutex);
	if (prev)
		i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
					     &request->submitq);

	request->emitted_jiffies = jiffies;
	request->previous_seqno = engine->last_pending_seqno;
	engine->last_pending_seqno = request->fence.seqno;
	i915_gem_active_set(&engine->last_request, request);
	list_add_tail(&request->link, &engine->request_list);
	list_add_tail(&request->ring_link, &ring->request_list);

	i915_gem_mark_busy(engine);

	local_bh_disable();
	i915_sw_fence_commit(&request->submit);
	local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
}

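/* Re-add @wait to @q if a previous wakeup removed it, so that a waiter
 * kicked during a GPU reset keeps listening for further wakeups.
 */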
static void reset_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	if (list_empty(&wait->task_list))
		__add_wait_queue(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}

static unsigned long local_clock_us(unsigned int *cpu)
{
	unsigned long t;

	/* Cheaply and approximately convert from nanoseconds to microseconds.
	 * The result and subsequent calculations are also defined in the same
	 * approximate microseconds units. The principal source of timing
	 * error here is from the simple truncation.
	 *
	 * Note that local_clock() is only defined with respect to the current
	 * CPU; the comparisons are no longer valid if we switch CPUs. Instead
	 * of blocking preemption for the entire busywait, we can detect the
	 * CPU switch and use that as an indicator of system load and a reason
	 * to stop busywaiting, see busywait_stop().
	 */
	*cpu = get_cpu();
	t = local_clock() >> 10;
	put_cpu();

	return t;
}

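/* Stop busywaiting once the (approximate) timeout has expired, or if we
 * have been migrated to a different CPU, which invalidates the clock
 * comparison and hints that the system is busy.
 */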
static bool busywait_stop(unsigned long timeout, unsigned int cpu)
{
	unsigned int this_cpu;

	if (time_after(local_clock_us(&this_cpu), timeout))
		return true;

	return this_cpu != cpu;
}

bool __i915_spin_request(const struct drm_i915_gem_request *req,
			 int state, unsigned long timeout_us)
{
	unsigned int cpu;

	/* When waiting for high frequency requests, e.g. during synchronous
	 * rendering split between the CPU and GPU, the finite amount of time
	 * required to set up the irq and wait upon it limits the response
	 * rate. By busywaiting on the request completion for a short while we
	 * can service the high frequency waits as quickly as possible.
	 * However, if it is a slow request, we want to sleep as quickly as
	 * possible. The tradeoff between waiting and sleeping is roughly the
	 * time it takes to sleep on a request, on the order of a microsecond.
	 */

	timeout_us += local_clock_us(&cpu);
	do {
		if (i915_gem_request_completed(req))
			return true;

		if (signal_pending_state(state, current))
			break;

		if (busywait_stop(timeout_us, cpu))
			break;

		cpu_relax_lowlatency();
	} while (!need_resched());

	return false;
}

/**
 * i915_wait_request - wait until execution of request has finished
 * @req: the request to wait upon
 * @flags: how to wait
 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
 * @rps: client to charge for RPS boosting
 *
 * Note: It is of utmost importance that the passed-in seqno and reset_counter
 * values have been read by the caller in an SMP-safe manner. Where read-side
 * locks are involved, it is sufficient to read the reset_counter before
 * unlocking the lock that protects the seqno. For lockless tricks, the
 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
 * inserted.
 *
 * Returns 0 if the request completed within the allotted time. Else returns
 * the errno with remaining time filled in the timeout argument.
 */
int i915_wait_request(struct drm_i915_gem_request *req,
		      unsigned int flags,
		      s64 *timeout,
		      struct intel_rps_client *rps)
{
	const int state = flags & I915_WAIT_INTERRUPTIBLE ?
		TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
	DEFINE_WAIT(reset);
	struct intel_wait wait;
	unsigned long timeout_remain;
	int ret = 0;

	might_sleep();
#if IS_ENABLED(CONFIG_LOCKDEP)
	GEM_BUG_ON(!!lockdep_is_held(&req->i915->drm.struct_mutex) !=
		   !!(flags & I915_WAIT_LOCKED));
#endif

	if (i915_gem_request_completed(req))
		return 0;

	timeout_remain = MAX_SCHEDULE_TIMEOUT;
	if (timeout) {
		if (WARN_ON(*timeout < 0))
			return -EINVAL;

		if (*timeout == 0)
			return -ETIME;

		/* Record current time in case interrupted, or wedged */
		timeout_remain = nsecs_to_jiffies_timeout(*timeout);
		*timeout += ktime_get_raw_ns();
	}

	trace_i915_gem_request_wait_begin(req);

	/* This client is about to stall waiting for the GPU. In many cases
	 * this is undesirable and limits the throughput of the system, as
	 * many clients cannot continue processing user input/output whilst
	 * blocked. RPS autotuning may take tens of milliseconds to respond
	 * to the GPU load and thus incurs additional latency for the client.
	 * We can circumvent that by promoting the GPU frequency to maximum
	 * before we wait. This makes the GPU throttle up much more quickly
	 * (good for benchmarks and user experience, e.g. window animations),
	 * but at a cost of spending more power processing the workload
	 * (bad for battery). Not all clients even want their results
	 * immediately and for them we should just let the GPU select its own
	 * frequency to maximise efficiency. To prevent a single client from
	 * forcing the clocks too high for the whole system, we only allow
	 * each client to waitboost once in a busy period.
	 */
	if (IS_RPS_CLIENT(rps) && INTEL_GEN(req->i915) >= 6)
		gen6_rps_boost(req->i915, rps, req->emitted_jiffies);

	/* Optimistic short spin before touching IRQs */
	if (i915_spin_request(req, state, 5))
		goto complete;

	set_current_state(state);
	if (flags & I915_WAIT_LOCKED)
		add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);

	intel_wait_init(&wait, req->fence.seqno);
	if (intel_engine_add_wait(req->engine, &wait))
		/* In order to check that we haven't missed the interrupt
		 * as we enabled it, we need to kick ourselves to do a
		 * coherent check on the seqno before we sleep.
		 */
		goto wakeup;

	for (;;) {
		if (signal_pending_state(state, current)) {
			ret = -ERESTARTSYS;
			break;
		}

		timeout_remain = io_schedule_timeout(timeout_remain);
		if (timeout_remain == 0) {
			ret = -ETIME;
			break;
		}

		if (intel_wait_complete(&wait))
			break;

		set_current_state(state);

wakeup:
		/* Carefully check if the request is complete, giving time
		 * for the seqno to be visible following the interrupt.
		 * We also have to check in case we are kicked by the GPU
		 * reset in order to drop the struct_mutex.
		 */
		if (__i915_request_irq_complete(req))
			break;

		/* If the GPU is hung, and we hold the lock, reset the GPU
		 * and then check for completion. On a full reset, the engine's
		 * HW seqno will be advanced past us and we are complete.
		 * If we do a partial reset, we have to wait for the GPU to
		 * resume and update the breadcrumb.
		 *
		 * If we don't hold the mutex, we can just wait for the worker
		 * to come along and update the breadcrumb (either directly
		 * itself, or indirectly by recovering the GPU).
		 */
		if (flags & I915_WAIT_LOCKED &&
		    i915_reset_in_progress(&req->i915->gpu_error)) {
			__set_current_state(TASK_RUNNING);
			i915_reset(req->i915);
			reset_wait_queue(&req->i915->gpu_error.wait_queue,
					 &reset);
			continue;
		}

		/* Only spin if we know the GPU is processing this request */
		if (i915_spin_request(req, state, 2))
			break;
	}

	intel_engine_remove_wait(req->engine, &wait);
	if (flags & I915_WAIT_LOCKED)
		remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
	__set_current_state(TASK_RUNNING);

complete:
	trace_i915_gem_request_wait_end(req);

	if (timeout) {
		*timeout -= ktime_get_raw_ns();
		if (*timeout < 0)
			*timeout = 0;

		/*
		 * Apparently ktime isn't accurate enough and occasionally has a
		 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
		 * things up to make the test happy. We allow up to 1 jiffy.
		 *
		 * This is a regression from the timespec->ktime conversion.
		 */
		if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
			*timeout = 0;
	}

	if (IS_RPS_USER(rps) &&
	    req->fence.seqno == req->engine->last_submitted_seqno) {
		/* The GPU is now idle and this client has stalled.
		 * Since no other client has submitted a request in the
		 * meantime, assume that this client is the only one
		 * supplying work to the GPU but is unable to keep that
		 * work supplied because it is waiting. Since the GPU is
		 * then never kept fully busy, RPS autoclocking will
		 * keep the clocks relatively low, causing further delays.
		 * Compensate by giving the synchronous client credit for
		 * a waitboost next time.
		 */
		spin_lock(&req->i915->rps.client_lock);
		list_del_init(&rps->link);
		spin_unlock(&req->i915->rps.client_lock);
	}

	return ret;
}

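/* Retire, in submission order, every completed request on @engine.
 * Returns true if the engine's request list was fully drained.
 */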
static bool engine_retire_requests(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *request, *next;

	list_for_each_entry_safe(request, next, &engine->request_list, link) {
		if (!i915_gem_request_completed(request))
			return false;

		i915_gem_request_retire(request);
	}

	return true;
}

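/* Retire completed requests on all active engines, clearing each engine
 * from the active mask once it is drained; when no engine remains active,
 * schedule the idle worker to park the GPU.
 */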
void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	unsigned int tmp;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	if (dev_priv->gt.active_engines == 0)
		return;

	GEM_BUG_ON(!dev_priv->gt.awake);

	for_each_engine_masked(engine, dev_priv, dev_priv->gt.active_engines, tmp)
		if (engine_retire_requests(engine))
			dev_priv->gt.active_engines &= ~intel_engine_flag(engine);

	if (dev_priv->gt.active_engines == 0)
		queue_delayed_work(dev_priv->wq,
				   &dev_priv->gt.idle_work,
				   msecs_to_jiffies(100));
}