/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/prefetch.h>
#include <linux/dma-fence-array.h>

#include "i915_drv.h"

static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{
	return "i915";
}

static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
{
	return to_request(fence)->timeline->common->name;
}

static bool i915_fence_signaled(struct dma_fence *fence)
{
	return i915_gem_request_completed(to_request(fence));
}

static bool i915_fence_enable_signaling(struct dma_fence *fence)
{
	if (i915_fence_signaled(fence))
		return false;

	intel_engine_enable_signaling(to_request(fence));
	return true;
}

static signed long i915_fence_wait(struct dma_fence *fence,
				   bool interruptible,
				   signed long timeout)
{
	return i915_wait_request(to_request(fence), interruptible, timeout);
}

static void i915_fence_release(struct dma_fence *fence)
{
	struct drm_i915_gem_request *req = to_request(fence);

	kmem_cache_free(req->i915->requests, req);
}

const struct dma_fence_ops i915_fence_ops = {
	.get_driver_name = i915_fence_get_driver_name,
	.get_timeline_name = i915_fence_get_timeline_name,
	.enable_signaling = i915_fence_enable_signaling,
	.signaled = i915_fence_signaled,
	.wait = i915_fence_wait,
	.release = i915_fence_release,
};

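/*
 * i915_fence_ops backs the dma_fence embedded in every request; it is hooked
 * up via dma_fence_init() in i915_gem_request_alloc() below. The core
 * dma-fence code calls .enable_signaling only when a waiter first asks for
 * an interrupt-driven wakeup, and .release returns the request to the slab
 * cache once the final reference is dropped.
 */
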
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
				   struct drm_file *file)
{
	struct drm_i915_private *dev_private;
	struct drm_i915_file_private *file_priv;

	WARN_ON(!req || !file || req->file_priv);

	if (!req || !file)
		return -EINVAL;

	if (req->file_priv)
		return -EINVAL;

	dev_private = req->i915;
	file_priv = file->driver_priv;

	spin_lock(&file_priv->mm.lock);
	req->file_priv = file_priv;
	list_add_tail(&req->client_list, &file_priv->mm.request_list);
	spin_unlock(&file_priv->mm.lock);

	return 0;
}

static inline void
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
	struct drm_i915_file_private *file_priv = request->file_priv;

	if (!file_priv)
		return;

	spin_lock(&file_priv->mm.lock);
	list_del(&request->client_list);
	request->file_priv = NULL;
	spin_unlock(&file_priv->mm.lock);
}

void i915_gem_retire_noop(struct i915_gem_active *active,
			  struct drm_i915_gem_request *request)
{
	/* Space left intentionally blank */
}

static void i915_gem_request_retire(struct drm_i915_gem_request *request)
{
	struct i915_gem_active *active, *next;

	lockdep_assert_held(&request->i915->drm.struct_mutex);
	GEM_BUG_ON(!i915_gem_request_completed(request));

	trace_i915_gem_request_retire(request);

	spin_lock_irq(&request->engine->timeline->lock);
	list_del_init(&request->link);
	spin_unlock_irq(&request->engine->timeline->lock);

	/* We know the GPU must have read the request to have
	 * sent us the seqno + interrupt, so use the position
	 * of the tail of the request to update the last known position
	 * of the GPU head.
	 *
	 * Note this requires that we are always called in request
	 * completion order.
	 */
	list_del(&request->ring_link);
	request->ring->last_retired_head = request->postfix;
	request->i915->gt.active_requests--;

	/* Walk through the active list, calling retire on each. This allows
	 * objects to track their GPU activity and mark themselves as idle
	 * when their *last* active request is completed (updating state
	 * tracking lists for eviction, active references for GEM, etc).
	 *
	 * As the ->retire() may free the node, we decouple it first and
	 * pass along the auxiliary information (to avoid dereferencing
	 * the node after the callback).
	 */
	list_for_each_entry_safe(active, next, &request->active_list, link) {
		/* In microbenchmarks, or when focusing upon time spent inside
		 * the kernel, we may spend an inordinate amount of time simply
		 * handling the retirement of requests and processing their
		 * callbacks. This loop itself is particularly hot due to the
		 * cache misses when jumping around the list of i915_gem_active.
		 * So we try to keep this loop as streamlined as possible and
		 * also prefetch the next i915_gem_active to try and hide
		 * the likely cache miss.
		 */
		prefetchw(next);

		INIT_LIST_HEAD(&active->link);
		RCU_INIT_POINTER(active->request, NULL);

		active->retire(active, request);
	}

	i915_gem_request_remove_from_client(request);

	if (request->previous_context) {
		if (i915.enable_execlists)
			intel_lr_context_unpin(request->previous_context,
					       request->engine);
	}

	i915_gem_context_put(request->ctx);

	dma_fence_signal(&request->fence);
	i915_gem_request_put(request);
}

void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	struct drm_i915_gem_request *tmp;

	lockdep_assert_held(&req->i915->drm.struct_mutex);
	if (list_empty(&req->link))
		return;

	do {
		tmp = list_first_entry(&engine->timeline->requests,
				       typeof(*tmp), link);

		i915_gem_request_retire(tmp);
	} while (tmp != req);
}

static int i915_gem_check_wedge(struct drm_i915_private *dev_priv)
{
	struct i915_gpu_error *error = &dev_priv->gpu_error;

	if (i915_terminally_wedged(error))
		return -EIO;

	if (i915_reset_in_progress(error)) {
		/* Non-interruptible callers can't handle -EAGAIN, hence return
		 * -EIO unconditionally for these.
		 */
		if (!dev_priv->mm.interruptible)
			return -EIO;

		return -EAGAIN;
	}

	return 0;
}

static int i915_gem_init_global_seqno(struct drm_i915_private *i915, u32 seqno)
{
	struct i915_gem_timeline *timeline = &i915->gt.global_timeline;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int ret;

	/* Carefully retire all requests without writing to the rings */
	ret = i915_gem_wait_for_idle(i915,
				     I915_WAIT_INTERRUPTIBLE |
				     I915_WAIT_LOCKED);
	if (ret)
		return ret;

	i915_gem_retire_requests(i915);
	GEM_BUG_ON(i915->gt.active_requests > 1);

	/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
	if (!i915_seqno_passed(seqno, atomic_read(&timeline->next_seqno))) {
		while (intel_breadcrumbs_busy(i915))
			cond_resched(); /* spin until threads are complete */
	}
	atomic_set(&timeline->next_seqno, seqno);

	/* Finally reset hw state */
	for_each_engine(engine, i915, id)
		intel_engine_init_global_seqno(engine, seqno);

	list_for_each_entry(timeline, &i915->gt.timelines, link) {
		for_each_engine(engine, i915, id) {
			struct intel_timeline *tl = &timeline->engine[id];

			memset(tl->sync_seqno, 0, sizeof(tl->sync_seqno));
		}
	}

	return 0;
}

int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	if (seqno == 0)
		return -EINVAL;

	/* The HWS page needs to be set to a value less than what we
	 * will inject into the ring.
	 */
	return i915_gem_init_global_seqno(dev_priv, seqno - 1);
}

static int reserve_global_seqno(struct drm_i915_private *i915)
{
	u32 active_requests = ++i915->gt.active_requests;
	u32 next_seqno = atomic_read(&i915->gt.global_timeline.next_seqno);
	int ret;

	/* Reservation is fine until we need to wrap around */
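	/* Note the unsigned overflow trick: if handing out active_requests
	 * more seqnos would wrap the u32 counter, the sum below wraps past
	 * next_seqno, the test fails, and we idle the GPU and restart the
	 * global seqno from 0 before proceeding.
	 */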
	if (likely(next_seqno + active_requests > next_seqno))
		return 0;

	ret = i915_gem_init_global_seqno(i915, 0);
	if (ret) {
		i915->gt.active_requests--;
		return ret;
	}

	return 0;
}

static u32 __timeline_get_seqno(struct i915_gem_timeline *tl)
{
	/* next_seqno only incremented under a mutex */
	return ++tl->next_seqno.counter;
}

static u32 timeline_get_seqno(struct i915_gem_timeline *tl)
{
	return atomic_inc_return(&tl->next_seqno);
}

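/*
 * Two flavours of seqno allocation: __timeline_get_seqno() is used when
 * constructing a request on its per-context timeline, where struct_mutex
 * already serialises the increment, whereas timeline_get_seqno() is used
 * on the global timeline at submission time (under the engine->timeline
 * spinlock, potentially from irq context) and so needs the atomic form.
 */
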
void __i915_gem_request_submit(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_timeline *timeline;
	u32 seqno;

	/* Transfer from per-context onto the global per-engine timeline */
	timeline = engine->timeline;
	GEM_BUG_ON(timeline == request->timeline);
	assert_spin_locked(&timeline->lock);

	seqno = timeline_get_seqno(timeline->common);
	GEM_BUG_ON(!seqno);
	GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine), seqno));

	GEM_BUG_ON(i915_seqno_passed(timeline->last_submitted_seqno, seqno));
	request->previous_seqno = timeline->last_submitted_seqno;
	timeline->last_submitted_seqno = seqno;

	/* We may be recursing from the signal callback of another i915 fence */
	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
	request->global_seqno = seqno;
	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
		intel_engine_enable_signaling(request);
	spin_unlock(&request->lock);

	GEM_BUG_ON(!request->global_seqno);
	engine->emit_breadcrumb(request,
				request->ring->vaddr + request->postfix);

	spin_lock(&request->timeline->lock);
	list_move_tail(&request->link, &timeline->requests);
	spin_unlock(&request->timeline->lock);

	i915_sw_fence_commit(&request->execute);
}

void i915_gem_request_submit(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	unsigned long flags;

	/* Will be called from irq-context when using foreign fences. */
	spin_lock_irqsave(&engine->timeline->lock, flags);

	__i915_gem_request_submit(request);

	spin_unlock_irqrestore(&engine->timeline->lock, flags);
}

static int __i915_sw_fence_call
submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	if (state == FENCE_COMPLETE) {
		struct drm_i915_gem_request *request =
			container_of(fence, typeof(*request), submit);

		request->engine->submit_request(request);
	}

	return NOTIFY_DONE;
}

static int __i915_sw_fence_call
execute_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	return NOTIFY_DONE;
}

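/*
 * Each request carries two i915_sw_fences: "submit" fires once all of the
 * request's dependencies have been resolved and hands the request to the
 * engine via ->submit_request(), while "execute" is completed from within
 * __i915_gem_request_submit() once a global seqno has been assigned, i.e.
 * once waiters may start watching the hardware for completion.
 */
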
/**
 * i915_gem_request_alloc - allocate a request structure
 *
 * @engine: engine that we wish to issue the request on.
 * @ctx: context that the request will be associated with.
 * This can be NULL if the request is not directly related to
 * any specific user context, in which case this function will
 * choose an appropriate context to use.
 *
 * Returns a pointer to the allocated request if successful,
 * or an error code if not.
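 *
 * A typical (simplified) caller, with error handling and cleanup elided,
 * looks like:
 *
 *	req = i915_gem_request_alloc(engine, ctx);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *
 *	i915_gem_request_await_object(req, obj, true);
 *	__i915_add_request(req, true);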
 */
struct drm_i915_gem_request *
i915_gem_request_alloc(struct intel_engine_cs *engine,
		       struct i915_gem_context *ctx)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct drm_i915_gem_request *req;
	int ret;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	/* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
	 * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
	 * and restart.
	 */
	ret = i915_gem_check_wedge(dev_priv);
	if (ret)
		return ERR_PTR(ret);

	ret = reserve_global_seqno(dev_priv);
	if (ret)
		return ERR_PTR(ret);

	/* Move the oldest request to the slab-cache (if not in use!) */
	req = list_first_entry_or_null(&engine->timeline->requests,
				       typeof(*req), link);
	if (req && __i915_gem_request_completed(req))
		i915_gem_request_retire(req);

	/* Beware: Dragons be flying overhead.
	 *
	 * We use RCU to look up requests in flight. The lookups may
	 * race with the request being allocated from the slab freelist.
	 * That is, the request we are writing to here may be in the process
	 * of being read by __i915_gem_active_get_rcu(). As such,
	 * we have to be very careful when overwriting the contents. During
	 * the RCU lookup, we chase the request->engine pointer,
	 * read the request->global_seqno and increment the reference count.
	 *
	 * The reference count is incremented atomically. If it is zero,
	 * the lookup knows the request is unallocated and complete. Otherwise,
	 * it is either still in use, or has been reallocated and reset
	 * with dma_fence_init(). This increment is safe for release as we
	 * check that the request we have a reference to matches the active
	 * request.
	 *
	 * Before we increment the refcount, we chase the request->engine
	 * pointer. We must not call kmem_cache_zalloc() or else we set
	 * that pointer to NULL and cause a crash during the lookup. If
	 * we see the request is completed (based on the value of the
	 * old engine and seqno), the lookup is complete and reports NULL.
	 * If we decide the request is not completed (new engine or seqno),
	 * then we grab a reference and double check that it is still the
	 * active request - which it won't be, and restart the lookup.
	 *
	 * Do not use kmem_cache_zalloc() here!
	 */
	req = kmem_cache_alloc(dev_priv->requests, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto err_unreserve;
	}

	req->timeline = i915_gem_context_lookup_timeline(ctx, engine);
	GEM_BUG_ON(req->timeline == engine->timeline);

	spin_lock_init(&req->lock);
	dma_fence_init(&req->fence,
		       &i915_fence_ops,
		       &req->lock,
		       req->timeline->fence_context,
		       __timeline_get_seqno(req->timeline->common));

	i915_sw_fence_init(&req->submit, submit_notify);
	i915_sw_fence_init(&req->execute, execute_notify);
	/* Ensure that the execute fence completes after the submit fence -
	 * as we complete the execute fence from within the submit fence
	 * callback, its completion would otherwise be visible first.
	 */
	i915_sw_fence_await_sw_fence(&req->execute, &req->submit, &req->execq);

	INIT_LIST_HEAD(&req->active_list);
	req->i915 = dev_priv;
	req->engine = engine;
	req->ctx = i915_gem_context_get(ctx);

	/* No zalloc, must clear what we need by hand */
	req->global_seqno = 0;
	req->previous_context = NULL;
	req->file_priv = NULL;
	req->batch = NULL;

	/*
	 * Reserve space in the ring buffer for all the commands required to
	 * eventually emit this request. This is to guarantee that the
	 * i915_add_request() call can't fail. Note that the reserve may need
	 * to be redone if the request is not actually submitted straight
	 * away, e.g. because a GPU scheduler has deferred it.
	 */
	req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
	GEM_BUG_ON(req->reserved_space < engine->emit_breadcrumb_sz);

	if (i915.enable_execlists)
		ret = intel_logical_ring_alloc_request_extras(req);
	else
		ret = intel_ring_alloc_request_extras(req);
	if (ret)
		goto err_ctx;

	/* Record the position of the start of the request so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the head.
	 */
	req->head = req->ring->tail;

	return req;

err_ctx:
	i915_gem_context_put(ctx);
	kmem_cache_free(dev_priv->requests, req);
err_unreserve:
	dev_priv->gt.active_requests--;
	return ERR_PTR(ret);
}

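/*
 * Order the execution of @to after @from using the cheapest mechanism
 * available: nothing at all if they already share a timeline, a submit-fence
 * link if they run on the same engine, a dma-fence wait if @from has not yet
 * been assigned a global seqno, and otherwise a semaphore (or CPU wait),
 * filtered through sync_seqno[] so that we never wait twice on the same
 * engine/seqno pair.
 */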
static int
i915_gem_request_await_request(struct drm_i915_gem_request *to,
			       struct drm_i915_gem_request *from)
{
	int ret;

	GEM_BUG_ON(to == from);

	if (to->timeline == from->timeline)
		return 0;

	if (to->engine == from->engine) {
		ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
						       &from->submit,
						       GFP_KERNEL);
		return ret < 0 ? ret : 0;
	}

	if (!from->global_seqno) {
		ret = i915_sw_fence_await_dma_fence(&to->submit,
						    &from->fence, 0,
						    GFP_KERNEL);
		return ret < 0 ? ret : 0;
	}

	if (from->global_seqno <= to->timeline->sync_seqno[from->engine->id])
		return 0;

	trace_i915_gem_ring_sync_to(to, from);
	if (!i915.semaphores) {
		if (!i915_spin_request(from, TASK_INTERRUPTIBLE, 2)) {
			ret = i915_sw_fence_await_dma_fence(&to->submit,
							    &from->fence, 0,
							    GFP_KERNEL);
			if (ret < 0)
				return ret;
		}
	} else {
		ret = to->engine->semaphore.sync_to(to, from);
		if (ret)
			return ret;
	}

	to->timeline->sync_seqno[from->engine->id] = from->global_seqno;
	return 0;
}

int
i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
				 struct dma_fence *fence)
{
	struct dma_fence_array *array;
	int ret;
	int i;

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return 0;

	if (dma_fence_is_i915(fence))
		return i915_gem_request_await_request(req, to_request(fence));

	if (!dma_fence_is_array(fence)) {
		ret = i915_sw_fence_await_dma_fence(&req->submit,
						    fence, I915_FENCE_TIMEOUT,
						    GFP_KERNEL);
		return ret < 0 ? ret : 0;
	}

	/* Note that if the fence-array was created in signal-on-any mode,
	 * we should *not* decompose it into its individual fences. However,
	 * we don't currently store which mode the fence-array is operating
	 * in. Fortunately, the only user of signal-on-any is private to
	 * amdgpu and we should not see any incoming fence-array from
	 * sync-file being in signal-on-any mode.
	 */

	array = to_dma_fence_array(fence);
	for (i = 0; i < array->num_fences; i++) {
		struct dma_fence *child = array->fences[i];

		if (dma_fence_is_i915(child))
			ret = i915_gem_request_await_request(req,
							     to_request(child));
		else
			ret = i915_sw_fence_await_dma_fence(&req->submit,
							    child, I915_FENCE_TIMEOUT,
							    GFP_KERNEL);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/**
 * i915_gem_request_await_object - set this request to (async) wait upon a bo
 *
 * @to: request we are wishing to use
 * @obj: object which may be in use on another ring.
 * @write: whether the wait is on behalf of a write (so that all readers,
 *	   not just the last writer, must be waited upon)
 *
 * This code is meant to abstract object synchronization with the GPU.
 * Conceptually we serialise writes between engines inside the GPU.
 * We only allow one engine to write into a buffer at any time, but
 * multiple readers. To ensure each has a coherent view of memory, we must:
 *
 * - If there is an outstanding write request to the object, the new
 *   request must wait for it to complete (either CPU or in hw, requests
 *   on the same ring will be naturally ordered).
 *
 * - If we are a write request (pending_write_domain is set), the new
 *   request must wait for outstanding read requests to complete.
 *
 * Returns 0 if successful, else propagates up the lower layer error.
 */
int
i915_gem_request_await_object(struct drm_i915_gem_request *to,
			      struct drm_i915_gem_object *obj,
			      bool write)
{
	struct dma_fence *excl;
	int ret = 0;

	if (write) {
		struct dma_fence **shared;
		unsigned int count, i;

		ret = reservation_object_get_fences_rcu(obj->resv,
							&excl, &count, &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			ret = i915_gem_request_await_dma_fence(to, shared[i]);
			if (ret)
				break;

			dma_fence_put(shared[i]);
		}

		for (; i < count; i++)
			dma_fence_put(shared[i]);
		kfree(shared);
	} else {
		excl = reservation_object_get_excl_rcu(obj->resv);
	}

	if (excl) {
		if (ret == 0)
			ret = i915_gem_request_await_dma_fence(to, excl);

		dma_fence_put(excl);
	}

	return ret;
}

static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (dev_priv->gt.awake)
		return;

	intel_runtime_pm_get_noresume(dev_priv);
	dev_priv->gt.awake = true;

	intel_enable_gt_powersave(dev_priv);
	i915_update_gfx_val(dev_priv);
	if (INTEL_GEN(dev_priv) >= 6)
		gen6_rps_busy(dev_priv);

	queue_delayed_work(dev_priv->wq,
			   &dev_priv->gt.retire_work,
			   round_jiffies_up_relative(HZ));
}

/*
 * NB: This function is not allowed to fail. Doing so would mean the
 * request is not being tracked for completion but the work itself is
 * going to happen on the hardware. This would be a Bad Thing(tm).
 */
void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_ring *ring = request->ring;
	struct intel_timeline *timeline = request->timeline;
	struct drm_i915_gem_request *prev;
	int err;

	lockdep_assert_held(&request->i915->drm.struct_mutex);
	trace_i915_gem_request_add(request);

	/*
	 * To ensure that this call will not fail, space for its emissions
	 * should already have been reserved in the ring buffer. Let the ring
	 * know that it is time to use that space up.
	 */
	request->reserved_space = 0;

	/*
	 * Emit any outstanding flushes - execbuf can fail to emit the flush
	 * after having emitted the batchbuffer command. Hence we need to fix
	 * things up similar to emitting the lazy request. The difference here
	 * is that the flush _must_ happen before the next request, no matter
	 * what.
	 */
	if (flush_caches) {
		err = engine->emit_flush(request, EMIT_FLUSH);

		/* Not allowed to fail! */
		WARN(err, "engine->emit_flush() failed: %d!\n", err);
	}

	/* Record the position of the start of the breadcrumb so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the ring's HEAD.
	 */
	err = intel_ring_begin(request, engine->emit_breadcrumb_sz);
	GEM_BUG_ON(err);
	request->postfix = ring->tail;
	ring->tail += engine->emit_breadcrumb_sz * sizeof(u32);

	/* Seal the request and mark it as pending execution. Note that
	 * we may inspect this state, without holding any locks, during
	 * hangcheck. Hence we apply the barrier to ensure that we do not
	 * see a more recent value in the hws than we are tracking.
	 */

	prev = i915_gem_active_raw(&timeline->last_request,
				   &request->i915->drm.struct_mutex);
	if (prev)
		i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
					     &request->submitq);

	spin_lock_irq(&timeline->lock);
	list_add_tail(&request->link, &timeline->requests);
	spin_unlock_irq(&timeline->lock);

	GEM_BUG_ON(i915_seqno_passed(timeline->last_submitted_seqno,
				     request->fence.seqno));

	timeline->last_submitted_seqno = request->fence.seqno;
	i915_gem_active_set(&timeline->last_request, request);

	list_add_tail(&request->ring_link, &ring->request_list);
	request->emitted_jiffies = jiffies;

	i915_gem_mark_busy(engine);

	local_bh_disable();
	i915_sw_fence_commit(&request->submit);
	local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
}

static void reset_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	if (list_empty(&wait->task_list))
		__add_wait_queue(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}

static unsigned long local_clock_us(unsigned int *cpu)
{
	unsigned long t;

	/* Cheaply and approximately convert from nanoseconds to microseconds.
	 * The result and subsequent calculations are also defined in the same
	 * approximate microseconds units. The principal source of timing
	 * error here is from the simple truncation.
	 *
	 * Note that local_clock() is only defined wrt to the current CPU;
	 * the comparisons are no longer valid if we switch CPUs. Instead of
	 * blocking preemption for the entire busywait, we can detect the CPU
	 * switch and use that as indicator of system load and a reason to
	 * stop busywaiting, see busywait_stop().
	 */
	*cpu = get_cpu();
	t = local_clock() >> 10;
	put_cpu();

	return t;
}

static bool busywait_stop(unsigned long timeout, unsigned int cpu)
{
	unsigned int this_cpu;

	if (time_after(local_clock_us(&this_cpu), timeout))
		return true;

	return this_cpu != cpu;
}

bool __i915_spin_request(const struct drm_i915_gem_request *req,
			 int state, unsigned long timeout_us)
{
	unsigned int cpu;

	/* When waiting for high frequency requests, e.g. during synchronous
	 * rendering split between the CPU and GPU, the finite amount of time
	 * required to set up the irq and wait upon it limits the response
	 * rate. By busywaiting on the request completion for a short while we
	 * can service the high frequency waits as quickly as possible. However,
	 * if it is a slow request, we want to sleep as quickly as possible.
	 * The tradeoff between waiting and sleeping is roughly the time it
	 * takes to sleep on a request, on the order of a microsecond.
	 */

	timeout_us += local_clock_us(&cpu);
	do {
		if (__i915_gem_request_completed(req))
			return true;

		if (signal_pending_state(state, current))
			break;

		if (busywait_stop(timeout_us, cpu))
			break;

		cpu_relax_lowlatency();
	} while (!need_resched());

	return false;
}

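/*
 * The spin budget handed to __i915_spin_request() is tiny in practice:
 * i915_wait_request() spins for at most 5us before arming the interrupt,
 * and for a further 2us after each wakeup once the request is known to be
 * executing on the GPU.
 */
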
static long
__i915_request_wait_for_execute(struct drm_i915_gem_request *request,
				unsigned int flags,
				long timeout)
{
	const int state = flags & I915_WAIT_INTERRUPTIBLE ?
		TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
	wait_queue_head_t *q = &request->i915->gpu_error.wait_queue;
	DEFINE_WAIT(reset);
	DEFINE_WAIT(wait);

	if (flags & I915_WAIT_LOCKED)
		add_wait_queue(q, &reset);

	do {
		prepare_to_wait(&request->execute.wait, &wait, state);

		if (i915_sw_fence_done(&request->execute))
			break;

		if (flags & I915_WAIT_LOCKED &&
		    i915_reset_in_progress(&request->i915->gpu_error)) {
			__set_current_state(TASK_RUNNING);
			i915_reset(request->i915);
			reset_wait_queue(q, &reset);
			continue;
		}

		if (signal_pending_state(state, current)) {
			timeout = -ERESTARTSYS;
			break;
		}

		timeout = io_schedule_timeout(timeout);
	} while (timeout);
	finish_wait(&request->execute.wait, &wait);

	if (flags & I915_WAIT_LOCKED)
		remove_wait_queue(q, &reset);

	return timeout;
}

/**
 * i915_wait_request - wait until execution of request has finished
 * @req: the request to wait upon
 * @flags: how to wait
 * @timeout: how long to wait in jiffies
 *
 * i915_wait_request() waits for the request to be completed, for a
 * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
 * unbounded wait).
 *
 * If the caller holds the struct_mutex, it must pass I915_WAIT_LOCKED
 * in via the flags; conversely, if the struct_mutex is not held, the caller
 * must not specify that the wait is locked.
 *
 * Returns the remaining time (in jiffies) if the request completed, which may
 * be zero or -ETIME if the request is unfinished after the timeout expires.
 * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is
 * pending before the request completes.
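 *
 * For example, an interruptible wait of up to one second without holding
 * struct_mutex:
 *
 *	long ret = i915_wait_request(req, I915_WAIT_INTERRUPTIBLE, HZ);
 *	if (ret < 0)
 *		return ret; /* interrupted or timed out */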
 */
long i915_wait_request(struct drm_i915_gem_request *req,
		       unsigned int flags,
		       long timeout)
{
	const int state = flags & I915_WAIT_INTERRUPTIBLE ?
		TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
	DEFINE_WAIT(reset);
	struct intel_wait wait;

	might_sleep();
#if IS_ENABLED(CONFIG_LOCKDEP)
	GEM_BUG_ON(debug_locks &&
		   !!lockdep_is_held(&req->i915->drm.struct_mutex) !=
		   !!(flags & I915_WAIT_LOCKED));
#endif
	GEM_BUG_ON(timeout < 0);

	if (i915_gem_request_completed(req))
		return timeout;

	if (!timeout)
		return -ETIME;

	trace_i915_gem_request_wait_begin(req);

	if (!i915_sw_fence_done(&req->execute)) {
		timeout = __i915_request_wait_for_execute(req, flags, timeout);
		if (timeout < 0)
			goto complete;

		GEM_BUG_ON(!i915_sw_fence_done(&req->execute));
	}
	GEM_BUG_ON(!i915_sw_fence_done(&req->submit));
	GEM_BUG_ON(!req->global_seqno);

	/* Optimistic short spin before touching IRQs */
	if (i915_spin_request(req, state, 5))
		goto complete;

	set_current_state(state);
	if (flags & I915_WAIT_LOCKED)
		add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);

	intel_wait_init(&wait, req->global_seqno);
	if (intel_engine_add_wait(req->engine, &wait))
		/* In order to check that we haven't missed the interrupt
		 * as we enabled it, we need to kick ourselves to do a
		 * coherent check on the seqno before we sleep.
		 */
		goto wakeup;

	for (;;) {
		if (signal_pending_state(state, current)) {
			timeout = -ERESTARTSYS;
			break;
		}

		if (!timeout) {
			timeout = -ETIME;
			break;
		}

		timeout = io_schedule_timeout(timeout);

		if (intel_wait_complete(&wait))
			break;

		set_current_state(state);

wakeup:
		/* Carefully check if the request is complete, giving time
		 * for the seqno to be visible following the interrupt.
		 * We also have to check in case we are kicked by the GPU
		 * reset in order to drop the struct_mutex.
		 */
		if (__i915_request_irq_complete(req))
			break;

		/* If the GPU is hung, and we hold the lock, reset the GPU
		 * and then check for completion. On a full reset, the engine's
		 * HW seqno will be advanced past us and we are complete.
		 * If we do a partial reset, we have to wait for the GPU to
		 * resume and update the breadcrumb.
		 *
		 * If we don't hold the mutex, we can just wait for the worker
		 * to come along and update the breadcrumb (either directly
		 * itself, or indirectly by recovering the GPU).
		 */
		if (flags & I915_WAIT_LOCKED &&
		    i915_reset_in_progress(&req->i915->gpu_error)) {
			__set_current_state(TASK_RUNNING);
			i915_reset(req->i915);
			reset_wait_queue(&req->i915->gpu_error.wait_queue,
					 &reset);
			continue;
		}

		/* Only spin if we know the GPU is processing this request */
		if (i915_spin_request(req, state, 2))
			break;
	}

	intel_engine_remove_wait(req->engine, &wait);
	if (flags & I915_WAIT_LOCKED)
		remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
	__set_current_state(TASK_RUNNING);

complete:
	trace_i915_gem_request_wait_end(req);

	return timeout;
}

static void engine_retire_requests(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *request, *next;

	list_for_each_entry_safe(request, next,
				 &engine->timeline->requests, link) {
		if (!__i915_gem_request_completed(request))
			return;

		i915_gem_request_retire(request);
	}
}

void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	if (!dev_priv->gt.active_requests)
		return;

	GEM_BUG_ON(!dev_priv->gt.awake);

	for_each_engine(engine, dev_priv, id)
		engine_retire_requests(engine);

	if (!dev_priv->gt.active_requests)
		mod_delayed_work(dev_priv->wq,
				 &dev_priv->gt.idle_work,
				 msecs_to_jiffies(100));
}