/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/prefetch.h>
#include <linux/dma-fence-array.h>

#include "i915_drv.h"

static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{
	return "i915";
}

static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
{
	return to_request(fence)->timeline->common->name;
}

static bool i915_fence_signaled(struct dma_fence *fence)
{
	return i915_gem_request_completed(to_request(fence));
}

static bool i915_fence_enable_signaling(struct dma_fence *fence)
{
	if (i915_fence_signaled(fence))
		return false;

	intel_engine_enable_signaling(to_request(fence));
	return true;
}

static signed long i915_fence_wait(struct dma_fence *fence,
				   bool interruptible,
				   signed long timeout)
{
	return i915_wait_request(to_request(fence), interruptible, timeout);
}

static void i915_fence_release(struct dma_fence *fence)
{
	struct drm_i915_gem_request *req = to_request(fence);

	/* The request is put onto a RCU freelist (i.e. the address
	 * is immediately reused), mark the fences as being freed now.
	 * Otherwise the debugobjects for the fences are only marked as
	 * freed when the slab cache itself is freed, and so we would get
	 * caught trying to reuse dead objects.
	 */
	i915_sw_fence_fini(&req->submit);
	i915_sw_fence_fini(&req->execute);

	kmem_cache_free(req->i915->requests, req);
}

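/*
 * The callbacks above expose i915 requests to the rest of the kernel as
 * ordinary dma-fences, so they can be waited upon and signalled through
 * the common dma-fence interfaces (including by other drivers).
 */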
const struct dma_fence_ops i915_fence_ops = {
	.get_driver_name = i915_fence_get_driver_name,
	.get_timeline_name = i915_fence_get_timeline_name,
	.enable_signaling = i915_fence_enable_signaling,
	.signaled = i915_fence_signaled,
	.wait = i915_fence_wait,
	.release = i915_fence_release,
};

int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
				   struct drm_file *file)
{
	struct drm_i915_private *dev_private;
	struct drm_i915_file_private *file_priv;

	WARN_ON(!req || !file || req->file_priv);

	if (!req || !file)
		return -EINVAL;

	if (req->file_priv)
		return -EINVAL;

	dev_private = req->i915;
	file_priv = file->driver_priv;

	spin_lock(&file_priv->mm.lock);
	req->file_priv = file_priv;
	list_add_tail(&req->client_list, &file_priv->mm.request_list);
	spin_unlock(&file_priv->mm.lock);

	return 0;
}

static inline void
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
	struct drm_i915_file_private *file_priv = request->file_priv;

	if (!file_priv)
		return;

	spin_lock(&file_priv->mm.lock);
	list_del(&request->client_list);
	request->file_priv = NULL;
	spin_unlock(&file_priv->mm.lock);
}

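/*
 * An i915_priotree records the scheduling dependencies of a request:
 * signalers_list holds the requests this one must wait for, waiters_list
 * the requests waiting on us. The scheduler backend (engine->schedule)
 * walks these lists when propagating priority along the dependency chain.
 */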
static struct i915_dependency *
i915_dependency_alloc(struct drm_i915_private *i915)
{
	return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
}

static void
i915_dependency_free(struct drm_i915_private *i915,
		     struct i915_dependency *dep)
{
	kmem_cache_free(i915->dependencies, dep);
}

static void
__i915_priotree_add_dependency(struct i915_priotree *pt,
			       struct i915_priotree *signal,
			       struct i915_dependency *dep,
			       unsigned long flags)
{
	INIT_LIST_HEAD(&dep->dfs_link);
	list_add(&dep->wait_link, &signal->waiters_list);
	list_add(&dep->signal_link, &pt->signalers_list);
	dep->signaler = signal;
	dep->flags = flags;
}

static int
i915_priotree_add_dependency(struct drm_i915_private *i915,
			     struct i915_priotree *pt,
			     struct i915_priotree *signal)
{
	struct i915_dependency *dep;

	dep = i915_dependency_alloc(i915);
	if (!dep)
		return -ENOMEM;

	__i915_priotree_add_dependency(pt, signal, dep, I915_DEPENDENCY_ALLOC);
	return 0;
}

static void
i915_priotree_fini(struct drm_i915_private *i915, struct i915_priotree *pt)
{
	struct i915_dependency *dep, *next;

	GEM_BUG_ON(!RB_EMPTY_NODE(&pt->node));

	/* Everyone we depended upon (the fences we wait to be signaled)
	 * should retire before us and remove themselves from our list.
	 * However, retirement is run independently on each timeline and
	 * so we may be called out-of-order.
	 */
	list_for_each_entry_safe(dep, next, &pt->signalers_list, signal_link) {
		list_del(&dep->wait_link);
		if (dep->flags & I915_DEPENDENCY_ALLOC)
			i915_dependency_free(i915, dep);
	}

	/* Remove ourselves from everyone who depends upon us */
	list_for_each_entry_safe(dep, next, &pt->waiters_list, wait_link) {
		list_del(&dep->signal_link);
		if (dep->flags & I915_DEPENDENCY_ALLOC)
			i915_dependency_free(i915, dep);
	}
}

static void
i915_priotree_init(struct i915_priotree *pt)
{
	INIT_LIST_HEAD(&pt->signalers_list);
	INIT_LIST_HEAD(&pt->waiters_list);
	RB_CLEAR_NODE(&pt->node);
	pt->priority = INT_MIN;
}

void i915_gem_retire_noop(struct i915_gem_active *active,
			  struct drm_i915_gem_request *request)
{
	/* Space left intentionally blank */
}

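/*
 * Unwind a single completed request: unlink it from the engine and ring
 * lists, run the retirement callbacks of everything that was tracking it,
 * unpin the context made idle by its completion, signal its fence and
 * finally drop the reference held by the timeline.
 */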
static void i915_gem_request_retire(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct i915_gem_active *active, *next;

	lockdep_assert_held(&request->i915->drm.struct_mutex);
	GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
	GEM_BUG_ON(!i915_sw_fence_signaled(&request->execute));
	GEM_BUG_ON(!i915_gem_request_completed(request));
	GEM_BUG_ON(!request->i915->gt.active_requests);

	trace_i915_gem_request_retire(request);

	spin_lock_irq(&engine->timeline->lock);
	list_del_init(&request->link);
	spin_unlock_irq(&engine->timeline->lock);

	/* We know the GPU must have read the request to have
	 * sent us the seqno + interrupt, so use the position
	 * of the tail of the request to update the last known position
	 * of the GPU head.
	 *
	 * Note this requires that we are always called in request
	 * completion order.
	 */
	list_del(&request->ring_link);
	request->ring->last_retired_head = request->postfix;
	if (!--request->i915->gt.active_requests) {
		GEM_BUG_ON(!request->i915->gt.awake);
		mod_delayed_work(request->i915->wq,
				 &request->i915->gt.idle_work,
				 msecs_to_jiffies(100));
	}

	/* Walk through the active list, calling retire on each. This allows
	 * objects to track their GPU activity and mark themselves as idle
	 * when their *last* active request is completed (updating state
	 * tracking lists for eviction, active references for GEM, etc).
	 *
	 * As the ->retire() may free the node, we decouple it first and
	 * pass along the auxiliary information (to avoid dereferencing
	 * the node after the callback).
	 */
	list_for_each_entry_safe(active, next, &request->active_list, link) {
		/* In microbenchmarks or focusing upon time inside the kernel,
		 * we may spend an inordinate amount of time simply handling
		 * the retirement of requests and processing their callbacks.
		 * Of which, this loop itself is particularly hot due to the
		 * cache misses when jumping around the list of i915_gem_active.
		 * So we try to keep this loop as streamlined as possible and
		 * also prefetch the next i915_gem_active to try and hide
		 * the likely cache miss.
		 */
		prefetchw(next);

		INIT_LIST_HEAD(&active->link);
		RCU_INIT_POINTER(active->request, NULL);

		active->retire(active, request);
	}

	i915_gem_request_remove_from_client(request);

	/* Retirement decays the ban score as it is a sign of ctx progress */
	if (request->ctx->ban_score > 0)
		request->ctx->ban_score--;

	/* The backing object for the context is done after switching to the
	 * *next* context. Therefore we cannot retire the previous context until
	 * the next context has already started running. However, since we
	 * cannot take the required locks at i915_gem_request_submit() we
	 * defer the unpinning of the active context to now, retirement of
	 * the subsequent request.
	 */
	if (engine->last_retired_context)
		engine->context_unpin(engine, engine->last_retired_context);
	engine->last_retired_context = request->ctx;

	dma_fence_signal(&request->fence);

	i915_priotree_fini(request->i915, &request->priotree);
	i915_gem_request_put(request);
}

void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	struct drm_i915_gem_request *tmp;

	lockdep_assert_held(&req->i915->drm.struct_mutex);
	GEM_BUG_ON(!i915_gem_request_completed(req));

	if (list_empty(&req->link))
		return;

	do {
		tmp = list_first_entry(&engine->timeline->requests,
				       typeof(*tmp), link);

		i915_gem_request_retire(tmp);
	} while (tmp != req);
}

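/*
 * Report -EIO if the GPU is terminally wedged; while a reset is still in
 * progress, return -EAGAIN so the caller can drop the struct_mutex and
 * retry (or -EIO for non-interruptible callers that cannot).
 */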
static int i915_gem_check_wedge(struct drm_i915_private *dev_priv)
{
	struct i915_gpu_error *error = &dev_priv->gpu_error;

	if (i915_terminally_wedged(error))
		return -EIO;

	if (i915_reset_in_progress(error)) {
		/* Non-interruptible callers can't handle -EAGAIN, hence return
		 * -EIO unconditionally for these.
		 */
		if (!dev_priv->mm.interruptible)
			return -EIO;

		return -EAGAIN;
	}

	return 0;
}

static int i915_gem_init_global_seqno(struct drm_i915_private *i915, u32 seqno)
{
	struct i915_gem_timeline *timeline = &i915->gt.global_timeline;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int ret;

	/* Carefully retire all requests without writing to the rings */
	ret = i915_gem_wait_for_idle(i915,
				     I915_WAIT_INTERRUPTIBLE |
				     I915_WAIT_LOCKED);
	if (ret)
		return ret;

	i915_gem_retire_requests(i915);
	GEM_BUG_ON(i915->gt.active_requests > 1);

	/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
	if (!i915_seqno_passed(seqno, atomic_read(&timeline->seqno))) {
		while (intel_breadcrumbs_busy(i915))
			cond_resched(); /* spin until threads are complete */
	}
	atomic_set(&timeline->seqno, seqno);

	/* Finally reset hw state */
	for_each_engine(engine, i915, id)
		intel_engine_init_global_seqno(engine, seqno);

	list_for_each_entry(timeline, &i915->gt.timelines, link) {
		for_each_engine(engine, i915, id) {
			struct intel_timeline *tl = &timeline->engine[id];

			memset(tl->sync_seqno, 0, sizeof(tl->sync_seqno));
		}
	}

	return 0;
}

int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	if (seqno == 0)
		return -EINVAL;

	/* The HWS page needs to be set to one less than the value we will
	 * inject to the ring.
	 */
	return i915_gem_init_global_seqno(dev_priv, seqno - 1);
}

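/*
 * Account for a new request in the global seqno space. If assigning a
 * seqno to every currently active request would overflow the 32b counter,
 * idle the GPU and restart the count from zero before proceeding.
 */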
static int reserve_global_seqno(struct drm_i915_private *i915)
{
	u32 active_requests = ++i915->gt.active_requests;
	u32 seqno = atomic_read(&i915->gt.global_timeline.seqno);
	int ret;

	/* Reservation is fine until we need to wrap around */
	if (likely(seqno + active_requests > seqno))
		return 0;

	ret = i915_gem_init_global_seqno(i915, 0);
	if (ret) {
		i915->gt.active_requests--;
		return ret;
	}

	return 0;
}

static u32 __timeline_get_seqno(struct i915_gem_timeline *tl)
{
	/* seqno only incremented under a mutex */
	return ++tl->seqno.counter;
}

static u32 timeline_get_seqno(struct i915_gem_timeline *tl)
{
	return atomic_inc_return(&tl->seqno);
}

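/*
 * Move a request from its per-context timeline onto the engine's global
 * timeline, assigning its global seqno and writing the breadcrumb into
 * the ring. The caller must hold the engine timeline lock.
 */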
void __i915_gem_request_submit(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_timeline *timeline;
	u32 seqno;

	/* Transfer from per-context onto the global per-engine timeline */
	timeline = engine->timeline;
	GEM_BUG_ON(timeline == request->timeline);
	assert_spin_locked(&timeline->lock);

	seqno = timeline_get_seqno(timeline->common);
	GEM_BUG_ON(!seqno);
	GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine), seqno));

	GEM_BUG_ON(i915_seqno_passed(timeline->last_submitted_seqno, seqno));
	request->previous_seqno = timeline->last_submitted_seqno;
	timeline->last_submitted_seqno = seqno;

	/* We may be recursing from the signal callback of another i915 fence */
	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
	request->global_seqno = seqno;
	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
		intel_engine_enable_signaling(request);
	spin_unlock(&request->lock);

	GEM_BUG_ON(!request->global_seqno);
	engine->emit_breadcrumb(request,
				request->ring->vaddr + request->postfix);

	spin_lock(&request->timeline->lock);
	list_move_tail(&request->link, &timeline->requests);
	spin_unlock(&request->timeline->lock);

	i915_sw_fence_commit(&request->execute);
}

void i915_gem_request_submit(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	unsigned long flags;

	/* Will be called from irq-context when using foreign fences. */
	spin_lock_irqsave(&engine->timeline->lock, flags);

	__i915_gem_request_submit(request);

	spin_unlock_irqrestore(&engine->timeline->lock, flags);
}

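/*
 * Notify callbacks for the two stages of a request's life represented as
 * software fences: the submit fence completes when all dependencies have
 * been resolved and hands the request to the engine backend; the execute
 * fence is completed from __i915_gem_request_submit() once the request
 * has been transferred to the hardware-facing engine timeline. Both drop
 * their reference on the request when freed.
 */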
static int __i915_sw_fence_call
submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	struct drm_i915_gem_request *request =
		container_of(fence, typeof(*request), submit);

	switch (state) {
	case FENCE_COMPLETE:
		request->engine->submit_request(request);
		break;

	case FENCE_FREE:
		i915_gem_request_put(request);
		break;
	}

	return NOTIFY_DONE;
}

static int __i915_sw_fence_call
execute_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	struct drm_i915_gem_request *request =
		container_of(fence, typeof(*request), execute);

	switch (state) {
	case FENCE_COMPLETE:
		break;

	case FENCE_FREE:
		i915_gem_request_put(request);
		break;
	}

	return NOTIFY_DONE;
}

/**
 * i915_gem_request_alloc - allocate a request structure
 *
 * @engine: engine that we wish to issue the request on.
 * @ctx: context that the request will be associated with.
 *       This can be NULL if the request is not directly related to
 *       any specific user context, in which case this function will
 *       choose an appropriate context to use.
 *
 * Returns a pointer to the allocated request if successful,
 * or an error code if not.
 */
struct drm_i915_gem_request *
i915_gem_request_alloc(struct intel_engine_cs *engine,
		       struct i915_gem_context *ctx)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct drm_i915_gem_request *req;
	int ret;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	/* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
	 * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
	 * and restart.
	 */
	ret = i915_gem_check_wedge(dev_priv);
	if (ret)
		return ERR_PTR(ret);

	/* Pinning the contexts may generate requests in order to acquire
	 * GGTT space, so do this first before we reserve a seqno for
	 * ourselves.
	 */
	ret = engine->context_pin(engine, ctx);
	if (ret)
		return ERR_PTR(ret);

	ret = reserve_global_seqno(dev_priv);
	if (ret)
		goto err_unpin;

	/* Move the oldest request to the slab-cache (if not in use!) */
	req = list_first_entry_or_null(&engine->timeline->requests,
				       typeof(*req), link);
	if (req && __i915_gem_request_completed(req))
		i915_gem_request_retire(req);

	/* Beware: Dragons be flying overhead.
	 *
	 * We use RCU to look up requests in flight. The lookups may
	 * race with the request being allocated from the slab freelist.
	 * That is the request we are writing to here, may be in the process
	 * of being read by __i915_gem_active_get_rcu(). As such,
	 * we have to be very careful when overwriting the contents. During
	 * the RCU lookup, we chase the request->engine pointer,
	 * read the request->global_seqno and increment the reference count.
	 *
	 * The reference count is incremented atomically. If it is zero,
	 * the lookup knows the request is unallocated and complete. Otherwise,
	 * it is either still in use, or has been reallocated and reset
	 * with dma_fence_init(). This increment is safe for release as we
	 * check that the request we have a reference to matches the active
	 * request.
	 *
	 * Before we increment the refcount, we chase the request->engine
	 * pointer. We must not call kmem_cache_zalloc() or else we set
	 * that pointer to NULL and cause a crash during the lookup. If
	 * we see the request is completed (based on the value of the
	 * old engine and seqno), the lookup is complete and reports NULL.
	 * If we decide the request is not completed (new engine or seqno),
	 * then we grab a reference and double check that it is still the
	 * active request - which it won't be and restart the lookup.
	 *
	 * Do not use kmem_cache_zalloc() here!
	 */
	req = kmem_cache_alloc(dev_priv->requests, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto err_unreserve;
	}

	req->timeline = i915_gem_context_lookup_timeline(ctx, engine);
	GEM_BUG_ON(req->timeline == engine->timeline);

	spin_lock_init(&req->lock);
	dma_fence_init(&req->fence,
		       &i915_fence_ops,
		       &req->lock,
		       req->timeline->fence_context,
		       __timeline_get_seqno(req->timeline->common));

	/* We bump the ref for the fence chain */
	i915_sw_fence_init(&i915_gem_request_get(req)->submit, submit_notify);
	i915_sw_fence_init(&i915_gem_request_get(req)->execute, execute_notify);

	/* Ensure that the execute fence completes after the submit fence -
	 * as we complete the execute fence from within the submit fence
	 * callback, its completion would otherwise be visible first.
	 */
	i915_sw_fence_await_sw_fence(&req->execute, &req->submit, &req->execq);

	i915_priotree_init(&req->priotree);

	INIT_LIST_HEAD(&req->active_list);
	req->i915 = dev_priv;
	req->engine = engine;
	req->ctx = ctx;

	/* No zalloc, must clear what we need by hand */
	req->global_seqno = 0;
	req->file_priv = NULL;
	req->batch = NULL;

	/*
	 * Reserve space in the ring buffer for all the commands required to
	 * eventually emit this request. This is to guarantee that the
	 * i915_add_request() call can't fail. Note that the reserve may need
	 * to be redone if the request is not actually submitted straight
	 * away, e.g. because a GPU scheduler has deferred it.
	 */
	req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
	GEM_BUG_ON(req->reserved_space < engine->emit_breadcrumb_sz);

	if (i915.enable_execlists)
		ret = intel_logical_ring_alloc_request_extras(req);
	else
		ret = intel_ring_alloc_request_extras(req);
	if (ret)
		goto err_ctx;

	/* Record the position of the start of the request so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the head.
	 */
	req->head = req->ring->tail;

	return req;

err_ctx:
	/* Make sure we didn't add ourselves to external state before freeing */
	GEM_BUG_ON(!list_empty(&req->active_list));
	GEM_BUG_ON(!list_empty(&req->priotree.signalers_list));
	GEM_BUG_ON(!list_empty(&req->priotree.waiters_list));

	kmem_cache_free(dev_priv->requests, req);
err_unreserve:
	dev_priv->gt.active_requests--;
err_unpin:
	engine->context_unpin(engine, ctx);
	return ERR_PTR(ret);
}

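/*
 * Arrange for @to to begin execution only after @from has completed, using
 * the cheapest mechanism available: nothing for requests on the same
 * timeline, a submit-fence chain for the same engine, hardware semaphores
 * where supported, or a short spin followed by an interrupt-driven wait
 * on the fence otherwise.
 */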
static int
i915_gem_request_await_request(struct drm_i915_gem_request *to,
			       struct drm_i915_gem_request *from)
{
	int ret;

	GEM_BUG_ON(to == from);

	if (to->engine->schedule) {
		ret = i915_priotree_add_dependency(to->i915,
						   &to->priotree,
						   &from->priotree);
		if (ret < 0)
			return ret;
	}

	if (to->timeline == from->timeline)
		return 0;

	if (to->engine == from->engine) {
		ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
						       &from->submit,
						       GFP_KERNEL);
		return ret < 0 ? ret : 0;
	}

	if (!from->global_seqno) {
		ret = i915_sw_fence_await_dma_fence(&to->submit,
						    &from->fence, 0,
						    GFP_KERNEL);
		return ret < 0 ? ret : 0;
	}

	if (from->global_seqno <= to->timeline->sync_seqno[from->engine->id])
		return 0;

	trace_i915_gem_ring_sync_to(to, from);
	if (!i915.semaphores) {
		if (!i915_spin_request(from, TASK_INTERRUPTIBLE, 2)) {
			ret = i915_sw_fence_await_dma_fence(&to->submit,
							    &from->fence, 0,
							    GFP_KERNEL);
			if (ret < 0)
				return ret;
		}
	} else {
		ret = to->engine->semaphore.sync_to(to, from);
		if (ret)
			return ret;
	}

	to->timeline->sync_seqno[from->engine->id] = from->global_seqno;
	return 0;
}

int
i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
				 struct dma_fence *fence)
{
	struct dma_fence_array *array;
	int ret;
	int i;

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return 0;

	if (dma_fence_is_i915(fence))
		return i915_gem_request_await_request(req, to_request(fence));

	if (!dma_fence_is_array(fence)) {
		ret = i915_sw_fence_await_dma_fence(&req->submit,
						    fence, I915_FENCE_TIMEOUT,
						    GFP_KERNEL);
		return ret < 0 ? ret : 0;
	}

	/* Note that if the fence-array was created in signal-on-any mode,
	 * we should *not* decompose it into its individual fences. However,
	 * we don't currently store which mode the fence-array is operating
	 * in. Fortunately, the only user of signal-on-any is private to
	 * amdgpu and we should not see any incoming fence-array from
	 * sync-file being in signal-on-any mode.
	 */

	array = to_dma_fence_array(fence);
	for (i = 0; i < array->num_fences; i++) {
		struct dma_fence *child = array->fences[i];

		if (dma_fence_is_i915(child))
			ret = i915_gem_request_await_request(req,
							     to_request(child));
		else
			ret = i915_sw_fence_await_dma_fence(&req->submit,
							    child, I915_FENCE_TIMEOUT,
							    GFP_KERNEL);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/**
 * i915_gem_request_await_object - set this request to (async) wait upon a bo
 *
 * @to: request we are wishing to use
 * @obj: object which may be in use on another ring.
 * @write: whether the wait is on behalf of a writer
 *
 * This code is meant to abstract object synchronization with the GPU.
 * Conceptually we serialise writes between engines inside the GPU.
 * We only allow one engine to write into a buffer at any time, but
 * multiple readers. To ensure each has a coherent view of memory, we must:
 *
 * - If there is an outstanding write request to the object, the new
 *   request must wait for it to complete (either CPU or in hw, requests
 *   on the same ring will be naturally ordered).
 *
 * - If we are a write request (pending_write_domain is set), the new
 *   request must wait for outstanding read requests to complete.
 *
 * Returns 0 if successful, else propagates up the lower layer error.
 */
int
i915_gem_request_await_object(struct drm_i915_gem_request *to,
			      struct drm_i915_gem_object *obj,
			      bool write)
{
	struct dma_fence *excl;
	int ret = 0;

	if (write) {
		struct dma_fence **shared;
		unsigned int count, i;

		ret = reservation_object_get_fences_rcu(obj->resv,
							&excl, &count, &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			ret = i915_gem_request_await_dma_fence(to, shared[i]);
			if (ret)
				break;

			dma_fence_put(shared[i]);
		}

		for (; i < count; i++)
			dma_fence_put(shared[i]);
		kfree(shared);
	} else {
		excl = reservation_object_get_excl_rcu(obj->resv);
	}

	if (excl) {
		if (ret == 0)
			ret = i915_gem_request_await_dma_fence(to, excl);

		dma_fence_put(excl);
	}

	return ret;
}

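/*
 * First request after idling: take a runtime-pm wakeref, re-enable GT
 * powersave/RPS and queue the retirement worker.
 */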
static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (dev_priv->gt.awake)
		return;

	GEM_BUG_ON(!dev_priv->gt.active_requests);

	intel_runtime_pm_get_noresume(dev_priv);
	dev_priv->gt.awake = true;

	intel_enable_gt_powersave(dev_priv);
	i915_update_gfx_val(dev_priv);
	if (INTEL_GEN(dev_priv) >= 6)
		gen6_rps_busy(dev_priv);

	queue_delayed_work(dev_priv->wq,
			   &dev_priv->gt.retire_work,
			   round_jiffies_up_relative(HZ));
}

/*
 * NB: This function is not allowed to fail. Doing so would mean the
 * request is not being tracked for completion but the work itself is
 * going to happen on the hardware. This would be a Bad Thing(tm).
 */
void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_ring *ring = request->ring;
	struct intel_timeline *timeline = request->timeline;
	struct drm_i915_gem_request *prev;
	int err;

	lockdep_assert_held(&request->i915->drm.struct_mutex);
	trace_i915_gem_request_add(request);

	/*
	 * To ensure that this call will not fail, space for its emissions
	 * should already have been reserved in the ring buffer. Let the ring
	 * know that it is time to use that space up.
	 */
	request->reserved_space = 0;

	/*
	 * Emit any outstanding flushes - execbuf can fail to emit the flush
	 * after having emitted the batchbuffer command. Hence we need to fix
	 * things up similar to emitting the lazy request. The difference here
	 * is that the flush _must_ happen before the next request, no matter
	 * what.
	 */
	if (flush_caches) {
		err = engine->emit_flush(request, EMIT_FLUSH);

		/* Not allowed to fail! */
		WARN(err, "engine->emit_flush() failed: %d!\n", err);
	}

	/* Record the position of the start of the breadcrumb so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the ring's HEAD.
	 */
	err = intel_ring_begin(request, engine->emit_breadcrumb_sz);
	GEM_BUG_ON(err);
	request->postfix = ring->tail;
	ring->tail += engine->emit_breadcrumb_sz * sizeof(u32);

	/* Seal the request and mark it as pending execution. Note that
	 * we may inspect this state, without holding any locks, during
	 * hangcheck. Hence we apply the barrier to ensure that we do not
	 * see a more recent value in the hws than we are tracking.
	 */

	prev = i915_gem_active_raw(&timeline->last_request,
				   &request->i915->drm.struct_mutex);
	if (prev) {
		i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
					     &request->submitq);
		if (engine->schedule)
			__i915_priotree_add_dependency(&request->priotree,
						       &prev->priotree,
						       &request->dep,
						       0);
	}

	spin_lock_irq(&timeline->lock);
	list_add_tail(&request->link, &timeline->requests);
	spin_unlock_irq(&timeline->lock);

	GEM_BUG_ON(i915_seqno_passed(timeline->last_submitted_seqno,
				     request->fence.seqno));

	timeline->last_submitted_seqno = request->fence.seqno;
	i915_gem_active_set(&timeline->last_request, request);

	list_add_tail(&request->ring_link, &ring->request_list);
	request->emitted_jiffies = jiffies;

	i915_gem_mark_busy(engine);

	/* Let the backend know a new request has arrived that may need
	 * to adjust the existing execution schedule due to a high priority
	 * request - i.e. we may want to preempt the current request in order
	 * to run a high priority dependency chain *before* we can execute this
	 * request.
	 *
	 * This is called before the request is ready to run so that we can
	 * decide whether to preempt the entire chain so that it is ready to
	 * run at the earliest possible convenience.
	 */
	if (engine->schedule)
		engine->schedule(request, request->ctx->priority);

	local_bh_disable();
	i915_sw_fence_commit(&request->submit);
	local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
}

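/* Re-add @wait to @q if a previous wakeup removed it from the queue. */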
static void reset_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	if (list_empty(&wait->task_list))
		__add_wait_queue(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}

static unsigned long local_clock_us(unsigned int *cpu)
{
	unsigned long t;

	/* Cheaply and approximately convert from nanoseconds to microseconds.
	 * The result and subsequent calculations are also defined in the same
	 * approximate microseconds units. The principal source of timing
	 * error here is from the simple truncation.
	 *
	 * Note that local_clock() is only defined wrt to the current CPU;
	 * the comparisons are no longer valid if we switch CPUs. Instead of
	 * blocking preemption for the entire busywait, we can detect the CPU
	 * switch and use that as indicator of system load and a reason to
	 * stop busywaiting, see busywait_stop().
	 */
	*cpu = get_cpu();
	t = local_clock() >> 10;
	put_cpu();

	return t;
}

static bool busywait_stop(unsigned long timeout, unsigned int cpu)
{
	unsigned int this_cpu;

	if (time_after(local_clock_us(&this_cpu), timeout))
		return true;

	return this_cpu != cpu;
}

bool __i915_spin_request(const struct drm_i915_gem_request *req,
			 int state, unsigned long timeout_us)
{
	unsigned int cpu;

	/* When waiting for high frequency requests, e.g. during synchronous
	 * rendering split between the CPU and GPU, the finite amount of time
	 * required to set up the irq and wait upon it limits the response
	 * rate. By busywaiting on the request completion for a short while we
	 * can service the high frequency waits as quick as possible. However,
	 * if it is a slow request, we want to sleep as quickly as possible.
	 * The tradeoff between waiting and sleeping is roughly the time it
	 * takes to sleep on a request, on the order of a microsecond.
	 */

	timeout_us += local_clock_us(&cpu);
	do {
		if (__i915_gem_request_completed(req))
			return true;

		if (signal_pending_state(state, current))
			break;

		if (busywait_stop(timeout_us, cpu))
			break;

		cpu_relax_lowlatency();
	} while (!need_resched());

	return false;
}

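/*
 * Sleep until the request's execute fence is signalled, restarting the
 * wait around any GPU reset (which we may have to perform ourselves if
 * we hold the lock) while honouring signals and the remaining timeout.
 */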
static long
__i915_request_wait_for_execute(struct drm_i915_gem_request *request,
				unsigned int flags,
				long timeout)
{
	const int state = flags & I915_WAIT_INTERRUPTIBLE ?
		TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
	wait_queue_head_t *q = &request->i915->gpu_error.wait_queue;
	DEFINE_WAIT(reset);
	DEFINE_WAIT(wait);

	if (flags & I915_WAIT_LOCKED)
		add_wait_queue(q, &reset);

	do {
		prepare_to_wait(&request->execute.wait, &wait, state);

		if (i915_sw_fence_done(&request->execute))
			break;

		if (flags & I915_WAIT_LOCKED &&
		    i915_reset_in_progress(&request->i915->gpu_error)) {
			__set_current_state(TASK_RUNNING);
			i915_reset(request->i915);
			reset_wait_queue(q, &reset);
			continue;
		}

		if (signal_pending_state(state, current)) {
			timeout = -ERESTARTSYS;
			break;
		}

		timeout = io_schedule_timeout(timeout);
	} while (timeout);
	finish_wait(&request->execute.wait, &wait);

	if (flags & I915_WAIT_LOCKED)
		remove_wait_queue(q, &reset);

	return timeout;
}

/**
 * i915_wait_request - wait until execution of request has finished
 * @req: the request to wait upon
 * @flags: how to wait
 * @timeout: how long to wait in jiffies
 *
 * i915_wait_request() waits for the request to be completed, for a
 * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
 * unbounded wait).
 *
 * If the caller holds the struct_mutex, the caller must pass I915_WAIT_LOCKED
 * in via the flags; conversely, if the struct_mutex is not held, the caller
 * must not specify that the wait is locked.
 *
 * Returns the remaining time (in jiffies) if the request completed, which may
 * be zero or -ETIME if the request is unfinished after the timeout expires.
 * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is
 * pending before the request completes.
 */
long i915_wait_request(struct drm_i915_gem_request *req,
		       unsigned int flags,
		       long timeout)
{
	const int state = flags & I915_WAIT_INTERRUPTIBLE ?
		TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
	DEFINE_WAIT(reset);
	struct intel_wait wait;

	might_sleep();
#if IS_ENABLED(CONFIG_LOCKDEP)
	GEM_BUG_ON(debug_locks &&
		   !!lockdep_is_held(&req->i915->drm.struct_mutex) !=
		   !!(flags & I915_WAIT_LOCKED));
#endif
	GEM_BUG_ON(timeout < 0);

	if (i915_gem_request_completed(req))
		return timeout;

	if (!timeout)
		return -ETIME;

	trace_i915_gem_request_wait_begin(req);

	if (!i915_sw_fence_done(&req->execute)) {
		timeout = __i915_request_wait_for_execute(req, flags, timeout);
		if (timeout < 0)
			goto complete;

		GEM_BUG_ON(!i915_sw_fence_done(&req->execute));
	}
	GEM_BUG_ON(!i915_sw_fence_done(&req->submit));
	GEM_BUG_ON(!req->global_seqno);

	/* Optimistic short spin before touching IRQs */
	if (i915_spin_request(req, state, 5))
		goto complete;

	set_current_state(state);
	if (flags & I915_WAIT_LOCKED)
		add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);

	intel_wait_init(&wait, req->global_seqno);
	if (intel_engine_add_wait(req->engine, &wait))
		/* In order to check that we haven't missed the interrupt
		 * as we enabled it, we need to kick ourselves to do a
		 * coherent check on the seqno before we sleep.
		 */
		goto wakeup;

	for (;;) {
		if (signal_pending_state(state, current)) {
			timeout = -ERESTARTSYS;
			break;
		}

		if (!timeout) {
			timeout = -ETIME;
			break;
		}

		timeout = io_schedule_timeout(timeout);

		if (intel_wait_complete(&wait))
			break;

		set_current_state(state);

wakeup:
		/* Carefully check if the request is complete, giving time
		 * for the seqno to be visible following the interrupt.
		 * We also have to check in case we are kicked by the GPU
		 * reset in order to drop the struct_mutex.
		 */
		if (__i915_request_irq_complete(req))
			break;

		/* If the GPU is hung, and we hold the lock, reset the GPU
		 * and then check for completion. On a full reset, the engine's
		 * HW seqno will be advanced past us and we are complete.
		 * If we do a partial reset, we have to wait for the GPU to
		 * resume and update the breadcrumb.
		 *
		 * If we don't hold the mutex, we can just wait for the worker
		 * to come along and update the breadcrumb (either directly
		 * itself, or indirectly by recovering the GPU).
		 */
		if (flags & I915_WAIT_LOCKED &&
		    i915_reset_in_progress(&req->i915->gpu_error)) {
			__set_current_state(TASK_RUNNING);
			i915_reset(req->i915);
			reset_wait_queue(&req->i915->gpu_error.wait_queue,
					 &reset);
			continue;
		}

		/* Only spin if we know the GPU is processing this request */
		if (i915_spin_request(req, state, 2))
			break;
	}

	intel_engine_remove_wait(req->engine, &wait);
	if (flags & I915_WAIT_LOCKED)
		remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
	__set_current_state(TASK_RUNNING);

complete:
	trace_i915_gem_request_wait_end(req);

	return timeout;
}

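/*
 * Retire completed requests on an engine's timeline in submission order,
 * stopping at the first request that has not yet completed.
 */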
static void engine_retire_requests(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *request, *next;

	list_for_each_entry_safe(request, next,
				 &engine->timeline->requests, link) {
		if (!__i915_gem_request_completed(request))
			return;

		i915_gem_request_retire(request);
	}
}

void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	if (!dev_priv->gt.active_requests)
		return;

	for_each_engine(engine, dev_priv, id)
		engine_retire_requests(engine);
}