/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/prefetch.h>
#include <linux/dma-fence-array.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>

#include "i915_drv.h"

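/* The callbacks below implement the dma_fence_ops exposed by every i915
 * request (i915_fence_ops, defined further down), allowing requests to be
 * consumed as ordinary dma-fences by the rest of the kernel.
 */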
static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{
	return "i915";
}

static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
{
	/* The timeline struct (as part of the ppgtt underneath a context)
	 * may be freed when the request is no longer in use by the GPU.
	 * We could extend the life of a context to beyond that of all
	 * fences, possibly keeping the hw resource around indefinitely,
	 * or we just give them a false name. Since
	 * dma_fence_ops.get_timeline_name is a debug feature, the occasional
	 * lie seems justifiable.
	 */
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return "signaled";

	return to_request(fence)->timeline->common->name;
}

static bool i915_fence_signaled(struct dma_fence *fence)
{
	return i915_gem_request_completed(to_request(fence));
}

static bool i915_fence_enable_signaling(struct dma_fence *fence)
{
	if (i915_fence_signaled(fence))
		return false;

	intel_engine_enable_signaling(to_request(fence));
	return true;
}

static signed long i915_fence_wait(struct dma_fence *fence,
				   bool interruptible,
				   signed long timeout)
{
	return i915_wait_request(to_request(fence), interruptible, timeout);
}

static void i915_fence_release(struct dma_fence *fence)
{
	struct drm_i915_gem_request *req = to_request(fence);

	/* The request is put onto a RCU freelist (i.e. the address
	 * is immediately reused), mark the fences as being freed now.
	 * Otherwise the debugobjects for the fences are only marked as
	 * freed when the slab cache itself is freed, and so we would get
	 * caught trying to reuse dead objects.
	 */
	i915_sw_fence_fini(&req->submit);

	kmem_cache_free(req->i915->requests, req);
}

const struct dma_fence_ops i915_fence_ops = {
	.get_driver_name = i915_fence_get_driver_name,
	.get_timeline_name = i915_fence_get_timeline_name,
	.enable_signaling = i915_fence_enable_signaling,
	.signaled = i915_fence_signaled,
	.wait = i915_fence_wait,
	.release = i915_fence_release,
};

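/* Detach a request from its client's (execbuf file's) request list. The
 * unlocked read of request->file_priv is re-checked under file_priv->mm.lock
 * because another thread may remove the request from the client list
 * concurrently.
 */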
static inline void
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
	struct drm_i915_file_private *file_priv;

	file_priv = request->file_priv;
	if (!file_priv)
		return;

	spin_lock(&file_priv->mm.lock);
	if (request->file_priv) {
		list_del(&request->client_link);
		request->file_priv = NULL;
	}
	spin_unlock(&file_priv->mm.lock);
}

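/* An i915_priotree records the scheduling dependencies of a request:
 * signalers_list holds the requests this one must wait upon, waiters_list
 * holds the requests waiting upon this one, and dep->dfs_link is reserved
 * for the scheduler backend's graph walk when propagating priorities.
 */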
static struct i915_dependency *
i915_dependency_alloc(struct drm_i915_private *i915)
{
	return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
}

static void
i915_dependency_free(struct drm_i915_private *i915,
		     struct i915_dependency *dep)
{
	kmem_cache_free(i915->dependencies, dep);
}

static void
__i915_priotree_add_dependency(struct i915_priotree *pt,
			       struct i915_priotree *signal,
			       struct i915_dependency *dep,
			       unsigned long flags)
{
	INIT_LIST_HEAD(&dep->dfs_link);
	list_add(&dep->wait_link, &signal->waiters_list);
	list_add(&dep->signal_link, &pt->signalers_list);
	dep->signaler = signal;
	dep->flags = flags;
}

static int
i915_priotree_add_dependency(struct drm_i915_private *i915,
			     struct i915_priotree *pt,
			     struct i915_priotree *signal)
{
	struct i915_dependency *dep;

	dep = i915_dependency_alloc(i915);
	if (!dep)
		return -ENOMEM;

	__i915_priotree_add_dependency(pt, signal, dep, I915_DEPENDENCY_ALLOC);
	return 0;
}

static void
i915_priotree_fini(struct drm_i915_private *i915, struct i915_priotree *pt)
{
	struct i915_dependency *dep, *next;

	GEM_BUG_ON(!RB_EMPTY_NODE(&pt->node));

	/* Everyone we depended upon (the fences we wait to be signaled)
	 * should retire before us and remove themselves from our list.
	 * However, retirement is run independently on each timeline and
	 * so we may be called out-of-order.
	 */
	list_for_each_entry_safe(dep, next, &pt->signalers_list, signal_link) {
		list_del(&dep->wait_link);
		if (dep->flags & I915_DEPENDENCY_ALLOC)
			i915_dependency_free(i915, dep);
	}

	/* Remove ourselves from everyone who depends upon us */
	list_for_each_entry_safe(dep, next, &pt->waiters_list, wait_link) {
		list_del(&dep->signal_link);
		if (dep->flags & I915_DEPENDENCY_ALLOC)
			i915_dependency_free(i915, dep);
	}
}

static void
i915_priotree_init(struct i915_priotree *pt)
{
	INIT_LIST_HEAD(&pt->signalers_list);
	INIT_LIST_HEAD(&pt->waiters_list);
	RB_CLEAR_NODE(&pt->node);
	pt->priority = INT_MIN;
}

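/* Wrapping the global seqno requires the GPU to be idle: retire everything,
 * reset each engine timeline to the new value (flushing the breadcrumb
 * bookkeeping on the way), and clear the inter-engine sync_seqno[] copies so
 * that no stale "already waited upon" decisions survive the wrap.
 */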
static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int ret;

	/* Carefully retire all requests without writing to the rings */
	ret = i915_gem_wait_for_idle(i915,
				     I915_WAIT_INTERRUPTIBLE |
				     I915_WAIT_LOCKED);
	if (ret)
		return ret;

	/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
	for_each_engine(engine, i915, id) {
		struct i915_gem_timeline *timeline;
		struct intel_timeline *tl = engine->timeline;

		if (!i915_seqno_passed(seqno, tl->seqno)) {
			/* spin until threads are complete */
			while (intel_breadcrumbs_busy(engine))
				cond_resched();
		}

		/* Finally reset hw state */
		tl->seqno = seqno;
		intel_engine_init_global_seqno(engine, seqno);

		list_for_each_entry(timeline, &i915->gt.timelines, link)
			memset(timeline->engine[id].sync_seqno, 0,
			       sizeof(timeline->engine[id].sync_seqno));
	}

	return 0;
}

int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	if (seqno == 0)
		return -EINVAL;

	/* The HWS page needs to hold a value less than the seqno we will
	 * inject into the ring.
	 */
	return reset_all_global_seqno(dev_priv, seqno - 1);
}

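/* Reserve room in the global seqno space for another in-flight request. If
 * adding the number of requests already in flight would overflow the 32-bit
 * seqno, force a wrap back to zero (idling the GPU) before continuing.
 */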
static int reserve_seqno(struct intel_engine_cs *engine)
{
	u32 active = ++engine->timeline->inflight_seqnos;
	u32 seqno = engine->timeline->seqno;
	int ret;

	/* Reservation is fine until we need to wrap around */
	if (likely(!add_overflows(seqno, active)))
		return 0;

	ret = reset_all_global_seqno(engine->i915, 0);
	if (ret) {
		engine->timeline->inflight_seqnos--;
		return ret;
	}

	return 0;
}

static void unreserve_seqno(struct intel_engine_cs *engine)
{
	GEM_BUG_ON(!engine->timeline->inflight_seqnos);
	engine->timeline->inflight_seqnos--;
}

void i915_gem_retire_noop(struct i915_gem_active *active,
			  struct drm_i915_gem_request *request)
{
	/* Space left intentionally blank */
}

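/* Retire a single completed request: unlink it from the engine and ring
 * lists, run the ->retire() callback of everything that was tracking it,
 * unpin the context deferred from submission, signal its fence and finally
 * drop a reference to the request.
 */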
static void i915_gem_request_retire(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct i915_gem_active *active, *next;

	lockdep_assert_held(&request->i915->drm.struct_mutex);
	GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
	GEM_BUG_ON(!i915_gem_request_completed(request));
	GEM_BUG_ON(!request->i915->gt.active_requests);

	trace_i915_gem_request_retire(request);

	spin_lock_irq(&engine->timeline->lock);
	list_del_init(&request->link);
	spin_unlock_irq(&engine->timeline->lock);

	/* We know the GPU must have read the request to have
	 * sent us the seqno + interrupt, so use the position
	 * of the tail of the request to update the last known position
	 * of the GPU head.
	 *
	 * Note this requires that we are always called in request
	 * completion order.
	 */
	list_del(&request->ring_link);
	request->ring->head = request->postfix;
	if (!--request->i915->gt.active_requests) {
		GEM_BUG_ON(!request->i915->gt.awake);
		mod_delayed_work(request->i915->wq,
				 &request->i915->gt.idle_work,
				 msecs_to_jiffies(100));
	}
	unreserve_seqno(request->engine);

	/* Walk through the active list, calling retire on each. This allows
	 * objects to track their GPU activity and mark themselves as idle
	 * when their *last* active request is completed (updating state
	 * tracking lists for eviction, active references for GEM, etc).
	 *
	 * As the ->retire() may free the node, we decouple it first and
	 * pass along the auxiliary information (to avoid dereferencing
	 * the node after the callback).
	 */
	list_for_each_entry_safe(active, next, &request->active_list, link) {
		/* In microbenchmarks or focusing upon time inside the kernel,
		 * we may spend an inordinate amount of time simply handling
		 * the retirement of requests and processing their callbacks.
		 * Of which, this loop itself is particularly hot due to the
		 * cache misses when jumping around the list of i915_gem_active.
		 * So we try to keep this loop as streamlined as possible and
		 * also prefetch the next i915_gem_active to try and hide
		 * the likely cache miss.
		 */
		prefetchw(next);

		INIT_LIST_HEAD(&active->link);
		RCU_INIT_POINTER(active->request, NULL);

		active->retire(active, request);
	}

	i915_gem_request_remove_from_client(request);

	/* Retirement decays the ban score as it is a sign of ctx progress */
	if (request->ctx->ban_score > 0)
		request->ctx->ban_score--;

	/* The backing object for the context is done after switching to the
	 * *next* context. Therefore we cannot retire the previous context until
	 * the next context has already started running. However, since we
	 * cannot take the required locks at i915_gem_request_submit() we
	 * defer the unpinning of the active context to now, retirement of
	 * the subsequent request.
	 */
	if (engine->last_retired_context)
		engine->context_unpin(engine, engine->last_retired_context);
	engine->last_retired_context = request->ctx;

	dma_fence_signal(&request->fence);

	i915_priotree_fini(request->i915, &request->priotree);
	i915_gem_request_put(request);
}

void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	struct drm_i915_gem_request *tmp;

	lockdep_assert_held(&req->i915->drm.struct_mutex);
	GEM_BUG_ON(!i915_gem_request_completed(req));

	if (list_empty(&req->link))
		return;

	do {
		tmp = list_first_entry(&engine->timeline->requests,
				       typeof(*tmp), link);

		i915_gem_request_retire(tmp);
	} while (tmp != req);
}

static u32 timeline_get_seqno(struct intel_timeline *tl)
{
	return ++tl->seqno;
}

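/* Hand a request to the hardware backend. Called with engine->timeline->lock
 * held and irqs disabled: assign the next global seqno, emit the breadcrumb
 * into the ring, and move the request from its per-context timeline onto the
 * engine's execution timeline.
 */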
void __i915_gem_request_submit(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_timeline *timeline;
	u32 seqno;

	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&engine->timeline->lock);

	trace_i915_gem_request_execute(request);

	/* Transfer from per-context onto the global per-engine timeline */
	timeline = engine->timeline;
	GEM_BUG_ON(timeline == request->timeline);

	seqno = timeline_get_seqno(timeline);
	GEM_BUG_ON(!seqno);
	GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine), seqno));

	/* We may be recursing from the signal callback of another i915 fence */
	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
	request->global_seqno = seqno;
	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
		intel_engine_enable_signaling(request);
	spin_unlock(&request->lock);

	engine->emit_breadcrumb(request,
				request->ring->vaddr + request->postfix);

	spin_lock(&request->timeline->lock);
	list_move_tail(&request->link, &timeline->requests);
	spin_unlock(&request->timeline->lock);

	wake_up_all(&request->execute);
}

void i915_gem_request_submit(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	unsigned long flags;

	/* Will be called from irq-context when using foreign fences. */
	spin_lock_irqsave(&engine->timeline->lock, flags);

	__i915_gem_request_submit(request);

	spin_unlock_irqrestore(&engine->timeline->lock, flags);
}

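/* Take a request back off the hardware: the inverse of
 * __i915_gem_request_submit(). Rewind the engine's seqno, cancel any pending
 * signaling and move the request back onto its per-context timeline. Also
 * requires the engine timeline lock to be held with irqs disabled.
 */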
void __i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_timeline *timeline;

	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&engine->timeline->lock);

	/* Only unwind in reverse order, required so that the per-context list
	 * is kept in seqno/ring order.
	 */
	GEM_BUG_ON(request->global_seqno != engine->timeline->seqno);
	engine->timeline->seqno--;

	/* We may be recursing from the signal callback of another i915 fence */
	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
	request->global_seqno = 0;
	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
		intel_engine_cancel_signaling(request);
	spin_unlock(&request->lock);

	/* Transfer back from the global per-engine timeline to per-context */
	timeline = request->timeline;
	GEM_BUG_ON(timeline == engine->timeline);

	spin_lock(&timeline->lock);
	list_move(&request->link, &timeline->requests);
	spin_unlock(&timeline->lock);

	/* We don't need to wake_up any waiters on request->execute, they
	 * will get woken by any other event or us re-adding this request
	 * to the engine timeline (__i915_gem_request_submit()). The waiters
	 * should be quite adept at finding that the request now has a new
	 * global_seqno, different from the one they went to sleep on.
	 */
}

void i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	unsigned long flags;

	/* Will be called from irq-context when using foreign fences. */
	spin_lock_irqsave(&engine->timeline->lock, flags);

	__i915_gem_request_unsubmit(request);

	spin_unlock_irqrestore(&engine->timeline->lock, flags);
}

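/* Notifier for the request's submit fence: once everything the request was
 * waiting upon has signaled, hand it to the engine backend for execution;
 * when the fence is freed, drop the reference taken for the fence chain in
 * i915_gem_request_alloc().
 */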
static int __i915_sw_fence_call
submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	struct drm_i915_gem_request *request =
		container_of(fence, typeof(*request), submit);

	switch (state) {
	case FENCE_COMPLETE:
		trace_i915_gem_request_submit(request);
		request->engine->submit_request(request);
		break;

	case FENCE_FREE:
		i915_gem_request_put(request);
		break;
	}

	return NOTIFY_DONE;
}

/**
 * i915_gem_request_alloc - allocate a request structure
 *
 * @engine: engine that we wish to issue the request on.
 * @ctx: context that the request will be associated with.
 *       This can be NULL if the request is not directly related to
 *       any specific user context, in which case this function will
 *       choose an appropriate context to use.
 *
 * Returns a pointer to the allocated request if successful,
 * or an error code if not.
 */
struct drm_i915_gem_request *
i915_gem_request_alloc(struct intel_engine_cs *engine,
		       struct i915_gem_context *ctx)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct drm_i915_gem_request *req;
	int ret;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	/* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
	 * EIO if the GPU is already wedged.
	 */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return ERR_PTR(-EIO);

	/* Pinning the contexts may generate requests in order to acquire
	 * GGTT space, so do this first before we reserve a seqno for
	 * ourselves.
	 */
	ret = engine->context_pin(engine, ctx);
	if (ret)
		return ERR_PTR(ret);

	ret = reserve_seqno(engine);
	if (ret)
		goto err_unpin;

	/* Move the oldest request to the slab-cache (if not in use!) */
	req = list_first_entry_or_null(&engine->timeline->requests,
				       typeof(*req), link);
	if (req && i915_gem_request_completed(req))
		i915_gem_request_retire(req);

	/* Beware: Dragons be flying overhead.
	 *
	 * We use RCU to look up requests in flight. The lookups may
	 * race with the request being allocated from the slab freelist.
	 * That is, the request we are writing to here may be in the process
	 * of being read by __i915_gem_active_get_rcu(). As such,
	 * we have to be very careful when overwriting the contents. During
	 * the RCU lookup, we chase the request->engine pointer,
	 * read the request->global_seqno and increment the reference count.
	 *
	 * The reference count is incremented atomically. If it is zero,
	 * the lookup knows the request is unallocated and complete. Otherwise,
	 * it is either still in use, or has been reallocated and reset
	 * with dma_fence_init(). This increment is safe for release as we
	 * check that the request we have a reference to matches the active
	 * request.
	 *
	 * Before we increment the refcount, we chase the request->engine
	 * pointer. We must not call kmem_cache_zalloc() or else we set
	 * that pointer to NULL and cause a crash during the lookup. If
	 * we see the request is completed (based on the value of the
	 * old engine and seqno), the lookup is complete and reports NULL.
	 * If we decide the request is not completed (new engine or seqno),
	 * then we grab a reference and double check that it is still the
	 * active request - and if it is not, we restart the lookup.
	 *
	 * Do not use kmem_cache_zalloc() here!
	 */
	req = kmem_cache_alloc(dev_priv->requests, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto err_unreserve;
	}

	req->timeline = i915_gem_context_lookup_timeline(ctx, engine);
	GEM_BUG_ON(req->timeline == engine->timeline);

	spin_lock_init(&req->lock);
	dma_fence_init(&req->fence,
		       &i915_fence_ops,
		       &req->lock,
		       req->timeline->fence_context,
		       timeline_get_seqno(req->timeline));

	/* We bump the ref for the fence chain */
	i915_sw_fence_init(&i915_gem_request_get(req)->submit, submit_notify);
	init_waitqueue_head(&req->execute);

	i915_priotree_init(&req->priotree);

	INIT_LIST_HEAD(&req->active_list);
	req->i915 = dev_priv;
	req->engine = engine;
	req->ctx = ctx;

	/* No zalloc, must clear what we need by hand */
	req->global_seqno = 0;
	req->file_priv = NULL;
	req->batch = NULL;

	/*
	 * Reserve space in the ring buffer for all the commands required to
	 * eventually emit this request. This is to guarantee that the
	 * i915_add_request() call can't fail. Note that the reserve may need
	 * to be redone if the request is not actually submitted straight
	 * away, e.g. because a GPU scheduler has deferred it.
	 */
	req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
	GEM_BUG_ON(req->reserved_space < engine->emit_breadcrumb_sz);

	ret = engine->request_alloc(req);
	if (ret)
		goto err_ctx;

	/* Record the position of the start of the request so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the head.
	 */
	req->head = req->ring->emit;

	/* Check that we didn't interrupt ourselves with a new request */
	GEM_BUG_ON(req->timeline->seqno != req->fence.seqno);
	return req;

err_ctx:
	/* Make sure we didn't add ourselves to external state before freeing */
	GEM_BUG_ON(!list_empty(&req->active_list));
	GEM_BUG_ON(!list_empty(&req->priotree.signalers_list));
	GEM_BUG_ON(!list_empty(&req->priotree.waiters_list));

	kmem_cache_free(dev_priv->requests, req);
err_unreserve:
	unreserve_seqno(engine);
err_unpin:
	engine->context_unpin(engine, ctx);
	return ERR_PTR(ret);
}

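/* Order request @to after request @from using the cheapest mechanism
 * available: nothing at all if @from has already completed or shares @to's
 * timeline; a scheduler dependency when a scheduler backend is present; a
 * submit-fence coupling when both requests run on the same engine; and
 * otherwise either a hardware semaphore or an asynchronous fence wait,
 * recording the seqno in sync_seqno[] so the same wait is not repeated.
 */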
static int
i915_gem_request_await_request(struct drm_i915_gem_request *to,
			       struct drm_i915_gem_request *from)
{
	u32 seqno;
	int ret;

	GEM_BUG_ON(to == from);

	if (i915_gem_request_completed(from))
		return 0;

	if (to->engine->schedule) {
		ret = i915_priotree_add_dependency(to->i915,
						   &to->priotree,
						   &from->priotree);
		if (ret < 0)
			return ret;
	}

	if (to->timeline == from->timeline)
		return 0;

	if (to->engine == from->engine) {
		ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
						       &from->submit,
						       GFP_KERNEL);
		return ret < 0 ? ret : 0;
	}

	seqno = i915_gem_request_global_seqno(from);
	if (!seqno) {
		ret = i915_sw_fence_await_dma_fence(&to->submit,
						    &from->fence, 0,
						    GFP_KERNEL);
		return ret < 0 ? ret : 0;
	}

	if (seqno <= to->timeline->sync_seqno[from->engine->id])
		return 0;

	trace_i915_gem_ring_sync_to(to, from);
	if (!i915.semaphores) {
		if (!i915_spin_request(from, TASK_INTERRUPTIBLE, 2)) {
			ret = i915_sw_fence_await_dma_fence(&to->submit,
							    &from->fence, 0,
							    GFP_KERNEL);
			if (ret < 0)
				return ret;
		}
	} else {
		ret = to->engine->semaphore.sync_to(to, from);
		if (ret)
			return ret;
	}

	to->timeline->sync_seqno[from->engine->id] = seqno;
	return 0;
}

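/* Wait upon an arbitrary dma-fence: native i915 fences are routed through
 * i915_gem_request_await_request(), fence-arrays are decomposed into their
 * children, and foreign fences become asynchronous waits on the request's
 * submit fence, bounded by I915_FENCE_TIMEOUT.
 */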
int
i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
				 struct dma_fence *fence)
{
	struct dma_fence_array *array;
	int ret;
	int i;

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return 0;

	if (dma_fence_is_i915(fence))
		return i915_gem_request_await_request(req, to_request(fence));

	if (!dma_fence_is_array(fence)) {
		ret = i915_sw_fence_await_dma_fence(&req->submit,
						    fence, I915_FENCE_TIMEOUT,
						    GFP_KERNEL);
		return ret < 0 ? ret : 0;
	}

	/* Note that if the fence-array was created in signal-on-any mode,
	 * we should *not* decompose it into its individual fences. However,
	 * we don't currently store which mode the fence-array is operating
	 * in. Fortunately, the only user of signal-on-any is private to
	 * amdgpu and we should not see any incoming fence-array from
	 * sync-file being in signal-on-any mode.
	 */

	array = to_dma_fence_array(fence);
	for (i = 0; i < array->num_fences; i++) {
		struct dma_fence *child = array->fences[i];

		if (dma_fence_is_i915(child))
			ret = i915_gem_request_await_request(req,
							     to_request(child));
		else
			ret = i915_sw_fence_await_dma_fence(&req->submit,
							    child, I915_FENCE_TIMEOUT,
							    GFP_KERNEL);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/**
 * i915_gem_request_await_object - set this request to (async) wait upon a bo
 *
 * @to: request we are wishing to use
 * @obj: object which may be in use on another ring.
 *
 * This code is meant to abstract object synchronization with the GPU.
 * Conceptually we serialise writes between engines inside the GPU.
 * We only allow one engine to write into a buffer at any time, but
 * multiple readers. To ensure each has a coherent view of memory, we must:
 *
 * - If there is an outstanding write request to the object, the new
 *   request must wait for it to complete (either CPU or in hw, requests
 *   on the same ring will be naturally ordered).
 *
 * - If we are a write request (pending_write_domain is set), the new
 *   request must wait for outstanding read requests to complete.
 *
 * Returns 0 if successful, else propagates up the lower layer error.
 */
int
i915_gem_request_await_object(struct drm_i915_gem_request *to,
			      struct drm_i915_gem_object *obj,
			      bool write)
{
	struct dma_fence *excl;
	int ret = 0;

	if (write) {
		struct dma_fence **shared;
		unsigned int count, i;

		ret = reservation_object_get_fences_rcu(obj->resv,
							&excl, &count, &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			ret = i915_gem_request_await_dma_fence(to, shared[i]);
			if (ret)
				break;

			dma_fence_put(shared[i]);
		}

		for (; i < count; i++)
			dma_fence_put(shared[i]);
		kfree(shared);
	} else {
		excl = reservation_object_get_excl_rcu(obj->resv);
	}

	if (excl) {
		if (ret == 0)
			ret = i915_gem_request_await_dma_fence(to, excl);

		dma_fence_put(excl);
	}

	return ret;
}

816 | { | |
817 | struct drm_i915_private *dev_priv = engine->i915; | |
818 | ||
05235c53 CW |
819 | if (dev_priv->gt.awake) |
820 | return; | |
821 | ||
4302055b CW |
822 | GEM_BUG_ON(!dev_priv->gt.active_requests); |
823 | ||
05235c53 CW |
824 | intel_runtime_pm_get_noresume(dev_priv); |
825 | dev_priv->gt.awake = true; | |
826 | ||
54b4f68f | 827 | intel_enable_gt_powersave(dev_priv); |
05235c53 CW |
828 | i915_update_gfx_val(dev_priv); |
829 | if (INTEL_GEN(dev_priv) >= 6) | |
830 | gen6_rps_busy(dev_priv); | |
831 | ||
832 | queue_delayed_work(dev_priv->wq, | |
833 | &dev_priv->gt.retire_work, | |
834 | round_jiffies_up_relative(HZ)); | |
835 | } | |
836 | ||
837 | /* | |
838 | * NB: This function is not allowed to fail. Doing so would mean the the | |
839 | * request is not being tracked for completion but the work itself is | |
840 | * going to happen on the hardware. This would be a Bad Thing(tm). | |
841 | */ | |
17f298cf | 842 | void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches) |
05235c53 | 843 | { |
95b2ab56 CW |
844 | struct intel_engine_cs *engine = request->engine; |
845 | struct intel_ring *ring = request->ring; | |
73cb9701 | 846 | struct intel_timeline *timeline = request->timeline; |
0a046a0e | 847 | struct drm_i915_gem_request *prev; |
73dec95e | 848 | u32 *cs; |
caddfe71 | 849 | int err; |
05235c53 | 850 | |
4c7d62c6 | 851 | lockdep_assert_held(&request->i915->drm.struct_mutex); |
0f25dff6 CW |
852 | trace_i915_gem_request_add(request); |
853 | ||
c781c978 CW |
854 | /* Make sure that no request gazumped us - if it was allocated after |
855 | * our i915_gem_request_alloc() and called __i915_add_request() before | |
856 | * us, the timeline will hold its seqno which is later than ours. | |
857 | */ | |
9b6586ae | 858 | GEM_BUG_ON(timeline->seqno != request->fence.seqno); |
c781c978 | 859 | |
05235c53 CW |
860 | /* |
861 | * To ensure that this call will not fail, space for its emissions | |
862 | * should already have been reserved in the ring buffer. Let the ring | |
863 | * know that it is time to use that space up. | |
864 | */ | |
05235c53 CW |
865 | request->reserved_space = 0; |
866 | ||
867 | /* | |
868 | * Emit any outstanding flushes - execbuf can fail to emit the flush | |
869 | * after having emitted the batchbuffer command. Hence we need to fix | |
870 | * things up similar to emitting the lazy request. The difference here | |
871 | * is that the flush _must_ happen before the next request, no matter | |
872 | * what. | |
873 | */ | |
874 | if (flush_caches) { | |
caddfe71 | 875 | err = engine->emit_flush(request, EMIT_FLUSH); |
c7fe7d25 | 876 | |
05235c53 | 877 | /* Not allowed to fail! */ |
caddfe71 | 878 | WARN(err, "engine->emit_flush() failed: %d!\n", err); |
05235c53 CW |
879 | } |
880 | ||
d045446d | 881 | /* Record the position of the start of the breadcrumb so that |
05235c53 CW |
882 | * should we detect the updated seqno part-way through the |
883 | * GPU processing the request, we never over-estimate the | |
d045446d | 884 | * position of the ring's HEAD. |
05235c53 | 885 | */ |
73dec95e TU |
886 | cs = intel_ring_begin(request, engine->emit_breadcrumb_sz); |
887 | GEM_BUG_ON(IS_ERR(cs)); | |
888 | request->postfix = intel_ring_offset(request, cs); | |
05235c53 | 889 | |
0f25dff6 CW |
890 | /* Seal the request and mark it as pending execution. Note that |
891 | * we may inspect this state, without holding any locks, during | |
892 | * hangcheck. Hence we apply the barrier to ensure that we do not | |
893 | * see a more recent value in the hws than we are tracking. | |
894 | */ | |
0a046a0e | 895 | |
73cb9701 | 896 | prev = i915_gem_active_raw(&timeline->last_request, |
0a046a0e | 897 | &request->i915->drm.struct_mutex); |
52e54209 | 898 | if (prev) { |
0a046a0e CW |
899 | i915_sw_fence_await_sw_fence(&request->submit, &prev->submit, |
900 | &request->submitq); | |
52e54209 CW |
901 | if (engine->schedule) |
902 | __i915_priotree_add_dependency(&request->priotree, | |
903 | &prev->priotree, | |
904 | &request->dep, | |
905 | 0); | |
906 | } | |
0a046a0e | 907 | |
80b204bc | 908 | spin_lock_irq(&timeline->lock); |
f2d13290 | 909 | list_add_tail(&request->link, &timeline->requests); |
80b204bc CW |
910 | spin_unlock_irq(&timeline->lock); |
911 | ||
9b6586ae | 912 | GEM_BUG_ON(timeline->seqno != request->fence.seqno); |
73cb9701 | 913 | i915_gem_active_set(&timeline->last_request, request); |
f2d13290 | 914 | |
0f25dff6 | 915 | list_add_tail(&request->ring_link, &ring->request_list); |
f2d13290 | 916 | request->emitted_jiffies = jiffies; |
0f25dff6 | 917 | |
9b6586ae CW |
918 | if (!request->i915->gt.active_requests++) |
919 | i915_gem_mark_busy(engine); | |
5590af3e | 920 | |
0de9136d CW |
921 | /* Let the backend know a new request has arrived that may need |
922 | * to adjust the existing execution schedule due to a high priority | |
923 | * request - i.e. we may want to preempt the current request in order | |
924 | * to run a high priority dependency chain *before* we can execute this | |
925 | * request. | |
926 | * | |
927 | * This is called before the request is ready to run so that we can | |
928 | * decide whether to preempt the entire chain so that it is ready to | |
929 | * run at the earliest possible convenience. | |
930 | */ | |
931 | if (engine->schedule) | |
9f792eba | 932 | engine->schedule(request, request->ctx->priority); |
0de9136d | 933 | |
5590af3e CW |
934 | local_bh_disable(); |
935 | i915_sw_fence_commit(&request->submit); | |
936 | local_bh_enable(); /* Kick the execlists tasklet if just scheduled */ | |
05235c53 CW |
937 | } |
938 | ||
static unsigned long local_clock_us(unsigned int *cpu)
{
	unsigned long t;

	/* Cheaply and approximately convert from nanoseconds to microseconds.
	 * The result and subsequent calculations are also defined in the same
	 * approximate microseconds units. The principal source of timing
	 * error here is from the simple truncation.
	 *
	 * Note that local_clock() is only defined with respect to the current
	 * CPU; the comparisons are no longer valid if we switch CPUs. Instead
	 * of blocking preemption for the entire busywait, we can detect the
	 * CPU switch and use that as indicator of system load and a reason to
	 * stop busywaiting, see busywait_stop().
	 */
	*cpu = get_cpu();
	t = local_clock() >> 10;
	put_cpu();

	return t;
}

static bool busywait_stop(unsigned long timeout, unsigned int cpu)
{
	unsigned int this_cpu;

	if (time_after(local_clock_us(&this_cpu), timeout))
		return true;

	return this_cpu != cpu;
}

bool __i915_spin_request(const struct drm_i915_gem_request *req,
			 u32 seqno, int state, unsigned long timeout_us)
{
	struct intel_engine_cs *engine = req->engine;
	unsigned int irq, cpu;

	/* When waiting for high frequency requests, e.g. during synchronous
	 * rendering split between the CPU and GPU, the finite amount of time
	 * required to set up the irq and wait upon it limits the response
	 * rate. By busywaiting on the request completion for a short while we
	 * can service the high frequency waits as quickly as possible.
	 * However, if it is a slow request, we want to sleep as quickly as
	 * possible. The tradeoff between waiting and sleeping is roughly the
	 * time it takes to sleep on a request, on the order of a microsecond.
	 */

	irq = atomic_read(&engine->irq_count);
	timeout_us += local_clock_us(&cpu);
	do {
		if (seqno != i915_gem_request_global_seqno(req))
			break;

		if (i915_seqno_passed(intel_engine_get_seqno(req->engine),
				      seqno))
			return true;

		/* Seqnos are meant to be ordered *before* the interrupt. If
		 * we see an interrupt without a corresponding seqno advance,
		 * assume we won't see one in the near future but require
		 * the engine->seqno_barrier() to fix up coherency.
		 */
		if (atomic_read(&engine->irq_count) != irq)
			break;

		if (signal_pending_state(state, current))
			break;

		if (busywait_stop(timeout_us, cpu))
			break;

		cpu_relax();
	} while (!need_resched());

	return false;
}

static bool __i915_wait_request_check_and_reset(struct drm_i915_gem_request *request)
{
	if (likely(!i915_reset_handoff(&request->i915->gpu_error)))
		return false;

	__set_current_state(TASK_RUNNING);
	i915_reset(request->i915);
	return true;
}

/**
 * i915_wait_request - wait until execution of request has finished
 * @req: the request to wait upon
 * @flags: how to wait
 * @timeout: how long to wait in jiffies
 *
 * i915_wait_request() waits for the request to be completed, for a
 * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
 * unbounded wait).
 *
 * If the caller holds the struct_mutex, the caller must pass I915_WAIT_LOCKED
 * in via the flags, and vice versa if the struct_mutex is not held, the caller
 * must not specify that the wait is locked.
 *
 * Returns the remaining time (in jiffies) if the request completed, which may
 * be zero or -ETIME if the request is unfinished after the timeout expires.
 * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is
 * pending before the request completes.
 */
long i915_wait_request(struct drm_i915_gem_request *req,
		       unsigned int flags,
		       long timeout)
{
	const int state = flags & I915_WAIT_INTERRUPTIBLE ?
		TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
	wait_queue_head_t *errq = &req->i915->gpu_error.wait_queue;
	DEFINE_WAIT_FUNC(reset, default_wake_function);
	DEFINE_WAIT_FUNC(exec, default_wake_function);
	struct intel_wait wait;

	might_sleep();
#if IS_ENABLED(CONFIG_LOCKDEP)
	GEM_BUG_ON(debug_locks &&
		   !!lockdep_is_held(&req->i915->drm.struct_mutex) !=
		   !!(flags & I915_WAIT_LOCKED));
#endif
	GEM_BUG_ON(timeout < 0);

	if (i915_gem_request_completed(req))
		return timeout;

	if (!timeout)
		return -ETIME;

	trace_i915_gem_request_wait_begin(req, flags);

	add_wait_queue(&req->execute, &exec);
	if (flags & I915_WAIT_LOCKED)
		add_wait_queue(errq, &reset);

	intel_wait_init(&wait, req);

restart:
	do {
		set_current_state(state);
		if (intel_wait_update_request(&wait, req))
			break;

		if (flags & I915_WAIT_LOCKED &&
		    __i915_wait_request_check_and_reset(req))
			continue;

		if (signal_pending_state(state, current)) {
			timeout = -ERESTARTSYS;
			goto complete;
		}

		if (!timeout) {
			timeout = -ETIME;
			goto complete;
		}

		timeout = io_schedule_timeout(timeout);
	} while (1);

	GEM_BUG_ON(!intel_wait_has_seqno(&wait));
	GEM_BUG_ON(!i915_sw_fence_signaled(&req->submit));

	/* Optimistic short spin before touching IRQs */
	if (i915_spin_request(req, state, 5))
		goto complete;

	set_current_state(state);
	if (intel_engine_add_wait(req->engine, &wait))
		/* In order to check that we haven't missed the interrupt
		 * as we enabled it, we need to kick ourselves to do a
		 * coherent check on the seqno before we sleep.
		 */
		goto wakeup;

	if (flags & I915_WAIT_LOCKED)
		__i915_wait_request_check_and_reset(req);

	for (;;) {
		if (signal_pending_state(state, current)) {
			timeout = -ERESTARTSYS;
			break;
		}

		if (!timeout) {
			timeout = -ETIME;
			break;
		}

		timeout = io_schedule_timeout(timeout);

		if (intel_wait_complete(&wait) &&
		    intel_wait_check_request(&wait, req))
			break;

		set_current_state(state);

wakeup:
		/* Carefully check if the request is complete, giving time
		 * for the seqno to be visible following the interrupt.
		 * We also have to check in case we are kicked by the GPU
		 * reset in order to drop the struct_mutex.
		 */
		if (__i915_request_irq_complete(req))
			break;

		/* If the GPU is hung, and we hold the lock, reset the GPU
		 * and then check for completion. On a full reset, the engine's
		 * HW seqno will be advanced past us and we are complete.
		 * If we do a partial reset, we have to wait for the GPU to
		 * resume and update the breadcrumb.
		 *
		 * If we don't hold the mutex, we can just wait for the worker
		 * to come along and update the breadcrumb (either directly
		 * itself, or indirectly by recovering the GPU).
		 */
		if (flags & I915_WAIT_LOCKED &&
		    __i915_wait_request_check_and_reset(req))
			continue;

		/* Only spin if we know the GPU is processing this request */
		if (i915_spin_request(req, state, 2))
			break;

		if (!intel_wait_check_request(&wait, req)) {
			intel_engine_remove_wait(req->engine, &wait);
			goto restart;
		}
	}

	intel_engine_remove_wait(req->engine, &wait);
complete:
	__set_current_state(TASK_RUNNING);
	if (flags & I915_WAIT_LOCKED)
		remove_wait_queue(errq, &reset);
	remove_wait_queue(&req->execute, &exec);
	trace_i915_gem_request_wait_end(req);

	return timeout;
}

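/* Retire all completed requests on an engine. Under the irq-safe timeline
 * lock we only move the completed requests onto a local list; the actual
 * (heavyweight) retirement is then done outside of that lock.
 */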
static void engine_retire_requests(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *request, *next;
	u32 seqno = intel_engine_get_seqno(engine);
	LIST_HEAD(retire);

	spin_lock_irq(&engine->timeline->lock);
	list_for_each_entry_safe(request, next,
				 &engine->timeline->requests, link) {
		if (!i915_seqno_passed(seqno, request->global_seqno))
			break;

		list_move_tail(&request->link, &retire);
	}
	spin_unlock_irq(&engine->timeline->lock);

	list_for_each_entry_safe(request, next, &retire, link)
		i915_gem_request_retire(request);
}

void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	if (!dev_priv->gt.active_requests)
		return;

	for_each_engine(engine, dev_priv, id)
		engine_retire_requests(engine);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_request.c"
#include "selftests/i915_gem_request.c"
#endif