/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_drv.h"

static const char *i915_fence_get_driver_name(struct fence *fence)
{
        return "i915";
}

static const char *i915_fence_get_timeline_name(struct fence *fence)
{
        /* Timelines are bound by eviction to a VM. However, since
         * we only have a global seqno at the moment, we only have
         * a single timeline. Note that each timeline will have
         * multiple execution contexts (fence contexts) as we allow
         * engines within a single timeline to execute in parallel.
         */
        return "global";
}

static bool i915_fence_signaled(struct fence *fence)
{
        return i915_gem_request_completed(to_request(fence));
}

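/* fence_ops.enable_signaling: if the request has already completed, report
 * false so the fence core treats it as signaled; otherwise hook the request
 * into the engine's signaling machinery so the fence is signaled
 * asynchronously once the GPU passes its seqno.
 */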
static bool i915_fence_enable_signaling(struct fence *fence)
{
        if (i915_fence_signaled(fence))
                return false;

        intel_engine_enable_signaling(to_request(fence));
        return true;
}

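/* fence_ops.wait: translate between the jiffies-based timeout used by the
 * fence API and the nanosecond-based timeout taken by __i915_wait_request(),
 * converting whatever time remains back to jiffies on return.
 */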
static signed long i915_fence_wait(struct fence *fence,
                                   bool interruptible,
                                   signed long timeout_jiffies)
{
        s64 timeout_ns, *timeout;
        int ret;

        if (timeout_jiffies != MAX_SCHEDULE_TIMEOUT) {
                timeout_ns = jiffies_to_nsecs(timeout_jiffies);
                timeout = &timeout_ns;
        } else {
                timeout = NULL;
        }

        ret = __i915_wait_request(to_request(fence),
                                  interruptible, timeout,
                                  NO_WAITBOOST);
        if (ret == -ETIME)
                return 0;

        if (ret < 0)
                return ret;

        if (timeout_jiffies != MAX_SCHEDULE_TIMEOUT)
                timeout_jiffies = nsecs_to_jiffies(timeout_ns);

        return timeout_jiffies;
}

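/* Debugging hooks for the fence API: report the seqno assigned to this
 * request and the engine's current seqno, respectively.
 */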
static void i915_fence_value_str(struct fence *fence, char *str, int size)
{
        snprintf(str, size, "%u", fence->seqno);
}

static void i915_fence_timeline_value_str(struct fence *fence, char *str,
                                          int size)
{
        snprintf(str, size, "%u",
                 intel_engine_get_seqno(to_request(fence)->engine));
}

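/* Final unreference of the fence: the request came from a slab cache, so
 * hand it back there rather than to the general allocator.
 */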
static void i915_fence_release(struct fence *fence)
{
        struct drm_i915_gem_request *req = to_request(fence);

        kmem_cache_free(req->i915->requests, req);
}

const struct fence_ops i915_fence_ops = {
        .get_driver_name = i915_fence_get_driver_name,
        .get_timeline_name = i915_fence_get_timeline_name,
        .enable_signaling = i915_fence_enable_signaling,
        .signaled = i915_fence_signaled,
        .wait = i915_fence_wait,
        .release = i915_fence_release,
        .fence_value_str = i915_fence_value_str,
        .timeline_value_str = i915_fence_timeline_value_str,
};

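/* Associate a request with the file (client) that submitted it, so the
 * client's outstanding requests can be tracked. The request must not
 * already belong to a client.
 */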
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
                                   struct drm_file *file)
{
        struct drm_i915_private *dev_private;
        struct drm_i915_file_private *file_priv;

        WARN_ON(!req || !file || req->file_priv);

        if (!req || !file)
                return -EINVAL;

        if (req->file_priv)
                return -EINVAL;

        dev_private = req->i915;
        file_priv = file->driver_priv;

        spin_lock(&file_priv->mm.lock);
        req->file_priv = file_priv;
        list_add_tail(&req->client_list, &file_priv->mm.request_list);
        spin_unlock(&file_priv->mm.lock);

        req->pid = get_pid(task_pid(current));

        return 0;
}

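/* Undo i915_gem_request_add_to_client(): unlink the request from its client,
 * if any, and drop the pid reference taken when it was added.
 */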
static inline void
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
        struct drm_i915_file_private *file_priv = request->file_priv;

        if (!file_priv)
                return;

        spin_lock(&file_priv->mm.lock);
        list_del(&request->client_list);
        request->file_priv = NULL;
        spin_unlock(&file_priv->mm.lock);

        put_pid(request->pid);
        request->pid = NULL;
}

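/* Retire a completed request: unlink it from the engine, record how far the
 * GPU has consumed the ring, detach it from its client and drop the
 * references it holds on its context and on itself.
 */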
static void i915_gem_request_retire(struct drm_i915_gem_request *request)
{
        trace_i915_gem_request_retire(request);
        list_del_init(&request->link);

        /* We know the GPU must have read the request to have
         * sent us the seqno + interrupt, so use the position
         * of the tail of the request to update the last known
         * position of the GPU head.
         *
         * Note this requires that we are always called in request
         * completion order.
         */
        request->ring->last_retired_head = request->postfix;

        i915_gem_request_remove_from_client(request);

        if (request->previous_context) {
                if (i915.enable_execlists)
                        intel_lr_context_unpin(request->previous_context,
                                               request->engine);
        }

        i915_gem_context_put(request->ctx);
        i915_gem_request_put(request);
}

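/* Retire every request on @req's engine that was submitted before @req,
 * and finally @req itself. The caller must hold struct_mutex.
 */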
void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
{
        struct intel_engine_cs *engine = req->engine;
        struct drm_i915_gem_request *tmp;

        lockdep_assert_held(&req->i915->drm.struct_mutex);

        if (list_empty(&req->link))
                return;

        do {
                tmp = list_first_entry(&engine->request_list,
                                       typeof(*tmp), link);

                i915_gem_request_retire(tmp);
        } while (tmp != req);

        WARN_ON(i915_verify_lists(engine->dev));
}

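/* Check the GPU error state before queuing new work: a terminal wedge is
 * reported as -EIO, while a reset in progress becomes -EAGAIN for
 * interruptible callers (so they can back off and retry) and -EIO otherwise.
 */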
static int i915_gem_check_wedge(unsigned int reset_counter, bool interruptible)
{
        if (__i915_terminally_wedged(reset_counter))
                return -EIO;

        if (__i915_reset_in_progress(reset_counter)) {
                /* Non-interruptible callers can't handle -EAGAIN, hence return
                 * -EIO unconditionally for these.
                 */
                if (!interruptible)
                        return -EIO;

                return -EAGAIN;
        }

        return 0;
}

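/* Prepare to (re)start the seqno sequence at @seqno: idle and retire all
 * engines so nothing is in flight, flush any waiters/signalers if the new
 * value would appear to move backwards, then program each engine with the
 * new seqno.
 */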
static int i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
{
        struct intel_engine_cs *engine;
        int ret;

        /* Carefully retire all requests without writing to the rings */
        for_each_engine(engine, dev_priv) {
                ret = intel_engine_idle(engine);
                if (ret)
                        return ret;
        }
        i915_gem_retire_requests(dev_priv);

        /* If the seqno wraps around, we need to clear the breadcrumb rbtree */
        if (!i915_seqno_passed(seqno, dev_priv->next_seqno)) {
                while (intel_kick_waiters(dev_priv) ||
                       intel_kick_signalers(dev_priv))
                        yield();
        }

        /* Finally reset hw state */
        for_each_engine(engine, dev_priv)
                intel_engine_init_seqno(engine, seqno);

        return 0;
}

int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;

        if (seqno == 0)
                return -EINVAL;

        /* The HWS page needs to be set to a value less than what we
         * will inject into the ring.
         */
        ret = i915_gem_init_seqno(dev_priv, seqno - 1);
        if (ret)
                return ret;

        dev_priv->next_seqno = seqno;
        return 0;
}

static int i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
{
        /* reserve 0 for non-seqno */
        if (unlikely(dev_priv->next_seqno == 0)) {
                int ret;

                ret = i915_gem_init_seqno(dev_priv, 0);
                if (ret)
                        return ret;

                dev_priv->next_seqno = 1;
        }

        *seqno = dev_priv->next_seqno++;
        return 0;
}

/**
 * i915_gem_request_alloc - allocate a request structure
 *
 * @engine: engine that we wish to issue the request on.
 * @ctx: context that the request will be associated with.
 * This can be NULL if the request is not directly related to
 * any specific user context, in which case this function will
 * choose an appropriate context to use.
 *
 * Returns a pointer to the allocated request if successful,
 * or an error code if not.
 */
struct drm_i915_gem_request *
i915_gem_request_alloc(struct intel_engine_cs *engine,
                       struct i915_gem_context *ctx)
{
        struct drm_i915_private *dev_priv = engine->i915;
        unsigned int reset_counter = i915_reset_counter(&dev_priv->gpu_error);
        struct drm_i915_gem_request *req;
        u32 seqno;
        int ret;

        /* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
         * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
         * and restart.
         */
        ret = i915_gem_check_wedge(reset_counter, dev_priv->mm.interruptible);
        if (ret)
                return ERR_PTR(ret);

        /* Move the oldest request to the slab-cache (if not in use!) */
        req = list_first_entry_or_null(&engine->request_list,
                                       typeof(*req), link);
        if (req && i915_gem_request_completed(req))
                i915_gem_request_retire(req);

        req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
        if (!req)
                return ERR_PTR(-ENOMEM);

        ret = i915_gem_get_seqno(dev_priv, &seqno);
        if (ret)
                goto err;

        spin_lock_init(&req->lock);
        fence_init(&req->fence,
                   &i915_fence_ops,
                   &req->lock,
                   engine->fence_context,
                   seqno);

        req->i915 = dev_priv;
        req->engine = engine;
        req->ctx = i915_gem_context_get(ctx);

        /*
         * Reserve space in the ring buffer for all the commands required to
         * eventually emit this request. This is to guarantee that the
         * i915_add_request() call can't fail. Note that the reserve may need
         * to be redone if the request is not actually submitted straight
         * away, e.g. because a GPU scheduler has deferred it.
         */
        req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;

        if (i915.enable_execlists)
                ret = intel_logical_ring_alloc_request_extras(req);
        else
                ret = intel_ring_alloc_request_extras(req);
        if (ret)
                goto err_ctx;

        return req;

err_ctx:
        i915_gem_context_put(ctx);
err:
        kmem_cache_free(dev_priv->requests, req);
        return ERR_PTR(ret);
}

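/* Note that the GPU is about to receive new work: record the engine as
 * active and, if the GT was idle, take a runtime-pm reference, enable
 * powersaving/RPS and kick off the periodic retire worker.
 */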
static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->i915;

        dev_priv->gt.active_engines |= intel_engine_flag(engine);
        if (dev_priv->gt.awake)
                return;

        intel_runtime_pm_get_noresume(dev_priv);
        dev_priv->gt.awake = true;

        intel_enable_gt_powersave(dev_priv);
        i915_update_gfx_val(dev_priv);
        if (INTEL_GEN(dev_priv) >= 6)
                gen6_rps_busy(dev_priv);

        queue_delayed_work(dev_priv->wq,
                           &dev_priv->gt.retire_work,
                           round_jiffies_up_relative(HZ));
}

/*
 * NB: This function is not allowed to fail. Doing so would mean the
 * request is not being tracked for completion but the work itself is
 * going to happen on the hardware. This would be a Bad Thing(tm).
 */
void __i915_add_request(struct drm_i915_gem_request *request,
                        struct drm_i915_gem_object *obj,
                        bool flush_caches)
{
        struct intel_engine_cs *engine;
        struct intel_ring *ring;
        u32 request_start;
        u32 reserved_tail;
        int ret;

        if (WARN_ON(!request))
                return;

        engine = request->engine;
        ring = request->ring;

        /*
         * To ensure that this call will not fail, space for its emissions
         * should already have been reserved in the ring buffer. Let the ring
         * know that it is time to use that space up.
         */
        request_start = ring->tail;
        reserved_tail = request->reserved_space;
        request->reserved_space = 0;

        /*
         * Emit any outstanding flushes - execbuf can fail to emit the flush
         * after having emitted the batchbuffer command. Hence we need to fix
         * things up similar to emitting the lazy request. The difference here
         * is that the flush _must_ happen before the next request, no matter
         * what.
         */
        if (flush_caches) {
                ret = engine->emit_flush(request, EMIT_FLUSH);

                /* Not allowed to fail! */
                WARN(ret, "engine->emit_flush() failed: %d!\n", ret);
        }

        trace_i915_gem_request_add(request);

        request->head = request_start;

        /* Whilst this request exists, batch_obj will be on the
         * active_list, and so will hold the active reference. Only when this
         * request is retired will the batch_obj be moved onto the
         * inactive_list and lose its active reference. Hence we do not need
         * to explicitly hold another reference here.
         */
        request->batch_obj = obj;

        /* Seal the request and mark it as pending execution. Note that
         * we may inspect this state, without holding any locks, during
         * hangcheck. Hence we apply the barrier to ensure that we do not
         * see a more recent value in the hws than we are tracking.
         */
        request->emitted_jiffies = jiffies;
        request->previous_seqno = engine->last_submitted_seqno;
        smp_store_mb(engine->last_submitted_seqno, request->fence.seqno);
        list_add_tail(&request->link, &engine->request_list);

        /* Record the position of the start of the request so that
         * should we detect the updated seqno part-way through the
         * GPU processing the request, we never over-estimate the
         * position of the head.
         */
        request->postfix = ring->tail;

        /* Not allowed to fail! */
        ret = engine->emit_request(request);
        WARN(ret, "(%s)->emit_request failed: %d!\n", engine->name, ret);

        /* Sanity check that the reserved size was large enough. */
        ret = ring->tail - request_start;
        if (ret < 0)
                ret += ring->size;
        WARN_ONCE(ret > reserved_tail,
                  "Not enough space reserved (%d bytes) "
                  "for adding the request (%d bytes)\n",
                  reserved_tail, ret);

        i915_gem_mark_busy(engine);
        engine->submit_request(request);
}

static unsigned long local_clock_us(unsigned int *cpu)
{
        unsigned long t;

        /* Cheaply and approximately convert from nanoseconds to microseconds.
         * The result and subsequent calculations are also defined in the same
         * approximate microseconds units. The principal source of timing
         * error here is from the simple truncation.
         *
         * Note that local_clock() is only defined wrt the current CPU;
         * the comparisons are no longer valid if we switch CPUs. Instead of
         * blocking preemption for the entire busywait, we can detect the CPU
         * switch and use that as indicator of system load and a reason to
         * stop busywaiting, see busywait_stop().
         */
        *cpu = get_cpu();
        t = local_clock() >> 10;
        put_cpu();

        return t;
}

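/* Give up the busywait either when the timeout expires or when we have been
 * migrated to a different CPU: the local_clock() timestamps are then no
 * longer comparable, and the migration itself suggests the system is busy.
 */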
static bool busywait_stop(unsigned long timeout, unsigned int cpu)
{
        unsigned int this_cpu;

        if (time_after(local_clock_us(&this_cpu), timeout))
                return true;

        return this_cpu != cpu;
}

bool __i915_spin_request(const struct drm_i915_gem_request *req,
                         int state, unsigned long timeout_us)
{
        unsigned int cpu;

        /* When waiting for high frequency requests, e.g. during synchronous
         * rendering split between the CPU and GPU, the finite amount of time
         * required to set up the irq and wait upon it limits the response
         * rate. By busywaiting on the request completion for a short while we
         * can service the high frequency waits as quickly as possible.
         * However, if it is a slow request, we want to sleep as quickly as
         * possible. The tradeoff between waiting and sleeping is roughly the
         * time it takes to sleep on a request, on the order of a microsecond.
         */

        timeout_us += local_clock_us(&cpu);
        do {
                if (i915_gem_request_completed(req))
                        return true;

                if (signal_pending_state(state, current))
                        break;

                if (busywait_stop(timeout_us, cpu))
                        break;

                cpu_relax_lowlatency();
        } while (!need_resched());

        return false;
}

/**
 * __i915_wait_request - wait until execution of request has finished
 * @req: the request to wait upon
 * @interruptible: do an interruptible wait (normally yes)
 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
 * @rps: client to charge for RPS boosting
 *
 * Note: It is of utmost importance that the passed in seqno and reset_counter
 * values have been read by the caller in an smp safe manner. Where read-side
 * locks are involved, it is sufficient to read the reset_counter before
 * unlocking the lock that protects the seqno. For lockless tricks, the
 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
 * inserted.
 *
 * Returns 0 if the request was completed within the allotted time. Else
 * returns the errno, with the remaining time filled in the timeout argument.
 */
int __i915_wait_request(struct drm_i915_gem_request *req,
                        bool interruptible,
                        s64 *timeout,
                        struct intel_rps_client *rps)
{
        int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
        DEFINE_WAIT(reset);
        struct intel_wait wait;
        unsigned long timeout_remain;
        int ret = 0;

        might_sleep();

        if (list_empty(&req->link))
                return 0;

        if (i915_gem_request_completed(req))
                return 0;

        timeout_remain = MAX_SCHEDULE_TIMEOUT;
        if (timeout) {
                if (WARN_ON(*timeout < 0))
                        return -EINVAL;

                if (*timeout == 0)
                        return -ETIME;

                /* Record current time in case interrupted, or wedged */
                timeout_remain = nsecs_to_jiffies_timeout(*timeout);
                *timeout += ktime_get_raw_ns();
        }

        trace_i915_gem_request_wait_begin(req);

        /* This client is about to stall waiting for the GPU. In many cases
         * this is undesirable and limits the throughput of the system, as
         * many clients cannot continue processing user input/output whilst
         * blocked. RPS autotuning may take tens of milliseconds to respond
         * to the GPU load and thus incurs additional latency for the client.
         * We can circumvent that by promoting the GPU frequency to maximum
         * before we wait. This makes the GPU throttle up much more quickly
         * (good for benchmarks and user experience, e.g. window animations),
         * but at a cost of spending more power processing the workload
         * (bad for battery). Not all clients even want their results
         * immediately and for them we should just let the GPU select its own
         * frequency to maximise efficiency. To prevent a single client from
         * forcing the clocks too high for the whole system, we only allow
         * each client to waitboost once in a busy period.
         */
        if (IS_RPS_CLIENT(rps) && INTEL_GEN(req->i915) >= 6)
                gen6_rps_boost(req->i915, rps, req->emitted_jiffies);

        /* Optimistic spin for the next ~jiffie before touching IRQs */
        if (i915_spin_request(req, state, 5))
                goto complete;

        set_current_state(state);
        add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);

        intel_wait_init(&wait, req->fence.seqno);
        if (intel_engine_add_wait(req->engine, &wait))
                /* In order to check that we haven't missed the interrupt
                 * as we enabled it, we need to kick ourselves to do a
                 * coherent check on the seqno before we sleep.
                 */
                goto wakeup;

        for (;;) {
                if (signal_pending_state(state, current)) {
                        ret = -ERESTARTSYS;
                        break;
                }

                timeout_remain = io_schedule_timeout(timeout_remain);
                if (timeout_remain == 0) {
                        ret = -ETIME;
                        break;
                }

                if (intel_wait_complete(&wait))
                        break;

                set_current_state(state);

wakeup:
                /* Carefully check if the request is complete, giving time
                 * for the seqno to be visible following the interrupt.
                 * We also have to check in case we are kicked by the GPU
                 * reset in order to drop the struct_mutex.
                 */
                if (__i915_request_irq_complete(req))
                        break;

                /* Only spin if we know the GPU is processing this request */
                if (i915_spin_request(req, state, 2))
                        break;
        }
        remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset);

        intel_engine_remove_wait(req->engine, &wait);
        __set_current_state(TASK_RUNNING);
complete:
        trace_i915_gem_request_wait_end(req);

        if (timeout) {
                *timeout -= ktime_get_raw_ns();
                if (*timeout < 0)
                        *timeout = 0;

                /*
                 * Apparently ktime isn't accurate enough and occasionally has a
                 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
                 * things up to make the test happy. We allow up to 1 jiffy.
                 *
                 * This is a regression from the timespec->ktime conversion.
                 */
                if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
                        *timeout = 0;
        }

        if (IS_RPS_USER(rps) &&
            req->fence.seqno == req->engine->last_submitted_seqno) {
                /* The GPU is now idle and this client has stalled.
                 * Since no other client has submitted a request in the
                 * meantime, assume that this client is the only one
                 * supplying work to the GPU but is unable to keep that
                 * work supplied because it is waiting. Since the GPU is
                 * then never kept fully busy, RPS autoclocking will
                 * keep the clocks relatively low, causing further delays.
                 * Compensate by giving the synchronous client credit for
                 * a waitboost next time.
                 */
                spin_lock(&req->i915->rps.client_lock);
                list_del_init(&rps->link);
                spin_unlock(&req->i915->rps.client_lock);
        }

        return ret;
}

/**
 * Waits for a request to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
int i915_wait_request(struct drm_i915_gem_request *req)
{
        int ret;

        GEM_BUG_ON(!req);
        lockdep_assert_held(&req->i915->drm.struct_mutex);

        ret = __i915_wait_request(req, req->i915->mm.interruptible, NULL, NULL);
        if (ret)
                return ret;

        /* If the GPU hung, we want to keep the requests to find the guilty. */
        if (!i915_reset_in_progress(&req->i915->gpu_error))
                i915_gem_request_retire_upto(req);

        return 0;
}