/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef I915_GEM_REQUEST_H
#define I915_GEM_REQUEST_H

#include <linux/dma-fence.h>

#include "i915_gem.h"
#include "i915_sw_fence.h"

struct drm_file;
struct drm_i915_gem_object;

struct intel_wait {
	struct rb_node node;
	struct task_struct *tsk;
	u32 seqno;
};

struct intel_signal_node {
	struct rb_node node;
	struct intel_wait wait;
};

struct i915_dependency {
	struct i915_priotree *signaler;
	struct list_head signal_link;
	struct list_head wait_link;
	struct list_head dfs_link;
	unsigned long flags;
#define I915_DEPENDENCY_ALLOC BIT(0)
};

/* Requests exist in a complex web of interdependencies. Each request
 * has to wait for some other request to complete before it is ready to be run
 * (e.g. we have to wait until the pixels have been rendered into a texture
 * before we can copy from it). We track the readiness of a request in terms
 * of fences, but we also need to keep the dependency tree for the lifetime
 * of the request (beyond the life of an individual fence). We use the tree
 * at various points to reorder the requests whilst keeping the requests
 * in order with respect to their various dependencies.
 */
struct i915_priotree {
	struct list_head signalers_list; /* those before us, we depend upon */
	struct list_head waiters_list; /* those after us, they depend upon us */
	struct rb_node node;
	int priority;
#define I915_PRIORITY_MAX 1024
#define I915_PRIORITY_MIN (-I915_PRIORITY_MAX)
};

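/* Illustrative sketch (not part of this header): how a single
 * i915_dependency stitches two priotrees together. It sits on the
 * signaler's waiters_list via wait_link and on the waiter's signalers_list
 * via signal_link, so the web can be walked in either direction. The helper
 * itself is hypothetical; only the struct fields above are assumed.
 *
 *	static void sketch_add_dependency(struct i915_priotree *waiter,
 *					  struct i915_priotree *signaler,
 *					  struct i915_dependency *dep)
 *	{
 *		dep->signaler = signaler;
 *		list_add(&dep->signal_link, &waiter->signalers_list);
 *		list_add(&dep->wait_link, &signaler->waiters_list);
 *	}
 */
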
/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable sequence
 * number comparisons on buffer last_read|write_seqno. It also allows an
 * emission time to be associated with the request for tracking how far ahead
 * of the GPU the submission is.
 *
 * When modifying this structure be very aware that we perform a lockless
 * RCU lookup of it that may race against reallocation of the struct
 * from the slab freelist. We intentionally do not zero the structure on
 * allocation so that the lookup can use the dangling pointers (and is
 * cognisant that those pointers may be wrong). Instead, everything that
 * needs to be initialised must be done so explicitly.
 *
 * The requests are reference counted.
 */
struct drm_i915_gem_request {
	struct dma_fence fence;
	spinlock_t lock;

	/** On which ring this request was generated */
	struct drm_i915_private *i915;

	/**
	 * Context and ring buffer related to this request
	 * Contexts are refcounted, so when this request is associated with a
	 * context, we must increment the context's refcount, to guarantee that
	 * it persists while any request is linked to it. Requests themselves
	 * are also refcounted, so the request will only be freed when the last
	 * reference to it is dismissed, and the code in
	 * i915_gem_request_free() will then decrement the refcount on the
	 * context.
	 */
	struct i915_gem_context *ctx;
	struct intel_engine_cs *engine;
	struct intel_ring *ring;
	struct intel_timeline *timeline;
	struct intel_signal_node signaling;

	/* Fences for the various phases in the request's lifetime.
	 *
	 * The submit fence is used to await upon all of the request's
	 * dependencies. When it is signaled, the request is ready to run.
	 * It is used by the driver to then queue the request for execution.
	 *
	 * The execute fence is used to signal when the request has been
	 * sent to hardware.
	 *
	 * It is illegal for the submit fence of one request to wait upon the
	 * execute fence of an earlier request. It should be sufficient to
	 * wait upon the submit fence of the earlier request.
	 */
	struct i915_sw_fence submit;
	struct i915_sw_fence execute;
	wait_queue_t submitq;
	wait_queue_t execq;

	/* A list of everyone we wait upon, and everyone who waits upon us.
	 * Even though we will not be submitted to the hardware before the
	 * submit fence is signaled (it waits for all external events as well
	 * as our own requests), the scheduler still needs to know the
	 * dependency tree for the lifetime of the request (from execbuf
	 * to retirement), i.e. bidirectional dependency information for the
	 * request not tied to individual fences.
	 */
	struct i915_priotree priotree;
	struct i915_dependency dep;

	u32 global_seqno;

	/** GEM sequence number associated with the previous request,
	 * when the HWS breadcrumb is equal to this the GPU is processing
	 * this request.
	 */
	u32 previous_seqno;

	/** Position in the ring of the start of the request */
	u32 head;

	/**
	 * Position in the ring of the start of the postfix.
	 * This is required to calculate the maximum available ring space
	 * without overwriting the postfix.
	 */
	u32 postfix;

	/** Position in the ring of the end of the whole request */
	u32 tail;

	/** Position in the ring of the end of any workarounds after the tail */
	u32 wa_tail;

	/** Preallocated space in the ring for emitting the request */
	u32 reserved_space;

	/** Batch buffer related to this request if any (used for
	 * error state dump only).
	 */
	struct i915_vma *batch;
	struct list_head active_list;

	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	/** engine->request_list entry for this request */
	struct list_head link;

	/** ring->request_list entry for this request */
	struct list_head ring_link;

	struct drm_i915_file_private *file_priv;
	/** file_priv list entry for this request */
	struct list_head client_list;
};

extern const struct dma_fence_ops i915_fence_ops;

static inline bool dma_fence_is_i915(const struct dma_fence *fence)
{
	return fence->ops == &i915_fence_ops;
}

struct drm_i915_gem_request * __must_check
i915_gem_request_alloc(struct intel_engine_cs *engine,
		       struct i915_gem_context *ctx);
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
				   struct drm_file *file);
void i915_gem_request_retire_upto(struct drm_i915_gem_request *req);

static inline struct drm_i915_gem_request *
to_request(struct dma_fence *fence)
{
	/* We assume that NULL fence/request are interoperable */
	BUILD_BUG_ON(offsetof(struct drm_i915_gem_request, fence) != 0);
	GEM_BUG_ON(fence && !dma_fence_is_i915(fence));
	return container_of(fence, struct drm_i915_gem_request, fence);
}

static inline struct drm_i915_gem_request *
i915_gem_request_get(struct drm_i915_gem_request *req)
{
	return to_request(dma_fence_get(&req->fence));
}

static inline struct drm_i915_gem_request *
i915_gem_request_get_rcu(struct drm_i915_gem_request *req)
{
	return to_request(dma_fence_get_rcu(&req->fence));
}

static inline void
i915_gem_request_put(struct drm_i915_gem_request *req)
{
	dma_fence_put(&req->fence);
}

static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
					   struct drm_i915_gem_request *src)
{
	if (src)
		i915_gem_request_get(src);

	if (*pdst)
		i915_gem_request_put(*pdst);

	*pdst = src;
}

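/* Illustrative sketch (not from the original source): replacing a tracked
 * request with i915_gem_request_assign(). The reference on the new request
 * is taken before the old one is dropped, so the slot stays consistent even
 * when old and new are the same request. The variable names are
 * hypothetical.
 *
 *	struct drm_i915_gem_request *tracked = NULL;
 *
 *	i915_gem_request_assign(&tracked, new_request); // +1 on new_request
 *	i915_gem_request_assign(&tracked, NULL);        // drops the reference
 */
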
int
i915_gem_request_await_object(struct drm_i915_gem_request *to,
			      struct drm_i915_gem_object *obj,
			      bool write);
int i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
				     struct dma_fence *fence);

void __i915_add_request(struct drm_i915_gem_request *req, bool flush_caches);
#define i915_add_request(req) \
	__i915_add_request(req, true)
#define i915_add_request_no_flush(req) \
	__i915_add_request(req, false)

void __i915_gem_request_submit(struct drm_i915_gem_request *request);
void i915_gem_request_submit(struct drm_i915_gem_request *request);

struct intel_rps_client;
#define NO_WAITBOOST ERR_PTR(-1)
#define IS_RPS_CLIENT(p) (!IS_ERR(p))
#define IS_RPS_USER(p) (!IS_ERR_OR_NULL(p))

long i915_wait_request(struct drm_i915_gem_request *req,
		       unsigned int flags,
		       long timeout)
	__attribute__((nonnull(1)));
#define I915_WAIT_INTERRUPTIBLE	BIT(0)
#define I915_WAIT_LOCKED	BIT(1) /* struct_mutex held, handle GPU reset */
#define I915_WAIT_ALL		BIT(2) /* used by i915_gem_object_wait() */

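/* Illustrative sketch (not from the original source): a typical blocking
 * wait. i915_wait_request() returns the remaining timeout on success and a
 * negative error code if the wait was interrupted or failed, so callers map
 * the long result back to an errno themselves (compare i915_gem_active_wait()
 * below).
 *
 *	long ret;
 *
 *	ret = i915_wait_request(req, I915_WAIT_INTERRUPTIBLE,
 *				MAX_SCHEDULE_TIMEOUT);
 *	if (ret < 0)
 *		return ret; // interrupted or failed before completion
 */
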
static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);

/**
 * Returns true if seq1 is later than seq2.
 */
static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) >= 0;
}

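/* Illustrative examples (not from the original source): why the signed
 * subtraction handles seqno wraparound. The u32 difference is reinterpreted
 * as s32, so values less than 2^31 apart compare correctly even across the
 * 0xffffffff -> 0 boundary:
 *
 *	i915_seqno_passed(2, 1);           // true:  (s32)1 >= 0
 *	i915_seqno_passed(1, 0xffffffff);  // true:  (s32)2 >= 0, wrapped
 *	i915_seqno_passed(1, 2);           // false: (s32)-1 < 0
 */
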
static inline bool
__i915_gem_request_started(const struct drm_i915_gem_request *req)
{
	GEM_BUG_ON(!req->global_seqno);
	return i915_seqno_passed(intel_engine_get_seqno(req->engine),
				 req->previous_seqno);
}

static inline bool
i915_gem_request_started(const struct drm_i915_gem_request *req)
{
	if (!req->global_seqno)
		return false;

	return __i915_gem_request_started(req);
}

static inline bool
__i915_gem_request_completed(const struct drm_i915_gem_request *req)
{
	GEM_BUG_ON(!req->global_seqno);
	return i915_seqno_passed(intel_engine_get_seqno(req->engine),
				 req->global_seqno);
}

static inline bool
i915_gem_request_completed(const struct drm_i915_gem_request *req)
{
	if (!req->global_seqno)
		return false;

	return __i915_gem_request_completed(req);
}

bool __i915_spin_request(const struct drm_i915_gem_request *request,
			 int state, unsigned long timeout_us);
static inline bool i915_spin_request(const struct drm_i915_gem_request *request,
				     int state, unsigned long timeout_us)
{
	return (__i915_gem_request_started(request) &&
		__i915_spin_request(request, state, timeout_us));
}

/* We treat requests as fences. This is not to be confused with our
 * "fence registers"; these are pipeline synchronisation objects a la
 * GL_ARB_sync. We use the fences to synchronize access from the CPU with
 * activity on the GPU, for example, we should not rewrite an object's PTE
 * whilst the GPU is reading them. We also track fences at a higher level
 * to provide implicit synchronisation around GEM objects, e.g. set-domain
 * will wait for outstanding GPU rendering before marking the object ready
 * for CPU access, or a pageflip will wait until the GPU is complete before
 * showing the frame on the scanout.
 *
 * In order to use a fence, the object must track the fence it needs to
 * serialise with. For example, GEM objects want to track both read and
 * write access so that we can perform concurrent read operations between
 * the CPU and GPU engines, as well as waiting for all rendering to
 * complete, or waiting for the last GPU user of a "fence register". The
 * object then embeds a #i915_gem_active to track the most recent (in
 * retirement order) request relevant for the desired mode of access.
 * The #i915_gem_active is updated with i915_gem_active_set() to track the
 * most recent fence request, typically this is done as part of
 * i915_vma_move_to_active().
 *
 * When the #i915_gem_active completes (is retired), it will
 * signal its completion to the owner through a callback as well as mark
 * itself as idle (i915_gem_active.request == NULL). The owner
 * can then perform any action, such as delayed freeing of an active
 * resource including itself.
 */
struct i915_gem_active;

typedef void (*i915_gem_retire_fn)(struct i915_gem_active *,
				   struct drm_i915_gem_request *);

struct i915_gem_active {
	struct drm_i915_gem_request __rcu *request;
	struct list_head link;
	i915_gem_retire_fn retire;
};

void i915_gem_retire_noop(struct i915_gem_active *,
			  struct drm_i915_gem_request *request);

/**
 * init_request_active - prepares the activity tracker for use
 * @active - the active tracker
 * @retire - a callback invoked when the tracker is retired (becomes idle),
 *	     can be NULL
 *
 * init_request_active() prepares the embedded @active struct for use as
 * an activity tracker, that is for tracking the last known active request
 * associated with it. When the last request becomes idle, when it is retired
 * after completion, the optional callback @retire is invoked.
 */
static inline void
init_request_active(struct i915_gem_active *active,
		    i915_gem_retire_fn retire)
{
	INIT_LIST_HEAD(&active->link);
	active->retire = retire ?: i915_gem_retire_noop;
}

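/* Illustrative sketch (not from the original source): embedding an
 * i915_gem_active in an owning object and initialising it with a retire
 * callback. The container and callback names are hypothetical; the pattern
 * follows the description above, with container_of() recovering the owner
 * when the tracked request is finally retired.
 *
 *	struct hypothetical_obj {
 *		struct i915_gem_active last_write;
 *	};
 *
 *	static void obj_retire(struct i915_gem_active *active,
 *			       struct drm_i915_gem_request *rq)
 *	{
 *		struct hypothetical_obj *obj =
 *			container_of(active, typeof(*obj), last_write);
 *		// obj->last_write is now idle; release resources here
 *	}
 *
 *	init_request_active(&obj->last_write, obj_retire);
 */
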
/**
 * i915_gem_active_set - updates the tracker to watch the current request
 * @active - the active tracker
 * @request - the request to watch
 *
 * i915_gem_active_set() watches the given @request for completion. Whilst
 * that @request is busy, the @active reports busy. When that @request is
 * retired, the @active tracker is updated to report idle.
 */
static inline void
i915_gem_active_set(struct i915_gem_active *active,
		    struct drm_i915_gem_request *request)
{
	list_move(&active->link, &request->active_list);
	rcu_assign_pointer(active->request, request);
}

/**
 * i915_gem_active_set_retire_fn - updates the retirement callback
 * @active - the active tracker
 * @fn - the routine called when the request is retired
 * @mutex - struct_mutex used to guard retirements
 *
 * i915_gem_active_set_retire_fn() updates the function pointer that
 * is called when the final request associated with the @active tracker
 * is retired.
 */
static inline void
i915_gem_active_set_retire_fn(struct i915_gem_active *active,
			      i915_gem_retire_fn fn,
			      struct mutex *mutex)
{
	lockdep_assert_held(mutex);
	active->retire = fn ?: i915_gem_retire_noop;
}

static inline struct drm_i915_gem_request *
__i915_gem_active_peek(const struct i915_gem_active *active)
{
	/* Inside the error capture (running with the driver in an unknown
	 * state), we want to bend the rules slightly (a lot).
	 *
	 * Work is in progress to make it safer, in the meantime this keeps
	 * the known issue from spamming the logs.
	 */
	return rcu_dereference_protected(active->request, 1);
}

/**
 * i915_gem_active_raw - return the active request
 * @active - the active tracker
 *
 * i915_gem_active_raw() returns the current request being tracked, or NULL.
 * It does not obtain a reference on the request for the caller, so the caller
 * must hold struct_mutex.
 */
static inline struct drm_i915_gem_request *
i915_gem_active_raw(const struct i915_gem_active *active, struct mutex *mutex)
{
	return rcu_dereference_protected(active->request,
					 lockdep_is_held(mutex));
}

/**
 * i915_gem_active_peek - report the active request being monitored
 * @active - the active tracker
 *
 * i915_gem_active_peek() returns the current request being tracked if
 * still active, or NULL. It does not obtain a reference on the request
 * for the caller, so the caller must hold struct_mutex.
 */
static inline struct drm_i915_gem_request *
i915_gem_active_peek(const struct i915_gem_active *active, struct mutex *mutex)
{
	struct drm_i915_gem_request *request;

	request = i915_gem_active_raw(active, mutex);
	if (!request || i915_gem_request_completed(request))
		return NULL;

	return request;
}

/**
 * i915_gem_active_get - return a reference to the active request
 * @active - the active tracker
 *
 * i915_gem_active_get() returns a reference to the active request, or NULL
 * if the active tracker is idle. The caller must hold struct_mutex.
 */
static inline struct drm_i915_gem_request *
i915_gem_active_get(const struct i915_gem_active *active, struct mutex *mutex)
{
	return i915_gem_request_get(i915_gem_active_peek(active, mutex));
}

/**
 * __i915_gem_active_get_rcu - return a reference to the active request
 * @active - the active tracker
 *
 * __i915_gem_active_get() returns a reference to the active request, or NULL
 * if the active tracker is idle. The caller must hold the RCU read lock, but
 * the returned pointer is safe to use outside of RCU.
 */
static inline struct drm_i915_gem_request *
__i915_gem_active_get_rcu(const struct i915_gem_active *active)
{
	/* Performing a lockless retrieval of the active request is super
	 * tricky. SLAB_TYPESAFE_BY_RCU merely guarantees that the backing
	 * slab of request objects will not be freed whilst we hold the
	 * RCU read lock. It does not guarantee that the request itself
	 * will not be freed and then *reused*. Viz,
	 *
	 * Thread A			Thread B
	 *
	 * req = active.request
	 *				retire(req) -> free(req);
	 *				(req is now first on the slab freelist)
	 *				active.request = NULL
	 *
	 *				req = new submission on a new object
	 * ref(req)
	 *
	 * To prevent the request from being reused whilst the caller
	 * uses it, we take a reference like normal. Whilst acquiring
	 * the reference we check that it is not in a destroyed state
	 * (refcnt == 0). That prevents the request being reallocated
	 * whilst the caller holds on to it. To check that the request
	 * was not reallocated as we acquired the reference we have to
	 * check that our request remains the active request across
	 * the lookup, in the same manner as a seqlock. The visibility
	 * of the pointer versus the reference counting is controlled
	 * by using RCU barriers (rcu_dereference and rcu_assign_pointer).
	 *
	 * In the middle of all that, we inspect whether the request is
	 * complete. Retiring is lazy so the request may be completed long
	 * before the active tracker is updated. Querying whether the
	 * request is complete is far cheaper (as it involves no locked
	 * instructions setting cachelines to exclusive) than acquiring
	 * the reference, so we do it first. The RCU read lock ensures the
	 * pointer dereference is valid, but does not ensure that the
	 * seqno nor HWS is the right one! However, if the request was
	 * reallocated, that means the active tracker's request was complete.
	 * If the new request is also complete, then both are and we can
	 * just report the active tracker is idle. If the new request is
	 * incomplete, then we acquire a reference on it and check that
	 * it remained the active request.
	 *
	 * It is then imperative that we do not zero the request on
	 * reallocation, so that we can chase the dangling pointers!
	 * See i915_gem_request_alloc().
	 */
	do {
		struct drm_i915_gem_request *request;

		request = rcu_dereference(active->request);
		if (!request || i915_gem_request_completed(request))
			return NULL;

		/* An especially silly compiler could decide to recompute the
		 * result of i915_gem_request_completed, more specifically
		 * re-emit the load for request->fence.seqno. A race would catch
		 * a later seqno value, which could flip the result from true to
		 * false. Which means part of the instructions below might not
		 * be executed, while later on instructions are executed. Due to
		 * barriers within the refcounting the inconsistency can't reach
		 * past the call to i915_gem_request_get_rcu, but not executing
		 * that while still executing i915_gem_request_put() creates
		 * havoc enough. Prevent this with a compiler barrier.
		 */
		barrier();

		request = i915_gem_request_get_rcu(request);

		/* What stops the following rcu_access_pointer() from occurring
		 * before the above i915_gem_request_get_rcu()? If we were
		 * to read the value before pausing to get the reference to
		 * the request, we may not notice a change in the active
		 * tracker.
		 *
		 * The rcu_access_pointer() is a mere compiler barrier, which
		 * means both the CPU and compiler are free to perform the
		 * memory read without constraint. The compiler only has to
		 * ensure that any operations after the rcu_access_pointer()
		 * occur afterwards in program order. This means the read may
		 * be performed earlier by an out-of-order CPU, or adventurous
		 * compiler.
		 *
		 * The atomic operation at the heart of
		 * i915_gem_request_get_rcu(), see dma_fence_get_rcu(), is
		 * atomic_inc_not_zero() which is only a full memory barrier
		 * when successful. That is, if i915_gem_request_get_rcu()
		 * returns the request (and so with the reference counted
		 * incremented) then the following read for rcu_access_pointer()
		 * must occur after the atomic operation and so confirm
		 * that this request is the one currently being tracked.
		 *
		 * The corresponding write barrier is part of
		 * rcu_assign_pointer().
		 */
		if (!request || request == rcu_access_pointer(active->request))
			return rcu_pointer_handoff(request);

		i915_gem_request_put(request);
	} while (1);
}

/**
 * i915_gem_active_get_unlocked - return a reference to the active request
 * @active - the active tracker
 *
 * i915_gem_active_get_unlocked() returns a reference to the active request,
 * or NULL if the active tracker is idle. The reference is obtained under RCU,
 * so no locking is required by the caller.
 *
 * The reference should be freed with i915_gem_request_put().
 */
static inline struct drm_i915_gem_request *
i915_gem_active_get_unlocked(const struct i915_gem_active *active)
{
	struct drm_i915_gem_request *request;

	rcu_read_lock();
	request = __i915_gem_active_get_rcu(active);
	rcu_read_unlock();

	return request;
}

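/* Illustrative sketch (not from the original source): the lock-free pattern
 * these helpers enable, essentially how i915_gem_active_wait() below is
 * built. The reference obtained under RCU pins the request across the wait,
 * with no struct_mutex required, and is dropped with i915_gem_request_put().
 *
 *	struct drm_i915_gem_request *rq;
 *
 *	rq = i915_gem_active_get_unlocked(active);
 *	if (rq) {
 *		i915_wait_request(rq, flags, MAX_SCHEDULE_TIMEOUT);
 *		i915_gem_request_put(rq);
 *	}
 */
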
/**
 * i915_gem_active_wait - waits until the request is completed
 * @active - the active request on which to wait
 * @flags - how to wait
 *
 * i915_gem_active_wait() waits until the request is completed before
 * returning, without requiring any locks to be held. Note that it does not
 * retire any requests before returning.
 *
 * This function relies on RCU in order to acquire the reference to the active
 * request without holding any locks. See __i915_gem_active_get_rcu() for the
 * gory details on how that is managed. Once the reference is acquired, we
 * can then wait upon the request, and afterwards release our reference,
 * free of any locking.
 *
 * This function wraps i915_wait_request(), see it for the full details on
 * the arguments.
 *
 * Returns 0 if successful, or a negative error code.
 */
static inline int
i915_gem_active_wait(const struct i915_gem_active *active, unsigned int flags)
{
	struct drm_i915_gem_request *request;
	long ret = 0;

	request = i915_gem_active_get_unlocked(active);
	if (request) {
		ret = i915_wait_request(request, flags, MAX_SCHEDULE_TIMEOUT);
		i915_gem_request_put(request);
	}

	return ret < 0 ? ret : 0;
}

/**
 * i915_gem_active_retire - waits until the request is retired
 * @active - the active request on which to wait
 *
 * i915_gem_active_retire() waits until the request is completed,
 * and then ensures that at least the retirement handler for this
 * @active tracker is called before returning. If the @active
 * tracker is idle, the function returns immediately.
 */
static inline int __must_check
i915_gem_active_retire(struct i915_gem_active *active,
		       struct mutex *mutex)
{
	struct drm_i915_gem_request *request;
	long ret;

	request = i915_gem_active_raw(active, mutex);
	if (!request)
		return 0;

	ret = i915_wait_request(request,
				I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
				MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;

	list_del_init(&active->link);
	RCU_INIT_POINTER(active->request, NULL);

	active->retire(active, request);

	return 0;
}

#define for_each_active(mask, idx) \
	for (; mask ? idx = ffs(mask) - 1, 1 : 0; mask &= ~BIT(idx))
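
/* Illustrative sketch (not from the original source): iterating the set
 * bits of an activity mask. for_each_active() consumes @mask, visiting the
 * index of each set bit in turn. The obj->active and obj->last_read[] names
 * are hypothetical.
 *
 *	unsigned long mask = obj->active; // destroyed by the loop
 *	int idx, err;
 *
 *	for_each_active(mask, idx) {
 *		err = i915_gem_active_retire(&obj->last_read[idx], mutex);
 *		if (err)
 *			break;
 *	}
 */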
704 | ||
05235c53 | 705 | #endif /* I915_GEM_REQUEST_H */ |