#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>
#include "i915_gem_batch_pool.h"
#include "i915_gem_request.h"
#include "i915_gem_timeline.h"

#define I915_CMD_HASH_ORDER 9

/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))

/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64

struct intel_hw_status_page {
	struct i915_vma *vma;
	u32 *page_addr;
	u32 ggtt_offset;
};

#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
#define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val)

#define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base))
#define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val)

#define I915_READ_HEAD(engine) I915_READ(RING_HEAD((engine)->mmio_base))
#define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val)

#define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base))
#define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val)

#define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base))
#define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val)

#define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base))
#define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val)

/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
 * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
 */
#define gen8_semaphore_seqno_size sizeof(uint64_t)
#define GEN8_SEMAPHORE_OFFSET(__from, __to) \
	(((__from) * I915_NUM_ENGINES + (__to)) * gen8_semaphore_seqno_size)
#define GEN8_SIGNAL_OFFSET(__ring, to) \
	(dev_priv->semaphore->node.start + \
	 GEN8_SEMAPHORE_OFFSET((__ring)->id, (to)))
#define GEN8_WAIT_OFFSET(__ring, from) \
	(dev_priv->semaphore->node.start + \
	 GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))
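
/*
 * Worked example (editor's note, not from the original header): with
 * I915_NUM_ENGINES == 5, each (from, to) engine pair owns its own qword
 * slot in the semaphore page, e.g.
 *
 *	GEN8_SEMAPHORE_OFFSET(0, 1) == (0 * 5 + 1) * 8 == 0x08
 *	GEN8_SEMAPHORE_OFFSET(1, 0) == (1 * 5 + 0) * 8 == 0x28
 *
 * giving 5 * 5 == 25 slots (200 bytes) in total.
 */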

enum intel_engine_hangcheck_action {
	ENGINE_IDLE = 0,
	ENGINE_WAIT,
	ENGINE_ACTIVE_SEQNO,
	ENGINE_ACTIVE_HEAD,
	ENGINE_ACTIVE_SUBUNITS,
	ENGINE_WAIT_KICK,
	ENGINE_DEAD,
};

static inline const char *
hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
{
	switch (a) {
	case ENGINE_IDLE:
		return "idle";
	case ENGINE_WAIT:
		return "wait";
	case ENGINE_ACTIVE_SEQNO:
		return "active seqno";
	case ENGINE_ACTIVE_HEAD:
		return "active head";
	case ENGINE_ACTIVE_SUBUNITS:
		return "active subunits";
	case ENGINE_WAIT_KICK:
		return "wait kick";
	case ENGINE_DEAD:
		return "dead";
	}

	return "unknown";
}

#define I915_MAX_SLICES	3
#define I915_MAX_SUBSLICES 3

#define instdone_slice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)

#define instdone_subslice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask)

#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
	for ((slice__) = 0, (subslice__) = 0; \
	     (slice__) < I915_MAX_SLICES; \
	     (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
	       (slice__) += ((subslice__) == 0)) \
		for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
			    (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))
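
/*
 * Usage sketch (editor's illustration; assumes a dev_priv and a populated
 * struct intel_instdone called "instdone" in scope): the outer for walks
 * every (slice, subslice) pair and the for_each_if filter drops pairs that
 * are absent from the slice/subslice masks. On gen7 both masks collapse to
 * bit 0, so only (0, 0) survives.
 *
 *	int slice, subslice;
 *
 *	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
 *		DRM_DEBUG("sampler[%d][%d] = 0x%08x\n", slice, subslice,
 *			  instdone.sampler[slice][subslice]);
 */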

struct intel_instdone {
	u32 instdone;
	/* The following exist only in the RCS engine */
	u32 slice_common;
	u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
	u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
};

struct intel_engine_hangcheck {
	u64 acthd;
	u32 seqno;
	enum intel_engine_hangcheck_action action;
	unsigned long action_timestamp;
	int deadlock;
	struct intel_instdone instdone;
	bool stalled;
};

struct intel_ring {
	struct i915_vma *vma;
	void *vaddr;

	struct intel_engine_cs *engine;

	struct list_head request_list;

	u32 head;
	u32 tail;
	int space;
	int size;
	int effective_size;

	/** We track the position of the requests in the ring buffer, and
	 * when each is retired we increment last_retired_head as the GPU
	 * must have finished processing the request and so we know we
	 * can advance the ringbuffer up to that position.
	 *
	 * last_retired_head is set to -1 after the value is consumed so
	 * we can detect new retirements.
	 */
	u32 last_retired_head;
};
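
/*
 * Editor's sketch of how last_retired_head is typically consumed (hedged;
 * the authoritative logic lives in intel_ring_update_space() in the
 * corresponding .c file): a retired request donates its position as the
 * new head, and the -1 sentinel marks the value as already taken.
 *
 *	if (ring->last_retired_head != -1) {
 *		ring->head = ring->last_retired_head;
 *		ring->last_retired_head = -1;
 *	}
 *	ring->space = __intel_ring_space(ring->head, ring->tail, ring->size);
 */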

struct i915_gem_context;
struct drm_i915_reg_table;

/*
 * We use a single page to load ctx workarounds, so all of these
 * values are expressed in dwords.
 *
 * struct i915_wa_ctx_bb:
 * offset: specifies the batch starting position, also helpful in case
 * we want to have multiple batches at different offsets based on
 * some criteria. It is not a requirement at the moment but provides
 * an option for future use.
 * size: size of the batch in DWORDS
 */
struct i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;
		u32 size;
	} indirect_ctx, per_ctx;
	struct i915_vma *vma;
};

struct drm_i915_gem_request;
struct intel_render_state;

struct intel_engine_cs {
	struct drm_i915_private *i915;
	const char *name;
	enum intel_engine_id {
		RCS = 0,
		BCS,
		VCS,
		VCS2,	/* Keep instances of the same type engine together. */
		VECS
	} id;
#define _VCS(n) (VCS + (n))
	unsigned int exec_id;
	enum intel_engine_hw_id {
		RCS_HW = 0,
		VCS_HW,
		BCS_HW,
		VECS_HW,
		VCS2_HW
	} hw_id;
	enum intel_engine_hw_id guc_id; /* XXX same as hw_id? */
	u32 mmio_base;
	unsigned int irq_shift;
	struct intel_ring *buffer;
	struct intel_timeline *timeline;

	struct intel_render_state *render_state;

	/* Rather than have every client wait upon all user interrupts,
	 * with the herd waking after every interrupt and each doing the
	 * heavyweight seqno dance, we delegate the task (of being the
	 * bottom-half of the user interrupt) to the first client. After
	 * every interrupt, we wake up one client, who does the heavyweight
	 * coherent seqno read and either goes back to sleep (if incomplete),
	 * or wakes up all the completed clients in parallel, before then
	 * transferring the bottom-half status to the next client in the queue.
	 *
	 * Compared to walking the entire list of waiters in a single dedicated
	 * bottom-half, we reduce the latency of the first waiter by avoiding
	 * a context switch, but incur additional coherent seqno reads when
	 * following the chain of request breadcrumbs. Since it is most likely
	 * that we have a single client waiting on each seqno, then reducing
	 * the overhead of waking that client is much preferred.
	 */
	struct intel_breadcrumbs {
		struct task_struct __rcu *irq_seqno_bh; /* bh for interrupts */
		bool irq_posted;

		spinlock_t lock; /* protects the lists of requests; irqsafe */
		struct rb_root waiters; /* sorted by retirement, priority */
		struct rb_root signals; /* sorted by retirement */
		struct intel_wait *first_wait; /* oldest waiter by retirement */
		struct task_struct *signaler; /* used for fence signalling */
		struct drm_i915_gem_request *first_signal;
		struct timer_list fake_irq; /* used after a missed interrupt */
		struct timer_list hangcheck; /* detect missed interrupts */

		unsigned long timeout;

		bool irq_enabled : 1;
		bool rpm_wakelock : 1;
	} breadcrumbs;

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 */
	struct i915_gem_batch_pool batch_pool;

	struct intel_hw_status_page status_page;
	struct i915_ctx_workarounds wa_ctx;
	struct i915_vma *scratch;

	u32 irq_keep_mask; /* always keep these interrupts */
	u32 irq_enable_mask; /* bitmask to enable ring interrupt */
	void (*irq_enable)(struct intel_engine_cs *engine);
	void (*irq_disable)(struct intel_engine_cs *engine);

	int (*init_hw)(struct intel_engine_cs *engine);
	void (*reset_hw)(struct intel_engine_cs *engine,
			 struct drm_i915_gem_request *req);

	int (*context_pin)(struct intel_engine_cs *engine,
			   struct i915_gem_context *ctx);
	void (*context_unpin)(struct intel_engine_cs *engine,
			      struct i915_gem_context *ctx);
	int (*request_alloc)(struct drm_i915_gem_request *req);
	int (*init_context)(struct drm_i915_gem_request *req);

	int (*emit_flush)(struct drm_i915_gem_request *request,
			  u32 mode);
#define EMIT_INVALIDATE	BIT(0)
#define EMIT_FLUSH	BIT(1)
#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)
	int (*emit_bb_start)(struct drm_i915_gem_request *req,
			     u64 offset, u32 length,
			     unsigned int dispatch_flags);
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
#define I915_DISPATCH_RS     BIT(2)
	void (*emit_breadcrumb)(struct drm_i915_gem_request *req,
				u32 *out);
	int emit_breadcrumb_sz;

	/* Pass the request to the hardware queue (e.g. directly into
	 * the legacy ringbuffer or to the end of an execlist).
	 *
	 * This is called from an atomic context with irqs disabled; must
	 * be irq safe.
	 */
	void (*submit_request)(struct drm_i915_gem_request *req);

	/* Call when the priority on a request has changed and it and its
	 * dependencies may need rescheduling. Note the request itself may
	 * not be ready to run!
	 *
	 * Called under the struct_mutex.
	 */
	void (*schedule)(struct drm_i915_gem_request *request,
			 int priority);

	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	void (*irq_seqno_barrier)(struct intel_engine_cs *engine);
	void (*cleanup)(struct intel_engine_cs *engine);

	/* GEN8 signal/wait table - never trust comments!
	 *	  signal to	signal to	signal to	signal to	signal to
	 *	    RCS		   VCS		   BCS		   VECS		   VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) | NOP  (0x90) | VCS2 (0x98) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP  (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
	 *  ie. transpose of g(x, y)
	 *
	 *	 sync from	sync from	sync from	sync from	sync from
	 *	    RCS		   VCS		   BCS		   VECS		   VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) | NOP  (0x90) | VCS2 (0xb8) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) | NOP  (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
	 *  ie. transpose of f(x, y)
	 */
	struct {
		union {
#define GEN6_SEMAPHORE_LAST	VECS_HW
#define GEN6_NUM_SEMAPHORES	(GEN6_SEMAPHORE_LAST + 1)
#define GEN6_SEMAPHORES_MASK	GENMASK(GEN6_SEMAPHORE_LAST, 0)
			struct {
				/* our mbox written by others */
				u32		wait[GEN6_NUM_SEMAPHORES];
				/* mboxes this ring signals to */
				i915_reg_t	signal[GEN6_NUM_SEMAPHORES];
			} mbox;
			u64		signal_ggtt[I915_NUM_ENGINES];
		};

		/* AKA wait() */
		int	(*sync_to)(struct drm_i915_gem_request *req,
				   struct drm_i915_gem_request *signal);
		u32	*(*signal)(struct drm_i915_gem_request *req, u32 *out);
	} semaphore;

	/* Execlists */
	struct tasklet_struct irq_tasklet;
	struct execlist_port {
		struct drm_i915_gem_request *request;
		unsigned int count;
	} execlist_port[2];
	struct rb_root execlist_queue;
	struct rb_node *execlist_first;
	unsigned int fw_domains;
	bool disable_lite_restore_wa;
	bool preempt_wa;
	u32 ctx_desc_template;

	/* Contexts are pinned whilst they are active on the GPU. The last
	 * context executed remains active whilst the GPU is idle - the
	 * switch away and write to the context object only occurs on the
	 * next execution. Contexts are only unpinned on retirement of the
	 * following request ensuring that we can always write to the object
	 * on the context switch even after idling. Across suspend, we switch
	 * to the kernel context and trash it as the save may not happen
	 * before the hardware is powered down.
	 */
	struct i915_gem_context *last_retired_context;

	/* We track the current MI_SET_CONTEXT in order to eliminate
	 * redundant context switches. This presumes that requests are not
	 * reordered! Or when they are, the tracking is updated along with
	 * the emission of individual requests into the legacy command
	 * stream (ring).
	 */
	struct i915_gem_context *legacy_active_context;

	/* status_notifier: list of callbacks for context-switch changes */
	struct atomic_notifier_head context_status_notifier;

	struct intel_engine_hangcheck hangcheck;

	bool needs_cmd_parser;

	/*
	 * Table of commands the command parser needs to know about
	 * for this engine.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_table *reg_tables;
	int reg_table_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the engine's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-engine length
	 * field encoding for the command (i.e. different opcode ranges use
	 * certain bits to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
};

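/*
 * Editor's illustration of the emit_flush() mode bits above (a hedged
 * sketch, not from the original header): callers pass EMIT_INVALIDATE,
 * EMIT_FLUSH, or EMIT_BARRIER for both, e.g. before starting a batch:
 *
 *	err = engine->emit_flush(req, EMIT_INVALIDATE);
 *	if (err)
 *		return err;
 */
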
static inline unsigned
intel_engine_flag(const struct intel_engine_cs *engine)
{
	return 1 << engine->id;
}

static inline void
intel_flush_status_page(struct intel_engine_cs *engine, int reg)
{
	mb();
	clflush(&engine->status_page.page_addr[reg]);
	mb();
}

static inline u32
intel_read_status_page(struct intel_engine_cs *engine, int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	return READ_ONCE(engine->status_page.page_addr[reg]);
}

static inline void
intel_write_status_page(struct intel_engine_cs *engine,
			int reg, u32 value)
{
	engine->status_page.page_addr[reg] = value;
}

/*
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x30
#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_SCRATCH_INDEX	0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)

struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size);
int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias);
void intel_ring_unpin(struct intel_ring *ring);
void intel_ring_free(struct intel_ring *ring);

void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);

void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);

int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);

static inline void intel_ring_emit(struct intel_ring *ring, u32 data)
{
	*(uint32_t *)(ring->vaddr + ring->tail) = data;
	ring->tail += 4;
}

static inline void intel_ring_emit_reg(struct intel_ring *ring, i915_reg_t reg)
{
	intel_ring_emit(ring, i915_mmio_reg_offset(reg));
}

static inline void intel_ring_advance(struct intel_ring *ring)
{
	/* Dummy function.
	 *
	 * This serves as a placeholder in the code so that the reader
	 * can compare against the preceding intel_ring_begin() and
	 * check that the number of dwords emitted matches the space
	 * reserved for the command packet (i.e. the value passed to
	 * intel_ring_begin()).
	 */
}

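/*
 * Canonical emission pattern (editor's sketch; the command dwords are
 * illustrative only): reserve space for the whole packet with
 * intel_ring_begin(), write exactly that many dwords with
 * intel_ring_emit(), then close with intel_ring_advance():
 *
 *	struct intel_ring *ring = req->ring;
 *	int ret;
 *
 *	ret = intel_ring_begin(req, 4);
 *	if (ret)
 *		return ret;
 *
 *	intel_ring_emit(ring, MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT);
 *	intel_ring_emit(ring, addr);
 *	intel_ring_emit(ring, value);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 */
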
static inline u32
intel_ring_wrap(const struct intel_ring *ring, u32 pos)
{
	return pos & (ring->size - 1);
}

static inline u32 intel_ring_offset(struct intel_ring *ring, void *addr)
{
	/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
	u32 offset = addr - ring->vaddr;
	return intel_ring_wrap(ring, offset);
}

int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ring *ring);

void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);

void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);

int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);

u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine);

static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
{
	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
}

static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
{
	/* We are only peeking at the tail of the submit queue (and not the
	 * queue itself) in order to gain a hint as to the current active
	 * state of the engine. Callers are not expected to be taking
	 * engine->timeline->lock, nor are they expected to be concerned
	 * with serialising this hint with anything, so document it as
	 * a hint and nothing more.
	 */
	return READ_ONCE(engine->timeline->last_submitted_seqno);
}

int init_workarounds_ring(struct intel_engine_cs *engine);

void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone);

/*
 * Arbitrary size for largest possible 'add request' sequence. The code paths
 * are complex and variable. Empirical measurement shows that the worst case
 * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However,
 * we need to allocate double the largest single packet within that emission
 * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW).
 */
#define MIN_SPACE_FOR_ADD_REQUEST 336

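/*
 * Editor's check of the arithmetic above: 6 + 6 + 36 dwords = 48 dwords
 * = 192 bytes for the BDW worst case; doubling the largest single packet
 * for tail wraparound gives 6 + 6 + 72 = 84 dwords = 336 bytes, which is
 * where MIN_SPACE_FOR_ADD_REQUEST comes from.
 */
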
static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
{
	return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR;
}

/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);

static inline void intel_wait_init(struct intel_wait *wait, u32 seqno)
{
	wait->tsk = current;
	wait->seqno = seqno;
}

static inline bool intel_wait_complete(const struct intel_wait *wait)
{
	return RB_EMPTY_NODE(&wait->node);
}

bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait);
void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait);
void intel_engine_enable_signaling(struct drm_i915_gem_request *request);

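/*
 * Typical waiter flow (editor's sketch, heavily simplified; the real sleep
 * loop, signal handling and irq bookkeeping live in i915_wait_request()):
 *
 *	struct intel_wait wait;
 *
 *	intel_wait_init(&wait, seqno);
 *	intel_engine_add_wait(engine, &wait);
 *	while (!i915_seqno_passed(intel_engine_get_seqno(engine), seqno))
 *		schedule();
 *	intel_engine_remove_wait(engine, &wait);
 */
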
static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
{
	return rcu_access_pointer(engine->breadcrumbs.irq_seqno_bh);
}

static inline bool intel_engine_wakeup(const struct intel_engine_cs *engine)
{
	bool wakeup = false;

	/* Note that for this not to dangerously chase a dangling pointer,
	 * we must hold the rcu_read_lock here.
	 *
	 * Also note that tsk is likely to be in !TASK_RUNNING state so an
	 * early test for tsk->state != TASK_RUNNING before wake_up_process()
	 * is unlikely to be beneficial.
	 */
	if (intel_engine_has_waiter(engine)) {
		struct task_struct *tsk;

		rcu_read_lock();
		tsk = rcu_dereference(engine->breadcrumbs.irq_seqno_bh);
		if (tsk)
			wakeup = wake_up_process(tsk);
		rcu_read_unlock();
	}

	return wakeup;
}

void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
unsigned int intel_breadcrumbs_busy(struct drm_i915_private *i915);

#endif /* _INTEL_RINGBUFFER_H_ */