#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>
#include "i915_gem_batch_pool.h"

#define I915_CMD_HASH_ORDER 9
/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some inclination as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))
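/*
 * Illustrative note (derived from the definitions above, not from BSpec):
 * with 64-byte cachelines CACHELINE_DWORDS evaluates to 16, so padding a
 * command sequence out to a cacheline boundary means emitting MI_NOOPs up to
 * the next multiple of 16 dwords; intel_ring_cacheline_align(), declared
 * later in this header, is expected to provide that service for request
 * emission.
 */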
/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64
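/*
 * A minimal sketch (assumption: this mirrors, but is not, the driver's
 * __intel_ring_space() declared later in this header): one way to honour the
 * restriction quoted above is to never report the final I915_RING_FREE_SPACE
 * bytes as usable, so HEAD can never catch up to TAIL within one cacheline.
 */
static inline int __example_ring_space(int head, int tail, int size)
{
	int space = head - tail;	/* free bytes between CPU tail and GPU head */

	if (space <= 0)
		space += size;		/* head is behind tail: wrap around the ring */

	return space - I915_RING_FREE_SPACE;
}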
struct intel_hw_status_page {
	u32		*page_addr;
	unsigned int	gfx_addr;
	struct		drm_i915_gem_object *obj;
};
#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)

#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)

#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)

#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)

#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)

#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)

/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
 * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
 */
#define gen8_semaphore_seqno_size sizeof(uint64_t)
#define GEN8_SEMAPHORE_OFFSET(__from, __to) \
	(((__from) * I915_NUM_ENGINES + (__to)) * gen8_semaphore_seqno_size)
#define GEN8_SIGNAL_OFFSET(__ring, to) \
	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
	 GEN8_SEMAPHORE_OFFSET((__ring)->id, (to)))
#define GEN8_WAIT_OFFSET(__ring, from) \
	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
	 GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))
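/*
 * Worked example of the layout above (arithmetic only, engine ids left
 * symbolic): with I915_NUM_ENGINES == 5 and an 8-byte slot per seqno, the
 * semaphore an engine with id 0 uses to signal an engine with id 1 lives at
 * GEN8_SEMAPHORE_OFFSET(0, 1) == (0 * 5 + 1) * 8 == 0x08 within
 * dev_priv->semaphore_obj, and the whole table spans 5 * 5 * 8 == 200 bytes.
 */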
#define GEN8_RING_SEMAPHORE_INIT(e) do { \
	if (!dev_priv->semaphore_obj) { \
		break; \
	} \
	(e)->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET((e), RCS); \
	(e)->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET((e), VCS); \
	(e)->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET((e), BCS); \
	(e)->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET((e), VECS); \
	(e)->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET((e), VCS2); \
	(e)->semaphore.signal_ggtt[(e)->id] = MI_SEMAPHORE_SYNC_INVALID; \
	} while(0)
enum intel_ring_hangcheck_action {
	HANGCHECK_IDLE = 0,
	HANGCHECK_WAIT,
	HANGCHECK_ACTIVE,
	HANGCHECK_ACTIVE_LOOP,
	HANGCHECK_KICK,
	HANGCHECK_HUNG,
};
#define HANGCHECK_SCORE_RING_HUNG 31
struct intel_ring_hangcheck {
	unsigned user_interrupts;
	enum intel_ring_hangcheck_action action;
	u32 instdone[I915_NUM_INSTDONE_REG];
};
struct intel_ringbuffer {
	struct drm_i915_gem_object *obj;
	void __iomem *virtual_start;
	struct i915_vma *vma;

	struct intel_engine_cs *engine;
	struct list_head link;

	u32 head;
	u32 tail;
	int size;

	bool reserved_in_use;

	/** We track the position of the requests in the ring buffer, and
	 * when each is retired we increment last_retired_head as the GPU
	 * must have finished processing the request and so we know we
	 * can advance the ringbuffer up to that position.
	 *
	 * last_retired_head is set to -1 after the value is consumed so
	 * we can detect new retirements.
	 */
	u32 last_retired_head;
};
struct intel_context;
struct drm_i915_reg_table;
/*
 * We use a single page to load ctx workarounds, so all of these
 * values are expressed in dwords.
 *
 * struct i915_wa_ctx_bb:
 *  offset: specifies batch starting position, also helpful in case
 *    we want to have multiple batches at different offsets based on
 *    some criteria. It is not a requirement at the moment but provides
 *    an option for future use.
 *  size: size of the batch in DWORDS
 */
struct i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;
		u32 size;
	} indirect_ctx, per_ctx;

	struct drm_i915_gem_object *obj;
};
struct intel_engine_cs {
	enum intel_engine_id {
		RCS = 0,
		BCS,
		VCS,
		VCS2,	/* Keep instances of the same type engine together. */
		VECS
	} id;
#define I915_NUM_ENGINES 5
#define _VCS(n) (VCS + (n))
	unsigned int exec_id;
	u32		mmio_base;
	struct		drm_device *dev;
	struct intel_ringbuffer *buffer;
	struct list_head buffers;
	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 */
	struct i915_gem_batch_pool batch_pool;

	struct intel_hw_status_page status_page;
	struct i915_ctx_workarounds wa_ctx;
	unsigned irq_refcount; /* protected by dev_priv->irq_lock */
	u32		irq_enable_mask; /* bitmask to enable ring interrupt */
	struct drm_i915_gem_request *trace_irq_req;
	bool __must_check (*irq_get)(struct intel_engine_cs *ring);
	void		(*irq_put)(struct intel_engine_cs *ring);

	int		(*init_hw)(struct intel_engine_cs *ring);

	int		(*init_context)(struct drm_i915_gem_request *req);

	void		(*write_tail)(struct intel_engine_cs *ring,
				      u32 value);
	int __must_check (*flush)(struct drm_i915_gem_request *req,
				  u32 invalidate_domains,
				  u32 flush_domains);
	int		(*add_request)(struct drm_i915_gem_request *req);
	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	void		(*irq_seqno_barrier)(struct intel_engine_cs *ring);
	u32		(*get_seqno)(struct intel_engine_cs *ring);
	void		(*set_seqno)(struct intel_engine_cs *ring,
				     u32 seqno);
	int		(*dispatch_execbuffer)(struct drm_i915_gem_request *req,
					       u64 offset, u32 length,
					       unsigned dispatch_flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
#define I915_DISPATCH_RS     0x4
	void		(*cleanup)(struct intel_engine_cs *ring);
	/* GEN8 signal/wait table - never trust comments!
	 *	  signal to	signal to	signal to	signal to	signal to
	 *	    RCS		   VCS		   BCS		   VECS		 VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) | NOP  (0x90) | VCS2 (0x98) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP  (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
	 *  ie. transpose of g(x, y)
	 *
	 *	 sync from	sync from	sync from	sync from	sync from
	 *	    RCS		   VCS		   BCS		   VECS		 VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) | NOP  (0x90) | VCS2 (0xb8) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) | NOP  (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
	 *  ie. transpose of f(x, y)
	 */
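	/*
	 * Worked instance of f() above (illustrative arithmetic only): for
	 * x->id == 1 and y->id == 2, with NUM_RINGS == 5 and seqno_size == 8,
	 * f(x, y) == 1 * 5 * 8 + 8 * 2 == 0x38, i.e. the second row, third
	 * column of the signal table.
	 */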
	struct {
		u32	sync_seqno[I915_NUM_ENGINES-1];

		union {
			struct {
				/* our mbox written by others */
				u32		wait[I915_NUM_ENGINES];
				/* mboxes this ring signals to */
				i915_reg_t	signal[I915_NUM_ENGINES];
			} mbox;
			u64		signal_ggtt[I915_NUM_ENGINES];
		};

		int	(*sync_to)(struct drm_i915_gem_request *to_req,
				   struct intel_engine_cs *from,
				   u32 seqno);
		int	(*signal)(struct drm_i915_gem_request *signaller_req,
				  /* num_dwords needed by caller */
				  unsigned int num_dwords);
	} semaphore;
	/* Execlists */
	struct tasklet_struct irq_tasklet;
	spinlock_t execlist_lock; /* used inside tasklet, use spin_lock_bh */
	struct list_head execlist_queue;
	struct list_head execlist_retired_req_list;
	unsigned int fw_domains;
	unsigned int next_context_status_buffer;
	unsigned int idle_lite_restore_wa;
	bool disable_lite_restore_wa;
	u32 ctx_desc_template;
	u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
	int		(*emit_request)(struct drm_i915_gem_request *request);
	int		(*emit_flush)(struct drm_i915_gem_request *request,
				      u32 invalidate_domains,
				      u32 flush_domains);
	int		(*emit_bb_start)(struct drm_i915_gem_request *req,
					 u64 offset, unsigned dispatch_flags);
	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_read_req
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;
	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * Seqno of request most recently submitted to request_list.
	 * Used exclusively by hang checker to avoid grabbing lock while
	 * inspecting request list.
	 */
	u32 last_submitted_seqno;
	unsigned user_interrupts;
	bool gpu_caches_dirty;

	wait_queue_head_t irq_queue;

	struct intel_context *last_context;

	struct intel_ring_hangcheck hangcheck;
	struct {
		struct drm_i915_gem_object *obj;
		volatile u32 *cpu_page;
	} scratch;
	bool needs_cmd_parser;
	/*
	 * Table of commands the command parser needs to know about
	 * for this ring.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);
	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_table *reg_tables;
	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the ring's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-ring length field
	 * encoding for the command (i.e. certain opcode ranges use certain bits
	 * to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
};
static inline bool
intel_engine_initialized(struct intel_engine_cs *engine)
{
	return engine->dev != NULL;
}
static inline unsigned
intel_engine_flag(struct intel_engine_cs *engine)
{
	return 1 << engine->id;
}
static inline int
intel_ring_sync_index(struct intel_engine_cs *engine,
		      struct intel_engine_cs *other)
{
	int idx;

	/*
	 * rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
	 * vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
	 * bcs -> 0 = vecs, 1 = vcs2, 2 = rcs, 3 = vcs;
	 * vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
	 * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
	 */

	idx = (other - engine) - 1;
	if (idx < 0)
		idx += I915_NUM_ENGINES;

	return idx;
}
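/*
 * Worked instance of the wrap-around above (pointer arithmetic only, with
 * the engine ordering the comment assumes): if other sits one slot before
 * engine in the engine array, (other - engine) - 1 == -2, which wraps to
 * -2 + I915_NUM_ENGINES == 3; hence "vcs -> 3 = rcs" in the table when rcs
 * immediately precedes vcs.
 */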
static inline void
intel_flush_status_page(struct intel_engine_cs *engine, int reg)
{
	clflush(&engine->status_page.page_addr[reg]);
}
static inline u32
intel_read_status_page(struct intel_engine_cs *engine, int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	return READ_ONCE(engine->status_page.page_addr[reg]);
}
static inline void
intel_write_status_page(struct intel_engine_cs *engine,
			int reg, u32 value)
{
	engine->status_page.page_addr[reg] = value;
}
/**
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x30
#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_SCRATCH_INDEX	0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
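/*
 * Minimal usage sketch (illustrative only; this helper is not part of the
 * driver API): the per-engine breadcrumb seqno is conventionally kept in the
 * driver-owned area of the status page at I915_GEM_HWS_INDEX and read back
 * through the accessor above.
 */
static inline u32 __example_read_hws_seqno(struct intel_engine_cs *engine)
{
	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
}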
struct intel_ringbuffer *
intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size);
int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
				     struct intel_ringbuffer *ringbuf);
void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
void intel_ringbuffer_free(struct intel_ringbuffer *ring);

void intel_stop_engine(struct intel_engine_cs *engine);
void intel_cleanup_engine(struct intel_engine_cs *engine);

int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);

int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
static inline void intel_ring_emit(struct intel_engine_cs *engine,
				   u32 data)
{
	struct intel_ringbuffer *ringbuf = engine->buffer;

	iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
	ringbuf->tail += 4;
}
static inline void intel_ring_emit_reg(struct intel_engine_cs *engine,
				       i915_reg_t reg)
{
	intel_ring_emit(engine, i915_mmio_reg_offset(reg));
}
static inline void intel_ring_advance(struct intel_engine_cs *engine)
{
	struct intel_ringbuffer *ringbuf = engine->buffer;

	ringbuf->tail &= ringbuf->size - 1;
}
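/*
 * Typical emission sequence built from the helpers above (a sketch with
 * error handling trimmed to the essentials; MI_NOOP stands in for real
 * commands):
 *
 *	ret = intel_ring_begin(req, 2);
 *	if (ret)
 *		return ret;
 *
 *	intel_ring_emit(engine, MI_NOOP);
 *	intel_ring_emit(engine, MI_NOOP);
 *	intel_ring_advance(engine);
 *
 * The count passed to intel_ring_begin() is in dwords and should cover the
 * number of intel_ring_emit() calls that follow.
 */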
int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
int intel_ring_space(struct intel_ringbuffer *ringbuf);
bool intel_engine_stopped(struct intel_engine_cs *engine);

int __must_check intel_engine_idle(struct intel_engine_cs *engine);
void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno);
int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);

void intel_fini_pipe_control(struct intel_engine_cs *engine);
int intel_init_pipe_control(struct intel_engine_cs *engine);

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_bsd2_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);

u64 intel_ring_get_active_head(struct intel_engine_cs *engine);

int init_workarounds_ring(struct intel_engine_cs *engine);
static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
{
	return ringbuf->tail;
}
/*
 * Arbitrary size for largest possible 'add request' sequence. The code paths
 * are complex and variable. Empirical measurement shows that the worst case
 * is ILK at 136 words. Reserving too much is better than reserving too little
 * as that allows for corner cases that might have been missed. So the figure
 * has been rounded up to 160 words.
 */
#define MIN_SPACE_FOR_ADD_REQUEST	160

/*
 * Reserve space in the ring to guarantee that the i915_add_request() call
 * will always have sufficient room to do its stuff. The request creation
 * code calls this automatically.
 */
void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size);
/* Cancel the reservation, e.g. because the request is being discarded. */
void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf);
/* Use the reserved space - for use by i915_add_request() only. */
void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf);
/* Finish with the reserved space - for use by i915_add_request() only. */
void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf);
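/*
 * Expected pairing of the reservation helpers above (a sketch of the calling
 * convention, not a verbatim excerpt from the request code):
 *
 *	intel_ring_reserved_space_reserve(ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
 *	... emit the body of the request ...
 *	intel_ring_reserved_space_use(ringbuf);    (on entering i915_add_request())
 *	... emit the breadcrumb and flushes ...
 *	intel_ring_reserved_space_end(ringbuf);
 *
 * or intel_ring_reserved_space_cancel(ringbuf) if the request is abandoned
 * before it reaches i915_add_request().
 */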
/* Legacy ringbuffer specific portion of reservation code: */
int intel_ring_reserve_space(struct drm_i915_gem_request *request);

#endif /* _INTEL_RINGBUFFER_H_ */