/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>
#include "i915_gem_batch_pool.h"
#include "i915_gem_request.h"
#include "i915_gem_timeline.h"
#include "i915_selftest.h"

#define I915_CMD_HASH_ORDER 9
/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication as to some of the magic values used in the various
 * workaround macros.
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))
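
/* For reference: with CACHELINE_BYTES == 64, CACHELINE_DWORDS works out to
 * 64 / sizeof(uint32_t) == 16 dwords per cacheline.
 */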
struct intel_hw_status_page {
	struct i915_vma *vma;
	u32 *page_addr;
	u32 ggtt_offset;
};
#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
#define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val)

#define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base))
#define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val)

#define I915_READ_HEAD(engine) I915_READ(RING_HEAD((engine)->mmio_base))
#define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val)

#define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base))
#define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val)

#define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base))
#define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val)

#define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base))
#define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val)
/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
 * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
 */
#define gen8_semaphore_seqno_size sizeof(uint64_t)
#define GEN8_SEMAPHORE_OFFSET(__from, __to) \
	(((__from) * I915_NUM_ENGINES + (__to)) * gen8_semaphore_seqno_size)
#define GEN8_SIGNAL_OFFSET(__ring, to) \
	(dev_priv->semaphore->node.start + \
	 GEN8_SEMAPHORE_OFFSET((__ring)->id, (to)))
#define GEN8_WAIT_OFFSET(__ring, from) \
	(dev_priv->semaphore->node.start + \
	 GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))
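
/* Worked example (illustrative only, engine ids as in enum intel_engine_id
 * below): with 8 byte slots and I915_NUM_ENGINES == 5, the slot RCS (id 0)
 * signals for VCS (id 1) is GEN8_SEMAPHORE_OFFSET(0, 1) == (0 * 5 + 1) * 8
 * == 0x08 into the semaphore object, matching the gen8 signal table
 * documented further down in this file.
 */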
enum intel_engine_hangcheck_action {
	ENGINE_IDLE = 0,
	ENGINE_WAIT,
	ENGINE_ACTIVE_SEQNO,
	ENGINE_ACTIVE_HEAD,
	ENGINE_ACTIVE_SUBUNITS,
	ENGINE_WAIT_KICK,
	ENGINE_DEAD,
};
static inline const char *
hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
{
	switch (a) {
	case ENGINE_IDLE:
		return "idle";
	case ENGINE_WAIT:
		return "wait";
	case ENGINE_ACTIVE_SEQNO:
		return "active seqno";
	case ENGINE_ACTIVE_HEAD:
		return "active head";
	case ENGINE_ACTIVE_SUBUNITS:
		return "active subunits";
	case ENGINE_WAIT_KICK:
		return "wait kick";
	case ENGINE_DEAD:
		return "dead";
	}

	return "unknown";
}
#define I915_MAX_SLICES	3
#define I915_MAX_SUBSLICES 3

#define instdone_slice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)

#define instdone_subslice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask)

#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
	for ((slice__) = 0, (subslice__) = 0; \
	     (slice__) < I915_MAX_SLICES; \
	     (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
	       (slice__) += ((subslice__) == 0)) \
		for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
			    (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))
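
/* Usage sketch (illustrative only; read_subslice_reg() stands in for
 * whatever per-subslice accessor the caller provides):
 *
 *	int slice, subslice;
 *
 *	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
 *		instdone->sampler[slice][subslice] =
 *			read_subslice_reg(dev_priv, slice, subslice);
 *
 * The outer for() walks every (slice, subslice) pair up to the compile-time
 * maxima; for_each_if() then skips pairs not present in the sseu masks.
 */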
struct intel_instdone {
	u32 instdone;
	/* The following exist only in the RCS engine */
	u32 slice_common;
	u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
	u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
};
struct intel_engine_hangcheck {
	u64 acthd;
	u32 seqno;
	enum intel_engine_hangcheck_action action;
	unsigned long action_timestamp;
	int deadlock;
	struct intel_instdone instdone;
	struct drm_i915_gem_request *active_request;
	bool stalled;
};
struct intel_ring {
	struct i915_vma *vma;
	void *vaddr;

	struct list_head request_list;

	u32 head;
	u32 tail;
	u32 emit;
	u32 space;
	u32 size;
};
struct i915_gem_context;
struct drm_i915_reg_table;
/*
 * We use a single page to load ctx workarounds, so all of these
 * values are referred to in terms of dwords.
 *
 * struct i915_wa_ctx_bb:
 *  offset: specifies the batch starting position, also helpful if we
 *    want to have multiple batches at different offsets based on some
 *    criteria. It is not a requirement at the moment but provides an
 *    option for future use.
 *  size: size of the batch in DWORDS
 */
struct i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;
		u32 size;
	} indirect_ctx, per_ctx;
	struct i915_vma *vma;
};
struct drm_i915_gem_request;
struct intel_render_state;
/*
 * Engine ID definitions.
 * Keep instances of the same engine type together.
 */
enum intel_engine_id {
	RCS = 0,
	BCS,
	VCS,
	VCS2,
#define _VCS(n) (VCS + (n))
	VECS
};
struct i915_priolist {
	struct rb_node node;
	struct list_head requests;
	int priority;
};
/**
 * struct intel_engine_execlists - execlist submission queue and port state
 *
 * The struct intel_engine_execlists represents the combined logical state of
 * the driver and the hardware state for execlist mode of submission.
 */
struct intel_engine_execlists {
	/**
	 * @irq_tasklet: softirq tasklet for the bottom-half handler
	 */
	struct tasklet_struct irq_tasklet;
	/**
	 * @default_priolist: priority list for I915_PRIORITY_NORMAL
	 */
	struct i915_priolist default_priolist;
	/**
	 * @no_priolist: priority lists disabled
	 */
	bool no_priolist;

	/**
	 * @port: execlist port states
	 *
	 * For each hardware ELSP (ExecList Submission Port) we keep
	 * track of the last request and the number of times we submitted
	 * that port to hw. We then count the number of times the hw reports
	 * a context completion or preemption. As only one context can
	 * be active on hw, we limit resubmission of context to port[0]. This
	 * is called Lite Restore of the context.
	 */
	struct execlist_port {
		/**
		 * @request_count: combined request and submission count
		 */
		struct drm_i915_gem_request *request_count;
#define EXECLIST_COUNT_BITS 2
#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
#define port_set(p, packed) ((p)->request_count = (packed))
#define port_isset(p) ((p)->request_count)
#define port_index(p, execlists) ((p) - (execlists)->port)
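
		/* Usage sketch (illustrative only): the low EXECLIST_COUNT_BITS
		 * of the request pointer hold the submission count, relying on
		 * the pointer being at least 4-byte aligned:
		 *
		 *	port_set(port, port_pack(rq, 1));  // remember rq, submitted once
		 *	rq = port_unpack(port, &count);    // recover both fields
		 *	port_set(port, port_pack(rq, count + 1)); // lite restore: bump count
		 */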
		/**
		 * @context_id: context ID for port
		 */
		GEM_DEBUG_DECL(u32 context_id);

#define EXECLIST_MAX_PORTS 2
	} port[EXECLIST_MAX_PORTS];
	/**
	 * @active: is the HW active? We consider the HW as active after
	 * submitting any context for execution and until we have seen the
	 * last context completion event. After that, we do not expect any
	 * more events until we submit, and so can park the HW.
	 *
	 * As we have a small number of different sources from which we feed
	 * the HW, we track the state of each inside a single bitfield.
	 */
	unsigned int active;
#define EXECLISTS_ACTIVE_USER 0
#define EXECLISTS_ACTIVE_PREEMPT 1
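
	/* Typical flow (sketch only, see the execlists_*_active() helpers near
	 * the end of this file): mark the engine busy when a user context is
	 * submitted, and clear it again once the final completion event is seen:
	 *
	 *	execlists_set_active(execlists, EXECLISTS_ACTIVE_USER);
	 *	...
	 *	if (execlists_is_active(execlists, EXECLISTS_ACTIVE_USER))
	 *		execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER);
	 */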
	/**
	 * @port_mask: number of execlist ports - 1
	 */
	unsigned int port_mask;
	/**
	 * @queue: queue of requests, in priority lists
	 */
	struct rb_root queue;
	/**
	 * @first: leftmost level in priority @queue
	 */
	struct rb_node *first;
	/**
	 * @fw_domains: forcewake domains for irq tasklet
	 */
	unsigned int fw_domains;
	/**
	 * @csb_head: context status buffer head
	 */
	unsigned int csb_head;
	/**
	 * @csb_use_mmio: access csb through mmio, instead of hwsp
	 */
	bool csb_use_mmio;
};

#define INTEL_ENGINE_CS_MAX_NAME 8
struct intel_engine_cs {
	struct drm_i915_private *i915;
	char name[INTEL_ENGINE_CS_MAX_NAME];

	enum intel_engine_id id;
	unsigned int hw_id;
	unsigned int guc_id;

	u8 class;
	u8 instance;
	u32 mmio_base;
	unsigned int irq_shift;
	struct intel_ring *buffer;
	struct intel_timeline *timeline;

	struct intel_render_state *render_state;

	unsigned long irq_posted;
#define ENGINE_IRQ_BREADCRUMB 0
#define ENGINE_IRQ_EXECLIST 1
	/* Rather than have every client wait upon all user interrupts,
	 * with the herd waking after every interrupt and each doing the
	 * heavyweight seqno dance, we delegate the task (of being the
	 * bottom-half of the user interrupt) to the first client. After
	 * every interrupt, we wake up one client, who does the heavyweight
	 * coherent seqno read and either goes back to sleep (if incomplete),
	 * or wakes up all the completed clients in parallel, before then
	 * transferring the bottom-half status to the next client in the queue.
	 *
	 * Compared to walking the entire list of waiters in a single dedicated
	 * bottom-half, we reduce the latency of the first waiter by avoiding
	 * a context switch, but incur additional coherent seqno reads when
	 * following the chain of request breadcrumbs. Since it is most likely
	 * that we have a single client waiting on each seqno, then reducing
	 * the overhead of waking that client is much preferred.
	 */
	struct intel_breadcrumbs {
		spinlock_t irq_lock; /* protects irq_*; irqsafe */
		struct intel_wait *irq_wait; /* oldest waiter by retirement */

		spinlock_t rb_lock; /* protects the rb and wraps irq_lock */
		struct rb_root waiters; /* sorted by retirement, priority */
		struct rb_root signals; /* sorted by retirement */
		struct task_struct *signaler; /* used for fence signalling */
		struct drm_i915_gem_request __rcu *first_signal;
		struct timer_list fake_irq; /* used after a missed interrupt */
		struct timer_list hangcheck; /* detect missed interrupts */

		unsigned int hangcheck_interrupts;

		bool irq_enabled : 1;
		I915_SELFTEST_DECLARE(bool mock : 1);
	} breadcrumbs;
	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 */
	struct i915_gem_batch_pool batch_pool;
	struct intel_hw_status_page status_page;
	struct i915_ctx_workarounds wa_ctx;
	struct i915_vma *scratch;
	u32 irq_keep_mask; /* always keep these interrupts */
	u32 irq_enable_mask; /* bitmask to enable ring interrupt */
	void (*irq_enable)(struct intel_engine_cs *engine);
	void (*irq_disable)(struct intel_engine_cs *engine);
	int (*init_hw)(struct intel_engine_cs *engine);
	void (*reset_hw)(struct intel_engine_cs *engine,
			 struct drm_i915_gem_request *req);

	void (*set_default_submission)(struct intel_engine_cs *engine);
	struct intel_ring *(*context_pin)(struct intel_engine_cs *engine,
					  struct i915_gem_context *ctx);
	void (*context_unpin)(struct intel_engine_cs *engine,
			      struct i915_gem_context *ctx);
	int (*request_alloc)(struct drm_i915_gem_request *req);
	int (*init_context)(struct drm_i915_gem_request *req);
	int (*emit_flush)(struct drm_i915_gem_request *request, u32 mode);
#define EMIT_INVALIDATE	BIT(0)
#define EMIT_FLUSH	BIT(1)
#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)
	int (*emit_bb_start)(struct drm_i915_gem_request *req,
			     u64 offset, u32 length,
			     unsigned int dispatch_flags);
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
#define I915_DISPATCH_RS     BIT(2)
	void (*emit_breadcrumb)(struct drm_i915_gem_request *req, u32 *cs);
	int emit_breadcrumb_sz;
	/* Pass the request to the hardware queue (e.g. directly into
	 * the legacy ringbuffer or to the end of an execlist).
	 *
	 * This is called from an atomic context with irqs disabled; must
	 * be irq safe.
	 */
	void (*submit_request)(struct drm_i915_gem_request *req);
	/* Call when the priority on a request has changed and it and its
	 * dependencies may need rescheduling. Note the request itself may
	 * not be ready to run!
	 *
	 * Called under the struct_mutex.
	 */
	void (*schedule)(struct drm_i915_gem_request *request,
			 int priority);
	/*
	 * Cancel all requests on the hardware, or queued for execution.
	 * This should only cancel the ready requests that have been
	 * submitted to the engine (via the engine->submit_request callback).
	 * This is called when marking the device as wedged.
	 */
	void (*cancel_requests)(struct intel_engine_cs *engine);
	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	void (*irq_seqno_barrier)(struct intel_engine_cs *engine);
	void (*cleanup)(struct intel_engine_cs *engine);
	/* GEN8 signal/wait table - never trust comments!
	 *	  signal to	signal to	signal to	signal to	signal to
	 *	    RCS		   VCS		   BCS		  VECS		 VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) |  NOP (0x90) | VCS2 (0x98) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) |  NOP (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
	 *  ie. transpose of g(x, y)
	 *
	 *	 sync from	sync from	sync from	sync from	sync from
	 *	    RCS		   VCS		   BCS		  VECS		 VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) |  NOP (0x90) | VCS2 (0xb8) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) |  NOP (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
	 *  ie. transpose of f(x, y)
	 */
	struct {
		union {
#define GEN6_SEMAPHORE_LAST	VECS_HW
#define GEN6_NUM_SEMAPHORES	(GEN6_SEMAPHORE_LAST + 1)
#define GEN6_SEMAPHORES_MASK	GENMASK(GEN6_SEMAPHORE_LAST, 0)
			struct {
				/* our mbox written by others */
				u32		wait[GEN6_NUM_SEMAPHORES];
				/* mboxes this ring signals to */
				i915_reg_t	signal[GEN6_NUM_SEMAPHORES];
			} mbox;
			u64		signal_ggtt[I915_NUM_ENGINES];
		};

		int	(*sync_to)(struct drm_i915_gem_request *req,
				   struct drm_i915_gem_request *signal);
		u32	*(*signal)(struct drm_i915_gem_request *req, u32 *cs);
	} semaphore;
	struct intel_engine_execlists execlists;
	/* Contexts are pinned whilst they are active on the GPU. The last
	 * context executed remains active whilst the GPU is idle - the
	 * switch away and write to the context object only occurs on the
	 * next execution. Contexts are only unpinned on retirement of the
	 * following request ensuring that we can always write to the object
	 * on the context switch even after idling. Across suspend, we switch
	 * to the kernel context and trash it as the save may not happen
	 * before the hardware is powered down.
	 */
	struct i915_gem_context *last_retired_context;
	/* We track the current MI_SET_CONTEXT in order to eliminate
	 * redundant context switches. This presumes that requests are not
	 * reordered! Or when they are the tracking is updated along with
	 * the emission of individual requests into the legacy command
	 * stream (ring).
	 */
	struct i915_gem_context *legacy_active_context;
	/* status_notifier: list of callbacks for context-switch changes */
	struct atomic_notifier_head context_status_notifier;

	struct intel_engine_hangcheck hangcheck;
	unsigned int flags;
#define I915_ENGINE_USING_CMD_PARSER	BIT(0)
#define I915_ENGINE_REQUIRES_CMD_PARSER	BIT(3)
	/*
	 * Table of commands the command parser needs to know about
	 * for this engine.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);
	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_table *reg_tables;
	int reg_table_count;
	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the engine's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-engine length
	 * field encoding for the command (i.e. different opcode ranges use
	 * certain bits to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
};
static inline bool
intel_engine_using_cmd_parser(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_USING_CMD_PARSER;
}
static inline bool
intel_engine_requires_cmd_parser(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_REQUIRES_CMD_PARSER;
}
static inline void
execlists_set_active(struct intel_engine_execlists *execlists,
		     unsigned int bit)
{
	__set_bit(bit, (unsigned long *)&execlists->active);
}
static inline void
execlists_clear_active(struct intel_engine_execlists *execlists,
		       unsigned int bit)
{
	__clear_bit(bit, (unsigned long *)&execlists->active);
}
static inline bool
execlists_is_active(const struct intel_engine_execlists *execlists,
		    unsigned int bit)
{
	return test_bit(bit, (unsigned long *)&execlists->active);
}
static inline unsigned int
execlists_num_ports(const struct intel_engine_execlists * const execlists)
{
	return execlists->port_mask + 1;
}
static inline void
execlists_port_complete(struct intel_engine_execlists * const execlists,
			struct execlist_port * const port)
{
	const unsigned int m = execlists->port_mask;

	GEM_BUG_ON(port_index(port, execlists) != 0);
	GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));

	memmove(port, port + 1, m * sizeof(struct execlist_port));
	memset(port + m, 0, sizeof(struct execlist_port));
}
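
/* Illustration (not normative): with EXECLIST_MAX_PORTS == 2, completing the
 * context in port[0] shifts port[1] down into port[0] via the memmove() above
 * and clears the now vacant last port, so port[0] always holds the oldest
 * in-flight context.
 */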
static inline unsigned int
intel_engine_flag(const struct intel_engine_cs *engine)
{
	return BIT(engine->id);
}
static inline u32
intel_read_status_page(struct intel_engine_cs *engine, int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	return READ_ONCE(engine->status_page.page_addr[reg]);
}
static inline void
intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
{
	/* Writing into the status page should be done sparingly. Since
	 * we do so when we are uncertain of the device state, we take a bit
	 * of extra paranoia to try and ensure that the HWS takes the value
	 * we give and that it doesn't end up trapped inside the CPU!
	 */
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		mb();
		clflush(&engine->status_page.page_addr[reg]);
		engine->status_page.page_addr[reg] = value;
		clflush(&engine->status_page.page_addr[reg]);
		mb();
	} else {
		WRITE_ONCE(engine->status_page.page_addr[reg], value);
	}
}
/*
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x30
#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_SCRATCH_INDEX	0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)

#define I915_HWS_CSB_BUF0_INDEX		0x10
#define I915_HWS_CSB_WRITE_INDEX	0x1f
#define CNL_HWS_CSB_WRITE_INDEX		0x2f
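
/* Worked example (assuming MI_STORE_DWORD_INDEX_SHIFT converts a dword index
 * to a byte offset, i.e. a shift of 2): the seqno dword at index 0x30 lives at
 * byte offset 0x30 << 2 == 0xC0 within the status page, which is what
 * I915_GEM_HWS_INDEX_ADDR and intel_hws_seqno_address() below hand to the
 * hardware.
 */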
struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size);
int intel_ring_pin(struct intel_ring *ring,
		   struct drm_i915_private *i915,
		   unsigned int offset_bias);
void intel_ring_reset(struct intel_ring *ring, u32 tail);
unsigned int intel_ring_update_space(struct intel_ring *ring);
void intel_ring_unpin(struct intel_ring *ring);
void intel_ring_free(struct intel_ring *ring);
void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);

void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);
int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);

u32 __must_check *intel_ring_begin(struct drm_i915_gem_request *req,
				   unsigned int n);
static inline void
intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs)
{
	/* Dummy function.
	 *
	 * This serves as a placeholder in the code so that the reader
	 * can compare against the preceding intel_ring_begin() and
	 * check that the number of dwords emitted matches the space
	 * reserved for the command packet (i.e. the value passed to
	 * intel_ring_begin()).
	 */
	GEM_BUG_ON((req->ring->vaddr + req->ring->emit) != cs);
}
static inline u32
intel_ring_wrap(const struct intel_ring *ring, u32 pos)
{
	return pos & (ring->size - 1);
}
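
/* The mask trick relies on ring->size being a power of two: e.g. with a
 * 4096 byte ring, a position of 4100 wraps to 4100 & 4095 == 4, while any
 * in-range position is returned unchanged.
 */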
static inline u32
intel_ring_offset(const struct drm_i915_gem_request *req, void *addr)
{
	/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
	u32 offset = addr - req->ring->vaddr;

	GEM_BUG_ON(offset > req->ring->size);
	return intel_ring_wrap(req->ring, offset);
}
static inline void
assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
{
	/* We could combine these into a single tail operation, but keeping
	 * them as separate tests will help identify the cause should one
	 * ever fire.
	 */
	GEM_BUG_ON(!IS_ALIGNED(tail, 8));
	GEM_BUG_ON(tail >= ring->size);

	/*
	 * "Ring Buffer Use"
	 *	Gen2 BSpec "1. Programming Environment" / 1.4.4.6
	 *	Gen3 BSpec "1c Memory Interface Functions" / 2.3.4.5
	 *	Gen4+ BSpec "1c Memory Interface and Command Stream" / 5.3.4.5
	 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
	 * same cacheline, the Head Pointer must not be greater than the Tail
	 * Pointer."
	 *
	 * We use ring->head as the last known location of the actual RING_HEAD;
	 * it may have advanced since, but in the worst case it has not moved,
	 * and so we must never program RING_TAIL to advance into the same
	 * cacheline as ring->head.
	 */
#define cacheline(a) round_down(a, CACHELINE_BYTES)
	GEM_BUG_ON(cacheline(tail) == cacheline(ring->head) &&
		   tail < ring->head);
#undef cacheline
}
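
/* Example of the failure mode being guarded against (CACHELINE_BYTES == 64):
 * a last known head of 0x1048 and a new tail of 0x1040 share the cacheline
 * starting at 0x1040 with tail < head, which the GEM_BUG_ON above rejects.
 */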
static inline unsigned int
intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
{
	/* Whilst writes to the tail are strictly ordered, there is no
	 * serialisation between readers and the writers. The tail may be
	 * read by i915_gem_request_retire() just as it is being updated
	 * by execlists, as although the breadcrumb is complete, the context
	 * switch hasn't been seen.
	 */
	assert_ring_tail_valid(ring, tail);
	ring->tail = tail;
	return tail;
}
void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);

void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);
int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);
u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine);
static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
{
	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
}
static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
{
	/* We are only peeking at the tail of the submit queue (and not the
	 * queue itself) in order to gain a hint as to the current active
	 * state of the engine. Callers are not expected to be taking
	 * engine->timeline->lock, nor are they expected to be concerned
	 * with serialising this hint with anything, so document it as
	 * a hint and nothing more.
	 */
	return READ_ONCE(engine->timeline->seqno);
}
int init_workarounds_ring(struct intel_engine_cs *engine);
int intel_ring_workarounds_emit(struct drm_i915_gem_request *req);
void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone);
/*
 * Arbitrary size for largest possible 'add request' sequence. The code paths
 * are complex and variable. Empirical measurement shows that the worst case
 * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However,
 * we need to allocate double the largest single packet within that emission
 * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW).
 */
#define MIN_SPACE_FOR_ADD_REQUEST 336
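
/* Sanity check of the arithmetic above: (6 + 6 + 72) dwords == 84 dwords,
 * and 84 * sizeof(u32) == 336 bytes, which is the value reserved here.
 */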
static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
{
	return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR;
}
/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
static inline void intel_wait_init(struct intel_wait *wait,
				   struct drm_i915_gem_request *rq)
{
	wait->tsk = current;
	wait->request = rq;
}
static inline void intel_wait_init_for_seqno(struct intel_wait *wait, u32 seqno)
{
	wait->tsk = current;
	wait->seqno = seqno;
}
static inline bool intel_wait_has_seqno(const struct intel_wait *wait)
{
	return wait->seqno;
}
static inline bool
intel_wait_update_seqno(struct intel_wait *wait, u32 seqno)
{
	wait->seqno = seqno;
	return intel_wait_has_seqno(wait);
}
static inline bool
intel_wait_update_request(struct intel_wait *wait,
			  const struct drm_i915_gem_request *rq)
{
	return intel_wait_update_seqno(wait, i915_gem_request_global_seqno(rq));
}
static inline bool
intel_wait_check_seqno(const struct intel_wait *wait, u32 seqno)
{
	return wait->seqno == seqno;
}
static inline bool
intel_wait_check_request(const struct intel_wait *wait,
			 const struct drm_i915_gem_request *rq)
{
	return intel_wait_check_seqno(wait, i915_gem_request_global_seqno(rq));
}
static inline bool intel_wait_complete(const struct intel_wait *wait)
{
	return RB_EMPTY_NODE(&wait->node);
}
bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait);
void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait);
void intel_engine_enable_signaling(struct drm_i915_gem_request *request,
				   bool wakeup);
void intel_engine_cancel_signaling(struct drm_i915_gem_request *request);
static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
{
	return READ_ONCE(engine->breadcrumbs.irq_wait);
}
unsigned int intel_engine_wakeup(struct intel_engine_cs *engine);
#define ENGINE_WAKEUP_WAITER BIT(0)
#define ENGINE_WAKEUP_ASLEEP BIT(1)
void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);

void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
bool intel_breadcrumbs_busy(struct intel_engine_cs *engine);
static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
{
	memset(batch, 0, 6 * sizeof(u32));

	batch[0] = GFX_OP_PIPE_CONTROL(6);
	batch[1] = flags;
	batch[2] = offset;

	return batch + 6;
}
bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct drm_i915_private *dev_priv);

bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine);
void intel_engines_mark_idle(struct drm_i915_private *i915);
void intel_engines_reset_default_submission(struct drm_i915_private *i915);
bool intel_engine_can_store_dword(struct intel_engine_cs *engine);

void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *p);
#endif /* _INTEL_RINGBUFFER_H_ */