/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>
#include "i915_gem_batch_pool.h"
#include "i915_gem_request.h"
#include "i915_gem_timeline.h"
#include "i915_selftest.h"

struct drm_printer;

#define I915_CMD_HASH_ORDER 9

/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))

struct intel_hw_status_page {
	struct i915_vma *vma;
	u32 *page_addr;
	u32 ggtt_offset;
};

#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
#define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val)

#define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base))
#define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val)

#define I915_READ_HEAD(engine) I915_READ(RING_HEAD((engine)->mmio_base))
#define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val)

#define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base))
#define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val)

#define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base))
#define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val)

#define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base))
#define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val)

/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
 * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
 */
#define gen8_semaphore_seqno_size sizeof(uint64_t)
#define GEN8_SEMAPHORE_OFFSET(__from, __to) \
	(((__from) * I915_NUM_ENGINES + (__to)) * gen8_semaphore_seqno_size)
#define GEN8_SIGNAL_OFFSET(__ring, to) \
	(dev_priv->semaphore->node.start + \
	 GEN8_SEMAPHORE_OFFSET((__ring)->id, (to)))
#define GEN8_WAIT_OFFSET(__ring, from) \
	(dev_priv->semaphore->node.start + \
	 GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))
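
/*
 * Worked example of the offset arithmetic above (an illustrative sketch,
 * assuming I915_NUM_ENGINES == 5 and the 8-byte slot size defined above):
 *
 *	GEN8_SEMAPHORE_OFFSET(1, 2) == (1 * 5 + 2) * 8 == 0x38
 *
 * i.e. every (from, to) engine pair owns its own qword slot in the semaphore
 * object, addressed relative to dev_priv->semaphore->node.start.
 */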

enum intel_engine_hangcheck_action {
	ENGINE_IDLE = 0,
	ENGINE_WAIT,
	ENGINE_ACTIVE_SEQNO,
	ENGINE_ACTIVE_HEAD,
	ENGINE_ACTIVE_SUBUNITS,
	ENGINE_WAIT_KICK,
	ENGINE_DEAD,
};

static inline const char *
hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
{
	switch (a) {
	case ENGINE_IDLE:
		return "idle";
	case ENGINE_WAIT:
		return "wait";
	case ENGINE_ACTIVE_SEQNO:
		return "active seqno";
	case ENGINE_ACTIVE_HEAD:
		return "active head";
	case ENGINE_ACTIVE_SUBUNITS:
		return "active subunits";
	case ENGINE_WAIT_KICK:
		return "wait kick";
	case ENGINE_DEAD:
		return "dead";
	}

	return "unknown";
}

#define I915_MAX_SLICES 3
#define I915_MAX_SUBSLICES 3

#define instdone_slice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)

#define instdone_subslice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask)

#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
	for ((slice__) = 0, (subslice__) = 0; \
	     (slice__) < I915_MAX_SLICES; \
	     (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
	       (slice__) += ((subslice__) == 0)) \
		for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
			    (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))
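
/*
 * Typical use of the iterator above (an illustrative sketch, not driver code;
 * intel_engine_get_instdone() is declared later in this header):
 *
 *	struct intel_instdone instdone;
 *	int slice, subslice;
 *
 *	intel_engine_get_instdone(engine, &instdone);
 *	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
 *		pr_debug("sampler[%d][%d] = 0x%08x\n", slice, subslice,
 *			 instdone.sampler[slice][subslice]);
 */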

struct intel_instdone {
	u32 instdone;
	/* The following exist only in the RCS engine */
	u32 slice_common;
	u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
	u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
};

struct intel_engine_hangcheck {
	u64 acthd;
	u32 seqno;
	enum intel_engine_hangcheck_action action;
	unsigned long action_timestamp;
	int deadlock;
	struct intel_instdone instdone;
	struct drm_i915_gem_request *active_request;
	bool stalled;
};

struct intel_ring {
	struct i915_vma *vma;
	void *vaddr;

	struct list_head request_list;

	u32 head;
	u32 tail;
	u32 emit;

	u32 space;
	u32 size;
	u32 effective_size;
};

struct i915_gem_context;
struct drm_i915_reg_table;

/*
 * We use a single page to load ctx workarounds, so all of these
 * values are expressed in dwords.
 *
 * struct i915_wa_ctx_bb:
 *  offset: specifies the batch starting position; also helpful in case
 *    we want to have multiple batches at different offsets based on
 *    some criteria. It is not a requirement at the moment but provides
 *    an option for future use.
 *  size: size of the batch in DWORDS
 */
struct i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;
		u32 size;
	} indirect_ctx, per_ctx;
	struct i915_vma *vma;
};

struct drm_i915_gem_request;
struct intel_render_state;

/*
 * Engine IDs definitions.
 * Keep instances of the same type engine together.
 */
enum intel_engine_id {
	RCS = 0,
	BCS,
	VCS,
	VCS2,
#define _VCS(n) (VCS + (n))
	VECS
};

struct i915_priolist {
	struct rb_node node;
	struct list_head requests;
	int priority;
};

/**
 * struct intel_engine_execlists - execlist submission queue and port state
 *
 * The struct intel_engine_execlists represents the combined logical state of
 * the driver and the hardware for the execlist mode of submission.
 */
struct intel_engine_execlists {
	/**
	 * @irq_tasklet: softirq tasklet for bottom handler
	 */
	struct tasklet_struct irq_tasklet;

	/**
	 * @default_priolist: priority list for I915_PRIORITY_NORMAL
	 */
	struct i915_priolist default_priolist;

	/**
	 * @no_priolist: priority lists disabled
	 */
	bool no_priolist;

	/**
	 * @port: execlist port states
	 *
	 * For each hardware ELSP (ExecList Submission Port) we keep
	 * track of the last request and the number of times we submitted
	 * that port to hw. We then count the number of times the hw reports
	 * a context completion or preemption. As only one context can
	 * be active on hw, we limit resubmission of a context to port[0].
	 * This is called a Lite Restore of the context.
	 */
	struct execlist_port {
		/**
		 * @request_count: combined request and submission count
		 */
		struct drm_i915_gem_request *request_count;
#define EXECLIST_COUNT_BITS 2
#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
#define port_set(p, packed) ((p)->request_count = (packed))
#define port_isset(p) ((p)->request_count)
#define port_index(p, execlists) ((p) - (execlists)->port)

		/**
		 * @context_id: context ID for port
		 */
		GEM_DEBUG_DECL(u32 context_id);

#define EXECLIST_MAX_PORTS 2
	} port[EXECLIST_MAX_PORTS];
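
	/*
	 * The port_* helpers above pack a small submission count into the
	 * low bits of the request pointer. An illustrative (non-driver)
	 * sequence, assuming "rq" is the request last written to the ELSP:
	 *
	 *	port_set(&port[0], port_pack(rq, 1));	(first submission)
	 *	port_set(&port[0], port_pack(rq, 2));	(lite restore of rq)
	 *	rq = port_unpack(&port[0], &count);	(count is now 2)
	 *
	 * Each context completion event reported by the hardware then
	 * decrements the count until the port can be retired.
	 */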

	/**
	 * @active: is the HW active? We consider the HW as active after
	 * submitting any context for execution and until we have seen the
	 * last context completion event. After that, we do not expect any
	 * more events until we submit, and so can park the HW.
	 *
	 * As we have a small number of different sources from which we feed
	 * the HW, we track the state of each inside a single bitfield.
	 */
	unsigned int active;
#define EXECLISTS_ACTIVE_USER 0
#define EXECLISTS_ACTIVE_PREEMPT 1

	/**
	 * @port_mask: number of execlist ports - 1
	 */
	unsigned int port_mask;

	/**
	 * @queue: queue of requests, in priority lists
	 */
	struct rb_root queue;

	/**
	 * @first: leftmost level in priority @queue
	 */
	struct rb_node *first;

	/**
	 * @fw_domains: forcewake domains for irq tasklet
	 */
	unsigned int fw_domains;

	/**
	 * @csb_head: context status buffer head
	 */
	unsigned int csb_head;

	/**
	 * @csb_use_mmio: access csb through mmio, instead of hwsp
	 */
	bool csb_use_mmio;
};

#define INTEL_ENGINE_CS_MAX_NAME 8

struct intel_engine_cs {
	struct drm_i915_private *i915;
	char name[INTEL_ENGINE_CS_MAX_NAME];

	enum intel_engine_id id;
	unsigned int hw_id;
	unsigned int guc_id;

	u8 uabi_id;
	u8 uabi_class;

	u8 class;
	u8 instance;
	u32 context_size;
	u32 mmio_base;
	unsigned int irq_shift;

	struct intel_ring *buffer;
	struct intel_timeline *timeline;

	struct intel_render_state *render_state;

	atomic_t irq_count;
	unsigned long irq_posted;
#define ENGINE_IRQ_BREADCRUMB 0
#define ENGINE_IRQ_EXECLIST 1

	/* Rather than have every client wait upon all user interrupts,
	 * with the herd waking after every interrupt and each doing the
	 * heavyweight seqno dance, we delegate the task (of being the
	 * bottom-half of the user interrupt) to the first client. After
	 * every interrupt, we wake up one client, who does the heavyweight
	 * coherent seqno read and either goes back to sleep (if incomplete),
	 * or wakes up all the completed clients in parallel, before then
	 * transferring the bottom-half status to the next client in the queue.
	 *
	 * Compared to walking the entire list of waiters in a single dedicated
	 * bottom-half, we reduce the latency of the first waiter by avoiding
	 * a context switch, but incur additional coherent seqno reads when
	 * following the chain of request breadcrumbs. Since it is most likely
	 * that we have a single client waiting on each seqno, then reducing
	 * the overhead of waking that client is much preferred.
	 */
	struct intel_breadcrumbs {
		spinlock_t irq_lock; /* protects irq_*; irqsafe */
		struct intel_wait *irq_wait; /* oldest waiter by retirement */

		spinlock_t rb_lock; /* protects the rb and wraps irq_lock */
		struct rb_root waiters; /* sorted by retirement, priority */
		struct rb_root signals; /* sorted by retirement */
		struct task_struct *signaler; /* used for fence signalling */
		struct drm_i915_gem_request __rcu *first_signal;
		struct timer_list fake_irq; /* used after a missed interrupt */
		struct timer_list hangcheck; /* detect missed interrupts */

		unsigned int hangcheck_interrupts;

		bool irq_armed : 1;
		bool irq_enabled : 1;
		I915_SELFTEST_DECLARE(bool mock : 1);
	} breadcrumbs;

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 */
	struct i915_gem_batch_pool batch_pool;

	struct intel_hw_status_page status_page;
	struct i915_ctx_workarounds wa_ctx;
	struct i915_vma *scratch;

	u32 irq_keep_mask; /* always keep these interrupts */
	u32 irq_enable_mask; /* bitmask to enable ring interrupt */
	void (*irq_enable)(struct intel_engine_cs *engine);
	void (*irq_disable)(struct intel_engine_cs *engine);

	int (*init_hw)(struct intel_engine_cs *engine);
	void (*reset_hw)(struct intel_engine_cs *engine,
			 struct drm_i915_gem_request *req);

	void (*set_default_submission)(struct intel_engine_cs *engine);

	struct intel_ring *(*context_pin)(struct intel_engine_cs *engine,
					  struct i915_gem_context *ctx);
	void (*context_unpin)(struct intel_engine_cs *engine,
			      struct i915_gem_context *ctx);
	int (*request_alloc)(struct drm_i915_gem_request *req);
	int (*init_context)(struct drm_i915_gem_request *req);

	int (*emit_flush)(struct drm_i915_gem_request *request,
			  u32 mode);
#define EMIT_INVALIDATE BIT(0)
#define EMIT_FLUSH BIT(1)
#define EMIT_BARRIER (EMIT_INVALIDATE | EMIT_FLUSH)
	int (*emit_bb_start)(struct drm_i915_gem_request *req,
			     u64 offset, u32 length,
			     unsigned int dispatch_flags);
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
#define I915_DISPATCH_RS BIT(2)
	void (*emit_breadcrumb)(struct drm_i915_gem_request *req,
				u32 *cs);
	int emit_breadcrumb_sz;

	/* Pass the request to the hardware queue (e.g. directly into
	 * the legacy ringbuffer or to the end of an execlist).
	 *
	 * This is called from an atomic context with irqs disabled; must
	 * be irq safe.
	 */
	void (*submit_request)(struct drm_i915_gem_request *req);

	/* Call when the priority on a request has changed and it and its
	 * dependencies may need rescheduling. Note the request itself may
	 * not be ready to run!
	 *
	 * Called under the struct_mutex.
	 */
	void (*schedule)(struct drm_i915_gem_request *request,
			 int priority);

	/*
	 * Cancel all requests on the hardware, or queued for execution.
	 * This should only cancel the ready requests that have been
	 * submitted to the engine (via the engine->submit_request callback).
	 * This is called when marking the device as wedged.
	 */
	void (*cancel_requests)(struct intel_engine_cs *engine);

	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	void (*irq_seqno_barrier)(struct intel_engine_cs *engine);
	void (*cleanup)(struct intel_engine_cs *engine);

	/* GEN8 signal/wait table - never trust comments!
	 *        signal to     signal to     signal to     signal to     signal to
	 *          RCS            VCS           BCS           VECS          VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
	 *      |-------------------------------------------------------------------
	 *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
	 *      |-------------------------------------------------------------------
	 *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
	 *      |-------------------------------------------------------------------
	 * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) | NOP  (0x90) | VCS2 (0x98) |
	 *      |-------------------------------------------------------------------
	 * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP  (0xc0) |
	 *      |-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
	 *  ie. transpose of g(x, y)
	 *
	 *        sync from     sync from     sync from     sync from     sync from
	 *          RCS            VCS           BCS           VECS          VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
	 *      |-------------------------------------------------------------------
	 *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
	 *      |-------------------------------------------------------------------
	 *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
	 *      |-------------------------------------------------------------------
	 * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) | NOP  (0x90) | VCS2 (0xb8) |
	 *      |-------------------------------------------------------------------
	 * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) | NOP  (0xc0) |
	 *      |-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
	 *  ie. transpose of f(x, y)
	 */
	struct {
		union {
#define GEN6_SEMAPHORE_LAST VECS_HW
#define GEN6_NUM_SEMAPHORES (GEN6_SEMAPHORE_LAST + 1)
#define GEN6_SEMAPHORES_MASK GENMASK(GEN6_SEMAPHORE_LAST, 0)
			struct {
				/* our mbox written by others */
				u32 wait[GEN6_NUM_SEMAPHORES];
				/* mboxes this ring signals to */
				i915_reg_t signal[GEN6_NUM_SEMAPHORES];
			} mbox;
			u64 signal_ggtt[I915_NUM_ENGINES];
		};

		/* AKA wait() */
		int (*sync_to)(struct drm_i915_gem_request *req,
			       struct drm_i915_gem_request *signal);
		u32 *(*signal)(struct drm_i915_gem_request *req, u32 *cs);
	} semaphore;

	struct intel_engine_execlists execlists;

	/* Contexts are pinned whilst they are active on the GPU. The last
	 * context executed remains active whilst the GPU is idle - the
	 * switch away and write to the context object only occurs on the
	 * next execution. Contexts are only unpinned on retirement of the
	 * following request ensuring that we can always write to the object
	 * on the context switch even after idling. Across suspend, we switch
	 * to the kernel context and trash it as the save may not happen
	 * before the hardware is powered down.
	 */
	struct i915_gem_context *last_retired_context;

	/* We track the current MI_SET_CONTEXT in order to eliminate
	 * redundant context switches. This presumes that requests are not
	 * reordered! Or when they are, the tracking is updated along with
	 * the emission of individual requests into the legacy command
	 * stream (ring).
	 */
	struct i915_gem_context *legacy_active_context;

	/* status_notifier: list of callbacks for context-switch changes */
	struct atomic_notifier_head context_status_notifier;

	struct intel_engine_hangcheck hangcheck;

#define I915_ENGINE_USING_CMD_PARSER BIT(0)
#define I915_ENGINE_REQUIRES_CMD_PARSER BIT(3)
	unsigned int flags;

	/*
	 * Table of commands the command parser needs to know about
	 * for this engine.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_table *reg_tables;
	int reg_table_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the engine's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-engine length
	 * field encoding for the command (i.e. different opcode ranges use
	 * certain bits to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
};

static inline bool
intel_engine_using_cmd_parser(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_USING_CMD_PARSER;
}

static inline bool
intel_engine_requires_cmd_parser(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_REQUIRES_CMD_PARSER;
}

static inline void
execlists_set_active(struct intel_engine_execlists *execlists,
		     unsigned int bit)
{
	__set_bit(bit, (unsigned long *)&execlists->active);
}

static inline void
execlists_clear_active(struct intel_engine_execlists *execlists,
		       unsigned int bit)
{
	__clear_bit(bit, (unsigned long *)&execlists->active);
}

static inline bool
execlists_is_active(const struct intel_engine_execlists *execlists,
		    unsigned int bit)
{
	return test_bit(bit, (unsigned long *)&execlists->active);
}

static inline unsigned int
execlists_num_ports(const struct intel_engine_execlists * const execlists)
{
	return execlists->port_mask + 1;
}

static inline void
execlists_port_complete(struct intel_engine_execlists * const execlists,
			struct execlist_port * const port)
{
	const unsigned int m = execlists->port_mask;

	GEM_BUG_ON(port_index(port, execlists) != 0);
	GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));

	memmove(port, port + 1, m * sizeof(struct execlist_port));
	memset(port + m, 0, sizeof(struct execlist_port));
}
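
/*
 * Sketch of how a completion event is typically consumed (simplified; the
 * real consumer is the execlists interrupt tasklet, and
 * csb_reports_completion() is a hypothetical stand-in for parsing the
 * context status buffer):
 *
 *	struct execlist_port *port = execlists->port;
 *
 *	if (csb_reports_completion(port_request(port)))
 *		execlists_port_complete(execlists, port);
 *
 * After the call, port[0] holds what was previously in port[1] and the
 * last port is cleared.
 */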

static inline unsigned int
intel_engine_flag(const struct intel_engine_cs *engine)
{
	return BIT(engine->id);
}

static inline u32
intel_read_status_page(struct intel_engine_cs *engine, int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	return READ_ONCE(engine->status_page.page_addr[reg]);
}

static inline void
intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
{
	/* Writing into the status page should be done sparingly. Since
	 * we do so when we are uncertain of the device state, we take a bit
	 * of extra paranoia to try to ensure that the HWS takes the value
	 * we give and that it doesn't end up trapped inside the CPU!
	 */
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		mb();
		clflush(&engine->status_page.page_addr[reg]);
		engine->status_page.page_addr[reg] = value;
		clflush(&engine->status_page.page_addr[reg]);
		mb();
	} else {
		WRITE_ONCE(engine->status_page.page_addr[reg], value);
	}
}

/*
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x30
#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_SCRATCH_INDEX	0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
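
/*
 * The *_ADDR variants above turn a dword index in the status page into the
 * byte offset consumed by MI_STORE_DWORD_INDEX; assuming
 * MI_STORE_DWORD_INDEX_SHIFT == 2, I915_GEM_HWS_INDEX_ADDR is
 * 0x30 << 2 == 0xc0, which is what intel_hws_seqno_address() below adds to
 * the status page's ggtt offset.
 */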

#define I915_HWS_CSB_BUF0_INDEX		0x10
#define I915_HWS_CSB_WRITE_INDEX	0x1f
#define CNL_HWS_CSB_WRITE_INDEX		0x2f

struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size);
int intel_ring_pin(struct intel_ring *ring,
		   struct drm_i915_private *i915,
		   unsigned int offset_bias);
void intel_ring_reset(struct intel_ring *ring, u32 tail);
unsigned int intel_ring_update_space(struct intel_ring *ring);
void intel_ring_unpin(struct intel_ring *ring);
void intel_ring_free(struct intel_ring *ring);

void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);

void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);

int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);

u32 __must_check *intel_ring_begin(struct drm_i915_gem_request *req,
				   unsigned int n);

static inline void
intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs)
{
	/* Dummy function.
	 *
	 * This serves as a placeholder in the code so that the reader
	 * can compare against the preceding intel_ring_begin() and
	 * check that the number of dwords emitted matches the space
	 * reserved for the command packet (i.e. the value passed to
	 * intel_ring_begin()).
	 */
	GEM_BUG_ON((req->ring->vaddr + req->ring->emit) != cs);
}
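
/*
 * Typical command emission pattern built from the helpers above (an
 * illustrative sketch; the real users live in the submission backends):
 *
 *	u32 *cs;
 *
 *	cs = intel_ring_begin(req, 4);
 *	if (IS_ERR(cs))
 *		return PTR_ERR(cs);
 *
 *	*cs++ = MI_NOOP;
 *	*cs++ = MI_NOOP;
 *	*cs++ = MI_NOOP;
 *	*cs++ = MI_NOOP;
 *	intel_ring_advance(req, cs);
 *
 * The number of dwords written must match the count passed to
 * intel_ring_begin(), which is exactly what the GEM_BUG_ON above checks.
 */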

static inline u32
intel_ring_wrap(const struct intel_ring *ring, u32 pos)
{
	return pos & (ring->size - 1);
}

static inline u32
intel_ring_offset(const struct drm_i915_gem_request *req, void *addr)
{
	/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
	u32 offset = addr - req->ring->vaddr;

	GEM_BUG_ON(offset > req->ring->size);
	return intel_ring_wrap(req->ring, offset);
}
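
/*
 * Worked example of the wrap above: with a 4096-byte ring,
 * intel_ring_wrap(ring, 4096) == (4096 & 4095) == 0, so an emission that
 * ends exactly at ring->size reports offset 0 rather than the size itself,
 * which is why intel_ring_offset() never hands ring->size back to a caller.
 */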

static inline void
assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
{
	/* We could combine these into a single tail operation, but keeping
	 * them as separate tests will help identify the cause should one
	 * ever fire.
	 */
	GEM_BUG_ON(!IS_ALIGNED(tail, 8));
	GEM_BUG_ON(tail >= ring->size);

	/*
	 * "Ring Buffer Use"
	 *	Gen2 BSpec "1. Programming Environment" / 1.4.4.6
	 *	Gen3 BSpec "1c Memory Interface Functions" / 2.3.4.5
	 *	Gen4+ BSpec "1c Memory Interface and Command Stream" / 5.3.4.5
	 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
	 * same cacheline, the Head Pointer must not be greater than the Tail
	 * Pointer."
	 *
	 * We use ring->head as the last known location of the actual RING_HEAD,
	 * it may have advanced but in the worst case it is equally the same
	 * as ring->head and so we should never program RING_TAIL to advance
	 * into the same cacheline as ring->head.
	 */
#define cacheline(a) round_down(a, CACHELINE_BYTES)
	GEM_BUG_ON(cacheline(tail) == cacheline(ring->head) &&
		   tail < ring->head);
#undef cacheline
}

static inline unsigned int
intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
{
	/* Whilst writes to the tail are strictly ordered, there is no
	 * serialisation between readers and the writers. The tail may be
	 * read by i915_gem_request_retire() just as it is being updated
	 * by execlists, as although the breadcrumb is complete, the context
	 * switch hasn't been seen.
	 */
	assert_ring_tail_valid(ring, tail);
	ring->tail = tail;
	return tail;
}

void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);

void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);

int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);

u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine);

static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
{
	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
}

static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
{
	/* We are only peeking at the tail of the submit queue (and not the
	 * queue itself) in order to gain a hint as to the current active
	 * state of the engine. Callers are not expected to be taking
	 * engine->timeline->lock, nor are they expected to be concerned
	 * with serialising this hint with anything, so document it as
	 * a hint and nothing more.
	 */
	return READ_ONCE(engine->timeline->seqno);
}

int init_workarounds_ring(struct intel_engine_cs *engine);
int intel_ring_workarounds_emit(struct drm_i915_gem_request *req);

void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone);

/*
 * Arbitrary size for largest possible 'add request' sequence. The code paths
 * are complex and variable. Empirical measurement shows that the worst case
 * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However,
 * we need to allocate double the largest single packet within that emission
 * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW).
 */
#define MIN_SPACE_FOR_ADD_REQUEST 336

static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
{
	return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR;
}

/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);

static inline void intel_wait_init(struct intel_wait *wait,
				   struct drm_i915_gem_request *rq)
{
	wait->tsk = current;
	wait->request = rq;
}

static inline void intel_wait_init_for_seqno(struct intel_wait *wait, u32 seqno)
{
	wait->tsk = current;
	wait->seqno = seqno;
}

static inline bool intel_wait_has_seqno(const struct intel_wait *wait)
{
	return wait->seqno;
}

static inline bool
intel_wait_update_seqno(struct intel_wait *wait, u32 seqno)
{
	wait->seqno = seqno;
	return intel_wait_has_seqno(wait);
}

static inline bool
intel_wait_update_request(struct intel_wait *wait,
			  const struct drm_i915_gem_request *rq)
{
	return intel_wait_update_seqno(wait, i915_gem_request_global_seqno(rq));
}

static inline bool
intel_wait_check_seqno(const struct intel_wait *wait, u32 seqno)
{
	return wait->seqno == seqno;
}

static inline bool
intel_wait_check_request(const struct intel_wait *wait,
			 const struct drm_i915_gem_request *rq)
{
	return intel_wait_check_seqno(wait, i915_gem_request_global_seqno(rq));
}

static inline bool intel_wait_complete(const struct intel_wait *wait)
{
	return RB_EMPTY_NODE(&wait->node);
}

bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait);
void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait);
void intel_engine_enable_signaling(struct drm_i915_gem_request *request,
				   bool wakeup);
void intel_engine_cancel_signaling(struct drm_i915_gem_request *request);

static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
{
	return READ_ONCE(engine->breadcrumbs.irq_wait);
}

unsigned int intel_engine_wakeup(struct intel_engine_cs *engine);
#define ENGINE_WAKEUP_WAITER BIT(0)
#define ENGINE_WAKEUP_ASLEEP BIT(1)

void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);

void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
bool intel_breadcrumbs_busy(struct intel_engine_cs *engine);

static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
{
	memset(batch, 0, 6 * sizeof(u32));

	batch[0] = GFX_OP_PIPE_CONTROL(6);
	batch[1] = flags;
	batch[2] = offset;

	return batch + 6;
}
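
/*
 * Illustrative use of gen8_emit_pipe_control() (a sketch; the flag names are
 * examples from i915_reg.h and the offset would normally point at per-engine
 * scratch space):
 *
 *	cs = gen8_emit_pipe_control(cs,
 *				    PIPE_CONTROL_CS_STALL |
 *				    PIPE_CONTROL_FLUSH_ENABLE,
 *				    i915_ggtt_offset(engine->scratch));
 */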

bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct drm_i915_private *dev_priv);

bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine);

void intel_engines_mark_idle(struct drm_i915_private *i915);
void intel_engines_reset_default_submission(struct drm_i915_private *i915);

bool intel_engine_can_store_dword(struct intel_engine_cs *engine);

void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *p);

#endif /* _INTEL_RINGBUFFER_H_ */