#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>
#include "i915_gem_batch_pool.h"
#include "i915_gem_request.h"
#include "i915_gem_timeline.h"

#define I915_CMD_HASH_ORDER 9

/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))

/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64

struct intel_hw_status_page {
	struct i915_vma *vma;
	u32 *page_addr;
	u32 ggtt_offset;
};

#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
#define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val)

#define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base))
#define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val)

#define I915_READ_HEAD(engine) I915_READ(RING_HEAD((engine)->mmio_base))
#define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val)

#define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base))
#define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val)

#define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base))
#define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val)

#define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base))
#define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val)

/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
 * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
 */
#define gen8_semaphore_seqno_size sizeof(uint64_t)
#define GEN8_SEMAPHORE_OFFSET(__from, __to) \
	(((__from) * I915_NUM_ENGINES + (__to)) * gen8_semaphore_seqno_size)
#define GEN8_SIGNAL_OFFSET(__ring, to) \
	(dev_priv->semaphore->node.start + \
	 GEN8_SEMAPHORE_OFFSET((__ring)->id, (to)))
#define GEN8_WAIT_OFFSET(__ring, from) \
	(dev_priv->semaphore->node.start + \
	 GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))
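
/*
 * Worked example for the macros above (illustrative only, assuming
 * I915_NUM_ENGINES == 5 and the engine ids declared later in this file,
 * i.e. RCS == 0 and VCS == 2): the slot RCS writes when signalling VCS is
 * GEN8_SIGNAL_OFFSET(rcs engine, VCS) == node.start +
 * GEN8_SEMAPHORE_OFFSET(0, 2) == node.start + (0 * 5 + 2) * 8, and the slot
 * VCS polls when waiting on RCS is GEN8_WAIT_OFFSET(vcs engine, RCS) ==
 * node.start + GEN8_SEMAPHORE_OFFSET(0, 2) as well; signaller and waiter
 * resolve to the same qword in the semaphore page.
 */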

enum intel_engine_hangcheck_action {
	ENGINE_IDLE = 0,
	ENGINE_WAIT,
	ENGINE_ACTIVE_SEQNO,
	ENGINE_ACTIVE_HEAD,
	ENGINE_ACTIVE_SUBUNITS,
	ENGINE_WAIT_KICK,
	ENGINE_DEAD,
};

static inline const char *
hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
{
	switch (a) {
	case ENGINE_IDLE:
		return "idle";
	case ENGINE_WAIT:
		return "wait";
	case ENGINE_ACTIVE_SEQNO:
		return "active seqno";
	case ENGINE_ACTIVE_HEAD:
		return "active head";
	case ENGINE_ACTIVE_SUBUNITS:
		return "active subunits";
	case ENGINE_WAIT_KICK:
		return "wait kick";
	case ENGINE_DEAD:
		return "dead";
	}

	return "unknown";
}

#define I915_MAX_SLICES	3
#define I915_MAX_SUBSLICES 3

#define instdone_slice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)

#define instdone_subslice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask)

#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
	for ((slice__) = 0, (subslice__) = 0; \
	     (slice__) < I915_MAX_SLICES; \
	     (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
	       (slice__) += ((subslice__) == 0)) \
		for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
			    (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))
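
/*
 * Illustrative use of the iterator above (a sketch, not lifted from a real
 * caller): walk the per-slice/subslice INSTDONE values captured in
 * struct intel_instdone, visiting only the slice/subslice pairs present in
 * the device's sseu masks.
 *
 *	int slice, subslice;
 *
 *	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
 *		DRM_DEBUG_DRIVER("sampler[%d][%d] = 0x%08x\n",
 *				 slice, subslice,
 *				 instdone->sampler[slice][subslice]);
 */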

struct intel_instdone {
	u32 instdone;
	/* The following exist only in the RCS engine */
	u32 slice_common;
	u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
	u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
};

struct intel_engine_hangcheck {
	u64 acthd;
	u32 seqno;
	enum intel_engine_hangcheck_action action;
	unsigned long action_timestamp;
	int deadlock;
	struct intel_instdone instdone;
	bool stalled;
};

struct intel_ring {
	struct i915_vma *vma;
	void *vaddr;

	struct intel_engine_cs *engine;

	struct list_head request_list;

	u32 head;
	u32 tail;
	int space;
	int size;
	int effective_size;

	/** We track the position of the requests in the ring buffer, and
	 * when each is retired we increment last_retired_head as the GPU
	 * must have finished processing the request and so we know we
	 * can advance the ringbuffer up to that position.
	 *
	 * last_retired_head is set to -1 after the value is consumed so
	 * we can detect new retirements.
	 */
	u32 last_retired_head;
};

struct i915_gem_context;
struct drm_i915_reg_table;

/*
 * We use a single page to load ctx workarounds so all of these
 * values are referred to in terms of dwords.
 *
 * struct i915_wa_ctx_bb:
 *  offset: specifies batch starting position, also helpful in case
 *    we want to have multiple batches at different offsets based on
 *    some criteria. It is not a requirement at the moment but provides
 *    an option for future use.
 *  size: size of the batch in DWORDS
 */
struct i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;
		u32 size;
	} indirect_ctx, per_ctx;
	struct i915_vma *vma;
};

struct drm_i915_gem_request;
struct intel_render_state;

struct intel_engine_cs {
	struct drm_i915_private *i915;
	const char *name;
	enum intel_engine_id {
		RCS = 0,
		BCS,
		VCS,
		VCS2,	/* Keep instances of the same type engine together. */
		VECS
	} id;
#define _VCS(n) (VCS + (n))
	unsigned int exec_id;
	enum intel_engine_hw_id {
		RCS_HW = 0,
		VCS_HW,
		BCS_HW,
		VECS_HW,
		VCS2_HW
	} hw_id;
	enum intel_engine_hw_id guc_id; /* XXX same as hw_id? */
	u32 mmio_base;
	unsigned int irq_shift;
	struct intel_ring *buffer;
	struct intel_timeline *timeline;

	struct intel_render_state *render_state;

	/* Rather than have every client wait upon all user interrupts,
	 * with the herd waking after every interrupt and each doing the
	 * heavyweight seqno dance, we delegate the task (of being the
	 * bottom-half of the user interrupt) to the first client. After
	 * every interrupt, we wake up one client, who does the heavyweight
	 * coherent seqno read and either goes back to sleep (if incomplete),
	 * or wakes up all the completed clients in parallel, before then
	 * transferring the bottom-half status to the next client in the queue.
	 *
	 * Compared to walking the entire list of waiters in a single dedicated
	 * bottom-half, we reduce the latency of the first waiter by avoiding
	 * a context switch, but incur additional coherent seqno reads when
	 * following the chain of request breadcrumbs. Since it is most likely
	 * that we have a single client waiting on each seqno, then reducing
	 * the overhead of waking that client is much preferred.
	 */
	struct intel_breadcrumbs {
		struct task_struct __rcu *irq_seqno_bh; /* bh for interrupts */
		bool irq_posted;

		spinlock_t lock; /* protects the lists of requests; irqsafe */
		struct rb_root waiters; /* sorted by retirement, priority */
		struct rb_root signals; /* sorted by retirement */
		struct intel_wait *first_wait; /* oldest waiter by retirement */
		struct task_struct *signaler; /* used for fence signalling */
		struct drm_i915_gem_request *first_signal;
		struct timer_list fake_irq; /* used after a missed interrupt */
		struct timer_list hangcheck; /* detect missed interrupts */

		unsigned long timeout;

		bool irq_enabled : 1;
		bool rpm_wakelock : 1;
	} breadcrumbs;

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 */
	struct i915_gem_batch_pool batch_pool;

	struct intel_hw_status_page status_page;
	struct i915_ctx_workarounds wa_ctx;
	struct i915_vma *scratch;

	u32 irq_keep_mask; /* always keep these interrupts */
	u32 irq_enable_mask; /* bitmask to enable ring interrupt */
	void (*irq_enable)(struct intel_engine_cs *engine);
	void (*irq_disable)(struct intel_engine_cs *engine);

	int (*init_hw)(struct intel_engine_cs *engine);
	void (*reset_hw)(struct intel_engine_cs *engine,
			 struct drm_i915_gem_request *req);

	int (*context_pin)(struct intel_engine_cs *engine,
			   struct i915_gem_context *ctx);
	void (*context_unpin)(struct intel_engine_cs *engine,
			      struct i915_gem_context *ctx);
	int (*request_alloc)(struct drm_i915_gem_request *req);
	int (*init_context)(struct drm_i915_gem_request *req);

	int (*emit_flush)(struct drm_i915_gem_request *request,
			  u32 mode);
#define EMIT_INVALIDATE	BIT(0)
#define EMIT_FLUSH	BIT(1)
#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)
	int (*emit_bb_start)(struct drm_i915_gem_request *req,
			     u64 offset, u32 length,
			     unsigned int dispatch_flags);
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
#define I915_DISPATCH_RS     BIT(2)
	void (*emit_breadcrumb)(struct drm_i915_gem_request *req,
				u32 *out);
	int emit_breadcrumb_sz;

	/* Pass the request to the hardware queue (e.g. directly into
	 * the legacy ringbuffer or to the end of an execlist).
	 *
	 * This is called from an atomic context with irqs disabled; must
	 * be irq safe.
	 */
	void (*submit_request)(struct drm_i915_gem_request *req);

	/* Call when the priority on a request has changed and it and its
	 * dependencies may need rescheduling. Note the request itself may
	 * not be ready to run!
	 *
	 * Called under the struct_mutex.
	 */
	void (*schedule)(struct drm_i915_gem_request *request,
			 int priority);

	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	void (*irq_seqno_barrier)(struct intel_engine_cs *engine);
	void (*cleanup)(struct intel_engine_cs *engine);

	/* GEN8 signal/wait table - never trust comments!
	 *	  signal to	signal to	signal to	signal to	signal to
	 *	    RCS		   VCS		   BCS		   VECS		   VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) | NOP (0x90)  | VCS2 (0x98) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP (0xc0)  |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
	 *  i.e. transpose of g(x, y)
	 *
	 *	  sync from	sync from	sync from	sync from	sync from
	 *	    RCS		   VCS		   BCS		   VECS		   VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) | NOP (0x90)  | VCS2 (0xb8) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) | NOP (0xc0)  |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
	 *  i.e. transpose of f(x, y)
	 */
	struct {
		union {
#define GEN6_SEMAPHORE_LAST	VECS_HW
#define GEN6_NUM_SEMAPHORES	(GEN6_SEMAPHORE_LAST + 1)
#define GEN6_SEMAPHORES_MASK	GENMASK(GEN6_SEMAPHORE_LAST, 0)
			struct {
				/* our mbox written by others */
				u32 wait[GEN6_NUM_SEMAPHORES];
				/* mboxes this ring signals to */
				i915_reg_t signal[GEN6_NUM_SEMAPHORES];
			} mbox;
			u64 signal_ggtt[I915_NUM_ENGINES];
		};

		/* AKA wait() */
		int (*sync_to)(struct drm_i915_gem_request *req,
			       struct drm_i915_gem_request *signal);
		u32 *(*signal)(struct drm_i915_gem_request *req, u32 *out);
	} semaphore;

	/* Execlists */
	struct tasklet_struct irq_tasklet;
	struct execlist_port {
		struct drm_i915_gem_request *request;
		unsigned int count;
	} execlist_port[2];
	struct rb_root execlist_queue;
	struct rb_node *execlist_first;
	unsigned int fw_domains;
	bool disable_lite_restore_wa;
	bool preempt_wa;
	u32 ctx_desc_template;

	/* Contexts are pinned whilst they are active on the GPU. The last
	 * context executed remains active whilst the GPU is idle - the
	 * switch away and write to the context object only occurs on the
	 * next execution. Contexts are only unpinned on retirement of the
	 * following request ensuring that we can always write to the object
	 * on the context switch even after idling. Across suspend, we switch
	 * to the kernel context and trash it as the save may not happen
	 * before the hardware is powered down.
	 */
	struct i915_gem_context *last_retired_context;

	/* We track the current MI_SET_CONTEXT in order to eliminate
	 * redundant context switches. This presumes that requests are not
	 * reordered! Or, when they are, the tracking is updated along with
	 * the emission of individual requests into the legacy command
	 * stream (ring).
	 */
	struct i915_gem_context *legacy_active_context;

	struct intel_engine_hangcheck hangcheck;

	bool needs_cmd_parser;

	/*
	 * Table of commands the command parser needs to know about
	 * for this engine.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_table *reg_tables;
	int reg_table_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the engine's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-engine length
	 * field encoding for the command (i.e. different opcode ranges use
	 * certain bits to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
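
	/*
	 * Illustrative use of the hook above (a sketch; the real consumer is
	 * the command parser in i915_cmd_parser.c): when a batch-buffer
	 * command has no table entry, the parser asks the engine for the
	 * mask that isolates the length bits of that command's header.
	 *
	 *	u32 mask = engine->get_cmd_length_mask(cmd_header);
	 *
	 *	if (!mask)
	 *		return -EINVAL;	// unrecognized command
	 *	// otherwise the command's length is encoded in
	 *	// (cmd_header & mask), per the opcode-range rules above
	 */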
};

static inline unsigned
intel_engine_flag(const struct intel_engine_cs *engine)
{
	return 1 << engine->id;
}

static inline void
intel_flush_status_page(struct intel_engine_cs *engine, int reg)
{
	mb();
	clflush(&engine->status_page.page_addr[reg]);
	mb();
}

static inline u32
intel_read_status_page(struct intel_engine_cs *engine, int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	return READ_ONCE(engine->status_page.page_addr[reg]);
}

static inline void
intel_write_status_page(struct intel_engine_cs *engine,
			int reg, u32 value)
{
	engine->status_page.page_addr[reg] = value;
}

/*
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x30
#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_SCRATCH_INDEX	0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
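
/*
 * Informational aside (an illustration, not taken from this file): the
 * *_INDEX values above are dword indices into the status page, while the
 * *_ADDR values are the corresponding byte offsets used when emitting
 * MI_STORE_DWORD_INDEX. Assuming the usual MI_STORE_DWORD_INDEX_SHIFT of 2
 * from i915_reg.h, I915_GEM_HWS_INDEX_ADDR == 0x30 << 2 == 0xc0, i.e. the
 * byte offset within the status page returned by intel_hws_seqno_address()
 * below (relative to the page's ggtt_offset).
 */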

struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size);
int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias);
void intel_ring_unpin(struct intel_ring *ring);
void intel_ring_free(struct intel_ring *ring);

void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);

void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);

int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);

static inline void intel_ring_emit(struct intel_ring *ring, u32 data)
{
	*(uint32_t *)(ring->vaddr + ring->tail) = data;
	ring->tail += 4;
}

static inline void intel_ring_emit_reg(struct intel_ring *ring, i915_reg_t reg)
{
	intel_ring_emit(ring, i915_mmio_reg_offset(reg));
}

static inline void intel_ring_advance(struct intel_ring *ring)
{
	/* Dummy function.
	 *
	 * This serves as a placeholder in the code so that the reader
	 * can compare against the preceding intel_ring_begin() and
	 * check that the number of dwords emitted matches the space
	 * reserved for the command packet (i.e. the value passed to
	 * intel_ring_begin()).
	 */
}
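
/*
 * Typical emission pattern (an illustrative sketch, assuming the request's
 * ring is reachable as req->ring as set up by request allocation):
 *
 *	struct intel_ring *ring = req->ring;
 *	int ret;
 *
 *	ret = intel_ring_begin(req, 2);
 *	if (ret)
 *		return ret;
 *
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 *
 * The count passed to intel_ring_begin() must match the number of
 * intel_ring_emit() calls that follow.
 */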

static inline u32 intel_ring_offset(struct intel_ring *ring, void *addr)
{
	/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
	u32 offset = addr - ring->vaddr;
	return offset & (ring->size - 1);
}

int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ring *ring);

void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);

void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);

int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);

u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine);

static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
{
	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
}

static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
{
	/* We are only peeking at the tail of the submit queue (and not the
	 * queue itself) in order to gain a hint as to the current active
	 * state of the engine. Callers are not expected to be taking
	 * engine->timeline->lock, nor are they expected to be concerned
	 * with serialising this hint with anything, so document it as
	 * a hint and nothing more.
	 */
	return READ_ONCE(engine->timeline->last_submitted_seqno);
}

int init_workarounds_ring(struct intel_engine_cs *engine);

void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone);

/*
 * Arbitrary size for largest possible 'add request' sequence. The code paths
 * are complex and variable. Empirical measurement shows that the worst case
 * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However,
 * we need to allocate double the largest single packet within that emission
 * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW).
 */
#define MIN_SPACE_FOR_ADD_REQUEST 336
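/* i.e. (6 + 6 + 72) dwords * sizeof(u32) == 84 * 4 == 336 bytes for the BDW worst case above. */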

static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
{
	return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR;
}

/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);

static inline void intel_wait_init(struct intel_wait *wait, u32 seqno)
{
	wait->tsk = current;
	wait->seqno = seqno;
}

static inline bool intel_wait_complete(const struct intel_wait *wait)
{
	return RB_EMPTY_NODE(&wait->node);
}

bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait);
void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait);
void intel_engine_enable_signaling(struct drm_i915_gem_request *request);
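
/*
 * Illustrative waiter flow (a sketch only; the canonical user of this API is
 * the request-wait path in i915_gem_request.c):
 *
 *	struct intel_wait wait;
 *
 *	intel_wait_init(&wait, seqno);
 *	intel_engine_add_wait(engine, &wait);
 *	... sleep, re-checking intel_engine_get_seqno(engine) against seqno ...
 *	intel_engine_remove_wait(engine, &wait);
 */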

static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
{
	return rcu_access_pointer(engine->breadcrumbs.irq_seqno_bh);
}

static inline bool intel_engine_wakeup(const struct intel_engine_cs *engine)
{
	bool wakeup = false;

	/* Note that for this not to dangerously chase a dangling pointer,
	 * we must hold the rcu_read_lock here.
	 *
	 * Also note that tsk is likely to be in !TASK_RUNNING state so an
	 * early test for tsk->state != TASK_RUNNING before wake_up_process()
	 * is unlikely to be beneficial.
	 */
	if (intel_engine_has_waiter(engine)) {
		struct task_struct *tsk;

		rcu_read_lock();
		tsk = rcu_dereference(engine->breadcrumbs.irq_seqno_bh);
		if (tsk)
			wakeup = wake_up_process(tsk);
		rcu_read_unlock();
	}

	return wakeup;
}

void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
unsigned int intel_breadcrumbs_busy(struct drm_i915_private *i915);

#endif /* _INTEL_RINGBUFFER_H_ */