drivers/gpu/drm/i915/intel_ringbuffer.c
1 /*
2 * Copyright © 2008-2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Zou Nan hai <nanhai.zou@intel.com>
26 * Xiang Hai hao <haihao.xiang@intel.com>
27 *
28 */
29
30 #include <linux/log2.h>
31 #include <drm/drmP.h>
32 #include "i915_drv.h"
33 #include <drm/i915_drm.h>
34 #include "i915_trace.h"
35 #include "intel_drv.h"
36
37 /* Rough estimate of the typical request size, performing a flush,
38 * set-context and then emitting the batch.
39 */
40 #define LEGACY_REQUEST_SIZE 200
41
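/*
 * Added note: __intel_ring_space() below returns the free bytes between
 * tail and head, accounting for wraparound. Illustrative numbers (not from
 * the source): size = 4096, head = 512, tail = 3584 gives space = -3072,
 * which wraps to 1024 bytes, less I915_RING_FREE_SPACE, the gap the driver
 * keeps between tail and head.
 */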
42 static int __intel_ring_space(int head, int tail, int size)
43 {
44 int space = head - tail;
45 if (space <= 0)
46 space += size;
47 return space - I915_RING_FREE_SPACE;
48 }
49
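/*
 * Added note: last_retired_head == -1 is used as a "nothing retired since
 * the last update" sentinel; see reset_ring_common() and
 * intel_engine_create_ring() below, which reset it to -1.
 */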
50 void intel_ring_update_space(struct intel_ring *ring)
51 {
52 if (ring->last_retired_head != -1) {
53 ring->head = ring->last_retired_head;
54 ring->last_retired_head = -1;
55 }
56
57 ring->space = __intel_ring_space(ring->head & HEAD_ADDR,
58 ring->tail, ring->size);
59 }
60
61 static int
62 gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
63 {
64 u32 cmd, *cs;
65
66 cmd = MI_FLUSH;
67
68 if (mode & EMIT_INVALIDATE)
69 cmd |= MI_READ_FLUSH;
70
71 cs = intel_ring_begin(req, 2);
72 if (IS_ERR(cs))
73 return PTR_ERR(cs);
74
75 *cs++ = cmd;
76 *cs++ = MI_NOOP;
77 intel_ring_advance(req, cs);
78
79 return 0;
80 }
81
82 static int
83 gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
84 {
85 u32 cmd, *cs;
86
87 /*
88 * read/write caches:
89 *
90 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
91 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
92 * also flushed at 2d versus 3d pipeline switches.
93 *
94 * read-only caches:
95 *
96 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
97 * MI_READ_FLUSH is set, and is always flushed on 965.
98 *
99 * I915_GEM_DOMAIN_COMMAND may not exist?
100 *
101 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
102 * invalidated when MI_EXE_FLUSH is set.
103 *
104 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
105 * invalidated with every MI_FLUSH.
106 *
107 * TLBs:
108 *
109 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
110 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
111 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
112 * are flushed at any MI_FLUSH.
113 */
114
115 cmd = MI_FLUSH;
116 if (mode & EMIT_INVALIDATE) {
117 cmd |= MI_EXE_FLUSH;
118 if (IS_G4X(req->i915) || IS_GEN5(req->i915))
119 cmd |= MI_INVALIDATE_ISP;
120 }
121
122 cs = intel_ring_begin(req, 2);
123 if (IS_ERR(cs))
124 return PTR_ERR(cs);
125
126 *cs++ = cmd;
127 *cs++ = MI_NOOP;
128 intel_ring_advance(req, cs);
129
130 return 0;
131 }
132
133 /**
134 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
135 * implementing two workarounds on gen6. From section 1.4.7.1
136 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
137 *
138 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
139 * produced by non-pipelined state commands), software needs to first
140 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
141 * 0.
142 *
143 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
144 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
145 *
146 * And the workaround for these two requires this workaround first:
147 *
148 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
149 * BEFORE the pipe-control with a post-sync op and no write-cache
150 * flushes.
151 *
152 * And this last workaround is tricky because of the requirements on
153 * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
154 * volume 2 part 1:
155 *
156 * "1 of the following must also be set:
157 * - Render Target Cache Flush Enable ([12] of DW1)
158 * - Depth Cache Flush Enable ([0] of DW1)
159 * - Stall at Pixel Scoreboard ([1] of DW1)
160 * - Depth Stall ([13] of DW1)
161 * - Post-Sync Operation ([13] of DW1)
162 * - Notify Enable ([8] of DW1)"
163 *
164 * The cache flushes require the workaround flush that triggered this
165 * one, so we can't use it. Depth stall would trigger the same.
166 * Post-sync nonzero is what triggered this second workaround, so we
167 * can't use that one either. Notify enable is IRQs, which aren't
168 * really our business. That leaves only stall at scoreboard.
169 */
170 static int
171 intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
172 {
173 u32 scratch_addr =
174 i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
175 u32 *cs;
176
177 cs = intel_ring_begin(req, 6);
178 if (IS_ERR(cs))
179 return PTR_ERR(cs);
180
181 *cs++ = GFX_OP_PIPE_CONTROL(5);
182 *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
183 *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
184 *cs++ = 0; /* low dword */
185 *cs++ = 0; /* high dword */
186 *cs++ = MI_NOOP;
187 intel_ring_advance(req, cs);
188
189 cs = intel_ring_begin(req, 6);
190 if (IS_ERR(cs))
191 return PTR_ERR(cs);
192
193 *cs++ = GFX_OP_PIPE_CONTROL(5);
194 *cs++ = PIPE_CONTROL_QW_WRITE;
195 *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
196 *cs++ = 0;
197 *cs++ = 0;
198 *cs++ = MI_NOOP;
199 intel_ring_advance(req, cs);
200
201 return 0;
202 }
203
204 static int
205 gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
206 {
207 u32 scratch_addr =
208 i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
209 u32 *cs, flags = 0;
210 int ret;
211
212 /* Force SNB workarounds for PIPE_CONTROL flushes */
213 ret = intel_emit_post_sync_nonzero_flush(req);
214 if (ret)
215 return ret;
216
217 /* Just flush everything. Experiments have shown that reducing the
218 * number of bits based on the write domains has little performance
219 * impact.
220 */
221 if (mode & EMIT_FLUSH) {
222 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
223 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
224 /*
225 * Ensure that any following seqno writes only happen
226 * when the render cache is indeed flushed.
227 */
228 flags |= PIPE_CONTROL_CS_STALL;
229 }
230 if (mode & EMIT_INVALIDATE) {
231 flags |= PIPE_CONTROL_TLB_INVALIDATE;
232 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
233 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
234 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
235 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
236 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
237 /*
238 * TLB invalidate requires a post-sync write.
239 */
240 flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
241 }
242
243 cs = intel_ring_begin(req, 4);
244 if (IS_ERR(cs))
245 return PTR_ERR(cs);
246
247 *cs++ = GFX_OP_PIPE_CONTROL(4);
248 *cs++ = flags;
249 *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
250 *cs++ = 0;
251 intel_ring_advance(req, cs);
252
253 return 0;
254 }
255
256 static int
257 gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
258 {
259 u32 *cs;
260
261 cs = intel_ring_begin(req, 4);
262 if (IS_ERR(cs))
263 return PTR_ERR(cs);
264
265 *cs++ = GFX_OP_PIPE_CONTROL(4);
266 *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
267 *cs++ = 0;
268 *cs++ = 0;
269 intel_ring_advance(req, cs);
270
271 return 0;
272 }
273
274 static int
275 gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
276 {
277 u32 scratch_addr =
278 i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
279 u32 *cs, flags = 0;
280
281 /*
282 * Ensure that any following seqno writes only happen when the render
283 * cache is indeed flushed.
284 *
285 * Workaround: 4th PIPE_CONTROL command (except the ones with only
286 * read-cache invalidate bits set) must have the CS_STALL bit set. We
287 * don't try to be clever and just set it unconditionally.
288 */
289 flags |= PIPE_CONTROL_CS_STALL;
290
291 /* Just flush everything. Experiments have shown that reducing the
292 * number of bits based on the write domains has little performance
293 * impact.
294 */
295 if (mode & EMIT_FLUSH) {
296 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
297 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
298 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
299 flags |= PIPE_CONTROL_FLUSH_ENABLE;
300 }
301 if (mode & EMIT_INVALIDATE) {
302 flags |= PIPE_CONTROL_TLB_INVALIDATE;
303 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
304 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
305 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
306 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
307 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
308 flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
309 /*
310 * TLB invalidate requires a post-sync write.
311 */
312 flags |= PIPE_CONTROL_QW_WRITE;
313 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
314
315 flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
316
317 /* Workaround: we must issue a pipe_control with CS-stall bit
318 * set before a pipe_control command that has the state cache
319 * invalidate bit set. */
320 gen7_render_ring_cs_stall_wa(req);
321 }
322
323 cs = intel_ring_begin(req, 4);
324 if (IS_ERR(cs))
325 return PTR_ERR(cs);
326
327 *cs++ = GFX_OP_PIPE_CONTROL(4);
328 *cs++ = flags;
329 *cs++ = scratch_addr;
330 *cs++ = 0;
331 intel_ring_advance(req, cs);
332
333 return 0;
334 }
335
336 static int
337 gen8_emit_pipe_control(struct drm_i915_gem_request *req,
338 u32 flags, u32 scratch_addr)
339 {
340 u32 *cs;
341
342 cs = intel_ring_begin(req, 6);
343 if (IS_ERR(cs))
344 return PTR_ERR(cs);
345
346 *cs++ = GFX_OP_PIPE_CONTROL(6);
347 *cs++ = flags;
348 *cs++ = scratch_addr;
349 *cs++ = 0;
350 *cs++ = 0;
351 *cs++ = 0;
352 intel_ring_advance(req, cs);
353
354 return 0;
355 }
356
357 static int
358 gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
359 {
360 u32 scratch_addr =
361 i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
362 u32 flags = 0;
363 int ret;
364
365 flags |= PIPE_CONTROL_CS_STALL;
366
367 if (mode & EMIT_FLUSH) {
368 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
369 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
370 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
371 flags |= PIPE_CONTROL_FLUSH_ENABLE;
372 }
373 if (mode & EMIT_INVALIDATE) {
374 flags |= PIPE_CONTROL_TLB_INVALIDATE;
375 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
376 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
377 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
378 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
379 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
380 flags |= PIPE_CONTROL_QW_WRITE;
381 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
382
383 /* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
384 ret = gen8_emit_pipe_control(req,
385 PIPE_CONTROL_CS_STALL |
386 PIPE_CONTROL_STALL_AT_SCOREBOARD,
387 0);
388 if (ret)
389 return ret;
390 }
391
392 return gen8_emit_pipe_control(req, flags, scratch_addr);
393 }
394
395 static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
396 {
397 struct drm_i915_private *dev_priv = engine->i915;
398 u32 addr;
399
400 addr = dev_priv->status_page_dmah->busaddr;
401 if (INTEL_GEN(dev_priv) >= 4)
402 addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
403 I915_WRITE(HWS_PGA, addr);
404 }
405
406 static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
407 {
408 struct drm_i915_private *dev_priv = engine->i915;
409 i915_reg_t mmio;
410
411 /* The ring status page addresses are no longer next to the rest of
412 * the ring registers as of gen7.
413 */
414 if (IS_GEN7(dev_priv)) {
415 switch (engine->id) {
416 case RCS:
417 mmio = RENDER_HWS_PGA_GEN7;
418 break;
419 case BCS:
420 mmio = BLT_HWS_PGA_GEN7;
421 break;
422 /*
423 * VCS2 doesn't actually exist on Gen7; this case is only here to
424 * silence the gcc switch check warning.
425 */
426 case VCS2:
427 case VCS:
428 mmio = BSD_HWS_PGA_GEN7;
429 break;
430 case VECS:
431 mmio = VEBOX_HWS_PGA_GEN7;
432 break;
433 }
434 } else if (IS_GEN6(dev_priv)) {
435 mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
436 } else {
437 /* XXX: gen8 returns to sanity */
438 mmio = RING_HWS_PGA(engine->mmio_base);
439 }
440
441 I915_WRITE(mmio, engine->status_page.ggtt_offset);
442 POSTING_READ(mmio);
443
444 /*
445 * Flush the TLB for this page
446 *
447 * FIXME: These two bits have disappeared on gen8, so a question
448 * arises: do we still need this and if so how should we go about
449 * invalidating the TLB?
450 */
451 if (IS_GEN(dev_priv, 6, 7)) {
452 i915_reg_t reg = RING_INSTPM(engine->mmio_base);
453
454 /* ring should be idle before issuing a sync flush */
455 WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
456
457 I915_WRITE(reg,
458 _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
459 INSTPM_SYNC_FLUSH));
460 if (intel_wait_for_register(dev_priv,
461 reg, INSTPM_SYNC_FLUSH, 0,
462 1000))
463 DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
464 engine->name);
465 }
466 }
467
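/*
 * Added summary: stop_ring() asks the command streamer to stop (STOP_RING),
 * waits for MODE_IDLE, clears CTL/HEAD/TAIL and reports whether HEAD really
 * reads back as zero, so callers can tell a stuck ring from a clean stop.
 */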
468 static bool stop_ring(struct intel_engine_cs *engine)
469 {
470 struct drm_i915_private *dev_priv = engine->i915;
471
472 if (INTEL_GEN(dev_priv) > 2) {
473 I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
474 if (intel_wait_for_register(dev_priv,
475 RING_MI_MODE(engine->mmio_base),
476 MODE_IDLE,
477 MODE_IDLE,
478 1000)) {
479 DRM_ERROR("%s : timed out trying to stop ring\n",
480 engine->name);
481 /* Sometimes we observe that the idle flag is not
482 * set even though the ring is empty. So double
483 * check before giving up.
484 */
485 if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine))
486 return false;
487 }
488 }
489
490 I915_WRITE_CTL(engine, 0);
491 I915_WRITE_HEAD(engine, 0);
492 I915_WRITE_TAIL(engine, 0);
493
494 if (INTEL_GEN(dev_priv) > 2) {
495 (void)I915_READ_CTL(engine);
496 I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
497 }
498
499 return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
500 }
501
502 static int init_ring_common(struct intel_engine_cs *engine)
503 {
504 struct drm_i915_private *dev_priv = engine->i915;
505 struct intel_ring *ring = engine->buffer;
506 int ret = 0;
507
508 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
509
510 if (!stop_ring(engine)) {
511 /* G45 ring initialization often fails to reset head to zero */
512 DRM_DEBUG_KMS("%s head not reset to zero "
513 "ctl %08x head %08x tail %08x start %08x\n",
514 engine->name,
515 I915_READ_CTL(engine),
516 I915_READ_HEAD(engine),
517 I915_READ_TAIL(engine),
518 I915_READ_START(engine));
519
520 if (!stop_ring(engine)) {
521 DRM_ERROR("failed to set %s head to zero "
522 "ctl %08x head %08x tail %08x start %08x\n",
523 engine->name,
524 I915_READ_CTL(engine),
525 I915_READ_HEAD(engine),
526 I915_READ_TAIL(engine),
527 I915_READ_START(engine));
528 ret = -EIO;
529 goto out;
530 }
531 }
532
533 if (HWS_NEEDS_PHYSICAL(dev_priv))
534 ring_setup_phys_status_page(engine);
535 else
536 intel_ring_setup_status_page(engine);
537
538 intel_engine_reset_breadcrumbs(engine);
539
540 /* Enforce ordering by reading HEAD register back */
541 I915_READ_HEAD(engine);
542
543 /* Initialize the ring. This must happen _after_ we've cleared the ring
544 * registers with the above sequence (the readback of the HEAD registers
545 * also enforces ordering), otherwise the hw might lose the new ring
546 * register values. */
547 I915_WRITE_START(engine, i915_ggtt_offset(ring->vma));
548
549 /* WaClearRingBufHeadRegAtInit:ctg,elk */
550 if (I915_READ_HEAD(engine))
551 DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
552 engine->name, I915_READ_HEAD(engine));
553
554 intel_ring_update_space(ring);
555 I915_WRITE_HEAD(engine, ring->head);
556 I915_WRITE_TAIL(engine, ring->tail);
557 (void)I915_READ_TAIL(engine);
558
559 I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID);
560
561 /* If the head is still not zero, the ring is dead */
562 if (intel_wait_for_register_fw(dev_priv, RING_CTL(engine->mmio_base),
563 RING_VALID, RING_VALID,
564 50)) {
565 DRM_ERROR("%s initialization failed "
566 "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
567 engine->name,
568 I915_READ_CTL(engine),
569 I915_READ_CTL(engine) & RING_VALID,
570 I915_READ_HEAD(engine), ring->head,
571 I915_READ_TAIL(engine), ring->tail,
572 I915_READ_START(engine),
573 i915_ggtt_offset(ring->vma));
574 ret = -EIO;
575 goto out;
576 }
577
578 intel_engine_init_hangcheck(engine);
579
580 out:
581 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
582
583 return ret;
584 }
585
586 static void reset_ring_common(struct intel_engine_cs *engine,
587 struct drm_i915_gem_request *request)
588 {
589 /* Try to restore the logical GPU state to match the continuation
590 * of the request queue. If we skip the context/PD restore, then
591 * the next request may try to execute assuming that its context
592 * is valid and loaded on the GPU and so may try to access invalid
593 * memory, prompting repeated GPU hangs.
594 *
595 * If the request was guilty, we still restore the logical state
596 * in case the next request requires it (e.g. the aliasing ppgtt),
597 * but skip over the hung batch.
598 *
599 * If the request was innocent, we try to replay the request with
600 * the restored context.
601 */
602 if (request) {
603 struct drm_i915_private *dev_priv = request->i915;
604 struct intel_context *ce = &request->ctx->engine[engine->id];
605 struct i915_hw_ppgtt *ppgtt;
606
607 /* FIXME consider gen8 reset */
608
609 if (ce->state) {
610 I915_WRITE(CCID,
611 i915_ggtt_offset(ce->state) |
612 BIT(8) /* must be set! */ |
613 CCID_EXTENDED_STATE_SAVE |
614 CCID_EXTENDED_STATE_RESTORE |
615 CCID_EN);
616 }
617
618 ppgtt = request->ctx->ppgtt ?: engine->i915->mm.aliasing_ppgtt;
619 if (ppgtt) {
620 u32 pd_offset = ppgtt->pd.base.ggtt_offset << 10;
621
622 I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
623 I915_WRITE(RING_PP_DIR_BASE(engine), pd_offset);
624
625 /* Wait for the PD reload to complete */
626 if (intel_wait_for_register(dev_priv,
627 RING_PP_DIR_BASE(engine),
628 BIT(0), 0,
629 10))
630 DRM_ERROR("Wait for reload of ppgtt page-directory timed out\n");
631
632 ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
633 }
634
635 /* If the rq hung, jump to its breadcrumb and skip the batch */
636 if (request->fence.error == -EIO) {
637 struct intel_ring *ring = request->ring;
638
639 ring->head = request->postfix;
640 ring->last_retired_head = -1;
641 }
642 } else {
643 engine->legacy_active_context = NULL;
644 }
645 }
646
647 int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
648 {
649 struct i915_workarounds *w = &req->i915->workarounds;
650 u32 *cs;
651 int ret, i;
652
653 if (w->count == 0)
654 return 0;
655
656 ret = req->engine->emit_flush(req, EMIT_BARRIER);
657 if (ret)
658 return ret;
659
660 cs = intel_ring_begin(req, (w->count * 2 + 2));
661 if (IS_ERR(cs))
662 return PTR_ERR(cs);
663
664 *cs++ = MI_LOAD_REGISTER_IMM(w->count);
665 for (i = 0; i < w->count; i++) {
666 *cs++ = i915_mmio_reg_offset(w->reg[i].addr);
667 *cs++ = w->reg[i].value;
668 }
669 *cs++ = MI_NOOP;
670
671 intel_ring_advance(req, cs);
672
673 ret = req->engine->emit_flush(req, EMIT_BARRIER);
674 if (ret)
675 return ret;
676
677 DRM_DEBUG_DRIVER("Number of Workarounds emitted: %d\n", w->count);
678
679 return 0;
680 }
681
682 static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
683 {
684 int ret;
685
686 ret = intel_ring_workarounds_emit(req);
687 if (ret != 0)
688 return ret;
689
690 ret = i915_gem_render_state_emit(req);
691 if (ret)
692 return ret;
693
694 return 0;
695 }
696
697 static int wa_add(struct drm_i915_private *dev_priv,
698 i915_reg_t addr,
699 const u32 mask, const u32 val)
700 {
701 const u32 idx = dev_priv->workarounds.count;
702
703 if (WARN_ON(idx >= I915_MAX_WA_REGS))
704 return -ENOSPC;
705
706 dev_priv->workarounds.reg[idx].addr = addr;
707 dev_priv->workarounds.reg[idx].value = val;
708 dev_priv->workarounds.reg[idx].mask = mask;
709
710 dev_priv->workarounds.count++;
711
712 return 0;
713 }
714
715 #define WA_REG(addr, mask, val) do { \
716 const int r = wa_add(dev_priv, (addr), (mask), (val)); \
717 if (r) \
718 return r; \
719 } while (0)
720
721 #define WA_SET_BIT_MASKED(addr, mask) \
722 WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
723
724 #define WA_CLR_BIT_MASKED(addr, mask) \
725 WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))
726
727 #define WA_SET_FIELD_MASKED(addr, mask, value) \
728 WA_REG(addr, mask, _MASKED_FIELD(mask, value))
729
730 #define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) | (mask))
731 #define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) & ~(mask))
732
733 #define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
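/*
 * Usage note (added, not in the original source): the WA_* helpers above
 * expect a local "struct drm_i915_private *dev_priv" in scope and return
 * from the enclosing *_init_workarounds() function on error, e.g.
 *
 *	WA_SET_BIT_MASKED(HDC_CHICKEN0, HDC_FORCE_NON_COHERENT);
 *
 * The _MASKED variants target masked-bit registers, while WA_SET_BIT and
 * WA_CLR_BIT capture a read-modify-write of the register's current value.
 */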
734
735 static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
736 i915_reg_t reg)
737 {
738 struct drm_i915_private *dev_priv = engine->i915;
739 struct i915_workarounds *wa = &dev_priv->workarounds;
740 const uint32_t index = wa->hw_whitelist_count[engine->id];
741
742 if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
743 return -EINVAL;
744
745 WA_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
746 i915_mmio_reg_offset(reg));
747 wa->hw_whitelist_count[engine->id]++;
748
749 return 0;
750 }
751
752 static int gen8_init_workarounds(struct intel_engine_cs *engine)
753 {
754 struct drm_i915_private *dev_priv = engine->i915;
755
756 WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
757
758 /* WaDisableAsyncFlipPerfMode:bdw,chv */
759 WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);
760
761 /* WaDisablePartialInstShootdown:bdw,chv */
762 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
763 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
764
765 /* Use Force Non-Coherent whenever executing a 3D context. This is a
766 * workaround for a possible hang in the unlikely event a TLB
767 * invalidation occurs during a PSD flush.
768 */
769 /* WaForceEnableNonCoherent:bdw,chv */
770 /* WaHdcDisableFetchWhenMasked:bdw,chv */
771 WA_SET_BIT_MASKED(HDC_CHICKEN0,
772 HDC_DONOT_FETCH_MEM_WHEN_MASKED |
773 HDC_FORCE_NON_COHERENT);
774
775 /* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
776 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
777 * polygons in the same 8x4 pixel/sample area to be processed without
778 * stalling waiting for the earlier ones to write to Hierarchical Z
779 * buffer."
780 *
781 * This optimization is off by default for BDW and CHV; turn it on.
782 */
783 WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
784
785 /* Wa4x4STCOptimizationDisable:bdw,chv */
786 WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
787
788 /*
789 * BSpec recommends 8x4 when MSAA is used,
790 * however in practice 16x4 seems fastest.
791 *
792 * Note that PS/WM thread counts depend on the WIZ hashing
793 * disable bit, which we don't touch here, but it's good
794 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
795 */
796 WA_SET_FIELD_MASKED(GEN7_GT_MODE,
797 GEN6_WIZ_HASHING_MASK,
798 GEN6_WIZ_HASHING_16x4);
799
800 return 0;
801 }
802
803 static int bdw_init_workarounds(struct intel_engine_cs *engine)
804 {
805 struct drm_i915_private *dev_priv = engine->i915;
806 int ret;
807
808 ret = gen8_init_workarounds(engine);
809 if (ret)
810 return ret;
811
812 /* WaDisableThreadStallDopClockGating:bdw (pre-production) */
813 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
814
815 /* WaDisableDopClockGating:bdw
816 *
817 * Also see the related UCGTCL1 write in broadwell_init_clock_gating()
818 * to disable EUTC clock gating.
819 */
820 WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
821 DOP_CLOCK_GATING_DISABLE);
822
823 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
824 GEN8_SAMPLER_POWER_BYPASS_DIS);
825
826 WA_SET_BIT_MASKED(HDC_CHICKEN0,
827 /* WaForceContextSaveRestoreNonCoherent:bdw */
828 HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
829 /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
830 (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
831
832 return 0;
833 }
834
835 static int chv_init_workarounds(struct intel_engine_cs *engine)
836 {
837 struct drm_i915_private *dev_priv = engine->i915;
838 int ret;
839
840 ret = gen8_init_workarounds(engine);
841 if (ret)
842 return ret;
843
844 /* WaDisableThreadStallDopClockGating:chv */
845 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
846
847 /* Improve HiZ throughput on CHV. */
848 WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
849
850 return 0;
851 }
852
853 static int gen9_init_workarounds(struct intel_engine_cs *engine)
854 {
855 struct drm_i915_private *dev_priv = engine->i915;
856 int ret;
857
858 /* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk */
859 I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
860
861 /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk */
862 I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
863 GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
864
865 /* WaDisableKillLogic:bxt,skl,kbl */
866 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
867 ECOCHK_DIS_TLB);
868
869 /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk */
870 /* WaDisablePartialInstShootdown:skl,bxt,kbl,glk */
871 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
872 FLOW_CONTROL_ENABLE |
873 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
874
875 /* Syncing dependencies between camera and graphics:skl,bxt,kbl */
876 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
877 GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
878
879 /* WaDisableDgMirrorFixInHalfSliceChicken5:bxt */
880 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
881 WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
882 GEN9_DG_MIRROR_FIX_ENABLE);
883
884 /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:bxt */
885 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
886 WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
887 GEN9_RHWO_OPTIMIZATION_DISABLE);
888 /*
889 * WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14] to be set
890 * but we do that in the per-ctx batchbuffer as there is an issue
891 * with this register not getting restored on ctx restore
892 */
893 }
894
895 /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl */
896 WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
897 GEN9_ENABLE_GPGPU_PREEMPTION);
898
899 /* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk */
900 /* WaDisablePartialResolveInVc:skl,bxt,kbl */
901 WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
902 GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));
903
904 /* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk */
905 WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
906 GEN9_CCS_TLB_PREFETCH_ENABLE);
907
908 /* WaDisableMaskBasedCammingInRCC:bxt */
909 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
910 WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
911 PIXEL_MASK_CAMMING_DISABLE);
912
913 /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl */
914 WA_SET_BIT_MASKED(HDC_CHICKEN0,
915 HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
916 HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
917
918 /* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
919 * both tied to WaForceContextSaveRestoreNonCoherent
920 * in some hsds for skl. We keep the tie for all gen9. The
921 * documentation is a bit hazy and so we want to get common behaviour,
922 * even though there is no clear evidence we would need both on kbl/bxt.
923 * This area has been a source of system hangs so we play it safe
924 * and mimic the skl regardless of what bspec says.
925 *
926 * Use Force Non-Coherent whenever executing a 3D context. This
927 * is a workaround for a possible hang in the unlikely event
928 * a TLB invalidation occurs during a PSD flush.
929 */
930
931 /* WaForceEnableNonCoherent:skl,bxt,kbl */
932 WA_SET_BIT_MASKED(HDC_CHICKEN0,
933 HDC_FORCE_NON_COHERENT);
934
935 /* WaDisableHDCInvalidation:skl,bxt,kbl */
936 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
937 BDW_DISABLE_HDC_INVALIDATION);
938
939 /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl */
940 if (IS_SKYLAKE(dev_priv) ||
941 IS_KABYLAKE(dev_priv) ||
942 IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
943 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
944 GEN8_SAMPLER_POWER_BYPASS_DIS);
945
946 /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk */
947 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
948
949 /* WaOCLCoherentLineFlush:skl,bxt,kbl */
950 I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
951 GEN8_LQSC_FLUSH_COHERENT_LINES));
952
953 /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk */
954 ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
955 if (ret)
956 return ret;
957
958 /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl */
959 ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
960 if (ret)
961 return ret;
962
963 /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk */
964 ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
965 if (ret)
966 return ret;
967
968 return 0;
969 }
970
971 static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
972 {
973 struct drm_i915_private *dev_priv = engine->i915;
974 u8 vals[3] = { 0, 0, 0 };
975 unsigned int i;
976
977 for (i = 0; i < 3; i++) {
978 u8 ss;
979
980 /*
981 * Only consider slices where one, and only one, subslice has 7
982 * EUs
983 */
984 if (!is_power_of_2(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]))
985 continue;
986
987 /*
988 * subslice_7eu[i] != 0 (because of the check above) and
989 * ss_max == 4 (maximum number of subslices possible per slice)
990 *
991 * -> 0 <= ss <= 3;
992 */
993 ss = ffs(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]) - 1;
994 vals[i] = 3 - ss;
995 }
996
997 if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
998 return 0;
999
1000 /* Tune IZ hashing. See intel_device_info_runtime_init() */
1001 WA_SET_FIELD_MASKED(GEN7_GT_MODE,
1002 GEN9_IZ_HASHING_MASK(2) |
1003 GEN9_IZ_HASHING_MASK(1) |
1004 GEN9_IZ_HASHING_MASK(0),
1005 GEN9_IZ_HASHING(2, vals[2]) |
1006 GEN9_IZ_HASHING(1, vals[1]) |
1007 GEN9_IZ_HASHING(0, vals[0]));
1008
1009 return 0;
1010 }
1011
1012 static int skl_init_workarounds(struct intel_engine_cs *engine)
1013 {
1014 struct drm_i915_private *dev_priv = engine->i915;
1015 int ret;
1016
1017 ret = gen9_init_workarounds(engine);
1018 if (ret)
1019 return ret;
1020
1021 /*
1022 * The actual WA is to disable per-context preemption granularity control
1023 * until D0, which is the default case, so this is equivalent to
1024 * !WaDisablePerCtxtPreemptionGranularityControl:skl
1025 */
1026 I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
1027 _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
1028
1029 /* WaEnableGapsTsvCreditFix:skl */
1030 I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
1031 GEN9_GAPS_TSV_CREDIT_DISABLE));
1032
1033 /* WaDisableGafsUnitClkGating:skl */
1034 WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
1035
1036 /* WaInPlaceDecompressionHang:skl */
1037 if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
1038 WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
1039 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
1040
1041 /* WaDisableLSQCROPERFforOCL:skl */
1042 ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
1043 if (ret)
1044 return ret;
1045
1046 return skl_tune_iz_hashing(engine);
1047 }
1048
1049 static int bxt_init_workarounds(struct intel_engine_cs *engine)
1050 {
1051 struct drm_i915_private *dev_priv = engine->i915;
1052 int ret;
1053
1054 ret = gen9_init_workarounds(engine);
1055 if (ret)
1056 return ret;
1057
1058 /* WaStoreMultiplePTEenable:bxt */
1059 /* This is a requirement according to the hardware specification */
1060 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
1061 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
1062
1063 /* WaSetClckGatingDisableMedia:bxt */
1064 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
1065 I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
1066 ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
1067 }
1068
1069 /* WaDisableThreadStallDopClockGating:bxt */
1070 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
1071 STALL_DOP_GATING_DISABLE);
1072
1073 /* WaDisablePooledEuLoadBalancingFix:bxt */
1074 if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
1075 WA_SET_BIT_MASKED(FF_SLICE_CS_CHICKEN2,
1076 GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
1077 }
1078
1079 /* WaDisableSbeCacheDispatchPortSharing:bxt */
1080 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) {
1081 WA_SET_BIT_MASKED(
1082 GEN7_HALF_SLICE_CHICKEN1,
1083 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
1084 }
1085
1086 /* WaDisableObjectLevelPreemptionForTrifanOrPolygon:bxt */
1087 /* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
1088 /* WaDisableObjectLevelPreemtionForInstanceId:bxt */
1089 /* WaDisableLSQCROPERFforOCL:bxt */
1090 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
1091 ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
1092 if (ret)
1093 return ret;
1094
1095 ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
1096 if (ret)
1097 return ret;
1098 }
1099
1100 /* WaProgramL3SqcReg1DefaultForPerf:bxt */
1101 if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
1102 I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) |
1103 L3_HIGH_PRIO_CREDITS(2));
1104
1105 /* WaToEnableHwFixForPushConstHWBug:bxt */
1106 if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
1107 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
1108 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
1109
1110 /* WaInPlaceDecompressionHang:bxt */
1111 if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
1112 WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
1113 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
1114
1115 return 0;
1116 }
1117
1118 static int kbl_init_workarounds(struct intel_engine_cs *engine)
1119 {
1120 struct drm_i915_private *dev_priv = engine->i915;
1121 int ret;
1122
1123 ret = gen9_init_workarounds(engine);
1124 if (ret)
1125 return ret;
1126
1127 /* WaEnableGapsTsvCreditFix:kbl */
1128 I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
1129 GEN9_GAPS_TSV_CREDIT_DISABLE));
1130
1131 /* WaDisableDynamicCreditSharing:kbl */
1132 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
1133 WA_SET_BIT(GAMT_CHKN_BIT_REG,
1134 GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
1135
1136 /* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
1137 if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
1138 WA_SET_BIT_MASKED(HDC_CHICKEN0,
1139 HDC_FENCE_DEST_SLM_DISABLE);
1140
1141 /* WaToEnableHwFixForPushConstHWBug:kbl */
1142 if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
1143 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
1144 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
1145
1146 /* WaDisableGafsUnitClkGating:kbl */
1147 WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
1148
1149 /* WaDisableSbeCacheDispatchPortSharing:kbl */
1150 WA_SET_BIT_MASKED(
1151 GEN7_HALF_SLICE_CHICKEN1,
1152 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
1153
1154 /* WaInPlaceDecompressionHang:kbl */
1155 WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
1156 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
1157
1158 /* WaDisableLSQCROPERFforOCL:kbl */
1159 ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
1160 if (ret)
1161 return ret;
1162
1163 return 0;
1164 }
1165
1166 static int glk_init_workarounds(struct intel_engine_cs *engine)
1167 {
1168 struct drm_i915_private *dev_priv = engine->i915;
1169 int ret;
1170
1171 ret = gen9_init_workarounds(engine);
1172 if (ret)
1173 return ret;
1174
1175 /* WaToEnableHwFixForPushConstHWBug:glk */
1176 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
1177 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
1178
1179 return 0;
1180 }
1181
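/*
 * Added note: the WA_* entries recorded by the *_init_workarounds() functions
 * above only populate dev_priv->workarounds; they reach the hardware when
 * intel_ring_workarounds_emit() replays them with MI_LOAD_REGISTER_IMM from
 * intel_rcs_ctx_init(). Direct I915_WRITE() calls in those functions take
 * effect immediately instead.
 */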
1182 int init_workarounds_ring(struct intel_engine_cs *engine)
1183 {
1184 struct drm_i915_private *dev_priv = engine->i915;
1185
1186 WARN_ON(engine->id != RCS);
1187
1188 dev_priv->workarounds.count = 0;
1189 dev_priv->workarounds.hw_whitelist_count[RCS] = 0;
1190
1191 if (IS_BROADWELL(dev_priv))
1192 return bdw_init_workarounds(engine);
1193
1194 if (IS_CHERRYVIEW(dev_priv))
1195 return chv_init_workarounds(engine);
1196
1197 if (IS_SKYLAKE(dev_priv))
1198 return skl_init_workarounds(engine);
1199
1200 if (IS_BROXTON(dev_priv))
1201 return bxt_init_workarounds(engine);
1202
1203 if (IS_KABYLAKE(dev_priv))
1204 return kbl_init_workarounds(engine);
1205
1206 if (IS_GEMINILAKE(dev_priv))
1207 return glk_init_workarounds(engine);
1208
1209 return 0;
1210 }
1211
1212 static int init_render_ring(struct intel_engine_cs *engine)
1213 {
1214 struct drm_i915_private *dev_priv = engine->i915;
1215 int ret = init_ring_common(engine);
1216 if (ret)
1217 return ret;
1218
1219 /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
1220 if (IS_GEN(dev_priv, 4, 6))
1221 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
1222
1223 /* We need to disable the AsyncFlip performance optimisations in order
1224 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
1225 * programmed to '1' on all products.
1226 *
1227 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
1228 */
1229 if (IS_GEN(dev_priv, 6, 7))
1230 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
1231
1232 /* Required for the hardware to program scanline values for waiting */
1233 /* WaEnableFlushTlbInvalidationMode:snb */
1234 if (IS_GEN6(dev_priv))
1235 I915_WRITE(GFX_MODE,
1236 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
1237
1238 /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
1239 if (IS_GEN7(dev_priv))
1240 I915_WRITE(GFX_MODE_GEN7,
1241 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
1242 _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
1243
1244 if (IS_GEN6(dev_priv)) {
1245 /* From the Sandybridge PRM, volume 1 part 3, page 24:
1246 * "If this bit is set, STCunit will have LRA as replacement
1247 * policy. [...] This bit must be reset. LRA replacement
1248 * policy is not supported."
1249 */
1250 I915_WRITE(CACHE_MODE_0,
1251 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
1252 }
1253
1254 if (IS_GEN(dev_priv, 6, 7))
1255 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
1256
1257 if (INTEL_INFO(dev_priv)->gen >= 6)
1258 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
1259
1260 return init_workarounds_ring(engine);
1261 }
1262
1263 static void render_ring_cleanup(struct intel_engine_cs *engine)
1264 {
1265 struct drm_i915_private *dev_priv = engine->i915;
1266
1267 i915_vma_unpin_and_release(&dev_priv->semaphore);
1268 }
1269
1270 static u32 *gen8_rcs_signal(struct drm_i915_gem_request *req, u32 *cs)
1271 {
1272 struct drm_i915_private *dev_priv = req->i915;
1273 struct intel_engine_cs *waiter;
1274 enum intel_engine_id id;
1275
1276 for_each_engine(waiter, dev_priv, id) {
1277 u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
1278 if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
1279 continue;
1280
1281 *cs++ = GFX_OP_PIPE_CONTROL(6);
1282 *cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_QW_WRITE |
1283 PIPE_CONTROL_CS_STALL;
1284 *cs++ = lower_32_bits(gtt_offset);
1285 *cs++ = upper_32_bits(gtt_offset);
1286 *cs++ = req->global_seqno;
1287 *cs++ = 0;
1288 *cs++ = MI_SEMAPHORE_SIGNAL |
1289 MI_SEMAPHORE_TARGET(waiter->hw_id);
1290 *cs++ = 0;
1291 }
1292
1293 return cs;
1294 }
1295
1296 static u32 *gen8_xcs_signal(struct drm_i915_gem_request *req, u32 *cs)
1297 {
1298 struct drm_i915_private *dev_priv = req->i915;
1299 struct intel_engine_cs *waiter;
1300 enum intel_engine_id id;
1301
1302 for_each_engine(waiter, dev_priv, id) {
1303 u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
1304 if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
1305 continue;
1306
1307 *cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
1308 *cs++ = lower_32_bits(gtt_offset) | MI_FLUSH_DW_USE_GTT;
1309 *cs++ = upper_32_bits(gtt_offset);
1310 *cs++ = req->global_seqno;
1311 *cs++ = MI_SEMAPHORE_SIGNAL |
1312 MI_SEMAPHORE_TARGET(waiter->hw_id);
1313 *cs++ = 0;
1314 }
1315
1316 return cs;
1317 }
1318
1319 static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *cs)
1320 {
1321 struct drm_i915_private *dev_priv = req->i915;
1322 struct intel_engine_cs *engine;
1323 enum intel_engine_id id;
1324 int num_rings = 0;
1325
1326 for_each_engine(engine, dev_priv, id) {
1327 i915_reg_t mbox_reg;
1328
1329 if (!(BIT(engine->hw_id) & GEN6_SEMAPHORES_MASK))
1330 continue;
1331
1332 mbox_reg = req->engine->semaphore.mbox.signal[engine->hw_id];
1333 if (i915_mmio_reg_valid(mbox_reg)) {
1334 *cs++ = MI_LOAD_REGISTER_IMM(1);
1335 *cs++ = i915_mmio_reg_offset(mbox_reg);
1336 *cs++ = req->global_seqno;
1337 num_rings++;
1338 }
1339 }
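/* Added note: each mailbox signal above is 3 dwords, so an odd ring count
 * needs one MI_NOOP of padding to keep the total dword count even
 * (the ring tail must stay qword aligned).
 */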
1340 if (num_rings & 1)
1341 *cs++ = MI_NOOP;
1342
1343 return cs;
1344 }
1345
1346 static void i9xx_submit_request(struct drm_i915_gem_request *request)
1347 {
1348 struct drm_i915_private *dev_priv = request->i915;
1349
1350 i915_gem_request_submit(request);
1351
1352 I915_WRITE_TAIL(request->engine, request->tail);
1353 }
1354
1355 static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
1356 {
1357 *cs++ = MI_STORE_DWORD_INDEX;
1358 *cs++ = I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT;
1359 *cs++ = req->global_seqno;
1360 *cs++ = MI_USER_INTERRUPT;
1361
1362 req->tail = intel_ring_offset(req, cs);
1363 }
1364
1365 static const int i9xx_emit_breadcrumb_sz = 4;
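/*
 * Added note: each *_emit_breadcrumb_sz constant must match the number of
 * dwords its emit_breadcrumb callback writes (4 above, 8 for
 * gen8_render_emit_breadcrumb below), since that figure is what the driver
 * reserves in the ring for the closing breadcrumb of every request.
 */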
1366
1367 /**
1368 * gen6_sema_emit_breadcrumb - Update the semaphore mailbox registers
1369 *
1370 * @req - request to write to the ring
1371 *
1372 * Update the mailbox registers in the *other* rings with the current seqno.
1373 * This acts like a signal in the canonical semaphore.
1374 */
1375 static void gen6_sema_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
1376 {
1377 return i9xx_emit_breadcrumb(req,
1378 req->engine->semaphore.signal(req, cs));
1379 }
1380
1381 static void gen8_render_emit_breadcrumb(struct drm_i915_gem_request *req,
1382 u32 *cs)
1383 {
1384 struct intel_engine_cs *engine = req->engine;
1385
1386 if (engine->semaphore.signal)
1387 cs = engine->semaphore.signal(req, cs);
1388
1389 *cs++ = GFX_OP_PIPE_CONTROL(6);
1390 *cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL |
1391 PIPE_CONTROL_QW_WRITE;
1392 *cs++ = intel_hws_seqno_address(engine);
1393 *cs++ = 0;
1394 *cs++ = req->global_seqno;
1395 /* We're thrashing one dword of HWS. */
1396 *cs++ = 0;
1397 *cs++ = MI_USER_INTERRUPT;
1398 *cs++ = MI_NOOP;
1399
1400 req->tail = intel_ring_offset(req, cs);
1401 }
1402
1403 static const int gen8_render_emit_breadcrumb_sz = 8;
1404
1405 /**
1406 * gen8_ring_sync_to - sync the waiting request to the signalling request
1407 *
1408 * @req - request that is waiting
1409 * @signal - request which has, or will, signal
1410 *
1411 */
1412
1413 static int
1414 gen8_ring_sync_to(struct drm_i915_gem_request *req,
1415 struct drm_i915_gem_request *signal)
1416 {
1417 struct drm_i915_private *dev_priv = req->i915;
1418 u64 offset = GEN8_WAIT_OFFSET(req->engine, signal->engine->id);
1419 struct i915_hw_ppgtt *ppgtt;
1420 u32 *cs;
1421
1422 cs = intel_ring_begin(req, 4);
1423 if (IS_ERR(cs))
1424 return PTR_ERR(cs);
1425
1426 *cs++ = MI_SEMAPHORE_WAIT | MI_SEMAPHORE_GLOBAL_GTT |
1427 MI_SEMAPHORE_SAD_GTE_SDD;
1428 *cs++ = signal->global_seqno;
1429 *cs++ = lower_32_bits(offset);
1430 *cs++ = upper_32_bits(offset);
1431 intel_ring_advance(req, cs);
1432
1433 /* When the !RCS engines idle waiting upon a semaphore, they lose their
1434 * pagetables and we must reload them before executing the batch.
1435 * We do this on the i915_switch_context() following the wait and
1436 * before the dispatch.
1437 */
1438 ppgtt = req->ctx->ppgtt;
1439 if (ppgtt && req->engine->id != RCS)
1440 ppgtt->pd_dirty_rings |= intel_engine_flag(req->engine);
1441 return 0;
1442 }
1443
1444 static int
1445 gen6_ring_sync_to(struct drm_i915_gem_request *req,
1446 struct drm_i915_gem_request *signal)
1447 {
1448 u32 dw1 = MI_SEMAPHORE_MBOX |
1449 MI_SEMAPHORE_COMPARE |
1450 MI_SEMAPHORE_REGISTER;
1451 u32 wait_mbox = signal->engine->semaphore.mbox.wait[req->engine->hw_id];
1452 u32 *cs;
1453
1454 WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
1455
1456 cs = intel_ring_begin(req, 4);
1457 if (IS_ERR(cs))
1458 return PTR_ERR(cs);
1459
1460 *cs++ = dw1 | wait_mbox;
1461 /* Throughout all of the GEM code, seqno passed implies our current
1462 * seqno is >= the last seqno executed. However for hardware the
1463 * comparison is strictly greater than.
1464 */
1465 *cs++ = signal->global_seqno - 1;
1466 *cs++ = 0;
1467 *cs++ = MI_NOOP;
1468 intel_ring_advance(req, cs);
1469
1470 return 0;
1471 }
1472
1473 static void
1474 gen5_seqno_barrier(struct intel_engine_cs *engine)
1475 {
1476 /* MI_STORE commands are internally buffered by the GPU and not flushed
1477 * by MI_FLUSH, SyncFlush, or any other combination of
1478 * MI commands.
1479 *
1480 * "Only the submission of the store operation is guaranteed.
1481 * The write result will be complete (coherent) some time later
1482 * (this is practically a finite period but there is no guaranteed
1483 * latency)."
1484 *
1485 * Empirically, we observe that we need a delay of at least 75us to
1486 * be sure that the seqno write is visible to the CPU.
1487 */
1488 usleep_range(125, 250);
1489 }
1490
1491 static void
1492 gen6_seqno_barrier(struct intel_engine_cs *engine)
1493 {
1494 struct drm_i915_private *dev_priv = engine->i915;
1495
1496 /* Workaround to force correct ordering between irq and seqno writes on
1497 * ivb (and maybe also on snb) by reading from a CS register (like
1498 * ACTHD) before reading the status page.
1499 *
1500 * Note that this effectively stalls the read by the time it takes to
1501 * do a memory transaction, which more or less ensures that the write
1502 * from the GPU has sufficient time to invalidate the CPU cacheline.
1503 * Alternatively we could delay the interrupt from the CS ring to give
1504 * the write time to land, but that would incur a delay after every
1505 * batch i.e. much more frequent than a delay when waiting for the
1506 * interrupt (with the same net latency).
1507 *
1508 * Also note that to prevent whole machine hangs on gen7, we have to
1509 * take the spinlock to guard against concurrent cacheline access.
1510 */
1511 spin_lock_irq(&dev_priv->uncore.lock);
1512 POSTING_READ_FW(RING_ACTHD(engine->mmio_base));
1513 spin_unlock_irq(&dev_priv->uncore.lock);
1514 }
1515
1516 static void
1517 gen5_irq_enable(struct intel_engine_cs *engine)
1518 {
1519 gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
1520 }
1521
1522 static void
1523 gen5_irq_disable(struct intel_engine_cs *engine)
1524 {
1525 gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
1526 }
1527
1528 static void
1529 i9xx_irq_enable(struct intel_engine_cs *engine)
1530 {
1531 struct drm_i915_private *dev_priv = engine->i915;
1532
1533 dev_priv->irq_mask &= ~engine->irq_enable_mask;
1534 I915_WRITE(IMR, dev_priv->irq_mask);
1535 POSTING_READ_FW(RING_IMR(engine->mmio_base));
1536 }
1537
1538 static void
1539 i9xx_irq_disable(struct intel_engine_cs *engine)
1540 {
1541 struct drm_i915_private *dev_priv = engine->i915;
1542
1543 dev_priv->irq_mask |= engine->irq_enable_mask;
1544 I915_WRITE(IMR, dev_priv->irq_mask);
1545 }
1546
1547 static void
1548 i8xx_irq_enable(struct intel_engine_cs *engine)
1549 {
1550 struct drm_i915_private *dev_priv = engine->i915;
1551
1552 dev_priv->irq_mask &= ~engine->irq_enable_mask;
1553 I915_WRITE16(IMR, dev_priv->irq_mask);
1554 POSTING_READ16(RING_IMR(engine->mmio_base));
1555 }
1556
1557 static void
1558 i8xx_irq_disable(struct intel_engine_cs *engine)
1559 {
1560 struct drm_i915_private *dev_priv = engine->i915;
1561
1562 dev_priv->irq_mask |= engine->irq_enable_mask;
1563 I915_WRITE16(IMR, dev_priv->irq_mask);
1564 }
1565
1566 static int
1567 bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
1568 {
1569 u32 *cs;
1570
1571 cs = intel_ring_begin(req, 2);
1572 if (IS_ERR(cs))
1573 return PTR_ERR(cs);
1574
1575 *cs++ = MI_FLUSH;
1576 *cs++ = MI_NOOP;
1577 intel_ring_advance(req, cs);
1578 return 0;
1579 }
1580
1581 static void
1582 gen6_irq_enable(struct intel_engine_cs *engine)
1583 {
1584 struct drm_i915_private *dev_priv = engine->i915;
1585
1586 I915_WRITE_IMR(engine,
1587 ~(engine->irq_enable_mask |
1588 engine->irq_keep_mask));
1589 gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
1590 }
1591
1592 static void
1593 gen6_irq_disable(struct intel_engine_cs *engine)
1594 {
1595 struct drm_i915_private *dev_priv = engine->i915;
1596
1597 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
1598 gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
1599 }
1600
1601 static void
1602 hsw_vebox_irq_enable(struct intel_engine_cs *engine)
1603 {
1604 struct drm_i915_private *dev_priv = engine->i915;
1605
1606 I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
1607 gen6_unmask_pm_irq(dev_priv, engine->irq_enable_mask);
1608 }
1609
1610 static void
1611 hsw_vebox_irq_disable(struct intel_engine_cs *engine)
1612 {
1613 struct drm_i915_private *dev_priv = engine->i915;
1614
1615 I915_WRITE_IMR(engine, ~0);
1616 gen6_mask_pm_irq(dev_priv, engine->irq_enable_mask);
1617 }
1618
1619 static void
1620 gen8_irq_enable(struct intel_engine_cs *engine)
1621 {
1622 struct drm_i915_private *dev_priv = engine->i915;
1623
1624 I915_WRITE_IMR(engine,
1625 ~(engine->irq_enable_mask |
1626 engine->irq_keep_mask));
1627 POSTING_READ_FW(RING_IMR(engine->mmio_base));
1628 }
1629
1630 static void
1631 gen8_irq_disable(struct intel_engine_cs *engine)
1632 {
1633 struct drm_i915_private *dev_priv = engine->i915;
1634
1635 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
1636 }
1637
1638 static int
1639 i965_emit_bb_start(struct drm_i915_gem_request *req,
1640 u64 offset, u32 length,
1641 unsigned int dispatch_flags)
1642 {
1643 u32 *cs;
1644
1645 cs = intel_ring_begin(req, 2);
1646 if (IS_ERR(cs))
1647 return PTR_ERR(cs);
1648
1649 *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | (dispatch_flags &
1650 I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965);
1651 *cs++ = offset;
1652 intel_ring_advance(req, cs);
1653
1654 return 0;
1655 }
1656
1657 /* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
1658 #define I830_BATCH_LIMIT (256*1024)
1659 #define I830_TLB_ENTRIES (2)
1660 #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
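/* For reference: max(I830_TLB_ENTRIES * 4096, I830_BATCH_LIMIT) = max(8K, 256K) = 256KiB. */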
1661 static int
1662 i830_emit_bb_start(struct drm_i915_gem_request *req,
1663 u64 offset, u32 len,
1664 unsigned int dispatch_flags)
1665 {
1666 u32 *cs, cs_offset = i915_ggtt_offset(req->engine->scratch);
1667
1668 cs = intel_ring_begin(req, 6);
1669 if (IS_ERR(cs))
1670 return PTR_ERR(cs);
1671
1672 /* Evict the invalid PTE TLBs */
1673 *cs++ = COLOR_BLT_CMD | BLT_WRITE_RGBA;
1674 *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096;
1675 *cs++ = I830_TLB_ENTRIES << 16 | 4; /* load each page */
1676 *cs++ = cs_offset;
1677 *cs++ = 0xdeadbeef;
1678 *cs++ = MI_NOOP;
1679 intel_ring_advance(req, cs);
1680
1681 if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
1682 if (len > I830_BATCH_LIMIT)
1683 return -ENOSPC;
1684
1685 cs = intel_ring_begin(req, 6 + 2);
1686 if (IS_ERR(cs))
1687 return PTR_ERR(cs);
1688
1689 /* Blit the batch (which now has all relocs applied) to the
1690 * stable batch scratch bo area (so that the CS never
1691 * stumbles over its tlb invalidation bug) ...
1692 */
1693 *cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA;
1694 *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096;
1695 *cs++ = DIV_ROUND_UP(len, 4096) << 16 | 4096;
1696 *cs++ = cs_offset;
1697 *cs++ = 4096;
1698 *cs++ = offset;
1699
1700 *cs++ = MI_FLUSH;
1701 *cs++ = MI_NOOP;
1702 intel_ring_advance(req, cs);
1703
1704 /* ... and execute it. */
1705 offset = cs_offset;
1706 }
1707
1708 cs = intel_ring_begin(req, 2);
1709 if (IS_ERR(cs))
1710 return PTR_ERR(cs);
1711
1712 *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
1713 *cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
1714 MI_BATCH_NON_SECURE);
1715 intel_ring_advance(req, cs);
1716
1717 return 0;
1718 }
1719
1720 static int
1721 i915_emit_bb_start(struct drm_i915_gem_request *req,
1722 u64 offset, u32 len,
1723 unsigned int dispatch_flags)
1724 {
1725 u32 *cs;
1726
1727 cs = intel_ring_begin(req, 2);
1728 if (IS_ERR(cs))
1729 return PTR_ERR(cs);
1730
1731 *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
1732 *cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
1733 MI_BATCH_NON_SECURE);
1734 intel_ring_advance(req, cs);
1735
1736 return 0;
1737 }
1738
1739 static void cleanup_phys_status_page(struct intel_engine_cs *engine)
1740 {
1741 struct drm_i915_private *dev_priv = engine->i915;
1742
1743 if (!dev_priv->status_page_dmah)
1744 return;
1745
1746 drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah);
1747 engine->status_page.page_addr = NULL;
1748 }
1749
1750 static void cleanup_status_page(struct intel_engine_cs *engine)
1751 {
1752 struct i915_vma *vma;
1753 struct drm_i915_gem_object *obj;
1754
1755 vma = fetch_and_zero(&engine->status_page.vma);
1756 if (!vma)
1757 return;
1758
1759 obj = vma->obj;
1760
1761 i915_vma_unpin(vma);
1762 i915_vma_close(vma);
1763
1764 i915_gem_object_unpin_map(obj);
1765 __i915_gem_object_release_unless_active(obj);
1766 }
1767
1768 static int init_status_page(struct intel_engine_cs *engine)
1769 {
1770 struct drm_i915_gem_object *obj;
1771 struct i915_vma *vma;
1772 unsigned int flags;
1773 void *vaddr;
1774 int ret;
1775
1776 obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
1777 if (IS_ERR(obj)) {
1778 DRM_ERROR("Failed to allocate status page\n");
1779 return PTR_ERR(obj);
1780 }
1781
1782 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
1783 if (ret)
1784 goto err;
1785
1786 vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
1787 if (IS_ERR(vma)) {
1788 ret = PTR_ERR(vma);
1789 goto err;
1790 }
1791
1792 flags = PIN_GLOBAL;
1793 if (!HAS_LLC(engine->i915))
1794 /* On g33, we cannot place HWS above 256MiB, so
1795 * restrict its pinning to the low mappable arena.
1796 * Though this restriction is not documented for
1797 * gen4, gen5, or byt, they also behave similarly
1798 * and hang if the HWS is placed at the top of the
1799 * GTT. To generalise, it appears that all !llc
1800 * platforms have issues with us placing the HWS
1801 * above the mappable region (even though we never
1802 * actually map it).
1803 */
1804 flags |= PIN_MAPPABLE;
1805 ret = i915_vma_pin(vma, 0, 4096, flags);
1806 if (ret)
1807 goto err;
1808
1809 vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
1810 if (IS_ERR(vaddr)) {
1811 ret = PTR_ERR(vaddr);
1812 goto err_unpin;
1813 }
1814
1815 engine->status_page.vma = vma;
1816 engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
1817 engine->status_page.page_addr = memset(vaddr, 0, PAGE_SIZE);
1818
1819 DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
1820 engine->name, i915_ggtt_offset(vma));
1821 return 0;
1822
1823 err_unpin:
1824 i915_vma_unpin(vma);
1825 err:
1826 i915_gem_object_put(obj);
1827 return ret;
1828 }
1829
1830 static int init_phys_status_page(struct intel_engine_cs *engine)
1831 {
1832 struct drm_i915_private *dev_priv = engine->i915;
1833
1834 dev_priv->status_page_dmah =
1835 drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
1836 if (!dev_priv->status_page_dmah)
1837 return -ENOMEM;
1838
1839 engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1840 memset(engine->status_page.page_addr, 0, PAGE_SIZE);
1841
1842 return 0;
1843 }
1844
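/*
 * Added summary: intel_ring_pin() binds the ring into the GGTT (optionally
 * biased above offset_bias) and maps it for CPU writes: through the GTT
 * aperture when the vma is map-and-fenceable, otherwise via a direct kernel
 * mapping (WB on LLC platforms, WC otherwise).
 */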
1845 int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias)
1846 {
1847 unsigned int flags;
1848 enum i915_map_type map;
1849 struct i915_vma *vma = ring->vma;
1850 void *addr;
1851 int ret;
1852
1853 GEM_BUG_ON(ring->vaddr);
1854
1855 map = HAS_LLC(ring->engine->i915) ? I915_MAP_WB : I915_MAP_WC;
1856
1857 flags = PIN_GLOBAL;
1858 if (offset_bias)
1859 flags |= PIN_OFFSET_BIAS | offset_bias;
1860 if (vma->obj->stolen)
1861 flags |= PIN_MAPPABLE;
1862
1863 if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
1864 if (flags & PIN_MAPPABLE || map == I915_MAP_WC)
1865 ret = i915_gem_object_set_to_gtt_domain(vma->obj, true);
1866 else
1867 ret = i915_gem_object_set_to_cpu_domain(vma->obj, true);
1868 if (unlikely(ret))
1869 return ret;
1870 }
1871
1872 ret = i915_vma_pin(vma, 0, PAGE_SIZE, flags);
1873 if (unlikely(ret))
1874 return ret;
1875
1876 if (i915_vma_is_map_and_fenceable(vma))
1877 addr = (void __force *)i915_vma_pin_iomap(vma);
1878 else
1879 addr = i915_gem_object_pin_map(vma->obj, map);
1880 if (IS_ERR(addr))
1881 goto err;
1882
1883 ring->vaddr = addr;
1884 return 0;
1885
1886 err:
1887 i915_vma_unpin(vma);
1888 return PTR_ERR(addr);
1889 }
1890
1891 void intel_ring_unpin(struct intel_ring *ring)
1892 {
1893 GEM_BUG_ON(!ring->vma);
1894 GEM_BUG_ON(!ring->vaddr);
1895
1896 if (i915_vma_is_map_and_fenceable(ring->vma))
1897 i915_vma_unpin_iomap(ring->vma);
1898 else
1899 i915_gem_object_unpin_map(ring->vma->obj);
1900 ring->vaddr = NULL;
1901
1902 i915_vma_unpin(ring->vma);
1903 }
1904
1905 static struct i915_vma *
1906 intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
1907 {
1908 struct drm_i915_gem_object *obj;
1909 struct i915_vma *vma;
1910
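/* Prefer stolen memory for the ring and fall back to a regular GEM object. */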
1911 obj = i915_gem_object_create_stolen(dev_priv, size);
1912 if (!obj)
1913 obj = i915_gem_object_create(dev_priv, size);
1914 if (IS_ERR(obj))
1915 return ERR_CAST(obj);
1916
1917 /* Mark ring buffers as read-only from the GPU side by default */
1918 obj->gt_ro = 1;
1919
1920 vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
1921 if (IS_ERR(vma))
1922 goto err;
1923
1924 return vma;
1925
1926 err:
1927 i915_gem_object_put(obj);
1928 return vma;
1929 }
1930
1931 struct intel_ring *
1932 intel_engine_create_ring(struct intel_engine_cs *engine, int size)
1933 {
1934 struct intel_ring *ring;
1935 struct i915_vma *vma;
1936
1937 GEM_BUG_ON(!is_power_of_2(size));
1938 GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);
1939
1940 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
1941 if (!ring)
1942 return ERR_PTR(-ENOMEM);
1943
1944 ring->engine = engine;
1945
1946 INIT_LIST_HEAD(&ring->request_list);
1947
1948 ring->size = size;
1949 /* Work around an erratum on the i830 that causes a hang if
1950 * the TAIL pointer points within the last 2 cachelines
1951 * of the buffer.
1952 */
1953 ring->effective_size = size;
1954 if (IS_I830(engine->i915) || IS_I845G(engine->i915))
1955 ring->effective_size -= 2 * CACHELINE_BYTES;
1956
1957 ring->last_retired_head = -1;
1958 intel_ring_update_space(ring);
1959
1960 vma = intel_ring_create_vma(engine->i915, size);
1961 if (IS_ERR(vma)) {
1962 kfree(ring);
1963 return ERR_CAST(vma);
1964 }
1965 ring->vma = vma;
1966
1967 return ring;
1968 }
1969
1970 void
1971 intel_ring_free(struct intel_ring *ring)
1972 {
1973 struct drm_i915_gem_object *obj = ring->vma->obj;
1974
1975 i915_vma_close(ring->vma);
1976 __i915_gem_object_release_unless_active(obj);
1977
1978 kfree(ring);
1979 }
1980
1981 static int context_pin(struct i915_gem_context *ctx)
1982 {
1983 struct i915_vma *vma = ctx->engine[RCS].state;
1984 int ret;
1985
1986 /* Clear this page out of any CPU caches for coherent swap-in/out.
1987 * We only want to do this on the first bind so that we do not stall
1988 * on an active context (which by nature is already on the GPU).
1989 */
1990 if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
1991 ret = i915_gem_object_set_to_gtt_domain(vma->obj, false);
1992 if (ret)
1993 return ret;
1994 }
1995
1996 return i915_vma_pin(vma, 0, ctx->ggtt_alignment, PIN_GLOBAL | PIN_HIGH);
1997 }
1998
1999 static int intel_ring_context_pin(struct intel_engine_cs *engine,
2000 struct i915_gem_context *ctx)
2001 {
2002 struct intel_context *ce = &ctx->engine[engine->id];
2003 int ret;
2004
2005 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
2006
2007 if (ce->pin_count++)
2008 return 0;
2009
2010 if (ce->state) {
2011 ret = context_pin(ctx);
2012 if (ret)
2013 goto error;
2014 }
2015
2016 /* The kernel context is only used as a placeholder for flushing the
2017 * active context. It is never used for submitting user rendering and
2018 * as such never requires the golden render context, and so we can skip
2019 * emitting it when we switch to the kernel context. This is required
2020 * as during eviction we cannot allocate and pin the renderstate in
2021 * order to initialise the context.
2022 */
2023 if (i915_gem_context_is_kernel(ctx))
2024 ce->initialised = true;
2025
2026 i915_gem_context_get(ctx);
2027 return 0;
2028
2029 error:
2030 ce->pin_count = 0;
2031 return ret;
2032 }
2033
2034 static void intel_ring_context_unpin(struct intel_engine_cs *engine,
2035 struct i915_gem_context *ctx)
2036 {
2037 struct intel_context *ce = &ctx->engine[engine->id];
2038
2039 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
2040 GEM_BUG_ON(ce->pin_count == 0);
2041
2042 if (--ce->pin_count)
2043 return;
2044
2045 if (ce->state)
2046 i915_vma_unpin(ce->state);
2047
2048 i915_gem_context_put(ctx);
2049 }
2050
2051 static int intel_init_ring_buffer(struct intel_engine_cs *engine)
2052 {
2053 struct drm_i915_private *dev_priv = engine->i915;
2054 struct intel_ring *ring;
2055 int ret;
2056
2057 WARN_ON(engine->buffer);
2058
2059 intel_engine_setup_common(engine);
2060
2061 ret = intel_engine_init_common(engine);
2062 if (ret)
2063 goto error;
2064
2065 ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE);
2066 if (IS_ERR(ring)) {
2067 ret = PTR_ERR(ring);
2068 goto error;
2069 }
2070
2071 if (HWS_NEEDS_PHYSICAL(dev_priv)) {
2072 WARN_ON(engine->id != RCS);
2073 ret = init_phys_status_page(engine);
2074 if (ret)
2075 goto error;
2076 } else {
2077 ret = init_status_page(engine);
2078 if (ret)
2079 goto error;
2080 }
2081
2082 /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
2083 ret = intel_ring_pin(ring, I915_GTT_PAGE_SIZE);
2084 if (ret) {
2085 intel_ring_free(ring);
2086 goto error;
2087 }
2088 engine->buffer = ring;
2089
2090 return 0;
2091
2092 error:
2093 intel_engine_cleanup(engine);
2094 return ret;
2095 }
2096
2097 void intel_engine_cleanup(struct intel_engine_cs *engine)
2098 {
2099 struct drm_i915_private *dev_priv;
2100
2101 dev_priv = engine->i915;
2102
2103 if (engine->buffer) {
2104 WARN_ON(INTEL_GEN(dev_priv) > 2 &&
2105 (I915_READ_MODE(engine) & MODE_IDLE) == 0);
2106
2107 intel_ring_unpin(engine->buffer);
2108 intel_ring_free(engine->buffer);
2109 engine->buffer = NULL;
2110 }
2111
2112 if (engine->cleanup)
2113 engine->cleanup(engine);
2114
2115 if (HWS_NEEDS_PHYSICAL(dev_priv)) {
2116 WARN_ON(engine->id != RCS);
2117 cleanup_phys_status_page(engine);
2118 } else {
2119 cleanup_status_page(engine);
2120 }
2121
2122 intel_engine_cleanup_common(engine);
2123
2124 engine->i915 = NULL;
2125 dev_priv->engine[engine->id] = NULL;
2126 kfree(engine);
2127 }
2128
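/*
 * After a suspend/resume cycle the rings contain no requests, so reset
 * the software copy of HEAD to wherever TAIL was left and drop any stale
 * retirement bookkeeping; each ring is treated as empty again.
 */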
2129 void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
2130 {
2131 struct intel_engine_cs *engine;
2132 enum intel_engine_id id;
2133
2134 for_each_engine(engine, dev_priv, id) {
2135 engine->buffer->head = engine->buffer->tail;
2136 engine->buffer->last_retired_head = -1;
2137 }
2138 }
2139
2140 static int ring_request_alloc(struct drm_i915_gem_request *request)
2141 {
2142 u32 *cs;
2143
2144 GEM_BUG_ON(!request->ctx->engine[request->engine->id].pin_count);
2145
2146 /* Flush enough space to reduce the likelihood of waiting after
2147 * we start building the request - in which case we will just
2148 * have to repeat work.
2149 */
2150 request->reserved_space += LEGACY_REQUEST_SIZE;
2151
2152 GEM_BUG_ON(!request->engine->buffer);
2153 request->ring = request->engine->buffer;
2154
2155 cs = intel_ring_begin(request, 0);
2156 if (IS_ERR(cs))
2157 return PTR_ERR(cs);
2158
2159 request->reserved_space -= LEGACY_REQUEST_SIZE;
2160 return 0;
2161 }
2162
2163 static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
2164 {
2165 struct intel_ring *ring = req->ring;
2166 struct drm_i915_gem_request *target;
2167 long timeout;
2168
2169 lockdep_assert_held(&req->i915->drm.struct_mutex);
2170
2171 intel_ring_update_space(ring);
2172 if (ring->space >= bytes)
2173 return 0;
2174
2175 /*
2176 * Space is reserved in the ringbuffer for finalising the request,
2177 * as that cannot be allowed to fail. During request finalisation,
2178 * reserved_space is set to 0 to stop the overallocation and the
2179 * assumption is that then we never need to wait (which has the
2180 * risk of failing with EINTR).
2181 *
2182 * See also i915_gem_request_alloc() and i915_add_request().
2183 */
2184 GEM_BUG_ON(!req->reserved_space);
2185
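/*
 * Requests on the ring are retired in order, so walk the list from
 * oldest to newest and pick the first request whose completion would
 * release enough space behind it.
 */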
2186 list_for_each_entry(target, &ring->request_list, ring_link) {
2187 unsigned space;
2188
2189 /* Would completion of this request free enough space? */
2190 space = __intel_ring_space(target->postfix, ring->tail,
2191 ring->size);
2192 if (space >= bytes)
2193 break;
2194 }
2195
2196 if (WARN_ON(&target->ring_link == &ring->request_list))
2197 return -ENOSPC;
2198
2199 timeout = i915_wait_request(target,
2200 I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
2201 MAX_SCHEDULE_TIMEOUT);
2202 if (timeout < 0)
2203 return timeout;
2204
2205 i915_gem_request_retire_upto(target);
2206
2207 intel_ring_update_space(ring);
2208 GEM_BUG_ON(ring->space < bytes);
2209 return 0;
2210 }
2211
2212 u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
2213 {
2214 struct intel_ring *ring = req->ring;
2215 int remain_actual = ring->size - ring->tail;
2216 int remain_usable = ring->effective_size - ring->tail;
2217 int bytes = num_dwords * sizeof(u32);
2218 int total_bytes, wait_bytes;
2219 bool need_wrap = false;
2220 u32 *cs;
2221
2222 total_bytes = bytes + req->reserved_space;
2223
2224 if (unlikely(bytes > remain_usable)) {
2225 /*
2226 * Not enough space for the basic request, so we need to flush
2227 * out the remainder and then wait for base + reserved.
2228 */
2229 wait_bytes = remain_actual + total_bytes;
2230 need_wrap = true;
2231 } else if (unlikely(total_bytes > remain_usable)) {
2232 /*
2233 * The base request will fit but the reserved space
2234 * falls off the end, so we don't need an immediate wrap
2235 * and only need to effectively wait for the reserved
2236 * size of space from the start of the ringbuffer.
2237 */
2238 wait_bytes = remain_actual + req->reserved_space;
2239 } else {
2240 /* No wrapping required, just waiting. */
2241 wait_bytes = total_bytes;
2242 }
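/*
 * Worked example (hypothetical numbers): with size = effective_size =
 * 4096, tail = 4000 and reserved_space = 64, a 128-byte request gives
 * remain_usable = 96 < 128, so we must wrap and wait for
 * remain_actual (96) + 128 + 64 bytes before filling the tail with
 * MI_NOOPs and restarting at offset 0.
 */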
2243
2244 if (wait_bytes > ring->space) {
2245 int ret = wait_for_space(req, wait_bytes);
2246 if (unlikely(ret))
2247 return ERR_PTR(ret);
2248 }
2249
2250 if (unlikely(need_wrap)) {
2251 GEM_BUG_ON(remain_actual > ring->space);
2252 GEM_BUG_ON(ring->tail + remain_actual > ring->size);
2253
2254 /* Fill the rest of the ring with MI_NOOP (which is zero) */
2255 memset(ring->vaddr + ring->tail, 0, remain_actual);
2256 ring->tail = 0;
2257 ring->space -= remain_actual;
2258 }
2259
2260 GEM_BUG_ON(ring->tail > ring->size - bytes);
2261 cs = ring->vaddr + ring->tail;
2262 ring->tail += bytes;
2263 ring->space -= bytes;
2264 GEM_BUG_ON(ring->space < 0);
2265
2266 return cs;
2267 }
2268
2269 /* Align the ring tail to a cacheline boundary */
2270 int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
2271 {
2272 int num_dwords =
2273 (req->ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
2274 u32 *cs;
2275
2276 if (num_dwords == 0)
2277 return 0;
2278
2279 num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
2280 cs = intel_ring_begin(req, num_dwords);
2281 if (IS_ERR(cs))
2282 return PTR_ERR(cs);
2283
2284 while (num_dwords--)
2285 *cs++ = MI_NOOP;
2286
2287 intel_ring_advance(req, cs);
2288
2289 return 0;
2290 }
2291
2292 static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
2293 {
2294 struct drm_i915_private *dev_priv = request->i915;
2295
2296 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
2297
2298 /* Every tail move must follow the sequence below */
2299
2300 /* Disable notification that the ring is IDLE. The GT
2301 * will then assume that it is busy and bring it out of rc6.
2302 */
2303 I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
2304 _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
2305
2306 /* Clear the context id. Here be magic! */
2307 I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0);
2308
2309 /* Wait for the ring not to be idle, i.e. for it to wake up. */
2310 if (intel_wait_for_register_fw(dev_priv,
2311 GEN6_BSD_SLEEP_PSMI_CONTROL,
2312 GEN6_BSD_SLEEP_INDICATOR,
2313 0,
2314 50))
2315 DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
2316
2317 /* Now that the ring is fully powered up, update the tail */
2318 i9xx_submit_request(request);
2319
2320 /* Let the ring send IDLE messages to the GT again,
2321 * and so let it sleep to conserve power when idle.
2322 */
2323 I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
2324 _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
2325
2326 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
2327 }
2328
2329 static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
2330 {
2331 u32 cmd, *cs;
2332
2333 cs = intel_ring_begin(req, 4);
2334 if (IS_ERR(cs))
2335 return PTR_ERR(cs);
2336
2337 cmd = MI_FLUSH_DW;
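/*
 * On gen8+ the flush writes a 64-bit post-sync address, so the command
 * is one dword longer; the low bits of MI_FLUSH_DW encode that length.
 */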
2338 if (INTEL_GEN(req->i915) >= 8)
2339 cmd += 1;
2340
2341 /* We always require a command barrier so that subsequent
2342 * commands, such as breadcrumb interrupts, are strictly ordered
2343 * wrt the contents of the write cache being flushed to memory
2344 * (and thus being coherent from the CPU).
2345 */
2346 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
2347
2348 /*
2349 * Bspec vol 1c.5 - video engine command streamer:
2350 * "If ENABLED, all TLBs will be invalidated once the flush
2351 * operation is complete. This bit is only valid when the
2352 * Post-Sync Operation field is a value of 1h or 3h."
2353 */
2354 if (mode & EMIT_INVALIDATE)
2355 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
2356
2357 *cs++ = cmd;
2358 *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
2359 if (INTEL_GEN(req->i915) >= 8) {
2360 *cs++ = 0; /* upper addr */
2361 *cs++ = 0; /* value */
2362 } else {
2363 *cs++ = 0;
2364 *cs++ = MI_NOOP;
2365 }
2366 intel_ring_advance(req, cs);
2367 return 0;
2368 }
2369
2370 static int
2371 gen8_emit_bb_start(struct drm_i915_gem_request *req,
2372 u64 offset, u32 len,
2373 unsigned int dispatch_flags)
2374 {
2375 bool ppgtt = USES_PPGTT(req->i915) &&
2376 !(dispatch_flags & I915_DISPATCH_SECURE);
2377 u32 *cs;
2378
2379 cs = intel_ring_begin(req, 4);
2380 if (IS_ERR(cs))
2381 return PTR_ERR(cs);
2382
2383 /* FIXME(BDW): Address space and security selectors. */
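/* Bit 8 of MI_BATCH_BUFFER_START selects the PPGTT address space on gen8. */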
2384 *cs++ = MI_BATCH_BUFFER_START_GEN8 | (ppgtt << 8) | (dispatch_flags &
2385 I915_DISPATCH_RS ? MI_BATCH_RESOURCE_STREAMER : 0);
2386 *cs++ = lower_32_bits(offset);
2387 *cs++ = upper_32_bits(offset);
2388 *cs++ = MI_NOOP;
2389 intel_ring_advance(req, cs);
2390
2391 return 0;
2392 }
2393
2394 static int
2395 hsw_emit_bb_start(struct drm_i915_gem_request *req,
2396 u64 offset, u32 len,
2397 unsigned int dispatch_flags)
2398 {
2399 u32 *cs;
2400
2401 cs = intel_ring_begin(req, 2);
2402 if (IS_ERR(cs))
2403 return PTR_ERR(cs);
2404
2405 *cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
2406 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
2407 (dispatch_flags & I915_DISPATCH_RS ?
2408 MI_BATCH_RESOURCE_STREAMER : 0);
2409 /* bits 0-7 are the length on GEN6+ */
2410 *cs++ = offset;
2411 intel_ring_advance(req, cs);
2412
2413 return 0;
2414 }
2415
2416 static int
2417 gen6_emit_bb_start(struct drm_i915_gem_request *req,
2418 u64 offset, u32 len,
2419 unsigned int dispatch_flags)
2420 {
2421 u32 *cs;
2422
2423 cs = intel_ring_begin(req, 2);
2424 if (IS_ERR(cs))
2425 return PTR_ERR(cs);
2426
2427 *cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
2428 0 : MI_BATCH_NON_SECURE_I965);
2429 /* bits 0-7 are the length on GEN6+ */
2430 *cs++ = offset;
2431 intel_ring_advance(req, cs);
2432
2433 return 0;
2434 }
2435
2436 /* Blitter support (SandyBridge+) */
2437
2438 static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
2439 {
2440 u32 cmd, *cs;
2441
2442 cs = intel_ring_begin(req, 4);
2443 if (IS_ERR(cs))
2444 return PTR_ERR(cs);
2445
2446 cmd = MI_FLUSH_DW;
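/* As in gen6_bsd_ring_flush(), gen8+ needs an extra dword for the 64-bit post-sync address. */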
2447 if (INTEL_GEN(req->i915) >= 8)
2448 cmd += 1;
2449
2450 /* We always require a command barrier so that subsequent
2451 * commands, such as breadcrumb interrupts, are strictly ordered
2452 * wrt the contents of the write cache being flushed to memory
2453 * (and thus being coherent from the CPU).
2454 */
2455 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
2456
2457 /*
2458 * Bspec vol 1c.3 - blitter engine command streamer:
2459 * "If ENABLED, all TLBs will be invalidated once the flush
2460 * operation is complete. This bit is only valid when the
2461 * Post-Sync Operation field is a value of 1h or 3h."
2462 */
2463 if (mode & EMIT_INVALIDATE)
2464 cmd |= MI_INVALIDATE_TLB;
2465 *cs++ = cmd;
2466 *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
2467 if (INTEL_GEN(req->i915) >= 8) {
2468 *cs++ = 0; /* upper addr */
2469 *cs++ = 0; /* value */
2470 } else {
2471 *cs++ = 0;
2472 *cs++ = MI_NOOP;
2473 }
2474 intel_ring_advance(req, cs);
2475
2476 return 0;
2477 }
2478
2479 static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
2480 struct intel_engine_cs *engine)
2481 {
2482 struct drm_i915_gem_object *obj;
2483 int ret, i;
2484
2485 if (!i915.semaphores)
2486 return;
2487
2488 if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore) {
2489 struct i915_vma *vma;
2490
2491 obj = i915_gem_object_create(dev_priv, PAGE_SIZE);
2492 if (IS_ERR(obj))
2493 goto err;
2494
2495 vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
2496 if (IS_ERR(vma))
2497 goto err_obj;
2498
2499 ret = i915_gem_object_set_to_gtt_domain(obj, false);
2500 if (ret)
2501 goto err_obj;
2502
2503 ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
2504 if (ret)
2505 goto err_obj;
2506
2507 dev_priv->semaphore = vma;
2508 }
2509
2510 if (INTEL_GEN(dev_priv) >= 8) {
2511 u32 offset = i915_ggtt_offset(dev_priv->semaphore);
2512
2513 engine->semaphore.sync_to = gen8_ring_sync_to;
2514 engine->semaphore.signal = gen8_xcs_signal;
2515
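/*
 * Each (signaller, waiter) engine pair gets its own slot in the shared
 * semaphore page; an engine never signals itself, hence the INVALID
 * marker for its own index.
 */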
2516 for (i = 0; i < I915_NUM_ENGINES; i++) {
2517 u32 ring_offset;
2518
2519 if (i != engine->id)
2520 ring_offset = offset + GEN8_SEMAPHORE_OFFSET(engine->id, i);
2521 else
2522 ring_offset = MI_SEMAPHORE_SYNC_INVALID;
2523
2524 engine->semaphore.signal_ggtt[i] = ring_offset;
2525 }
2526 } else if (INTEL_GEN(dev_priv) >= 6) {
2527 engine->semaphore.sync_to = gen6_ring_sync_to;
2528 engine->semaphore.signal = gen6_signal;
2529
2530 /*
2531 * These semaphores are only used on pre-gen8 platforms,
2532 * and there is no VCS2 ring before gen8. The semaphore
2533 * between RCS and VCS2 is therefore initialized as
2534 * INVALID; gen8 initializes the VCS2/RCS semaphores
2535 * separately.
2536 */
2537 for (i = 0; i < GEN6_NUM_SEMAPHORES; i++) {
2538 static const struct {
2539 u32 wait_mbox;
2540 i915_reg_t mbox_reg;
2541 } sem_data[GEN6_NUM_SEMAPHORES][GEN6_NUM_SEMAPHORES] = {
2542 [RCS_HW] = {
2543 [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RV, .mbox_reg = GEN6_VRSYNC },
2544 [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RB, .mbox_reg = GEN6_BRSYNC },
2545 [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC },
2546 },
2547 [VCS_HW] = {
2548 [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VR, .mbox_reg = GEN6_RVSYNC },
2549 [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VB, .mbox_reg = GEN6_BVSYNC },
2550 [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC },
2551 },
2552 [BCS_HW] = {
2553 [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BR, .mbox_reg = GEN6_RBSYNC },
2554 [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BV, .mbox_reg = GEN6_VBSYNC },
2555 [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC },
2556 },
2557 [VECS_HW] = {
2558 [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC },
2559 [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC },
2560 [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC },
2561 },
2562 };
2563 u32 wait_mbox;
2564 i915_reg_t mbox_reg;
2565
2566 if (i == engine->hw_id) {
2567 wait_mbox = MI_SEMAPHORE_SYNC_INVALID;
2568 mbox_reg = GEN6_NOSYNC;
2569 } else {
2570 wait_mbox = sem_data[engine->hw_id][i].wait_mbox;
2571 mbox_reg = sem_data[engine->hw_id][i].mbox_reg;
2572 }
2573
2574 engine->semaphore.mbox.wait[i] = wait_mbox;
2575 engine->semaphore.mbox.signal[i] = mbox_reg;
2576 }
2577 }
2578
2579 return;
2580
2581 err_obj:
2582 i915_gem_object_put(obj);
2583 err:
2584 DRM_DEBUG_DRIVER("Failed to allocate space for semaphores, disabling\n");
2585 i915.semaphores = 0;
2586 }
2587
2588 static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
2589 struct intel_engine_cs *engine)
2590 {
2591 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << engine->irq_shift;
2592
2593 if (INTEL_GEN(dev_priv) >= 8) {
2594 engine->irq_enable = gen8_irq_enable;
2595 engine->irq_disable = gen8_irq_disable;
2596 engine->irq_seqno_barrier = gen6_seqno_barrier;
2597 } else if (INTEL_GEN(dev_priv) >= 6) {
2598 engine->irq_enable = gen6_irq_enable;
2599 engine->irq_disable = gen6_irq_disable;
2600 engine->irq_seqno_barrier = gen6_seqno_barrier;
2601 } else if (INTEL_GEN(dev_priv) >= 5) {
2602 engine->irq_enable = gen5_irq_enable;
2603 engine->irq_disable = gen5_irq_disable;
2604 engine->irq_seqno_barrier = gen5_seqno_barrier;
2605 } else if (INTEL_GEN(dev_priv) >= 3) {
2606 engine->irq_enable = i9xx_irq_enable;
2607 engine->irq_disable = i9xx_irq_disable;
2608 } else {
2609 engine->irq_enable = i8xx_irq_enable;
2610 engine->irq_disable = i8xx_irq_disable;
2611 }
2612 }
2613
2614 static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
2615 struct intel_engine_cs *engine)
2616 {
2617 intel_ring_init_irq(dev_priv, engine);
2618 intel_ring_init_semaphores(dev_priv, engine);
2619
2620 engine->init_hw = init_ring_common;
2621 engine->reset_hw = reset_ring_common;
2622
2623 engine->context_pin = intel_ring_context_pin;
2624 engine->context_unpin = intel_ring_context_unpin;
2625
2626 engine->request_alloc = ring_request_alloc;
2627
2628 engine->emit_breadcrumb = i9xx_emit_breadcrumb;
2629 engine->emit_breadcrumb_sz = i9xx_emit_breadcrumb_sz;
2630 if (i915.semaphores) {
2631 int num_rings;
2632
2633 engine->emit_breadcrumb = gen6_sema_emit_breadcrumb;
2634
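/*
 * The semaphore-signalling breadcrumbs are larger: budget extra dwords
 * per other ring (6 on gen8+, 3 on gen6/7, padded to an even count).
 */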
2635 num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1;
2636 if (INTEL_GEN(dev_priv) >= 8) {
2637 engine->emit_breadcrumb_sz += num_rings * 6;
2638 } else {
2639 engine->emit_breadcrumb_sz += num_rings * 3;
2640 if (num_rings & 1)
2641 engine->emit_breadcrumb_sz++;
2642 }
2643 }
2644 engine->submit_request = i9xx_submit_request;
2645
2646 if (INTEL_GEN(dev_priv) >= 8)
2647 engine->emit_bb_start = gen8_emit_bb_start;
2648 else if (INTEL_GEN(dev_priv) >= 6)
2649 engine->emit_bb_start = gen6_emit_bb_start;
2650 else if (INTEL_GEN(dev_priv) >= 4)
2651 engine->emit_bb_start = i965_emit_bb_start;
2652 else if (IS_I830(dev_priv) || IS_I845G(dev_priv))
2653 engine->emit_bb_start = i830_emit_bb_start;
2654 else
2655 engine->emit_bb_start = i915_emit_bb_start;
2656 }
2657
2658 int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
2659 {
2660 struct drm_i915_private *dev_priv = engine->i915;
2661 int ret;
2662
2663 intel_ring_default_vfuncs(dev_priv, engine);
2664
2665 if (HAS_L3_DPF(dev_priv))
2666 engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2667
2668 if (INTEL_GEN(dev_priv) >= 8) {
2669 engine->init_context = intel_rcs_ctx_init;
2670 engine->emit_breadcrumb = gen8_render_emit_breadcrumb;
2671 engine->emit_breadcrumb_sz = gen8_render_emit_breadcrumb_sz;
2672 engine->emit_flush = gen8_render_ring_flush;
2673 if (i915.semaphores) {
2674 int num_rings;
2675
2676 engine->semaphore.signal = gen8_rcs_signal;
2677
2678 num_rings =
2679 hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1;
2680 engine->emit_breadcrumb_sz += num_rings * 6;
2681 }
2682 } else if (INTEL_GEN(dev_priv) >= 6) {
2683 engine->init_context = intel_rcs_ctx_init;
2684 engine->emit_flush = gen7_render_ring_flush;
2685 if (IS_GEN6(dev_priv))
2686 engine->emit_flush = gen6_render_ring_flush;
2687 } else if (IS_GEN5(dev_priv)) {
2688 engine->emit_flush = gen4_render_ring_flush;
2689 } else {
2690 if (INTEL_GEN(dev_priv) < 4)
2691 engine->emit_flush = gen2_render_ring_flush;
2692 else
2693 engine->emit_flush = gen4_render_ring_flush;
2694 engine->irq_enable_mask = I915_USER_INTERRUPT;
2695 }
2696
2697 if (IS_HASWELL(dev_priv))
2698 engine->emit_bb_start = hsw_emit_bb_start;
2699
2700 engine->init_hw = init_render_ring;
2701 engine->cleanup = render_ring_cleanup;
2702
2703 ret = intel_init_ring_buffer(engine);
2704 if (ret)
2705 return ret;
2706
2707 if (INTEL_GEN(dev_priv) >= 6) {
2708 ret = intel_engine_create_scratch(engine, PAGE_SIZE);
2709 if (ret)
2710 return ret;
2711 } else if (HAS_BROKEN_CS_TLB(dev_priv)) {
2712 ret = intel_engine_create_scratch(engine, I830_WA_SIZE);
2713 if (ret)
2714 return ret;
2715 }
2716
2717 return 0;
2718 }
2719
2720 int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
2721 {
2722 struct drm_i915_private *dev_priv = engine->i915;
2723
2724 intel_ring_default_vfuncs(dev_priv, engine);
2725
2726 if (INTEL_GEN(dev_priv) >= 6) {
2727 /* gen6 bsd needs a special wa for tail updates */
2728 if (IS_GEN6(dev_priv))
2729 engine->submit_request = gen6_bsd_submit_request;
2730 engine->emit_flush = gen6_bsd_ring_flush;
2731 if (INTEL_GEN(dev_priv) < 8)
2732 engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
2733 } else {
2734 engine->mmio_base = BSD_RING_BASE;
2735 engine->emit_flush = bsd_ring_flush;
2736 if (IS_GEN5(dev_priv))
2737 engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
2738 else
2739 engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
2740 }
2741
2742 return intel_init_ring_buffer(engine);
2743 }
2744
2745 /**
2746 * Initialize the second BSD ring (e.g. Broadwell GT3, Skylake GT3)
2747 */
2748 int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine)
2749 {
2750 struct drm_i915_private *dev_priv = engine->i915;
2751
2752 intel_ring_default_vfuncs(dev_priv, engine);
2753
2754 engine->emit_flush = gen6_bsd_ring_flush;
2755
2756 return intel_init_ring_buffer(engine);
2757 }
2758
2759 int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
2760 {
2761 struct drm_i915_private *dev_priv = engine->i915;
2762
2763 intel_ring_default_vfuncs(dev_priv, engine);
2764
2765 engine->emit_flush = gen6_ring_flush;
2766 if (INTEL_GEN(dev_priv) < 8)
2767 engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
2768
2769 return intel_init_ring_buffer(engine);
2770 }
2771
2772 int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
2773 {
2774 struct drm_i915_private *dev_priv = engine->i915;
2775
2776 intel_ring_default_vfuncs(dev_priv, engine);
2777
2778 engine->emit_flush = gen6_ring_flush;
2779
2780 if (INTEL_GEN(dev_priv) < 8) {
2781 engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
2782 engine->irq_enable = hsw_vebox_irq_enable;
2783 engine->irq_disable = hsw_vebox_irq_disable;
2784 }
2785
2786 return intel_init_ring_buffer(engine);
2787 }