/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */
#include <linux/log2.h>

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_gem_render_state.h"
#include "i915_trace.h"
#include "intel_drv.h"
/* Rough estimate of the typical request size, performing a flush,
 * set-context and then emitting the batch.
 */
#define LEGACY_REQUEST_SIZE 200
static unsigned int __intel_ring_space(unsigned int head,
				       unsigned int tail,
				       unsigned int size)
{
	/*
	 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
	 * same cacheline, the Head Pointer must not be greater than the Tail
	 * Pointer."
	 */
	GEM_BUG_ON(!is_power_of_2(size));
	return (head - tail - CACHELINE_BYTES) & (size - 1);
}
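/*
 * Worked example of the masked arithmetic above (illustrative values):
 * with size == 4096, head == 512 and tail == 1024, the subtraction
 * underflows and the power-of-two mask wraps it, giving
 * (512 - 1024 - 64) & 4095 == 3520 bytes of usable space. Deducting
 * CACHELINE_BYTES keeps head and tail from ever sharing a cacheline,
 * per the HW restriction quoted above.
 */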
unsigned int intel_ring_update_space(struct intel_ring *ring)
{
	unsigned int space;

	space = __intel_ring_space(ring->head, ring->emit, ring->size);

	ring->space = space;
	return space;
}
static int
gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	u32 cmd, *cs;

	cmd = MI_FLUSH;
	if (mode & EMIT_INVALIDATE)
		cmd |= MI_READ_FLUSH;

	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = cmd;
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}
static int
gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	u32 cmd, *cs;

	/*
	 * read/write caches:
	 *
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
	 * also flushed at 2d versus 3d pipeline switches.
	 *
	 * read-only caches:
	 *
	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
	 * MI_READ_FLUSH is set, and is always flushed on 965.
	 *
	 * I915_GEM_DOMAIN_COMMAND may not exist?
	 *
	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
	 * invalidated when MI_EXE_FLUSH is set.
	 *
	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
	 * invalidated with every MI_FLUSH.
	 *
	 * TLBs:
	 *
	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
	 * are flushed at any MI_FLUSH.
	 */

	cmd = MI_FLUSH;
	if (mode & EMIT_INVALIDATE) {
		cmd |= MI_EXE_FLUSH;
		if (IS_G4X(req->i915) || IS_GEN5(req->i915))
			cmd |= MI_INVALIDATE_ISP;
	}

	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = cmd;
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}
/*
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6. From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 * 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it. Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either. Notify enable is IRQs, which aren't
 * really our business. That leaves only stall at scoreboard.
 */
static int
intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
{
	u32 scratch_addr =
		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
	u32 *cs;

	cs = intel_ring_begin(req, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(5);
	*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
	*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = 0; /* low dword */
	*cs++ = 0; /* high dword */
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	cs = intel_ring_begin(req, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(5);
	*cs++ = PIPE_CONTROL_QW_WRITE;
	*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}
static int
gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	u32 scratch_addr =
		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
	u32 *cs, flags = 0;
	int ret;

	/* Force SNB workarounds for PIPE_CONTROL flushes */
	ret = intel_emit_post_sync_nonzero_flush(req);
	if (ret)
		return ret;

	/* Just flush everything. Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		/*
		 * Ensure that any following seqno writes only happen
		 * when the render cache is indeed flushed.
		 */
		flags |= PIPE_CONTROL_CS_STALL;
	}
	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
	}

	cs = intel_ring_begin(req, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = flags;
	*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = 0;
	intel_ring_advance(req, cs);

	return 0;
}
static int
gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
{
	u32 *cs;

	cs = intel_ring_begin(req, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
	*cs++ = 0;
	*cs++ = 0;
	intel_ring_advance(req, cs);

	return 0;
}
static int
gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	u32 scratch_addr =
		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
	u32 *cs, flags = 0;

	/*
	 * Ensure that any following seqno writes only happen when the render
	 * cache is indeed flushed.
	 *
	 * Workaround: 4th PIPE_CONTROL command (except the ones with only
	 * read-cache invalidate bits set) must have the CS_STALL bit set. We
	 * don't try to be clever and just set it unconditionally.
	 */
	flags |= PIPE_CONTROL_CS_STALL;

	/* Just flush everything. Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
	}
	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

		flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;

		/* Workaround: we must issue a pipe_control with CS-stall bit
		 * set before a pipe_control command that has the state cache
		 * invalidate bit set. */
		gen7_render_ring_cs_stall_wa(req);
	}

	cs = intel_ring_begin(req, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = flags;
	*cs++ = scratch_addr;
	*cs++ = 0;
	intel_ring_advance(req, cs);

	return 0;
}
static int
gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	u32 flags;
	u32 *cs;

	cs = intel_ring_begin(req, mode & EMIT_INVALIDATE ? 12 : 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	flags = PIPE_CONTROL_CS_STALL;

	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
	}
	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

		/* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
		cs = gen8_emit_pipe_control(cs,
					    PIPE_CONTROL_CS_STALL |
					    PIPE_CONTROL_STALL_AT_SCOREBOARD,
					    0);
	}

	cs = gen8_emit_pipe_control(cs, flags,
				    i915_ggtt_offset(req->engine->scratch) +
				    2 * CACHELINE_BYTES);

	intel_ring_advance(req, cs);

	return 0;
}
static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u32 addr;

	addr = dev_priv->status_page_dmah->busaddr;
	if (INTEL_GEN(dev_priv) >= 4)
		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
	I915_WRITE(HWS_PGA, addr);
}
static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	i915_reg_t mmio;

	/* The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (IS_GEN7(dev_priv)) {
		switch (engine->id) {
		/*
		 * No more rings exist on Gen7. Default case is only to shut up
		 * gcc switch check warning.
		 */
		default:
			GEM_BUG_ON(engine->id);
		case RCS:
			mmio = RENDER_HWS_PGA_GEN7;
			break;
		case BCS:
			mmio = BLT_HWS_PGA_GEN7;
			break;
		case VCS:
			mmio = BSD_HWS_PGA_GEN7;
			break;
		case VECS:
			mmio = VEBOX_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN6(dev_priv)) {
		mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
	} else {
		/* XXX: gen8 returns to sanity */
		mmio = RING_HWS_PGA(engine->mmio_base);
	}

	if (INTEL_GEN(dev_priv) >= 6)
		I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);

	I915_WRITE(mmio, engine->status_page.ggtt_offset);
	POSTING_READ(mmio);

	/*
	 * Flush the TLB for this page.
	 *
	 * FIXME: These two bits have disappeared on gen8, so a question
	 * arises: do we still need this and if so how should we go about
	 * invalidating the TLB?
	 */
	if (IS_GEN(dev_priv, 6, 7)) {
		i915_reg_t reg = RING_INSTPM(engine->mmio_base);

		/* ring should be idle before issuing a sync flush */
		WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);

		I915_WRITE(reg,
			   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
					      INSTPM_SYNC_FLUSH));
		if (intel_wait_for_register(dev_priv,
					    reg, INSTPM_SYNC_FLUSH, 0,
					    1000))
			DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
				  engine->name);
	}
}
static bool stop_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (INTEL_GEN(dev_priv) > 2) {
		I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
		if (intel_wait_for_register(dev_priv,
					    RING_MI_MODE(engine->mmio_base),
					    MODE_IDLE,
					    MODE_IDLE,
					    1000)) {
			DRM_ERROR("%s : timed out trying to stop ring\n",
				  engine->name);

			/* Sometimes we observe that the idle flag is not
			 * set even though the ring is empty. So double
			 * check before giving up.
			 */
			if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine))
				return false;
		}
	}

	I915_WRITE_CTL(engine, 0);
	I915_WRITE_HEAD(engine, 0);
	I915_WRITE_TAIL(engine, 0);

	return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
}
static int init_ring_common(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct intel_ring *ring = engine->buffer;
	int ret = 0;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (!stop_ring(engine)) {
		/* G45 ring initialization often fails to reset head to zero */
		DRM_DEBUG_KMS("%s head not reset to zero "
			      "ctl %08x head %08x tail %08x start %08x\n",
			      engine->name,
			      I915_READ_CTL(engine),
			      I915_READ_HEAD(engine),
			      I915_READ_TAIL(engine),
			      I915_READ_START(engine));

		if (!stop_ring(engine)) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  engine->name,
				  I915_READ_CTL(engine),
				  I915_READ_HEAD(engine),
				  I915_READ_TAIL(engine),
				  I915_READ_START(engine));
			ret = -EIO;
			goto out;
		}
	}

	if (HWS_NEEDS_PHYSICAL(dev_priv))
		ring_setup_phys_status_page(engine);
	else
		intel_ring_setup_status_page(engine);

	intel_engine_reset_breadcrumbs(engine);

	/* Enforce ordering by reading HEAD register back */
	I915_READ_HEAD(engine);

	/* Initialize the ring. This must happen _after_ we've cleared the ring
	 * registers with the above sequence (the readback of the HEAD registers
	 * also enforces ordering), otherwise the hw might lose the new ring
	 * register values. */
	I915_WRITE_START(engine, i915_ggtt_offset(ring->vma));

	/* WaClearRingBufHeadRegAtInit:ctg,elk */
	if (I915_READ_HEAD(engine))
		DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
			  engine->name, I915_READ_HEAD(engine));

	intel_ring_update_space(ring);
	I915_WRITE_HEAD(engine, ring->head);
	I915_WRITE_TAIL(engine, ring->tail);
	(void)I915_READ_TAIL(engine);

	I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if (intel_wait_for_register(dev_priv, RING_CTL(engine->mmio_base),
				    RING_VALID, RING_VALID,
				    50)) {
		DRM_ERROR("%s initialization failed "
			  "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
			  engine->name,
			  I915_READ_CTL(engine),
			  I915_READ_CTL(engine) & RING_VALID,
			  I915_READ_HEAD(engine), ring->head,
			  I915_READ_TAIL(engine), ring->tail,
			  I915_READ_START(engine),
			  i915_ggtt_offset(ring->vma));
		ret = -EIO;
		goto out;
	}

	intel_engine_init_hangcheck(engine);

	if (INTEL_GEN(dev_priv) > 2)
		I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));

out:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}
static void reset_ring_common(struct intel_engine_cs *engine,
			      struct drm_i915_gem_request *request)
{
	/*
	 * RC6 must be prevented until the reset is complete and the engine
	 * reinitialised. If it occurs in the middle of this sequence, the
	 * state written to/loaded from the power context is ill-defined (e.g.
	 * the PP_BASE_DIR may be lost).
	 */
	assert_forcewakes_active(engine->i915, FORCEWAKE_ALL);

	/*
	 * Try to restore the logical GPU state to match the continuation
	 * of the request queue. If we skip the context/PD restore, then
	 * the next request may try to execute assuming that its context
	 * is valid and loaded on the GPU and so may try to access invalid
	 * memory, prompting repeated GPU hangs.
	 *
	 * If the request was guilty, we still restore the logical state
	 * in case the next request requires it (e.g. the aliasing ppgtt),
	 * but skip over the hung batch.
	 *
	 * If the request was innocent, we try to replay the request with
	 * the restored context.
	 */
	if (request) {
		struct drm_i915_private *dev_priv = request->i915;
		struct intel_context *ce = &request->ctx->engine[engine->id];
		struct i915_hw_ppgtt *ppgtt;

		/* FIXME consider gen8 reset */

		if (ce->state) {
			I915_WRITE(CCID,
				   i915_ggtt_offset(ce->state) |
				   BIT(8) /* must be set! */ |
				   CCID_EXTENDED_STATE_SAVE |
				   CCID_EXTENDED_STATE_RESTORE |
				   CCID_EN);
		}

		ppgtt = request->ctx->ppgtt ?: engine->i915->mm.aliasing_ppgtt;
		if (ppgtt) {
			u32 pd_offset = ppgtt->pd.base.ggtt_offset << 10;

			I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
			I915_WRITE(RING_PP_DIR_BASE(engine), pd_offset);

			/* Wait for the PD reload to complete */
			if (intel_wait_for_register(dev_priv,
						    RING_PP_DIR_BASE(engine),
						    BIT(0), 0,
						    10))
				DRM_ERROR("Wait for reload of ppgtt page-directory timed out\n");

			ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
		}

		/* If the rq hung, jump to its breadcrumb and skip the batch */
		if (request->fence.error == -EIO)
			request->ring->head = request->postfix;
	} else {
		engine->legacy_active_context = NULL;
	}
}
static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
{
	int ret;

	ret = intel_ring_workarounds_emit(req);
	if (ret != 0)
		return ret;

	ret = i915_gem_render_state_emit(req);
	if (ret)
		return ret;

	return 0;
}
static int init_render_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret = init_ring_common(engine);
	if (ret)
		return ret;

	/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
	if (IS_GEN(dev_priv, 4, 6))
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));

	/* We need to disable the AsyncFlip performance optimisations in order
	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
	 * programmed to '1' on all products.
	 *
	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
	 */
	if (IS_GEN(dev_priv, 6, 7))
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));

	/* Required for the hardware to program scanline values for waiting */
	/* WaEnableFlushTlbInvalidationMode:snb */
	if (IS_GEN6(dev_priv))
		I915_WRITE(GFX_MODE,
			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));

	/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
	if (IS_GEN7(dev_priv))
		I915_WRITE(GFX_MODE_GEN7,
			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
			   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));

	if (IS_GEN6(dev_priv)) {
		/* From the Sandybridge PRM, volume 1 part 3, page 24:
		 * "If this bit is set, STCunit will have LRA as replacement
		 *  policy. [...] This bit must be reset. LRA replacement
		 *  policy is not supported."
		 */
		I915_WRITE(CACHE_MODE_0,
			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
	}

	if (IS_GEN(dev_priv, 6, 7))
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

	if (INTEL_INFO(dev_priv)->gen >= 6)
		I915_WRITE_IMR(engine, ~engine->irq_keep_mask);

	return init_workarounds_ring(engine);
}
static void render_ring_cleanup(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	i915_vma_unpin_and_release(&dev_priv->semaphore);
}
static u32 *gen8_rcs_signal(struct drm_i915_gem_request *req, u32 *cs)
{
	struct drm_i915_private *dev_priv = req->i915;
	struct intel_engine_cs *waiter;
	enum intel_engine_id id;

	for_each_engine(waiter, dev_priv, id) {
		u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
			continue;

		*cs++ = GFX_OP_PIPE_CONTROL(6);
		*cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_CS_STALL;
		*cs++ = lower_32_bits(gtt_offset);
		*cs++ = upper_32_bits(gtt_offset);
		*cs++ = req->global_seqno;
		*cs++ = 0;
		*cs++ = MI_SEMAPHORE_SIGNAL |
			MI_SEMAPHORE_TARGET(waiter->hw_id);
		*cs++ = 0;
	}

	return cs;
}
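/*
 * Each waiter iteration above emits 8 dwords: a 6-dword PIPE_CONTROL
 * QW write of the seqno plus a 2-dword MI_SEMAPHORE_SIGNAL. This is
 * the "num_rings * 8" accounted for when sizing the breadcrumb in
 * intel_init_render_ring_buffer() below.
 */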
static u32 *gen8_xcs_signal(struct drm_i915_gem_request *req, u32 *cs)
{
	struct drm_i915_private *dev_priv = req->i915;
	struct intel_engine_cs *waiter;
	enum intel_engine_id id;

	for_each_engine(waiter, dev_priv, id) {
		u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
			continue;

		*cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
		*cs++ = lower_32_bits(gtt_offset) | MI_FLUSH_DW_USE_GTT;
		*cs++ = upper_32_bits(gtt_offset);
		*cs++ = req->global_seqno;
		*cs++ = MI_SEMAPHORE_SIGNAL |
			MI_SEMAPHORE_TARGET(waiter->hw_id);
		*cs++ = 0;
	}

	return cs;
}
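/*
 * The non-render variant is cheaper: 4 dwords of MI_FLUSH_DW seqno
 * write plus a 2-dword MI_SEMAPHORE_SIGNAL, i.e. the "num_rings * 6"
 * sizing used for gen8 in intel_ring_default_vfuncs().
 */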
static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *cs)
{
	struct drm_i915_private *dev_priv = req->i915;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int num_rings = 0;

	for_each_engine(engine, dev_priv, id) {
		i915_reg_t mbox_reg;

		if (!(BIT(engine->hw_id) & GEN6_SEMAPHORES_MASK))
			continue;

		mbox_reg = req->engine->semaphore.mbox.signal[engine->hw_id];
		if (i915_mmio_reg_valid(mbox_reg)) {
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = i915_mmio_reg_offset(mbox_reg);
			*cs++ = req->global_seqno;
			num_rings++;
		}
	}
	if (num_rings & 1)
		*cs++ = MI_NOOP;

	return cs;
}
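/*
 * Each signalled ring costs 3 dwords (MI_LOAD_REGISTER_IMM + register
 * offset + seqno); the trailing MI_NOOP keeps the total qword-aligned
 * when an odd number of rings was signalled. This mirrors the
 * "num_rings * 3" (+1 when odd) sizing in intel_ring_default_vfuncs().
 */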
static void cancel_requests(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *request;
	unsigned long flags;

	spin_lock_irqsave(&engine->timeline->lock, flags);

	/* Mark all submitted requests as skipped. */
	list_for_each_entry(request, &engine->timeline->requests, link) {
		GEM_BUG_ON(!request->global_seqno);
		if (!i915_gem_request_completed(request))
			dma_fence_set_error(&request->fence, -EIO);
	}
	/* Remaining _unready_ requests will be nop'ed when submitted */

	spin_unlock_irqrestore(&engine->timeline->lock, flags);
}
static void i9xx_submit_request(struct drm_i915_gem_request *request)
{
	struct drm_i915_private *dev_priv = request->i915;

	i915_gem_request_submit(request);

	I915_WRITE_TAIL(request->engine,
			intel_ring_set_tail(request->ring, request->tail));
}
static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
{
	*cs++ = MI_STORE_DWORD_INDEX;
	*cs++ = I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT;
	*cs++ = req->global_seqno;
	*cs++ = MI_USER_INTERRUPT;

	req->tail = intel_ring_offset(req, cs);
	assert_ring_tail_valid(req->ring, req->tail);
}

static const int i9xx_emit_breadcrumb_sz = 4;
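/*
 * The _sz constant must match the number of dwords written by
 * i9xx_emit_breadcrumb() above: STORE_DWORD_INDEX + HWS offset +
 * seqno + MI_USER_INTERRUPT == 4 dwords.
 */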
/**
 * gen6_sema_emit_breadcrumb - Update the semaphore mailbox registers
 *
 * @req - request to write to the ring
 *
 * Update the mailbox registers in the *other* rings with the current seqno.
 * This acts like a signal in the canonical semaphore.
 */
static void gen6_sema_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
{
	return i9xx_emit_breadcrumb(req,
				    req->engine->semaphore.signal(req, cs));
}
static void gen8_render_emit_breadcrumb(struct drm_i915_gem_request *req,
					u32 *cs)
{
	struct intel_engine_cs *engine = req->engine;

	if (engine->semaphore.signal)
		cs = engine->semaphore.signal(req, cs);

	*cs++ = GFX_OP_PIPE_CONTROL(6);
	*cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL |
		PIPE_CONTROL_QW_WRITE;
	*cs++ = intel_hws_seqno_address(engine);
	*cs++ = 0;
	*cs++ = req->global_seqno;
	/* We're thrashing one dword of HWS. */
	*cs++ = 0;
	*cs++ = MI_USER_INTERRUPT;
	*cs++ = MI_NOOP;

	req->tail = intel_ring_offset(req, cs);
	assert_ring_tail_valid(req->ring, req->tail);
}

static const int gen8_render_emit_breadcrumb_sz = 8;
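/*
 * Again the _sz constant mirrors the emission above: 6 dwords of
 * PIPE_CONTROL seqno write plus MI_USER_INTERRUPT and a padding
 * MI_NOOP == 8 dwords; the per-ring semaphore dwords are added
 * separately at engine init.
 */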
/**
 * intel_ring_sync - sync the waiter to the signaller on seqno
 *
 * @waiter - ring that is waiting
 * @signaller - ring which has, or will signal
 * @seqno - seqno which the waiter will block on
 */

static int
gen8_ring_sync_to(struct drm_i915_gem_request *req,
		  struct drm_i915_gem_request *signal)
{
	struct drm_i915_private *dev_priv = req->i915;
	u64 offset = GEN8_WAIT_OFFSET(req->engine, signal->engine->id);
	struct i915_hw_ppgtt *ppgtt;
	u32 *cs;

	cs = intel_ring_begin(req, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_SEMAPHORE_WAIT | MI_SEMAPHORE_GLOBAL_GTT |
		MI_SEMAPHORE_SAD_GTE_SDD;
	*cs++ = signal->global_seqno;
	*cs++ = lower_32_bits(offset);
	*cs++ = upper_32_bits(offset);
	intel_ring_advance(req, cs);

	/* When the !RCS engines idle waiting upon a semaphore, they lose their
	 * pagetables and we must reload them before executing the batch.
	 * We do this on the i915_switch_context() following the wait and
	 * before the dispatch.
	 */
	ppgtt = req->ctx->ppgtt;
	if (ppgtt && req->engine->id != RCS)
		ppgtt->pd_dirty_rings |= intel_engine_flag(req->engine);

	return 0;
}
static int
gen6_ring_sync_to(struct drm_i915_gem_request *req,
		  struct drm_i915_gem_request *signal)
{
	u32 dw1 = MI_SEMAPHORE_MBOX |
		  MI_SEMAPHORE_COMPARE |
		  MI_SEMAPHORE_REGISTER;
	u32 wait_mbox = signal->engine->semaphore.mbox.wait[req->engine->hw_id];
	u32 *cs;

	WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);

	cs = intel_ring_begin(req, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = dw1 | wait_mbox;
	/* Throughout all of the GEM code, seqno passed implies our current
	 * seqno is >= the last seqno executed. However for hardware the
	 * comparison is strictly greater than.
	 */
	*cs++ = signal->global_seqno - 1;
	*cs++ = 0;
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}
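/*
 * Worked example of the ">= vs >" adjustment above (illustrative):
 * to wait until seqno 42 has executed, the mailbox comparison is
 * programmed with 41, so the strictly-greater-than hardware test
 * "mbox > 41" becomes true exactly when seqno 42 is visible.
 */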
static void
gen5_seqno_barrier(struct intel_engine_cs *engine)
{
	/* MI_STORE are internally buffered by the GPU and not flushed
	 * either by MI_FLUSH or SyncFlush or any other combination of
	 * MI commands.
	 *
	 * "Only the submission of the store operation is guaranteed.
	 * The write result will be complete (coherent) some time later
	 * (this is practically a finite period but there is no guaranteed
	 * latency)."
	 *
	 * Empirically, we observe that we need a delay of at least 75us to
	 * be sure that the seqno write is visible by the CPU.
	 */
	usleep_range(125, 250);
}
static void
gen6_seqno_barrier(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	/* Workaround to force correct ordering between irq and seqno writes on
	 * ivb (and maybe also on snb) by reading from a CS register (like
	 * ACTHD) before reading the status page.
	 *
	 * Note that this effectively stalls the read by the time it takes to
	 * do a memory transaction, which more or less ensures that the write
	 * from the GPU has sufficient time to invalidate the CPU cacheline.
	 * Alternatively we could delay the interrupt from the CS ring to give
	 * the write time to land, but that would incur a delay after every
	 * batch i.e. much more frequent than a delay when waiting for the
	 * interrupt (with the same net latency).
	 *
	 * Also note that to prevent whole machine hangs on gen7, we have to
	 * take the spinlock to guard against concurrent cacheline access.
	 */
	spin_lock_irq(&dev_priv->uncore.lock);
	POSTING_READ_FW(RING_ACTHD(engine->mmio_base));
	spin_unlock_irq(&dev_priv->uncore.lock);
}
static void
gen5_irq_enable(struct intel_engine_cs *engine)
{
	gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
}

static void
gen5_irq_disable(struct intel_engine_cs *engine)
{
	gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
}
static void
i9xx_irq_enable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	dev_priv->irq_mask &= ~engine->irq_enable_mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
	POSTING_READ_FW(RING_IMR(engine->mmio_base));
}

static void
i9xx_irq_disable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	dev_priv->irq_mask |= engine->irq_enable_mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
}
static void
i8xx_irq_enable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	dev_priv->irq_mask &= ~engine->irq_enable_mask;
	I915_WRITE16(IMR, dev_priv->irq_mask);
	POSTING_READ16(RING_IMR(engine->mmio_base));
}

static void
i8xx_irq_disable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	dev_priv->irq_mask |= engine->irq_enable_mask;
	I915_WRITE16(IMR, dev_priv->irq_mask);
}
static int
bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	u32 *cs;

	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_FLUSH;
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);
	return 0;
}
static void
gen6_irq_enable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine,
		       ~(engine->irq_enable_mask |
			 engine->irq_keep_mask));
	gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
}

static void
gen6_irq_disable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
	gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
}
static void
hsw_vebox_irq_enable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
	gen6_unmask_pm_irq(dev_priv, engine->irq_enable_mask);
}

static void
hsw_vebox_irq_disable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine, ~0);
	gen6_mask_pm_irq(dev_priv, engine->irq_enable_mask);
}
static void
gen8_irq_enable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine,
		       ~(engine->irq_enable_mask |
			 engine->irq_keep_mask));
	POSTING_READ_FW(RING_IMR(engine->mmio_base));
}

static void
gen8_irq_disable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
}
static int
i965_emit_bb_start(struct drm_i915_gem_request *req,
		   u64 offset, u32 length,
		   unsigned int dispatch_flags)
{
	u32 *cs;

	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | (dispatch_flags &
		I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965);
	*cs++ = offset;
	intel_ring_advance(req, cs);

	return 0;
}
/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
#define I830_BATCH_LIMIT (256*1024)
#define I830_TLB_ENTRIES (2)
#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
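/*
 * With the values above, I830_WA_SIZE evaluates to
 * max(2 * 4096, 256 * 1024) == 256 KiB: the scratch bo must hold the
 * largest batch we are prepared to copy, not just the TLB-evict pages.
 */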
static int
i830_emit_bb_start(struct drm_i915_gem_request *req,
		   u64 offset, u32 len,
		   unsigned int dispatch_flags)
{
	u32 *cs, cs_offset = i915_ggtt_offset(req->engine->scratch);

	cs = intel_ring_begin(req, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* Evict the invalid PTE TLBs */
	*cs++ = COLOR_BLT_CMD | BLT_WRITE_RGBA;
	*cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096;
	*cs++ = I830_TLB_ENTRIES << 16 | 4; /* load each page */
	*cs++ = cs_offset;
	*cs++ = 0xdeadbeef;
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
		if (len > I830_BATCH_LIMIT)
			return -ENOSPC;

		cs = intel_ring_begin(req, 6 + 2);
		if (IS_ERR(cs))
			return PTR_ERR(cs);

		/* Blit the batch (which has now all relocs applied) to the
		 * stable batch scratch bo area (so that the CS never
		 * stumbles over its tlb invalidation bug) ...
		 */
		*cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA;
		*cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096;
		*cs++ = DIV_ROUND_UP(len, 4096) << 16 | 4096;
		*cs++ = cs_offset;
		*cs++ = 4096;
		*cs++ = offset;

		*cs++ = MI_FLUSH;
		*cs++ = MI_NOOP;
		intel_ring_advance(req, cs);

		/* ... and execute it. */
		offset = cs_offset;
	}

	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
	*cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
		MI_BATCH_NON_SECURE);
	intel_ring_advance(req, cs);

	return 0;
}
static int
i915_emit_bb_start(struct drm_i915_gem_request *req,
		   u64 offset, u32 len,
		   unsigned int dispatch_flags)
{
	u32 *cs;

	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
	*cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
		MI_BATCH_NON_SECURE);
	intel_ring_advance(req, cs);

	return 0;
}
int intel_ring_pin(struct intel_ring *ring,
		   struct drm_i915_private *i915,
		   unsigned int offset_bias)
{
	enum i915_map_type map = HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
	struct i915_vma *vma = ring->vma;
	unsigned int flags;
	void *addr;
	int ret;

	GEM_BUG_ON(ring->vaddr);

	flags = PIN_GLOBAL;
	if (offset_bias)
		flags |= PIN_OFFSET_BIAS | offset_bias;
	if (vma->obj->stolen)
		flags |= PIN_MAPPABLE;

	if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
		if (flags & PIN_MAPPABLE || map == I915_MAP_WC)
			ret = i915_gem_object_set_to_gtt_domain(vma->obj, true);
		else
			ret = i915_gem_object_set_to_cpu_domain(vma->obj, true);
		if (unlikely(ret))
			return ret;
	}

	ret = i915_vma_pin(vma, 0, PAGE_SIZE, flags);
	if (unlikely(ret))
		return ret;

	if (i915_vma_is_map_and_fenceable(vma))
		addr = (void __force *)i915_vma_pin_iomap(vma);
	else
		addr = i915_gem_object_pin_map(vma->obj, map);
	if (IS_ERR(addr))
		goto err;

	vma->obj->pin_global++;

	ring->vaddr = addr;
	return 0;

err:
	i915_vma_unpin(vma);
	return PTR_ERR(addr);
}
void intel_ring_reset(struct intel_ring *ring, u32 tail)
{
	GEM_BUG_ON(!list_empty(&ring->request_list));
	ring->tail = tail;
	ring->head = tail;
	ring->emit = tail;
	intel_ring_update_space(ring);
}
void intel_ring_unpin(struct intel_ring *ring)
{
	GEM_BUG_ON(!ring->vma);
	GEM_BUG_ON(!ring->vaddr);

	/* Discard any unused bytes beyond that submitted to hw. */
	intel_ring_reset(ring, ring->tail);

	if (i915_vma_is_map_and_fenceable(ring->vma))
		i915_vma_unpin_iomap(ring->vma);
	else
		i915_gem_object_unpin_map(ring->vma->obj);
	ring->vaddr = NULL;

	ring->vma->obj->pin_global--;
	i915_vma_unpin(ring->vma);
}
static struct i915_vma *
intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
{
	struct i915_address_space *vm = &dev_priv->ggtt.base;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	obj = i915_gem_object_create_stolen(dev_priv, size);
	if (!obj)
		obj = i915_gem_object_create_internal(dev_priv, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	/*
	 * Mark ring buffers as read-only from GPU side (so no stray overwrites)
	 * if supported by the platform's GGTT.
	 */
	if (vm->has_read_only)
		i915_gem_object_set_readonly(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma))
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return vma;
}
struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size)
{
	struct intel_ring *ring;
	struct i915_vma *vma;

	GEM_BUG_ON(!is_power_of_2(size));
	GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ring->request_list);

	ring->size = size;
	/* Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ring->effective_size = size;
	if (IS_I830(engine->i915) || IS_I845G(engine->i915))
		ring->effective_size -= 2 * CACHELINE_BYTES;

	intel_ring_update_space(ring);

	vma = intel_ring_create_vma(engine->i915, size);
	if (IS_ERR(vma)) {
		kfree(ring);
		return ERR_CAST(vma);
	}
	ring->vma = vma;

	return ring;
}
void
intel_ring_free(struct intel_ring *ring)
{
	struct drm_i915_gem_object *obj = ring->vma->obj;

	i915_vma_close(ring->vma);
	__i915_gem_object_release_unless_active(obj);

	kfree(ring);
}
static int context_pin(struct i915_gem_context *ctx)
{
	struct i915_vma *vma = ctx->engine[RCS].state;
	int ret;

	/*
	 * Clear this page out of any CPU caches for coherent swap-in/out.
	 * We only want to do this on the first bind so that we do not stall
	 * on an active context (which by nature is already on the GPU).
	 */
	if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
		ret = i915_gem_object_set_to_gtt_domain(vma->obj, true);
		if (ret)
			return ret;
	}

	return i915_vma_pin(vma, 0, I915_GTT_MIN_ALIGNMENT,
			    PIN_GLOBAL | PIN_HIGH);
}
static struct i915_vma *
alloc_context_vma(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	obj = i915_gem_object_create(i915, engine->context_size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	/*
	 * Try to make the context utilize L3 as well as LLC.
	 *
	 * On VLV we don't have L3 controls in the PTEs so we
	 * shouldn't touch the cache level, especially as that
	 * would make the object snooped which might have a
	 * negative performance impact.
	 *
	 * Snooping is required on non-llc platforms in execlist
	 * mode, but since all GGTT accesses use PAT entry 0 we
	 * get snooping anyway regardless of cache_level.
	 *
	 * This is only applicable for Ivy Bridge devices since
	 * later platforms don't have L3 control bits in the PTE.
	 */
	if (IS_IVYBRIDGE(i915)) {
		/* Ignore any error, regard it as a simple optimisation */
		i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
	}

	vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
	if (IS_ERR(vma))
		i915_gem_object_put(obj);

	return vma;
}
static struct intel_ring *
intel_ring_context_pin(struct intel_engine_cs *engine,
		       struct i915_gem_context *ctx)
{
	struct intel_context *ce = &ctx->engine[engine->id];
	int ret;

	lockdep_assert_held(&ctx->i915->drm.struct_mutex);

	if (likely(ce->pin_count++))
		goto out;
	GEM_BUG_ON(!ce->pin_count); /* no overflow please! */

	if (!ce->state && engine->context_size) {
		struct i915_vma *vma;

		vma = alloc_context_vma(engine);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto err;
		}

		ce->state = vma;
	}

	if (ce->state) {
		ret = context_pin(ctx);
		if (ret)
			goto err;

		ce->state->obj->pin_global++;
	}

	/* The kernel context is only used as a placeholder for flushing the
	 * active context. It is never used for submitting user rendering and
	 * as such never requires the golden render context, and so we can skip
	 * emitting it when we switch to the kernel context. This is required
	 * as during eviction we cannot allocate and pin the renderstate in
	 * order to initialise the context.
	 */
	if (i915_gem_context_is_kernel(ctx))
		ce->initialised = true;

	i915_gem_context_get(ctx);

out:
	/* One ringbuffer to rule them all */
	return engine->buffer;

err:
	ce->pin_count = 0;
	return ERR_PTR(ret);
}
static void intel_ring_context_unpin(struct intel_engine_cs *engine,
				     struct i915_gem_context *ctx)
{
	struct intel_context *ce = &ctx->engine[engine->id];

	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
	GEM_BUG_ON(ce->pin_count == 0);

	if (--ce->pin_count)
		return;

	if (ce->state) {
		ce->state->obj->pin_global--;
		i915_vma_unpin(ce->state);
	}

	i915_gem_context_put(ctx);
}
static int intel_init_ring_buffer(struct intel_engine_cs *engine)
{
	struct intel_ring *ring;
	int err;

	intel_engine_setup_common(engine);

	err = intel_engine_init_common(engine);
	if (err)
		goto err;

	ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE);
	if (IS_ERR(ring)) {
		err = PTR_ERR(ring);
		goto err;
	}

	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
	err = intel_ring_pin(ring, engine->i915, I915_GTT_PAGE_SIZE);
	if (err)
		goto err_ring;

	GEM_BUG_ON(engine->buffer);
	engine->buffer = ring;

	return 0;

err_ring:
	intel_ring_free(ring);
err:
	intel_engine_cleanup_common(engine);
	return err;
}
void intel_engine_cleanup(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	WARN_ON(INTEL_GEN(dev_priv) > 2 &&
		(I915_READ_MODE(engine) & MODE_IDLE) == 0);

	intel_ring_unpin(engine->buffer);
	intel_ring_free(engine->buffer);

	if (engine->cleanup)
		engine->cleanup(engine);

	intel_engine_cleanup_common(engine);

	dev_priv->engine[engine->id] = NULL;
	kfree(engine);
}
void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* Restart from the beginning of the rings for convenience */
	for_each_engine(engine, dev_priv, id)
		intel_ring_reset(engine->buffer, 0);
}
static int ring_request_alloc(struct drm_i915_gem_request *request)
{
	u32 *cs;

	GEM_BUG_ON(!request->ctx->engine[request->engine->id].pin_count);

	/* Flush enough space to reduce the likelihood of waiting after
	 * we start building the request - in which case we will just
	 * have to repeat work.
	 */
	request->reserved_space += LEGACY_REQUEST_SIZE;

	cs = intel_ring_begin(request, 0);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	request->reserved_space -= LEGACY_REQUEST_SIZE;
	return 0;
}
static noinline int wait_for_space(struct drm_i915_gem_request *req,
				   unsigned int bytes)
{
	struct intel_ring *ring = req->ring;
	struct drm_i915_gem_request *target;
	long timeout;

	lockdep_assert_held(&req->i915->drm.struct_mutex);

	if (intel_ring_update_space(ring) >= bytes)
		return 0;

	/*
	 * Space is reserved in the ringbuffer for finalising the request,
	 * as that cannot be allowed to fail. During request finalisation,
	 * reserved_space is set to 0 to stop the overallocation and the
	 * assumption is that then we never need to wait (which has the
	 * risk of failing with EINTR).
	 *
	 * See also i915_gem_request_alloc() and i915_add_request().
	 */
	GEM_BUG_ON(!req->reserved_space);

	list_for_each_entry(target, &ring->request_list, ring_link) {
		/* Would completion of this request free enough space? */
		if (bytes <= __intel_ring_space(target->postfix,
						ring->emit, ring->size))
			break;
	}

	if (WARN_ON(&target->ring_link == &ring->request_list))
		return -ENOSPC;

	timeout = i915_wait_request(target,
				    I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
				    MAX_SCHEDULE_TIMEOUT);
	if (timeout < 0)
		return timeout;

	i915_gem_request_retire_upto(target);

	intel_ring_update_space(ring);
	GEM_BUG_ON(ring->space < bytes);
	return 0;
}
u32 *intel_ring_begin(struct drm_i915_gem_request *req,
		      unsigned int num_dwords)
{
	struct intel_ring *ring = req->ring;
	const unsigned int remain_usable = ring->effective_size - ring->emit;
	const unsigned int bytes = num_dwords * sizeof(u32);
	unsigned int need_wrap = 0;
	unsigned int total_bytes;
	u32 *cs;

	/* Packets must be qword aligned. */
	GEM_BUG_ON(num_dwords & 1);

	total_bytes = bytes + req->reserved_space;
	GEM_BUG_ON(total_bytes > ring->effective_size);

	if (unlikely(total_bytes > remain_usable)) {
		const int remain_actual = ring->size - ring->emit;

		if (bytes > remain_usable) {
			/*
			 * Not enough space for the basic request. So need to
			 * flush out the remainder and then wait for
			 * base + reserved.
			 */
			total_bytes += remain_actual;
			need_wrap = remain_actual | 1;
		} else {
			/*
			 * The base request will fit but the reserved space
			 * falls off the end. So we don't need an immediate
			 * wrap and only need to effectively wait for the
			 * reserved size from the start of ringbuffer.
			 */
			total_bytes = req->reserved_space + remain_actual;
		}
	}

	if (unlikely(total_bytes > ring->space)) {
		int ret = wait_for_space(req, total_bytes);
		if (unlikely(ret))
			return ERR_PTR(ret);
	}

	if (unlikely(need_wrap)) {
		need_wrap &= ~1;
		GEM_BUG_ON(need_wrap > ring->space);
		GEM_BUG_ON(ring->emit + need_wrap > ring->size);

		/* Fill the tail with MI_NOOP */
		memset(ring->vaddr + ring->emit, 0, need_wrap);
		ring->emit = 0;
		ring->space -= need_wrap;
	}

	GEM_BUG_ON(ring->emit > ring->size - bytes);
	GEM_BUG_ON(ring->space < bytes);
	cs = ring->vaddr + ring->emit;
	GEM_DEBUG_EXEC(memset(cs, POISON_INUSE, bytes));
	ring->emit += bytes;
	ring->space -= bytes;

	return cs;
}
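/*
 * Worked example of the wrap handling above (illustrative values):
 * with effective_size == size == 4096 and emit == 4032, a request for
 * 24 dwords (96 bytes) leaves remain_usable == 64 < 96, so need_wrap
 * becomes remain_actual | 1 == 65; the low bit merely flags "wrap
 * needed" and is masked off again before the 64-byte tail is
 * NOOP-filled and emit resets to 0.
 */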
/* Align the ring tail to a cacheline boundary */
int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
{
	int num_dwords =
		(req->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
	u32 *cs;

	if (num_dwords == 0)
		return 0;

	num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
	cs = intel_ring_begin(req, num_dwords);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	while (num_dwords--)
		*cs++ = MI_NOOP;

	intel_ring_advance(req, cs);

	return 0;
}
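/*
 * Worked example (illustrative): CACHELINE_BYTES == 64 is 16 dwords,
 * so an emit offset that is 6 dwords (24 bytes) into a cacheline pads
 * with 16 - 6 == 10 MI_NOOPs to reach the next cacheline boundary.
 */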
static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
{
	struct drm_i915_private *dev_priv = request->i915;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* Every tail move must follow the sequence below */

	/* Disable notification that the ring is IDLE. The GT
	 * will then assume that it is busy and bring it out of rc6.
	 */
	I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
		      _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	/* Clear the context id. Here be magic! */
	I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0);

	/* Wait for the ring not to be idle, i.e. for it to wake up. */
	if (__intel_wait_for_register_fw(dev_priv,
					 GEN6_BSD_SLEEP_PSMI_CONTROL,
					 GEN6_BSD_SLEEP_INDICATOR,
					 0,
					 1000, 0, NULL))
		DRM_ERROR("timed out waiting for the BSD ring to wake up\n");

	/* Now that the ring is fully powered up, update the tail */
	i9xx_submit_request(request);

	/* Let the ring send IDLE messages to the GT again,
	 * and so let it sleep to conserve power when idle.
	 */
	I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
		      _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	u32 cmd, *cs;

	cs = intel_ring_begin(req, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	cmd = MI_FLUSH_DW;
	if (INTEL_GEN(req->i915) >= 8)
		cmd += 1;

	/* We always require a command barrier so that subsequent
	 * commands, such as breadcrumb interrupts, are strictly ordered
	 * wrt the contents of the write cache being flushed to memory
	 * (and thus being coherent from the CPU).
	 */
	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;

	/*
	 * Bspec vol 1c.5 - video engine command streamer:
	 * "If ENABLED, all TLBs will be invalidated once the flush
	 * operation is complete. This bit is only valid when the
	 * Post-Sync Operation field is a value of 1h or 3h."
	 */
	if (mode & EMIT_INVALIDATE)
		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;

	*cs++ = cmd;
	*cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
	if (INTEL_GEN(req->i915) >= 8) {
		*cs++ = 0; /* upper addr */
		*cs++ = 0; /* value */
	} else {
		*cs++ = 0;
		*cs++ = MI_NOOP;
	}
	intel_ring_advance(req, cs);
	return 0;
}
static int
gen8_emit_bb_start(struct drm_i915_gem_request *req,
		   u64 offset, u32 len,
		   unsigned int dispatch_flags)
{
	bool ppgtt = USES_PPGTT(req->i915) &&
		     !(dispatch_flags & I915_DISPATCH_SECURE);
	u32 *cs;

	cs = intel_ring_begin(req, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* FIXME(BDW): Address space and security selectors. */
	*cs++ = MI_BATCH_BUFFER_START_GEN8 | (ppgtt << 8) | (dispatch_flags &
		I915_DISPATCH_RS ? MI_BATCH_RESOURCE_STREAMER : 0);
	*cs++ = lower_32_bits(offset);
	*cs++ = upper_32_bits(offset);
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}
static int
hsw_emit_bb_start(struct drm_i915_gem_request *req,
		  u64 offset, u32 len,
		  unsigned int dispatch_flags)
{
	u32 *cs;

	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
		0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
		(dispatch_flags & I915_DISPATCH_RS ?
		MI_BATCH_RESOURCE_STREAMER : 0);
	/* bit0-7 is the length on GEN6+ */
	*cs++ = offset;
	intel_ring_advance(req, cs);

	return 0;
}
static int
gen6_emit_bb_start(struct drm_i915_gem_request *req,
		   u64 offset, u32 len,
		   unsigned int dispatch_flags)
{
	u32 *cs;

	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
		0 : MI_BATCH_NON_SECURE_I965);
	/* bit0-7 is the length on GEN6+ */
	*cs++ = offset;
	intel_ring_advance(req, cs);

	return 0;
}
/* Blitter support (SandyBridge+) */

static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	u32 cmd, *cs;

	cs = intel_ring_begin(req, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	cmd = MI_FLUSH_DW;
	if (INTEL_GEN(req->i915) >= 8)
		cmd += 1;

	/* We always require a command barrier so that subsequent
	 * commands, such as breadcrumb interrupts, are strictly ordered
	 * wrt the contents of the write cache being flushed to memory
	 * (and thus being coherent from the CPU).
	 */
	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;

	/*
	 * Bspec vol 1c.3 - blitter engine command streamer:
	 * "If ENABLED, all TLBs will be invalidated once the flush
	 * operation is complete. This bit is only valid when the
	 * Post-Sync Operation field is a value of 1h or 3h."
	 */
	if (mode & EMIT_INVALIDATE)
		cmd |= MI_INVALIDATE_TLB;
	*cs++ = cmd;
	*cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
	if (INTEL_GEN(req->i915) >= 8) {
		*cs++ = 0; /* upper addr */
		*cs++ = 0; /* value */
	} else {
		*cs++ = 0;
		*cs++ = MI_NOOP;
	}
	intel_ring_advance(req, cs);

	return 0;
}
static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
				       struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;
	int ret, i;

	if (!i915_modparams.semaphores)
		return;

	if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore) {
		struct i915_vma *vma;

		obj = i915_gem_object_create(dev_priv, PAGE_SIZE);
		if (IS_ERR(obj))
			goto err;

		vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
		if (IS_ERR(vma))
			goto err_obj;

		ret = i915_gem_object_set_to_gtt_domain(obj, false);
		if (ret)
			goto err_obj;

		ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
		if (ret)
			goto err_obj;

		dev_priv->semaphore = vma;
	}

	if (INTEL_GEN(dev_priv) >= 8) {
		u32 offset = i915_ggtt_offset(dev_priv->semaphore);

		engine->semaphore.sync_to = gen8_ring_sync_to;
		engine->semaphore.signal = gen8_xcs_signal;

		for (i = 0; i < I915_NUM_ENGINES; i++) {
			u32 ring_offset;

			if (i != engine->id)
				ring_offset = offset + GEN8_SEMAPHORE_OFFSET(engine->id, i);
			else
				ring_offset = MI_SEMAPHORE_SYNC_INVALID;

			engine->semaphore.signal_ggtt[i] = ring_offset;
		}
	} else if (INTEL_GEN(dev_priv) >= 6) {
		engine->semaphore.sync_to = gen6_ring_sync_to;
		engine->semaphore.signal = gen6_signal;

		/*
		 * The current semaphore is only applied on pre-gen8
		 * platform. And there is no VCS2 ring on the pre-gen8
		 * platform. So the semaphore between RCS and VCS2 is
		 * initialized as INVALID. Gen8 will initialize the
		 * sema between VCS2 and RCS later.
		 */
		for (i = 0; i < GEN6_NUM_SEMAPHORES; i++) {
			static const struct {
				u32 wait_mbox;
				i915_reg_t mbox_reg;
			} sem_data[GEN6_NUM_SEMAPHORES][GEN6_NUM_SEMAPHORES] = {
				[RCS_HW] = {
					[VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_RV,  .mbox_reg = GEN6_VRSYNC },
					[BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_RB,  .mbox_reg = GEN6_BRSYNC },
					[VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC },
				},
				[VCS_HW] = {
					[RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VR,  .mbox_reg = GEN6_RVSYNC },
					[BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VB,  .mbox_reg = GEN6_BVSYNC },
					[VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC },
				},
				[BCS_HW] = {
					[RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_BR,  .mbox_reg = GEN6_RBSYNC },
					[VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_BV,  .mbox_reg = GEN6_VBSYNC },
					[VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC },
				},
				[VECS_HW] = {
					[RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC },
					[VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC },
					[BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC },
				},
			};
			u32 wait_mbox;
			i915_reg_t mbox_reg;

			if (i == engine->hw_id) {
				wait_mbox = MI_SEMAPHORE_SYNC_INVALID;
				mbox_reg = GEN6_NOSYNC;
			} else {
				wait_mbox = sem_data[engine->hw_id][i].wait_mbox;
				mbox_reg = sem_data[engine->hw_id][i].mbox_reg;
			}

			engine->semaphore.mbox.wait[i] = wait_mbox;
			engine->semaphore.mbox.signal[i] = mbox_reg;
		}
	}

	return;

err_obj:
	i915_gem_object_put(obj);
err:
	DRM_DEBUG_DRIVER("Failed to allocate space for semaphores, disabling\n");
	i915_modparams.semaphores = 0;
}
static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
				struct intel_engine_cs *engine)
{
	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << engine->irq_shift;

	if (INTEL_GEN(dev_priv) >= 8) {
		engine->irq_enable = gen8_irq_enable;
		engine->irq_disable = gen8_irq_disable;
		engine->irq_seqno_barrier = gen6_seqno_barrier;
	} else if (INTEL_GEN(dev_priv) >= 6) {
		engine->irq_enable = gen6_irq_enable;
		engine->irq_disable = gen6_irq_disable;
		engine->irq_seqno_barrier = gen6_seqno_barrier;
	} else if (INTEL_GEN(dev_priv) >= 5) {
		engine->irq_enable = gen5_irq_enable;
		engine->irq_disable = gen5_irq_disable;
		engine->irq_seqno_barrier = gen5_seqno_barrier;
	} else if (INTEL_GEN(dev_priv) >= 3) {
		engine->irq_enable = i9xx_irq_enable;
		engine->irq_disable = i9xx_irq_disable;
	} else {
		engine->irq_enable = i8xx_irq_enable;
		engine->irq_disable = i8xx_irq_disable;
	}
}
static void i9xx_set_default_submission(struct intel_engine_cs *engine)
{
	engine->submit_request = i9xx_submit_request;
	engine->cancel_requests = cancel_requests;
}

static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
{
	engine->submit_request = gen6_bsd_submit_request;
	engine->cancel_requests = cancel_requests;
}
static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
				      struct intel_engine_cs *engine)
{
	intel_ring_init_irq(dev_priv, engine);
	intel_ring_init_semaphores(dev_priv, engine);

	engine->init_hw = init_ring_common;
	engine->reset_hw = reset_ring_common;

	engine->context_pin = intel_ring_context_pin;
	engine->context_unpin = intel_ring_context_unpin;

	engine->request_alloc = ring_request_alloc;

	engine->emit_breadcrumb = i9xx_emit_breadcrumb;
	engine->emit_breadcrumb_sz = i9xx_emit_breadcrumb_sz;
	if (i915_modparams.semaphores) {
		int num_rings;

		engine->emit_breadcrumb = gen6_sema_emit_breadcrumb;

		num_rings = INTEL_INFO(dev_priv)->num_rings - 1;
		if (INTEL_GEN(dev_priv) >= 8) {
			engine->emit_breadcrumb_sz += num_rings * 6;
		} else {
			engine->emit_breadcrumb_sz += num_rings * 3;
			if (num_rings & 1)
				engine->emit_breadcrumb_sz++;
		}
	}

	engine->set_default_submission = i9xx_set_default_submission;

	if (INTEL_GEN(dev_priv) >= 8)
		engine->emit_bb_start = gen8_emit_bb_start;
	else if (INTEL_GEN(dev_priv) >= 6)
		engine->emit_bb_start = gen6_emit_bb_start;
	else if (INTEL_GEN(dev_priv) >= 4)
		engine->emit_bb_start = i965_emit_bb_start;
	else if (IS_I830(dev_priv) || IS_I845G(dev_priv))
		engine->emit_bb_start = i830_emit_bb_start;
	else
		engine->emit_bb_start = i915_emit_bb_start;
}
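/*
 * Sizing example for the semaphore path above (illustrative): with 4
 * rings, num_rings == 3, so a gen6/7 breadcrumb grows from 4 dwords
 * to 4 + 3 * 3 == 13 and the "num_rings & 1" correction pads it to 14
 * to keep the ring emission qword-aligned; on gen8 it would be
 * 4 + 3 * 6 == 22, matching the 6 dwords per ring of gen8_xcs_signal().
 */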
int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	intel_ring_default_vfuncs(dev_priv, engine);

	if (HAS_L3_DPF(dev_priv))
		engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	if (INTEL_GEN(dev_priv) >= 8) {
		engine->init_context = intel_rcs_ctx_init;
		engine->emit_breadcrumb = gen8_render_emit_breadcrumb;
		engine->emit_breadcrumb_sz = gen8_render_emit_breadcrumb_sz;
		engine->emit_flush = gen8_render_ring_flush;
		if (i915_modparams.semaphores) {
			int num_rings;

			engine->semaphore.signal = gen8_rcs_signal;

			num_rings = INTEL_INFO(dev_priv)->num_rings - 1;
			engine->emit_breadcrumb_sz += num_rings * 8;
		}
	} else if (INTEL_GEN(dev_priv) >= 6) {
		engine->init_context = intel_rcs_ctx_init;
		engine->emit_flush = gen7_render_ring_flush;
		if (IS_GEN6(dev_priv))
			engine->emit_flush = gen6_render_ring_flush;
	} else if (IS_GEN5(dev_priv)) {
		engine->emit_flush = gen4_render_ring_flush;
	} else {
		if (INTEL_GEN(dev_priv) < 4)
			engine->emit_flush = gen2_render_ring_flush;
		else
			engine->emit_flush = gen4_render_ring_flush;
		engine->irq_enable_mask = I915_USER_INTERRUPT;
	}

	if (IS_HASWELL(dev_priv))
		engine->emit_bb_start = hsw_emit_bb_start;

	engine->init_hw = init_render_ring;
	engine->cleanup = render_ring_cleanup;

	ret = intel_init_ring_buffer(engine);
	if (ret)
		return ret;

	if (INTEL_GEN(dev_priv) >= 6) {
		ret = intel_engine_create_scratch(engine, PAGE_SIZE);
		if (ret)
			return ret;
	} else if (HAS_BROKEN_CS_TLB(dev_priv)) {
		ret = intel_engine_create_scratch(engine, I830_WA_SIZE);
		if (ret)
			return ret;
	}

	return 0;
}
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	intel_ring_default_vfuncs(dev_priv, engine);

	if (INTEL_GEN(dev_priv) >= 6) {
		/* gen6 bsd needs a special wa for tail updates */
		if (IS_GEN6(dev_priv))
			engine->set_default_submission = gen6_bsd_set_default_submission;
		engine->emit_flush = gen6_bsd_ring_flush;
		if (INTEL_GEN(dev_priv) < 8)
			engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
	} else {
		engine->mmio_base = BSD_RING_BASE;
		engine->emit_flush = bsd_ring_flush;
		if (IS_GEN5(dev_priv))
			engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
		else
			engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
	}

	return intel_init_ring_buffer(engine);
}
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	intel_ring_default_vfuncs(dev_priv, engine);

	engine->emit_flush = gen6_ring_flush;
	if (INTEL_GEN(dev_priv) < 8)
		engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;

	return intel_init_ring_buffer(engine);
}
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	intel_ring_default_vfuncs(dev_priv, engine);

	engine->emit_flush = gen6_ring_flush;

	if (INTEL_GEN(dev_priv) < 8) {
		engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
		engine->irq_enable = hsw_vebox_irq_enable;
		engine->irq_disable = hsw_vebox_irq_disable;
	}

	return intel_init_ring_buffer(engine);
}