/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */
#include <linux/log2.h>

#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/* Rough estimate of the typical request size, performing a flush,
 * set-context and then emitting the batch.
 */
#define LEGACY_REQUEST_SIZE 200
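/*
 * Note (added for clarity): this estimate is added to request->reserved_space
 * in ring_request_alloc() below, so that emitting the final breadcrumb can
 * never fail for lack of ring space.
 */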
static unsigned int __intel_ring_space(unsigned int head,
				       unsigned int tail,
				       unsigned int size)
{
	/*
	 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
	 * same cacheline, the Head Pointer must not be greater than the Tail
	 * Pointer."
	 */
	GEM_BUG_ON(!is_power_of_2(size));
	return (head - tail - CACHELINE_BYTES) & (size - 1);
}
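/*
 * Worked example (illustrative, not from the original source): with a
 * 4096-byte ring and 64-byte cachelines, head == tail == 0 gives
 * (0 - 0 - 64) & 4095 == 4032, i.e. the reported free space is always one
 * cacheline short of the ring size so head and tail never share a cacheline.
 */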
unsigned int intel_ring_update_space(struct intel_ring *ring)
{
	unsigned int space;

	space = __intel_ring_space(ring->head, ring->emit, ring->size);

	ring->space = space;
	return space;
}
static int
gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	u32 cmd, *cs;

	cmd = MI_FLUSH;
	if (mode & EMIT_INVALIDATE)
		cmd |= MI_READ_FLUSH;

	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = cmd;
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}
static int
gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	u32 cmd, *cs;

	/*
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
	 * also flushed at 2d versus 3d pipeline switches.
	 *
	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
	 * MI_READ_FLUSH is set, and is always flushed on 965.
	 *
	 * I915_GEM_DOMAIN_COMMAND may not exist?
	 *
	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
	 * invalidated when MI_EXE_FLUSH is set.
	 *
	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
	 * invalidated with every MI_FLUSH.
	 *
	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
	 * are flushed at any MI_FLUSH.
	 */

	cmd = MI_FLUSH;
	if (mode & EMIT_INVALIDATE) {
		cmd |= MI_EXE_FLUSH;
		if (IS_G4X(req->i915) || IS_GEN5(req->i915))
			cmd |= MI_INVALIDATE_ISP;
	}

	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = cmd;
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}
/*
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6. From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 * 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flush bits.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it. Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either. Notify enable is IRQs, which aren't
 * really our business. That leaves only stall at scoreboard.
 */
static int
intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
{
	u32 scratch_addr =
		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
	u32 *cs;

	cs = intel_ring_begin(req, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(5);
	*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
	*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = 0; /* low dword */
	*cs++ = 0; /* high dword */
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	cs = intel_ring_begin(req, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(5);
	*cs++ = PIPE_CONTROL_QW_WRITE;
	*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}
static int
gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	u32 scratch_addr =
		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
	u32 *cs, flags = 0;
	int ret;

	/* Force SNB workarounds for PIPE_CONTROL flushes */
	ret = intel_emit_post_sync_nonzero_flush(req);
	if (ret)
		return ret;

	/* Just flush everything. Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		/*
		 * Ensure that any following seqno writes only happen
		 * when the render cache is indeed flushed.
		 */
		flags |= PIPE_CONTROL_CS_STALL;
	}
	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
	}

	cs = intel_ring_begin(req, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = flags;
	*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = 0;
	intel_ring_advance(req, cs);

	return 0;
}
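/*
 * Illustrative usage (not part of the original file): callers combine the
 * mode bits, e.g.
 *
 *	err = engine->emit_flush(req, EMIT_FLUSH | EMIT_INVALIDATE);
 *
 * and the helpers above translate that into the PIPE_CONTROL flags for the
 * generation in question.
 */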
static int
gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
{
	u32 *cs;

	cs = intel_ring_begin(req, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
	*cs++ = 0;
	*cs++ = 0;
	intel_ring_advance(req, cs);

	return 0;
}
static int
gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	u32 scratch_addr =
		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
	u32 *cs, flags = 0;

	/*
	 * Ensure that any following seqno writes only happen when the render
	 * cache is indeed flushed.
	 *
	 * Workaround: 4th PIPE_CONTROL command (except the ones with only
	 * read-cache invalidate bits set) must have the CS_STALL bit set. We
	 * don't try to be clever and just set it unconditionally.
	 */
	flags |= PIPE_CONTROL_CS_STALL;

	/* Just flush everything. Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
	}
	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

		flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;

		/* Workaround: we must issue a pipe_control with CS-stall bit
		 * set before a pipe_control command that has the state cache
		 * invalidate bit set. */
		gen7_render_ring_cs_stall_wa(req);
	}

	cs = intel_ring_begin(req, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = flags;
	*cs++ = scratch_addr;
	*cs++ = 0;
	intel_ring_advance(req, cs);

	return 0;
}
static int
gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
	u32 flags;
	u32 *cs;

	cs = intel_ring_begin(req, mode & EMIT_INVALIDATE ? 12 : 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	flags = PIPE_CONTROL_CS_STALL;

	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
	}
	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

		/* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
		cs = gen8_emit_pipe_control(cs,
					    PIPE_CONTROL_CS_STALL |
					    PIPE_CONTROL_STALL_AT_SCOREBOARD,
					    0);
	}

	cs = gen8_emit_pipe_control(cs, flags,
				    i915_ggtt_offset(req->engine->scratch) +
				    2 * CACHELINE_BYTES);

	intel_ring_advance(req, cs);

	return 0;
}
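/*
 * Note (added for clarity): the invalidate path above reserves 12 dwords
 * instead of 6 because the WaCsStallBeforeStateCacheInvalidate workaround
 * emits an extra 6-dword PIPE_CONTROL before the main one.
 */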
static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u32 addr;

	addr = dev_priv->status_page_dmah->busaddr;
	if (INTEL_GEN(dev_priv) >= 4)
		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
	I915_WRITE(HWS_PGA, addr);
}
static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	i915_reg_t mmio;

	/* The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (IS_GEN7(dev_priv)) {
		switch (engine->id) {
		case RCS:
			mmio = RENDER_HWS_PGA_GEN7;
			break;
		case BCS:
			mmio = BLT_HWS_PGA_GEN7;
			break;
		/*
		 * VCS2 actually doesn't exist on Gen7. Only shut up
		 * gcc switch check warning.
		 */
		case VCS2:
		case VCS:
			mmio = BSD_HWS_PGA_GEN7;
			break;
		case VECS:
			mmio = VEBOX_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN6(dev_priv)) {
		mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
	} else {
		/* XXX: gen8 returns to sanity */
		mmio = RING_HWS_PGA(engine->mmio_base);
	}

	I915_WRITE(mmio, engine->status_page.ggtt_offset);

	/*
	 * Flush the TLB for this page.
	 *
	 * FIXME: These two bits have disappeared on gen8, so a question
	 * arises: do we still need this and if so how should we go about
	 * invalidating the TLB?
	 */
	if (IS_GEN(dev_priv, 6, 7)) {
		i915_reg_t reg = RING_INSTPM(engine->mmio_base);

		/* ring should be idle before issuing a sync flush */
		WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);

		I915_WRITE(reg,
			   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
					      INSTPM_SYNC_FLUSH));
		if (intel_wait_for_register(dev_priv,
					    reg, INSTPM_SYNC_FLUSH, 0,
					    1000))
			DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
				  engine->name);
	}
}
static bool stop_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (INTEL_GEN(dev_priv) > 2) {
		I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
		if (intel_wait_for_register(dev_priv,
					    RING_MI_MODE(engine->mmio_base),
					    MODE_IDLE,
					    MODE_IDLE,
					    1000)) {
			DRM_ERROR("%s : timed out trying to stop ring\n",
				  engine->name);

			/* Sometimes we observe that the idle flag is not
			 * set even though the ring is empty. So double
			 * check before giving up.
			 */
			if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine))
				return false;
		}
	}

	I915_WRITE_CTL(engine, 0);
	I915_WRITE_HEAD(engine, 0);
	I915_WRITE_TAIL(engine, 0);

	if (INTEL_GEN(dev_priv) > 2) {
		(void)I915_READ_CTL(engine);
		I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
	}

	return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
}
static int init_ring_common(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct intel_ring *ring = engine->buffer;
	int ret = 0;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (!stop_ring(engine)) {
		/* G45 ring initialization often fails to reset head to zero */
		DRM_DEBUG_KMS("%s head not reset to zero "
			      "ctl %08x head %08x tail %08x start %08x\n",
			      engine->name,
			      I915_READ_CTL(engine),
			      I915_READ_HEAD(engine),
			      I915_READ_TAIL(engine),
			      I915_READ_START(engine));

		if (!stop_ring(engine)) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  engine->name,
				  I915_READ_CTL(engine),
				  I915_READ_HEAD(engine),
				  I915_READ_TAIL(engine),
				  I915_READ_START(engine));
			ret = -EIO;
			goto out;
		}
	}

	if (HWS_NEEDS_PHYSICAL(dev_priv))
		ring_setup_phys_status_page(engine);
	else
		intel_ring_setup_status_page(engine);

	intel_engine_reset_breadcrumbs(engine);

	/* Enforce ordering by reading HEAD register back */
	I915_READ_HEAD(engine);

	/* Initialize the ring. This must happen _after_ we've cleared the ring
	 * registers with the above sequence (the readback of the HEAD registers
	 * also enforces ordering), otherwise the hw might lose the new ring
	 * register values. */
	I915_WRITE_START(engine, i915_ggtt_offset(ring->vma));

	/* WaClearRingBufHeadRegAtInit:ctg,elk */
	if (I915_READ_HEAD(engine))
		DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
			  engine->name, I915_READ_HEAD(engine));

	intel_ring_update_space(ring);
	I915_WRITE_HEAD(engine, ring->head);
	I915_WRITE_TAIL(engine, ring->tail);
	(void)I915_READ_TAIL(engine);

	I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if (intel_wait_for_register(dev_priv, RING_CTL(engine->mmio_base),
				    RING_VALID, RING_VALID,
				    50)) {
		DRM_ERROR("%s initialization failed "
			  "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
			  engine->name,
			  I915_READ_CTL(engine),
			  I915_READ_CTL(engine) & RING_VALID,
			  I915_READ_HEAD(engine), ring->head,
			  I915_READ_TAIL(engine), ring->tail,
			  I915_READ_START(engine),
			  i915_ggtt_offset(ring->vma));
		ret = -EIO;
		goto out;
	}

	intel_engine_init_hangcheck(engine);

out:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}
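/*
 * Note (added for clarity): the ordering in init_ring_common() matters: the
 * ring is stopped first, the status page is reprogrammed, START is written
 * before HEAD/TAIL, and only then is RING_VALID set in CTL, so the hardware
 * never samples a partially programmed ring.
 */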
575 static void reset_ring_common(struct intel_engine_cs
*engine
,
576 struct drm_i915_gem_request
*request
)
578 /* Try to restore the logical GPU state to match the continuation
579 * of the request queue. If we skip the context/PD restore, then
580 * the next request may try to execute assuming that its context
581 * is valid and loaded on the GPU and so may try to access invalid
582 * memory, prompting repeated GPU hangs.
584 * If the request was guilty, we still restore the logical state
585 * in case the next request requires it (e.g. the aliasing ppgtt),
586 * but skip over the hung batch.
588 * If the request was innocent, we try to replay the request with
589 * the restored context.
592 struct drm_i915_private
*dev_priv
= request
->i915
;
593 struct intel_context
*ce
= &request
->ctx
->engine
[engine
->id
];
594 struct i915_hw_ppgtt
*ppgtt
;
596 /* FIXME consider gen8 reset */
600 i915_ggtt_offset(ce
->state
) |
601 BIT(8) /* must be set! */ |
602 CCID_EXTENDED_STATE_SAVE
|
603 CCID_EXTENDED_STATE_RESTORE
|
607 ppgtt
= request
->ctx
->ppgtt
?: engine
->i915
->mm
.aliasing_ppgtt
;
609 u32 pd_offset
= ppgtt
->pd
.base
.ggtt_offset
<< 10;
611 I915_WRITE(RING_PP_DIR_DCLV(engine
), PP_DIR_DCLV_2G
);
612 I915_WRITE(RING_PP_DIR_BASE(engine
), pd_offset
);
614 /* Wait for the PD reload to complete */
615 if (intel_wait_for_register(dev_priv
,
616 RING_PP_DIR_BASE(engine
),
619 DRM_ERROR("Wait for reload of ppgtt page-directory timed out\n");
621 ppgtt
->pd_dirty_rings
&= ~intel_engine_flag(engine
);
624 /* If the rq hung, jump to its breadcrumb and skip the batch */
625 if (request
->fence
.error
== -EIO
)
626 request
->ring
->head
= request
->postfix
;
628 engine
->legacy_active_context
= NULL
;
632 static int intel_rcs_ctx_init(struct drm_i915_gem_request
*req
)
636 ret
= intel_ring_workarounds_emit(req
);
640 ret
= i915_gem_render_state_emit(req
);
647 static int init_render_ring(struct intel_engine_cs
*engine
)
649 struct drm_i915_private
*dev_priv
= engine
->i915
;
650 int ret
= init_ring_common(engine
);
654 /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
655 if (IS_GEN(dev_priv
, 4, 6))
656 I915_WRITE(MI_MODE
, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH
));
658 /* We need to disable the AsyncFlip performance optimisations in order
659 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
660 * programmed to '1' on all products.
662 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
664 if (IS_GEN(dev_priv
, 6, 7))
665 I915_WRITE(MI_MODE
, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE
));
667 /* Required for the hardware to program scanline values for waiting */
668 /* WaEnableFlushTlbInvalidationMode:snb */
669 if (IS_GEN6(dev_priv
))
671 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT
));
673 /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
674 if (IS_GEN7(dev_priv
))
675 I915_WRITE(GFX_MODE_GEN7
,
676 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT
) |
677 _MASKED_BIT_ENABLE(GFX_REPLAY_MODE
));
679 if (IS_GEN6(dev_priv
)) {
680 /* From the Sandybridge PRM, volume 1 part 3, page 24:
681 * "If this bit is set, STCunit will have LRA as replacement
682 * policy. [...] This bit must be reset. LRA replacement
683 * policy is not supported."
685 I915_WRITE(CACHE_MODE_0
,
686 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB
));
689 if (IS_GEN(dev_priv
, 6, 7))
690 I915_WRITE(INSTPM
, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING
));
692 if (INTEL_INFO(dev_priv
)->gen
>= 6)
693 I915_WRITE_IMR(engine
, ~engine
->irq_keep_mask
);
695 return init_workarounds_ring(engine
);
698 static void render_ring_cleanup(struct intel_engine_cs
*engine
)
700 struct drm_i915_private
*dev_priv
= engine
->i915
;
702 i915_vma_unpin_and_release(&dev_priv
->semaphore
);
705 static u32
*gen8_rcs_signal(struct drm_i915_gem_request
*req
, u32
*cs
)
707 struct drm_i915_private
*dev_priv
= req
->i915
;
708 struct intel_engine_cs
*waiter
;
709 enum intel_engine_id id
;
711 for_each_engine(waiter
, dev_priv
, id
) {
712 u64 gtt_offset
= req
->engine
->semaphore
.signal_ggtt
[id
];
713 if (gtt_offset
== MI_SEMAPHORE_SYNC_INVALID
)
716 *cs
++ = GFX_OP_PIPE_CONTROL(6);
717 *cs
++ = PIPE_CONTROL_GLOBAL_GTT_IVB
| PIPE_CONTROL_QW_WRITE
|
718 PIPE_CONTROL_CS_STALL
;
719 *cs
++ = lower_32_bits(gtt_offset
);
720 *cs
++ = upper_32_bits(gtt_offset
);
721 *cs
++ = req
->global_seqno
;
723 *cs
++ = MI_SEMAPHORE_SIGNAL
|
724 MI_SEMAPHORE_TARGET(waiter
->hw_id
);
731 static u32
*gen8_xcs_signal(struct drm_i915_gem_request
*req
, u32
*cs
)
733 struct drm_i915_private
*dev_priv
= req
->i915
;
734 struct intel_engine_cs
*waiter
;
735 enum intel_engine_id id
;
737 for_each_engine(waiter
, dev_priv
, id
) {
738 u64 gtt_offset
= req
->engine
->semaphore
.signal_ggtt
[id
];
739 if (gtt_offset
== MI_SEMAPHORE_SYNC_INVALID
)
742 *cs
++ = (MI_FLUSH_DW
+ 1) | MI_FLUSH_DW_OP_STOREDW
;
743 *cs
++ = lower_32_bits(gtt_offset
) | MI_FLUSH_DW_USE_GTT
;
744 *cs
++ = upper_32_bits(gtt_offset
);
745 *cs
++ = req
->global_seqno
;
746 *cs
++ = MI_SEMAPHORE_SIGNAL
|
747 MI_SEMAPHORE_TARGET(waiter
->hw_id
);
754 static u32
*gen6_signal(struct drm_i915_gem_request
*req
, u32
*cs
)
756 struct drm_i915_private
*dev_priv
= req
->i915
;
757 struct intel_engine_cs
*engine
;
758 enum intel_engine_id id
;
761 for_each_engine(engine
, dev_priv
, id
) {
764 if (!(BIT(engine
->hw_id
) & GEN6_SEMAPHORES_MASK
))
767 mbox_reg
= req
->engine
->semaphore
.mbox
.signal
[engine
->hw_id
];
768 if (i915_mmio_reg_valid(mbox_reg
)) {
769 *cs
++ = MI_LOAD_REGISTER_IMM(1);
770 *cs
++ = i915_mmio_reg_offset(mbox_reg
);
771 *cs
++ = req
->global_seqno
;
781 static void i9xx_submit_request(struct drm_i915_gem_request
*request
)
783 struct drm_i915_private
*dev_priv
= request
->i915
;
785 i915_gem_request_submit(request
);
787 I915_WRITE_TAIL(request
->engine
,
788 intel_ring_set_tail(request
->ring
, request
->tail
));
791 static void i9xx_emit_breadcrumb(struct drm_i915_gem_request
*req
, u32
*cs
)
793 *cs
++ = MI_STORE_DWORD_INDEX
;
794 *cs
++ = I915_GEM_HWS_INDEX
<< MI_STORE_DWORD_INDEX_SHIFT
;
795 *cs
++ = req
->global_seqno
;
796 *cs
++ = MI_USER_INTERRUPT
;
798 req
->tail
= intel_ring_offset(req
, cs
);
799 assert_ring_tail_valid(req
->ring
, req
->tail
);
802 static const int i9xx_emit_breadcrumb_sz
= 4;
/**
 * gen6_sema_emit_breadcrumb - Update the semaphore mailbox registers
 *
 * @req: request to write to the ring
 *
 * Update the mailbox registers in the *other* rings with the current seqno.
 * This acts like a signal in the canonical semaphore.
 */
812 static void gen6_sema_emit_breadcrumb(struct drm_i915_gem_request
*req
, u32
*cs
)
814 return i9xx_emit_breadcrumb(req
,
815 req
->engine
->semaphore
.signal(req
, cs
));
818 static void gen8_render_emit_breadcrumb(struct drm_i915_gem_request
*req
,
821 struct intel_engine_cs
*engine
= req
->engine
;
823 if (engine
->semaphore
.signal
)
824 cs
= engine
->semaphore
.signal(req
, cs
);
826 *cs
++ = GFX_OP_PIPE_CONTROL(6);
827 *cs
++ = PIPE_CONTROL_GLOBAL_GTT_IVB
| PIPE_CONTROL_CS_STALL
|
828 PIPE_CONTROL_QW_WRITE
;
829 *cs
++ = intel_hws_seqno_address(engine
);
831 *cs
++ = req
->global_seqno
;
832 /* We're thrashing one dword of HWS. */
834 *cs
++ = MI_USER_INTERRUPT
;
837 req
->tail
= intel_ring_offset(req
, cs
);
838 assert_ring_tail_valid(req
->ring
, req
->tail
);
841 static const int gen8_render_emit_breadcrumb_sz
= 8;
/**
 * intel_ring_sync - sync the waiter to the signaller on seqno
 *
 * @waiter: ring that is waiting
 * @signaller: ring which has, or will signal
 * @seqno: seqno which the waiter will block on
 */
852 gen8_ring_sync_to(struct drm_i915_gem_request
*req
,
853 struct drm_i915_gem_request
*signal
)
855 struct drm_i915_private
*dev_priv
= req
->i915
;
856 u64 offset
= GEN8_WAIT_OFFSET(req
->engine
, signal
->engine
->id
);
857 struct i915_hw_ppgtt
*ppgtt
;
860 cs
= intel_ring_begin(req
, 4);
864 *cs
++ = MI_SEMAPHORE_WAIT
| MI_SEMAPHORE_GLOBAL_GTT
|
865 MI_SEMAPHORE_SAD_GTE_SDD
;
866 *cs
++ = signal
->global_seqno
;
867 *cs
++ = lower_32_bits(offset
);
868 *cs
++ = upper_32_bits(offset
);
869 intel_ring_advance(req
, cs
);
871 /* When the !RCS engines idle waiting upon a semaphore, they lose their
872 * pagetables and we must reload them before executing the batch.
873 * We do this on the i915_switch_context() following the wait and
874 * before the dispatch.
876 ppgtt
= req
->ctx
->ppgtt
;
877 if (ppgtt
&& req
->engine
->id
!= RCS
)
878 ppgtt
->pd_dirty_rings
|= intel_engine_flag(req
->engine
);
883 gen6_ring_sync_to(struct drm_i915_gem_request
*req
,
884 struct drm_i915_gem_request
*signal
)
886 u32 dw1
= MI_SEMAPHORE_MBOX
|
887 MI_SEMAPHORE_COMPARE
|
888 MI_SEMAPHORE_REGISTER
;
889 u32 wait_mbox
= signal
->engine
->semaphore
.mbox
.wait
[req
->engine
->hw_id
];
892 WARN_ON(wait_mbox
== MI_SEMAPHORE_SYNC_INVALID
);
894 cs
= intel_ring_begin(req
, 4);
898 *cs
++ = dw1
| wait_mbox
;
899 /* Throughout all of the GEM code, seqno passed implies our current
900 * seqno is >= the last seqno executed. However for hardware the
901 * comparison is strictly greater than.
903 *cs
++ = signal
->global_seqno
- 1;
906 intel_ring_advance(req
, cs
);
912 gen5_seqno_barrier(struct intel_engine_cs
*engine
)
914 /* MI_STORE are internally buffered by the GPU and not flushed
915 * either by MI_FLUSH or SyncFlush or any other combination of
918 * "Only the submission of the store operation is guaranteed.
919 * The write result will be complete (coherent) some time later
920 * (this is practically a finite period but there is no guaranteed
923 * Empirically, we observe that we need a delay of at least 75us to
924 * be sure that the seqno write is visible by the CPU.
926 usleep_range(125, 250);
930 gen6_seqno_barrier(struct intel_engine_cs
*engine
)
932 struct drm_i915_private
*dev_priv
= engine
->i915
;
934 /* Workaround to force correct ordering between irq and seqno writes on
935 * ivb (and maybe also on snb) by reading from a CS register (like
936 * ACTHD) before reading the status page.
938 * Note that this effectively stalls the read by the time it takes to
939 * do a memory transaction, which more or less ensures that the write
940 * from the GPU has sufficient time to invalidate the CPU cacheline.
941 * Alternatively we could delay the interrupt from the CS ring to give
942 * the write time to land, but that would incur a delay after every
943 * batch i.e. much more frequent than a delay when waiting for the
944 * interrupt (with the same net latency).
946 * Also note that to prevent whole machine hangs on gen7, we have to
947 * take the spinlock to guard against concurrent cacheline access.
949 spin_lock_irq(&dev_priv
->uncore
.lock
);
950 POSTING_READ_FW(RING_ACTHD(engine
->mmio_base
));
951 spin_unlock_irq(&dev_priv
->uncore
.lock
);
955 gen5_irq_enable(struct intel_engine_cs
*engine
)
957 gen5_enable_gt_irq(engine
->i915
, engine
->irq_enable_mask
);
961 gen5_irq_disable(struct intel_engine_cs
*engine
)
963 gen5_disable_gt_irq(engine
->i915
, engine
->irq_enable_mask
);
967 i9xx_irq_enable(struct intel_engine_cs
*engine
)
969 struct drm_i915_private
*dev_priv
= engine
->i915
;
971 dev_priv
->irq_mask
&= ~engine
->irq_enable_mask
;
972 I915_WRITE(IMR
, dev_priv
->irq_mask
);
973 POSTING_READ_FW(RING_IMR(engine
->mmio_base
));
977 i9xx_irq_disable(struct intel_engine_cs
*engine
)
979 struct drm_i915_private
*dev_priv
= engine
->i915
;
981 dev_priv
->irq_mask
|= engine
->irq_enable_mask
;
982 I915_WRITE(IMR
, dev_priv
->irq_mask
);
986 i8xx_irq_enable(struct intel_engine_cs
*engine
)
988 struct drm_i915_private
*dev_priv
= engine
->i915
;
990 dev_priv
->irq_mask
&= ~engine
->irq_enable_mask
;
991 I915_WRITE16(IMR
, dev_priv
->irq_mask
);
992 POSTING_READ16(RING_IMR(engine
->mmio_base
));
996 i8xx_irq_disable(struct intel_engine_cs
*engine
)
998 struct drm_i915_private
*dev_priv
= engine
->i915
;
1000 dev_priv
->irq_mask
|= engine
->irq_enable_mask
;
1001 I915_WRITE16(IMR
, dev_priv
->irq_mask
);
1005 bsd_ring_flush(struct drm_i915_gem_request
*req
, u32 mode
)
1009 cs
= intel_ring_begin(req
, 2);
1015 intel_ring_advance(req
, cs
);
1020 gen6_irq_enable(struct intel_engine_cs
*engine
)
1022 struct drm_i915_private
*dev_priv
= engine
->i915
;
1024 I915_WRITE_IMR(engine
,
1025 ~(engine
->irq_enable_mask
|
1026 engine
->irq_keep_mask
));
1027 gen5_enable_gt_irq(dev_priv
, engine
->irq_enable_mask
);
1031 gen6_irq_disable(struct intel_engine_cs
*engine
)
1033 struct drm_i915_private
*dev_priv
= engine
->i915
;
1035 I915_WRITE_IMR(engine
, ~engine
->irq_keep_mask
);
1036 gen5_disable_gt_irq(dev_priv
, engine
->irq_enable_mask
);
1040 hsw_vebox_irq_enable(struct intel_engine_cs
*engine
)
1042 struct drm_i915_private
*dev_priv
= engine
->i915
;
1044 I915_WRITE_IMR(engine
, ~engine
->irq_enable_mask
);
1045 gen6_unmask_pm_irq(dev_priv
, engine
->irq_enable_mask
);
1049 hsw_vebox_irq_disable(struct intel_engine_cs
*engine
)
1051 struct drm_i915_private
*dev_priv
= engine
->i915
;
1053 I915_WRITE_IMR(engine
, ~0);
1054 gen6_mask_pm_irq(dev_priv
, engine
->irq_enable_mask
);
1058 gen8_irq_enable(struct intel_engine_cs
*engine
)
1060 struct drm_i915_private
*dev_priv
= engine
->i915
;
1062 I915_WRITE_IMR(engine
,
1063 ~(engine
->irq_enable_mask
|
1064 engine
->irq_keep_mask
));
1065 POSTING_READ_FW(RING_IMR(engine
->mmio_base
));
1069 gen8_irq_disable(struct intel_engine_cs
*engine
)
1071 struct drm_i915_private
*dev_priv
= engine
->i915
;
1073 I915_WRITE_IMR(engine
, ~engine
->irq_keep_mask
);
1077 i965_emit_bb_start(struct drm_i915_gem_request
*req
,
1078 u64 offset
, u32 length
,
1079 unsigned int dispatch_flags
)
1083 cs
= intel_ring_begin(req
, 2);
1087 *cs
++ = MI_BATCH_BUFFER_START
| MI_BATCH_GTT
| (dispatch_flags
&
1088 I915_DISPATCH_SECURE
? 0 : MI_BATCH_NON_SECURE_I965
);
1090 intel_ring_advance(req
, cs
);
/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
1096 #define I830_BATCH_LIMIT (256*1024)
1097 #define I830_TLB_ENTRIES (2)
1098 #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
1100 i830_emit_bb_start(struct drm_i915_gem_request
*req
,
1101 u64 offset
, u32 len
,
1102 unsigned int dispatch_flags
)
1104 u32
*cs
, cs_offset
= i915_ggtt_offset(req
->engine
->scratch
);
1106 cs
= intel_ring_begin(req
, 6);
1110 /* Evict the invalid PTE TLBs */
1111 *cs
++ = COLOR_BLT_CMD
| BLT_WRITE_RGBA
;
1112 *cs
++ = BLT_DEPTH_32
| BLT_ROP_COLOR_COPY
| 4096;
1113 *cs
++ = I830_TLB_ENTRIES
<< 16 | 4; /* load each page */
1117 intel_ring_advance(req
, cs
);
1119 if ((dispatch_flags
& I915_DISPATCH_PINNED
) == 0) {
1120 if (len
> I830_BATCH_LIMIT
)
1123 cs
= intel_ring_begin(req
, 6 + 2);
1127 /* Blit the batch (which has now all relocs applied) to the
1128 * stable batch scratch bo area (so that the CS never
1129 * stumbles over its tlb invalidation bug) ...
1131 *cs
++ = SRC_COPY_BLT_CMD
| BLT_WRITE_RGBA
;
1132 *cs
++ = BLT_DEPTH_32
| BLT_ROP_SRC_COPY
| 4096;
1133 *cs
++ = DIV_ROUND_UP(len
, 4096) << 16 | 4096;
1140 intel_ring_advance(req
, cs
);
1142 /* ... and execute it. */
1146 cs
= intel_ring_begin(req
, 2);
1150 *cs
++ = MI_BATCH_BUFFER_START
| MI_BATCH_GTT
;
1151 *cs
++ = offset
| (dispatch_flags
& I915_DISPATCH_SECURE
? 0 :
1152 MI_BATCH_NON_SECURE
);
1153 intel_ring_advance(req
, cs
);
1159 i915_emit_bb_start(struct drm_i915_gem_request
*req
,
1160 u64 offset
, u32 len
,
1161 unsigned int dispatch_flags
)
1165 cs
= intel_ring_begin(req
, 2);
1169 *cs
++ = MI_BATCH_BUFFER_START
| MI_BATCH_GTT
;
1170 *cs
++ = offset
| (dispatch_flags
& I915_DISPATCH_SECURE
? 0 :
1171 MI_BATCH_NON_SECURE
);
1172 intel_ring_advance(req
, cs
);
1177 static void cleanup_phys_status_page(struct intel_engine_cs
*engine
)
1179 struct drm_i915_private
*dev_priv
= engine
->i915
;
1181 if (!dev_priv
->status_page_dmah
)
1184 drm_pci_free(&dev_priv
->drm
, dev_priv
->status_page_dmah
);
1185 engine
->status_page
.page_addr
= NULL
;
1188 static void cleanup_status_page(struct intel_engine_cs
*engine
)
1190 struct i915_vma
*vma
;
1191 struct drm_i915_gem_object
*obj
;
1193 vma
= fetch_and_zero(&engine
->status_page
.vma
);
1199 i915_vma_unpin(vma
);
1200 i915_vma_close(vma
);
1202 i915_gem_object_unpin_map(obj
);
1203 __i915_gem_object_release_unless_active(obj
);
1206 static int init_status_page(struct intel_engine_cs
*engine
)
1208 struct drm_i915_gem_object
*obj
;
1209 struct i915_vma
*vma
;
1214 obj
= i915_gem_object_create_internal(engine
->i915
, PAGE_SIZE
);
1216 DRM_ERROR("Failed to allocate status page\n");
1217 return PTR_ERR(obj
);
1220 ret
= i915_gem_object_set_cache_level(obj
, I915_CACHE_LLC
);
1224 vma
= i915_vma_instance(obj
, &engine
->i915
->ggtt
.base
, NULL
);
1231 if (!HAS_LLC(engine
->i915
))
1232 /* On g33, we cannot place HWS above 256MiB, so
1233 * restrict its pinning to the low mappable arena.
1234 * Though this restriction is not documented for
1235 * gen4, gen5, or byt, they also behave similarly
1236 * and hang if the HWS is placed at the top of the
1237 * GTT. To generalise, it appears that all !llc
1238 * platforms have issues with us placing the HWS
1239 * above the mappable region (even though we never
1242 flags
|= PIN_MAPPABLE
;
1243 ret
= i915_vma_pin(vma
, 0, 4096, flags
);
1247 vaddr
= i915_gem_object_pin_map(obj
, I915_MAP_WB
);
1248 if (IS_ERR(vaddr
)) {
1249 ret
= PTR_ERR(vaddr
);
1253 engine
->status_page
.vma
= vma
;
1254 engine
->status_page
.ggtt_offset
= i915_ggtt_offset(vma
);
1255 engine
->status_page
.page_addr
= memset(vaddr
, 0, PAGE_SIZE
);
1257 DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
1258 engine
->name
, i915_ggtt_offset(vma
));
1262 i915_vma_unpin(vma
);
1264 i915_gem_object_put(obj
);
1268 static int init_phys_status_page(struct intel_engine_cs
*engine
)
1270 struct drm_i915_private
*dev_priv
= engine
->i915
;
1272 GEM_BUG_ON(engine
->id
!= RCS
);
1274 dev_priv
->status_page_dmah
=
1275 drm_pci_alloc(&dev_priv
->drm
, PAGE_SIZE
, PAGE_SIZE
);
1276 if (!dev_priv
->status_page_dmah
)
1279 engine
->status_page
.page_addr
= dev_priv
->status_page_dmah
->vaddr
;
1280 memset(engine
->status_page
.page_addr
, 0, PAGE_SIZE
);
1285 int intel_ring_pin(struct intel_ring
*ring
,
1286 struct drm_i915_private
*i915
,
1287 unsigned int offset_bias
)
1289 enum i915_map_type map
= HAS_LLC(i915
) ? I915_MAP_WB
: I915_MAP_WC
;
1290 struct i915_vma
*vma
= ring
->vma
;
1295 GEM_BUG_ON(ring
->vaddr
);
1300 flags
|= PIN_OFFSET_BIAS
| offset_bias
;
1301 if (vma
->obj
->stolen
)
1302 flags
|= PIN_MAPPABLE
;
1304 if (!(vma
->flags
& I915_VMA_GLOBAL_BIND
)) {
1305 if (flags
& PIN_MAPPABLE
|| map
== I915_MAP_WC
)
1306 ret
= i915_gem_object_set_to_gtt_domain(vma
->obj
, true);
1308 ret
= i915_gem_object_set_to_cpu_domain(vma
->obj
, true);
1313 ret
= i915_vma_pin(vma
, 0, PAGE_SIZE
, flags
);
1317 if (i915_vma_is_map_and_fenceable(vma
))
1318 addr
= (void __force
*)i915_vma_pin_iomap(vma
);
1320 addr
= i915_gem_object_pin_map(vma
->obj
, map
);
1328 i915_vma_unpin(vma
);
1329 return PTR_ERR(addr
);
1332 void intel_ring_reset(struct intel_ring
*ring
, u32 tail
)
1334 GEM_BUG_ON(!list_empty(&ring
->request_list
));
1338 intel_ring_update_space(ring
);
1341 void intel_ring_unpin(struct intel_ring
*ring
)
1343 GEM_BUG_ON(!ring
->vma
);
1344 GEM_BUG_ON(!ring
->vaddr
);
1346 /* Discard any unused bytes beyond that submitted to hw. */
1347 intel_ring_reset(ring
, ring
->tail
);
1349 if (i915_vma_is_map_and_fenceable(ring
->vma
))
1350 i915_vma_unpin_iomap(ring
->vma
);
1352 i915_gem_object_unpin_map(ring
->vma
->obj
);
1355 i915_vma_unpin(ring
->vma
);
1358 static struct i915_vma
*
1359 intel_ring_create_vma(struct drm_i915_private
*dev_priv
, int size
)
1361 struct drm_i915_gem_object
*obj
;
1362 struct i915_vma
*vma
;
1364 obj
= i915_gem_object_create_stolen(dev_priv
, size
);
1366 obj
= i915_gem_object_create_internal(dev_priv
, size
);
1368 return ERR_CAST(obj
);
1370 /* mark ring buffers as read-only from GPU side by default */
1373 vma
= i915_vma_instance(obj
, &dev_priv
->ggtt
.base
, NULL
);
1380 i915_gem_object_put(obj
);
1385 intel_engine_create_ring(struct intel_engine_cs
*engine
, int size
)
1387 struct intel_ring
*ring
;
1388 struct i915_vma
*vma
;
1390 GEM_BUG_ON(!is_power_of_2(size
));
1391 GEM_BUG_ON(RING_CTL_SIZE(size
) & ~RING_NR_PAGES
);
1393 ring
= kzalloc(sizeof(*ring
), GFP_KERNEL
);
1395 return ERR_PTR(-ENOMEM
);
1397 INIT_LIST_HEAD(&ring
->request_list
);
1400 /* Workaround an erratum on the i830 which causes a hang if
1401 * the TAIL pointer points to within the last 2 cachelines
1404 ring
->effective_size
= size
;
1405 if (IS_I830(engine
->i915
) || IS_I845G(engine
->i915
))
1406 ring
->effective_size
-= 2 * CACHELINE_BYTES
;
1408 intel_ring_update_space(ring
);
1410 vma
= intel_ring_create_vma(engine
->i915
, size
);
1413 return ERR_CAST(vma
);
1421 intel_ring_free(struct intel_ring
*ring
)
1423 struct drm_i915_gem_object
*obj
= ring
->vma
->obj
;
1425 i915_vma_close(ring
->vma
);
1426 __i915_gem_object_release_unless_active(obj
);
1431 static int context_pin(struct i915_gem_context
*ctx
)
1433 struct i915_vma
*vma
= ctx
->engine
[RCS
].state
;
1436 /* Clear this page out of any CPU caches for coherent swap-in/out.
1437 * We only want to do this on the first bind so that we do not stall
1438 * on an active context (which by nature is already on the GPU).
1440 if (!(vma
->flags
& I915_VMA_GLOBAL_BIND
)) {
1441 ret
= i915_gem_object_set_to_gtt_domain(vma
->obj
, false);
1446 return i915_vma_pin(vma
, 0, I915_GTT_MIN_ALIGNMENT
,
1447 PIN_GLOBAL
| PIN_HIGH
);
1450 static struct i915_vma
*
1451 alloc_context_vma(struct intel_engine_cs
*engine
)
1453 struct drm_i915_private
*i915
= engine
->i915
;
1454 struct drm_i915_gem_object
*obj
;
1455 struct i915_vma
*vma
;
1457 obj
= i915_gem_object_create(i915
, engine
->context_size
);
1459 return ERR_CAST(obj
);
1462 * Try to make the context utilize L3 as well as LLC.
1464 * On VLV we don't have L3 controls in the PTEs so we
1465 * shouldn't touch the cache level, especially as that
1466 * would make the object snooped which might have a
1467 * negative performance impact.
1469 * Snooping is required on non-llc platforms in execlist
1470 * mode, but since all GGTT accesses use PAT entry 0 we
1471 * get snooping anyway regardless of cache_level.
1473 * This is only applicable for Ivy Bridge devices since
1474 * later platforms don't have L3 control bits in the PTE.
1476 if (IS_IVYBRIDGE(i915
)) {
1477 /* Ignore any error, regard it as a simple optimisation */
1478 i915_gem_object_set_cache_level(obj
, I915_CACHE_L3_LLC
);
1481 vma
= i915_vma_instance(obj
, &i915
->ggtt
.base
, NULL
);
1483 i915_gem_object_put(obj
);
1488 static struct intel_ring
*
1489 intel_ring_context_pin(struct intel_engine_cs
*engine
,
1490 struct i915_gem_context
*ctx
)
1492 struct intel_context
*ce
= &ctx
->engine
[engine
->id
];
1495 lockdep_assert_held(&ctx
->i915
->drm
.struct_mutex
);
1497 if (likely(ce
->pin_count
++))
1499 GEM_BUG_ON(!ce
->pin_count
); /* no overflow please! */
1501 if (!ce
->state
&& engine
->context_size
) {
1502 struct i915_vma
*vma
;
1504 vma
= alloc_context_vma(engine
);
1514 ret
= context_pin(ctx
);
1518 ce
->state
->obj
->mm
.dirty
= true;
1521 /* The kernel context is only used as a placeholder for flushing the
1522 * active context. It is never used for submitting user rendering and
1523 * as such never requires the golden render context, and so we can skip
1524 * emitting it when we switch to the kernel context. This is required
1525 * as during eviction we cannot allocate and pin the renderstate in
1526 * order to initialise the context.
1528 if (i915_gem_context_is_kernel(ctx
))
1529 ce
->initialised
= true;
1531 i915_gem_context_get(ctx
);
1534 /* One ringbuffer to rule them all */
1535 return engine
->buffer
;
1539 return ERR_PTR(ret
);
1542 static void intel_ring_context_unpin(struct intel_engine_cs
*engine
,
1543 struct i915_gem_context
*ctx
)
1545 struct intel_context
*ce
= &ctx
->engine
[engine
->id
];
1547 lockdep_assert_held(&ctx
->i915
->drm
.struct_mutex
);
1548 GEM_BUG_ON(ce
->pin_count
== 0);
1550 if (--ce
->pin_count
)
1554 i915_vma_unpin(ce
->state
);
1556 i915_gem_context_put(ctx
);
1559 static int intel_init_ring_buffer(struct intel_engine_cs
*engine
)
1561 struct intel_ring
*ring
;
1564 intel_engine_setup_common(engine
);
1566 err
= intel_engine_init_common(engine
);
1570 if (HWS_NEEDS_PHYSICAL(engine
->i915
))
1571 err
= init_phys_status_page(engine
);
1573 err
= init_status_page(engine
);
1577 ring
= intel_engine_create_ring(engine
, 32 * PAGE_SIZE
);
1579 err
= PTR_ERR(ring
);
1583 /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
1584 err
= intel_ring_pin(ring
, engine
->i915
, I915_GTT_PAGE_SIZE
);
1588 GEM_BUG_ON(engine
->buffer
);
1589 engine
->buffer
= ring
;
1594 intel_ring_free(ring
);
1596 if (HWS_NEEDS_PHYSICAL(engine
->i915
))
1597 cleanup_phys_status_page(engine
);
1599 cleanup_status_page(engine
);
1601 intel_engine_cleanup_common(engine
);
1605 void intel_engine_cleanup(struct intel_engine_cs
*engine
)
1607 struct drm_i915_private
*dev_priv
= engine
->i915
;
1609 WARN_ON(INTEL_GEN(dev_priv
) > 2 &&
1610 (I915_READ_MODE(engine
) & MODE_IDLE
) == 0);
1612 intel_ring_unpin(engine
->buffer
);
1613 intel_ring_free(engine
->buffer
);
1615 if (engine
->cleanup
)
1616 engine
->cleanup(engine
);
1618 if (HWS_NEEDS_PHYSICAL(dev_priv
))
1619 cleanup_phys_status_page(engine
);
1621 cleanup_status_page(engine
);
1623 intel_engine_cleanup_common(engine
);
1625 dev_priv
->engine
[engine
->id
] = NULL
;
1629 void intel_legacy_submission_resume(struct drm_i915_private
*dev_priv
)
1631 struct intel_engine_cs
*engine
;
1632 enum intel_engine_id id
;
1634 /* Restart from the beginning of the rings for convenience */
1635 for_each_engine(engine
, dev_priv
, id
)
1636 intel_ring_reset(engine
->buffer
, 0);
1639 static int ring_request_alloc(struct drm_i915_gem_request
*request
)
1643 GEM_BUG_ON(!request
->ctx
->engine
[request
->engine
->id
].pin_count
);
1645 /* Flush enough space to reduce the likelihood of waiting after
1646 * we start building the request - in which case we will just
1647 * have to repeat work.
1649 request
->reserved_space
+= LEGACY_REQUEST_SIZE
;
1651 cs
= intel_ring_begin(request
, 0);
1655 request
->reserved_space
-= LEGACY_REQUEST_SIZE
;
1659 static noinline
int wait_for_space(struct drm_i915_gem_request
*req
,
1662 struct intel_ring
*ring
= req
->ring
;
1663 struct drm_i915_gem_request
*target
;
1666 lockdep_assert_held(&req
->i915
->drm
.struct_mutex
);
1668 if (intel_ring_update_space(ring
) >= bytes
)
1672 * Space is reserved in the ringbuffer for finalising the request,
1673 * as that cannot be allowed to fail. During request finalisation,
1674 * reserved_space is set to 0 to stop the overallocation and the
1675 * assumption is that then we never need to wait (which has the
1676 * risk of failing with EINTR).
1678 * See also i915_gem_request_alloc() and i915_add_request().
1680 GEM_BUG_ON(!req
->reserved_space
);
1682 list_for_each_entry(target
, &ring
->request_list
, ring_link
) {
1683 /* Would completion of this request free enough space? */
1684 if (bytes
<= __intel_ring_space(target
->postfix
,
1685 ring
->emit
, ring
->size
))
1689 if (WARN_ON(&target
->ring_link
== &ring
->request_list
))
1692 timeout
= i915_wait_request(target
,
1693 I915_WAIT_INTERRUPTIBLE
| I915_WAIT_LOCKED
,
1694 MAX_SCHEDULE_TIMEOUT
);
1698 i915_gem_request_retire_upto(target
);
1700 intel_ring_update_space(ring
);
1701 GEM_BUG_ON(ring
->space
< bytes
);
1705 u32
*intel_ring_begin(struct drm_i915_gem_request
*req
,
1706 unsigned int num_dwords
)
1708 struct intel_ring
*ring
= req
->ring
;
1709 const unsigned int remain_usable
= ring
->effective_size
- ring
->emit
;
1710 const unsigned int bytes
= num_dwords
* sizeof(u32
);
1711 unsigned int need_wrap
= 0;
1712 unsigned int total_bytes
;
1715 /* Packets must be qword aligned. */
1716 GEM_BUG_ON(num_dwords
& 1);
1718 total_bytes
= bytes
+ req
->reserved_space
;
1719 GEM_BUG_ON(total_bytes
> ring
->effective_size
);
1721 if (unlikely(total_bytes
> remain_usable
)) {
1722 const int remain_actual
= ring
->size
- ring
->emit
;
1724 if (bytes
> remain_usable
) {
1726 * Not enough space for the basic request. So need to
1727 * flush out the remainder and then wait for
1730 total_bytes
+= remain_actual
;
1731 need_wrap
= remain_actual
| 1;
1734 * The base request will fit but the reserved space
1735 * falls off the end. So we don't need an immediate
1736 * wrap and only need to effectively wait for the
1737 * reserved size from the start of ringbuffer.
1739 total_bytes
= req
->reserved_space
+ remain_actual
;
1743 if (unlikely(total_bytes
> ring
->space
)) {
1744 int ret
= wait_for_space(req
, total_bytes
);
1746 return ERR_PTR(ret
);
1749 if (unlikely(need_wrap
)) {
1751 GEM_BUG_ON(need_wrap
> ring
->space
);
1752 GEM_BUG_ON(ring
->emit
+ need_wrap
> ring
->size
);
1754 /* Fill the tail with MI_NOOP */
1755 memset(ring
->vaddr
+ ring
->emit
, 0, need_wrap
);
1757 ring
->space
-= need_wrap
;
1760 GEM_BUG_ON(ring
->emit
> ring
->size
- bytes
);
1761 GEM_BUG_ON(ring
->space
< bytes
);
1762 cs
= ring
->vaddr
+ ring
->emit
;
1763 GEM_DEBUG_EXEC(memset(cs
, POISON_INUSE
, bytes
));
1764 ring
->emit
+= bytes
;
1765 ring
->space
-= bytes
;
1770 /* Align the ring tail to a cacheline boundary */
1771 int intel_ring_cacheline_align(struct drm_i915_gem_request
*req
)
1774 (req
->ring
->emit
& (CACHELINE_BYTES
- 1)) / sizeof(uint32_t);
1777 if (num_dwords
== 0)
1780 num_dwords
= CACHELINE_BYTES
/ sizeof(uint32_t) - num_dwords
;
1781 cs
= intel_ring_begin(req
, num_dwords
);
1785 while (num_dwords
--)
1788 intel_ring_advance(req
, cs
);
1793 static void gen6_bsd_submit_request(struct drm_i915_gem_request
*request
)
1795 struct drm_i915_private
*dev_priv
= request
->i915
;
1797 intel_uncore_forcewake_get(dev_priv
, FORCEWAKE_ALL
);
1799 /* Every tail move must follow the sequence below */
1801 /* Disable notification that the ring is IDLE. The GT
1802 * will then assume that it is busy and bring it out of rc6.
1804 I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL
,
1805 _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE
));
1807 /* Clear the context id. Here be magic! */
1808 I915_WRITE64_FW(GEN6_BSD_RNCID
, 0x0);
1810 /* Wait for the ring not to be idle, i.e. for it to wake up. */
1811 if (__intel_wait_for_register_fw(dev_priv
,
1812 GEN6_BSD_SLEEP_PSMI_CONTROL
,
1813 GEN6_BSD_SLEEP_INDICATOR
,
1816 DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
1818 /* Now that the ring is fully powered up, update the tail */
1819 i9xx_submit_request(request
);
1821 /* Let the ring send IDLE messages to the GT again,
1822 * and so let it sleep to conserve power when idle.
1824 I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL
,
1825 _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE
));
1827 intel_uncore_forcewake_put(dev_priv
, FORCEWAKE_ALL
);
1830 static int gen6_bsd_ring_flush(struct drm_i915_gem_request
*req
, u32 mode
)
1834 cs
= intel_ring_begin(req
, 4);
1839 if (INTEL_GEN(req
->i915
) >= 8)
1842 /* We always require a command barrier so that subsequent
1843 * commands, such as breadcrumb interrupts, are strictly ordered
1844 * wrt the contents of the write cache being flushed to memory
1845 * (and thus being coherent from the CPU).
1847 cmd
|= MI_FLUSH_DW_STORE_INDEX
| MI_FLUSH_DW_OP_STOREDW
;
1850 * Bspec vol 1c.5 - video engine command streamer:
1851 * "If ENABLED, all TLBs will be invalidated once the flush
1852 * operation is complete. This bit is only valid when the
1853 * Post-Sync Operation field is a value of 1h or 3h."
1855 if (mode
& EMIT_INVALIDATE
)
1856 cmd
|= MI_INVALIDATE_TLB
| MI_INVALIDATE_BSD
;
1859 *cs
++ = I915_GEM_HWS_SCRATCH_ADDR
| MI_FLUSH_DW_USE_GTT
;
1860 if (INTEL_GEN(req
->i915
) >= 8) {
1861 *cs
++ = 0; /* upper addr */
1862 *cs
++ = 0; /* value */
1867 intel_ring_advance(req
, cs
);
1872 gen8_emit_bb_start(struct drm_i915_gem_request
*req
,
1873 u64 offset
, u32 len
,
1874 unsigned int dispatch_flags
)
1876 bool ppgtt
= USES_PPGTT(req
->i915
) &&
1877 !(dispatch_flags
& I915_DISPATCH_SECURE
);
1880 cs
= intel_ring_begin(req
, 4);
1884 /* FIXME(BDW): Address space and security selectors. */
1885 *cs
++ = MI_BATCH_BUFFER_START_GEN8
| (ppgtt
<< 8) | (dispatch_flags
&
1886 I915_DISPATCH_RS
? MI_BATCH_RESOURCE_STREAMER
: 0);
1887 *cs
++ = lower_32_bits(offset
);
1888 *cs
++ = upper_32_bits(offset
);
1890 intel_ring_advance(req
, cs
);
1896 hsw_emit_bb_start(struct drm_i915_gem_request
*req
,
1897 u64 offset
, u32 len
,
1898 unsigned int dispatch_flags
)
1902 cs
= intel_ring_begin(req
, 2);
1906 *cs
++ = MI_BATCH_BUFFER_START
| (dispatch_flags
& I915_DISPATCH_SECURE
?
1907 0 : MI_BATCH_PPGTT_HSW
| MI_BATCH_NON_SECURE_HSW
) |
1908 (dispatch_flags
& I915_DISPATCH_RS
?
1909 MI_BATCH_RESOURCE_STREAMER
: 0);
1910 /* bit0-7 is the length on GEN6+ */
1912 intel_ring_advance(req
, cs
);
1918 gen6_emit_bb_start(struct drm_i915_gem_request
*req
,
1919 u64 offset
, u32 len
,
1920 unsigned int dispatch_flags
)
1924 cs
= intel_ring_begin(req
, 2);
1928 *cs
++ = MI_BATCH_BUFFER_START
| (dispatch_flags
& I915_DISPATCH_SECURE
?
1929 0 : MI_BATCH_NON_SECURE_I965
);
1930 /* bit0-7 is the length on GEN6+ */
1932 intel_ring_advance(req
, cs
);
1937 /* Blitter support (SandyBridge+) */
1939 static int gen6_ring_flush(struct drm_i915_gem_request
*req
, u32 mode
)
1943 cs
= intel_ring_begin(req
, 4);
1948 if (INTEL_GEN(req
->i915
) >= 8)
1951 /* We always require a command barrier so that subsequent
1952 * commands, such as breadcrumb interrupts, are strictly ordered
1953 * wrt the contents of the write cache being flushed to memory
1954 * (and thus being coherent from the CPU).
1956 cmd
|= MI_FLUSH_DW_STORE_INDEX
| MI_FLUSH_DW_OP_STOREDW
;
1959 * Bspec vol 1c.3 - blitter engine command streamer:
1960 * "If ENABLED, all TLBs will be invalidated once the flush
1961 * operation is complete. This bit is only valid when the
1962 * Post-Sync Operation field is a value of 1h or 3h."
1964 if (mode
& EMIT_INVALIDATE
)
1965 cmd
|= MI_INVALIDATE_TLB
;
1967 *cs
++ = I915_GEM_HWS_SCRATCH_ADDR
| MI_FLUSH_DW_USE_GTT
;
1968 if (INTEL_GEN(req
->i915
) >= 8) {
1969 *cs
++ = 0; /* upper addr */
1970 *cs
++ = 0; /* value */
1975 intel_ring_advance(req
, cs
);
1980 static void intel_ring_init_semaphores(struct drm_i915_private
*dev_priv
,
1981 struct intel_engine_cs
*engine
)
1983 struct drm_i915_gem_object
*obj
;
1986 if (!i915
.semaphores
)
1989 if (INTEL_GEN(dev_priv
) >= 8 && !dev_priv
->semaphore
) {
1990 struct i915_vma
*vma
;
1992 obj
= i915_gem_object_create(dev_priv
, PAGE_SIZE
);
1996 vma
= i915_vma_instance(obj
, &dev_priv
->ggtt
.base
, NULL
);
2000 ret
= i915_gem_object_set_to_gtt_domain(obj
, false);
2004 ret
= i915_vma_pin(vma
, 0, 0, PIN_GLOBAL
| PIN_HIGH
);
2008 dev_priv
->semaphore
= vma
;
2011 if (INTEL_GEN(dev_priv
) >= 8) {
2012 u32 offset
= i915_ggtt_offset(dev_priv
->semaphore
);
2014 engine
->semaphore
.sync_to
= gen8_ring_sync_to
;
2015 engine
->semaphore
.signal
= gen8_xcs_signal
;
2017 for (i
= 0; i
< I915_NUM_ENGINES
; i
++) {
2020 if (i
!= engine
->id
)
2021 ring_offset
= offset
+ GEN8_SEMAPHORE_OFFSET(engine
->id
, i
);
2023 ring_offset
= MI_SEMAPHORE_SYNC_INVALID
;
2025 engine
->semaphore
.signal_ggtt
[i
] = ring_offset
;
2027 } else if (INTEL_GEN(dev_priv
) >= 6) {
2028 engine
->semaphore
.sync_to
= gen6_ring_sync_to
;
2029 engine
->semaphore
.signal
= gen6_signal
;
2032 * The current semaphore is only applied on pre-gen8
2033 * platform. And there is no VCS2 ring on the pre-gen8
2034 * platform. So the semaphore between RCS and VCS2 is
2035 * initialized as INVALID. Gen8 will initialize the
2036 * sema between VCS2 and RCS later.
2038 for (i
= 0; i
< GEN6_NUM_SEMAPHORES
; i
++) {
2039 static const struct {
2041 i915_reg_t mbox_reg
;
2042 } sem_data
[GEN6_NUM_SEMAPHORES
][GEN6_NUM_SEMAPHORES
] = {
2044 [VCS_HW
] = { .wait_mbox
= MI_SEMAPHORE_SYNC_RV
, .mbox_reg
= GEN6_VRSYNC
},
2045 [BCS_HW
] = { .wait_mbox
= MI_SEMAPHORE_SYNC_RB
, .mbox_reg
= GEN6_BRSYNC
},
2046 [VECS_HW
] = { .wait_mbox
= MI_SEMAPHORE_SYNC_RVE
, .mbox_reg
= GEN6_VERSYNC
},
2049 [RCS_HW
] = { .wait_mbox
= MI_SEMAPHORE_SYNC_VR
, .mbox_reg
= GEN6_RVSYNC
},
2050 [BCS_HW
] = { .wait_mbox
= MI_SEMAPHORE_SYNC_VB
, .mbox_reg
= GEN6_BVSYNC
},
2051 [VECS_HW
] = { .wait_mbox
= MI_SEMAPHORE_SYNC_VVE
, .mbox_reg
= GEN6_VEVSYNC
},
2054 [RCS_HW
] = { .wait_mbox
= MI_SEMAPHORE_SYNC_BR
, .mbox_reg
= GEN6_RBSYNC
},
2055 [VCS_HW
] = { .wait_mbox
= MI_SEMAPHORE_SYNC_BV
, .mbox_reg
= GEN6_VBSYNC
},
2056 [VECS_HW
] = { .wait_mbox
= MI_SEMAPHORE_SYNC_BVE
, .mbox_reg
= GEN6_VEBSYNC
},
2059 [RCS_HW
] = { .wait_mbox
= MI_SEMAPHORE_SYNC_VER
, .mbox_reg
= GEN6_RVESYNC
},
2060 [VCS_HW
] = { .wait_mbox
= MI_SEMAPHORE_SYNC_VEV
, .mbox_reg
= GEN6_VVESYNC
},
2061 [BCS_HW
] = { .wait_mbox
= MI_SEMAPHORE_SYNC_VEB
, .mbox_reg
= GEN6_BVESYNC
},
2065 i915_reg_t mbox_reg
;
2067 if (i
== engine
->hw_id
) {
2068 wait_mbox
= MI_SEMAPHORE_SYNC_INVALID
;
2069 mbox_reg
= GEN6_NOSYNC
;
2071 wait_mbox
= sem_data
[engine
->hw_id
][i
].wait_mbox
;
2072 mbox_reg
= sem_data
[engine
->hw_id
][i
].mbox_reg
;
2075 engine
->semaphore
.mbox
.wait
[i
] = wait_mbox
;
2076 engine
->semaphore
.mbox
.signal
[i
] = mbox_reg
;
2083 i915_gem_object_put(obj
);
2085 DRM_DEBUG_DRIVER("Failed to allocate space for semaphores, disabling\n");
2086 i915
.semaphores
= 0;
2089 static void intel_ring_init_irq(struct drm_i915_private
*dev_priv
,
2090 struct intel_engine_cs
*engine
)
2092 engine
->irq_enable_mask
= GT_RENDER_USER_INTERRUPT
<< engine
->irq_shift
;
2094 if (INTEL_GEN(dev_priv
) >= 8) {
2095 engine
->irq_enable
= gen8_irq_enable
;
2096 engine
->irq_disable
= gen8_irq_disable
;
2097 engine
->irq_seqno_barrier
= gen6_seqno_barrier
;
2098 } else if (INTEL_GEN(dev_priv
) >= 6) {
2099 engine
->irq_enable
= gen6_irq_enable
;
2100 engine
->irq_disable
= gen6_irq_disable
;
2101 engine
->irq_seqno_barrier
= gen6_seqno_barrier
;
2102 } else if (INTEL_GEN(dev_priv
) >= 5) {
2103 engine
->irq_enable
= gen5_irq_enable
;
2104 engine
->irq_disable
= gen5_irq_disable
;
2105 engine
->irq_seqno_barrier
= gen5_seqno_barrier
;
2106 } else if (INTEL_GEN(dev_priv
) >= 3) {
2107 engine
->irq_enable
= i9xx_irq_enable
;
2108 engine
->irq_disable
= i9xx_irq_disable
;
2110 engine
->irq_enable
= i8xx_irq_enable
;
2111 engine
->irq_disable
= i8xx_irq_disable
;
2115 static void i9xx_set_default_submission(struct intel_engine_cs
*engine
)
2117 engine
->submit_request
= i9xx_submit_request
;
2120 static void gen6_bsd_set_default_submission(struct intel_engine_cs
*engine
)
2122 engine
->submit_request
= gen6_bsd_submit_request
;
2125 static void intel_ring_default_vfuncs(struct drm_i915_private
*dev_priv
,
2126 struct intel_engine_cs
*engine
)
2128 intel_ring_init_irq(dev_priv
, engine
);
2129 intel_ring_init_semaphores(dev_priv
, engine
);
2131 engine
->init_hw
= init_ring_common
;
2132 engine
->reset_hw
= reset_ring_common
;
2134 engine
->context_pin
= intel_ring_context_pin
;
2135 engine
->context_unpin
= intel_ring_context_unpin
;
2137 engine
->request_alloc
= ring_request_alloc
;
2139 engine
->emit_breadcrumb
= i9xx_emit_breadcrumb
;
2140 engine
->emit_breadcrumb_sz
= i9xx_emit_breadcrumb_sz
;
2141 if (i915
.semaphores
) {
2144 engine
->emit_breadcrumb
= gen6_sema_emit_breadcrumb
;
2146 num_rings
= INTEL_INFO(dev_priv
)->num_rings
- 1;
2147 if (INTEL_GEN(dev_priv
) >= 8) {
2148 engine
->emit_breadcrumb_sz
+= num_rings
* 6;
2150 engine
->emit_breadcrumb_sz
+= num_rings
* 3;
2152 engine
->emit_breadcrumb_sz
++;
2156 engine
->set_default_submission
= i9xx_set_default_submission
;
2158 if (INTEL_GEN(dev_priv
) >= 8)
2159 engine
->emit_bb_start
= gen8_emit_bb_start
;
2160 else if (INTEL_GEN(dev_priv
) >= 6)
2161 engine
->emit_bb_start
= gen6_emit_bb_start
;
2162 else if (INTEL_GEN(dev_priv
) >= 4)
2163 engine
->emit_bb_start
= i965_emit_bb_start
;
2164 else if (IS_I830(dev_priv
) || IS_I845G(dev_priv
))
2165 engine
->emit_bb_start
= i830_emit_bb_start
;
2167 engine
->emit_bb_start
= i915_emit_bb_start
;
2170 int intel_init_render_ring_buffer(struct intel_engine_cs
*engine
)
2172 struct drm_i915_private
*dev_priv
= engine
->i915
;
2175 intel_ring_default_vfuncs(dev_priv
, engine
);
2177 if (HAS_L3_DPF(dev_priv
))
2178 engine
->irq_keep_mask
= GT_RENDER_L3_PARITY_ERROR_INTERRUPT
;
2180 if (INTEL_GEN(dev_priv
) >= 8) {
2181 engine
->init_context
= intel_rcs_ctx_init
;
2182 engine
->emit_breadcrumb
= gen8_render_emit_breadcrumb
;
2183 engine
->emit_breadcrumb_sz
= gen8_render_emit_breadcrumb_sz
;
2184 engine
->emit_flush
= gen8_render_ring_flush
;
2185 if (i915
.semaphores
) {
2188 engine
->semaphore
.signal
= gen8_rcs_signal
;
2190 num_rings
= INTEL_INFO(dev_priv
)->num_rings
- 1;
2191 engine
->emit_breadcrumb_sz
+= num_rings
* 8;
2193 } else if (INTEL_GEN(dev_priv
) >= 6) {
2194 engine
->init_context
= intel_rcs_ctx_init
;
2195 engine
->emit_flush
= gen7_render_ring_flush
;
2196 if (IS_GEN6(dev_priv
))
2197 engine
->emit_flush
= gen6_render_ring_flush
;
2198 } else if (IS_GEN5(dev_priv
)) {
2199 engine
->emit_flush
= gen4_render_ring_flush
;
2201 if (INTEL_GEN(dev_priv
) < 4)
2202 engine
->emit_flush
= gen2_render_ring_flush
;
2204 engine
->emit_flush
= gen4_render_ring_flush
;
2205 engine
->irq_enable_mask
= I915_USER_INTERRUPT
;
2208 if (IS_HASWELL(dev_priv
))
2209 engine
->emit_bb_start
= hsw_emit_bb_start
;
2211 engine
->init_hw
= init_render_ring
;
2212 engine
->cleanup
= render_ring_cleanup
;
2214 ret
= intel_init_ring_buffer(engine
);
2218 if (INTEL_GEN(dev_priv
) >= 6) {
2219 ret
= intel_engine_create_scratch(engine
, PAGE_SIZE
);
2222 } else if (HAS_BROKEN_CS_TLB(dev_priv
)) {
2223 ret
= intel_engine_create_scratch(engine
, I830_WA_SIZE
);
2231 int intel_init_bsd_ring_buffer(struct intel_engine_cs
*engine
)
2233 struct drm_i915_private
*dev_priv
= engine
->i915
;
2235 intel_ring_default_vfuncs(dev_priv
, engine
);
2237 if (INTEL_GEN(dev_priv
) >= 6) {
2238 /* gen6 bsd needs a special wa for tail updates */
2239 if (IS_GEN6(dev_priv
))
2240 engine
->set_default_submission
= gen6_bsd_set_default_submission
;
2241 engine
->emit_flush
= gen6_bsd_ring_flush
;
2242 if (INTEL_GEN(dev_priv
) < 8)
2243 engine
->irq_enable_mask
= GT_BSD_USER_INTERRUPT
;
2245 engine
->mmio_base
= BSD_RING_BASE
;
2246 engine
->emit_flush
= bsd_ring_flush
;
2247 if (IS_GEN5(dev_priv
))
2248 engine
->irq_enable_mask
= ILK_BSD_USER_INTERRUPT
;
2250 engine
->irq_enable_mask
= I915_BSD_USER_INTERRUPT
;
2253 return intel_init_ring_buffer(engine
);
2256 int intel_init_blt_ring_buffer(struct intel_engine_cs
*engine
)
2258 struct drm_i915_private
*dev_priv
= engine
->i915
;
2260 intel_ring_default_vfuncs(dev_priv
, engine
);
2262 engine
->emit_flush
= gen6_ring_flush
;
2263 if (INTEL_GEN(dev_priv
) < 8)
2264 engine
->irq_enable_mask
= GT_BLT_USER_INTERRUPT
;
2266 return intel_init_ring_buffer(engine
);
2269 int intel_init_vebox_ring_buffer(struct intel_engine_cs
*engine
)
2271 struct drm_i915_private
*dev_priv
= engine
->i915
;
2273 intel_ring_default_vfuncs(dev_priv
, engine
);
2275 engine
->emit_flush
= gen6_ring_flush
;
2277 if (INTEL_GEN(dev_priv
) < 8) {
2278 engine
->irq_enable_mask
= PM_VEBOX_USER_INTERRUPT
;
2279 engine
->irq_enable
= hsw_vebox_irq_enable
;
2280 engine
->irq_disable
= hsw_vebox_irq_disable
;
2283 return intel_init_ring_buffer(engine
);