/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_drv.h"
#include "intel_ringbuffer.h"
#include "intel_lrc.h"

/* Haswell does have the CXT_SIZE register, however it does not appear to be
 * valid. Now, docs explain in dwords what is in the context object. The full
 * size is 70720 bytes, however, the power context and execlist context will
 * never be saved (power context is stored elsewhere, and execlists don't work
 * on HSW) - so the final size, including the extra state required for the
 * Resource Streamer, is 66944 bytes, which rounds to 17 pages.
 */
#define HSW_CXT_TOTAL_SIZE		(17 * PAGE_SIZE)
/* Same as Haswell, but 72064 bytes now. */
#define GEN8_CXT_TOTAL_SIZE		(18 * PAGE_SIZE)
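
/*
 * For reference, the page rounding above works out as 66944 / 4096 ≈ 16.3,
 * i.e. 17 pages, and 72064 / 4096 ≈ 17.6, i.e. 18 pages (illustrative
 * arithmetic only, assuming the usual 4KiB page size).
 */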

#define GEN8_LR_CONTEXT_RENDER_SIZE	(20 * PAGE_SIZE)
#define GEN9_LR_CONTEXT_RENDER_SIZE	(22 * PAGE_SIZE)

#define GEN8_LR_CONTEXT_OTHER_SIZE	( 2 * PAGE_SIZE)

struct engine_class_info {
        const char *name;
        int (*init_legacy)(struct intel_engine_cs *engine);
        int (*init_execlists)(struct intel_engine_cs *engine);
};

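/*
 * Which of the two init vectors is used is decided in intel_engines_init()
 * below, based on the i915.enable_execlists module parameter (execlists vs.
 * the legacy ringbuffer submission path).
 */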
static const struct engine_class_info intel_engine_classes[] = {
        [RENDER_CLASS] = {
                .name = "rcs",
                .init_execlists = logical_render_ring_init,
                .init_legacy = intel_init_render_ring_buffer,
        },
        [COPY_ENGINE_CLASS] = {
                .name = "bcs",
                .init_execlists = logical_xcs_ring_init,
                .init_legacy = intel_init_blt_ring_buffer,
        },
        [VIDEO_DECODE_CLASS] = {
                .name = "vcs",
                .init_execlists = logical_xcs_ring_init,
                .init_legacy = intel_init_bsd_ring_buffer,
        },
        [VIDEO_ENHANCEMENT_CLASS] = {
                .name = "vecs",
                .init_execlists = logical_xcs_ring_init,
                .init_legacy = intel_init_vebox_ring_buffer,
        },
};

struct engine_info {
        unsigned int hw_id;
        unsigned int uabi_id;
        u8 class;
        u8 instance;
        u32 mmio_base;
        unsigned int irq_shift;
};

static const struct engine_info intel_engines[] = {
        [RCS] = {
                .hw_id = RCS_HW,
                .uabi_id = I915_EXEC_RENDER,
                .class = RENDER_CLASS,
                .instance = 0,
                .mmio_base = RENDER_RING_BASE,
                .irq_shift = GEN8_RCS_IRQ_SHIFT,
        },
        [BCS] = {
                .hw_id = BCS_HW,
                .uabi_id = I915_EXEC_BLT,
                .class = COPY_ENGINE_CLASS,
                .instance = 0,
                .mmio_base = BLT_RING_BASE,
                .irq_shift = GEN8_BCS_IRQ_SHIFT,
        },
        [VCS] = {
                .hw_id = VCS_HW,
                .uabi_id = I915_EXEC_BSD,
                .class = VIDEO_DECODE_CLASS,
                .instance = 0,
                .mmio_base = GEN6_BSD_RING_BASE,
                .irq_shift = GEN8_VCS1_IRQ_SHIFT,
        },
        [VCS2] = {
                .hw_id = VCS2_HW,
                .uabi_id = I915_EXEC_BSD,
                .class = VIDEO_DECODE_CLASS,
                .instance = 1,
                .mmio_base = GEN8_BSD2_RING_BASE,
                .irq_shift = GEN8_VCS2_IRQ_SHIFT,
        },
        [VECS] = {
                .hw_id = VECS_HW,
                .uabi_id = I915_EXEC_VEBOX,
                .class = VIDEO_ENHANCEMENT_CLASS,
                .instance = 0,
                .mmio_base = VEBOX_RING_BASE,
                .irq_shift = GEN8_VECS_IRQ_SHIFT,
        },
};

/**
 * __intel_engine_context_size() - return the size of the context for an engine
 * @dev_priv: i915 device private
 * @class: engine class
 *
 * Each engine class may require a different amount of space for a context
 * image.
 *
 * Return: size (in bytes) of an engine class specific context image
 *
 * Note: this size includes the HWSP, which is part of the context image
 * in LRC mode, but does not include the "shared data page" used with
 * GuC submission. The caller should account for this if using the GuC.
 */
static u32
__intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
{
        u32 cxt_size;

        BUILD_BUG_ON(I915_GTT_PAGE_SIZE != PAGE_SIZE);

        switch (class) {
        case RENDER_CLASS:
                switch (INTEL_GEN(dev_priv)) {
                default:
                        MISSING_CASE(INTEL_GEN(dev_priv));
                case 9:
                        return GEN9_LR_CONTEXT_RENDER_SIZE;
                case 8:
                        return i915.enable_execlists ?
                               GEN8_LR_CONTEXT_RENDER_SIZE :
                               GEN8_CXT_TOTAL_SIZE;
                case 7:
                        if (IS_HASWELL(dev_priv))
                                return HSW_CXT_TOTAL_SIZE;

                        cxt_size = I915_READ(GEN7_CXT_SIZE);
                        return round_up(GEN7_CXT_TOTAL_SIZE(cxt_size) * 64,
                                        PAGE_SIZE);
                case 6:
                        cxt_size = I915_READ(CXT_SIZE);
                        return round_up(GEN6_CXT_TOTAL_SIZE(cxt_size) * 64,
                                        PAGE_SIZE);
                case 5:
                case 4:
                case 3:
                case 2:
                /* For the special day when i810 gets merged. */
                case 1:
                        return 0;
                }
                break;
        default:
                MISSING_CASE(class);
        case VIDEO_DECODE_CLASS:
        case VIDEO_ENHANCEMENT_CLASS:
        case COPY_ENGINE_CLASS:
                if (INTEL_GEN(dev_priv) < 8)
                        return 0;
                return GEN8_LR_CONTEXT_OTHER_SIZE;
        }
}

static int
intel_engine_setup(struct drm_i915_private *dev_priv,
                   enum intel_engine_id id)
{
        const struct engine_info *info = &intel_engines[id];
        const struct engine_class_info *class_info;
        struct intel_engine_cs *engine;

        GEM_BUG_ON(info->class >= ARRAY_SIZE(intel_engine_classes));
        class_info = &intel_engine_classes[info->class];

        GEM_BUG_ON(dev_priv->engine[id]);
        engine = kzalloc(sizeof(*engine), GFP_KERNEL);
        if (!engine)
                return -ENOMEM;

        engine->id = id;
        engine->i915 = dev_priv;
        WARN_ON(snprintf(engine->name, sizeof(engine->name), "%s%u",
                         class_info->name, info->instance) >=
                sizeof(engine->name));
        engine->uabi_id = info->uabi_id;
        engine->hw_id = engine->guc_id = info->hw_id;
        engine->mmio_base = info->mmio_base;
        engine->irq_shift = info->irq_shift;
        engine->class = info->class;
        engine->instance = info->instance;

        engine->context_size = __intel_engine_context_size(dev_priv,
                                                            engine->class);
        if (WARN_ON(engine->context_size > BIT(20)))
                engine->context_size = 0;

        /* Nothing to do here, execute in order of dependencies */
        engine->schedule = NULL;

        ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);

        dev_priv->engine[id] = engine;
        return 0;
}

/**
 * intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
 * @dev_priv: i915 device private
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
{
        struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
        const unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        unsigned int mask = 0;
        unsigned int i;
        int err;

        WARN_ON(ring_mask == 0);
        WARN_ON(ring_mask &
                GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));

        for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
                if (!HAS_ENGINE(dev_priv, i))
                        continue;

                err = intel_engine_setup(dev_priv, i);
                if (err)
                        goto cleanup;

                mask |= ENGINE_MASK(i);
        }

        /*
         * Catch failures to update intel_engines table when the new engines
         * are added to the driver by a warning and disabling the forgotten
         * engines.
         */
        if (WARN_ON(mask != ring_mask))
                device_info->ring_mask = mask;

        /* We always presume we have at least RCS available for later probing */
        if (WARN_ON(!HAS_ENGINE(dev_priv, RCS))) {
                err = -ENODEV;
                goto cleanup;
        }

        device_info->num_rings = hweight32(mask);

        return 0;

cleanup:
        for_each_engine(engine, dev_priv, id)
                kfree(engine);
        return err;
}

/**
 * intel_engines_init() - init the Engine Command Streamers
 * @dev_priv: i915 device private
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init(struct drm_i915_private *dev_priv)
{
        struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
        struct intel_engine_cs *engine;
        enum intel_engine_id id, err_id;
        unsigned int mask = 0;
        int err = 0;

        for_each_engine(engine, dev_priv, id) {
                const struct engine_class_info *class_info =
                        &intel_engine_classes[engine->class];
                int (*init)(struct intel_engine_cs *engine);

                if (i915.enable_execlists)
                        init = class_info->init_execlists;
                else
                        init = class_info->init_legacy;
                if (!init) {
                        kfree(engine);
                        dev_priv->engine[id] = NULL;
                        continue;
                }

                err = init(engine);
                if (err) {
                        err_id = id;
                        goto cleanup;
                }

                GEM_BUG_ON(!engine->submit_request);
                mask |= ENGINE_MASK(id);
        }

        /*
         * Catch failures to update intel_engines table when the new engines
         * are added to the driver by a warning and disabling the forgotten
         * engines.
         */
        if (WARN_ON(mask != INTEL_INFO(dev_priv)->ring_mask))
                device_info->ring_mask = mask;

        device_info->num_rings = hweight32(mask);

        return 0;

cleanup:
        for_each_engine(engine, dev_priv, id) {
                if (id >= err_id)
                        kfree(engine);
                else
                        dev_priv->gt.cleanup_engine(engine);
        }
        return err;
}

void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
{
        struct drm_i915_private *dev_priv = engine->i915;

        GEM_BUG_ON(!intel_engine_is_idle(engine));
        GEM_BUG_ON(i915_gem_active_isset(&engine->timeline->last_request));

        /* Our semaphore implementation is strictly monotonic (i.e. we proceed
         * so long as the semaphore value in the register/page is greater
         * than the sync value), so whenever we reset the seqno, as long as we
         * also reset the tracking semaphore value to 0, it will always be
         * before the next request's seqno. If we don't reset the semaphore
         * value, then when the seqno moves backwards all future waits will
         * complete instantly (causing rendering corruption).
         */
        if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
                I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
                I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
                if (HAS_VEBOX(dev_priv))
                        I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
        }
        if (dev_priv->semaphore) {
                struct page *page = i915_vma_first_page(dev_priv->semaphore);
                void *semaphores;

                /* Semaphores are in noncoherent memory, flush to be safe */
                semaphores = kmap_atomic(page);
                memset(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
                       0, I915_NUM_ENGINES * gen8_semaphore_seqno_size);
                drm_clflush_virt_range(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
                                       I915_NUM_ENGINES * gen8_semaphore_seqno_size);
                kunmap_atomic(semaphores);
        }

        intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
        clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);

        /* After manually advancing the seqno, fake the interrupt in case
         * there are any waiters for that seqno.
         */
        intel_engine_wakeup(engine);

        GEM_BUG_ON(intel_engine_get_seqno(engine) != seqno);
}

static void intel_engine_init_timeline(struct intel_engine_cs *engine)
{
        engine->timeline = &engine->i915->gt.global_timeline.engine[engine->id];
}

/**
 * intel_engine_setup_common - setup engine state not requiring hw access
 * @engine: Engine to setup.
 *
 * Initializes @engine structure members shared between legacy and execlists
 * submission modes which do not require hardware access.
 *
 * Typically done early in the submission mode specific engine setup stage.
 */
void intel_engine_setup_common(struct intel_engine_cs *engine)
{
        engine->execlist_queue = RB_ROOT;
        engine->execlist_first = NULL;

        intel_engine_init_timeline(engine);
        intel_engine_init_hangcheck(engine);
        i915_gem_batch_pool_init(engine, &engine->batch_pool);

        intel_engine_init_cmd_parser(engine);
}

int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
{
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
        int ret;

        WARN_ON(engine->scratch);

        obj = i915_gem_object_create_stolen(engine->i915, size);
        if (!obj)
                obj = i915_gem_object_create_internal(engine->i915, size);
        if (IS_ERR(obj)) {
                DRM_ERROR("Failed to allocate scratch page\n");
                return PTR_ERR(obj);
        }

        vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto err_unref;
        }

        ret = i915_vma_pin(vma, 0, 4096, PIN_GLOBAL | PIN_HIGH);
        if (ret)
                goto err_unref;

        engine->scratch = vma;
        DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
                         engine->name, i915_ggtt_offset(vma));
        return 0;

err_unref:
        i915_gem_object_put(obj);
        return ret;
}

static void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
{
        i915_vma_unpin_and_release(&engine->scratch);
}

/**
 * intel_engine_init_common - initialize engine state which might require hw access
 * @engine: Engine to initialize.
 *
 * Initializes @engine structure members shared between legacy and execlists
 * submission modes which do require hardware access.
 *
 * Typically done at later stages of submission mode specific engine setup.
 *
 * Returns zero on success or an error code on failure.
 */
int intel_engine_init_common(struct intel_engine_cs *engine)
{
        struct intel_ring *ring;
        int ret;

        engine->set_default_submission(engine);

        /* We may need to do things with the shrinker which
         * require us to immediately switch back to the default
         * context. This can cause a problem as pinning the
         * default context also requires GTT space which may not
         * be available. To avoid this we always pin the default
         * context at driver load time.
         */
        ring = engine->context_pin(engine, engine->i915->kernel_context);
        if (IS_ERR(ring))
                return PTR_ERR(ring);

        ret = intel_engine_init_breadcrumbs(engine);
        if (ret)
                goto err_unpin;

        ret = i915_gem_render_state_init(engine);
        if (ret)
                goto err_unpin;

        return 0;

err_unpin:
        engine->context_unpin(engine, engine->i915->kernel_context);
        return ret;
}

/**
 * intel_engine_cleanup_common - cleans up the engine state created by
 *                               the common initializers.
 * @engine: Engine to cleanup.
 *
 * This cleans up everything created by the common helpers.
 */
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
        intel_engine_cleanup_scratch(engine);

        i915_gem_render_state_fini(engine);
        intel_engine_fini_breadcrumbs(engine);
        intel_engine_cleanup_cmd_parser(engine);
        i915_gem_batch_pool_fini(&engine->batch_pool);

        engine->context_unpin(engine, engine->i915->kernel_context);
}

u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->i915;
        u64 acthd;

        if (INTEL_GEN(dev_priv) >= 8)
                acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
                                         RING_ACTHD_UDW(engine->mmio_base));
        else if (INTEL_GEN(dev_priv) >= 4)
                acthd = I915_READ(RING_ACTHD(engine->mmio_base));
        else
                acthd = I915_READ(ACTHD);

        return acthd;
}

u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->i915;
        u64 bbaddr;

        if (INTEL_GEN(dev_priv) >= 8)
                bbaddr = I915_READ64_2x32(RING_BBADDR(engine->mmio_base),
                                          RING_BBADDR_UDW(engine->mmio_base));
        else
                bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));

        return bbaddr;
}

const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{
        switch (type) {
        case I915_CACHE_NONE: return " uncached";
        case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
        case I915_CACHE_L3_LLC: return " L3+LLC";
        case I915_CACHE_WT: return " WT";
        default: return "";
        }
}

static inline uint32_t
read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
                  int subslice, i915_reg_t reg)
{
        uint32_t mcr;
        uint32_t ret;
        enum forcewake_domains fw_domains;

        fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg,
                                                    FW_REG_READ);
        fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
                                                     GEN8_MCR_SELECTOR,
                                                     FW_REG_READ | FW_REG_WRITE);

        spin_lock_irq(&dev_priv->uncore.lock);
        intel_uncore_forcewake_get__locked(dev_priv, fw_domains);

        mcr = I915_READ_FW(GEN8_MCR_SELECTOR);
        /*
         * The HW expects the slice and subslice selectors to be reset to 0
         * after reading out the registers.
         */
        WARN_ON_ONCE(mcr & (GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK));
        mcr &= ~(GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK);
        mcr |= GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
        I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);

        ret = I915_READ_FW(reg);

        mcr &= ~(GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK);
        I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);

        intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
        spin_unlock_irq(&dev_priv->uncore.lock);

        return ret;
}

/* NB: please notice the memset */
void intel_engine_get_instdone(struct intel_engine_cs *engine,
                               struct intel_instdone *instdone)
{
        struct drm_i915_private *dev_priv = engine->i915;
        u32 mmio_base = engine->mmio_base;
        int slice;
        int subslice;

        memset(instdone, 0, sizeof(*instdone));

        switch (INTEL_GEN(dev_priv)) {
        default:
                instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

                if (engine->id != RCS)
                        break;

                instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
                for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
                        instdone->sampler[slice][subslice] =
                                read_subslice_reg(dev_priv, slice, subslice,
                                                  GEN7_SAMPLER_INSTDONE);
                        instdone->row[slice][subslice] =
                                read_subslice_reg(dev_priv, slice, subslice,
                                                  GEN7_ROW_INSTDONE);
                }
                break;
        case 7:
                instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

                if (engine->id != RCS)
                        break;

                instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
                instdone->sampler[0][0] = I915_READ(GEN7_SAMPLER_INSTDONE);
                instdone->row[0][0] = I915_READ(GEN7_ROW_INSTDONE);

                break;
        case 6:
        case 5:
        case 4:
                instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

                if (engine->id == RCS)
                        /* HACK: Using the wrong struct member */
                        instdone->slice_common = I915_READ(GEN4_INSTDONE1);
                break;
        case 3:
        case 2:
                instdone->instdone = I915_READ(GEN2_INSTDONE);
                break;
        }
}

static int wa_add(struct drm_i915_private *dev_priv,
                  i915_reg_t addr,
                  const u32 mask, const u32 val)
{
        const u32 idx = dev_priv->workarounds.count;

        if (WARN_ON(idx >= I915_MAX_WA_REGS))
                return -ENOSPC;

        dev_priv->workarounds.reg[idx].addr = addr;
        dev_priv->workarounds.reg[idx].value = val;
        dev_priv->workarounds.reg[idx].mask = mask;

        dev_priv->workarounds.count++;

        return 0;
}

#define WA_REG(addr, mask, val) do { \
                const int r = wa_add(dev_priv, (addr), (mask), (val)); \
                if (r) \
                        return r; \
        } while (0)

#define WA_SET_BIT_MASKED(addr, mask) \
        WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))

#define WA_CLR_BIT_MASKED(addr, mask) \
        WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))

#define WA_SET_FIELD_MASKED(addr, mask, value) \
        WA_REG(addr, mask, _MASKED_FIELD(mask, value))

#define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) | (mask))
#define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) & ~(mask))

#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
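
/*
 * Usage sketch: each per-platform *_init_workarounds() helper below calls,
 * for example,
 *
 *	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
 *
 * which records a masked-bit register write in dev_priv->workarounds via
 * wa_add(); the accumulated list is later replayed from the ring by
 * intel_ring_workarounds_emit(). Note that WA_REG() returns from the
 * enclosing function on error, so these macros may only be used in
 * functions returning int.
 */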

static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
                                 i915_reg_t reg)
{
        struct drm_i915_private *dev_priv = engine->i915;
        struct i915_workarounds *wa = &dev_priv->workarounds;
        const uint32_t index = wa->hw_whitelist_count[engine->id];

        if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
                return -EINVAL;

        WA_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
                 i915_mmio_reg_offset(reg));
        wa->hw_whitelist_count[engine->id]++;

        return 0;
}

static int gen8_init_workarounds(struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->i915;

        WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);

        /* WaDisableAsyncFlipPerfMode:bdw,chv */
        WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);

        /* WaDisablePartialInstShootdown:bdw,chv */
        WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
                          PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

        /* Use Force Non-Coherent whenever executing a 3D context. This is a
         * workaround for a possible hang in the unlikely event a TLB
         * invalidation occurs during a PSD flush.
         */
        /* WaForceEnableNonCoherent:bdw,chv */
        /* WaHdcDisableFetchWhenMasked:bdw,chv */
        WA_SET_BIT_MASKED(HDC_CHICKEN0,
                          HDC_DONOT_FETCH_MEM_WHEN_MASKED |
                          HDC_FORCE_NON_COHERENT);

        /* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
         * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
         *  polygons in the same 8x4 pixel/sample area to be processed without
         *  stalling waiting for the earlier ones to write to Hierarchical Z
         *  buffer."
         *
         * This optimization is off by default for BDW and CHV; turn it on.
         */
        WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);

        /* Wa4x4STCOptimizationDisable:bdw,chv */
        WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);

        /*
         * BSpec recommends 8x4 when MSAA is used,
         * however in practice 16x4 seems fastest.
         *
         * Note that PS/WM thread counts depend on the WIZ hashing
         * disable bit, which we don't touch here, but it's good
         * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
         */
        WA_SET_FIELD_MASKED(GEN7_GT_MODE,
                            GEN6_WIZ_HASHING_MASK,
                            GEN6_WIZ_HASHING_16x4);

        return 0;
}

static int bdw_init_workarounds(struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->i915;
        int ret;

        ret = gen8_init_workarounds(engine);
        if (ret)
                return ret;

        /* WaDisableThreadStallDopClockGating:bdw (pre-production) */
        WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

        /* WaDisableDopClockGating:bdw
         *
         * Also see the related UCGTCL1 write in broadwell_init_clock_gating()
         * to disable EUTC clock gating.
         */
        WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
                          DOP_CLOCK_GATING_DISABLE);

        WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
                          GEN8_SAMPLER_POWER_BYPASS_DIS);

        WA_SET_BIT_MASKED(HDC_CHICKEN0,
                          /* WaForceContextSaveRestoreNonCoherent:bdw */
                          HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
                          /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
                          (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));

        return 0;
}

static int chv_init_workarounds(struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->i915;
        int ret;

        ret = gen8_init_workarounds(engine);
        if (ret)
                return ret;

        /* WaDisableThreadStallDopClockGating:chv */
        WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

        /* Improve HiZ throughput on CHV. */
        WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);

        return 0;
}

static int gen9_init_workarounds(struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->i915;
        int ret;

        /* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
        I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));

        /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
        I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
                   GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);

        /* WaDisableKillLogic:bxt,skl,kbl,cfl */
        I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
                   ECOCHK_DIS_TLB);

        /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
        /* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
        WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
                          FLOW_CONTROL_ENABLE |
                          PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

        /* Syncing dependencies between camera and graphics:skl,bxt,kbl */
        if (!IS_COFFEELAKE(dev_priv))
                WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
                                  GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);

        /* WaDisableDgMirrorFixInHalfSliceChicken5:bxt */
        if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
                WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
                                  GEN9_DG_MIRROR_FIX_ENABLE);

        /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:bxt */
        if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
                WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
                                  GEN9_RHWO_OPTIMIZATION_DISABLE);
                /*
                 * WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14] to be set
                 * but we do that in per ctx batchbuffer as there is an issue
                 * with this register not getting restored on ctx restore
                 */
        }

        /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
        /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
        WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
                          GEN9_ENABLE_YV12_BUGFIX |
                          GEN9_ENABLE_GPGPU_PREEMPTION);

        /* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk,cfl */
        /* WaDisablePartialResolveInVc:skl,bxt,kbl,cfl */
        WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
                                         GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));

        /* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk,cfl */
        WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
                          GEN9_CCS_TLB_PREFETCH_ENABLE);

        /* WaDisableMaskBasedCammingInRCC:bxt */
        if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
                WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
                                  PIXEL_MASK_CAMMING_DISABLE);

        /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
        WA_SET_BIT_MASKED(HDC_CHICKEN0,
                          HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
                          HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);

        /* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
         * both tied to WaForceContextSaveRestoreNonCoherent
         * in some hsds for skl. We keep the tie for all gen9. The
         * documentation is a bit hazy and so we want to get common behaviour,
         * even though there is no clear evidence we would need both on kbl/bxt.
         * This area has been source of system hangs so we play it safe
         * and mimic the skl regardless of what bspec says.
         *
         * Use Force Non-Coherent whenever executing a 3D context. This
         * is a workaround for a possible hang in the unlikely event
         * a TLB invalidation occurs during a PSD flush.
         */

        /* WaForceEnableNonCoherent:skl,bxt,kbl,cfl */
        WA_SET_BIT_MASKED(HDC_CHICKEN0,
                          HDC_FORCE_NON_COHERENT);

        /* WaDisableHDCInvalidation:skl,bxt,kbl */
        if (!IS_COFFEELAKE(dev_priv))
                I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
                           BDW_DISABLE_HDC_INVALIDATION);

        /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
        if (IS_SKYLAKE(dev_priv) ||
            IS_KABYLAKE(dev_priv) ||
            IS_COFFEELAKE(dev_priv) ||
            IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
                WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
                                  GEN8_SAMPLER_POWER_BYPASS_DIS);

        /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
        WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);

        /* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
        I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
                                    GEN8_LQSC_FLUSH_COHERENT_LINES));

        /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
        ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
        if (ret)
                return ret;

        /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl */
        ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
        if (ret)
                return ret;

        /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
        ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
        if (ret)
                return ret;

        return 0;
}

static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->i915;
        u8 vals[3] = { 0, 0, 0 };
        unsigned int i;

        for (i = 0; i < 3; i++) {
                u8 ss;

                /*
                 * Only consider slices where one, and only one, subslice has 7
                 * EUs
                 */
                if (!is_power_of_2(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]))
                        continue;

                /*
                 * subslice_7eu[i] != 0 (because of the check above) and
                 * ss_max == 4 (maximum number of subslices possible per slice)
                 *
                 * ->    0 <= ss <= 3;
                 */
                ss = ffs(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]) - 1;
                vals[i] = 3 - ss;
        }

        if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
                return 0;

        /* Tune IZ hashing. See intel_device_info_runtime_init() */
        WA_SET_FIELD_MASKED(GEN7_GT_MODE,
                            GEN9_IZ_HASHING_MASK(2) |
                            GEN9_IZ_HASHING_MASK(1) |
                            GEN9_IZ_HASHING_MASK(0),
                            GEN9_IZ_HASHING(2, vals[2]) |
                            GEN9_IZ_HASHING(1, vals[1]) |
                            GEN9_IZ_HASHING(0, vals[0]));

        return 0;
}
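
/*
 * Worked example for the hashing tuning above (illustrative values only):
 * if subslice_7eu[1] == 0x4, i.e. only subslice 2 of slice 1 has seven EUs,
 * then ss = ffs(0x4) - 1 = 2 and vals[1] = 3 - 2 = 1, which is the value
 * programmed into the GEN9_IZ_HASHING field for slice 1.
 */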

static int skl_init_workarounds(struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->i915;
        int ret;

        ret = gen9_init_workarounds(engine);
        if (ret)
                return ret;

        /*
         * Actual WA is to disable percontext preemption granularity control
         * until D0 which is the default case so this is equivalent to
         * !WaDisablePerCtxtPreemptionGranularityControl:skl
         */
        I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
                   _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));

        /* WaEnableGapsTsvCreditFix:skl */
        I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
                                   GEN9_GAPS_TSV_CREDIT_DISABLE));

        /* WaDisableGafsUnitClkGating:skl */
        WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

        /* WaInPlaceDecompressionHang:skl */
        if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
                WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
                           GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);

        /* WaDisableLSQCROPERFforOCL:skl */
        ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
        if (ret)
                return ret;

        return skl_tune_iz_hashing(engine);
}

static int bxt_init_workarounds(struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->i915;
        int ret;

        ret = gen9_init_workarounds(engine);
        if (ret)
                return ret;

        /* WaStoreMultiplePTEenable:bxt */
        /* This is a requirement according to Hardware specification */
        if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
                I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);

        /* WaSetClckGatingDisableMedia:bxt */
        if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
                I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
                                            ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
        }

        /* WaDisableThreadStallDopClockGating:bxt */
        WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
                          STALL_DOP_GATING_DISABLE);

        /* WaDisablePooledEuLoadBalancingFix:bxt */
        if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
                WA_SET_BIT_MASKED(FF_SLICE_CS_CHICKEN2,
                                  GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
        }

        /* WaDisableSbeCacheDispatchPortSharing:bxt */
        if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) {
                WA_SET_BIT_MASKED(
                        GEN7_HALF_SLICE_CHICKEN1,
                        GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
        }

        /* WaDisableObjectLevelPreemptionForTrifanOrPolygon:bxt */
        /* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
        /* WaDisableObjectLevelPreemtionForInstanceId:bxt */
        /* WaDisableLSQCROPERFforOCL:bxt */
        if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
                ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
                if (ret)
                        return ret;

                ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
                if (ret)
                        return ret;
        }

        /* WaProgramL3SqcReg1DefaultForPerf:bxt */
        if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
                I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) |
                                           L3_HIGH_PRIO_CREDITS(2));

        /* WaToEnableHwFixForPushConstHWBug:bxt */
        if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
                WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
                                  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

        /* WaInPlaceDecompressionHang:bxt */
        if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
                WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
                           GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);

        return 0;
}

static int kbl_init_workarounds(struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->i915;
        int ret;

        ret = gen9_init_workarounds(engine);
        if (ret)
                return ret;

        /* WaEnableGapsTsvCreditFix:kbl */
        I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
                                   GEN9_GAPS_TSV_CREDIT_DISABLE));

        /* WaDisableDynamicCreditSharing:kbl */
        if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
                WA_SET_BIT(GAMT_CHKN_BIT_REG,
                           GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);

        /* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
        if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
                WA_SET_BIT_MASKED(HDC_CHICKEN0,
                                  HDC_FENCE_DEST_SLM_DISABLE);

        /* WaToEnableHwFixForPushConstHWBug:kbl */
        if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
                WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
                                  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

        /* WaDisableGafsUnitClkGating:kbl */
        WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

        /* WaDisableSbeCacheDispatchPortSharing:kbl */
        WA_SET_BIT_MASKED(
                GEN7_HALF_SLICE_CHICKEN1,
                GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);

        /* WaInPlaceDecompressionHang:kbl */
        WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
                   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);

        /* WaDisableLSQCROPERFforOCL:kbl */
        ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
        if (ret)
                return ret;

        return 0;
}

static int glk_init_workarounds(struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->i915;
        int ret;

        ret = gen9_init_workarounds(engine);
        if (ret)
                return ret;

        /* WaToEnableHwFixForPushConstHWBug:glk */
        WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
                          GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

        return 0;
}

static int cfl_init_workarounds(struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->i915;
        int ret;

        ret = gen9_init_workarounds(engine);
        if (ret)
                return ret;

        /* WaEnableGapsTsvCreditFix:cfl */
        I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
                                   GEN9_GAPS_TSV_CREDIT_DISABLE));

        /* WaToEnableHwFixForPushConstHWBug:cfl */
        WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
                          GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

        /* WaDisableGafsUnitClkGating:cfl */
        WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

        /* WaDisableSbeCacheDispatchPortSharing:cfl */
        WA_SET_BIT_MASKED(
                GEN7_HALF_SLICE_CHICKEN1,
                GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);

        /* WaInPlaceDecompressionHang:cfl */
        WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
                   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);

        return 0;
}

int init_workarounds_ring(struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->i915;
        int err;

        WARN_ON(engine->id != RCS);

        dev_priv->workarounds.count = 0;
        dev_priv->workarounds.hw_whitelist_count[engine->id] = 0;

        if (IS_BROADWELL(dev_priv))
                err = bdw_init_workarounds(engine);
        else if (IS_CHERRYVIEW(dev_priv))
                err = chv_init_workarounds(engine);
        else if (IS_SKYLAKE(dev_priv))
                err = skl_init_workarounds(engine);
        else if (IS_BROXTON(dev_priv))
                err = bxt_init_workarounds(engine);
        else if (IS_KABYLAKE(dev_priv))
                err = kbl_init_workarounds(engine);
        else if (IS_GEMINILAKE(dev_priv))
                err = glk_init_workarounds(engine);
        else if (IS_COFFEELAKE(dev_priv))
                err = cfl_init_workarounds(engine);
        else
                err = 0;
        if (err)
                return err;

        DRM_DEBUG_DRIVER("%s: Number of context specific w/a: %d\n",
                         engine->name, dev_priv->workarounds.count);
        return 0;
}

int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
        struct i915_workarounds *w = &req->i915->workarounds;
        u32 *cs;
        int ret, i;

        if (w->count == 0)
                return 0;

        ret = req->engine->emit_flush(req, EMIT_BARRIER);
        if (ret)
                return ret;

        cs = intel_ring_begin(req, (w->count * 2 + 2));
        if (IS_ERR(cs))
                return PTR_ERR(cs);

        *cs++ = MI_LOAD_REGISTER_IMM(w->count);
        for (i = 0; i < w->count; i++) {
                *cs++ = i915_mmio_reg_offset(w->reg[i].addr);
                *cs++ = w->reg[i].value;
        }
        *cs++ = MI_NOOP;

        intel_ring_advance(req, cs);

        ret = req->engine->emit_flush(req, EMIT_BARRIER);
        if (ret)
                return ret;

        return 0;
}

static bool ring_is_idle(struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->i915;
        bool idle = true;

        intel_runtime_pm_get(dev_priv);

        /* First check that no commands are left in the ring */
        if ((I915_READ_HEAD(engine) & HEAD_ADDR) !=
            (I915_READ_TAIL(engine) & TAIL_ADDR))
                idle = false;

        /* No bit for gen2, so assume the CS parser is idle */
        if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
                idle = false;

        intel_runtime_pm_put(dev_priv);

        return idle;
}

/**
 * intel_engine_is_idle() - Report if the engine has finished processing all work
 * @engine: the intel_engine_cs
 *
 * Return true if there are no requests pending, nothing left to be submitted
 * to hardware, and that the engine is idle.
 */
bool intel_engine_is_idle(struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->i915;

        /* More white lies, if wedged, hw state is inconsistent */
        if (i915_terminally_wedged(&dev_priv->gpu_error))
                return true;

        /* Any inflight/incomplete requests? */
        if (!i915_seqno_passed(intel_engine_get_seqno(engine),
                               intel_engine_last_submit(engine)))
                return false;

        if (I915_SELFTEST_ONLY(engine->breadcrumbs.mock))
                return true;

        /* Interrupt/tasklet pending? */
        if (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted))
                return false;

        /* Both ports drained, no more ELSP submission? */
        if (port_request(&engine->execlist_port[0]))
                return false;

        /* Ring stopped? */
        if (!ring_is_idle(engine))
                return false;

        return true;
}

bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        if (READ_ONCE(dev_priv->gt.active_requests))
                return false;

        /* If the driver is wedged, HW state may be very inconsistent and
         * report that it is still busy, even though we have stopped using it.
         */
        if (i915_terminally_wedged(&dev_priv->gpu_error))
                return true;

        for_each_engine(engine, dev_priv, id) {
                if (!intel_engine_is_idle(engine))
                        return false;
        }

        return true;
}

void intel_engines_reset_default_submission(struct drm_i915_private *i915)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        for_each_engine(engine, i915, id)
                engine->set_default_submission(engine);
}

void intel_engines_mark_idle(struct drm_i915_private *i915)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        for_each_engine(engine, i915, id) {
                intel_engine_disarm_breadcrumbs(engine);
                i915_gem_batch_pool_fini(&engine->batch_pool);
                engine->no_priolist = false;
        }
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_engine.c"
#endif