/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_vgpu.h"

#include <linux/pm_runtime.h>

#define FORCEWAKE_ACK_TIMEOUT_MS 50

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__))
static const char * const forcewake_domain_names[] = {
	"render",
	"blitter",
	"media",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
		return forcewake_domain_names[id];

	WARN_ON(id);

	return "unknown";
}
static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
	WARN_ON(!i915_mmio_reg_valid(d->reg_set));
	__raw_i915_write32(d->i915, d->reg_set, d->val_reset);
}

static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	d->wake_count++;
	hrtimer_start_range_ns(&d->timer,
			       NSEC_PER_MSEC,
			       NSEC_PER_MSEC,
			       HRTIMER_MODE_REL);
}
static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_set);
}

static inline void
fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_clear);
}

static inline void
fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
{
	/* something from same cacheline, but not from the set register */
	if (i915_mmio_reg_valid(d->reg_post))
		__raw_posting_read(d->i915, d->reg_post);
}
static void
fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;

	for_each_fw_domain_masked(d, fw_domains, dev_priv) {
		fw_domain_wait_ack_clear(d);
		fw_domain_get(d);
	}

	for_each_fw_domain_masked(d, fw_domains, dev_priv)
		fw_domain_wait_ack(d);

	dev_priv->uncore.fw_domains_active |= fw_domains;
}

static void
fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;

	for_each_fw_domain_masked(d, fw_domains, dev_priv) {
		fw_domain_put(d);
		fw_domain_posting_read(d);
	}

	dev_priv->uncore.fw_domains_active &= ~fw_domains;
}

static void
fw_domains_posting_read(struct drm_i915_private *dev_priv)
{
	struct intel_uncore_forcewake_domain *d;

	/* No need to do for all, just do for first found */
	for_each_fw_domain(d, dev_priv) {
		fw_domain_posting_read(d);
		break;
	}
}
static void
fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;

	if (dev_priv->uncore.fw_domains == 0)
		return;

	for_each_fw_domain_masked(d, fw_domains, dev_priv)
		fw_domain_reset(d);

	fw_domains_posting_read(dev_priv);
}
static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
				GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get(dev_priv, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}

static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
				     enum forcewake_domains fw_domains)
{
	fw_domains_put(dev_priv, fw_domains);
	gen6_gt_check_fifodbg(dev_priv);
}
static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
{
	u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);

	return count & GT_FIFO_FREE_ENTRIES_MASK;
}

static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	/* On VLV, FIFO will be shared by both SW and HW.
	 * So, we need to read the FREE_ENTRIES every time */
	if (IS_VALLEYVIEW(dev_priv))
		dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);

	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = fifo_free_entries(dev_priv);

		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = fifo_free_entries(dev_priv);
		}
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->uncore.fifo_count = fifo;
	}
	dev_priv->uncore.fifo_count--;

	return ret;
}
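/*
 * Illustrative note (not from the original source): the FIFO bookkeeping
 * above is plain arithmetic. With GT_FIFO_NUM_RESERVED_ENTRIES entries held
 * in reserve, a cached fifo_count below the reserve forces a re-read and a
 * bounded busy-wait until the hardware reports more free entries; each
 * posted write then consumes one entry via the final decrement.
 */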
static enum hrtimer_restart
intel_uncore_fw_release_timer(struct hrtimer *timer)
{
	struct intel_uncore_forcewake_domain *domain =
	       container_of(timer, struct intel_uncore_forcewake_domain, timer);
	struct drm_i915_private *dev_priv = domain->i915;
	unsigned long irqflags;

	assert_rpm_device_not_suspended(dev_priv);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (WARN_ON(domain->wake_count == 0))
		domain->wake_count++;

	if (--domain->wake_count == 0)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return HRTIMER_NORESTART;
}
void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
				  bool restore)
{
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domains fw, active_domains;

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		active_domains = 0;

		for_each_fw_domain(domain, dev_priv) {
			if (hrtimer_cancel(&domain->timer) == 0)
				continue;

			intel_uncore_fw_release_timer(&domain->timer);
		}

		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

		for_each_fw_domain(domain, dev_priv) {
			if (hrtimer_active(&domain->timer))
				active_domains |= domain->mask;
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
			break;
		}

		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
		cond_resched();
	}

	WARN_ON(active_domains);

	fw = dev_priv->uncore.fw_domains_active;
	if (fw)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);

	fw_domains_reset(dev_priv, FORCEWAKE_ALL);

	if (restore) { /* If reset with a user forcewake, try to restore */
		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
			dev_priv->uncore.fifo_count =
				fifo_free_entries(dev_priv);
	}

	if (!restore)
		assert_forcewakes_inactive(dev_priv);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static u64 gen9_edram_size(struct drm_i915_private *dev_priv)
{
	const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
	const unsigned int sets[4] = { 1, 1, 2, 2 };
	const u32 cap = dev_priv->edram_cap;

	return EDRAM_NUM_BANKS(cap) *
		ways[EDRAM_WAYS_IDX(cap)] *
		sets[EDRAM_SETS_IDX(cap)] *
		1024 * 1024;
}
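/*
 * Worked example (illustrative): a capability value reporting 2 banks, a
 * ways index of 1 (8 ways) and a sets index of 2 (2 sets) would yield
 * 2 * 8 * 2 * 1024 * 1024 = 32MB of eDRAM.
 */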
u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv)
{
	if (!HAS_EDRAM(dev_priv))
		return 0;

	/* The needed capability bits for size calculation
	 * are not there with pre gen9 so return 128MB always.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		return 128 * 1024 * 1024;

	return gen9_edram_size(dev_priv);
}
static void intel_uncore_edram_detect(struct drm_i915_private *dev_priv)
{
	if (IS_HASWELL(dev_priv) ||
	    IS_BROADWELL(dev_priv) ||
	    INTEL_GEN(dev_priv) >= 9) {
		dev_priv->edram_cap = __raw_i915_read32(dev_priv,
							HSW_EDRAM_CAP);

		/* NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
	} else {
		dev_priv->edram_cap = 0;
	}

	if (HAS_EDRAM(dev_priv))
		DRM_INFO("Found %lluMB of eDRAM\n",
			 intel_uncore_edram_size(dev_priv) / (1024 * 1024));
}
static bool
fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	u32 dbg;

	dbg = __raw_i915_read32(dev_priv, FPGA_DBG);
	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
		return false;

	__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	return true;
}

static bool
vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	u32 cer;

	cer = __raw_i915_read32(dev_priv, CLAIM_ER);
	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
		return false;

	__raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR);

	return true;
}

static bool
check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
		return fpga_check_for_unclaimed_mmio(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return vlv_check_for_unclaimed_mmio(dev_priv);

	return false;
}
static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
					  bool restore_forcewake)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);

	/* clear out unclaimed reg detection bit */
	if (check_for_unclaimed_mmio(dev_priv))
		DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");

	/* clear out old GT FIFO errors */
	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
		__raw_i915_write32(dev_priv, GTFIFODBG,
				   __raw_i915_read32(dev_priv, GTFIFODBG));

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(dev_priv)) {
		__raw_i915_write32(dev_priv, GTFIFOCTL,
				   __raw_i915_read32(dev_priv, GTFIFOCTL) |
				   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				   GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	/* Enable Decoupled MMIO only on BXT C stepping onwards */
	if (!IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
		info->has_decoupled_mmio = false;

	intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
}
void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
				 bool restore_forcewake)
{
	__intel_uncore_early_sanitize(dev_priv, restore_forcewake);
	i915_check_and_clear_faults(dev_priv);
}

void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
{
	i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6);

	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_sanitize_gt_powersave(dev_priv);
}
static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
		if (domain->wake_count++)
			fw_domains &= ~domain->mask;
	}

	if (fw_domains)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}
/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get the GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However if some sequence requires the GT to not power down a particular
 * forcewake domain this function should be called at the beginning of the
 * sequence. Subsequently the reference should be dropped by a symmetric
 * call to intel_uncore_forcewake_put(). Usually the caller wants all the
 * domains to be kept awake, so @fw_domains would then be FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_get(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
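/*
 * Usage sketch (illustrative, not part of the original file): a raw mmio
 * sequence that must keep the GT awake throughout would look like:
 *
 *	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 *	... sequence of I915_READ_FW()/I915_WRITE_FW() accesses ...
 *	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 */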
/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	assert_spin_locked(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	__intel_uncore_forcewake_get(dev_priv, fw_domains);
}
static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
		if (WARN_ON(domain->wake_count == 0))
			continue;

		if (--domain->wake_count)
			continue;

		fw_domain_arm_timer(domain);
	}
}
/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references
 *
 * This function drops the device-level forcewakes for specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_put(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references on
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	assert_spin_locked(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	__intel_uncore_forcewake_put(dev_priv, fw_domains);
}
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	WARN_ON(dev_priv->uncore.fw_domains_active);
}
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)

#define __gen6_reg_read_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})
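/*
 * Illustrative example: on gen6 a read of the render ring tail at 0x2030
 * falls below 0x40000 and therefore grabs FORCEWAKE_RENDER, while offsets
 * at 0x40000 and above (e.g. display registers) take the fast path with no
 * forcewake at all.
 */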
static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
{
	if (offset < entry->start)
		return -1;
	else if (offset > entry->end)
		return 1;
	else
		return 0;
}
/* Copied and "macroized" from lib/bsearch.c */
#define BSEARCH(key, base, num, cmp) ({ \
	unsigned int start__ = 0, end__ = (num); \
	typeof(base) result__ = NULL; \
	while (start__ < end__) { \
		unsigned int mid__ = start__ + (end__ - start__) / 2; \
		int ret__ = (cmp)((key), (base) + mid__); \
		if (ret__ < 0) { \
			end__ = mid__; \
		} else if (ret__ > 0) { \
			start__ = mid__ + 1; \
		} else { \
			result__ = (base) + mid__; \
			break; \
		} \
	} \
	result__; \
})
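/*
 * BSEARCH evaluates to a pointer to the matching element, or NULL when the
 * key is not found. See find_fw_domain() and is_gen8_shadowed() below for
 * the two users; both supply a comparator with the (key, element) signature
 * expected here.
 */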
static enum forcewake_domains
find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
{
	const struct intel_forcewake_range *entry;

	entry = BSEARCH(offset,
			dev_priv->uncore.fw_domains_table,
			dev_priv->uncore.fw_domains_table_entries,
			fw_range_cmp);

	return entry ? entry->domains : 0;
}
static void
intel_fw_table_check(struct drm_i915_private *dev_priv)
{
	const struct intel_forcewake_range *ranges;
	unsigned int num_ranges;
	s32 prev;
	unsigned int i;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG))
		return;

	ranges = dev_priv->uncore.fw_domains_table;
	if (!ranges)
		return;

	num_ranges = dev_priv->uncore.fw_domains_table_entries;

	for (i = 0, prev = -1; i < num_ranges; i++, ranges++) {
		WARN_ON_ONCE(IS_GEN9(dev_priv) &&
			     (prev + 1) != (s32)ranges->start);
		WARN_ON_ONCE(prev >= (s32)ranges->start);
		prev = ranges->start;
		WARN_ON_ONCE(prev >= (s32)ranges->end);
	}
}
#define GEN_FW_RANGE(s, e, d) \
	{ .start = (s), .end = (e), .domains = (d) }

#define HAS_FWTABLE(dev_priv) \
	(IS_GEN9(dev_priv) || \
	 IS_CHERRYVIEW(dev_priv) || \
	 IS_VALLEYVIEW(dev_priv))
/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __vlv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};
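/*
 * Illustrative lookup: with the table above, an access at offset 0x12050
 * (within the 0x12000-0x13fff range) resolves to FORCEWAKE_MEDIA, while an
 * offset such as 0x4000 falls in none of the ranges and needs no forcewake.
 */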
#define __fwtable_reg_read_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset))) \
		__fwd = find_fw_domain(dev_priv, offset); \
	__fwd; \
})
/* *Must* be sorted by offset! See intel_shadow_table_check(). */
static const i915_reg_t gen8_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),	/* 0x2000 (base) */
	GEN6_RPNSWREQ,			/* 0xA008 */
	GEN6_RC_VIDEO_FREQ,		/* 0xA00C */
	RING_TAIL(GEN6_BSD_RING_BASE),	/* 0x12000 (base) */
	RING_TAIL(VEBOX_RING_BASE),	/* 0x1a000 (base) */
	RING_TAIL(BLT_RING_BASE),	/* 0x22000 (base) */
	/* TODO: Other registers are not yet used */
};
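/*
 * Writes to the registers listed above do not need forcewake:
 * __gen8_reg_write_fw_domains() and __fwtable_reg_write_fw_domains() below
 * skip the wake for any offset found in this table.
 */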
static void intel_shadow_table_check(void)
{
	const i915_reg_t *reg = gen8_shadowed_regs;
	s32 prev;
	u32 offset;
	unsigned int i;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG))
		return;

	for (i = 0, prev = -1; i < ARRAY_SIZE(gen8_shadowed_regs); i++, reg++) {
		offset = i915_mmio_reg_offset(*reg);
		WARN_ON_ONCE(prev >= (s32)offset);
		prev = offset;
	}
}
static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
{
	u32 offset = i915_mmio_reg_offset(*reg);

	if (key < offset)
		return -1;
	else if (key > offset)
		return 1;
	else
		return 0;
}

static bool is_gen8_shadowed(u32 offset)
{
	const i915_reg_t *regs = gen8_shadowed_regs;

	return BSEARCH(offset, regs, ARRAY_SIZE(gen8_shadowed_regs),
		       mmio_reg_cmp);
}
#define __gen8_reg_write_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})
/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __chv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_write_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
		__fwd = find_fw_domain(dev_priv, offset); \
	__fwd; \
})
/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen9_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}
static void
__unclaimed_reg_debug(struct drm_i915_private *dev_priv,
		      const i915_reg_t reg,
		      const bool read,
		      const bool before)
{
	if (WARN(check_for_unclaimed_mmio(dev_priv) && !before,
		 "Unclaimed %s register 0x%x\n",
		 read ? "read from" : "write to",
		 i915_mmio_reg_offset(reg)))
		i915.mmio_debug--; /* Only report the first N failures */
}

static inline void
unclaimed_reg_debug(struct drm_i915_private *dev_priv,
		    const i915_reg_t reg,
		    const bool read,
		    const bool before)
{
	if (likely(!i915.mmio_debug))
		return;

	__unclaimed_reg_debug(dev_priv, reg, read, before);
}
static const enum decoupled_power_domain fw2dpd_domain[] = {
	GEN9_DECOUPLED_PD_RENDER,
	GEN9_DECOUPLED_PD_BLITTER,
	GEN9_DECOUPLED_PD_ALL,
	GEN9_DECOUPLED_PD_MEDIA,
	GEN9_DECOUPLED_PD_ALL,
	GEN9_DECOUPLED_PD_ALL,
	GEN9_DECOUPLED_PD_ALL
};
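/*
 * Note: this table is indexed by (fw_domain - 1), so the single-bit masks
 * map as FORCEWAKE_RENDER (0x1) -> GEN9_DECOUPLED_PD_RENDER,
 * FORCEWAKE_BLITTER (0x2) -> GEN9_DECOUPLED_PD_BLITTER and FORCEWAKE_MEDIA
 * (0x4) -> GEN9_DECOUPLED_PD_MEDIA; all composite masks fall back to
 * GEN9_DECOUPLED_PD_ALL.
 */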
/*
 * Decoupled MMIO access for only 1 DWORD
 */
static void __gen9_decoupled_mmio_access(struct drm_i915_private *dev_priv,
					 u32 reg,
					 enum forcewake_domains fw_domain,
					 enum decoupled_ops operation)
{
	enum decoupled_power_domain dp_domain;
	u32 ctrl_reg_data = 0;

	dp_domain = fw2dpd_domain[fw_domain - 1];

	ctrl_reg_data |= reg;
	ctrl_reg_data |= (operation << GEN9_DECOUPLED_OP_SHIFT);
	ctrl_reg_data |= (dp_domain << GEN9_DECOUPLED_PD_SHIFT);
	ctrl_reg_data |= GEN9_DECOUPLED_DW1_GO;
	__raw_i915_write32(dev_priv, GEN9_DECOUPLED_REG0_DW1, ctrl_reg_data);

	if (wait_for_atomic((__raw_i915_read32(dev_priv,
			    GEN9_DECOUPLED_REG0_DW1) &
			    GEN9_DECOUPLED_DW1_GO) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Decoupled MMIO wait timed out\n");
}

static inline u32
__gen9_decoupled_mmio_read32(struct drm_i915_private *dev_priv,
			     u32 reg,
			     enum forcewake_domains fw_domain)
{
	__gen9_decoupled_mmio_access(dev_priv, reg, fw_domain,
				     GEN9_DECOUPLED_OP_READ);

	return __raw_i915_read32(dev_priv, GEN9_DECOUPLED_REG0_DW0);
}

static inline void
__gen9_decoupled_mmio_write(struct drm_i915_private *dev_priv,
			    u32 reg, u32 data,
			    enum forcewake_domains fw_domain)
{
	__raw_i915_write32(dev_priv, GEN9_DECOUPLED_REG0_DW0, data);

	__gen9_decoupled_mmio_access(dev_priv, reg, fw_domain,
				     GEN9_DECOUPLED_OP_WRITE);
}
#define GEN2_READ_HEADER(x) \
	u##x val = 0; \
	assert_rpm_wakelock_held(dev_priv);

#define GEN2_READ_FOOTER \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read
#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER
#define GEN6_READ_HEADER(x) \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_rpm_wakelock_held(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	unclaimed_reg_debug(dev_priv, reg, true, true)

#define GEN6_READ_FOOTER \
	unclaimed_reg_debug(dev_priv, reg, true, false); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val
static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;

	for_each_fw_domain_masked(domain, fw_domains, dev_priv)
		fw_domain_arm_timer(domain);

	dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
				     enum forcewake_domains fw_domains)
{
	if (WARN_ON(!fw_domains))
		return;

	/* Turn on all requested but inactive supported forcewake domains. */
	fw_domains &= dev_priv->uncore.fw_domains;
	fw_domains &= ~dev_priv->uncore.fw_domains_active;

	if (fw_domains)
		___force_wake_auto(dev_priv, fw_domains);
}
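/*
 * Flow summary (illustrative): the mmio accessors below pass the domains
 * computed for each offset to __force_wake_auto(); already-active domains
 * are filtered out, and any newly taken reference is dropped later from the
 * per-domain release hrtimer rather than immediately, batching the expensive
 * wake handshake across neighbouring accesses.
 */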
#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __gen6_reg_read_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define __fwtable_read(x) \
static u##x \
fwtable_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __fwtable_reg_read_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define __gen9_decoupled_read(x) \
static u##x \
gen9_decoupled_read##x(struct drm_i915_private *dev_priv, \
		       i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __fwtable_reg_read_fw_domains(offset); \
	if (fw_engine & ~dev_priv->uncore.fw_domains_active) { \
		unsigned i; \
		u32 *ptr_data = (u32 *) &val; \
		for (i = 0; i < x/32; i++, offset += sizeof(u32), ptr_data++) \
			*ptr_data = __gen9_decoupled_mmio_read32(dev_priv, \
								 offset, \
								 fw_engine); \
	} else { \
		val = __raw_i915_read##x(dev_priv, reg); \
	} \
	GEN6_READ_FOOTER; \
}

__gen9_decoupled_read(32)
__gen9_decoupled_read(64)
__fwtable_read(8)
__fwtable_read(16)
__fwtable_read(32)
__fwtable_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)

#undef __fwtable_read
#undef __gen6_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER
#define VGPU_READ_HEADER(x) \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_rpm_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define VGPU_READ_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __vgpu_read(x) \
static u##x \
vgpu_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	VGPU_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	VGPU_READ_FOOTER; \
}

__vgpu_read(8)
__vgpu_read(16)
__vgpu_read(32)
__vgpu_read(64)

#undef __vgpu_read
#undef VGPU_READ_FOOTER
#undef VGPU_READ_HEADER
#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(dev_priv); \

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)

#undef __gen5_write
#undef __gen2_write
#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER
#define GEN6_WRITE_HEADER \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	unclaimed_reg_debug(dev_priv, reg, false, true)

#define GEN6_WRITE_FOOTER \
	unclaimed_reg_debug(dev_priv, reg, false, false); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	GEN6_WRITE_FOOTER; \
}

#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __gen8_reg_write_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

#define __fwtable_write(x) \
static void \
fwtable_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __fwtable_reg_write_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

#define __gen9_decoupled_write(x) \
static void \
gen9_decoupled_write##x(struct drm_i915_private *dev_priv, \
			i915_reg_t reg, u##x val, \
			bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __fwtable_reg_write_fw_domains(offset); \
	if (fw_engine & ~dev_priv->uncore.fw_domains_active) \
		__gen9_decoupled_mmio_write(dev_priv, \
					    offset, \
					    val, \
					    fw_engine); \
	else \
		__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

__gen9_decoupled_write(32)
__fwtable_write(8)
__fwtable_write(16)
__fwtable_write(32)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)

#undef __fwtable_write
#undef __gen8_write
#undef __gen6_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER
#define VGPU_WRITE_HEADER \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define VGPU_WRITE_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __vgpu_write(x) \
static void vgpu_write##x(struct drm_i915_private *dev_priv, \
			  i915_reg_t reg, u##x val, bool trace) { \
	VGPU_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	VGPU_WRITE_FOOTER; \
}

__vgpu_write(8)
__vgpu_write(16)
__vgpu_write(32)

#undef __vgpu_write
#undef VGPU_WRITE_FOOTER
#undef VGPU_WRITE_HEADER
#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
	dev_priv->uncore.funcs.mmio_writew = x##_write16; \
	dev_priv->uncore.funcs.mmio_writel = x##_write32; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_readb = x##_read8; \
	dev_priv->uncore.funcs.mmio_readw = x##_read16; \
	dev_priv->uncore.funcs.mmio_readl = x##_read32; \
	dev_priv->uncore.funcs.mmio_readq = x##_read64; \
} while (0)
static void fw_domain_init(struct drm_i915_private *dev_priv,
			   enum forcewake_domain_id domain_id,
			   i915_reg_t reg_set,
			   i915_reg_t reg_ack)
{
	struct intel_uncore_forcewake_domain *d;

	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
		return;

	d = &dev_priv->uncore.fw_domain[domain_id];

	WARN_ON(d->wake_count);

	d->wake_count = 0;
	d->reg_set = reg_set;
	d->reg_ack = reg_ack;

	if (IS_GEN6(dev_priv)) {
		d->val_reset = 0;
		d->val_set = FORCEWAKE_KERNEL;
		d->val_clear = 0;
	} else {
		/* WaRsClearFWBitsAtReset:bdw,skl */
		d->val_reset = _MASKED_BIT_DISABLE(0xffff);
		d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
		d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		d->reg_post = FORCEWAKE_ACK_VLV;
	else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
		d->reg_post = ECOBUS;

	d->i915 = dev_priv;
	d->id = domain_id;

	BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
	BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
	BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));

	d->mask = 1 << domain_id;

	hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	d->timer.function = intel_uncore_fw_release_timer;

	dev_priv->uncore.fw_domains |= (1 << domain_id);

	fw_domain_reset(d);
}
static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
{
	if (INTEL_INFO(dev_priv)->gen <= 5)
		return;

	if (IS_GEN9(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		if (!IS_CHERRYVIEW(dev_priv))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		if (IS_HASWELL(dev_priv))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(dev_priv)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;

		/* We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case MT access is
		 * not working. In this stage we don't know which flavour this
		 * ivb is, so it is better to also reset the gen6 fw registers
		 * before the ecobus check.
		 */

		__raw_i915_write32(dev_priv, FORCEWAKE, 0);
		__raw_posting_read(dev_priv, ECOBUS);

		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);

		spin_lock_irq(&dev_priv->uncore.lock);
		fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
		spin_unlock_irq(&dev_priv->uncore.lock);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (IS_GEN6(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

	/* All future platforms are expected to require complex power gating */
	WARN_ON(dev_priv->uncore.fw_domains == 0);
}
#define ASSIGN_FW_DOMAINS_TABLE(d) \
{ \
	dev_priv->uncore.fw_domains_table = \
			(struct intel_forcewake_range *)(d); \
	dev_priv->uncore.fw_domains_table_entries = ARRAY_SIZE((d)); \
}
void intel_uncore_init(struct drm_i915_private *dev_priv)
{
	i915_check_vgpu(dev_priv);

	intel_uncore_edram_detect(dev_priv);
	intel_uncore_fw_domains_init(dev_priv);
	__intel_uncore_early_sanitize(dev_priv, false);

	dev_priv->uncore.unclaimed_mmio_check = 1;

	switch (INTEL_INFO(dev_priv)->gen) {
	default:
	case 9:
		ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(fwtable);
		ASSIGN_READ_MMIO_VFUNCS(fwtable);
		if (HAS_DECOUPLED_MMIO(dev_priv)) {
			dev_priv->uncore.funcs.mmio_readl =
						gen9_decoupled_read32;
			dev_priv->uncore.funcs.mmio_readq =
						gen9_decoupled_read64;
			dev_priv->uncore.funcs.mmio_writel =
						gen9_decoupled_write32;
		}
		break;
	case 8:
		if (IS_CHERRYVIEW(dev_priv)) {
			ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges);
			ASSIGN_WRITE_MMIO_VFUNCS(fwtable);
			ASSIGN_READ_MMIO_VFUNCS(fwtable);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen8);
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 7:
	case 6:
		ASSIGN_WRITE_MMIO_VFUNCS(gen6);

		if (IS_VALLEYVIEW(dev_priv)) {
			ASSIGN_FW_DOMAINS_TABLE(__vlv_fw_ranges);
			ASSIGN_READ_MMIO_VFUNCS(fwtable);
		} else {
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 5:
		ASSIGN_WRITE_MMIO_VFUNCS(gen5);
		ASSIGN_READ_MMIO_VFUNCS(gen5);
		break;
	case 4:
	case 3:
	case 2:
		ASSIGN_WRITE_MMIO_VFUNCS(gen2);
		ASSIGN_READ_MMIO_VFUNCS(gen2);
		break;
	}

	intel_fw_table_check(dev_priv);
	if (INTEL_GEN(dev_priv) >= 8)
		intel_shadow_table_check();

	if (intel_vgpu_active(dev_priv)) {
		ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
		ASSIGN_READ_MMIO_VFUNCS(vgpu);
	}

	i915_check_and_clear_faults(dev_priv);
}
#undef ASSIGN_WRITE_MMIO_VFUNCS
#undef ASSIGN_READ_MMIO_VFUNCS
void intel_uncore_fini(struct drm_i915_private *dev_priv)
{
	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev_priv);
	intel_uncore_forcewake_reset(dev_priv, false);
}
#define GEN_RANGE(l, h) GENMASK((h) - 1, (l) - 1)
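/*
 * Illustrative expansion: GEN_RANGE(4, 9) becomes GENMASK(8, 3), i.e. a
 * contiguous run of bits with bit (gen - 1) set for each supported gen,
 * matching the per-gen bits in INTEL_INFO()->gen_mask.
 */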
static const struct register_whitelist {
	i915_reg_t offset_ldw, offset_udw;
	uint32_t size;
	/* supported gens, 0x8 for 4, 0x18 for 4 and 5, etc. */
	uint32_t gen_bitmask;
} whitelist[] = {
	{ .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
	  .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
	  .size = 8, .gen_bitmask = GEN_RANGE(4, 9) },
};
int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_reg_read *reg = data;
	struct register_whitelist const *entry = whitelist;
	unsigned size;
	i915_reg_t offset_ldw, offset_udw;
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
		if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) &&
		    (INTEL_INFO(dev_priv)->gen_mask & entry->gen_bitmask))
			break;
	}

	if (i == ARRAY_SIZE(whitelist))
		return -EINVAL;

	/* We use the low bits to encode extra flags as the register should
	 * be naturally aligned (and those that are not so aligned merely
	 * limit the available flags for that register).
	 */
	offset_ldw = entry->offset_ldw;
	offset_udw = entry->offset_udw;
	size = entry->size;
	size |= reg->offset ^ i915_mmio_reg_offset(offset_ldw);

	intel_runtime_pm_get(dev_priv);

	switch (size) {
	case 8 | 1:
		reg->val = I915_READ64_2x32(offset_ldw, offset_udw);
		break;
	case 8:
		reg->val = I915_READ64(offset_ldw);
		break;
	case 4:
		reg->val = I915_READ(offset_ldw);
		break;
	case 2:
		reg->val = I915_READ16(offset_ldw);
		break;
	case 1:
		reg->val = I915_READ8(offset_ldw);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
static int i915_reset_complete(struct pci_dev *pdev)
{
	u8 gdrst;
	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_STATUS) == 0;
}

static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/* assert reset for at least 20 usec */
	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	udelay(20);
	pci_write_config_byte(pdev, I915_GDRST, 0);

	return wait_for(i915_reset_complete(pdev), 500);
}

static int g4x_reset_complete(struct pci_dev *pdev)
{
	u8 gdrst;
	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	return wait_for(g4x_reset_complete(pdev), 500);
}
static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(pdev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(pdev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(pdev, I915_GDRST, 0);

	return 0;
}
static int ironlake_do_reset(struct drm_i915_private *dev_priv,
			     unsigned engine_mask)
{
	int ret;

	I915_WRITE(ILK_GDSR,
		   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = intel_wait_for_register(dev_priv,
				      ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
				      500);
	if (ret)
		return ret;

	I915_WRITE(ILK_GDSR,
		   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = intel_wait_for_register(dev_priv,
				      ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
				      500);
	if (ret)
		return ret;

	I915_WRITE(ILK_GDSR, 0);

	return 0;
}
/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
				u32 hw_domain_mask)
{
	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask);

	/* Spin waiting for the device to ack the reset requests */
	return intel_wait_for_register_fw(dev_priv,
					  GEN6_GDRST, hw_domain_mask, 0,
					  500);
}
/**
 * gen6_reset_engines - reset individual engines
 * @dev_priv: i915 device
 * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
 *
 * This function will reset the individual engines that are set in engine_mask.
 * If you provide ALL_ENGINES as mask, a full global domain reset will be issued.
 *
 * Note: It is the responsibility of the caller to handle the difference between
 * asking for a full domain reset versus a reset for all available individual
 * engines.
 *
 * Returns 0 on success, nonzero on error.
 */
static int gen6_reset_engines(struct drm_i915_private *dev_priv,
			      unsigned engine_mask)
{
	struct intel_engine_cs *engine;
	const u32 hw_engine_mask[I915_NUM_ENGINES] = {
		[RCS] = GEN6_GRDOM_RENDER,
		[BCS] = GEN6_GRDOM_BLT,
		[VCS] = GEN6_GRDOM_MEDIA,
		[VCS2] = GEN8_GRDOM_MEDIA2,
		[VECS] = GEN6_GRDOM_VECS,
	};
	u32 hw_mask;
	int ret;

	if (engine_mask == ALL_ENGINES) {
		hw_mask = GEN6_GRDOM_FULL;
	} else {
		unsigned int tmp;

		hw_mask = 0;
		for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
			hw_mask |= hw_engine_mask[engine->id];
	}

	ret = gen6_hw_domain_reset(dev_priv, hw_mask);

	intel_uncore_forcewake_reset(dev_priv, true);

	return ret;
}
/**
 * intel_wait_for_register_fw - wait until register matches expected state
 * @dev_priv: the i915 device
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @timeout_ms: timeout in milliseconds
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (I915_READ_FW(reg) & mask) == value
 *
 * Otherwise, the wait will time out after @timeout_ms milliseconds.
 *
 * Note that this routine assumes the caller holds forcewake asserted, so it
 * is not suitable for very long waits. See intel_wait_for_register() if you
 * wish to wait without holding forcewake for the duration (i.e. you expect
 * the wait to be slow).
 *
 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
			       i915_reg_t reg,
			       const u32 mask,
			       const u32 value,
			       const unsigned long timeout_ms)
{
#define done ((I915_READ_FW(reg) & mask) == value)
	int ret = wait_for_us(done, 2);
	if (ret)
		ret = wait_for(done, timeout_ms);
	return ret;
#undef done
}
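/*
 * Usage sketch (illustrative): gen6_hw_domain_reset() above is a typical
 * caller - it accesses a register outside the GT power well and then spins
 * on the acknowledgment:
 *
 *	ret = intel_wait_for_register_fw(dev_priv,
 *					 GEN6_GDRST, hw_domain_mask, 0,
 *					 500);
 */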
/**
 * intel_wait_for_register - wait until register matches expected state
 * @dev_priv: the i915 device
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @timeout_ms: timeout in milliseconds
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (I915_READ(reg) & mask) == value
 *
 * Otherwise, the wait will time out after @timeout_ms milliseconds.
 *
 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int intel_wait_for_register(struct drm_i915_private *dev_priv,
			    i915_reg_t reg,
			    const u32 mask,
			    const u32 value,
			    const unsigned long timeout_ms)
{
	unsigned fw =
		intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
	int ret;

	intel_uncore_forcewake_get(dev_priv, fw);
	ret = wait_for_us((I915_READ_FW(reg) & mask) == value, 2);
	intel_uncore_forcewake_put(dev_priv, fw);
	if (ret)
		ret = wait_for((I915_READ_NOTRACE(reg) & mask) == value,
			       timeout_ms);

	return ret;
}
*engine
)
1756 struct drm_i915_private
*dev_priv
= engine
->i915
;
1759 I915_WRITE_FW(RING_RESET_CTL(engine
->mmio_base
),
1760 _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET
));
1762 ret
= intel_wait_for_register_fw(dev_priv
,
1763 RING_RESET_CTL(engine
->mmio_base
),
1764 RESET_CTL_READY_TO_RESET
,
1765 RESET_CTL_READY_TO_RESET
,
1768 DRM_ERROR("%s: reset request timeout\n", engine
->name
);
1773 static void gen8_unrequest_engine_reset(struct intel_engine_cs
*engine
)
1775 struct drm_i915_private
*dev_priv
= engine
->i915
;
1777 I915_WRITE_FW(RING_RESET_CTL(engine
->mmio_base
),
1778 _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET
));
static int gen8_reset_engines(struct drm_i915_private *dev_priv,
			      unsigned engine_mask)
{
	struct intel_engine_cs *engine;
	unsigned int tmp;

	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
		if (gen8_request_engine_reset(engine))
			goto not_ready;

	return gen6_reset_engines(dev_priv, engine_mask);

not_ready:
	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
		gen8_unrequest_engine_reset(engine);

	return -EIO;
}
)(struct drm_i915_private
*, unsigned engine_mask
);
1802 static reset_func
intel_get_gpu_reset(struct drm_i915_private
*dev_priv
)
1807 if (INTEL_INFO(dev_priv
)->gen
>= 8)
1808 return gen8_reset_engines
;
1809 else if (INTEL_INFO(dev_priv
)->gen
>= 6)
1810 return gen6_reset_engines
;
1811 else if (IS_GEN5(dev_priv
))
1812 return ironlake_do_reset
;
1813 else if (IS_G4X(dev_priv
))
1814 return g4x_do_reset
;
1815 else if (IS_G33(dev_priv
))
1816 return g33_do_reset
;
1817 else if (INTEL_INFO(dev_priv
)->gen
>= 3)
1818 return i915_do_reset
;
int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	reset_func reset;
	int ret;

	reset = intel_get_gpu_reset(dev_priv);
	if (reset == NULL)
		return -ENODEV;

	/* If the power well sleeps during the reset, the reset
	 * request may be dropped and never completes (causing -EIO).
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	ret = reset(dev_priv, engine_mask);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

bool intel_has_gpu_reset(struct drm_i915_private *dev_priv)
{
	return intel_get_gpu_reset(dev_priv) != NULL;
}
int intel_guc_reset(struct drm_i915_private *dev_priv)
{
	int ret;
	unsigned long irqflags;

	if (!HAS_GUC(dev_priv))
		return -EINVAL;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	ret = gen6_hw_domain_reset(dev_priv, GEN9_GRDOM_GUC);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	return check_for_unclaimed_mmio(dev_priv);
}
bool
intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
{
	if (unlikely(i915.mmio_debug ||
		     dev_priv->uncore.unclaimed_mmio_check <= 0))
		return false;

	if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) {
		DRM_DEBUG("Unclaimed register detected, "
			  "enabling oneshot unclaimed register reporting. "
			  "Please use i915.mmio_debug=N for more information.\n");
		i915.mmio_debug++;
		dev_priv->uncore.unclaimed_mmio_check--;
		return true;
	}

	return false;
}
static enum forcewake_domains
intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
				i915_reg_t reg)
{
	u32 offset = i915_mmio_reg_offset(reg);
	enum forcewake_domains fw_domains;

	if (HAS_FWTABLE(dev_priv)) {
		fw_domains = __fwtable_reg_read_fw_domains(offset);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		fw_domains = __gen6_reg_read_fw_domains(offset);
	} else {
		WARN_ON(!IS_GEN(dev_priv, 2, 5));
		fw_domains = 0;
	}

	WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);

	return fw_domains;
}

static enum forcewake_domains
intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
				 i915_reg_t reg)
{
	u32 offset = i915_mmio_reg_offset(reg);
	enum forcewake_domains fw_domains;

	if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) {
		fw_domains = __fwtable_reg_write_fw_domains(offset);
	} else if (IS_GEN8(dev_priv)) {
		fw_domains = __gen8_reg_write_fw_domains(offset);
	} else if (IS_GEN(dev_priv, 6, 7)) {
		fw_domains = FORCEWAKE_RENDER;
	} else {
		WARN_ON(!IS_GEN(dev_priv, 2, 5));
		fw_domains = 0;
	}

	WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);

	return fw_domains;
}
/**
 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
 * 				    a register
 * @dev_priv: pointer to struct drm_i915_private
 * @reg: register in question
 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
 *
 * Returns a set of forcewake domains that must be taken with, for example,
 * intel_uncore_forcewake_get() for the specified register to be accessible in
 * the specified mode (read, write or read/write) with raw mmio accessors.
 *
 * NOTE: On Gen6 and Gen7 the write forcewake domain (FORCEWAKE_RENDER)
 * requires the caller to do FIFO management on their own or risk losing
 * writes.
 */
enum forcewake_domains
intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
			       i915_reg_t reg, unsigned int op)
{
	enum forcewake_domains fw_domains = 0;

	WARN_ON(!op);

	if (intel_vgpu_active(dev_priv))
		return 0;

	if (op & FW_REG_READ)
		fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg);

	if (op & FW_REG_WRITE)
		fw_domains |= intel_uncore_forcewake_for_write(dev_priv, reg);

	return fw_domains;
}