]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/gpu/drm/i915/intel_uncore.c
Merge tag 'nios2-v3.20-rc1' of git://git.rocketboards.org/linux-socfpga-next
[mirror_ubuntu-artful-kernel.git] / drivers / gpu / drm / i915 / intel_uncore.c
CommitLineData
907b28c5
CW
1/*
2 * Copyright © 2013 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24#include "i915_drv.h"
25#include "intel_drv.h"
26
6daccb0b
CW
27#include <linux/pm_runtime.h>
28
907b28c5
CW
29#define FORCEWAKE_ACK_TIMEOUT_MS 2
30
6af5d92f
CW
31#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
32#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))
33
34#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
35#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))
36
37#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
38#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))
39
40#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
41#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))
42
43#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)
44
05a2fb15
MK
/* Human-readable names for the forcewake domains, indexed by
 * enum forcewake_domain_id; used only for diagnostic messages. The
 * array length is checked against FW_DOMAIN_ID_COUNT in
 * intel_uncore_forcewake_domain_to_str(). */
static const char * const forcewake_domain_names[] = {
	"render",
	"blitter",
	"media",
};
50
51const char *
48c1026a 52intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
05a2fb15
MK
53{
54 BUILD_BUG_ON((sizeof(forcewake_domain_names)/sizeof(const char *)) !=
55 FW_DOMAIN_ID_COUNT);
56
57 if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
58 return forcewake_domain_names[id];
59
60 WARN_ON(id);
61
62 return "unknown";
63}
64
b2ec142c
PZ
/* Warn (once) if MMIO is being touched while the device is runtime
 * suspended — the register file is not accessible in that state. */
static void
assert_device_not_suspended(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
		  "Device suspended\n");
}
6af5d92f 71
05a2fb15
MK
/* Write the domain's reset value to its forcewake set register to put
 * the domain into a known state. reg_set == 0 means the domain was
 * never initialized by fw_domain_init(), hence the WARN. */
static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
	WARN_ON(d->reg_set == 0);
	__raw_i915_write32(d->i915, d->reg_set, d->val_reset);
}
78
05a2fb15
MK
/* Arm the domain's release timer to fire on the next jiffy; the timer
 * callback (intel_uncore_fw_release_timer()) drops the forcewake
 * reference. Pinned so the timer runs on the current CPU. */
static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	mod_timer_pinned(&d->timer, jiffies + 1);
}
84
05a2fb15
MK
/* Busy-wait (atomic context) until the hardware acknowledges that the
 * previous forcewake request has been released, i.e. the KERNEL bit in
 * the ack register reads back as 0, or FORCEWAKE_ACK_TIMEOUT_MS expires. */
static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}
907b28c5 94
05a2fb15
MK
/* Request forcewake for the domain by writing its "set" value. The
 * caller must then poll for the ack via fw_domain_wait_ack(). */
static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_set);
}
907b28c5 100
05a2fb15
MK
/* Busy-wait (atomic context) until the hardware acknowledges the
 * forcewake request (KERNEL bit set in the ack register), or
 * FORCEWAKE_ACK_TIMEOUT_MS expires. */
static inline void
fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}
907b28c5 110
05a2fb15
MK
/* Release the domain's forcewake request by writing its "clear" value.
 * No ack wait here; callers flush via fw_domain_posting_read() instead. */
static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_clear);
}
116
05a2fb15
MK
/* Posting read to flush the preceding forcewake write. Skipped when the
 * domain has no reg_post configured. */
static inline void
fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
{
	/* something from same cacheline, but not from the set register */
	if (d->reg_post)
		__raw_posting_read(d->i915, d->reg_post);
}
124
05a2fb15 125static void
48c1026a 126fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
907b28c5 127{
05a2fb15 128 struct intel_uncore_forcewake_domain *d;
48c1026a 129 enum forcewake_domain_id id;
907b28c5 130
05a2fb15
MK
131 for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
132 fw_domain_wait_ack_clear(d);
133 fw_domain_get(d);
05a2fb15
MK
134 fw_domain_wait_ack(d);
135 }
136}
907b28c5 137
05a2fb15 138static void
48c1026a 139fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
05a2fb15
MK
140{
141 struct intel_uncore_forcewake_domain *d;
48c1026a 142 enum forcewake_domain_id id;
907b28c5 143
05a2fb15
MK
144 for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
145 fw_domain_put(d);
146 fw_domain_posting_read(d);
147 }
148}
907b28c5 149
05a2fb15
MK
/* Issue a single posting read to flush outstanding forcewake writes;
 * one domain's read is sufficient for the whole register file. */
static void
fw_domains_posting_read(struct drm_i915_private *dev_priv)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	/* No need to do for all, just do for first found */
	for_each_fw_domain(d, dev_priv, id) {
		fw_domain_posting_read(d);
		break;
	}
}
162
/* Reset every domain in @fw_domains to its known default state and
 * flush with a posting read. No-op before any domain has been
 * registered (uncore.fw_domains == 0, e.g. very early init). */
static void
fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	if (dev_priv->uncore.fw_domains == 0)
		return;

	for_each_fw_domain_mask(d, fw_domains, dev_priv, id)
		fw_domain_reset(d);

	fw_domains_posting_read(dev_priv);
}
177
/* Wait (up to 500us, atomic) for the GT core thread to report C0. */
static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
				GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}
187
/* Acquire forcewake domains and then apply the thread-status workaround
 * so subsequent reads don't sporadically return 0. */
static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get(dev_priv, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}
196
/* Check the GT FIFO debug register for wake FIFO errors; warn and clear
 * any error bits found (the register is write-to-clear — presumably,
 * since the read value is written back; confirm against Bspec). */
static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}
205
05a2fb15 206static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
48c1026a 207 enum forcewake_domains fw_domains)
907b28c5 208{
05a2fb15 209 fw_domains_put(dev_priv, fw_domains);
907b28c5
CW
210 gen6_gt_check_fifodbg(dev_priv);
211}
212
907b28c5
CW
/* Wait for free entries in the GT write FIFO before posting a write.
 *
 * Maintains a cached free-entry count (uncore.fifo_count) and only polls
 * the hardware once the cache drops below the reserved threshold. Polls
 * up to 500 * 10us. Returns the number of timeouts encountered (0 on
 * success); callers use a non-zero return to trigger a FIFO debug check.
 */
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	/* On VLV, FIFO will be shared by both SW and HW.
	 * So, we need to read the FREE_ENTRIES everytime */
	if (IS_VALLEYVIEW(dev_priv->dev))
		dev_priv->uncore.fifo_count =
			__raw_i915_read32(dev_priv, GTFIFOCTL) &
			GT_FIFO_FREE_ENTRIES_MASK;

	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
		}
		/* loop < 0 means we exhausted all 500 polls without the
		 * FIFO draining above the reserved watermark. */
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->uncore.fifo_count = fifo;
	}
	/* Account for the entry the caller is about to consume. */
	dev_priv->uncore.fifo_count--;

	return ret;
}
239
/* Timer callback that drops a deferred forcewake reference for one
 * domain (armed by fw_domain_arm_timer()). @arg is the domain pointer
 * cast to unsigned long, per the timer API. Takes uncore.lock since it
 * races with register accessors manipulating wake_count. */
static void intel_uncore_fw_release_timer(unsigned long arg)
{
	struct intel_uncore_forcewake_domain *domain = (void *)arg;
	unsigned long irqflags;

	assert_device_not_suspended(domain->i915);

	spin_lock_irqsave(&domain->i915->uncore.lock, irqflags);
	/* A zero wake_count here is a refcounting bug; bump it so the
	 * decrement below cannot underflow. */
	if (WARN_ON(domain->wake_count == 0))
		domain->wake_count++;

	if (--domain->wake_count == 0)
		domain->i915->uncore.funcs.force_wake_put(domain->i915,
							  1 << domain->id);

	spin_unlock_irqrestore(&domain->i915->uncore.lock, irqflags);
}
257
/* Reset all forcewake domains to a known state.
 *
 * First drains all pending release timers (retrying up to 100 times,
 * re-running expired timers by hand), then — with uncore.lock held —
 * drops the hardware forcewake for any domain still holding userspace
 * references, resets every domain, and, if @restore is set, re-acquires
 * the hardware forcewake for those domains and refreshes the cached GT
 * FIFO count on gen6/7. Used around suspend/resume and GPU reset.
 */
void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domain_id id;
	enum forcewake_domains fw = 0, active_domains;

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		active_domains = 0;

		for_each_fw_domain(domain, dev_priv, id) {
			if (del_timer_sync(&domain->timer) == 0)
				continue;

			/* Timer had expired but not yet run: run it now. */
			intel_uncore_fw_release_timer((unsigned long)domain);
		}

		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

		for_each_fw_domain(domain, dev_priv, id) {
			if (timer_pending(&domain->timer))
				active_domains |= (1 << id);
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
			break;
		}

		/* A timer was re-armed while we held the lock; drop it
		 * and let the timer run before retrying. */
		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
		cond_resched();
	}

	WARN_ON(active_domains);

	/* Collect the domains that still hold explicit references. */
	for_each_fw_domain(domain, dev_priv, id)
		if (domain->wake_count)
			fw |= 1 << id;

	if (fw)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);

	fw_domains_reset(dev_priv, FORCEWAKE_ALL);

	if (restore) { /* If reset with a user forcewake, try to restore */
		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev) || IS_GEN7(dev))
			dev_priv->uncore.fifo_count =
				__raw_i915_read32(dev_priv, GTFIFOCTL) &
				GT_FIFO_FREE_ENTRIES_MASK;
	}

	if (!restore)
		assert_forcewakes_inactive(dev_priv);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
326
/* Detect embedded LLC (eDRAM) on HSW/BDW and record its size (always
 * reported as 128MB — see comment below) in dev_priv->ellc_size. */
static void intel_uncore_ellc_detect(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
	    (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
		/* The docs do not explain exactly how the calculation can be
		 * made. It is somewhat guessable, but for now, it's always
		 * 128MB.
		 * NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
		dev_priv->ellc_size = 128;
		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
	}
}
342
/* Early MMIO sanitization: clear stale unclaimed-register and GT FIFO
 * error state left by the BIOS/previous driver, then reset forcewake
 * (optionally restoring user-held references). */
static void __intel_uncore_early_sanitize(struct drm_device *dev,
					  bool restore_forcewake)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev))
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	/* clear out old GT FIFO errors */
	if (IS_GEN6(dev) || IS_GEN7(dev))
		__raw_i915_write32(dev_priv, GTFIFODBG,
				   __raw_i915_read32(dev_priv, GTFIFODBG));

	intel_uncore_forcewake_reset(dev, restore_forcewake);
}
358
ed493883
ID
/* Public early-sanitize entry point: sanitize uncore state and then
 * check for (and clear) pending GTT faults. */
void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
{
	__intel_uncore_early_sanitize(dev, restore_forcewake);
	i915_check_and_clear_faults(dev);
}
364
521198a2
MK
/* Sanitize GT power state before hardware init. */
void intel_uncore_sanitize(struct drm_device *dev)
{
	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_disable_gt_powersave(dev);
}
370
59bad947
MK
/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used get GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However if some sequence requires the GT to not power down a particular
 * forcewake domains this function should be called at the beginning of the
 * sequence. And subsequently the reference should be dropped by symmetric
 * call to intel_unforce_forcewake_put(). Usually caller wants all the domains
 * to be kept awake so the @fw_domains would be then FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	/* No-op on platforms without forcewake (gen < 6). */
	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	WARN_ON(dev_priv->pm.suspended);

	/* Ignore domains this platform doesn't have. */
	fw_domains &= dev_priv->uncore.fw_domains;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* Only wake hardware for domains whose refcount was zero. */
	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (domain->wake_count++)
			fw_domains &= ~(1 << id);
	}

	if (fw_domains)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
410
59bad947
MK
/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references
 *
 * This function drops the device-level forcewakes for specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	/* No-op on platforms without forcewake (gen < 6). */
	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	/* Ignore domains this platform doesn't have. */
	fw_domains &= dev_priv->uncore.fw_domains;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (WARN_ON(domain->wake_count == 0))
			continue;

		if (--domain->wake_count)
			continue;

		/* Last reference: keep one count and defer the actual
		 * hardware release to the domain timer. */
		domain->wake_count++;
		fw_domain_arm_timer(domain);
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
446
/* Debug check: warn if any forcewake domain still holds references.
 * No-op on platforms without forcewake. */
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	for_each_fw_domain(domain, dev_priv, id)
		WARN_ON(domain->wake_count);
}
458
907b28c5
CW
459/* We give fast paths for the really cool registers */
460#define NEEDS_FORCE_WAKE(dev_priv, reg) \
ab484f8f 461 ((reg) < 0x40000 && (reg) != FORCEWAKE)
907b28c5 462
1938e59a 463#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))
38fb6a40 464
1938e59a
D
465#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
466 (REG_RANGE((reg), 0x2000, 0x4000) || \
467 REG_RANGE((reg), 0x5000, 0x8000) || \
468 REG_RANGE((reg), 0xB000, 0x12000) || \
469 REG_RANGE((reg), 0x2E000, 0x30000))
470
471#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
472 (REG_RANGE((reg), 0x12000, 0x14000) || \
473 REG_RANGE((reg), 0x22000, 0x24000) || \
474 REG_RANGE((reg), 0x30000, 0x40000))
475
476#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
477 (REG_RANGE((reg), 0x2000, 0x4000) || \
db5ff4ac 478 REG_RANGE((reg), 0x5200, 0x8000) || \
1938e59a 479 REG_RANGE((reg), 0x8300, 0x8500) || \
db5ff4ac 480 REG_RANGE((reg), 0xB000, 0xB480) || \
1938e59a
D
481 REG_RANGE((reg), 0xE000, 0xE800))
482
483#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
484 (REG_RANGE((reg), 0x8800, 0x8900) || \
485 REG_RANGE((reg), 0xD000, 0xD800) || \
486 REG_RANGE((reg), 0x12000, 0x14000) || \
487 REG_RANGE((reg), 0x1A000, 0x1C000) || \
488 REG_RANGE((reg), 0x1E800, 0x1EA00) || \
db5ff4ac 489 REG_RANGE((reg), 0x30000, 0x38000))
1938e59a
D
490
491#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
492 (REG_RANGE((reg), 0x4000, 0x5000) || \
493 REG_RANGE((reg), 0x8000, 0x8300) || \
494 REG_RANGE((reg), 0x8500, 0x8600) || \
495 REG_RANGE((reg), 0x9000, 0xB000) || \
db5ff4ac 496 REG_RANGE((reg), 0xF000, 0x10000))
38fb6a40 497
4597a88a 498#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
8ee558d8 499 REG_RANGE((reg), 0xB00, 0x2000)
4597a88a
ZW
500
501#define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \
8ee558d8
AG
502 (REG_RANGE((reg), 0x2000, 0x2700) || \
503 REG_RANGE((reg), 0x3000, 0x4000) || \
4597a88a 504 REG_RANGE((reg), 0x5200, 0x8000) || \
8ee558d8 505 REG_RANGE((reg), 0x8140, 0x8160) || \
4597a88a
ZW
506 REG_RANGE((reg), 0x8300, 0x8500) || \
507 REG_RANGE((reg), 0x8C00, 0x8D00) || \
508 REG_RANGE((reg), 0xB000, 0xB480) || \
8ee558d8
AG
509 REG_RANGE((reg), 0xE000, 0xE900) || \
510 REG_RANGE((reg), 0x24400, 0x24800))
4597a88a
ZW
511
512#define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \
8ee558d8
AG
513 (REG_RANGE((reg), 0x8130, 0x8140) || \
514 REG_RANGE((reg), 0x8800, 0x8A00) || \
4597a88a
ZW
515 REG_RANGE((reg), 0xD000, 0xD800) || \
516 REG_RANGE((reg), 0x12000, 0x14000) || \
517 REG_RANGE((reg), 0x1A000, 0x1EA00) || \
518 REG_RANGE((reg), 0x30000, 0x40000))
519
520#define FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg) \
521 REG_RANGE((reg), 0x9400, 0x9800)
522
523#define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
524 ((reg) < 0x40000 &&\
525 !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
526 !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
527 !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
528 !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))
529
907b28c5
CW
/* Issue a harmless dummy MMIO write to wake ILK from RC6 before a real
 * register access. */
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}
538
/* When i915.mmio_debug is enabled, check the FPGA_DBG unclaimed bit
 * around a register access (@before selects which side of the access,
 * @read whether it was a read or a write), WARN with context, and clear
 * the sticky bit so the next access can be blamed accurately. */
static void
hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read,
			bool before)
{
	const char *op = read ? "reading" : "writing to";
	const char *when = before ? "before" : "after";

	if (!i915.mmio_debug)
		return;

	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
		     when, op, reg);
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}
555
/* Cheap unclaimed-register detection used when full mmio_debug is off:
 * report once per occurrence, suggest enabling i915.mmio_debug, and
 * clear the sticky bit. */
static void
hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
{
	/* The per-access debug path already covers this case. */
	if (i915.mmio_debug)
		return;

	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		DRM_ERROR("Unclaimed register detected. Please use the i915.mmio_debug=1 to debug this problem.");
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}
567
51f67885 568#define GEN2_READ_HEADER(x) \
5d738795 569 u##x val = 0; \
51f67885 570 assert_device_not_suspended(dev_priv);
5d738795 571
51f67885 572#define GEN2_READ_FOOTER \
5d738795
BW
573 trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
574 return val
575
51f67885 576#define __gen2_read(x) \
0b274481 577static u##x \
51f67885
CW
578gen2_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
579 GEN2_READ_HEADER(x); \
3967018e 580 val = __raw_i915_read##x(dev_priv, reg); \
51f67885 581 GEN2_READ_FOOTER; \
3967018e
BW
582}
583
584#define __gen5_read(x) \
585static u##x \
586gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
51f67885 587 GEN2_READ_HEADER(x); \
3967018e
BW
588 ilk_dummy_write(dev_priv); \
589 val = __raw_i915_read##x(dev_priv, reg); \
51f67885 590 GEN2_READ_FOOTER; \
3967018e
BW
591}
592
51f67885
CW
593__gen5_read(8)
594__gen5_read(16)
595__gen5_read(32)
596__gen5_read(64)
597__gen2_read(8)
598__gen2_read(16)
599__gen2_read(32)
600__gen2_read(64)
601
602#undef __gen5_read
603#undef __gen2_read
604
605#undef GEN2_READ_FOOTER
606#undef GEN2_READ_HEADER
607
608#define GEN6_READ_HEADER(x) \
609 unsigned long irqflags; \
610 u##x val = 0; \
611 assert_device_not_suspended(dev_priv); \
612 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
613
614#define GEN6_READ_FOOTER \
615 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
616 trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
617 return val
618
/* Take an implicit forcewake reference for each domain in @fw_domains
 * on behalf of a register accessor (caller holds uncore.lock). Domains
 * already awake keep their existing reference; newly-referenced domains
 * get their deferred-release timer armed and are woken in one batch. */
static inline void __force_wake_get(struct drm_i915_private *dev_priv,
				    enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (WARN_ON(!fw_domains))
		return;

	/* Ideally GCC would be constant-fold and eliminate this loop */
	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (domain->wake_count) {
			/* Already awake: drop from the wake-up mask. */
			fw_domains &= ~(1 << id);
			continue;
		}

		domain->wake_count++;
		fw_domain_arm_timer(domain);
	}

	if (fw_domains)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}
642
3967018e
BW
643#define __gen6_read(x) \
644static u##x \
645gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
51f67885 646 GEN6_READ_HEADER(x); \
5978118c 647 hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
b2cff0db
CW
648 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) \
649 __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
dc9fb09c 650 val = __raw_i915_read##x(dev_priv, reg); \
5978118c 651 hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
51f67885 652 GEN6_READ_FOOTER; \
907b28c5
CW
653}
654
940aece4
D
655#define __vlv_read(x) \
656static u##x \
657vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
51f67885 658 GEN6_READ_HEADER(x); \
b2cff0db
CW
659 if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) \
660 __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
661 else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) \
662 __force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
6fe72865 663 val = __raw_i915_read##x(dev_priv, reg); \
51f67885 664 GEN6_READ_FOOTER; \
940aece4
D
665}
666
1938e59a
D
667#define __chv_read(x) \
668static u##x \
669chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
51f67885 670 GEN6_READ_HEADER(x); \
b2cff0db
CW
671 if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
672 __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
673 else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
674 __force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
675 else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
676 __force_wake_get(dev_priv, \
677 FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
1938e59a 678 val = __raw_i915_read##x(dev_priv, reg); \
51f67885 679 GEN6_READ_FOOTER; \
1938e59a 680}
940aece4 681
4597a88a
ZW
682#define SKL_NEEDS_FORCE_WAKE(dev_priv, reg) \
683 ((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))
684
685#define __gen9_read(x) \
686static u##x \
687gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
48c1026a 688 enum forcewake_domains fw_engine; \
51f67885 689 GEN6_READ_HEADER(x); \
b2cff0db
CW
690 if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg))) \
691 fw_engine = 0; \
692 else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
693 fw_engine = FORCEWAKE_RENDER; \
694 else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
695 fw_engine = FORCEWAKE_MEDIA; \
696 else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
697 fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
698 else \
699 fw_engine = FORCEWAKE_BLITTER; \
700 if (fw_engine) \
701 __force_wake_get(dev_priv, fw_engine); \
702 val = __raw_i915_read##x(dev_priv, reg); \
51f67885 703 GEN6_READ_FOOTER; \
4597a88a
ZW
704}
705
706__gen9_read(8)
707__gen9_read(16)
708__gen9_read(32)
709__gen9_read(64)
1938e59a
D
710__chv_read(8)
711__chv_read(16)
712__chv_read(32)
713__chv_read(64)
940aece4
D
714__vlv_read(8)
715__vlv_read(16)
716__vlv_read(32)
717__vlv_read(64)
3967018e
BW
718__gen6_read(8)
719__gen6_read(16)
720__gen6_read(32)
721__gen6_read(64)
3967018e 722
4597a88a 723#undef __gen9_read
1938e59a 724#undef __chv_read
940aece4 725#undef __vlv_read
3967018e 726#undef __gen6_read
51f67885
CW
727#undef GEN6_READ_FOOTER
728#undef GEN6_READ_HEADER
5d738795 729
51f67885 730#define GEN2_WRITE_HEADER \
5d738795 731 trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
6f0ea9e2 732 assert_device_not_suspended(dev_priv); \
907b28c5 733
51f67885 734#define GEN2_WRITE_FOOTER
0d965301 735
51f67885 736#define __gen2_write(x) \
0b274481 737static void \
51f67885
CW
738gen2_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
739 GEN2_WRITE_HEADER; \
4032ef43 740 __raw_i915_write##x(dev_priv, reg, val); \
51f67885 741 GEN2_WRITE_FOOTER; \
4032ef43
BW
742}
743
744#define __gen5_write(x) \
745static void \
746gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
51f67885 747 GEN2_WRITE_HEADER; \
4032ef43
BW
748 ilk_dummy_write(dev_priv); \
749 __raw_i915_write##x(dev_priv, reg, val); \
51f67885 750 GEN2_WRITE_FOOTER; \
4032ef43
BW
751}
752
51f67885
CW
753__gen5_write(8)
754__gen5_write(16)
755__gen5_write(32)
756__gen5_write(64)
757__gen2_write(8)
758__gen2_write(16)
759__gen2_write(32)
760__gen2_write(64)
761
762#undef __gen5_write
763#undef __gen2_write
764
765#undef GEN2_WRITE_FOOTER
766#undef GEN2_WRITE_HEADER
767
768#define GEN6_WRITE_HEADER \
769 unsigned long irqflags; \
770 trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
771 assert_device_not_suspended(dev_priv); \
772 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
773
774#define GEN6_WRITE_FOOTER \
775 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
776
4032ef43
BW
777#define __gen6_write(x) \
778static void \
779gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
780 u32 __fifo_ret = 0; \
51f67885 781 GEN6_WRITE_HEADER; \
4032ef43
BW
782 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
783 __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
784 } \
785 __raw_i915_write##x(dev_priv, reg, val); \
786 if (unlikely(__fifo_ret)) { \
787 gen6_gt_check_fifodbg(dev_priv); \
788 } \
51f67885 789 GEN6_WRITE_FOOTER; \
4032ef43
BW
790}
791
792#define __hsw_write(x) \
793static void \
794hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
907b28c5 795 u32 __fifo_ret = 0; \
51f67885 796 GEN6_WRITE_HEADER; \
907b28c5
CW
797 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
798 __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
799 } \
5978118c 800 hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
6af5d92f 801 __raw_i915_write##x(dev_priv, reg, val); \
907b28c5
CW
802 if (unlikely(__fifo_ret)) { \
803 gen6_gt_check_fifodbg(dev_priv); \
804 } \
5978118c
PZ
805 hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
806 hsw_unclaimed_reg_detect(dev_priv); \
51f67885 807 GEN6_WRITE_FOOTER; \
907b28c5 808}
3967018e 809
ab2aa47e
BW
810static const u32 gen8_shadowed_regs[] = {
811 FORCEWAKE_MT,
812 GEN6_RPNSWREQ,
813 GEN6_RC_VIDEO_FREQ,
814 RING_TAIL(RENDER_RING_BASE),
815 RING_TAIL(GEN6_BSD_RING_BASE),
816 RING_TAIL(VEBOX_RING_BASE),
817 RING_TAIL(BLT_RING_BASE),
818 /* TODO: Other registers are not yet used */
819};
820
821static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
822{
823 int i;
824 for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
825 if (reg == gen8_shadowed_regs[i])
826 return true;
827
828 return false;
829}
830
831#define __gen8_write(x) \
832static void \
833gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
51f67885 834 GEN6_WRITE_HEADER; \
66bc2cab 835 hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
b2cff0db
CW
836 if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) \
837 __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
838 __raw_i915_write##x(dev_priv, reg, val); \
66bc2cab
PZ
839 hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
840 hsw_unclaimed_reg_detect(dev_priv); \
51f67885 841 GEN6_WRITE_FOOTER; \
ab2aa47e
BW
842}
843
1938e59a
D
844#define __chv_write(x) \
845static void \
846chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
1938e59a 847 bool shadowed = is_gen8_shadowed(dev_priv, reg); \
51f67885 848 GEN6_WRITE_HEADER; \
1938e59a 849 if (!shadowed) { \
b2cff0db
CW
850 if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
851 __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
852 else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
853 __force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
854 else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
855 __force_wake_get(dev_priv, FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
1938e59a 856 } \
1938e59a 857 __raw_i915_write##x(dev_priv, reg, val); \
51f67885 858 GEN6_WRITE_FOOTER; \
1938e59a
D
859}
860
7c859007
ZW
861static const u32 gen9_shadowed_regs[] = {
862 RING_TAIL(RENDER_RING_BASE),
863 RING_TAIL(GEN6_BSD_RING_BASE),
864 RING_TAIL(VEBOX_RING_BASE),
865 RING_TAIL(BLT_RING_BASE),
866 FORCEWAKE_BLITTER_GEN9,
867 FORCEWAKE_RENDER_GEN9,
868 FORCEWAKE_MEDIA_GEN9,
869 GEN6_RPNSWREQ,
870 GEN6_RC_VIDEO_FREQ,
871 /* TODO: Other registers are not yet used */
872};
873
874static bool is_gen9_shadowed(struct drm_i915_private *dev_priv, u32 reg)
875{
876 int i;
877 for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
878 if (reg == gen9_shadowed_regs[i])
879 return true;
880
881 return false;
882}
883
4597a88a
ZW
/*
 * Gen9 (SKL) MMIO writes: select the forcewake domain(s) covering @reg.
 * Registers outside the forcewake range, or with a shadow copy, need no
 * wakeup at all; anything not claimed by render/media/common falls back
 * to the blitter domain.
 */
#define __gen9_write(x) \
static void \
gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \
		bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg)) || \
	    is_gen9_shadowed(dev_priv, reg)) \
		fw_engine = 0; \
	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	else \
		fw_engine = FORCEWAKE_BLITTER; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}
906
/* Instantiate the 8/16/32/64-bit write functions for each platform family. */
__gen9_write(8)
__gen9_write(16)
__gen9_write(32)
__gen9_write(64)
__chv_write(8)
__chv_write(16)
__chv_write(32)
__chv_write(64)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen8_write(64)
__hsw_write(8)
__hsw_write(16)
__hsw_write(32)
__hsw_write(64)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)

#undef __gen9_write
#undef __chv_write
#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER
907b28c5 935
43d942a7
YZ
/*
 * Install the write vfuncs of the given accessor family, e.g.
 * ASSIGN_WRITE_MMIO_VFUNCS(gen6) wires up gen6_write8/16/32/64.
 * Expects a local "dev_priv" in the calling scope.
 */
#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
	dev_priv->uncore.funcs.mmio_writew = x##_write16; \
	dev_priv->uncore.funcs.mmio_writel = x##_write32; \
	dev_priv->uncore.funcs.mmio_writeq = x##_write64; \
} while (0)

/* Likewise for the read vfuncs. */
#define ASSIGN_READ_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_readb = x##_read8; \
	dev_priv->uncore.funcs.mmio_readw = x##_read16; \
	dev_priv->uncore.funcs.mmio_readl = x##_read32; \
	dev_priv->uncore.funcs.mmio_readq = x##_read64; \
} while (0)
951
05a2fb15
MK
952
/*
 * Initialize one forcewake domain: record its set/ack registers, pick
 * the platform-specific set/clear/reset values and posting-read
 * register, arm the release timer, and reset the domain to a known
 * state.  Publishes the domain in dev_priv->uncore.fw_domains.
 */
static void fw_domain_init(struct drm_i915_private *dev_priv,
			   enum forcewake_domain_id domain_id,
			   u32 reg_set, u32 reg_ack)
{
	struct intel_uncore_forcewake_domain *d;

	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
		return;

	d = &dev_priv->uncore.fw_domain[domain_id];

	/* (Re)initializing a domain that is still held is a driver bug. */
	WARN_ON(d->wake_count);

	d->wake_count = 0;
	d->reg_set = reg_set;
	d->reg_ack = reg_ack;

	if (IS_GEN6(dev_priv)) {
		/* Gen6 takes plain values in the FORCEWAKE register. */
		d->val_reset = 0;
		d->val_set = FORCEWAKE_KERNEL;
		d->val_clear = 0;
	} else {
		/* Later gens use masked-bit (enable-mask in high half) writes. */
		d->val_reset = _MASKED_BIT_DISABLE(0xffff);
		d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
		d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
	}

	/* Register read back to post the set/clear write, if any. */
	if (IS_VALLEYVIEW(dev_priv))
		d->reg_post = FORCEWAKE_ACK_VLV;
	else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
		d->reg_post = ECOBUS;
	else
		d->reg_post = 0;

	d->i915 = dev_priv;
	d->id = domain_id;

	setup_timer(&d->timer, intel_uncore_fw_release_timer, (unsigned long)d);

	dev_priv->uncore.fw_domains |= (1 << domain_id);

	fw_domain_reset(d);
}
996
/*
 * Set up the forcewake domains and the get/put vfuncs that drive them
 * for this platform.  Gen5 and earlier have no forcewake; newer parts
 * expose one to three domains (render/media/blitter).
 */
static void intel_uncore_fw_domains_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* No forcewake before gen6. */
	if (INTEL_INFO(dev_priv->dev)->gen <= 5)
		return;

	if (IS_GEN9(dev)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		/* VLV (but not CHV) drains the GT FIFO on put. */
		if (!IS_CHERRYVIEW(dev))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(dev)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;

		/* We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case of MT access is
		 * not working
		 */
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);

		mutex_lock(&dev->struct_mutex);
		fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			/* Fall back to the legacy single-threaded interface. */
			fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (IS_GEN6(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

	/* All future platforms are expected to require complex power gating */
	WARN_ON(dev_priv->uncore.fw_domains == 0);
}
1080
/*
 * One-time uncore setup: detect eLLC, build the forcewake domains,
 * sanitize early hardware state, then install the per-generation MMIO
 * read/write accessors and clear any stale GPU faults.
 */
void intel_uncore_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_uncore_ellc_detect(dev);
	intel_uncore_fw_domains_init(dev);
	__intel_uncore_early_sanitize(dev, false);

	switch (INTEL_INFO(dev)->gen) {
	default:
		/* Unknown gen: leave the vfuncs unset and complain. */
		MISSING_CASE(INTEL_INFO(dev)->gen);
		return;
	case 9:
		ASSIGN_WRITE_MMIO_VFUNCS(gen9);
		ASSIGN_READ_MMIO_VFUNCS(gen9);
		break;
	case 8:
		if (IS_CHERRYVIEW(dev)) {
			ASSIGN_WRITE_MMIO_VFUNCS(chv);
			ASSIGN_READ_MMIO_VFUNCS(chv);

		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen8);
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 7:
	case 6:
		/* Haswell has its own write paths; reads share gen6's. */
		if (IS_HASWELL(dev)) {
			ASSIGN_WRITE_MMIO_VFUNCS(hsw);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen6);
		}

		if (IS_VALLEYVIEW(dev)) {
			ASSIGN_READ_MMIO_VFUNCS(vlv);
		} else {
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 5:
		ASSIGN_WRITE_MMIO_VFUNCS(gen5);
		ASSIGN_READ_MMIO_VFUNCS(gen5);
		break;
	case 4:
	case 3:
	case 2:
		ASSIGN_WRITE_MMIO_VFUNCS(gen2);
		ASSIGN_READ_MMIO_VFUNCS(gen2);
		break;
	}

	i915_check_and_clear_faults(dev);
}
43d942a7
YZ
1135#undef ASSIGN_WRITE_MMIO_VFUNCS
1136#undef ASSIGN_READ_MMIO_VFUNCS
0b274481
BW
1137
/*
 * Tear down uncore state on driver unload: sanitize the hardware and
 * drop any forcewake references we may still hold.
 */
void intel_uncore_fini(struct drm_device *dev)
{
	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev);
	intel_uncore_forcewake_reset(dev, false);
}
1144
af76ae44
DL
#define GEN_RANGE(l, h) GENMASK(h, l)

/* Registers userspace may read through i915_reg_read_ioctl(). */
static const struct register_whitelist {
	uint64_t offset;
	uint32_t size;	/* access width in bytes: 1, 2, 4 or 8 */
	/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
	uint32_t gen_bitmask;
} whitelist[] = {
	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 9) },
};
1155
1156int i915_reg_read_ioctl(struct drm_device *dev,
1157 void *data, struct drm_file *file)
1158{
1159 struct drm_i915_private *dev_priv = dev->dev_private;
1160 struct drm_i915_reg_read *reg = data;
1161 struct register_whitelist const *entry = whitelist;
cf67c70f 1162 int i, ret = 0;
907b28c5
CW
1163
1164 for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
1165 if (entry->offset == reg->offset &&
1166 (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
1167 break;
1168 }
1169
1170 if (i == ARRAY_SIZE(whitelist))
1171 return -EINVAL;
1172
cf67c70f
PZ
1173 intel_runtime_pm_get(dev_priv);
1174
907b28c5
CW
1175 switch (entry->size) {
1176 case 8:
1177 reg->val = I915_READ64(reg->offset);
1178 break;
1179 case 4:
1180 reg->val = I915_READ(reg->offset);
1181 break;
1182 case 2:
1183 reg->val = I915_READ16(reg->offset);
1184 break;
1185 case 1:
1186 reg->val = I915_READ8(reg->offset);
1187 break;
1188 default:
5f77eeb0 1189 MISSING_CASE(entry->size);
cf67c70f
PZ
1190 ret = -EINVAL;
1191 goto out;
907b28c5
CW
1192 }
1193
cf67c70f
PZ
1194out:
1195 intel_runtime_pm_put(dev_priv);
1196 return ret;
907b28c5
CW
1197}
1198
b6359918
MK
/*
 * DRM_IOCTL_I915_GET_RESET_STATS: report GPU reset statistics for a
 * context.  The global reset count is only exposed to CAP_SYS_ADMIN;
 * the per-context batch_active/batch_pending counts go to the owner.
 */
int i915_get_reset_stats_ioctl(struct drm_device *dev,
			       void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reset_stats *args = data;
	struct i915_ctx_hang_stats *hs;
	struct intel_context *ctx;
	int ret;

	/* No flags or padding bits are defined yet. */
	if (args->flags || args->pad)
		return -EINVAL;

	/* Querying the default context is a privileged operation. */
	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}
	hs = &ctx->hang_stats;

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = hs->batch_active;
	args->batch_pending = hs->batch_pending;

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
1237
59ea9054 1238static int i915_reset_complete(struct drm_device *dev)
907b28c5
CW
1239{
1240 u8 gdrst;
59ea9054 1241 pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
73bbf6bd 1242 return (gdrst & GRDOM_RESET_STATUS) == 0;
907b28c5
CW
1243}
1244
59ea9054 1245static int i915_do_reset(struct drm_device *dev)
907b28c5 1246{
73bbf6bd 1247 /* assert reset for at least 20 usec */
59ea9054 1248 pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
73bbf6bd 1249 udelay(20);
59ea9054 1250 pci_write_config_byte(dev->pdev, I915_GDRST, 0);
907b28c5 1251
59ea9054 1252 return wait_for(i915_reset_complete(dev), 500);
73bbf6bd
VS
1253}
1254
1255static int g4x_reset_complete(struct drm_device *dev)
1256{
1257 u8 gdrst;
59ea9054 1258 pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
73bbf6bd 1259 return (gdrst & GRDOM_RESET_ENABLE) == 0;
907b28c5
CW
1260}
1261
408d4b9e
VS
1262static int g33_do_reset(struct drm_device *dev)
1263{
408d4b9e
VS
1264 pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
1265 return wait_for(g4x_reset_complete(dev), 500);
1266}
1267
fa4f53c4
VS
/*
 * G4x GPU reset: reset the render and media domains separately, with
 * VCP clock gating disabled around the media reset
 * (WaVcpClkGateDisableForMediaReset).
 */
static int g4x_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Reset the render domain and wait for completion. */
	pci_write_config_byte(dev->pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	/* Now the media domain. */
	pci_write_config_byte(dev->pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	/* Deassert reset. */
	pci_write_config_byte(dev->pdev, I915_GDRST, 0);

	return 0;
}
1297
907b28c5
CW
/*
 * Ironlake GPU reset via the MCHBAR-mirrored GDSR register: reset the
 * render domain, then the media domain, each time waiting for the
 * hardware to clear the reset-enable bit, then clear the register.
 */
static int ironlake_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 0);

	return 0;
}
1321
/*
 * Gen6+ full-chip reset through GEN6_GDRST.  Forcewake state is
 * re-initialized afterwards via intel_uncore_forcewake_reset().
 */
static int gen6_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

	intel_uncore_forcewake_reset(dev, true);

	return ret;
}
1342
1343int intel_gpu_reset(struct drm_device *dev)
1344{
542c184f
RB
1345 if (INTEL_INFO(dev)->gen >= 6)
1346 return gen6_do_reset(dev);
1347 else if (IS_GEN5(dev))
1348 return ironlake_do_reset(dev);
1349 else if (IS_G4X(dev))
1350 return g4x_do_reset(dev);
408d4b9e
VS
1351 else if (IS_G33(dev))
1352 return g33_do_reset(dev);
1353 else if (INTEL_INFO(dev)->gen >= 3)
59ea9054 1354 return i915_do_reset(dev);
542c184f
RB
1355 else
1356 return -ENODEV;
907b28c5
CW
1357}
1358
907b28c5
CW
1359void intel_uncore_check_errors(struct drm_device *dev)
1360{
1361 struct drm_i915_private *dev_priv = dev->dev_private;
1362
1363 if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
6af5d92f 1364 (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
907b28c5 1365 DRM_ERROR("Unclaimed register before interrupt\n");
6af5d92f 1366 __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
907b28c5
CW
1367 }
1368}