/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_vgpu.h"

#include <asm/iosf_mbi.h>
#include <linux/pm_runtime.h>

#define FORCEWAKE_ACK_TIMEOUT_MS 50
#define GT_FIFO_TIMEOUT_MS 10

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__))

static const char * const forcewake_domain_names[] = {
	"render",
	"blitter",
	"media",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
		return forcewake_domain_names[id];

	WARN_ON(id);

	return "unknown";
}

static inline void
fw_domain_reset(struct drm_i915_private *i915,
		const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(i915, d->reg_set, i915->uncore.fw_reset);
}
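
/*
 * Note: rather than dropping forcewake immediately on the last put, each
 * release arms a ~1ms auto-release hrtimer (intel_uncore_fw_release_timer()
 * below), so bursts of register accesses reuse the still-held wake instead
 * of paying for a fresh set/ack handshake every time.
 */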
static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	d->wake_count++;
	hrtimer_start_range_ns(&d->timer,
			       NSEC_PER_MSEC,
			       NSEC_PER_MSEC,
			       HRTIMER_MODE_REL);
}

static inline void
fw_domain_wait_ack_clear(const struct drm_i915_private *i915,
			 const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) &
			     FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_get(struct drm_i915_private *i915,
	      const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(i915, d->reg_set, i915->uncore.fw_set);
}

static inline void
fw_domain_wait_ack(const struct drm_i915_private *i915,
		   const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) &
			     FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_put(const struct drm_i915_private *i915,
	      const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(i915, d->reg_set, i915->uncore.fw_clear);
}
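
/*
 * The get path below is deliberately two-phase: first request the wake for
 * every domain in the mask (after waiting for any previous release to be
 * acked), and only then wait for the set-acks. Overlapping the handshakes
 * this way hides part of the per-domain ack latency when several domains
 * are taken at once.
 */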
static void
fw_domains_get(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);

	for_each_fw_domain_masked(d, fw_domains, i915, tmp) {
		fw_domain_wait_ack_clear(i915, d);
		fw_domain_get(i915, d);
	}

	for_each_fw_domain_masked(d, fw_domains, i915, tmp)
		fw_domain_wait_ack(i915, d);

	i915->uncore.fw_domains_active |= fw_domains;
}

static void
fw_domains_put(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);

	for_each_fw_domain_masked(d, fw_domains, i915, tmp)
		fw_domain_put(i915, d);

	i915->uncore.fw_domains_active &= ~fw_domains;
}

static void
fw_domains_reset(struct drm_i915_private *i915,
		 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	if (!fw_domains)
		return;

	GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);

	for_each_fw_domain_masked(d, fw_domains, i915, tmp)
		fw_domain_reset(i915, d);
}

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
				GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get(dev_priv, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
{
	u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);

	return count & GT_FIFO_FREE_ENTRIES_MASK;
}
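
/*
 * On gen6/gen7, writes issued while the GT is powered down land in a shared
 * GT FIFO with a limited number of entries, some of which are reserved for
 * hardware use. Before writing we therefore ensure more than
 * GT_FIFO_NUM_RESERVED_ENTRIES slots are free, polling for up to
 * GT_FIFO_TIMEOUT_MS if necessary.
 */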
static void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	u32 n;

	/* On VLV, the FIFO is shared by both SW and HW, so we need to
	 * read FREE_ENTRIES every time.
	 */
	if (IS_VALLEYVIEW(dev_priv))
		n = fifo_free_entries(dev_priv);
	else
		n = dev_priv->uncore.fifo_count;

	if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) {
		if (wait_for_atomic((n = fifo_free_entries(dev_priv)) >
				    GT_FIFO_NUM_RESERVED_ENTRIES,
				    GT_FIFO_TIMEOUT_MS)) {
			DRM_DEBUG("GT_FIFO timeout, entries: %u\n", n);
			return;
		}
	}

	dev_priv->uncore.fifo_count = n - 1;
}
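
/*
 * Timer callback for the forcewake auto-release described above. If the
 * domain was used again while the timer was pending (domain->active), the
 * timer simply re-arms; otherwise the reference taken in
 * fw_domain_arm_timer() is finally dropped and the domain may power down.
 */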
static enum hrtimer_restart
intel_uncore_fw_release_timer(struct hrtimer *timer)
{
	struct intel_uncore_forcewake_domain *domain =
		container_of(timer, struct intel_uncore_forcewake_domain, timer);
	struct drm_i915_private *dev_priv =
		container_of(domain, struct drm_i915_private, uncore.fw_domain[domain->id]);
	unsigned long irqflags;

	assert_rpm_device_not_suspended(dev_priv);

	if (xchg(&domain->active, false))
		return HRTIMER_RESTART;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (WARN_ON(domain->wake_count == 0))
		domain->wake_count++;

	if (--domain->wake_count == 0)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return HRTIMER_NORESTART;
}

static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
					 bool restore)
{
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domains fw, active_domains;

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		unsigned int tmp;

		active_domains = 0;

		for_each_fw_domain(domain, dev_priv, tmp) {
			smp_store_mb(domain->active, false);
			if (hrtimer_cancel(&domain->timer) == 0)
				continue;

			intel_uncore_fw_release_timer(&domain->timer);
		}

		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

		for_each_fw_domain(domain, dev_priv, tmp) {
			if (hrtimer_active(&domain->timer))
				active_domains |= domain->mask;
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
			break;
		}

		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
		cond_resched();
	}

	WARN_ON(active_domains);

	fw = dev_priv->uncore.fw_domains_active;
	if (fw)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);

	fw_domains_reset(dev_priv, dev_priv->uncore.fw_domains);

	if (restore) { /* If reset with a user forcewake, try to restore */
		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
			dev_priv->uncore.fifo_count =
				fifo_free_entries(dev_priv);
	}

	if (!restore)
		assert_forcewakes_inactive(dev_priv);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
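
/*
 * Gen9 reports its eDRAM geometry in HSW_EDRAM_CAP: a bank count plus ways
 * and sets indices that are decoded through the lookup tables below. As a
 * worked example, a cap value describing 2 banks with ways index 1 (8 ways)
 * and sets index 2 (2 sets) would give 2 * 8 * 2 MiB = 32 MiB of eDRAM.
 */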
static u64 gen9_edram_size(struct drm_i915_private *dev_priv)
{
	const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
	const unsigned int sets[4] = { 1, 1, 2, 2 };
	const u32 cap = dev_priv->edram_cap;

	return EDRAM_NUM_BANKS(cap) *
		ways[EDRAM_WAYS_IDX(cap)] *
		sets[EDRAM_SETS_IDX(cap)] *
		1024 * 1024;
}

u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv)
{
	if (!HAS_EDRAM(dev_priv))
		return 0;

	/* The capability bits needed for the size calculation are not
	 * there on pre-gen9, so always return 128MB.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		return 128 * 1024 * 1024;

	return gen9_edram_size(dev_priv);
}

static void intel_uncore_edram_detect(struct drm_i915_private *dev_priv)
{
	if (IS_HASWELL(dev_priv) ||
	    IS_BROADWELL(dev_priv) ||
	    INTEL_GEN(dev_priv) >= 9) {
		dev_priv->edram_cap = __raw_i915_read32(dev_priv,
							HSW_EDRAM_CAP);

		/* NB: We can't write IDICR yet because we don't have gt funcs
		 * set up.
		 */
	} else {
		dev_priv->edram_cap = 0;
	}

	if (HAS_EDRAM(dev_priv))
		DRM_INFO("Found %lluMB of eDRAM\n",
			 intel_uncore_edram_size(dev_priv) / (1024 * 1024));
}

static bool
fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	u32 dbg;

	dbg = __raw_i915_read32(dev_priv, FPGA_DBG);
	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
		return false;

	__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	return true;
}

static bool
vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	u32 cer;

	cer = __raw_i915_read32(dev_priv, CLAIM_ER);
	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
		return false;

	__raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR);

	return true;
}

static bool
gen6_check_for_fifo_debug(struct drm_i915_private *dev_priv)
{
	u32 fifodbg;

	fifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);

	if (unlikely(fifodbg)) {
		DRM_DEBUG_DRIVER("GTFIFODBG = 0x%08x\n", fifodbg);
		__raw_i915_write32(dev_priv, GTFIFODBG, fifodbg);
	}

	return fifodbg;
}

static bool
check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	bool ret = false;

	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
		ret |= fpga_check_for_unclaimed_mmio(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret |= vlv_check_for_unclaimed_mmio(dev_priv);

	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
		ret |= gen6_check_for_fifo_debug(dev_priv);

	return ret;
}

static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
					  bool restore_forcewake)
{
	/* clear out unclaimed reg detection bit */
	if (check_for_unclaimed_mmio(dev_priv))
		DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(dev_priv)) {
		__raw_i915_write32(dev_priv, GTFIFOCTL,
				   __raw_i915_read32(dev_priv, GTFIFOCTL) |
				   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				   GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
}

void intel_uncore_suspend(struct drm_i915_private *dev_priv)
{
	iosf_mbi_unregister_pmic_bus_access_notifier(
		&dev_priv->uncore.pmic_bus_access_nb);
	intel_uncore_forcewake_reset(dev_priv, false);
}

void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
{
	__intel_uncore_early_sanitize(dev_priv, true);
	iosf_mbi_register_pmic_bus_access_notifier(
		&dev_priv->uncore.pmic_bus_access_nb);
	i915_check_and_clear_faults(dev_priv);
}

void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv)
{
	iosf_mbi_register_pmic_bus_access_notifier(
		&dev_priv->uncore.pmic_bus_access_nb);
}

void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
{
	i915_modparams.enable_rc6 =
		sanitize_rc6_option(dev_priv, i915_modparams.enable_rc6);

	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_sanitize_gt_powersave(dev_priv);
}

static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) {
		if (domain->wake_count++) {
			fw_domains &= ~domain->mask;
			domain->active = true;
		}
	}

	if (fw_domains)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get the GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However, if some sequence requires the GT to not power down a particular
 * forcewake domain, this function should be called at the beginning of the
 * sequence, and the reference subsequently dropped by a symmetric call to
 * intel_uncore_forcewake_put(). Usually the caller wants all the domains
 * to be kept awake, in which case @fw_domains would be FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_get(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/**
 * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
 * @dev_priv: i915 device instance
 *
 * This function is a wrapper around intel_uncore_forcewake_get() to acquire
 * the GT powerwell and in the process disable our debugging for the
 * duration of userspace's bypass.
 */
void intel_uncore_forcewake_user_get(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->uncore.lock);
	if (!dev_priv->uncore.user_forcewake.count++) {
		intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);

		/* Save and disable mmio debugging for the user bypass */
		dev_priv->uncore.user_forcewake.saved_mmio_check =
			dev_priv->uncore.unclaimed_mmio_check;
		dev_priv->uncore.user_forcewake.saved_mmio_debug =
			i915_modparams.mmio_debug;

		dev_priv->uncore.unclaimed_mmio_check = 0;
		i915_modparams.mmio_debug = 0;
	}
	spin_unlock_irq(&dev_priv->uncore.lock);
}

/**
 * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
 * @dev_priv: i915 device instance
 *
 * This function complements intel_uncore_forcewake_user_get() and releases
 * the GT powerwell taken on behalf of the userspace bypass.
 */
void intel_uncore_forcewake_user_put(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->uncore.lock);
	if (!--dev_priv->uncore.user_forcewake.count) {
		if (intel_uncore_unclaimed_mmio(dev_priv))
			dev_info(dev_priv->drm.dev,
				 "Invalid mmio detected during user access\n");

		dev_priv->uncore.unclaimed_mmio_check =
			dev_priv->uncore.user_forcewake.saved_mmio_check;
		i915_modparams.mmio_debug =
			dev_priv->uncore.user_forcewake.saved_mmio_debug;

		intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
	}
	spin_unlock_irq(&dev_priv->uncore.lock);
}

/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	lockdep_assert_held(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	__intel_uncore_forcewake_get(dev_priv, fw_domains);
}

static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) {
		if (WARN_ON(domain->wake_count == 0))
			continue;

		if (--domain->wake_count) {
			domain->active = true;
			continue;
		}

		fw_domain_arm_timer(domain);
	}
}

/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references
 *
 * This function drops the device-level forcewakes for specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_put(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references on
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	lockdep_assert_held(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	__intel_uncore_forcewake_put(dev_priv, fw_domains);
}

void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	WARN(dev_priv->uncore.fw_domains_active,
	     "Expected all fw_domains to be inactive, but %08x are still on\n",
	     dev_priv->uncore.fw_domains_active);
}

void assert_forcewakes_active(struct drm_i915_private *dev_priv,
			      enum forcewake_domains fw_domains)
{
	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	assert_rpm_wakelock_held(dev_priv);

	fw_domains &= dev_priv->uncore.fw_domains;
	WARN(fw_domains & ~dev_priv->uncore.fw_domains_active,
	     "Expected %08x fw_domains to be active, but %08x are off\n",
	     fw_domains, fw_domains & ~dev_priv->uncore.fw_domains_active);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)
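
/*
 * Registers at offsets >= 0x40000 (broadly, the display range) sit outside
 * the GT power well on these platforms and can be accessed without any
 * forcewake handshake, hence the cheap range check above for the fast path.
 */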

#define __gen6_reg_read_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})

static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
{
	if (offset < entry->start)
		return -1;
	else if (offset > entry->end)
		return 1;
	else
		return 0;
}

/* Copied and "macroized" from lib/bsearch.c */
#define BSEARCH(key, base, num, cmp) ({ \
	unsigned int start__ = 0, end__ = (num); \
	typeof(base) result__ = NULL; \
	while (start__ < end__) { \
		unsigned int mid__ = start__ + (end__ - start__) / 2; \
		int ret__ = (cmp)((key), (base) + mid__); \
		if (ret__ < 0) { \
			end__ = mid__; \
		} else if (ret__ > 0) { \
			start__ = mid__ + 1; \
		} else { \
			result__ = (base) + mid__; \
			break; \
		} \
	} \
	result__; \
})
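
/*
 * BSEARCH() evaluates to a pointer to the matching element, or NULL if
 * nothing matches; like bsearch(3) it requires the array to be sorted with
 * respect to the comparison callback. A typical invocation is
 * BSEARCH(offset, table, ARRAY_SIZE(table), fw_range_cmp), as in
 * find_fw_domain() below.
 */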
static enum forcewake_domains
find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
{
	const struct intel_forcewake_range *entry;

	entry = BSEARCH(offset,
			dev_priv->uncore.fw_domains_table,
			dev_priv->uncore.fw_domains_table_entries,
			fw_range_cmp);

	if (!entry)
		return 0;

	WARN(entry->domains & ~dev_priv->uncore.fw_domains,
	     "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
	     entry->domains & ~dev_priv->uncore.fw_domains, offset);

	return entry->domains;
}

#define GEN_FW_RANGE(s, e, d) \
	{ .start = (s), .end = (e), .domains = (d) }

#define HAS_FWTABLE(dev_priv) \
	(INTEL_GEN(dev_priv) >= 9 || \
	 IS_CHERRYVIEW(dev_priv) || \
	 IS_VALLEYVIEW(dev_priv))

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __vlv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_read_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset))) \
		__fwd = find_fw_domain(dev_priv, offset); \
	__fwd; \
})
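
/*
 * Writes to the "shadowed" registers below are tracked by the hardware
 * itself even while the GT is powered down, so the gen8+ write paths can
 * skip the forcewake handshake for them; see is_gen8_shadowed() and
 * __gen8_reg_write_fw_domains().
 */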
/* *Must* be sorted by offset! See intel_shadow_table_check(). */
static const i915_reg_t gen8_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),	/* 0x2000 (base) */
	GEN6_RPNSWREQ,			/* 0xA008 */
	GEN6_RC_VIDEO_FREQ,		/* 0xA00C */
	RING_TAIL(GEN6_BSD_RING_BASE),	/* 0x12000 (base) */
	RING_TAIL(VEBOX_RING_BASE),	/* 0x1a000 (base) */
	RING_TAIL(BLT_RING_BASE),	/* 0x22000 (base) */
	/* TODO: Other registers are not yet used */
};

static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
{
	u32 offset = i915_mmio_reg_offset(*reg);

	if (key < offset)
		return -1;
	else if (key > offset)
		return 1;
	else
		return 0;
}

static bool is_gen8_shadowed(u32 offset)
{
	const i915_reg_t *regs = gen8_shadowed_regs;

	return BSEARCH(offset, regs, ARRAY_SIZE(gen8_shadowed_regs),
		       mmio_reg_cmp);
}

#define __gen8_reg_write_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __chv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_write_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
		__fwd = find_fw_domain(dev_priv, offset); \
	__fwd; \
})

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen9_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
__unclaimed_reg_debug(struct drm_i915_private *dev_priv,
		      const i915_reg_t reg,
		      const bool read,
		      const bool before)
{
	if (WARN(check_for_unclaimed_mmio(dev_priv) && !before,
		 "Unclaimed %s register 0x%x\n",
		 read ? "read from" : "write to",
		 i915_mmio_reg_offset(reg)))
		/* Only report the first N failures */
		i915_modparams.mmio_debug--;
}

static inline void
unclaimed_reg_debug(struct drm_i915_private *dev_priv,
		    const i915_reg_t reg,
		    const bool read,
		    const bool before)
{
	if (likely(!i915_modparams.mmio_debug))
		return;

	__unclaimed_reg_debug(dev_priv, reg, read, before);
}
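
/*
 * The register accessors themselves are stamped out per generation by the
 * macros below: a HEADER/FOOTER pair provides the common prologue and
 * epilogue (tracing, the runtime-pm assertion and, from gen6 onwards, the
 * uncore spinlock plus the unclaimed-mmio debug hooks), while the
 * __genN_read(x) style macros instantiate the 8/16/32/64-bit variants.
 */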
#define GEN2_READ_HEADER(x) \
	u##x val = 0; \
	assert_rpm_wakelock_held(dev_priv);

#define GEN2_READ_FOOTER \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read

#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER

#define GEN6_READ_HEADER(x) \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_rpm_wakelock_held(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	unclaimed_reg_debug(dev_priv, reg, true, true)

#define GEN6_READ_FOOTER \
	unclaimed_reg_debug(dev_priv, reg, true, false); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val
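
/*
 * Helper for the generated readers/writers: take any needed but currently
 * inactive forcewake domains on behalf of the access, leaving the release
 * to the auto-release timers armed here. Callers already hold uncore.lock.
 */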
static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~dev_priv->uncore.fw_domains);

	for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp)
		fw_domain_arm_timer(domain);

	dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
				     enum forcewake_domains fw_domains)
{
	if (WARN_ON(!fw_domains))
		return;

	/* Turn on all requested but inactive supported forcewake domains. */
	fw_domains &= dev_priv->uncore.fw_domains;
	fw_domains &= ~dev_priv->uncore.fw_domains_active;

	if (fw_domains)
		___force_wake_auto(dev_priv, fw_domains);
}

#define __gen_read(func, x) \
static u##x \
func##_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __##func##_reg_read_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}
#define __gen6_read(x) __gen_read(gen6, x)
#define __fwtable_read(x) __gen_read(fwtable, x)

__fwtable_read(8)
__fwtable_read(16)
__fwtable_read(32)
__fwtable_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)

#undef __fwtable_read
#undef __gen6_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER

#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(dev_priv); \

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)

#undef __gen5_write
#undef __gen2_write

#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER

#define GEN6_WRITE_HEADER \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	unclaimed_reg_debug(dev_priv, reg, false, true)

#define GEN6_WRITE_FOOTER \
	unclaimed_reg_debug(dev_priv, reg, false, false); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__gen6_gt_wait_for_fifo(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

#define __gen_write(func, x) \
static void \
func##_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __##func##_reg_write_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}
#define __gen8_write(x) __gen_write(gen8, x)
#define __fwtable_write(x) __gen_write(fwtable, x)

__fwtable_write(8)
__fwtable_write(16)
__fwtable_write(32)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)

#undef __fwtable_write
#undef __gen8_write
#undef __gen6_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

#define ASSIGN_WRITE_MMIO_VFUNCS(i915, x) \
do { \
	(i915)->uncore.funcs.mmio_writeb = x##_write8; \
	(i915)->uncore.funcs.mmio_writew = x##_write16; \
	(i915)->uncore.funcs.mmio_writel = x##_write32; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(i915, x) \
do { \
	(i915)->uncore.funcs.mmio_readb = x##_read8; \
	(i915)->uncore.funcs.mmio_readw = x##_read16; \
	(i915)->uncore.funcs.mmio_readl = x##_read32; \
	(i915)->uncore.funcs.mmio_readq = x##_read64; \
} while (0)
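
/*
 * Per-domain initialisation: record the set/ack register pair, hook up the
 * auto-release hrtimer and mark the domain as present in uncore.fw_domains.
 * Called once per supported domain from intel_uncore_fw_domains_init().
 */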
static void fw_domain_init(struct drm_i915_private *dev_priv,
			   enum forcewake_domain_id domain_id,
			   i915_reg_t reg_set,
			   i915_reg_t reg_ack)
{
	struct intel_uncore_forcewake_domain *d;

	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
		return;

	d = &dev_priv->uncore.fw_domain[domain_id];

	WARN_ON(d->wake_count);

	WARN_ON(!i915_mmio_reg_valid(reg_set));
	WARN_ON(!i915_mmio_reg_valid(reg_ack));

	d->wake_count = 0;
	d->reg_set = reg_set;
	d->reg_ack = reg_ack;

	d->id = domain_id;

	BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
	BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
	BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));

	d->mask = BIT(domain_id);

	hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	d->timer.function = intel_uncore_fw_release_timer;

	dev_priv->uncore.fw_domains |= BIT(domain_id);

	fw_domain_reset(dev_priv, d);
}

static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) <= 5 || intel_vgpu_active(dev_priv))
		return;

	if (IS_GEN6(dev_priv)) {
		dev_priv->uncore.fw_reset = 0;
		dev_priv->uncore.fw_set = FORCEWAKE_KERNEL;
		dev_priv->uncore.fw_clear = 0;
	} else {
		/* WaRsClearFWBitsAtReset:bdw,skl */
		dev_priv->uncore.fw_reset = _MASKED_BIT_DISABLE(0xffff);
		dev_priv->uncore.fw_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
		dev_priv->uncore.fw_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
	}

	if (INTEL_GEN(dev_priv) >= 9) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(dev_priv)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;

		/* We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case MT access
		 * is not working. At this stage we don't know which flavour
		 * of ivb this is, so it is better to reset the gen6 fw
		 * registers as well before the ecobus check.
		 */

		__raw_i915_write32(dev_priv, FORCEWAKE, 0);
		__raw_posting_read(dev_priv, ECOBUS);

		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);

		spin_lock_irq(&dev_priv->uncore.lock);
		fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_RENDER);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		fw_domains_put(dev_priv, FORCEWAKE_RENDER);
		spin_unlock_irq(&dev_priv->uncore.lock);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (IS_GEN6(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

	/* All future platforms are expected to require complex power gating */
	WARN_ON(dev_priv->uncore.fw_domains == 0);
}

#define ASSIGN_FW_DOMAINS_TABLE(d) \
{ \
	dev_priv->uncore.fw_domains_table = \
		(struct intel_forcewake_range *)(d); \
	dev_priv->uncore.fw_domains_table_entries = ARRAY_SIZE((d)); \
}

static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
					 unsigned long action, void *data)
{
	struct drm_i915_private *dev_priv = container_of(nb,
		struct drm_i915_private, uncore.pmic_bus_access_nb);

	switch (action) {
	case MBI_PMIC_BUS_ACCESS_BEGIN:
		/*
		 * forcewake all now to make sure that we don't need to do a
		 * forcewake later, which on systems where this notifier gets
		 * called would require the punit to access the shared pmic
		 * i2c bus - a bus which will be busy after this notification,
		 * leading to:
		 * "render: timed out waiting for forcewake ack request."
		 * errors.
		 *
		 * The notifier is unregistered during intel_runtime_suspend(),
		 * so it's ok to access the HW here without holding a RPM
		 * wake reference -> disable wakeref asserts for the time of
		 * the access.
		 */
		disable_rpm_wakeref_asserts(dev_priv);
		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
		enable_rpm_wakeref_asserts(dev_priv);
		break;
	case MBI_PMIC_BUS_ACCESS_END:
		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
		break;
	}

	return NOTIFY_OK;
}

void intel_uncore_init(struct drm_i915_private *dev_priv)
{
	i915_check_vgpu(dev_priv);

	intel_uncore_edram_detect(dev_priv);
	intel_uncore_fw_domains_init(dev_priv);
	__intel_uncore_early_sanitize(dev_priv, false);

	dev_priv->uncore.unclaimed_mmio_check = 1;
	dev_priv->uncore.pmic_bus_access_nb.notifier_call =
		i915_pmic_bus_access_notifier;

	if (IS_GEN(dev_priv, 2, 4) || intel_vgpu_active(dev_priv)) {
		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen2);
		ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen2);
	} else if (IS_GEN5(dev_priv)) {
		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen5);
		ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen5);
	} else if (IS_GEN(dev_priv, 6, 7)) {
		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen6);

		if (IS_VALLEYVIEW(dev_priv)) {
			ASSIGN_FW_DOMAINS_TABLE(__vlv_fw_ranges);
			ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
		} else {
			ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6);
		}
	} else if (IS_GEN8(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv)) {
			ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges);
			ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
			ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);

		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen8);
			ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6);
		}
	} else {
		ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
		ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
	}

	iosf_mbi_register_pmic_bus_access_notifier(
		&dev_priv->uncore.pmic_bus_access_nb);

	i915_check_and_clear_faults(dev_priv);
}

void intel_uncore_fini(struct drm_i915_private *dev_priv)
{
	iosf_mbi_unregister_pmic_bus_access_notifier(
		&dev_priv->uncore.pmic_bus_access_nb);

	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev_priv);
	intel_uncore_forcewake_reset(dev_priv, false);
}
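
/*
 * Whitelist for the I915_REG_READ ioctl: userspace may read only registers
 * listed here, matched on generation and on an offset aligned to the entry
 * size. Currently that is just the render ring timestamp, with an optional
 * upper dword for the 64-bit workaround read (I915_REG_READ_8B_WA).
 */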
3fd3a6ff JL |
1326 | static const struct reg_whitelist { |
1327 | i915_reg_t offset_ldw; | |
1328 | i915_reg_t offset_udw; | |
1329 | u16 gen_mask; | |
1330 | u8 size; | |
1331 | } reg_read_whitelist[] = { { | |
1332 | .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE), | |
1333 | .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE), | |
1334 | .gen_mask = INTEL_GEN_MASK(4, 10), | |
1335 | .size = 8 | |
1336 | } }; | |
907b28c5 CW |
1337 | |
1338 | int i915_reg_read_ioctl(struct drm_device *dev, | |
1339 | void *data, struct drm_file *file) | |
1340 | { | |
fac5e23e | 1341 | struct drm_i915_private *dev_priv = to_i915(dev); |
907b28c5 | 1342 | struct drm_i915_reg_read *reg = data; |
3fd3a6ff JL |
1343 | struct reg_whitelist const *entry; |
1344 | unsigned int flags; | |
1345 | int remain; | |
1346 | int ret = 0; | |
1347 | ||
1348 | entry = reg_read_whitelist; | |
1349 | remain = ARRAY_SIZE(reg_read_whitelist); | |
1350 | while (remain) { | |
1351 | u32 entry_offset = i915_mmio_reg_offset(entry->offset_ldw); | |
1352 | ||
1353 | GEM_BUG_ON(!is_power_of_2(entry->size)); | |
1354 | GEM_BUG_ON(entry->size > 8); | |
1355 | GEM_BUG_ON(entry_offset & (entry->size - 1)); | |
1356 | ||
1357 | if (INTEL_INFO(dev_priv)->gen_mask & entry->gen_mask && | |
1358 | entry_offset == (reg->offset & -entry->size)) | |
907b28c5 | 1359 | break; |
3fd3a6ff JL |
1360 | entry++; |
1361 | remain--; | |
907b28c5 CW |
1362 | } |
1363 | ||
3fd3a6ff | 1364 | if (!remain) |
907b28c5 CW |
1365 | return -EINVAL; |
1366 | ||
3fd3a6ff | 1367 | flags = reg->offset & (entry->size - 1); |
648a9bc5 | 1368 | |
cf67c70f | 1369 | intel_runtime_pm_get(dev_priv); |
3fd3a6ff JL |
1370 | if (entry->size == 8 && flags == I915_REG_READ_8B_WA) |
1371 | reg->val = I915_READ64_2x32(entry->offset_ldw, | |
1372 | entry->offset_udw); | |
1373 | else if (entry->size == 8 && flags == 0) | |
1374 | reg->val = I915_READ64(entry->offset_ldw); | |
1375 | else if (entry->size == 4 && flags == 0) | |
1376 | reg->val = I915_READ(entry->offset_ldw); | |
1377 | else if (entry->size == 2 && flags == 0) | |
1378 | reg->val = I915_READ16(entry->offset_ldw); | |
1379 | else if (entry->size == 1 && flags == 0) | |
1380 | reg->val = I915_READ8(entry->offset_ldw); | |
1381 | else | |
cf67c70f | 1382 | ret = -EINVAL; |
cf67c70f | 1383 | intel_runtime_pm_put(dev_priv); |
3fd3a6ff | 1384 | |
cf67c70f | 1385 | return ret; |
907b28c5 CW |
1386 | } |
1387 | ||
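/*
 * A hedged userspace sketch (not part of this file): exercising the
 * whitelist above via DRM_IOCTL_I915_REG_READ. The offset 0x2358 is
 * RING_TIMESTAMP(RENDER_RING_BASE); the helper name and the open DRM
 * fd are illustrative assumptions.
 */
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int read_render_timestamp(int fd, unsigned long long *ts)
{
	struct drm_i915_reg_read reg = {
		/* the low bits of .offset carry flags; request the
		 * 2x32 workaround read of the 64b timestamp */
		.offset = 0x2358 | I915_REG_READ_8B_WA,
	};

	if (ioctl(fd, DRM_IOCTL_I915_REG_READ, &reg))
		return -1; /* errno holds the failure */

	*ts = reg.val;
	return 0;
}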
87de8d56 MK |
1388 | static void gen3_stop_engine(struct intel_engine_cs *engine) |
1389 | { | |
1390 | struct drm_i915_private *dev_priv = engine->i915; | |
1391 | const u32 base = engine->mmio_base; | |
1392 | const i915_reg_t mode = RING_MI_MODE(base); | |
1393 | ||
1394 | I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING)); | |
1395 | if (intel_wait_for_register_fw(dev_priv, | |
1396 | mode, | |
1397 | MODE_IDLE, | |
1398 | MODE_IDLE, | |
1399 | 500)) | |
1400 | DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n", | |
1401 | engine->name); | |
1402 | ||
1403 | I915_WRITE_FW(RING_CTL(base), 0); | |
1404 | I915_WRITE_FW(RING_HEAD(base), 0); | |
1405 | I915_WRITE_FW(RING_TAIL(base), 0); | |
1406 | ||
1407 | /* The read acts as a posting read to flush the prior writes */ | |
1408 | if (I915_READ_FW(RING_HEAD(base)) != 0) | |
1409 | DRM_DEBUG_DRIVER("%s: ring head not parked\n", | |
1410 | engine->name); | |
1411 | } | |
1412 | ||
1413 | static void i915_stop_engines(struct drm_i915_private *dev_priv, | |
1414 | unsigned engine_mask) | |
2c80353f MK |
1415 | { |
1416 | struct intel_engine_cs *engine; | |
1417 | enum intel_engine_id id; | |
1418 | ||
5896a5c8 CW |
1419 | if (INTEL_GEN(dev_priv) < 3) |
1420 | return; | |
1421 | ||
87de8d56 MK |
1422 | for_each_engine_masked(engine, dev_priv, engine_mask, id) |
1423 | gen3_stop_engine(engine); | |
2c80353f MK |
1424 | } |
1425 | ||
9593a657 | 1426 | static bool i915_reset_complete(struct pci_dev *pdev) |
907b28c5 CW |
1427 | { |
1428 | u8 gdrst; | |
9593a657 | 1429 | |
dc97997a | 1430 | pci_read_config_byte(pdev, I915_GDRST, &gdrst); |
73bbf6bd | 1431 | return (gdrst & GRDOM_RESET_STATUS) == 0; |
907b28c5 CW |
1432 | } |
1433 | ||
dc97997a | 1434 | static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) |
907b28c5 | 1435 | { |
91c8a326 | 1436 | struct pci_dev *pdev = dev_priv->drm.pdev; |
dc97997a | 1437 | |
73bbf6bd | 1438 | /* assert reset for at least 20 usec (the 50-200 us sleep exceeds that) */ |
dc97997a | 1439 | pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE); |
9593a657 | 1440 | usleep_range(50, 200); |
dc97997a | 1441 | pci_write_config_byte(pdev, I915_GDRST, 0); |
907b28c5 | 1442 | |
dc97997a | 1443 | return wait_for(i915_reset_complete(pdev), 500); |
73bbf6bd VS |
1444 | } |
1445 | ||
9593a657 | 1446 | static bool g4x_reset_complete(struct pci_dev *pdev) |
73bbf6bd VS |
1447 | { |
1448 | u8 gdrst; | |
9593a657 | 1449 | |
dc97997a | 1450 | pci_read_config_byte(pdev, I915_GDRST, &gdrst); |
73bbf6bd | 1451 | return (gdrst & GRDOM_RESET_ENABLE) == 0; |
907b28c5 CW |
1452 | } |
1453 | ||
dc97997a | 1454 | static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) |
408d4b9e | 1455 | { |
91c8a326 | 1456 | struct pci_dev *pdev = dev_priv->drm.pdev; |
9593a657 | 1457 | |
dc97997a CW |
1458 | pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE); |
1459 | return wait_for(g4x_reset_complete(pdev), 500); | |
408d4b9e VS |
1460 | } |
1461 | ||
dc97997a | 1462 | static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) |
fa4f53c4 | 1463 | { |
91c8a326 | 1464 | struct pci_dev *pdev = dev_priv->drm.pdev; |
fa4f53c4 VS |
1465 | int ret; |
1466 | ||
fa4f53c4 | 1467 | /* WaVcpClkGateDisableForMediaReset:ctg,elk */ |
44e1e7ba CW |
1468 | I915_WRITE(VDECCLK_GATE_D, |
1469 | I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE); | |
fa4f53c4 VS |
1470 | POSTING_READ(VDECCLK_GATE_D); |
1471 | ||
dc97997a | 1472 | pci_write_config_byte(pdev, I915_GDRST, |
fa4f53c4 | 1473 | GRDOM_MEDIA | GRDOM_RESET_ENABLE); |
dc97997a | 1474 | ret = wait_for(g4x_reset_complete(pdev), 500); |
9593a657 CW |
1475 | if (ret) { |
1476 | DRM_DEBUG_DRIVER("Wait for media reset failed\n"); | |
44e1e7ba | 1477 | goto out; |
9593a657 | 1478 | } |
fa4f53c4 | 1479 | |
44e1e7ba CW |
1480 | pci_write_config_byte(pdev, I915_GDRST, |
1481 | GRDOM_RENDER | GRDOM_RESET_ENABLE); | |
1482 | ret = wait_for(g4x_reset_complete(pdev), 500); | |
1483 | if (ret) { | |
1484 | DRM_DEBUG_DRIVER("Wait for render reset failed\n"); | |
1485 | goto out; | |
1486 | } | |
fa4f53c4 | 1487 | |
9593a657 | 1488 | out: |
dc97997a | 1489 | pci_write_config_byte(pdev, I915_GDRST, 0); |
44e1e7ba CW |
1490 | |
1491 | I915_WRITE(VDECCLK_GATE_D, | |
1492 | I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE); | |
1493 | POSTING_READ(VDECCLK_GATE_D); | |
1494 | ||
9593a657 | 1495 | return ret; |
fa4f53c4 VS |
1496 | } |
1497 | ||
dc97997a CW |
1498 | static int ironlake_do_reset(struct drm_i915_private *dev_priv, |
1499 | unsigned engine_mask) | |
907b28c5 | 1500 | { |
907b28c5 CW |
1501 | int ret; |
1502 | ||
9593a657 | 1503 | I915_WRITE(ILK_GDSR, ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE); |
87273b71 CW |
1504 | ret = intel_wait_for_register(dev_priv, |
1505 | ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0, | |
1506 | 500); | |
9593a657 CW |
1507 | if (ret) { |
1508 | DRM_DEBUG_DRIVER("Wait for render reset failed\n"); | |
1509 | goto out; | |
1510 | } | |
907b28c5 | 1511 | |
9593a657 | 1512 | I915_WRITE(ILK_GDSR, ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE); |
87273b71 CW |
1513 | ret = intel_wait_for_register(dev_priv, |
1514 | ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0, | |
1515 | 500); | |
9593a657 CW |
1516 | if (ret) { |
1517 | DRM_DEBUG_DRIVER("Wait for media reset failed\n"); | |
1518 | goto out; | |
1519 | } | |
9aa7250f | 1520 | |
9593a657 | 1521 | out: |
c039b7f2 | 1522 | I915_WRITE(ILK_GDSR, 0); |
9593a657 CW |
1523 | POSTING_READ(ILK_GDSR); |
1524 | return ret; | |
907b28c5 CW |
1525 | } |
1526 | ||
ee4b6faf MK |
1527 | /* Reset the hardware domains (GENX_GRDOM_*) specified by mask */ |
1528 | static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv, | |
1529 | u32 hw_domain_mask) | |
907b28c5 | 1530 | { |
9593a657 CW |
1531 | int err; |
1532 | ||
907b28c5 CW |
1533 | /* GEN6_GDRST is not in the gt power well, so there is no need | |
1534 | * to check for fifo space for the write or to forcewake the | |
1535 | * chip for the read | |
1536 | */ | |
ee4b6faf | 1537 | __raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask); |
907b28c5 | 1538 | |
a3662830 | 1539 | /* Wait for the device to ack the reset requests */ |
9593a657 | 1540 | err = intel_wait_for_register_fw(dev_priv, |
4a17fe13 CW |
1541 | GEN6_GDRST, hw_domain_mask, 0, |
1542 | 500); | |
9593a657 CW |
1543 | if (err) |
1544 | DRM_DEBUG_DRIVER("Wait for 0x%08x engines reset failed\n", | |
1545 | hw_domain_mask); | |
1546 | ||
1547 | return err; | |
ee4b6faf MK |
1548 | } |
1549 | ||
1550 | /** | |
1551 | * gen6_reset_engines - reset individual engines | |
dc97997a | 1552 | * @dev_priv: i915 device |
ee4b6faf MK |
1553 | * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset |
1554 | * | |
1555 | * This function will reset the individual engines that are set in engine_mask. | |
1556 | * If you provide ALL_ENGINES as the mask, a full global domain reset will be issued. | |
1557 | * | |
1558 | * Note: It is the responsibility of the caller to handle the difference between | |
1559 | * requesting a full domain reset and a reset of all available individual engines. | |
1560 | * | |
1561 | * Returns 0 on success, nonzero on error. | |
1562 | */ | |
dc97997a CW |
1563 | static int gen6_reset_engines(struct drm_i915_private *dev_priv, |
1564 | unsigned engine_mask) | |
ee4b6faf | 1565 | { |
ee4b6faf MK |
1566 | struct intel_engine_cs *engine; |
1567 | const u32 hw_engine_mask[I915_NUM_ENGINES] = { | |
1568 | [RCS] = GEN6_GRDOM_RENDER, | |
1569 | [BCS] = GEN6_GRDOM_BLT, | |
1570 | [VCS] = GEN6_GRDOM_MEDIA, | |
1571 | [VCS2] = GEN8_GRDOM_MEDIA2, | |
1572 | [VECS] = GEN6_GRDOM_VECS, | |
1573 | }; | |
1574 | u32 hw_mask; | |
ee4b6faf MK |
1575 | |
1576 | if (engine_mask == ALL_ENGINES) { | |
1577 | hw_mask = GEN6_GRDOM_FULL; | |
1578 | } else { | |
bafb0fce CW |
1579 | unsigned int tmp; |
1580 | ||
ee4b6faf | 1581 | hw_mask = 0; |
bafb0fce | 1582 | for_each_engine_masked(engine, dev_priv, engine_mask, tmp) |
ee4b6faf MK |
1583 | hw_mask |= hw_engine_mask[engine->id]; |
1584 | } | |
1585 | ||
4055dc75 | 1586 | return gen6_hw_domain_reset(dev_priv, hw_mask); |
907b28c5 CW |
1587 | } |
1588 | ||
1758b90e | 1589 | /** |
1d1a9774 | 1590 | * __intel_wait_for_register_fw - wait until register matches expected state |
1758b90e CW |
1591 | * @dev_priv: the i915 device |
1592 | * @reg: the register to read | |
1593 | * @mask: mask to apply to register value | |
1594 | * @value: expected value | |
1d1a9774 MW |
1595 | * @fast_timeout_us: fast timeout in microseconds for atomic/tight wait | |
1596 | * @slow_timeout_ms: slow timeout in milliseconds | |
1597 | * @out_value: optional placeholder to hold the register value | |
1758b90e CW |
1598 | * |
1599 | * This routine waits until the target register @reg contains the expected | |
3d466cd6 DV |
1600 | * @value after applying the @mask, i.e. it waits until :: |
1601 | * | |
1602 | * (I915_READ_FW(reg) & mask) == value | |
1603 | * | |
1d1a9774 | 1604 | * Otherwise, the wait will time out after @slow_timeout_ms milliseconds. |
6976e74b | 1605 | * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us |
84d84cb7 | 1606 | * must not be larger than 20,000 microseconds. |
1758b90e CW |
1607 | * |
1608 | * Note that this routine assumes the caller holds forcewake asserted, it is | |
1609 | * not suitable for very long waits. See intel_wait_for_register() if you | |
1610 | * wish to wait without holding forcewake for the duration (i.e. you expect | |
1611 | * the wait to be slow). | |
1612 | * | |
1613 | * Returns 0 if the register matches the desired condition, or -ETIMEDOUT. | |
1614 | */ | |
1d1a9774 MW |
1615 | int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv, |
1616 | i915_reg_t reg, | |
3fc7d86b MW |
1617 | u32 mask, |
1618 | u32 value, | |
1619 | unsigned int fast_timeout_us, | |
1620 | unsigned int slow_timeout_ms, | |
1d1a9774 | 1621 | u32 *out_value) |
1758b90e | 1622 | { |
ff26ffa8 | 1623 | u32 uninitialized_var(reg_value); |
1d1a9774 MW |
1624 | #define done (((reg_value = I915_READ_FW(reg)) & mask) == value) |
1625 | int ret; | |
1626 | ||
6976e74b | 1627 | /* Catch any overuse of this function */ |
84d84cb7 CW |
1628 | might_sleep_if(slow_timeout_ms); |
1629 | GEM_BUG_ON(fast_timeout_us > 20000); | |
6976e74b | 1630 | |
84d84cb7 CW |
1631 | ret = -ETIMEDOUT; |
1632 | if (fast_timeout_us && fast_timeout_us <= 20000) | |
1d1a9774 | 1633 | ret = _wait_for_atomic(done, fast_timeout_us, 0); |
ff26ffa8 | 1634 | if (ret && slow_timeout_ms) |
1d1a9774 | 1635 | ret = wait_for(done, slow_timeout_ms); |
84d84cb7 | 1636 | |
1d1a9774 MW |
1637 | if (out_value) |
1638 | *out_value = reg_value; | |
84d84cb7 | 1639 | |
1758b90e CW |
1640 | return ret; |
1641 | #undef done | |
1642 | } | |
1643 | ||
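/*
 * A minimal usage sketch (no such caller exists in this file): poll
 * MI_MODE for idleness from atomic context with forcewake already held,
 * reporting the last-observed value through @out_value on timeout.
 */
static void example_wait_for_idle_fw(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u32 mode;

	if (__intel_wait_for_register_fw(dev_priv,
					 RING_MI_MODE(engine->mmio_base),
					 MODE_IDLE, MODE_IDLE,
					 500, 0, &mode))
		DRM_DEBUG_DRIVER("%s: MI_MODE 0x%08x never reported idle\n",
				 engine->name, mode);
}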
1644 | /** | |
1645 | * intel_wait_for_register - wait until register matches expected state | |
1646 | * @dev_priv: the i915 device | |
1647 | * @reg: the register to read | |
1648 | * @mask: mask to apply to register value | |
1649 | * @value: expected value | |
1650 | * @timeout_ms: timeout in milliseconds | |
1651 | * | |
1652 | * This routine waits until the target register @reg contains the expected | |
3d466cd6 DV |
1653 | * @value after applying the @mask, i.e. it waits until :: |
1654 | * | |
1655 | * (I915_READ(reg) & mask) == value | |
1656 | * | |
1758b90e CW |
1657 | * Otherwise, the wait will time out after @timeout_ms milliseconds. |
1658 | * |
1659 | * Returns 0 if the register matches the desired condition, or -ETIMEDOUT. | |
1660 | */ | |
1661 | int intel_wait_for_register(struct drm_i915_private *dev_priv, | |
1662 | i915_reg_t reg, | |
3fc7d86b MW |
1663 | u32 mask, |
1664 | u32 value, | |
1665 | unsigned int timeout_ms) | |
7fd2d269 | 1666 | { |
1758b90e CW |
1667 | unsigned fw = |
1668 | intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ); | |
1669 | int ret; | |
1670 | ||
05646543 CW |
1671 | might_sleep(); |
1672 | ||
1673 | spin_lock_irq(&dev_priv->uncore.lock); | |
1674 | intel_uncore_forcewake_get__locked(dev_priv, fw); | |
1675 | ||
1676 | ret = __intel_wait_for_register_fw(dev_priv, | |
1677 | reg, mask, value, | |
1678 | 2, 0, NULL); | |
1679 | ||
1680 | intel_uncore_forcewake_put__locked(dev_priv, fw); | |
1681 | spin_unlock_irq(&dev_priv->uncore.lock); | |
1682 | ||
1758b90e CW |
1683 | if (ret) |
1684 | ret = wait_for((I915_READ_NOTRACE(reg) & mask) == value, | |
1685 | timeout_ms); | |
1686 | ||
1687 | return ret; | |
d431440c TE |
1688 | } |
1689 | ||
e3895af8 | 1690 | static int gen8_reset_engine_start(struct intel_engine_cs *engine) |
d431440c | 1691 | { |
c033666a | 1692 | struct drm_i915_private *dev_priv = engine->i915; |
d431440c | 1693 | int ret; |
d431440c TE |
1694 | |
1695 | I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base), | |
1696 | _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET)); | |
1697 | ||
1758b90e CW |
1698 | ret = intel_wait_for_register_fw(dev_priv, |
1699 | RING_RESET_CTL(engine->mmio_base), | |
1700 | RESET_CTL_READY_TO_RESET, | |
1701 | RESET_CTL_READY_TO_RESET, | |
1702 | 700); | |
d431440c TE |
1703 | if (ret) |
1704 | DRM_ERROR("%s: reset request timeout\n", engine->name); | |
1705 | ||
1706 | return ret; | |
1707 | } | |
1708 | ||
e3895af8 | 1709 | static void gen8_reset_engine_cancel(struct intel_engine_cs *engine) |
d431440c | 1710 | { |
c033666a | 1711 | struct drm_i915_private *dev_priv = engine->i915; |
d431440c TE |
1712 | |
1713 | I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base), | |
1714 | _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET)); | |
7fd2d269 MK |
1715 | } |
1716 | ||
dc97997a CW |
1717 | static int gen8_reset_engines(struct drm_i915_private *dev_priv, |
1718 | unsigned engine_mask) | |
7fd2d269 | 1719 | { |
7fd2d269 | 1720 | struct intel_engine_cs *engine; |
bafb0fce | 1721 | unsigned int tmp; |
7fd2d269 | 1722 | |
bafb0fce | 1723 | for_each_engine_masked(engine, dev_priv, engine_mask, tmp) |
e3895af8 | 1724 | if (gen8_reset_engine_start(engine)) |
7fd2d269 | 1725 | goto not_ready; |
7fd2d269 | 1726 | |
dc97997a | 1727 | return gen6_reset_engines(dev_priv, engine_mask); |
7fd2d269 MK |
1728 | |
1729 | not_ready: | |
bafb0fce | 1730 | for_each_engine_masked(engine, dev_priv, engine_mask, tmp) |
e3895af8 | 1731 | gen8_reset_engine_cancel(engine); |
7fd2d269 MK |
1732 | |
1733 | return -EIO; | |
1734 | } | |
1735 | ||
dc97997a CW |
1736 | typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask); |
1737 | ||
1738 | static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv) | |
907b28c5 | 1739 | { |
4f044a88 | 1740 | if (!i915_modparams.reset) |
b1330fbb CW |
1741 | return NULL; |
1742 | ||
dc97997a | 1743 | if (INTEL_INFO(dev_priv)->gen >= 8) |
ee4b6faf | 1744 | return gen8_reset_engines; |
dc97997a | 1745 | else if (INTEL_INFO(dev_priv)->gen >= 6) |
ee4b6faf | 1746 | return gen6_reset_engines; |
dc97997a | 1747 | else if (IS_GEN5(dev_priv)) |
49e4d842 | 1748 | return ironlake_do_reset; |
dc97997a | 1749 | else if (IS_G4X(dev_priv)) |
49e4d842 | 1750 | return g4x_do_reset; |
73f67aa8 | 1751 | else if (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) |
49e4d842 | 1752 | return g33_do_reset; |
dc97997a | 1753 | else if (INTEL_INFO(dev_priv)->gen >= 3) |
49e4d842 | 1754 | return i915_do_reset; |
542c184f | 1755 | else |
49e4d842 CW |
1756 | return NULL; |
1757 | } | |
1758 | ||
dc97997a | 1759 | int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) |
49e4d842 | 1760 | { |
5896a5c8 | 1761 | reset_func reset = intel_get_gpu_reset(dev_priv); |
9593a657 | 1762 | int retry; |
99106bc1 | 1763 | int ret; |
49e4d842 | 1764 | |
9593a657 CW |
1765 | might_sleep(); |
1766 | ||
99106bc1 MK |
1767 | /* If the power well sleeps during the reset, the reset |
1768 | * request may be dropped and never complete (causing -EIO). | |
1769 | */ | |
1770 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); | |
9593a657 | 1771 | for (retry = 0; retry < 3; retry++) { |
87de8d56 MK |
1772 | |
1773 | /* We stop the engines, otherwise we might get a failed reset and a | |
1774 | * dead gpu (on elk). Even a gpu as modern as kbl can suffer a | |
1775 | * system hang if a batchbuffer is still progressing when | |
1776 | * the reset is issued, regardless of the READY_TO_RESET ack. | |
1777 | * Thus assume it is best to stop the engines on all gens | |
1778 | * where we have a gpu reset. | |
1779 | * | |
1780 | * WaMediaResetMainRingCleanup:ctg,elk (presumably) | |
1781 | * | |
1782 | * FIXME: Wa for more modern gens needs to be validated | |
1783 | */ | |
1784 | i915_stop_engines(dev_priv, engine_mask); | |
1785 | ||
5896a5c8 CW |
1786 | ret = -ENODEV; |
1787 | if (reset) | |
1788 | ret = reset(dev_priv, engine_mask); | |
9593a657 CW |
1789 | if (ret != -ETIMEDOUT) |
1790 | break; | |
1791 | ||
1792 | cond_resched(); | |
1793 | } | |
99106bc1 MK |
1794 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); |
1795 | ||
1796 | return ret; | |
49e4d842 CW |
1797 | } |
1798 | ||
dc97997a | 1799 | bool intel_has_gpu_reset(struct drm_i915_private *dev_priv) |
49e4d842 | 1800 | { |
dc97997a | 1801 | return intel_get_gpu_reset(dev_priv) != NULL; |
907b28c5 CW |
1802 | } |
1803 | ||
142bc7d9 MT |
1804 | /* |
1805 | * When GuC submission is enabled, the GuC manages the ELSP and can initiate | |
1806 | * an engine reset too. For now, fall back to a full GPU reset in that case. | |
1807 | */ | |
1808 | bool intel_has_reset_engine(struct drm_i915_private *dev_priv) | |
1809 | { | |
1810 | return (dev_priv->info.has_reset_engine && | |
1811 | !dev_priv->guc.execbuf_client && | |
4f044a88 | 1812 | i915_modparams.reset >= 2); |
142bc7d9 MT |
1813 | } |
1814 | ||
6b332fa2 AS |
1815 | int intel_guc_reset(struct drm_i915_private *dev_priv) |
1816 | { | |
1817 | int ret; | |
6b332fa2 | 1818 | |
1a3d1898 | 1819 | if (!HAS_GUC(dev_priv)) |
6b332fa2 AS |
1820 | return -EINVAL; |
1821 | ||
1822 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); | |
6b332fa2 | 1823 | ret = gen6_hw_domain_reset(dev_priv, GEN9_GRDOM_GUC); |
6b332fa2 AS |
1824 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); |
1825 | ||
1826 | return ret; | |
1827 | } | |
1828 | ||
fc97618b | 1829 | bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv) |
907b28c5 | 1830 | { |
fc97618b | 1831 | return check_for_unclaimed_mmio(dev_priv); |
907b28c5 | 1832 | } |
75714940 | 1833 | |
bc3b9346 | 1834 | bool |
75714940 MK |
1835 | intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv) |
1836 | { | |
4f044a88 | 1837 | if (unlikely(i915_modparams.mmio_debug || |
75714940 | 1838 | dev_priv->uncore.unclaimed_mmio_check <= 0)) |
bc3b9346 | 1839 | return false; |
75714940 MK |
1840 | |
1841 | if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) { | |
1842 | DRM_DEBUG("Unclaimed register detected, " | |
1843 | "enabling oneshot unclaimed register reporting. " | |
1844 | "Please use i915.mmio_debug=N for more information.\n"); | |
4f044a88 | 1845 | i915_modparams.mmio_debug++; |
75714940 | 1846 | dev_priv->uncore.unclaimed_mmio_check--; |
bc3b9346 | 1847 | return true; |
75714940 | 1848 | } |
bc3b9346 MK |
1849 | |
1850 | return false; | |
75714940 | 1851 | } |
3756685a TU |
1852 | |
1853 | static enum forcewake_domains | |
1854 | intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv, | |
1855 | i915_reg_t reg) | |
1856 | { | |
895833bd | 1857 | u32 offset = i915_mmio_reg_offset(reg); |
3756685a TU |
1858 | enum forcewake_domains fw_domains; |
1859 | ||
895833bd TU |
1860 | if (HAS_FWTABLE(dev_priv)) { |
1861 | fw_domains = __fwtable_reg_read_fw_domains(offset); | |
1862 | } else if (INTEL_GEN(dev_priv) >= 6) { | |
1863 | fw_domains = __gen6_reg_read_fw_domains(offset); | |
1864 | } else { | |
1865 | WARN_ON(!IS_GEN(dev_priv, 2, 5)); | |
1866 | fw_domains = 0; | |
3756685a TU |
1867 | } |
1868 | ||
1869 | WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains); | |
1870 | ||
1871 | return fw_domains; | |
1872 | } | |
1873 | ||
1874 | static enum forcewake_domains | |
1875 | intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv, | |
1876 | i915_reg_t reg) | |
1877 | { | |
22d48c55 | 1878 | u32 offset = i915_mmio_reg_offset(reg); |
3756685a TU |
1879 | enum forcewake_domains fw_domains; |
1880 | ||
22d48c55 TU |
1881 | if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) { |
1882 | fw_domains = __fwtable_reg_write_fw_domains(offset); | |
1883 | } else if (IS_GEN8(dev_priv)) { | |
1884 | fw_domains = __gen8_reg_write_fw_domains(offset); | |
1885 | } else if (IS_GEN(dev_priv, 6, 7)) { | |
3756685a | 1886 | fw_domains = FORCEWAKE_RENDER; |
22d48c55 TU |
1887 | } else { |
1888 | WARN_ON(!IS_GEN(dev_priv, 2, 5)); | |
1889 | fw_domains = 0; | |
3756685a TU |
1890 | } |
1891 | ||
1892 | WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains); | |
1893 | ||
1894 | return fw_domains; | |
1895 | } | |
1896 | ||
1897 | /** | |
1898 | * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access | |
1899 | * a register | |
1900 | * @dev_priv: pointer to struct drm_i915_private | |
1901 | * @reg: register in question | |
1902 | * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE | |
1903 | * | |
1904 | * Returns the set of forcewake domains that must be taken (with, for example, | |
1905 | * intel_uncore_forcewake_get()) for the specified register to be accessible in | |
1906 | * the specified mode (read, write or read/write) with the raw mmio accessors. | |
1907 | * | |
1908 | * NOTE: On Gen6 and Gen7 the write forcewake domain (FORCEWAKE_RENDER) requires | |
1909 | * callers to do FIFO management on their own or risk losing writes. | |
1910 | */ | |
1911 | enum forcewake_domains | |
1912 | intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv, | |
1913 | i915_reg_t reg, unsigned int op) | |
1914 | { | |
1915 | enum forcewake_domains fw_domains = 0; | |
1916 | ||
1917 | WARN_ON(!op); | |
1918 | ||
895833bd TU |
1919 | if (intel_vgpu_active(dev_priv)) |
1920 | return 0; | |
1921 | ||
3756685a TU |
1922 | if (op & FW_REG_READ) |
1923 | fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg); | |
1924 | ||
1925 | if (op & FW_REG_WRITE) | |
1926 | fw_domains |= intel_uncore_forcewake_for_write(dev_priv, reg); | |
1927 | ||
1928 | return fw_domains; | |
1929 | } | |
26e7a2a1 CW |
1930 | |
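/*
 * A hedged sketch of the pattern the kerneldoc above describes (the
 * helper is illustrative, not an existing symbol): take exactly the
 * domains the register needs, then use the raw accessors under the
 * uncore spinlock, mirroring intel_wait_for_register().
 */
static u32 example_read_with_explicit_forcewake(struct drm_i915_private *dev_priv,
						i915_reg_t reg)
{
	enum forcewake_domains fw =
		intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
	u32 val;

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_uncore_forcewake_get__locked(dev_priv, fw);
	val = I915_READ_FW(reg);
	intel_uncore_forcewake_put__locked(dev_priv, fw);
	spin_unlock_irq(&dev_priv->uncore.lock);

	return val;
}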
1931 | #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) | |
0757ac8f | 1932 | #include "selftests/mock_uncore.c" |
26e7a2a1 CW |
1933 | #include "selftests/intel_uncore.c" |
1934 | #endif |