/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)
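/*
 * Illustrative expansion (sketch, not driver code): with type=DE the
 * token pasting in GEN5_IRQ_RESET(DE) resolves to the display-engine
 * registers used elsewhere in this file, roughly:
 *
 *	I915_WRITE(DEIMR, 0xffffffff);	mask every source
 *	POSTING_READ(DEIMR);		flush the posted write
 *	I915_WRITE(DEIER, 0);		disable every source
 *	I915_WRITE(DEIIR, 0xffffffff);	clear pending events
 *	POSTING_READ(DEIIR);
 *	I915_WRITE(DEIIR, 0xffffffff);	and clear again, since IIR can
 *	POSTING_READ(DEIIR);		hold a second queued event
 */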
/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
	u32 val = I915_READ(reg); \
	if (val) { \
		WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
		     (reg), val); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
	} \
} while (0)

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	POSTING_READ(GEN8_##type##_IER(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
	I915_WRITE(type##IMR, (imr_val)); \
	I915_WRITE(type##IER, (ier_val)); \
	POSTING_READ(type##IER); \
} while (0)
/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}
/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}
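/*
 * Worked example of the two-mask IMR update convention used throughout
 * this file (bit values illustrative only). A set IMR bit *masks* its
 * interrupt, so ilk_update_gt_irq(dev_priv, 0x6, 0x2) means "touch the
 * 0x6 bits, enable the 0x2 subset":
 *
 *	gt_irq_mask &= ~0x6;		both bits cleared -> unmasked
 *	gt_irq_mask |= (~0x2 & 0x6);	0x4 set again -> masked
 *
 * Net effect: 0x2 ends up enabled, 0x4 ends up disabled, and every bit
 * outside interrupt_mask keeps its previous state.
 */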
/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
		POSTING_READ(GEN6_PMIMR);
	}
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, mask);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}
static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum pipe pipe;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(dev_priv, pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}
/**
 * bdw_update_pm_irq - update GT interrupt 2
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 *
 * Copied from the snb function, updated with relevant register offsets
 */
static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN8_GT_IMR(2), dev_priv->pm_irq_mask);
		POSTING_READ(GEN8_GT_IMR(2));
	}
}

void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	bdw_update_pm_irq(dev_priv, mask, mask);
}

void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	bdw_update_pm_irq(dev_priv, mask, 0);
}
static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(dev_priv, pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}
void i9xx_check_fifo_underruns(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	for_each_intel_crtc(dev, crtc) {
		u32 reg = PIPESTAT(crtc->pipe);
		u32 pipestat;

		if (crtc->cpu_fifo_underrun_disabled)
			continue;

		pipestat = I915_READ(reg) & 0xffff0000;
		if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
			continue;

		I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
		POSTING_READ(reg);

		DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
					     enum pipe pipe,
					     bool enable, bool old)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0xffff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if (enable) {
		I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
		POSTING_READ(reg);
	} else {
		if (old && pipestat & PIPE_FIFO_UNDERRUN_STATUS)
			DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
	}
}
static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						 enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
					  DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}
static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe,
						  bool enable, bool old)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	if (enable) {
		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

		if (!ivb_can_enable_err_int(dev))
			return;

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

		if (old &&
		    I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
			DRM_ERROR("uncleared fifo underrun on pipe %c\n",
				  pipe_name(pipe));
		}
	}
}
static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	assert_spin_locked(&dev_priv->irq_lock);

	if (enable)
		dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
	else
		dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
}
/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 uint32_t interrupt_mask,
					 uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
#define ibx_enable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), 0)
static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

	if (enable)
		ibx_enable_display_interrupt(dev_priv, bit);
	else
		ibx_disable_display_interrupt(dev_priv, bit);
}
static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable, bool old)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(SERR_INT,
			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

		if (!cpt_can_enable_serr_int(dev))
			return;

		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
	} else {
		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

		if (old && I915_READ(SERR_INT) &
		    SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
			DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
				  transcoder_name(pch_transcoder));
		}
	}
}
/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, due to the fact that there's just one interrupt
 * mask/enable bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
						    enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool old;

	assert_spin_locked(&dev_priv->irq_lock);

	old = !intel_crtc->cpu_fifo_underrun_disabled;
	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (HAS_GMCH_DISPLAY(dev))
		i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
	else if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
	else if (IS_GEN8(dev))
		broadwell_set_fifo_underrun_reporting(dev, pipe, enable);

	return old;
}

bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	ret = __intel_set_cpu_fifo_underrun_reporting(dev, pipe, enable);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return ret;
}
static bool __cpu_fifo_underrun_reporting_enabled(struct drm_device *dev,
						  enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	return !intel_crtc->cpu_fifo_underrun_disabled;
}
/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's
 * just one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool old;

	/*
	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
	 * has only one pch transcoder A that all pipes can use. To avoid racy
	 * pch transcoder -> pipe lookups from interrupt code simply store the
	 * underrun statistics in crtc A. Since we never expose this anywhere
	 * nor use it outside of the fifo underrun code here using the "wrong"
	 * crtc on LPT won't cause issues.
	 */

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	old = !intel_crtc->pch_fifo_underrun_disabled;
	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable, old);

	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return old;
}

static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}
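/*
 * PIPESTAT layout reminder (sketch, not driver code): the register packs
 * enable bits in the high 16 bits and the matching status bits in the
 * low 16, which is why the callers below can usually derive the enable
 * mask as "status_mask << 16". Example (bit values illustrative):
 *
 *	status_mask = PIPE_VBLANK_INTERRUPT_STATUS;
 *	enable_mask = status_mask << 16;	the matching enable bit
 *	__i915_enable_pipestat(dev_priv, PIPE_A, enable_mask, status_mask);
 *
 * VLV is the exception, handled by vlv_get_pipestat_enable_mask() below.
 */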
static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}

void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}
/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}
/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}
/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct intel_crtc *intel_crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		const struct drm_display_mode *mode =
			&intel_crtc->config.adjusted_mode;

		htotal = mode->crtc_htotal;
		hsync_start = mode->crtc_hsync_start;
		vbl_start = mode->crtc_vblank_start;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	} else {
		enum transcoder cpu_transcoder = (enum transcoder) pipe;

		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
		hsync_start = (I915_READ(HSYNC(cpu_transcoder)) & 0x1fff) + 1;
		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
		if ((I915_READ(PIPECONF(cpu_transcoder)) &
		     PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	}

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
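/*
 * Worked example of the cook-up above (illustrative numbers only): for
 * htotal=2200, hsync_start=2008 and vbl_start=1080 lines, vbl_start
 * becomes 1080 * 2200 - (2200 - 2008) = 2375808 pixels. Once the pixel
 * counter reads at or past that value, the new frame's vblank has
 * begun, so 1 is added to the frame count latched from PIPEFRAME.
 */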
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}
/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))

static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev))
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (!intel_crtc->active) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	return ret;
}
int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}
static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc,
						     &to_intel_crtc(crtc)->config.adjusted_mode);
}
static bool intel_hpd_irq_event(struct drm_device *dev,
				struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	if (old_status == connector->status)
		return false;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
		      connector->base.id,
		      connector->name,
		      drm_get_connector_status_name(old_status),
		      drm_get_connector_status_name(connector->status));

	return true;
}
static void i915_digport_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, dig_port_work);
	unsigned long irqflags;
	u32 long_port_mask, short_port_mask;
	struct intel_digital_port *intel_dig_port;
	int i, ret;
	u32 old_bits = 0;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	long_port_mask = dev_priv->long_hpd_port_mask;
	dev_priv->long_hpd_port_mask = 0;
	short_port_mask = dev_priv->short_hpd_port_mask;
	dev_priv->short_hpd_port_mask = 0;
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	for (i = 0; i < I915_MAX_PORTS; i++) {
		bool valid = false;
		bool long_hpd = false;
		intel_dig_port = dev_priv->hpd_irq_port[i];
		if (!intel_dig_port || !intel_dig_port->hpd_pulse)
			continue;

		if (long_port_mask & (1 << i))  {
			valid = true;
			long_hpd = true;
		} else if (short_port_mask & (1 << i))
			valid = true;

		if (valid) {
			ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
			if (ret == true) {
				/* if we get true fallback to old school hpd */
				old_bits |= (1 << intel_dig_port->base.hpd_pin);
			}
		}
	}

	if (old_bits) {
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		dev_priv->hpd_event_bits |= old_bits;
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
		schedule_work(&dev_priv->hotplug_work);
	}
}
/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	unsigned long irqflags;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 connector->name);
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      connector->name, intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
				 msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}
static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}
static void notify_ring(struct drm_device *dev,
			struct intel_engine_cs *ring)
{
	if (!intel_ring_initialized(ring))
		return;

	trace_i915_gem_request_complete(ring);

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		intel_notify_mmio_flip(ring);

	wake_up_all(&ring->irq_queue);
	i915_queue_hangcheck(dev);
}
static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
			    struct intel_rps_ei *rps_ei)
{
	u32 cz_ts, cz_freq_khz;
	u32 render_count, media_count;
	u32 elapsed_render, elapsed_media, elapsed_time;
	u32 residency = 0;

	cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);

	render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
	media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);

	if (rps_ei->cz_clock == 0) {
		rps_ei->cz_clock = cz_ts;
		rps_ei->render_c0 = render_count;
		rps_ei->media_c0 = media_count;

		return dev_priv->rps.cur_freq;
	}

	elapsed_time = cz_ts - rps_ei->cz_clock;
	rps_ei->cz_clock = cz_ts;

	elapsed_render = render_count - rps_ei->render_c0;
	rps_ei->render_c0 = render_count;

	elapsed_media = media_count - rps_ei->media_c0;
	rps_ei->media_c0 = media_count;

	/* Convert all the counters into common unit of milli sec */
	elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
	elapsed_render /= cz_freq_khz;
	elapsed_media /= cz_freq_khz;

	/*
	 * Calculate overall C0 residency percentage
	 * only if elapsed time is non zero
	 */
	if (elapsed_time) {
		residency =
			((max(elapsed_render, elapsed_media) * 100)
			 / elapsed_time);
	}

	return residency;
}
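/*
 * Worked example of the residency math above (illustrative numbers
 * only): once all counters are scaled to milliseconds, an interval with
 * elapsed_time = 10 and the busier of the two wells (render vs. media)
 * at 7 reports max(7, ...) * 100 / 10 = 70% C0 residency for that
 * evaluation interval.
 */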
1319 | /** | |
1320 | * vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on GPU | |
1321 | * busy-ness calculated from C0 counters of render & media power wells | |
1322 | * @dev_priv: DRM device private | |
1323 | * | |
1324 | */ | |
4fa79042 | 1325 | static int vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv) |
31685c25 D |
1326 | { |
1327 | u32 residency_C0_up = 0, residency_C0_down = 0; | |
4fa79042 | 1328 | int new_delay, adj; |
31685c25 D |
1329 | |
1330 | dev_priv->rps.ei_interrupt_count++; | |
1331 | ||
1332 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); | |
1333 | ||
1334 | ||
bf225f20 CW |
1335 | if (dev_priv->rps.up_ei.cz_clock == 0) { |
1336 | vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei); | |
1337 | vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei); | |
31685c25 D |
1338 | return dev_priv->rps.cur_freq; |
1339 | } | |
1340 | ||
1341 | ||
1342 | /* | |
1343 | * To down throttle, C0 residency should be less than down threshold | |
1344 | * for continous EI intervals. So calculate down EI counters | |
1345 | * once in VLV_INT_COUNT_FOR_DOWN_EI | |
1346 | */ | |
1347 | if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) { | |
1348 | ||
1349 | dev_priv->rps.ei_interrupt_count = 0; | |
1350 | ||
1351 | residency_C0_down = vlv_c0_residency(dev_priv, | |
bf225f20 | 1352 | &dev_priv->rps.down_ei); |
31685c25 D |
1353 | } else { |
1354 | residency_C0_up = vlv_c0_residency(dev_priv, | |
bf225f20 | 1355 | &dev_priv->rps.up_ei); |
31685c25 D |
1356 | } |
1357 | ||
1358 | new_delay = dev_priv->rps.cur_freq; | |
1359 | ||
1360 | adj = dev_priv->rps.last_adj; | |
1361 | /* C0 residency is greater than UP threshold. Increase Frequency */ | |
1362 | if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) { | |
1363 | if (adj > 0) | |
1364 | adj *= 2; | |
1365 | else | |
1366 | adj = 1; | |
1367 | ||
1368 | if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit) | |
1369 | new_delay = dev_priv->rps.cur_freq + adj; | |
1370 | ||
1371 | /* | |
1372 | * For better performance, jump directly | |
1373 | * to RPe if we're below it. | |
1374 | */ | |
1375 | if (new_delay < dev_priv->rps.efficient_freq) | |
1376 | new_delay = dev_priv->rps.efficient_freq; | |
1377 | ||
1378 | } else if (!dev_priv->rps.ei_interrupt_count && | |
1379 | (residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) { | |
1380 | if (adj < 0) | |
1381 | adj *= 2; | |
1382 | else | |
1383 | adj = -1; | |
1384 | /* | |
1385 | * This means C0 residency stayed below the down threshold over a | |
1386 | * period of VLV_INT_COUNT_FOR_DOWN_EI intervals, so reduce the freq. | |
1387 | */ | |
1388 | if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit) | |
1389 | new_delay = dev_priv->rps.cur_freq + adj; | |
1390 | } | |
1391 | ||
1392 | return new_delay; | |
1393 | } | |
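/*
 * Worked example (illustrative): starting from last_adj == 0, three
 * consecutive up-threshold evaluations step the frequency by +1, +2 and
 * then +4, while the first down-threshold evaluation afterwards resets
 * the step to -1. The doubling lets sustained load converge quickly
 * without overshooting on a single noisy sample.
 */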
1394 | ||
4912d041 | 1395 | static void gen6_pm_rps_work(struct work_struct *work) |
3b8d8d91 | 1396 | { |
2d1013dd JN |
1397 | struct drm_i915_private *dev_priv = |
1398 | container_of(work, struct drm_i915_private, rps.work); | |
edbfdb45 | 1399 | u32 pm_iir; |
dd75fdc8 | 1400 | int new_delay, adj; |
4912d041 | 1401 | |
59cdb63d | 1402 | spin_lock_irq(&dev_priv->irq_lock); |
c6a828d3 DV |
1403 | pm_iir = dev_priv->rps.pm_iir; |
1404 | dev_priv->rps.pm_iir = 0; | |
6af257cd | 1405 | if (INTEL_INFO(dev_priv->dev)->gen >= 8) |
480c8033 | 1406 | gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); |
0961021a BW |
1407 | else { |
1408 | /* Make sure not to corrupt PMIMR state used by ringbuffer */ | |
480c8033 | 1409 | gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); |
0961021a | 1410 | } |
59cdb63d | 1411 | spin_unlock_irq(&dev_priv->irq_lock); |
3b8d8d91 | 1412 | |
60611c13 | 1413 | /* Make sure we didn't queue anything we're not going to process. */ |
a6706b45 | 1414 | WARN_ON(pm_iir & ~dev_priv->pm_rps_events); |
60611c13 | 1415 | |
a6706b45 | 1416 | if ((pm_iir & dev_priv->pm_rps_events) == 0) |
3b8d8d91 JB |
1417 | return; |
1418 | ||
4fc688ce | 1419 | mutex_lock(&dev_priv->rps.hw_lock); |
7b9e0ae6 | 1420 | |
dd75fdc8 | 1421 | adj = dev_priv->rps.last_adj; |
7425034a | 1422 | if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { |
dd75fdc8 CW |
1423 | if (adj > 0) |
1424 | adj *= 2; | |
13a5660c D |
1425 | else { |
1426 | /* CHV needs even encode values */ | |
1427 | adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1; | |
1428 | } | |
b39fb297 | 1429 | new_delay = dev_priv->rps.cur_freq + adj; |
7425034a VS |
1430 | |
1431 | /* | |
1432 | * For better performance, jump directly | |
1433 | * to RPe if we're below it. | |
1434 | */ | |
b39fb297 BW |
1435 | if (new_delay < dev_priv->rps.efficient_freq) |
1436 | new_delay = dev_priv->rps.efficient_freq; | |
dd75fdc8 | 1437 | } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) { |
b39fb297 BW |
1438 | if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq) |
1439 | new_delay = dev_priv->rps.efficient_freq; | |
dd75fdc8 | 1440 | else |
b39fb297 | 1441 | new_delay = dev_priv->rps.min_freq_softlimit; |
dd75fdc8 | 1442 | adj = 0; |
31685c25 D |
1443 | } else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) { |
1444 | new_delay = vlv_calc_delay_from_C0_counters(dev_priv); | |
dd75fdc8 CW |
1445 | } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { |
1446 | if (adj < 0) | |
1447 | adj *= 2; | |
13a5660c D |
1448 | else { |
1449 | /* CHV needs even encode values */ | |
1450 | adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1; | |
1451 | } | |
b39fb297 | 1452 | new_delay = dev_priv->rps.cur_freq + adj; |
dd75fdc8 | 1453 | } else { /* unknown event */ |
b39fb297 | 1454 | new_delay = dev_priv->rps.cur_freq; |
dd75fdc8 | 1455 | } |
3b8d8d91 | 1456 | |
79249636 BW |
1457 | /* sysfs frequency interfaces may have snuck in while servicing the |
1458 | * interrupt | |
1459 | */ | |
1272e7b8 | 1460 | new_delay = clamp_t(int, new_delay, |
b39fb297 BW |
1461 | dev_priv->rps.min_freq_softlimit, |
1462 | dev_priv->rps.max_freq_softlimit); | |
27544369 | 1463 | |
b39fb297 | 1464 | dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq; |
dd75fdc8 CW |
1465 | |
1466 | if (IS_VALLEYVIEW(dev_priv->dev)) | |
1467 | valleyview_set_rps(dev_priv->dev, new_delay); | |
1468 | else | |
1469 | gen6_set_rps(dev_priv->dev, new_delay); | |
3b8d8d91 | 1470 | |
4fc688ce | 1471 | mutex_unlock(&dev_priv->rps.hw_lock); |
3b8d8d91 JB |
1472 | } |
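/*
 * Clamping example (illustrative): if cur_freq already sits at
 * max_freq_softlimit and another up event arrives, clamp_t() pulls
 * new_delay back to the soft limit, last_adj becomes 0, and the next
 * event restarts from a +/-1 (or +/-2 on CHV) step instead of a
 * doubled one.
 */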
1473 | ||
e3689190 BW |
1474 | |
1475 | /** | |
1476 | * ivybridge_parity_work - Workqueue called when a parity error interrupt | |
1477 | * occurred. | |
1478 | * @work: workqueue struct | |
1479 | * | |
1480 | * Doesn't actually do anything except notify userspace. As a consequence of | |
1481 | * this event, userspace should try to remap the bad rows since statistically | |
1482 | * the same row is likely to go bad again. | |
1483 | */ | |
1484 | static void ivybridge_parity_work(struct work_struct *work) | |
1485 | { | |
2d1013dd JN |
1486 | struct drm_i915_private *dev_priv = |
1487 | container_of(work, struct drm_i915_private, l3_parity.error_work); | |
e3689190 | 1488 | u32 error_status, row, bank, subbank; |
35a85ac6 | 1489 | char *parity_event[6]; |
e3689190 BW |
1490 | uint32_t misccpctl; |
1491 | unsigned long flags; | |
35a85ac6 | 1492 | uint8_t slice = 0; |
e3689190 BW |
1493 | |
1494 | /* We must turn off DOP level clock gating to access the L3 registers. | |
1495 | * In order to prevent a get/put style interface, acquire struct mutex | |
1496 | * any time we access those registers. | |
1497 | */ | |
1498 | mutex_lock(&dev_priv->dev->struct_mutex); | |
1499 | ||
35a85ac6 BW |
1500 | /* If we've screwed up tracking, just let the interrupt fire again */ |
1501 | if (WARN_ON(!dev_priv->l3_parity.which_slice)) | |
1502 | goto out; | |
1503 | ||
e3689190 BW |
1504 | misccpctl = I915_READ(GEN7_MISCCPCTL); |
1505 | I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); | |
1506 | POSTING_READ(GEN7_MISCCPCTL); | |
1507 | ||
35a85ac6 BW |
1508 | while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) { |
1509 | u32 reg; | |
e3689190 | 1510 | |
35a85ac6 BW |
1511 | slice--; |
1512 | if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev))) | |
1513 | break; | |
e3689190 | 1514 | |
35a85ac6 | 1515 | dev_priv->l3_parity.which_slice &= ~(1<<slice); |
e3689190 | 1516 | |
35a85ac6 | 1517 | reg = GEN7_L3CDERRST1 + (slice * 0x200); |
e3689190 | 1518 | |
35a85ac6 BW |
1519 | error_status = I915_READ(reg); |
1520 | row = GEN7_PARITY_ERROR_ROW(error_status); | |
1521 | bank = GEN7_PARITY_ERROR_BANK(error_status); | |
1522 | subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); | |
1523 | ||
1524 | I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE); | |
1525 | POSTING_READ(reg); | |
1526 | ||
1527 | parity_event[0] = I915_L3_PARITY_UEVENT "=1"; | |
1528 | parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); | |
1529 | parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); | |
1530 | parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); | |
1531 | parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice); | |
1532 | parity_event[5] = NULL; | |
1533 | ||
5bdebb18 | 1534 | kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj, |
35a85ac6 | 1535 | KOBJ_CHANGE, parity_event); |
e3689190 | 1536 | |
35a85ac6 BW |
1537 | DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n", |
1538 | slice, row, bank, subbank); | |
e3689190 | 1539 | |
35a85ac6 BW |
1540 | kfree(parity_event[4]); |
1541 | kfree(parity_event[3]); | |
1542 | kfree(parity_event[2]); | |
1543 | kfree(parity_event[1]); | |
1544 | } | |
e3689190 | 1545 | |
35a85ac6 | 1546 | I915_WRITE(GEN7_MISCCPCTL, misccpctl); |
e3689190 | 1547 | |
35a85ac6 BW |
1548 | out: |
1549 | WARN_ON(dev_priv->l3_parity.which_slice); | |
1550 | spin_lock_irqsave(&dev_priv->irq_lock, flags); | |
480c8033 | 1551 | gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev)); |
35a85ac6 BW |
1552 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); |
1553 | ||
1554 | mutex_unlock(&dev_priv->dev->struct_mutex); | |
e3689190 BW |
1555 | } |
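/*
 * The uevent emitted above carries an environment of the form
 * I915_L3_PARITY_UEVENT=1 plus ROW=, BANK=, SUBBANK= and SLICE= keys
 * (values are per-error), which a userspace listener can parse to
 * remap the failing L3 row.
 */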
1556 | ||
35a85ac6 | 1557 | static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir) |
e3689190 | 1558 | { |
2d1013dd | 1559 | struct drm_i915_private *dev_priv = dev->dev_private; |
e3689190 | 1560 | |
040d2baa | 1561 | if (!HAS_L3_DPF(dev)) |
e3689190 BW |
1562 | return; |
1563 | ||
d0ecd7e2 | 1564 | spin_lock(&dev_priv->irq_lock); |
480c8033 | 1565 | gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev)); |
d0ecd7e2 | 1566 | spin_unlock(&dev_priv->irq_lock); |
e3689190 | 1567 | |
35a85ac6 BW |
1568 | iir &= GT_PARITY_ERROR(dev); |
1569 | if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1) | |
1570 | dev_priv->l3_parity.which_slice |= 1 << 1; | |
1571 | ||
1572 | if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) | |
1573 | dev_priv->l3_parity.which_slice |= 1 << 0; | |
1574 | ||
a4da4fa4 | 1575 | queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); |
e3689190 BW |
1576 | } |
1577 | ||
f1af8fc1 PZ |
1578 | static void ilk_gt_irq_handler(struct drm_device *dev, |
1579 | struct drm_i915_private *dev_priv, | |
1580 | u32 gt_iir) | |
1581 | { | |
1582 | if (gt_iir & | |
1583 | (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) | |
1584 | notify_ring(dev, &dev_priv->ring[RCS]); | |
1585 | if (gt_iir & ILK_BSD_USER_INTERRUPT) | |
1586 | notify_ring(dev, &dev_priv->ring[VCS]); | |
1587 | } | |
1588 | ||
e7b4c6b1 DV |
1589 | static void snb_gt_irq_handler(struct drm_device *dev, |
1590 | struct drm_i915_private *dev_priv, | |
1591 | u32 gt_iir) | |
1592 | { | |
1593 | ||
cc609d5d BW |
1594 | if (gt_iir & |
1595 | (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) | |
e7b4c6b1 | 1596 | notify_ring(dev, &dev_priv->ring[RCS]); |
cc609d5d | 1597 | if (gt_iir & GT_BSD_USER_INTERRUPT) |
e7b4c6b1 | 1598 | notify_ring(dev, &dev_priv->ring[VCS]); |
cc609d5d | 1599 | if (gt_iir & GT_BLT_USER_INTERRUPT) |
e7b4c6b1 DV |
1600 | notify_ring(dev, &dev_priv->ring[BCS]); |
1601 | ||
cc609d5d BW |
1602 | if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | |
1603 | GT_BSD_CS_ERROR_INTERRUPT | | |
1604 | GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) { | |
58174462 MK |
1605 | i915_handle_error(dev, false, "GT error interrupt 0x%08x", |
1606 | gt_iir); | |
e7b4c6b1 | 1607 | } |
e3689190 | 1608 | |
35a85ac6 BW |
1609 | if (gt_iir & GT_PARITY_ERROR(dev)) |
1610 | ivybridge_parity_error_irq_handler(dev, gt_iir); | |
e7b4c6b1 DV |
1611 | } |
1612 | ||
0961021a BW |
1613 | static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) |
1614 | { | |
1615 | if ((pm_iir & dev_priv->pm_rps_events) == 0) | |
1616 | return; | |
1617 | ||
1618 | spin_lock(&dev_priv->irq_lock); | |
1619 | dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events; | |
480c8033 | 1620 | gen8_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); |
0961021a BW |
1621 | spin_unlock(&dev_priv->irq_lock); |
1622 | ||
1623 | queue_work(dev_priv->wq, &dev_priv->rps.work); | |
1624 | } | |
1625 | ||
abd58f01 BW |
1626 | static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, |
1627 | struct drm_i915_private *dev_priv, | |
1628 | u32 master_ctl) | |
1629 | { | |
e981e7b1 | 1630 | struct intel_engine_cs *ring; |
abd58f01 BW |
1631 | u32 rcs, bcs, vcs; |
1632 | uint32_t tmp = 0; | |
1633 | irqreturn_t ret = IRQ_NONE; | |
1634 | ||
1635 | if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { | |
1636 | tmp = I915_READ(GEN8_GT_IIR(0)); | |
1637 | if (tmp) { | |
38cc46d7 | 1638 | I915_WRITE(GEN8_GT_IIR(0), tmp); |
abd58f01 | 1639 | ret = IRQ_HANDLED; |
e981e7b1 | 1640 | |
abd58f01 | 1641 | rcs = tmp >> GEN8_RCS_IRQ_SHIFT; |
e981e7b1 | 1642 | ring = &dev_priv->ring[RCS]; |
abd58f01 | 1643 | if (rcs & GT_RENDER_USER_INTERRUPT) |
e981e7b1 TD |
1644 | notify_ring(dev, ring); |
1645 | if (rcs & GT_CONTEXT_SWITCH_INTERRUPT) | |
1646 | intel_execlists_handle_ctx_events(ring); | |
1647 | ||
1648 | bcs = tmp >> GEN8_BCS_IRQ_SHIFT; | |
1649 | ring = &dev_priv->ring[BCS]; | |
abd58f01 | 1650 | if (bcs & GT_RENDER_USER_INTERRUPT) |
e981e7b1 TD |
1651 | notify_ring(dev, ring); |
1652 | if (bcs & GT_CONTEXT_SWITCH_INTERRUPT) | |
1653 | intel_execlists_handle_ctx_events(ring); | |
abd58f01 BW |
1654 | } else |
1655 | DRM_ERROR("The master control interrupt lied (GT0)!\n"); | |
1656 | } | |
1657 | ||
85f9b5f9 | 1658 | if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { |
abd58f01 BW |
1659 | tmp = I915_READ(GEN8_GT_IIR(1)); |
1660 | if (tmp) { | |
38cc46d7 | 1661 | I915_WRITE(GEN8_GT_IIR(1), tmp); |
abd58f01 | 1662 | ret = IRQ_HANDLED; |
e981e7b1 | 1663 | |
abd58f01 | 1664 | vcs = tmp >> GEN8_VCS1_IRQ_SHIFT; |
e981e7b1 | 1665 | ring = &dev_priv->ring[VCS]; |
abd58f01 | 1666 | if (vcs & GT_RENDER_USER_INTERRUPT) |
e981e7b1 | 1667 | notify_ring(dev, ring); |
73d477f6 | 1668 | if (vcs & GT_CONTEXT_SWITCH_INTERRUPT) |
e981e7b1 TD |
1669 | intel_execlists_handle_ctx_events(ring); |
1670 | ||
85f9b5f9 | 1671 | vcs = tmp >> GEN8_VCS2_IRQ_SHIFT; |
e981e7b1 | 1672 | ring = &dev_priv->ring[VCS2]; |
85f9b5f9 | 1673 | if (vcs & GT_RENDER_USER_INTERRUPT) |
e981e7b1 | 1674 | notify_ring(dev, ring); |
73d477f6 | 1675 | if (vcs & GT_CONTEXT_SWITCH_INTERRUPT) |
e981e7b1 | 1676 | intel_execlists_handle_ctx_events(ring); |
abd58f01 BW |
1677 | } else |
1678 | DRM_ERROR("The master control interrupt lied (GT1)!\n"); | |
1679 | } | |
1680 | ||
0961021a BW |
1681 | if (master_ctl & GEN8_GT_PM_IRQ) { |
1682 | tmp = I915_READ(GEN8_GT_IIR(2)); | |
1683 | if (tmp & dev_priv->pm_rps_events) { | |
0961021a BW |
1684 | I915_WRITE(GEN8_GT_IIR(2), |
1685 | tmp & dev_priv->pm_rps_events); | |
38cc46d7 OM |
1686 | ret = IRQ_HANDLED; |
1687 | gen8_rps_irq_handler(dev_priv, tmp); | |
0961021a BW |
1688 | } else |
1689 | DRM_ERROR("The master control interrupt lied (PM)!\n"); | |
1690 | } | |
1691 | ||
abd58f01 BW |
1692 | if (master_ctl & GEN8_GT_VECS_IRQ) { |
1693 | tmp = I915_READ(GEN8_GT_IIR(3)); | |
1694 | if (tmp) { | |
38cc46d7 | 1695 | I915_WRITE(GEN8_GT_IIR(3), tmp); |
abd58f01 | 1696 | ret = IRQ_HANDLED; |
e981e7b1 | 1697 | |
abd58f01 | 1698 | vcs = tmp >> GEN8_VECS_IRQ_SHIFT; |
e981e7b1 | 1699 | ring = &dev_priv->ring[VECS]; |
abd58f01 | 1700 | if (vcs & GT_RENDER_USER_INTERRUPT) |
e981e7b1 | 1701 | notify_ring(dev, ring); |
73d477f6 | 1702 | if (vcs & GT_CONTEXT_SWITCH_INTERRUPT) |
e981e7b1 | 1703 | intel_execlists_handle_ctx_events(ring); |
abd58f01 BW |
1704 | } else |
1705 | DRM_ERROR("The master control interrupt lied (GT3)!\n"); | |
1706 | } | |
1707 | ||
1708 | return ret; | |
1709 | } | |
1710 | ||
b543fb04 EE |
1711 | #define HPD_STORM_DETECT_PERIOD 1000 |
1712 | #define HPD_STORM_THRESHOLD 5 | |
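/*
 * Storm policy in short: if one pin keeps firing within
 * HPD_STORM_DETECT_PERIOD ms and its event count exceeds
 * HPD_STORM_THRESHOLD, the pin is marked HPD_MARK_DISABLED and
 * hpd_irq_setup() is re-run to mask it (see intel_hpd_irq_handler()
 * below).
 */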
1713 | ||
13cf5504 DA |
1714 | static int ilk_port_to_hotplug_shift(enum port port) |
1715 | { | |
1716 | switch (port) { | |
1717 | case PORT_A: | |
1718 | case PORT_E: | |
1719 | default: | |
1720 | return -1; | |
1721 | case PORT_B: | |
1722 | return 0; | |
1723 | case PORT_C: | |
1724 | return 8; | |
1725 | case PORT_D: | |
1726 | return 16; | |
1727 | } | |
1728 | } | |
1729 | ||
1730 | static int g4x_port_to_hotplug_shift(enum port port) | |
1731 | { | |
1732 | switch (port) { | |
1733 | case PORT_A: | |
1734 | case PORT_E: | |
1735 | default: | |
1736 | return -1; | |
1737 | case PORT_B: | |
1738 | return 17; | |
1739 | case PORT_C: | |
1740 | return 19; | |
1741 | case PORT_D: | |
1742 | return 21; | |
1743 | } | |
1744 | } | |
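/*
 * Example (illustrative): with an ibx/cpt PCH, port C's pulse-width
 * status sits 8 bits up in PCH_PORT_HOTPLUG, so the handler below
 * evaluates (dig_hotplug_reg >> 8) & PORTB_HOTPLUG_LONG_DETECT to
 * tell long pulses from short ones.
 */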
1745 | ||
1746 | static inline enum port get_port_from_pin(enum hpd_pin pin) | |
1747 | { | |
1748 | switch (pin) { | |
1749 | case HPD_PORT_B: | |
1750 | return PORT_B; | |
1751 | case HPD_PORT_C: | |
1752 | return PORT_C; | |
1753 | case HPD_PORT_D: | |
1754 | return PORT_D; | |
1755 | default: | |
1756 | return PORT_A; /* no hpd */ | |
1757 | } | |
1758 | } | |
1759 | ||
10a504de | 1760 | static inline void intel_hpd_irq_handler(struct drm_device *dev, |
22062dba | 1761 | u32 hotplug_trigger, |
13cf5504 | 1762 | u32 dig_hotplug_reg, |
22062dba | 1763 | const u32 *hpd) |
b543fb04 | 1764 | { |
2d1013dd | 1765 | struct drm_i915_private *dev_priv = dev->dev_private; |
b543fb04 | 1766 | int i; |
13cf5504 | 1767 | enum port port; |
10a504de | 1768 | bool storm_detected = false; |
13cf5504 DA |
1769 | bool queue_dig = false, queue_hp = false; |
1770 | u32 dig_shift; | |
1771 | u32 dig_port_mask = 0; | |
b543fb04 | 1772 | |
91d131d2 DV |
1773 | if (!hotplug_trigger) |
1774 | return; | |
1775 | ||
13cf5504 DA |
1776 | DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n", |
1777 | hotplug_trigger, dig_hotplug_reg); | |
cc9bd499 | 1778 | |
b5ea2d56 | 1779 | spin_lock(&dev_priv->irq_lock); |
b543fb04 | 1780 | for (i = 1; i < HPD_NUM_PINS; i++) { |
13cf5504 DA |
1781 | if (!(hpd[i] & hotplug_trigger)) |
1782 | continue; | |
1783 | ||
1784 | port = get_port_from_pin(i); | |
1785 | if (port && dev_priv->hpd_irq_port[port]) { | |
1786 | bool long_hpd; | |
1787 | ||
1788 | if (IS_G4X(dev)) { | |
1789 | dig_shift = g4x_port_to_hotplug_shift(port); | |
1790 | long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT; | |
1791 | } else { | |
1792 | dig_shift = ilk_port_to_hotplug_shift(port); | |
1793 | long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT; | |
1794 | } | |
1795 | ||
26fbb774 VS |
1796 | DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", |
1797 | port_name(port), | |
1798 | long_hpd ? "long" : "short"); | |
13cf5504 DA |
1799 | /* For long HPD pulses we want the digital queue to run, but we | |
1800 | * still want HPD storm detection to function. */ | |
1801 | if (long_hpd) { | |
1802 | dev_priv->long_hpd_port_mask |= (1 << port); | |
1803 | dig_port_mask |= hpd[i]; | |
1804 | } else { | |
1805 | /* for short HPD just trigger the digital queue */ | |
1806 | dev_priv->short_hpd_port_mask |= (1 << port); | |
1807 | hotplug_trigger &= ~hpd[i]; | |
1808 | } | |
1809 | queue_dig = true; | |
1810 | } | |
1811 | } | |
821450c6 | 1812 | |
13cf5504 | 1813 | for (i = 1; i < HPD_NUM_PINS; i++) { |
3ff04a16 DV |
1814 | if (hpd[i] & hotplug_trigger && |
1815 | dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) { | |
1816 | /* | |
1817 | * On GMCH platforms the interrupt mask bits only | |
1818 | * prevent irq generation, not the setting of the | |
1819 | * hotplug bits itself. So only WARN about unexpected | |
1820 | * interrupts on saner platforms. | |
1821 | */ | |
1822 | WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev), | |
1823 | "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n", | |
1824 | hotplug_trigger, i, hpd[i]); | |
1825 | ||
1826 | continue; | |
1827 | } | |
b8f102e8 | 1828 | |
b543fb04 EE |
1829 | if (!(hpd[i] & hotplug_trigger) || |
1830 | dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED) | |
1831 | continue; | |
1832 | ||
13cf5504 DA |
1833 | if (!(dig_port_mask & hpd[i])) { |
1834 | dev_priv->hpd_event_bits |= (1 << i); | |
1835 | queue_hp = true; | |
1836 | } | |
1837 | ||
b543fb04 EE |
1838 | if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies, |
1839 | dev_priv->hpd_stats[i].hpd_last_jiffies | |
1840 | + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) { | |
1841 | dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies; | |
1842 | dev_priv->hpd_stats[i].hpd_cnt = 0; | |
b8f102e8 | 1843 | DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i); |
b543fb04 EE |
1844 | } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) { |
1845 | dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED; | |
142e2398 | 1846 | dev_priv->hpd_event_bits &= ~(1 << i); |
b543fb04 | 1847 | DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i); |
10a504de | 1848 | storm_detected = true; |
b543fb04 EE |
1849 | } else { |
1850 | dev_priv->hpd_stats[i].hpd_cnt++; | |
b8f102e8 EE |
1851 | DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i, |
1852 | dev_priv->hpd_stats[i].hpd_cnt); | |
b543fb04 EE |
1853 | } |
1854 | } | |
1855 | ||
10a504de DV |
1856 | if (storm_detected) |
1857 | dev_priv->display.hpd_irq_setup(dev); | |
b5ea2d56 | 1858 | spin_unlock(&dev_priv->irq_lock); |
5876fa0d | 1859 | |
645416f5 DV |
1860 | /* |
1861 | * Our hotplug handler can grab modeset locks (by calling down into the | |
1862 | * fb helpers). Hence it must not be run on our own dev_priv->wq work | |
1863 | * queue because otherwise the flush_work in the pageflip code will | |
1864 | * deadlock. | |
1865 | */ | |
13cf5504 | 1866 | if (queue_dig) |
0e32b39c | 1867 | queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work); |
13cf5504 DA |
1868 | if (queue_hp) |
1869 | schedule_work(&dev_priv->hotplug_work); | |
b543fb04 EE |
1870 | } |
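/*
 * Dispatch summary: long pulses set long_hpd_port_mask and short pulses
 * set short_hpd_port_mask, both serviced by dig_port_work on the
 * dedicated dp_wq; the remaining pins go through hotplug_work via
 * schedule_work() so that modeset locks never land on dev_priv->wq.
 */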
1871 | ||
515ac2bb DV |
1872 | static void gmbus_irq_handler(struct drm_device *dev) |
1873 | { | |
2d1013dd | 1874 | struct drm_i915_private *dev_priv = dev->dev_private; |
28c70f16 | 1875 | |
28c70f16 | 1876 | wake_up_all(&dev_priv->gmbus_wait_queue); |
515ac2bb DV |
1877 | } |
1878 | ||
ce99c256 DV |
1879 | static void dp_aux_irq_handler(struct drm_device *dev) |
1880 | { | |
2d1013dd | 1881 | struct drm_i915_private *dev_priv = dev->dev_private; |
9ee32fea | 1882 | |
9ee32fea | 1883 | wake_up_all(&dev_priv->gmbus_wait_queue); |
ce99c256 DV |
1884 | } |
1885 | ||
8bf1e9f1 | 1886 | #if defined(CONFIG_DEBUG_FS) |
277de95e DV |
1887 | static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, |
1888 | uint32_t crc0, uint32_t crc1, | |
1889 | uint32_t crc2, uint32_t crc3, | |
1890 | uint32_t crc4) | |
8bf1e9f1 SH |
1891 | { |
1892 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1893 | struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; | |
1894 | struct intel_pipe_crc_entry *entry; | |
ac2300d4 | 1895 | int head, tail; |
b2c88f5b | 1896 | |
d538bbdf DL |
1897 | spin_lock(&pipe_crc->lock); |
1898 | ||
0c912c79 | 1899 | if (!pipe_crc->entries) { |
d538bbdf | 1900 | spin_unlock(&pipe_crc->lock); |
0c912c79 DL |
1901 | DRM_ERROR("spurious interrupt\n"); |
1902 | return; | |
1903 | } | |
1904 | ||
d538bbdf DL |
1905 | head = pipe_crc->head; |
1906 | tail = pipe_crc->tail; | |
b2c88f5b DL |
1907 | |
1908 | if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) { | |
d538bbdf | 1909 | spin_unlock(&pipe_crc->lock); |
b2c88f5b DL |
1910 | DRM_ERROR("CRC buffer overflowing\n"); |
1911 | return; | |
1912 | } | |
1913 | ||
1914 | entry = &pipe_crc->entries[head]; | |
8bf1e9f1 | 1915 | |
8bc5e955 | 1916 | entry->frame = dev->driver->get_vblank_counter(dev, pipe); |
eba94eb9 DV |
1917 | entry->crc[0] = crc0; |
1918 | entry->crc[1] = crc1; | |
1919 | entry->crc[2] = crc2; | |
1920 | entry->crc[3] = crc3; | |
1921 | entry->crc[4] = crc4; | |
b2c88f5b DL |
1922 | |
1923 | head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1); | |
d538bbdf DL |
1924 | pipe_crc->head = head; |
1925 | ||
1926 | spin_unlock(&pipe_crc->lock); | |
07144428 DL |
1927 | |
1928 | wake_up_interruptible(&pipe_crc->wq); | |
8bf1e9f1 | 1929 | } |
277de95e DV |
1930 | #else |
1931 | static inline void | |
1932 | display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, | |
1933 | uint32_t crc0, uint32_t crc1, | |
1934 | uint32_t crc2, uint32_t crc3, | |
1935 | uint32_t crc4) {} | |
1936 | #endif | |
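/*
 * Minimal sketch of the matching consumer (illustrative; the real
 * reader lives in i915_debugfs.c): entries are drained under the same
 * pipe_crc->lock, advancing tail with the same power-of-two mask the
 * producer above applies to head. The pipe_crc_pop() name is made up
 * for this example.
 */
#if 0	/* example only, never compiled */
static bool pipe_crc_pop(struct intel_pipe_crc *pipe_crc,
			 struct intel_pipe_crc_entry *out)
{
	bool avail;

	spin_lock_irq(&pipe_crc->lock);
	avail = CIRC_CNT(pipe_crc->head, pipe_crc->tail,
			 INTEL_PIPE_CRC_ENTRIES_NR) >= 1;
	if (avail) {
		*out = pipe_crc->entries[pipe_crc->tail];
		pipe_crc->tail = (pipe_crc->tail + 1) &
				 (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	}
	spin_unlock_irq(&pipe_crc->lock);

	return avail;
}
#endif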
1937 | ||
eba94eb9 | 1938 | |
277de95e | 1939 | static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) |
5a69b89f DV |
1940 | { |
1941 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1942 | ||
277de95e DV |
1943 | display_pipe_crc_irq_handler(dev, pipe, |
1944 | I915_READ(PIPE_CRC_RES_1_IVB(pipe)), | |
1945 | 0, 0, 0, 0); | |
5a69b89f DV |
1946 | } |
1947 | ||
277de95e | 1948 | static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) |
eba94eb9 DV |
1949 | { |
1950 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1951 | ||
277de95e DV |
1952 | display_pipe_crc_irq_handler(dev, pipe, |
1953 | I915_READ(PIPE_CRC_RES_1_IVB(pipe)), | |
1954 | I915_READ(PIPE_CRC_RES_2_IVB(pipe)), | |
1955 | I915_READ(PIPE_CRC_RES_3_IVB(pipe)), | |
1956 | I915_READ(PIPE_CRC_RES_4_IVB(pipe)), | |
1957 | I915_READ(PIPE_CRC_RES_5_IVB(pipe))); | |
eba94eb9 | 1958 | } |
5b3a856b | 1959 | |
277de95e | 1960 | static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) |
5b3a856b DV |
1961 | { |
1962 | struct drm_i915_private *dev_priv = dev->dev_private; | |
0b5c5ed0 DV |
1963 | uint32_t res1, res2; |
1964 | ||
1965 | if (INTEL_INFO(dev)->gen >= 3) | |
1966 | res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe)); | |
1967 | else | |
1968 | res1 = 0; | |
1969 | ||
1970 | if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) | |
1971 | res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe)); | |
1972 | else | |
1973 | res2 = 0; | |
5b3a856b | 1974 | |
277de95e DV |
1975 | display_pipe_crc_irq_handler(dev, pipe, |
1976 | I915_READ(PIPE_CRC_RES_RED(pipe)), | |
1977 | I915_READ(PIPE_CRC_RES_GREEN(pipe)), | |
1978 | I915_READ(PIPE_CRC_RES_BLUE(pipe)), | |
1979 | res1, res2); | |
5b3a856b | 1980 | } |
8bf1e9f1 | 1981 | |
1403c0d4 PZ |
1982 | /* The RPS events need forcewake, so we add them to a work queue and mask their |
1983 | * IMR bits until the work is done. Other interrupts can be processed without | |
1984 | * the work queue. */ | |
1985 | static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) | |
baf02a1f | 1986 | { |
a6706b45 | 1987 | if (pm_iir & dev_priv->pm_rps_events) { |
59cdb63d | 1988 | spin_lock(&dev_priv->irq_lock); |
a6706b45 | 1989 | dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events; |
480c8033 | 1990 | gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); |
59cdb63d | 1991 | spin_unlock(&dev_priv->irq_lock); |
2adbee62 DV |
1992 | |
1993 | queue_work(dev_priv->wq, &dev_priv->rps.work); | |
baf02a1f | 1994 | } |
baf02a1f | 1995 | |
1403c0d4 PZ |
1996 | if (HAS_VEBOX(dev_priv->dev)) { |
1997 | if (pm_iir & PM_VEBOX_USER_INTERRUPT) | |
1998 | notify_ring(dev_priv->dev, &dev_priv->ring[VECS]); | |
12638c57 | 1999 | |
1403c0d4 | 2000 | if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) { |
58174462 MK |
2001 | i915_handle_error(dev_priv->dev, false, |
2002 | "VEBOX CS error interrupt 0x%08x", | |
2003 | pm_iir); | |
1403c0d4 | 2004 | } |
12638c57 | 2005 | } |
baf02a1f BW |
2006 | } |
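/*
 * The mask/unmask pairing: gen6_disable_pm_irq() above keeps the RPS
 * bits masked while the work item runs, and gen6_pm_rps_work()
 * re-enables them via gen6_enable_pm_irq() once the
 * forcewake-requiring register writes are done.
 */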
2007 | ||
8d7849db VS |
2008 | static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe) |
2009 | { | |
8d7849db VS |
2010 | if (!drm_handle_vblank(dev, pipe)) |
2011 | return false; | |
2012 | ||
8d7849db VS |
2013 | return true; |
2014 | } | |
2015 | ||
c1874ed7 ID |
2016 | static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir) |
2017 | { | |
2018 | struct drm_i915_private *dev_priv = dev->dev_private; | |
91d181dd | 2019 | u32 pipe_stats[I915_MAX_PIPES] = { }; |
c1874ed7 ID |
2020 | int pipe; |
2021 | ||
58ead0d7 | 2022 | spin_lock(&dev_priv->irq_lock); |
055e393f | 2023 | for_each_pipe(dev_priv, pipe) { |
91d181dd | 2024 | int reg; |
bbb5eebf | 2025 | u32 mask, iir_bit = 0; |
91d181dd | 2026 | |
bbb5eebf DV |
2027 | /* |
2028 | * PIPESTAT bits get signalled even when the interrupt is | |
2029 | * disabled with the mask bits, and some of the status bits do | |
2030 | * not generate interrupts at all (like the underrun bit). Hence | |
2031 | * we need to be careful that we only handle what we want to | |
2032 | * handle. | |
2033 | */ | |
2034 | mask = 0; | |
2035 | if (__cpu_fifo_underrun_reporting_enabled(dev, pipe)) | |
2036 | mask |= PIPE_FIFO_UNDERRUN_STATUS; | |
2037 | ||
2038 | switch (pipe) { | |
2039 | case PIPE_A: | |
2040 | iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; | |
2041 | break; | |
2042 | case PIPE_B: | |
2043 | iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; | |
2044 | break; | |
3278f67f VS |
2045 | case PIPE_C: |
2046 | iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; | |
2047 | break; | |
bbb5eebf DV |
2048 | } |
2049 | if (iir & iir_bit) | |
2050 | mask |= dev_priv->pipestat_irq_mask[pipe]; | |
2051 | ||
2052 | if (!mask) | |
91d181dd ID |
2053 | continue; |
2054 | ||
2055 | reg = PIPESTAT(pipe); | |
bbb5eebf DV |
2056 | mask |= PIPESTAT_INT_ENABLE_MASK; |
2057 | pipe_stats[pipe] = I915_READ(reg) & mask; | |
c1874ed7 ID |
2058 | |
2059 | /* | |
2060 | * Clear the PIPE*STAT regs before the IIR | |
2061 | */ | |
91d181dd ID |
2062 | if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS | |
2063 | PIPESTAT_INT_STATUS_MASK)) | |
c1874ed7 ID |
2064 | I915_WRITE(reg, pipe_stats[pipe]); |
2065 | } | |
58ead0d7 | 2066 | spin_unlock(&dev_priv->irq_lock); |
c1874ed7 | 2067 | |
055e393f | 2068 | for_each_pipe(dev_priv, pipe) { |
d6bbafa1 CW |
2069 | if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && |
2070 | intel_pipe_handle_vblank(dev, pipe)) | |
2071 | intel_check_page_flip(dev, pipe); | |
c1874ed7 | 2072 | |
579a9b0e | 2073 | if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) { |
c1874ed7 ID |
2074 | intel_prepare_page_flip(dev, pipe); |
2075 | intel_finish_page_flip(dev, pipe); | |
2076 | } | |
2077 | ||
2078 | if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) | |
2079 | i9xx_pipe_crc_irq_handler(dev, pipe); | |
2080 | ||
2081 | if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS && | |
2082 | intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) | |
2083 | DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); | |
2084 | } | |
2085 | ||
2086 | if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) | |
2087 | gmbus_irq_handler(dev); | |
2088 | } | |
2089 | ||
16c6c56b VS |
2090 | static void i9xx_hpd_irq_handler(struct drm_device *dev) |
2091 | { | |
2092 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2093 | u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); | |
2094 | ||
3ff60f89 OM |
2095 | if (hotplug_status) { |
2096 | I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); | |
2097 | /* | |
2098 | * Make sure hotplug status is cleared before we clear IIR, or else we | |
2099 | * may miss hotplug events. | |
2100 | */ | |
2101 | POSTING_READ(PORT_HOTPLUG_STAT); | |
16c6c56b | 2102 | |
3ff60f89 OM |
2103 | if (IS_G4X(dev)) { |
2104 | u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; | |
16c6c56b | 2105 | |
13cf5504 | 2106 | intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x); |
3ff60f89 OM |
2107 | } else { |
2108 | u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; | |
16c6c56b | 2109 | |
13cf5504 | 2110 | intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915); |
3ff60f89 | 2111 | } |
16c6c56b | 2112 | |
3ff60f89 OM |
2113 | if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && |
2114 | hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) | |
2115 | dp_aux_irq_handler(dev); | |
2116 | } | |
16c6c56b VS |
2117 | } |
2118 | ||
ff1f525e | 2119 | static irqreturn_t valleyview_irq_handler(int irq, void *arg) |
7e231dbe | 2120 | { |
45a83f84 | 2121 | struct drm_device *dev = arg; |
2d1013dd | 2122 | struct drm_i915_private *dev_priv = dev->dev_private; |
7e231dbe JB |
2123 | u32 iir, gt_iir, pm_iir; |
2124 | irqreturn_t ret = IRQ_NONE; | |
7e231dbe | 2125 | |
7e231dbe | 2126 | while (true) { |
3ff60f89 OM |
2127 | /* Find, clear, then process each source of interrupt */ |
2128 | ||
7e231dbe | 2129 | gt_iir = I915_READ(GTIIR); |
3ff60f89 OM |
2130 | if (gt_iir) |
2131 | I915_WRITE(GTIIR, gt_iir); | |
2132 | ||
7e231dbe | 2133 | pm_iir = I915_READ(GEN6_PMIIR); |
3ff60f89 OM |
2134 | if (pm_iir) |
2135 | I915_WRITE(GEN6_PMIIR, pm_iir); | |
2136 | ||
2137 | iir = I915_READ(VLV_IIR); | |
2138 | if (iir) { | |
2139 | /* Consume port before clearing IIR or we'll miss events */ | |
2140 | if (iir & I915_DISPLAY_PORT_INTERRUPT) | |
2141 | i9xx_hpd_irq_handler(dev); | |
2142 | I915_WRITE(VLV_IIR, iir); | |
2143 | } | |
7e231dbe JB |
2144 | |
2145 | if (gt_iir == 0 && pm_iir == 0 && iir == 0) | |
2146 | goto out; | |
2147 | ||
2148 | ret = IRQ_HANDLED; | |
2149 | ||
3ff60f89 OM |
2150 | if (gt_iir) |
2151 | snb_gt_irq_handler(dev, dev_priv, gt_iir); | |
60611c13 | 2152 | if (pm_iir) |
d0ecd7e2 | 2153 | gen6_rps_irq_handler(dev_priv, pm_iir); |
3ff60f89 OM |
2154 | /* Call regardless, as some status bits might not be |
2155 | * signalled in iir */ | |
2156 | valleyview_pipestat_irq_handler(dev, iir); | |
7e231dbe JB |
2157 | } |
2158 | ||
2159 | out: | |
2160 | return ret; | |
2161 | } | |
2162 | ||
43f328d7 VS |
2163 | static irqreturn_t cherryview_irq_handler(int irq, void *arg) |
2164 | { | |
45a83f84 | 2165 | struct drm_device *dev = arg; |
43f328d7 VS |
2166 | struct drm_i915_private *dev_priv = dev->dev_private; |
2167 | u32 master_ctl, iir; | |
2168 | irqreturn_t ret = IRQ_NONE; | |
43f328d7 | 2169 | |
8e5fd599 VS |
2170 | for (;;) { |
2171 | master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; | |
2172 | iir = I915_READ(VLV_IIR); | |
43f328d7 | 2173 | |
8e5fd599 VS |
2174 | if (master_ctl == 0 && iir == 0) |
2175 | break; | |
43f328d7 | 2176 | |
27b6c122 OM |
2177 | ret = IRQ_HANDLED; |
2178 | ||
8e5fd599 | 2179 | I915_WRITE(GEN8_MASTER_IRQ, 0); |
43f328d7 | 2180 | |
27b6c122 | 2181 | /* Find, clear, then process each source of interrupt */ |
43f328d7 | 2182 | |
27b6c122 OM |
2183 | if (iir) { |
2184 | /* Consume port before clearing IIR or we'll miss events */ | |
2185 | if (iir & I915_DISPLAY_PORT_INTERRUPT) | |
2186 | i9xx_hpd_irq_handler(dev); | |
2187 | I915_WRITE(VLV_IIR, iir); | |
2188 | } | |
43f328d7 | 2189 | |
27b6c122 | 2190 | gen8_gt_irq_handler(dev, dev_priv, master_ctl); |
43f328d7 | 2191 | |
27b6c122 OM |
2192 | /* Call regardless, as some status bits might not be |
2193 | * signalled in iir */ | |
2194 | valleyview_pipestat_irq_handler(dev, iir); | |
43f328d7 | 2195 | |
8e5fd599 VS |
2196 | I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); |
2197 | POSTING_READ(GEN8_MASTER_IRQ); | |
8e5fd599 | 2198 | } |
3278f67f | 2199 | |
43f328d7 VS |
2200 | return ret; |
2201 | } | |
2202 | ||
23e81d69 | 2203 | static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) |
776ad806 | 2204 | { |
2d1013dd | 2205 | struct drm_i915_private *dev_priv = dev->dev_private; |
9db4a9c7 | 2206 | int pipe; |
b543fb04 | 2207 | u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; |
13cf5504 DA |
2208 | u32 dig_hotplug_reg; |
2209 | ||
2210 | dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); | |
2211 | I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); | |
776ad806 | 2212 | |
13cf5504 | 2213 | intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx); |
91d131d2 | 2214 | |
cfc33bf7 VS |
2215 | if (pch_iir & SDE_AUDIO_POWER_MASK) { |
2216 | int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> | |
2217 | SDE_AUDIO_POWER_SHIFT); | |
776ad806 | 2218 | DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", |
cfc33bf7 VS |
2219 | port_name(port)); |
2220 | } | |
776ad806 | 2221 | |
ce99c256 DV |
2222 | if (pch_iir & SDE_AUX_MASK) |
2223 | dp_aux_irq_handler(dev); | |
2224 | ||
776ad806 | 2225 | if (pch_iir & SDE_GMBUS) |
515ac2bb | 2226 | gmbus_irq_handler(dev); |
776ad806 JB |
2227 | |
2228 | if (pch_iir & SDE_AUDIO_HDCP_MASK) | |
2229 | DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); | |
2230 | ||
2231 | if (pch_iir & SDE_AUDIO_TRANS_MASK) | |
2232 | DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); | |
2233 | ||
2234 | if (pch_iir & SDE_POISON) | |
2235 | DRM_ERROR("PCH poison interrupt\n"); | |
2236 | ||
9db4a9c7 | 2237 | if (pch_iir & SDE_FDI_MASK) |
055e393f | 2238 | for_each_pipe(dev_priv, pipe) |
9db4a9c7 JB |
2239 | DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", |
2240 | pipe_name(pipe), | |
2241 | I915_READ(FDI_RX_IIR(pipe))); | |
776ad806 JB |
2242 | |
2243 | if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) | |
2244 | DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); | |
2245 | ||
2246 | if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) | |
2247 | DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); | |
2248 | ||
776ad806 | 2249 | if (pch_iir & SDE_TRANSA_FIFO_UNDER) |
8664281b PZ |
2250 | if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, |
2251 | false)) | |
fc2c807b | 2252 | DRM_ERROR("PCH transcoder A FIFO underrun\n"); |
8664281b PZ |
2253 | |
2254 | if (pch_iir & SDE_TRANSB_FIFO_UNDER) | |
2255 | if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, | |
2256 | false)) | |
fc2c807b | 2257 | DRM_ERROR("PCH transcoder B FIFO underrun\n"); |
8664281b PZ |
2258 | } |
2259 | ||
2260 | static void ivb_err_int_handler(struct drm_device *dev) | |
2261 | { | |
2262 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2263 | u32 err_int = I915_READ(GEN7_ERR_INT); | |
5a69b89f | 2264 | enum pipe pipe; |
8664281b | 2265 | |
de032bf4 PZ |
2266 | if (err_int & ERR_INT_POISON) |
2267 | DRM_ERROR("Poison interrupt\n"); | |
2268 | ||
055e393f | 2269 | for_each_pipe(dev_priv, pipe) { |
5a69b89f DV |
2270 | if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) { |
2271 | if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, | |
2272 | false)) | |
fc2c807b VS |
2273 | DRM_ERROR("Pipe %c FIFO underrun\n", |
2274 | pipe_name(pipe)); | |
5a69b89f | 2275 | } |
8bf1e9f1 | 2276 | |
5a69b89f DV |
2277 | if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { |
2278 | if (IS_IVYBRIDGE(dev)) | |
277de95e | 2279 | ivb_pipe_crc_irq_handler(dev, pipe); |
5a69b89f | 2280 | else |
277de95e | 2281 | hsw_pipe_crc_irq_handler(dev, pipe); |
5a69b89f DV |
2282 | } |
2283 | } | |
8bf1e9f1 | 2284 | |
8664281b PZ |
2285 | I915_WRITE(GEN7_ERR_INT, err_int); |
2286 | } | |
2287 | ||
2288 | static void cpt_serr_int_handler(struct drm_device *dev) | |
2289 | { | |
2290 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2291 | u32 serr_int = I915_READ(SERR_INT); | |
2292 | ||
de032bf4 PZ |
2293 | if (serr_int & SERR_INT_POISON) |
2294 | DRM_ERROR("PCH poison interrupt\n"); | |
2295 | ||
8664281b PZ |
2296 | if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) |
2297 | if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, | |
2298 | false)) | |
fc2c807b | 2299 | DRM_ERROR("PCH transcoder A FIFO underrun\n"); |
8664281b PZ |
2300 | |
2301 | if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) | |
2302 | if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, | |
2303 | false)) | |
fc2c807b | 2304 | DRM_ERROR("PCH transcoder B FIFO underrun\n"); |
8664281b PZ |
2305 | |
2306 | if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) | |
2307 | if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C, | |
2308 | false)) | |
fc2c807b | 2309 | DRM_ERROR("PCH transcoder C FIFO underrun\n"); |
8664281b PZ |
2310 | |
2311 | I915_WRITE(SERR_INT, serr_int); | |
776ad806 JB |
2312 | } |
2313 | ||
23e81d69 AJ |
2314 | static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) |
2315 | { | |
2d1013dd | 2316 | struct drm_i915_private *dev_priv = dev->dev_private; |
23e81d69 | 2317 | int pipe; |
b543fb04 | 2318 | u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; |
13cf5504 DA |
2319 | u32 dig_hotplug_reg; |
2320 | ||
2321 | dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); | |
2322 | I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); | |
23e81d69 | 2323 | |
13cf5504 | 2324 | intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt); |
91d131d2 | 2325 | |
cfc33bf7 VS |
2326 | if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { |
2327 | int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> | |
2328 | SDE_AUDIO_POWER_SHIFT_CPT); | |
2329 | DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", | |
2330 | port_name(port)); | |
2331 | } | |
23e81d69 AJ |
2332 | |
2333 | if (pch_iir & SDE_AUX_MASK_CPT) | |
ce99c256 | 2334 | dp_aux_irq_handler(dev); |
23e81d69 AJ |
2335 | |
2336 | if (pch_iir & SDE_GMBUS_CPT) | |
515ac2bb | 2337 | gmbus_irq_handler(dev); |
23e81d69 AJ |
2338 | |
2339 | if (pch_iir & SDE_AUDIO_CP_REQ_CPT) | |
2340 | DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); | |
2341 | ||
2342 | if (pch_iir & SDE_AUDIO_CP_CHG_CPT) | |
2343 | DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); | |
2344 | ||
2345 | if (pch_iir & SDE_FDI_MASK_CPT) | |
055e393f | 2346 | for_each_pipe(dev_priv, pipe) |
23e81d69 AJ |
2347 | DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", |
2348 | pipe_name(pipe), | |
2349 | I915_READ(FDI_RX_IIR(pipe))); | |
8664281b PZ |
2350 | |
2351 | if (pch_iir & SDE_ERROR_CPT) | |
2352 | cpt_serr_int_handler(dev); | |
23e81d69 AJ |
2353 | } |
2354 | ||
c008bc6e PZ |
2355 | static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) |
2356 | { | |
2357 | struct drm_i915_private *dev_priv = dev->dev_private; | |
40da17c2 | 2358 | enum pipe pipe; |
c008bc6e PZ |
2359 | |
2360 | if (de_iir & DE_AUX_CHANNEL_A) | |
2361 | dp_aux_irq_handler(dev); | |
2362 | ||
2363 | if (de_iir & DE_GSE) | |
2364 | intel_opregion_asle_intr(dev); | |
2365 | ||
c008bc6e PZ |
2366 | if (de_iir & DE_POISON) |
2367 | DRM_ERROR("Poison interrupt\n"); | |
2368 | ||
055e393f | 2369 | for_each_pipe(dev_priv, pipe) { |
d6bbafa1 CW |
2370 | if (de_iir & DE_PIPE_VBLANK(pipe) && |
2371 | intel_pipe_handle_vblank(dev, pipe)) | |
2372 | intel_check_page_flip(dev, pipe); | |
5b3a856b | 2373 | |
40da17c2 DV |
2374 | if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) |
2375 | if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) | |
fc2c807b VS |
2376 | DRM_ERROR("Pipe %c FIFO underrun\n", |
2377 | pipe_name(pipe)); | |
5b3a856b | 2378 | |
40da17c2 DV |
2379 | if (de_iir & DE_PIPE_CRC_DONE(pipe)) |
2380 | i9xx_pipe_crc_irq_handler(dev, pipe); | |
c008bc6e | 2381 | |
40da17c2 DV |
2382 | /* plane/pipes map 1:1 on ilk+ */ |
2383 | if (de_iir & DE_PLANE_FLIP_DONE(pipe)) { | |
2384 | intel_prepare_page_flip(dev, pipe); | |
2385 | intel_finish_page_flip_plane(dev, pipe); | |
2386 | } | |
c008bc6e PZ |
2387 | } |
2388 | ||
2389 | /* check event from PCH */ | |
2390 | if (de_iir & DE_PCH_EVENT) { | |
2391 | u32 pch_iir = I915_READ(SDEIIR); | |
2392 | ||
2393 | if (HAS_PCH_CPT(dev)) | |
2394 | cpt_irq_handler(dev, pch_iir); | |
2395 | else | |
2396 | ibx_irq_handler(dev, pch_iir); | |
2397 | ||
2398 | /* clear the PCH hotplug event before clearing the CPU irq */ | |
2399 | I915_WRITE(SDEIIR, pch_iir); | |
2400 | } | |
2401 | ||
2402 | if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) | |
2403 | ironlake_rps_change_irq_handler(dev); | |
2404 | } | |
2405 | ||
9719fb98 PZ |
2406 | static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) |
2407 | { | |
2408 | struct drm_i915_private *dev_priv = dev->dev_private; | |
07d27e20 | 2409 | enum pipe pipe; |
9719fb98 PZ |
2410 | |
2411 | if (de_iir & DE_ERR_INT_IVB) | |
2412 | ivb_err_int_handler(dev); | |
2413 | ||
2414 | if (de_iir & DE_AUX_CHANNEL_A_IVB) | |
2415 | dp_aux_irq_handler(dev); | |
2416 | ||
2417 | if (de_iir & DE_GSE_IVB) | |
2418 | intel_opregion_asle_intr(dev); | |
2419 | ||
055e393f | 2420 | for_each_pipe(dev_priv, pipe) { |
d6bbafa1 CW |
2421 | if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) && |
2422 | intel_pipe_handle_vblank(dev, pipe)) | |
2423 | intel_check_page_flip(dev, pipe); | |
40da17c2 DV |
2424 | |
2425 | /* plane/pipes map 1:1 on ilk+ */ | |
07d27e20 DL |
2426 | if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) { |
2427 | intel_prepare_page_flip(dev, pipe); | |
2428 | intel_finish_page_flip_plane(dev, pipe); | |
9719fb98 PZ |
2429 | } |
2430 | } | |
2431 | ||
2432 | /* check event from PCH */ | |
2433 | if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) { | |
2434 | u32 pch_iir = I915_READ(SDEIIR); | |
2435 | ||
2436 | cpt_irq_handler(dev, pch_iir); | |
2437 | ||
2438 | /* clear the PCH hotplug event before clearing the CPU irq */ | |
2439 | I915_WRITE(SDEIIR, pch_iir); | |
2440 | } | |
2441 | } | |
2442 | ||
72c90f62 OM |
2443 | /* |
2444 | * To handle irqs with the minimum potential races with fresh interrupts, we: | |
2445 | * 1 - Disable Master Interrupt Control. | |
2446 | * 2 - Find the source(s) of the interrupt. | |
2447 | * 3 - Clear the Interrupt Identity bits (IIR). | |
2448 | * 4 - Process the interrupt(s) that had bits set in the IIRs. | |
2449 | * 5 - Re-enable Master Interrupt Control. | |
2450 | */ | |
f1af8fc1 | 2451 | static irqreturn_t ironlake_irq_handler(int irq, void *arg) |
b1f14ad0 | 2452 | { |
45a83f84 | 2453 | struct drm_device *dev = arg; |
2d1013dd | 2454 | struct drm_i915_private *dev_priv = dev->dev_private; |
f1af8fc1 | 2455 | u32 de_iir, gt_iir, de_ier, sde_ier = 0; |
0e43406b | 2456 | irqreturn_t ret = IRQ_NONE; |
b1f14ad0 | 2457 | |
8664281b PZ |
2458 | /* We get interrupts on unclaimed registers, so check for this before we |
2459 | * do any I915_{READ,WRITE}. */ | |
907b28c5 | 2460 | intel_uncore_check_errors(dev); |
8664281b | 2461 | |
b1f14ad0 JB |
2462 | /* disable master interrupt before clearing iir */ |
2463 | de_ier = I915_READ(DEIER); | |
2464 | I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); | |
23a78516 | 2465 | POSTING_READ(DEIER); |
b1f14ad0 | 2466 | |
44498aea PZ |
2467 | /* Disable south interrupts. We'll only write to SDEIIR once, so further |
2468 | * interrupts will be stored on its back queue, and then we'll be | |
2469 | * able to process them after we restore SDEIER (as soon as we restore | |
2470 | * it, we'll get an interrupt if SDEIIR still has something to process | |
2471 | * due to its back queue). */ | |
ab5c608b BW |
2472 | if (!HAS_PCH_NOP(dev)) { |
2473 | sde_ier = I915_READ(SDEIER); | |
2474 | I915_WRITE(SDEIER, 0); | |
2475 | POSTING_READ(SDEIER); | |
2476 | } | |
44498aea | 2477 | |
72c90f62 OM |
2478 | /* Find, clear, then process each source of interrupt */ |
2479 | ||
b1f14ad0 | 2480 | gt_iir = I915_READ(GTIIR); |
0e43406b | 2481 | if (gt_iir) { |
72c90f62 OM |
2482 | I915_WRITE(GTIIR, gt_iir); |
2483 | ret = IRQ_HANDLED; | |
d8fc8a47 | 2484 | if (INTEL_INFO(dev)->gen >= 6) |
f1af8fc1 | 2485 | snb_gt_irq_handler(dev, dev_priv, gt_iir); |
d8fc8a47 PZ |
2486 | else |
2487 | ilk_gt_irq_handler(dev, dev_priv, gt_iir); | |
b1f14ad0 JB |
2488 | } |
2489 | ||
0e43406b CW |
2490 | de_iir = I915_READ(DEIIR); |
2491 | if (de_iir) { | |
72c90f62 OM |
2492 | I915_WRITE(DEIIR, de_iir); |
2493 | ret = IRQ_HANDLED; | |
f1af8fc1 PZ |
2494 | if (INTEL_INFO(dev)->gen >= 7) |
2495 | ivb_display_irq_handler(dev, de_iir); | |
2496 | else | |
2497 | ilk_display_irq_handler(dev, de_iir); | |
b1f14ad0 JB |
2498 | } |
2499 | ||
f1af8fc1 PZ |
2500 | if (INTEL_INFO(dev)->gen >= 6) { |
2501 | u32 pm_iir = I915_READ(GEN6_PMIIR); | |
2502 | if (pm_iir) { | |
f1af8fc1 PZ |
2503 | I915_WRITE(GEN6_PMIIR, pm_iir); |
2504 | ret = IRQ_HANDLED; | |
72c90f62 | 2505 | gen6_rps_irq_handler(dev_priv, pm_iir); |
f1af8fc1 | 2506 | } |
0e43406b | 2507 | } |
b1f14ad0 | 2508 | |
b1f14ad0 JB |
2509 | I915_WRITE(DEIER, de_ier); |
2510 | POSTING_READ(DEIER); | |
ab5c608b BW |
2511 | if (!HAS_PCH_NOP(dev)) { |
2512 | I915_WRITE(SDEIER, sde_ier); | |
2513 | POSTING_READ(SDEIER); | |
2514 | } | |
b1f14ad0 JB |
2515 | |
2516 | return ret; | |
2517 | } | |
2518 | ||
abd58f01 BW |
2519 | static irqreturn_t gen8_irq_handler(int irq, void *arg) |
2520 | { | |
2521 | struct drm_device *dev = arg; | |
2522 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2523 | u32 master_ctl; | |
2524 | irqreturn_t ret = IRQ_NONE; | |
2525 | uint32_t tmp = 0; | |
c42664cc | 2526 | enum pipe pipe; |
abd58f01 | 2527 | |
abd58f01 BW |
2528 | master_ctl = I915_READ(GEN8_MASTER_IRQ); |
2529 | master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; | |
2530 | if (!master_ctl) | |
2531 | return IRQ_NONE; | |
2532 | ||
2533 | I915_WRITE(GEN8_MASTER_IRQ, 0); | |
2534 | POSTING_READ(GEN8_MASTER_IRQ); | |
2535 | ||
38cc46d7 OM |
2536 | /* Find, clear, then process each source of interrupt */ |
2537 | ||
abd58f01 BW |
2538 | ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl); |
2539 | ||
2540 | if (master_ctl & GEN8_DE_MISC_IRQ) { | |
2541 | tmp = I915_READ(GEN8_DE_MISC_IIR); | |
abd58f01 BW |
2542 | if (tmp) { |
2543 | I915_WRITE(GEN8_DE_MISC_IIR, tmp); | |
2544 | ret = IRQ_HANDLED; | |
38cc46d7 OM |
2545 | if (tmp & GEN8_DE_MISC_GSE) |
2546 | intel_opregion_asle_intr(dev); | |
2547 | else | |
2548 | DRM_ERROR("Unexpected DE Misc interrupt\n"); | |
abd58f01 | 2549 | } |
38cc46d7 OM |
2550 | else |
2551 | DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); | |
abd58f01 BW |
2552 | } |
2553 | ||
6d766f02 DV |
2554 | if (master_ctl & GEN8_DE_PORT_IRQ) { |
2555 | tmp = I915_READ(GEN8_DE_PORT_IIR); | |
6d766f02 DV |
2556 | if (tmp) { |
2557 | I915_WRITE(GEN8_DE_PORT_IIR, tmp); | |
2558 | ret = IRQ_HANDLED; | |
38cc46d7 OM |
2559 | if (tmp & GEN8_AUX_CHANNEL_A) |
2560 | dp_aux_irq_handler(dev); | |
2561 | else | |
2562 | DRM_ERROR("Unexpected DE Port interrupt\n"); | |
6d766f02 | 2563 | } |
38cc46d7 OM |
2564 | else |
2565 | DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); | |
6d766f02 DV |
2566 | } |
2567 | ||
055e393f | 2568 | for_each_pipe(dev_priv, pipe) { |
c42664cc | 2569 | uint32_t pipe_iir; |
abd58f01 | 2570 | |
c42664cc DV |
2571 | if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) |
2572 | continue; | |
abd58f01 | 2573 | |
c42664cc | 2574 | pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); |
c42664cc DV |
2575 | if (pipe_iir) { |
2576 | ret = IRQ_HANDLED; | |
2577 | I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir); | |
d6bbafa1 CW |
2578 | if (pipe_iir & GEN8_PIPE_VBLANK && |
2579 | intel_pipe_handle_vblank(dev, pipe)) | |
2580 | intel_check_page_flip(dev, pipe); | |
38cc46d7 OM |
2581 | |
2582 | if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) { | |
2583 | intel_prepare_page_flip(dev, pipe); | |
2584 | intel_finish_page_flip_plane(dev, pipe); | |
2585 | } | |
2586 | ||
2587 | if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE) | |
2588 | hsw_pipe_crc_irq_handler(dev, pipe); | |
2589 | ||
2590 | if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) { | |
2591 | if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, | |
2592 | false)) | |
2593 | DRM_ERROR("Pipe %c FIFO underrun\n", | |
2594 | pipe_name(pipe)); | |
2595 | } | |
2596 | ||
2597 | if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) { | |
2598 | DRM_ERROR("Fault errors on pipe %c\n: 0x%08x", | |
2599 | pipe_name(pipe), | |
2600 | pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS); | |
2601 | } | |
c42664cc | 2602 | } else |
abd58f01 BW |
2603 | DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); |
2604 | } | |
2605 | ||
92d03a80 DV |
2606 | if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) { |
2607 | /* | |
2608 | * FIXME(BDW): Assume for now that the new interrupt handling | |
2609 | * scheme also closed the SDE interrupt handling race we've seen | |
2610 | * on older pch-split platforms. But this needs testing. | |
2611 | */ | |
2612 | u32 pch_iir = I915_READ(SDEIIR); | |
92d03a80 DV |
2613 | if (pch_iir) { |
2614 | I915_WRITE(SDEIIR, pch_iir); | |
2615 | ret = IRQ_HANDLED; | |
38cc46d7 OM |
2616 | cpt_irq_handler(dev, pch_iir); |
2617 | } else | |
2618 | DRM_ERROR("The master control interrupt lied (SDE)!\n"); | |
2619 | ||
92d03a80 DV |
2620 | } |
2621 | ||
abd58f01 BW |
2622 | I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); |
2623 | POSTING_READ(GEN8_MASTER_IRQ); | |
2624 | ||
2625 | return ret; | |
2626 | } | |
2627 | ||
17e1df07 DV |
2628 | static void i915_error_wake_up(struct drm_i915_private *dev_priv, |
2629 | bool reset_completed) | |
2630 | { | |
a4872ba6 | 2631 | struct intel_engine_cs *ring; |
17e1df07 DV |
2632 | int i; |
2633 | ||
2634 | /* | |
2635 | * Notify all waiters for GPU completion events that reset state has | |
2636 | * been changed, and that they need to restart their wait after | |
2637 | * checking for potential errors (and bail out to drop locks if there is | |
2638 | * a gpu reset pending so that i915_error_work_func can acquire them). | |
2639 | */ | |
2640 | ||
2641 | /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */ | |
2642 | for_each_ring(ring, dev_priv, i) | |
2643 | wake_up_all(&ring->irq_queue); | |
2644 | ||
2645 | /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */ | |
2646 | wake_up_all(&dev_priv->pending_flip_queue); | |
2647 | ||
2648 | /* | |
2649 | * Signal tasks blocked in i915_gem_wait_for_error that the pending | |
2650 | * reset state is cleared. | |
2651 | */ | |
2652 | if (reset_completed) | |
2653 | wake_up_all(&dev_priv->gpu_error.reset_queue); | |
2654 | } | |
2655 | ||
8a905236 JB |
2656 | /** |
2657 | * i915_error_work_func - do process context error handling work | |
2658 | * @work: work struct | |
2659 | * | |
2660 | * Fire an error uevent so userspace can see that a hang or error | |
2661 | * was detected. | |
2662 | */ | |
2663 | static void i915_error_work_func(struct work_struct *work) | |
2664 | { | |
1f83fee0 DV |
2665 | struct i915_gpu_error *error = container_of(work, struct i915_gpu_error, |
2666 | work); | |
2d1013dd JN |
2667 | struct drm_i915_private *dev_priv = |
2668 | container_of(error, struct drm_i915_private, gpu_error); | |
8a905236 | 2669 | struct drm_device *dev = dev_priv->dev; |
cce723ed BW |
2670 | char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; |
2671 | char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; | |
2672 | char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; | |
17e1df07 | 2673 | int ret; |
8a905236 | 2674 | |
5bdebb18 | 2675 | kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event); |
f316a42c | 2676 | |
7db0ba24 DV |
2677 | /* |
2678 | * Note that there's only one work item which does gpu resets, so we | |
2679 | * need not worry about concurrent gpu resets potentially incrementing | |
2680 | * error->reset_counter twice. We only need to take care of another | |
2681 | * racing irq/hangcheck declaring the gpu dead for a second time. A | |
2682 | * quick check for that is good enough: schedule_work ensures the | |
2683 | * correct ordering between hang detection and this work item, and since | |
2684 | * the reset in-progress bit is only ever set by code outside of this | |
2685 | * work we don't need to worry about any other races. | |
2686 | */ | |
2687 | if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) { | |
f803aa55 | 2688 | DRM_DEBUG_DRIVER("resetting chip\n"); |
5bdebb18 | 2689 | kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, |
7db0ba24 | 2690 | reset_event); |
1f83fee0 | 2691 | |
f454c694 ID |
2692 | /* |
2693 | * In most cases it's guaranteed that we get here with an RPM | |
2694 | * reference held, for example because there is a pending GPU | |
2695 | * request that won't finish until the reset is done. This | |
2696 | * isn't the case at least when we get here by doing a | |
2697 | * simulated reset via debugfs, so get an RPM reference. | |
2698 | */ | |
2699 | intel_runtime_pm_get(dev_priv); | |
17e1df07 DV |
2700 | /* |
2701 | * All state reset _must_ be completed before we update the | |
2702 | * reset counter, for otherwise waiters might miss the reset | |
2703 | * pending state and not properly drop locks, resulting in | |
2704 | * deadlocks with the reset work. | |
2705 | */ | |
f69061be DV |
2706 | ret = i915_reset(dev); |
2707 | ||
17e1df07 DV |
2708 | intel_display_handle_reset(dev); |
2709 | ||
f454c694 ID |
2710 | intel_runtime_pm_put(dev_priv); |
2711 | ||
f69061be DV |
2712 | if (ret == 0) { |
2713 | /* | |
2714 | * After all the gem state is reset, increment the reset | |
2715 | * counter and wake up everyone waiting for the reset to | |
2716 | * complete. | |
2717 | * | |
2718 | * Since unlock operations are a one-sided barrier only, | |
2719 | * we need to insert a barrier here to order any seqno | |
2720 | * updates before | |
2721 | * the counter increment. | |
2722 | */ | |
4e857c58 | 2723 | smp_mb__before_atomic(); |
f69061be DV |
2724 | atomic_inc(&dev_priv->gpu_error.reset_counter); |
2725 | ||
5bdebb18 | 2726 | kobject_uevent_env(&dev->primary->kdev->kobj, |
f69061be | 2727 | KOBJ_CHANGE, reset_done_event); |
1f83fee0 | 2728 | } else { |
2ac0f450 | 2729 | atomic_set_mask(I915_WEDGED, &error->reset_counter); |
f316a42c | 2730 | } |
1f83fee0 | 2731 | |
17e1df07 DV |
2732 | /* |
2733 | * Note: The wake_up also serves as a memory barrier so that | |
2734 | * waiters see the update value of the reset counter atomic_t. | |
2735 | */ | |
2736 | i915_error_wake_up(dev_priv, true); | |
f316a42c | 2737 | } |
8a905236 JB |
2738 | } |
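/*
 * Userspace thus sees up to three uevents per hang: I915_ERROR_UEVENT=1
 * when the hang is flagged, I915_RESET_UEVENT=1 just before the reset,
 * and I915_ERROR_UEVENT=0 once the reset has completed successfully.
 */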
2739 | ||
35aed2e6 | 2740 | static void i915_report_and_clear_eir(struct drm_device *dev) |
8a905236 JB |
2741 | { |
2742 | struct drm_i915_private *dev_priv = dev->dev_private; | |
bd9854f9 | 2743 | uint32_t instdone[I915_NUM_INSTDONE_REG]; |
8a905236 | 2744 | u32 eir = I915_READ(EIR); |
050ee91f | 2745 | int pipe, i; |
8a905236 | 2746 | |
35aed2e6 CW |
2747 | if (!eir) |
2748 | return; | |
8a905236 | 2749 | |
a70491cc | 2750 | pr_err("render error detected, EIR: 0x%08x\n", eir); |
8a905236 | 2751 | |
bd9854f9 BW |
2752 | i915_get_extra_instdone(dev, instdone); |
2753 | ||
8a905236 JB |
2754 | if (IS_G4X(dev)) { |
2755 | if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { | |
2756 | u32 ipeir = I915_READ(IPEIR_I965); | |
2757 | ||
a70491cc JP |
2758 | pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); |
2759 | pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); | |
050ee91f BW |
2760 | for (i = 0; i < ARRAY_SIZE(instdone); i++) |
2761 | pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); | |
a70491cc | 2762 | pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); |
a70491cc | 2763 | pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); |
8a905236 | 2764 | I915_WRITE(IPEIR_I965, ipeir); |
3143a2bf | 2765 | POSTING_READ(IPEIR_I965); |
8a905236 JB |
2766 | } |
2767 | if (eir & GM45_ERROR_PAGE_TABLE) { | |
2768 | u32 pgtbl_err = I915_READ(PGTBL_ER); | |
a70491cc JP |
2769 | pr_err("page table error\n"); |
2770 | pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); | |
8a905236 | 2771 | I915_WRITE(PGTBL_ER, pgtbl_err); |
3143a2bf | 2772 | POSTING_READ(PGTBL_ER); |
8a905236 JB |
2773 | } |
2774 | } | |
2775 | ||
a6c45cf0 | 2776 | if (!IS_GEN2(dev)) { |
8a905236 JB |
2777 | if (eir & I915_ERROR_PAGE_TABLE) { |
2778 | u32 pgtbl_err = I915_READ(PGTBL_ER); | |
a70491cc JP |
2779 | pr_err("page table error\n"); |
2780 | pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); | |
8a905236 | 2781 | I915_WRITE(PGTBL_ER, pgtbl_err); |
3143a2bf | 2782 | POSTING_READ(PGTBL_ER); |
8a905236 JB |
2783 | } |
2784 | } | |
2785 | ||
2786 | if (eir & I915_ERROR_MEMORY_REFRESH) { | |
a70491cc | 2787 | pr_err("memory refresh error:\n"); |
055e393f | 2788 | for_each_pipe(dev_priv, pipe) |
a70491cc | 2789 | pr_err("pipe %c stat: 0x%08x\n", |
9db4a9c7 | 2790 | pipe_name(pipe), I915_READ(PIPESTAT(pipe))); |
8a905236 JB |
2791 | /* pipestat has already been acked */ |
2792 | } | |
2793 | if (eir & I915_ERROR_INSTRUCTION) { | |
a70491cc JP |
2794 | pr_err("instruction error\n"); |
2795 | pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); | |
050ee91f BW |
2796 | for (i = 0; i < ARRAY_SIZE(instdone); i++) |
2797 | pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); | |
a6c45cf0 | 2798 | if (INTEL_INFO(dev)->gen < 4) { |
8a905236 JB |
2799 | u32 ipeir = I915_READ(IPEIR); |
2800 | ||
a70491cc JP |
2801 | pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); |
2802 | pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); | |
a70491cc | 2803 | pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); |
8a905236 | 2804 | I915_WRITE(IPEIR, ipeir); |
3143a2bf | 2805 | POSTING_READ(IPEIR); |
8a905236 JB |
2806 | } else { |
2807 | u32 ipeir = I915_READ(IPEIR_I965); | |
2808 | ||
a70491cc JP |
2809 | pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); |
2810 | pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); | |
a70491cc | 2811 | pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); |
a70491cc | 2812 | pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); |
8a905236 | 2813 | I915_WRITE(IPEIR_I965, ipeir); |
3143a2bf | 2814 | POSTING_READ(IPEIR_I965); |
8a905236 JB |
2815 | } |
2816 | } | |
2817 | ||
2818 | I915_WRITE(EIR, eir); | |
3143a2bf | 2819 | POSTING_READ(EIR); |
8a905236 JB |
2820 | eir = I915_READ(EIR); |
2821 | if (eir) { | |
2822 | /* | |
2823 | * some errors might have become stuck, | |
2824 | * mask them. | |
2825 | */ | |
2826 | DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); | |
2827 | I915_WRITE(EMR, I915_READ(EMR) | eir); | |
2828 | I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); | |
2829 | } | |
35aed2e6 CW |
2830 | } |
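The function's closing sequence is a write-to-clear with a fallback: writing EIR back should clear the reported errors, and any bits that survive the write are folded into EMR so the stuck condition stops re-raising the interrupt. A toy model of just that logic, with register behavior simulated and the stuck bit invented for illustration:

```c
#include <stdint.h>
#include <stdio.h>

static uint32_t eir = 0x00000011;	/* pending error bits */
static uint32_t eir_stuck = 0x00000010;	/* simulated: refuses to clear */
static uint32_t emr;			/* error mask register */

/* Write-1-to-clear semantics, except for the simulated stuck bit. */
static void write_eir(uint32_t val)
{
	eir &= ~val | eir_stuck;
}

int main(void)
{
	write_eir(eir);
	if (eir) {
		fprintf(stderr, "EIR stuck: 0x%08x, masking\n", eir);
		emr |= eir;	/* mask it so it cannot re-interrupt */
	}
	printf("EMR = 0x%08x\n", emr);
	return 0;
}
```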
2831 | ||
2832 | /** | |
2833 | * i915_handle_error - handle an error interrupt | |
2834 | * @dev: drm device | |
2835 | * | |
2836 | * Do some basic checking of register state at error interrupt time and |
2837 | * dump it to the syslog. Also call i915_capture_error_state() to make | |
2838 | * sure we get a record and make it available in debugfs. Fire a uevent | |
2839 | * so userspace knows something bad happened (should trigger collection | |
2840 | * of a ring dump etc.). | |
2841 | */ | |
58174462 MK |
2842 | void i915_handle_error(struct drm_device *dev, bool wedged, |
2843 | const char *fmt, ...) | |
35aed2e6 CW |
2844 | { |
2845 | struct drm_i915_private *dev_priv = dev->dev_private; | |
58174462 MK |
2846 | va_list args; |
2847 | char error_msg[80]; | |
35aed2e6 | 2848 | |
58174462 MK |
2849 | va_start(args, fmt); |
2850 | vscnprintf(error_msg, sizeof(error_msg), fmt, args); | |
2851 | va_end(args); | |
2852 | ||
2853 | i915_capture_error_state(dev, wedged, error_msg); | |
35aed2e6 | 2854 | i915_report_and_clear_eir(dev); |
8a905236 | 2855 | |
ba1234d1 | 2856 | if (wedged) { |
f69061be DV |
2857 | atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG, |
2858 | &dev_priv->gpu_error.reset_counter); | |
ba1234d1 | 2859 | |
11ed50ec | 2860 | /* |
17e1df07 DV |
2861 | * Wakeup waiting processes so that the reset work function |
2862 | * i915_error_work_func doesn't deadlock trying to grab various | |
2863 | * locks. By bumping the reset counter first, the woken | |
2864 | * processes will see a reset in progress and back off, | |
2865 | * releasing their locks and then wait for the reset completion. | |
2866 | * We must do this for _all_ gpu waiters that might hold locks | |
2867 | * that the reset work needs to acquire. | |
2868 | * | |
2869 | * Note: The wake_up serves as the required memory barrier to | |
2870 | * ensure that the waiters see the updated value of the reset | |
2871 | * counter atomic_t. | |
11ed50ec | 2872 | */ |
17e1df07 | 2873 | i915_error_wake_up(dev_priv, false); |
11ed50ec BG |
2874 | } |
2875 | ||
122f46ba DV |
2876 | /* |
2877 | * Our reset work can grab modeset locks (since it needs to reset the | |
2878 | * state of outstanding pageflips). Hence it must not be run on our own |
2879 | * dev_priv->wq work queue, for otherwise the flush_work in the pageflip |
2880 | * code will deadlock. | |
2881 | */ | |
2882 | schedule_work(&dev_priv->gpu_error.work); | |
8a905236 JB |
2883 | } |
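i915_handle_error() is variadic so callers can hand it a printf-style reason; the message is rendered once into a fixed on-stack buffer before state capture and the reset work are kicked off. The same capture pattern in a self-contained userspace form, with vsnprintf standing in for the kernel's vscnprintf and capture_error being a hypothetical name:

```c
#include <stdarg.h>
#include <stdio.h>

static void capture_error(const char *fmt, ...)
{
	va_list args;
	char error_msg[80];	/* same size as the driver's buffer */

	va_start(args, fmt);
	vsnprintf(error_msg, sizeof(error_msg), fmt, args);
	va_end(args);

	fprintf(stderr, "GPU error: %s\n", error_msg);
}

int main(void)
{
	capture_error("Kicking stuck wait on %s", "render ring");
	return 0;
}
```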
2884 | ||
42f52ef8 KP |
2885 | /* Called from drm generic code, passed 'crtc' which |
2886 | * we use as a pipe index | |
2887 | */ | |
f71d4af4 | 2888 | static int i915_enable_vblank(struct drm_device *dev, int pipe) |
0a3e67a4 | 2889 | { |
2d1013dd | 2890 | struct drm_i915_private *dev_priv = dev->dev_private; |
e9d21d7f | 2891 | unsigned long irqflags; |
71e0ffa5 | 2892 | |
5eddb70b | 2893 | if (!i915_pipe_enabled(dev, pipe)) |
71e0ffa5 | 2894 | return -EINVAL; |
0a3e67a4 | 2895 | |
1ec14ad3 | 2896 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
f796cf8f | 2897 | if (INTEL_INFO(dev)->gen >= 4) |
7c463586 | 2898 | i915_enable_pipestat(dev_priv, pipe, |
755e9019 | 2899 | PIPE_START_VBLANK_INTERRUPT_STATUS); |
e9d21d7f | 2900 | else |
7c463586 | 2901 | i915_enable_pipestat(dev_priv, pipe, |
755e9019 | 2902 | PIPE_VBLANK_INTERRUPT_STATUS); |
1ec14ad3 | 2903 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
8692d00e | 2904 | |
0a3e67a4 JB |
2905 | return 0; |
2906 | } | |
2907 | ||
f71d4af4 | 2908 | static int ironlake_enable_vblank(struct drm_device *dev, int pipe) |
f796cf8f | 2909 | { |
2d1013dd | 2910 | struct drm_i915_private *dev_priv = dev->dev_private; |
f796cf8f | 2911 | unsigned long irqflags; |
b518421f | 2912 | uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : |
40da17c2 | 2913 | DE_PIPE_VBLANK(pipe); |
f796cf8f JB |
2914 | |
2915 | if (!i915_pipe_enabled(dev, pipe)) | |
2916 | return -EINVAL; | |
2917 | ||
2918 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
b518421f | 2919 | ironlake_enable_display_irq(dev_priv, bit); |
b1f14ad0 JB |
2920 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
2921 | ||
2922 | return 0; | |
2923 | } | |
2924 | ||
7e231dbe JB |
2925 | static int valleyview_enable_vblank(struct drm_device *dev, int pipe) |
2926 | { | |
2d1013dd | 2927 | struct drm_i915_private *dev_priv = dev->dev_private; |
7e231dbe | 2928 | unsigned long irqflags; |
7e231dbe JB |
2929 | |
2930 | if (!i915_pipe_enabled(dev, pipe)) | |
2931 | return -EINVAL; | |
2932 | ||
2933 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
31acc7f5 | 2934 | i915_enable_pipestat(dev_priv, pipe, |
755e9019 | 2935 | PIPE_START_VBLANK_INTERRUPT_STATUS); |
7e231dbe JB |
2936 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
2937 | ||
2938 | return 0; | |
2939 | } | |
2940 | ||
abd58f01 BW |
2941 | static int gen8_enable_vblank(struct drm_device *dev, int pipe) |
2942 | { | |
2943 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2944 | unsigned long irqflags; | |
abd58f01 BW |
2945 | |
2946 | if (!i915_pipe_enabled(dev, pipe)) | |
2947 | return -EINVAL; | |
2948 | ||
2949 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
7167d7c6 DV |
2950 | dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK; |
2951 | I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); | |
2952 | POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); | |
abd58f01 BW |
2953 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
2954 | return 0; | |
2955 | } | |
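All of the enable/disable_vblank variants share one shape: take the irq spinlock, flip the relevant bit in a cached mask, write the mask register, and do a posting read so the write is not left sitting in a posted-write buffer. A userspace model of the gen8 flavor, with a mutex standing in for the irq spinlock, a plain variable for GEN8_DE_PIPE_IMR, and an illustrative bit value rather than the real GEN8_PIPE_VBLANK:

```c
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define PIPE_VBLANK_BIT (1u << 0)	/* illustrative stand-in */

static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t de_irq_mask = ~0u;	/* everything masked initially */
static uint32_t imr_reg;		/* simulated mask register */

static void write_imr(uint32_t val) { imr_reg = val; }
static uint32_t posting_read_imr(void) { return imr_reg; }

static void set_vblank(int enable)
{
	pthread_mutex_lock(&irq_lock);
	if (enable)
		de_irq_mask &= ~PIPE_VBLANK_BIT;	/* unmask = deliver */
	else
		de_irq_mask |= PIPE_VBLANK_BIT;		/* mask = suppress */
	write_imr(de_irq_mask);
	(void)posting_read_imr();	/* flush the posted write */
	pthread_mutex_unlock(&irq_lock);
}

int main(void)
{
	set_vblank(1);
	printf("IMR after enable:  0x%08x\n", imr_reg);
	set_vblank(0);
	printf("IMR after disable: 0x%08x\n", imr_reg);
	return 0;
}
```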
2956 | ||
42f52ef8 KP |
2957 | /* Called from drm generic code, passed 'crtc' which |
2958 | * we use as a pipe index | |
2959 | */ | |
f71d4af4 | 2960 | static void i915_disable_vblank(struct drm_device *dev, int pipe) |
0a3e67a4 | 2961 | { |
2d1013dd | 2962 | struct drm_i915_private *dev_priv = dev->dev_private; |
e9d21d7f | 2963 | unsigned long irqflags; |
0a3e67a4 | 2964 | |
1ec14ad3 | 2965 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
f796cf8f | 2966 | i915_disable_pipestat(dev_priv, pipe, |
755e9019 ID |
2967 | PIPE_VBLANK_INTERRUPT_STATUS | |
2968 | PIPE_START_VBLANK_INTERRUPT_STATUS); | |
f796cf8f JB |
2969 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
2970 | } | |
2971 | ||
f71d4af4 | 2972 | static void ironlake_disable_vblank(struct drm_device *dev, int pipe) |
f796cf8f | 2973 | { |
2d1013dd | 2974 | struct drm_i915_private *dev_priv = dev->dev_private; |
f796cf8f | 2975 | unsigned long irqflags; |
b518421f | 2976 | uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : |
40da17c2 | 2977 | DE_PIPE_VBLANK(pipe); |
f796cf8f JB |
2978 | |
2979 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
b518421f | 2980 | ironlake_disable_display_irq(dev_priv, bit); |
b1f14ad0 JB |
2981 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
2982 | } | |
2983 | ||
7e231dbe JB |
2984 | static void valleyview_disable_vblank(struct drm_device *dev, int pipe) |
2985 | { | |
2d1013dd | 2986 | struct drm_i915_private *dev_priv = dev->dev_private; |
7e231dbe | 2987 | unsigned long irqflags; |
7e231dbe JB |
2988 | |
2989 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
31acc7f5 | 2990 | i915_disable_pipestat(dev_priv, pipe, |
755e9019 | 2991 | PIPE_START_VBLANK_INTERRUPT_STATUS); |
7e231dbe JB |
2992 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
2993 | } | |
2994 | ||
abd58f01 BW |
2995 | static void gen8_disable_vblank(struct drm_device *dev, int pipe) |
2996 | { | |
2997 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2998 | unsigned long irqflags; | |
abd58f01 BW |
2999 | |
3000 | if (!i915_pipe_enabled(dev, pipe)) | |
3001 | return; | |
3002 | ||
3003 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
7167d7c6 DV |
3004 | dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK; |
3005 | I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); | |
3006 | POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); | |
abd58f01 BW |
3007 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
3008 | } | |
3009 | ||
893eead0 | 3010 | static u32 |
a4872ba6 | 3011 | ring_last_seqno(struct intel_engine_cs *ring) |
852835f3 | 3012 | { |
893eead0 CW |
3013 | return list_entry(ring->request_list.prev, |
3014 | struct drm_i915_gem_request, list)->seqno; | |
3015 | } | |
3016 | ||
9107e9d2 | 3017 | static bool |
a4872ba6 | 3018 | ring_idle(struct intel_engine_cs *ring, u32 seqno) |
9107e9d2 CW |
3019 | { |
3020 | return (list_empty(&ring->request_list) || | |
3021 | i915_seqno_passed(seqno, ring_last_seqno(ring))); | |
f65d9421 BG |
3022 | } |
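ring_idle() leans on i915_seqno_passed() for its ordering test. Sequence numbers are 32-bit and wrap, so the comparison is done on the signed difference, which stays correct as long as the two values are within 2^31 of each other. A sketch of that helper, written from memory of the i915_drv.h definition and so best treated as an assumption:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
	/* Wrap-safe: the subtraction is modular, and the sign of the
	 * result tells us which value is "ahead". */
	return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
	printf("%d\n", seqno_passed(10, 5));		/* 1 */
	printf("%d\n", seqno_passed(5, 10));		/* 0 */
	printf("%d\n", seqno_passed(3, 0xfffffffd));	/* 1: wrapped past 0 */
	return 0;
}
```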
3023 | ||
a028c4b0 DV |
3024 | static bool |
3025 | ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr) | |
3026 | { | |
3027 | if (INTEL_INFO(dev)->gen >= 8) { | |
a6cdb93a | 3028 | return (ipehr >> 23) == 0x1c; |
a028c4b0 DV |
3029 | } else { |
3030 | ipehr &= ~MI_SEMAPHORE_SYNC_MASK; | |
3031 | return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | | |
3032 | MI_SEMAPHORE_REGISTER); | |
3033 | } | |
3034 | } | |
3035 | ||
a4872ba6 | 3036 | static struct intel_engine_cs * |
a6cdb93a | 3037 | semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset) |
921d42ea DV |
3038 | { |
3039 | struct drm_i915_private *dev_priv = ring->dev->dev_private; | |
a4872ba6 | 3040 | struct intel_engine_cs *signaller; |
921d42ea DV |
3041 | int i; |
3042 | ||
3043 | if (INTEL_INFO(dev_priv->dev)->gen >= 8) { | |
a6cdb93a RV |
3044 | for_each_ring(signaller, dev_priv, i) { |
3045 | if (ring == signaller) | |
3046 | continue; | |
3047 | ||
3048 | if (offset == signaller->semaphore.signal_ggtt[ring->id]) | |
3049 | return signaller; | |
3050 | } | |
921d42ea DV |
3051 | } else { |
3052 | u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK; | |
3053 | ||
3054 | for_each_ring(signaller, dev_priv, i) { | |
3055 | if (ring == signaller) |
3056 | continue; | |
3057 | ||
ebc348b2 | 3058 | if (sync_bits == signaller->semaphore.mbox.wait[ring->id]) |
921d42ea DV |
3059 | return signaller; |
3060 | } | |
3061 | } | |
3062 | ||
a6cdb93a RV |
3063 | DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n", |
3064 | ring->id, ipehr, offset); | |
921d42ea DV |
3065 | |
3066 | return NULL; | |
3067 | } | |
3068 | ||
a4872ba6 OM |
3069 | static struct intel_engine_cs * |
3070 | semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno) | |
a24a11e6 CW |
3071 | { |
3072 | struct drm_i915_private *dev_priv = ring->dev->dev_private; | |
88fe429d | 3073 | u32 cmd, ipehr, head; |
a6cdb93a RV |
3074 | u64 offset = 0; |
3075 | int i, backwards; | |
a24a11e6 CW |
3076 | |
3077 | ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); | |
a028c4b0 | 3078 | if (!ipehr_is_semaphore_wait(ring->dev, ipehr)) |
6274f212 | 3079 | return NULL; |
a24a11e6 | 3080 | |
88fe429d DV |
3081 | /* |
3082 | * HEAD is likely pointing to the dword after the actual command, | |
3083 | * so scan backwards until we find the MBOX. But limit it to just 3 | |
a6cdb93a RV |
3084 | * or 4 dwords depending on the semaphore wait command size. |
3085 | * Note that we don't care about ACTHD here since that might | |
88fe429d DV |
3086 | * point at a batch, and semaphores are always emitted into the |
3087 | * ringbuffer itself. | |
a24a11e6 | 3088 | */ |
88fe429d | 3089 | head = I915_READ_HEAD(ring) & HEAD_ADDR; |
a6cdb93a | 3090 | backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4; |
88fe429d | 3091 | |
a6cdb93a | 3092 | for (i = backwards; i; --i) { |
88fe429d DV |
3093 | /* |
3094 | * Be paranoid and presume the hw has gone off into the wild - | |
3095 | * our ring is smaller than what the hardware (and hence | |
3096 | * HEAD_ADDR) allows. Also handles wrap-around. | |
3097 | */ | |
ee1b1e5e | 3098 | head &= ring->buffer->size - 1; |
88fe429d DV |
3099 | |
3100 | /* This here seems to blow up */ | |
ee1b1e5e | 3101 | cmd = ioread32(ring->buffer->virtual_start + head); |
a24a11e6 CW |
3102 | if (cmd == ipehr) |
3103 | break; | |
3104 | ||
88fe429d DV |
3105 | head -= 4; |
3106 | } | |
a24a11e6 | 3107 | |
88fe429d DV |
3108 | if (!i) |
3109 | return NULL; | |
a24a11e6 | 3110 | |
ee1b1e5e | 3111 | *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1; |
a6cdb93a RV |
3112 | if (INTEL_INFO(ring->dev)->gen >= 8) { |
3113 | offset = ioread32(ring->buffer->virtual_start + head + 12); | |
3114 | offset <<= 32; | |
3115 | offset |= ioread32(ring->buffer->virtual_start + head + 8); /* OR in the low dword */ |
3116 | } | |
3117 | return semaphore_wait_to_signaller_ring(ring, ipehr, offset); | |
a24a11e6 CW |
3118 | } |
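The scan above walks backwards from HEAD one dword at a time, relying on the ring size being a power of two so a simple mask handles wrap-around. The same traversal extracted into a self-contained form, with the ring contents, size, and starting head invented for illustration:

```c
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 16u	/* bytes; must be a power of two for the mask trick */

static const uint32_t ring[RING_SIZE / 4] = { 0x11, 0x22, 0xdead, 0x33 };

static int find_backwards(uint32_t head, uint32_t target, int limit)
{
	while (limit--) {
		head &= RING_SIZE - 1;	/* handles wrap-around, as above */
		if (ring[head / 4] == target)
			return (int)(head / 4);
		head -= 4;		/* step back one dword */
	}
	return -1;	/* bounded scan: give up, like the !i case above */
}

int main(void)
{
	/* HEAD typically points just past the command of interest. */
	printf("found at dword %d\n", find_backwards(12, 0xdead, 4));
	return 0;
}
```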
3119 | ||
a4872ba6 | 3120 | static int semaphore_passed(struct intel_engine_cs *ring) |
6274f212 CW |
3121 | { |
3122 | struct drm_i915_private *dev_priv = ring->dev->dev_private; | |
a4872ba6 | 3123 | struct intel_engine_cs *signaller; |
a0d036b0 | 3124 | u32 seqno; |
6274f212 | 3125 | |
4be17381 | 3126 | ring->hangcheck.deadlock++; |
6274f212 CW |
3127 | |
3128 | signaller = semaphore_waits_for(ring, &seqno); | |
4be17381 CW |
3129 | if (signaller == NULL) |
3130 | return -1; | |
3131 | ||
3132 | /* Prevent pathological recursion due to driver bugs */ | |
3133 | if (signaller->hangcheck.deadlock >= I915_NUM_RINGS) | |
6274f212 CW |
3134 | return -1; |
3135 | ||
4be17381 CW |
3136 | if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno)) |
3137 | return 1; | |
3138 | ||
a0d036b0 CW |
3139 | /* cursory check for an unkickable deadlock */ |
3140 | if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE && | |
3141 | semaphore_passed(signaller) < 0) | |
4be17381 CW |
3142 | return -1; |
3143 | ||
3144 | return 0; | |
6274f212 CW |
3145 | } |
3146 | ||
3147 | static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) | |
3148 | { | |
a4872ba6 | 3149 | struct intel_engine_cs *ring; |
6274f212 CW |
3150 | int i; |
3151 | ||
3152 | for_each_ring(ring, dev_priv, i) | |
4be17381 | 3153 | ring->hangcheck.deadlock = 0; |
6274f212 CW |
3154 | } |
3155 | ||
ad8beaea | 3156 | static enum intel_ring_hangcheck_action |
a4872ba6 | 3157 | ring_stuck(struct intel_engine_cs *ring, u64 acthd) |
1ec14ad3 CW |
3158 | { |
3159 | struct drm_device *dev = ring->dev; | |
3160 | struct drm_i915_private *dev_priv = dev->dev_private; | |
9107e9d2 CW |
3161 | u32 tmp; |
3162 | ||
f260fe7b MK |
3163 | if (acthd != ring->hangcheck.acthd) { |
3164 | if (acthd > ring->hangcheck.max_acthd) { | |
3165 | ring->hangcheck.max_acthd = acthd; | |
3166 | return HANGCHECK_ACTIVE; | |
3167 | } | |
3168 | ||
3169 | return HANGCHECK_ACTIVE_LOOP; | |
3170 | } | |
6274f212 | 3171 | |
9107e9d2 | 3172 | if (IS_GEN2(dev)) |
f2f4d82f | 3173 | return HANGCHECK_HUNG; |
9107e9d2 CW |
3174 | |
3175 | /* Is the chip hanging on a WAIT_FOR_EVENT? | |
3176 | * If so we can simply poke the RB_WAIT bit | |
3177 | * and break the hang. This should work on | |
3178 | * all but the second generation chipsets. | |
3179 | */ | |
3180 | tmp = I915_READ_CTL(ring); | |
1ec14ad3 | 3181 | if (tmp & RING_WAIT) { |
58174462 MK |
3182 | i915_handle_error(dev, false, |
3183 | "Kicking stuck wait on %s", | |
3184 | ring->name); | |
1ec14ad3 | 3185 | I915_WRITE_CTL(ring, tmp); |
f2f4d82f | 3186 | return HANGCHECK_KICK; |
6274f212 CW |
3187 | } |
3188 | ||
3189 | if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { | |
3190 | switch (semaphore_passed(ring)) { | |
3191 | default: | |
f2f4d82f | 3192 | return HANGCHECK_HUNG; |
6274f212 | 3193 | case 1: |
58174462 MK |
3194 | i915_handle_error(dev, false, |
3195 | "Kicking stuck semaphore on %s", | |
3196 | ring->name); | |
6274f212 | 3197 | I915_WRITE_CTL(ring, tmp); |
f2f4d82f | 3198 | return HANGCHECK_KICK; |
6274f212 | 3199 | case 0: |
f2f4d82f | 3200 | return HANGCHECK_WAIT; |
6274f212 | 3201 | } |
9107e9d2 | 3202 | } |
ed5cbb03 | 3203 | |
f2f4d82f | 3204 | return HANGCHECK_HUNG; |
ed5cbb03 MK |
3205 | } |
3206 | ||
f65d9421 BG |
3207 | /** |
3208 | * This is called when the chip hasn't reported back with completed | |
05407ff8 MK |
3209 | * batchbuffers in a long time. We keep track of seqno progress per ring, |
3210 | * and if there is no progress the hangcheck score for that ring is increased. |
3211 | * Further, acthd is inspected to see if the ring is stuck. If it is, |
3212 | * we kick the ring. If we see no progress on three subsequent calls |
3213 | * we assume the chip is wedged and try to fix it by resetting it. |
f65d9421 | 3214 | */ |
a658b5d2 | 3215 | static void i915_hangcheck_elapsed(unsigned long data) |
f65d9421 BG |
3216 | { |
3217 | struct drm_device *dev = (struct drm_device *)data; | |
2d1013dd | 3218 | struct drm_i915_private *dev_priv = dev->dev_private; |
a4872ba6 | 3219 | struct intel_engine_cs *ring; |
b4519513 | 3220 | int i; |
05407ff8 | 3221 | int busy_count = 0, rings_hung = 0; |
9107e9d2 CW |
3222 | bool stuck[I915_NUM_RINGS] = { 0 }; |
3223 | #define BUSY 1 | |
3224 | #define KICK 5 | |
3225 | #define HUNG 20 | |
893eead0 | 3226 | |
d330a953 | 3227 | if (!i915.enable_hangcheck) |
3e0dc6b0 BW |
3228 | return; |
3229 | ||
b4519513 | 3230 | for_each_ring(ring, dev_priv, i) { |
50877445 CW |
3231 | u64 acthd; |
3232 | u32 seqno; | |
9107e9d2 | 3233 | bool busy = true; |
05407ff8 | 3234 | |
6274f212 CW |
3235 | semaphore_clear_deadlocks(dev_priv); |
3236 | ||
05407ff8 MK |
3237 | seqno = ring->get_seqno(ring, false); |
3238 | acthd = intel_ring_get_active_head(ring); | |
b4519513 | 3239 | |
9107e9d2 CW |
3240 | if (ring->hangcheck.seqno == seqno) { |
3241 | if (ring_idle(ring, seqno)) { | |
da661464 MK |
3242 | ring->hangcheck.action = HANGCHECK_IDLE; |
3243 | ||
9107e9d2 CW |
3244 | if (waitqueue_active(&ring->irq_queue)) { |
3245 | /* Issue a wake-up to catch stuck h/w. */ | |
094f9a54 | 3246 | if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) { |
f4adcd24 DV |
3247 | if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring))) |
3248 | DRM_ERROR("Hangcheck timer elapsed... %s idle\n", | |
3249 | ring->name); | |
3250 | else | |
3251 | DRM_INFO("Fake missed irq on %s\n", | |
3252 | ring->name); | |
094f9a54 CW |
3253 | wake_up_all(&ring->irq_queue); |
3254 | } | |
3255 | /* Safeguard against driver failure */ | |
3256 | ring->hangcheck.score += BUSY; | |
9107e9d2 CW |
3257 | } else |
3258 | busy = false; | |
05407ff8 | 3259 | } else { |
6274f212 CW |
3260 | /* We always increment the hangcheck score |
3261 | * if the ring is busy and still processing | |
3262 | * the same request, so that no single request | |
3263 | * can run indefinitely (such as a chain of | |
3264 | * batches). The only time we do not increment | |
3266 | * the hangcheck score on this ring is if this |
3266 | * ring is in a legitimate wait for another | |
3267 | * ring. In that case the waiting ring is a | |
3268 | * victim and we want to be sure we catch the | |
3269 | * right culprit. Then every time we do kick | |
3270 | * the ring, add a small increment to the | |
3271 | * score so that we can catch a batch that is | |
3272 | * being repeatedly kicked and so responsible | |
3273 | * for stalling the machine. | |
3274 | */ | |
ad8beaea MK |
3275 | ring->hangcheck.action = ring_stuck(ring, |
3276 | acthd); | |
3277 | ||
3278 | switch (ring->hangcheck.action) { | |
da661464 | 3279 | case HANGCHECK_IDLE: |
f2f4d82f | 3280 | case HANGCHECK_WAIT: |
f2f4d82f | 3281 | case HANGCHECK_ACTIVE: |
f260fe7b MK |
3282 | break; |
3283 | case HANGCHECK_ACTIVE_LOOP: | |
ea04cb31 | 3284 | ring->hangcheck.score += BUSY; |
6274f212 | 3285 | break; |
f2f4d82f | 3286 | case HANGCHECK_KICK: |
ea04cb31 | 3287 | ring->hangcheck.score += KICK; |
6274f212 | 3288 | break; |
f2f4d82f | 3289 | case HANGCHECK_HUNG: |
ea04cb31 | 3290 | ring->hangcheck.score += HUNG; |
6274f212 CW |
3291 | stuck[i] = true; |
3292 | break; | |
3293 | } | |
05407ff8 | 3294 | } |
9107e9d2 | 3295 | } else { |
da661464 MK |
3296 | ring->hangcheck.action = HANGCHECK_ACTIVE; |
3297 | ||
9107e9d2 CW |
3298 | /* Gradually reduce the count so that we catch DoS |
3299 | * attempts across multiple batches. | |
3300 | */ | |
3301 | if (ring->hangcheck.score > 0) | |
3302 | ring->hangcheck.score--; | |
f260fe7b MK |
3303 | |
3304 | ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0; | |
d1e61e7f CW |
3305 | } |
3306 | ||
05407ff8 MK |
3307 | ring->hangcheck.seqno = seqno; |
3308 | ring->hangcheck.acthd = acthd; | |
9107e9d2 | 3309 | busy_count += busy; |
893eead0 | 3310 | } |
b9201c14 | 3311 | |
92cab734 | 3312 | for_each_ring(ring, dev_priv, i) { |
b6b0fac0 | 3313 | if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) { |
b8d88d1d DV |
3314 | DRM_INFO("%s on %s\n", |
3315 | stuck[i] ? "stuck" : "no progress", | |
3316 | ring->name); | |
a43adf07 | 3317 | rings_hung++; |
92cab734 MK |
3318 | } |
3319 | } | |
3320 | ||
05407ff8 | 3321 | if (rings_hung) |
58174462 | 3322 | return i915_handle_error(dev, true, "Ring hung"); |
f65d9421 | 3323 | |
05407ff8 MK |
3324 | if (busy_count) |
3325 | /* Reset the timer in case the chip hangs without another request |
3326 | * being added */ | |
10cd45b6 MK |
3327 | i915_queue_hangcheck(dev); |
3328 | } | |
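The scoring policy in i915_hangcheck_elapsed() boils down to asymmetric increments (small for plain busy-looping, larger for a kick, largest for a confirmed stall) plus a slow decay while progress is being made, so a batch that is repeatedly kicked eventually crosses the hung threshold. A condensed model of just that state machine; the threshold stands in for HANGCHECK_SCORE_RING_HUNG and is an assumption:

```c
#include <stdio.h>

enum action { PROGRESS, ACTIVE_LOOP, KICKED, STUCK };

#define BUSY 1
#define KICK 5
#define HUNG 20
#define SCORE_RING_HUNG 31	/* assumed threshold */

static int score;

static void hangcheck_tick(enum action a)
{
	switch (a) {
	case PROGRESS:
		if (score > 0)
			score--;	/* decay: catches DoS across many batches */
		break;
	case ACTIVE_LOOP:
		score += BUSY;
		break;
	case KICKED:
		score += KICK;
		break;
	case STUCK:
		score += HUNG;
		break;
	}
}

int main(void)
{
	enum action trace[] = { ACTIVE_LOOP, KICKED, PROGRESS, STUCK, STUCK };
	for (unsigned int i = 0; i < sizeof(trace) / sizeof(trace[0]); i++) {
		hangcheck_tick(trace[i]);
		printf("tick %u: score %d%s\n", i, score,
		       score >= SCORE_RING_HUNG ? " -> declare hung" : "");
	}
	return 0;
}
```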
3329 | ||
3330 | void i915_queue_hangcheck(struct drm_device *dev) | |
3331 | { | |
3332 | struct drm_i915_private *dev_priv = dev->dev_private; | |
d330a953 | 3333 | if (!i915.enable_hangcheck) |
10cd45b6 MK |
3334 | return; |
3335 | ||
3336 | mod_timer(&dev_priv->gpu_error.hangcheck_timer, | |
3337 | round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); | |
f65d9421 BG |
3338 | } |
3339 | ||
1c69eb42 | 3340 | static void ibx_irq_reset(struct drm_device *dev) |
91738a95 PZ |
3341 | { |
3342 | struct drm_i915_private *dev_priv = dev->dev_private; | |
3343 | ||
3344 | if (HAS_PCH_NOP(dev)) | |
3345 | return; | |
3346 | ||
f86f3fb0 | 3347 | GEN5_IRQ_RESET(SDE); |
105b122e PZ |
3348 | |
3349 | if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev)) | |
3350 | I915_WRITE(SERR_INT, 0xffffffff); | |
622364b6 | 3351 | } |
105b122e | 3352 | |
622364b6 PZ |
3353 | /* |
3354 | * SDEIER is also touched by the interrupt handler to work around missed PCH | |
3355 | * interrupts. Hence we can't update it after the interrupt handler is enabled - | |
3356 | * instead we unconditionally enable all PCH interrupt sources here, but then | |
3357 | * only unmask them as needed with SDEIMR. | |
3358 | * | |
3359 | * This function needs to be called before interrupts are enabled. | |
3360 | */ | |
3361 | static void ibx_irq_pre_postinstall(struct drm_device *dev) | |
3362 | { | |
3363 | struct drm_i915_private *dev_priv = dev->dev_private; | |
3364 | ||
3365 | if (HAS_PCH_NOP(dev)) | |
3366 | return; | |
3367 | ||
3368 | WARN_ON(I915_READ(SDEIER) != 0); | |
91738a95 PZ |
3369 | I915_WRITE(SDEIER, 0xffffffff); |
3370 | POSTING_READ(SDEIER); | |
3371 | } | |
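The workaround comment above depends on the usual enable/mask split: a source only raises an interrupt when its IER bit is set and its IMR bit is clear, so turning everything on in SDEIER once and gating delivery purely through SDEIMR keeps the handler's SDEIER manipulation from losing events. That AND/NOT relationship as plain data, with an illustrative bit position:

```c
#include <stdint.h>
#include <stdio.h>

static uint32_t ier = 0xffffffff;	/* all sources enabled up front */
static uint32_t imr = 0xffffffff;	/* ...but initially all masked */

static int irq_fires(uint32_t event)
{
	return (ier & event) && !(imr & event);	/* enabled AND unmasked */
}

int main(void)
{
	uint32_t hotplug = 1u << 21;	/* illustrative bit position */

	printf("before unmask: %d\n", irq_fires(hotplug));	/* 0 */
	imr &= ~hotplug;	/* unmask only what we actually want */
	printf("after unmask:  %d\n", irq_fires(hotplug));	/* 1 */
	return 0;
}
```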
3372 | ||
7c4d664e | 3373 | static void gen5_gt_irq_reset(struct drm_device *dev) |
d18ea1b5 DV |
3374 | { |
3375 | struct drm_i915_private *dev_priv = dev->dev_private; | |
3376 | ||
f86f3fb0 | 3377 | GEN5_IRQ_RESET(GT); |
a9d356a6 | 3378 | if (INTEL_INFO(dev)->gen >= 6) |
f86f3fb0 | 3379 | GEN5_IRQ_RESET(GEN6_PM); |
d18ea1b5 DV |
3380 | } |
3381 | ||
1da177e4 LT |
3382 | /* drm_dma.h hooks |
3383 | */ | |
be30b29f | 3384 | static void ironlake_irq_reset(struct drm_device *dev) |
036a4a7d | 3385 | { |
2d1013dd | 3386 | struct drm_i915_private *dev_priv = dev->dev_private; |
036a4a7d | 3387 | |
0c841212 | 3388 | I915_WRITE(HWSTAM, 0xffffffff); |
bdfcdb63 | 3389 | |
f86f3fb0 | 3390 | GEN5_IRQ_RESET(DE); |
c6d954c1 PZ |
3391 | if (IS_GEN7(dev)) |
3392 | I915_WRITE(GEN7_ERR_INT, 0xffffffff); | |
036a4a7d | 3393 | |
7c4d664e | 3394 | gen5_gt_irq_reset(dev); |
c650156a | 3395 | |
1c69eb42 | 3396 | ibx_irq_reset(dev); |
7d99163d | 3397 | } |
c650156a | 3398 | |
7e231dbe JB |
3399 | static void valleyview_irq_preinstall(struct drm_device *dev) |
3400 | { | |
2d1013dd | 3401 | struct drm_i915_private *dev_priv = dev->dev_private; |
7e231dbe JB |
3402 | int pipe; |
3403 | ||
7e231dbe JB |
3404 | /* VLV magic */ |
3405 | I915_WRITE(VLV_IMR, 0); | |
3406 | I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); | |
3407 | I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); | |
3408 | I915_WRITE(RING_IMR(BLT_RING_BASE), 0); | |
3409 | ||
7e231dbe JB |
3410 | /* and GT */ |
3411 | I915_WRITE(GTIIR, I915_READ(GTIIR)); | |
3412 | I915_WRITE(GTIIR, I915_READ(GTIIR)); | |
d18ea1b5 | 3413 | |
7c4d664e | 3414 | gen5_gt_irq_reset(dev); |
7e231dbe JB |
3415 | |
3416 | I915_WRITE(DPINVGTT, 0xff); | |
3417 | ||
3418 | I915_WRITE(PORT_HOTPLUG_EN, 0); | |
3419 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | |
055e393f | 3420 | for_each_pipe(dev_priv, pipe) |
7e231dbe JB |
3421 | I915_WRITE(PIPESTAT(pipe), 0xffff); |
3422 | I915_WRITE(VLV_IIR, 0xffffffff); | |
3423 | I915_WRITE(VLV_IMR, 0xffffffff); | |
3424 | I915_WRITE(VLV_IER, 0x0); | |
3425 | POSTING_READ(VLV_IER); | |
3426 | } | |
3427 | ||
d6e3cca3 DV |
3428 | static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) |
3429 | { | |
3430 | GEN8_IRQ_RESET_NDX(GT, 0); | |
3431 | GEN8_IRQ_RESET_NDX(GT, 1); | |
3432 | GEN8_IRQ_RESET_NDX(GT, 2); | |
3433 | GEN8_IRQ_RESET_NDX(GT, 3); | |
3434 | } | |
3435 | ||
823f6b38 | 3436 | static void gen8_irq_reset(struct drm_device *dev) |
abd58f01 BW |
3437 | { |
3438 | struct drm_i915_private *dev_priv = dev->dev_private; | |
3439 | int pipe; | |
3440 | ||
abd58f01 BW |
3441 | I915_WRITE(GEN8_MASTER_IRQ, 0); |
3442 | POSTING_READ(GEN8_MASTER_IRQ); | |
3443 | ||
d6e3cca3 | 3444 | gen8_gt_irq_reset(dev_priv); |
abd58f01 | 3445 | |
055e393f | 3446 | for_each_pipe(dev_priv, pipe) |
813bde43 PZ |
3447 | if (intel_display_power_enabled(dev_priv, |
3448 | POWER_DOMAIN_PIPE(pipe))) | |
3449 | GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); | |
abd58f01 | 3450 | |
f86f3fb0 PZ |
3451 | GEN5_IRQ_RESET(GEN8_DE_PORT_); |
3452 | GEN5_IRQ_RESET(GEN8_DE_MISC_); | |
3453 | GEN5_IRQ_RESET(GEN8_PCU_); | |
abd58f01 | 3454 | |
1c69eb42 | 3455 | ibx_irq_reset(dev); |
abd58f01 | 3456 | } |
09f2344d | 3457 | |
d49bdb0e PZ |
3458 | void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv) |
3459 | { | |
3460 | unsigned long irqflags; | |
1180e206 | 3461 | uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; |
d49bdb0e PZ |
3462 | |
3463 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
3464 | GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B], | |
1180e206 | 3465 | ~dev_priv->de_irq_mask[PIPE_B] | extra_ier); |
d49bdb0e | 3466 | GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C], |
1180e206 | 3467 | ~dev_priv->de_irq_mask[PIPE_C] | extra_ier); |
d49bdb0e PZ |
3468 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
3469 | } | |
3470 | ||
43f328d7 VS |
3471 | static void cherryview_irq_preinstall(struct drm_device *dev) |
3472 | { | |
3473 | struct drm_i915_private *dev_priv = dev->dev_private; | |
3474 | int pipe; | |
3475 | ||
3476 | I915_WRITE(GEN8_MASTER_IRQ, 0); | |
3477 | POSTING_READ(GEN8_MASTER_IRQ); | |
3478 | ||
d6e3cca3 | 3479 | gen8_gt_irq_reset(dev_priv); |
43f328d7 VS |
3480 | |
3481 | GEN5_IRQ_RESET(GEN8_PCU_); | |
3482 | ||
3483 | POSTING_READ(GEN8_PCU_IIR); | |
3484 | ||
3485 | I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); | |
3486 | ||
3487 | I915_WRITE(PORT_HOTPLUG_EN, 0); | |
3488 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | |
3489 | ||
055e393f | 3490 | for_each_pipe(dev_priv, pipe) |
43f328d7 VS |
3491 | I915_WRITE(PIPESTAT(pipe), 0xffff); |
3492 | ||
3493 | I915_WRITE(VLV_IMR, 0xffffffff); | |
3494 | I915_WRITE(VLV_IER, 0x0); | |
3495 | I915_WRITE(VLV_IIR, 0xffffffff); | |
3496 | POSTING_READ(VLV_IIR); | |
3497 | } | |
3498 | ||
82a28bcf | 3499 | static void ibx_hpd_irq_setup(struct drm_device *dev) |
7fe0b973 | 3500 | { |
2d1013dd | 3501 | struct drm_i915_private *dev_priv = dev->dev_private; |
82a28bcf | 3502 | struct intel_encoder *intel_encoder; |
fee884ed | 3503 | u32 hotplug_irqs, hotplug, enabled_irqs = 0; |
82a28bcf DV |
3504 | |
3505 | if (HAS_PCH_IBX(dev)) { | |
fee884ed | 3506 | hotplug_irqs = SDE_HOTPLUG_MASK; |
b2784e15 | 3507 | for_each_intel_encoder(dev, intel_encoder) |
cd569aed | 3508 | if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) |
fee884ed | 3509 | enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin]; |
82a28bcf | 3510 | } else { |
fee884ed | 3511 | hotplug_irqs = SDE_HOTPLUG_MASK_CPT; |
b2784e15 | 3512 | for_each_intel_encoder(dev, intel_encoder) |
cd569aed | 3513 | if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) |
fee884ed | 3514 | enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin]; |
82a28bcf | 3515 | } |
7fe0b973 | 3516 | |
fee884ed | 3517 | ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); |
82a28bcf DV |
3518 | |
3519 | /* | |
3520 | * Enable digital hotplug on the PCH, and configure the DP short pulse | |
3521 | * duration to 2ms (which is the minimum in the DisplayPort spec) |
3522 | * | |
3523 | * This register is the same on all known PCH chips. | |
3524 | */ | |
7fe0b973 KP |
3525 | hotplug = I915_READ(PCH_PORT_HOTPLUG); |
3526 | hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); | |
3527 | hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; | |
3528 | hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; | |
3529 | hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; | |
3530 | I915_WRITE(PCH_PORT_HOTPLUG, hotplug); | |
3531 | } | |
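The per-encoder loops above are table-driven: an encoder's hpd_pin indexes one of the hpd_ibx/hpd_cpt arrays and the matching register bit is OR-ed into enabled_irqs. The same accumulation pattern standalone, with invented pins and bit values rather than the real SDE_*_HOTPLUG definitions:

```c
#include <stdint.h>
#include <stdio.h>

enum hpd_pin { HPD_CRT, HPD_PORT_B, HPD_PORT_C, HPD_NUM_PINS };

/* Per-platform lookup table, analogous to hpd_ibx[]; values invented. */
static const uint32_t hpd_bits[HPD_NUM_PINS] = {
	[HPD_CRT]    = 1u << 11,
	[HPD_PORT_B] = 1u << 12,
	[HPD_PORT_C] = 1u << 13,
};

struct encoder { enum hpd_pin pin; int enabled; };

int main(void)
{
	const struct encoder encoders[] = {
		{ HPD_CRT, 1 }, { HPD_PORT_B, 0 }, { HPD_PORT_C, 1 },
	};
	uint32_t enabled_irqs = 0;

	for (unsigned int i = 0; i < sizeof(encoders) / sizeof(encoders[0]); i++)
		if (encoders[i].enabled)
			enabled_irqs |= hpd_bits[encoders[i].pin];

	printf("enabled_irqs = 0x%08x\n", enabled_irqs);	/* bits 11 and 13 */
	return 0;
}
```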
3532 | ||
d46da437 PZ |
3533 | static void ibx_irq_postinstall(struct drm_device *dev) |
3534 | { | |
2d1013dd | 3535 | struct drm_i915_private *dev_priv = dev->dev_private; |
82a28bcf | 3536 | u32 mask; |
e5868a31 | 3537 | |
692a04cf DV |
3538 | if (HAS_PCH_NOP(dev)) |
3539 | return; | |
3540 | ||
105b122e | 3541 | if (HAS_PCH_IBX(dev)) |
5c673b60 | 3542 | mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; |
105b122e | 3543 | else |
5c673b60 | 3544 | mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; |
8664281b | 3545 | |
337ba017 | 3546 | GEN5_ASSERT_IIR_IS_ZERO(SDEIIR); |
d46da437 | 3547 | I915_WRITE(SDEIMR, ~mask); |
d46da437 PZ |
3548 | } |
3549 | ||
0a9a8c91 DV |
3550 | static void gen5_gt_irq_postinstall(struct drm_device *dev) |
3551 | { | |
3552 | struct drm_i915_private *dev_priv = dev->dev_private; | |
3553 | u32 pm_irqs, gt_irqs; | |
3554 | ||
3555 | pm_irqs = gt_irqs = 0; | |
3556 | ||
3557 | dev_priv->gt_irq_mask = ~0; | |
040d2baa | 3558 | if (HAS_L3_DPF(dev)) { |
0a9a8c91 | 3559 | /* L3 parity interrupt is always unmasked. */ |
35a85ac6 BW |
3560 | dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev); |
3561 | gt_irqs |= GT_PARITY_ERROR(dev); | |
0a9a8c91 DV |
3562 | } |
3563 | ||
3564 | gt_irqs |= GT_RENDER_USER_INTERRUPT; | |
3565 | if (IS_GEN5(dev)) { | |
3566 | gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | | |
3567 | ILK_BSD_USER_INTERRUPT; | |
3568 | } else { | |
3569 | gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; | |
3570 | } | |
3571 | ||
35079899 | 3572 | GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs); |
0a9a8c91 DV |
3573 | |
3574 | if (INTEL_INFO(dev)->gen >= 6) { | |
a6706b45 | 3575 | pm_irqs |= dev_priv->pm_rps_events; |
0a9a8c91 DV |
3576 | |
3577 | if (HAS_VEBOX(dev)) | |
3578 | pm_irqs |= PM_VEBOX_USER_INTERRUPT; | |
3579 | ||
605cd25b | 3580 | dev_priv->pm_irq_mask = 0xffffffff; |
35079899 | 3581 | GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs); |
0a9a8c91 DV |
3582 | } |
3583 | } | |
3584 | ||
f71d4af4 | 3585 | static int ironlake_irq_postinstall(struct drm_device *dev) |
036a4a7d | 3586 | { |
4bc9d430 | 3587 | unsigned long irqflags; |
2d1013dd | 3588 | struct drm_i915_private *dev_priv = dev->dev_private; |
8e76f8dc PZ |
3589 | u32 display_mask, extra_mask; |
3590 | ||
3591 | if (INTEL_INFO(dev)->gen >= 7) { | |
3592 | display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | | |
3593 | DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | | |
3594 | DE_PLANEB_FLIP_DONE_IVB | | |
5c673b60 | 3595 | DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB); |
8e76f8dc | 3596 | extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | |
5c673b60 | 3597 | DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB); |
8e76f8dc PZ |
3598 | } else { |
3599 | display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | | |
3600 | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | | |
5b3a856b | 3601 | DE_AUX_CHANNEL_A | |
5b3a856b DV |
3602 | DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | |
3603 | DE_POISON); | |
5c673b60 DV |
3604 | extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | |
3605 | DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN; | |
8e76f8dc | 3606 | } |
036a4a7d | 3607 | |
1ec14ad3 | 3608 | dev_priv->irq_mask = ~display_mask; |
036a4a7d | 3609 | |
0c841212 PZ |
3610 | I915_WRITE(HWSTAM, 0xeffe); |
3611 | ||
622364b6 PZ |
3612 | ibx_irq_pre_postinstall(dev); |
3613 | ||
35079899 | 3614 | GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask); |
036a4a7d | 3615 | |
0a9a8c91 | 3616 | gen5_gt_irq_postinstall(dev); |
036a4a7d | 3617 | |
d46da437 | 3618 | ibx_irq_postinstall(dev); |
7fe0b973 | 3619 | |
f97108d1 | 3620 | if (IS_IRONLAKE_M(dev)) { |
6005ce42 DV |
3621 | /* Enable PCU event interrupts |
3622 | * | |
3623 | * spinlocking not required here for correctness since interrupt | |
4bc9d430 DV |
3624 | * setup is guaranteed to run in single-threaded context. But we |
3625 | * need it to make the assert_spin_locked happy. */ | |
3626 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
f97108d1 | 3627 | ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); |
4bc9d430 | 3628 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
f97108d1 JB |
3629 | } |
3630 | ||
036a4a7d ZW |
3631 | return 0; |
3632 | } | |
3633 | ||
f8b79e58 ID |
3634 | static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv) |
3635 | { | |
3636 | u32 pipestat_mask; | |
3637 | u32 iir_mask; | |
3638 | ||
3639 | pipestat_mask = PIPESTAT_INT_STATUS_MASK | | |
3640 | PIPE_FIFO_UNDERRUN_STATUS; | |
3641 | ||
3642 | I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask); | |
3643 | I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask); | |
3644 | POSTING_READ(PIPESTAT(PIPE_A)); | |
3645 | ||
3646 | pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | | |
3647 | PIPE_CRC_DONE_INTERRUPT_STATUS; | |
3648 | ||
3649 | i915_enable_pipestat(dev_priv, PIPE_A, pipestat_mask | | |
3650 | PIPE_GMBUS_INTERRUPT_STATUS); | |
3651 | i915_enable_pipestat(dev_priv, PIPE_B, pipestat_mask); | |
3652 | ||
3653 | iir_mask = I915_DISPLAY_PORT_INTERRUPT | | |
3654 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | | |
3655 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; | |
3656 | dev_priv->irq_mask &= ~iir_mask; | |
3657 | ||
3658 | I915_WRITE(VLV_IIR, iir_mask); | |
3659 | I915_WRITE(VLV_IIR, iir_mask); | |
3660 | I915_WRITE(VLV_IMR, dev_priv->irq_mask); | |
3661 | I915_WRITE(VLV_IER, ~dev_priv->irq_mask); | |
3662 | POSTING_READ(VLV_IER); | |
3663 | } | |
3664 | ||
3665 | static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv) | |
3666 | { | |
3667 | u32 pipestat_mask; | |
3668 | u32 iir_mask; | |
3669 | ||
3670 | iir_mask = I915_DISPLAY_PORT_INTERRUPT | | |
3671 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | | |
6c7fba04 | 3672 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; |
f8b79e58 ID |
3673 | |
3674 | dev_priv->irq_mask |= iir_mask; | |
3675 | I915_WRITE(VLV_IER, ~dev_priv->irq_mask); | |
3676 | I915_WRITE(VLV_IMR, dev_priv->irq_mask); | |
3677 | I915_WRITE(VLV_IIR, iir_mask); | |
3678 | I915_WRITE(VLV_IIR, iir_mask); | |
3679 | POSTING_READ(VLV_IIR); | |
3680 | ||
3681 | pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | | |
3682 | PIPE_CRC_DONE_INTERRUPT_STATUS; | |
3683 | ||
3684 | i915_disable_pipestat(dev_priv, PIPE_A, pipestat_mask | | |
3685 | PIPE_GMBUS_INTERRUPT_STATUS); | |
3686 | i915_disable_pipestat(dev_priv, PIPE_B, pipestat_mask); | |
3687 | ||
3688 | pipestat_mask = PIPESTAT_INT_STATUS_MASK | | |
3689 | PIPE_FIFO_UNDERRUN_STATUS; | |
3690 | I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask); | |
3691 | I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask); | |
3692 | POSTING_READ(PIPESTAT(PIPE_A)); | |
3693 | } | |
3694 | ||
3695 | void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) | |
3696 | { | |
3697 | assert_spin_locked(&dev_priv->irq_lock); | |
3698 | ||
3699 | if (dev_priv->display_irqs_enabled) | |
3700 | return; | |
3701 | ||
3702 | dev_priv->display_irqs_enabled = true; | |
3703 | ||
3704 | if (dev_priv->dev->irq_enabled) | |
3705 | valleyview_display_irqs_install(dev_priv); | |
3706 | } | |
3707 | ||
3708 | void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) | |
3709 | { | |
3710 | assert_spin_locked(&dev_priv->irq_lock); | |
3711 | ||
3712 | if (!dev_priv->display_irqs_enabled) | |
3713 | return; | |
3714 | ||
3715 | dev_priv->display_irqs_enabled = false; | |
3716 | ||
3717 | if (dev_priv->dev->irq_enabled) | |
3718 | valleyview_display_irqs_uninstall(dev_priv); | |
3719 | } | |
3720 | ||
7e231dbe JB |
3721 | static int valleyview_irq_postinstall(struct drm_device *dev) |
3722 | { | |
2d1013dd | 3723 | struct drm_i915_private *dev_priv = dev->dev_private; |
b79480ba | 3724 | unsigned long irqflags; |
7e231dbe | 3725 | |
f8b79e58 | 3726 | dev_priv->irq_mask = ~0; |
7e231dbe | 3727 | |
20afbda2 DV |
3728 | I915_WRITE(PORT_HOTPLUG_EN, 0); |
3729 | POSTING_READ(PORT_HOTPLUG_EN); | |
3730 | ||
7e231dbe | 3731 | I915_WRITE(VLV_IMR, dev_priv->irq_mask); |
f8b79e58 | 3732 | I915_WRITE(VLV_IER, ~dev_priv->irq_mask); |
7e231dbe | 3733 | I915_WRITE(VLV_IIR, 0xffffffff); |
7e231dbe JB |
3734 | POSTING_READ(VLV_IER); |
3735 | ||
b79480ba DV |
3736 | /* Interrupt setup is already guaranteed to be single-threaded, this is |
3737 | * just to make the assert_spin_locked check happy. */ | |
3738 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
f8b79e58 ID |
3739 | if (dev_priv->display_irqs_enabled) |
3740 | valleyview_display_irqs_install(dev_priv); | |
b79480ba | 3741 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
31acc7f5 | 3742 | |
7e231dbe JB |
3743 | I915_WRITE(VLV_IIR, 0xffffffff); |
3744 | I915_WRITE(VLV_IIR, 0xffffffff); | |
3745 | ||
0a9a8c91 | 3746 | gen5_gt_irq_postinstall(dev); |
7e231dbe JB |
3747 | |
3748 | /* ack & enable invalid PTE error interrupts */ | |
3749 | #if 0 /* FIXME: add support to irq handler for checking these bits */ | |
3750 | I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); | |
3751 | I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); | |
3752 | #endif | |
3753 | ||
3754 | I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); | |
20afbda2 DV |
3755 | |
3756 | return 0; | |
3757 | } | |
3758 | ||
abd58f01 BW |
3759 | static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) |
3760 | { | |
abd58f01 BW |
3761 | /* These are interrupts we'll toggle with the ring mask register */ |
3762 | uint32_t gt_interrupts[] = { | |
3763 | GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | | |
73d477f6 | 3764 | GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT | |
abd58f01 | 3765 | GT_RENDER_L3_PARITY_ERROR_INTERRUPT | |
73d477f6 OM |
3766 | GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT | |
3767 | GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT, | |
abd58f01 | 3768 | GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | |
73d477f6 OM |
3769 | GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | |
3770 | GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT | | |
3771 | GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, | |
abd58f01 | 3772 | 0, |
73d477f6 OM |
3773 | GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT | |
3774 | GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT | |
abd58f01 BW |
3775 | }; |
3776 | ||
0961021a | 3777 | dev_priv->pm_irq_mask = 0xffffffff; |
9a2d2d87 D |
3778 | GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]); |
3779 | GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]); | |
3780 | GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, dev_priv->pm_rps_events); | |
3781 | GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]); | |
abd58f01 BW |
3782 | } |
3783 | ||
3784 | static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) | |
3785 | { | |
d0e1f1cb | 3786 | uint32_t de_pipe_masked = GEN8_PIPE_PRIMARY_FLIP_DONE | |
13b3a0a7 | 3787 | GEN8_PIPE_CDCLK_CRC_DONE | |
13b3a0a7 | 3788 | GEN8_DE_PIPE_IRQ_FAULT_ERRORS; |
5c673b60 DV |
3789 | uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | |
3790 | GEN8_PIPE_FIFO_UNDERRUN; | |
abd58f01 | 3791 | int pipe; |
13b3a0a7 DV |
3792 | dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; |
3793 | dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; | |
3794 | dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; | |
abd58f01 | 3795 | |
055e393f | 3796 | for_each_pipe(dev_priv, pipe) |
813bde43 PZ |
3797 | if (intel_display_power_enabled(dev_priv, |
3798 | POWER_DOMAIN_PIPE(pipe))) | |
3799 | GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, | |
3800 | dev_priv->de_irq_mask[pipe], | |
3801 | de_pipe_enables); | |
abd58f01 | 3802 | |
35079899 | 3803 | GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A); |
abd58f01 BW |
3804 | } |
3805 | ||
3806 | static int gen8_irq_postinstall(struct drm_device *dev) | |
3807 | { | |
3808 | struct drm_i915_private *dev_priv = dev->dev_private; | |
3809 | ||
622364b6 PZ |
3810 | ibx_irq_pre_postinstall(dev); |
3811 | ||
abd58f01 BW |
3812 | gen8_gt_irq_postinstall(dev_priv); |
3813 | gen8_de_irq_postinstall(dev_priv); | |
3814 | ||
3815 | ibx_irq_postinstall(dev); | |
3816 | ||
3817 | I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); | |
3818 | POSTING_READ(GEN8_MASTER_IRQ); | |
3819 | ||
3820 | return 0; | |
3821 | } | |
3822 | ||
43f328d7 VS |
3823 | static int cherryview_irq_postinstall(struct drm_device *dev) |
3824 | { | |
3825 | struct drm_i915_private *dev_priv = dev->dev_private; | |
3826 | u32 enable_mask = I915_DISPLAY_PORT_INTERRUPT | | |
3827 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | | |
43f328d7 | 3828 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | |
3278f67f VS |
3829 | I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; |
3830 | u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV | | |
3831 | PIPE_CRC_DONE_INTERRUPT_STATUS; | |
43f328d7 VS |
3832 | unsigned long irqflags; |
3833 | int pipe; | |
3834 | ||
3835 | /* | |
3836 | * Leave vblank interrupts masked initially. enable/disable will | |
3837 | * toggle them based on usage. | |
3838 | */ | |
3278f67f | 3839 | dev_priv->irq_mask = ~enable_mask; |
43f328d7 | 3840 | |
055e393f | 3841 | for_each_pipe(dev_priv, pipe) |
43f328d7 VS |
3842 | I915_WRITE(PIPESTAT(pipe), 0xffff); |
3843 | ||
3844 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
3278f67f | 3845 | i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); |
055e393f | 3846 | for_each_pipe(dev_priv, pipe) |
43f328d7 VS |
3847 | i915_enable_pipestat(dev_priv, pipe, pipestat_enable); |
3848 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | |
3849 | ||
3850 | I915_WRITE(VLV_IIR, 0xffffffff); | |
3851 | I915_WRITE(VLV_IMR, dev_priv->irq_mask); | |
3852 | I915_WRITE(VLV_IER, enable_mask); | |
3853 | ||
3854 | gen8_gt_irq_postinstall(dev_priv); | |
3855 | ||
3856 | I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE); | |
3857 | POSTING_READ(GEN8_MASTER_IRQ); | |
3858 | ||
3859 | return 0; | |
3860 | } | |
3861 | ||
abd58f01 BW |
3862 | static void gen8_irq_uninstall(struct drm_device *dev) |
3863 | { | |
3864 | struct drm_i915_private *dev_priv = dev->dev_private; | |
abd58f01 BW |
3865 | |
3866 | if (!dev_priv) | |
3867 | return; | |
3868 | ||
823f6b38 | 3869 | gen8_irq_reset(dev); |
abd58f01 BW |
3870 | } |
3871 | ||
7e231dbe JB |
3872 | static void valleyview_irq_uninstall(struct drm_device *dev) |
3873 | { | |
2d1013dd | 3874 | struct drm_i915_private *dev_priv = dev->dev_private; |
f8b79e58 | 3875 | unsigned long irqflags; |
7e231dbe JB |
3876 | int pipe; |
3877 | ||
3878 | if (!dev_priv) | |
3879 | return; | |
3880 | ||
843d0e7d ID |
3881 | I915_WRITE(VLV_MASTER_IER, 0); |
3882 | ||
055e393f | 3883 | for_each_pipe(dev_priv, pipe) |
7e231dbe JB |
3884 | I915_WRITE(PIPESTAT(pipe), 0xffff); |
3885 | ||
3886 | I915_WRITE(HWSTAM, 0xffffffff); | |
3887 | I915_WRITE(PORT_HOTPLUG_EN, 0); | |
3888 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | |
f8b79e58 ID |
3889 | |
3890 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
3891 | if (dev_priv->display_irqs_enabled) | |
3892 | valleyview_display_irqs_uninstall(dev_priv); | |
3893 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | |
3894 | ||
3895 | dev_priv->irq_mask = 0; | |
3896 | ||
7e231dbe JB |
3897 | I915_WRITE(VLV_IIR, 0xffffffff); |
3898 | I915_WRITE(VLV_IMR, 0xffffffff); | |
3899 | I915_WRITE(VLV_IER, 0x0); | |
3900 | POSTING_READ(VLV_IER); | |
3901 | } | |
3902 | ||
43f328d7 VS |
3903 | static void cherryview_irq_uninstall(struct drm_device *dev) |
3904 | { | |
3905 | struct drm_i915_private *dev_priv = dev->dev_private; | |
3906 | int pipe; | |
3907 | ||
3908 | if (!dev_priv) | |
3909 | return; | |
3910 | ||
3911 | I915_WRITE(GEN8_MASTER_IRQ, 0); | |
3912 | POSTING_READ(GEN8_MASTER_IRQ); | |
3913 | ||
3914 | #define GEN8_IRQ_FINI_NDX(type, which) \ | |
3915 | do { \ | |
3916 | I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \ | |
3917 | I915_WRITE(GEN8_##type##_IER(which), 0); \ | |
3918 | I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ | |
3919 | POSTING_READ(GEN8_##type##_IIR(which)); \ | |
3920 | I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ | |
3921 | } while (0) | |
3922 | ||
3923 | #define GEN8_IRQ_FINI(type) \ | |
3924 | do { \ | |
3925 | I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \ | |
3926 | I915_WRITE(GEN8_##type##_IER, 0); \ | |
3927 | I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \ | |
3928 | POSTING_READ(GEN8_##type##_IIR); \ | |
3929 | I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \ | |
3930 | } while (0) | |
3931 | ||
3932 | GEN8_IRQ_FINI_NDX(GT, 0); | |
3933 | GEN8_IRQ_FINI_NDX(GT, 1); | |
3934 | GEN8_IRQ_FINI_NDX(GT, 2); | |
3935 | GEN8_IRQ_FINI_NDX(GT, 3); | |
3936 | ||
3937 | GEN8_IRQ_FINI(PCU); | |
3938 | ||
3939 | #undef GEN8_IRQ_FINI | |
3940 | #undef GEN8_IRQ_FINI_NDX | |
3941 | ||
3942 | I915_WRITE(PORT_HOTPLUG_EN, 0); | |
3943 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | |
3944 | ||
055e393f | 3945 | for_each_pipe(dev_priv, pipe) |
43f328d7 VS |
3946 | I915_WRITE(PIPESTAT(pipe), 0xffff); |
3947 | ||
3948 | I915_WRITE(VLV_IMR, 0xffffffff); | |
3949 | I915_WRITE(VLV_IER, 0x0); | |
3950 | I915_WRITE(VLV_IIR, 0xffffffff); | |
3951 | POSTING_READ(VLV_IIR); | |
3952 | } | |
3953 | ||
f71d4af4 | 3954 | static void ironlake_irq_uninstall(struct drm_device *dev) |
036a4a7d | 3955 | { |
2d1013dd | 3956 | struct drm_i915_private *dev_priv = dev->dev_private; |
4697995b JB |
3957 | |
3958 | if (!dev_priv) | |
3959 | return; | |
3960 | ||
be30b29f | 3961 | ironlake_irq_reset(dev); |
036a4a7d ZW |
3962 | } |
3963 | ||
a266c7d5 | 3964 | static void i8xx_irq_preinstall(struct drm_device * dev) |
1da177e4 | 3965 | { |
2d1013dd | 3966 | struct drm_i915_private *dev_priv = dev->dev_private; |
9db4a9c7 | 3967 | int pipe; |
91e3738e | 3968 | |
055e393f | 3969 | for_each_pipe(dev_priv, pipe) |
9db4a9c7 | 3970 | I915_WRITE(PIPESTAT(pipe), 0); |
a266c7d5 CW |
3971 | I915_WRITE16(IMR, 0xffff); |
3972 | I915_WRITE16(IER, 0x0); | |
3973 | POSTING_READ16(IER); | |
c2798b19 CW |
3974 | } |
3975 | ||
3976 | static int i8xx_irq_postinstall(struct drm_device *dev) | |
3977 | { | |
2d1013dd | 3978 | struct drm_i915_private *dev_priv = dev->dev_private; |
379ef82d | 3979 | unsigned long irqflags; |
c2798b19 | 3980 | |
c2798b19 CW |
3981 | I915_WRITE16(EMR, |
3982 | ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); | |
3983 | ||
3984 | /* Unmask the interrupts that we always want on. */ | |
3985 | dev_priv->irq_mask = | |
3986 | ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | | |
3987 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | | |
3988 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | | |
3989 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | | |
3990 | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); | |
3991 | I915_WRITE16(IMR, dev_priv->irq_mask); | |
3992 | ||
3993 | I915_WRITE16(IER, | |
3994 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | | |
3995 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | | |
3996 | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | | |
3997 | I915_USER_INTERRUPT); | |
3998 | POSTING_READ16(IER); | |
3999 | ||
379ef82d DV |
4000 | /* Interrupt setup is already guaranteed to be single-threaded, this is |
4001 | * just to make the assert_spin_locked check happy. */ | |
4002 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
755e9019 ID |
4003 | i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); |
4004 | i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); | |
379ef82d DV |
4005 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
4006 | ||
c2798b19 CW |
4007 | return 0; |
4008 | } | |
4009 | ||
90a72f87 VS |
4010 | /* |
4011 | * Returns true when a page flip has completed. | |
4012 | */ | |
4013 | static bool i8xx_handle_vblank(struct drm_device *dev, | |
1f1c2e24 | 4014 | int plane, int pipe, u32 iir) |
90a72f87 | 4015 | { |
2d1013dd | 4016 | struct drm_i915_private *dev_priv = dev->dev_private; |
1f1c2e24 | 4017 | u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); |
90a72f87 | 4018 | |
8d7849db | 4019 | if (!intel_pipe_handle_vblank(dev, pipe)) |
90a72f87 VS |
4020 | return false; |
4021 | ||
4022 | if ((iir & flip_pending) == 0) | |
d6bbafa1 | 4023 | goto check_page_flip; |
90a72f87 | 4024 | |
1f1c2e24 | 4025 | intel_prepare_page_flip(dev, plane); |
90a72f87 VS |
4026 | |
4027 | /* We detect FlipDone by looking for the change in PendingFlip from '1' | |
4028 | * to '0' on the following vblank, i.e. IIR has the Pendingflip | |
4029 | * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence | |
4030 | * the flip is completed (no longer pending). Since this doesn't raise | |
4031 | * an interrupt per se, we watch for the change at vblank. | |
4032 | */ | |
4033 | if (I915_READ16(ISR) & flip_pending) | |
d6bbafa1 | 4034 | goto check_page_flip; |
90a72f87 VS |
4035 | |
4036 | intel_finish_page_flip(dev, pipe); | |
90a72f87 | 4037 | return true; |
d6bbafa1 CW |
4038 | |
4039 | check_page_flip: | |
4040 | intel_check_page_flip(dev, pipe); | |
4041 | return false; | |
90a72f87 VS |
4042 | } |
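The FlipDone trick documented in the comment is a 1-to-0 edge detector sampled at vblank: the flip is armed when IIR reports PendingFlip, and considered complete on the first vblank where the status register no longer shows it pending. Reduced to that logic alone, with the register sampling simulated:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool flip_armed;

static void on_vblank(uint32_t iir_pending, uint32_t isr_pending)
{
	if (iir_pending)
		flip_armed = true;	/* MI_DISPLAY_FLIP was issued */

	if (flip_armed && !isr_pending) {
		flip_armed = false;	/* PendingFlip went 1 -> 0 */
		printf("flip complete\n");
	} else {
		printf("flip still pending\n");
	}
}

int main(void)
{
	on_vblank(1, 1);	/* flip issued, hardware still shows pending */
	on_vblank(0, 0);	/* deasserted by this vblank -> complete */
	return 0;
}
```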
4043 | ||
ff1f525e | 4044 | static irqreturn_t i8xx_irq_handler(int irq, void *arg) |
c2798b19 | 4045 | { |
45a83f84 | 4046 | struct drm_device *dev = arg; |
2d1013dd | 4047 | struct drm_i915_private *dev_priv = dev->dev_private; |
c2798b19 CW |
4048 | u16 iir, new_iir; |
4049 | u32 pipe_stats[2]; | |
4050 | unsigned long irqflags; | |
c2798b19 CW |
4051 | int pipe; |
4052 | u16 flip_mask = | |
4053 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | | |
4054 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; | |
4055 | ||
c2798b19 CW |
4056 | iir = I915_READ16(IIR); |
4057 | if (iir == 0) | |
4058 | return IRQ_NONE; | |
4059 | ||
4060 | while (iir & ~flip_mask) { | |
4061 | /* Can't rely on pipestat interrupt bit in iir as it might | |
4062 | * have been cleared after the pipestat interrupt was received. | |
4063 | * It doesn't set the bit in iir again, but it still produces | |
4064 | * interrupts (for non-MSI). | |
4065 | */ | |
4066 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
4067 | if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) | |
58174462 MK |
4068 | i915_handle_error(dev, false, |
4069 | "Command parser error, iir 0x%08x", | |
4070 | iir); | |
c2798b19 | 4071 | |
055e393f | 4072 | for_each_pipe(dev_priv, pipe) { |
c2798b19 CW |
4073 | int reg = PIPESTAT(pipe); |
4074 | pipe_stats[pipe] = I915_READ(reg); | |
4075 | ||
4076 | /* | |
4077 | * Clear the PIPE*STAT regs before the IIR | |
4078 | */ | |
2d9d2b0b | 4079 | if (pipe_stats[pipe] & 0x8000ffff) |
c2798b19 | 4080 | I915_WRITE(reg, pipe_stats[pipe]); |
c2798b19 CW |
4081 | } |
4082 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | |
4083 | ||
4084 | I915_WRITE16(IIR, iir & ~flip_mask); | |
4085 | new_iir = I915_READ16(IIR); /* Flush posted writes */ | |
4086 | ||
d05c617e | 4087 | i915_update_dri1_breadcrumb(dev); |
c2798b19 CW |
4088 | |
4089 | if (iir & I915_USER_INTERRUPT) | |
4090 | notify_ring(dev, &dev_priv->ring[RCS]); | |
4091 | ||
055e393f | 4092 | for_each_pipe(dev_priv, pipe) { |
1f1c2e24 | 4093 | int plane = pipe; |
3a77c4c4 | 4094 | if (HAS_FBC(dev)) |
1f1c2e24 VS |
4095 | plane = !plane; |
4096 | ||
4356d586 | 4097 | if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && |
1f1c2e24 VS |
4098 | i8xx_handle_vblank(dev, plane, pipe, iir)) |
4099 | flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); | |
c2798b19 | 4100 | |
4356d586 | 4101 | if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) |
277de95e | 4102 | i9xx_pipe_crc_irq_handler(dev, pipe); |
2d9d2b0b VS |
4103 | |
4104 | if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS && | |
4105 | intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) | |
fc2c807b | 4106 | DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); |
4356d586 | 4107 | } |
c2798b19 CW |
4108 | |
4109 | iir = new_iir; | |
4110 | } | |
4111 | ||
4112 | return IRQ_HANDLED; | |
4113 | } | |
4114 | ||
4115 | static void i8xx_irq_uninstall(struct drm_device * dev) | |
4116 | { | |
2d1013dd | 4117 | struct drm_i915_private *dev_priv = dev->dev_private; |
c2798b19 CW |
4118 | int pipe; |
4119 | ||
055e393f | 4120 | for_each_pipe(dev_priv, pipe) { |
c2798b19 CW |
4121 | /* Clear enable bits; then clear status bits */ |
4122 | I915_WRITE(PIPESTAT(pipe), 0); | |
4123 | I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); | |
4124 | } | |
4125 | I915_WRITE16(IMR, 0xffff); | |
4126 | I915_WRITE16(IER, 0x0); | |
4127 | I915_WRITE16(IIR, I915_READ16(IIR)); | |
4128 | } | |
4129 | ||
static void i915_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

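/*
 * IMR and IER act in series: an event can only raise a bit in IIR if it
 * is unmasked in IMR *and* enabled in IER, which is why the postinstall
 * routines below always program the pair together ("Enable in IER...
 * and unmask in IMR").  dev_priv->irq_mask caches the IMR value so that
 * single events can be toggled later without a register read.  A minimal
 * sketch of such a toggle, with a hypothetical helper name (the real
 * driver uses per-generation enable/disable helpers of this shape):
 */
#if 0	/* illustrative only, never built */
static void i915_sketch_enable_event(struct drm_i915_private *dev_priv,
				     u32 event_bit)
{
	dev_priv->irq_mask &= ~event_bit;	/* unmask in the cache... */
	I915_WRITE(IMR, dev_priv->irq_mask);	/* ...and in hardware     */
	POSTING_READ(IMR);			/* flush the posted write */
}
#endif
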
static int i915_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;
	unsigned long irqflags;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_enable_asle_pipestat(dev);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

/*
 * Returns true when a page flip has completed.  Shared by the gen3 and
 * gen4 handlers below; gen4 passes plane == pipe, while the gen3 handler
 * swaps the plane on FBC-capable parts.
 */
static bool i915_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		goto check_page_flip;

	intel_prepare_page_flip(dev, plane);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ(ISR) & flip_pending)
		goto check_page_flip;

	intel_finish_page_flip(dev, pipe);
	return true;

check_page_flip:
	intel_check_page_flip(dev, pipe);
	return false;
}

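/*
 * A minimal sketch (illustrative only, never built) of the IIR
 * round-trip that i915_irq_handler below is layered on: read IIR, ack
 * it with a write-1-to-clear, and re-read so that events arriving
 * mid-handler are picked up rather than lost behind an MSI edge.
 * The function name is hypothetical.
 */
#if 0
static irqreturn_t i915_irq_roundtrip_sketch(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir;

	iir = I915_READ(IIR);			/* which events fired?    */
	if (iir == 0)
		return IRQ_NONE;		/* shared line, not ours  */

	while (iir) {
		I915_WRITE(IIR, iir);		/* ack: write-1-to-clear  */
		new_iir = I915_READ(IIR);	/* flush the posted write */
		/* ... dispatch the events recorded in iir here ... */
		iir = new_iir;			/* pick up late arrivals  */
	}
	return IRQ_HANDLED;
}
#endif
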
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe, ret = IRQ_NONE;

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false,
					  "Command parser error, iir 0x%08x",
					  iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (I915_HAS_HOTPLUG(dev) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
			    intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
				DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

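/*
 * Full gen3 teardown: mask everything, drop the per-pipe enables, and
 * ack whatever is still latched in PIPESTAT and IIR so that a later
 * reinstall starts from a clean slate.
 */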
static void i915_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}

static void i965_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

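/*
 * Same shape as the gen3 postinstall, plus the gen4 extras: GMBUS
 * completion is routed through pipe A's PIPESTAT, G4X parts gain the
 * BSD ring user interrupt, and GM45 has its own page-table/privilege
 * error bits in EMR.
 */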
static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;
	u32 error_mask;
	unsigned long irqflags;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev);

	return 0;
}

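/*
 * Called with dev_priv->irq_lock held (hence the assert below); both
 * intel_hpd_init() and the storm re-enable work reach this through the
 * display.hpd_irq_setup hook.
 */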
static void i915_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder;
	u32 hotplug_en;

	assert_spin_locked(&dev_priv->irq_lock);

	if (I915_HAS_HOTPLUG(dev)) {
		hotplug_en = I915_READ(PORT_HOTPLUG_EN);
		hotplug_en &= ~HOTPLUG_INT_EN_MASK;
		/* Note HDMI and DP share hotplug bits */
		/* enable bits are the same for all generations */
		for_each_intel_encoder(dev, intel_encoder)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
		/* Programming the CRT detection parameters tends
		 * to generate a spurious hotplug event about three
		 * seconds later.  So just do it once.
		 */
		if (IS_G4X(dev))
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
		hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

		/* Ignore TV since it's buggy */
		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}
}

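/*
 * Gen4 variant of the gen3 loop above: the display port bit is consumed
 * unconditionally (no I915_HAS_HOTPLUG check), the BSD ring is signalled
 * separately, and GMBUS completion arrives via pipe A's PIPESTAT.
 */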
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	iir = I915_READ(IIR);

	for (;;) {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false,
					  "Command parser error, iir 0x%08x",
					  iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		for_each_pipe(dev_priv, pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
			    intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
				DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

static void i965_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

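/*
 * Delayed work that undoes hotplug-storm mitigation: pins parked in
 * HPD_DISABLED are flipped back to HPD_ENABLED and their connectors
 * returned to interrupt-driven detection.  The register writes need the
 * device awake, hence the runtime PM get/put pair around the body.
 */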
static void intel_hpd_irq_reenable(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv),
			     hotplug_reenable_work.work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	unsigned long irqflags;
	int i;

	intel_runtime_pm_get(dev_priv);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
		struct drm_connector *connector;

		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
			continue;

		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;

		list_for_each_entry(connector, &mode_config->connector_list, head) {
			struct intel_connector *intel_connector = to_intel_connector(connector);

			if (intel_connector->encoder->hpd_pin == i) {
				if (connector->polled != intel_connector->polled)
					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
							 connector->name);
				connector->polled = intel_connector->polled;
				if (!connector->polled)
					connector->polled = DRM_CONNECTOR_POLL_HPD;
			}
		}
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	intel_runtime_pm_put(dev_priv);
}

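/*
 * One-time driver init: pick the irq handler, pre/postinstall and
 * uninstall hooks, plus the vblank counter implementation, that match
 * the hardware.  The platform checks run from most to least specific:
 * Cherryview, Valleyview, gen8, PCH-split, then legacy gen2/3/4.
 */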
void intel_irq_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
		    i915_hangcheck_elapsed,
		    (unsigned long) dev);
	INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
			  intel_hpd_irq_reenable);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	/* Haven't installed the IRQ handler yet */
	dev_priv->pm._irqs_disabled = true;

	if (IS_GEN2(dev)) {
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequence numbers.
	 */
	if (!IS_GEN2(dev))
		dev->vblank_disable_immediate = true;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
		dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
	}

	if (IS_CHERRYVIEW(dev)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_GEN8(dev)) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}

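/*
 * (Re)arm hotplug detection: reset the per-pin storm statistics, decide
 * per connector between HPD and polled detection, then program the
 * hardware through the hpd_irq_setup hook under irq_lock.
 */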
void intel_hpd_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	unsigned long irqflags;
	int i;

	for (i = 1; i < HPD_NUM_PINS; i++) {
		dev_priv->hpd_stats[i].hpd_cnt = 0;
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
	}
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct intel_connector *intel_connector = to_intel_connector(connector);
		connector->polled = intel_connector->polled;
		if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
		if (intel_connector->mst_port)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
	}

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked checks happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

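/*
 * The pair below brackets runtime PM: disabling simply uninstalls the
 * interrupt support, while restoring replays the full preinstall +
 * postinstall sequence rather than unmasking in place.  The
 * pm._irqs_disabled flag records which state we are in.
 */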
/* Disable interrupts so we can allow runtime PM. */
void intel_runtime_pm_disable_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev->driver->irq_uninstall(dev);
	dev_priv->pm._irqs_disabled = true;
}

/* Restore interrupts so we can recover from runtime PM. */
void intel_runtime_pm_restore_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->pm._irqs_disabled = false;
	dev->driver->irq_preinstall(dev);
	dev->driver->irq_postinstall(dev);
}