/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
	u32 val = I915_READ(reg); \
	if (val) { \
		WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
		     (reg), val); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
	} \
} while (0)

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	POSTING_READ(GEN8_##type##_IER(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
	I915_WRITE(type##IMR, (imr_val)); \
	I915_WRITE(type##IER, (ier_val)); \
	POSTING_READ(type##IER); \
} while (0)

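/*
 * Illustrative expansion (editor's sketch, not in the original source):
 * the reset macros paste the type prefix onto the IMR/IER/IIR register
 * names, so for the ilk display engine GEN5_IRQ_RESET(DE) expands to
 * roughly:
 *
 *	I915_WRITE(DEIMR, 0xffffffff);
 *	POSTING_READ(DEIMR);
 *	I915_WRITE(DEIER, 0);
 *	I915_WRITE(DEIIR, 0xffffffff);
 *	POSTING_READ(DEIIR);
 *	I915_WRITE(DEIIR, 0xffffffff);
 *	POSTING_READ(DEIIR);
 *
 * i.e. mask everything, zero the enable register, then clear IIR twice
 * because it can queue up two events.
 */
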
/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(dev_priv->pm.irqs_disabled))
		return;

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(dev_priv->pm.irqs_disabled))
		return;

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(dev_priv->pm.irqs_disabled))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

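/*
 * Usage note (editor's sketch, not in the original source): the *_update_*
 * helpers in this file all share the same two-mask convention —
 * @interrupt_mask selects which bits to touch, @enabled_irq_mask selects
 * which of those end up unmasked. For example:
 *
 *	ilk_update_gt_irq(dev_priv, GT_RENDER_USER_INTERRUPT,
 *			  GT_RENDER_USER_INTERRUPT);	(unmask)
 *	ilk_update_gt_irq(dev_priv, GT_RENDER_USER_INTERRUPT, 0);	(mask)
 *
 * which is exactly what the ilk_enable_gt_irq()/ilk_disable_gt_irq()
 * wrappers above do.
 */
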
/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(dev_priv->pm.irqs_disabled))
		return;

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
		POSTING_READ(GEN6_PMIMR);
	}
}

void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, mask);
}

void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum pipe pipe;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}

/**
 * bdw_update_pm_irq - update GT interrupt 2
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 *
 * Copied from the snb function, updated with relevant register offsets
 */
static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(dev_priv->pm.irqs_disabled))
		return;

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN8_GT_IMR(2), dev_priv->pm_irq_mask);
		POSTING_READ(GEN8_GT_IMR(2));
	}
}

void bdw_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	bdw_update_pm_irq(dev_priv, mask, mask);
}

void bdw_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	bdw_update_pm_irq(dev_priv, mask, 0);
}

static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}

void i9xx_check_fifo_underruns(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	for_each_intel_crtc(dev, crtc) {
		u32 reg = PIPESTAT(crtc->pipe);
		u32 pipestat;

		if (crtc->cpu_fifo_underrun_disabled)
			continue;

		pipestat = I915_READ(reg) & 0xffff0000;
		if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
			continue;

		I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
		POSTING_READ(reg);

		DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

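/*
 * Editor's note (not in the original source): PIPESTAT keeps the interrupt
 * enable bits in the high 16 bits and the sticky status bits in the low 16,
 * and the status bits are cleared by writing one to them. That is why the
 * function above masks the read with 0xffff0000 and writes the preserved
 * enable bits back OR'ed with PIPE_FIFO_UNDERRUN_STATUS: it acks the
 * underrun without disturbing the interrupt enables.
 */
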
static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
					     enum pipe pipe,
					     bool enable, bool old)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0xffff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if (enable) {
		I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
		POSTING_READ(reg);
	} else {
		if (old && pipestat & PIPE_FIFO_UNDERRUN_STATUS)
			DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
	}
}

static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						 enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
					  DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}

static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe,
						  bool enable, bool old)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

		if (!ivb_can_enable_err_int(dev))
			return;

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

		if (old &&
		    I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
			DRM_ERROR("uncleared fifo underrun on pipe %c\n",
				  pipe_name(pipe));
		}
	}
}

static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	assert_spin_locked(&dev_priv->irq_lock);

	if (enable)
		dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
	else
		dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 uint32_t interrupt_mask,
					 uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(dev_priv->pm.irqs_disabled))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
#define ibx_enable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), 0)

static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

	if (enable)
		ibx_enable_display_interrupt(dev_priv, bit);
	else
		ibx_disable_display_interrupt(dev_priv, bit);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable, bool old)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(SERR_INT,
			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

		if (!cpt_can_enable_serr_int(dev))
			return;

		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
	} else {
		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

		if (old && I915_READ(SERR_INT) &
		    SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
			DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
				  transcoder_name(pch_transcoder));
		}
	}
}

/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, due to the fact that there's just one interrupt
 * mask/enable bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
						    enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool old;

	assert_spin_locked(&dev_priv->irq_lock);

	old = !intel_crtc->cpu_fifo_underrun_disabled;
	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev))
		i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
	else if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
	else if (IS_GEN8(dev))
		broadwell_set_fifo_underrun_reporting(dev, pipe, enable);

	return old;
}

bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	ret = __intel_set_cpu_fifo_underrun_reporting(dev, pipe, enable);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return ret;
}

static bool __cpu_fifo_underrun_reporting_enabled(struct drm_device *dev,
						  enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	return !intel_crtc->cpu_fifo_underrun_disabled;
}

/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's
 * just one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool old;

	/*
	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
	 * has only one pch transcoder A that all pipes can use. To avoid racy
	 * pch transcoder -> pipe lookups from interrupt code simply store the
	 * underrun statistics in crtc A. Since we never expose this anywhere
	 * nor use it outside of the fifo underrun code here using the "wrong"
	 * crtc on LPT won't cause issues.
	 */

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	old = !intel_crtc->pch_fifo_underrun_disabled;
	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable, old);

	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return old;
}


static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}

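/*
 * Editor's note (not in the original source): in the common case the
 * PIPESTAT enable bit sits exactly 16 bits above its status bit, hence the
 * default "status_mask << 16" above and in the callers below. VLV breaks
 * that symmetry for the sprite flip-done and PSR bits, which is what the
 * special cases in vlv_get_pipestat_enable_mask() handle.
 */
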
void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct intel_crtc *intel_crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		const struct drm_display_mode *mode =
			&intel_crtc->config.adjusted_mode;

		htotal = mode->crtc_htotal;
		hsync_start = mode->crtc_hsync_start;
		vbl_start = mode->crtc_vblank_start;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	} else {
		enum transcoder cpu_transcoder = (enum transcoder) pipe;

		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
		hsync_start = (I915_READ(HSYNC(cpu_transcoder)) & 0x1fff) + 1;
		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
		if ((I915_READ(PIPECONF(cpu_transcoder)) &
		     PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	}

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

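/*
 * Editor's note (not in the original source): the returned value stitches a
 * 24-bit frame counter together from the two register fields — high1
 * supplies bits 23:8 and low bits 7:0 — while "+ (pixel >= vbl_start)"
 * bumps the count by one once the pixel counter has passed the start of
 * vblank, so the cooked-up counter effectively increments at vblank start
 * rather than at the start of active.
 */
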
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))

static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev))
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (!intel_crtc->active) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc,
						     &to_intel_crtc(crtc)->config.adjusted_mode);
}

static bool intel_hpd_irq_event(struct drm_device *dev,
				struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	if (old_status == connector->status)
		return false;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
		      connector->base.id,
		      connector->name,
		      drm_get_connector_status_name(old_status),
		      drm_get_connector_status_name(connector->status));

	return true;
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	unsigned long irqflags;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	/* HPD irq before everything is fully set up. */
	if (!dev_priv->enable_hotplug_processing)
		return;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 connector->name);
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      connector->name, intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_timer(&dev_priv->hotplug_reenable_timer,
			  jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}

static void intel_hpd_irq_uninstall(struct drm_i915_private *dev_priv)
{
	del_timer_sync(&dev_priv->hotplug_reenable_timer);
}

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

static void notify_ring(struct drm_device *dev,
			struct intel_engine_cs *ring)
{
	if (!intel_ring_initialized(ring))
		return;

	trace_i915_gem_request_complete(ring);

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		intel_notify_mmio_flip(ring);

	wake_up_all(&ring->irq_queue);
	i915_queue_hangcheck(dev);
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	u32 pm_iir;
	int new_delay, adj;

	spin_lock_irq(&dev_priv->irq_lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	if (IS_BROADWELL(dev_priv->dev))
		bdw_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	else {
		/* Make sure not to corrupt PMIMR state used by ringbuffer */
		snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	}
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	adj = dev_priv->rps.last_adj;
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else {
			/* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else {
			/* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;
	} else { /* unknown event */
		new_delay = dev_priv->rps.cur_freq;
	}

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay = clamp_t(int, new_delay,
			    dev_priv->rps.min_freq_softlimit,
			    dev_priv->rps.max_freq_softlimit);

	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;

	if (IS_VALLEYVIEW(dev_priv->dev))
		valleyview_set_rps(dev_priv->dev, new_delay);
	else
		gen6_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}

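/*
 * Editor's note (not in the original source): the up/down paths above
 * implement a simple exponential ramp — each consecutive interrupt in the
 * same direction doubles last_adj (kept even on CHV, which can only encode
 * even frequency steps), a down-timeout snaps straight back to the
 * efficient frequency, and the final clamp_t() keeps the result inside the
 * sysfs softlimits.
 */
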

/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely that the same row will go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	unsigned long flags;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}

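/*
 * Editor's sketch (not in the original source): the uevent emitted above
 * reaches userspace as a KOBJ_CHANGE event on the drm card device with an
 * environment along the lines of
 *
 *	L3_PARITY_ERROR=1 ROW=3 BANK=1 SUBBANK=0 SLICE=0
 *
 * (the values here are made-up examples); tools can listen for it via udev
 * and remap the reported row.
 */
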
static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
		i915_handle_error(dev, false, "GT error interrupt 0x%08x",
				  gt_iir);
	}

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}

static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if ((pm_iir & dev_priv->pm_rps_events) == 0)
		return;

	spin_lock(&dev_priv->irq_lock);
	dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
	bdw_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
	spin_unlock(&dev_priv->irq_lock);

	queue_work(dev_priv->wq, &dev_priv->rps.work);
}

static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
				       struct drm_i915_private *dev_priv,
				       u32 master_ctl)
{
	u32 rcs, bcs, vcs;
	uint32_t tmp = 0;
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(0));
		if (tmp) {
			ret = IRQ_HANDLED;
			rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
			if (rcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[RCS]);
			if (bcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[BCS]);
			I915_WRITE(GEN8_GT_IIR(0), tmp);
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(1));
		if (tmp) {
			ret = IRQ_HANDLED;
			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[VCS]);
			vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[VCS2]);
			I915_WRITE(GEN8_GT_IIR(1), tmp);
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_PM_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(2));
		if (tmp & dev_priv->pm_rps_events) {
			ret = IRQ_HANDLED;
			gen8_rps_irq_handler(dev_priv, tmp);
			I915_WRITE(GEN8_GT_IIR(2),
				   tmp & dev_priv->pm_rps_events);
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(3));
		if (tmp) {
			ret = IRQ_HANDLED;
			vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[VECS]);
			I915_WRITE(GEN8_GT_IIR(3), tmp);
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	return ret;
}

#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

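/*
 * Editor's note (not in the original source): with the values above, the
 * detector below treats more than HPD_STORM_THRESHOLD (5) interrupts on one
 * pin inside a HPD_STORM_DETECT_PERIOD (1000 ms) window as an interrupt
 * storm: the pin is marked HPD_MARK_DISABLED and the hotplug work switches
 * the affected connector from hotplug detection to polling.
 */
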
10a504de 1524static inline void intel_hpd_irq_handler(struct drm_device *dev,
22062dba
DV
1525 u32 hotplug_trigger,
1526 const u32 *hpd)
b543fb04 1527{
2d1013dd 1528 struct drm_i915_private *dev_priv = dev->dev_private;
b543fb04 1529 int i;
10a504de 1530 bool storm_detected = false;
b543fb04 1531
91d131d2
DV
1532 if (!hotplug_trigger)
1533 return;
1534
cc9bd499
ID
1535 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
1536 hotplug_trigger);
1537
b5ea2d56 1538 spin_lock(&dev_priv->irq_lock);
b543fb04 1539 for (i = 1; i < HPD_NUM_PINS; i++) {
821450c6 1540
3ff04a16
DV
1541 if (hpd[i] & hotplug_trigger &&
1542 dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
1543 /*
1544 * On GMCH platforms the interrupt mask bits only
1545 * prevent irq generation, not the setting of the
1546 * hotplug bits itself. So only WARN about unexpected
1547 * interrupts on saner platforms.
1548 */
1549 WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
1550 "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
1551 hotplug_trigger, i, hpd[i]);
1552
1553 continue;
1554 }
b8f102e8 1555
b543fb04
EE
1556 if (!(hpd[i] & hotplug_trigger) ||
1557 dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
1558 continue;
1559
bc5ead8c 1560 dev_priv->hpd_event_bits |= (1 << i);
b543fb04
EE
1561 if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
1562 dev_priv->hpd_stats[i].hpd_last_jiffies
1563 + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
1564 dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
1565 dev_priv->hpd_stats[i].hpd_cnt = 0;
b8f102e8 1566 DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
b543fb04
EE
1567 } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
1568 dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
142e2398 1569 dev_priv->hpd_event_bits &= ~(1 << i);
b543fb04 1570 DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
10a504de 1571 storm_detected = true;
b543fb04
EE
1572 } else {
1573 dev_priv->hpd_stats[i].hpd_cnt++;
b8f102e8
EE
1574 DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
1575 dev_priv->hpd_stats[i].hpd_cnt);
b543fb04
EE
1576 }
1577 }
1578
10a504de
DV
1579 if (storm_detected)
1580 dev_priv->display.hpd_irq_setup(dev);
b5ea2d56 1581 spin_unlock(&dev_priv->irq_lock);
5876fa0d 1582
645416f5
DV
1583 /*
1584 * Our hotplug handler can grab modeset locks (by calling down into the
1585 * fb helpers). Hence it must not be run on our own dev_priv->wq work
1586 * queue for otherwise the flush_work in the pageflip code will
1587 * deadlock.
1588 */
1589 schedule_work(&dev_priv->hotplug_work);
b543fb04
EE
1590}
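
/*
 * Illustrative sketch (not driver code): the storm detection above boils
 * down to a per-pin sliding-window counter.  All names here are made up
 * for the example; "ticks" stands in for jiffies.
 */
#include <stdbool.h>
#include <stdint.h>

struct hpd_pin_model {
	uint64_t window_start;	/* tick that opened the current window */
	int cnt;		/* interrupts counted inside the window */
};

/* Returns true when the pin should be treated as storming. */
static bool hpd_storm_model(struct hpd_pin_model *p, uint64_t now,
			    uint64_t period, int threshold)
{
	if (now - p->window_start > period) {
		p->window_start = now;	/* window expired: start over */
		p->cnt = 0;
		return false;
	}
	if (p->cnt > threshold)
		return true;	/* caller disables the pin, falls back to polling */
	p->cnt++;
	return false;
}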
1591
515ac2bb
DV
1592static void gmbus_irq_handler(struct drm_device *dev)
1593{
2d1013dd 1594 struct drm_i915_private *dev_priv = dev->dev_private;
28c70f16 1595
28c70f16 1596 wake_up_all(&dev_priv->gmbus_wait_queue);
515ac2bb
DV
1597}
1598
ce99c256
DV
1599static void dp_aux_irq_handler(struct drm_device *dev)
1600{
2d1013dd 1601 struct drm_i915_private *dev_priv = dev->dev_private;
9ee32fea 1602
9ee32fea 1603 wake_up_all(&dev_priv->gmbus_wait_queue);
ce99c256
DV
1604}
1605
8bf1e9f1 1606#if defined(CONFIG_DEBUG_FS)
277de95e
DV
1607static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1608 uint32_t crc0, uint32_t crc1,
1609 uint32_t crc2, uint32_t crc3,
1610 uint32_t crc4)
8bf1e9f1
SH
1611{
1612 struct drm_i915_private *dev_priv = dev->dev_private;
1613 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
1614 struct intel_pipe_crc_entry *entry;
ac2300d4 1615 int head, tail;
b2c88f5b 1616
d538bbdf
DL
1617 spin_lock(&pipe_crc->lock);
1618
0c912c79 1619 if (!pipe_crc->entries) {
d538bbdf 1620 spin_unlock(&pipe_crc->lock);
0c912c79
DL
1621 DRM_ERROR("spurious interrupt\n");
1622 return;
1623 }
1624
d538bbdf
DL
1625 head = pipe_crc->head;
1626 tail = pipe_crc->tail;
b2c88f5b
DL
1627
1628 if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
d538bbdf 1629 spin_unlock(&pipe_crc->lock);
b2c88f5b
DL
1630 DRM_ERROR("CRC buffer overflowing\n");
1631 return;
1632 }
1633
1634 entry = &pipe_crc->entries[head];
8bf1e9f1 1635
8bc5e955 1636 entry->frame = dev->driver->get_vblank_counter(dev, pipe);
eba94eb9
DV
1637 entry->crc[0] = crc0;
1638 entry->crc[1] = crc1;
1639 entry->crc[2] = crc2;
1640 entry->crc[3] = crc3;
1641 entry->crc[4] = crc4;
b2c88f5b
DL
1642
1643 head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
d538bbdf
DL
1644 pipe_crc->head = head;
1645
1646 spin_unlock(&pipe_crc->lock);
07144428
DL
1647
1648 wake_up_interruptible(&pipe_crc->wq);
8bf1e9f1 1649}
277de95e
DV
1650#else
1651static inline void
1652display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1653 uint32_t crc0, uint32_t crc1,
1654 uint32_t crc2, uint32_t crc3,
1655 uint32_t crc4) {}
1656#endif
1657
eba94eb9 1658
277de95e 1659static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
5a69b89f
DV
1660{
1661 struct drm_i915_private *dev_priv = dev->dev_private;
1662
277de95e
DV
1663 display_pipe_crc_irq_handler(dev, pipe,
1664 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1665 0, 0, 0, 0);
5a69b89f
DV
1666}
1667
277de95e 1668static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
eba94eb9
DV
1669{
1670 struct drm_i915_private *dev_priv = dev->dev_private;
1671
277de95e
DV
1672 display_pipe_crc_irq_handler(dev, pipe,
1673 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1674 I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1675 I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1676 I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1677 I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
eba94eb9 1678}
5b3a856b 1679
277de95e 1680static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
5b3a856b
DV
1681{
1682 struct drm_i915_private *dev_priv = dev->dev_private;
0b5c5ed0
DV
1683 uint32_t res1, res2;
1684
1685 if (INTEL_INFO(dev)->gen >= 3)
1686 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1687 else
1688 res1 = 0;
1689
1690 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
1691 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1692 else
1693 res2 = 0;
5b3a856b 1694
277de95e
DV
1695 display_pipe_crc_irq_handler(dev, pipe,
1696 I915_READ(PIPE_CRC_RES_RED(pipe)),
1697 I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1698 I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1699 res1, res2);
5b3a856b 1700}
8bf1e9f1 1701
1403c0d4
PZ
1702/* The RPS events need forcewake, so we add them to a work queue and mask their
1703 * IMR bits until the work is done. Other interrupts can be processed without
1704 * the work queue. */
1705static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
baf02a1f 1706{
a6706b45 1707 if (pm_iir & dev_priv->pm_rps_events) {
59cdb63d 1708 spin_lock(&dev_priv->irq_lock);
a6706b45
D
1709 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1710 snb_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
59cdb63d 1711 spin_unlock(&dev_priv->irq_lock);
2adbee62
DV
1712
1713 queue_work(dev_priv->wq, &dev_priv->rps.work);
baf02a1f 1714 }
baf02a1f 1715
1403c0d4
PZ
1716 if (HAS_VEBOX(dev_priv->dev)) {
1717 if (pm_iir & PM_VEBOX_USER_INTERRUPT)
1718 notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
12638c57 1719
1403c0d4 1720 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
58174462
MK
1721 i915_handle_error(dev_priv->dev, false,
1722 "VEBOX CS error interrupt 0x%08x",
1723 pm_iir);
1403c0d4 1724 }
12638c57 1725 }
baf02a1f
BW
1726}
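
/*
 * Illustrative sketch (not driver code): the mask-and-defer pattern from
 * gen6_rps_irq_handler() above, with the IMR write and queue_work() call
 * replaced by stub callbacks so the shape is visible on its own.
 */
#include <stdint.h>

struct deferred_irq {
	uint32_t pending;			/* bits awaiting the work item */
	void (*hw_mask)(uint32_t bits);		/* stands in for the IMR write */
	void (*kick_worker)(void);		/* stands in for queue_work() */
};

static void defer_irq_bits(struct deferred_irq *d, uint32_t fired)
{
	/* the real driver holds dev_priv->irq_lock across these two steps */
	d->pending |= fired;	/* record what fired */
	d->hw_mask(fired);	/* keep it quiet until the worker runs */
	d->kick_worker();	/* heavy (forcewake) work happens later */
}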
1727
8d7849db
VS
1728static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
1729{
1730 struct intel_crtc *crtc;
1731
1732 if (!drm_handle_vblank(dev, pipe))
1733 return false;
1734
1735 crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
1736 wake_up(&crtc->vbl_wait);
1737
1738 return true;
1739}
1740
c1874ed7
ID
1741static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
1742{
1743 struct drm_i915_private *dev_priv = dev->dev_private;
91d181dd 1744 u32 pipe_stats[I915_MAX_PIPES] = { };
c1874ed7
ID
1745 int pipe;
1746
58ead0d7 1747 spin_lock(&dev_priv->irq_lock);
c1874ed7 1748 for_each_pipe(pipe) {
91d181dd 1749 int reg;
bbb5eebf 1750 u32 mask, iir_bit = 0;
91d181dd 1751
bbb5eebf
DV
1752 /*
1753 * PIPESTAT bits get signalled even when the interrupt is
1754 * disabled with the mask bits, and some of the status bits do
1755 * not generate interrupts at all (like the underrun bit). Hence
1756 * we need to be careful that we only handle what we want to
1757 * handle.
1758 */
1759 mask = 0;
1760 if (__cpu_fifo_underrun_reporting_enabled(dev, pipe))
1761 mask |= PIPE_FIFO_UNDERRUN_STATUS;
1762
1763 switch (pipe) {
1764 case PIPE_A:
1765 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1766 break;
1767 case PIPE_B:
1768 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1769 break;
3278f67f
VS
1770 case PIPE_C:
1771 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1772 break;
bbb5eebf
DV
1773 }
1774 if (iir & iir_bit)
1775 mask |= dev_priv->pipestat_irq_mask[pipe];
1776
1777 if (!mask)
91d181dd
ID
1778 continue;
1779
1780 reg = PIPESTAT(pipe);
bbb5eebf
DV
1781 mask |= PIPESTAT_INT_ENABLE_MASK;
1782 pipe_stats[pipe] = I915_READ(reg) & mask;
c1874ed7
ID
1783
1784 /*
1785 * Clear the PIPE*STAT regs before the IIR
1786 */
91d181dd
ID
1787 if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
1788 PIPESTAT_INT_STATUS_MASK))
c1874ed7
ID
1789 I915_WRITE(reg, pipe_stats[pipe]);
1790 }
58ead0d7 1791 spin_unlock(&dev_priv->irq_lock);
c1874ed7
ID
1792
1793 for_each_pipe(pipe) {
1794 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
8d7849db 1795 intel_pipe_handle_vblank(dev, pipe);
c1874ed7 1796
579a9b0e 1797 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
c1874ed7
ID
1798 intel_prepare_page_flip(dev, pipe);
1799 intel_finish_page_flip(dev, pipe);
1800 }
1801
1802 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1803 i9xx_pipe_crc_irq_handler(dev, pipe);
1804
1805 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
1806 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
1807 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
1808 }
1809
1810 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1811 gmbus_irq_handler(dev);
1812}
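
/*
 * Illustrative sketch (not driver code): because PIPESTAT bits latch even
 * while masked, the handler above always restricts the value it reads to
 * the bits it intends to act on before acking.  Names are invented for
 * the example.
 */
#include <stdint.h>

static uint32_t read_wanted_status(volatile uint32_t *status_reg,
				   uint32_t wanted)
{
	uint32_t stats = *status_reg & wanted;

	if (stats)
		*status_reg = stats;	/* ack only what will be handled */
	return stats;
}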
1813
16c6c56b
VS
1814static void i9xx_hpd_irq_handler(struct drm_device *dev)
1815{
1816 struct drm_i915_private *dev_priv = dev->dev_private;
1817 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
1818
1819 if (IS_G4X(dev)) {
1820 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1821
1822 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_g4x);
1823 } else {
1824 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1825
1826 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
1827 }
1828
1829 if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
1830 hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1831 dp_aux_irq_handler(dev);
1832
1833 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1834 /*
1835 * Make sure hotplug status is cleared before we clear IIR, or else we
1836 * may miss hotplug events.
1837 */
1838 POSTING_READ(PORT_HOTPLUG_STAT);
1839}
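
/*
 * Illustrative sketch (not driver code): POSTING_READ() above is simply a
 * read-back of the register just written, which forces the posted MMIO
 * write to retire before the next statement runs.  With a volatile
 * mapping it reduces to this.
 */
#include <stdint.h>

static inline void write_then_flush(volatile uint32_t *reg, uint32_t val)
{
	*reg = val;	/* clear the hotplug status bits */
	(void)*reg;	/* read back: the write has now landed */
}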
1840
ff1f525e 1841static irqreturn_t valleyview_irq_handler(int irq, void *arg)
7e231dbe 1842{
45a83f84 1843 struct drm_device *dev = arg;
2d1013dd 1844 struct drm_i915_private *dev_priv = dev->dev_private;
7e231dbe
JB
1845 u32 iir, gt_iir, pm_iir;
1846 irqreturn_t ret = IRQ_NONE;
7e231dbe 1847
7e231dbe
JB
1848 while (true) {
1849 iir = I915_READ(VLV_IIR);
1850 gt_iir = I915_READ(GTIIR);
1851 pm_iir = I915_READ(GEN6_PMIIR);
1852
1853 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1854 goto out;
1855
1856 ret = IRQ_HANDLED;
1857
e7b4c6b1 1858 snb_gt_irq_handler(dev, dev_priv, gt_iir);
7e231dbe 1859
c1874ed7 1860 valleyview_pipestat_irq_handler(dev, iir);
31acc7f5 1861
7e231dbe 1862 /* Consume port. Then clear IIR or we'll miss events */
16c6c56b
VS
1863 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1864 i9xx_hpd_irq_handler(dev);
7e231dbe 1865
60611c13 1866 if (pm_iir)
d0ecd7e2 1867 gen6_rps_irq_handler(dev_priv, pm_iir);
7e231dbe
JB
1868
1869 I915_WRITE(GTIIR, gt_iir);
1870 I915_WRITE(GEN6_PMIIR, pm_iir);
1871 I915_WRITE(VLV_IIR, iir);
1872 }
1873
1874out:
1875 return ret;
1876}
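
/*
 * Illustrative sketch (not driver code): the loop-until-quiescent shape of
 * valleyview_irq_handler() above.  The handler re-reads the identity
 * registers after each pass so an event that arrives mid-handler is
 * picked up instead of lost; both callbacks here are stand-ins.
 */
#include <stdint.h>

static int drain_irqs(uint32_t (*read_pending)(void),
		      void (*handle_and_ack)(uint32_t))
{
	uint32_t pending;
	int handled = 0;

	while ((pending = read_pending()) != 0) {
		handle_and_ack(pending);	/* also clears the IIR bits */
		handled = 1;
	}
	return handled;		/* maps to IRQ_HANDLED vs IRQ_NONE */
}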
1877
43f328d7
VS
1878static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1879{
45a83f84 1880 struct drm_device *dev = arg;
43f328d7
VS
1881 struct drm_i915_private *dev_priv = dev->dev_private;
1882 u32 master_ctl, iir;
1883 irqreturn_t ret = IRQ_NONE;
43f328d7 1884
8e5fd599
VS
1885 for (;;) {
1886 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1887 iir = I915_READ(VLV_IIR);
43f328d7 1888
8e5fd599
VS
1889 if (master_ctl == 0 && iir == 0)
1890 break;
43f328d7 1891
8e5fd599 1892 I915_WRITE(GEN8_MASTER_IRQ, 0);
43f328d7 1893
8e5fd599 1894 gen8_gt_irq_handler(dev, dev_priv, master_ctl);
43f328d7 1895
8e5fd599 1896 valleyview_pipestat_irq_handler(dev, iir);
43f328d7 1897
8e5fd599 1898 /* Consume port. Then clear IIR or we'll miss events */
3278f67f 1899 i9xx_hpd_irq_handler(dev);
43f328d7 1900
8e5fd599 1901 I915_WRITE(VLV_IIR, iir);
43f328d7 1902
8e5fd599
VS
1903 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
1904 POSTING_READ(GEN8_MASTER_IRQ);
43f328d7 1905
8e5fd599
VS
1906 ret = IRQ_HANDLED;
1907 }
3278f67f 1908
43f328d7
VS
1909 return ret;
1910}
1911
23e81d69 1912static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
776ad806 1913{
2d1013dd 1914 struct drm_i915_private *dev_priv = dev->dev_private;
9db4a9c7 1915 int pipe;
b543fb04 1916 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
776ad806 1917
91d131d2
DV
1918 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
1919
cfc33bf7
VS
1920 if (pch_iir & SDE_AUDIO_POWER_MASK) {
1921 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1922 SDE_AUDIO_POWER_SHIFT);
776ad806 1923 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
cfc33bf7
VS
1924 port_name(port));
1925 }
776ad806 1926
ce99c256
DV
1927 if (pch_iir & SDE_AUX_MASK)
1928 dp_aux_irq_handler(dev);
1929
776ad806 1930 if (pch_iir & SDE_GMBUS)
515ac2bb 1931 gmbus_irq_handler(dev);
776ad806
JB
1932
1933 if (pch_iir & SDE_AUDIO_HDCP_MASK)
1934 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1935
1936 if (pch_iir & SDE_AUDIO_TRANS_MASK)
1937 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1938
1939 if (pch_iir & SDE_POISON)
1940 DRM_ERROR("PCH poison interrupt\n");
1941
9db4a9c7
JB
1942 if (pch_iir & SDE_FDI_MASK)
1943 for_each_pipe(pipe)
1944 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
1945 pipe_name(pipe),
1946 I915_READ(FDI_RX_IIR(pipe)));
776ad806
JB
1947
1948 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1949 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1950
1951 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1952 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1953
776ad806 1954 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
8664281b
PZ
1955 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
1956 false))
fc2c807b 1957 DRM_ERROR("PCH transcoder A FIFO underrun\n");
8664281b
PZ
1958
1959 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1960 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
1961 false))
fc2c807b 1962 DRM_ERROR("PCH transcoder B FIFO underrun\n");
8664281b
PZ
1963}
1964
1965static void ivb_err_int_handler(struct drm_device *dev)
1966{
1967 struct drm_i915_private *dev_priv = dev->dev_private;
1968 u32 err_int = I915_READ(GEN7_ERR_INT);
5a69b89f 1969 enum pipe pipe;
8664281b 1970
de032bf4
PZ
1971 if (err_int & ERR_INT_POISON)
1972 DRM_ERROR("Poison interrupt\n");
1973
5a69b89f
DV
1974 for_each_pipe(pipe) {
1975 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
1976 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
1977 false))
fc2c807b
VS
1978 DRM_ERROR("Pipe %c FIFO underrun\n",
1979 pipe_name(pipe));
5a69b89f 1980 }
8bf1e9f1 1981
5a69b89f
DV
1982 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1983 if (IS_IVYBRIDGE(dev))
277de95e 1984 ivb_pipe_crc_irq_handler(dev, pipe);
5a69b89f 1985 else
277de95e 1986 hsw_pipe_crc_irq_handler(dev, pipe);
5a69b89f
DV
1987 }
1988 }
8bf1e9f1 1989
8664281b
PZ
1990 I915_WRITE(GEN7_ERR_INT, err_int);
1991}
1992
1993static void cpt_serr_int_handler(struct drm_device *dev)
1994{
1995 struct drm_i915_private *dev_priv = dev->dev_private;
1996 u32 serr_int = I915_READ(SERR_INT);
1997
de032bf4
PZ
1998 if (serr_int & SERR_INT_POISON)
1999 DRM_ERROR("PCH poison interrupt\n");
2000
8664281b
PZ
2001 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
2002 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
2003 false))
fc2c807b 2004 DRM_ERROR("PCH transcoder A FIFO underrun\n");
8664281b
PZ
2005
2006 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
2007 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
2008 false))
fc2c807b 2009 DRM_ERROR("PCH transcoder B FIFO underrun\n");
8664281b
PZ
2010
2011 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
2012 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
2013 false))
fc2c807b 2014 DRM_ERROR("PCH transcoder C FIFO underrun\n");
8664281b
PZ
2015
2016 I915_WRITE(SERR_INT, serr_int);
776ad806
JB
2017}
2018
23e81d69
AJ
2019static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
2020{
2d1013dd 2021 struct drm_i915_private *dev_priv = dev->dev_private;
23e81d69 2022 int pipe;
b543fb04 2023 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
23e81d69 2024
91d131d2
DV
2025 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
2026
cfc33bf7
VS
2027 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
2028 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
2029 SDE_AUDIO_POWER_SHIFT_CPT);
2030 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2031 port_name(port));
2032 }
23e81d69
AJ
2033
2034 if (pch_iir & SDE_AUX_MASK_CPT)
ce99c256 2035 dp_aux_irq_handler(dev);
23e81d69
AJ
2036
2037 if (pch_iir & SDE_GMBUS_CPT)
515ac2bb 2038 gmbus_irq_handler(dev);
23e81d69
AJ
2039
2040 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
2041 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
2042
2043 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
2044 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
2045
2046 if (pch_iir & SDE_FDI_MASK_CPT)
2047 for_each_pipe(pipe)
2048 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
2049 pipe_name(pipe),
2050 I915_READ(FDI_RX_IIR(pipe)));
8664281b
PZ
2051
2052 if (pch_iir & SDE_ERROR_CPT)
2053 cpt_serr_int_handler(dev);
23e81d69
AJ
2054}
2055
c008bc6e
PZ
2056static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
2057{
2058 struct drm_i915_private *dev_priv = dev->dev_private;
40da17c2 2059 enum pipe pipe;
c008bc6e
PZ
2060
2061 if (de_iir & DE_AUX_CHANNEL_A)
2062 dp_aux_irq_handler(dev);
2063
2064 if (de_iir & DE_GSE)
2065 intel_opregion_asle_intr(dev);
2066
c008bc6e
PZ
2067 if (de_iir & DE_POISON)
2068 DRM_ERROR("Poison interrupt\n");
2069
40da17c2
DV
2070 for_each_pipe(pipe) {
2071 if (de_iir & DE_PIPE_VBLANK(pipe))
8d7849db 2072 intel_pipe_handle_vblank(dev, pipe);
5b3a856b 2073
40da17c2
DV
2074 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2075 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
fc2c807b
VS
2076 DRM_ERROR("Pipe %c FIFO underrun\n",
2077 pipe_name(pipe));
5b3a856b 2078
40da17c2
DV
2079 if (de_iir & DE_PIPE_CRC_DONE(pipe))
2080 i9xx_pipe_crc_irq_handler(dev, pipe);
c008bc6e 2081
40da17c2
DV
2082 /* plane/pipes map 1:1 on ilk+ */
2083 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
2084 intel_prepare_page_flip(dev, pipe);
2085 intel_finish_page_flip_plane(dev, pipe);
2086 }
c008bc6e
PZ
2087 }
2088
2089 /* check event from PCH */
2090 if (de_iir & DE_PCH_EVENT) {
2091 u32 pch_iir = I915_READ(SDEIIR);
2092
2093 if (HAS_PCH_CPT(dev))
2094 cpt_irq_handler(dev, pch_iir);
2095 else
2096 ibx_irq_handler(dev, pch_iir);
2097
2098 /* should clear PCH hotplug event before clearing CPU irq */
2099 I915_WRITE(SDEIIR, pch_iir);
2100 }
2101
2102 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
2103 ironlake_rps_change_irq_handler(dev);
2104}
2105
9719fb98
PZ
2106static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
2107{
2108 struct drm_i915_private *dev_priv = dev->dev_private;
07d27e20 2109 enum pipe pipe;
9719fb98
PZ
2110
2111 if (de_iir & DE_ERR_INT_IVB)
2112 ivb_err_int_handler(dev);
2113
2114 if (de_iir & DE_AUX_CHANNEL_A_IVB)
2115 dp_aux_irq_handler(dev);
2116
2117 if (de_iir & DE_GSE_IVB)
2118 intel_opregion_asle_intr(dev);
2119
07d27e20
DL
2120 for_each_pipe(pipe) {
2121 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
8d7849db 2122 intel_pipe_handle_vblank(dev, pipe);
40da17c2
DV
2123
2124 /* plane/pipes map 1:1 on ilk+ */
07d27e20
DL
2125 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
2126 intel_prepare_page_flip(dev, pipe);
2127 intel_finish_page_flip_plane(dev, pipe);
9719fb98
PZ
2128 }
2129 }
2130
2131 /* check event from PCH */
2132 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
2133 u32 pch_iir = I915_READ(SDEIIR);
2134
2135 cpt_irq_handler(dev, pch_iir);
2136
2137 /* clear PCH hotplug event before clearing CPU irq */
2138 I915_WRITE(SDEIIR, pch_iir);
2139 }
2140}
2141
72c90f62
OM
2142/*
2143 * To handle irqs with the minimum potential for races with fresh interrupts, we:
2144 * 1 - Disable Master Interrupt Control.
2145 * 2 - Find the source(s) of the interrupt.
2146 * 3 - Clear the Interrupt Identity bits (IIR).
2147 * 4 - Process the interrupt(s) that had bits set in the IIRs.
2148 * 5 - Re-enable Master Interrupt Control.
2149 */
f1af8fc1 2150static irqreturn_t ironlake_irq_handler(int irq, void *arg)
b1f14ad0 2151{
45a83f84 2152 struct drm_device *dev = arg;
2d1013dd 2153 struct drm_i915_private *dev_priv = dev->dev_private;
f1af8fc1 2154 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
0e43406b 2155 irqreturn_t ret = IRQ_NONE;
b1f14ad0 2156
8664281b
PZ
2157 /* We get interrupts on unclaimed registers, so check for this before we
2158 * do any I915_{READ,WRITE}. */
907b28c5 2159 intel_uncore_check_errors(dev);
8664281b 2160
b1f14ad0
JB
2161 /* disable master interrupt before clearing iir */
2162 de_ier = I915_READ(DEIER);
2163 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
23a78516 2164 POSTING_READ(DEIER);
b1f14ad0 2165
44498aea
PZ
2166 /* Disable south interrupts. We'll only write to SDEIIR once, so further
2167 * interrupts will be stored on its back queue, and then we'll be
2168 * able to process them after we restore SDEIER (as soon as we restore
2169 * it, we'll get an interrupt if SDEIIR still has something to process
2170 * due to its back queue). */
ab5c608b
BW
2171 if (!HAS_PCH_NOP(dev)) {
2172 sde_ier = I915_READ(SDEIER);
2173 I915_WRITE(SDEIER, 0);
2174 POSTING_READ(SDEIER);
2175 }
44498aea 2176
72c90f62
OM
2177 /* Find, clear, then process each source of interrupt */
2178
b1f14ad0 2179 gt_iir = I915_READ(GTIIR);
0e43406b 2180 if (gt_iir) {
72c90f62
OM
2181 I915_WRITE(GTIIR, gt_iir);
2182 ret = IRQ_HANDLED;
d8fc8a47 2183 if (INTEL_INFO(dev)->gen >= 6)
f1af8fc1 2184 snb_gt_irq_handler(dev, dev_priv, gt_iir);
d8fc8a47
PZ
2185 else
2186 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
b1f14ad0
JB
2187 }
2188
0e43406b
CW
2189 de_iir = I915_READ(DEIIR);
2190 if (de_iir) {
72c90f62
OM
2191 I915_WRITE(DEIIR, de_iir);
2192 ret = IRQ_HANDLED;
f1af8fc1
PZ
2193 if (INTEL_INFO(dev)->gen >= 7)
2194 ivb_display_irq_handler(dev, de_iir);
2195 else
2196 ilk_display_irq_handler(dev, de_iir);
b1f14ad0
JB
2197 }
2198
f1af8fc1
PZ
2199 if (INTEL_INFO(dev)->gen >= 6) {
2200 u32 pm_iir = I915_READ(GEN6_PMIIR);
2201 if (pm_iir) {
f1af8fc1
PZ
2202 I915_WRITE(GEN6_PMIIR, pm_iir);
2203 ret = IRQ_HANDLED;
72c90f62 2204 gen6_rps_irq_handler(dev_priv, pm_iir);
f1af8fc1 2205 }
0e43406b 2206 }
b1f14ad0 2207
b1f14ad0
JB
2208 I915_WRITE(DEIER, de_ier);
2209 POSTING_READ(DEIER);
ab5c608b
BW
2210 if (!HAS_PCH_NOP(dev)) {
2211 I915_WRITE(SDEIER, sde_ier);
2212 POSTING_READ(SDEIER);
2213 }
b1f14ad0
JB
2214
2215 return ret;
2216}
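
/*
 * Illustrative sketch (not driver code): the five-step recipe from the
 * comment above ironlake_irq_handler(), with invented register stubs.
 * The crucial detail is step 3 before step 4: clearing IIR before
 * processing means a fresh event re-latches and re-raises the interrupt
 * once the master bit is restored, so nothing is dropped.
 */
#include <stdbool.h>
#include <stdint.h>

struct irq_model {
	uint32_t master_enable;	/* stands in for the DEIER master bit */
	uint32_t iir;		/* stands in for DEIIR */
};

static bool handle_irq_model(struct irq_model *m, void (*process)(uint32_t))
{
	uint32_t pending;

	m->master_enable = 0;		/* 1: disable master control */
	pending = m->iir;		/* 2: find the source(s) */
	if (pending) {
		m->iir &= ~pending;	/* 3: clear the identity bits */
		process(pending);	/* 4: then process them */
	}
	m->master_enable = 1;		/* 5: re-enable master control */
	return pending != 0;
}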
2217
abd58f01
BW
2218static irqreturn_t gen8_irq_handler(int irq, void *arg)
2219{
2220 struct drm_device *dev = arg;
2221 struct drm_i915_private *dev_priv = dev->dev_private;
2222 u32 master_ctl;
2223 irqreturn_t ret = IRQ_NONE;
2224 uint32_t tmp = 0;
c42664cc 2225 enum pipe pipe;
abd58f01 2226
abd58f01
BW
2227 master_ctl = I915_READ(GEN8_MASTER_IRQ);
2228 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2229 if (!master_ctl)
2230 return IRQ_NONE;
2231
2232 I915_WRITE(GEN8_MASTER_IRQ, 0);
2233 POSTING_READ(GEN8_MASTER_IRQ);
2234
2235 ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
2236
2237 if (master_ctl & GEN8_DE_MISC_IRQ) {
2238 tmp = I915_READ(GEN8_DE_MISC_IIR);
2239 if (tmp & GEN8_DE_MISC_GSE)
2240 intel_opregion_asle_intr(dev);
2241 else if (tmp)
2242 DRM_ERROR("Unexpected DE Misc interrupt\n");
2243 else
2244 DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2245
2246 if (tmp) {
2247 I915_WRITE(GEN8_DE_MISC_IIR, tmp);
2248 ret = IRQ_HANDLED;
2249 }
2250 }
2251
6d766f02
DV
2252 if (master_ctl & GEN8_DE_PORT_IRQ) {
2253 tmp = I915_READ(GEN8_DE_PORT_IIR);
2254 if (tmp & GEN8_AUX_CHANNEL_A)
2255 dp_aux_irq_handler(dev);
2256 else if (tmp)
2257 DRM_ERROR("Unexpected DE Port interrupt\n");
2258 else
2259 DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2260
2261 if (tmp) {
2262 I915_WRITE(GEN8_DE_PORT_IIR, tmp);
2263 ret = IRQ_HANDLED;
2264 }
2265 }
2266
c42664cc
DV
2267 for_each_pipe(pipe) {
2268 uint32_t pipe_iir;
abd58f01 2269
c42664cc
DV
2270 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2271 continue;
abd58f01 2272
c42664cc
DV
2273 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2274 if (pipe_iir & GEN8_PIPE_VBLANK)
8d7849db 2275 intel_pipe_handle_vblank(dev, pipe);
abd58f01 2276
d0e1f1cb 2277 if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) {
c42664cc
DV
2278 intel_prepare_page_flip(dev, pipe);
2279 intel_finish_page_flip_plane(dev, pipe);
abd58f01 2280 }
c42664cc 2281
0fbe7870
DV
2282 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
2283 hsw_pipe_crc_irq_handler(dev, pipe);
2284
38d83c96
DV
2285 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
2286 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
2287 false))
fc2c807b
VS
2288 DRM_ERROR("Pipe %c FIFO underrun\n",
2289 pipe_name(pipe));
38d83c96
DV
2290 }
2291
30100f2b
DV
2292 if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
2293 DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
2294 pipe_name(pipe),
2295 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
2296 }
c42664cc
DV
2297
2298 if (pipe_iir) {
2299 ret = IRQ_HANDLED;
2300 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
2301 } else
abd58f01
BW
2302 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2303 }
2304
92d03a80
DV
2305 if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
2306 /*
2307 * FIXME(BDW): Assume for now that the new interrupt handling
2308 * scheme also closed the SDE interrupt handling race we've seen
2309 * on older pch-split platforms. But this needs testing.
2310 */
2311 u32 pch_iir = I915_READ(SDEIIR);
2312
2313 cpt_irq_handler(dev, pch_iir);
2314
2315 if (pch_iir) {
2316 I915_WRITE(SDEIIR, pch_iir);
2317 ret = IRQ_HANDLED;
2318 }
2319 }
2320
abd58f01
BW
2321 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2322 POSTING_READ(GEN8_MASTER_IRQ);
2323
2324 return ret;
2325}
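
/*
 * Illustrative sketch (not driver code): the consistency check repeated
 * for every domain in gen8_irq_handler() above.  If the master register
 * claims a domain fired but that domain's IIR is empty, the handler
 * complains rather than acking silently, since that points at a hardware
 * or ordering bug.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool domain_has_work(uint32_t master_ctl, uint32_t domain_bit,
			    uint32_t domain_iir)
{
	if (!(master_ctl & domain_bit))
		return false;		/* domain did not fire at all */
	if (!domain_iir) {
		fprintf(stderr, "master control lied for bit 0x%08x\n",
			(unsigned int)domain_bit);
		return false;		/* nothing to handle or ack */
	}
	return true;			/* caller processes, then acks IIR */
}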
2326
17e1df07
DV
2327static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2328 bool reset_completed)
2329{
a4872ba6 2330 struct intel_engine_cs *ring;
17e1df07
DV
2331 int i;
2332
2333 /*
2334 * Notify all waiters for GPU completion events that reset state has
2335 * been changed, and that they need to restart their wait after
2336 * checking for potential errors (and bail out to drop locks if there is
2337 * a gpu reset pending so that i915_error_work_func can acquire them).
2338 */
2339
2340 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2341 for_each_ring(ring, dev_priv, i)
2342 wake_up_all(&ring->irq_queue);
2343
2344 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2345 wake_up_all(&dev_priv->pending_flip_queue);
2346
2347 /*
2348 * Signal tasks blocked in i915_gem_wait_for_error that the pending
2349 * reset state is cleared.
2350 */
2351 if (reset_completed)
2352 wake_up_all(&dev_priv->gpu_error.reset_queue);
2353}
2354
8a905236
JB
2355/**
2356 * i915_error_work_func - do process context error handling work
2357 * @work: work struct
2358 *
2359 * Fire an error uevent so userspace can see that a hang or error
2360 * was detected.
2361 */
2362static void i915_error_work_func(struct work_struct *work)
2363{
1f83fee0
DV
2364 struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
2365 work);
2d1013dd
JN
2366 struct drm_i915_private *dev_priv =
2367 container_of(error, struct drm_i915_private, gpu_error);
8a905236 2368 struct drm_device *dev = dev_priv->dev;
cce723ed
BW
2369 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2370 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2371 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
17e1df07 2372 int ret;
8a905236 2373
5bdebb18 2374 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
f316a42c 2375
7db0ba24
DV
2376 /*
2377 * Note that there's only one work item which does gpu resets, so we
2378 * need not worry about concurrent gpu resets potentially incrementing
2379 * error->reset_counter twice. We only need to take care of another
2380 * racing irq/hangcheck declaring the gpu dead for a second time. A
2381 * quick check for that is good enough: schedule_work ensures the
2382 * correct ordering between hang detection and this work item, and since
2383 * the reset in-progress bit is only ever set by code outside of this
2384 * work we don't need to worry about any other races.
2385 */
2386 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
f803aa55 2387 DRM_DEBUG_DRIVER("resetting chip\n");
5bdebb18 2388 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
7db0ba24 2389 reset_event);
1f83fee0 2390
f454c694
ID
2391 /*
2392 * In most cases it's guaranteed that we get here with an RPM
2393 * reference held, for example because there is a pending GPU
2394 * request that won't finish until the reset is done. This
2395 * isn't the case at least when we get here by doing a
2397 * simulated reset via debugfs, so get an RPM reference.
2397 */
2398 intel_runtime_pm_get(dev_priv);
17e1df07
DV
2399 /*
2400 * All state reset _must_ be completed before we update the
2401 * reset counter, for otherwise waiters might miss the reset
2402 * pending state and not properly drop locks, resulting in
2403 * deadlocks with the reset work.
2404 */
f69061be
DV
2405 ret = i915_reset(dev);
2406
17e1df07
DV
2407 intel_display_handle_reset(dev);
2408
f454c694
ID
2409 intel_runtime_pm_put(dev_priv);
2410
f69061be
DV
2411 if (ret == 0) {
2412 /*
2413 * After all the gem state is reset, increment the reset
2414 * counter and wake up everyone waiting for the reset to
2415 * complete.
2416 *
2417 * Since unlock operations are a one-sided barrier only,
2418 * we need to insert a barrier here to order any seqno
2419 * updates before
2420 * the counter increment.
2421 */
2422 smp_mb__before_atomic_inc();
2423 atomic_inc(&dev_priv->gpu_error.reset_counter);
2424
5bdebb18 2425 kobject_uevent_env(&dev->primary->kdev->kobj,
f69061be 2426 KOBJ_CHANGE, reset_done_event);
1f83fee0 2427 } else {
2ac0f450 2428 atomic_set_mask(I915_WEDGED, &error->reset_counter);
f316a42c 2429 }
1f83fee0 2430
17e1df07
DV
2431 /*
2432 * Note: The wake_up also serves as a memory barrier so that
2433 * waiters see the updated value of the reset counter atomic_t.
2434 */
2435 i915_error_wake_up(dev_priv, true);
f316a42c 2436 }
8a905236
JB
2437}
2438
35aed2e6 2439static void i915_report_and_clear_eir(struct drm_device *dev)
8a905236
JB
2440{
2441 struct drm_i915_private *dev_priv = dev->dev_private;
bd9854f9 2442 uint32_t instdone[I915_NUM_INSTDONE_REG];
8a905236 2443 u32 eir = I915_READ(EIR);
050ee91f 2444 int pipe, i;
8a905236 2445
35aed2e6
CW
2446 if (!eir)
2447 return;
8a905236 2448
a70491cc 2449 pr_err("render error detected, EIR: 0x%08x\n", eir);
8a905236 2450
bd9854f9
BW
2451 i915_get_extra_instdone(dev, instdone);
2452
8a905236
JB
2453 if (IS_G4X(dev)) {
2454 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2455 u32 ipeir = I915_READ(IPEIR_I965);
2456
a70491cc
JP
2457 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2458 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
050ee91f
BW
2459 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2460 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
a70491cc 2461 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
a70491cc 2462 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
8a905236 2463 I915_WRITE(IPEIR_I965, ipeir);
3143a2bf 2464 POSTING_READ(IPEIR_I965);
8a905236
JB
2465 }
2466 if (eir & GM45_ERROR_PAGE_TABLE) {
2467 u32 pgtbl_err = I915_READ(PGTBL_ER);
a70491cc
JP
2468 pr_err("page table error\n");
2469 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
8a905236 2470 I915_WRITE(PGTBL_ER, pgtbl_err);
3143a2bf 2471 POSTING_READ(PGTBL_ER);
8a905236
JB
2472 }
2473 }
2474
a6c45cf0 2475 if (!IS_GEN2(dev)) {
8a905236
JB
2476 if (eir & I915_ERROR_PAGE_TABLE) {
2477 u32 pgtbl_err = I915_READ(PGTBL_ER);
a70491cc
JP
2478 pr_err("page table error\n");
2479 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
8a905236 2480 I915_WRITE(PGTBL_ER, pgtbl_err);
3143a2bf 2481 POSTING_READ(PGTBL_ER);
8a905236
JB
2482 }
2483 }
2484
2485 if (eir & I915_ERROR_MEMORY_REFRESH) {
a70491cc 2486 pr_err("memory refresh error:\n");
9db4a9c7 2487 for_each_pipe(pipe)
a70491cc 2488 pr_err("pipe %c stat: 0x%08x\n",
9db4a9c7 2489 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
8a905236
JB
2490 /* pipestat has already been acked */
2491 }
2492 if (eir & I915_ERROR_INSTRUCTION) {
a70491cc
JP
2493 pr_err("instruction error\n");
2494 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
050ee91f
BW
2495 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2496 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
a6c45cf0 2497 if (INTEL_INFO(dev)->gen < 4) {
8a905236
JB
2498 u32 ipeir = I915_READ(IPEIR);
2499
a70491cc
JP
2500 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
2501 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
a70491cc 2502 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
8a905236 2503 I915_WRITE(IPEIR, ipeir);
3143a2bf 2504 POSTING_READ(IPEIR);
8a905236
JB
2505 } else {
2506 u32 ipeir = I915_READ(IPEIR_I965);
2507
a70491cc
JP
2508 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2509 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
a70491cc 2510 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
a70491cc 2511 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
8a905236 2512 I915_WRITE(IPEIR_I965, ipeir);
3143a2bf 2513 POSTING_READ(IPEIR_I965);
8a905236
JB
2514 }
2515 }
2516
2517 I915_WRITE(EIR, eir);
3143a2bf 2518 POSTING_READ(EIR);
8a905236
JB
2519 eir = I915_READ(EIR);
2520 if (eir) {
2521 /*
2522 * some errors might have become stuck,
2523 * mask them.
2524 */
2525 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2526 I915_WRITE(EMR, I915_READ(EMR) | eir);
2527 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2528 }
35aed2e6
CW
2529}
2530
2531/**
2532 * i915_handle_error - handle an error interrupt
2533 * @dev: drm device
2534 *
2535 * Do some basic checking of register state at error interrupt time and
2536 * dump it to the syslog. Also call i915_capture_error_state() to make
2537 * sure we get a record and make it available in debugfs. Fire a uevent
2538 * so userspace knows something bad happened (should trigger collection
2539 * of a ring dump etc.).
2540 */
58174462
MK
2541void i915_handle_error(struct drm_device *dev, bool wedged,
2542 const char *fmt, ...)
35aed2e6
CW
2543{
2544 struct drm_i915_private *dev_priv = dev->dev_private;
58174462
MK
2545 va_list args;
2546 char error_msg[80];
35aed2e6 2547
58174462
MK
2548 va_start(args, fmt);
2549 vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2550 va_end(args);
2551
2552 i915_capture_error_state(dev, wedged, error_msg);
35aed2e6 2553 i915_report_and_clear_eir(dev);
8a905236 2554
ba1234d1 2555 if (wedged) {
f69061be
DV
2556 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
2557 &dev_priv->gpu_error.reset_counter);
ba1234d1 2558
11ed50ec 2559 /*
17e1df07
DV
2560 * Wakeup waiting processes so that the reset work function
2561 * i915_error_work_func doesn't deadlock trying to grab various
2562 * locks. By bumping the reset counter first, the woken
2563 * processes will see a reset in progress and back off,
2564 * releasing their locks and then wait for the reset completion.
2565 * We must do this for _all_ gpu waiters that might hold locks
2566 * that the reset work needs to acquire.
2567 *
2568 * Note: The wake_up serves as the required memory barrier to
2569 * ensure that the waiters see the updated value of the reset
2570 * counter atomic_t.
11ed50ec 2571 */
17e1df07 2572 i915_error_wake_up(dev_priv, false);
11ed50ec
BG
2573 }
2574
122f46ba
DV
2575 /*
2576 * Our reset work can grab modeset locks (since it needs to reset the
2577 * state of outstanding pageflips). Hence it must not be run on our own
2578 * dev_priv->wq work queue for otherwise the flush_work in the pageflip
2579 * code will deadlock.
2580 */
2581 schedule_work(&dev_priv->gpu_error.work);
8a905236
JB
2582}
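
/*
 * Illustrative sketch (not driver code): the publish-then-wake ordering
 * that the comments in i915_handle_error() insist on, modeled with C11
 * atomics.  The flag must be globally visible before any waiter wakes,
 * so a woken task always observes the pending reset and drops its locks.
 */
#include <stdatomic.h>

#define RESET_IN_PROGRESS_MODEL 0x1	/* invented flag for the example */

static atomic_uint reset_counter_model;

static void declare_reset_pending(void (*wake_all_waiters)(void))
{
	/* seq_cst RMW doubles as the memory barrier the comment describes */
	atomic_fetch_or(&reset_counter_model, RESET_IN_PROGRESS_MODEL);
	wake_all_waiters();	/* waiters re-check the flag and back off */
}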
2583
21ad8330 2584static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
4e5359cd 2585{
2d1013dd 2586 struct drm_i915_private *dev_priv = dev->dev_private;
4e5359cd
SF
2587 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
2588 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
05394f39 2589 struct drm_i915_gem_object *obj;
4e5359cd
SF
2590 struct intel_unpin_work *work;
2591 unsigned long flags;
2592 bool stall_detected;
2593
2594 /* Ignore early vblank irqs */
2595 if (intel_crtc == NULL)
2596 return;
2597
2598 spin_lock_irqsave(&dev->event_lock, flags);
2599 work = intel_crtc->unpin_work;
2600
e7d841ca
CW
2601 if (work == NULL ||
2602 atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
2603 !work->enable_stall_check) {
4e5359cd
SF
2604 /* Either the pending flip IRQ arrived, or we're too early. Don't check */
2605 spin_unlock_irqrestore(&dev->event_lock, flags);
2606 return;
2607 }
2608
2609 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
05394f39 2610 obj = work->pending_flip_obj;
a6c45cf0 2611 if (INTEL_INFO(dev)->gen >= 4) {
9db4a9c7 2612 int dspsurf = DSPSURF(intel_crtc->plane);
446f2545 2613 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
f343c5f6 2614 i915_gem_obj_ggtt_offset(obj);
4e5359cd 2615 } else {
9db4a9c7 2616 int dspaddr = DSPADDR(intel_crtc->plane);
f343c5f6 2617 stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
f4510a27
MR
2618 crtc->y * crtc->primary->fb->pitches[0] +
2619 crtc->x * crtc->primary->fb->bits_per_pixel/8);
4e5359cd
SF
2620 }
2621
2622 spin_unlock_irqrestore(&dev->event_lock, flags);
2623
2624 if (stall_detected) {
2625 DRM_DEBUG_DRIVER("Pageflip stall detected\n");
2626 intel_prepare_page_flip(dev, intel_crtc->plane);
2627 }
2628}
2629
42f52ef8
KP
2630/* Called from drm generic code, passed 'crtc' which
2631 * we use as a pipe index
2632 */
f71d4af4 2633static int i915_enable_vblank(struct drm_device *dev, int pipe)
0a3e67a4 2634{
2d1013dd 2635 struct drm_i915_private *dev_priv = dev->dev_private;
e9d21d7f 2636 unsigned long irqflags;
71e0ffa5 2637
5eddb70b 2638 if (!i915_pipe_enabled(dev, pipe))
71e0ffa5 2639 return -EINVAL;
0a3e67a4 2640
1ec14ad3 2641 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
f796cf8f 2642 if (INTEL_INFO(dev)->gen >= 4)
7c463586 2643 i915_enable_pipestat(dev_priv, pipe,
755e9019 2644 PIPE_START_VBLANK_INTERRUPT_STATUS);
e9d21d7f 2645 else
7c463586 2646 i915_enable_pipestat(dev_priv, pipe,
755e9019 2647 PIPE_VBLANK_INTERRUPT_STATUS);
1ec14ad3 2648 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
8692d00e 2649
0a3e67a4
JB
2650 return 0;
2651}
2652
f71d4af4 2653static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
f796cf8f 2654{
2d1013dd 2655 struct drm_i915_private *dev_priv = dev->dev_private;
f796cf8f 2656 unsigned long irqflags;
b518421f 2657 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
40da17c2 2658 DE_PIPE_VBLANK(pipe);
f796cf8f
JB
2659
2660 if (!i915_pipe_enabled(dev, pipe))
2661 return -EINVAL;
2662
2663 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
b518421f 2664 ironlake_enable_display_irq(dev_priv, bit);
b1f14ad0
JB
2665 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2666
2667 return 0;
2668}
2669
7e231dbe
JB
2670static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2671{
2d1013dd 2672 struct drm_i915_private *dev_priv = dev->dev_private;
7e231dbe 2673 unsigned long irqflags;
7e231dbe
JB
2674
2675 if (!i915_pipe_enabled(dev, pipe))
2676 return -EINVAL;
2677
2678 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
31acc7f5 2679 i915_enable_pipestat(dev_priv, pipe,
755e9019 2680 PIPE_START_VBLANK_INTERRUPT_STATUS);
7e231dbe
JB
2681 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2682
2683 return 0;
2684}
2685
abd58f01
BW
2686static int gen8_enable_vblank(struct drm_device *dev, int pipe)
2687{
2688 struct drm_i915_private *dev_priv = dev->dev_private;
2689 unsigned long irqflags;
abd58f01
BW
2690
2691 if (!i915_pipe_enabled(dev, pipe))
2692 return -EINVAL;
2693
2694 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
7167d7c6
DV
2695 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
2696 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2697 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
abd58f01
BW
2698 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2699 return 0;
2700}
2701
42f52ef8
KP
2702/* Called from drm generic code, passed 'crtc' which
2703 * we use as a pipe index
2704 */
f71d4af4 2705static void i915_disable_vblank(struct drm_device *dev, int pipe)
0a3e67a4 2706{
2d1013dd 2707 struct drm_i915_private *dev_priv = dev->dev_private;
e9d21d7f 2708 unsigned long irqflags;
0a3e67a4 2709
1ec14ad3 2710 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
f796cf8f 2711 i915_disable_pipestat(dev_priv, pipe,
755e9019
ID
2712 PIPE_VBLANK_INTERRUPT_STATUS |
2713 PIPE_START_VBLANK_INTERRUPT_STATUS);
f796cf8f
JB
2714 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2715}
2716
f71d4af4 2717static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
f796cf8f 2718{
2d1013dd 2719 struct drm_i915_private *dev_priv = dev->dev_private;
f796cf8f 2720 unsigned long irqflags;
b518421f 2721 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
40da17c2 2722 DE_PIPE_VBLANK(pipe);
f796cf8f
JB
2723
2724 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
b518421f 2725 ironlake_disable_display_irq(dev_priv, bit);
b1f14ad0
JB
2726 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2727}
2728
7e231dbe
JB
2729static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
2730{
2d1013dd 2731 struct drm_i915_private *dev_priv = dev->dev_private;
7e231dbe 2732 unsigned long irqflags;
7e231dbe
JB
2733
2734 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
31acc7f5 2735 i915_disable_pipestat(dev_priv, pipe,
755e9019 2736 PIPE_START_VBLANK_INTERRUPT_STATUS);
7e231dbe
JB
2737 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2738}
2739
abd58f01
BW
2740static void gen8_disable_vblank(struct drm_device *dev, int pipe)
2741{
2742 struct drm_i915_private *dev_priv = dev->dev_private;
2743 unsigned long irqflags;
abd58f01
BW
2744
2745 if (!i915_pipe_enabled(dev, pipe))
2746 return;
2747
2748 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
7167d7c6
DV
2749 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
2750 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2751 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
abd58f01
BW
2752 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2753}
2754
893eead0 2755static u32
a4872ba6 2756ring_last_seqno(struct intel_engine_cs *ring)
852835f3 2757{
893eead0
CW
2758 return list_entry(ring->request_list.prev,
2759 struct drm_i915_gem_request, list)->seqno;
2760}
2761
9107e9d2 2762static bool
a4872ba6 2763ring_idle(struct intel_engine_cs *ring, u32 seqno)
9107e9d2
CW
2764{
2765 return (list_empty(&ring->request_list) ||
2766 i915_seqno_passed(seqno, ring_last_seqno(ring)));
f65d9421
BG
2767}
2768
a028c4b0
DV
2769static bool
2770ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
2771{
2772 if (INTEL_INFO(dev)->gen >= 8) {
2773 /*
2774 * FIXME: gen8 semaphore support - currently we don't emit
2775 * semaphores on bdw anyway, but this needs to be addressed when
2776 * we merge that code.
2777 */
2778 return false;
2779 } else {
2780 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2781 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
2782 MI_SEMAPHORE_REGISTER);
2783 }
2784}
2785
a4872ba6
OM
2786static struct intel_engine_cs *
2787semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr)
921d42ea
DV
2788{
2789 struct drm_i915_private *dev_priv = ring->dev->dev_private;
a4872ba6 2790 struct intel_engine_cs *signaller;
921d42ea
DV
2791 int i;
2792
2793 if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
2794 /*
2795 * FIXME: gen8 semaphore support - currently we don't emit
2796 * semaphores on bdw anyway, but this needs to be addressed when
2797 * we merge that code.
2798 */
2799 return NULL;
2800 } else {
2801 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
2802
2803 for_each_ring(signaller, dev_priv, i) {
2804 if (ring == signaller)
2805 continue;
2806
ebc348b2 2807 if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
921d42ea
DV
2808 return signaller;
2809 }
2810 }
2811
2812 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x\n",
2813 ring->id, ipehr);
2814
2815 return NULL;
2816}
2817
a4872ba6
OM
2818static struct intel_engine_cs *
2819semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
a24a11e6
CW
2820{
2821 struct drm_i915_private *dev_priv = ring->dev->dev_private;
88fe429d
DV
2822 u32 cmd, ipehr, head;
2823 int i;
a24a11e6
CW
2824
2825 ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
a028c4b0 2826 if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
6274f212 2827 return NULL;
a24a11e6 2828
88fe429d
DV
2829 /*
2830 * HEAD is likely pointing to the dword after the actual command,
2831 * so scan backwards until we find the MBOX. But limit it to just 3
2832 * dwords. Note that we don't care about ACTHD here since that might
2833 * point at at batch, and semaphores are always emitted into the
2834 * ringbuffer itself.
a24a11e6 2835 */
88fe429d
DV
2836 head = I915_READ_HEAD(ring) & HEAD_ADDR;
2837
2838 for (i = 4; i; --i) {
2839 /*
2840 * Be paranoid and presume the hw has gone off into the wild -
2841 * our ring is smaller than what the hardware (and hence
2842 * HEAD_ADDR) allows. Also handles wrap-around.
2843 */
ee1b1e5e 2844 head &= ring->buffer->size - 1;
88fe429d
DV
2845
2846 /* This here seems to blow up */
ee1b1e5e 2847 cmd = ioread32(ring->buffer->virtual_start + head);
a24a11e6
CW
2848 if (cmd == ipehr)
2849 break;
2850
88fe429d
DV
2851 head -= 4;
2852 }
a24a11e6 2853
88fe429d
DV
2854 if (!i)
2855 return NULL;
a24a11e6 2856
ee1b1e5e 2857 *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
921d42ea 2858 return semaphore_wait_to_signaller_ring(ring, ipehr);
a24a11e6
CW
2859}
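
/*
 * Illustrative sketch (not driver code): the bounded backwards scan in
 * semaphore_waits_for() above, in isolation.  It assumes a power-of-two
 * ring size, so "& (size - 1)" both clamps a wild HEAD value and handles
 * the unsigned wrap-around when head steps below zero.
 */
#include <stdint.h>

static int find_cmd_backwards(const uint32_t *ring_dwords, uint32_t size,
			      uint32_t head, uint32_t needle)
{
	int i;

	for (i = 4; i; --i) {
		head &= size - 1;		/* clamp + wrap into the ring */
		if (ring_dwords[head / 4] == needle)
			return (int)head;	/* byte offset of the match */
		head -= 4;			/* one dword back per step */
	}
	return -1;				/* not in the last few dwords */
}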
2860
a4872ba6 2861static int semaphore_passed(struct intel_engine_cs *ring)
6274f212
CW
2862{
2863 struct drm_i915_private *dev_priv = ring->dev->dev_private;
a4872ba6 2864 struct intel_engine_cs *signaller;
6274f212
CW
2865 u32 seqno, ctl;
2866
2867 ring->hangcheck.deadlock = true;
2868
2869 signaller = semaphore_waits_for(ring, &seqno);
2870 if (signaller == NULL || signaller->hangcheck.deadlock)
2871 return -1;
2872
2873 /* cursory check for an unkickable deadlock */
2874 ctl = I915_READ_CTL(signaller);
2875 if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
2876 return -1;
2877
2878 return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno);
2879}
2880
2881static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2882{
a4872ba6 2883 struct intel_engine_cs *ring;
6274f212
CW
2884 int i;
2885
2886 for_each_ring(ring, dev_priv, i)
2887 ring->hangcheck.deadlock = false;
2888}
2889
ad8beaea 2890static enum intel_ring_hangcheck_action
a4872ba6 2891ring_stuck(struct intel_engine_cs *ring, u64 acthd)
1ec14ad3
CW
2892{
2893 struct drm_device *dev = ring->dev;
2894 struct drm_i915_private *dev_priv = dev->dev_private;
9107e9d2
CW
2895 u32 tmp;
2896
6274f212 2897 if (ring->hangcheck.acthd != acthd)
f2f4d82f 2898 return HANGCHECK_ACTIVE;
6274f212 2899
9107e9d2 2900 if (IS_GEN2(dev))
f2f4d82f 2901 return HANGCHECK_HUNG;
9107e9d2
CW
2902
2903 /* Is the chip hanging on a WAIT_FOR_EVENT?
2904 * If so we can simply poke the RB_WAIT bit
2905 * and break the hang. This should work on
2906 * all but the second generation chipsets.
2907 */
2908 tmp = I915_READ_CTL(ring);
1ec14ad3 2909 if (tmp & RING_WAIT) {
58174462
MK
2910 i915_handle_error(dev, false,
2911 "Kicking stuck wait on %s",
2912 ring->name);
1ec14ad3 2913 I915_WRITE_CTL(ring, tmp);
f2f4d82f 2914 return HANGCHECK_KICK;
6274f212
CW
2915 }
2916
2917 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
2918 switch (semaphore_passed(ring)) {
2919 default:
f2f4d82f 2920 return HANGCHECK_HUNG;
6274f212 2921 case 1:
58174462
MK
2922 i915_handle_error(dev, false,
2923 "Kicking stuck semaphore on %s",
2924 ring->name);
6274f212 2925 I915_WRITE_CTL(ring, tmp);
f2f4d82f 2926 return HANGCHECK_KICK;
6274f212 2927 case 0:
f2f4d82f 2928 return HANGCHECK_WAIT;
6274f212 2929 }
9107e9d2 2930 }
ed5cbb03 2931
f2f4d82f 2932 return HANGCHECK_HUNG;
ed5cbb03
MK
2933}
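
/*
 * Illustrative sketch (not driver code): the decision ladder that
 * ring_stuck() walks, reduced to a pure function.  Anything cheaply
 * recoverable is reported as a kick or a legitimate wait; only the
 * remainder escalates to a hang.  semaphore_state mirrors the return of
 * semaphore_passed(): 1 = passed, 0 = still waiting, -1 = deadlock.
 */
enum stuck_verdict { VERDICT_ACTIVE, VERDICT_WAIT, VERDICT_KICK, VERDICT_HUNG };

static enum stuck_verdict classify_ring(int acthd_moved, int in_rb_wait,
					int semaphore_state)
{
	if (acthd_moved)
		return VERDICT_ACTIVE;	/* still executing, just slow */
	if (in_rb_wait)
		return VERDICT_KICK;	/* poke RB_WAIT and re-check */
	if (semaphore_state > 0)
		return VERDICT_KICK;	/* semaphore passed: kickable */
	if (semaphore_state == 0)
		return VERDICT_WAIT;	/* legitimate wait on another ring */
	return VERDICT_HUNG;		/* deadlocked or truly stuck */
}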
2934
f65d9421
BG
2935/**
2936 * This is called when the chip hasn't reported back with completed
05407ff8
MK
2937 * batchbuffers in a long time. We keep track of seqno progress per ring,
2938 * and if there is no progress the hangcheck score for that ring is
2939 * increased. Further, acthd is inspected to see if the ring is stuck; if
2940 * so, we kick the ring. If we see no progress on three subsequent calls
2941 * we assume the chip is wedged and try to fix it by resetting it.
f65d9421 2942 */
a658b5d2 2943static void i915_hangcheck_elapsed(unsigned long data)
f65d9421
BG
2944{
2945 struct drm_device *dev = (struct drm_device *)data;
2d1013dd 2946 struct drm_i915_private *dev_priv = dev->dev_private;
a4872ba6 2947 struct intel_engine_cs *ring;
b4519513 2948 int i;
05407ff8 2949 int busy_count = 0, rings_hung = 0;
9107e9d2
CW
2950 bool stuck[I915_NUM_RINGS] = { 0 };
2951#define BUSY 1
2952#define KICK 5
2953#define HUNG 20
893eead0 2954
d330a953 2955 if (!i915.enable_hangcheck)
3e0dc6b0
BW
2956 return;
2957
b4519513 2958 for_each_ring(ring, dev_priv, i) {
50877445
CW
2959 u64 acthd;
2960 u32 seqno;
9107e9d2 2961 bool busy = true;
05407ff8 2962
6274f212
CW
2963 semaphore_clear_deadlocks(dev_priv);
2964
05407ff8
MK
2965 seqno = ring->get_seqno(ring, false);
2966 acthd = intel_ring_get_active_head(ring);
b4519513 2967
9107e9d2
CW
2968 if (ring->hangcheck.seqno == seqno) {
2969 if (ring_idle(ring, seqno)) {
da661464
MK
2970 ring->hangcheck.action = HANGCHECK_IDLE;
2971
9107e9d2
CW
2972 if (waitqueue_active(&ring->irq_queue)) {
2973 /* Issue a wake-up to catch stuck h/w. */
094f9a54 2974 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
f4adcd24
DV
2975 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
2976 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
2977 ring->name);
2978 else
2979 DRM_INFO("Fake missed irq on %s\n",
2980 ring->name);
094f9a54
CW
2981 wake_up_all(&ring->irq_queue);
2982 }
2983 /* Safeguard against driver failure */
2984 ring->hangcheck.score += BUSY;
9107e9d2
CW
2985 } else
2986 busy = false;
05407ff8 2987 } else {
6274f212
CW
2988 /* We always increment the hangcheck score
2989 * if the ring is busy and still processing
2990 * the same request, so that no single request
2991 * can run indefinitely (such as a chain of
2992 * batches). The only time we do not increment
2993 * the hangcheck score on this ring is if this
2994 * ring is in a legitimate wait for another
2995 * ring. In that case the waiting ring is a
2996 * victim and we want to be sure we catch the
2997 * right culprit. Then every time we do kick
2998 * the ring, add a small increment to the
2999 * score so that we can catch a batch that is
3000 * being repeatedly kicked and so responsible
3001 * for stalling the machine.
3002 */
ad8beaea
MK
3003 ring->hangcheck.action = ring_stuck(ring,
3004 acthd);
3005
3006 switch (ring->hangcheck.action) {
da661464 3007 case HANGCHECK_IDLE:
f2f4d82f 3008 case HANGCHECK_WAIT:
6274f212 3009 break;
f2f4d82f 3010 case HANGCHECK_ACTIVE:
ea04cb31 3011 ring->hangcheck.score += BUSY;
6274f212 3012 break;
f2f4d82f 3013 case HANGCHECK_KICK:
ea04cb31 3014 ring->hangcheck.score += KICK;
6274f212 3015 break;
f2f4d82f 3016 case HANGCHECK_HUNG:
ea04cb31 3017 ring->hangcheck.score += HUNG;
6274f212
CW
3018 stuck[i] = true;
3019 break;
3020 }
05407ff8 3021 }
9107e9d2 3022 } else {
da661464
MK
3023 ring->hangcheck.action = HANGCHECK_ACTIVE;
3024
9107e9d2
CW
3025 /* Gradually reduce the count so that we catch DoS
3026 * attempts across multiple batches.
3027 */
3028 if (ring->hangcheck.score > 0)
3029 ring->hangcheck.score--;
d1e61e7f
CW
3030 }
3031
05407ff8
MK
3032 ring->hangcheck.seqno = seqno;
3033 ring->hangcheck.acthd = acthd;
9107e9d2 3034 busy_count += busy;
893eead0 3035 }
b9201c14 3036
92cab734 3037 for_each_ring(ring, dev_priv, i) {
b6b0fac0 3038 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
b8d88d1d
DV
3039 DRM_INFO("%s on %s\n",
3040 stuck[i] ? "stuck" : "no progress",
3041 ring->name);
a43adf07 3042 rings_hung++;
92cab734
MK
3043 }
3044 }
3045
05407ff8 3046 if (rings_hung)
58174462 3047 return i915_handle_error(dev, true, "Ring hung");
f65d9421 3048
05407ff8
MK
3049 if (busy_count)
3050 /* Reset the timer in case the chip hangs without another request
3051 * being added */
10cd45b6
MK
3052 i915_queue_hangcheck(dev);
3053}
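
/*
 * Illustrative sketch (not driver code): the scoring policy applied per
 * ring by i915_hangcheck_elapsed() above.  The score climbs while a ring
 * makes no seqno progress, climbs faster when kicks were needed, and
 * decays one point per healthy pass, so only persistent stalls cross the
 * hang threshold.
 */
enum hang_event { MADE_PROGRESS, BUSY_NO_PROGRESS, NEEDED_KICK, LOOKS_HUNG };

static int update_hang_score(int score, enum hang_event ev)
{
	switch (ev) {
	case MADE_PROGRESS:
		return score > 0 ? score - 1 : 0;	/* gradual decay */
	case BUSY_NO_PROGRESS:
		return score + 1;			/* mirrors BUSY */
	case NEEDED_KICK:
		return score + 5;			/* mirrors KICK */
	case LOOKS_HUNG:
		return score + 20;			/* mirrors HUNG */
	}
	return score;
}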
3054
3055void i915_queue_hangcheck(struct drm_device *dev)
3056{
3057 struct drm_i915_private *dev_priv = dev->dev_private;
d330a953 3058 if (!i915.enable_hangcheck)
10cd45b6
MK
3059 return;
3060
3061 mod_timer(&dev_priv->gpu_error.hangcheck_timer,
3062 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
f65d9421
BG
3063}
3064
1c69eb42 3065static void ibx_irq_reset(struct drm_device *dev)
91738a95
PZ
3066{
3067 struct drm_i915_private *dev_priv = dev->dev_private;
3068
3069 if (HAS_PCH_NOP(dev))
3070 return;
3071
f86f3fb0 3072 GEN5_IRQ_RESET(SDE);
105b122e
PZ
3073
3074 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3075 I915_WRITE(SERR_INT, 0xffffffff);
622364b6 3076}
105b122e 3077
622364b6
PZ
3078/*
3079 * SDEIER is also touched by the interrupt handler to work around missed PCH
3080 * interrupts. Hence we can't update it after the interrupt handler is enabled -
3081 * instead we unconditionally enable all PCH interrupt sources here, but then
3082 * only unmask them as needed with SDEIMR.
3083 *
3084 * This function needs to be called before interrupts are enabled.
3085 */
3086static void ibx_irq_pre_postinstall(struct drm_device *dev)
3087{
3088 struct drm_i915_private *dev_priv = dev->dev_private;
3089
3090 if (HAS_PCH_NOP(dev))
3091 return;
3092
3093 WARN_ON(I915_READ(SDEIER) != 0);
91738a95
PZ
3094 I915_WRITE(SDEIER, 0xffffffff);
3095 POSTING_READ(SDEIER);
3096}
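
/*
 * Illustrative sketch (not driver code): the "arm everything in IER once,
 * gate at runtime with IMR" split that the comment above describes.
 * Registers are invented volatile mappings for the example.
 */
#include <stdint.h>

static void irq_arm_all_sources(volatile uint32_t *ier,
				volatile uint32_t *imr)
{
	*ier = 0xffffffff;	/* sources armed before irqs go live */
	*imr = 0xffffffff;	/* but everything stays masked for now */
}

static void irq_unmask_bits(volatile uint32_t *imr, uint32_t bits)
{
	*imr &= ~bits;		/* runtime enable policy lives in IMR only */
}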
3097
7c4d664e 3098static void gen5_gt_irq_reset(struct drm_device *dev)
d18ea1b5
DV
3099{
3100 struct drm_i915_private *dev_priv = dev->dev_private;
3101
f86f3fb0 3102 GEN5_IRQ_RESET(GT);
a9d356a6 3103 if (INTEL_INFO(dev)->gen >= 6)
f86f3fb0 3104 GEN5_IRQ_RESET(GEN6_PM);
d18ea1b5
DV
3105}
3106
1da177e4
LT
3107/* drm_dma.h hooks
3108*/
be30b29f 3109static void ironlake_irq_reset(struct drm_device *dev)
036a4a7d 3110{
2d1013dd 3111 struct drm_i915_private *dev_priv = dev->dev_private;
036a4a7d 3112
0c841212 3113 I915_WRITE(HWSTAM, 0xffffffff);
bdfcdb63 3114
f86f3fb0 3115 GEN5_IRQ_RESET(DE);
c6d954c1
PZ
3116 if (IS_GEN7(dev))
3117 I915_WRITE(GEN7_ERR_INT, 0xffffffff);
036a4a7d 3118
7c4d664e 3119 gen5_gt_irq_reset(dev);
c650156a 3120
1c69eb42 3121 ibx_irq_reset(dev);
7d99163d 3122}
c650156a 3123
7e231dbe
JB
3124static void valleyview_irq_preinstall(struct drm_device *dev)
3125{
2d1013dd 3126 struct drm_i915_private *dev_priv = dev->dev_private;
7e231dbe
JB
3127 int pipe;
3128
7e231dbe
JB
3129 /* VLV magic */
3130 I915_WRITE(VLV_IMR, 0);
3131 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
3132 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
3133 I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
3134
7e231dbe
JB
3135 /* and GT */
3136 I915_WRITE(GTIIR, I915_READ(GTIIR));
3137 I915_WRITE(GTIIR, I915_READ(GTIIR));
d18ea1b5 3138
7c4d664e 3139 gen5_gt_irq_reset(dev);
7e231dbe
JB
3140
3141 I915_WRITE(DPINVGTT, 0xff);
3142
3143 I915_WRITE(PORT_HOTPLUG_EN, 0);
3144 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3145 for_each_pipe(pipe)
3146 I915_WRITE(PIPESTAT(pipe), 0xffff);
3147 I915_WRITE(VLV_IIR, 0xffffffff);
3148 I915_WRITE(VLV_IMR, 0xffffffff);
3149 I915_WRITE(VLV_IER, 0x0);
3150 POSTING_READ(VLV_IER);
3151}
3152
d6e3cca3
DV
3153static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3154{
3155 GEN8_IRQ_RESET_NDX(GT, 0);
3156 GEN8_IRQ_RESET_NDX(GT, 1);
3157 GEN8_IRQ_RESET_NDX(GT, 2);
3158 GEN8_IRQ_RESET_NDX(GT, 3);
3159}
3160
823f6b38 3161static void gen8_irq_reset(struct drm_device *dev)
abd58f01
BW
3162{
3163 struct drm_i915_private *dev_priv = dev->dev_private;
3164 int pipe;
3165
abd58f01
BW
3166 I915_WRITE(GEN8_MASTER_IRQ, 0);
3167 POSTING_READ(GEN8_MASTER_IRQ);
3168
d6e3cca3 3169 gen8_gt_irq_reset(dev_priv);
abd58f01 3170
823f6b38 3171 for_each_pipe(pipe)
f86f3fb0 3172 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
abd58f01 3173
f86f3fb0
PZ
3174 GEN5_IRQ_RESET(GEN8_DE_PORT_);
3175 GEN5_IRQ_RESET(GEN8_DE_MISC_);
3176 GEN5_IRQ_RESET(GEN8_PCU_);
abd58f01 3177
1c69eb42 3178 ibx_irq_reset(dev);
abd58f01 3179}
09f2344d 3180
43f328d7
VS
3181static void cherryview_irq_preinstall(struct drm_device *dev)
3182{
3183 struct drm_i915_private *dev_priv = dev->dev_private;
3184 int pipe;
3185
3186 I915_WRITE(GEN8_MASTER_IRQ, 0);
3187 POSTING_READ(GEN8_MASTER_IRQ);
3188
d6e3cca3 3189 gen8_gt_irq_reset(dev_priv);
43f328d7
VS
3190
3191 GEN5_IRQ_RESET(GEN8_PCU_);
3192
3193 POSTING_READ(GEN8_PCU_IIR);
3194
3195 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3196
3197 I915_WRITE(PORT_HOTPLUG_EN, 0);
3198 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3199
3200 for_each_pipe(pipe)
3201 I915_WRITE(PIPESTAT(pipe), 0xffff);
3202
3203 I915_WRITE(VLV_IMR, 0xffffffff);
3204 I915_WRITE(VLV_IER, 0x0);
3205 I915_WRITE(VLV_IIR, 0xffffffff);
3206 POSTING_READ(VLV_IIR);
3207}
3208
82a28bcf 3209static void ibx_hpd_irq_setup(struct drm_device *dev)
7fe0b973 3210{
2d1013dd 3211 struct drm_i915_private *dev_priv = dev->dev_private;
82a28bcf
DV
3212 struct drm_mode_config *mode_config = &dev->mode_config;
3213 struct intel_encoder *intel_encoder;
fee884ed 3214 u32 hotplug_irqs, hotplug, enabled_irqs = 0;
82a28bcf
DV
3215
3216 if (HAS_PCH_IBX(dev)) {
fee884ed 3217 hotplug_irqs = SDE_HOTPLUG_MASK;
82a28bcf 3218 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
cd569aed 3219 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
fee884ed 3220 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
82a28bcf 3221 } else {
fee884ed 3222 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
82a28bcf 3223 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
cd569aed 3224 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
fee884ed 3225 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
82a28bcf 3226 }
7fe0b973 3227
fee884ed 3228 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
82a28bcf
DV
3229
3230 /*
3231 * Enable digital hotplug on the PCH, and configure the DP short pulse
 3232 * duration to 2ms (which is the minimum in the DisplayPort spec).
3233 *
3234 * This register is the same on all known PCH chips.
3235 */
7fe0b973
KP
3236 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3237 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3238 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3239 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3240 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3241 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3242}
3243
d46da437
PZ
3244static void ibx_irq_postinstall(struct drm_device *dev)
3245{
2d1013dd 3246 struct drm_i915_private *dev_priv = dev->dev_private;
82a28bcf 3247 u32 mask;
e5868a31 3248
692a04cf
DV
3249 if (HAS_PCH_NOP(dev))
3250 return;
3251
105b122e 3252 if (HAS_PCH_IBX(dev))
5c673b60 3253 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
105b122e 3254 else
5c673b60 3255 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
8664281b 3256
337ba017 3257 GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
d46da437 3258 I915_WRITE(SDEIMR, ~mask);
d46da437
PZ
3259}
3260
0a9a8c91
DV
3261static void gen5_gt_irq_postinstall(struct drm_device *dev)
3262{
3263 struct drm_i915_private *dev_priv = dev->dev_private;
3264 u32 pm_irqs, gt_irqs;
3265
3266 pm_irqs = gt_irqs = 0;
3267
3268 dev_priv->gt_irq_mask = ~0;
040d2baa 3269 if (HAS_L3_DPF(dev)) {
0a9a8c91 3270 /* L3 parity interrupt is always unmasked. */
35a85ac6
BW
3271 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3272 gt_irqs |= GT_PARITY_ERROR(dev);
0a9a8c91
DV
3273 }
3274
3275 gt_irqs |= GT_RENDER_USER_INTERRUPT;
3276 if (IS_GEN5(dev)) {
3277 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
3278 ILK_BSD_USER_INTERRUPT;
3279 } else {
3280 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3281 }
3282
35079899 3283 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
0a9a8c91
DV
3284
3285 if (INTEL_INFO(dev)->gen >= 6) {
a6706b45 3286 pm_irqs |= dev_priv->pm_rps_events;
0a9a8c91
DV
3287
3288 if (HAS_VEBOX(dev))
3289 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3290
605cd25b 3291 dev_priv->pm_irq_mask = 0xffffffff;
35079899 3292 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
0a9a8c91
DV
3293 }
3294}
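/*
 * GEN5_IRQ_INIT(type, imr, ier) is assumed to be the enable-side twin of
 * GEN5_IRQ_RESET: check that IIR starts out clean, then program the
 * enable and mask registers. Roughly:
 *
 *	#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
 *		GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
 *		I915_WRITE(type##IER, (ier_val)); \
 *		I915_WRITE(type##IMR, (imr_val)); \
 *		POSTING_READ(type##IMR); \
 *	} while (0)
 */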
3295
f71d4af4 3296static int ironlake_irq_postinstall(struct drm_device *dev)
036a4a7d 3297{
4bc9d430 3298 unsigned long irqflags;
2d1013dd 3299 struct drm_i915_private *dev_priv = dev->dev_private;
8e76f8dc
PZ
3300 u32 display_mask, extra_mask;
3301
3302 if (INTEL_INFO(dev)->gen >= 7) {
3303 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3304 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3305 DE_PLANEB_FLIP_DONE_IVB |
5c673b60 3306 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
8e76f8dc 3307 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
5c673b60 3308 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
8e76f8dc
PZ
3309 } else {
3310 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3311 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
5b3a856b 3312 DE_AUX_CHANNEL_A |
5b3a856b
DV
3313 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3314 DE_POISON);
5c673b60
DV
3315 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3316 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
8e76f8dc 3317 }
036a4a7d 3318
1ec14ad3 3319 dev_priv->irq_mask = ~display_mask;
036a4a7d 3320
0c841212
PZ
3321 I915_WRITE(HWSTAM, 0xeffe);
3322
622364b6
PZ
3323 ibx_irq_pre_postinstall(dev);
3324
35079899 3325 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
036a4a7d 3326
0a9a8c91 3327 gen5_gt_irq_postinstall(dev);
036a4a7d 3328
d46da437 3329 ibx_irq_postinstall(dev);
7fe0b973 3330
f97108d1 3331 if (IS_IRONLAKE_M(dev)) {
6005ce42
DV
 3332 /* Enable PCU event interrupts.
 3333 *
 3334 * Spinlocking is not required here for correctness, since interrupt
4bc9d430
DV
 3335 * setup is guaranteed to run in a single-threaded context. But we
 3336 * need the lock to keep assert_spin_locked happy. */
3337 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
f97108d1 3338 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
4bc9d430 3339 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
f97108d1
JB
3340 }
3341
036a4a7d
ZW
3342 return 0;
3343}
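/*
 * Note the ordering above: ibx_irq_pre_postinstall() runs before the DE
 * interrupts are enabled, presumably so SDEIER is already programmed by
 * the time the first display interrupt can fire and the handler peeks at
 * the south display engine; ibx_irq_postinstall() then only unmasks the
 * individual SDE sources in SDEIMR.
 */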
3344
f8b79e58
ID
3345static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
3346{
3347 u32 pipestat_mask;
3348 u32 iir_mask;
3349
3350 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3351 PIPE_FIFO_UNDERRUN_STATUS;
3352
3353 I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
3354 I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
3355 POSTING_READ(PIPESTAT(PIPE_A));
3356
3357 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3358 PIPE_CRC_DONE_INTERRUPT_STATUS;
3359
3360 i915_enable_pipestat(dev_priv, PIPE_A, pipestat_mask |
3361 PIPE_GMBUS_INTERRUPT_STATUS);
3362 i915_enable_pipestat(dev_priv, PIPE_B, pipestat_mask);
3363
3364 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3365 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3366 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3367 dev_priv->irq_mask &= ~iir_mask;
3368
3369 I915_WRITE(VLV_IIR, iir_mask);
3370 I915_WRITE(VLV_IIR, iir_mask);
3371 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3372 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3373 POSTING_READ(VLV_IER);
3374}
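/*
 * On VLV, IER is kept as the exact complement of dev_priv->irq_mask
 * (IER = ~IMR), so enabling a source means clearing its bit in irq_mask
 * and rewriting both registers. VLV_IIR is written twice first to flush
 * any events that were already latched for the bits being enabled.
 */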
3375
3376static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
3377{
3378 u32 pipestat_mask;
3379 u32 iir_mask;
3380
3381 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3382 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
6c7fba04 3383 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
f8b79e58
ID
3384
3385 dev_priv->irq_mask |= iir_mask;
3386 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3387 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3388 I915_WRITE(VLV_IIR, iir_mask);
3389 I915_WRITE(VLV_IIR, iir_mask);
3390 POSTING_READ(VLV_IIR);
3391
3392 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3393 PIPE_CRC_DONE_INTERRUPT_STATUS;
3394
3395 i915_disable_pipestat(dev_priv, PIPE_A, pipestat_mask |
3396 PIPE_GMBUS_INTERRUPT_STATUS);
3397 i915_disable_pipestat(dev_priv, PIPE_B, pipestat_mask);
3398
3399 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3400 PIPE_FIFO_UNDERRUN_STATUS;
3401 I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
3402 I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
3403 POSTING_READ(PIPESTAT(PIPE_A));
3404}
3405
3406void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3407{
3408 assert_spin_locked(&dev_priv->irq_lock);
3409
3410 if (dev_priv->display_irqs_enabled)
3411 return;
3412
3413 dev_priv->display_irqs_enabled = true;
3414
3415 if (dev_priv->dev->irq_enabled)
3416 valleyview_display_irqs_install(dev_priv);
3417}
3418
3419void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3420{
3421 assert_spin_locked(&dev_priv->irq_lock);
3422
3423 if (!dev_priv->display_irqs_enabled)
3424 return;
3425
3426 dev_priv->display_irqs_enabled = false;
3427
3428 if (dev_priv->dev->irq_enabled)
3429 valleyview_display_irqs_uninstall(dev_priv);
3430}
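/*
 * The two wrappers above let the display-side interrupts be toggled
 * independently of the GT side while holding irq_lock; the hardware is
 * only touched if the IRQ is currently installed, otherwise the
 * display_irqs_enabled flag simply records the desired state for the
 * next postinstall (see valleyview_irq_postinstall below).
 */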
3431
7e231dbe
JB
3432static int valleyview_irq_postinstall(struct drm_device *dev)
3433{
2d1013dd 3434 struct drm_i915_private *dev_priv = dev->dev_private;
b79480ba 3435 unsigned long irqflags;
7e231dbe 3436
f8b79e58 3437 dev_priv->irq_mask = ~0;
7e231dbe 3438
20afbda2
DV
3439 I915_WRITE(PORT_HOTPLUG_EN, 0);
3440 POSTING_READ(PORT_HOTPLUG_EN);
3441
7e231dbe 3442 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
f8b79e58 3443 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
7e231dbe 3444 I915_WRITE(VLV_IIR, 0xffffffff);
7e231dbe
JB
3445 POSTING_READ(VLV_IER);
3446
b79480ba
DV
3447 /* Interrupt setup is already guaranteed to be single-threaded, this is
3448 * just to make the assert_spin_locked check happy. */
3449 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
f8b79e58
ID
3450 if (dev_priv->display_irqs_enabled)
3451 valleyview_display_irqs_install(dev_priv);
b79480ba 3452 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
31acc7f5 3453
7e231dbe
JB
3454 I915_WRITE(VLV_IIR, 0xffffffff);
3455 I915_WRITE(VLV_IIR, 0xffffffff);
3456
0a9a8c91 3457 gen5_gt_irq_postinstall(dev);
7e231dbe
JB
3458
3459 /* ack & enable invalid PTE error interrupts */
3460#if 0 /* FIXME: add support to irq handler for checking these bits */
3461 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3462 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
3463#endif
3464
3465 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
20afbda2
DV
3466
3467 return 0;
3468}
3469
abd58f01
BW
3470static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3471{
3472 int i;
3473
3474 /* These are interrupts we'll toggle with the ring mask register */
3475 uint32_t gt_interrupts[] = {
3476 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3477 GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
3478 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3479 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3480 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3481 0,
3482 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3483 };
3484
337ba017 3485 for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++)
35079899 3486 GEN8_IRQ_INIT_NDX(GT, i, ~gt_interrupts[i], gt_interrupts[i]);
0961021a
BW
3487
3488 dev_priv->pm_irq_mask = 0xffffffff;
abd58f01
BW
3489}
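/*
 * Each entry of gt_interrupts[] above programs one GT IMR/IER pair, with
 * two engines packed per register at their GEN8_*_IRQ_SHIFT offsets (RCS
 * with BCS, VCS1 with VCS2). Entry 2 is left zero because that register
 * pair carries the PM interrupts, which are managed separately through
 * dev_priv->pm_irq_mask.
 */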
3490
3491static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3492{
3493 struct drm_device *dev = dev_priv->dev;
d0e1f1cb 3494 uint32_t de_pipe_masked = GEN8_PIPE_PRIMARY_FLIP_DONE |
13b3a0a7 3495 GEN8_PIPE_CDCLK_CRC_DONE |
13b3a0a7 3496 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
5c673b60
DV
3497 uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3498 GEN8_PIPE_FIFO_UNDERRUN;
abd58f01 3499 int pipe;
13b3a0a7
DV
3500 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3501 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3502 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
abd58f01 3503
337ba017 3504 for_each_pipe(pipe)
35079899
PZ
3505 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, dev_priv->de_irq_mask[pipe],
3506 de_pipe_enables);
abd58f01 3507
35079899 3508 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A);
abd58f01
BW
3509}
3510
3511static int gen8_irq_postinstall(struct drm_device *dev)
3512{
3513 struct drm_i915_private *dev_priv = dev->dev_private;
3514
622364b6
PZ
3515 ibx_irq_pre_postinstall(dev);
3516
abd58f01
BW
3517 gen8_gt_irq_postinstall(dev_priv);
3518 gen8_de_irq_postinstall(dev_priv);
3519
3520 ibx_irq_postinstall(dev);
3521
3522 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
3523 POSTING_READ(GEN8_MASTER_IRQ);
3524
3525 return 0;
3526}
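/*
 * GEN8_MASTER_IRQ is written last, with a posting read: every underlying
 * GT/DE/PCH source is fully programmed before the top-level gate is
 * opened, so nothing can fire against a half-initialized setup.
 */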
3527
43f328d7
VS
3528static int cherryview_irq_postinstall(struct drm_device *dev)
3529{
3530 struct drm_i915_private *dev_priv = dev->dev_private;
3531 u32 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
3532 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
43f328d7 3533 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3278f67f
VS
3534 I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3535 u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV |
3536 PIPE_CRC_DONE_INTERRUPT_STATUS;
43f328d7
VS
3537 unsigned long irqflags;
3538 int pipe;
3539
3540 /*
 3541 * Leave vblank interrupts masked initially; the enable/disable
 3542 * vblank hooks will toggle them based on usage.
3543 */
3278f67f 3544 dev_priv->irq_mask = ~enable_mask;
43f328d7
VS
3545
3546 for_each_pipe(pipe)
3547 I915_WRITE(PIPESTAT(pipe), 0xffff);
3548
3549 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3278f67f 3550 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
43f328d7
VS
3551 for_each_pipe(pipe)
3552 i915_enable_pipestat(dev_priv, pipe, pipestat_enable);
3553 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3554
3555 I915_WRITE(VLV_IIR, 0xffffffff);
3556 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3557 I915_WRITE(VLV_IER, enable_mask);
3558
3559 gen8_gt_irq_postinstall(dev_priv);
3560
3561 I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
3562 POSTING_READ(GEN8_MASTER_IRQ);
3563
3564 return 0;
3565}
3566
abd58f01
BW
3567static void gen8_irq_uninstall(struct drm_device *dev)
3568{
3569 struct drm_i915_private *dev_priv = dev->dev_private;
abd58f01
BW
3570
3571 if (!dev_priv)
3572 return;
3573
d4eb6b10 3574 intel_hpd_irq_uninstall(dev_priv);
abd58f01 3575
823f6b38 3576 gen8_irq_reset(dev);
abd58f01
BW
3577}
3578
7e231dbe
JB
3579static void valleyview_irq_uninstall(struct drm_device *dev)
3580{
2d1013dd 3581 struct drm_i915_private *dev_priv = dev->dev_private;
f8b79e58 3582 unsigned long irqflags;
7e231dbe
JB
3583 int pipe;
3584
3585 if (!dev_priv)
3586 return;
3587
843d0e7d
ID
3588 I915_WRITE(VLV_MASTER_IER, 0);
3589
3ca1cced 3590 intel_hpd_irq_uninstall(dev_priv);
ac4c16c5 3591
7e231dbe
JB
3592 for_each_pipe(pipe)
3593 I915_WRITE(PIPESTAT(pipe), 0xffff);
3594
3595 I915_WRITE(HWSTAM, 0xffffffff);
3596 I915_WRITE(PORT_HOTPLUG_EN, 0);
3597 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
f8b79e58
ID
3598
3599 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3600 if (dev_priv->display_irqs_enabled)
3601 valleyview_display_irqs_uninstall(dev_priv);
3602 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3603
3604 dev_priv->irq_mask = 0;
3605
7e231dbe
JB
3606 I915_WRITE(VLV_IIR, 0xffffffff);
3607 I915_WRITE(VLV_IMR, 0xffffffff);
3608 I915_WRITE(VLV_IER, 0x0);
3609 POSTING_READ(VLV_IER);
3610}
3611
43f328d7
VS
3612static void cherryview_irq_uninstall(struct drm_device *dev)
3613{
3614 struct drm_i915_private *dev_priv = dev->dev_private;
3615 int pipe;
3616
3617 if (!dev_priv)
3618 return;
3619
3620 I915_WRITE(GEN8_MASTER_IRQ, 0);
3621 POSTING_READ(GEN8_MASTER_IRQ);
3622
3623#define GEN8_IRQ_FINI_NDX(type, which) \
3624do { \
3625 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
3626 I915_WRITE(GEN8_##type##_IER(which), 0); \
3627 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
3628 POSTING_READ(GEN8_##type##_IIR(which)); \
3629 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
3630} while (0)
3631
3632#define GEN8_IRQ_FINI(type) \
3633do { \
3634 I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
3635 I915_WRITE(GEN8_##type##_IER, 0); \
3636 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
3637 POSTING_READ(GEN8_##type##_IIR); \
3638 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
3639} while (0)
3640
3641 GEN8_IRQ_FINI_NDX(GT, 0);
3642 GEN8_IRQ_FINI_NDX(GT, 1);
3643 GEN8_IRQ_FINI_NDX(GT, 2);
3644 GEN8_IRQ_FINI_NDX(GT, 3);
3645
3646 GEN8_IRQ_FINI(PCU);
3647
3648#undef GEN8_IRQ_FINI
3649#undef GEN8_IRQ_FINI_NDX
3650
3651 I915_WRITE(PORT_HOTPLUG_EN, 0);
3652 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3653
3654 for_each_pipe(pipe)
3655 I915_WRITE(PIPESTAT(pipe), 0xffff);
3656
3657 I915_WRITE(VLV_IMR, 0xffffffff);
3658 I915_WRITE(VLV_IER, 0x0);
3659 I915_WRITE(VLV_IIR, 0xffffffff);
3660 POSTING_READ(VLV_IIR);
3661}
3662
f71d4af4 3663static void ironlake_irq_uninstall(struct drm_device *dev)
036a4a7d 3664{
2d1013dd 3665 struct drm_i915_private *dev_priv = dev->dev_private;
4697995b
JB
3666
3667 if (!dev_priv)
3668 return;
3669
3ca1cced 3670 intel_hpd_irq_uninstall(dev_priv);
ac4c16c5 3671
be30b29f 3672 ironlake_irq_reset(dev);
036a4a7d
ZW
3673}
3674
a266c7d5 3675static void i8xx_irq_preinstall(struct drm_device * dev)
1da177e4 3676{
2d1013dd 3677 struct drm_i915_private *dev_priv = dev->dev_private;
9db4a9c7 3678 int pipe;
91e3738e 3679
9db4a9c7
JB
3680 for_each_pipe(pipe)
3681 I915_WRITE(PIPESTAT(pipe), 0);
a266c7d5
CW
3682 I915_WRITE16(IMR, 0xffff);
3683 I915_WRITE16(IER, 0x0);
3684 POSTING_READ16(IER);
c2798b19
CW
3685}
3686
3687static int i8xx_irq_postinstall(struct drm_device *dev)
3688{
2d1013dd 3689 struct drm_i915_private *dev_priv = dev->dev_private;
379ef82d 3690 unsigned long irqflags;
c2798b19 3691
c2798b19
CW
3692 I915_WRITE16(EMR,
3693 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3694
3695 /* Unmask the interrupts that we always want on. */
3696 dev_priv->irq_mask =
3697 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3698 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3699 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3700 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3701 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3702 I915_WRITE16(IMR, dev_priv->irq_mask);
3703
3704 I915_WRITE16(IER,
3705 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3706 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3707 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
3708 I915_USER_INTERRUPT);
3709 POSTING_READ16(IER);
3710
379ef82d
DV
3711 /* Interrupt setup is already guaranteed to be single-threaded, this is
3712 * just to make the assert_spin_locked check happy. */
3713 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
755e9019
ID
3714 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3715 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
379ef82d
DV
3716 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3717
c2798b19
CW
3718 return 0;
3719}
3720
90a72f87
VS
3721/*
3722 * Returns true when a page flip has completed.
3723 */
3724static bool i8xx_handle_vblank(struct drm_device *dev,
1f1c2e24 3725 int plane, int pipe, u32 iir)
90a72f87 3726{
2d1013dd 3727 struct drm_i915_private *dev_priv = dev->dev_private;
1f1c2e24 3728 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
90a72f87 3729
8d7849db 3730 if (!intel_pipe_handle_vblank(dev, pipe))
90a72f87
VS
3731 return false;
3732
3733 if ((iir & flip_pending) == 0)
3734 return false;
3735
1f1c2e24 3736 intel_prepare_page_flip(dev, plane);
90a72f87
VS
3737
3738 /* We detect FlipDone by looking for the change in PendingFlip from '1'
 3739 * to '0' on the following vblank, i.e. IIR has the PendingFlip
3740 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3741 * the flip is completed (no longer pending). Since this doesn't raise
3742 * an interrupt per se, we watch for the change at vblank.
3743 */
3744 if (I915_READ16(ISR) & flip_pending)
3745 return false;
3746
3747 intel_finish_page_flip(dev, pipe);
3748
3749 return true;
3750}
3751
ff1f525e 3752static irqreturn_t i8xx_irq_handler(int irq, void *arg)
c2798b19 3753{
45a83f84 3754 struct drm_device *dev = arg;
2d1013dd 3755 struct drm_i915_private *dev_priv = dev->dev_private;
c2798b19
CW
3756 u16 iir, new_iir;
3757 u32 pipe_stats[2];
3758 unsigned long irqflags;
c2798b19
CW
3759 int pipe;
3760 u16 flip_mask =
3761 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3762 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3763
c2798b19
CW
3764 iir = I915_READ16(IIR);
3765 if (iir == 0)
3766 return IRQ_NONE;
3767
3768 while (iir & ~flip_mask) {
3769 /* Can't rely on pipestat interrupt bit in iir as it might
3770 * have been cleared after the pipestat interrupt was received.
3771 * It doesn't set the bit in iir again, but it still produces
3772 * interrupts (for non-MSI).
3773 */
3774 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3775 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
58174462
MK
3776 i915_handle_error(dev, false,
3777 "Command parser error, iir 0x%08x",
3778 iir);
c2798b19
CW
3779
3780 for_each_pipe(pipe) {
3781 int reg = PIPESTAT(pipe);
3782 pipe_stats[pipe] = I915_READ(reg);
3783
3784 /*
3785 * Clear the PIPE*STAT regs before the IIR
3786 */
2d9d2b0b 3787 if (pipe_stats[pipe] & 0x8000ffff)
c2798b19 3788 I915_WRITE(reg, pipe_stats[pipe]);
c2798b19
CW
3789 }
3790 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3791
3792 I915_WRITE16(IIR, iir & ~flip_mask);
3793 new_iir = I915_READ16(IIR); /* Flush posted writes */
3794
d05c617e 3795 i915_update_dri1_breadcrumb(dev);
c2798b19
CW
3796
3797 if (iir & I915_USER_INTERRUPT)
3798 notify_ring(dev, &dev_priv->ring[RCS]);
3799
4356d586 3800 for_each_pipe(pipe) {
1f1c2e24 3801 int plane = pipe;
3a77c4c4 3802 if (HAS_FBC(dev))
1f1c2e24
VS
3803 plane = !plane;
3804
4356d586 3805 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
1f1c2e24
VS
3806 i8xx_handle_vblank(dev, plane, pipe, iir))
3807 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
c2798b19 3808
4356d586 3809 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
277de95e 3810 i9xx_pipe_crc_irq_handler(dev, pipe);
2d9d2b0b
VS
3811
3812 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
3813 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
fc2c807b 3814 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
4356d586 3815 }
c2798b19
CW
3816
3817 iir = new_iir;
3818 }
3819
3820 return IRQ_HANDLED;
3821}
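/*
 * Note how flip_mask interacts with the loop above: the plane-flip
 * pending bits are masked out of every IIR write, so they stay latched
 * until i8xx_handle_vblank() sees the flip complete and drops the plane
 * from flip_mask; only then does a later IIR write actually clear the
 * bit.
 */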
3822
3823static void i8xx_irq_uninstall(struct drm_device * dev)
3824{
2d1013dd 3825 struct drm_i915_private *dev_priv = dev->dev_private;
c2798b19
CW
3826 int pipe;
3827
c2798b19
CW
3828 for_each_pipe(pipe) {
3829 /* Clear enable bits; then clear status bits */
3830 I915_WRITE(PIPESTAT(pipe), 0);
3831 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3832 }
3833 I915_WRITE16(IMR, 0xffff);
3834 I915_WRITE16(IER, 0x0);
3835 I915_WRITE16(IIR, I915_READ16(IIR));
3836}
3837
a266c7d5
CW
3838static void i915_irq_preinstall(struct drm_device * dev)
3839{
2d1013dd 3840 struct drm_i915_private *dev_priv = dev->dev_private;
a266c7d5
CW
3841 int pipe;
3842
a266c7d5
CW
3843 if (I915_HAS_HOTPLUG(dev)) {
3844 I915_WRITE(PORT_HOTPLUG_EN, 0);
3845 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3846 }
3847
00d98ebd 3848 I915_WRITE16(HWSTAM, 0xeffe);
a266c7d5
CW
3849 for_each_pipe(pipe)
3850 I915_WRITE(PIPESTAT(pipe), 0);
3851 I915_WRITE(IMR, 0xffffffff);
3852 I915_WRITE(IER, 0x0);
3853 POSTING_READ(IER);
3854}
3855
3856static int i915_irq_postinstall(struct drm_device *dev)
3857{
2d1013dd 3858 struct drm_i915_private *dev_priv = dev->dev_private;
38bde180 3859 u32 enable_mask;
379ef82d 3860 unsigned long irqflags;
a266c7d5 3861
38bde180
CW
3862 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3863
3864 /* Unmask the interrupts that we always want on. */
3865 dev_priv->irq_mask =
3866 ~(I915_ASLE_INTERRUPT |
3867 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3868 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3869 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3870 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3871 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3872
3873 enable_mask =
3874 I915_ASLE_INTERRUPT |
3875 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3876 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3877 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
3878 I915_USER_INTERRUPT;
3879
a266c7d5 3880 if (I915_HAS_HOTPLUG(dev)) {
20afbda2
DV
3881 I915_WRITE(PORT_HOTPLUG_EN, 0);
3882 POSTING_READ(PORT_HOTPLUG_EN);
3883
a266c7d5
CW
3884 /* Enable in IER... */
3885 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3886 /* and unmask in IMR */
3887 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3888 }
3889
a266c7d5
CW
3890 I915_WRITE(IMR, dev_priv->irq_mask);
3891 I915_WRITE(IER, enable_mask);
3892 POSTING_READ(IER);
3893
f49e38dd 3894 i915_enable_asle_pipestat(dev);
20afbda2 3895
379ef82d
DV
3896 /* Interrupt setup is already guaranteed to be single-threaded, this is
3897 * just to make the assert_spin_locked check happy. */
3898 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
755e9019
ID
3899 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3900 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
379ef82d
DV
3901 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3902
20afbda2
DV
3903 return 0;
3904}
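/*
 * Note the asymmetry above: the plane-flip pending bits are unmasked in
 * IMR (so they appear in IIR) but deliberately left out of the IER
 * enable_mask, so they never raise an interrupt by themselves; the
 * vblank path samples them instead (see i915_handle_vblank()).
 */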
3905
90a72f87
VS
3906/*
3907 * Returns true when a page flip has completed.
3908 */
3909static bool i915_handle_vblank(struct drm_device *dev,
3910 int plane, int pipe, u32 iir)
3911{
2d1013dd 3912 struct drm_i915_private *dev_priv = dev->dev_private;
90a72f87
VS
3913 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3914
8d7849db 3915 if (!intel_pipe_handle_vblank(dev, pipe))
90a72f87
VS
3916 return false;
3917
3918 if ((iir & flip_pending) == 0)
3919 return false;
3920
3921 intel_prepare_page_flip(dev, plane);
3922
3923 /* We detect FlipDone by looking for the change in PendingFlip from '1'
 3924 * to '0' on the following vblank, i.e. IIR has the PendingFlip
3925 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3926 * the flip is completed (no longer pending). Since this doesn't raise
3927 * an interrupt per se, we watch for the change at vblank.
3928 */
3929 if (I915_READ(ISR) & flip_pending)
3930 return false;
3931
3932 intel_finish_page_flip(dev, pipe);
3933
3934 return true;
3935}
3936
ff1f525e 3937static irqreturn_t i915_irq_handler(int irq, void *arg)
a266c7d5 3938{
45a83f84 3939 struct drm_device *dev = arg;
2d1013dd 3940 struct drm_i915_private *dev_priv = dev->dev_private;
8291ee90 3941 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
a266c7d5 3942 unsigned long irqflags;
38bde180
CW
3943 u32 flip_mask =
3944 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3945 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
38bde180 3946 int pipe, ret = IRQ_NONE;
a266c7d5 3947
a266c7d5 3948 iir = I915_READ(IIR);
38bde180
CW
3949 do {
3950 bool irq_received = (iir & ~flip_mask) != 0;
8291ee90 3951 bool blc_event = false;
a266c7d5
CW
3952
3953 /* Can't rely on pipestat interrupt bit in iir as it might
3954 * have been cleared after the pipestat interrupt was received.
3955 * It doesn't set the bit in iir again, but it still produces
3956 * interrupts (for non-MSI).
3957 */
3958 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3959 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
58174462
MK
3960 i915_handle_error(dev, false,
3961 "Command parser error, iir 0x%08x",
3962 iir);
a266c7d5
CW
3963
3964 for_each_pipe(pipe) {
3965 int reg = PIPESTAT(pipe);
3966 pipe_stats[pipe] = I915_READ(reg);
3967
38bde180 3968 /* Clear the PIPE*STAT regs before the IIR */
a266c7d5 3969 if (pipe_stats[pipe] & 0x8000ffff) {
a266c7d5 3970 I915_WRITE(reg, pipe_stats[pipe]);
38bde180 3971 irq_received = true;
a266c7d5
CW
3972 }
3973 }
3974 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3975
3976 if (!irq_received)
3977 break;
3978
a266c7d5 3979 /* Consume port. Then clear IIR or we'll miss events */
16c6c56b
VS
3980 if (I915_HAS_HOTPLUG(dev) &&
3981 iir & I915_DISPLAY_PORT_INTERRUPT)
3982 i9xx_hpd_irq_handler(dev);
a266c7d5 3983
38bde180 3984 I915_WRITE(IIR, iir & ~flip_mask);
a266c7d5
CW
3985 new_iir = I915_READ(IIR); /* Flush posted writes */
3986
a266c7d5
CW
3987 if (iir & I915_USER_INTERRUPT)
3988 notify_ring(dev, &dev_priv->ring[RCS]);
a266c7d5 3989
a266c7d5 3990 for_each_pipe(pipe) {
38bde180 3991 int plane = pipe;
3a77c4c4 3992 if (HAS_FBC(dev))
38bde180 3993 plane = !plane;
90a72f87 3994
8291ee90 3995 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
90a72f87
VS
3996 i915_handle_vblank(dev, plane, pipe, iir))
3997 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
a266c7d5
CW
3998
3999 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4000 blc_event = true;
4356d586
DV
4001
4002 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
277de95e 4003 i9xx_pipe_crc_irq_handler(dev, pipe);
2d9d2b0b
VS
4004
4005 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
4006 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
fc2c807b 4007 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
a266c7d5
CW
4008 }
4009
a266c7d5
CW
4010 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4011 intel_opregion_asle_intr(dev);
4012
4013 /* With MSI, interrupts are only generated when iir
4014 * transitions from zero to nonzero. If another bit got
4015 * set while we were handling the existing iir bits, then
4016 * we would never get another interrupt.
4017 *
4018 * This is fine on non-MSI as well, as if we hit this path
4019 * we avoid exiting the interrupt handler only to generate
4020 * another one.
4021 *
4022 * Note that for MSI this could cause a stray interrupt report
4023 * if an interrupt landed in the time between writing IIR and
4024 * the posting read. This should be rare enough to never
4025 * trigger the 99% of 100,000 interrupts test for disabling
4026 * stray interrupts.
4027 */
38bde180 4028 ret = IRQ_HANDLED;
a266c7d5 4029 iir = new_iir;
38bde180 4030 } while (iir & ~flip_mask);
a266c7d5 4031
d05c617e 4032 i915_update_dri1_breadcrumb(dev);
8291ee90 4033
a266c7d5
CW
4034 return ret;
4035}
4036
4037static void i915_irq_uninstall(struct drm_device * dev)
4038{
2d1013dd 4039 struct drm_i915_private *dev_priv = dev->dev_private;
a266c7d5
CW
4040 int pipe;
4041
3ca1cced 4042 intel_hpd_irq_uninstall(dev_priv);
ac4c16c5 4043
a266c7d5
CW
4044 if (I915_HAS_HOTPLUG(dev)) {
4045 I915_WRITE(PORT_HOTPLUG_EN, 0);
4046 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4047 }
4048
00d98ebd 4049 I915_WRITE16(HWSTAM, 0xffff);
55b39755
CW
4050 for_each_pipe(pipe) {
4051 /* Clear enable bits; then clear status bits */
a266c7d5 4052 I915_WRITE(PIPESTAT(pipe), 0);
55b39755
CW
4053 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4054 }
a266c7d5
CW
4055 I915_WRITE(IMR, 0xffffffff);
4056 I915_WRITE(IER, 0x0);
4057
a266c7d5
CW
4058 I915_WRITE(IIR, I915_READ(IIR));
4059}
4060
4061static void i965_irq_preinstall(struct drm_device * dev)
4062{
2d1013dd 4063 struct drm_i915_private *dev_priv = dev->dev_private;
a266c7d5
CW
4064 int pipe;
4065
adca4730
CW
4066 I915_WRITE(PORT_HOTPLUG_EN, 0);
4067 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
a266c7d5
CW
4068
4069 I915_WRITE(HWSTAM, 0xeffe);
4070 for_each_pipe(pipe)
4071 I915_WRITE(PIPESTAT(pipe), 0);
4072 I915_WRITE(IMR, 0xffffffff);
4073 I915_WRITE(IER, 0x0);
4074 POSTING_READ(IER);
4075}
4076
4077static int i965_irq_postinstall(struct drm_device *dev)
4078{
2d1013dd 4079 struct drm_i915_private *dev_priv = dev->dev_private;
bbba0a97 4080 u32 enable_mask;
a266c7d5 4081 u32 error_mask;
b79480ba 4082 unsigned long irqflags;
a266c7d5 4083
a266c7d5 4084 /* Unmask the interrupts that we always want on. */
bbba0a97 4085 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
adca4730 4086 I915_DISPLAY_PORT_INTERRUPT |
bbba0a97
CW
4087 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4088 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4089 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4090 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4091 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4092
4093 enable_mask = ~dev_priv->irq_mask;
21ad8330
VS
4094 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4095 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
bbba0a97
CW
4096 enable_mask |= I915_USER_INTERRUPT;
4097
4098 if (IS_G4X(dev))
4099 enable_mask |= I915_BSD_USER_INTERRUPT;
a266c7d5 4100
b79480ba
DV
4101 /* Interrupt setup is already guaranteed to be single-threaded, this is
4102 * just to make the assert_spin_locked check happy. */
4103 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
755e9019
ID
4104 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4105 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4106 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
b79480ba 4107 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
a266c7d5 4108
a266c7d5
CW
4109 /*
4110 * Enable some error detection, note the instruction error mask
4111 * bit is reserved, so we leave it masked.
4112 */
4113 if (IS_G4X(dev)) {
4114 error_mask = ~(GM45_ERROR_PAGE_TABLE |
4115 GM45_ERROR_MEM_PRIV |
4116 GM45_ERROR_CP_PRIV |
4117 I915_ERROR_MEMORY_REFRESH);
4118 } else {
4119 error_mask = ~(I915_ERROR_PAGE_TABLE |
4120 I915_ERROR_MEMORY_REFRESH);
4121 }
4122 I915_WRITE(EMR, error_mask);
4123
4124 I915_WRITE(IMR, dev_priv->irq_mask);
4125 I915_WRITE(IER, enable_mask);
4126 POSTING_READ(IER);
4127
20afbda2
DV
4128 I915_WRITE(PORT_HOTPLUG_EN, 0);
4129 POSTING_READ(PORT_HOTPLUG_EN);
4130
f49e38dd 4131 i915_enable_asle_pipestat(dev);
20afbda2
DV
4132
4133 return 0;
4134}
4135
bac56d5b 4136static void i915_hpd_irq_setup(struct drm_device *dev)
20afbda2 4137{
2d1013dd 4138 struct drm_i915_private *dev_priv = dev->dev_private;
e5868a31 4139 struct drm_mode_config *mode_config = &dev->mode_config;
cd569aed 4140 struct intel_encoder *intel_encoder;
20afbda2
DV
4141 u32 hotplug_en;
4142
b5ea2d56
DV
4143 assert_spin_locked(&dev_priv->irq_lock);
4144
bac56d5b
EE
4145 if (I915_HAS_HOTPLUG(dev)) {
4146 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
4147 hotplug_en &= ~HOTPLUG_INT_EN_MASK;
4148 /* Note HDMI and DP share hotplug bits */
e5868a31 4149 /* enable bits are the same for all generations */
cd569aed
EE
4150 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
4151 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
4152 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
bac56d5b
EE
 4153 /* Programming the CRT detection parameters tends to
 4154 * generate a spurious hotplug event about three
 4155 * seconds later. So just do it once.
 4156 */
4157 if (IS_G4X(dev))
4158 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
85fc95ba 4159 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
bac56d5b 4160 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
a266c7d5 4161
bac56d5b
EE
4162 /* Ignore TV since it's buggy */
4163 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
4164 }
a266c7d5
CW
4165}
4166
ff1f525e 4167static irqreturn_t i965_irq_handler(int irq, void *arg)
a266c7d5 4168{
45a83f84 4169 struct drm_device *dev = arg;
2d1013dd 4170 struct drm_i915_private *dev_priv = dev->dev_private;
a266c7d5
CW
4171 u32 iir, new_iir;
4172 u32 pipe_stats[I915_MAX_PIPES];
a266c7d5 4173 unsigned long irqflags;
a266c7d5 4174 int ret = IRQ_NONE, pipe;
21ad8330
VS
4175 u32 flip_mask =
4176 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4177 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
a266c7d5 4178
a266c7d5
CW
4179 iir = I915_READ(IIR);
4180
a266c7d5 4181 for (;;) {
501e01d7 4182 bool irq_received = (iir & ~flip_mask) != 0;
2c8ba29f
CW
4183 bool blc_event = false;
4184
a266c7d5
CW
4185 /* Can't rely on pipestat interrupt bit in iir as it might
4186 * have been cleared after the pipestat interrupt was received.
4187 * It doesn't set the bit in iir again, but it still produces
4188 * interrupts (for non-MSI).
4189 */
4190 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
4191 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
58174462
MK
4192 i915_handle_error(dev, false,
4193 "Command parser error, iir 0x%08x",
4194 iir);
a266c7d5
CW
4195
4196 for_each_pipe(pipe) {
4197 int reg = PIPESTAT(pipe);
4198 pipe_stats[pipe] = I915_READ(reg);
4199
4200 /*
4201 * Clear the PIPE*STAT regs before the IIR
4202 */
4203 if (pipe_stats[pipe] & 0x8000ffff) {
a266c7d5 4204 I915_WRITE(reg, pipe_stats[pipe]);
501e01d7 4205 irq_received = true;
a266c7d5
CW
4206 }
4207 }
4208 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
4209
4210 if (!irq_received)
4211 break;
4212
4213 ret = IRQ_HANDLED;
4214
4215 /* Consume port. Then clear IIR or we'll miss events */
16c6c56b
VS
4216 if (iir & I915_DISPLAY_PORT_INTERRUPT)
4217 i9xx_hpd_irq_handler(dev);
a266c7d5 4218
21ad8330 4219 I915_WRITE(IIR, iir & ~flip_mask);
a266c7d5
CW
4220 new_iir = I915_READ(IIR); /* Flush posted writes */
4221
a266c7d5
CW
4222 if (iir & I915_USER_INTERRUPT)
4223 notify_ring(dev, &dev_priv->ring[RCS]);
4224 if (iir & I915_BSD_USER_INTERRUPT)
4225 notify_ring(dev, &dev_priv->ring[VCS]);
4226
a266c7d5 4227 for_each_pipe(pipe) {
2c8ba29f 4228 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
90a72f87
VS
4229 i915_handle_vblank(dev, pipe, pipe, iir))
4230 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
a266c7d5
CW
4231
4232 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4233 blc_event = true;
4356d586
DV
4234
4235 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
277de95e 4236 i9xx_pipe_crc_irq_handler(dev, pipe);
a266c7d5 4237
2d9d2b0b
VS
4238 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
4239 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
fc2c807b 4240 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
2d9d2b0b 4241 }
a266c7d5
CW
4242
4243 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4244 intel_opregion_asle_intr(dev);
4245
515ac2bb
DV
4246 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
4247 gmbus_irq_handler(dev);
4248
a266c7d5
CW
4249 /* With MSI, interrupts are only generated when iir
4250 * transitions from zero to nonzero. If another bit got
4251 * set while we were handling the existing iir bits, then
4252 * we would never get another interrupt.
4253 *
4254 * This is fine on non-MSI as well, as if we hit this path
4255 * we avoid exiting the interrupt handler only to generate
4256 * another one.
4257 *
4258 * Note that for MSI this could cause a stray interrupt report
4259 * if an interrupt landed in the time between writing IIR and
4260 * the posting read. This should be rare enough to never
4261 * trigger the 99% of 100,000 interrupts test for disabling
4262 * stray interrupts.
4263 */
4264 iir = new_iir;
4265 }
4266
d05c617e 4267 i915_update_dri1_breadcrumb(dev);
2c8ba29f 4268
a266c7d5
CW
4269 return ret;
4270}
4271
4272static void i965_irq_uninstall(struct drm_device * dev)
4273{
2d1013dd 4274 struct drm_i915_private *dev_priv = dev->dev_private;
a266c7d5
CW
4275 int pipe;
4276
4277 if (!dev_priv)
4278 return;
4279
3ca1cced 4280 intel_hpd_irq_uninstall(dev_priv);
ac4c16c5 4281
adca4730
CW
4282 I915_WRITE(PORT_HOTPLUG_EN, 0);
4283 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
a266c7d5
CW
4284
4285 I915_WRITE(HWSTAM, 0xffffffff);
4286 for_each_pipe(pipe)
4287 I915_WRITE(PIPESTAT(pipe), 0);
4288 I915_WRITE(IMR, 0xffffffff);
4289 I915_WRITE(IER, 0x0);
4290
4291 for_each_pipe(pipe)
4292 I915_WRITE(PIPESTAT(pipe),
4293 I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
4294 I915_WRITE(IIR, I915_READ(IIR));
4295}
4296
3ca1cced 4297static void intel_hpd_irq_reenable(unsigned long data)
ac4c16c5 4298{
2d1013dd 4299 struct drm_i915_private *dev_priv = (struct drm_i915_private *)data;
ac4c16c5
EE
4300 struct drm_device *dev = dev_priv->dev;
4301 struct drm_mode_config *mode_config = &dev->mode_config;
4302 unsigned long irqflags;
4303 int i;
4304
4305 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
4306 for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
4307 struct drm_connector *connector;
4308
4309 if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
4310 continue;
4311
4312 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
4313
4314 list_for_each_entry(connector, &mode_config->connector_list, head) {
4315 struct intel_connector *intel_connector = to_intel_connector(connector);
4316
4317 if (intel_connector->encoder->hpd_pin == i) {
4318 if (connector->polled != intel_connector->polled)
4319 DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
c23cc417 4320 connector->name);
ac4c16c5
EE
4321 connector->polled = intel_connector->polled;
4322 if (!connector->polled)
4323 connector->polled = DRM_CONNECTOR_POLL_HPD;
4324 }
4325 }
4326 }
4327 if (dev_priv->display.hpd_irq_setup)
4328 dev_priv->display.hpd_irq_setup(dev);
4329 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
4330}
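/*
 * This runs from the hotplug_reenable_timer armed in intel_irq_init()
 * below: pins that were marked HPD_DISABLED (presumably by storm
 * detection elsewhere in the driver) are flipped back to HPD_ENABLED,
 * their connectors are taken off poll-only duty, and the per-platform
 * hpd_irq_setup() hook reprograms the hardware to match.
 */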
4331
f71d4af4
JB
4332void intel_irq_init(struct drm_device *dev)
4333{
8b2e326d
CW
4334 struct drm_i915_private *dev_priv = dev->dev_private;
4335
4336 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
99584db3 4337 INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
c6a828d3 4338 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
a4da4fa4 4339 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
8b2e326d 4340
a6706b45
D
4341 /* Let's track the enabled rps events */
4342 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4343
99584db3
DV
4344 setup_timer(&dev_priv->gpu_error.hangcheck_timer,
4345 i915_hangcheck_elapsed,
61bac78e 4346 (unsigned long) dev);
3ca1cced 4347 setup_timer(&dev_priv->hotplug_reenable_timer, intel_hpd_irq_reenable,
ac4c16c5 4348 (unsigned long) dev_priv);
61bac78e 4349
97a19a24 4350 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
9ee32fea 4351
4cdb83ec
VS
4352 if (IS_GEN2(dev)) {
4353 dev->max_vblank_count = 0;
4354 dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
4355 } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
f71d4af4
JB
4356 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4357 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
391f75e2
VS
4358 } else {
4359 dev->driver->get_vblank_counter = i915_get_vblank_counter;
4360 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
f71d4af4
JB
4361 }
4362
c2baf4b7 4363 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
c3613de9 4364 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
c2baf4b7
VS
4365 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4366 }
f71d4af4 4367
43f328d7
VS
4368 if (IS_CHERRYVIEW(dev)) {
4369 dev->driver->irq_handler = cherryview_irq_handler;
4370 dev->driver->irq_preinstall = cherryview_irq_preinstall;
4371 dev->driver->irq_postinstall = cherryview_irq_postinstall;
4372 dev->driver->irq_uninstall = cherryview_irq_uninstall;
4373 dev->driver->enable_vblank = valleyview_enable_vblank;
4374 dev->driver->disable_vblank = valleyview_disable_vblank;
4375 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4376 } else if (IS_VALLEYVIEW(dev)) {
7e231dbe
JB
4377 dev->driver->irq_handler = valleyview_irq_handler;
4378 dev->driver->irq_preinstall = valleyview_irq_preinstall;
4379 dev->driver->irq_postinstall = valleyview_irq_postinstall;
4380 dev->driver->irq_uninstall = valleyview_irq_uninstall;
4381 dev->driver->enable_vblank = valleyview_enable_vblank;
4382 dev->driver->disable_vblank = valleyview_disable_vblank;
fa00abe0 4383 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
abd58f01
BW
4384 } else if (IS_GEN8(dev)) {
4385 dev->driver->irq_handler = gen8_irq_handler;
723761b8 4386 dev->driver->irq_preinstall = gen8_irq_reset;
abd58f01
BW
4387 dev->driver->irq_postinstall = gen8_irq_postinstall;
4388 dev->driver->irq_uninstall = gen8_irq_uninstall;
4389 dev->driver->enable_vblank = gen8_enable_vblank;
4390 dev->driver->disable_vblank = gen8_disable_vblank;
4391 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
f71d4af4
JB
4392 } else if (HAS_PCH_SPLIT(dev)) {
4393 dev->driver->irq_handler = ironlake_irq_handler;
723761b8 4394 dev->driver->irq_preinstall = ironlake_irq_reset;
f71d4af4
JB
4395 dev->driver->irq_postinstall = ironlake_irq_postinstall;
4396 dev->driver->irq_uninstall = ironlake_irq_uninstall;
4397 dev->driver->enable_vblank = ironlake_enable_vblank;
4398 dev->driver->disable_vblank = ironlake_disable_vblank;
82a28bcf 4399 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
f71d4af4 4400 } else {
c2798b19
CW
4401 if (INTEL_INFO(dev)->gen == 2) {
4402 dev->driver->irq_preinstall = i8xx_irq_preinstall;
4403 dev->driver->irq_postinstall = i8xx_irq_postinstall;
4404 dev->driver->irq_handler = i8xx_irq_handler;
4405 dev->driver->irq_uninstall = i8xx_irq_uninstall;
a266c7d5
CW
4406 } else if (INTEL_INFO(dev)->gen == 3) {
4407 dev->driver->irq_preinstall = i915_irq_preinstall;
4408 dev->driver->irq_postinstall = i915_irq_postinstall;
4409 dev->driver->irq_uninstall = i915_irq_uninstall;
4410 dev->driver->irq_handler = i915_irq_handler;
20afbda2 4411 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
c2798b19 4412 } else {
a266c7d5
CW
4413 dev->driver->irq_preinstall = i965_irq_preinstall;
4414 dev->driver->irq_postinstall = i965_irq_postinstall;
4415 dev->driver->irq_uninstall = i965_irq_uninstall;
4416 dev->driver->irq_handler = i965_irq_handler;
bac56d5b 4417 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
c2798b19 4418 }
f71d4af4
JB
4419 dev->driver->enable_vblank = i915_enable_vblank;
4420 dev->driver->disable_vblank = i915_disable_vblank;
4421 }
4422}
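/*
 * These hooks are consumed by the DRM core rather than called directly;
 * a sketch of the order drm_irq_install() of this era runs them in
 * (assuming the stock core, error handling elided):
 *
 *	dev->driver->irq_preinstall(dev);
 *	request_irq(irq, dev->driver->irq_handler, flags, name, dev);
 *	dev->driver->irq_postinstall(dev);
 */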
20afbda2
DV
4423
4424void intel_hpd_init(struct drm_device *dev)
4425{
4426 struct drm_i915_private *dev_priv = dev->dev_private;
821450c6
EE
4427 struct drm_mode_config *mode_config = &dev->mode_config;
4428 struct drm_connector *connector;
b5ea2d56 4429 unsigned long irqflags;
821450c6 4430 int i;
20afbda2 4431
821450c6
EE
4432 for (i = 1; i < HPD_NUM_PINS; i++) {
4433 dev_priv->hpd_stats[i].hpd_cnt = 0;
4434 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
4435 }
4436 list_for_each_entry(connector, &mode_config->connector_list, head) {
4437 struct intel_connector *intel_connector = to_intel_connector(connector);
4438 connector->polled = intel_connector->polled;
4439 if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
4440 connector->polled = DRM_CONNECTOR_POLL_HPD;
4441 }
b5ea2d56
DV
4442
4443 /* Interrupt setup is already guaranteed to be single-threaded, this is
4444 * just to make the assert_spin_locked checks happy. */
4445 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
20afbda2
DV
4446 if (dev_priv->display.hpd_irq_setup)
4447 dev_priv->display.hpd_irq_setup(dev);
b5ea2d56 4448 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
20afbda2 4449}
c67a470b 4450
5d584b2e 4451/* Disable interrupts so we can allow runtime PM. */
730488b2 4452void intel_runtime_pm_disable_interrupts(struct drm_device *dev)
c67a470b
PZ
4453{
4454 struct drm_i915_private *dev_priv = dev->dev_private;
c67a470b 4455
730488b2 4456 dev->driver->irq_uninstall(dev);
5d584b2e 4457 dev_priv->pm.irqs_disabled = true;
c67a470b
PZ
4458}
4459
5d584b2e 4460/* Restore interrupts so we can recover from runtime PM. */
730488b2 4461void intel_runtime_pm_restore_interrupts(struct drm_device *dev)
c67a470b
PZ
4462{
4463 struct drm_i915_private *dev_priv = dev->dev_private;
c67a470b 4464
5d584b2e 4465 dev_priv->pm.irqs_disabled = false;
730488b2
PZ
4466 dev->driver->irq_preinstall(dev);
4467 dev->driver->irq_postinstall(dev);
c67a470b 4468}
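/*
 * Both helpers above reuse the driver's own IRQ hooks, so a runtime-PM
 * disable/restore cycle amounts to a full interrupt teardown and reinit.
 * The pm.irqs_disabled flag presumably lets other paths refuse to touch
 * interrupt registers while the programming would be lost.
 */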