/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
	u32 val = I915_READ(reg); \
	if (val) { \
		WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
		     (reg), val); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
	} \
} while (0)

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

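/*
 * Illustrative expansion (GT as an example): GEN5_IRQ_RESET(GT) masks
 * everything in GTIMR, zeroes GTIER, and then clears GTIIR twice,
 * because IIR can hold a second queued event behind the one currently
 * signalled. The POSTING_READs flush each write before the next one is
 * issued.
 */
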
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_display_irq(struct drm_i915_private *dev_priv,
				   uint32_t interrupt_mask,
				   uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask) {
		dev_priv->irq_mask = new_val;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

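/*
 * Usage note (illustrative): IMR bits are masks, so a set bit disables
 * the interrupt. For example, ilk_update_display_irq(dev_priv,
 * DE_AUX_CHANNEL_A, DE_AUX_CHANNEL_A) clears that bit in DEIMR and so
 * unmasks the AUX A interrupt, while passing 0 as enabled_irq_mask
 * sets the bit again and masks it.
 */
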
void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	ilk_update_display_irq(dev_priv, mask, mask);
}

void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	ilk_update_display_irq(dev_priv, mask, 0);
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
				  uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_disable_pm_irq(dev_priv, mask);
}

void gen6_reset_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg = gen6_pm_iir(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	POSTING_READ(reg);
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}

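/*
 * Note (illustrative): the IIR register is written twice above for the
 * same reason as in the GEN*_IRQ_RESET macros: IIR can queue up a
 * second event behind the one currently visible.
 */
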
void gen6_enable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);

	WARN_ON(dev_priv->rps.pm_iir);
	WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	dev_priv->rps.interrupts_enabled = true;
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
				dev_priv->pm_rps_events);
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}

u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
{
	/*
	 * SNB and IVB can hard hang, and VLV and CHV may hard hang, on a
	 * looping batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
		mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_INFO(dev_priv)->gen >= 8)
		mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;

	return mask;
}

void gen6_disable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.interrupts_enabled = false;
	spin_unlock_irq(&dev_priv->irq_lock);

	cancel_work_sync(&dev_priv->rps.work);

	spin_lock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));

	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
				~dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);

	synchronize_irq(dev->irq);
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}

void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

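/*
 * Layout note (illustrative): PIPESTAT packs the enable bits in the
 * high 16 bits and the corresponding status bits in the low 16, which
 * is why the common (non-VLV) case above derives the enable mask as
 * status_mask << 16.
 */
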
/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       . \hs/ .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

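/*
 * Worked example (illustrative): the gen3/4 hardware frame counter
 * increments at the start of active, so the "(pixel >= vbl_start)"
 * term above bumps the cooked value by one while scanout sits between
 * the start of vblank and that increment point, emulating the ctg+
 * counter that already counts the new frame during vblank.
 */
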
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	return I915_READ(reg);
}

/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))

static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *mode = &crtc->base.hwmode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev))
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (WARN_ON(!mode->crtc_clock)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	return ret;
}

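/*
 * Worked example (illustrative numbers): with vbl_start = 2000,
 * vbl_end = 2020 and vtotal = 2025 (in lines), a raw position of 2010
 * is reported as 2010 - 2020 = -10, i.e. 10 lines before vblank ends,
 * while a raw position of 100 is reported as 100 + (2025 - 2020) = 105
 * lines since vblank ended.
 */
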
int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->hwmode.crtc_clock) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc,
						     &crtc->hwmode);
}

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);
}

static void notify_ring(struct intel_engine_cs *ring)
{
	if (!intel_ring_initialized(ring))
		return;

	trace_i915_gem_request_notify(ring);

	wake_up_all(&ring->irq_queue);
}

static void vlv_c0_read(struct drm_i915_private *dev_priv,
			struct intel_rps_ei *ei)
{
	ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}

static bool vlv_c0_above(struct drm_i915_private *dev_priv,
			 const struct intel_rps_ei *old,
			 const struct intel_rps_ei *now,
			 int threshold)
{
	u64 time, c0;

	if (old->cz_clock == 0)
		return false;

	time = now->cz_clock - old->cz_clock;
	time *= threshold * dev_priv->mem_freq;

	/* Workload can be split between render + media, e.g. SwapBuffers
	 * being blitted in X after being rendered in mesa. To account for
	 * this we need to combine both engines into our activity counter.
	 */
	c0 = now->render_c0 - old->render_c0;
	c0 += now->media_c0 - old->media_c0;
	c0 *= 100 * VLV_CZ_CLOCK_TO_MILLI_SEC * 4 / 1000;

	return c0 >= time;
}

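/*
 * Reading of the arithmetic above (illustrative): "time" is the elapsed
 * CZ-clock interval scaled by the threshold percentage, and "c0" is the
 * combined render+media residency scaled to the same units, so
 * "c0 >= time" asks whether the GPU was busy for more than "threshold"
 * percent of the interval.
 */
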
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
	vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
	dev_priv->rps.up_ei = dev_priv->rps.down_ei;
}

static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
		return 0;

	vlv_c0_read(dev_priv, &now);
	if (now.cz_clock == 0)
		return 0;

	if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
		if (!vlv_c0_above(dev_priv,
				  &dev_priv->rps.down_ei, &now,
				  dev_priv->rps.down_threshold))
			events |= GEN6_PM_RP_DOWN_THRESHOLD;
		dev_priv->rps.down_ei = now;
	}

	if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
		if (vlv_c0_above(dev_priv,
				 &dev_priv->rps.up_ei, &now,
				 dev_priv->rps.up_threshold))
			events |= GEN6_PM_RP_UP_THRESHOLD;
		dev_priv->rps.up_ei = now;
	}

	return events;
}

static bool any_waiters(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		if (ring->irq_refcount)
			return true;

	return false;
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	bool client_boost;
	int new_delay, adj, min, max;
	u32 pm_iir;

	spin_lock_irq(&dev_priv->irq_lock);
	/* Speed up work cancelation during disabling rps interrupts. */
	if (!dev_priv->rps.interrupts_enabled) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	client_boost = dev_priv->rps.client_boost;
	dev_priv->rps.client_boost = false;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

	adj = dev_priv->rps.last_adj;
	new_delay = dev_priv->rps.cur_freq;
	min = dev_priv->rps.min_freq_softlimit;
	max = dev_priv->rps.max_freq_softlimit;

	if (client_boost) {
		new_delay = dev_priv->rps.max_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq - adj) {
			new_delay = dev_priv->rps.efficient_freq;
			adj = 0;
		}
	} else if (any_waiters(dev_priv)) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
	} else { /* unknown event */
		adj = 0;
	}

	dev_priv->rps.last_adj = adj;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay += adj;
	new_delay = clamp_t(int, new_delay, min, max);

	intel_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}

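/*
 * Behaviour sketch (illustrative): consecutive up-threshold events
 * double "adj" (1, 2, 4, ...), so sustained load ramps the frequency
 * progressively faster; any_waiters() or an unknown event resets the
 * adjustment to 0, and the down direction mirrors this with negative
 * values.
 */
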
/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&dev_priv->irq_lock);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irq(&dev_priv->irq_lock);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

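/*
 * Mapping note (from the code above): the two
 * GT_RENDER_L3_PARITY_ERROR_INTERRUPT* bits select L3 slice 1 and
 * slice 0 respectively; the work item then drains all pending slices
 * in one pass by walking which_slice with ffs().
 */
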
static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(&dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(&dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(&dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(&dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(&dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}

static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
				       u32 master_ctl)
{
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		u32 tmp = I915_READ_FW(GEN8_GT_IIR(0));
		if (tmp) {
			I915_WRITE_FW(GEN8_GT_IIR(0), tmp);
			ret = IRQ_HANDLED;

			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
				intel_lrc_irq_handler(&dev_priv->ring[RCS]);
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
				notify_ring(&dev_priv->ring[RCS]);

			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
				intel_lrc_irq_handler(&dev_priv->ring[BCS]);
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
				notify_ring(&dev_priv->ring[BCS]);
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		u32 tmp = I915_READ_FW(GEN8_GT_IIR(1));
		if (tmp) {
			I915_WRITE_FW(GEN8_GT_IIR(1), tmp);
			ret = IRQ_HANDLED;

			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
				intel_lrc_irq_handler(&dev_priv->ring[VCS]);
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
				notify_ring(&dev_priv->ring[VCS]);

			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
				intel_lrc_irq_handler(&dev_priv->ring[VCS2]);
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
				notify_ring(&dev_priv->ring[VCS2]);
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		u32 tmp = I915_READ_FW(GEN8_GT_IIR(3));
		if (tmp) {
			I915_WRITE_FW(GEN8_GT_IIR(3), tmp);
			ret = IRQ_HANDLED;

			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
				intel_lrc_irq_handler(&dev_priv->ring[VECS]);
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
				notify_ring(&dev_priv->ring[VECS]);
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	if (master_ctl & GEN8_GT_PM_IRQ) {
		u32 tmp = I915_READ_FW(GEN8_GT_IIR(2));
		if (tmp & dev_priv->pm_rps_events) {
			I915_WRITE_FW(GEN8_GT_IIR(2),
				      tmp & dev_priv->pm_rps_events);
			ret = IRQ_HANDLED;
			gen6_rps_irq_handler(dev_priv, tmp);
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	return ret;
}

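/*
 * Bank mapping (from the code above): GT IIR bank 0 serves RCS/BCS,
 * bank 1 VCS1/VCS2, bank 3 VECS, and bank 2 carries the PM/RPS events,
 * each gated by its GEN8_GT_*_IRQ bit in master_ctl.
 */
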
static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_E:
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool pch_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_B:
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
	case PORT_C:
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
	case PORT_D:
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
	}
}

/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
			       bool long_pulse_detect(enum port port, u32 val))
{
	enum port port;
	int i;

	for_each_hpd_pin(i) {
		if ((hpd[i] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(i);

		if (!intel_hpd_pin_to_port(i, &port))
			continue;

		if (long_pulse_detect(port, dig_hotplug_reg))
			*long_mask |= BIT(i);
	}

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg, *pin_mask);
}

static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_DEBUG_KMS("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif

static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t res1, res2;

	if (INTEL_INFO(dev)->gen >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (dev_priv->rps.interrupts_enabled) {
			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
			queue_work(dev_priv->wq, &dev_priv->rps.work);
		}
		spin_unlock(&dev_priv->irq_lock);
	}

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return;

	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
	}
}

static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
{
	if (!drm_handle_vblank(dev, pipe))
		return false;

	return true;
}

static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pipe_stats[I915_MAX_PIPES] = { };
	int pipe;

	spin_lock(&dev_priv->irq_lock);
	for_each_pipe(dev_priv, pipe) {
		int reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!mask)
			continue;

		reg = PIPESTAT(pipe);
		mask |= PIPESTAT_INT_ENABLE_MASK;
		pipe_stats[pipe] = I915_READ(reg) & mask;

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
					PIPESTAT_INT_STATUS_MASK))
			I915_WRITE(reg, pipe_stats[pipe]);
	}
	spin_unlock(&dev_priv->irq_lock);

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip(dev, pipe);
		}

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev);
}

static void i9xx_hpd_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
	u32 pin_mask = 0, long_mask = 0;

	if (!hotplug_status)
		return;

	I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
	/*
	 * Make sure hotplug status is cleared before we clear IIR, or else we
	 * may miss hotplug events.
	 */
	POSTING_READ(PORT_HOTPLUG_STAT);

	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
				   hotplug_trigger, hpd_status_g4x,
				   i9xx_port_hotplug_long_detect);
		intel_hpd_irq_handler(dev, pin_mask, long_mask);

		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev);
	} else {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

		/* Non-G4X platforms use the i915 status table. */
		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
				   hotplug_trigger, hpd_status_i915,
				   i9xx_port_hotplug_long_detect);
		intel_hpd_irq_handler(dev, pin_mask, long_mask);
	}
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	while (true) {
		/* Find, clear, then process each source of interrupt */

		gt_iir = I915_READ(GTIIR);
		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);

		pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		iir = I915_READ(VLV_IIR);
		if (iir) {
			/* Consume port before clearing IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		if (gt_iir)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);
		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_handler(dev, iir);
	}

out:
	return ret;
}

43f328d7
VS
1637static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1638{
45a83f84 1639 struct drm_device *dev = arg;
43f328d7
VS
1640 struct drm_i915_private *dev_priv = dev->dev_private;
1641 u32 master_ctl, iir;
1642 irqreturn_t ret = IRQ_NONE;
43f328d7 1643
2dd2a883
ID
1644 if (!intel_irqs_enabled(dev_priv))
1645 return IRQ_NONE;
1646
8e5fd599
VS
1647 for (;;) {
1648 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1649 iir = I915_READ(VLV_IIR);
43f328d7 1650
8e5fd599
VS
1651 if (master_ctl == 0 && iir == 0)
1652 break;
43f328d7 1653
27b6c122
OM
1654 ret = IRQ_HANDLED;
1655
8e5fd599 1656 I915_WRITE(GEN8_MASTER_IRQ, 0);
43f328d7 1657
27b6c122 1658 /* Find, clear, then process each source of interrupt */
43f328d7 1659
27b6c122
OM
1660 if (iir) {
1661 /* Consume port before clearing IIR or we'll miss events */
1662 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1663 i9xx_hpd_irq_handler(dev);
1664 I915_WRITE(VLV_IIR, iir);
1665 }
43f328d7 1666
74cdb337 1667 gen8_gt_irq_handler(dev_priv, master_ctl);
43f328d7 1668
27b6c122
OM
1669 /* Call regardless, as some status bits might not be
1670 * signalled in iir */
1671 valleyview_pipestat_irq_handler(dev, iir);
43f328d7 1672
8e5fd599
VS
1673 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
1674 POSTING_READ(GEN8_MASTER_IRQ);
8e5fd599 1675 }
3278f67f 1676
43f328d7
VS
1677 return ret;
1678}
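
/*
 * Editor's sketch, not driver code: the loop structure shared by the VLV
 * and CHV handlers above, with the registers modeled as a tiny array and
 * the per-source handlers reduced to a printout. Names and values are
 * illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

enum { MASTER, IIR, NREGS };
static uint32_t regs[NREGS] = { [MASTER] = 0x80000001u, [IIR] = 0x10u };

int main(void)
{
	int handled = 0;

	for (;;) {
		uint32_t master = regs[MASTER] & ~0x80000000u; /* drop enable bit */
		uint32_t iir = regs[IIR];

		if (!master && !iir)
			break;
		handled = 1;

		regs[MASTER] = 0;       /* 1: gate the master interrupt    */
		if (iir) {
			regs[IIR] = 0;  /* 2: find and clear the source    */
			printf("servicing IIR 0x%08x\n", iir); /* 3: process */
		}
		regs[MASTER] = 0x80000000u; /* 4: re-enable, loop to re-check */
	}
	return handled ? 0 : 1;
}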
1679
23e81d69 1680static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
776ad806 1681{
2d1013dd 1682 struct drm_i915_private *dev_priv = dev->dev_private;
9db4a9c7 1683 int pipe;
b543fb04 1684 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
13cf5504 1685
aaf5ec2e 1686 if (hotplug_trigger) {
42db67d6 1687 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
aaf5ec2e
SJ
1688
1689 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1690 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
776ad806 1691
fd63e2a9
ID
1692 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1693 dig_hotplug_reg, hpd_ibx,
1694 pch_port_hotplug_long_detect);
aaf5ec2e
SJ
1695 intel_hpd_irq_handler(dev, pin_mask, long_mask);
1696 }
91d131d2 1697
cfc33bf7
VS
1698 if (pch_iir & SDE_AUDIO_POWER_MASK) {
1699 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1700 SDE_AUDIO_POWER_SHIFT);
776ad806 1701 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
cfc33bf7
VS
1702 port_name(port));
1703 }
776ad806 1704
ce99c256
DV
1705 if (pch_iir & SDE_AUX_MASK)
1706 dp_aux_irq_handler(dev);
1707
776ad806 1708 if (pch_iir & SDE_GMBUS)
515ac2bb 1709 gmbus_irq_handler(dev);
776ad806
JB
1710
1711 if (pch_iir & SDE_AUDIO_HDCP_MASK)
1712 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1713
1714 if (pch_iir & SDE_AUDIO_TRANS_MASK)
1715 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1716
1717 if (pch_iir & SDE_POISON)
1718 DRM_ERROR("PCH poison interrupt\n");
1719
9db4a9c7 1720 if (pch_iir & SDE_FDI_MASK)
055e393f 1721 for_each_pipe(dev_priv, pipe)
9db4a9c7
JB
1722 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
1723 pipe_name(pipe),
1724 I915_READ(FDI_RX_IIR(pipe)));
776ad806
JB
1725
1726 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1727 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1728
1729 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1730 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1731
776ad806 1732 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1f7247c0 1733 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
8664281b
PZ
1734
1735 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1f7247c0 1736 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
8664281b
PZ
1737}
1738
1739static void ivb_err_int_handler(struct drm_device *dev)
1740{
1741 struct drm_i915_private *dev_priv = dev->dev_private;
1742 u32 err_int = I915_READ(GEN7_ERR_INT);
5a69b89f 1743 enum pipe pipe;
8664281b 1744
de032bf4
PZ
1745 if (err_int & ERR_INT_POISON)
1746 DRM_ERROR("Poison interrupt\n");
1747
055e393f 1748 for_each_pipe(dev_priv, pipe) {
1f7247c0
DV
1749 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
1750 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
8bf1e9f1 1751
5a69b89f
DV
1752 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1753 if (IS_IVYBRIDGE(dev))
277de95e 1754 ivb_pipe_crc_irq_handler(dev, pipe);
5a69b89f 1755 else
277de95e 1756 hsw_pipe_crc_irq_handler(dev, pipe);
5a69b89f
DV
1757 }
1758 }
8bf1e9f1 1759
8664281b
PZ
1760 I915_WRITE(GEN7_ERR_INT, err_int);
1761}
1762
1763static void cpt_serr_int_handler(struct drm_device *dev)
1764{
1765 struct drm_i915_private *dev_priv = dev->dev_private;
1766 u32 serr_int = I915_READ(SERR_INT);
1767
de032bf4
PZ
1768 if (serr_int & SERR_INT_POISON)
1769 DRM_ERROR("PCH poison interrupt\n");
1770
8664281b 1771 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
1f7247c0 1772 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
8664281b
PZ
1773
1774 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
1f7247c0 1775 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
8664281b
PZ
1776
1777 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
1f7247c0 1778 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
8664281b
PZ
1779
1780 I915_WRITE(SERR_INT, serr_int);
776ad806
JB
1781}
1782
23e81d69
AJ
1783static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
1784{
2d1013dd 1785 struct drm_i915_private *dev_priv = dev->dev_private;
23e81d69 1786 int pipe;
6dbf30ce 1787 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
13cf5504 1788
aaf5ec2e 1789 if (hotplug_trigger) {
42db67d6 1790 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
23e81d69 1791
aaf5ec2e
SJ
1792 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1793 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
fd63e2a9 1794
6dbf30ce
VS
1795 intel_get_hpd_pins(&pin_mask, &long_mask,
1796 hotplug_trigger,
1797 dig_hotplug_reg, hpd_cpt,
1798 pch_port_hotplug_long_detect);
26951caf 1799
aaf5ec2e
SJ
1800 intel_hpd_irq_handler(dev, pin_mask, long_mask);
1801 }
91d131d2 1802
cfc33bf7
VS
1803 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1804 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1805 SDE_AUDIO_POWER_SHIFT_CPT);
1806 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1807 port_name(port));
1808 }
23e81d69
AJ
1809
1810 if (pch_iir & SDE_AUX_MASK_CPT)
ce99c256 1811 dp_aux_irq_handler(dev);
23e81d69
AJ
1812
1813 if (pch_iir & SDE_GMBUS_CPT)
515ac2bb 1814 gmbus_irq_handler(dev);
23e81d69
AJ
1815
1816 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
1817 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
1818
1819 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
1820 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
1821
1822 if (pch_iir & SDE_FDI_MASK_CPT)
055e393f 1823 for_each_pipe(dev_priv, pipe)
23e81d69
AJ
1824 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
1825 pipe_name(pipe),
1826 I915_READ(FDI_RX_IIR(pipe)));
8664281b
PZ
1827
1828 if (pch_iir & SDE_ERROR_CPT)
1829 cpt_serr_int_handler(dev);
23e81d69
AJ
1830}
1831
6dbf30ce
VS
1832static void spt_irq_handler(struct drm_device *dev, u32 pch_iir)
1833{
1834 struct drm_i915_private *dev_priv = dev->dev_private;
1835 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
1836 ~SDE_PORTE_HOTPLUG_SPT;
1837 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
1838 u32 pin_mask = 0, long_mask = 0;
1839
1840 if (hotplug_trigger) {
1841 u32 dig_hotplug_reg;
1842
1843 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1844 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1845
1846 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1847 dig_hotplug_reg, hpd_spt,
1848 pch_port_hotplug_long_detect);
1849 }
1850
1851 if (hotplug2_trigger) {
1852 u32 dig_hotplug_reg;
1853
1854 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
1855 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
1856
1857 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
1858 dig_hotplug_reg, hpd_spt,
1859 spt_port_hotplug2_long_detect);
1860 }
1861
1862 if (pin_mask)
1863 intel_hpd_irq_handler(dev, pin_mask, long_mask);
1864
1865 if (pch_iir & SDE_GMBUS_CPT)
1866 gmbus_irq_handler(dev);
1867}
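
/*
 * Editor's sketch, not driver code: why spt_irq_handler() above accumulates
 * pin_mask/long_mask from two hotplug registers before making one
 * intel_hpd_irq_handler() call. The decode() helper and the bit choices are
 * placeholders for the real per-register detect functions.
 */
#include <stdint.h>
#include <stdio.h>

static void decode(uint32_t trigger, uint32_t pin_bit,
		   uint32_t *pin_mask, uint32_t *long_mask)
{
	if (trigger) {
		*pin_mask |= pin_bit;
		*long_mask |= pin_bit; /* pretend every pulse is long here */
	}
}

int main(void)
{
	uint32_t hotplug1 = 1, hotplug2 = 1; /* e.g. port B and port E fired */
	uint32_t pin_mask = 0, long_mask = 0;

	decode(hotplug1, 1u << 1, &pin_mask, &long_mask); /* from HOTPLUG    */
	decode(hotplug2, 1u << 4, &pin_mask, &long_mask); /* from HOTPLUG2   */

	if (pin_mask) /* one dispatch covering both registers */
		printf("hpd dispatch: pins 0x%x, long 0x%x\n",
		       pin_mask, long_mask);
	return 0;
}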
1868
c008bc6e
PZ
1869static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
1870{
1871 struct drm_i915_private *dev_priv = dev->dev_private;
40da17c2 1872 enum pipe pipe;
c008bc6e
PZ
1873
1874 if (de_iir & DE_AUX_CHANNEL_A)
1875 dp_aux_irq_handler(dev);
1876
1877 if (de_iir & DE_GSE)
1878 intel_opregion_asle_intr(dev);
1879
c008bc6e
PZ
1880 if (de_iir & DE_POISON)
1881 DRM_ERROR("Poison interrupt\n");
1882
055e393f 1883 for_each_pipe(dev_priv, pipe) {
d6bbafa1
CW
1884 if (de_iir & DE_PIPE_VBLANK(pipe) &&
1885 intel_pipe_handle_vblank(dev, pipe))
1886 intel_check_page_flip(dev, pipe);
5b3a856b 1887
40da17c2 1888 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
1f7247c0 1889 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
5b3a856b 1890
40da17c2
DV
1891 if (de_iir & DE_PIPE_CRC_DONE(pipe))
1892 i9xx_pipe_crc_irq_handler(dev, pipe);
c008bc6e 1893
40da17c2
DV
1894 /* plane/pipes map 1:1 on ilk+ */
1895 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
1896 intel_prepare_page_flip(dev, pipe);
1897 intel_finish_page_flip_plane(dev, pipe);
1898 }
c008bc6e
PZ
1899 }
1900
1901 /* check event from PCH */
1902 if (de_iir & DE_PCH_EVENT) {
1903 u32 pch_iir = I915_READ(SDEIIR);
1904
1905 if (HAS_PCH_CPT(dev))
1906 cpt_irq_handler(dev, pch_iir);
1907 else
1908 ibx_irq_handler(dev, pch_iir);
1909
1910 /* should clear PCH hotplug event before clearing CPU irq */
1911 I915_WRITE(SDEIIR, pch_iir);
1912 }
1913
1914 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
1915 ironlake_rps_change_irq_handler(dev);
1916}
1917
9719fb98
PZ
1918static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
1919{
1920 struct drm_i915_private *dev_priv = dev->dev_private;
07d27e20 1921 enum pipe pipe;
9719fb98
PZ
1922
1923 if (de_iir & DE_ERR_INT_IVB)
1924 ivb_err_int_handler(dev);
1925
1926 if (de_iir & DE_AUX_CHANNEL_A_IVB)
1927 dp_aux_irq_handler(dev);
1928
1929 if (de_iir & DE_GSE_IVB)
1930 intel_opregion_asle_intr(dev);
1931
055e393f 1932 for_each_pipe(dev_priv, pipe) {
d6bbafa1
CW
1933 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
1934 intel_pipe_handle_vblank(dev, pipe))
1935 intel_check_page_flip(dev, pipe);
40da17c2
DV
1936
1937 /* plane/pipes map 1:1 on ilk+ */
07d27e20
DL
1938 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
1939 intel_prepare_page_flip(dev, pipe);
1940 intel_finish_page_flip_plane(dev, pipe);
9719fb98
PZ
1941 }
1942 }
1943
1944 /* check event from PCH */
1945 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
1946 u32 pch_iir = I915_READ(SDEIIR);
1947
1948 cpt_irq_handler(dev, pch_iir);
1949
1950 /* clear PCH hotplug event before clearing CPU irq */
1951 I915_WRITE(SDEIIR, pch_iir);
1952 }
1953}
1954
72c90f62
OM
1955/*
1956 * To handle irqs with the minimum potential races with fresh interrupts, we:
1957 * 1 - Disable Master Interrupt Control.
1958 * 2 - Find the source(s) of the interrupt.
1959 * 3 - Clear the Interrupt Identity bits (IIR).
1960 * 4 - Process the interrupt(s) that had bits set in the IIRs.
1961 * 5 - Re-enable Master Interrupt Control.
1962 */
f1af8fc1 1963static irqreturn_t ironlake_irq_handler(int irq, void *arg)
b1f14ad0 1964{
45a83f84 1965 struct drm_device *dev = arg;
2d1013dd 1966 struct drm_i915_private *dev_priv = dev->dev_private;
f1af8fc1 1967 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
0e43406b 1968 irqreturn_t ret = IRQ_NONE;
b1f14ad0 1969
2dd2a883
ID
1970 if (!intel_irqs_enabled(dev_priv))
1971 return IRQ_NONE;
1972
8664281b
PZ
1973 /* We get interrupts on unclaimed registers, so check for this before we
1974 * do any I915_{READ,WRITE}. */
907b28c5 1975 intel_uncore_check_errors(dev);
8664281b 1976
b1f14ad0
JB
1977 /* disable master interrupt before clearing iir */
1978 de_ier = I915_READ(DEIER);
1979 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
23a78516 1980 POSTING_READ(DEIER);
b1f14ad0 1981
44498aea
PZ
1982 /* Disable south interrupts. We'll only write to SDEIIR once, so further
1983 * interrupts will be stored on its back queue, and then we'll be
1984 * able to process them after we restore SDEIER (as soon as we restore
1985 * it, we'll get an interrupt if SDEIIR still has something to process
1986 * due to its back queue). */
ab5c608b
BW
1987 if (!HAS_PCH_NOP(dev)) {
1988 sde_ier = I915_READ(SDEIER);
1989 I915_WRITE(SDEIER, 0);
1990 POSTING_READ(SDEIER);
1991 }
44498aea 1992
72c90f62
OM
1993 /* Find, clear, then process each source of interrupt */
1994
b1f14ad0 1995 gt_iir = I915_READ(GTIIR);
0e43406b 1996 if (gt_iir) {
72c90f62
OM
1997 I915_WRITE(GTIIR, gt_iir);
1998 ret = IRQ_HANDLED;
d8fc8a47 1999 if (INTEL_INFO(dev)->gen >= 6)
f1af8fc1 2000 snb_gt_irq_handler(dev, dev_priv, gt_iir);
d8fc8a47
PZ
2001 else
2002 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
b1f14ad0
JB
2003 }
2004
0e43406b
CW
2005 de_iir = I915_READ(DEIIR);
2006 if (de_iir) {
72c90f62
OM
2007 I915_WRITE(DEIIR, de_iir);
2008 ret = IRQ_HANDLED;
f1af8fc1
PZ
2009 if (INTEL_INFO(dev)->gen >= 7)
2010 ivb_display_irq_handler(dev, de_iir);
2011 else
2012 ilk_display_irq_handler(dev, de_iir);
b1f14ad0
JB
2013 }
2014
f1af8fc1
PZ
2015 if (INTEL_INFO(dev)->gen >= 6) {
2016 u32 pm_iir = I915_READ(GEN6_PMIIR);
2017 if (pm_iir) {
f1af8fc1
PZ
2018 I915_WRITE(GEN6_PMIIR, pm_iir);
2019 ret = IRQ_HANDLED;
72c90f62 2020 gen6_rps_irq_handler(dev_priv, pm_iir);
f1af8fc1 2021 }
0e43406b 2022 }
b1f14ad0 2023
b1f14ad0
JB
2024 I915_WRITE(DEIER, de_ier);
2025 POSTING_READ(DEIER);
ab5c608b
BW
2026 if (!HAS_PCH_NOP(dev)) {
2027 I915_WRITE(SDEIER, sde_ier);
2028 POSTING_READ(SDEIER);
2029 }
b1f14ad0
JB
2030
2031 return ret;
2032}
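
/*
 * Editor's sketch, not driver code: the five-step sequence from the comment
 * above ironlake_irq_handler(), plus the SDEIER parking trick, modeled with
 * plain variables. A real implementation also relies on posting reads to
 * order the MMIO writes; that detail is omitted here.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t deier = 0x80000001u, deiir = 0x4u, sdeier = 0xffffffffu;

int main(void)
{
	uint32_t saved_deier = deier, saved_sdeier = sdeier;

	deier &= ~0x80000000u;  /* 1: disable master interrupt control      */
	sdeier = 0;             /* park south interrupts: new PCH events    */
	                        /* queue up but cannot retrigger the line   */

	uint32_t iir = deiir;   /* 2: find the source(s)                    */
	if (iir) {
		deiir = 0;      /* 3: clear IIR before processing           */
		printf("processing DE IIR 0x%08x\n", iir); /* 4: process    */
	}

	deier = saved_deier;    /* 5: re-enable master interrupt control    */
	sdeier = saved_sdeier;  /* unpark: a queued PCH event fires now     */
	return 0;
}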
2033
d04a492d
SS
2034static void bxt_hpd_handler(struct drm_device *dev, uint32_t iir_status)
2035{
2036 struct drm_i915_private *dev_priv = dev->dev_private;
676574df 2037 u32 hp_control, hp_trigger;
42db67d6 2038 u32 pin_mask = 0, long_mask = 0;
d04a492d
SS
2039
2040 /* Get the status */
2041 hp_trigger = iir_status & BXT_DE_PORT_HOTPLUG_MASK;
2042 hp_control = I915_READ(BXT_HOTPLUG_CTL);
2043
2044 /* Hotplug not enabled? */
2045 if (!(hp_control & BXT_HOTPLUG_CTL_MASK)) {
2046 DRM_ERROR("Interrupt when HPD disabled\n");
2047 return;
2048 }
2049
475c2e3b
JN
2050 /* Clear sticky bits in hpd status */
2051 I915_WRITE(BXT_HOTPLUG_CTL, hp_control);
d04a492d 2052
fd63e2a9 2053 intel_get_hpd_pins(&pin_mask, &long_mask, hp_trigger, hp_control,
63c88d22 2054 hpd_bxt, bxt_port_hotplug_long_detect);
676574df 2055 intel_hpd_irq_handler(dev, pin_mask, long_mask);
d04a492d
SS
2056}
2057
abd58f01
BW
2058static irqreturn_t gen8_irq_handler(int irq, void *arg)
2059{
2060 struct drm_device *dev = arg;
2061 struct drm_i915_private *dev_priv = dev->dev_private;
2062 u32 master_ctl;
2063 irqreturn_t ret = IRQ_NONE;
2064 uint32_t tmp = 0;
c42664cc 2065 enum pipe pipe;
88e04703
JB
2066 u32 aux_mask = GEN8_AUX_CHANNEL_A;
2067
2dd2a883
ID
2068 if (!intel_irqs_enabled(dev_priv))
2069 return IRQ_NONE;
2070
88e04703
JB
2071 if (IS_GEN9(dev))
2072 aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
2073 GEN9_AUX_CHANNEL_D;
abd58f01 2074
cb0d205e 2075 master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
abd58f01
BW
2076 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2077 if (!master_ctl)
2078 return IRQ_NONE;
2079
cb0d205e 2080 I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
abd58f01 2081
38cc46d7
OM
2082 /* Find, clear, then process each source of interrupt */
2083
74cdb337 2084 ret = gen8_gt_irq_handler(dev_priv, master_ctl);
abd58f01
BW
2085
2086 if (master_ctl & GEN8_DE_MISC_IRQ) {
2087 tmp = I915_READ(GEN8_DE_MISC_IIR);
abd58f01
BW
2088 if (tmp) {
2089 I915_WRITE(GEN8_DE_MISC_IIR, tmp);
2090 ret = IRQ_HANDLED;
38cc46d7
OM
2091 if (tmp & GEN8_DE_MISC_GSE)
2092 intel_opregion_asle_intr(dev);
2093 else
2094 DRM_ERROR("Unexpected DE Misc interrupt\n");
abd58f01 2095 }
38cc46d7
OM
2096 else
2097 DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
abd58f01
BW
2098 }
2099
6d766f02
DV
2100 if (master_ctl & GEN8_DE_PORT_IRQ) {
2101 tmp = I915_READ(GEN8_DE_PORT_IIR);
6d766f02 2102 if (tmp) {
d04a492d
SS
2103 bool found = false;
2104
6d766f02
DV
2105 I915_WRITE(GEN8_DE_PORT_IIR, tmp);
2106 ret = IRQ_HANDLED;
88e04703 2107
d04a492d 2108 if (tmp & aux_mask) {
38cc46d7 2109 dp_aux_irq_handler(dev);
d04a492d
SS
2110 found = true;
2111 }
2112
2113 if (IS_BROXTON(dev) && tmp & BXT_DE_PORT_HOTPLUG_MASK) {
2114 bxt_hpd_handler(dev, tmp);
2115 found = true;
2116 }
2117
9e63743e
SS
2118 if (IS_BROXTON(dev) && (tmp & BXT_DE_PORT_GMBUS)) {
2119 gmbus_irq_handler(dev);
2120 found = true;
2121 }
2122
d04a492d 2123 if (!found)
38cc46d7 2124 DRM_ERROR("Unexpected DE Port interrupt\n");
6d766f02 2125 }
38cc46d7
OM
2126 else
2127 DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
6d766f02
DV
2128 }
2129
055e393f 2130 for_each_pipe(dev_priv, pipe) {
770de83d 2131 uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
abd58f01 2132
c42664cc
DV
2133 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2134 continue;
abd58f01 2135
c42664cc 2136 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
c42664cc
DV
2137 if (pipe_iir) {
2138 ret = IRQ_HANDLED;
2139 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
770de83d 2140
d6bbafa1
CW
2141 if (pipe_iir & GEN8_PIPE_VBLANK &&
2142 intel_pipe_handle_vblank(dev, pipe))
2143 intel_check_page_flip(dev, pipe);
38cc46d7 2144
770de83d
DL
2145 if (IS_GEN9(dev))
2146 flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
2147 else
2148 flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
2149
2150 if (flip_done) {
38cc46d7
OM
2151 intel_prepare_page_flip(dev, pipe);
2152 intel_finish_page_flip_plane(dev, pipe);
2153 }
2154
2155 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
2156 hsw_pipe_crc_irq_handler(dev, pipe);
2157
1f7247c0
DV
2158 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
2159 intel_cpu_fifo_underrun_irq_handler(dev_priv,
2160 pipe);
38cc46d7 2161
770de83d
DL
2162
2163 if (IS_GEN9(dev))
2164 fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2165 else
2166 fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2167
2168 if (fault_errors)
38cc46d7
OM
2169 DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
2170 pipe_name(pipe),
2171 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
c42664cc 2172 } else
abd58f01
BW
2173 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2174 }
2175
266ea3d9
SS
2176 if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
2177 master_ctl & GEN8_DE_PCH_IRQ) {
92d03a80
DV
2178 /*
2179 * FIXME(BDW): Assume for now that the new interrupt handling
2180 * scheme also closed the SDE interrupt handling race we've seen
2181 * on older pch-split platforms. But this needs testing.
2182 */
2183 u32 pch_iir = I915_READ(SDEIIR);
92d03a80
DV
2184 if (pch_iir) {
2185 I915_WRITE(SDEIIR, pch_iir);
2186 ret = IRQ_HANDLED;
6dbf30ce
VS
2187
2188 if (HAS_PCH_SPT(dev_priv))
2189 spt_irq_handler(dev, pch_iir);
2190 else
2191 cpt_irq_handler(dev, pch_iir);
38cc46d7
OM
2192 } else
2193 DRM_ERROR("The master control interrupt lied (SDE)!\n");
2194
92d03a80
DV
2195 }
2196
cb0d205e
CW
2197 I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2198 POSTING_READ_FW(GEN8_MASTER_IRQ);
abd58f01
BW
2199
2200 return ret;
2201}
2202
17e1df07
DV
2203static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2204 bool reset_completed)
2205{
a4872ba6 2206 struct intel_engine_cs *ring;
17e1df07
DV
2207 int i;
2208
2209 /*
2210 * Notify all waiters for GPU completion events that reset state has
2211 * been changed, and that they need to restart their wait after
2212 * checking for potential errors (and bail out to drop locks if there is
2213 * a gpu reset pending so that i915_reset_and_wakeup can acquire them).
2214 */
2215
2216 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2217 for_each_ring(ring, dev_priv, i)
2218 wake_up_all(&ring->irq_queue);
2219
2220 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2221 wake_up_all(&dev_priv->pending_flip_queue);
2222
2223 /*
2224 * Signal tasks blocked in i915_gem_wait_for_error that the pending
2225 * reset state is cleared.
2226 */
2227 if (reset_completed)
2228 wake_up_all(&dev_priv->gpu_error.reset_queue);
2229}
2230
8a905236 2231/**
b8d24a06 2232 * i915_reset_and_wakeup - do process context error handling work
8a905236
JB
2233 *
2234 * Fire an error uevent so userspace can see that a hang or error
2235 * was detected.
2236 */
b8d24a06 2237static void i915_reset_and_wakeup(struct drm_device *dev)
8a905236 2238{
b8d24a06
MK
2239 struct drm_i915_private *dev_priv = to_i915(dev);
2240 struct i915_gpu_error *error = &dev_priv->gpu_error;
cce723ed
BW
2241 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2242 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2243 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
17e1df07 2244 int ret;
8a905236 2245
5bdebb18 2246 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
f316a42c 2247
7db0ba24
DV
2248 /*
2249 * Note that there's only one work item which does gpu resets, so we
2250 * need not worry about concurrent gpu resets potentially incrementing
2251 * error->reset_counter twice. We only need to take care of another
2252 * racing irq/hangcheck declaring the gpu dead for a second time. A
2253 * quick check for that is good enough: schedule_work ensures the
2254 * correct ordering between hang detection and this work item, and since
2255 * the reset in-progress bit is only ever set by code outside of this
2256 * work we don't need to worry about any other races.
2257 */
2258 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
f803aa55 2259 DRM_DEBUG_DRIVER("resetting chip\n");
5bdebb18 2260 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
7db0ba24 2261 reset_event);
1f83fee0 2262
f454c694
ID
2263 /*
2264 * In most cases it's guaranteed that we get here with an RPM
2265 * reference held, for example because there is a pending GPU
2266 * request that won't finish until the reset is done. This
2267 * isn't the case at least when we get here by doing a
2268 * simulated reset via debugfs, so get an RPM reference.
2269 */
2270 intel_runtime_pm_get(dev_priv);
7514747d
VS
2271
2272 intel_prepare_reset(dev);
2273
17e1df07
DV
2274 /*
2275 * All state reset _must_ be completed before we update the
2276 * reset counter, for otherwise waiters might miss the reset
2277 * pending state and not properly drop locks, resulting in
2278 * deadlocks with the reset work.
2279 */
f69061be
DV
2280 ret = i915_reset(dev);
2281
7514747d 2282 intel_finish_reset(dev);
17e1df07 2283
f454c694
ID
2284 intel_runtime_pm_put(dev_priv);
2285
f69061be
DV
2286 if (ret == 0) {
2287 /*
2288 * After all the gem state is reset, increment the reset
2289 * counter and wake up everyone waiting for the reset to
2290 * complete.
2291 *
2292 * Since unlock operations are a one-sided barrier only,
2293 * we need to insert a barrier here to order any seqno
2294 * updates before the counter increment.
2295 *
2296 */
4e857c58 2297 smp_mb__before_atomic();
f69061be
DV
2298 atomic_inc(&dev_priv->gpu_error.reset_counter);
2299
5bdebb18 2300 kobject_uevent_env(&dev->primary->kdev->kobj,
f69061be 2301 KOBJ_CHANGE, reset_done_event);
1f83fee0 2302 } else {
2ac0f450 2303 atomic_set_mask(I915_WEDGED, &error->reset_counter);
f316a42c 2304 }
1f83fee0 2305
17e1df07
DV
2306 /*
2307 * Note: The wake_up also serves as a memory barrier so that
2308 * waiters see the updated value of the reset counter atomic_t.
2309 */
2310 i915_error_wake_up(dev_priv, true);
f316a42c 2311 }
8a905236
JB
2312}
2313
35aed2e6 2314static void i915_report_and_clear_eir(struct drm_device *dev)
8a905236
JB
2315{
2316 struct drm_i915_private *dev_priv = dev->dev_private;
bd9854f9 2317 uint32_t instdone[I915_NUM_INSTDONE_REG];
8a905236 2318 u32 eir = I915_READ(EIR);
050ee91f 2319 int pipe, i;
8a905236 2320
35aed2e6
CW
2321 if (!eir)
2322 return;
8a905236 2323
a70491cc 2324 pr_err("render error detected, EIR: 0x%08x\n", eir);
8a905236 2325
bd9854f9
BW
2326 i915_get_extra_instdone(dev, instdone);
2327
8a905236
JB
2328 if (IS_G4X(dev)) {
2329 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2330 u32 ipeir = I915_READ(IPEIR_I965);
2331
a70491cc
JP
2332 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2333 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
050ee91f
BW
2334 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2335 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
a70491cc 2336 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
a70491cc 2337 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
8a905236 2338 I915_WRITE(IPEIR_I965, ipeir);
3143a2bf 2339 POSTING_READ(IPEIR_I965);
8a905236
JB
2340 }
2341 if (eir & GM45_ERROR_PAGE_TABLE) {
2342 u32 pgtbl_err = I915_READ(PGTBL_ER);
a70491cc
JP
2343 pr_err("page table error\n");
2344 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
8a905236 2345 I915_WRITE(PGTBL_ER, pgtbl_err);
3143a2bf 2346 POSTING_READ(PGTBL_ER);
8a905236
JB
2347 }
2348 }
2349
a6c45cf0 2350 if (!IS_GEN2(dev)) {
8a905236
JB
2351 if (eir & I915_ERROR_PAGE_TABLE) {
2352 u32 pgtbl_err = I915_READ(PGTBL_ER);
a70491cc
JP
2353 pr_err("page table error\n");
2354 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
8a905236 2355 I915_WRITE(PGTBL_ER, pgtbl_err);
3143a2bf 2356 POSTING_READ(PGTBL_ER);
8a905236
JB
2357 }
2358 }
2359
2360 if (eir & I915_ERROR_MEMORY_REFRESH) {
a70491cc 2361 pr_err("memory refresh error:\n");
055e393f 2362 for_each_pipe(dev_priv, pipe)
a70491cc 2363 pr_err("pipe %c stat: 0x%08x\n",
9db4a9c7 2364 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
8a905236
JB
2365 /* pipestat has already been acked */
2366 }
2367 if (eir & I915_ERROR_INSTRUCTION) {
a70491cc
JP
2368 pr_err("instruction error\n");
2369 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
050ee91f
BW
2370 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2371 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
a6c45cf0 2372 if (INTEL_INFO(dev)->gen < 4) {
8a905236
JB
2373 u32 ipeir = I915_READ(IPEIR);
2374
a70491cc
JP
2375 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
2376 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
a70491cc 2377 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
8a905236 2378 I915_WRITE(IPEIR, ipeir);
3143a2bf 2379 POSTING_READ(IPEIR);
8a905236
JB
2380 } else {
2381 u32 ipeir = I915_READ(IPEIR_I965);
2382
a70491cc
JP
2383 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2384 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
a70491cc 2385 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
a70491cc 2386 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
8a905236 2387 I915_WRITE(IPEIR_I965, ipeir);
3143a2bf 2388 POSTING_READ(IPEIR_I965);
8a905236
JB
2389 }
2390 }
2391
2392 I915_WRITE(EIR, eir);
3143a2bf 2393 POSTING_READ(EIR);
8a905236
JB
2394 eir = I915_READ(EIR);
2395 if (eir) {
2396 /*
2397 * some errors might have become stuck,
2398 * mask them.
2399 */
2400 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2401 I915_WRITE(EMR, I915_READ(EMR) | eir);
2402 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2403 }
35aed2e6
CW
2404}
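
/*
 * Editor's sketch, not driver code: the "mask stuck error bits" tail of
 * i915_report_and_clear_eir() above, with the write-1-to-clear behavior of
 * EIR simulated by a stuck_bits variable. Register names and values are
 * invented for illustration.
 */
#include <stdint.h>
#include <stdio.h>

static const uint32_t stuck_bits = 0x8u; /* this error cannot be cleared */
static uint32_t eir = 0xCu, emr;

static void eir_write(uint32_t val)
{
	eir &= ~val;       /* write-1-to-clear ...                        */
	eir |= stuck_bits; /* ... except for bits the hw keeps asserting  */
}

int main(void)
{
	eir_write(eir);
	if (eir) {
		printf("EIR stuck: 0x%08x, masking\n", eir);
		emr |= eir; /* stop the stuck bits from raising interrupts */
	}
	printf("EIR 0x%08x EMR 0x%08x\n", eir, emr);
	return 0;
}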
2405
2406/**
b8d24a06 2407 * i915_handle_error - handle a gpu error
35aed2e6
CW
2408 * @dev: drm device
2409 *
b8d24a06 2410 * Do some basic checking of register state at error time and
35aed2e6
CW
2411 * dump it to the syslog. Also call i915_capture_error_state() to make
2412 * sure we get a record and make it available in debugfs. Fire a uevent
2413 * so userspace knows something bad happened (should trigger collection
2414 * of a ring dump etc.).
2415 */
58174462
MK
2416void i915_handle_error(struct drm_device *dev, bool wedged,
2417 const char *fmt, ...)
35aed2e6
CW
2418{
2419 struct drm_i915_private *dev_priv = dev->dev_private;
58174462
MK
2420 va_list args;
2421 char error_msg[80];
35aed2e6 2422
58174462
MK
2423 va_start(args, fmt);
2424 vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2425 va_end(args);
2426
2427 i915_capture_error_state(dev, wedged, error_msg);
35aed2e6 2428 i915_report_and_clear_eir(dev);
8a905236 2429
ba1234d1 2430 if (wedged) {
f69061be
DV
2431 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
2432 &dev_priv->gpu_error.reset_counter);
ba1234d1 2433
11ed50ec 2434 /*
b8d24a06
MK
2435 * Wakeup waiting processes so that the reset function
2436 * i915_reset_and_wakeup doesn't deadlock trying to grab
2437 * various locks. By bumping the reset counter first, the woken
17e1df07
DV
2438 * processes will see a reset in progress and back off,
2439 * releasing their locks and then wait for the reset completion.
2440 * We must do this for _all_ gpu waiters that might hold locks
2441 * that the reset work needs to acquire.
2442 *
2443 * Note: The wake_up serves as the required memory barrier to
2444 * ensure that the waiters see the updated value of the reset
2445 * counter atomic_t.
11ed50ec 2446 */
17e1df07 2447 i915_error_wake_up(dev_priv, false);
11ed50ec
BG
2448 }
2449
b8d24a06 2450 i915_reset_and_wakeup(dev);
8a905236
JB
2451}
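
/*
 * Editor's sketch, not driver code: the ordering contract spelled out in
 * the comments above -- publish the reset state first, then wake every
 * waiter, so a woken task re-checks the flag and backs off. Modeled with
 * C11 atomics, pthreads and a polling loop instead of kernel waitqueues;
 * build with -pthread.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <sched.h>

static atomic_int reset_pending;

static void *waiter(void *arg)
{
	(void)arg;
	/* A waiter woken for any reason re-checks the flag, and if a reset
	 * is pending it drops its locks and backs off. */
	while (!atomic_load(&reset_pending))
		sched_yield();
	printf("waiter: reset pending, backing off\n");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, waiter, NULL);
	atomic_store(&reset_pending, 1); /* publish state before the wake */
	/* wake_up_all() would go here; the atomic store is the barrier.  */
	pthread_join(t, NULL);
	return 0;
}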
2452
42f52ef8
KP
2453/* Called from drm generic code, passed 'crtc' which
2454 * we use as a pipe index
2455 */
f71d4af4 2456static int i915_enable_vblank(struct drm_device *dev, int pipe)
0a3e67a4 2457{
2d1013dd 2458 struct drm_i915_private *dev_priv = dev->dev_private;
e9d21d7f 2459 unsigned long irqflags;
71e0ffa5 2460
1ec14ad3 2461 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
f796cf8f 2462 if (INTEL_INFO(dev)->gen >= 4)
7c463586 2463 i915_enable_pipestat(dev_priv, pipe,
755e9019 2464 PIPE_START_VBLANK_INTERRUPT_STATUS);
e9d21d7f 2465 else
7c463586 2466 i915_enable_pipestat(dev_priv, pipe,
755e9019 2467 PIPE_VBLANK_INTERRUPT_STATUS);
1ec14ad3 2468 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
8692d00e 2469
0a3e67a4
JB
2470 return 0;
2471}
2472
f71d4af4 2473static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
f796cf8f 2474{
2d1013dd 2475 struct drm_i915_private *dev_priv = dev->dev_private;
f796cf8f 2476 unsigned long irqflags;
b518421f 2477 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
40da17c2 2478 DE_PIPE_VBLANK(pipe);
f796cf8f 2479
f796cf8f 2480 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
b518421f 2481 ironlake_enable_display_irq(dev_priv, bit);
b1f14ad0
JB
2482 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2483
2484 return 0;
2485}
2486
7e231dbe
JB
2487static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2488{
2d1013dd 2489 struct drm_i915_private *dev_priv = dev->dev_private;
7e231dbe 2490 unsigned long irqflags;
7e231dbe 2491
7e231dbe 2492 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
31acc7f5 2493 i915_enable_pipestat(dev_priv, pipe,
755e9019 2494 PIPE_START_VBLANK_INTERRUPT_STATUS);
7e231dbe
JB
2495 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2496
2497 return 0;
2498}
2499
abd58f01
BW
2500static int gen8_enable_vblank(struct drm_device *dev, int pipe)
2501{
2502 struct drm_i915_private *dev_priv = dev->dev_private;
2503 unsigned long irqflags;
abd58f01 2504
abd58f01 2505 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
7167d7c6
DV
2506 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
2507 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2508 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
abd58f01
BW
2509 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2510 return 0;
2511}
2512
42f52ef8
KP
2513/* Called from drm generic code, passed 'crtc' which
2514 * we use as a pipe index
2515 */
f71d4af4 2516static void i915_disable_vblank(struct drm_device *dev, int pipe)
0a3e67a4 2517{
2d1013dd 2518 struct drm_i915_private *dev_priv = dev->dev_private;
e9d21d7f 2519 unsigned long irqflags;
0a3e67a4 2520
1ec14ad3 2521 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
f796cf8f 2522 i915_disable_pipestat(dev_priv, pipe,
755e9019
ID
2523 PIPE_VBLANK_INTERRUPT_STATUS |
2524 PIPE_START_VBLANK_INTERRUPT_STATUS);
f796cf8f
JB
2525 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2526}
2527
f71d4af4 2528static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
f796cf8f 2529{
2d1013dd 2530 struct drm_i915_private *dev_priv = dev->dev_private;
f796cf8f 2531 unsigned long irqflags;
b518421f 2532 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
40da17c2 2533 DE_PIPE_VBLANK(pipe);
f796cf8f
JB
2534
2535 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
b518421f 2536 ironlake_disable_display_irq(dev_priv, bit);
b1f14ad0
JB
2537 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2538}
2539
7e231dbe
JB
2540static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
2541{
2d1013dd 2542 struct drm_i915_private *dev_priv = dev->dev_private;
7e231dbe 2543 unsigned long irqflags;
7e231dbe
JB
2544
2545 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
31acc7f5 2546 i915_disable_pipestat(dev_priv, pipe,
755e9019 2547 PIPE_START_VBLANK_INTERRUPT_STATUS);
7e231dbe
JB
2548 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2549}
2550
abd58f01
BW
2551static void gen8_disable_vblank(struct drm_device *dev, int pipe)
2552{
2553 struct drm_i915_private *dev_priv = dev->dev_private;
2554 unsigned long irqflags;
abd58f01 2555
abd58f01 2556 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
7167d7c6
DV
2557 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
2558 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2559 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
abd58f01
BW
2560 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2561}
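
/*
 * Editor's sketch, not driver code: the gen8 vblank enable/disable pattern
 * above -- keep a cached copy of the interrupt mask, flip one bit under the
 * irq lock, and write the cache to the hardware mask register. The lock and
 * the register are simulated; bit positions are invented.
 */
#include <stdint.h>
#include <stdio.h>

#define PIPE_VBLANK_BIT (1u << 0)

static uint32_t de_irq_mask = 0xffffffffu; /* all sources masked */
static uint32_t imr_reg;

static void imr_write(uint32_t val) { imr_reg = val; }

static void vblank_set(int enable)
{
	/* spin_lock_irqsave(&irq_lock, flags) in the real code */
	if (enable)
		de_irq_mask &= ~PIPE_VBLANK_BIT;
	else
		de_irq_mask |= PIPE_VBLANK_BIT;
	imr_write(de_irq_mask); /* plus a posting read on real hardware */
	/* spin_unlock_irqrestore(&irq_lock, flags) */
}

int main(void)
{
	vblank_set(1);
	printf("IMR after enable:  0x%08x\n", imr_reg);
	vblank_set(0);
	printf("IMR after disable: 0x%08x\n", imr_reg);
	return 0;
}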
2562
9107e9d2 2563static bool
94f7bbe1 2564ring_idle(struct intel_engine_cs *ring, u32 seqno)
9107e9d2
CW
2565{
2566 return (list_empty(&ring->request_list) ||
94f7bbe1 2567 i915_seqno_passed(seqno, ring->last_submitted_seqno));
f65d9421
BG
2568}
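
/*
 * Editor's sketch, not driver code: ring_idle() above leans on
 * i915_seqno_passed(), which compares 32-bit seqnos modulo wrap-around by
 * looking at the sign of their difference. A standalone model of that
 * comparison:
 */
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0; /* true if seq1 is at or past seq2 */
}

int main(void)
{
	/* Works across the 32-bit wrap: 5 is "after" 0xfffffffb. */
	printf("%d %d\n", seqno_passed(10, 5), seqno_passed(5, 0xfffffffbu));
	return 0;
}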
2569
a028c4b0
DV
2570static bool
2571ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
2572{
2573 if (INTEL_INFO(dev)->gen >= 8) {
a6cdb93a 2574 return (ipehr >> 23) == 0x1c;
a028c4b0
DV
2575 } else {
2576 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2577 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
2578 MI_SEMAPHORE_REGISTER);
2579 }
2580}
2581
a4872ba6 2582static struct intel_engine_cs *
a6cdb93a 2583semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
921d42ea
DV
2584{
2585 struct drm_i915_private *dev_priv = ring->dev->dev_private;
a4872ba6 2586 struct intel_engine_cs *signaller;
921d42ea
DV
2587 int i;
2588
2589 if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
a6cdb93a
RV
2590 for_each_ring(signaller, dev_priv, i) {
2591 if (ring == signaller)
2592 continue;
2593
2594 if (offset == signaller->semaphore.signal_ggtt[ring->id])
2595 return signaller;
2596 }
921d42ea
DV
2597 } else {
2598 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
2599
2600 for_each_ring(signaller, dev_priv, i) {
2601 if (ring == signaller)
2602 continue;
2603
ebc348b2 2604 if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
921d42ea
DV
2605 return signaller;
2606 }
2607 }
2608
a6cdb93a
RV
2609 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
2610 ring->id, ipehr, offset);
921d42ea
DV
2611
2612 return NULL;
2613}
2614
a4872ba6
OM
2615static struct intel_engine_cs *
2616semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
a24a11e6
CW
2617{
2618 struct drm_i915_private *dev_priv = ring->dev->dev_private;
88fe429d 2619 u32 cmd, ipehr, head;
a6cdb93a
RV
2620 u64 offset = 0;
2621 int i, backwards;
a24a11e6
CW
2622
2623 ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
a028c4b0 2624 if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
6274f212 2625 return NULL;
a24a11e6 2626
88fe429d
DV
2627 /*
2628 * HEAD is likely pointing to the dword after the actual command,
2629 * so scan backwards until we find the MBOX. But limit it to just 3
a6cdb93a
RV
2630 * or 4 dwords depending on the semaphore wait command size.
2631 * Note that we don't care about ACTHD here since that might
88fe429d
DV
2632 * point at a batch, and semaphores are always emitted into the
2633 * ringbuffer itself.
a24a11e6 2634 */
88fe429d 2635 head = I915_READ_HEAD(ring) & HEAD_ADDR;
a6cdb93a 2636 backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
88fe429d 2637
a6cdb93a 2638 for (i = backwards; i; --i) {
88fe429d
DV
2639 /*
2640 * Be paranoid and presume the hw has gone off into the wild -
2641 * our ring is smaller than what the hardware (and hence
2642 * HEAD_ADDR) allows. Also handles wrap-around.
2643 */
ee1b1e5e 2644 head &= ring->buffer->size - 1;
88fe429d
DV
2645
2646 /* This here seems to blow up */
ee1b1e5e 2647 cmd = ioread32(ring->buffer->virtual_start + head);
a24a11e6
CW
2648 if (cmd == ipehr)
2649 break;
2650
88fe429d
DV
2651 head -= 4;
2652 }
a24a11e6 2653
88fe429d
DV
2654 if (!i)
2655 return NULL;
a24a11e6 2656
ee1b1e5e 2657 *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
a6cdb93a
RV
2658 if (INTEL_INFO(ring->dev)->gen >= 8) {
2659 offset = ioread32(ring->buffer->virtual_start + head + 12);
2660 offset <<= 32;
2661 offset = ioread32(ring->buffer->virtual_start + head + 8);
2662 }
2663 return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
a24a11e6
CW
2664}
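
/*
 * Editor's sketch, not driver code: the bounded backwards scan used by
 * semaphore_waits_for() above -- step HEAD back a dword at a time, masking
 * with (size - 1) so the walk wraps correctly, until the expected command
 * word is found or the scan budget runs out. Ring contents are fabricated.
 */
#include <stdint.h>
#include <stdio.h>

#define RING_DWORDS 16u /* power of two, like a real ringbuffer */

int main(void)
{
	uint32_t ring[RING_DWORDS] = { [2] = 0xdeadbeefu }; /* the command */
	uint32_t head = 3 * 4; /* byte offset just past the command */
	uint32_t target = 0xdeadbeefu;
	int i;

	for (i = 4; i; i--) {                   /* bounded, like "backwards" */
		head &= (RING_DWORDS * 4) - 1;  /* handles wrap-around      */
		if (ring[head / 4] == target)
			break;
		head -= 4;                      /* one dword back           */
	}
	if (i)
		printf("found command at byte offset %u\n", head);
	return 0;
}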
2665
a4872ba6 2666static int semaphore_passed(struct intel_engine_cs *ring)
6274f212
CW
2667{
2668 struct drm_i915_private *dev_priv = ring->dev->dev_private;
a4872ba6 2669 struct intel_engine_cs *signaller;
a0d036b0 2670 u32 seqno;
6274f212 2671
4be17381 2672 ring->hangcheck.deadlock++;
6274f212
CW
2673
2674 signaller = semaphore_waits_for(ring, &seqno);
4be17381
CW
2675 if (signaller == NULL)
2676 return -1;
2677
2678 /* Prevent pathological recursion due to driver bugs */
2679 if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
6274f212
CW
2680 return -1;
2681
4be17381
CW
2682 if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
2683 return 1;
2684
a0d036b0
CW
2685 /* cursory check for an unkickable deadlock */
2686 if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
2687 semaphore_passed(signaller) < 0)
4be17381
CW
2688 return -1;
2689
2690 return 0;
6274f212
CW
2691}
2692
2693static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2694{
a4872ba6 2695 struct intel_engine_cs *ring;
6274f212
CW
2696 int i;
2697
2698 for_each_ring(ring, dev_priv, i)
4be17381 2699 ring->hangcheck.deadlock = 0;
6274f212
CW
2700}
2701
ad8beaea 2702static enum intel_ring_hangcheck_action
a4872ba6 2703ring_stuck(struct intel_engine_cs *ring, u64 acthd)
1ec14ad3
CW
2704{
2705 struct drm_device *dev = ring->dev;
2706 struct drm_i915_private *dev_priv = dev->dev_private;
9107e9d2
CW
2707 u32 tmp;
2708
f260fe7b
MK
2709 if (acthd != ring->hangcheck.acthd) {
2710 if (acthd > ring->hangcheck.max_acthd) {
2711 ring->hangcheck.max_acthd = acthd;
2712 return HANGCHECK_ACTIVE;
2713 }
2714
2715 return HANGCHECK_ACTIVE_LOOP;
2716 }
6274f212 2717
9107e9d2 2718 if (IS_GEN2(dev))
f2f4d82f 2719 return HANGCHECK_HUNG;
9107e9d2
CW
2720
2721 /* Is the chip hanging on a WAIT_FOR_EVENT?
2722 * If so we can simply poke the RB_WAIT bit
2723 * and break the hang. This should work on
2724 * all but the second generation chipsets.
2725 */
2726 tmp = I915_READ_CTL(ring);
1ec14ad3 2727 if (tmp & RING_WAIT) {
58174462
MK
2728 i915_handle_error(dev, false,
2729 "Kicking stuck wait on %s",
2730 ring->name);
1ec14ad3 2731 I915_WRITE_CTL(ring, tmp);
f2f4d82f 2732 return HANGCHECK_KICK;
6274f212
CW
2733 }
2734
2735 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
2736 switch (semaphore_passed(ring)) {
2737 default:
f2f4d82f 2738 return HANGCHECK_HUNG;
6274f212 2739 case 1:
58174462
MK
2740 i915_handle_error(dev, false,
2741 "Kicking stuck semaphore on %s",
2742 ring->name);
6274f212 2743 I915_WRITE_CTL(ring, tmp);
f2f4d82f 2744 return HANGCHECK_KICK;
6274f212 2745 case 0:
f2f4d82f 2746 return HANGCHECK_WAIT;
6274f212 2747 }
9107e9d2 2748 }
ed5cbb03 2749
f2f4d82f 2750 return HANGCHECK_HUNG;
ed5cbb03
MK
2751}
2752
737b1506 2753/*
f65d9421 2754 * This is called when the chip hasn't reported back with completed
05407ff8
MK
2755 * batchbuffers in a long time. We keep track of per-ring seqno progress
2756 * and if there is no progress, the hangcheck score for that ring is
2757 * increased. Further, acthd is inspected to see if the ring is stuck. In
2758 * the stuck case we kick the ring. If we see no progress on three
2759 * subsequent calls we assume the chip is wedged and reset it.
f65d9421 2760 */
737b1506 2761static void i915_hangcheck_elapsed(struct work_struct *work)
f65d9421 2762{
737b1506
CW
2763 struct drm_i915_private *dev_priv =
2764 container_of(work, typeof(*dev_priv),
2765 gpu_error.hangcheck_work.work);
2766 struct drm_device *dev = dev_priv->dev;
a4872ba6 2767 struct intel_engine_cs *ring;
b4519513 2768 int i;
05407ff8 2769 int busy_count = 0, rings_hung = 0;
9107e9d2
CW
2770 bool stuck[I915_NUM_RINGS] = { 0 };
2771#define BUSY 1
2772#define KICK 5
2773#define HUNG 20
893eead0 2774
d330a953 2775 if (!i915.enable_hangcheck)
3e0dc6b0
BW
2776 return;
2777
b4519513 2778 for_each_ring(ring, dev_priv, i) {
50877445
CW
2779 u64 acthd;
2780 u32 seqno;
9107e9d2 2781 bool busy = true;
05407ff8 2782
6274f212
CW
2783 semaphore_clear_deadlocks(dev_priv);
2784
05407ff8
MK
2785 seqno = ring->get_seqno(ring, false);
2786 acthd = intel_ring_get_active_head(ring);
b4519513 2787
9107e9d2 2788 if (ring->hangcheck.seqno == seqno) {
94f7bbe1 2789 if (ring_idle(ring, seqno)) {
da661464
MK
2790 ring->hangcheck.action = HANGCHECK_IDLE;
2791
9107e9d2
CW
2792 if (waitqueue_active(&ring->irq_queue)) {
2793 /* Issue a wake-up to catch stuck h/w. */
094f9a54 2794 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
f4adcd24
DV
2795 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
2796 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
2797 ring->name);
2798 else
2799 DRM_INFO("Fake missed irq on %s\n",
2800 ring->name);
094f9a54
CW
2801 wake_up_all(&ring->irq_queue);
2802 }
2803 /* Safeguard against driver failure */
2804 ring->hangcheck.score += BUSY;
9107e9d2
CW
2805 } else
2806 busy = false;
05407ff8 2807 } else {
6274f212
CW
2808 /* We always increment the hangcheck score
2809 * if the ring is busy and still processing
2810 * the same request, so that no single request
2811 * can run indefinitely (such as a chain of
2812 * batches). The only time we do not increment
2813 * the hangcheck score on this ring is when this
2814 * ring is in a legitimate wait for another
2815 * ring. In that case the waiting ring is a
2816 * victim and we want to be sure we catch the
2817 * right culprit. Then every time we do kick
2818 * the ring, add a small increment to the
2819 * score so that we can catch a batch that is
2820 * being repeatedly kicked and so responsible
2821 * for stalling the machine.
2822 */
ad8beaea
MK
2823 ring->hangcheck.action = ring_stuck(ring,
2824 acthd);
2825
2826 switch (ring->hangcheck.action) {
da661464 2827 case HANGCHECK_IDLE:
f2f4d82f 2828 case HANGCHECK_WAIT:
f2f4d82f 2829 case HANGCHECK_ACTIVE:
f260fe7b
MK
2830 break;
2831 case HANGCHECK_ACTIVE_LOOP:
ea04cb31 2832 ring->hangcheck.score += BUSY;
6274f212 2833 break;
f2f4d82f 2834 case HANGCHECK_KICK:
ea04cb31 2835 ring->hangcheck.score += KICK;
6274f212 2836 break;
f2f4d82f 2837 case HANGCHECK_HUNG:
ea04cb31 2838 ring->hangcheck.score += HUNG;
6274f212
CW
2839 stuck[i] = true;
2840 break;
2841 }
05407ff8 2842 }
9107e9d2 2843 } else {
da661464
MK
2844 ring->hangcheck.action = HANGCHECK_ACTIVE;
2845
9107e9d2
CW
2846 /* Gradually reduce the count so that we catch DoS
2847 * attempts across multiple batches.
2848 */
2849 if (ring->hangcheck.score > 0)
2850 ring->hangcheck.score--;
f260fe7b
MK
2851
2852 ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
d1e61e7f
CW
2853 }
2854
05407ff8
MK
2855 ring->hangcheck.seqno = seqno;
2856 ring->hangcheck.acthd = acthd;
9107e9d2 2857 busy_count += busy;
893eead0 2858 }
b9201c14 2859
92cab734 2860 for_each_ring(ring, dev_priv, i) {
b6b0fac0 2861 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
b8d88d1d
DV
2862 DRM_INFO("%s on %s\n",
2863 stuck[i] ? "stuck" : "no progress",
2864 ring->name);
a43adf07 2865 rings_hung++;
92cab734
MK
2866 }
2867 }
2868
05407ff8 2869 if (rings_hung)
58174462 2870 return i915_handle_error(dev, true, "Ring hung");
f65d9421 2871
05407ff8
MK
2872 if (busy_count)
2873 /* Reset the timer in case the chip hangs without another
2874 * request being added */
10cd45b6
MK
2875 i915_queue_hangcheck(dev);
2876}
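
/*
 * Editor's sketch, not driver code: the scoring scheme described in the
 * comment inside i915_hangcheck_elapsed() above. Progress decays the score,
 * a busy ring with no progress grows it, and a threshold declares the hang.
 * The constants only loosely mirror the BUSY/KICK/HUNG defines and
 * HANGCHECK_SCORE_RING_HUNG.
 */
#include <stdio.h>

#define HUNG_THRESHOLD 31 /* stand-in for HANGCHECK_SCORE_RING_HUNG */

int main(void)
{
	int score = 0, tick;

	for (tick = 0; tick < 5; tick++) {
		int made_progress = 0; /* pretend the ring is stuck */

		if (made_progress)
			score = score > 0 ? score - 1 : 0; /* decay on progress */
		else
			score += 10; /* busy but no new seqno */

		if (score >= HUNG_THRESHOLD) {
			printf("tick %d: ring hung (score %d), resetting\n",
			       tick, score);
			break;
		}
	}
	return 0;
}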
2877
2878void i915_queue_hangcheck(struct drm_device *dev)
2879{
737b1506 2880 struct i915_gpu_error *e = &to_i915(dev)->gpu_error;
672e7b7c 2881
d330a953 2882 if (!i915.enable_hangcheck)
10cd45b6
MK
2883 return;
2884
737b1506
CW
2885 /* Don't continually defer the hangcheck so that it is always run at
2886 * least once after work has been scheduled on any ring. Otherwise,
2887 * we will ignore a hung ring if a second ring is kept busy.
2888 */
2889
2890 queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
2891 round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
f65d9421
BG
2892}
2893
1c69eb42 2894static void ibx_irq_reset(struct drm_device *dev)
91738a95
PZ
2895{
2896 struct drm_i915_private *dev_priv = dev->dev_private;
2897
2898 if (HAS_PCH_NOP(dev))
2899 return;
2900
f86f3fb0 2901 GEN5_IRQ_RESET(SDE);
105b122e
PZ
2902
2903 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
2904 I915_WRITE(SERR_INT, 0xffffffff);
622364b6 2905}
105b122e 2906
622364b6
PZ
2907/*
2908 * SDEIER is also touched by the interrupt handler to work around missed PCH
2909 * interrupts. Hence we can't update it after the interrupt handler is enabled -
2910 * instead we unconditionally enable all PCH interrupt sources here, but then
2911 * only unmask them as needed with SDEIMR.
2912 *
2913 * This function needs to be called before interrupts are enabled.
2914 */
2915static void ibx_irq_pre_postinstall(struct drm_device *dev)
2916{
2917 struct drm_i915_private *dev_priv = dev->dev_private;
2918
2919 if (HAS_PCH_NOP(dev))
2920 return;
2921
2922 WARN_ON(I915_READ(SDEIER) != 0);
91738a95
PZ
2923 I915_WRITE(SDEIER, 0xffffffff);
2924 POSTING_READ(SDEIER);
2925}
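
/*
 * Editor's sketch, not driver code: the SDEIER policy from the comment
 * above -- enable every south interrupt source once, before the handler can
 * run, and do all later gating through SDEIMR only. Registers are simulated
 * and the unmasked source bit is invented.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t sdeier, sdeimr = 0xffffffffu;

static void pch_pre_postinstall(void)
{
	sdeier = 0xffffffffu; /* set once; only the irq handler toggles it */
}

static void pch_unmask(uint32_t sources)
{
	sdeimr &= ~sources;   /* all later enable/disable goes through IMR */
}

int main(void)
{
	pch_pre_postinstall();
	pch_unmask(0x1u);     /* e.g. unmask GMBUS */
	printf("SDEIER 0x%08x SDEIMR 0x%08x\n", sdeier, sdeimr);
	return 0;
}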
2926
7c4d664e 2927static void gen5_gt_irq_reset(struct drm_device *dev)
d18ea1b5
DV
2928{
2929 struct drm_i915_private *dev_priv = dev->dev_private;
2930
f86f3fb0 2931 GEN5_IRQ_RESET(GT);
a9d356a6 2932 if (INTEL_INFO(dev)->gen >= 6)
f86f3fb0 2933 GEN5_IRQ_RESET(GEN6_PM);
d18ea1b5
DV
2934}
2935
1da177e4
LT
2936/* drm_dma.h hooks
2937*/
be30b29f 2938static void ironlake_irq_reset(struct drm_device *dev)
036a4a7d 2939{
2d1013dd 2940 struct drm_i915_private *dev_priv = dev->dev_private;
036a4a7d 2941
0c841212 2942 I915_WRITE(HWSTAM, 0xffffffff);
bdfcdb63 2943
f86f3fb0 2944 GEN5_IRQ_RESET(DE);
c6d954c1
PZ
2945 if (IS_GEN7(dev))
2946 I915_WRITE(GEN7_ERR_INT, 0xffffffff);
036a4a7d 2947
7c4d664e 2948 gen5_gt_irq_reset(dev);
c650156a 2949
1c69eb42 2950 ibx_irq_reset(dev);
7d99163d 2951}
c650156a 2952
70591a41
VS
2953static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
2954{
2955 enum pipe pipe;
2956
2957 I915_WRITE(PORT_HOTPLUG_EN, 0);
2958 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2959
2960 for_each_pipe(dev_priv, pipe)
2961 I915_WRITE(PIPESTAT(pipe), 0xffff);
2962
2963 GEN5_IRQ_RESET(VLV_);
2964}
2965
7e231dbe
JB
2966static void valleyview_irq_preinstall(struct drm_device *dev)
2967{
2d1013dd 2968 struct drm_i915_private *dev_priv = dev->dev_private;
7e231dbe 2969
7e231dbe
JB
2970 /* VLV magic */
2971 I915_WRITE(VLV_IMR, 0);
2972 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
2973 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
2974 I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
2975
7c4d664e 2976 gen5_gt_irq_reset(dev);
7e231dbe 2977
7c4cde39 2978 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
7e231dbe 2979
70591a41 2980 vlv_display_irq_reset(dev_priv);
7e231dbe
JB
2981}
2982
d6e3cca3
DV
2983static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
2984{
2985 GEN8_IRQ_RESET_NDX(GT, 0);
2986 GEN8_IRQ_RESET_NDX(GT, 1);
2987 GEN8_IRQ_RESET_NDX(GT, 2);
2988 GEN8_IRQ_RESET_NDX(GT, 3);
2989}
2990
823f6b38 2991static void gen8_irq_reset(struct drm_device *dev)
abd58f01
BW
2992{
2993 struct drm_i915_private *dev_priv = dev->dev_private;
2994 int pipe;
2995
abd58f01
BW
2996 I915_WRITE(GEN8_MASTER_IRQ, 0);
2997 POSTING_READ(GEN8_MASTER_IRQ);
2998
d6e3cca3 2999 gen8_gt_irq_reset(dev_priv);
abd58f01 3000
055e393f 3001 for_each_pipe(dev_priv, pipe)
f458ebbc
DV
3002 if (intel_display_power_is_enabled(dev_priv,
3003 POWER_DOMAIN_PIPE(pipe)))
813bde43 3004 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
abd58f01 3005
f86f3fb0
PZ
3006 GEN5_IRQ_RESET(GEN8_DE_PORT_);
3007 GEN5_IRQ_RESET(GEN8_DE_MISC_);
3008 GEN5_IRQ_RESET(GEN8_PCU_);
abd58f01 3009
266ea3d9
SS
3010 if (HAS_PCH_SPLIT(dev))
3011 ibx_irq_reset(dev);
abd58f01 3012}
09f2344d 3013
4c6c03be
DL
3014void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3015 unsigned int pipe_mask)
d49bdb0e 3016{
1180e206 3017 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
d49bdb0e 3018
13321786 3019 spin_lock_irq(&dev_priv->irq_lock);
d14c0343
DL
3020 if (pipe_mask & 1 << PIPE_A)
3021 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A,
3022 dev_priv->de_irq_mask[PIPE_A],
3023 ~dev_priv->de_irq_mask[PIPE_A] | extra_ier);
4c6c03be
DL
3024 if (pipe_mask & 1 << PIPE_B)
3025 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B,
3026 dev_priv->de_irq_mask[PIPE_B],
3027 ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
3028 if (pipe_mask & 1 << PIPE_C)
3029 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C,
3030 dev_priv->de_irq_mask[PIPE_C],
3031 ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
13321786 3032 spin_unlock_irq(&dev_priv->irq_lock);
d49bdb0e
PZ
3033}
3034
43f328d7
VS
3035static void cherryview_irq_preinstall(struct drm_device *dev)
3036{
3037 struct drm_i915_private *dev_priv = dev->dev_private;
43f328d7
VS
3038
3039 I915_WRITE(GEN8_MASTER_IRQ, 0);
3040 POSTING_READ(GEN8_MASTER_IRQ);
3041
d6e3cca3 3042 gen8_gt_irq_reset(dev_priv);
43f328d7
VS
3043
3044 GEN5_IRQ_RESET(GEN8_PCU_);
3045
43f328d7
VS
3046 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3047
70591a41 3048 vlv_display_irq_reset(dev_priv);
43f328d7
VS
3049}
3050
87a02106
VS
3051static u32 intel_hpd_enabled_irqs(struct drm_device *dev,
3052 const u32 hpd[HPD_NUM_PINS])
3053{
3054 struct drm_i915_private *dev_priv = to_i915(dev);
3055 struct intel_encoder *encoder;
3056 u32 enabled_irqs = 0;
3057
3058 for_each_intel_encoder(dev, encoder)
3059 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
3060 enabled_irqs |= hpd[encoder->hpd_pin];
3061
3062 return enabled_irqs;
3063}
3064
82a28bcf 3065static void ibx_hpd_irq_setup(struct drm_device *dev)
7fe0b973 3066{
2d1013dd 3067 struct drm_i915_private *dev_priv = dev->dev_private;
87a02106 3068 u32 hotplug_irqs, hotplug, enabled_irqs;
82a28bcf
DV
3069
3070 if (HAS_PCH_IBX(dev)) {
fee884ed 3071 hotplug_irqs = SDE_HOTPLUG_MASK;
87a02106 3072 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ibx);
82a28bcf 3073 } else {
fee884ed 3074 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
87a02106 3075 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_cpt);
82a28bcf 3076 }
7fe0b973 3077
fee884ed 3078 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
82a28bcf
DV
3079
3080 /*
3081 * Enable digital hotplug on the PCH, and configure the DP short pulse
6dbf30ce
VS
3082 * duration to 2ms (which is the minimum in the Display Port spec).
3083 * The pulse duration bits are reserved on LPT+.
82a28bcf 3084 */
7fe0b973
KP
3085 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3086 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3087 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3088 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3089 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3090 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
6dbf30ce 3091}
26951caf 3092
6dbf30ce
VS
3093static void spt_hpd_irq_setup(struct drm_device *dev)
3094{
3095 struct drm_i915_private *dev_priv = dev->dev_private;
3096 u32 hotplug_irqs, hotplug, enabled_irqs;
3097
3098 hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
3099 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_spt);
3100
3101 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3102
3103 /* Enable digital hotplug on the PCH */
3104 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3105 hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE |
3106 PORTB_HOTPLUG_ENABLE;
3107 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3108
3109 hotplug = I915_READ(PCH_PORT_HOTPLUG2);
3110 hotplug |= PORTE_HOTPLUG_ENABLE;
3111 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
7fe0b973
KP
3112}
3113
e0a20ad7
SS
3114static void bxt_hpd_irq_setup(struct drm_device *dev)
3115{
3116 struct drm_i915_private *dev_priv = dev->dev_private;
87a02106 3117 u32 hotplug_port;
e0a20ad7
SS
3118 u32 hotplug_ctrl;
3119
87a02106 3120 hotplug_port = intel_hpd_enabled_irqs(dev, hpd_bxt);
e0a20ad7 3121
e0a20ad7
SS
3122 hotplug_ctrl = I915_READ(BXT_HOTPLUG_CTL) & ~BXT_HOTPLUG_CTL_MASK;
3123
7f3561be
SJ
3124 if (hotplug_port & BXT_DE_PORT_HP_DDIA)
3125 hotplug_ctrl |= BXT_DDIA_HPD_ENABLE;
e0a20ad7
SS
3126 if (hotplug_port & BXT_DE_PORT_HP_DDIB)
3127 hotplug_ctrl |= BXT_DDIB_HPD_ENABLE;
3128 if (hotplug_port & BXT_DE_PORT_HP_DDIC)
3129 hotplug_ctrl |= BXT_DDIC_HPD_ENABLE;
3130 I915_WRITE(BXT_HOTPLUG_CTL, hotplug_ctrl);
3131
e0a20ad7
SS
3132 hotplug_ctrl = I915_READ(GEN8_DE_PORT_IMR) & ~hotplug_port;
3133 I915_WRITE(GEN8_DE_PORT_IMR, hotplug_ctrl);
3134
e0a20ad7
SS
3135 hotplug_ctrl = I915_READ(GEN8_DE_PORT_IER) | hotplug_port;
3136 I915_WRITE(GEN8_DE_PORT_IER, hotplug_ctrl);
3137 POSTING_READ(GEN8_DE_PORT_IER);
3138}
3139
d46da437
PZ
3140static void ibx_irq_postinstall(struct drm_device *dev)
3141{
2d1013dd 3142 struct drm_i915_private *dev_priv = dev->dev_private;
82a28bcf 3143 u32 mask;
e5868a31 3144
692a04cf
DV
3145 if (HAS_PCH_NOP(dev))
3146 return;
3147
105b122e 3148 if (HAS_PCH_IBX(dev))
5c673b60 3149 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
105b122e 3150 else
5c673b60 3151 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
8664281b 3152
337ba017 3153 GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
d46da437 3154 I915_WRITE(SDEIMR, ~mask);
d46da437
PZ
3155}
3156
0a9a8c91
DV
3157static void gen5_gt_irq_postinstall(struct drm_device *dev)
3158{
3159 struct drm_i915_private *dev_priv = dev->dev_private;
3160 u32 pm_irqs, gt_irqs;
3161
3162 pm_irqs = gt_irqs = 0;
3163
3164 dev_priv->gt_irq_mask = ~0;
040d2baa 3165 if (HAS_L3_DPF(dev)) {
0a9a8c91 3166 /* L3 parity interrupt is always unmasked. */
35a85ac6
BW
3167 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3168 gt_irqs |= GT_PARITY_ERROR(dev);
0a9a8c91
DV
3169 }
3170
3171 gt_irqs |= GT_RENDER_USER_INTERRUPT;
3172 if (IS_GEN5(dev)) {
3173 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
3174 ILK_BSD_USER_INTERRUPT;
3175 } else {
3176 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3177 }
3178
35079899 3179 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
0a9a8c91
DV
3180
3181 if (INTEL_INFO(dev)->gen >= 6) {
78e68d36
ID
3182 /*
3183 * RPS interrupts will get enabled/disabled on demand when RPS
3184 * itself is enabled/disabled.
3185 */
0a9a8c91
DV
3186 if (HAS_VEBOX(dev))
3187 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3188
605cd25b 3189 dev_priv->pm_irq_mask = 0xffffffff;
35079899 3190 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
0a9a8c91
DV
3191 }
3192}
3193
static int ironlake_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 display_mask, extra_mask;

	if (INTEL_INFO(dev)->gen >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
				DE_PLANEB_FLIP_DONE_IVB |
				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
				DE_AUX_CHANNEL_A |
				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
				DE_POISON);
		extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
			     DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
	}

	dev_priv->irq_mask = ~display_mask;

	I915_WRITE(HWSTAM, 0xeffe);

	ibx_irq_pre_postinstall(dev);

	GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);

	gen5_gt_irq_postinstall(dev);

	ibx_irq_postinstall(dev);

	if (IS_IRONLAKE_M(dev)) {
		/* Enable PCU event interrupts
		 *
		 * spinlocking not required here for correctness since interrupt
		 * setup is guaranteed to run in single-threaded context. But we
		 * need it to make the assert_spin_locked happy. */
		spin_lock_irq(&dev_priv->irq_lock);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
		spin_unlock_irq(&dev_priv->irq_lock);
	}

	return 0;
}

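/*
 * VLV/CHV display irq install: pipestat sources are enabled first, then
 * the corresponding IIR bits are cleared before IER/IMR are opened up.
 * The back-to-back VLV_IIR writes suggest the register is double
 * buffered; the uninstall path below undoes all of this in reverse
 * order.
 */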
static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
{
	u32 pipestat_mask;
	u32 iir_mask;
	enum pipe pipe;

	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
			PIPE_FIFO_UNDERRUN_STATUS;

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
	POSTING_READ(PIPESTAT(PIPE_A));

	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
			PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);

	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
	if (IS_CHERRYVIEW(dev_priv))
		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
	dev_priv->irq_mask &= ~iir_mask;

	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	POSTING_READ(VLV_IMR);
}

static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
{
	u32 pipestat_mask;
	u32 iir_mask;
	enum pipe pipe;

	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
	if (IS_CHERRYVIEW(dev_priv))
		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;

	dev_priv->irq_mask |= iir_mask;
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IIR, iir_mask);
	POSTING_READ(VLV_IIR);

	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
			PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_disable_pipestat(dev_priv, pipe, pipestat_mask);

	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
			PIPE_FIFO_UNDERRUN_STATUS;

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
	POSTING_READ(PIPESTAT(PIPE_A));
}

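/*
 * The two helpers below track whether display irqs are currently wanted
 * via dev_priv->display_irqs_enabled and only touch the hardware when
 * irqs are actually enabled.  Callers must hold dev_priv->irq_lock; on
 * VLV/CHV this is driven by the display power well code, which has to
 * tear display irqs down while the power well is off.
 */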
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = true;

	if (intel_irqs_enabled(dev_priv))
		valleyview_display_irqs_install(dev_priv);
}

void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = false;

	if (intel_irqs_enabled(dev_priv))
		valleyview_display_irqs_uninstall(dev_priv);
}

static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
	dev_priv->irq_mask = ~0;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	POSTING_READ(VLV_IMR);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		valleyview_display_irqs_install(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static int valleyview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	vlv_display_irq_postinstall(dev_priv);

	gen5_gt_irq_postinstall(dev);

	/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

	return 0;
}

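/*
 * Gen8+ GT interrupts are split across four IIR banks; gt_interrupts[]
 * below is indexed by bank: 0 = render + blitter, 1 = the two video
 * decode engines, 2 = PM/RPS (left disabled here, toggled on demand),
 * 3 = video enhancement engine.
 */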
static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
	/* These are interrupts we'll toggle with the ring mask register */
	uint32_t gt_interrupts[] = {
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
		0,
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
		};

	dev_priv->pm_irq_mask = 0xffffffff;
	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled.
	 */
	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}

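/*
 * Gen8+ display engine postinstall: de_pipe_masked is what stays
 * unmasked from the start, de_pipe_enables additionally turns on vblank
 * and FIFO underrun reporting.  Pipes whose power domain is currently
 * off are skipped here; their irq registers get initialized by the
 * power well code when the domain comes back up.
 */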
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
	uint32_t de_pipe_enables;
	int pipe;
	u32 de_port_en = GEN8_AUX_CHANNEL_A;

	if (IS_GEN9(dev_priv)) {
		de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
				  GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		de_port_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
			GEN9_AUX_CHANNEL_D;

		if (IS_BROXTON(dev_priv))
			de_port_en |= BXT_DE_PORT_GMBUS;
	} else
		de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
				  GEN8_DE_PIPE_IRQ_FAULT_ERRORS;

	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
			  GEN8_PIPE_FIFO_UNDERRUN;

	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);

	GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_en, de_port_en);
}

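/*
 * Top-level gen8/CHV postinstall: all sub-banks (GT, display engine,
 * PCH) are programmed first; only then is the master interrupt control
 * bit flipped on, with a posting read to flush the write.
 */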
static int gen8_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_SPLIT(dev))
		ibx_irq_pre_postinstall(dev);

	gen8_gt_irq_postinstall(dev_priv);
	gen8_de_irq_postinstall(dev_priv);

	if (HAS_PCH_SPLIT(dev))
		ibx_irq_postinstall(dev);

	I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

static int cherryview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	vlv_display_irq_postinstall(dev_priv);

	gen8_gt_irq_postinstall(dev_priv);

	I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

static void gen8_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	gen8_irq_reset(dev);
}

static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
{
	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		valleyview_display_irqs_uninstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	vlv_display_irq_reset(dev_priv);

	dev_priv->irq_mask = ~0;
}

static void valleyview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	I915_WRITE(VLV_MASTER_IER, 0);

	gen5_gt_irq_reset(dev);

	I915_WRITE(HWSTAM, 0xffffffff);

	vlv_display_irq_uninstall(dev_priv);
}

static void cherryview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	GEN5_IRQ_RESET(GEN8_PCU_);

	vlv_display_irq_uninstall(dev_priv);
}

static void ironlake_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	ironlake_irq_reset(dev);
}

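/*
 * Everything below handles the legacy pre-gen5 platforms.  Gen2 (i8xx)
 * uses the 16-bit IMR/IER/IIR accessors, gen3 (i915) and gen4
 * (i965/g4x) the 32-bit ones; otherwise the three flavours follow the
 * same preinstall/postinstall/handler/uninstall pattern.
 */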
static void i8xx_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}

static int i8xx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}

/*
 * Returns true when a page flip has completed.
 */
static bool i8xx_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		goto check_page_flip;

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ16(ISR) & flip_pending)
		goto check_page_flip;

	intel_prepare_page_flip(dev, plane);
	intel_finish_page_flip(dev, pipe);
	return true;

check_page_flip:
	intel_check_page_flip(dev, pipe);
	return false;
}

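/*
 * Gen2 interrupt handler.  Note the 16-bit register accessors, and that
 * the loop condition masks out the flip-pending bits: those are only
 * consumed via i8xx_handle_vblank() above and must not keep the loop
 * spinning on their own.
 */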
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	iir = I915_READ16(IIR);
	if (iir == 0)
		return IRQ_NONE;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff)
				I915_WRITE(reg, pipe_stats[pipe]);
		}
		spin_unlock(&dev_priv->irq_lock);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i8xx_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);
		}

		iir = new_iir;
	}

	return IRQ_HANDLED;
}

static void i8xx_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}

static void i915_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i915_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_enable_asle_pipestat(dev);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}

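/*
 * Same flip-completion logic as i8xx_handle_vblank() above, only using
 * the 32-bit ISR/IIR accessors of gen3+.
 */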
/*
 * Returns true when a page flip has completed.
 */
static bool i915_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		goto check_page_flip;

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ(ISR) & flip_pending)
		goto check_page_flip;

	intel_prepare_page_flip(dev, plane);
	intel_finish_page_flip(dev, pipe);
	return true;

check_page_flip:
	intel_check_page_flip(dev, pipe);
	return false;
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe, ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		/* Consume port. Then clear IIR or we'll miss events */
		if (I915_HAS_HOTPLUG(dev) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	return ret;
}

static void i915_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}

static void i965_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev);

	return 0;
}

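/*
 * i915_hpd_irq_setup() is invoked through the hpd_irq_setup vtable hook
 * with dev_priv->irq_lock already held, hence the assert below rather
 * than taking the lock here.
 */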
static void i915_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_en;

	assert_spin_locked(&dev_priv->irq_lock);

	hotplug_en = I915_READ(PORT_HOTPLUG_EN);
	hotplug_en &= ~HOTPLUG_INT_EN_MASK;
	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	hotplug_en |= intel_hpd_enabled_irqs(dev, hpd_mask_i915);
	/* Programming the CRT detection parameters tends
	   to generate a spurious hotplug event about three
	   seconds later. So just do it once.
	*/
	if (IS_G4X(dev))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	iir = I915_READ(IIR);

	for (;;) {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[VCS]);

		for_each_pipe(dev_priv, pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	return ret;
}

static void i965_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not setup the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	intel_hpd_init_work(dev_priv);

	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
			  i915_hangcheck_elapsed);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	if (IS_GEN2(dev_priv)) {
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequence numbers.
	 */
	if (!IS_GEN2(dev_priv))
		dev->vblank_disable_immediate = true;

	dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		if (IS_BROXTON(dev))
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
		else if (HAS_PCH_SPT(dev))
			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
		else
			dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev_priv)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev_priv)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
		}
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves hotplug
 * handling disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few
 * places but don't want to deal with the hassle of concurrent probe and
 * hotplug workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->pm.irqs_enabled = true;

	return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
}
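
/*
 * A rough sketch of the intended ordering on the driver load path (the
 * actual call sites live elsewhere in the driver and are authoritative):
 *
 *	intel_irq_init(dev_priv);		// work items + vtables
 *	...
 *	ret = intel_irq_install(dev_priv);	// request the interrupt
 *	...
 *	intel_hpd_init(dev_priv);		// hotplug, once probing is safe
 */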

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	drm_irq_uninstall(dev_priv->dev);
	intel_hpd_cancel_work(dev_priv);
	dev_priv->pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
	dev_priv->pm.irqs_enabled = false;
	synchronize_irq(dev_priv->dev->irq);
}
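
/*
 * Note the pairing with intel_runtime_pm_enable_interrupts() below:
 * disable marks irqs as off and waits for in-flight handlers via
 * synchronize_irq(), while enable re-runs the full preinstall +
 * postinstall sequence to reprogram the hardware from scratch.
 */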

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->pm.irqs_enabled = true;
	dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
	dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
}