/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
				    i915_reg_t reg)
{
	u32 val = I915_READ(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
}

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     uint32_t mask,
				     uint32_t bits)
{
	uint32_t val;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To avoid interference between the
 * read-modify-write cycles, these bits are protected by a spinlock.
 * Since this function is usually not called from a context where
 * the lock is held already, this function acquires the lock itself.
 * A non-locking version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   uint32_t mask,
				   uint32_t bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    uint32_t interrupt_mask,
			    uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask) {
		dev_priv->irq_mask = new_val;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
	POSTING_READ_FW(GTIMR);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

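/*
 * On gen8+ the PM interrupt bits live in bank 2 of the GT interrupt
 * registers (GEN8_GT_{IIR,IMR,IER}(2)); earlier gens have dedicated
 * GEN6_PM{IIR,IMR,IER} registers. These helpers return the right
 * register for the running platform.
 */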
static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
				  uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_disable_pm_irq(dev_priv, mask);
}

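/*
 * Note the back-to-back IIR writes below: IIR can queue up two events
 * (see the "Be paranoid" comment above the GEN*_IRQ_RESET macros), so
 * the RPS bits are cleared twice to make sure nothing stays latched.
 */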
void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	i915_reg_t reg = gen6_pm_iir(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	POSTING_READ(reg);
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	WARN_ON_ONCE(dev_priv->rps.pm_iir);
	WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	dev_priv->rps.interrupts_enabled = true;
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
		   dev_priv->pm_rps_events);
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}

u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
{
	return (mask & ~dev_priv->rps.pm_intr_keep);
}

void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.interrupts_enabled = false;

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));

	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
		   ~dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
	synchronize_irq(dev_priv->dev->irq);

	/* Now that we will not be generating any more work, flush any
	 * outstanding tasks. As we are called on the RPS idle path,
	 * we will reset the GPU to minimum frequencies, so the current
	 * state of the worker can be discarded.
	 */
	cancel_work_sync(&dev_priv->rps.work);
	gen6_reset_rps_interrupts(dev_priv);
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				uint32_t interrupt_mask,
				uint32_t enabled_irq_mask)
{
	uint32_t new_val;
	uint32_t old_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	old_val = I915_READ(GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 uint32_t interrupt_mask,
			 uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

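/*
 * In PIPESTAT the status bits occupy the low 16 bits and the matching
 * enable bits sit 16 bits above them, hence the status_mask << 16 in
 * the callers below. VLV/CHV repurpose a few of those bits, which is
 * what this helper patches up when computing the enable mask.
 */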
static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}

void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_GEN(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}

/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode = &crtc->base.hwmode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev_priv))
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank. So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem. We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
				DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime,
				    const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (WARN_ON(!mode->crtc_clock)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_IN_VBLANK flag. In order to fix
		 * that, always add htotal-hsync_start to the current pixel
		 * position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	return ret;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	if (!crtc->hwmode.crtc_clock) {
		DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     &crtc->hwmode);
}

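/*
 * Note: judging by the clamping below, the ILK "delay" values run
 * opposite to frequency: ips.max_delay is the numerically smaller
 * bound, so decrementing cur_delay raises performance and vice versa.
 */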
static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
{
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev_priv, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);
}

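/*
 * Mark the engine as having a pending user interrupt and wake up any
 * waiter; irq_posted is set with a full barrier (smp_store_mb) so that
 * woken waiters are guaranteed to observe it.
 */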
static void notify_ring(struct intel_engine_cs *engine)
{
	smp_store_mb(engine->irq_posted, true);
	if (intel_engine_wakeup(engine)) {
		trace_i915_gem_request_notify(engine);
		engine->user_interrupts++;
	}
}

static void vlv_c0_read(struct drm_i915_private *dev_priv,
			struct intel_rps_ei *ei)
{
	ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}

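/*
 * Roughly: report whether the combined render+media C0 residency
 * accumulated between @old and @now exceeds @threshold (seemingly a
 * percentage, hence the factor of 100) of the elapsed CZ-clock time.
 * The comparison is arranged as a cross-multiplication so no division
 * is needed.
 */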
static bool vlv_c0_above(struct drm_i915_private *dev_priv,
			 const struct intel_rps_ei *old,
			 const struct intel_rps_ei *now,
			 int threshold)
{
	u64 time, c0;
	unsigned int mul = 100;

	if (old->cz_clock == 0)
		return false;

	if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
		mul <<= 8;

	time = now->cz_clock - old->cz_clock;
	time *= threshold * dev_priv->czclk_freq;

	/* Workload can be split between render + media, e.g. SwapBuffers
	 * being blitted in X after being rendered in mesa. To account for
	 * this we need to combine both engines into our activity counter.
	 */
	c0 = now->render_c0 - old->render_c0;
	c0 += now->media_c0 - old->media_c0;
	c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;

	return c0 >= time;
}

void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
	vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
	dev_priv->rps.up_ei = dev_priv->rps.down_ei;
}

static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
		return 0;

	vlv_c0_read(dev_priv, &now);
	if (now.cz_clock == 0)
		return 0;

	if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
		if (!vlv_c0_above(dev_priv,
				  &dev_priv->rps.down_ei, &now,
				  dev_priv->rps.down_threshold))
			events |= GEN6_PM_RP_DOWN_THRESHOLD;
		dev_priv->rps.down_ei = now;
	}

	if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
		if (vlv_c0_above(dev_priv,
				 &dev_priv->rps.up_ei, &now,
				 dev_priv->rps.up_threshold))
			events |= GEN6_PM_RP_UP_THRESHOLD;
		dev_priv->rps.up_ei = now;
	}

	return events;
}

static bool any_waiters(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;

	for_each_engine(engine, dev_priv)
		if (intel_engine_has_waiter(engine))
			return true;

	return false;
}

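/*
 * Bottom half for the RPS interrupts: picks up the IIR bits stashed by
 * gen6_rps_irq_handler(), re-enables the PM interrupts that were masked
 * there, and steps the GPU frequency up or down (with exponentially
 * growing steps via last_adj) as the hardware requested.
 */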
static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	bool client_boost;
	int new_delay, adj, min, max;
	u32 pm_iir;

	spin_lock_irq(&dev_priv->irq_lock);
	/* Speed up work cancellation during disabling rps interrupts. */
	if (!dev_priv->rps.interrupts_enabled) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	client_boost = dev_priv->rps.client_boost;
	dev_priv->rps.client_boost = false;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

	adj = dev_priv->rps.last_adj;
	new_delay = dev_priv->rps.cur_freq;
	min = dev_priv->rps.min_freq_softlimit;
	max = dev_priv->rps.max_freq_softlimit;

	if (client_boost) {
		new_delay = dev_priv->rps.max_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq - adj) {
			new_delay = dev_priv->rps.efficient_freq;
			adj = 0;
		}
	} else if (any_waiters(dev_priv)) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
	} else { /* unknown event */
		adj = 0;
	}

	dev_priv->rps.last_adj = adj;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay += adj;
	new_delay = clamp_t(int, new_delay, min, max);

	intel_set_rps(dev_priv, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}

/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&dev_priv->irq_lock);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(&dev_priv->irq_lock);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
					       u32 iir)
{
	if (!HAS_L3_DPF(dev_priv))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev_priv);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

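/*
 * ILK/SNB GT interrupt dispatch: user interrupt bits wake the engine
 * that completed work via notify_ring(); on SNB+, command parser errors
 * are logged and L3 parity errors are handed off to the parity work.
 */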
static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		notify_ring(&dev_priv->engine[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(&dev_priv->engine[VCS]);
}

static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		notify_ring(&dev_priv->engine[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(&dev_priv->engine[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(&dev_priv->engine[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(dev_priv))
		ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
}

static __always_inline void
gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
{
	if (iir & (GT_RENDER_USER_INTERRUPT << test_shift))
		notify_ring(engine);
	if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift))
		tasklet_schedule(&engine->irq_tasklet);
}

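/*
 * Ack phase for gen8+ GT interrupts: read and clear each GT IIR bank
 * flagged in @master_ctl, stashing the raw values in @gt_iir[] so that
 * the actual processing can be done separately by gen8_gt_irq_handler().
 */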
static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
				   u32 master_ctl,
				   u32 gt_iir[4])
{
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0));
		if (gt_iir[0]) {
			I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		gt_iir[1] = I915_READ_FW(GEN8_GT_IIR(1));
		if (gt_iir[1]) {
			I915_WRITE_FW(GEN8_GT_IIR(1), gt_iir[1]);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		gt_iir[3] = I915_READ_FW(GEN8_GT_IIR(3));
		if (gt_iir[3]) {
			I915_WRITE_FW(GEN8_GT_IIR(3), gt_iir[3]);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	if (master_ctl & GEN8_GT_PM_IRQ) {
		gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2));
		if (gt_iir[2] & dev_priv->pm_rps_events) {
			I915_WRITE_FW(GEN8_GT_IIR(2),
				      gt_iir[2] & dev_priv->pm_rps_events);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	return ret;
}

static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
				u32 gt_iir[4])
{
	if (gt_iir[0]) {
		gen8_cs_irq_handler(&dev_priv->engine[RCS],
				    gt_iir[0], GEN8_RCS_IRQ_SHIFT);
		gen8_cs_irq_handler(&dev_priv->engine[BCS],
				    gt_iir[0], GEN8_BCS_IRQ_SHIFT);
	}

	if (gt_iir[1]) {
		gen8_cs_irq_handler(&dev_priv->engine[VCS],
				    gt_iir[1], GEN8_VCS1_IRQ_SHIFT);
		gen8_cs_irq_handler(&dev_priv->engine[VCS2],
				    gt_iir[1], GEN8_VCS2_IRQ_SHIFT);
	}

	if (gt_iir[3])
		gen8_cs_irq_handler(&dev_priv->engine[VECS],
				    gt_iir[3], GEN8_VECS_IRQ_SHIFT);

	if (gt_iir[2] & dev_priv->pm_rps_events)
		gen6_rps_irq_handler(dev_priv, gt_iir[2]);
}

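/*
 * Per-platform predicates that decode whether the hotplug pulse seen on
 * @port was a long pulse (typically plug/unplug) rather than a short
 * one, based on the platform's digital hotplug register value. They are
 * passed to intel_get_hpd_pins() as its long_pulse_detect callback.
 */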
static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_E:
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool pch_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_B:
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
	case PORT_C:
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
	case PORT_D:
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
	}
}

/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
			       bool long_pulse_detect(enum port port, u32 val))
{
	enum port port;
	int i;

	for_each_hpd_pin(i) {
		if ((hpd[i] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(i);

		if (!intel_hpd_pin_to_port(i, &port))
			continue;

		if (long_pulse_detect(port, dig_hotplug_reg))
			*long_mask |= BIT(i);
	}

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg, *pin_mask);
}

static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

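/*
 * Push one set of CRC results into the pipe's circular debugfs buffer,
 * tagged with the current vblank counter, and wake any blocked reader.
 * Compiled away to an empty stub when CONFIG_DEBUG_FS is disabled.
 */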
#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_DEBUG_KMS("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev_priv->dev->driver->get_vblank_counter(dev_priv->dev,
								 pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif

static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	uint32_t res1, res2;

	if (INTEL_GEN(dev_priv) >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

1403c0d4
PZ
1582/* The RPS events need forcewake, so we add them to a work queue and mask their
1583 * IMR bits until the work is done. Other interrupts can be processed without
1584 * the work queue. */
1585static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
baf02a1f 1586{
a6706b45 1587 if (pm_iir & dev_priv->pm_rps_events) {
59cdb63d 1588 spin_lock(&dev_priv->irq_lock);
480c8033 1589 gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
d4d70aa5
ID
1590 if (dev_priv->rps.interrupts_enabled) {
1591 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
c33d247d 1592 schedule_work(&dev_priv->rps.work);
d4d70aa5 1593 }
59cdb63d 1594 spin_unlock(&dev_priv->irq_lock);
baf02a1f 1595 }
baf02a1f 1596
c9a9a268
ID
1597 if (INTEL_INFO(dev_priv)->gen >= 8)
1598 return;
1599
2d1fe073 1600 if (HAS_VEBOX(dev_priv)) {
1403c0d4 1601 if (pm_iir & PM_VEBOX_USER_INTERRUPT)
4a570db5 1602 notify_ring(&dev_priv->engine[VECS]);
12638c57 1603
aaecdf61
DV
1604 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
1605 DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
12638c57 1606 }
baf02a1f
BW
1607}
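/*
 * The work item scheduled above is expected to mirror this sequence in
 * reverse once its forcewake-protected register accesses are done: take
 * irq_lock, consume rps.pm_iir, and unmask the serviced bits again via
 * the enable counterpart of gen6_disable_pm_irq(). A sketch, assuming
 * that counterpart has this shape:
 *
 *	spin_lock_irq(&dev_priv->irq_lock);
 *	pm_iir = dev_priv->rps.pm_iir;
 *	dev_priv->rps.pm_iir = 0;
 *	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
 *	spin_unlock_irq(&dev_priv->irq_lock);
 */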
1608
5a21b665 1609static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv,
91d14251 1610 enum pipe pipe)
8d7849db 1611{
5a21b665
DV
1612 bool ret;
1613
1614 ret = drm_handle_vblank(dev_priv->dev, pipe);
1615 if (ret)
51cbaf01 1616 intel_finish_page_flip_mmio(dev_priv, pipe);
5a21b665
DV
1617
1618 return ret;
8d7849db
VS
1619}
1620
91d14251
TU
1621static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv,
1622 u32 iir, u32 pipe_stats[I915_MAX_PIPES])
c1874ed7 1623{
c1874ed7
ID
1624 int pipe;
1625
58ead0d7 1626 spin_lock(&dev_priv->irq_lock);
1ca993d2
VS
1627
1628 if (!dev_priv->display_irqs_enabled) {
1629 spin_unlock(&dev_priv->irq_lock);
1630 return;
1631 }
1632
055e393f 1633 for_each_pipe(dev_priv, pipe) {
f0f59a00 1634 i915_reg_t reg;
bbb5eebf 1635 u32 mask, iir_bit = 0;
91d181dd 1636
bbb5eebf
DV
1637 /*
1638 * PIPESTAT bits get signalled even when the interrupt is
1639 * disabled with the mask bits, and some of the status bits do
1640 * not generate interrupts at all (like the underrun bit). Hence
1641 * we need to be careful that we only handle what we want to
1642 * handle.
1643 */
0f239f4c
DV
1644
1645 /* fifo underruns are filtered in the underrun handler. */
1646 mask = PIPE_FIFO_UNDERRUN_STATUS;
bbb5eebf
DV
1647
1648 switch (pipe) {
1649 case PIPE_A:
1650 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1651 break;
1652 case PIPE_B:
1653 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1654 break;
3278f67f
VS
1655 case PIPE_C:
1656 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1657 break;
bbb5eebf
DV
1658 }
1659 if (iir & iir_bit)
1660 mask |= dev_priv->pipestat_irq_mask[pipe];
1661
1662 if (!mask)
91d181dd
ID
1663 continue;
1664
1665 reg = PIPESTAT(pipe);
bbb5eebf
DV
1666 mask |= PIPESTAT_INT_ENABLE_MASK;
1667 pipe_stats[pipe] = I915_READ(reg) & mask;
c1874ed7
ID
1668
1669 /*
1670 * Clear the PIPE*STAT regs before the IIR
1671 */
91d181dd
ID
1672 if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
1673 PIPESTAT_INT_STATUS_MASK))
c1874ed7
ID
1674 I915_WRITE(reg, pipe_stats[pipe]);
1675 }
58ead0d7 1676 spin_unlock(&dev_priv->irq_lock);
2ecb8ca4
VS
1677}
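/*
 * The ack/handle split is deliberate: the ack above runs under irq_lock
 * and write-1-clears PIPESTAT before the caller clears VLV_IIR (which is
 * single buffered and merely reflects the PIPESTAT level, see the
 * callers), while the heavier event processing is deferred to
 * valleyview_pipestat_irq_handler() using the snapshot in pipe_stats[].
 */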
1678
91d14251 1679static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
2ecb8ca4
VS
1680 u32 pipe_stats[I915_MAX_PIPES])
1681{
2ecb8ca4 1682 enum pipe pipe;
c1874ed7 1683
055e393f 1684 for_each_pipe(dev_priv, pipe) {
5a21b665
DV
1685 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
1686 intel_pipe_handle_vblank(dev_priv, pipe))
1687 intel_check_page_flip(dev_priv, pipe);
c1874ed7 1688
5251f04e 1689 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
51cbaf01 1690 intel_finish_page_flip_cs(dev_priv, pipe);
c1874ed7
ID
1691
1692 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
91d14251 1693 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
c1874ed7 1694
1f7247c0
DV
1695 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1696 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
c1874ed7
ID
1697 }
1698
1699 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
91d14251 1700 gmbus_irq_handler(dev_priv);
c1874ed7
ID
1701}
1702
1ae3c34c 1703static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
16c6c56b 1704{
16c6c56b
VS
1705 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
1706
1ae3c34c
VS
1707 if (hotplug_status)
1708 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
16c6c56b 1709
1ae3c34c
VS
1710 return hotplug_status;
1711}
1712
91d14251 1713static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1ae3c34c
VS
1714 u32 hotplug_status)
1715{
1716 u32 pin_mask = 0, long_mask = 0;
16c6c56b 1717
91d14251
TU
1718 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
1719 IS_CHERRYVIEW(dev_priv)) {
0d2e4297 1720 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
16c6c56b 1721
58f2cf24
VS
1722 if (hotplug_trigger) {
1723 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1724 hotplug_trigger, hpd_status_g4x,
1725 i9xx_port_hotplug_long_detect);
1726
91d14251 1727 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
58f2cf24 1728 }
369712e8
JN
1729
1730 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
91d14251 1731 dp_aux_irq_handler(dev_priv);
0d2e4297
JN
1732 } else {
1733 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
16c6c56b 1734
58f2cf24
VS
1735 if (hotplug_trigger) {
1736 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
44cc6c08 1737 hotplug_trigger, hpd_status_i915,
58f2cf24 1738 i9xx_port_hotplug_long_detect);
91d14251 1739 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
58f2cf24 1740 }
3ff60f89 1741 }
16c6c56b
VS
1742}
1743
ff1f525e 1744static irqreturn_t valleyview_irq_handler(int irq, void *arg)
7e231dbe 1745{
45a83f84 1746 struct drm_device *dev = arg;
fac5e23e 1747 struct drm_i915_private *dev_priv = to_i915(dev);
7e231dbe 1748 irqreturn_t ret = IRQ_NONE;
7e231dbe 1749
2dd2a883
ID
1750 if (!intel_irqs_enabled(dev_priv))
1751 return IRQ_NONE;
1752
1f814dac
ID
1753 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
1754 disable_rpm_wakeref_asserts(dev_priv);
1755
1e1cace9 1756 do {
6e814800 1757 u32 iir, gt_iir, pm_iir;
2ecb8ca4 1758 u32 pipe_stats[I915_MAX_PIPES] = {};
1ae3c34c 1759 u32 hotplug_status = 0;
a5e485a9 1760 u32 ier = 0;
3ff60f89 1761
7e231dbe
JB
1762 gt_iir = I915_READ(GTIIR);
1763 pm_iir = I915_READ(GEN6_PMIIR);
3ff60f89 1764 iir = I915_READ(VLV_IIR);
7e231dbe
JB
1765
1766 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1e1cace9 1767 break;
7e231dbe
JB
1768
1769 ret = IRQ_HANDLED;
1770
a5e485a9
VS
1771 /*
1772 * Theory on interrupt generation, based on empirical evidence:
1773 *
1774 * x = ((VLV_IIR & VLV_IER) ||
1775 * (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
1776 * (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
1777 *
1778 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1779 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
1780 * guarantee the CPU interrupt will be raised again even if we
1781 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
1782 * bits this time around.
1783 */
4a0a0202 1784 I915_WRITE(VLV_MASTER_IER, 0);
a5e485a9
VS
1785 ier = I915_READ(VLV_IER);
1786 I915_WRITE(VLV_IER, 0);
4a0a0202
VS
1787
1788 if (gt_iir)
1789 I915_WRITE(GTIIR, gt_iir);
1790 if (pm_iir)
1791 I915_WRITE(GEN6_PMIIR, pm_iir);
1792
7ce4d1f2 1793 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1ae3c34c 1794 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
7ce4d1f2 1795
3ff60f89
OM
1796 /* Call regardless, as some status bits might not be
1797 * signalled in iir */
91d14251 1798 valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);
7ce4d1f2
VS
1799
1800 /*
1801 * VLV_IIR is single buffered, and reflects the level
1802 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1803 */
1804 if (iir)
1805 I915_WRITE(VLV_IIR, iir);
4a0a0202 1806
a5e485a9 1807 I915_WRITE(VLV_IER, ier);
4a0a0202
VS
1808 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
1809 POSTING_READ(VLV_MASTER_IER);
1ae3c34c 1810
52894874 1811 if (gt_iir)
261e40b8 1812 snb_gt_irq_handler(dev_priv, gt_iir);
52894874
VS
1813 if (pm_iir)
1814 gen6_rps_irq_handler(dev_priv, pm_iir);
1815
1ae3c34c 1816 if (hotplug_status)
91d14251 1817 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
2ecb8ca4 1818
91d14251 1819 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1e1cace9 1820 } while (0);
7e231dbe 1821
1f814dac
ID
1822 enable_rpm_wakeref_asserts(dev_priv);
1823
7e231dbe
JB
1824 return ret;
1825}
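/*
 * Note the ordering above: all three IIRs (GT, PM, VLV) are acked while
 * VLV_MASTER_IER and VLV_IER are zeroed, and the per-domain handlers run
 * only after both are restored. Since every latched IIR bit has already
 * been cleared by then, an event arriving mid-processing produces a
 * fresh 0->1 edge on 'x' in the theory comment, so no interrupt is lost.
 */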
1826
43f328d7
VS
1827static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1828{
45a83f84 1829 struct drm_device *dev = arg;
fac5e23e 1830 struct drm_i915_private *dev_priv = to_i915(dev);
43f328d7 1831 irqreturn_t ret = IRQ_NONE;
43f328d7 1832
2dd2a883
ID
1833 if (!intel_irqs_enabled(dev_priv))
1834 return IRQ_NONE;
1835
1f814dac
ID
1836 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
1837 disable_rpm_wakeref_asserts(dev_priv);
1838
579de73b 1839 do {
6e814800 1840 u32 master_ctl, iir;
e30e251a 1841 u32 gt_iir[4] = {};
2ecb8ca4 1842 u32 pipe_stats[I915_MAX_PIPES] = {};
1ae3c34c 1843 u32 hotplug_status = 0;
a5e485a9
VS
1844 u32 ier = 0;
1845
8e5fd599
VS
1846 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1847 iir = I915_READ(VLV_IIR);
43f328d7 1848
8e5fd599
VS
1849 if (master_ctl == 0 && iir == 0)
1850 break;
43f328d7 1851
27b6c122
OM
1852 ret = IRQ_HANDLED;
1853
a5e485a9
VS
1854 /*
1855 * Theory on interrupt generation, based on empirical evidence:
1856 *
1857 * x = ((VLV_IIR & VLV_IER) ||
1858 * ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
1859 * (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
1860 *
1861 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1862 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
1863 * guarantee the CPU interrupt will be raised again even if we
1864 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
1865 * bits this time around.
1866 */
8e5fd599 1867 I915_WRITE(GEN8_MASTER_IRQ, 0);
a5e485a9
VS
1868 ier = I915_READ(VLV_IER);
1869 I915_WRITE(VLV_IER, 0);
43f328d7 1870
e30e251a 1871 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
43f328d7 1872
7ce4d1f2 1873 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1ae3c34c 1874 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
7ce4d1f2 1875
27b6c122
OM
1876 /* Call regardless, as some status bits might not be
1877 * signalled in iir */
91d14251 1878 valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);
43f328d7 1879
7ce4d1f2
VS
1880 /*
1881 * VLV_IIR is single buffered, and reflects the level
1882 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1883 */
1884 if (iir)
1885 I915_WRITE(VLV_IIR, iir);
1886
a5e485a9 1887 I915_WRITE(VLV_IER, ier);
e5328c43 1888 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
8e5fd599 1889 POSTING_READ(GEN8_MASTER_IRQ);
1ae3c34c 1890
e30e251a
VS
1891 gen8_gt_irq_handler(dev_priv, gt_iir);
1892
1ae3c34c 1893 if (hotplug_status)
91d14251 1894 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
2ecb8ca4 1895
91d14251 1896 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
579de73b 1897 } while (0);
3278f67f 1898
1f814dac
ID
1899 enable_rpm_wakeref_asserts(dev_priv);
1900
43f328d7
VS
1901 return ret;
1902}
1903
91d14251
TU
1904static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1905 u32 hotplug_trigger,
40e56410
VS
1906 const u32 hpd[HPD_NUM_PINS])
1907{
40e56410
VS
1908 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1909
6a39d7c9
JN
1910 /*
1911 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
1912 * unless we touch the hotplug register, even if hotplug_trigger is
1913 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
1914 * errors.
1915 */
40e56410 1916 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
6a39d7c9
JN
1917 if (!hotplug_trigger) {
1918 u32 mask = PORTA_HOTPLUG_STATUS_MASK |
1919 PORTD_HOTPLUG_STATUS_MASK |
1920 PORTC_HOTPLUG_STATUS_MASK |
1921 PORTB_HOTPLUG_STATUS_MASK;
1922 dig_hotplug_reg &= ~mask;
1923 }
1924
40e56410 1925 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
6a39d7c9
JN
1926 if (!hotplug_trigger)
1927 return;
40e56410
VS
1928
1929 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1930 dig_hotplug_reg, hpd,
1931 pch_port_hotplug_long_detect);
1932
91d14251 1933 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
40e56410
VS
1934}
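/*
 * The write-back of dig_hotplug_reg doubles as the ack here: the latched
 * status bits in PCH_PORT_HOTPLUG appear to be write-1-to-clear, so
 * reading the register and writing the same value straight back clears
 * exactly the pulses that intel_get_hpd_pins() is about to decode.
 */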
1935
91d14251 1936static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
776ad806 1937{
9db4a9c7 1938 int pipe;
b543fb04 1939 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
13cf5504 1940
91d14251 1941 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
91d131d2 1942
cfc33bf7
VS
1943 if (pch_iir & SDE_AUDIO_POWER_MASK) {
1944 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1945 SDE_AUDIO_POWER_SHIFT);
776ad806 1946 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
cfc33bf7
VS
1947 port_name(port));
1948 }
776ad806 1949
ce99c256 1950 if (pch_iir & SDE_AUX_MASK)
91d14251 1951 dp_aux_irq_handler(dev_priv);
ce99c256 1952
776ad806 1953 if (pch_iir & SDE_GMBUS)
91d14251 1954 gmbus_irq_handler(dev_priv);
776ad806
JB
1955
1956 if (pch_iir & SDE_AUDIO_HDCP_MASK)
1957 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1958
1959 if (pch_iir & SDE_AUDIO_TRANS_MASK)
1960 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1961
1962 if (pch_iir & SDE_POISON)
1963 DRM_ERROR("PCH poison interrupt\n");
1964
9db4a9c7 1965 if (pch_iir & SDE_FDI_MASK)
055e393f 1966 for_each_pipe(dev_priv, pipe)
9db4a9c7
JB
1967 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
1968 pipe_name(pipe),
1969 I915_READ(FDI_RX_IIR(pipe)));
776ad806
JB
1970
1971 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1972 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1973
1974 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1975 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1976
776ad806 1977 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1f7247c0 1978 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
8664281b
PZ
1979
1980 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1f7247c0 1981 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
8664281b
PZ
1982}
1983
91d14251 1984static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
8664281b 1985{
8664281b 1986 u32 err_int = I915_READ(GEN7_ERR_INT);
5a69b89f 1987 enum pipe pipe;
8664281b 1988
de032bf4
PZ
1989 if (err_int & ERR_INT_POISON)
1990 DRM_ERROR("Poison interrupt\n");
1991
055e393f 1992 for_each_pipe(dev_priv, pipe) {
1f7247c0
DV
1993 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
1994 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
8bf1e9f1 1995
5a69b89f 1996 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
91d14251
TU
1997 if (IS_IVYBRIDGE(dev_priv))
1998 ivb_pipe_crc_irq_handler(dev_priv, pipe);
5a69b89f 1999 else
91d14251 2000 hsw_pipe_crc_irq_handler(dev_priv, pipe);
5a69b89f
DV
2001 }
2002 }
8bf1e9f1 2003
8664281b
PZ
2004 I915_WRITE(GEN7_ERR_INT, err_int);
2005}
2006
91d14251 2007static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
8664281b 2008{
8664281b
PZ
2009 u32 serr_int = I915_READ(SERR_INT);
2010
de032bf4
PZ
2011 if (serr_int & SERR_INT_POISON)
2012 DRM_ERROR("PCH poison interrupt\n");
2013
8664281b 2014 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
1f7247c0 2015 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
8664281b
PZ
2016
2017 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
1f7247c0 2018 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
8664281b
PZ
2019
2020 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
1f7247c0 2021 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
8664281b
PZ
2022
2023 I915_WRITE(SERR_INT, serr_int);
776ad806
JB
2024}
2025
91d14251 2026static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
23e81d69 2027{
23e81d69 2028 int pipe;
6dbf30ce 2029 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
13cf5504 2030
91d14251 2031 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);
91d131d2 2032
cfc33bf7
VS
2033 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
2034 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
2035 SDE_AUDIO_POWER_SHIFT_CPT);
2036 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2037 port_name(port));
2038 }
23e81d69
AJ
2039
2040 if (pch_iir & SDE_AUX_MASK_CPT)
91d14251 2041 dp_aux_irq_handler(dev_priv);
23e81d69
AJ
2042
2043 if (pch_iir & SDE_GMBUS_CPT)
91d14251 2044 gmbus_irq_handler(dev_priv);
23e81d69
AJ
2045
2046 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
2047 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
2048
2049 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
2050 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
2051
2052 if (pch_iir & SDE_FDI_MASK_CPT)
055e393f 2053 for_each_pipe(dev_priv, pipe)
23e81d69
AJ
2054 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
2055 pipe_name(pipe),
2056 I915_READ(FDI_RX_IIR(pipe)));
8664281b
PZ
2057
2058 if (pch_iir & SDE_ERROR_CPT)
91d14251 2059 cpt_serr_int_handler(dev_priv);
23e81d69
AJ
2060}
2061
91d14251 2062static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
6dbf30ce 2063{
6dbf30ce
VS
2064 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
2065 ~SDE_PORTE_HOTPLUG_SPT;
2066 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
2067 u32 pin_mask = 0, long_mask = 0;
2068
2069 if (hotplug_trigger) {
2070 u32 dig_hotplug_reg;
2071
2072 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2073 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2074
2075 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2076 dig_hotplug_reg, hpd_spt,
74c0b395 2077 spt_port_hotplug_long_detect);
6dbf30ce
VS
2078 }
2079
2080 if (hotplug2_trigger) {
2081 u32 dig_hotplug_reg;
2082
2083 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
2084 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
2085
2086 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
2087 dig_hotplug_reg, hpd_spt,
2088 spt_port_hotplug2_long_detect);
2089 }
2090
2091 if (pin_mask)
91d14251 2092 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
6dbf30ce
VS
2093
2094 if (pch_iir & SDE_GMBUS_CPT)
91d14251 2095 gmbus_irq_handler(dev_priv);
6dbf30ce
VS
2096}
2097
91d14251
TU
2098static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
2099 u32 hotplug_trigger,
40e56410
VS
2100 const u32 hpd[HPD_NUM_PINS])
2101{
40e56410
VS
2102 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2103
2104 dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
2105 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
2106
2107 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2108 dig_hotplug_reg, hpd,
2109 ilk_port_hotplug_long_detect);
2110
91d14251 2111 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
40e56410
VS
2112}
2113
91d14251
TU
2114static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
2115 u32 de_iir)
c008bc6e 2116{
40da17c2 2117 enum pipe pipe;
e4ce95aa
VS
2118 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
2119
40e56410 2120 if (hotplug_trigger)
91d14251 2121 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);
c008bc6e
PZ
2122
2123 if (de_iir & DE_AUX_CHANNEL_A)
91d14251 2124 dp_aux_irq_handler(dev_priv);
c008bc6e
PZ
2125
2126 if (de_iir & DE_GSE)
91d14251 2127 intel_opregion_asle_intr(dev_priv);
c008bc6e 2128
c008bc6e
PZ
2129 if (de_iir & DE_POISON)
2130 DRM_ERROR("Poison interrupt\n");
2131
055e393f 2132 for_each_pipe(dev_priv, pipe) {
5a21b665
DV
2133 if (de_iir & DE_PIPE_VBLANK(pipe) &&
2134 intel_pipe_handle_vblank(dev_priv, pipe))
2135 intel_check_page_flip(dev_priv, pipe);
5b3a856b 2136
40da17c2 2137 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
1f7247c0 2138 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
5b3a856b 2139
40da17c2 2140 if (de_iir & DE_PIPE_CRC_DONE(pipe))
91d14251 2141 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
c008bc6e 2142
40da17c2 2143 /* plane/pipes map 1:1 on ilk+ */
5251f04e 2144 if (de_iir & DE_PLANE_FLIP_DONE(pipe))
51cbaf01 2145 intel_finish_page_flip_cs(dev_priv, pipe);
c008bc6e
PZ
2146 }
2147
2148 /* check event from PCH */
2149 if (de_iir & DE_PCH_EVENT) {
2150 u32 pch_iir = I915_READ(SDEIIR);
2151
91d14251
TU
2152 if (HAS_PCH_CPT(dev_priv))
2153 cpt_irq_handler(dev_priv, pch_iir);
c008bc6e 2154 else
91d14251 2155 ibx_irq_handler(dev_priv, pch_iir);
c008bc6e
PZ
2156
2157 /* should clear PCH hotplug event before clear CPU irq */
2158 I915_WRITE(SDEIIR, pch_iir);
2159 }
2160
91d14251
TU
2161 if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT)
2162 ironlake_rps_change_irq_handler(dev_priv);
c008bc6e
PZ
2163}
2164
91d14251
TU
2165static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
2166 u32 de_iir)
9719fb98 2167{
07d27e20 2168 enum pipe pipe;
23bb4cb5
VS
2169 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2170
40e56410 2171 if (hotplug_trigger)
91d14251 2172 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);
9719fb98
PZ
2173
2174 if (de_iir & DE_ERR_INT_IVB)
91d14251 2175 ivb_err_int_handler(dev_priv);
9719fb98
PZ
2176
2177 if (de_iir & DE_AUX_CHANNEL_A_IVB)
91d14251 2178 dp_aux_irq_handler(dev_priv);
9719fb98
PZ
2179
2180 if (de_iir & DE_GSE_IVB)
91d14251 2181 intel_opregion_asle_intr(dev_priv);
9719fb98 2182
055e393f 2183 for_each_pipe(dev_priv, pipe) {
5a21b665
DV
2184 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
2185 intel_pipe_handle_vblank(dev_priv, pipe))
2186 intel_check_page_flip(dev_priv, pipe);
40da17c2
DV
2187
2188 /* plane/pipes map 1:1 on ilk+ */
5251f04e 2189 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
51cbaf01 2190 intel_finish_page_flip_cs(dev_priv, pipe);
9719fb98
PZ
2191 }
2192
2193 /* check event from PCH */
91d14251 2194 if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
9719fb98
PZ
2195 u32 pch_iir = I915_READ(SDEIIR);
2196
91d14251 2197 cpt_irq_handler(dev_priv, pch_iir);
9719fb98
PZ
2198
2199 /* clear PCH hotplug event before clear CPU irq */
2200 I915_WRITE(SDEIIR, pch_iir);
2201 }
2202}
2203
72c90f62
OM
2204/*
2205 * To handle irqs with the minimum potential races with fresh interrupts, we:
2206 * 1 - Disable Master Interrupt Control.
2207 * 2 - Find the source(s) of the interrupt.
2208 * 3 - Clear the Interrupt Identity bits (IIR).
2209 * 4 - Process the interrupt(s) that had bits set in the IIRs.
2210 * 5 - Re-enable Master Interrupt Control.
2211 */
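/*
 * In miniature, every handler below follows that recipe (a sketch only;
 * the register names stand in for the per-platform ones):
 *
 *	master = I915_READ(MASTER_CTL);
 *	I915_WRITE(MASTER_CTL, 0);		// 1: disable
 *	iir = I915_READ(IIR);			// 2: find source(s)
 *	I915_WRITE(IIR, iir);			// 3: clear IIR
 *	handle(iir);				// 4: process
 *	I915_WRITE(MASTER_CTL, MASTER_ENABLE);	// 5: re-enable
 */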
f1af8fc1 2212static irqreturn_t ironlake_irq_handler(int irq, void *arg)
b1f14ad0 2213{
45a83f84 2214 struct drm_device *dev = arg;
fac5e23e 2215 struct drm_i915_private *dev_priv = to_i915(dev);
f1af8fc1 2216 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
0e43406b 2217 irqreturn_t ret = IRQ_NONE;
b1f14ad0 2218
2dd2a883
ID
2219 if (!intel_irqs_enabled(dev_priv))
2220 return IRQ_NONE;
2221
1f814dac
ID
2222 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2223 disable_rpm_wakeref_asserts(dev_priv);
2224
b1f14ad0
JB
2225 /* disable master interrupt before clearing iir */
2226 de_ier = I915_READ(DEIER);
2227 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
23a78516 2228 POSTING_READ(DEIER);
b1f14ad0 2229
44498aea
PZ
2230 /* Disable south interrupts. We'll only write to SDEIIR once, so further
2231 * interrupts will be stored on its back queue, and then we'll be
2232 * able to process them after we restore SDEIER (as soon as we restore
2233 * it, we'll get an interrupt if SDEIIR still has something to process
2234 * due to its back queue). */
91d14251 2235 if (!HAS_PCH_NOP(dev_priv)) {
ab5c608b
BW
2236 sde_ier = I915_READ(SDEIER);
2237 I915_WRITE(SDEIER, 0);
2238 POSTING_READ(SDEIER);
2239 }
44498aea 2240
72c90f62
OM
2241 /* Find, clear, then process each source of interrupt */
2242
b1f14ad0 2243 gt_iir = I915_READ(GTIIR);
0e43406b 2244 if (gt_iir) {
72c90f62
OM
2245 I915_WRITE(GTIIR, gt_iir);
2246 ret = IRQ_HANDLED;
91d14251 2247 if (INTEL_GEN(dev_priv) >= 6)
261e40b8 2248 snb_gt_irq_handler(dev_priv, gt_iir);
d8fc8a47 2249 else
261e40b8 2250 ilk_gt_irq_handler(dev_priv, gt_iir);
b1f14ad0
JB
2251 }
2252
0e43406b
CW
2253 de_iir = I915_READ(DEIIR);
2254 if (de_iir) {
72c90f62
OM
2255 I915_WRITE(DEIIR, de_iir);
2256 ret = IRQ_HANDLED;
91d14251
TU
2257 if (INTEL_GEN(dev_priv) >= 7)
2258 ivb_display_irq_handler(dev_priv, de_iir);
f1af8fc1 2259 else
91d14251 2260 ilk_display_irq_handler(dev_priv, de_iir);
b1f14ad0
JB
2261 }
2262
91d14251 2263 if (INTEL_GEN(dev_priv) >= 6) {
f1af8fc1
PZ
2264 u32 pm_iir = I915_READ(GEN6_PMIIR);
2265 if (pm_iir) {
f1af8fc1
PZ
2266 I915_WRITE(GEN6_PMIIR, pm_iir);
2267 ret = IRQ_HANDLED;
72c90f62 2268 gen6_rps_irq_handler(dev_priv, pm_iir);
f1af8fc1 2269 }
0e43406b 2270 }
b1f14ad0 2271
b1f14ad0
JB
2272 I915_WRITE(DEIER, de_ier);
2273 POSTING_READ(DEIER);
91d14251 2274 if (!HAS_PCH_NOP(dev_priv)) {
ab5c608b
BW
2275 I915_WRITE(SDEIER, sde_ier);
2276 POSTING_READ(SDEIER);
2277 }
b1f14ad0 2278
1f814dac
ID
2279 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2280 enable_rpm_wakeref_asserts(dev_priv);
2281
b1f14ad0
JB
2282 return ret;
2283}
2284
91d14251
TU
2285static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
2286 u32 hotplug_trigger,
40e56410 2287 const u32 hpd[HPD_NUM_PINS])
d04a492d 2288{
cebd87a0 2289 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
d04a492d 2290
a52bb15b
VS
2291 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2292 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
d04a492d 2293
cebd87a0 2294 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
40e56410 2295 dig_hotplug_reg, hpd,
cebd87a0 2296 bxt_port_hotplug_long_detect);
40e56410 2297
91d14251 2298 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
d04a492d
SS
2299}
2300
f11a0f46
TU
2301static irqreturn_t
2302gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
abd58f01 2303{
abd58f01 2304 irqreturn_t ret = IRQ_NONE;
f11a0f46 2305 u32 iir;
c42664cc 2306 enum pipe pipe;
88e04703 2307
abd58f01 2308 if (master_ctl & GEN8_DE_MISC_IRQ) {
e32192e1
TU
2309 iir = I915_READ(GEN8_DE_MISC_IIR);
2310 if (iir) {
2311 I915_WRITE(GEN8_DE_MISC_IIR, iir);
abd58f01 2312 ret = IRQ_HANDLED;
e32192e1 2313 if (iir & GEN8_DE_MISC_GSE)
91d14251 2314 intel_opregion_asle_intr(dev_priv);
38cc46d7
OM
2315 else
2316 DRM_ERROR("Unexpected DE Misc interrupt\n");
abd58f01 2317 } else {
38cc46d7
OM
2318 DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2319 }
abd58f01
BW
2320 }
2321
6d766f02 2322 if (master_ctl & GEN8_DE_PORT_IRQ) {
e32192e1
TU
2323 iir = I915_READ(GEN8_DE_PORT_IIR);
2324 if (iir) {
2325 u32 tmp_mask;
d04a492d 2326 bool found = false;
cebd87a0 2327
e32192e1 2328 I915_WRITE(GEN8_DE_PORT_IIR, iir);
6d766f02 2329 ret = IRQ_HANDLED;
88e04703 2330
e32192e1
TU
2331 tmp_mask = GEN8_AUX_CHANNEL_A;
2332 if (INTEL_INFO(dev_priv)->gen >= 9)
2333 tmp_mask |= GEN9_AUX_CHANNEL_B |
2334 GEN9_AUX_CHANNEL_C |
2335 GEN9_AUX_CHANNEL_D;
2336
2337 if (iir & tmp_mask) {
91d14251 2338 dp_aux_irq_handler(dev_priv);
d04a492d
SS
2339 found = true;
2340 }
2341
e32192e1
TU
2342 if (IS_BROXTON(dev_priv)) {
2343 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
2344 if (tmp_mask) {
91d14251
TU
2345 bxt_hpd_irq_handler(dev_priv, tmp_mask,
2346 hpd_bxt);
e32192e1
TU
2347 found = true;
2348 }
2349 } else if (IS_BROADWELL(dev_priv)) {
2350 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
2351 if (tmp_mask) {
91d14251
TU
2352 ilk_hpd_irq_handler(dev_priv,
2353 tmp_mask, hpd_bdw);
e32192e1
TU
2354 found = true;
2355 }
d04a492d
SS
2356 }
2357
91d14251
TU
2358 if (IS_BROXTON(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
2359 gmbus_irq_handler(dev_priv);
9e63743e
SS
2360 found = true;
2361 }
2362
d04a492d 2363 if (!found)
38cc46d7 2364 DRM_ERROR("Unexpected DE Port interrupt\n");
6d766f02 2365 } else {
38cc46d7
OM
2366 DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2367 }
6d766f02
DV
2368 }
2369
055e393f 2370 for_each_pipe(dev_priv, pipe) {
e32192e1 2371 u32 flip_done, fault_errors;
abd58f01 2372
c42664cc
DV
2373 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2374 continue;
abd58f01 2375
e32192e1
TU
2376 iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2377 if (!iir) {
2378 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2379 continue;
2380 }
770de83d 2381
e32192e1
TU
2382 ret = IRQ_HANDLED;
2383 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
38cc46d7 2384
5a21b665
DV
2385 if (iir & GEN8_PIPE_VBLANK &&
2386 intel_pipe_handle_vblank(dev_priv, pipe))
2387 intel_check_page_flip(dev_priv, pipe);
770de83d 2388
e32192e1
TU
2389 flip_done = iir;
2390 if (INTEL_INFO(dev_priv)->gen >= 9)
2391 flip_done &= GEN9_PIPE_PLANE1_FLIP_DONE;
2392 else
2393 flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE;
38cc46d7 2394
5251f04e 2395 if (flip_done)
51cbaf01 2396 intel_finish_page_flip_cs(dev_priv, pipe);
38cc46d7 2397
e32192e1 2398 if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
91d14251 2399 hsw_pipe_crc_irq_handler(dev_priv, pipe);
38cc46d7 2400
e32192e1
TU
2401 if (iir & GEN8_PIPE_FIFO_UNDERRUN)
2402 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
770de83d 2403
e32192e1
TU
2404 fault_errors = iir;
2405 if (INTEL_INFO(dev_priv)->gen >= 9)
2406 fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2407 else
2408 fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
770de83d 2409
e32192e1
TU
2410 if (fault_errors)
2411 DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
2412 pipe_name(pipe),
2413 fault_errors);
abd58f01
BW
2414 }
2415
91d14251 2416 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
266ea3d9 2417 master_ctl & GEN8_DE_PCH_IRQ) {
92d03a80
DV
2418 /*
2419 * FIXME(BDW): Assume for now that the new interrupt handling
2420 * scheme also closed the SDE interrupt handling race we've seen
2421 * on older pch-split platforms. But this needs testing.
2422 */
e32192e1
TU
2423 iir = I915_READ(SDEIIR);
2424 if (iir) {
2425 I915_WRITE(SDEIIR, iir);
92d03a80 2426 ret = IRQ_HANDLED;
6dbf30ce
VS
2427
2428 if (HAS_PCH_SPT(dev_priv))
91d14251 2429 spt_irq_handler(dev_priv, iir);
6dbf30ce 2430 else
91d14251 2431 cpt_irq_handler(dev_priv, iir);
2dfb0b81
JN
2432 } else {
2433 /*
2434 * Like on previous PCH there seems to be something
2435 * fishy going on with forwarding PCH interrupts.
2436 */
2437 DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
2438 }
92d03a80
DV
2439 }
2440
f11a0f46
TU
2441 return ret;
2442}
2443
2444static irqreturn_t gen8_irq_handler(int irq, void *arg)
2445{
2446 struct drm_device *dev = arg;
fac5e23e 2447 struct drm_i915_private *dev_priv = to_i915(dev);
f11a0f46 2448 u32 master_ctl;
e30e251a 2449 u32 gt_iir[4] = {};
f11a0f46
TU
2450 irqreturn_t ret;
2451
2452 if (!intel_irqs_enabled(dev_priv))
2453 return IRQ_NONE;
2454
2455 master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
2456 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2457 if (!master_ctl)
2458 return IRQ_NONE;
2459
2460 I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
2461
2462 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2463 disable_rpm_wakeref_asserts(dev_priv);
2464
2465 /* Find, clear, then process each source of interrupt */
e30e251a
VS
2466 ret = gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
2467 gen8_gt_irq_handler(dev_priv, gt_iir);
f11a0f46
TU
2468 ret |= gen8_de_irq_handler(dev_priv, master_ctl);
2469
cb0d205e
CW
2470 I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2471 POSTING_READ_FW(GEN8_MASTER_IRQ);
abd58f01 2472
1f814dac
ID
2473 enable_rpm_wakeref_asserts(dev_priv);
2474
abd58f01
BW
2475 return ret;
2476}
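/*
 * The _FW accessors used for GEN8_MASTER_IRQ above skip the forcewake
 * bookkeeping done by plain I915_READ()/I915_WRITE(); that looks safe
 * here on the assumption that the master IRQ register sits outside the
 * forcewake wells, and it keeps the hot irq path as short as possible.
 */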
2477
1f15b76f 2478static void i915_error_wake_up(struct drm_i915_private *dev_priv)
17e1df07 2479{
17e1df07
DV
2480 /*
2481 * Notify all waiters for GPU completion events that reset state has
2482 * been changed, and that they need to restart their wait after
2483 * checking for potential errors (and bail out to drop locks if there is
2484 * a gpu reset pending so that i915_error_work_func can acquire them).
2485 */
2486
2487 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
1f15b76f 2488 wake_up_all(&dev_priv->gpu_error.wait_queue);
17e1df07
DV
2489
2490 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2491 wake_up_all(&dev_priv->pending_flip_queue);
17e1df07
DV
2492}
2493
8a905236 2494/**
b8d24a06 2495 * i915_reset_and_wakeup - do process context error handling work
14bb2c11 2496 * @dev_priv: i915 device private
8a905236
JB
2497 *
2498 * Fire an error uevent so userspace can see that a hang or error
2499 * was detected.
2500 */
c033666a 2501static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
8a905236 2502{
c033666a 2503 struct kobject *kobj = &dev_priv->dev->primary->kdev->kobj;
cce723ed
BW
2504 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2505 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2506 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
17e1df07 2507 int ret;
8a905236 2508
c033666a 2509 kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
f316a42c 2510
7db0ba24
DV
2511 /*
2512 * Note that there's only one work item which does gpu resets, so we
2513 * need not worry about concurrent gpu resets potentially incrementing
2514 * error->reset_counter twice. We only need to take care of another
2515 * racing irq/hangcheck declaring the gpu dead for a second time. A
2516 * quick check for that is good enough: schedule_work ensures the
2517 * correct ordering between hang detection and this work item, and since
2518 * the reset in-progress bit is only ever set by code outside of this
2519 * work we don't need to worry about any other races.
2520 */
d98c52cf 2521 if (i915_reset_in_progress(&dev_priv->gpu_error)) {
f803aa55 2522 DRM_DEBUG_DRIVER("resetting chip\n");
c033666a 2523 kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
1f83fee0 2524
f454c694
ID
2525 /*
2526 * In most cases it's guaranteed that we get here with an RPM
2527 * reference held, for example because there is a pending GPU
2528 * request that won't finish until the reset is done. This
2529 * isn't the case at least when we get here by doing a
2530 * simulated reset via debugfs, so get an RPM reference.
2531 */
2532 intel_runtime_pm_get(dev_priv);
7514747d 2533
c033666a 2534 intel_prepare_reset(dev_priv);
7514747d 2535
17e1df07
DV
2536 /*
2537 * All state reset _must_ be completed before we update the
2538 * reset counter, for otherwise waiters might miss the reset
2539 * pending state and not properly drop locks, resulting in
2540 * deadlocks with the reset work.
2541 */
c033666a 2542 ret = i915_reset(dev_priv);
f69061be 2543
c033666a 2544 intel_finish_reset(dev_priv);
17e1df07 2545
f454c694
ID
2546 intel_runtime_pm_put(dev_priv);
2547
d98c52cf 2548 if (ret == 0)
c033666a 2549 kobject_uevent_env(kobj,
f69061be 2550 KOBJ_CHANGE, reset_done_event);
1f83fee0 2551
17e1df07
DV
2552 /*
2553 * Note: The wake_up also serves as a memory barrier so that
2554 * waiters see the updated value of the reset counter atomic_t.
2555 */
1f15b76f 2556 wake_up_all(&dev_priv->gpu_error.reset_queue);
f316a42c 2557 }
8a905236
JB
2558}
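/*
 * Userspace therefore sees up to three uevents per hang:
 * I915_ERROR_UEVENT=1 unconditionally, I915_RESET_UEVENT=1 just before
 * the reset is attempted, and I915_ERROR_UEVENT=0 only when i915_reset()
 * succeeded; a missing "=0" event signals a failed reset.
 */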
2559
c033666a 2560static void i915_report_and_clear_eir(struct drm_i915_private *dev_priv)
8a905236 2561{
bd9854f9 2562 uint32_t instdone[I915_NUM_INSTDONE_REG];
8a905236 2563 u32 eir = I915_READ(EIR);
050ee91f 2564 int pipe, i;
8a905236 2565
35aed2e6
CW
2566 if (!eir)
2567 return;
8a905236 2568
a70491cc 2569 pr_err("render error detected, EIR: 0x%08x\n", eir);
8a905236 2570
c033666a 2571 i915_get_extra_instdone(dev_priv, instdone);
bd9854f9 2572
c033666a 2573 if (IS_G4X(dev_priv)) {
8a905236
JB
2574 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2575 u32 ipeir = I915_READ(IPEIR_I965);
2576
a70491cc
JP
2577 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2578 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
050ee91f
BW
2579 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2580 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
a70491cc 2581 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
a70491cc 2582 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
8a905236 2583 I915_WRITE(IPEIR_I965, ipeir);
3143a2bf 2584 POSTING_READ(IPEIR_I965);
8a905236
JB
2585 }
2586 if (eir & GM45_ERROR_PAGE_TABLE) {
2587 u32 pgtbl_err = I915_READ(PGTBL_ER);
a70491cc
JP
2588 pr_err("page table error\n");
2589 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
8a905236 2590 I915_WRITE(PGTBL_ER, pgtbl_err);
3143a2bf 2591 POSTING_READ(PGTBL_ER);
8a905236
JB
2592 }
2593 }
2594
c033666a 2595 if (!IS_GEN2(dev_priv)) {
8a905236
JB
2596 if (eir & I915_ERROR_PAGE_TABLE) {
2597 u32 pgtbl_err = I915_READ(PGTBL_ER);
a70491cc
JP
2598 pr_err("page table error\n");
2599 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
8a905236 2600 I915_WRITE(PGTBL_ER, pgtbl_err);
3143a2bf 2601 POSTING_READ(PGTBL_ER);
8a905236
JB
2602 }
2603 }
2604
2605 if (eir & I915_ERROR_MEMORY_REFRESH) {
a70491cc 2606 pr_err("memory refresh error:\n");
055e393f 2607 for_each_pipe(dev_priv, pipe)
a70491cc 2608 pr_err("pipe %c stat: 0x%08x\n",
9db4a9c7 2609 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
8a905236
JB
2610 /* pipestat has already been acked */
2611 }
2612 if (eir & I915_ERROR_INSTRUCTION) {
a70491cc
JP
2613 pr_err("instruction error\n");
2614 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
050ee91f
BW
2615 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2616 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
c033666a 2617 if (INTEL_GEN(dev_priv) < 4) {
8a905236
JB
2618 u32 ipeir = I915_READ(IPEIR);
2619
a70491cc
JP
2620 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
2621 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
a70491cc 2622 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
8a905236 2623 I915_WRITE(IPEIR, ipeir);
3143a2bf 2624 POSTING_READ(IPEIR);
8a905236
JB
2625 } else {
2626 u32 ipeir = I915_READ(IPEIR_I965);
2627
a70491cc
JP
2628 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2629 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
a70491cc 2630 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
a70491cc 2631 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
8a905236 2632 I915_WRITE(IPEIR_I965, ipeir);
3143a2bf 2633 POSTING_READ(IPEIR_I965);
8a905236
JB
2634 }
2635 }
2636
2637 I915_WRITE(EIR, eir);
3143a2bf 2638 POSTING_READ(EIR);
8a905236
JB
2639 eir = I915_READ(EIR);
2640 if (eir) {
2641 /*
2642 * some errors might have become stuck,
2643 * mask them.
2644 */
2645 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2646 I915_WRITE(EMR, I915_READ(EMR) | eir);
2647 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2648 }
35aed2e6
CW
2649}
2650
2651/**
b8d24a06 2652 * i915_handle_error - handle a gpu error
14bb2c11 2653 * @dev_priv: i915 device private
14b730fc 2654 * @engine_mask: mask representing engines that are hung
14bb2c11 2655 * @fmt: Error message format string
aafd8581 2656 * Do some basic checking of register state at error time and
35aed2e6
CW
2657 * dump it to the syslog. Also call i915_capture_error_state() to make
2658 * sure we get a record and make it available in debugfs. Fire a uevent
2659 * so userspace knows something bad happened (should trigger collection
2660 * of a ring dump etc.).
35aed2e6 2661 */
c033666a
CW
2662void i915_handle_error(struct drm_i915_private *dev_priv,
2663 u32 engine_mask,
58174462 2664 const char *fmt, ...)
35aed2e6 2665{
58174462
MK
2666 va_list args;
2667 char error_msg[80];
35aed2e6 2668
58174462
MK
2669 va_start(args, fmt);
2670 vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2671 va_end(args);
2672
c033666a
CW
2673 i915_capture_error_state(dev_priv, engine_mask, error_msg);
2674 i915_report_and_clear_eir(dev_priv);
8a905236 2675
14b730fc 2676 if (engine_mask) {
805de8f4 2677 atomic_or(I915_RESET_IN_PROGRESS_FLAG,
f69061be 2678 &dev_priv->gpu_error.reset_counter);
ba1234d1 2679
11ed50ec 2680 /*
b8d24a06
MK
2681 * Wakeup waiting processes so that the reset function
2682 * i915_reset_and_wakeup doesn't deadlock trying to grab
2683 * various locks. By bumping the reset counter first, the woken
17e1df07
DV
2684 * processes will see a reset in progress and back off,
2685 * releasing their locks and then wait for the reset completion.
2686 * We must do this for _all_ gpu waiters that might hold locks
2687 * that the reset work needs to acquire.
2688 *
2689 * Note: The wake_up serves as the required memory barrier to
2690 * ensure that the waiters see the updated value of the reset
2691 * counter atomic_t.
11ed50ec 2692 */
1f15b76f 2693 i915_error_wake_up(dev_priv);
11ed50ec
BG
2694 }
2695
c033666a 2696 i915_reset_and_wakeup(dev_priv);
8a905236
JB
2697}
2698
42f52ef8
KP
2699/* Called from drm generic code, passed 'crtc' which
2700 * we use as a pipe index
2701 */
88e72717 2702static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
0a3e67a4 2703{
fac5e23e 2704 struct drm_i915_private *dev_priv = to_i915(dev);
e9d21d7f 2705 unsigned long irqflags;
71e0ffa5 2706
1ec14ad3 2707 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
f796cf8f 2708 if (INTEL_INFO(dev)->gen >= 4)
7c463586 2709 i915_enable_pipestat(dev_priv, pipe,
755e9019 2710 PIPE_START_VBLANK_INTERRUPT_STATUS);
e9d21d7f 2711 else
7c463586 2712 i915_enable_pipestat(dev_priv, pipe,
755e9019 2713 PIPE_VBLANK_INTERRUPT_STATUS);
1ec14ad3 2714 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
8692d00e 2715
0a3e67a4
JB
2716 return 0;
2717}
2718
88e72717 2719static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
f796cf8f 2720{
fac5e23e 2721 struct drm_i915_private *dev_priv = to_i915(dev);
f796cf8f 2722 unsigned long irqflags;
b518421f 2723 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
40da17c2 2724 DE_PIPE_VBLANK(pipe);
f796cf8f 2725
f796cf8f 2726 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
fbdedaea 2727 ilk_enable_display_irq(dev_priv, bit);
b1f14ad0
JB
2728 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2729
2730 return 0;
2731}
2732
88e72717 2733static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
7e231dbe 2734{
fac5e23e 2735 struct drm_i915_private *dev_priv = to_i915(dev);
7e231dbe 2736 unsigned long irqflags;
7e231dbe 2737
7e231dbe 2738 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
31acc7f5 2739 i915_enable_pipestat(dev_priv, pipe,
755e9019 2740 PIPE_START_VBLANK_INTERRUPT_STATUS);
7e231dbe
JB
2741 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2742
2743 return 0;
2744}
2745
88e72717 2746static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
abd58f01 2747{
fac5e23e 2748 struct drm_i915_private *dev_priv = to_i915(dev);
abd58f01 2749 unsigned long irqflags;
abd58f01 2750
abd58f01 2751 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
013d3752 2752 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
abd58f01 2753 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
013d3752 2754
abd58f01
BW
2755 return 0;
2756}
2757
42f52ef8
KP
2758/* Called from drm generic code, passed 'crtc' which
2759 * we use as a pipe index
2760 */
88e72717 2761static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
0a3e67a4 2762{
fac5e23e 2763 struct drm_i915_private *dev_priv = to_i915(dev);
e9d21d7f 2764 unsigned long irqflags;
0a3e67a4 2765
1ec14ad3 2766 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
f796cf8f 2767 i915_disable_pipestat(dev_priv, pipe,
755e9019
ID
2768 PIPE_VBLANK_INTERRUPT_STATUS |
2769 PIPE_START_VBLANK_INTERRUPT_STATUS);
f796cf8f
JB
2770 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2771}
2772
88e72717 2773static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
f796cf8f 2774{
fac5e23e 2775 struct drm_i915_private *dev_priv = to_i915(dev);
f796cf8f 2776 unsigned long irqflags;
b518421f 2777 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
40da17c2 2778 DE_PIPE_VBLANK(pipe);
f796cf8f
JB
2779
2780 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
fbdedaea 2781 ilk_disable_display_irq(dev_priv, bit);
b1f14ad0
JB
2782 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2783}
2784
88e72717 2785static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
7e231dbe 2786{
fac5e23e 2787 struct drm_i915_private *dev_priv = to_i915(dev);
7e231dbe 2788 unsigned long irqflags;
7e231dbe
JB
2789
2790 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
31acc7f5 2791 i915_disable_pipestat(dev_priv, pipe,
755e9019 2792 PIPE_START_VBLANK_INTERRUPT_STATUS);
7e231dbe
JB
2793 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2794}
2795
88e72717 2796static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
abd58f01 2797{
fac5e23e 2798 struct drm_i915_private *dev_priv = to_i915(dev);
abd58f01 2799 unsigned long irqflags;
abd58f01 2800
abd58f01 2801 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
013d3752 2802 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
abd58f01
BW
2803 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2804}
2805
9107e9d2 2806static bool
0bc40be8 2807ring_idle(struct intel_engine_cs *engine, u32 seqno)
9107e9d2 2808{
cffa781e
CW
2809 return i915_seqno_passed(seqno,
2810 READ_ONCE(engine->last_submitted_seqno));
f65d9421
BG
2811}
2812
a028c4b0 2813static bool
31bb59cc 2814ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr)
a028c4b0 2815{
31bb59cc 2816 if (INTEL_GEN(engine->i915) >= 8) {
a6cdb93a 2817 return (ipehr >> 23) == 0x1c;
a028c4b0
DV
2818 } else {
2819 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2820 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
2821 MI_SEMAPHORE_REGISTER);
2822 }
2823}
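/*
 * The gen8 test keys off the command header: bits 28:23 of an MI command
 * hold the opcode, so (ipehr >> 23) == 0x1c matches MI_SEMAPHORE_WAIT
 * (and requires the type bits above it to be zero). Pre-gen8, the
 * per-ring MI_SEMAPHORE_SYNC field is masked off before comparing
 * against the MBOX|COMPARE|REGISTER form of the legacy wait.
 */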
2824
a4872ba6 2825static struct intel_engine_cs *
0bc40be8
TU
2826semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
2827 u64 offset)
921d42ea 2828{
c033666a 2829 struct drm_i915_private *dev_priv = engine->i915;
a4872ba6 2830 struct intel_engine_cs *signaller;
921d42ea 2831
c033666a 2832 if (INTEL_GEN(dev_priv) >= 8) {
b4ac5afc 2833 for_each_engine(signaller, dev_priv) {
0bc40be8 2834 if (engine == signaller)
a6cdb93a
RV
2835 continue;
2836
0bc40be8 2837 if (offset == signaller->semaphore.signal_ggtt[engine->id])
a6cdb93a
RV
2838 return signaller;
2839 }
921d42ea
DV
2840 } else {
2841 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
2842
b4ac5afc 2843 for_each_engine(signaller, dev_priv) {
0bc40be8 2844 if (engine == signaller)
921d42ea
DV
2845 continue;
2846
0bc40be8 2847 if (sync_bits == signaller->semaphore.mbox.wait[engine->id])
921d42ea
DV
2848 return signaller;
2849 }
2850 }
2851
a6cdb93a 2852 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
0bc40be8 2853 engine->id, ipehr, offset);
921d42ea
DV
2854
2855 return NULL;
2856}
2857
a4872ba6 2858static struct intel_engine_cs *
0bc40be8 2859semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
a24a11e6 2860{
c033666a 2861 struct drm_i915_private *dev_priv = engine->i915;
88fe429d 2862 u32 cmd, ipehr, head;
a6cdb93a
RV
2863 u64 offset = 0;
2864 int i, backwards;
a24a11e6 2865
381e8ae3
TE
2866 /*
2867 * This function does not support execlist mode - any attempt to
2868 * proceed further into this function will result in a kernel panic
2869 * when dereferencing ring->buffer, which is not set up in execlist
2870 * mode.
2871 *
2872 * The correct way of doing it would be to derive the currently
2873 * executing ring buffer from the current context, which is derived
2874 * from the currently running request. Unfortunately, to get the
2875 * current request we would have to grab the struct_mutex before doing
2876 * anything else, which would be ill-advised since some other thread
2877 * might have grabbed it already and managed to hang itself, causing
2878 * the hang checker to deadlock.
2879 *
2880 * Therefore, this function does not support execlist mode in its
2881 * current form. Just return NULL and move on.
2882 */
0bc40be8 2883 if (engine->buffer == NULL)
381e8ae3
TE
2884 return NULL;
2885
0bc40be8 2886 ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
31bb59cc 2887 if (!ipehr_is_semaphore_wait(engine, ipehr))
6274f212 2888 return NULL;
a24a11e6 2889
88fe429d
DV
2890 /*
2891 * HEAD is likely pointing to the dword after the actual command,
2892 * so scan backwards until we find the MBOX. But limit it to just 3
a6cdb93a
RV
2893 * or 4 dwords depending on the semaphore wait command size.
2894 * Note that we don't care about ACTHD here since that might
88fe429d
DV
2895 * point at a batch, and semaphores are always emitted into the
2896 * ringbuffer itself.
a24a11e6 2897 */
0bc40be8 2898 head = I915_READ_HEAD(engine) & HEAD_ADDR;
c033666a 2899 backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;
88fe429d 2900
a6cdb93a 2901 for (i = backwards; i; --i) {
88fe429d
DV
2902 /*
2903 * Be paranoid and presume the hw has gone off into the wild -
2904 * our ring is smaller than what the hardware (and hence
2905 * HEAD_ADDR) allows. Also handles wrap-around.
2906 */
0bc40be8 2907 head &= engine->buffer->size - 1;
88fe429d
DV
2908
2909 /* This here seems to blow up */
0bc40be8 2910 cmd = ioread32(engine->buffer->virtual_start + head);
a24a11e6
CW
2911 if (cmd == ipehr)
2912 break;
2913
88fe429d
DV
2914 head -= 4;
2915 }
a24a11e6 2916
88fe429d
DV
2917 if (!i)
2918 return NULL;
a24a11e6 2919
0bc40be8 2920 *seqno = ioread32(engine->buffer->virtual_start + head + 4) + 1;
c033666a 2921 if (INTEL_GEN(dev_priv) >= 8) {
0bc40be8 2922 offset = ioread32(engine->buffer->virtual_start + head + 12);
a6cdb93a 2923 offset <<= 32;
0bc40be8 2924 offset |= ioread32(engine->buffer->virtual_start + head + 8);
a6cdb93a 2925 }
0bc40be8 2926 return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
a24a11e6
CW
2927}
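/*
 * The dword layout assumed by the reads above for a gen8+ semaphore
 * wait: dword 0 is the header matched against IPEHR, dword 1 the value
 * being waited for (hence *seqno = value + 1 at head + 4), and dwords 2
 * and 3 the low and high halves of the semaphore address, which is why
 * head + 8 fills the low word of 'offset' and head + 12 the high word.
 */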
2928
0bc40be8 2929static int semaphore_passed(struct intel_engine_cs *engine)
6274f212 2930{
c033666a 2931 struct drm_i915_private *dev_priv = engine->i915;
a4872ba6 2932 struct intel_engine_cs *signaller;
a0d036b0 2933 u32 seqno;
6274f212 2934
0bc40be8 2935 engine->hangcheck.deadlock++;
6274f212 2936
0bc40be8 2937 signaller = semaphore_waits_for(engine, &seqno);
4be17381
CW
2938 if (signaller == NULL)
2939 return -1;
2940
2941 /* Prevent pathological recursion due to driver bugs */
666796da 2942 if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
6274f212
CW
2943 return -1;
2944
1b7744e7 2945 if (i915_seqno_passed(intel_engine_get_seqno(signaller), seqno))
4be17381
CW
2946 return 1;
2947
a0d036b0
CW
2948 /* cursory check for an unkickable deadlock */
2949 if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
2950 semaphore_passed(signaller) < 0)
4be17381
CW
2951 return -1;
2952
2953 return 0;
6274f212
CW
2954}
2955
2956static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2957{
e2f80391 2958 struct intel_engine_cs *engine;
6274f212 2959
b4ac5afc 2960 for_each_engine(engine, dev_priv)
e2f80391 2961 engine->hangcheck.deadlock = 0;
6274f212
CW
2962}
2963
0bc40be8 2964static bool subunits_stuck(struct intel_engine_cs *engine)
1ec14ad3 2965{
61642ff0
MK
2966 u32 instdone[I915_NUM_INSTDONE_REG];
2967 bool stuck;
2968 int i;
2969
0bc40be8 2970 if (engine->id != RCS)
61642ff0
MK
2971 return true;
2972
c033666a 2973 i915_get_extra_instdone(engine->i915, instdone);
9107e9d2 2974
61642ff0
MK
2975 /* There might be unstable subunit states even when
2976 * actual head is not moving. Filter out the unstable ones by
2977 * accumulating the undone -> done transitions and only
2978 * consider those as progress.
2979 */
2980 stuck = true;
2981 for (i = 0; i < I915_NUM_INSTDONE_REG; i++) {
0bc40be8 2982 const u32 tmp = instdone[i] | engine->hangcheck.instdone[i];
61642ff0 2983
0bc40be8 2984 if (tmp != engine->hangcheck.instdone[i])
61642ff0
MK
2985 stuck = false;
2986
0bc40be8 2987 engine->hangcheck.instdone[i] |= tmp;
61642ff0
MK
2988 }
2989
2990 return stuck;
2991}
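/*
 * Put differently: a subunit only counts as making progress once this
 * sample shows a done bit that has not been accumulated before. Units
 * that merely repeat earlier completion patterns leave 'stuck' true;
 * any new undone -> done transition clears it.
 */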
2992
2993static enum intel_ring_hangcheck_action
0bc40be8 2994head_stuck(struct intel_engine_cs *engine, u64 acthd)
61642ff0 2995{
0bc40be8 2996 if (acthd != engine->hangcheck.acthd) {
61642ff0
MK
2997
2998 /* Clear subunit states on head movement */
0bc40be8
TU
2999 memset(engine->hangcheck.instdone, 0,
3000 sizeof(engine->hangcheck.instdone));
61642ff0 3001
24a65e62 3002 return HANGCHECK_ACTIVE;
f260fe7b 3003 }
6274f212 3004
0bc40be8 3005 if (!subunits_stuck(engine))
61642ff0
MK
3006 return HANGCHECK_ACTIVE;
3007
3008 return HANGCHECK_HUNG;
3009}
3010
3011static enum intel_ring_hangcheck_action
0bc40be8 3012ring_stuck(struct intel_engine_cs *engine, u64 acthd)
61642ff0 3013{
c033666a 3014 struct drm_i915_private *dev_priv = engine->i915;
61642ff0
MK
3015 enum intel_ring_hangcheck_action ha;
3016 u32 tmp;
3017
0bc40be8 3018 ha = head_stuck(engine, acthd);
61642ff0
MK
3019 if (ha != HANGCHECK_HUNG)
3020 return ha;
3021
c033666a 3022 if (IS_GEN2(dev_priv))
f2f4d82f 3023 return HANGCHECK_HUNG;
9107e9d2
CW
3024
3025 /* Is the chip hanging on a WAIT_FOR_EVENT?
3026 * If so we can simply poke the RB_WAIT bit
3027 * and break the hang. This should work on
3028 * all but the second generation chipsets.
3029 */
0bc40be8 3030 tmp = I915_READ_CTL(engine);
1ec14ad3 3031 if (tmp & RING_WAIT) {
c033666a 3032 i915_handle_error(dev_priv, 0,
58174462 3033 "Kicking stuck wait on %s",
0bc40be8
TU
3034 engine->name);
3035 I915_WRITE_CTL(engine, tmp);
f2f4d82f 3036 return HANGCHECK_KICK;
6274f212
CW
3037 }
3038
c033666a 3039 if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) {
0bc40be8 3040 switch (semaphore_passed(engine)) {
6274f212 3041 default:
f2f4d82f 3042 return HANGCHECK_HUNG;
6274f212 3043 case 1:
c033666a 3044 i915_handle_error(dev_priv, 0,
58174462 3045 "Kicking stuck semaphore on %s",
0bc40be8
TU
3046 engine->name);
3047 I915_WRITE_CTL(engine, tmp);
f2f4d82f 3048 return HANGCHECK_KICK;
6274f212 3049 case 0:
f2f4d82f 3050 return HANGCHECK_WAIT;
6274f212 3051 }
9107e9d2 3052 }
ed5cbb03 3053
f2f4d82f 3054 return HANGCHECK_HUNG;
ed5cbb03
MK
3055}
3056
12471ba8
CW
3057static unsigned kick_waiters(struct intel_engine_cs *engine)
3058{
c033666a 3059 struct drm_i915_private *i915 = engine->i915;
12471ba8
CW
3060 unsigned user_interrupts = READ_ONCE(engine->user_interrupts);
3061
3062 if (engine->hangcheck.user_interrupts == user_interrupts &&
3063 !test_and_set_bit(engine->id, &i915->gpu_error.missed_irq_rings)) {
688e6c72 3064 if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings))
12471ba8
CW
3065 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
3066 engine->name);
688e6c72
CW
3067
3068 intel_engine_enable_fake_irq(engine);
12471ba8
CW
3069 }
3070
3071 return user_interrupts;
3072}
737b1506 3073/*
f65d9421 3074 * This is called when the chip hasn't reported back with completed
05407ff8
MK
3075 * batchbuffers in a long time. We keep track of per-ring seqno progress and
3076 * if there is no progress, the hangcheck score for that ring is increased.
3077 * Further, acthd is inspected to see if the ring is stuck. In the stuck case
3078 * we kick the ring. If we see no progress on three subsequent calls
3079 * we assume the chip is wedged and try to fix it by resetting the chip.
f65d9421 3080 */
737b1506 3081static void i915_hangcheck_elapsed(struct work_struct *work)
f65d9421 3082{
737b1506
CW
3083 struct drm_i915_private *dev_priv =
3084 container_of(work, typeof(*dev_priv),
3085 gpu_error.hangcheck_work.work);
e2f80391 3086 struct intel_engine_cs *engine;
c3232b18 3087 enum intel_engine_id id;
05407ff8 3088 int busy_count = 0, rings_hung = 0;
666796da 3089 bool stuck[I915_NUM_ENGINES] = { 0 };
9107e9d2
CW
3090#define BUSY 1
3091#define KICK 5
3092#define HUNG 20
24a65e62 3093#define ACTIVE_DECAY 15
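/*
 * Worked example of the scoring, assuming the usual threshold of
 * HANGCHECK_SCORE_RING_HUNG (31): a ring diagnosed HANGCHECK_HUNG on two
 * consecutive samples scores 20 + 20 and is declared hung, a merely BUSY
 * ring needs dozens of samples to get there, and a single sample of real
 * progress claws back ACTIVE_DECAY (15) points.
 */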
893eead0 3094
d330a953 3095 if (!i915.enable_hangcheck)
3e0dc6b0
BW
3096 return;
3097
67d97da3
CW
3098 if (!lockless_dereference(dev_priv->gt.awake))
3099 return;
1f814dac 3100
75714940
MK
3101 /* As enabling the GPU requires fairly extensive mmio access,
3102 * periodically arm the mmio checker to see if we are triggering
3103 * any invalid access.
3104 */
3105 intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
3106
c3232b18 3107 for_each_engine_id(engine, dev_priv, id) {
688e6c72 3108 bool busy = intel_engine_has_waiter(engine);
50877445
CW
3109 u64 acthd;
3110 u32 seqno;
12471ba8 3111 unsigned user_interrupts;
05407ff8 3112
6274f212
CW
3113 semaphore_clear_deadlocks(dev_priv);
3114
c04e0f3b
CW
3115 /* We don't strictly need an irq-barrier here, as we are not
3116 * serving an interrupt request, be paranoid in case the
3117 * barrier has side-effects (such as preventing a broken
3118 * cacheline snoop) and so be sure that we can see the seqno
3119 * advance. If the seqno should stick, due to a stale
3120 * cacheline, we would erroneously declare the GPU hung.
3121 */
3122 if (engine->irq_seqno_barrier)
3123 engine->irq_seqno_barrier(engine);
3124
e2f80391 3125 acthd = intel_ring_get_active_head(engine);
1b7744e7 3126 seqno = intel_engine_get_seqno(engine);
b4519513 3127
12471ba8
CW
3128 /* Reset stuck interrupts between batch advances */
3129 user_interrupts = 0;
3130
e2f80391
TU
3131 if (engine->hangcheck.seqno == seqno) {
3132 if (ring_idle(engine, seqno)) {
3133 engine->hangcheck.action = HANGCHECK_IDLE;
05535726 3134 if (busy) {
094f9a54 3135 /* Safeguard against driver failure */
12471ba8 3136 user_interrupts = kick_waiters(engine);
e2f80391 3137 engine->hangcheck.score += BUSY;
05535726 3138 }
05407ff8 3139 } else {
6274f212
CW
3140 /* We always increment the hangcheck score
3141 * if the ring is busy and still processing
3142 * the same request, so that no single request
3143 * can run indefinitely (such as a chain of
3144 * batches). The only time we do not increment
3145 * the hangcheck score on this ring is if this
3146 * ring is in a legitimate wait for another
3147 * ring. In that case the waiting ring is a
3148 * victim and we want to be sure we catch the
3149 * right culprit. Then every time we do kick
3150 * the ring, add a small increment to the
3151 * score so that we can catch a batch that is
3152 * being repeatedly kicked and so responsible
3153 * for stalling the machine.
3154 */
e2f80391
TU
3155 engine->hangcheck.action = ring_stuck(engine,
3156 acthd);
ad8beaea 3157
e2f80391 3158 switch (engine->hangcheck.action) {
da661464 3159 case HANGCHECK_IDLE:
f2f4d82f 3160 case HANGCHECK_WAIT:
f260fe7b 3161 break;
24a65e62 3162 case HANGCHECK_ACTIVE:
e2f80391 3163 engine->hangcheck.score += BUSY;
6274f212 3164 break;
f2f4d82f 3165 case HANGCHECK_KICK:
e2f80391 3166 engine->hangcheck.score += KICK;
6274f212 3167 break;
f2f4d82f 3168 case HANGCHECK_HUNG:
e2f80391 3169 engine->hangcheck.score += HUNG;
c3232b18 3170 stuck[id] = true;
6274f212
CW
3171 break;
3172 }
05407ff8 3173 }
9107e9d2 3174 } else {
e2f80391 3175 engine->hangcheck.action = HANGCHECK_ACTIVE;
da661464 3176
9107e9d2
CW
3177 /* Gradually reduce the count so that we catch DoS
3178 * attempts across multiple batches.
3179 */
e2f80391
TU
3180 if (engine->hangcheck.score > 0)
3181 engine->hangcheck.score -= ACTIVE_DECAY;
3182 if (engine->hangcheck.score < 0)
3183 engine->hangcheck.score = 0;
f260fe7b 3184
61642ff0 3185 /* Clear head and subunit states on seqno movement */
12471ba8 3186 acthd = 0;
61642ff0 3187
e2f80391
TU
3188 memset(engine->hangcheck.instdone, 0,
3189 sizeof(engine->hangcheck.instdone));
d1e61e7f
CW
3190 }
3191
e2f80391
TU
3192 engine->hangcheck.seqno = seqno;
3193 engine->hangcheck.acthd = acthd;
12471ba8 3194 engine->hangcheck.user_interrupts = user_interrupts;
9107e9d2 3195 busy_count += busy;
893eead0 3196 }
b9201c14 3197
c3232b18 3198 for_each_engine_id(engine, dev_priv, id) {
e2f80391 3199 if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
b8d88d1d 3200 DRM_INFO("%s on %s\n",
c3232b18 3201 stuck[id] ? "stuck" : "no progress",
e2f80391 3202 engine->name);
14b730fc 3203 rings_hung |= intel_engine_flag(engine);
92cab734
MK
3204 }
3205 }
3206
67d97da3 3207 if (rings_hung)
c033666a 3208 i915_handle_error(dev_priv, rings_hung, "Engine(s) hung");
f65d9421 3209
05535726 3210 /* Reset timer in case GPU hangs without another request being added */
05407ff8 3211 if (busy_count)
c033666a 3212 i915_queue_hangcheck(dev_priv);
10cd45b6
MK
3213}
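/*
 * Worked example (illustrative only, not driver code): with the weights
 * above, and assuming HANGCHECK_SCORE_RING_HUNG is 31 as defined in
 * i915_drv.h, a ring stuck on the same seqno accumulates score like so:
 *
 *	7 x HANGCHECK_KICK : 7 * 5 = 35 >= 31 -> declared hung
 *	1 x HANGCHECK_HUNG : 20, so two further busy checks push it over
 *
 * while every seqno advance subtracts ACTIVE_DECAY (15), so a slow but
 * progressing ring never reaches the threshold.
 */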
3214
1c69eb42 3215static void ibx_irq_reset(struct drm_device *dev)
91738a95 3216{
fac5e23e 3217 struct drm_i915_private *dev_priv = to_i915(dev);
91738a95
PZ
3218
3219 if (HAS_PCH_NOP(dev))
3220 return;
3221
f86f3fb0 3222 GEN5_IRQ_RESET(SDE);
105b122e
PZ
3223
3224 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3225 I915_WRITE(SERR_INT, 0xffffffff);
622364b6 3226}
105b122e 3227
622364b6
PZ
3228/*
3229 * SDEIER is also touched by the interrupt handler to work around missed PCH
3230 * interrupts. Hence we can't update it after the interrupt handler is enabled -
3231 * instead we unconditionally enable all PCH interrupt sources here, but then
3232 * only unmask them as needed with SDEIMR.
3233 *
3234 * This function needs to be called before interrupts are enabled.
3235 */
3236static void ibx_irq_pre_postinstall(struct drm_device *dev)
3237{
fac5e23e 3238 struct drm_i915_private *dev_priv = to_i915(dev);
622364b6
PZ
3239
3240 if (HAS_PCH_NOP(dev))
3241 return;
3242
3243 WARN_ON(I915_READ(SDEIER) != 0);
91738a95
PZ
3244 I915_WRITE(SDEIER, 0xffffffff);
3245 POSTING_READ(SDEIER);
3246}
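/*
 * Illustrative sketch (an assumption, not part of the driver): because
 * ibx_irq_pre_postinstall() leaves every source enabled in SDEIER,
 * unmasking an additional PCH interrupt later is a pure SDEIMR
 * operation and never has to touch SDEIER behind the handler's back:
 */
static void __maybe_unused sde_unmask_sketch(struct drm_i915_private *dev_priv,
					     u32 bits)
{
	/* SDEIER already holds 0xffffffff; delivery is gated by SDEIMR */
	I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~bits);
	POSTING_READ(SDEIMR);
}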
3247
7c4d664e 3248static void gen5_gt_irq_reset(struct drm_device *dev)
d18ea1b5 3249{
fac5e23e 3250 struct drm_i915_private *dev_priv = to_i915(dev);
d18ea1b5 3251
f86f3fb0 3252 GEN5_IRQ_RESET(GT);
a9d356a6 3253 if (INTEL_INFO(dev)->gen >= 6)
f86f3fb0 3254 GEN5_IRQ_RESET(GEN6_PM);
d18ea1b5
DV
3255}
3256
70591a41
VS
3257static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
3258{
3259 enum pipe pipe;
3260
71b8b41d
VS
3261 if (IS_CHERRYVIEW(dev_priv))
3262 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3263 else
3264 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3265
ad22d106 3266 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
70591a41
VS
3267 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3268
ad22d106
VS
3269 for_each_pipe(dev_priv, pipe) {
3270 I915_WRITE(PIPESTAT(pipe),
3271 PIPE_FIFO_UNDERRUN_STATUS |
3272 PIPESTAT_INT_STATUS_MASK);
3273 dev_priv->pipestat_irq_mask[pipe] = 0;
3274 }
70591a41
VS
3275
3276 GEN5_IRQ_RESET(VLV_);
ad22d106 3277 dev_priv->irq_mask = ~0;
70591a41
VS
3278}
3279
8bb61306
VS
3280static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3281{
3282 u32 pipestat_mask;
9ab981f2 3283 u32 enable_mask;
8bb61306
VS
3284 enum pipe pipe;
3285
8bb61306
VS
3286 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3287 PIPE_CRC_DONE_INTERRUPT_STATUS;
3288
3289 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3290 for_each_pipe(dev_priv, pipe)
3291 i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3292
9ab981f2
VS
3293 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
3294 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3295 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
8bb61306 3296 if (IS_CHERRYVIEW(dev_priv))
9ab981f2 3297 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
6b7eafc1
VS
3298
3299 WARN_ON(dev_priv->irq_mask != ~0);
3300
9ab981f2
VS
3301 dev_priv->irq_mask = ~enable_mask;
3302
3303 GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
8bb61306
VS
3304}
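/*
 * Consistency sketch (illustrative): after the postinstall above, IER
 * holds enable_mask and dev_priv->irq_mask is its complement, so the
 * WARN_ON(dev_priv->irq_mask != ~0) catches a postinstall that was not
 * preceded by vlv_display_irq_reset(). A paranoid self-check of the
 * programmed state could read (assuming the VLV_IER register
 * definition from i915_reg.h):
 */
static void __maybe_unused vlv_display_irq_check(struct drm_i915_private *dev_priv)
{
	WARN_ON(I915_READ(VLV_IER) != ~dev_priv->irq_mask);
}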
3305
 3306/* drm_dma.h hooks */
 3307
3308static void ironlake_irq_reset(struct drm_device *dev)
3309{
fac5e23e 3310 struct drm_i915_private *dev_priv = to_i915(dev);
8bb61306
VS
3311
3312 I915_WRITE(HWSTAM, 0xffffffff);
3313
3314 GEN5_IRQ_RESET(DE);
3315 if (IS_GEN7(dev))
3316 I915_WRITE(GEN7_ERR_INT, 0xffffffff);
3317
3318 gen5_gt_irq_reset(dev);
3319
3320 ibx_irq_reset(dev);
3321}
3322
7e231dbe
JB
3323static void valleyview_irq_preinstall(struct drm_device *dev)
3324{
fac5e23e 3325 struct drm_i915_private *dev_priv = to_i915(dev);
7e231dbe 3326
34c7b8a7
VS
3327 I915_WRITE(VLV_MASTER_IER, 0);
3328 POSTING_READ(VLV_MASTER_IER);
3329
7c4d664e 3330 gen5_gt_irq_reset(dev);
7e231dbe 3331
ad22d106 3332 spin_lock_irq(&dev_priv->irq_lock);
9918271e
VS
3333 if (dev_priv->display_irqs_enabled)
3334 vlv_display_irq_reset(dev_priv);
ad22d106 3335 spin_unlock_irq(&dev_priv->irq_lock);
7e231dbe
JB
3336}
3337
d6e3cca3
DV
3338static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3339{
3340 GEN8_IRQ_RESET_NDX(GT, 0);
3341 GEN8_IRQ_RESET_NDX(GT, 1);
3342 GEN8_IRQ_RESET_NDX(GT, 2);
3343 GEN8_IRQ_RESET_NDX(GT, 3);
3344}
3345
823f6b38 3346static void gen8_irq_reset(struct drm_device *dev)
abd58f01 3347{
fac5e23e 3348 struct drm_i915_private *dev_priv = to_i915(dev);
abd58f01
BW
3349 int pipe;
3350
abd58f01
BW
3351 I915_WRITE(GEN8_MASTER_IRQ, 0);
3352 POSTING_READ(GEN8_MASTER_IRQ);
3353
d6e3cca3 3354 gen8_gt_irq_reset(dev_priv);
abd58f01 3355
055e393f 3356 for_each_pipe(dev_priv, pipe)
f458ebbc
DV
3357 if (intel_display_power_is_enabled(dev_priv,
3358 POWER_DOMAIN_PIPE(pipe)))
813bde43 3359 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
abd58f01 3360
f86f3fb0
PZ
3361 GEN5_IRQ_RESET(GEN8_DE_PORT_);
3362 GEN5_IRQ_RESET(GEN8_DE_MISC_);
3363 GEN5_IRQ_RESET(GEN8_PCU_);
abd58f01 3364
266ea3d9
SS
3365 if (HAS_PCH_SPLIT(dev))
3366 ibx_irq_reset(dev);
abd58f01 3367}
09f2344d 3368
4c6c03be
DL
3369void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3370 unsigned int pipe_mask)
d49bdb0e 3371{
1180e206 3372 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
6831f3e3 3373 enum pipe pipe;
d49bdb0e 3374
13321786 3375 spin_lock_irq(&dev_priv->irq_lock);
6831f3e3
VS
3376 for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3377 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3378 dev_priv->de_irq_mask[pipe],
3379 ~dev_priv->de_irq_mask[pipe] | extra_ier);
13321786 3380 spin_unlock_irq(&dev_priv->irq_lock);
d49bdb0e
PZ
3381}
3382
aae8ba84
VS
3383void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
3384 unsigned int pipe_mask)
3385{
6831f3e3
VS
3386 enum pipe pipe;
3387
aae8ba84 3388 spin_lock_irq(&dev_priv->irq_lock);
6831f3e3
VS
3389 for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3390 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
aae8ba84
VS
3391 spin_unlock_irq(&dev_priv->irq_lock);
3392
3393 /* make sure we're done processing display irqs */
3394 synchronize_irq(dev_priv->dev->irq);
3395}
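/*
 * Pairing sketch (illustrative): the two power-well helpers above are
 * meant to bracket a display power well transition, e.g.
 *
 *	gen8_irq_power_well_pre_disable(dev_priv, BIT(PIPE_B) | BIT(PIPE_C));
 *	... power well turned off and back on ...
 *	gen8_irq_power_well_post_enable(dev_priv, BIT(PIPE_B) | BIT(PIPE_C));
 *
 * so that pipe interrupts are reset before their registers lose power
 * and reprogrammed once the well is up again.
 */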
3396
43f328d7
VS
3397static void cherryview_irq_preinstall(struct drm_device *dev)
3398{
fac5e23e 3399 struct drm_i915_private *dev_priv = to_i915(dev);
43f328d7
VS
3400
3401 I915_WRITE(GEN8_MASTER_IRQ, 0);
3402 POSTING_READ(GEN8_MASTER_IRQ);
3403
d6e3cca3 3404 gen8_gt_irq_reset(dev_priv);
43f328d7
VS
3405
3406 GEN5_IRQ_RESET(GEN8_PCU_);
3407
ad22d106 3408 spin_lock_irq(&dev_priv->irq_lock);
9918271e
VS
3409 if (dev_priv->display_irqs_enabled)
3410 vlv_display_irq_reset(dev_priv);
ad22d106 3411 spin_unlock_irq(&dev_priv->irq_lock);
43f328d7
VS
3412}
3413
91d14251 3414static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
87a02106
VS
3415 const u32 hpd[HPD_NUM_PINS])
3416{
87a02106
VS
3417 struct intel_encoder *encoder;
3418 u32 enabled_irqs = 0;
3419
91d14251 3420 for_each_intel_encoder(dev_priv->dev, encoder)
87a02106
VS
3421 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
3422 enabled_irqs |= hpd[encoder->hpd_pin];
3423
3424 return enabled_irqs;
3425}
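/*
 * Usage sketch (illustrative): callers pair this helper with their
 * platform's pin-to-bit table from the top of the file, so only pins in
 * state HPD_ENABLED contribute to the unmasked set, e.g. on IBX:
 *
 *	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
 *	ibx_display_interrupt_update(dev_priv, SDE_HOTPLUG_MASK, enabled_irqs);
 */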
3426
91d14251 3427static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
7fe0b973 3428{
87a02106 3429 u32 hotplug_irqs, hotplug, enabled_irqs;
82a28bcf 3430
91d14251 3431 if (HAS_PCH_IBX(dev_priv)) {
fee884ed 3432 hotplug_irqs = SDE_HOTPLUG_MASK;
91d14251 3433 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
82a28bcf 3434 } else {
fee884ed 3435 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
91d14251 3436 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
82a28bcf 3437 }
7fe0b973 3438
fee884ed 3439 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
82a28bcf
DV
3440
3441 /*
3442 * Enable digital hotplug on the PCH, and configure the DP short pulse
6dbf30ce
VS
3443 * duration to 2ms (which is the minimum in the Display Port spec).
3444 * The pulse duration bits are reserved on LPT+.
82a28bcf 3445 */
7fe0b973
KP
3446 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3447 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3448 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3449 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3450 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
0b2eb33e
VS
3451 /*
3452 * When CPU and PCH are on the same package, port A
3453 * HPD must be enabled in both north and south.
3454 */
91d14251 3455 if (HAS_PCH_LPT_LP(dev_priv))
0b2eb33e 3456 hotplug |= PORTA_HOTPLUG_ENABLE;
7fe0b973 3457 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
6dbf30ce 3458}
26951caf 3459
91d14251 3460static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
6dbf30ce 3461{
6dbf30ce
VS
3462 u32 hotplug_irqs, hotplug, enabled_irqs;
3463
3464 hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
91d14251 3465 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
6dbf30ce
VS
3466
3467 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3468
3469 /* Enable digital hotplug on the PCH */
3470 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3471 hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE |
74c0b395 3472 PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE;
6dbf30ce
VS
3473 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3474
3475 hotplug = I915_READ(PCH_PORT_HOTPLUG2);
3476 hotplug |= PORTE_HOTPLUG_ENABLE;
3477 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
7fe0b973
KP
3478}
3479
91d14251 3480static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
e4ce95aa 3481{
e4ce95aa
VS
3482 u32 hotplug_irqs, hotplug, enabled_irqs;
3483
91d14251 3484 if (INTEL_GEN(dev_priv) >= 8) {
3a3b3c7d 3485 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
91d14251 3486 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);
3a3b3c7d
VS
3487
3488 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
91d14251 3489 } else if (INTEL_GEN(dev_priv) >= 7) {
23bb4cb5 3490 hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
91d14251 3491 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);
3a3b3c7d
VS
3492
3493 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
23bb4cb5
VS
3494 } else {
3495 hotplug_irqs = DE_DP_A_HOTPLUG;
91d14251 3496 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);
e4ce95aa 3497
3a3b3c7d
VS
3498 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3499 }
e4ce95aa
VS
3500
3501 /*
3502 * Enable digital hotplug on the CPU, and configure the DP short pulse
3503 * duration to 2ms (which is the minimum in the Display Port spec)
23bb4cb5 3504 * The pulse duration bits are reserved on HSW+.
e4ce95aa
VS
3505 */
3506 hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
3507 hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
3508 hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
3509 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3510
91d14251 3511 ibx_hpd_irq_setup(dev_priv);
e4ce95aa
VS
3512}
3513
91d14251 3514static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
e0a20ad7 3515{
a52bb15b 3516 u32 hotplug_irqs, hotplug, enabled_irqs;
e0a20ad7 3517
91d14251 3518 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
a52bb15b 3519 hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
e0a20ad7 3520
a52bb15b 3521 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
e0a20ad7 3522
a52bb15b
VS
3523 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3524 hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
3525 PORTA_HOTPLUG_ENABLE;
d252bf68
SS
3526
3527 DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
3528 hotplug, enabled_irqs);
3529 hotplug &= ~BXT_DDI_HPD_INVERT_MASK;
3530
3531 /*
 3532 * For BXT the invert bit has to be set based on the AOB design
 3533 * for the HPD detection logic; update it based on the VBT fields.
3534 */
3535
3536 if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
3537 intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
3538 hotplug |= BXT_DDIA_HPD_INVERT;
3539 if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
3540 intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
3541 hotplug |= BXT_DDIB_HPD_INVERT;
3542 if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
3543 intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
3544 hotplug |= BXT_DDIC_HPD_INVERT;
3545
a52bb15b 3546 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
e0a20ad7
SS
3547}
3548
d46da437
PZ
3549static void ibx_irq_postinstall(struct drm_device *dev)
3550{
fac5e23e 3551 struct drm_i915_private *dev_priv = to_i915(dev);
82a28bcf 3552 u32 mask;
e5868a31 3553
692a04cf
DV
3554 if (HAS_PCH_NOP(dev))
3555 return;
3556
105b122e 3557 if (HAS_PCH_IBX(dev))
5c673b60 3558 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
105b122e 3559 else
5c673b60 3560 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
8664281b 3561
b51a2842 3562 gen5_assert_iir_is_zero(dev_priv, SDEIIR);
d46da437 3563 I915_WRITE(SDEIMR, ~mask);
d46da437
PZ
3564}
3565
0a9a8c91
DV
3566static void gen5_gt_irq_postinstall(struct drm_device *dev)
3567{
fac5e23e 3568 struct drm_i915_private *dev_priv = to_i915(dev);
0a9a8c91
DV
3569 u32 pm_irqs, gt_irqs;
3570
3571 pm_irqs = gt_irqs = 0;
3572
3573 dev_priv->gt_irq_mask = ~0;
040d2baa 3574 if (HAS_L3_DPF(dev)) {
0a9a8c91 3575 /* L3 parity interrupt is always unmasked. */
35a85ac6
BW
3576 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3577 gt_irqs |= GT_PARITY_ERROR(dev);
0a9a8c91
DV
3578 }
3579
3580 gt_irqs |= GT_RENDER_USER_INTERRUPT;
3581 if (IS_GEN5(dev)) {
f8973c21 3582 gt_irqs |= ILK_BSD_USER_INTERRUPT;
0a9a8c91
DV
3583 } else {
3584 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3585 }
3586
35079899 3587 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
0a9a8c91
DV
3588
3589 if (INTEL_INFO(dev)->gen >= 6) {
78e68d36
ID
3590 /*
3591 * RPS interrupts will get enabled/disabled on demand when RPS
3592 * itself is enabled/disabled.
3593 */
0a9a8c91
DV
3594 if (HAS_VEBOX(dev))
3595 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3596
605cd25b 3597 dev_priv->pm_irq_mask = 0xffffffff;
35079899 3598 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
0a9a8c91
DV
3599 }
3600}
3601
f71d4af4 3602static int ironlake_irq_postinstall(struct drm_device *dev)
036a4a7d 3603{
fac5e23e 3604 struct drm_i915_private *dev_priv = to_i915(dev);
8e76f8dc
PZ
3605 u32 display_mask, extra_mask;
3606
3607 if (INTEL_INFO(dev)->gen >= 7) {
3608 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3609 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3610 DE_PLANEB_FLIP_DONE_IVB |
5c673b60 3611 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
8e76f8dc 3612 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
23bb4cb5
VS
3613 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3614 DE_DP_A_HOTPLUG_IVB);
8e76f8dc
PZ
3615 } else {
3616 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3617 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
5b3a856b 3618 DE_AUX_CHANNEL_A |
5b3a856b
DV
3619 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3620 DE_POISON);
e4ce95aa
VS
3621 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3622 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3623 DE_DP_A_HOTPLUG);
8e76f8dc 3624 }
036a4a7d 3625
1ec14ad3 3626 dev_priv->irq_mask = ~display_mask;
036a4a7d 3627
0c841212
PZ
3628 I915_WRITE(HWSTAM, 0xeffe);
3629
622364b6
PZ
3630 ibx_irq_pre_postinstall(dev);
3631
35079899 3632 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
036a4a7d 3633
0a9a8c91 3634 gen5_gt_irq_postinstall(dev);
036a4a7d 3635
d46da437 3636 ibx_irq_postinstall(dev);
7fe0b973 3637
f97108d1 3638 if (IS_IRONLAKE_M(dev)) {
6005ce42
DV
3639 /* Enable PCU event interrupts
3640 *
3641 * spinlocking not required here for correctness since interrupt
4bc9d430
DV
3642 * setup is guaranteed to run in single-threaded context. But we
3643 * need it to make the assert_spin_locked happy. */
d6207435 3644 spin_lock_irq(&dev_priv->irq_lock);
fbdedaea 3645 ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
d6207435 3646 spin_unlock_irq(&dev_priv->irq_lock);
f97108d1
JB
3647 }
3648
036a4a7d
ZW
3649 return 0;
3650}
3651
f8b79e58
ID
3652void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3653{
3654 assert_spin_locked(&dev_priv->irq_lock);
3655
3656 if (dev_priv->display_irqs_enabled)
3657 return;
3658
3659 dev_priv->display_irqs_enabled = true;
3660
d6c69803
VS
3661 if (intel_irqs_enabled(dev_priv)) {
3662 vlv_display_irq_reset(dev_priv);
ad22d106 3663 vlv_display_irq_postinstall(dev_priv);
d6c69803 3664 }
f8b79e58
ID
3665}
3666
3667void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3668{
3669 assert_spin_locked(&dev_priv->irq_lock);
3670
3671 if (!dev_priv->display_irqs_enabled)
3672 return;
3673
3674 dev_priv->display_irqs_enabled = false;
3675
950eabaf 3676 if (intel_irqs_enabled(dev_priv))
ad22d106 3677 vlv_display_irq_reset(dev_priv);
f8b79e58
ID
3678}
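/*
 * Usage sketch (illustrative): both toggles above assert that irq_lock
 * is held, so a caller (e.g. the display power well code) would wrap
 * them like this:
 */
static void __maybe_unused
vlv_display_irqs_toggle_sketch(struct drm_i915_private *dev_priv, bool enable)
{
	spin_lock_irq(&dev_priv->irq_lock);
	if (enable)
		valleyview_enable_display_irqs(dev_priv);
	else
		valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}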
3679
0e6c9a9e
VS
3680
3681static int valleyview_irq_postinstall(struct drm_device *dev)
3682{
fac5e23e 3683 struct drm_i915_private *dev_priv = to_i915(dev);
0e6c9a9e 3684
0a9a8c91 3685 gen5_gt_irq_postinstall(dev);
7e231dbe 3686
ad22d106 3687 spin_lock_irq(&dev_priv->irq_lock);
9918271e
VS
3688 if (dev_priv->display_irqs_enabled)
3689 vlv_display_irq_postinstall(dev_priv);
ad22d106
VS
3690 spin_unlock_irq(&dev_priv->irq_lock);
3691
7e231dbe 3692 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
34c7b8a7 3693 POSTING_READ(VLV_MASTER_IER);
20afbda2
DV
3694
3695 return 0;
3696}
3697
abd58f01
BW
3698static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3699{
abd58f01
BW
3700 /* These are interrupts we'll toggle with the ring mask register */
3701 uint32_t gt_interrupts[] = {
3702 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
73d477f6 3703 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
73d477f6
OM
3704 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3705 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
abd58f01 3706 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
73d477f6
OM
3707 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3708 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3709 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
abd58f01 3710 0,
73d477f6
OM
3711 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3712 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
abd58f01
BW
3713 };
3714
98735739
TU
3715 if (HAS_L3_DPF(dev_priv))
3716 gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
3717
0961021a 3718 dev_priv->pm_irq_mask = 0xffffffff;
9a2d2d87
D
3719 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3720 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
78e68d36
ID
3721 /*
3722 * RPS interrupts will get enabled/disabled on demand when RPS itself
3723 * is enabled/disabled.
3724 */
3725 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
9a2d2d87 3726 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
abd58f01
BW
3727}
3728
3729static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3730{
770de83d
DL
3731 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3732 uint32_t de_pipe_enables;
3a3b3c7d
VS
3733 u32 de_port_masked = GEN8_AUX_CHANNEL_A;
3734 u32 de_port_enables;
11825b0d 3735 u32 de_misc_masked = GEN8_DE_MISC_GSE;
3a3b3c7d 3736 enum pipe pipe;
770de83d 3737
b4834a50 3738 if (INTEL_INFO(dev_priv)->gen >= 9) {
770de83d
DL
3739 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3740 GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
3a3b3c7d
VS
3741 de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
3742 GEN9_AUX_CHANNEL_D;
9e63743e 3743 if (IS_BROXTON(dev_priv))
3a3b3c7d
VS
3744 de_port_masked |= BXT_DE_PORT_GMBUS;
3745 } else {
770de83d
DL
3746 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
3747 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3a3b3c7d 3748 }
770de83d
DL
3749
3750 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3751 GEN8_PIPE_FIFO_UNDERRUN;
3752
3a3b3c7d 3753 de_port_enables = de_port_masked;
a52bb15b
VS
3754 if (IS_BROXTON(dev_priv))
3755 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
3756 else if (IS_BROADWELL(dev_priv))
3a3b3c7d
VS
3757 de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
3758
13b3a0a7
DV
3759 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3760 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3761 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
abd58f01 3762
055e393f 3763 for_each_pipe(dev_priv, pipe)
f458ebbc 3764 if (intel_display_power_is_enabled(dev_priv,
813bde43
PZ
3765 POWER_DOMAIN_PIPE(pipe)))
3766 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3767 dev_priv->de_irq_mask[pipe],
3768 de_pipe_enables);
abd58f01 3769
3a3b3c7d 3770 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
11825b0d 3771 GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
abd58f01
BW
3772}
3773
3774static int gen8_irq_postinstall(struct drm_device *dev)
3775{
fac5e23e 3776 struct drm_i915_private *dev_priv = to_i915(dev);
abd58f01 3777
266ea3d9
SS
3778 if (HAS_PCH_SPLIT(dev))
3779 ibx_irq_pre_postinstall(dev);
622364b6 3780
abd58f01
BW
3781 gen8_gt_irq_postinstall(dev_priv);
3782 gen8_de_irq_postinstall(dev_priv);
3783
266ea3d9
SS
3784 if (HAS_PCH_SPLIT(dev))
3785 ibx_irq_postinstall(dev);
abd58f01 3786
e5328c43 3787 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
abd58f01
BW
3788 POSTING_READ(GEN8_MASTER_IRQ);
3789
3790 return 0;
3791}
3792
43f328d7
VS
3793static int cherryview_irq_postinstall(struct drm_device *dev)
3794{
fac5e23e 3795 struct drm_i915_private *dev_priv = to_i915(dev);
43f328d7 3796
43f328d7
VS
3797 gen8_gt_irq_postinstall(dev_priv);
3798
ad22d106 3799 spin_lock_irq(&dev_priv->irq_lock);
9918271e
VS
3800 if (dev_priv->display_irqs_enabled)
3801 vlv_display_irq_postinstall(dev_priv);
ad22d106
VS
3802 spin_unlock_irq(&dev_priv->irq_lock);
3803
e5328c43 3804 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
43f328d7
VS
3805 POSTING_READ(GEN8_MASTER_IRQ);
3806
3807 return 0;
3808}
3809
abd58f01
BW
3810static void gen8_irq_uninstall(struct drm_device *dev)
3811{
fac5e23e 3812 struct drm_i915_private *dev_priv = to_i915(dev);
abd58f01
BW
3813
3814 if (!dev_priv)
3815 return;
3816
823f6b38 3817 gen8_irq_reset(dev);
abd58f01
BW
3818}
3819
7e231dbe
JB
3820static void valleyview_irq_uninstall(struct drm_device *dev)
3821{
fac5e23e 3822 struct drm_i915_private *dev_priv = to_i915(dev);
7e231dbe
JB
3823
3824 if (!dev_priv)
3825 return;
3826
843d0e7d 3827 I915_WRITE(VLV_MASTER_IER, 0);
34c7b8a7 3828 POSTING_READ(VLV_MASTER_IER);
843d0e7d 3829
893fce8e
VS
3830 gen5_gt_irq_reset(dev);
3831
7e231dbe 3832 I915_WRITE(HWSTAM, 0xffffffff);
f8b79e58 3833
ad22d106 3834 spin_lock_irq(&dev_priv->irq_lock);
9918271e
VS
3835 if (dev_priv->display_irqs_enabled)
3836 vlv_display_irq_reset(dev_priv);
ad22d106 3837 spin_unlock_irq(&dev_priv->irq_lock);
7e231dbe
JB
3838}
3839
43f328d7
VS
3840static void cherryview_irq_uninstall(struct drm_device *dev)
3841{
fac5e23e 3842 struct drm_i915_private *dev_priv = to_i915(dev);
43f328d7
VS
3843
3844 if (!dev_priv)
3845 return;
3846
3847 I915_WRITE(GEN8_MASTER_IRQ, 0);
3848 POSTING_READ(GEN8_MASTER_IRQ);
3849
a2c30fba 3850 gen8_gt_irq_reset(dev_priv);
43f328d7 3851
a2c30fba 3852 GEN5_IRQ_RESET(GEN8_PCU_);
43f328d7 3853
ad22d106 3854 spin_lock_irq(&dev_priv->irq_lock);
9918271e
VS
3855 if (dev_priv->display_irqs_enabled)
3856 vlv_display_irq_reset(dev_priv);
ad22d106 3857 spin_unlock_irq(&dev_priv->irq_lock);
43f328d7
VS
3858}
3859
f71d4af4 3860static void ironlake_irq_uninstall(struct drm_device *dev)
036a4a7d 3861{
fac5e23e 3862 struct drm_i915_private *dev_priv = to_i915(dev);
4697995b
JB
3863
3864 if (!dev_priv)
3865 return;
3866
be30b29f 3867 ironlake_irq_reset(dev);
036a4a7d
ZW
3868}
3869
a266c7d5 3870static void i8xx_irq_preinstall(struct drm_device * dev)
1da177e4 3871{
fac5e23e 3872 struct drm_i915_private *dev_priv = to_i915(dev);
9db4a9c7 3873 int pipe;
91e3738e 3874
055e393f 3875 for_each_pipe(dev_priv, pipe)
9db4a9c7 3876 I915_WRITE(PIPESTAT(pipe), 0);
a266c7d5
CW
3877 I915_WRITE16(IMR, 0xffff);
3878 I915_WRITE16(IER, 0x0);
3879 POSTING_READ16(IER);
c2798b19
CW
3880}
3881
3882static int i8xx_irq_postinstall(struct drm_device *dev)
3883{
fac5e23e 3884 struct drm_i915_private *dev_priv = to_i915(dev);
c2798b19 3885
c2798b19
CW
3886 I915_WRITE16(EMR,
3887 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3888
3889 /* Unmask the interrupts that we always want on. */
3890 dev_priv->irq_mask =
3891 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3892 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3893 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
37ef01ab 3894 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
c2798b19
CW
3895 I915_WRITE16(IMR, dev_priv->irq_mask);
3896
3897 I915_WRITE16(IER,
3898 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3899 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
c2798b19
CW
3900 I915_USER_INTERRUPT);
3901 POSTING_READ16(IER);
3902
379ef82d
DV
3903 /* Interrupt setup is already guaranteed to be single-threaded, this is
3904 * just to make the assert_spin_locked check happy. */
d6207435 3905 spin_lock_irq(&dev_priv->irq_lock);
755e9019
ID
3906 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3907 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
d6207435 3908 spin_unlock_irq(&dev_priv->irq_lock);
379ef82d 3909
c2798b19
CW
3910 return 0;
3911}
3912
5a21b665
DV
3913/*
3914 * Returns true when a page flip has completed.
3915 */
3916static bool i8xx_handle_vblank(struct drm_i915_private *dev_priv,
3917 int plane, int pipe, u32 iir)
3918{
3919 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3920
3921 if (!intel_pipe_handle_vblank(dev_priv, pipe))
3922 return false;
3923
3924 if ((iir & flip_pending) == 0)
3925 goto check_page_flip;
3926
3927 /* We detect FlipDone by looking for the change in PendingFlip from '1'
 3929 * to '0' on the following vblank, i.e. IIR has the PendingFlip
3929 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3930 * the flip is completed (no longer pending). Since this doesn't raise
3931 * an interrupt per se, we watch for the change at vblank.
3932 */
3933 if (I915_READ16(ISR) & flip_pending)
3934 goto check_page_flip;
3935
3936 intel_finish_page_flip_cs(dev_priv, pipe);
3937 return true;
3938
3939check_page_flip:
3940 intel_check_page_flip(dev_priv, pipe);
3941 return false;
3942}
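/*
 * Timing sketch (illustrative): the heuristic above combines two
 * registers with different semantics --
 *
 *	IIR flip bit set : a MI_DISPLAY_FLIP was queued (sticky until acked)
 *	ISR flip bit set : the flip is still pending in hardware (live)
 *
 * so "IIR set && ISR clear" sampled at vblank means the flip completed
 * and intel_finish_page_flip_cs() can retire it.
 */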
3943
ff1f525e 3944static irqreturn_t i8xx_irq_handler(int irq, void *arg)
c2798b19 3945{
45a83f84 3946 struct drm_device *dev = arg;
fac5e23e 3947 struct drm_i915_private *dev_priv = to_i915(dev);
c2798b19
CW
3948 u16 iir, new_iir;
3949 u32 pipe_stats[2];
c2798b19
CW
3950 int pipe;
3951 u16 flip_mask =
3952 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3953 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
1f814dac 3954 irqreturn_t ret;
c2798b19 3955
2dd2a883
ID
3956 if (!intel_irqs_enabled(dev_priv))
3957 return IRQ_NONE;
3958
1f814dac
ID
3959 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
3960 disable_rpm_wakeref_asserts(dev_priv);
3961
3962 ret = IRQ_NONE;
c2798b19
CW
3963 iir = I915_READ16(IIR);
3964 if (iir == 0)
1f814dac 3965 goto out;
c2798b19
CW
3966
3967 while (iir & ~flip_mask) {
3968 /* Can't rely on pipestat interrupt bit in iir as it might
3969 * have been cleared after the pipestat interrupt was received.
3970 * It doesn't set the bit in iir again, but it still produces
3971 * interrupts (for non-MSI).
3972 */
222c7f51 3973 spin_lock(&dev_priv->irq_lock);
c2798b19 3974 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
aaecdf61 3975 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
c2798b19 3976
055e393f 3977 for_each_pipe(dev_priv, pipe) {
f0f59a00 3978 i915_reg_t reg = PIPESTAT(pipe);
c2798b19
CW
3979 pipe_stats[pipe] = I915_READ(reg);
3980
3981 /*
3982 * Clear the PIPE*STAT regs before the IIR
3983 */
2d9d2b0b 3984 if (pipe_stats[pipe] & 0x8000ffff)
c2798b19 3985 I915_WRITE(reg, pipe_stats[pipe]);
c2798b19 3986 }
222c7f51 3987 spin_unlock(&dev_priv->irq_lock);
c2798b19
CW
3988
3989 I915_WRITE16(IIR, iir & ~flip_mask);
3990 new_iir = I915_READ16(IIR); /* Flush posted writes */
3991
c2798b19 3992 if (iir & I915_USER_INTERRUPT)
4a570db5 3993 notify_ring(&dev_priv->engine[RCS]);
c2798b19 3994
055e393f 3995 for_each_pipe(dev_priv, pipe) {
5a21b665
DV
3996 int plane = pipe;
3997 if (HAS_FBC(dev_priv))
3998 plane = !plane;
3999
4000 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4001 i8xx_handle_vblank(dev_priv, plane, pipe, iir))
4002 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
c2798b19 4003
4356d586 4004 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
91d14251 4005 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2d9d2b0b 4006
1f7247c0
DV
4007 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4008 intel_cpu_fifo_underrun_irq_handler(dev_priv,
4009 pipe);
4356d586 4010 }
c2798b19
CW
4011
4012 iir = new_iir;
4013 }
1f814dac
ID
4014 ret = IRQ_HANDLED;
4015
4016out:
4017 enable_rpm_wakeref_asserts(dev_priv);
c2798b19 4018
1f814dac 4019 return ret;
c2798b19
CW
4020}
4021
4022static void i8xx_irq_uninstall(struct drm_device * dev)
4023{
fac5e23e 4024 struct drm_i915_private *dev_priv = to_i915(dev);
c2798b19
CW
4025 int pipe;
4026
055e393f 4027 for_each_pipe(dev_priv, pipe) {
c2798b19
CW
4028 /* Clear enable bits; then clear status bits */
4029 I915_WRITE(PIPESTAT(pipe), 0);
4030 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4031 }
4032 I915_WRITE16(IMR, 0xffff);
4033 I915_WRITE16(IER, 0x0);
4034 I915_WRITE16(IIR, I915_READ16(IIR));
4035}
4036
a266c7d5
CW
4037static void i915_irq_preinstall(struct drm_device * dev)
4038{
fac5e23e 4039 struct drm_i915_private *dev_priv = to_i915(dev);
a266c7d5
CW
4040 int pipe;
4041
a266c7d5 4042 if (I915_HAS_HOTPLUG(dev)) {
0706f17c 4043 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
a266c7d5
CW
4044 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4045 }
4046
00d98ebd 4047 I915_WRITE16(HWSTAM, 0xeffe);
055e393f 4048 for_each_pipe(dev_priv, pipe)
a266c7d5
CW
4049 I915_WRITE(PIPESTAT(pipe), 0);
4050 I915_WRITE(IMR, 0xffffffff);
4051 I915_WRITE(IER, 0x0);
4052 POSTING_READ(IER);
4053}
4054
4055static int i915_irq_postinstall(struct drm_device *dev)
4056{
fac5e23e 4057 struct drm_i915_private *dev_priv = to_i915(dev);
38bde180 4058 u32 enable_mask;
a266c7d5 4059
38bde180
CW
4060 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
4061
4062 /* Unmask the interrupts that we always want on. */
4063 dev_priv->irq_mask =
4064 ~(I915_ASLE_INTERRUPT |
4065 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4066 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4067 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
37ef01ab 4068 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
38bde180
CW
4069
4070 enable_mask =
4071 I915_ASLE_INTERRUPT |
4072 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4073 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
38bde180
CW
4074 I915_USER_INTERRUPT;
4075
a266c7d5 4076 if (I915_HAS_HOTPLUG(dev)) {
0706f17c 4077 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
20afbda2
DV
4078 POSTING_READ(PORT_HOTPLUG_EN);
4079
a266c7d5
CW
4080 /* Enable in IER... */
4081 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
4082 /* and unmask in IMR */
4083 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
4084 }
4085
a266c7d5
CW
4086 I915_WRITE(IMR, dev_priv->irq_mask);
4087 I915_WRITE(IER, enable_mask);
4088 POSTING_READ(IER);
4089
91d14251 4090 i915_enable_asle_pipestat(dev_priv);
20afbda2 4091
379ef82d
DV
4092 /* Interrupt setup is already guaranteed to be single-threaded, this is
4093 * just to make the assert_spin_locked check happy. */
d6207435 4094 spin_lock_irq(&dev_priv->irq_lock);
755e9019
ID
4095 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4096 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
d6207435 4097 spin_unlock_irq(&dev_priv->irq_lock);
379ef82d 4098
20afbda2
DV
4099 return 0;
4100}
4101
5a21b665
DV
4102/*
4103 * Returns true when a page flip has completed.
4104 */
4105static bool i915_handle_vblank(struct drm_i915_private *dev_priv,
4106 int plane, int pipe, u32 iir)
4107{
4108 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
4109
4110 if (!intel_pipe_handle_vblank(dev_priv, pipe))
4111 return false;
4112
4113 if ((iir & flip_pending) == 0)
4114 goto check_page_flip;
4115
4116 /* We detect FlipDone by looking for the change in PendingFlip from '1'
 4117 * to '0' on the following vblank, i.e. IIR has the PendingFlip
4118 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
4119 * the flip is completed (no longer pending). Since this doesn't raise
4120 * an interrupt per se, we watch for the change at vblank.
4121 */
4122 if (I915_READ(ISR) & flip_pending)
4123 goto check_page_flip;
4124
4125 intel_finish_page_flip_cs(dev_priv, pipe);
4126 return true;
4127
4128check_page_flip:
4129 intel_check_page_flip(dev_priv, pipe);
4130 return false;
4131}
4132
ff1f525e 4133static irqreturn_t i915_irq_handler(int irq, void *arg)
a266c7d5 4134{
45a83f84 4135 struct drm_device *dev = arg;
fac5e23e 4136 struct drm_i915_private *dev_priv = to_i915(dev);
8291ee90 4137 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
38bde180
CW
4138 u32 flip_mask =
4139 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4140 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
38bde180 4141 int pipe, ret = IRQ_NONE;
a266c7d5 4142
2dd2a883
ID
4143 if (!intel_irqs_enabled(dev_priv))
4144 return IRQ_NONE;
4145
1f814dac
ID
4146 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
4147 disable_rpm_wakeref_asserts(dev_priv);
4148
a266c7d5 4149 iir = I915_READ(IIR);
38bde180
CW
4150 do {
4151 bool irq_received = (iir & ~flip_mask) != 0;
8291ee90 4152 bool blc_event = false;
a266c7d5
CW
4153
4154 /* Can't rely on pipestat interrupt bit in iir as it might
4155 * have been cleared after the pipestat interrupt was received.
4156 * It doesn't set the bit in iir again, but it still produces
4157 * interrupts (for non-MSI).
4158 */
222c7f51 4159 spin_lock(&dev_priv->irq_lock);
a266c7d5 4160 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
aaecdf61 4161 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
a266c7d5 4162
055e393f 4163 for_each_pipe(dev_priv, pipe) {
f0f59a00 4164 i915_reg_t reg = PIPESTAT(pipe);
a266c7d5
CW
4165 pipe_stats[pipe] = I915_READ(reg);
4166
38bde180 4167 /* Clear the PIPE*STAT regs before the IIR */
a266c7d5 4168 if (pipe_stats[pipe] & 0x8000ffff) {
a266c7d5 4169 I915_WRITE(reg, pipe_stats[pipe]);
38bde180 4170 irq_received = true;
a266c7d5
CW
4171 }
4172 }
222c7f51 4173 spin_unlock(&dev_priv->irq_lock);
a266c7d5
CW
4174
4175 if (!irq_received)
4176 break;
4177
a266c7d5 4178 /* Consume port. Then clear IIR or we'll miss events */
91d14251 4179 if (I915_HAS_HOTPLUG(dev_priv) &&
1ae3c34c
VS
4180 iir & I915_DISPLAY_PORT_INTERRUPT) {
4181 u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4182 if (hotplug_status)
91d14251 4183 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1ae3c34c 4184 }
a266c7d5 4185
38bde180 4186 I915_WRITE(IIR, iir & ~flip_mask);
a266c7d5
CW
4187 new_iir = I915_READ(IIR); /* Flush posted writes */
4188
a266c7d5 4189 if (iir & I915_USER_INTERRUPT)
4a570db5 4190 notify_ring(&dev_priv->engine[RCS]);
a266c7d5 4191
055e393f 4192 for_each_pipe(dev_priv, pipe) {
5a21b665
DV
4193 int plane = pipe;
4194 if (HAS_FBC(dev_priv))
4195 plane = !plane;
4196
4197 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4198 i915_handle_vblank(dev_priv, plane, pipe, iir))
4199 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
a266c7d5
CW
4200
4201 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4202 blc_event = true;
4356d586
DV
4203
4204 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
91d14251 4205 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2d9d2b0b 4206
1f7247c0
DV
4207 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4208 intel_cpu_fifo_underrun_irq_handler(dev_priv,
4209 pipe);
a266c7d5
CW
4210 }
4211
a266c7d5 4212 if (blc_event || (iir & I915_ASLE_INTERRUPT))
91d14251 4213 intel_opregion_asle_intr(dev_priv);
a266c7d5
CW
4214
4215 /* With MSI, interrupts are only generated when iir
4216 * transitions from zero to nonzero. If another bit got
4217 * set while we were handling the existing iir bits, then
4218 * we would never get another interrupt.
4219 *
4220 * This is fine on non-MSI as well, as if we hit this path
4221 * we avoid exiting the interrupt handler only to generate
4222 * another one.
4223 *
4224 * Note that for MSI this could cause a stray interrupt report
4225 * if an interrupt landed in the time between writing IIR and
4226 * the posting read. This should be rare enough to never
4227 * trigger the 99% of 100,000 interrupts test for disabling
4228 * stray interrupts.
4229 */
38bde180 4230 ret = IRQ_HANDLED;
a266c7d5 4231 iir = new_iir;
38bde180 4232 } while (iir & ~flip_mask);
a266c7d5 4233
1f814dac
ID
4234 enable_rpm_wakeref_asserts(dev_priv);
4235
a266c7d5
CW
4236 return ret;
4237}
4238
4239static void i915_irq_uninstall(struct drm_device * dev)
4240{
fac5e23e 4241 struct drm_i915_private *dev_priv = to_i915(dev);
a266c7d5
CW
4242 int pipe;
4243
a266c7d5 4244 if (I915_HAS_HOTPLUG(dev)) {
0706f17c 4245 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
a266c7d5
CW
4246 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4247 }
4248
00d98ebd 4249 I915_WRITE16(HWSTAM, 0xffff);
055e393f 4250 for_each_pipe(dev_priv, pipe) {
55b39755 4251 /* Clear enable bits; then clear status bits */
a266c7d5 4252 I915_WRITE(PIPESTAT(pipe), 0);
55b39755
CW
4253 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4254 }
a266c7d5
CW
4255 I915_WRITE(IMR, 0xffffffff);
4256 I915_WRITE(IER, 0x0);
4257
a266c7d5
CW
4258 I915_WRITE(IIR, I915_READ(IIR));
4259}
4260
4261static void i965_irq_preinstall(struct drm_device * dev)
4262{
fac5e23e 4263 struct drm_i915_private *dev_priv = to_i915(dev);
a266c7d5
CW
4264 int pipe;
4265
0706f17c 4266 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
adca4730 4267 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
a266c7d5
CW
4268
4269 I915_WRITE(HWSTAM, 0xeffe);
055e393f 4270 for_each_pipe(dev_priv, pipe)
a266c7d5
CW
4271 I915_WRITE(PIPESTAT(pipe), 0);
4272 I915_WRITE(IMR, 0xffffffff);
4273 I915_WRITE(IER, 0x0);
4274 POSTING_READ(IER);
4275}
4276
4277static int i965_irq_postinstall(struct drm_device *dev)
4278{
fac5e23e 4279 struct drm_i915_private *dev_priv = to_i915(dev);
bbba0a97 4280 u32 enable_mask;
a266c7d5
CW
4281 u32 error_mask;
4282
a266c7d5 4283 /* Unmask the interrupts that we always want on. */
bbba0a97 4284 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
adca4730 4285 I915_DISPLAY_PORT_INTERRUPT |
bbba0a97
CW
4286 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4287 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4288 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4289 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4290 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4291
4292 enable_mask = ~dev_priv->irq_mask;
21ad8330
VS
4293 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4294 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
bbba0a97
CW
4295 enable_mask |= I915_USER_INTERRUPT;
4296
91d14251 4297 if (IS_G4X(dev_priv))
bbba0a97 4298 enable_mask |= I915_BSD_USER_INTERRUPT;
a266c7d5 4299
b79480ba
DV
4300 /* Interrupt setup is already guaranteed to be single-threaded, this is
4301 * just to make the assert_spin_locked check happy. */
d6207435 4302 spin_lock_irq(&dev_priv->irq_lock);
755e9019
ID
4303 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4304 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4305 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
d6207435 4306 spin_unlock_irq(&dev_priv->irq_lock);
a266c7d5 4307
a266c7d5
CW
4308 /*
 4309 * Enable some error detection; note the instruction error mask
4310 * bit is reserved, so we leave it masked.
4311 */
91d14251 4312 if (IS_G4X(dev_priv)) {
a266c7d5
CW
4313 error_mask = ~(GM45_ERROR_PAGE_TABLE |
4314 GM45_ERROR_MEM_PRIV |
4315 GM45_ERROR_CP_PRIV |
4316 I915_ERROR_MEMORY_REFRESH);
4317 } else {
4318 error_mask = ~(I915_ERROR_PAGE_TABLE |
4319 I915_ERROR_MEMORY_REFRESH);
4320 }
4321 I915_WRITE(EMR, error_mask);
4322
4323 I915_WRITE(IMR, dev_priv->irq_mask);
4324 I915_WRITE(IER, enable_mask);
4325 POSTING_READ(IER);
4326
0706f17c 4327 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
20afbda2
DV
4328 POSTING_READ(PORT_HOTPLUG_EN);
4329
91d14251 4330 i915_enable_asle_pipestat(dev_priv);
20afbda2
DV
4331
4332 return 0;
4333}
4334
91d14251 4335static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
20afbda2 4336{
20afbda2
DV
4337 u32 hotplug_en;
4338
b5ea2d56
DV
4339 assert_spin_locked(&dev_priv->irq_lock);
4340
778eb334
VS
4341 /* Note HDMI and DP share hotplug bits */
4342 /* enable bits are the same for all generations */
91d14251 4343 hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
778eb334
VS
4344 /* Programming the CRT detection parameters tends
4345 to generate a spurious hotplug event about three
4346 seconds later. So just do it once.
4347 */
91d14251 4348 if (IS_G4X(dev_priv))
778eb334 4349 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
778eb334
VS
4350 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4351
4352 /* Ignore TV since it's buggy */
0706f17c 4353 i915_hotplug_interrupt_update_locked(dev_priv,
f9e3dc78
JN
4354 HOTPLUG_INT_EN_MASK |
4355 CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
4356 CRT_HOTPLUG_ACTIVATION_PERIOD_64,
4357 hotplug_en);
a266c7d5
CW
4358}
4359
ff1f525e 4360static irqreturn_t i965_irq_handler(int irq, void *arg)
a266c7d5 4361{
45a83f84 4362 struct drm_device *dev = arg;
fac5e23e 4363 struct drm_i915_private *dev_priv = to_i915(dev);
a266c7d5
CW
4364 u32 iir, new_iir;
4365 u32 pipe_stats[I915_MAX_PIPES];
a266c7d5 4366 int ret = IRQ_NONE, pipe;
21ad8330
VS
4367 u32 flip_mask =
4368 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4369 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
a266c7d5 4370
2dd2a883
ID
4371 if (!intel_irqs_enabled(dev_priv))
4372 return IRQ_NONE;
4373
1f814dac
ID
4374 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
4375 disable_rpm_wakeref_asserts(dev_priv);
4376
a266c7d5
CW
4377 iir = I915_READ(IIR);
4378
a266c7d5 4379 for (;;) {
501e01d7 4380 bool irq_received = (iir & ~flip_mask) != 0;
2c8ba29f
CW
4381 bool blc_event = false;
4382
a266c7d5
CW
4383 /* Can't rely on pipestat interrupt bit in iir as it might
4384 * have been cleared after the pipestat interrupt was received.
4385 * It doesn't set the bit in iir again, but it still produces
4386 * interrupts (for non-MSI).
4387 */
222c7f51 4388 spin_lock(&dev_priv->irq_lock);
a266c7d5 4389 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
aaecdf61 4390 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
a266c7d5 4391
055e393f 4392 for_each_pipe(dev_priv, pipe) {
f0f59a00 4393 i915_reg_t reg = PIPESTAT(pipe);
a266c7d5
CW
4394 pipe_stats[pipe] = I915_READ(reg);
4395
4396 /*
4397 * Clear the PIPE*STAT regs before the IIR
4398 */
4399 if (pipe_stats[pipe] & 0x8000ffff) {
a266c7d5 4400 I915_WRITE(reg, pipe_stats[pipe]);
501e01d7 4401 irq_received = true;
a266c7d5
CW
4402 }
4403 }
222c7f51 4404 spin_unlock(&dev_priv->irq_lock);
a266c7d5
CW
4405
4406 if (!irq_received)
4407 break;
4408
4409 ret = IRQ_HANDLED;
4410
4411 /* Consume port. Then clear IIR or we'll miss events */
1ae3c34c
VS
4412 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
4413 u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4414 if (hotplug_status)
91d14251 4415 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1ae3c34c 4416 }
a266c7d5 4417
21ad8330 4418 I915_WRITE(IIR, iir & ~flip_mask);
a266c7d5
CW
4419 new_iir = I915_READ(IIR); /* Flush posted writes */
4420
a266c7d5 4421 if (iir & I915_USER_INTERRUPT)
4a570db5 4422 notify_ring(&dev_priv->engine[RCS]);
a266c7d5 4423 if (iir & I915_BSD_USER_INTERRUPT)
4a570db5 4424 notify_ring(&dev_priv->engine[VCS]);
a266c7d5 4425
055e393f 4426 for_each_pipe(dev_priv, pipe) {
5a21b665
DV
4427 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
4428 i915_handle_vblank(dev_priv, pipe, pipe, iir))
4429 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
a266c7d5
CW
4430
4431 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4432 blc_event = true;
4356d586
DV
4433
4434 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
91d14251 4435 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
a266c7d5 4436
1f7247c0
DV
4437 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4438 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2d9d2b0b 4439 }
a266c7d5
CW
4440
4441 if (blc_event || (iir & I915_ASLE_INTERRUPT))
91d14251 4442 intel_opregion_asle_intr(dev_priv);
a266c7d5 4443
515ac2bb 4444 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
91d14251 4445 gmbus_irq_handler(dev_priv);
515ac2bb 4446
a266c7d5
CW
4447 /* With MSI, interrupts are only generated when iir
4448 * transitions from zero to nonzero. If another bit got
4449 * set while we were handling the existing iir bits, then
4450 * we would never get another interrupt.
4451 *
4452 * This is fine on non-MSI as well, as if we hit this path
4453 * we avoid exiting the interrupt handler only to generate
4454 * another one.
4455 *
4456 * Note that for MSI this could cause a stray interrupt report
4457 * if an interrupt landed in the time between writing IIR and
4458 * the posting read. This should be rare enough to never
4459 * trigger the 99% of 100,000 interrupts test for disabling
4460 * stray interrupts.
4461 */
4462 iir = new_iir;
4463 }
4464
1f814dac
ID
4465 enable_rpm_wakeref_asserts(dev_priv);
4466
a266c7d5
CW
4467 return ret;
4468}
4469
4470static void i965_irq_uninstall(struct drm_device * dev)
4471{
fac5e23e 4472 struct drm_i915_private *dev_priv = to_i915(dev);
a266c7d5
CW
4473 int pipe;
4474
4475 if (!dev_priv)
4476 return;
4477
0706f17c 4478 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
adca4730 4479 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
a266c7d5
CW
4480
4481 I915_WRITE(HWSTAM, 0xffffffff);
055e393f 4482 for_each_pipe(dev_priv, pipe)
a266c7d5
CW
4483 I915_WRITE(PIPESTAT(pipe), 0);
4484 I915_WRITE(IMR, 0xffffffff);
4485 I915_WRITE(IER, 0x0);
4486
055e393f 4487 for_each_pipe(dev_priv, pipe)
a266c7d5
CW
4488 I915_WRITE(PIPESTAT(pipe),
4489 I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
4490 I915_WRITE(IIR, I915_READ(IIR));
4491}
4492
fca52a55
DV
4493/**
4494 * intel_irq_init - initializes irq support
4495 * @dev_priv: i915 device instance
4496 *
4497 * This function initializes all the irq support including work items, timers
 4498 * and all the vtables. It does not set up the interrupt itself though.
4499 */
b963291c 4500void intel_irq_init(struct drm_i915_private *dev_priv)
f71d4af4 4501{
b963291c 4502 struct drm_device *dev = dev_priv->dev;
8b2e326d 4503
77913b39
JN
4504 intel_hpd_init_work(dev_priv);
4505
c6a828d3 4506 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
a4da4fa4 4507 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
8b2e326d 4508
a6706b45 4509 /* Let's track the enabled rps events */
666a4537 4510 if (IS_VALLEYVIEW(dev_priv))
6c65a587 4511 /* WaGsvRC0ResidencyMethod:vlv */
6f4b12f8 4512 dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
31685c25
D
4513 else
4514 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
a6706b45 4515
1800ad25
SAK
4516 dev_priv->rps.pm_intr_keep = 0;
4517
4518 /*
 4519 * SNB and IVB can (and VLV and CHV may) hard hang on a looping
 4520 * batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
4521 *
4522 * TODO: verify if this can be reproduced on VLV,CHV.
4523 */
4524 if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
4525 dev_priv->rps.pm_intr_keep |= GEN6_PM_RP_UP_EI_EXPIRED;
4526
4527 if (INTEL_INFO(dev_priv)->gen >= 8)
4528 dev_priv->rps.pm_intr_keep |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;
4529
737b1506
CW
4530 INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
4531 i915_hangcheck_elapsed);
61bac78e 4532
b963291c 4533 if (IS_GEN2(dev_priv)) {
4cdb83ec
VS
4534 dev->max_vblank_count = 0;
4535 dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
b963291c 4536 } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
f71d4af4 4537 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
fd8f507c 4538 dev->driver->get_vblank_counter = g4x_get_vblank_counter;
391f75e2
VS
4539 } else {
4540 dev->driver->get_vblank_counter = i915_get_vblank_counter;
4541 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
f71d4af4
JB
4542 }
4543
21da2700
VS
4544 /*
4545 * Opt out of the vblank disable timer on everything except gen2.
4546 * Gen2 doesn't have a hardware frame counter and so depends on
 4547 * vblank interrupts to produce sane vblank sequence numbers.
4548 */
b963291c 4549 if (!IS_GEN2(dev_priv))
21da2700
VS
4550 dev->vblank_disable_immediate = true;
4551
f3a5c3f6
DV
4552 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
4553 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
f71d4af4 4554
b963291c 4555 if (IS_CHERRYVIEW(dev_priv)) {
43f328d7
VS
4556 dev->driver->irq_handler = cherryview_irq_handler;
4557 dev->driver->irq_preinstall = cherryview_irq_preinstall;
4558 dev->driver->irq_postinstall = cherryview_irq_postinstall;
4559 dev->driver->irq_uninstall = cherryview_irq_uninstall;
4560 dev->driver->enable_vblank = valleyview_enable_vblank;
4561 dev->driver->disable_vblank = valleyview_disable_vblank;
4562 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
b963291c 4563 } else if (IS_VALLEYVIEW(dev_priv)) {
7e231dbe
JB
4564 dev->driver->irq_handler = valleyview_irq_handler;
4565 dev->driver->irq_preinstall = valleyview_irq_preinstall;
4566 dev->driver->irq_postinstall = valleyview_irq_postinstall;
4567 dev->driver->irq_uninstall = valleyview_irq_uninstall;
4568 dev->driver->enable_vblank = valleyview_enable_vblank;
4569 dev->driver->disable_vblank = valleyview_disable_vblank;
fa00abe0 4570 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
b963291c 4571 } else if (INTEL_INFO(dev_priv)->gen >= 8) {
abd58f01 4572 dev->driver->irq_handler = gen8_irq_handler;
723761b8 4573 dev->driver->irq_preinstall = gen8_irq_reset;
abd58f01
BW
4574 dev->driver->irq_postinstall = gen8_irq_postinstall;
4575 dev->driver->irq_uninstall = gen8_irq_uninstall;
4576 dev->driver->enable_vblank = gen8_enable_vblank;
4577 dev->driver->disable_vblank = gen8_disable_vblank;
6dbf30ce 4578 if (IS_BROXTON(dev))
e0a20ad7 4579 dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
6dbf30ce
VS
4580 else if (HAS_PCH_SPT(dev))
4581 dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
4582 else
3a3b3c7d 4583 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
f71d4af4
JB
4584 } else if (HAS_PCH_SPLIT(dev)) {
4585 dev->driver->irq_handler = ironlake_irq_handler;
723761b8 4586 dev->driver->irq_preinstall = ironlake_irq_reset;
f71d4af4
JB
4587 dev->driver->irq_postinstall = ironlake_irq_postinstall;
4588 dev->driver->irq_uninstall = ironlake_irq_uninstall;
4589 dev->driver->enable_vblank = ironlake_enable_vblank;
4590 dev->driver->disable_vblank = ironlake_disable_vblank;
23bb4cb5 4591 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
f71d4af4 4592 } else {
7e22dbbb 4593 if (IS_GEN2(dev_priv)) {
c2798b19
CW
4594 dev->driver->irq_preinstall = i8xx_irq_preinstall;
4595 dev->driver->irq_postinstall = i8xx_irq_postinstall;
4596 dev->driver->irq_handler = i8xx_irq_handler;
4597 dev->driver->irq_uninstall = i8xx_irq_uninstall;
7e22dbbb 4598 } else if (IS_GEN3(dev_priv)) {
a266c7d5
CW
4599 dev->driver->irq_preinstall = i915_irq_preinstall;
4600 dev->driver->irq_postinstall = i915_irq_postinstall;
4601 dev->driver->irq_uninstall = i915_irq_uninstall;
4602 dev->driver->irq_handler = i915_irq_handler;
c2798b19 4603 } else {
a266c7d5
CW
4604 dev->driver->irq_preinstall = i965_irq_preinstall;
4605 dev->driver->irq_postinstall = i965_irq_postinstall;
4606 dev->driver->irq_uninstall = i965_irq_uninstall;
4607 dev->driver->irq_handler = i965_irq_handler;
c2798b19 4608 }
778eb334
VS
4609 if (I915_HAS_HOTPLUG(dev_priv))
4610 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
f71d4af4
JB
4611 dev->driver->enable_vblank = i915_enable_vblank;
4612 dev->driver->disable_vblank = i915_disable_vblank;
4613 }
4614}
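/*
 * Summary sketch (illustrative) of the vblank counter selection above:
 *
 *	gen2             : no hardware frame counter -> max_vblank_count = 0
 *	gen3/4 (not g4x) : 24-bit frame counter      -> max_vblank_count = 0xffffff
 *	g4x and gen5+    : 32-bit frame counter      -> max_vblank_count = 0xffffffff
 *
 * Everything except gen2 can therefore also use vblank_disable_immediate,
 * since the hardware counter keeps the sequence numbers sane across
 * interrupt-off periods.
 */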
20afbda2 4615
fca52a55
DV
4616/**
4617 * intel_irq_install - enables the hardware interrupt
4618 * @dev_priv: i915 device instance
4619 *
4620 * This function enables the hardware interrupt handling, but leaves the hotplug
4621 * handling still disabled. It is called after intel_irq_init().
4622 *
4623 * In the driver load and resume code we need working interrupts in a few places
4624 * but don't want to deal with the hassle of concurrent probe and hotplug
4625 * workers. Hence the split into this two-stage approach.
4626 */
2aeb7d3a
DV
4627int intel_irq_install(struct drm_i915_private *dev_priv)
4628{
4629 /*
4630 * We enable some interrupt sources in our postinstall hooks, so mark
4631 * interrupts as enabled _before_ actually enabling them to avoid
4632 * special cases in our ordering checks.
4633 */
4634 dev_priv->pm.irqs_enabled = true;
4635
4636 return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
4637}
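/*
 * Usage sketch (illustrative): the intended driver-load ordering for
 * the two-stage approach described above is
 *
 *	intel_irq_init(dev_priv);	-- vtables, timers, work items
 *	intel_irq_install(dev_priv);	-- hardware interrupt enabled
 *	...
 *	intel_hpd_init(dev_priv);	-- hotplug last, once probing is safe
 *
 * (intel_hpd_init() lives in intel_hotplug.c; named here for context.)
 */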
4638
fca52a55
DV
4639/**
 4640 * intel_irq_uninstall - finalizes all irq handling
4641 * @dev_priv: i915 device instance
4642 *
4643 * This stops interrupt and hotplug handling and unregisters and frees all
4644 * resources acquired in the init functions.
4645 */
2aeb7d3a
DV
4646void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4647{
4648 drm_irq_uninstall(dev_priv->dev);
4649 intel_hpd_cancel_work(dev_priv);
4650 dev_priv->pm.irqs_enabled = false;
4651}
4652
fca52a55
DV
4653/**
4654 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4655 * @dev_priv: i915 device instance
4656 *
4657 * This function is used to disable interrupts at runtime, both in the runtime
4658 * pm and the system suspend/resume code.
4659 */
b963291c 4660void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
c67a470b 4661{
b963291c 4662 dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
2aeb7d3a 4663 dev_priv->pm.irqs_enabled = false;
2dd2a883 4664 synchronize_irq(dev_priv->dev->irq);
c67a470b
PZ
4665}
4666
fca52a55
DV
4667/**
4668 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4669 * @dev_priv: i915 device instance
4670 *
4671 * This function is used to enable interrupts at runtime, both in the runtime
4672 * pm and the system suspend/resume code.
4673 */
b963291c 4674void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
c67a470b 4675{
2aeb7d3a 4676 dev_priv->pm.irqs_enabled = true;
b963291c
DV
4677 dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
4678 dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
c67a470b 4679}
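/*
 * Pairing sketch (illustrative): the two runtime-pm hooks above are
 * symmetric, so a runtime suspend/resume cycle looks like
 *
 *	intel_runtime_pm_disable_interrupts(dev_priv);
 *	... device powered down and back up ...
 *	intel_runtime_pm_enable_interrupts(dev_priv);
 *
 * with irq_preinstall + irq_postinstall reprogramming the interrupt
 * hardware from scratch on the way back up.
 */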