/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
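/*
 * Descriptive note (added for orientation): the tables above translate an
 * enum hpd_pin index into the per-platform hotplug enable/status register
 * bit for that pin; which table applies depends on whether the hotplug
 * interrupt is handled by an IBX/CPT PCH or by the GMCH/g4x-style logic.
 */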
/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.deimr &= ~mask;
		return;
	}

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}
static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.deimr |= mask;
		return;
	}

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}
/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.gtimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.gtimr |= (~enabled_irq_mask &
						interrupt_mask);
		return;
	}

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}
void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}
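/*
 * Worked note on the update rule shared by the *_update_*_irq() helpers:
 * only bits set in interrupt_mask are touched, and within that set a bit
 * is cleared in the IMR (i.e. the interrupt is unmasked/enabled) iff it
 * is also set in enabled_irq_mask:
 *
 *	imr = (imr & ~interrupt_mask) | (~enabled_irq_mask & interrupt_mask);
 *
 * Hence ilk_enable_gt_irq(dev_priv, mask) unmasks exactly "mask" and
 * ilk_disable_gt_irq(dev_priv, mask) masks exactly "mask", leaving all
 * other GTIMR bits untouched.
 */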
/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.gen6_pmimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.gen6_pmimr |= (~enabled_irq_mask &
						     interrupt_mask);
		return;
	}

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
		POSTING_READ(GEN6_PMIMR);
	}
}
void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, mask);
}

void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}
static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum pipe pipe;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}
static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}
static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						 enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
					  DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}
static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

		if (!ivb_can_enable_err_int(dev))
			return;

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);

		/* Change the state _after_ we've read out the current one. */
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

		if (!was_enabled &&
		    (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
			DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
				      pipe_name(pipe));
		}
	}
}
static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	assert_spin_locked(&dev_priv->irq_lock);

	if (enable)
		dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
	else
		dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
}
/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 uint32_t interrupt_mask,
					 uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled &&
	    (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.sdeimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.sdeimr |= (~enabled_irq_mask &
						 interrupt_mask);
		return;
	}

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
#define ibx_enable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), 0)
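/*
 * Illustrative usage: ibx_enable_display_interrupt(dev_priv, SDE_GMBUS_CPT)
 * unmasks only the GMBUS bit in SDEIMR, and the matching disable macro
 * masks only that bit; all other SDEIMR bits are preserved by the update
 * rule in ibx_display_interrupt_update().
 */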
static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

	if (enable)
		ibx_enable_display_interrupt(dev_priv, bit);
	else
		ibx_disable_display_interrupt(dev_priv, bit);
}
static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(SERR_INT,
			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

		if (!cpt_can_enable_serr_int(dev))
			return;

		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
	} else {
		uint32_t tmp = I915_READ(SERR_INT);
		bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);

		/* Change the state _after_ we've read out the current one. */
		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

		if (!was_enabled &&
		    (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
			DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
				      transcoder_name(pch_transcoder));
		}
	}
}
/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, due to the fact that there's just one interrupt
 * mask/enable bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->cpu_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN8(dev))
		broadwell_set_fifo_underrun_reporting(dev, pipe, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}
/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's
 * just one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	/*
	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
	 * has only one pch transcoder A that all pipes can use. To avoid racy
	 * pch transcoder -> pipe lookups from interrupt code simply store the
	 * underrun statistics in crtc A. Since we never expose this anywhere
	 * nor use it outside of the fifo underrun code here using the "wrong"
	 * crtc on LPT won't cause issues.
	 */

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->pch_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == mask)
		return;

	/* Enable the interrupt, clear any pending status */
	pipestat |= mask | (mask >> 16);
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}
void
i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == 0)
		return;

	pipestat &= ~mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}
/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_ENABLE);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_ENABLE);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}
static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}
/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct intel_crtc *intel_crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		const struct drm_display_mode *mode =
			&intel_crtc->config.adjusted_mode;

		vbl_start = mode->crtc_vblank_start * mode->crtc_htotal;
	} else {
		enum transcoder cpu_transcoder = (enum transcoder) pipe;
		u32 htotal;

		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;

		vbl_start *= htotal;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
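/*
 * Worked note on the cook-up above: the hardware frame counter ticks at
 * the start of active video, but vblank events fire at vblank start. If
 * the pixel counter has already reached vbl_start we are in the vblank
 * following frame N's active period, so (pixel >= vbl_start) bumps the
 * returned count by one, matching what the vblank interrupt for that
 * frame would observe.
 */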
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}
/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
static bool ilk_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t status;
	int reg;

	if (INTEL_INFO(dev)->gen >= 8) {
		status = GEN8_PIPE_VBLANK;
		reg = GEN8_DE_PIPE_ISR(pipe);
	} else if (INTEL_INFO(dev)->gen >= 7) {
		status = DE_PIPE_VBLANK_IVB(pipe);
		reg = DEISR;
	} else {
		status = DE_PIPE_VBLANK(pipe);
		reg = DEISR;
	}

	return __raw_i915_read32(dev_priv, reg) & status;
}
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
	int position;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (!intel_crtc->active) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		if (IS_GEN2(dev))
			position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
		else
			position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

		if (HAS_DDI(dev)) {
			/*
			 * On HSW HDMI outputs there seems to be a 2 line
			 * difference, whereas eDP has the normal 1 line
			 * difference that earlier platforms have. External
			 * DP is unknown. For now just check for the 2 line
			 * difference case on all output types on HSW+.
			 *
			 * This might misinterpret the scanline counter being
			 * one line too far along on eDP, but that's less
			 * dangerous than the alternative since that would lead
			 * the vblank timestamp code astray when it sees a
			 * scanline count before vblank_start during a vblank
			 * interrupt.
			 */
			in_vbl = ilk_pipe_in_vblank_locked(dev, pipe);
			if ((in_vbl && (position == vbl_start - 2 ||
					position == vbl_start - 1)) ||
			    (!in_vbl && (position == vbl_end - 2 ||
					 position == vbl_end - 1)))
				position = (position + 2) % vtotal;
		} else if (HAS_PCH_SPLIT(dev)) {
			/*
			 * The scanline counter increments at the leading edge
			 * of hsync, ie. it completely misses the active portion
			 * of the line. Fix up the counter at both edges of vblank
			 * to get a more accurate picture whether we're in vblank
			 * or not.
			 */
			in_vbl = ilk_pipe_in_vblank_locked(dev, pipe);
			if ((in_vbl && position == vbl_start - 1) ||
			    (!in_vbl && position == vbl_end - 1))
				position = (position + 1) % vtotal;
		} else {
			/*
			 * ISR vblank status bits don't work the way we'd want
			 * them to work on non-PCH platforms (for
			 * ilk_pipe_in_vblank_locked()), and there doesn't
			 * appear any other way to determine if we're currently
			 * in vblank.
			 *
			 * Instead let's assume that we're already in vblank if
			 * we got called from the vblank interrupt and the
			 * scanline counter value indicates that we're on the
			 * line just prior to vblank start. This should result
			 * in the correct answer, unless the vblank interrupt
			 * delivery really got delayed for almost exactly one
			 * full frame/field.
			 */
			if (flags & DRM_CALLED_FROM_VBLIRQ &&
			    position == vbl_start - 1) {
				position = (position + 1) % vtotal;

				/* Signal this correction as "applied". */
				ret |= 0x8;
			}
		}
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}
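/*
 * Illustrative example (hypothetical mode numbers) of the position
 * convention established above: with vbl_start = 1080 and
 * vbl_end = vtotal = 1125, a raw scanline of 1100 (inside vblank) becomes
 * 1100 - 1125 = -25, counting up towards 0 at vbl_end, while a raw
 * scanline of 100 (active video) stays 100 + (1125 - 1125) = 100,
 * counting up from vbl_end.
 */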
static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags, crtc,
						     &to_intel_crtc(crtc)->config.adjusted_mode);
}
static bool intel_hpd_irq_event(struct drm_device *dev,
				struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	if (old_status == connector->status)
		return false;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
		      connector->base.id,
		      drm_get_connector_name(connector),
		      drm_get_connector_status_name(old_status),
		      drm_get_connector_status_name(connector->status));
	return true;
}
/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)
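/*
 * The delay above is in milliseconds: 2*60*1000 ms = 2 minutes before a
 * storm-disabled HPD pin is considered for re-enabling (see the reenable
 * timer armed in i915_hotplug_work_func() below).
 */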
static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	unsigned long irqflags;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	/* HPD irq before everything is fully set up. */
	if (!dev_priv->enable_hotplug_processing)
		return;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 drm_get_connector_name(connector));
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      drm_get_connector_name(connector), intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_timer(&dev_priv->hotplug_reenable_timer,
			  jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}
static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}
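/*
 * Note on the seemingly inverted arithmetic above: on Ironlake a *lower*
 * delay value means a *higher* frequency, so ips.max_delay is numerically
 * the smallest value. Hence "busy" decrements the delay towards max_delay
 * and "idle" increments it towards min_delay.
 */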
static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring);

	wake_up_all(&ring->irq_queue);
	i915_queue_hangcheck(dev);
}
static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir;
	int new_delay, adj;

	spin_lock_irq(&dev_priv->irq_lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer code */
	snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~GEN6_PM_RPS_EVENTS);

	if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	adj = dev_priv->rps.last_adj;
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else
			adj = 1;
		new_delay = dev_priv->rps.cur_delay + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.rpe_delay)
			new_delay = dev_priv->rps.rpe_delay;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
			new_delay = dev_priv->rps.rpe_delay;
		else
			new_delay = dev_priv->rps.min_delay;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else
			adj = -1;
		new_delay = dev_priv->rps.cur_delay + adj;
	} else { /* unknown event */
		new_delay = dev_priv->rps.cur_delay;
	}

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay = clamp_t(int, new_delay,
			    dev_priv->rps.min_delay, dev_priv->rps.max_delay);
	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;

	if (IS_VALLEYVIEW(dev_priv->dev))
		valleyview_set_rps(dev_priv->dev, new_delay);
	else
		gen6_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}
/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	unsigned long flags;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}
static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}
static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}
static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}
static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
				       struct drm_i915_private *dev_priv,
				       u32 master_ctl)
{
	u32 rcs, bcs, vcs;
	uint32_t tmp = 0;
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(0));
		if (tmp) {
			ret = IRQ_HANDLED;
			rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
			if (rcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[RCS]);
			if (bcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[BCS]);
			I915_WRITE(GEN8_GT_IIR(0), tmp);
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & GEN8_GT_VCS1_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(1));
		if (tmp) {
			ret = IRQ_HANDLED;
			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[VCS]);
			I915_WRITE(GEN8_GT_IIR(1), tmp);
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(3));
		if (tmp) {
			ret = IRQ_HANDLED;
			vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[VECS]);
			I915_WRITE(GEN8_GT_IIR(3), tmp);
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	return ret;
}
#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

static inline void intel_hpd_irq_handler(struct drm_device *dev,
					 u32 hotplug_trigger,
					 const u32 *hpd)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	bool storm_detected = false;

	if (!hotplug_trigger)
		return;

	spin_lock(&dev_priv->irq_lock);
	for (i = 1; i < HPD_NUM_PINS; i++) {

		WARN_ONCE(hpd[i] & hotplug_trigger &&
			  dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED,
			  "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
			  hotplug_trigger, i, hpd[i]);

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		dev_priv->hpd_event_bits |= (1 << i);
		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
				      dev_priv->hpd_stats[i].hpd_cnt);
		}
	}

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock(&dev_priv->irq_lock);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
	schedule_work(&dev_priv->hotplug_work);
}
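/*
 * Storm detection sketch, summarizing the loop above: each interrupt on a
 * pin either restarts the HPD_STORM_DETECT_PERIOD (1000 ms) window with
 * cnt = 0, or increments cnt within the window. Once cnt exceeds
 * HPD_STORM_THRESHOLD (5), the pin is marked HPD_MARK_DISABLED and the
 * hotplug work switches that connector over to polling until the
 * reenable timer fires.
 */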
static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}
#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif
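/*
 * Note on the CRC ring buffer above: CIRC_SPACE() from <linux/circ_buf.h>
 * reports the free slots between head and tail, and the head advance
 * "(head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1)" only wraps correctly
 * because INTEL_PIPE_CRC_ENTRIES_NR is a power of two.
 */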
static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}
static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t res1, res2;

	if (INTEL_INFO(dev)->gen >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}
/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & GEN6_PM_RPS_EVENTS) {
		spin_lock(&dev_priv->irq_lock);
		dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
		snb_disable_pm_irq(dev_priv, pm_iir & GEN6_PM_RPS_EVENTS);
		spin_unlock(&dev_priv->irq_lock);

		queue_work(dev_priv->wq, &dev_priv->rps.work);
	}

	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
			DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
			i915_handle_error(dev_priv->dev, false);
		}
	}
}
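/*
 * Pairing note: the RPS bits masked here via snb_disable_pm_irq() are
 * unmasked again by gen6_pm_rps_work() once it has consumed
 * dev_priv->rps.pm_iir, so further RPS interrupts stay suppressed while
 * the work item for the current burst is still pending.
 */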
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	unsigned long irqflags;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];

	atomic_inc(&dev_priv->irq_received);

	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
				drm_handle_vblank(dev, pipe);

			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);
		}

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);

			intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);

			if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
				dp_aux_irq_handler(dev);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);

		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return ret;
}
static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 port);
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
}
static void ivb_err_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 err_int = I915_READ(GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
								  false))
				DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
						 pipe_name(pipe));
		}

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (IS_IVYBRIDGE(dev))
				ivb_pipe_crc_irq_handler(dev, pipe);
			else
				hsw_pipe_crc_irq_handler(dev, pipe);
		}
	}

	I915_WRITE(GEN7_ERR_INT, err_int);
}
static void cpt_serr_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n");

	I915_WRITE(SERR_INT, serr_int);
}
static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev);
}
static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe))
			drm_handle_vblank(dev, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
				DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
						 pipe_name(pipe));

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_rps_change_irq_handler(dev);
}
static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe i;

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev);

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev);

	for_each_pipe(i) {
		if (de_iir & (DE_PIPE_VBLANK_IVB(i)))
			drm_handle_vblank(dev, i);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE_IVB(i)) {
			intel_prepare_page_flip(dev, i);
			intel_finish_page_flip_plane(dev, i);
		}
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	/* We get interrupts on unclaimed registers, so check for this before we
	 * do any I915_{READ,WRITE}. */
	intel_uncore_check_errors(dev);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(dev)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		if (INTEL_INFO(dev)->gen >= 6)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		else
			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		if (INTEL_INFO(dev)->gen >= 7)
			ivb_display_irq_handler(dev, de_iir);
		else
			ilk_display_irq_handler(dev, de_iir);
		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
	}

	if (INTEL_INFO(dev)->gen >= 6) {
		u32 pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir) {
			gen6_rps_irq_handler(dev_priv, pm_iir);
			I915_WRITE(GEN6_PMIIR, pm_iir);
			ret = IRQ_HANDLED;
		}
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	if (!HAS_PCH_NOP(dev)) {
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}

	return ret;
}
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl;
	irqreturn_t ret = IRQ_NONE;
	uint32_t tmp = 0;
	enum pipe pipe;

	atomic_inc(&dev_priv->irq_received);

	master_ctl = I915_READ(GEN8_MASTER_IRQ);
	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
	if (!master_ctl)
		return IRQ_NONE;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		tmp = I915_READ(GEN8_DE_MISC_IIR);
		if (tmp & GEN8_DE_MISC_GSE)
			intel_opregion_asle_intr(dev);
		else if (tmp)
			DRM_ERROR("Unexpected DE Misc interrupt\n");
		else
			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");

		if (tmp) {
			I915_WRITE(GEN8_DE_MISC_IIR, tmp);
			ret = IRQ_HANDLED;
		}
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		tmp = I915_READ(GEN8_DE_PORT_IIR);
		if (tmp & GEN8_AUX_CHANNEL_A)
			dp_aux_irq_handler(dev);
		else if (tmp)
			DRM_ERROR("Unexpected DE Port interrupt\n");
		else
			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");

		if (tmp) {
			I915_WRITE(GEN8_DE_PORT_IIR, tmp);
			ret = IRQ_HANDLED;
		}
	}

	for_each_pipe(pipe) {
		uint32_t pipe_iir;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
		if (pipe_iir & GEN8_PIPE_VBLANK)
			drm_handle_vblank(dev, pipe);

		if (pipe_iir & GEN8_PIPE_FLIP_DONE) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}

		if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
			hsw_pipe_crc_irq_handler(dev, pipe);

		if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
								  false))
				DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
						 pipe_name(pipe));
		}

		if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
			DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
				  pipe_name(pipe),
				  pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
		}

		if (pipe_iir) {
			ret = IRQ_HANDLED;
			I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
		} else
			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
	}

	if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev, pch_iir);

		if (pch_iir) {
			ret = IRQ_HANDLED;
			I915_WRITE(SDEIIR, pch_iir);
		}
	}

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return ret;
}
static void i915_error_wake_up(struct drm_i915_private *dev_priv,
			       bool reset_completed)
{
	struct intel_ring_buffer *ring;
	int i;

	/*
	 * Notify all waiters for GPU completion events that reset state has
	 * been changed, and that they need to restart their wait after
	 * checking for potential errors (and bail out to drop locks if there is
	 * a gpu reset pending so that i915_error_work_func can acquire them).
	 */

	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
	for_each_ring(ring, dev_priv, i)
		wake_up_all(&ring->irq_queue);

	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
	wake_up_all(&dev_priv->pending_flip_queue);

	/*
	 * Signal tasks blocked in i915_gem_wait_for_error that the pending
	 * reset state is cleared.
	 */
	if (reset_completed)
		wake_up_all(&dev_priv->gpu_error.reset_queue);
}
/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_error_work_func(struct work_struct *work)
{
	struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
						    work);
	drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
						    gpu_error);
	struct drm_device *dev = dev_priv->dev;
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
	int ret;

	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);

	/*
	 * Note that there's only one work item which does gpu resets, so we
	 * need not worry about concurrent gpu resets potentially incrementing
	 * error->reset_counter twice. We only need to take care of another
	 * racing irq/hangcheck declaring the gpu dead for a second time. A
	 * quick check for that is good enough: schedule_work ensures the
	 * correct ordering between hang detection and this work item, and since
	 * the reset in-progress bit is only ever set by code outside of this
	 * work we don't need to worry about any other races.
	 */
	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
				   reset_event);

		/*
		 * All state reset _must_ be completed before we update the
		 * reset counter, for otherwise waiters might miss the reset
		 * pending state and not properly drop locks, resulting in
		 * deadlocks with the reset work.
		 */
		ret = i915_reset(dev);

		intel_display_handle_reset(dev);

		if (ret == 0) {
			/*
			 * After all the gem state is reset, increment the reset
			 * counter and wake up everyone waiting for the reset to
			 * complete.
			 *
			 * Since unlock operations are a one-sided barrier only,
			 * we need to insert a barrier here to order any seqno
			 * updates before
			 * the counter increment.
			 */
			smp_mb__before_atomic_inc();
			atomic_inc(&dev_priv->gpu_error.reset_counter);

			kobject_uevent_env(&dev->primary->kdev->kobj,
					   KOBJ_CHANGE, reset_done_event);
		} else {
			atomic_set_mask(I915_WEDGED, &error->reset_counter);
		}

		/*
		 * Note: The wake_up also serves as a memory barrier so that
		 * waiters see the update value of the reset counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, true);
	}
}
static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t instdone[I915_NUM_INSTDONE_REG];
	u32 eir = I915_READ(EIR);
	int pipe, i;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	i915_get_extra_instdone(dev, instdone);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}
/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog. Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs. Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	i915_capture_error_state(dev);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
				&dev_priv->gpu_error.reset_counter);

		/*
		 * Wakeup waiting processes so that the reset work function
		 * i915_error_work_func doesn't deadlock trying to grab various
		 * locks. By bumping the reset counter first, the woken
		 * processes will see a reset in progress and back off,
		 * releasing their locks and then wait for the reset completion.
		 * We must do this for _all_ gpu waiters that might hold locks
		 * that the reset work needs to acquire.
		 *
		 * Note: The wake_up serves as the required memory barrier to
		 * ensure that the waiters see the updated value of the reset
		 * counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, false);
	}

	/*
	 * Our reset work can grab modeset locks (since it needs to reset the
	 * state of outstanding pageflips). Hence it must not be run on our own
	 * dev_priv->wq work queue for otherwise the flush_work in the pageflip
	 * code will deadlock.
	 */
	schedule_work(&dev_priv->gpu_error.work);
}
static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	struct intel_unpin_work *work;
	unsigned long flags;
	bool stall_detected;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	if (work == NULL ||
	    atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
	    !work->enable_stall_check) {
		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
	obj = work->pending_flip_obj;
	if (INTEL_INFO(dev)->gen >= 4) {
		int dspsurf = DSPSURF(intel_crtc->plane);
		stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
					i915_gem_obj_ggtt_offset(obj);
	} else {
		int dspaddr = DSPADDR(intel_crtc->plane);
		stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
							crtc->y * crtc->fb->pitches[0] +
							crtc->x * crtc->fb->bits_per_pixel/8);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (stall_detected) {
		DRM_DEBUG_DRIVER("Pageflip stall detected\n");
		intel_prepare_page_flip(dev, intel_crtc->plane);
	}
}
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_ENABLE);

	/* maintain vblank delivery even in deep C-states */
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
						     DE_PIPE_VBLANK(pipe);

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 imr;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	imr = I915_READ(VLV_IMR);
	if (pipe == PIPE_A)
		imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
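/*
 * Explanatory note on the two-level unmasking above (a sketch of the assumed
 * VLV interrupt routing, not driver code): pipe events pass through both the
 * per-pipe PIPESTAT enable bits and the top-level VLV_IMR mask, so a vblank
 * only reaches the CPU when PIPE_START_VBLANK_INTERRUPT_ENABLE is set *and*
 * the matching I915_DISPLAY_PIPE_*_VBLANK_INTERRUPT bit is clear in VLV_IMR.
 * The disable path below undoes both in the opposite order.
 */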
static int gen8_enable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));

	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_ENABLE |
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
						     DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 imr;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	imr = I915_READ(VLV_IMR);
	if (pipe == PIPE_A)
		imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static void gen8_disable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static u32
ring_last_seqno(struct intel_ring_buffer *ring)
{
	return list_entry(ring->request_list.prev,
			  struct drm_i915_gem_request, list)->seqno;
}

static bool
ring_idle(struct intel_ring_buffer *ring, u32 seqno)
{
	return (list_empty(&ring->request_list) ||
		i915_seqno_passed(seqno, ring_last_seqno(ring)));
}
static struct intel_ring_buffer *
semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u32 cmd, ipehr, acthd, acthd_min;

	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
	if ((ipehr & ~(0x3 << 16)) !=
	    (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
		return NULL;

	/* ACTHD is likely pointing to the dword after the actual command,
	 * so scan backwards until we find the MBOX.
	 */
	acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
	acthd_min = max((int)acthd - 3 * 4, 0);
	do {
		cmd = ioread32(ring->virtual_start + acthd);
		if (cmd == ipehr)
			break;

		acthd -= 4;
		if (acthd < acthd_min)
			return NULL;
	} while (1);

	*seqno = ioread32(ring->virtual_start + acthd + 4) + 1;
	return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
}
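/*
 * Layout sketch the scan above assumes (reconstruction from the visible
 * constants, not authoritative documentation): a semaphore wait is a
 * two-dword command, the MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
 * MI_SEMAPHORE_REGISTER dword followed by the seqno being compared against.
 * Once the MBOX dword is located at acthd, the seqno operand sits at
 * acthd + 4 (plus one, since the wait completes on the *next* seqno), and
 * bit 17 of IPEHR selects which of the other two rings is the signaller.
 */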
static int semaphore_passed(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_ring_buffer *signaller;
	u32 seqno, ctl;

	ring->hangcheck.deadlock = true;

	signaller = semaphore_waits_for(ring, &seqno);
	if (signaller == NULL || signaller->hangcheck.deadlock)
		return -1;

	/* cursory check for an unkickable deadlock */
	ctl = I915_READ_CTL(signaller);
	if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
		return -1;

	return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno);
}
static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
	struct intel_ring_buffer *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		ring->hangcheck.deadlock = false;
}
static enum intel_ring_hangcheck_action
ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	if (ring->hangcheck.acthd != acthd)
		return HANGCHECK_ACTIVE;

	if (IS_GEN2(dev))
		return HANGCHECK_HUNG;

	/* Is the chip hanging on a WAIT_FOR_EVENT?
	 * If so we can simply poke the RB_WAIT bit
	 * and break the hang. This should work on
	 * all but the second generation chipsets.
	 */
	tmp = I915_READ_CTL(ring);
	if (tmp & RING_WAIT) {
		DRM_ERROR("Kicking stuck wait on %s\n",
			  ring->name);
		i915_handle_error(dev, false);
		I915_WRITE_CTL(ring, tmp);
		return HANGCHECK_KICK;
	}

	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
		switch (semaphore_passed(ring)) {
		default:
			return HANGCHECK_HUNG;
		case 1:
			DRM_ERROR("Kicking stuck semaphore on %s\n",
				  ring->name);
			i915_handle_error(dev, false);
			I915_WRITE_CTL(ring, tmp);
			return HANGCHECK_KICK;
		case 0:
			return HANGCHECK_WAIT;
		}
	}

	return HANGCHECK_HUNG;
}
/**
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. We keep track per ring seqno progress and
 * if there is no progress, the hangcheck score for that ring is increased.
 * Further, acthd is inspected to see if the ring is stuck. In the stuck case
 * we kick the ring. If we see no progress on three subsequent calls
 * we assume the chip is wedged and try to fix it by resetting the chip.
 */
static void i915_hangcheck_elapsed(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;
	int busy_count = 0, rings_hung = 0;
	bool stuck[I915_NUM_RINGS] = { 0 };
#define BUSY 1
#define KICK 5
#define HUNG 20
#define FIRE 30

	if (!i915_enable_hangcheck)
		return;

	for_each_ring(ring, dev_priv, i) {
		u32 seqno, acthd;
		bool busy = true;

		semaphore_clear_deadlocks(dev_priv);

		seqno = ring->get_seqno(ring, false);
		acthd = intel_ring_get_active_head(ring);

		if (ring->hangcheck.seqno == seqno) {
			if (ring_idle(ring, seqno)) {
				ring->hangcheck.action = HANGCHECK_IDLE;

				if (waitqueue_active(&ring->irq_queue)) {
					/* Issue a wake-up to catch stuck h/w. */
					if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
						if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
							DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
								  ring->name);
						else
							DRM_INFO("Fake missed irq on %s\n",
								 ring->name);
						wake_up_all(&ring->irq_queue);
					}
					/* Safeguard against driver failure */
					ring->hangcheck.score += BUSY;
				} else
					busy = false;
			} else {
				/* We always increment the hangcheck score
				 * if the ring is busy and still processing
				 * the same request, so that no single request
				 * can run indefinitely (such as a chain of
				 * batches). The only time we do not increment
				 * the hangcheck score on this ring is if this
				 * ring is in a legitimate wait for another
				 * ring. In that case the waiting ring is a
				 * victim and we want to be sure we catch the
				 * right culprit. Then every time we do kick
				 * the ring, add a small increment to the
				 * score so that we can catch a batch that is
				 * being repeatedly kicked and so responsible
				 * for stalling the machine.
				 */
				ring->hangcheck.action = ring_stuck(ring,
								    acthd);

				switch (ring->hangcheck.action) {
				case HANGCHECK_IDLE:
				case HANGCHECK_WAIT:
					break;
				case HANGCHECK_ACTIVE:
					ring->hangcheck.score += BUSY;
					break;
				case HANGCHECK_KICK:
					ring->hangcheck.score += KICK;
					break;
				case HANGCHECK_HUNG:
					ring->hangcheck.score += HUNG;
					stuck[i] = true;
					break;
				}
			}
		} else {
			ring->hangcheck.action = HANGCHECK_ACTIVE;

			/* Gradually reduce the count so that we catch DoS
			 * attempts across multiple batches.
			 */
			if (ring->hangcheck.score > 0)
				ring->hangcheck.score--;
		}

		ring->hangcheck.seqno = seqno;
		ring->hangcheck.acthd = acthd;
		busy_count += busy;
	}

	for_each_ring(ring, dev_priv, i) {
		if (ring->hangcheck.score > FIRE) {
			DRM_INFO("%s on %s\n",
				 stuck[i] ? "stuck" : "no progress",
				 ring->name);
			rings_hung++;
		}
	}

	if (rings_hung)
		return i915_handle_error(dev, true);

	if (busy_count)
		/* Reset timer in case the chip hangs without another request
		 * being added */
		i915_queue_hangcheck(dev);
}
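/*
 * Worked example of the scoring above (using the BUSY/KICK/HUNG/FIRE values
 * as reconstructed): a ring reporting HANGCHECK_HUNG on two consecutive
 * ticks accumulates 20 + 20 = 40 > FIRE and triggers a reset, whereas a
 * ring that merely needs kicking must be kicked seven times (7 * 5 = 35)
 * before it is declared responsible for the stall. A ring making progress
 * decays by one point per tick, so a brief stall is forgiven.
 */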
void i915_queue_hangcheck(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	if (!i915_enable_hangcheck)
		return;

	mod_timer(&dev_priv->gpu_error.hangcheck_timer,
		  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}
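/*
 * Timing sketch (assuming DRM_I915_HANGCHECK_JIFFIES corresponds to the
 * usual 1500ms hangcheck period): round_jiffies_up() aligns the expiry to
 * the next whole-jiffy boundary shared by other timers, so periodic
 * hangchecks batch with other wakeups instead of scattering, e.g.
 *
 *	mod_timer(t, round_jiffies_up(jiffies + msecs_to_jiffies(1500)));
 */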
static void ibx_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_NOP(dev))
		return;

	/* south display irq */
	I915_WRITE(SDEIMR, 0xffffffff);
	/*
	 * SDEIER is also touched by the interrupt handler to work around missed
	 * PCH interrupts. Hence we can't update it after the interrupt handler
	 * is enabled - instead we unconditionally enable all PCH interrupt
	 * sources here, but then only unmask them as needed with SDEIMR.
	 */
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);
}
static void gen5_gt_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* and GT */
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	if (INTEL_INFO(dev)->gen >= 6) {
		/* and PM */
		I915_WRITE(GEN6_PMIMR, 0xffffffff);
		I915_WRITE(GEN6_PMIER, 0x0);
		POSTING_READ(GEN6_PMIER);
	}
}
static void ironlake_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(HWSTAM, 0xeffe);

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	POSTING_READ(DEIER);

	gen5_gt_irq_preinstall(dev);

	ibx_irq_preinstall(dev);
}
static void valleyview_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	/* VLV magic */
	I915_WRITE(VLV_IMR, 0);
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);

	/* and GT */
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIIR, I915_READ(GTIIR));

	gen5_gt_irq_preinstall(dev);

	I915_WRITE(DPINVGTT, 0xff);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}
static void gen8_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	/* IIR can theoretically queue up two events. Be paranoid */
#define GEN8_IRQ_INIT_NDX(type, which) do { \
		I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
		POSTING_READ(GEN8_##type##_IMR(which)); \
		I915_WRITE(GEN8_##type##_IER(which), 0); \
		I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
		POSTING_READ(GEN8_##type##_IIR(which)); \
		I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	} while (0)

#define GEN8_IRQ_INIT(type) do { \
		I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
		POSTING_READ(GEN8_##type##_IMR); \
		I915_WRITE(GEN8_##type##_IER, 0); \
		I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
		POSTING_READ(GEN8_##type##_IIR); \
		I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
	} while (0)

	GEN8_IRQ_INIT_NDX(GT, 0);
	GEN8_IRQ_INIT_NDX(GT, 1);
	GEN8_IRQ_INIT_NDX(GT, 2);
	GEN8_IRQ_INIT_NDX(GT, 3);

	for_each_pipe(pipe) {
		GEN8_IRQ_INIT_NDX(DE_PIPE, pipe);
	}

	GEN8_IRQ_INIT(DE_PORT);
	GEN8_IRQ_INIT(DE_MISC);
#undef GEN8_IRQ_INIT
#undef GEN8_IRQ_INIT_NDX

	POSTING_READ(GEN8_PCU_IIR);

	ibx_irq_preinstall(dev);
}
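/*
 * Why IIR is cleared twice in the macros above (a sketch of the assumed
 * hardware behaviour, per the "queue up two events" note): IIR latches one
 * event and can hold a second one pending behind it, so a single
 * write-to-clear may immediately re-assert the bit from the queued event.
 * Writing 0xffffffff twice, with a posting read in between to flush the
 * first write, drains both slots before interrupts are enabled.
 */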
static void ibx_hpd_irq_setup(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *intel_encoder;
	u32 hotplug_irqs, hotplug, enabled_irqs = 0;

	if (HAS_PCH_IBX(dev)) {
		hotplug_irqs = SDE_HOTPLUG_MASK;
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
	} else {
		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
	}

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec)
	 *
	 * This register is the same on all known PCH chips.
	 */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
static void ibx_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 mask;

	if (HAS_PCH_NOP(dev))
		return;

	if (HAS_PCH_IBX(dev)) {
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
	} else {
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;

		I915_WRITE(SERR_INT, I915_READ(SERR_INT));
	}

	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	I915_WRITE(SDEIMR, ~mask);
}
static void gen5_gt_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pm_irqs, gt_irqs;

	pm_irqs = gt_irqs = 0;

	dev_priv->gt_irq_mask = ~0;
	if (HAS_L3_DPF(dev)) {
		/* L3 parity interrupt is always unmasked. */
		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
		gt_irqs |= GT_PARITY_ERROR(dev);
	}

	gt_irqs |= GT_RENDER_USER_INTERRUPT;
	if (IS_GEN5(dev)) {
		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
			   ILK_BSD_USER_INTERRUPT;
	} else {
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
	}

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	I915_WRITE(GTIER, gt_irqs);
	POSTING_READ(GTIER);

	if (INTEL_INFO(dev)->gen >= 6) {
		pm_irqs |= GEN6_PM_RPS_EVENTS;

		if (HAS_VEBOX(dev))
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;

		dev_priv->pm_irq_mask = 0xffffffff;
		I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
		I915_WRITE(GEN6_PMIER, pm_irqs);
		POSTING_READ(GEN6_PMIER);
	}
}
static int ironlake_irq_postinstall(struct drm_device *dev)
{
	unsigned long irqflags;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 display_mask, extra_mask;

	if (INTEL_INFO(dev)->gen >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
				DE_PLANEB_FLIP_DONE_IVB |
				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);

		I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
				DE_AUX_CHANNEL_A |
				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
				DE_POISON);
		extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
			     DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
	}

	dev_priv->irq_mask = ~display_mask;

	/* should always be able to generate irqs */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER, display_mask | extra_mask);
	POSTING_READ(DEIER);

	gen5_gt_irq_postinstall(dev);

	ibx_irq_postinstall(dev);

	if (IS_IRONLAKE_M(dev)) {
		/* Enable PCU event interrupts
		 *
		 * spinlocking not required here for correctness since interrupt
		 * setup is guaranteed to run in single-threaded context. But we
		 * need it to make the assert_spin_locked happy. */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
	}

	return 0;
}
static int valleyview_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV |
		PIPE_CRC_DONE_ENABLE;
	unsigned long irqflags;

	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
	enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	/*
	 * Leave vblank interrupts masked initially. enable/disable will
	 * toggle them based on usage.
	 */
	dev_priv->irq_mask = (~enable_mask) |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, enable_mask);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(PIPESTAT(0), 0xffff);
	I915_WRITE(PIPESTAT(1), 0xffff);
	POSTING_READ(VLV_IER);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, PIPE_A, pipestat_enable);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE);
	i915_enable_pipestat(dev_priv, PIPE_B, pipestat_enable);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IIR, 0xffffffff);

	gen5_gt_irq_postinstall(dev);

	/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

	return 0;
}
static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
	int i;

	/* These are interrupts we'll toggle with the ring mask register */
	uint32_t gt_interrupts[] = {
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
		0,
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT
		};

	for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++) {
		u32 tmp = I915_READ(GEN8_GT_IIR(i));
		if (tmp)
			DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n",
				  i, tmp);
		I915_WRITE(GEN8_GT_IMR(i), ~gt_interrupts[i]);
		I915_WRITE(GEN8_GT_IER(i), gt_interrupts[i]);
	}
	POSTING_READ(GEN8_GT_IER(0));
}
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t de_pipe_masked = GEN8_PIPE_FLIP_DONE |
		GEN8_PIPE_CDCLK_CRC_DONE |
		GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
	uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
		GEN8_PIPE_FIFO_UNDERRUN;
	int pipe;
	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;

	for_each_pipe(pipe) {
		u32 tmp = I915_READ(GEN8_DE_PIPE_IIR(pipe));
		if (tmp)
			DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n",
				  pipe, tmp);
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		I915_WRITE(GEN8_DE_PIPE_IER(pipe), de_pipe_enables);
	}
	POSTING_READ(GEN8_DE_PIPE_ISR(0));

	I915_WRITE(GEN8_DE_PORT_IMR, ~GEN8_AUX_CHANNEL_A);
	I915_WRITE(GEN8_DE_PORT_IER, GEN8_AUX_CHANNEL_A);
	POSTING_READ(GEN8_DE_PORT_IER);
}
static int gen8_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	gen8_gt_irq_postinstall(dev_priv);
	gen8_de_irq_postinstall(dev_priv);

	ibx_irq_postinstall(dev);

	I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}
static void gen8_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(GEN8_MASTER_IRQ, 0);

#define GEN8_IRQ_FINI_NDX(type, which) do { \
		I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
		I915_WRITE(GEN8_##type##_IER(which), 0); \
		I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	} while (0)

#define GEN8_IRQ_FINI(type) do { \
		I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
		I915_WRITE(GEN8_##type##_IER, 0); \
		I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
	} while (0)

	GEN8_IRQ_FINI_NDX(GT, 0);
	GEN8_IRQ_FINI_NDX(GT, 1);
	GEN8_IRQ_FINI_NDX(GT, 2);
	GEN8_IRQ_FINI_NDX(GT, 3);

	for_each_pipe(pipe) {
		GEN8_IRQ_FINI_NDX(DE_PIPE, pipe);
	}

	GEN8_IRQ_FINI(DE_PORT);
	GEN8_IRQ_FINI(DE_MISC);
#undef GEN8_IRQ_FINI
#undef GEN8_IRQ_FINI_NDX

	POSTING_READ(GEN8_PCU_IIR);
}
static void valleyview_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}
static void ironlake_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!dev_priv)
		return;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	I915_WRITE(HWSTAM, 0xffffffff);

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	if (IS_GEN7(dev))
		I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));

	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	I915_WRITE(GTIIR, I915_READ(GTIIR));

	if (HAS_PCH_NOP(dev))
		return;

	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
		I915_WRITE(SERR_INT, I915_READ(SERR_INT));
}
static void i8xx_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}
static int i8xx_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
/*
 * Returns true when a page flip has completed.
 */
static bool i8xx_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!drm_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		return false;

	intel_prepare_page_flip(dev, plane);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ16(ISR) & flip_pending)
		return false;

	intel_finish_page_flip(dev, pipe);

	return true;
}
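/*
 * Timeline sketch of the FlipDone detection described above (assuming the
 * usual latched-IIR / live-ISR split):
 *
 *	MI_DISPLAY_FLIP queued:   IIR pending-flip = 1, ISR pending-flip = 1
 *	flip completes in hw:                           ISR pending-flip = 0
 *	next vblank interrupt:    IIR still 1, ISR 0 -> flip is done
 *
 * which is why the handler bails out while ISR still shows the flip pending.
 */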
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	unsigned long irqflags;
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ16(IIR);
	if (iir == 0)
		return IRQ_NONE;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		i915_update_dri1_breadcrumb(dev);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(pipe) {
			int plane = pipe;
			if (IS_MOBILE(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i8xx_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);
		}

		iir = new_iir;
	}

	return IRQ_HANDLED;
}
static void i8xx_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}
static void i915_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
static int i915_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	unsigned long irqflags;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_enable_asle_pipestat(dev);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
/*
 * Returns true when a page flip has completed.
 */
static bool i915_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!drm_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		return false;

	intel_prepare_page_flip(dev, plane);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ(ISR) & flip_pending)
		return false;

	intel_finish_page_flip(dev, pipe);

	return true;
}
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe, ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		/* Consume port. Then clear IIR or we'll miss events */
		if ((I915_HAS_HOTPLUG(dev)) &&
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);

			intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			POSTING_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(pipe) {
			int plane = pipe;
			if (IS_MOBILE(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	i915_update_dri1_breadcrumb(dev);

	return ret;
}
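/*
 * Note on the "iir & ~flip_mask" writes in the handler above (explanatory
 * sketch): the plane flip-pending bits are deliberately left asserted in
 * IIR until i915_handle_vblank() has observed the flip completing; only
 * then is the bit dropped from flip_mask so a later pass can acknowledge
 * it. This keeps a pending-flip event from being acked before the vblank
 * that retires it has been processed.
 */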
static void i915_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}
static void i965_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
static int i965_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 error_mask;
	unsigned long irqflags;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev);

	return 0;
}
static void i915_hpd_irq_setup(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *intel_encoder;
	u32 hotplug_en;

	assert_spin_locked(&dev_priv->irq_lock);

	if (I915_HAS_HOTPLUG(dev)) {
		hotplug_en = I915_READ(PORT_HOTPLUG_EN);
		hotplug_en &= ~HOTPLUG_INT_EN_MASK;
		/* Note HDMI and DP share hotplug bits */
		/* enable bits are the same for all generations */
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
		/* Programming the CRT detection parameters tends
		   to generate a spurious hotplug event about three
		   seconds later. So just do it once.
		*/
		if (IS_G4X(dev))
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
		hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

		/* Ignore TV since it's buggy */
		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}
}
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);

	for (;;) {
		bool blc_event = false;

		irq_received = (iir & ~flip_mask) != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
								HOTPLUG_INT_STATUS_G4X :
								HOTPLUG_INT_STATUS_I915);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);

			intel_hpd_irq_handler(dev, hotplug_trigger,
					      IS_G4X(dev) ? hpd_status_g4x : hpd_status_i915);

			if (IS_G4X(dev) &&
			    (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X))
				dp_aux_irq_handler(dev);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);

	return ret;
}
static void i965_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}
static void i915_reenable_hotplug_timer_func(unsigned long data)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	unsigned long irqflags;
	int i;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
		struct drm_connector *connector;

		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
			continue;

		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;

		list_for_each_entry(connector, &mode_config->connector_list, head) {
			struct intel_connector *intel_connector = to_intel_connector(connector);

			if (intel_connector->encoder->hpd_pin == i) {
				if (connector->polled != intel_connector->polled)
					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
							 drm_get_connector_name(connector));
				connector->polled = intel_connector->polled;
				if (!connector->polled)
					connector->polled = DRM_CONNECTOR_POLL_HPD;
			}
		}
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
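/*
 * Flow sketch (assuming the interrupt-storm detection in the HPD irq
 * handling elsewhere in this file): when a pin generates an interrupt storm
 * it is marked HPD_DISABLED and its connectors fall back to polled
 * detection; hotplug_reenable_timer then fires after a grace period and the
 * function above promotes the pin back to HPD_ENABLED and restores
 * interrupt-driven detection via ->hpd_irq_setup().
 */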
void intel_irq_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
		    i915_hangcheck_elapsed,
		    (unsigned long) dev);
	setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
		    (unsigned long) dev_priv);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	if (IS_GEN2(dev)) {
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
		dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
	}

	if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_GEN8(dev)) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_preinstall;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}
void intel_hpd_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	unsigned long irqflags;
	int i;

	for (i = 1; i < HPD_NUM_PINS; i++) {
		dev_priv->hpd_stats[i].hpd_cnt = 0;
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
	}
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct intel_connector *intel_connector = to_intel_connector(connector);
		connector->polled = intel_connector->polled;
		if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
	}

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked checks happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
/* Disable interrupts so we can allow Package C8+. */
void hsw_pc8_disable_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	dev_priv->pc8.regsave.deimr = I915_READ(DEIMR);
	dev_priv->pc8.regsave.sdeimr = I915_READ(SDEIMR);
	dev_priv->pc8.regsave.gtimr = I915_READ(GTIMR);
	dev_priv->pc8.regsave.gtier = I915_READ(GTIER);
	dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR);

	ironlake_disable_display_irq(dev_priv, 0xffffffff);
	ibx_disable_display_interrupt(dev_priv, 0xffffffff);
	ilk_disable_gt_irq(dev_priv, 0xffffffff);
	snb_disable_pm_irq(dev_priv, 0xffffffff);

	dev_priv->pc8.irqs_disabled = true;

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
/* Restore interrupts so we can recover from Package C8+. */
void hsw_pc8_restore_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	uint32_t val;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	val = I915_READ(DEIMR);
	WARN(val != 0xffffffff, "DEIMR is 0x%08x\n", val);

	val = I915_READ(SDEIMR);
	WARN(val != 0xffffffff, "SDEIMR is 0x%08x\n", val);

	val = I915_READ(GTIMR);
	WARN(val != 0xffffffff, "GTIMR is 0x%08x\n", val);

	val = I915_READ(GEN6_PMIMR);
	WARN(val != 0xffffffff, "GEN6_PMIMR is 0x%08x\n", val);

	dev_priv->pc8.irqs_disabled = false;

	ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr);
	ibx_enable_display_interrupt(dev_priv, ~dev_priv->pc8.regsave.sdeimr);
	ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr);
	snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr);
	I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}